diff --git a/ckpts/universal/global_step120/zero/17.attention.query_key_value.weight/exp_avg.pt b/ckpts/universal/global_step120/zero/17.attention.query_key_value.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..f5e9e41afd2560ca8b8de691a5597e561cd83c62 --- /dev/null +++ b/ckpts/universal/global_step120/zero/17.attention.query_key_value.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:91cf845b100cc8fd139d8148727274a6239222275c34f11b14b9b2b53b2c3c45 +size 50332828 diff --git a/ckpts/universal/global_step120/zero/17.attention.query_key_value.weight/fp32.pt b/ckpts/universal/global_step120/zero/17.attention.query_key_value.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..047cb1401e83356532ea0a04f40746c49d071b61 --- /dev/null +++ b/ckpts/universal/global_step120/zero/17.attention.query_key_value.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8ae855954f7aff680421fa2f9ce5d93710e462a53c85aa29d7b1df053fa2c766 +size 50332749 diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_shard/__init__.py b/venv/lib/python3.10/site-packages/torch/distributed/_shard/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..34539d633f8fa0532e31a2be38719eaa527a2306 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/_shard/__init__.py @@ -0,0 +1,6 @@ +from .api import ( + _shard_tensor, + load_with_process_group, + shard_module, + shard_parameter, +) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_shard/__pycache__/_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/_shard/__pycache__/_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..34080d20f84ab515edbe5aa23c377c665e7132e1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/_shard/__pycache__/_utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_shard/__pycache__/common_op_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/_shard/__pycache__/common_op_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..419f38337ad46fa61310d515c69a8c573d2c6cd0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/_shard/__pycache__/common_op_utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_shard/__pycache__/metadata.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/_shard/__pycache__/metadata.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..10cde12f0dcb96e423ff263c9659d6df4be99169 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/_shard/__pycache__/metadata.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_shard/__pycache__/op_registry_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/_shard/__pycache__/op_registry_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..323d30262d0ccc828e4833386efb48e0f5f02ce9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/_shard/__pycache__/op_registry_utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_shard/_utils.py b/venv/lib/python3.10/site-packages/torch/distributed/_shard/_utils.py new 
file mode 100644 index 0000000000000000000000000000000000000000..26305b99cce306b2b6770f3731d6b1c276e8392c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/_shard/_utils.py @@ -0,0 +1,28 @@ +import torch +from torch.distributed._shard.metadata import ShardMetadata +from typing import Sequence + +DEPRECATE_MSG = "Please use DTensor instead and we are deprecating ShardedTensor." + +def narrow_tensor_by_index(tensor: torch.Tensor, offsets: Sequence[int], sizes: Sequence[int]) -> torch.Tensor: + """ + Narrow the tensor according to ``offsets`` and ``sizes``. + """ + narrowed_tensor = tensor + for idx, (offset, size) in enumerate(zip(offsets, sizes)): + if size < tensor.size(idx): + # Reshape to get shard for this rank and we don't want autograd + # recording here for the narrow op and 'local_shard' should be a + # leaf variable in the autograd graph. + narrowed_tensor = narrowed_tensor.narrow( + idx, + offset, + size + ) + return narrowed_tensor + +def narrow_tensor(tensor: torch.Tensor, metadata: ShardMetadata) -> torch.Tensor: + """ + Narrow the tensor according to the metadata + """ + return narrow_tensor_by_index(tensor, metadata.shard_offsets, metadata.shard_sizes) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_shard/api.py b/venv/lib/python3.10/site-packages/torch/distributed/_shard/api.py new file mode 100644 index 0000000000000000000000000000000000000000..5f17237ab50b965e09a51251fa1cb8b901fd22ca --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/_shard/api.py @@ -0,0 +1,290 @@ +from contextlib import contextmanager +from typing import Optional +import torch +import torch.distributed as dist +import torch.nn as nn +from torch.distributed import distributed_c10d +from torch.distributed._shard.sharded_tensor import ( + ShardedTensor, +) +from .sharding_spec import ( + ShardingSpec, + ChunkShardingSpec +) +from .sharding_plan import ( + ShardingPlan +) +from .sharder import Sharder + +def _shard_tensor( + tensor: torch.Tensor, sharding_spec: ShardingSpec, src_rank=0, process_group=None +) -> ShardedTensor: + """ + Given a :class:`torch.Tensor`, it shards that tensor according to the provided + ``sharding_spec``. ``src_rank`` denotes the source rank which would be + used as the ground truth of the data which would be scattered as shards + across the rest of the ranks. + + Args: + tensor (:class:`torch.Tensor`): Tensor needs to be sharded. + sharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`): The specification + describing how to shard the Tensor. + + Keyword args: + src_rank (int, optional): The source rank which is used as the ground truth of + the data for the parameter that would be sharded and scattered + across the rest of the ranks. + Default: 0. + process_group (ProcessGroup, optional): The process group to work on. If None, + the default process group will be used. + + Returns: + A :class:`ShardedTensor` sharded from the given tensor. + + .. warning:: + Only :class:`torch.distributed._shard.sharding_spec.ChunkShardingSpec` is + currently supported as the ``sharding_spec``. + """ + if not tensor.is_contiguous(): + raise ValueError('input tensor is not a contiguous Tensor') + + pg = process_group if process_group is not None else distributed_c10d._get_default_group() + world_size = dist.get_world_size(pg) + current_rank = dist.get_rank(pg) + + # Validate src_rank and sharding_spec are same across all ranks. 
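+    # Each rank contributes its (src_rank, sharding_spec) pair via
+    # all_gather_object; any rank that sees a differing value raises a
+    # ValueError below, so mismatched arguments fail fast instead of
+    # letting ranks diverge inside the later sharding collectives.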
+ gathered_list = [None] * world_size + dist.all_gather_object(gathered_list, (src_rank, sharding_spec), group=pg) + + for idx, entry in enumerate(gathered_list): + if src_rank != entry[0]: # type: ignore[index] + raise ValueError( + f'src_rank={src_rank} on rank: {current_rank} does not ' # type: ignore[index] + f'match with src_rank={entry[0]} on rank: {idx}') + if sharding_spec != entry[1]: # type: ignore[index] + raise ValueError( + f'sharding_spec={sharding_spec} on rank: {current_rank} does not ' # type: ignore[index] + f'match with sharding_spec={entry[1]} on rank: {idx}') + + st = sharding_spec.shard(tensor, src_rank=src_rank, process_group=process_group) + + return st + +def shard_parameter( + module: torch.nn.Module, + param_name: str, + sharding_spec: ShardingSpec, + src_rank=0, + process_group=None): + """ + Given a :class:`torch.nn.Module`, a ``param_name`` for a parameter in that + module, it shards that parameter according to the provided + ``sharding_spec``. ``src_rank`` denotes the source rank which would be + used as the ground truth of the data which would be scattered as shards + across the rest of the ranks. + + This method replaces ``module.param_name`` with a + :class:`torch.distributed._sharded_tensor.ShardedTensor` + + Args: + module (:class:`torch.nn.Module`): Module whose parameter needs to be sharded. + param_name (str): Name of the parameter of ``module`` that needs to be sharded. + sharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`): The specification + describing how to shard the Tensor. + + Keyword args: + src_rank (int, optional): The source rank which is used as the ground truth of + the data for the parameter that would be sharded and scattered + across the rest of the ranks. + Default: 0. + process_group (ProcessGroup, optional): The process group to work on. If None, + the default process group will be used. + + .. warning:: + Only :class:`torch.distributed._shard.sharding_spec.ChunkShardingSpec` is + currently supported as the ``sharding_spec``. + """ + # Perform some validation first. + if not hasattr(module, param_name): + raise AttributeError(f'{module._get_name()} has no attribute `{param_name}`') + + tensor = getattr(module, param_name) + if not isinstance(tensor, torch.Tensor): + raise ValueError(f'Expected {type(module).__name__}.{param_name} to be a Tensor, but found {type(tensor).__name__}') + + if not tensor.is_contiguous(): + raise ValueError(f'param: {param_name} is not a contiguous Tensor') + + st = _shard_tensor(tensor, sharding_spec, src_rank, process_group) + + # Replace param with ShardedTensor. + module.register_parameter(param_name, nn.Parameter(st)) + +# Tracks the current process group in the load context manager. +_CURRENT_PROCESS_GROUP: Optional[dist.ProcessGroup] = None + +@contextmanager +def load_with_process_group(process_group): + """ + Context manager to set the process group with which to load a ShardedTensor. + """ + global _CURRENT_PROCESS_GROUP + if _CURRENT_PROCESS_GROUP is not None: + raise RuntimeError( + 'ProcessGroup already set by previous "load_with_process_group" ' + 'context manager') + _CURRENT_PROCESS_GROUP = process_group + try: + yield process_group + finally: + _CURRENT_PROCESS_GROUP = None + +def _get_current_process_group(): + """ + Retrieves the current process group set by ``load_with_process_group``. + If not set, it just returns the default group. 
+ """ + global _CURRENT_PROCESS_GROUP + if _CURRENT_PROCESS_GROUP is None: + return distributed_c10d._get_default_group() + else: + return _CURRENT_PROCESS_GROUP + +def _reshard_output( + module: torch.nn.Module, + resharding_spec: ShardingSpec) -> torch.nn.Module: + """ + Hook a module with output resharding in the forward pass according + to the given ``resharding_spec``. + + Args: + module (:class:`torch.nn.Module`): Module whose output needs to be resharded. + resharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`): + The specification describing how the output of the module will be resharded. + + Returns: + A :class:`torch.nn.Module` object with reshard API hooked. + """ + def hook_func(_module, _input, output): + if isinstance(output, ShardedTensor): + return output.reshard(resharding_spec) + return output + module.register_forward_hook(hook_func) + return module + +def _collect_local_shard(module: torch.nn.Module) -> torch.nn.Module: + """ + Hook a module with local shards collection in the forward pass. + + This API is typically used to convert a sharded representation back to data parallel + representation. In particular, it returns the local tensor for this Shard. If the + size along the sharding dimension for the local tensor is 1, this dimension is removed + from the final result. For example a [4, 16] ShardedTensor across 4 ranks is typically + a local Tensor of size [16] across each rank and not [1, 16] across each rank. + + Args: + module (:class:`torch.nn.Module`): Module whose output is ShardedTensor and the + local tensor value needs to be returned. + + Returns: + A :class:`torch.nn.Module` object with collection API hooked. + """ + + def hook_func(_module, _input, output): + if isinstance(output, ShardedTensor): + local_tensor = output.local_tensor() + # Squeeze the # of dimensions manually, only applicable to ChunkShardingSpec + sharding_spec = output._sharding_spec + if isinstance(sharding_spec, ChunkShardingSpec) \ + and local_tensor.size(sharding_spec.dim) == 1: # type: ignore[attr-defined, arg-type] + local_tensor = local_tensor.squeeze( + output._sharding_spec.dim # type: ignore[attr-defined] + ) + return local_tensor + module.register_forward_hook(hook_func) + return module + +def shard_module( + module: nn.Module, + plan: ShardingPlan, + src_rank=0, + process_group=None +): + """ + Shards a given module according to the provided sharding `plan`. This method + first shards all the parameters according to the given sharding `plan`. Then if + `output_plan` and `return_local_tensor` are specified in the sharding `plan`, it + will tag the output of modules according `output_plan`, convert the module's + output back to data parallel according to `return_local_tensor`. + + Needs to be called on all ranks in an SPMD fashion. + + Args: + module (:class:`torch.nn.Module`): The module to apply sharding to + plan (:class:`torch.distributed._shard.sharding_plan.ShardingPlan`): + The ShardingPlan which specified param name to ShardingSpec to apply to + each parameter. + + Keyword args: + src_rank (int, optional): The source rank which is used as the ground truth of + the data for the module that would be sharded and scattered across the rest + of the ranks. + Default: 0. + process_group (ProcessGroup, optional): The process group to work on. If None, + the default process group will be used. 
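+
+    Example (illustrative sketch only; the module ``model``, the parameter
+    path ``"fc1.weight"`` and the two-GPU placements are assumed names, not
+    part of the documented API)::
+
+        >>> # xdoctest: +SKIP("requires an initialized process group")
+        >>> spec = ChunkShardingSpec(
+        >>>     dim=0,
+        >>>     placements=["rank:0/cuda:0", "rank:1/cuda:1"],
+        >>> )
+        >>> plan = ShardingPlan(plan={"fc1.weight": spec})
+        >>> shard_module(model, plan)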
+ """ + # record Sharder paths for sanity check on the plan to ensure items in the plan + # does not conflict with the submodule tree that the Sharder is working with + sharder_paths = [] + for name, spec in plan.plan.items(): + if isinstance(spec, Sharder): + sharder_paths.append(name) + + # shard the parameter according to the ShardingPlan + for name, spec in plan.plan.items(): + if isinstance(spec, ShardingSpec): + # if found a sharding spec, try to shard the parameter + module_path, _, param_name = name.rpartition(".") + + for sharder_path in sharder_paths: + if module_path.startswith(sharder_path): + raise RuntimeError(f"ShardingPlan is in-valid, trying to shard a parameter: {name}," + f" but there's already a Sharder entry for module {sharder_path}," + f" parameter sharding should not conflict with the submodule tree" + f" that a Sharder is working with!") + + mod = module.get_submodule(module_path) + shard_parameter( + mod, + param_name, + spec, + src_rank=src_rank, + process_group=process_group + ) + elif isinstance(spec, Sharder): + parent_mod_path, _, mod_name = name.rpartition(".") + if name == "": + raise KeyError("Module path must not be empty for custom sharder!") + mod = module.get_submodule(name) + parent_mod = module.get_submodule(parent_mod_path) + sharded_mod = spec.shard(mod) + # swap this submodule with the sharded module + parent_mod.mod_name = sharded_mod + else: + raise TypeError(f"Only `ShardingSpec` and `Sharder` are supported to shard '{name}'") + + # reshard output if there's an entry in `reshard_output` for this module + if plan.output_plan is not None: + for module_path, output_spec in plan.output_plan.items(): + if isinstance(output_spec, ShardingSpec): + mod = module.get_submodule(module_path) + _reshard_output(mod, output_spec) + else: + raise TypeError(f"Only `ShardingSpec` is supported as output_plan for '{module_path}'") + # convert the output back to data parallel for the modules appears in + # `return_local_tensor` of the plan, we will call `_collect_local_shard` + # to collect the local tensor for output of modules + if plan.return_local_tensor is not None: + for module_path in plan.return_local_tensor: + mod = module.get_submodule(module_path) + _collect_local_shard(mod) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_shard/common_op_utils.py b/venv/lib/python3.10/site-packages/torch/distributed/_shard/common_op_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..c426503161c7cc8425eff62864c5cd5fa834631d --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/_shard/common_op_utils.py @@ -0,0 +1,61 @@ +import torch +from torch.utils import _pytree as pytree +from typing import Optional + +def _basic_validation(op, args=(), kwargs=None): + """ + Common validation across all ops go in here. + """ + from torch.distributed._shard.sharded_tensor import ShardedTensor + + if len(args) == 0 and (kwargs is None or len(kwargs) == 0): + raise ValueError(f" No input for '{op.__name__}'!") + + # Validate types + has_distributed_tensor = False + + def is_distributed_tensor(e): + nonlocal has_distributed_tensor + if isinstance(e, ShardedTensor): + has_distributed_tensor = True + + pytree.tree_map_(is_distributed_tensor, args) + pytree.tree_map_(is_distributed_tensor, kwargs) + + if not has_distributed_tensor: + raise TypeError( + f"torch function '{op.__name__}', with args: {args} and " + f"kwargs: {kwargs} are called without any distributed tensor!" 
+ ) + + # Validate all distributed tensors use the same PG. + cur_pg: Optional[torch.distributed.ProcessGroup] = None + + def validate_pg(e): + nonlocal cur_pg + if isinstance(e, ShardedTensor): + if cur_pg is not None and e._process_group is not cur_pg: + raise RuntimeError( + 'All distributed tensors should use the ' + 'same ProcessGroup if used together in an op.' + ) + cur_pg = e._process_group + + pytree.tree_map_(validate_pg, args) + pytree.tree_map_(validate_pg, kwargs) + +def _register_default_op(op, decorator): + @decorator(op) + def tensor_default_op(types, args=(), kwargs=None, pg=None): + """ + Handles ``__torch_function__`` dispatch for the default tensor ops that + behave the same as ``torch.Tensor`` such as ``torch.Tensor.shape`` or + ``torch.Tensor.dtype``. We simply lower to the real op call with + DisableTorchFunctionSubclass context like ``torch.Tensor.__torch_function__`` + to avoid recursions. + """ + if kwargs is None: + kwargs = {} + + with torch._C.DisableTorchFunctionSubclass(): + return op(*args, **kwargs) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_shard/metadata.py b/venv/lib/python3.10/site-packages/torch/distributed/_shard/metadata.py new file mode 100644 index 0000000000000000000000000000000000000000..b7bae9e6664a6ef4bbbf9f0c52623a1f68c9649e --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/_shard/metadata.py @@ -0,0 +1,61 @@ +from dataclasses import dataclass +from typing import List, Union, Optional +from functools import reduce + +from torch.distributed.remote_device import _remote_device + +@dataclass +class ShardMetadata: + """ + Represents a shard of the overall Tensor including its + offsets, lengths and device placement. + + Args: + shard_offsets(List[int]): Offsets in the original tensor indicating + the start offsets for this shard. Should have the same rank as + the original tensor. + shard_sizes(List[int]): Integers indicating the size of each + dimension for this shard. Should have the same rank as the + original tensor. + placement(:class:`torch.distributed._remote_device`): + Specifies the placement of this shard. 
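+
+    Example (illustrative; the offsets, sizes and ``"rank:0/cuda:0"``
+    placement are made-up values)::
+
+        >>> # xdoctest: +SKIP("not distributed")
+        >>> ShardMetadata(
+        >>>     shard_offsets=[0, 0],
+        >>>     shard_sizes=[5, 5],
+        >>>     placement="rank:0/cuda:0",
+        >>> )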
+ """ + + __slots__ = ['shard_offsets', 'shard_sizes', 'placement'] + + shard_offsets: List[int] + shard_sizes: List[int] + placement: Optional[_remote_device] + + def __init__( + self, + shard_offsets: List[int], + shard_sizes: List[int], + placement: Optional[Union[str, _remote_device]] = None + ): + self.shard_offsets = shard_offsets + self.shard_sizes = shard_sizes + if isinstance(placement, str): + self.placement = _remote_device(placement) + else: + self.placement = placement + if len(self.shard_offsets) != len(self.shard_sizes): + raise ValueError( + f'shard_offsets and shard_sizes should have ' + f'the same number of elements, found {len(self.shard_offsets)} ' + f'and {self.shard_sizes} respectively') + + for i in range(len(self.shard_offsets)): + if self.shard_offsets[i] < 0: + raise ValueError('shard_offsets should be >=0') + if self.shard_sizes[i] < 0: + raise ValueError('shard_sizes should be >= 0') + + def __hash__(self): + def _hash_reduce(a, b): + return (a << 8) + hash(b) + + res = reduce(_hash_reduce, self.shard_offsets, 37) + res = reduce(_hash_reduce, self.shard_sizes, res) + res = _hash_reduce(res, self.placement) + return res diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_shard/op_registry_utils.py b/venv/lib/python3.10/site-packages/torch/distributed/_shard/op_registry_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..4febe841186a5f19d49dcb2aecee385255b4955a --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/_shard/op_registry_utils.py @@ -0,0 +1,35 @@ +import functools +from inspect import signature +from .common_op_utils import _basic_validation + +""" +Common utilities to register ops on ShardedTensor +and PartialTensor. +""" + +def _register_op(op, func, op_table): + """ + Performs basic validation and registers the provided op in the given + op_table. 
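+
+    ``func`` must accept exactly four parameters,
+    ``(types, args, kwargs, process_group)``; the signature check below
+    rejects any other callable.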
+ """ + if len(signature(func).parameters) != 4: + raise TypeError( + f'Custom sharded op function expects signature: ' + f'(types, args, kwargs, process_group), but received ' + f'signature: {signature(func)}') + + op_table[op] = func + +def _decorator_func(wrapped_func, op, op_table): + """ + Decorator function to register the given ``op`` in the provided + ``op_table`` + """ + + @functools.wraps(wrapped_func) + def wrapper(types, args, kwargs, process_group): + _basic_validation(op, args, kwargs) + return wrapped_func(types, args, kwargs, process_group) + + _register_op(op, wrapper, op_table) + return wrapper diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__init__.py b/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..152c287ee7036f71593a9d3e0aa7f5d5176a750e --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__init__.py @@ -0,0 +1,469 @@ +import functools +from typing import List, TYPE_CHECKING + +import torch + +if TYPE_CHECKING: + from torch.distributed._shard.sharding_spec import ShardingSpec +else: + ShardingSpec = "ShardingSpec" + +from .api import ( + _CUSTOM_SHARDED_OPS, + _SHARDED_OPS, + Shard, + ShardedTensorBase, + ShardedTensor, + ShardedTensorMetadata, + TensorProperties, +) +from .metadata import ShardMetadata # noqa: F401 +from torch.distributed._shard.op_registry_utils import _decorator_func + + +def empty(sharding_spec: ShardingSpec, + *size, + dtype=None, + layout=torch.strided, + requires_grad=False, + pin_memory=False, + memory_format=torch.contiguous_format, + process_group=None, + init_rrefs=False) -> ShardedTensor: + """ + Returns a :class:`ShardedTensor` filled with uninitialized data. + Needs to be called on all ranks in an SPMD fashion. + + Args: + sharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`): The specification + describing how to shard the Tensor. + size (int...): a sequence of integers defining the shape of the output + tensor. Can be a variable number of arguments or a collection like a list or tuple. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + pin_memory (bool, optional): If set, returned tensor would be allocated in + the pinned memory. Works only for CPU tensors. Default: ``False``. + memory_format (:class:`torch.memory_format`, optional): the desired memory format of + returned Tensor. Default: ``torch.contiguous_format``. + process_group (ProcessGroup, optional): The process group to work on. If None, + the default process group will be used. + init_rrefs (bool, optional): Whether or not to initialize + :class:`torch.distributed.rpc.RRef`s pointing to remote shards. + Need to initialize the RPC Framework if specified as ``True``. + Default: ``False``. 
+ + Returns: + A :class:`ShardedTensor` object on each rank + """ + return ShardedTensor( + sharding_spec, + *size, + dtype=dtype, + layout=layout, + requires_grad=requires_grad, + pin_memory=pin_memory, + memory_format=memory_format, + process_group=process_group, + init_rrefs=init_rrefs, + ) + +def ones(sharding_spec: ShardingSpec, + *size, + dtype=None, + layout=torch.strided, + requires_grad=False, + pin_memory=False, + memory_format=torch.contiguous_format, + process_group=None, + init_rrefs=False) -> ShardedTensor: + """ + Returns a :class:`ShardedTensor` with the scalar value 1. + Needs to be called on all ranks in an SPMD fashion. + + Args: + sharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`): The specification + describing how to shard the Tensor. + size (int...): a sequence of integers defining the shape of the output + tensor. Can be a variable number of arguments or a collection like a list or tuple. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + pin_memory (bool, optional): If set, returned tensor would be allocated in + the pinned memory. Works only for CPU tensors. Default: ``False``. + process_group (ProcessGroup, optional): The process group to work on. If None, + the default process group will be used. + init_rrefs (bool, optional): Whether or not to initialize + :class:`torch.distributed.rpc.RRef`s pointing to remote shards. + Need to initialize the RPC Framework if specified as ``True``. + Default: ``False``. + + Returns: + A :class:`ShardedTensor` object on each rank + """ + return full( + sharding_spec, + size, + fill_value=1, + dtype=dtype, + layout=layout, + requires_grad=requires_grad, + pin_memory=pin_memory, + memory_format=memory_format, + process_group=process_group, + init_rrefs=init_rrefs + ) + +def zeros(sharding_spec: ShardingSpec, + *size, + dtype=None, + layout=torch.strided, + requires_grad=False, + pin_memory=False, + memory_format=torch.contiguous_format, + process_group=None, + init_rrefs=False) -> ShardedTensor: + """ + Returns a :class:`ShardedTensor` filled with the scalar value 0. + Needs to be called on all ranks in an SPMD fashion. + + Args: + sharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`): The specification + describing how to shard the Tensor. + size (int...): a sequence of integers defining the shape of the output + tensor. Can be a variable number of arguments or a collection like a list or tuple. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + pin_memory (bool, optional): If set, returned tensor would be allocated in + the pinned memory. Works only for CPU tensors. Default: ``False``. + process_group (ProcessGroup, optional): The process group to work on. If None, + the default process group will be used. 
+ init_rrefs (bool, optional): Whether or not to initialize + :class:`torch.distributed.rpc.RRef`s pointing to remote shards. + Need to initialize the RPC Framework if specified as ``True``. + Default: ``False``. + + Returns: + A :class:`ShardedTensor` object on each rank + """ + return full( + sharding_spec, + size, + fill_value=0, + dtype=dtype, + layout=layout, + requires_grad=requires_grad, + pin_memory=pin_memory, + memory_format=memory_format, + process_group=process_group, + init_rrefs=init_rrefs + ) + +def full(sharding_spec: ShardingSpec, + size, + fill_value, + *, + dtype=None, + layout=torch.strided, + requires_grad=False, + pin_memory=False, + memory_format=torch.contiguous_format, + process_group=None, + init_rrefs=False) -> ShardedTensor: + """ + Creates a :class:`ShardedTensor` filled with fill_value. The tensor’s dtype + is inferred from fill_value. If dtype is specified, it will override the + inferred type from fill_value. Needs to be called on all ranks in an SPMD fashion. + Args: + sharding_spec (:class:`torch.distributed._sharding_spec.ShardingSpec`): The specification + describing how to shard the Tensor. + size (int...): a list, tuple, or `torch.Size` of integers defining the shape of the + output tensor. + fill_value (Scalar) – the value to fill the output tensor with. + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + pin_memory (bool, optional): If set, returned tensor would be allocated in + the pinned memory. Works only for CPU tensors. Default: ``False``. + process_group (ProcessGroup, optional): The process group to work on. If None, + the default process group will be used. + init_rrefs (bool, optional): Whether or not to initialize + :class:`torch.distributed.rpc.RRef`s pointing to remote shards. + Need to initialize the RPC Framework if specified as ``True``. + Default: ``False``. + Returns: + A :class:`ShardedTensor` object on each rank + """ + sharded_tensor = ShardedTensor( + sharding_spec, + *size, + dtype=dtype, + layout=layout, + requires_grad=requires_grad, + pin_memory=pin_memory, + memory_format=memory_format, + process_group=process_group, + init_rrefs=init_rrefs, + ) + torch.nn.init.constant_(sharded_tensor, fill_value) # type: ignore[arg-type] + return sharded_tensor + +def rand(sharding_spec: ShardingSpec, + *size, + dtype=None, + layout=torch.strided, + requires_grad=False, + pin_memory=False, + memory_format=torch.contiguous_format, + process_group=None, + init_rrefs=False) -> ShardedTensor: + """ + Creates a :class:`ShardedTensor` filled with random numbers from a uniform distribution + on the interval :math:`[0, 1)`. The shape of the tensor is defined by the + variable argument `size`. Needs to be called on all ranks in an SPMD fashion. + + Args: + sharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`): The specification + describing how to shard the Tensor. + size (int...): a list, tuple, or `torch.Size` of integers defining the shape of the + output tensor. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). 
+ layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + pin_memory (bool, optional): If set, returned tensor would be allocated in + the pinned memory. Works only for CPU tensors. Default: ``False``. + process_group (ProcessGroup, optional): The process group to work on. If None, + the default process group will be used. + init_rrefs (bool, optional): Whether or not to initialize + :class:`torch.distributed.rpc.RRef`s pointing to remote shards. + Need to initialize the RPC Framework if specified as ``True``. + Default: ``False``. + + Returns: + A :class:`ShardedTensor` object on each rank + """ + sharded_tensor = ShardedTensor( + sharding_spec, + *size, + dtype=dtype, + layout=layout, + requires_grad=requires_grad, + pin_memory=pin_memory, + memory_format=memory_format, + process_group=process_group, + init_rrefs=init_rrefs, + ) + torch.nn.init.uniform_(sharded_tensor, 0, 1) # type: ignore[arg-type] + return sharded_tensor + +def randn(sharding_spec: ShardingSpec, + *size, + dtype=None, + layout=torch.strided, + requires_grad=False, + pin_memory=False, + memory_format=torch.contiguous_format, + process_group=None, + init_rrefs=False) -> ShardedTensor: + """ + Creates a :class:`ShardedTensor` filled with random numbers from a uniform distribution + with mean `0` and variance `1` (also called standard normal distribution). The shape + of the tensor is defined by the variable argument `size`. Needs to be called on all ranks + in an SPMD fashion. + + Args: + sharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`): The specification + describing how to shard the Tensor. + size (int...): a list, tuple, or `torch.Size` of integers defining the shape of the + output tensor. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + pin_memory (bool, optional): If set, returned tensor would be allocated in + the pinned memory. Works only for CPU tensors. Default: ``False``. + process_group (ProcessGroup, optional): The process group to work on. If None, + the default process group will be used. + init_rrefs (bool, optional): Whether or not to initialize + :class:`torch.distributed.rpc.RRef`s pointing to remote shards. + Need to initialize the RPC Framework if specified as ``True``. + Default: ``False``. + + Returns: + A :class:`ShardedTensor` object on each rank + """ + sharded_tensor = ShardedTensor( + sharding_spec, + *size, + dtype=dtype, + layout=layout, + requires_grad=requires_grad, + pin_memory=pin_memory, + memory_format=memory_format, + process_group=process_group, + init_rrefs=init_rrefs, + ) + torch.nn.init.normal_(sharded_tensor, 0, 1) # type: ignore[arg-type] + return sharded_tensor + +def init_from_local_shards( + local_shards: List[Shard], + *global_size, + process_group=None, + init_rrefs=False) -> ShardedTensor: + """ + Creates an :class:`ShardedTensor` from local shards and the global metadata. + Needs to be called on all ranks in an SPMD fashion. 
+ + Args: + local_shards (List[:class `torch.distributed._shard.sharded_tensor.Shard`]): A list + of shards that represent the local shards on this rank. + global_size (int...): a list, tuple, or `torch.Size` of integers defining the + shape of the overall sharded tensor. + + Keyword args: + process_group (ProcessGroup, optional): The process group to work on. If None, + the default process group will be used. + init_rrefs (bool, optional): Whether or not to initialize + :class:`torch.distributed.rpc.RRef`s pointing to remote shards. + Need to initialize the RPC Framework if specified as ``True``. + Default: ``False``. + + Returns: + A :class:`ShardedTensor` object handle on this rank + + + Examples: + Suppose we want construct a sharded tensor on two ranks, global size = (10, 5), + each shard have a (5, 5) local tensor, we can do it like below: + + on rank 0: + >>> # xdoctest: +SKIP("not distributed") + >>> local_shard_metadata = ShardMetadata( + >>> shard_offsets=[0, 0], + >>> shard_lengths=[5, 5], + >>> placement="rank:0/cuda:0" + >>> ) + >>> local_shards = [Shard(torch.randn(5, 5), local_shard_metadata)] + >>> sharded_tensor = init_from_local_shards(local_shards, [10, 5]) + + on rank 1: + >>> # xdoctest: +SKIP("not distributed") + >>> local_shard_metadata = ShardMetadata( + >>> shard_offsets=[5, 0], + >>> shard_lengths=[5, 5], + >>> placement="rank:1/cuda:1" + >>> ) + >>> local_shards = [Shard(torch.randn(5, 5), local_shard_metadata)] + >>> sharded_tensor = init_from_local_shards(local_shards, [10, 5]) + """ + return ShardedTensor._init_from_local_shards( + local_shards, + *global_size, + process_group=process_group, + init_rrefs=init_rrefs + ) + +def state_dict_hook(module, destination, prefix, local_metadata): + """ + Hook to add ShardedTensor to Module's ``state_dict``. Needs to be + registered to the Module using + :meth:`torch.nn.Module._register_state_dict_hook`. + """ + for submodule_name, submodule in module.named_modules(): + for attr_name, attr in submodule.__dict__.items(): + if isinstance(attr, ShardedTensor): + mod_prefix = prefix + submodule_name + key = mod_prefix + ('.' if mod_prefix else '') + attr_name + destination[key] = attr + +def pre_load_state_dict_hook(module, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs): + """ + Pre-load state dict hook to add ShardedTensor to the module. + """ + for submodule_name, submodule in module.named_modules(): + for attr_name in submodule.__dict__.keys(): + mod_prefix = prefix + submodule_name + key = mod_prefix + ('.' if mod_prefix else '') + attr_name + if key in state_dict: + if isinstance(state_dict[key], ShardedTensor): + setattr(submodule, attr_name, state_dict[key]) + +def custom_sharded_op_impl(func): + """ + Provides a way for users to write their own custom sharded operator. This + can be used to override existing ShardedTensor operators or write a new + one not supported by ShardedTensor. If the operator in question is covered + by ``__torch_function__`` dispatch and has a ShardedTensor as any of its + parameters, the function provided will be invoked for that operator. + + Example:: + >>> # xdoctest: +SKIP + >>> @custom_sharded_op_impl(torch.nn.functional.linear) + >>> def my_custom_sharded_linear(types, args, kwargs, process_group): + >>> ... 
+ >>> # xdoctest: +SKIP("Undefined variables") + >>> input = torch.rand(10, 32) + >>> weight = sharded_tensor.rand(32, 16) + >>> bias = torch.rand(16) + >>> # This will call 'my_custom_sharded_linear' + >>> torch.nn.functional.linear(input, weight, bias) + + The types, args and kwargs parameters are the same parameters that are + passed to ``__torch_function__`` dispatch API + (https://pytorch.org/docs/stable/notes/extending.html#extending-torch). + There is an additional ``process_group`` parameter which is the + process_group used for the ShardedTensor and can be used by + implementations for communications within a sharded implementation. + + Args: + func(Callable): Torch function for which we want to provide a sharded + implementation (ex: torch.nn.functional.linear) + """ + return functools.partial( + _decorator_func, + op=func, + op_table=_CUSTOM_SHARDED_OPS + ) + +def _sharded_op_impl(func): + """ + Decorator to register a default sharded op. + """ + return functools.partial( + _decorator_func, + op=func, + op_table=_SHARDED_OPS + ) + +# Import all builtin sharded ops +from ._ops import * # noqa: F403 diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0e42547e5303ab433937e895e79eed30d86a33ce Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__pycache__/api.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__pycache__/api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7b0a817b27d28dce2a0b4240e18ef2073ba3f8d7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__pycache__/api.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__pycache__/logger.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__pycache__/logger.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1e47539ea27322d0a446a7c537b5f67dad6dd58f Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__pycache__/logger.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__pycache__/logging_handlers.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__pycache__/logging_handlers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0e496a12987b0b204270bdd4355782d87930babb Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__pycache__/logging_handlers.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__pycache__/metadata.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__pycache__/metadata.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ebf7f6bc23cc72a773f4b5520213ebbba0b15b54 Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__pycache__/metadata.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__pycache__/reshard.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__pycache__/reshard.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..06ba21afd93bcad01e44e96b4a36b022975220f8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__pycache__/reshard.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__pycache__/shard.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__pycache__/shard.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b1ad1145d6a660eedc771bbc8afa0d0992e250bd Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__pycache__/shard.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__pycache__/utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5bfc4dfd06a1257572e882828d5274491cb323d6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/__pycache__/utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/__init__.py b/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..c233840f1eccee36974b96d6a2c1a226866dd3d7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/__init__.py @@ -0,0 +1,9 @@ +import torch.distributed._shard.sharded_tensor._ops.misc_ops +import torch.distributed._shard.sharded_tensor._ops.tensor_ops + +from .binary_cmp import equal, allclose +from .init import kaiming_uniform_, normal_, uniform_, constant_ + +# Import all ChunkShardingSpec ops +from torch.distributed._shard.sharding_spec.chunk_sharding_spec_ops.embedding import sharded_embedding +from torch.distributed._shard.sharding_spec.chunk_sharding_spec_ops.embedding_bag import sharded_embedding_bag diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fd2e21d393a0b229310096907fbeb27a7f86432c Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/__pycache__/_common.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/__pycache__/_common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8ceeba81d84378af922b82e1c60b8adfc18f8b56 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/__pycache__/_common.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/__pycache__/binary_cmp.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/__pycache__/binary_cmp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d2a9b30eb0779095fec5edb4a4b4a221b44c624f Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/__pycache__/binary_cmp.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/__pycache__/init.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/__pycache__/init.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f12a65c1a06a9bdbdcb08139a3df3941527c98b1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/__pycache__/init.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/__pycache__/misc_ops.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/__pycache__/misc_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..60dfacdbc2f72de6d0ce2e0606d1e309a1bc5765 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/__pycache__/misc_ops.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/__pycache__/tensor_ops.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/__pycache__/tensor_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..065c4f10bf2a3185e324c336873e68a6880fa33c Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/__pycache__/tensor_ops.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/_common.py b/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/_common.py new file mode 100644 index 0000000000000000000000000000000000000000..e672c54927dbd5344ce9ec6b763c4ed7e3b518e7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/_common.py @@ -0,0 +1,107 @@ +import functools +from torch.distributed._shard.sharded_tensor import ( + _sharded_op_impl, + Shard, + ShardedTensor, +) +from torch.distributed._shard.common_op_utils import _basic_validation + +def _sharded_op_common(op, early_stop_func, extra_check): + """ + Inject sharded tensor op registration with common logics executed before + different behaviors are done on either local shards or a local tensor. + + Example:: + >>> # xdoctest: +SKIP("Undefined variables") + >>> op = torch.transpose + >>> @_sharded_op_impl(op) + >>> @_sharded_op_common(op, early_stop_func, extra_check) + >>> def sharded_tensor_op(types, args, kwargs, process_group): + >>> ... + >>> + >>> st = sharded_tensor.rand(32, 16) + >>> st.transpose(1, 2) + >>> # This will call '_sharded_op_common' + + Args: + op: The op to be registered and applied to all shards of the st. + early_stop_func (Callable, optional): the func for early stop. + Default: if ``None``, no early stop. + extra_check (Callable, optional): the func for extra condition check. + Default: if ``None``, no extra check. 
+ + Return: + func (Callable): Torch function for which we want to provide a sharded + implementation (ex: torch.transpose) + """ + def decorator_sharded_func(wrapped_func): + @functools.wraps(wrapped_func) + def wrapper(types, args=(), kwargs=None, pg=None): + _basic_validation(op, args, kwargs) + + st = args[0] + if kwargs is None: + kwargs = {} + if extra_check: + extra_check(*args, **kwargs) + if early_stop_func: + early_stop = early_stop_func(*args, **kwargs) + if early_stop: + return st + return wrapped_func(types, args, kwargs, pg) + + return wrapper + + return decorator_sharded_func + +def _register_sharded_op_on_local_shards( + op, early_stop_func=None, extra_check=None, customized_func=None +): + """ + Handles ``__torch_function__`` dispatch for ops which are performed on + each shard of the sharded tensor such as elementwise op like + ``torch.nn.functional.gelu`` or ``torch.nn.functional.relu``. + + For more complicated ops, a customized func can be used to generate + the new shards and sharded tensor size. + + This function expects that the original ShardingSpec for the ShardedTensor + is preserved irrespective of whether or not a customized function is used. + + Args: + op: The op to be registered and applied to all shards of the st. + early_stop_func (Callable, optional): the func for early stop. + Default: if ``None``, no early stop. + extra_check (Callable, optional): the func for extra condition check. + Default: if ``None``, no extra check. + customized_func (Callable, optional): the func for customized logic + to generate new shards and sharded tensor size. + Default: if ``None``, we simply lower to the real op call with + all local shards of the st. + + Return: + func (Callable): registered implementation for sharded op for + ``__torch_function__`` dispatch. + """ + @_sharded_op_impl(op) + @_sharded_op_common(op, early_stop_func, extra_check) + def sharded_tensor_op_on_local_shards(types, args=(), kwargs=None, pg=None): + st = args[0] + st_metadata = st.metadata() + local_shards = st.local_shards() + local_shards_new = [] + if customized_func: + local_shards_new, st_metadata = customized_func(args, kwargs, pg) + else: + for local_shard in local_shards: + args = (local_shard.tensor, *args[1:]) + local_shards_new.append( + Shard(op(*args, **kwargs), local_shard.metadata) + ) + return ShardedTensor._init_from_local_shards_and_global_metadata( + local_shards_new, + st_metadata, + process_group=pg, + init_rrefs=st._init_rrefs, + sharding_spec=st.sharding_spec() + ) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/binary_cmp.py b/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/binary_cmp.py new file mode 100644 index 0000000000000000000000000000000000000000..0a7999a4c263a16cc0f743af54fc0e39a378b755 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/binary_cmp.py @@ -0,0 +1,68 @@ +import torch +import torch.distributed as dist +import torch.distributed.distributed_c10d as distributed_c10d +from torch.distributed._shard.sharded_tensor import ( + ShardedTensor, + _sharded_op_impl +) + +def _communicate_result(result, pg): + # Gather results from all ranks. 
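+    # Each rank contributes 1 (local check passed) or 0 (failed); the
+    # all_reduce below sums these, so the result equals world_size only
+    # when every rank passed, which the final torch.equal check verifies.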
+ if result: + result_tensor = torch.ones(1, device=torch.device(torch.cuda.current_device())) + else: + result_tensor = torch.zeros(1, device=torch.device(torch.cuda.current_device())) + + dist.all_reduce(result_tensor, group=pg) + + expected_result = torch.ones(1, device=torch.device(torch.cuda.current_device())) * dist.get_world_size(pg) + + return torch.equal(result_tensor, expected_result) + +def binary_cmp(cmp_fun, types, args, kwargs=None, process_group=None): + if len(args) != 2: + raise ValueError(f'Expected two arguments for torch.{cmp_fun.__name__}') + + result = True + st1 = args[0] + st2 = args[1] + if not (isinstance(st1, ShardedTensor) and isinstance(st2, ShardedTensor)): + raise TypeError(f'Both arguments to torch.{cmp_fun.__name__} need to be of type ShardedTensor') + + # Verify same PG + if st1._process_group != st2._process_group: + return False + + if distributed_c10d._rank_not_in_group(st1._process_group) or distributed_c10d._rank_not_in_group(st2._process_group): + return distributed_c10d._rank_not_in_group(st1._process_group) == distributed_c10d._rank_not_in_group(st2._process_group) + + # Verify metadata + if st1.metadata() != st2.metadata(): + return _communicate_result(False, st1._process_group) + + # Verify number of local shards + st1_local_shards = st1.local_shards() + st2_local_shards = st2.local_shards() + if len(st1_local_shards) != len(st2_local_shards): + return _communicate_result(False, st1._process_group) + + # kwargs must be dict-like + if kwargs is None: + kwargs = {} + # Verify each local shard + for idx in range(len(st1_local_shards)): + if st1_local_shards[idx].metadata != st2_local_shards[idx].metadata: + return _communicate_result(False, st1._process_group) + if not cmp_fun(st1_local_shards[idx].tensor, st2_local_shards[idx].tensor, **kwargs): + return _communicate_result(False, st1._process_group) + + + return _communicate_result(True, st1._process_group) + +@_sharded_op_impl(torch.equal) +def equal(types, args, kwargs, process_group): + return binary_cmp(torch.equal, types, args, kwargs, process_group) + +@_sharded_op_impl(torch.allclose) +def allclose(types, args, kwargs, process_group): + return binary_cmp(torch.allclose, types, args, kwargs, process_group) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/init.py b/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/init.py new file mode 100644 index 0000000000000000000000000000000000000000..dfb661653e71b7f1e46ed111a02a87d2fe04e478 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/init.py @@ -0,0 +1,143 @@ +import torch +import torch.distributed._shard.sharded_tensor as sharded_tensor +from torch.distributed._shard.sharded_tensor import ( + _sharded_op_impl, +) + +def validate_param(param, param_name): + if param is None: + raise ValueError(f"param: {param_name} shouldn't be None!") + +@_sharded_op_impl(torch.nn.init.uniform_) +def uniform_(types, args=(), kwargs=None, pg=None): + r""" + Fills the Tensor in tensor.local_shards with values drawn from the uniform + distribution :math:`\mathcal{U}(a, b)`. 
+ Args: + tensor: tensor sharded across devices + a: the lower bound of the uniform distribution + b: the upper bound of the uniform distribution + """ + validate_param(kwargs, "kwargs") + sharded_tensor = kwargs["tensor"] + validate_param(sharded_tensor, "tensor") + a = kwargs['a'] + validate_param(a, "a") + b = kwargs['b'] + validate_param(b, "b") + + for shard in sharded_tensor.local_shards(): + torch.nn.init.uniform_(shard.tensor, a=a, b=b) + return sharded_tensor + +@_sharded_op_impl(torch.nn.init.normal_) +def normal_(types, args=(), kwargs=None, pg=None): + r""" + Fills the Tensors in tensor.local_shards with values drawn from the normal + distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`. + Args: + tensor: tensor sharded across devices + mean: the mean of the normal distribution + std: the standard deviation of the normal distribution + """ + validate_param(kwargs, "kwargs") + sharded_tensor = kwargs["tensor"] + validate_param(sharded_tensor, "tensor") + mean = kwargs['mean'] + validate_param(mean, "mean") + std = kwargs['std'] + validate_param(std, "std") + + for shard in sharded_tensor.local_shards(): + torch.nn.init.normal_(shard.tensor, mean=mean, std=std) + return sharded_tensor + +@_sharded_op_impl(torch.nn.init.kaiming_uniform_) +def kaiming_uniform_(types, args=(), kwargs=None, pg=None): + r""" + Fills the Tensors in tensor.local_shards with values according to the method + described in `Delving deep into rectifiers: Surpassing human-level + performance on ImageNet classification` - He, K. et al. (2015), using a + uniform distribution. The resulting tensor will have values sampled from + :math:`\mathcal{U}(-\text{bound}, \text{bound})` where + .. math:: + \text{bound} = \text{gain} \times \sqrt{\frac{3}{\text{fan\_mode}}} + Also known as He initialization. + Args: + tensor: tensor sharded across devices + a: the negative slope of the rectifier used after this layer (only + used with ``'leaky_relu'``) + mode: either ``'fan_in'`` (default) or ``'fan_out'``. Choosing ``'fan_in'`` + preserves the magnitude of the variance of the weights in the + forward pass. Choosing ``'fan_out'`` preserves the magnitudes in the + backwards pass. + nonlinearity: the non-linear function (`nn.functional` name), + recommended to use only with ``'relu'`` or ``'leaky_relu'`` (default). + """ + validate_param(kwargs, "kwargs") + sharded_tensor = kwargs["tensor"] + validate_param(sharded_tensor, "tensor") + a = kwargs['a'] + validate_param(a, "a") + mode = kwargs['mode'] + validate_param(mode, "mode") + nonlinearity = kwargs['nonlinearity'] + validate_param(nonlinearity, "nonlinearity") + + for shard in sharded_tensor.local_shards(): + torch.nn.init.kaiming_uniform_(shard.tensor, a=a, mode=mode, nonlinearity=nonlinearity) + return sharded_tensor + +@_sharded_op_impl(torch.nn.init.constant_) +def constant_(types, args=(), kwargs=None, pg=None): + r""" + Fills the input ShardedTensor with the value \text{val}val. 
+ Args: + tensor: tensor sharded across devices + val: the value to fill the tensor with + """ + validate_param(kwargs, "kwargs") + sharded_tensor = kwargs["tensor"] + validate_param(sharded_tensor, "tensor") + val = kwargs['val'] + validate_param(val, "val") + for shard in sharded_tensor.local_shards(): + torch.nn.init.constant_(shard.tensor, val=val) + return sharded_tensor + +tensor_like_creation_op_map = { + torch.full_like: sharded_tensor.full, + torch.empty_like: sharded_tensor.empty, + torch.zeros_like: sharded_tensor.zeros, + torch.ones_like: sharded_tensor.ones, + torch.rand_like: sharded_tensor.rand, + torch.randn_like: sharded_tensor.randn, +} + +# tensor ops that behave the same as the default tensor +def register_tensor_creation_op(op): + @_sharded_op_impl(op) + def tensor_creation_op(types, args=(), kwargs=None, pg=None): + """ + Handles ``__torch_function__`` dispatch for tensor creation ops that + takes a ShardedTensor as argument, such as ``torch.zeros_like`` or + ``torch.full_like``. + """ + creation_op = tensor_like_creation_op_map.get(op, None) + if creation_op is None: + raise RuntimeError(f"Tensor creation {op} not supported!") + if kwargs is None: + kwargs = {} + + st = args[0] + + new_st = creation_op(st.sharding_spec(), st.size(), *args[1:], **kwargs) # type: ignore[operator] + return new_st + + +register_tensor_creation_op(torch.full_like) +register_tensor_creation_op(torch.empty_like) +register_tensor_creation_op(torch.zeros_like) +register_tensor_creation_op(torch.ones_like) +register_tensor_creation_op(torch.rand_like) +register_tensor_creation_op(torch.randn_like) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/misc_ops.py b/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/misc_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..0e0911bb1d18c9f90726436a9efe6dc5ef44019b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/misc_ops.py @@ -0,0 +1,12 @@ +import torch +from torch.distributed._shard.sharded_tensor import ( + _sharded_op_impl, +) + +# This is used by `_apply()` within module.py to set new +# parameters after apply a certain method, we should follow +# the future behavior of overwriting the existing tensor +# instead of doing in-place change using `.data = `. 
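+# Rough sketch of the effect (illustrative; `param` / `param_applied` follow the
+# local names used inside nn.Module._apply()): _apply() consults
+#   torch._has_compatible_shallow_copy_type(param, param_applied)
+# and, when it returns True, mutates in place via `param.data = param_applied`;
+# when it returns False, it instead constructs and registers a new parameter.
+# Returning False below therefore steers ShardedTensor parameters onto the
+# replacement path.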
+@_sharded_op_impl(torch._has_compatible_shallow_copy_type) +def tensor_has_compatible_shallow_copy_type(types, args=(), kwargs=None, pg=None): + return False diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/tensor_ops.py b/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/tensor_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..f96eded95f310d59803ccbb328fe5b1311d2ebe2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/_ops/tensor_ops.py @@ -0,0 +1,215 @@ +import copy +import torch +from torch.distributed._shard.sharded_tensor import ( + _sharded_op_impl, + Shard, + ShardedTensor, +) +from ._common import ( + _register_sharded_op_on_local_shards, +) +from torch.distributed._shard.common_op_utils import _register_default_op + + +# Tensor properties access +_register_default_op(torch.Tensor.shape.__get__, _sharded_op_impl) # type: ignore[attr-defined] +_register_default_op(torch.Tensor.dtype.__get__, _sharded_op_impl) # type: ignore[attr-defined] +_register_default_op(torch.Tensor.layout.__get__, _sharded_op_impl) # type: ignore[attr-defined] +_register_default_op(torch.Tensor.size, _sharded_op_impl) +_register_default_op(torch.Tensor.dim, _sharded_op_impl) +_register_default_op(torch.Tensor.ndim.__get__, _sharded_op_impl) # type: ignore[attr-defined] +_register_default_op(torch.Tensor.is_contiguous, _sharded_op_impl) +_register_default_op(torch.Tensor.contiguous, _sharded_op_impl) +_register_default_op(torch.Tensor.is_floating_point, _sharded_op_impl) + +# __reduce_ex__ to dispatch to get_state/set_state +_register_default_op(torch.Tensor.__reduce_ex__, _sharded_op_impl) + +# autograd related properties +_register_default_op(torch.Tensor.requires_grad.__get__, _sharded_op_impl) # type: ignore[attr-defined] +# TODO: set grad with a ShardedTensor that consists of all local grads +_register_default_op(torch.Tensor.grad.__get__, _sharded_op_impl) # type: ignore[union-attr] +_register_default_op(torch.Tensor.grad_fn.__get__, _sharded_op_impl) # type: ignore[union-attr] +_register_default_op(torch.Tensor.is_leaf.__get__, _sharded_op_impl) # type: ignore[attr-defined] + +# device property is ambiguous as from a global prospective, +# ShardedTensor.device consists of multiple devices (might even across hosts) +# We choose to return the current device of the local tensor to represent +# the device property on each rank +@_sharded_op_impl(torch.Tensor.device.__get__) +def tensor_device(types, args=(), kwargs=None, pg=None): + self_st = args[0] + # Validate types + if not isinstance(self_st, ShardedTensor): + raise TypeError("input needs to be a ShardedTensor") + dev: torch.device + if self_st._local_shards: + dev = self_st._local_shards[0].tensor.device + elif pg and pg._get_backend_name() == "gloo": + dev = torch.device("cpu") + else: + dev = torch.device(torch.cuda.current_device()) + return dev + +@_sharded_op_impl(torch.Tensor.is_meta.__get__) # type: ignore[attr-defined] +def st_is_meta(types, args=(), kwargs=None, pg=None): + return args[0].local_tensor().is_meta + + +def sharded_type_as_check(*args, **kwargs): + """ + Perform extra checks for the sharded_type_as op such as the input needs to + be either a Tensor or ShardedTensor. + + Args: same as ``torch.Tensor.type_as``. 
+ + Return: None + """ + if len(args) < 2: + raise ValueError("Needs to give a tensor to cast type as!") + if not isinstance(args[1], torch.Tensor) and not isinstance(args[1], ShardedTensor): + raise ValueError("Needs to give a Tensor or ShardedTensor to cast type as!") + + +def same_dtype(*args, **kwargs): + """ + When the dtype is the same, return the original ShardedTensor. + + Args: same as ``torch.Tensor.type_as``. + + Return (bool): Whether to return early or not. + """ + return args[0].dtype == args[1].dtype + + +def sharded_type_as(args, kwargs, pg): + """ + Handles ``__torch_function__`` dispatch for the ``torch.Tensor.type_as`` op. + + Args: same as ``torch.Tensor.type_as``. + + Return: + new_local_shards (List[Shard]): Local shards for the new sharded tensor. + st_meta (ShardedTensorMetadata): Metadata of the new sharded tensor. + """ + st = args[0] + tensor = args[1] + if isinstance(tensor, ShardedTensor): + tensor = tensor.local_tensor() + new_local_shards = [] + for shard in st.local_shards(): + new_local_shards.append(Shard(shard.tensor.type_as(tensor), shard.metadata)) + st_meta = copy.deepcopy(st._metadata) + st_meta.tensor_properties.dtype = tensor.dtype + return new_local_shards, st_meta + + +_register_sharded_op_on_local_shards( + torch.Tensor.type_as, + early_stop_func=same_dtype, + extra_check=sharded_type_as_check, + customized_func=sharded_type_as, +) + + +def sharded_deepcopy(args, kwargs, pg): + # NOTE: we directly implement deepcopy magic method + # instead of using the default tensor.__deepcopy__ + # and implement clone(). This is because the default + # tensor deepcopy copies every attribute, but the + # process_group in ShardedTensor cannot be deep copied. + self_st = args[0] + new_local_shards = copy.deepcopy(self_st.local_shards()) + new_metadata = copy.deepcopy(self_st.metadata()) + return new_local_shards, new_metadata + + +_register_sharded_op_on_local_shards( + torch.Tensor.__deepcopy__, + customized_func=sharded_deepcopy, +) + + +@_sharded_op_impl(torch.Tensor.copy_) +def sharded_inplace_copy(types, args, kwargs, pg): + # NOTE: inplace op don't need to rewrap + kwargs = {} if kwargs is None else kwargs + self_st = args[0] + new_st = args[1] + nonblocking = kwargs.get("non_blocking", False) + for local_shard, new_shard in zip(self_st.local_shards(), new_st.local_shards()): + if local_shard.metadata != new_shard.metadata: + raise RuntimeError( + "inplace copy can only happen between two ShardedTensor with same metadata!" 
+ ) + for local_shard, new_shard in zip(self_st.local_shards(), new_st.local_shards()): + local_shard.tensor.copy_(new_shard.tensor, nonblocking) + + return self_st + + +def sharded_clone(args, kwargs, pg): + self_st = args[0] + desire_memory_format = kwargs.get("memory_format", None) + if desire_memory_format and desire_memory_format != torch.preserve_format: + raise RuntimeError("Only support torch.preserve_format for ShardedTensor!") + cloned_local_shards = [ + Shard( + local_shard.tensor.clone(memory_format=desire_memory_format), + metadata=copy.deepcopy(local_shard.metadata), + ) + for local_shard in self_st.local_shards() + ] + new_metadata = copy.deepcopy(self_st.metadata()) + return cloned_local_shards, new_metadata + + +_register_sharded_op_on_local_shards( + torch.Tensor.clone, + customized_func=sharded_clone, +) + + +def sharded_detach(args, kwargs, pg): + self_st = args[0] + detached_local_shards = [ + Shard( + local_shard.tensor.detach(), + metadata=copy.deepcopy(local_shard.metadata), + ) + for local_shard in self_st.local_shards() + ] + new_metadata = copy.deepcopy(self_st.metadata()) + new_metadata.tensor_properties.requires_grad = False + return detached_local_shards, new_metadata + + +_register_sharded_op_on_local_shards( + torch.Tensor.detach, + customized_func=sharded_detach, +) + + +@_sharded_op_impl(torch.Tensor.requires_grad_) +def tensor_requires_grad_set(types, args=(), kwargs=None, pg=None): + self_st = args[0] + # Validate types + if not isinstance(self_st, ShardedTensor): + raise TypeError("input needs to be a ShardedTensor") + + if kwargs is None: + kwargs = {} + + requires_grad = args[1] if len(args) > 1 else kwargs.get("requires_grad", True) + if requires_grad == self_st.requires_grad: + return self_st + + for local_shard in self_st.local_shards(): + local_shard.tensor.requires_grad_(requires_grad) + + # update the wrapper class property + with torch._C.DisableTorchFunctionSubclass(): + self_st.requires_grad_(requires_grad) + # update the metadata in the meanwhile + self_st._metadata.tensor_properties.requires_grad = requires_grad + return self_st diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/api.py b/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/api.py new file mode 100644 index 0000000000000000000000000000000000000000..06141fd20c923f6de3d555233f21bcd2fc750d20 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/api.py @@ -0,0 +1,1253 @@ +from __future__ import annotations # type: ignore[attr-defined] +from dataclasses import dataclass +from typing import ( + Callable, + Dict, + List, + Optional, + Sequence, + Tuple, + cast, +) +import copy +import warnings +from functools import reduce +import weakref + +import threading +import torch +import torch.distributed as dist +from torch.distributed import rpc +from torch.distributed import distributed_c10d +from torch.distributed._shard.metadata import ShardMetadata +import torch.distributed._shard.sharding_spec as shard_spec +from torch.distributed._shard.sharding_spec.api import ( + _dispatch_custom_op, + _has_custom_op, +) +from torch.distributed._shard.sharding_spec._internals import ( + check_tensor, + validate_non_overlapping_shards_metadata, +) +from torch.distributed._shard._utils import ( + DEPRECATE_MSG, +) + +from .metadata import TensorProperties, ShardedTensorMetadata +from .shard import Shard +from .reshard import reshuffle_local_shard, reshard_local_shard +from .utils import ( + 
_flatten_tensor_size, + _parse_and_validate_remote_device, + _validate_output_tensor_for_gather, + build_metadata_from_local_shards, + build_global_metadata +) +from torch.distributed.remote_device import _remote_device +from torch.utils import _pytree as pytree +import operator + +# Tracking for sharded tensor objects. +_sharded_tensor_lock = threading.Lock() +_sharded_tensor_current_id = 0 +_sharded_tensor_map: Dict[int, weakref.ReferenceType[ShardedTensor]] = {} + +# Default sharded ops +_SHARDED_OPS: Dict[Callable, Callable] = {} + +# Customized user ops +_CUSTOM_SHARDED_OPS: Dict[Callable, Callable] = {} + +def _register_remote_shards(sharded_tensor_id: int, rrefs: List[rpc.RRef[Shard]], rpc_rank: int): + with _sharded_tensor_lock: + if sharded_tensor_id not in _sharded_tensor_map: + raise RuntimeError( + f'Could not find sharded_tensor_id: {sharded_tensor_id} in map: {_sharded_tensor_map.keys()}') + + sharded_tensor = _sharded_tensor_map[sharded_tensor_id]() + if sharded_tensor is None: + raise RuntimeError('ShardedTensor weakref has been deallocated') + else: + sharded_tensor._register_remote_shards(rrefs, rpc_rank) + +class ShardedTensorBase(torch.Tensor): + _sharding_spec: shard_spec.ShardingSpec + _metadata: ShardedTensorMetadata + _local_shards: List[Shard] + + def __new__(cls, sharding_spec: shard_spec.ShardingSpec, *size, **kwargs): + # Use __new__ to construct a wrapper tensor, for recording tensor + # properties and logging purposes. + torch._C._log_api_usage_once("torch.distributed._shard.sharded_tensor") + + # check sharding spec and build sharded tensor metadata + if not isinstance(sharding_spec, shard_spec.ShardingSpec): + raise ValueError(f"Expecting ShardingSpec but got: {type(sharding_spec)}") + + sizes = _flatten_tensor_size(size) + dtype = kwargs["dtype"] + layout = kwargs["layout"] + pin_memory = kwargs["pin_memory"] + requires_grad = kwargs["requires_grad"] + + if dtype is None: + dtype = torch.get_default_dtype() + + tensor_properties = TensorProperties( + dtype, layout, requires_grad, pin_memory=pin_memory + ) + sharded_tensor_metadata = sharding_spec.build_metadata( + sizes, tensor_properties=tensor_properties + ) + + r = torch.Tensor._make_wrapper_subclass( # type: ignore[attr-defined] + cls, + sizes, + dtype=dtype, + layout=layout, + pin_memory=pin_memory, + requires_grad=requires_grad, + ) + # set sharding spec + r._sharding_spec = sharding_spec + # set metadata + r._metadata = sharded_tensor_metadata + # set local shards + r._local_shards = [] + return r + + def metadata(self) -> ShardedTensorMetadata: + """ + Returns a :class:`ShardedTensorMetadata` object corresponding to the + metadata for the entire tensor. + """ + return self._metadata + + def local_shards(self) -> List[Shard]: + """ + Returns a list of :class:`Shard' corresponding to the + local shards for this rank. Returns an empty list if the current rank + does not host any shards for this Tensor. + """ + return self._local_shards + + @classmethod + def _init_from_local_shards_and_global_metadata( + cls, + local_shards: List[Shard], + sharded_tensor_metadata: ShardedTensorMetadata, + sharding_spec=None, + ) -> ShardedTensorBase: + """ + Initialize a ShardedTensorBase with local shards and a global + ShardedTensorMetadata built on each rank. + Warning: This API is experimental and subject to change. 
It does + not do cross rank validations, and fully rely on the user + for the correctness of sharded_tensor_metadata on each rank + """ + shards_metadata = sharded_tensor_metadata.shards_metadata + tensor_properties = sharded_tensor_metadata.tensor_properties + + if len(shards_metadata) == 0: + raise ValueError("shards_metadata must not be empty!") + + if tensor_properties.layout != torch.strided: + raise ValueError("Only torch.strided layout is currently supported") + + if sharding_spec is None: + spec = shard_spec._infer_sharding_spec_from_shards_metadata(shards_metadata) + else: + spec = sharding_spec + + sharded_tensor_base = ShardedTensorBase.__new__( + ShardedTensor, + spec, + sharded_tensor_metadata.size, + dtype=tensor_properties.dtype, + layout=tensor_properties.layout, + pin_memory=tensor_properties.pin_memory, + requires_grad=tensor_properties.requires_grad, + ) + + # check if shards_metadata have overlap shards + validate_non_overlapping_shards_metadata(shards_metadata) + + # check if the shards_metadata is compatible with overall size of the sharded tensor. + check_tensor(shards_metadata, list(sharded_tensor_metadata.size)) + + # done validation, add local_shards + sharded_tensor_base._local_shards = local_shards + return sharded_tensor_base + + @classmethod + def __torch_dispatch__(cls, func, types, args=(), kwargs=None): + raise RuntimeError( + f"A {cls.__name__} object is being used from c++ while calling {func.__module__}.{func.__name__} " + "but the there is no custom __torch_dispatch__ implementation for it." + ) + +class ShardedTensor(ShardedTensorBase): + """ + ShardedTensor is an torch.Tensor subclass to represent Tensors that are sharded + across multiple devices and multiple processes. + + ShardedTensor is initialized in an SPMD like fashion where each rank + initializes the ShardedTensor. The ShardedTensor object on each rank + then only stores the local shard for the Tensor and provides global + metadata for all the shards. + + ShardedTensor doesn't provide any Tensor like operations but is a wrapper + providing the Tensor representing the local shard and the global metadata. + Using these, users can build their custom distributed._sharded computations + on top of this primitive. The local shards are all initialized using the + create_op specified by tensor_init_params.create_op, e.g., torch.ones, or + torch.empty + + Args: + sharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`): The specification + describing how to shard the Tensor. + size (int...): a sequence of integers defining the shape of the output + tensor. Can be a variable number of arguments or a collection like a list or tuple. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + pin_memory (bool, optional): If set, returned tensor would be allocated in + the pinned memory. Works only for CPU tensors. Default: ``False``. + memory_format (:class:`torch.memory_format`, optional): the desired memory format of + returned Tensor. Default: ``torch.contiguous_format``. + init_rrefs (bool, optional): Whether or not to initialize + :class:`torch.distributed.rpc.RRef`s pointing to remote shards. 
+ Need to initialize the RPC Framework if specified as ``True``. + Default: ``False``. + + .. note:: ShardedTensor uses collectives to do various operations, i.e. it + uses all_gather to do cross rank validations. For NCCL-based process + groups, internal tensor representations of objects must be moved to the + GPU device before communication takes place. In this case, the device + used is given by ``torch.cuda.current_device()`` and it is the user's + responsibility to ensure that this is set so that each rank has an + individual GPU, via ``torch.cuda.set_device()`` + + """ + def __new__(cls, sharding_spec: shard_spec.ShardingSpec, *size, **kwargs): + self = super().__new__(cls, sharding_spec, *size, **kwargs) + return self + + def __init__( + self, + sharding_spec: shard_spec.ShardingSpec, + *size, + dtype=None, + layout=torch.strided, + requires_grad=False, + pin_memory=False, + memory_format=torch.contiguous_format, + process_group=None, + init_rrefs=False, + ): + # prepare initialization, initialize fields like + # _process_group, _local_shards, etc. + self._prepare_init(process_group=process_group, init_rrefs=init_rrefs) + + if layout != torch.strided: + raise ValueError('Only torch.strided layout is currently supported') + + if memory_format != torch.contiguous_format: + raise ValueError('Only torch.contiguous_format memory_format is currently supported') + + self._metadata.tensor_properties.memory_format = memory_format + + current_rank = dist.get_rank(self._process_group) + + for shard_metadata in self._metadata.shards_metadata: + rank, device = _parse_and_validate_remote_device(self._process_group, shard_metadata.placement) + if rank == current_rank: + local_tensor = _create_tensor_from_params( + shard_metadata.shard_sizes, + local_device=device, + tensor_properties=self._metadata.tensor_properties + ) + self._local_shards.append(Shard(local_tensor, shard_metadata)) + + # do post initialization (i.e. register sharded_tensor_id, initialize_rpc) + self._post_init() + + def _prepare_init(self, process_group=None, init_rrefs=False): + self._init_rrefs = init_rrefs + self._sharded_tensor_id = None + + self._process_group = ( + process_group + if process_group is not None + else distributed_c10d._get_default_group() + ) + + self._remote_shards: Dict[int, List[rpc.RRef[Shard]]] = {} + + def _post_init(self): + # Initialize RPC if available. + if self._init_rrefs: + with _sharded_tensor_lock: + global _sharded_tensor_current_id, _sharded_tensor_map + self._sharded_tensor_id = _sharded_tensor_current_id + _sharded_tensor_map[self._sharded_tensor_id] = weakref.ref(self) + _sharded_tensor_current_id += 1 + + if not rpc._is_current_rpc_agent_set(): + raise RuntimeError( + 'RPC Framework needs to be initialized using' + ' torch.distributed.rpc.init_rpc if init_rrefs is set to True') + self._init_rpc() + + def __del__(self): + # Clean up the global map. + with _sharded_tensor_lock: + global _sharded_tensor_current_id, _sharded_tensor_map + if ( + hasattr(self, "_sharded_tensor_id") + and self._sharded_tensor_id in _sharded_tensor_map + ): + _sharded_tensor_map.pop(self._sharded_tensor_id) # type: ignore[call-overload] + + def _init_rpc(self): + # Validate PG and RPC ranks match. 
+ pg_rank = dist.get_rank() + rpc_rank = rpc.get_worker_info().id + if pg_rank != rpc_rank: + raise ValueError( + f'Default ProcessGroup and RPC ranks must be ' + f'the same for ShardedTensor, found process group rank: ' + f'{pg_rank} and RPC rank: {rpc_rank}' + ) + + self._remote_shards = {} + + # Gather all the sharded tensor ids. + worker_infos = rpc._get_current_rpc_agent().get_worker_infos() + rank_to_name = {} + name_to_rank = {} + + for worker_info in worker_infos: + rank_to_name[worker_info.id] = worker_info.name + name_to_rank[worker_info.name] = worker_info.id + + all_tensor_ids = rpc.api._all_gather(self._sharded_tensor_id) + + # Share the local shards to the entire world. + futs = [] + rpc_rank = rpc.get_worker_info().id + for rank in range(dist.get_world_size()): + # Skip self. + if rank == dist.get_rank(): + continue + + if len(self.local_shards()) != 0: + rrefs: List[rpc.RRef[Shard]] = [rpc.RRef(shard) for shard in self.local_shards()] + fut = rpc.rpc_async( + rank, + _register_remote_shards, + args=(all_tensor_ids[rank_to_name[rank]], rrefs, rpc_rank)) + futs.append(fut) + + torch.futures.wait_all(futs) + + # Barrier for all RPCs to finish on all ranks. + rpc.api._all_gather(None) + + def _get_preferred_device(self) -> torch.device: + """ + Return the preferred device to be used when creating tensors for collectives. + This method takes into account the associated process group + """ + if dist.get_backend(self._process_group) == dist.Backend.NCCL: + return torch.device(torch.cuda.current_device()) + return torch.device("cpu") + + def gather( # type: ignore[override] + self, + dst: int = 0, + out: Optional[torch.Tensor] = None, + enforce_dtype: bool = False, + dtype: Optional[torch.dtype] = None, + ) -> None: + """ + Creates a full :class:`Tensor` on rank ``dst`` by gathering all shards of the + sharded tensor. + + The API needs to be called on all ranks in SPMD fashion. All ranks should have + the same ``dst``. ``out`` should be a tensor of the same size as the overall + size of the sharded tensor on ``dst`` and ``None`` on all other ranks. + + Args: + dst(int): The rank where full tensor is constructed. + Default: 0 + out (:class `torch.Tensor`, optional): The output full tensor. + Must to be provided ONLY on ``dst`` rank. + Default: ``None`` + enforce_dtype (bool): Deprecated, please use dtype instead. Force the + gathered tensors to be the same type as input and output. + dtype (torch.dtype): Force the gathered tensors to be this dtype. + Default: ``None`` + """ + def shard_size(shard_md): + return reduce(operator.mul, shard_md.shard_sizes) # type: ignore[attr-defined] + + if enforce_dtype: + warnings.warn("enforce_dtype is deprecated. 
Please use dtype instead.") + + rank = dist.get_rank(self._process_group) + full_size = self.metadata().size + _validate_output_tensor_for_gather(rank, dst, full_size, out) + + local_shards = self.local_shards() + world_size = dist.get_world_size(self._process_group) + rank_sizes = [0 for _ in range(world_size)] + max_rank_size = 0 + shard_placement: Dict[ShardMetadata, Tuple[int, int]] = {} + # collect sizes + for shard_md in self.metadata().shards_metadata: + shard_rank = cast(_remote_device, shard_md.placement).rank() + assert shard_rank is not None + + shard_placement[shard_md] = (shard_rank, rank_sizes[shard_rank]) + rank_sizes[shard_rank] += shard_size(shard_md) + max_rank_size = max(max_rank_size, rank_sizes[shard_rank]) + + gather_list: Optional[List[torch.Tensor]] + if rank == dst: + assert out is not None + if enforce_dtype: + # enforce_dtype is deprecated. Do it for backward compatibility. + dtype = out.dtype + # TODO make it as a view of out tensor + gather_list = [torch.empty((max_rank_size,), device=out.device, dtype=dtype) for _ in range(world_size)] + else: + gather_list = None + + with torch.no_grad(): + if enforce_dtype and len(local_shards) > 0: + # enforce_dtype is deprecated. Do it for backward compatibility. + dtype = local_shards[0].tensor.dtype + data = torch.empty(max_rank_size, device=self._get_preferred_device(), dtype=dtype) + + for shard in local_shards: + src = shard.tensor.flatten() + if src.nelement() == 0 : + warnings.warn("Gathering a tensor with zero elements on rank " + str(rank)) + return + shard_offset = shard_placement[shard.metadata][1] + data[shard_offset: shard_offset + src.numel()].copy_(src) + + dist.gather( + tensor=data, + gather_list=gather_list, + dst=dst, + group=self._process_group, + ) + if rank != dst: + return + # In _validate_output_tensor_for_gather, we raise if out == None and rank == dst + out = cast(torch.Tensor, out) + assert gather_list is not None + + full_size = self.metadata().size + dims = len(full_size) + for shard_md in self.metadata().shards_metadata: + rank, rank_offset = shard_placement[shard_md] + tensor = gather_list[rank] + tensor = tensor[rank_offset : rank_offset + shard_size(shard_md)] + tensor = tensor.view(shard_md.shard_sizes) + + out_narrow_view = out + for dim in range(dims): + out_narrow_view = out_narrow_view.narrow( + dim, + shard_md.shard_offsets[dim], + shard_md.shard_sizes[dim], + ) + + out_narrow_view.copy_(tensor) + + def cpu( + self, + memory_format=torch.preserve_format, + process_group=None + ) -> ShardedTensor: + """ + Returns a copy of this object in CPU memory. + + If this ShardedTensor is already on CPU memory, then no copy is + performed and original object is returned. + + .. note:: When moving a ShardedTensor from GPU to CPU, the ShardedTensor might + need to be managed by a different type of ProcessGroup(i.e. ProcessGroupGloo), + it is the user's responsiblity to explicitly pass in a new process_group that + is compatible with CPU. 
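+
+        Example (illustrative; assumes the default group is NCCL-based and that
+        ``st`` is an existing GPU ShardedTensor)::
+            >>> # xdoctest: +SKIP
+            >>> gloo_pg = dist.new_group(backend="gloo")
+            >>> st_cpu = st.cpu(process_group=gloo_pg)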
+ """ + # TODO: make this a __torch_function__ op once ShardedTensor becomes a + # torch.Tensor subclass, see https://github.com/pytorch/pytorch/issues/75402 + if memory_format != torch.preserve_format and \ + memory_format != torch.contiguous_format: + raise RuntimeError("Only `torch.contiguous_format` or " + "`torch.preserve_format` is supported!") + all_on_cpu = True + for meta in self.metadata().shards_metadata: + all_on_cpu &= (meta.placement.device().type == "cpu") # type: ignore[union-attr] + + # if every shard is already on CPU, return the original object + if all_on_cpu: + return self + + # if not, returns a copy of this object on CPU + list_shards: List[Shard] = [] + # move all local shards to cpu, and change metadata + for shard in self._local_shards: + cpu_tensor = shard.tensor.cpu(memory_format=memory_format) # type: ignore[call-arg] + metadata = copy.deepcopy(shard.metadata) + metadata.placement._device = torch.device("cpu") # type: ignore[union-attr] + list_shards.append( + Shard(cpu_tensor, metadata) + ) + + st_meta = copy.deepcopy(self.metadata()) + for meta in st_meta.shards_metadata: + if meta.placement.device().type != "cpu": # type: ignore[union-attr] + meta.placement._device = torch.device("cpu") # type: ignore[union-attr] + + pg = self._process_group if process_group is None else process_group + st_cpu = ShardedTensor._init_from_local_shards_and_global_metadata( + list_shards, + sharded_tensor_metadata=st_meta, + process_group=pg, + init_rrefs=self._init_rrefs + ) + return st_cpu + + def cuda( + self, + device=None, + non_blocking=False, + memory_format=torch.preserve_format, + process_group=None + ) -> ShardedTensor: + """ + Returns a copy of this object in CUDA memory, if the original ShardedTensor + is on CPU, we will move the local shard to the current GPU device of each + process in a SPMD fashion. + If this ShardedTensor is already on CUDA memory and local shards on each rank are + already on current device, we still returns a new ShardedTensor object with new + metadata, but no underlying data movements are performed. + .. note:: When moving a ShardedTensor from CPU to GPU, the ShardedTensor might + need to be managed by a different type of ProcessGroup(i.e. ProcessGroupNCCL), + it is the user's responsiblity to explicitly pass in a new process_group that + is compatible with GPU. + """ + if memory_format != torch.preserve_format and \ + memory_format != torch.contiguous_format: + raise RuntimeError("Only `torch.contiguous_format` or " + "`torch.preserve_format` is supported!") + + if device is not None: + device = torch.device(device) if isinstance(device, str) else device + assert isinstance(device, torch.device) and device.index == torch.cuda.current_device(), \ + '''Only device without device id (e.g. "cpu" or "cuda") is expected for ShardedTensor!''' + + current_device = torch.device(torch.cuda.current_device()) + # returns a copy of ShardedTensor on CUDA current device + list_shards: List[Shard] = [] + # move all local shards to current device, and change metadata + # if local shards already on the current device, there's no + # real data movement, only the metadata are copied. 
+ for shard in self._local_shards: + cuda_tensor = shard.tensor.cuda( + device=current_device, + non_blocking=non_blocking, + memory_format=memory_format + ) # type: ignore[call-arg] + metadata = copy.deepcopy(shard.metadata) + metadata.placement._device = current_device # type: ignore[union-attr] + + list_shards.append( + Shard(cuda_tensor, metadata) + ) + + st_meta = copy.deepcopy(self.metadata()) + for meta in st_meta.shards_metadata: + if meta.placement.device().type != "cuda": # type: ignore[union-attr] + meta.placement._device = current_device # type: ignore[union-attr] + + pg = self._process_group if process_group is None else process_group + # we need to use `init_from_local_shards` to communicate between ranks + # and update the sharding spec/shards metadata. + st_cuda = ShardedTensor._init_from_local_shards_and_global_metadata( + list_shards, + sharded_tensor_metadata=st_meta, + process_group=pg, + init_rrefs=self._init_rrefs + ) + return st_cuda + + def to(self, *args, **kwargs) -> ShardedTensor: + current_device: torch.device + if self._local_shards: + current_device = self._local_shards[0].tensor.device + elif self._process_group._get_backend_name() == "gloo": + current_device = torch.device("cpu") + else: + current_device = torch.device(torch.cuda.current_device()) + current_dtype = self.dtype + device_to = current_device + dtype_to = current_dtype + if len(args) == 1: + if isinstance(args[0], torch.dtype): + dtype_to = args[0] + elif isinstance(args[0], torch.device): + device_to = args[0] + elif isinstance(args[0], (str, int)): + device_to = torch.device(args[0]) + elif isinstance(args[0], torch.Tensor): + dtype_to = args[0].dtype + device_to = args[0].device + else: + raise RuntimeError(f"ShardedTensor.to() have wrong arguments: {args}") + elif len(args) == 2: + device_to, dtype_to = args + else: + dtype_to = kwargs.get("dtype", current_dtype) + device_to = kwargs.get("device", current_device) + + device_to = torch.device(device_to) if isinstance(device_to, (str, int)) else device_to + + if device_to.type == "cuda": + # if device_to set to cuda, set to current device even + # if user specify the device index. 
+ current_idx = torch.cuda.current_device() + if device_to.index != current_idx: + warnings.warn("ShardedTensor.to only move tensor to its current device" + "If you want to put to different device, use `reshard` instead.") + device_to = torch.device(current_idx) + + copy_tensor = kwargs.get("copy", False) + non_blocking = kwargs.get("non_blocking", False) + memory_format = kwargs.get("memory_format", torch.preserve_format) + process_group = kwargs.get("process_group", None) + + if not copy_tensor and dtype_to == current_dtype and device_to == current_device: + # already have correct dtype and device, return itself + return self + + # returns a copy of ShardedTensor on CUDA current device + list_shards: List[Shard] = [] + + for shard in self._local_shards: + new_tensor = shard.tensor.to( # type: ignore[call-overload] + device=device_to, + dtype=dtype_to, + non_blocking=non_blocking, + copy=copy_tensor, + memory_format=memory_format + ) + metadata = copy.deepcopy(shard.metadata) + if metadata.placement is not None: + metadata.placement._device = device_to + list_shards.append(Shard(new_tensor, metadata)) + + # update metadata + st_meta = copy.deepcopy(self.metadata()) + st_meta.tensor_properties.dtype = dtype_to + for meta in st_meta.shards_metadata: + meta.placement._device = device_to # type: ignore[union-attr] + + pg = self._process_group if process_group is None else process_group + # we need to use `init_from_local_shards` to communicate between ranks + # and update the sharding spec/shards metadata. + st_to = ShardedTensor._init_from_local_shards_and_global_metadata( + list_shards, + sharded_tensor_metadata=st_meta, + process_group=pg, + init_rrefs=self._init_rrefs + ) + return st_to + + + @classmethod + def _init_from_local_shards( + cls, + local_shards: List[Shard], + *global_size, + process_group=None, + init_rrefs=False, + ): + # STEP 1: Validate the Shardmetadatas locally + process_group = ( + process_group + if process_group is not None + else distributed_c10d._get_default_group() + ) + current_rank = dist.get_rank(process_group) + world_size = dist.get_world_size(process_group) + + local_sharded_tensor_metadata: Optional[ShardedTensorMetadata] = None + global_tensor_size = _flatten_tensor_size(global_size) + + if len(local_shards) > 0: + local_sharded_tensor_metadata = \ + build_metadata_from_local_shards(local_shards, global_tensor_size, current_rank, process_group) + + # STEP 2. 
Validate metadata across ranks, and build a global sharded tensor + # metadata by gathering local ShardedTensorMetadata + gathered_metadatas: List[Optional[ShardedTensorMetadata]] = [] + if world_size > 1: + gathered_metadatas = [None for _ in range(world_size)] + + dist.all_gather_object( + gathered_metadatas, + local_sharded_tensor_metadata, + group=process_group + ) + else: + gathered_metadatas = [local_sharded_tensor_metadata] + + global_sharded_tensor_metadata = build_global_metadata(gathered_metadatas) + tensor_properties = global_sharded_tensor_metadata.tensor_properties + + # STEP 3: Validation done, create the actual ShardedTensor and populate fields + # prepare initialization + spec = shard_spec._infer_sharding_spec_from_shards_metadata( + global_sharded_tensor_metadata.shards_metadata + ) + sharded_tensor = cls.__new__(cls, + spec, + global_sharded_tensor_metadata.size, + dtype=tensor_properties.dtype, + layout=tensor_properties.layout, + pin_memory=tensor_properties.pin_memory, + requires_grad=tensor_properties.requires_grad) + sharded_tensor._prepare_init(process_group=process_group, init_rrefs=init_rrefs) + + # attach local_shards to the ShardedTensor created + sharded_tensor._local_shards = local_shards + + # run post initialization, i.e. map registration, rpc initialization + sharded_tensor._post_init() + return sharded_tensor + + @classmethod + def _init_from_local_tensor( + cls, + local_tensor: torch.Tensor, + sharding_spec: shard_spec.ShardingSpec, + *global_size: Sequence[int], + process_group: Optional[dist.ProcessGroup] = None, + init_rrefs=False, + ) -> ShardedTensor: + """ + Initialize a ShardedTensor given only one local tensor, global sharded tensor + size and sharding spec on each rank. + + Args: + local_tensor (Tensor): Single tensor of local shard stored in each rank. + sharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`): + The specification describing how to shard the Tensor. + global_size (Sequence[int]): Size of the sharded tensor. + process_group (ProcessGroup, optional): The process group to aggregate on. + Default: None + init_rrefs (bool, optional): Whether or not to initialize + :class:`torch.distributed.rpc.RRef`s pointing to remote shards. + Need to initialize the RPC Framework if specified as ``True``. + Default: ``False``. + + Returns: + A :class:`ShardedTensor` sharded based on the given sharding_spec with local + tensor stored in the current rank. + + Examples: + >>> # xdoctest: +SKIP + >>> # All tensors below are of torch.int64 type. + >>> # We have 2 process groups, 2 ranks. + >>> tensor = torch.arange(2, dtype=torch.int64) + 1 + 2 * rank + >>> local_tensor = torch.unsqueeze(torch.cat([tensor, tensor + 2])) + >>> local_tensor + tensor([[1, 2, 3, 4]]) # Rank 0 + tensor([[3, 4, 5, 6]]) # Rank 1 + >>> sharding_dim = 0 + >>> sharding_spec = ChunkShardingSpec( + dim=sharding_dim, + placements=[ + "rank:0/cuda:0", + "rank:1/cuda:1", + ], + ) + >>> st = ShardedTensor._init_from_local_tensor(local_tensor, sharding_spec, [2, 4]) + >>> st + ShardedTensor( + ShardedTensorMetadata( + shards_metadata=[ + ShardMetadata(shard_offsets=[0, 0], shard_sizes=[1, 4], placement=rank:0/cuda:0), + ShardMetadata(shard_offsets=[1, 0], shard_sizes=[1, 4], placement=rank:1/cuda:1), + ], + size=torch.Size([2, 4]) + ) + >>> st.local_tensor() + tensor([1, 2, 3, 4]) # Rank 0 + tensor([3, 4, 5, 6]) # Rank 1 + + Warning: This API is experimental and subject to change. 
It lacks of a fully across + rank validations, and we only validate the local shard on the current rank. + We fully rely on the user to ensure local tensor is sharded based on the + sharding spec. + """ + warnings.warn(DEPRECATE_MSG) + + if not local_tensor.is_contiguous(): + raise ValueError('local_tensor is not a contiguous Tensor.') + + global_tensor_size = _flatten_tensor_size(global_size) + tensor_properties = TensorProperties( + dtype=local_tensor.dtype, + layout=local_tensor.layout, + requires_grad=local_tensor.requires_grad, + memory_format=torch.contiguous_format, + pin_memory=local_tensor.is_pinned()) + sharded_tensor_metadata = sharding_spec.build_metadata( + global_tensor_size, + tensor_properties + ) + + process_group = ( + process_group + if process_group is not None + else distributed_c10d._get_default_group() + ) + current_rank = dist.get_rank(process_group) + + local_shards: List[Shard] = [] + for shard_metadata in sharded_tensor_metadata.shards_metadata: + rank, device = _parse_and_validate_remote_device(process_group, shard_metadata.placement) + if rank == current_rank: + local_shards.append(Shard(local_tensor, shard_metadata)) + + # TODO: figure out what the API should behave when some rank have no shard + # see https://github.com/pytorch/pytorch/issues/7313 + return ShardedTensor._init_from_local_shards_and_global_metadata( + local_shards, + sharded_tensor_metadata, + process_group=process_group, + init_rrefs=init_rrefs, + sharding_spec=sharding_spec, + ) + + @classmethod + def _init_from_local_shards_and_global_metadata( # type: ignore[override] + cls, + local_shards: List[Shard], + sharded_tensor_metadata: ShardedTensorMetadata, + process_group=None, + init_rrefs=False, + sharding_spec=None, + ) -> ShardedTensor: + """ + Initialize a ShardedTensor with local shards and a global + ShardedTensorMetadata built on each rank. + + Warning: This API is experimental and subject to change. 
It does + not do cross rank validations, and fully rely on the user + for the correctness of sharded_tensor_metadata on each rank + """ + process_group = ( + process_group + if process_group is not None + else distributed_c10d._get_default_group() + ) + current_rank = dist.get_rank(process_group) + + shards_metadata = sharded_tensor_metadata.shards_metadata + + local_shard_metadatas = [] + + # collect local shard metadatas from the global sharded_tensor_metadata + for shard_metadata in shards_metadata: # type: ignore[attr-defined] + rank, local_device = _parse_and_validate_remote_device(process_group, shard_metadata.placement) + + if current_rank == rank: + local_shard_metadatas.append(shard_metadata) + + if len(local_shards) != len(local_shard_metadatas): + raise RuntimeError( + f'Number of local shards ({len(local_shards)}) does not match number of local ' + f'shards metadata in sharded_tensor_metadata ({len(local_shard_metadatas)}) ' + f'on rank ({current_rank}) ' + ) + + shards_metadata = sharded_tensor_metadata.shards_metadata + tensor_properties = sharded_tensor_metadata.tensor_properties + + if len(shards_metadata) == 0: + raise ValueError("shards_metadata must not be empty!") + + if tensor_properties.layout != torch.strided: + raise ValueError("Only torch.strided layout is currently supported") + + if sharding_spec is None: + spec = shard_spec._infer_sharding_spec_from_shards_metadata(shards_metadata) + else: + spec = sharding_spec + + sharded_tensor = ShardedTensor.__new__( + ShardedTensor, + spec, + sharded_tensor_metadata.size, + dtype=tensor_properties.dtype, + layout=tensor_properties.layout, + pin_memory=tensor_properties.pin_memory, + requires_grad=tensor_properties.requires_grad, + ) + + def _raise_if_mismatch(expected, actual, prop_name, rank, is_property=False): + tensor_property_or_metadata = ( + "tensor property" if is_property else "local ShardMetadata" + ) + if expected != actual: + raise ValueError( + f"Local shards' tensor {prop_name} property is incompatible with " + f"{tensor_property_or_metadata} on rank {rank}: " + f"{tensor_property_or_metadata} {prop_name}={expected}, " + f"local shard tensor {prop_name}={actual}." + ) + + for shard in local_shards: + shard_meta = shard.metadata + local_shard_tensor = shard.tensor + placement = shard_meta.placement + assert placement is not None, "Must specify placement for `Shard`!" + rank = placement.rank() + local_device = placement.device() + + _raise_if_mismatch( + tensor_properties.layout, + local_shard_tensor.layout, + "layout", + rank, + True, + ) + if not local_shard_tensor.is_contiguous(): + raise ValueError( + "Only torch.contiguous_format memory_format is currently supported" + ) + + _raise_if_mismatch( + shard_meta.shard_sizes, + list(local_shard_tensor.size()), + "size", + rank, + ) + _raise_if_mismatch( + tensor_properties.pin_memory, + local_shard_tensor.is_pinned(), + "pin_memory", + rank, + True, + ) + _raise_if_mismatch(local_device, local_shard_tensor.device, "device", rank) + _raise_if_mismatch( + tensor_properties.dtype, + local_shard_tensor.dtype, + "dtype", + rank, + True, + ) + _raise_if_mismatch( + tensor_properties.requires_grad, + local_shard_tensor.requires_grad, + "requires_grad", + rank, + True, + ) + + # check if shards_metadata have overlap shards + validate_non_overlapping_shards_metadata(shards_metadata) + + # check if the shards_metadata is compatible with overall size of the sharded tensor. 
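+        # (illustrative check: two shards with shard_sizes [5, 10] at shard_offsets
+        # [0, 0] and [5, 0] exactly tile a [10, 10] tensor; a gap or an overlap in
+        # that tiling is what the two validations above and below reject)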
+ check_tensor(shards_metadata, list(sharded_tensor_metadata.size)) + + # done validation, add local_shards + sharded_tensor._local_shards = local_shards + sharded_tensor._prepare_init(process_group=process_group, init_rrefs=init_rrefs) + + # run post initialization, i.e. map registration, rpc initialization + sharded_tensor._post_init() + return sharded_tensor + + def sharding_spec(self) -> shard_spec.ShardingSpec: + """ + Returns the ShardingSpec for the tensor. + """ + return self._sharding_spec + + def reshard(self, resharding_spec: shard_spec.ShardingSpec) -> ShardedTensor: + """ + Reshard a sharded tensor given the ``resharding_spec``. For now, we only support + single local shard. + + If ``resharding_spec`` is same as the original one, this becomes a no-op. + If only ``resharding_spec`` shares the same sharding dim with the original one, + we swap local shards directly. + For more generic cases, we merge different shards across different ranks and split + the local shards based on the ``resharding_spec`` via `all_to_all` collective API. + + Args: + resharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`): The + specification describing how the tensor is sharded. + + Returns: + A :class:`ShardedTensor` object whose local shards are resharded. + + Examples: + >>> # xdoctest: +SKIP + >>> # We have 2 process groups, 2 ranks. + >>> tensor = torch.arange(4, dtype=torch.int64) + 1 + 2 * rank + >>> tensor = torch.stack([tensor, tensor]) + >>> tensor + tensor([[1, 2, 3, 4], [1, 2, 3, 4]]) # Rank 0 + tensor([[3, 4, 5, 6], [3, 4, 5, 6]]) # Rank 1 + tensor([[5, 6, 7, 8], [5, 6, 7, 8]]) # Rank 2 + tensor([[7, 8, 9, 10], [7, 8, 9, 10]]) # Rank 3 + >>> sharding_dim = 0 + >>> spec = ChunkShardingSpec( + dim=sharding_dim, + placements=[ + "rank:0/cuda:0", + "rank:1/cuda:1", + "rank:2/cuda:2", + "rank:3/cuda:3", + ], + ) + >>> current_offsets = [0] * 2 + >>> current_offsets[0] = rank * 2 + >>> shard_metadata = ShardMetadata( + shard_offsets=copy.deepcopy(current_offsets), + shard_sizes=tensor.size(), + placement=spec.placements[rank], + ) + >>> local_shards = [ + Shard( + tensor=tensor, + metadata=shard_metadata, + ) + ] + >>> st = ShardedTensor._init_from_local_shards(local_shards, tensor.size()) + >>> sharding_dim = 1 + >>> resharding_spec = ChunkShardingSpec( + dim=sharding_dim, + placements=[ + "rank:0/cuda:0", + "rank:1/cuda:1", + "rank:2/cuda:2", + "rank:3/cuda:3", + ], + ) + >>> st.reshard(resharding_spec) + >>> tensor = st.local_shards()[0].tensor + >>> tensor + tensor([[1], [1], [3], [3], [5], [5], [7], [7]]) # Rank 0 + tensor([[2], [2], [4], [4], [6], [6], [8], [8]]) # Rank 1 + tensor([[3], [3], [5], [5], [7], [7], [9], [9]]) # Rank 2 + tensor([[4], [4], [6], [6], [8], [8], [10], [10]]) # Rank 3 + """ + warnings.warn(DEPRECATE_MSG) + + if ( + not isinstance(resharding_spec, shard_spec.ChunkShardingSpec) or + not isinstance(self._sharding_spec, shard_spec.ChunkShardingSpec) + ): + raise NotImplementedError("Only ChunkShardingSpec supported for reshard.") + if (len(self.local_shards()) != 1): + raise NotImplementedError("Only single local shard supported for reshard.") + + if self._sharding_spec.dim == resharding_spec.dim: # type: ignore[attr-defined] + if self._sharding_spec.placements == resharding_spec.placements: # type: ignore[attr-defined] + return self + else: + local_shards, shards_metadata = reshuffle_local_shard( + self.local_tensor(), + self.size(), # type: ignore[arg-type] + self._sharding_spec, + resharding_spec, + self._process_group, + ) + else: + 
local_shards, shards_metadata = reshard_local_shard( + self.local_tensor(), + self.size(), # type: ignore[arg-type] + self._sharding_spec, + resharding_spec, + self._process_group, + ) + self._local_shards = local_shards + self._metadata.shards_metadata = shards_metadata + self._sharding_spec = resharding_spec + return self + + def local_tensor(self) -> torch.Tensor: + """ + Return local tensor for a sharded_tensor. For now we only support single local shard. + + Returns: + A :class:`torch.Tensor` of the local shard. + """ + if len(self.local_shards()) != 1: + raise NotImplementedError("Only single local shard is supported.") + return self.local_shards()[0].tensor + + @classmethod + def __torch_function__(cls, func, types, args=(), kwargs=None): + def dispatch(st: ShardedTensor, func: Callable): + # Dispatch to custom user provided op first if it exists. + if func in _CUSTOM_SHARDED_OPS: + return _CUSTOM_SHARDED_OPS[func](types, args, kwargs, st._process_group) + + # Dispatch to custom sharding spec op if it has one. + if _has_custom_op(st._sharding_spec, func): + return _dispatch_custom_op( + st._sharding_spec, + func, + types, + args, + kwargs, + st._process_group + ) + + if func in _SHARDED_OPS: + return _SHARDED_OPS[func](types, args, kwargs, st._process_group) + + raise RuntimeError( + f"torch function '{func.__name__}', with args: {args} and " + f"kwargs: {kwargs} not supported for ShardedTensor!") + + warnings.warn(DEPRECATE_MSG) + # Find ShardedTensor instance to get process_group and sharding_spec. + st_instance = None + + def find_sharded_tensor(e): + nonlocal st_instance + if st_instance is None and isinstance(e, ShardedTensor): + st_instance = e + + pytree.tree_map_(find_sharded_tensor, args) + pytree.tree_map_(find_sharded_tensor, kwargs) + + if st_instance is not None: + return dispatch(st_instance, func) + + raise RuntimeError( + f"torch function '{func.__name__}', with args: {args} and " + f"kwargs: {kwargs} not supported for ShardedTensor!") + + def is_pinned(self) -> bool: # type: ignore[override] + """ + Returns True if the sharded tensor (each local shard) resides in pinned memory. + """ + return self._metadata.tensor_properties.pin_memory + + def _register_remote_shards(self, remote_shards: List[rpc.RRef[Shard]], rpc_rank: int): + self._remote_shards[rpc_rank] = remote_shards + + def remote_shards(self) -> Dict[int, List[rpc.RRef[Shard]]]: + """ + Returns a Dict[int, RRef] with keys being the RPC rank and values + being RRefs to shards on that rank. Need to initialize the + RPC framework for this functionality. 
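+
+        Example (illustrative; requires ``init_rrefs=True`` and an initialized
+        RPC agent on every rank)::
+            >>> # xdoctest: +SKIP
+            >>> for rpc_rank, rrefs in st.remote_shards().items():
+            ...     shards = [rref.to_here() for rref in rrefs]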
+ + Raises an exception if ShardedTensor was created with ``init_rrefs=False`` + """ + if not self._init_rrefs: + raise RuntimeError( + 'ShardedTensor created with init_rrefs=False, no RRefs to remote shards available' + ) + return self._remote_shards + + def __hash__(self): + return id(self) + + def __repr__(self): + return f'ShardedTensor({self._metadata})' + + @dataclass + class ProcessGroupState: + """ + State for ser-de of process group + """ + local_rank: int + global_rank: int + local_world_size: int + global_world_size: int + + def __getstate__(self): + pg_state = ShardedTensor.ProcessGroupState( + distributed_c10d.get_rank(self._process_group), + distributed_c10d.get_rank(), + distributed_c10d.get_world_size(self._process_group), + distributed_c10d.get_world_size(), + ) + + return self._local_shards, self._metadata, pg_state, self._sharding_spec, self._init_rrefs + + def __setstate__(self, state): + self._sharded_tensor_id = None + if not distributed_c10d.is_initialized(): + raise RuntimeError( + 'Need to initialize default process group using ' + '"init_process_group" before loading ShardedTensor') + + self._local_shards, self._metadata, pg_state, self._sharding_spec, self._init_rrefs = state + + # Setup process group + from torch.distributed._shard.api import _get_current_process_group + self._process_group = _get_current_process_group() + + # Validate process group. + local_rank = distributed_c10d.get_rank(self._process_group) + if pg_state.local_rank != local_rank: + raise RuntimeError( + f'Local rank at save time was {pg_state.local_rank}, but at ' + f'load time was {local_rank}') + + global_rank = distributed_c10d.get_rank() + if pg_state.global_rank != global_rank: + raise RuntimeError( + f'Global rank at save time was {pg_state.global_rank}, but at ' + f'load time was {global_rank}') + + local_world_size = distributed_c10d.get_world_size(self._process_group) + if pg_state.local_world_size != local_world_size: + raise RuntimeError( + f'Local world size at save time was {pg_state.local_world_size}, ' + f'but at load time was {local_world_size}') + + global_world_size = distributed_c10d.get_world_size() + if pg_state.global_world_size != global_world_size: + raise RuntimeError( + f'Global world size at save time was {pg_state.global_world_size}, ' + f'but at load time was {global_world_size}') + + self._post_init() + + +def _create_tensor_from_params(*size, local_device, tensor_properties: TensorProperties): + """ Helper to construct tensor from size, device and common params. """ + dtype = tensor_properties.dtype + layout = tensor_properties.layout + requires_grad = tensor_properties.requires_grad + memory_format = tensor_properties.memory_format + pin_memory = tensor_properties.pin_memory + + return torch.empty( + *size, dtype=dtype, layout=layout, + device=local_device, requires_grad=requires_grad, + memory_format=memory_format, pin_memory=pin_memory + ) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/logger.py b/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/logger.py new file mode 100644 index 0000000000000000000000000000000000000000..87cb74fbd01d20dd41fe475184f25929b9ab8833 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/logger.py @@ -0,0 +1,37 @@ +#!/usr/bin/env python3 + +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. 
+# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +import logging +from typing import List, Tuple + +from torch.distributed._shard.sharded_tensor.logging_handlers import ( + _log_handlers, +) + +__all__: List[str] = [] + + +def _get_or_create_logger() -> logging.Logger: + logging_handler, log_handler_name = _get_logging_handler() + logger = logging.getLogger(f"sharding-spec-{log_handler_name}") + logger.setLevel(logging.DEBUG) + formatter = logging.Formatter( + "%(asctime)s %(filename)s:%(lineno)s %(levelname)s p:%(processName)s t:%(threadName)s: %(message)s" + ) + logging_handler.setFormatter(formatter) + logger.propagate = False + logger.addHandler(logging_handler) + return logger + + +def _get_logging_handler( + destination: str = "default", +) -> Tuple[logging.Handler, str]: + log_handler = _log_handlers[destination] + log_handler_name = type(log_handler).__name__ + return (log_handler, log_handler_name) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/logging_handlers.py b/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/logging_handlers.py new file mode 100644 index 0000000000000000000000000000000000000000..3c607fe45da7713dc52ca01ce70abb53cdebb42f --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/logging_handlers.py @@ -0,0 +1,16 @@ +#!/usr/bin/env python3 + +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +import logging +from typing import Dict, List + +__all__: List[str] = [] + +_log_handlers: Dict[str, logging.Handler] = { + "default": logging.NullHandler(), +} diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/metadata.py b/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/metadata.py new file mode 100644 index 0000000000000000000000000000000000000000..cb112da5686b5b92809be393d035cc674eaddd86 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/metadata.py @@ -0,0 +1,82 @@ +from dataclasses import dataclass, field +from enum import Enum +from typing import List + +import torch +from torch.distributed._shard.metadata import ShardMetadata + +class MEM_FORMAT_ENCODING(Enum): + TORCH_CONTIGUOUS_FORMAT = 0 + TORCH_CHANNELS_LAST = 1 + TORCH_PRESERVE_FORMAT = 2 + +@dataclass +class TensorProperties: + """ Properties used to create :class:`Tensor` """ + + # Regular tensor fields + dtype: torch.dtype = field(default=torch.get_default_dtype()) + layout: torch.layout = field(default=torch.strided) + requires_grad: bool = False + memory_format: torch.memory_format = field(default=torch.contiguous_format) + pin_memory: bool = False + + def __getstate__(self): + # Since torch.memory_format cannot be pickled! 
+ memory_format = self.memory_format + if memory_format == torch.contiguous_format: + mem_format_encoding = MEM_FORMAT_ENCODING.TORCH_CONTIGUOUS_FORMAT + elif memory_format == torch.channels_last: + mem_format_encoding = MEM_FORMAT_ENCODING.TORCH_CHANNELS_LAST + elif memory_format == torch.preserve_format: + mem_format_encoding = MEM_FORMAT_ENCODING.TORCH_PRESERVE_FORMAT + else: + raise RuntimeError(f'Invalid torch.memory_format: {memory_format}') + + return ( + self.dtype, + self.layout, + self.requires_grad, + mem_format_encoding, + self.pin_memory, + ) + + def __setstate__( + self, + state, + ): + (self.dtype, self.layout, self.requires_grad, mem_format_encoding, self.pin_memory) = state + + if mem_format_encoding == MEM_FORMAT_ENCODING.TORCH_CONTIGUOUS_FORMAT: + memory_format = torch.contiguous_format + elif mem_format_encoding == MEM_FORMAT_ENCODING.TORCH_CHANNELS_LAST: + memory_format = torch.channels_last + elif mem_format_encoding == MEM_FORMAT_ENCODING.TORCH_PRESERVE_FORMAT: + memory_format = torch.preserve_format + else: + raise RuntimeError(f'Invalid torch.memory_format encoding: {mem_format_encoding}') + + self.memory_format = memory_format + + @staticmethod + def create_from_tensor(tensor: torch.Tensor) -> "TensorProperties": + return TensorProperties( + dtype=tensor.dtype, + layout=tensor.layout, + requires_grad=tensor.requires_grad, + memory_format=torch.contiguous_format, + pin_memory=tensor.is_pinned() + ) +@dataclass +class ShardedTensorMetadata: + """ + Represents metadata for :class:`ShardedTensor` + """ + + # Metadata about each shard of the Tensor + shards_metadata: List[ShardMetadata] = field(default_factory=list) + + # Size of each dim of the overall Tensor. + size: torch.Size = field(default=torch.Size([])) + + tensor_properties: TensorProperties = field(default_factory=TensorProperties) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/reshard.py b/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/reshard.py new file mode 100644 index 0000000000000000000000000000000000000000..de7a44bb820090e34bbbf7e80788e93948c801ec --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/reshard.py @@ -0,0 +1,248 @@ +import copy +from typing import List, Tuple + +import torch +import torch.distributed as dist +from torch._C._distributed_c10d import ( + ProcessGroup, +) +import torch.distributed._shard.sharding_spec as shard_spec +from torch.distributed._shard.sharding_spec._internals import ( + get_split_size, + get_chunked_dim_size, +) +from torch.distributed.nn.functional import ( + all_to_all, + all_to_all_single, +) +from torch.distributed._shard.metadata import ShardMetadata + +from .shard import Shard + + +def get_idx_from_placements(placements, current_rank) -> int: + """ + Return the position of the current rank in the given placements. + + Args: + placements(List[Union[_remote_device, str]]): + Specifies the placement of each shard of the Tensor. The size of + the list represents the number of shards to be created. This could + be a list of + :class:`torch.distributed._remote_device`'s. This list + could also contain a string which represents remote + device as accepted by + :class:`torch.distributed._remote_device` + current_rank (int): number of current device. + + Returns: + A int which contains the position of current device in the placement list. 
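+
+    Example (illustrative; assumes two ranks described by ``_remote_device`` placements)::
+        >>> # xdoctest: +SKIP
+        >>> from torch.distributed.remote_device import _remote_device
+        >>> placements = [_remote_device("rank:0/cuda:0"), _remote_device("rank:1/cuda:1")]
+        >>> get_idx_from_placements(placements, current_rank=1)
+        1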
+ """ + for idx, placement in enumerate(placements): # type: ignore[attr-defined] + if current_rank == placement.rank(): # type: ignore[union-attr] + return idx + raise RuntimeError('current_rank not in the placement.') + + +def build_reshard_metadata( + st_size: torch.Size, + sharding_spec: shard_spec.ShardingSpec, + world_size: int, +) -> Tuple[List[ShardMetadata], List[int]]: + """ + Based the given sharding spec, we calculate the offset and local shard size. + We then build a ShardMetadata on top of the calculation result. + + Args: + st_size (torch.Size): The size of the sharded tensor. + sharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`): The + specification describing how the tensor is sharded. + world_size (int): number of ranks. + + Returns: + A Tuple of the followings: + A List[`ShardMetadata`] which contains the metadata for the shard, including + offsets, lengths and device placement. + A List[int] which contains the ranks in the order of placement. + """ + shard_dim = int(sharding_spec.dim) # type: ignore[attr-defined] + shards_metadata = [None] * world_size + ranks = [] + offsets = [0] * len(st_size) + split_size = get_split_size(st_size[shard_dim], world_size) + for idx, placement in enumerate(sharding_spec.placements): # type: ignore[attr-defined] + ranks.append(placement.rank()) + sharded_dim_size = get_chunked_dim_size(st_size[shard_dim], split_size, idx) + local_tensor_size = list(st_size) + local_tensor_size[shard_dim] = sharded_dim_size + shards_metadata[placement.rank()] = ShardMetadata( # type: ignore[call-overload] + shard_offsets=copy.deepcopy(offsets), + shard_sizes=local_tensor_size, + placement=placement, + ) + offsets[shard_dim] += sharded_dim_size + return shards_metadata, ranks # type: ignore[return-value] + + +def reshuffle_local_shard( + local_shard: torch.Tensor, + st_size: torch.Size, + sharding_spec: shard_spec.ShardingSpec, + resharding_spec: shard_spec.ShardingSpec, + pg: ProcessGroup, +) -> Tuple[List[Shard], List[ShardMetadata]]: + """ + Reshuffle the local shard directly when the reshard dim is same as the original + sharding dim. Logically we do this in two step: + 1. To collect all shards based on original sharding spec. + 2. Reshard the tensor based on the given resharding spec. + + In reality, we consolidate the two steps into one by sending the local tensor to + the new shard directly based on the resharding spec. + + Args: + local_shard (Tensor): Local tensor stored in the current rank. + st_size (torch.Size): The size of the sharded tensor. + sharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`): The + specification describing how the tensor is sharded originally. + resharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`): The + specification describing how the tensor will be resharded. + pg (ProcessGroup): The process group to aggregate on. + + Returns: + A Tuple of the followings: + A List[`Shard`] which contains the local tensor and its metadata. + A List[`ShardMetadata`] which contains the metadata for the shard, including + offsets, lengths and device placement. + """ + current_rank = dist.get_rank(pg) + world_size = dist.get_world_size(pg) + # Build shards_metadata first. + shards_metadata, ranks = build_reshard_metadata( + st_size, resharding_spec, world_size + ) + # Get input split size for all2all. 
+ reshard_dim = int(resharding_spec.dim) # type: ignore[attr-defined] + split_size = get_split_size(st_size[reshard_dim], world_size) + input_split_sizes = [0] * world_size + idx = get_idx_from_placements(sharding_spec.placements, current_rank) # type: ignore[attr-defined] + new_rank = resharding_spec.placements[idx].rank() # type: ignore[union-attr, attr-defined] + input_split_sizes[new_rank] = local_shard.size(reshard_dim) + # Get output split size for all2all. + output_split_sizes = [0] * world_size + new_idx = ranks.index(current_rank) + sharded_dim_size = get_chunked_dim_size(st_size[reshard_dim], split_size, new_idx) + output_split_sizes[new_rank] = sharded_dim_size + # Get gathered_input for all2all. + local_shard = local_shard.transpose(0, reshard_dim).contiguous() + gathered_input_size = list(local_shard.size()) + gathered_input_size[0] = sharded_dim_size + gathered_input = torch.empty(gathered_input_size, device=local_shard.device, dtype=local_shard.dtype) + # all2all. + local_shard = all_to_all_single( + gathered_input, + local_shard, + input_split_sizes=input_split_sizes, + output_split_sizes=output_split_sizes, + group=pg, + ) + local_tensor = local_shard.transpose(0, reshard_dim).contiguous() + local_shards = [Shard(local_tensor, shards_metadata[current_rank])] + return local_shards, shards_metadata + + +def reshard_local_shard( + local_tensor: torch.Tensor, + st_size: torch.Size, + sharding_spec: shard_spec.ShardingSpec, + resharding_spec: shard_spec.ShardingSpec, + pg: ProcessGroup, +) -> Tuple[List[Shard], List[ShardMetadata]]: + """ + Reshard a sharded tensor given the ``resharding_spec``. When the reshard dim is + different from the original sharding dim, we need to do two steps logically: + 1. To collect all shards based on original sharding spec. + 2. Reshard the tensor based on the given resharding spec. + + In reality, we consolidate the two steps into one by sending each rank the new + shard based on the resharding spec. + + Args: + local_tensor (Tensor): Local tensor stored in the current rank. + st_size (torch.Size): The size of the sharded tensor. + sharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`): The + specification describing how the tensor is sharded originally. + resharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`): The + specification describing how the tensor will be resharded. + pg (ProcessGroup): The process group to aggregate on. + + Returns: + A Tuple of the followings: + A List[`Shard`] which contains the local tensor and its metadata. + A List[`ShardMetadata`] which contains the metadata for the shard, including + offsets, lengths and device placement. + """ + current_rank = dist.get_rank(pg) + world_size = dist.get_world_size(pg) + current_sharding_dim = int(sharding_spec.dim) # type: ignore[attr-defined] + reshard_dim = int(resharding_spec.dim) # type: ignore[attr-defined] + + # Build shards_metadata first. + shards_metadata, ranks = build_reshard_metadata( + st_size, resharding_spec, world_size + ) + + # Compute expected size + input_split_sizes = [] + for metadata in shards_metadata: + input_split_sizes.append(metadata.shard_sizes[reshard_dim]) + rearrange_input = any(ranks[i] > ranks[i + 1] for i in range(len(ranks) - 1)) + + if rearrange_input: + # Need to re-arrange reshard_dim of local_tensor before all2all. 
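+        # The ranks in the resharding spec's placements are out of ascending order, so re-order the slices of local_tensor along reshard_dim (index_select below) so that torch.split later yields them in rank order.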
+ indices: List[int] = [] + for metadata in shards_metadata: + offset_start_idx = metadata.shard_offsets[reshard_dim] + split_size = metadata.shard_sizes[reshard_dim] + indices += range(offset_start_idx, offset_start_idx + split_size) + local_tensor = local_tensor.index_select( + reshard_dim, torch.tensor(indices, device=local_tensor.device) + ) + + # Because reshard_dim != original shard_dim. We need to compute the + # size of tensor from each rank. + output_tensor_list = [torch.tensor(1)] * world_size + split_size = get_split_size(st_size[current_sharding_dim], world_size) + rearrange_output_list = False + indices = [] + for idx, placement in enumerate(sharding_spec.placements): # type: ignore[attr-defined] + sharded_dim_size = get_chunked_dim_size( + st_size[current_sharding_dim], split_size, idx + ) + output_tensor_size = list(st_size) + output_tensor_size[current_sharding_dim] = sharded_dim_size + output_tensor_size[reshard_dim] = input_split_sizes[current_rank] + output_tensor_list[ + placement.rank() + ] = torch.empty( # type: ignore[union-attr, index] + output_tensor_size, device=local_tensor.device, dtype=local_tensor.dtype + ) + indices.append(placement.rank()) # type: ignore[union-attr, index, arg-type] + if idx != placement.rank(): # type: ignore[union-attr] + rearrange_output_list = True + + # Perform autograd enabled all2all. + input_tensor_tuple = torch.split(local_tensor, input_split_sizes, dim=reshard_dim) + input_tensor_list = [tensor.contiguous() for tensor in input_tensor_tuple] + output_tensor_list = all_to_all( + output_tensor_list, + input_tensor_list, + group=pg, + ) + + if rearrange_output_list: + # Need to re-arrange original shard_dim of output_tensor_list. + output_tensor_list = [output_tensor_list[idx] for idx in indices] # type: ignore[call-overload] + local_tensor = torch.cat(output_tensor_list, dim=current_sharding_dim) + local_shards = [Shard(local_tensor, shards_metadata[current_rank])] + return local_shards, shards_metadata diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/shard.py b/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/shard.py new file mode 100644 index 0000000000000000000000000000000000000000..d448cc6321b1015498815bb2c243be60896bcbf4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/shard.py @@ -0,0 +1,58 @@ +from dataclasses import dataclass +from typing import List + +import torch +from torch.distributed._shard.metadata import ShardMetadata +from torch.distributed.remote_device import _remote_device + + +@dataclass +class Shard: + """ + Container which holds the data for a shard as a Tensor and also + the associated metadata for that shard. + + Args: + tensor(torch.Tensor): Local tensor for the shard. + metadata(:class `torch.distributed._shard.sharded_tensor.ShardMetadata`): + The metadata for the shard, including offsets, lengths and device placement. + """ + __slots__ = ['tensor', 'metadata'] + tensor: torch.Tensor + metadata: ShardMetadata + + def __post_init__(self): + # verification between local tensor and metadata + if list(self.tensor.size()) != self.metadata.shard_sizes: + raise ValueError( + "Shard tensor size does not match with metadata.shard_lengths! 
" + f"Found shard tensor size: {list(self.tensor.size())}, " + f"metadata.shard_lengths: {self.metadata.shard_sizes}, " + ) + placement_device = self.metadata.placement + if placement_device is not None and placement_device.device() != self.tensor.device: + raise ValueError( + f"Local shard tensor device does not match with local Shard's placement! " + f"Found local shard tensor device: {self.tensor.device}, " + f"local shard metadata placement device: {placement_device.device()}" + ) + + @classmethod + def from_tensor_and_offsets(cls, tensor: torch.Tensor, shard_offsets: List[int], rank: int): + """ + Creates a Shard of a ShardedTensor from a local torch.Tensor, shard_offsets and rank. + + Args: + tensor(torch.Tensor): Local tensor for the shard. + shard_offsets(List[int]): List of integers specify the offset + of the shard on each dimension. + rank(int): Specify the rank for the shard. + """ + shard_sizes = list(tensor.size()) + placement = _remote_device(f"rank:{rank}/{str(tensor.device)}") + shard_meta = ShardMetadata( + shard_offsets=shard_offsets, + shard_sizes=shard_sizes, + placement=placement + ) + return Shard(tensor, shard_meta) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/utils.py b/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..9ca0ee0eba743284186317febadfafec6c7a9a44 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharded_tensor/utils.py @@ -0,0 +1,211 @@ +import collections.abc +import copy +from typing import Optional, List, Sequence + +import torch +from torch.distributed import distributed_c10d +from torch.distributed import rpc +from torch.distributed._shard.sharding_spec._internals import ( + check_tensor, + validate_non_overlapping_shards_metadata, +) + +from torch.distributed._shard.metadata import ShardMetadata +from .metadata import TensorProperties, ShardedTensorMetadata +from .shard import Shard + +def _parse_and_validate_remote_device(pg, remote_device): + if remote_device is None: + raise ValueError("remote device is None") + + worker_name = remote_device.worker_name() + rank = remote_device.rank() + device = remote_device.device() + + # Validate rank, skip validation if rank is not part of process group. + if not distributed_c10d._rank_not_in_group(pg): + if rank is not None and (rank < 0 or rank >= distributed_c10d.get_world_size(pg)): + raise ValueError(f'Invalid rank: {rank}') + + if worker_name is not None: + if not rpc._is_current_rpc_agent_set(): + raise RuntimeError(f'RPC framework needs to be initialized for using worker names: {worker_name}') + + workers = rpc._get_current_rpc_agent().get_worker_infos() + for worker in workers: + if worker.name == worker_name: + return worker.id, device + + raise ValueError(f'Invalid worker name: {worker_name}') + + return rank, device + +def _validate_output_tensor_for_gather( + my_rank: int, + dst_rank: int, + size: torch.Size, + dst_tensor: Optional[torch.Tensor], +) -> None: + if dst_rank == my_rank: + if dst_tensor is None: + raise ValueError( + f"Argument ``dst_tensor`` must be specified on destination rank {dst_rank}" + ) + if tuple(size) != (dst_tensor.size()): + raise ValueError( + f"Argument ``dst_tensor`` have size {tuple(dst_tensor.size())}," + f"but should be {tuple(size)}" + ) + elif dst_tensor: + raise ValueError( + "Argument ``dst_tensor`` must NOT be specified " + "on non-destination ranks." 
+ ) + +def _flatten_tensor_size(size) -> torch.Size: + """ + Checks if tensor size is valid, then flatten/return a torch.Size object. + """ + if len(size) == 1 and isinstance(size[0], collections.abc.Sequence): + dims = list(*size) + else: + dims = list(size) + + for dim in dims: + if not isinstance(dim, int): + raise TypeError(f'size has to be a sequence of ints, found: {dims}') + + return torch.Size(dims) + +def _raise_if_mismatch(expected, actual, prop_name, ranks, is_local=True): + if is_local: + assert isinstance(ranks, int) + if expected != actual: + raise ValueError(f"Local shards' tensor {prop_name} property need to be the same on rank:{ranks}! " + f"Found one local shard tensor {prop_name}={expected}, " + f"the other local shard tensor {prop_name}={actual}.") + else: + # compare failure check across ranks, ranks list should have two rank + assert len(ranks) == 2 + if expected != actual: + raise ValueError(f"ShardedTensor {prop_name} property does not match from different ranks! " + f"Found {prop_name}={expected} on rank:{ranks[0]}, " + f"and {prop_name}={actual} on rank:{ranks[1]}.") + + +def build_metadata_from_local_shards( + local_shards: List[Shard], + global_size: torch.Size, + current_rank: int, + pg: distributed_c10d.ProcessGroup +) -> ShardedTensorMetadata: + + assert len(local_shards) > 0, "must have local shards!" + local_shard_metadatas: List[ShardMetadata] = [] + + first_shard_dtype = local_shards[0].tensor.dtype + first_shard_layout = local_shards[0].tensor.layout + first_shard_requires_grad = local_shards[0].tensor.requires_grad + first_shard_is_pinned = local_shards[0].tensor.is_pinned() + + # 1). Validate local tensors and associated metadatas + for local_shard in local_shards: + local_shard_tensor = local_shard.tensor + local_shard_meta = local_shard.metadata + local_shard_metadatas.append(local_shard_meta) + rank, local_device = _parse_and_validate_remote_device(pg, local_shard_meta.placement) + + if local_shard_tensor.layout != torch.strided or local_shard_tensor.layout != first_shard_layout: + raise ValueError( + f'Only torch.strided layout is currently supported, but found ' + f'{local_shard_tensor.layout} on rank:{current_rank}!' + ) + + if not local_shard_tensor.is_contiguous(): + raise ValueError('Only torch.contiguous_format memory_format is currently supported!') + + if rank != current_rank: + raise ValueError( + f"Local shard metadata's rank does not match with the rank in its process group! " + f'Found current rank in the process group: {current_rank}, ' + f"local ShardMetadata placement's rank: {rank}" + ) + if local_shard_tensor.device != local_device: + raise ValueError( + f"Local shard tensor device does not match with local Shard's placement! " + f"Found local shard tensor device: {local_shard_tensor.device}, " + f"local shard metadata placement device: {local_device}" + ) + + _raise_if_mismatch(local_shard_meta.shard_sizes, list(local_shard_tensor.size()), "size", current_rank) + _raise_if_mismatch(local_shard_tensor.is_pinned(), first_shard_is_pinned, "pin_memory", current_rank) + _raise_if_mismatch(local_shard_tensor.dtype, first_shard_dtype, "dtype", current_rank) + _raise_if_mismatch(local_shard_tensor.requires_grad, first_shard_requires_grad, "requires_grad", current_rank) + + # 2). 
Build a "local" ShardedTensorMetadata with all local shards on this rank, then + # do all_gather to collect local_sharded_tensor_metadata from all ranks + local_tensor_properties = TensorProperties( + dtype=first_shard_dtype, + layout=first_shard_layout, + requires_grad=first_shard_requires_grad, + memory_format=torch.contiguous_format, + pin_memory=first_shard_is_pinned + ) + + local_sharded_tensor_metadata = ShardedTensorMetadata( + shards_metadata=local_shard_metadatas, + size=global_size, + tensor_properties=local_tensor_properties) + + return local_sharded_tensor_metadata + + +def build_global_metadata(gathered_metadatas: Sequence[Optional[ShardedTensorMetadata]]): + global_sharded_tensor_metadata = None + global_metadata_rank = 0 + + for rank, rank_metadata in enumerate(gathered_metadatas): + if rank_metadata is None: + continue + + if global_sharded_tensor_metadata is None: + global_sharded_tensor_metadata = copy.deepcopy(rank_metadata) + global_metadata_rank = rank + else: + _raise_if_mismatch(global_sharded_tensor_metadata.size, + rank_metadata.size, + "global_size", + [global_metadata_rank, rank], + is_local=False) + + # don't need to check layout and memory format as we already checked in local shards validation stage + _raise_if_mismatch(global_sharded_tensor_metadata.tensor_properties.dtype, + rank_metadata.tensor_properties.dtype, + "dtype", + [global_metadata_rank, rank], + is_local=False) + + _raise_if_mismatch(global_sharded_tensor_metadata.tensor_properties.requires_grad, + rank_metadata.tensor_properties.requires_grad, + "requires_grad", + [global_metadata_rank, rank], + is_local=False) + + _raise_if_mismatch(global_sharded_tensor_metadata.tensor_properties.pin_memory, + rank_metadata.tensor_properties.pin_memory, + "pin_memory", + [global_metadata_rank, rank], + is_local=False) + # pass all validations, extend shards metadata + global_sharded_tensor_metadata.shards_metadata.extend(rank_metadata.shards_metadata) + + if global_sharded_tensor_metadata is not None: + # check if shards_metadata have overlap shards + validate_non_overlapping_shards_metadata(global_sharded_tensor_metadata.shards_metadata) + + # check if the shards_metadata is compatible with global size of the sharded tensor. + check_tensor(global_sharded_tensor_metadata.shards_metadata, global_sharded_tensor_metadata.size) + else: + raise ValueError("ShardedTensor have no local shards on all ranks!") + + return global_sharded_tensor_metadata diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharder.py b/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharder.py new file mode 100644 index 0000000000000000000000000000000000000000..bf3b3596d1beea8b50c9c81db74d91ffcb42c16b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharder.py @@ -0,0 +1,27 @@ +import abc +import torch.nn as nn + +class Sharder(abc.ABC): + """ + This is an interface which allows user to create more advanced + sharding strategies that are not easily be composed by the + `ShardingSpec`. + + :class:`torch.distributed._shard.sharding_plan.ShardingPlan` could + take an object of the `Sharder` and call `shard` to shard the module, + then replace the original module with sharded module returned. + """ + @abc.abstractmethod + def shard(self, module: nn.Module) -> nn.Module: + """ + Shard a module base on the implementation of this method, and + return the sharded version of the module. + + Args: + module (:class:`torch.nn.Module`): + The module to apply sharding to. 
+ Returns: + A :class:`torch.nn.Module` object that represents a module + that's already been sharded. + """ + pass diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharding_plan/__init__.py b/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharding_plan/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..269dfd8af76052f20852a2492adbc7adaf83c040 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharding_plan/__init__.py @@ -0,0 +1,4 @@ +from .api import ( + ShardingPlan, + ShardingPlanner +) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharding_plan/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharding_plan/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ff1a4e388b7116f3f6fd854599bd0cd72dcf5df4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharding_plan/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharding_plan/__pycache__/api.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharding_plan/__pycache__/api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..05be9df6785328299767afd16d0c500b9e5f9454 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharding_plan/__pycache__/api.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharding_plan/api.py b/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharding_plan/api.py new file mode 100644 index 0000000000000000000000000000000000000000..fa92bf70788876216f6b6c139f6c4389bd42747a --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharding_plan/api.py @@ -0,0 +1,86 @@ +import abc +import torch.nn as nn + +from dataclasses import dataclass +from typing import Dict, List, Optional, Union + +from torch.distributed._shard.sharder import Sharder +from torch.distributed._shard.sharding_spec import ShardingSpec + +@dataclass +class ShardingPlan: + """ + Representation of a sharding plan, describes how to shard a module + across hosts. `plan` is used to shard module parameters according to the spec provided, + `output_plan` and `return_local_tensor` are optional, they are used to specify the output + layout of a module with a spec, and when to convert back to data parallel fashion. + + Args: + plan (Dict[str, Union[:class:`torch.distributed._shard.sharding_spec.ShardingSpec`, + :class:`torch.distributed._shard.sharder.Sharder`]): + a dict describes how to shard a module, there're currently two ways to shard a module: + 1. directly shard a module parameter by a `ShardingSpec`, keyed by the name of + a parameter to a `ShardingSpec`. + 2. shard a submodule by applying a `Sharder` on it, keyed by the name of a module + to a `Sharder` object. + output_plan (Dict[str, :class:`torch.distributed._shard.sharding_spec.ShardingSpec`), optional): + a dict specifies the layout of a module's output which produces a ShardedTensor, + keyed by the name of module to ShardingSpec("" in key means the root module). + Default: `None` + return_local_tensor (List[str], optional): a list of string, each element enables + a module's sharded output to be returned as a Tensor from its local shards to + ensure further processing in a data parallel fashion. 
("" in list means the + root module). + Default: None + Example: + Suppose we want to shard a module with two linear layers and then run it with DDP, we also + want to convert the output of the second linear layer back to DDP, we can do it as follows: + + >>> # xdoctest: +REQUIRES(module:torch._C._distributed_c10d) + >>> class MyModule(nn.Module): + >>> def __init__(self): + >>> super().__init__() + >>> self.fc1 = nn.Linear() + >>> self.gelu = nn.GELU() + >>> self.fc2 = nn.Linear() + >>> self.relu = nn.Linear() + >>> + >>> def forward(self, input): + >>> return self.relu(self.fc2(self.gelu(self.fc1(input)))) + + + >>> # xdoctest: +SKIP("Undefined spec1, spec2) + >>> sharding_plan = ShardingPlan( + >>> plan={ + >>> "fc1.weight": spec1, + >>> "fc2.weight": spec2 + >>> }, + >>> output_plan={ + >>> "fc2": output_spec + >>> }, + >>> return_local_tensor=["fc2"] + >>> ) + """ + plan: Dict[str, Union[ShardingSpec, Sharder]] + output_plan: Optional[Dict[str, ShardingSpec]] = None + return_local_tensor: Optional[List[str]] = None + + +class ShardingPlanner(abc.ABC): + """ + Default ShardingPlanner interface, can be extended and + implement advanced sharding strategies. + """ + @abc.abstractmethod + def build_plan(self, module: nn.Module) -> ShardingPlan: + """ + Given a nn.Module, define how to shard the module across + ranks, return a ShardingPlan + Args: + module (:class:`torch.nn.Module`): + The module to apply sharding to. + Returns: + A :class:`torch.distributed._shard.sharding_plan.ShardingPlan` object that + represents how to shard the module. + """ + pass diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/__init__.py b/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8dd38105c53ba4783b4d1517f88f54025719eab4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/__init__.py @@ -0,0 +1,12 @@ +from .api import ( + DevicePlacementSpec, + EnumerableShardingSpec, + PlacementSpec, + ShardingSpec, + _infer_sharding_spec_from_shards_metadata, +) +from .chunk_sharding_spec import ( + ChunkShardingSpec as ChunkShardingSpec, +) + +from torch.distributed._shard.metadata import ShardMetadata diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..be9ab36394cafd40350bd1aae46a8263ffeee239 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/__pycache__/_internals.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/__pycache__/_internals.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ba3b8d4433d8f741ce3190f1f1edefc0152a3dcb Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/__pycache__/_internals.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/__pycache__/api.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/__pycache__/api.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..71b851a7b1fc31b952d79a18368490999c7efba8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/__pycache__/api.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/__pycache__/chunk_sharding_spec.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/__pycache__/chunk_sharding_spec.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..70908c626f2010476c48617fae24ccb9fb9bfb1e Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/__pycache__/chunk_sharding_spec.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/_internals.py b/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/_internals.py new file mode 100644 index 0000000000000000000000000000000000000000..e8275063e038266cf3893c83ba2a552de2723c8c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/_internals.py @@ -0,0 +1,209 @@ +from typing import List, Optional, Tuple + +from torch.distributed._shard.metadata import ShardMetadata + + +def _check_shard_metadata_pair_overlap(shard1: ShardMetadata, shard2: ShardMetadata): + """ + Checks if two shards overlap. + """ + + # For each dim of each shard, check if one shard resides on the other + # end of second shard with respect to that dim. As an example for a 2D + # shard, we would check if one shard is above or on the left of the + # other shard. + ndims = len(shard1.shard_offsets) + for i in range(ndims): + if shard1.shard_offsets[i] >= shard2.shard_offsets[i] + shard2.shard_sizes[i]: + return False + if shard2.shard_offsets[i] >= shard1.shard_offsets[i] + shard1.shard_sizes[i]: + return False + + return True + + +def _find_nd_overlapping_shards( + shards: List[ShardMetadata], sharded_dims: List[int] +) -> Optional[Tuple[int, int]]: + # Each rank has len(sharded_dims) tuples. Each tuple represent the + # [begin, end] (inclusive) pair of that dimension. + shard_intervals = [ + [ + (s.shard_offsets[dim], s.shard_offsets[dim] + s.shard_sizes[dim] - 1) + for dim in sharded_dims + ] + for s in shards + ] + + for i in range(len(shards)): + shard_i = shard_intervals[i] + for j in range(i + 1, len(shards)): + shard_j = shard_intervals[j] + # For each dim of each shard, check if one shard resides on the other + # end of second shard with respect to that dim. As an example for a 2D + # shard, we would check if one shard is above or on the left of the + # other shard. + overlap = True + for interval_i, interval_j in zip(shard_i, shard_j): + if interval_i[0] > interval_j[1] or interval_j[0] > interval_i[1]: + overlap = False + break + if overlap: + return (i, j) + return None + + +def _find_1d_overlapping_shards( + shards: List[ShardMetadata], dim: int +) -> Optional[Tuple[int, int]]: + # (begin, end, index_in_shards). Begin and end are inclusive. + intervals = [ + (s.shard_offsets[dim], s.shard_offsets[dim] + s.shard_sizes[dim] - 1, i) + for i, s in enumerate(shards) + ] + intervals.sort() + for i in range(len(shards) - 1): + if intervals[i][1] >= intervals[i + 1][0]: + return (intervals[i][2], intervals[i + 1][2]) + return None + + +def validate_non_overlapping_shards_metadata(shards: List[ShardMetadata]): + """ + Ensures none of the shards overlap with each other. 
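+    Uses an O(n log n) interval check when the shards differ along a single dimension and falls back to a pair-wise check when they differ along several dimensions.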
+ + Args: + shards(List[ShardMetadata]): List of :class:`ShardMetadata` objects representing + each shard. + Raises: + ``ValueError`` if there's overlap in any two shards. + """ + if not shards or len(shards) == 1: + return + + sharded_dims: List[int] = [] + for dim in range(len(shards[0].shard_offsets)): + for i in range(1, len(shards)): + if ( + shards[i].shard_offsets[dim] != shards[0].shard_offsets[dim] or + shards[i].shard_sizes[dim] != shards[0].shard_sizes[dim] + ): + sharded_dims.append(dim) + break + + pair: Optional[Tuple[int, int]] = None + if len(sharded_dims) == 0: + # All shards are the same, all dims are not partitioned. Choose any 2. + pair = (0, 1) + elif len(sharded_dims) == 1: + # Shards are partitioned over only one dimension. Overlap can be found + # using a O(nlogn) overlapping interval algorithm. + pair = _find_1d_overlapping_shards(shards, sharded_dims[0]) + else: + # Shards are partitioned over more than one dimension. Fall back to + # pair-wise check. Even though O(nlogn) algorithms (line sweep) exist + # for 2D overlap, the implementation is not trivial and may not justify + # the time saving in most cases. + pair = _find_nd_overlapping_shards(shards, sharded_dims) + + if pair: + raise ValueError(f'Shards {shards[pair[0]]} and {shards[pair[1]]} overlap') + + +def check_tensor(shards_metadata, tensor_dims) -> None: + """ + Checks if the shards_metadata is compatible with the provided tensor dims. + + Args: + shards_metadata(List[ShardMetadata]): List of :class:`ShardMetadata` + objects representing each shard of the tensor. + tensor_dims(Sequence of int): Dimensions of tensor to verify + Raises: + ``ValueError`` if not compatible. + """ + + # If the tensor's volume matches the total volume of all shards and + # all shard boundaries are within tensor dims, we have a compatible + # sharding spec for this tensor. Note that we have already verified + # we don't have overlapping shards. + tensor_rank = len(tensor_dims) + shards_rank = len(shards_metadata[0].shard_offsets) + if tensor_rank != shards_rank: + raise ValueError(f'Rank of tensor is {tensor_rank}, but shards rank is {shards_rank}') + + total_shard_volume = 0 + for shard in shards_metadata: + shard_volume = 1 + for i, shard_length in enumerate(shard.shard_sizes): + shard_volume *= shard_length + if shard.shard_offsets[i] + shard.shard_sizes[i] > tensor_dims[i]: + raise ValueError( + f'Shard offset {shard.shard_offsets[i]} and length ' + f'{shard.shard_sizes[i]} exceeds tensor dim: {tensor_dims[i]} for shard {shard}') + total_shard_volume += shard_volume + + tensor_volume = 1 + for size in tensor_dims: + tensor_volume *= size + + if total_shard_volume != tensor_volume: + # TODO: Can we improve this error message to point out the gaps? + raise ValueError( + f'Total volume of shards: {total_shard_volume} ' + f'does not match tensor volume: {tensor_volume}, in other words ' + f'all the individual shards do not cover the entire tensor') + +def get_split_size(dim_size, chunks): + """ + Computes the split size inline with ``torch.chunk`` + + Args: + dim_size(int): Size of the dimension being chunked. + chunks(int): Number of chunks to create for ``dim_size``. + + Returns: + An int indicating the split size to use. + """ + return (dim_size + chunks - 1) // chunks + +def get_chunked_dim_size(dim_size, split_size, idx): + """ + Computes the dim size of the chunk for provided ``idx`` given ``dim_size`` + and ``split_size``. + + Args: + dim_size(int): Size of the dimension being chunked. 
+ split_size(int): The chunk size for each chunk of ``dim_size``. + idx(int): The index of chunk whose dim size is being requested. + + Returns: + An int indicating the dim size of the chunk. + """ + return max(min(dim_size, split_size * (idx + 1)) - split_size * idx, 0) + +def get_chunk_sharding_params(sharding_dim_size, world_size, spec, rank): + """ + Generate the start pos and offset length for the current rank for + chunk sharding. + + Args: + sharding_dim_size(int): The dimension length which we shard on. + world_size(int): number of ranks. + spec (:class:`torch.distributed._shard.sharding_spec.ChunkShardingSpec`): + sharding spec. + rank(int): # of cuda process. + + Returns: + start_pos(int): start position of sharded tensor on the given rank. + chunk_size(int): chunk size of sharded tensor on the given rank. + """ + split_size = get_split_size(sharding_dim_size, world_size) + current_offsets = 0 + start_pos = current_offsets + for idx, placement in enumerate(spec.placements): + chunk_size = get_chunked_dim_size(sharding_dim_size, split_size, idx) + if rank == placement.rank(): + start_pos = current_offsets + break + current_offsets += chunk_size + return start_pos, chunk_size # type: ignore[possibly-undefined] diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/api.py b/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/api.py new file mode 100644 index 0000000000000000000000000000000000000000..bcfacbf0354dfa553f973e009c8ff84657fc9a9d --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/api.py @@ -0,0 +1,242 @@ +from abc import ABC, abstractmethod +from dataclasses import dataclass +import functools +from typing import Callable, Dict, List, TYPE_CHECKING + +import torch + +from ._internals import ( + check_tensor, + get_chunked_dim_size, + get_split_size, + validate_non_overlapping_shards_metadata +) +from torch.distributed._shard.metadata import ShardMetadata + +import torch.distributed._shard.sharded_tensor.metadata as sharded_tensor_meta +from torch.distributed._shard.op_registry_utils import _decorator_func + +if TYPE_CHECKING: + # Only include ShardedTensor when do type checking, exclude it + # from run-time to resolve circular dependency. + from torch.distributed._shard.sharded_tensor import ShardedTensor + +class PlacementSpec(ABC): # noqa: B024 + """ + Base class representing the placement of an entity. Subclasses of this + class can be used to specify customized placements which might not be + covered by existing APIs. + """ + pass + + +@dataclass +class DevicePlacementSpec(PlacementSpec): + """ + Associates placement of an entity with a single device. + + Args: + device(:class:`torch.distributed._remote_device`): The device to place the entity on. + """ + + device: torch.distributed._remote_device + + def __post_init__(self): + if not isinstance(self.device, torch.distributed._remote_device): + self.device = torch.distributed._remote_device(self.device) + +class ShardingSpec(ABC): + """ + Base class representing sharding specifications. 
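+    Concrete subclasses such as ``ChunkShardingSpec`` and ``EnumerableShardingSpec`` describe specific sharding layouts.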
+ """ + @abstractmethod + def build_metadata(self, + tensor_sizes: torch.Size, + tensor_properties: sharded_tensor_meta.TensorProperties, + ) -> sharded_tensor_meta.ShardedTensorMetadata: + """ + Given a global tensor size, define how to shard a tensor like this shape + across ranks, return ShardedTensorMetadata + Args: + tensor_sizes (:class:`torch.Size`): + The tensor shape to shard on, a `torch.Size` object that represents the + tensor shape to be sharded according to the ShardingSpec. + tensor_properties(:class:`torch.distributed._shard.sharded_tensor.TensorProperties): + Tensor properties used to create a ShardedTensor. + Returns: + A :class:`ShardedTensorMetadata` object that encodes the information about + the layout of the ShardedTensor and its properties. + """ + + @abstractmethod + def shard(self, tensor: torch.Tensor, src_rank: int = 0, process_group=None) -> "ShardedTensor": + """ + Given a global tensor on src_rank, shard this tensor + across ranks within the process group, return a ShardedTensor. + Args: + tensor (:class:`torch.Tensor`): Tensor needs to be sharded. + Keyword args: + src_rank (int, optional): The source rank which is used as the ground truth of + the data for the parameter that would be sharded and scattered + across the rest of the ranks. + Default: 0. + process_group (ProcessGroup, optional): The process group to work on. If None, + the default process group will be used. + Returns: + A :class:`ShardedTensor` sharded from the given tensor. + """ + +# Ops customized for a particular ShardingSpec. +_CUSTOM_SHARDING_SPEC_OPS: Dict[str, Dict[Callable, Callable]] = {} + +def _has_custom_op(sharding_spec, op): + """ + Returns whether or not the ShardingSpec has a custom op implementation. + """ + class_name = type(sharding_spec).__qualname__ + return class_name in _CUSTOM_SHARDING_SPEC_OPS and op in _CUSTOM_SHARDING_SPEC_OPS[class_name] + +def _dispatch_custom_op(sharding_spec, op: Callable, types, args, kwargs, process_group): + """ + Calls the custom op for this ShardingSpec if it exists. + """ + class_name = type(sharding_spec).__qualname__ + if not _has_custom_op(sharding_spec, op): + raise RuntimeError(f'Custom op: {op} not registered for {class_name}') + func = _CUSTOM_SHARDING_SPEC_OPS[class_name][op] + return func(types, args, kwargs, process_group) + +def custom_sharding_spec_op(sharding_spec_class, func): + """ + Decorator to allow custom registration of ops. + Args: + sharding_spec_class(type): The ShardingSpec for which we need to add this custom op. + func(Callable): The op to override (ex: torch.bmm) + """ + class_name = sharding_spec_class.__qualname__ + if class_name not in _CUSTOM_SHARDING_SPEC_OPS: + _CUSTOM_SHARDING_SPEC_OPS[class_name] = {} + return functools.partial( + _decorator_func, + op=func, + op_table=_CUSTOM_SHARDING_SPEC_OPS[class_name] + ) + + +@dataclass +class EnumerableShardingSpec(ShardingSpec): + """ + This is a type of PlacementSpec that allows users to specify a generic + sharding scheme by enumerating exactly how each shard is laid out. + + Args: + shards(List[ShardMetadata]): List of :class:`ShardMetadata` objects representing + each shard. Note that none of the shards should overlap. + """ + + shards: List[ShardMetadata] + + def __post_init__(self): + if len(self.shards) == 0: + raise ValueError(f'Empty shard list provided: {self.shards}') + + # Validate each shard has same rank. 
+ rank = -1 + for shard in self.shards: + if rank != -1 and rank != len(shard.shard_offsets): + raise ValueError(f'Found inconsistent ranks for shards: {rank} and {len(shard.shard_offsets)}') + rank = len(shard.shard_offsets) + + validate_non_overlapping_shards_metadata(self.shards) + + def build_metadata(self, + tensor_sizes: torch.Size, + tensor_properties: sharded_tensor_meta.TensorProperties, + ) -> sharded_tensor_meta.ShardedTensorMetadata: + # check if shards form a valid tensor + check_tensor(self.shards, tensor_sizes) + return sharded_tensor_meta.ShardedTensorMetadata( + self.shards, + tensor_sizes, + tensor_properties + ) + + def shard(self, tensor: torch.Tensor, src_rank: int = 0, process_group=None) -> "ShardedTensor": + # TODO: figure out a generic and efficient way to scatter the shards for EnumerableShardingSpec + raise NotImplementedError("EnumerableShardingSpec.shard not implemented yet!") + + +def _infer_sharding_spec_from_shards_metadata(shards_metadata): + """ + Infer the sharding spec from the metadata of each shard of a ShardedTensor. + If the tensor is sharded only on one dimension, we can then verify whether it's + a ChunkShardingSpec or not. The way to verify it is to first get the total length + and perform a chunk sharding with the given placements to see if we can have the + same chunk size as the given shards_metadata. If not, we assume it's enum sharded. + + Args: + shards_metadata (List[ShardMetadata]): List of Metadata of local shards. + + Returns: + A :class:`torch.distributed._shard.sharding_spec.ShardingSpec` object of sharding + spec for one sharded tensor. + """ + placements = [] + chunk_sharding_dim = None + chunk_offset_list = [] + shard_size_list = [] + shard_offset_list = [] + # collect local shard metadatas from the global sharded_tensor_metadata + for shard_metadata in shards_metadata: # type: ignore[attr-defined] + placements.append(shard_metadata.placement) + local_offsets = shard_metadata.shard_offsets + chunk_offset_list.append(sum(local_offsets)) + shard_size_list.append(shard_metadata.shard_sizes) + shard_offset_list.append(shard_metadata.shard_offsets) + shard_dims = [idx for idx, e in enumerate(local_offsets) if e != 0] + # If the offset is [0, 0, ..., 0] (all zeros), + # we cannot decide whether how the tensor is sharded. + if len(shard_dims) == 0: + continue + # If the offset is [0, N, .,0, M, 0, .., 0], + # we are sure it's sharded by more than one dimension. + if len(shard_dims) != 1: + chunk_sharding_dim = None + break + # If the offset is [0, 0, .,0, M, 0, .., 0], aka, it's sharded by just + # one dimension, we need to make sure all ranks share the same dimension. 
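+        # The first sharded dim seen becomes the candidate chunk dim; if a later shard is sharded on a different dim, reset it to None and fall back to EnumerableShardingSpec below.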
+ if not chunk_sharding_dim: + chunk_sharding_dim = shard_dims[0] + elif chunk_sharding_dim != shard_dims[0]: + chunk_sharding_dim = None + break + + if chunk_sharding_dim is not None: + # Ensure we infer the correct placement order from offsets + placements = [ + x for _, x in sorted(zip(chunk_offset_list, placements), key=lambda e: e[0]) + ] + + from .chunk_sharding_spec import ChunkShardingSpec + chunk_spec = ChunkShardingSpec( + dim=chunk_sharding_dim, + placements=placements, + ) + + shard_sizes = sorted([x[chunk_sharding_dim] for x in shard_size_list]) + shard_total_length = sum(shard_sizes) + shard_offsets = sorted([x[chunk_sharding_dim] for x in shard_offset_list]) + + chunks = len(placements) + split_size = get_split_size(shard_total_length, chunks) + chunk_shard_sizes = sorted( + [ + get_chunked_dim_size(shard_total_length, split_size, idx) + for idx in range(chunks) + ] + ) + # Should match ChunkShardingSpec offsets calculation + chunk_shard_offsets = [split_size * idx for idx in range(chunks)] + if shard_sizes == chunk_shard_sizes and shard_offsets == chunk_shard_offsets: + return chunk_spec + return EnumerableShardingSpec(shards_metadata) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/__init__.py b/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4916bc7de5a0ca8142135a0728973601367b6c10 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/__pycache__/_common.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/__pycache__/_common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..edd3f5279a440694069d09b6b1af095f1e759e24 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/__pycache__/_common.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/__pycache__/embedding.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/__pycache__/embedding.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..80c482c3e83eb8857e5a247fa1c2f4b83953175b Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/__pycache__/embedding.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/__pycache__/embedding_bag.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/__pycache__/embedding_bag.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..98d967210d9d2666d8f2b93e2c35a8b796aa74c2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/__pycache__/embedding_bag.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/_common.py b/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/_common.py new file mode 100644 index 0000000000000000000000000000000000000000..24727005870de8373074acea3562aab0814455e6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/_common.py @@ -0,0 +1,349 @@ + +import torch +import torch.distributed as dist +from torch.distributed._shard.sharded_tensor import ShardedTensor +from torch.distributed._shard.sharded_tensor._ops._common import _sharded_op_common +from torch.distributed._shard.sharding_spec import ChunkShardingSpec +from torch.distributed._shard.sharding_spec._internals import ( + get_chunk_sharding_params, + get_chunked_dim_size, + get_split_size, +) +from torch.distributed._shard.sharding_spec.api import custom_sharding_spec_op +from torch.distributed.nn.functional import ( + _all_gather_base, + all_reduce, + all_to_all_single, +) + + +def _chunk_sharding_spec_check(spec, op): + """ + For the given op implementation check if the sharding spec is ChunkShardingSpec. + """ + if not isinstance(spec, ChunkShardingSpec): + raise NotImplementedError( + f"Only ChunkShardingSpec supported for '{op.__name__}'." + ) + + +def _register_sharded_op_on_local_tensor( + op, early_stop_func=None, extra_check=None, customized_func=None +): + """ + Handles ``__torch_function__`` dispatch for ops which are performed on + the single local tensor of the sharded tensor such as op like + ``torch.nn.functional.softmax`` or ``torch.Tensor.view``. + + For more complicated ops, a customized func can be used to generate + the new local tensor, sharding spec and sharded tensor size. + + Args: + op: The op to be registered and applied to all shards of the st. + early_stop_func (Callable, optional): the func for early stop. + Default: if ``None``, no early stop. + extra_check (Callable, optional): the func for extra condition check. + Default: if ``None``, no extra check. + customized_func (Callable, optional): the func for customized logic + to generate the new local tensor, sharding spec and sharded tensor size. + Default: if ``None``, we simply lower to the real op call with + the single local tensor of the st. + + Return: + func (Callable): registered implementation for sharded op for + ``__torch_function__`` dispatch. + """ + + @custom_sharding_spec_op(ChunkShardingSpec, op) + @_sharded_op_common(op, early_stop_func, extra_check) + def sharded_tensor_op_on_local_tensor(types, args=(), kwargs=None, pg=None): + st = args[0] + sharding_spec = st.sharding_spec() + if len(st.local_shards()) != 1: + raise TypeError( + f"torch function '{op.__name__}', with args: {args} and " + f"kwargs: {kwargs} only supported for single local tensor!" 
+ ) + st_size = st.size() + if customized_func: + local_tensor, sharding_spec, st_size = customized_func(args, kwargs, pg) + else: + args = (st.local_tensor(), *args[1:]) + local_tensor = op(*args, **kwargs) + return ShardedTensor._init_from_local_tensor( + local_tensor.contiguous(), + sharding_spec, + st_size, # type: ignore[arg-type] + process_group=pg, + init_rrefs=st._init_rrefs, + ) + + +def _handle_col_wise_sharding_base( + op_func, + col_dim, + input, + world_size, + weight, + local_shard, + pg, + gathered_inputs, + mode=None, + gathered_per_sample_weights=None, + gathered_offsets=None, + padding_idx=None, +): + """ + For col-wise sharding of weight, lots of logic are common. + So we extract the common logic and put in this function: + Step 1. To get input from each rank and + Step 2. To perform the op on the concatenated tensor. + Step 3. To distribute results to each rank with col rearrangement. + Step 4. To concatenate all results from all ranks. + + Args: + op_func: operator which is applied to the input tensor. + col_dim: dim of result tensor after the operation. + input: tensor to be applied op on. + world_size: number of ranks. + weight: sharded weight tensor. + local_shard: col-wise sharded weight tensor. + pg: process group. + gathered_inputs: list of inputs from all ranks. If specified, we + don't need to communicate with each rank any more. + mode: aggregation mode of EmbeddingBag. + gathered_per_sample_weights: per_sample_weights across all ranks. + gathered_offsets: offsets across all ranks. + padding_idx: If specified, the entries at padding_idx do + not contribute to the gradient; therefore, the embedding + vector at padding_idx is not updated during training, + i.e. it remains as a fixed “pad”. + Note that the embedding vector at padding_idx is + excluded from the reduction. + + Return: final result of input being applied with the op. + """ + # run the operator's function for all the inputs. + results = [] + for i, inp in enumerate(gathered_inputs): + if op_func == torch.nn.functional.embedding_bag: + result = op_func( + inp, + local_shard, + offsets=gathered_offsets[i] if gathered_offsets is not None else None, + mode=mode, + per_sample_weights=gathered_per_sample_weights[i] + if gathered_per_sample_weights is not None + else None, + padding_idx=padding_idx, + ) + elif op_func == torch.nn.functional.embedding: + result = op_func( + inp, + local_shard, + padding_idx=padding_idx, + ) + else: + result = op_func(inp, local_shard) + results.append(torch.transpose(result, 0, col_dim)) + + # Distribute results to each rank with col rearrangement. + output = _result_distribute_with_col_rearrange( + results, input, world_size, weight, pg + ) + + # transpose the output and return result. + return torch.transpose(output, 0, col_dim) + + +def _result_distribute_with_col_rearrange(results, input, world_size, weight, pg): + """ + For col-wise sharding of weight, we need to distribute + results to each rank. We do them in this function. + Note that, if the index in the Sharding Spec is not equal to + the rank number, we need to do the rearrangement based on the + order given by the Sharding Spec (placement). + + Args: + results: results from ops applied to inputs from all ranks. + We need to distribute them back to their original ranks. + input: tensor to be applied op to. + world_size: number of ranks. + weight: sharded weight tensor. + pg: process group. + + Return: column rearranged result. + """ + # Process results and outputs for all2all. 
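+    # Concatenate the per-input results along dim 0 and redistribute them with all_to_all_single; the output split sizes are the chunk sizes implied by the sharding spec placements.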
+ sharding_dim = weight._sharding_spec.dim + sharding_dim_size = weight.size(sharding_dim) + dims = list(results[0].size()) + dims[0] = sharding_dim_size + combined_results = torch.cat(results) + output = torch.empty( + *dims, device=combined_results.device, dtype=combined_results.dtype + ) + + # Compute output splits + split_size = get_split_size(sharding_dim_size, world_size) + output_split_sizes = [0] * world_size + for idx, placement in enumerate(weight._sharding_spec.placements): + output_split_sizes[placement.rank()] = get_chunked_dim_size( + sharding_dim_size, split_size, idx + ) + + # distribute the outputs using all2all. + output = all_to_all_single( + output, combined_results, output_split_sizes=output_split_sizes, group=pg + ) + + # Check if we need to rearrange columns appropriately for output. + rearrange_columns = any( + idx != placement.rank() + for idx, placement in enumerate(weight._sharding_spec.placements) + ) + if not rearrange_columns: + return output + + indices = [] + for placement in weight._sharding_spec.placements: + dim_size = output_split_sizes[placement.rank()] + start = sum( + [ + split_size if i < placement.rank() else 0 + for i, split_size in enumerate(output_split_sizes) + ] + ) + indices += list(range(start, start + dim_size)) + + return output.index_select(0, torch.tensor(indices, device=output.device)) + + +def _handle_max_norm_col_wise( + max_norm, + norm_type, + local_shard, + input, + world_size, + gathered_inputs, + pg, +): + """ + For col-wise sharding of weight, we need to aggregate the + norm across all ranks before we can perform the proper re-norm. + Note that, the max_norm logic is only applied to the embedding + indices that are looked up and not the whole shard. + + Args: + max_norm: If given, each embedding vector with norm larger + than max_norm is renormalized to have norm max_norm. + Note: this will modify weight in-place. + norm_type: The p in the p-norm to compute for the max_norm option. + local_shard: col-wise shared local weight used for lookup. + input: tensor to be applied op to. + world_size: number of ranks. + gathered_inputs: list of inputs from all ranks. + pg: process group. + + Return: + local_shard_norm_renormed: local_shard re-normed to max_norm if the norm is larger + than it. + + """ + norm_type = norm_type if norm_type is not None else 2.0 + unique_inp = torch.unique(torch.cat(gathered_inputs)) + local_shard_sum = torch.sum( + torch.pow(torch.abs(local_shard), norm_type), dim=1, dtype=local_shard.dtype + ) + # For col-wise sharding, we need to first aggregate the powered sum + # from each rank first and then calculate the norm. + local_shard_sum = all_reduce(local_shard_sum, group=pg) + local_shard_norm = torch.pow(local_shard_sum, 1.0 / norm_type) + max_norm_tensor = torch.full( + (local_shard.size(0),), + float("inf"), + dtype=local_shard.dtype, + device=input.device, + ) + max_norm_tensor[unique_inp] = max_norm + local_shard_t = local_shard.t().contiguous() + normalized_tensor = torch.where( + local_shard_norm > max_norm_tensor, max_norm_tensor, local_shard_norm + ) + # Make sure divisor is not zero. + local_shard_norm[local_shard_norm == 0.0] = 1.0 + local_shard_norm_renormed = ( + torch.div(torch.mul(local_shard_t, normalized_tensor), local_shard_norm) + .t() + .contiguous() + ) + return local_shard_norm_renormed + + +def _all_gather_base_input(input, pg): + """ + Use _all_gather_base to get a concatenated input from each rank. + + Args: + input: tensor to be applied op on. + pg: process group. 
+ + Returns: + gathered_inputs: input gathered from each rank and concat by dim 0. + """ + # allgather the inputs first. + gather_inp_size = list(input.size()) + gather_inp_size[0] = input.size(0) * dist.get_world_size(pg) + gather_inp = torch.empty(gather_inp_size, device=input.device, dtype=input.dtype) + return _all_gather_base(gather_inp, input, group=pg) + + +def _handle_row_wise_mask(gather_inp, padding_idx, weight, world_size, rank): + """ + Mask the input for embedding look-up for IDs which are not stored + on the current rank. This function also adjust the ``padding_idx`` + so that it is only used on the rank where the corresponding row is + stored. + + Note that, with ``max_norm`` flag on, only weights of rows being + looked up will be re-normed. So we need an extra row for masked ID + so that it does not affect the final result and ``max_norm``. + + Args: + gather_inp: tensor to be applied op on gathered from all ranks. + padding_idx: If specified, the entries at padding_idx do + not contribute to the gradient; therefore, the embedding + vector at padding_idx is not updated during training, + i.e. it remains as a fixed “pad”. + Note that the embedding vector at padding_idx is + excluded from the reduction. + weight: weight tensor of Embedding look-up table. + world_size: number of ranks. + rank: # of cuda process. + + Returns: + lookup_input: Tensor of masked input. + padding_idx: adjusted padding_idx. + padding_row: The extra row we used during lookup so that + looking up does not affect ``max_norm``. + """ + (start_pos, chunk_size) = get_chunk_sharding_params( + weight.size(0), world_size, weight._sharding_spec, rank + ) + mask = (gather_inp < start_pos) | (gather_inp >= start_pos + chunk_size) + lookup_input = gather_inp.clone() - start_pos + lookup_input[mask] = chunk_size + if ( + padding_idx is not None + and padding_idx >= start_pos + and padding_idx < (start_pos + chunk_size) + ): + padding_idx = padding_idx - start_pos + else: + padding_idx = None + + # When max_norm is set, it will only re-norm the row being looked up. + padding_row = torch.zeros( + 1, weight.size(1), device=gather_inp.device, dtype=weight.dtype + ) + return lookup_input, padding_idx, padding_row diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/embedding.py b/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/embedding.py new file mode 100644 index 0000000000000000000000000000000000000000..e1c1cb6380439ca14f6e352dc03ea30877e6286c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/embedding.py @@ -0,0 +1,293 @@ + +import torch +import torch.distributed as dist +from torch.distributed._shard.sharded_tensor import ShardedTensor +from torch.distributed._shard.sharding_spec import ChunkShardingSpec +from torch.distributed._shard.sharding_spec.api import custom_sharding_spec_op +from torch.distributed.nn.functional import all_gather, reduce_scatter + +from ._common import ( + _all_gather_base_input, + _handle_col_wise_sharding_base, + _handle_max_norm_col_wise, + _handle_row_wise_mask, +) + + +@custom_sharding_spec_op(ChunkShardingSpec, torch.nn.functional.embedding) +def sharded_embedding(types, args, kwargs, pg): + """ + Handles ``__torch_function__`` dispatch for ``torch.nn.functional.embedding``. + This method computes a sharded embedding lookup and has the following limitations: + + 1. Supports only sharding of ``weight``. + 2. 
Supports only ``ChunkShardingSpec``. + 3. Supports only a single local shard per rank. + 4. Supports all specs except for scale_grad_by_freq, sparse, etc. + + Based on the dimension that the weight is sharded on, there are two + algorithms: + + ROWWISE SHARDING + ================ + For row-wise sharding the weight is sharded on dimension 0. + + The overall algorithm can be best explained with an example. Let's assume + the dims for input are (4 x 6) and W are (10 x 17) and W is sharded across + 4 GPUs creating 3 shard of (3 x 17) and 1 shard of (1 x 17). + The algorithm is as follows: + + 1. First the input is all gathered to all ranks, since this is SPMD and + input is actually sharded across all ranks. The inputs then become a + 4 (4 x 6) tensor on each rank. For example if the given input is + tensor([[6, 5, 2, 9, 6, 3], + [3, 1, 2, 4, 7, 6], + [4, 0, 4, 9, 8, 9], + [8, 6, 6, 4, 6, 1]]) + on rank 0. + Then on every rank, we will have this tensor. + If input itself is already replicated, no all-gather will be done. + 2. Next, we mask the ID which are not stored on that rank. + For example on rank 0, we store ID [0, 1, 2]. We only keep the ID + inside the set of numbers. The rest of them will be masked to an extra row. + The masked matrix will be used for embedding look up and is like: + tensor([[4, 4, 2, 4, 4, 4], + [4, 1, 2, 4, 4, 4], + [4, 0, 4, 4, 4, 4], + [4, 4, 4, 4, 4, 1]]) + The reason of having an extra row (aka, number 4 in the example) is + because when max_norm is specified only weight which has looked will + be re-normed so mask IDs whose embeddings are not stored in current + rank will to an extra row will ensure max_norm still works as expected. + 3. If max_norm is specified, the extra row guarantees that the mask ID will + not affect the behavior of weigh re-norm. + + COLWISE SHARDING + ================ + For col-wise sharding the weight is sharded on dimension 1. + + The overall algorithm can be best explained with an example. Let's assume + the dims for input are (4 x 6) and W are (16 x 17) and W is sharded across + 4 GPUs creating 3 shards of (16 x 5) and 1 shard of (16 x 2). + The algorithm is as follows: + + 1. First the input is broadcasted to all ranks, since this is SPMD we + actually do an all_gather for all the inputs resulting in 4 (4 x 6) + inputs on each rank. + 2. Next we perform local embedding lookup operation by apply each + input (4 x 6) with the local shard (16 x 5) ((16 x 2) for the last). + This results in 4 (5 x 6 x 4) ((2 x 6 x 4) for the last) matrices + on each rank. We transpose dim 0 and dim 2. + 3. Next, we concat these 4 matrices and perform an all2all to share the + appropriate (5 x 6 x 4) or (2 x 6 x 4) matrices to each rank. + 4. Now, each rank receives a (17 x 6 x 4) matrix which is basically the + size of the result we need. + 5. If placements are not in order any appropriate rearrangement of columns + are done for the (17 x 6 x 4) matrix and finally we transpose the + dim 0 and dim 2 again. + 6. If max_norm is specified, we manually sum up the norm and renorm. Because + the renorm must be in place, we need to override the local_shard to mimic + this behavior. 
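+
+    A minimal usage sketch (illustrative only; assumes a 4-rank process group
+    is already initialized and that the module's weight has been sharded with
+    a ``ChunkShardingSpec``, e.g. via ``shard_parameter``)::
+
+        spec = ChunkShardingSpec(
+            dim=0,
+            placements=[f"rank:{r}/cuda:{r}" for r in range(4)],
+        )
+        shard_parameter(embedding_module, "weight", spec)
+        # F.embedding on a ShardedTensor weight dispatches to this handler.
+        output = torch.nn.functional.embedding(inp, embedding_module.weight)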
+ """ + # Validate input params + _validate_embedding_param(args, kwargs) + + input = args[0] + weight = args[1] + max_norm = kwargs.get("max_norm") + norm_type = kwargs.get("norm_type") + padding_idx = kwargs.get("padding_idx") + + local_shard = weight.local_tensor().contiguous() + sharding_dim = weight._sharding_spec.dim + world_size = dist.get_world_size(pg) + rank = dist.get_rank(pg) + + if sharding_dim == 1: + output, local_shard = _handle_col_wise_sharding( + input, world_size, weight, local_shard, max_norm, norm_type, padding_idx, pg + ) + weight.local_shards()[0].tensor = local_shard + return output + elif sharding_dim == 0: + return _handle_row_wise_sharding( + input, + world_size, + weight, + local_shard, + max_norm, + norm_type, + padding_idx, + rank, + pg, + ) + else: + raise RuntimeError( + f"nn.Embedding weight sharded on dim {sharding_dim} not supported!" + ) + + +def _validate_embedding_param(args, kwargs): + """ + Validate input params of sharded embedding op. + + Args: + input: list of ID used for lookup. + weight: sharded weight tensor. + kwargs: same as normal Embedding. + + Return: None. + """ + + input = args[0] + weight = args[1] + max_norm = kwargs.get("max_norm") + scale_grad_by_freq = kwargs.get("scale_grad_by_freq") + sparse = kwargs.get("sparse") + + # Validate types + if not isinstance(input, torch.Tensor): + raise TypeError("input need to be torch.Tensor") + if not isinstance(weight, ShardedTensor): + raise TypeError("weight needs to be ShardedTensor") + weight_size = weight.size() + if len(weight_size) != 2: + raise ValueError("Weight needs to have exactly 2 dims") + if int(torch.min(input).item()) < 0: + raise ValueError( + "Index out of range in Input %d %d", + int(torch.min(input).item()), + weight_size[1], + ) + if int(torch.max(input).item()) >= weight_size[0]: + raise ValueError( + "Index out of range in Input %d %d", + int(torch.max(input).item()), + weight_size[1], + ) + if scale_grad_by_freq: + raise RuntimeError( + 'nn.Embedding weight sharded with flag on "scale_grad_by_freq" not supported!' + ) + if sparse: + raise RuntimeError( + 'nn.Embedding weight sharded with flag on "sparse" not supported!' + ) + if max_norm and max_norm <= 0.0: + raise ValueError('"max_norm" must be larger than zero!') + + if not isinstance(weight._sharding_spec, ChunkShardingSpec): + raise ValueError("Only ChunkShardingSpec supported for ShardedTensor ops!") + if len(weight.local_shards()) != 1: + raise ValueError("Only one local shard supported!") + + +def _handle_col_wise_sharding( + input, world_size, weight, local_shard, max_norm, norm_type, padding_idx, pg +): + """ + Entry-point function to handle the logic of col-wise sharding of weight + for embedding. (Detailed explanations of the logic can be found in + the comment for sharded_embedding.) + + Args: + input: list of ID used for lookup and aggregation. + world_size: number of ranks. + weight: sharded weight tensor. + local_shard: col-wise shared local weight used for lookup. + max_norm: If given, each embedding vector with norm larger + than max_norm is renormalized to have norm max_norm. + Note: this will modify weight in-place. + norm_type: The p in the p-norm to compute for the max_norm option. + padding_idx: If specified, the entries at padding_idx do + not contribute to the gradient; therefore, the embedding + vector at padding_idx is not updated during training, + i.e. it remains as a fixed “pad”. + pg: process group. + + Returns: final result of lookup. 
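+
+    Note: the value returned to the caller is actually the tuple
+    ``(output, local_shard)``; when ``max_norm`` is given, ``local_shard`` is
+    the renormalized shard, which ``sharded_embedding`` writes back into the
+    ShardedTensor to mimic the in-place renorm semantics.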
+ """ + # allgather the inputs first for non Replicated Tensor. + gathered_inputs = all_gather(input, group=pg) + + if max_norm is not None: + # max_norm changes the weight in-place + local_shard = _handle_max_norm_col_wise( + max_norm, norm_type, local_shard, input, world_size, gathered_inputs, pg + ) + + output = _handle_col_wise_sharding_base( + torch.nn.functional.embedding, + len(input.size()), + input, + world_size, + weight, + local_shard, + pg, + gathered_inputs, + padding_idx=padding_idx, + ) + return (output, local_shard) + + +def _handle_row_wise_sharding( + input, world_size, weight, local_shard, max_norm, norm_type, padding_idx, rank, pg +): + """ + Entry-point function to handle the logic of row-wise sharding of weight + for embedding. (Detailed explanations of the logic can be found in + the comment for sharded_embedding.) + + Args: + input: list of ID used for lookup and aggregation. + world_size: number of ranks. + weight: sharded weight tensor. + local_shard: row-wise shared local weight used for lookup. + max_norm: If given, each embedding vector with norm larger + than max_norm is renormalized to have norm max_norm. + Note: this will modify weight in-place. + norm_type: The p in the p-norm to compute for the max_norm option. + padding_idx: If specified, the entries at padding_idx do + not contribute to the gradient; therefore, the embedding + vector at padding_idx is not updated during training, + i.e. it remains as a fixed “pad”. + rank: # of cuda process. + pg: process group. + + Returns: final result of lookup. + """ + # allgather the inputs first for non Replicated Tensor. + gather_inp = _all_gather_base_input(input, pg) + + # Mask the input according to sharding spec. + lookup_input, padding_idx, padding_row = _handle_row_wise_mask( + gather_inp, padding_idx, weight, world_size, rank + ) + + # When input is a large tensor, the value of weight is changed. + # This is a walk-around for now. GH issue: #81717 + if max_norm is not None: + torch.nn.functional.embedding( + torch.unique(lookup_input)[:-1], + local_shard, + padding_idx=padding_idx, + max_norm=max_norm, + norm_type=norm_type, + ) + max_norm = None + + local_input_embeddings = torch.nn.functional.embedding( + lookup_input, + torch.cat([local_shard, padding_row]), + padding_idx=padding_idx, + max_norm=max_norm, + norm_type=norm_type, + ) + + # TODO: Make the result a PartialTensor. 
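+    # Each rank looked up the full gathered batch, but only the IDs it owns
+    # hit real rows; masked IDs hit the all-zero padding_row appended above.
+    # Chunking along dim 0 and reduce-scattering (sum) therefore gives every
+    # rank the complete embeddings for its own slice of the input batch.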
+ local_shards = local_input_embeddings.chunk(pg.size()) + return reduce_scatter( + torch.empty_like(local_shards[0]), + list(local_shards), + group=pg, + ) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/embedding_bag.py b/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/embedding_bag.py new file mode 100644 index 0000000000000000000000000000000000000000..2d6ea1d705d59dba53eee215913000db4b4b333c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/embedding_bag.py @@ -0,0 +1,476 @@ + +from typing import cast, List + +import torch +import torch.distributed as dist +from torch._C._distributed_c10d import ReduceOp +from torch.distributed._shard.sharded_tensor import ShardedTensor +from torch.distributed._shard.sharding_spec import ChunkShardingSpec +from torch.distributed._shard.sharding_spec.api import custom_sharding_spec_op +from torch.distributed.nn.functional import all_gather, reduce_scatter + +from ._common import ( + _all_gather_base_input, + _handle_col_wise_sharding_base, + _handle_max_norm_col_wise, + _handle_row_wise_mask, +) + + +@custom_sharding_spec_op(ChunkShardingSpec, torch.nn.functional.embedding_bag) +def sharded_embedding_bag(types, args, kwargs, pg): + """ + Handles ``__torch_function__`` dispatch for ``torch.nn.functional.embedding_bag``. + This method computes a sharded embedding bag aggregation and has the following limitations: + + 1. Supports only sharding of ``weight``. + 2. Supports only ``ChunkShardingSpec``. + 3. Supports only a single local shard per rank. + 4. Supports all specs except for scale_grad_by_freq, sparse, etc. + + Based on the dimension that the weight is sharded on, there are two + algorithms: + + ROWWISE SHARDING + ================ + For row-wise sharding the weight is sharded on dimension 0. + + The overall algorithm can be best explained with an example. Let's assume + the dims for input are (4 x 6) and W are (16 x 17) and W is sharded across + 4 GPUs creating 4 shard of (4 x 17). + The algorithm is as follows: + + 1. First the input is all gathered to all ranks, since this is SPMD and + input is actually sharded across all ranks. The inputs then become a + 4 (4 x 6) tensor on each rank. For example if the given input is + tensor([[6, 5, 2, 9, 6, 3], + [3, 1, 2, 4, 7, 6], + [4, 0, 4, 9, 8, 9], + [8, 6, 6, 4, 6, 1]]) + on rank 0. + Then on every rank, we will have this tensor. + If input itself is already replicated, no all-gather will be done. + 2. Next, we mask the ID which are not stored on that rank. + For example on rank 0, we store ID [0, 1, 2]. We only keep the ID + inside the set of numbers. The rest of them will be masked to an extra row. + The masked matrix will be used for embedding look up and is like: + tensor([[4, 4, 2, 4, 4, 4], + [4, 1, 2, 4, 4, 4], + [4, 0, 4, 4, 4, 4], + [4, 4, 4, 4, 4, 1]]) + 3. If ``max_norm`` is specified, the extra row guarantees that the mask ID will + not affect the behavior of weigh re-norm. + 4. The example above only happens in one rank and each rank does a very similar thing. + For "Mean" mode we need to divide by either column size (2D) or the interval length + defined by the offset (excluding the row specified in ``padding_idx``). + We also need to mask the unexisting row to neg Inf so that negative value does not + gets wiped out in the "Max" mode. 
+ + COLWISE SHARDING + ================ + For col-wise sharding the weight is sharded on dimension 1. + + The overall algorithm can be best explained with an example. Let's assume + the dims for input are (4 x 6) and W are (16 x 17) and W is sharded across + 4 GPUs creating 3 shards of (16 x 5) and 1 shard of (16 x 2). + The algorithm is as follows: + + 1. First the input is broadcasted to all ranks, since this is SPMD we + actually do an all_gather for all the inputs resulting in 4 (4 x 6) + inputs on each rank. + 2. Next we perform local embedding bag operation under the given mode by + apply each input (4 x 6) with the local shard (16 x 5) ((16 x 2) for the last). + This results in 4 (5 x 4) ((2 x 4) for the last) matrices on each rank. + We transpose the aggregation result. + 3. Next, we concatenate these 4 matrices and perform an all2all to share the + appropriate (5 x 4) or (2 x 4) matrices to each rank. + 4. Now, each rank receives a (17 x 4) matrix which is basically the + size of the result we need. + 5. If placements are not in order any appropriate rearrangement of columns + are done for the (17 x 4) matrix and finally we transpose the output again. + 6. If max_norm is specified, we manually sum up the norm and renorm. Because + the renorm must be in place, we need to override the local_shard to mimic + this behavior. + """ + # Validate input params + _validate_embedding_bag_param(args, kwargs) + + input = args[0] + weight = args[1] + offsets = kwargs.get("offsets") + per_sample_weights = kwargs.get("per_sample_weights") + mode = kwargs.get("mode") + max_norm = kwargs.get("max_norm") + norm_type = kwargs.get("norm_type") + include_last_offset = kwargs.get("include_last_offset") + padding_idx = kwargs.get("padding_idx") + + local_shard = weight.local_tensor().contiguous() + sharding_dim = weight._sharding_spec.dim + world_size = dist.get_world_size(pg) + rank = dist.get_rank(pg) + if include_last_offset: + offsets = offsets[:-1] + + if sharding_dim == 1: + output, local_shard = _handle_col_wise_sharding( + input, + world_size, + weight, + local_shard, + offsets, + per_sample_weights, + mode, + max_norm, + norm_type, + padding_idx, + pg, + ) + weight.local_shards()[0].tensor = local_shard + return output + elif sharding_dim == 0: + return _handle_row_wise_sharding( + input, + world_size, + weight, + local_shard, + offsets, + per_sample_weights, + mode, + max_norm, + norm_type, + padding_idx, + rank, + pg, + ) + else: + raise RuntimeError( + f"nn.EmbeddingBag weight sharded on dim {sharding_dim} not supported!" + ) + + +def _validate_embedding_bag_param(args, kwargs): + """ + Validate input params of sharded embeddingBag op. + + Args: + input: list of ID used for lookup and aggregation. + weight: sharded weight tensor. + kwargs: same as normal EmbeddingBag. + + Return: None. 
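+    Raises:
+        TypeError: if ``input``, ``offsets``, ``per_sample_weights`` or
+            ``weight`` is of an unexpected type.
+        ValueError: for shape/range problems or an unsupported sharding setup.
+        RuntimeError: if ``scale_grad_by_freq`` or ``sparse`` is requested.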
+ """ + + input = args[0] + weight = args[1] + offsets = kwargs.get("offsets") + per_sample_weights = kwargs.get("per_sample_weights") + mode = kwargs.get("mode") + max_norm = kwargs.get("max_norm") + scale_grad_by_freq = kwargs.get("scale_grad_by_freq") + sparse = kwargs.get("sparse") + include_last_offset = kwargs.get("include_last_offset") + + # Validate types + if not isinstance(input, torch.Tensor): + raise TypeError("input need to be torch.Tensor") + if offsets is not None and not isinstance(offsets, torch.Tensor): + raise TypeError("offsets need to be torch.Tensor") + if per_sample_weights is not None and not isinstance( + per_sample_weights, torch.Tensor + ): + raise TypeError("per_sample_weights need to be torch.Tensor") + if not isinstance(weight, ShardedTensor): + raise TypeError("weight needs to be ShardedTensor") + if len(input.size()) > 2: + raise ValueError("Input more than 2 dims not supported") + weight_size = weight.size() + if len(weight_size) != 2: + raise ValueError("Weight needs to have exactly 2 dims") + if int(torch.min(input).item()) < 0: + raise ValueError( + "Index out of range in Input %d %d", + int(torch.min(input).item()), + weight_size[1], + ) + if int(torch.max(input).item()) >= weight_size[0]: + raise ValueError( + "Index out of range in Input %d %d", + int(torch.max(input).item()), + weight_size[1], + ) + if offsets is not None and len(input.size()) != 1: + raise ValueError("Input dimension needs to be exactly 1 dim") + if len(input.size()) == 1 and offsets is None: + raise ValueError("offsets is required for 1D input") + if per_sample_weights is not None and per_sample_weights.size() != input.size(): + raise ValueError( + f"per_sample_weights size {per_sample_weights.size()} not equal to input size {input.size()}" + ) + if mode is None: + mode = "mean" + if mode not in ["sum", "mean", "max"]: + raise ValueError(f"mode '{mode}' is not supported") + if scale_grad_by_freq: + raise RuntimeError( + 'nn.Embedding weight sharded with flag on "scale_grad_by_freq" not supported!' + ) + if sparse: + raise RuntimeError( + 'nn.Embedding weight sharded with flag on "sparse" not supported!' + ) + if include_last_offset and offsets is None: + raise ValueError('offsets is required for flag "include_last_offset"!') + if include_last_offset and cast(List[int], offsets)[-1] != input.size(0): + raise ValueError( + 'offsets need to have the input size in the end when the flag "include_last_offset" is on!' + ) + + if max_norm and max_norm <= 0.0: + raise ValueError('"max_norm" must be larger than zero!') + + if not isinstance(weight._sharding_spec, ChunkShardingSpec): + raise ValueError("Only ChunkShardingSpec supported for ShardedTensor ops!") + if len(weight.local_shards()) != 1: + raise ValueError("Only one local shard supported!") + + +def _handle_col_wise_sharding( + input, + world_size, + weight, + local_shard, + offsets, + per_sample_weights, + mode, + max_norm, + norm_type, + padding_idx, + pg, +): + """ + Entry-point function to handle the logic of col-wise sharding of weight + for embeddingBag. (Detailed explanations of the logic can be found in + the comment for sharded_embedding_bag.) + + Args: + input: list of ID used for lookup and aggregation. + world_size: number of ranks. + weight: sharded weight tensor. + local_shard: col-wise shared local weight used for lookup. + offsets: list of start positions of each bag for 1D input. + per_sample_weights: weights for weighted sum mode. + mode: aggregation method of each bag. 
+ max_norm: If given, each embedding vector with norm larger + than max_norm is renormalized to have norm max_norm. + Note: this will modify weight in-place. + norm_type: The p in the p-norm to compute for the max_norm option. + padding_idx: If specified, the entries at padding_idx do + not contribute to the gradient; therefore, the embedding + vector at padding_idx is not updated during training, + i.e. it remains as a fixed “pad”. + Note that the embedding vector at padding_idx is + excluded from the reduction. + pg: process group. + + Return: + output: final result of lookup and aggregation. + local_shard: col-wise shared local weight used for lookup. + If max_norm, this will be the renormed weight. + """ + # allgather the special input of embedding bag first. + ( + gathered_inputs, + gathered_per_sample_weights, + gathered_offsets, + ) = _all_gather_embedding_bag_input(input, per_sample_weights, offsets, pg) + + if max_norm is not None: + # max_norm changes the weight in-place + local_shard = _handle_max_norm_col_wise( + max_norm, norm_type, local_shard, input, world_size, gathered_inputs, pg + ) + + output = _handle_col_wise_sharding_base( + torch.nn.functional.embedding_bag, + 1, + input, + world_size, + weight, + local_shard, + pg, + gathered_inputs, + mode=mode, + gathered_per_sample_weights=gathered_per_sample_weights, + gathered_offsets=gathered_offsets, + padding_idx=padding_idx, + ) + return (output, local_shard) + + +def _handle_row_wise_sharding( + input, + world_size, + weight, + local_shard, + offsets, + per_sample_weights, + mode, + max_norm, + norm_type, + padding_idx, + rank, + pg, +): + """ + Entry-point function to handle the logic of row-wise sharding of weight + for embeddingBag. (Detailed explanations of the logic can be found in + the comment for sharded_embedding_bag.) + + Args: + input: list of ID used for lookup and aggregation. + world_size: number of ranks. + weight: sharded weight tensor. + local_shard: row-wise shared local weight used for lookup. + offsets: list of start positions of each bag for 1D input. + per_sample_weights: weights for weighted sum mode. + mode: aggregation method of each bag. + max_norm: If given, each embedding vector with norm larger + than max_norm is renormalized to have norm max_norm. + Note: this will modify weight in-place. + norm_type: The p in the p-norm to compute for the max_norm option. + padding_idx: If specified, the entries at padding_idx do + not contribute to the gradient; therefore, the embedding + vector at padding_idx is not updated during training, + i.e. it remains as a fixed “pad”. + Note that the embedding vector at padding_idx is + excluded from the reduction. + rank: # of cuda process. + pg: process group. + + Returns: + gathered_output: final result of lookup and aggregation. + """ + if input.dim() > 1 and per_sample_weights is None: + # allgather the inputs first for non Replicated Tensor. 
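+        # 2D inputs without per_sample_weights can use the cheaper fused
+        # ``_all_gather_base`` path below; otherwise (the else branch) input,
+        # per_sample_weights and offsets are stacked and gathered together,
+        # and for 1D inputs the gathered offsets are shifted by each rank's
+        # input length so they index into the concatenated batch correctly.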
+ gather_inp = _all_gather_base_input(input, pg) + else: + ( + gathered_inputs, + gathered_per_sample_weights, + gathered_offsets, + ) = _all_gather_embedding_bag_input(input, per_sample_weights, offsets, pg) + cat_dim = 0 if input.dim() != 1 else -1 + gather_inp = torch.cat(gathered_inputs, dim=cat_dim) + if per_sample_weights is not None: + per_sample_weights = torch.cat(gathered_per_sample_weights, dim=cat_dim) + offset_add = 0 if input.dim() > 1 else input.size(0) + if offsets is not None: + offsets_list = torch.cat( + [gathered_offsets[i] + (offset_add * i) for i in range(pg.size())], + dim=cat_dim, + ) + + # Mask the input according to sharding spec. + lookup_input, padding_local, padding_row = _handle_row_wise_mask( + gather_inp, padding_idx, weight, world_size, rank + ) + if mode == "max": + padding_row[:] = -float("Inf") + + # When input is a large tensor, the value of weight is changed. + # This is a walk-around for now. GH issue: #81717. + if max_norm is not None: + torch.nn.functional.embedding_bag( + torch.unique(lookup_input)[:-1], + local_shard, + offsets=torch.tensor([0], device=local_shard.device, dtype=torch.long), + mode=mode, + per_sample_weights=None, + max_norm=max_norm, + norm_type=norm_type, + padding_idx=padding_local, + ) + max_norm = None + result = torch.nn.functional.embedding_bag( + lookup_input, + torch.cat([local_shard, padding_row]), + offsets=offsets_list if offsets is not None else offsets, # type: ignore[possibly-undefined] + mode=mode if mode != "mean" else "sum", + per_sample_weights=per_sample_weights, + max_norm=max_norm, + norm_type=norm_type, + padding_idx=padding_local, + ) + + op = ReduceOp.SUM if mode != "max" else ReduceOp.MAX + # TODO: Make the result a PartialTensor and move the logic below there. + local_shards = result.chunk(pg.size()) + result = reduce_scatter( + torch.empty_like(local_shards[0]), + list(local_shards), + op=op, + group=pg, + ) + + # For Mean, we cannot do the division until very end because the sum of means + # not equal to the mean of sum. (Divisor is different) + if mode == "mean": + if input.dim() > 1: + padding_idx = padding_idx if padding_idx is not None else -1 + split_sizes = torch.sum( + torch.ne(input, padding_idx), dim=-1, dtype=local_shard.dtype + ) + else: + split_sizes = torch.cat( + ( + offsets[1 : offsets.size(0)] - offsets[0:-1], + (input.size(0) - offsets[-1]).unsqueeze(0), + ), + dim=-1, + ) + return torch.div(result, split_sizes.unsqueeze(1)) + + # Return the appropriate local result. + return result + + +def _all_gather_embedding_bag_input(input, per_sample_weights, offsets, pg): + """ + In case we need to gather input and all other parameters of embeddingBag + ops, we need to stack all input together to perform ``all_gather`` + collective communication just once. + + Note that since offsets does not share the same size as input and + is always smaller than input, we resize it during the communication. + + Args: + input: tensor to be applied op on. + per_sample_weights: weights for weighted sum mode. + offsets: when input is 1D. offsets determines the starting + index position of each bag (sequence) in input. + pg: process group. + + Returns: + gathered_inputs: list of input tensor gathered from each rank. + gathered_per_sample_weights: list of per_sample_weights from each rank. + gathered_offsets: list of offsets from each rank. 
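+
+    Note: to allow a single stacked ``all_gather``, ``offsets`` is cloned and
+    resized to ``input``'s shape before the collective; the gathered copies
+    are resized back to the original offsets shape and cast back to the
+    offsets dtype afterward.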
+ """ + input_to_gather = [input] + if per_sample_weights is not None: + input_to_gather.append(per_sample_weights) + if offsets is not None: + input_to_gather.append(offsets.clone().resize_(input.size())) + gathered_inputs = all_gather(torch.stack(input_to_gather), group=pg) + + gathered_per_sample_weights = None + if per_sample_weights is not None: + gathered_per_sample_weights = [t[1] for t in gathered_inputs] + gathered_offsets = None + if offsets is not None: + idx = 2 if per_sample_weights is not None else 1 + gathered_offsets = [ + t[idx].resize_(offsets.size()).to(offsets.dtype) for t in gathered_inputs + ] + gathered_inputs = [t[0].to(input.dtype) for t in gathered_inputs] + return gathered_inputs, gathered_per_sample_weights, gathered_offsets diff --git a/venv/lib/python3.10/site-packages/torch/distributed/elastic/timer/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/elastic/timer/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..33ed70e97f026372563383f4e2cc63f4c81cb11f Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/elastic/timer/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/elastic/timer/__pycache__/api.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/elastic/timer/__pycache__/api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e3f79b597c471630fa8939a0eb6e049403a3cba7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/elastic/timer/__pycache__/api.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/elastic/timer/__pycache__/file_based_local_timer.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/elastic/timer/__pycache__/file_based_local_timer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8d6e8ebf23eff58df2cf0bce77dc852119df675a Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/elastic/timer/__pycache__/file_based_local_timer.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/elastic/timer/__pycache__/local_timer.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/distributed/elastic/timer/__pycache__/local_timer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7863e875babb773909dc07886ec2b37e2706c31d Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/distributed/elastic/timer/__pycache__/local_timer.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/distributed/elastic/timer/api.py b/venv/lib/python3.10/site-packages/torch/distributed/elastic/timer/api.py new file mode 100644 index 0000000000000000000000000000000000000000..aa9735549282019d9ef8bd519531a117e0d1792a --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/elastic/timer/api.py @@ -0,0 +1,280 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
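+"""
+Abstract interfaces for elastic countdown timers: ``TimerRequest`` carries an
+acquire/release request, ``TimerClient`` is the worker-side API, and
+``RequestQueue``/``TimerServer`` implement the monitoring side that reaps
+workers whose timers expire.  ``configure`` and ``expires`` are the
+module-level helpers built on top of a configured client.
+"""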
+import abc +import logging +import threading +import time +from contextlib import contextmanager +from inspect import getframeinfo, stack +from typing import Any, Dict, List, Optional, Set + +__all__ = ['TimerRequest', 'TimerClient', 'RequestQueue', 'TimerServer', 'configure', 'expires'] + +log = logging.getLogger(__name__) + +class TimerRequest: + """ + Data object representing a countdown timer acquisition and release + that is used between the ``TimerClient`` and ``TimerServer``. + A negative ``expiration_time`` should be interpreted as a "release" + request. + + .. note:: the type of ``worker_id`` is implementation specific. + It is whatever the TimerServer and TimerClient implementations + have on to uniquely identify a worker. + """ + + __slots__ = ["worker_id", "scope_id", "expiration_time"] + + def __init__(self, worker_id: Any, scope_id: str, expiration_time: float): + self.worker_id = worker_id + self.scope_id = scope_id + self.expiration_time = expiration_time + + def __eq__(self, other): + if isinstance(other, TimerRequest): + return ( + self.worker_id == other.worker_id + and self.scope_id == other.scope_id + and self.expiration_time == other.expiration_time + ) + return False + + +class TimerClient(abc.ABC): + """ + Client library to acquire and release countdown timers by communicating + with the TimerServer. + """ + + @abc.abstractmethod + def acquire(self, scope_id: str, expiration_time: float) -> None: + """ + Acquires a timer for the worker that holds this client object + given the scope_id and expiration_time. Typically registers + the timer with the TimerServer. + """ + pass + + @abc.abstractmethod + def release(self, scope_id: str): + """ + Releases the timer for the ``scope_id`` on the worker this + client represents. After this method is + called, the countdown timer on the scope is no longer in effect. + """ + pass + + +class RequestQueue(abc.ABC): + """ + Consumer queue holding timer acquisition/release requests + """ + + @abc.abstractmethod + def size(self) -> int: + """ + Returns the size of the queue at the time this method is called. + Note that by the time ``get`` is called the size of the queue + may have increased. The size of the queue should not decrease + until the ``get`` method is called. That is, the following assertion + should hold: + + size = q.size() + res = q.get(size, timeout=0) + assert size == len(res) + + -- or -- + + size = q.size() + res = q.get(size * 2, timeout=1) + assert size <= len(res) <= size * 2 + """ + pass + + @abc.abstractmethod + def get(self, size: int, timeout: float) -> List[TimerRequest]: + """ + Gets up to ``size`` number of timer requests in a blocking fashion + (no more than ``timeout`` seconds). + """ + pass + + +class TimerServer(abc.ABC): + """ + Entity that monitors active timers and expires them + in a timely fashion. This server is responsible for + reaping workers that have expired timers. 
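+
+    Subclasses implement ``register_timers``, ``clear_timers``,
+    ``get_expired_timers`` and ``_reap_worker``; the base class drives them
+    from a watchdog thread created by ``start()`` (see ``_run_watchdog``).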
+ """ + + def __init__( + self, request_queue: RequestQueue, max_interval: float, daemon: bool = True + ): + """ + :param request_queue: Consumer ``RequestQueue`` + :param max_interval: max time (in seconds) to wait + for an item in the request_queue + :param daemon: whether to run the watchdog thread as a daemon + """ + super().__init__() + self._request_queue = request_queue + self._max_interval = max_interval + self._daemon = daemon + self._watchdog_thread: Optional[threading.Thread] = None + self._stop_signaled = False + + @abc.abstractmethod + def register_timers(self, timer_requests: List[TimerRequest]) -> None: + """ + Processes the incoming timer requests and registers them with the server. + The timer request can either be a acquire-timer or release-timer request. + Timer requests with a negative expiration_time should be interpreted + as a release-timer request. + """ + pass + + @abc.abstractmethod + def clear_timers(self, worker_ids: Set[Any]) -> None: + """ + Clears all timers for the given ``worker_ids``. + """ + pass + + @abc.abstractmethod + def get_expired_timers(self, deadline: float) -> Dict[str, List[TimerRequest]]: + """ + Returns all expired timers for each worker_id. An expired timer + is a timer for which the expiration_time is less than or equal to + the provided deadline. + """ + pass + + @abc.abstractmethod + def _reap_worker(self, worker_id: Any) -> bool: + """ + Reaps the given worker. Returns True if the worker has been + successfully reaped, False otherwise. If any uncaught exception + is thrown from this method, the worker is considered reaped + and all associated timers will be removed. + """ + + def _reap_worker_no_throw(self, worker_id: Any) -> bool: + """ + Wraps ``_reap_worker(worker_id)``, if an uncaught exception is + thrown, then it considers the worker as reaped. + """ + try: + return self._reap_worker(worker_id) + except Exception: + log.exception( + "Uncaught exception thrown from _reap_worker(), " + "check that the implementation correctly catches exceptions", + ) + return True + + def _watchdog_loop(self): + while not self._stop_signaled: + try: + self._run_watchdog() + except Exception: + log.exception("Error running watchdog") + + def _run_watchdog(self): + batch_size = max(1, self._request_queue.size()) + timer_requests = self._request_queue.get(batch_size, self._max_interval) + self.register_timers(timer_requests) + now = time.time() + reaped_worker_ids = set() + for worker_id, expired_timers in self.get_expired_timers(now).items(): + log.info( + "Reaping worker_id=[%s]." + " Expired timers: %s", + worker_id, self._get_scopes(expired_timers) + ) + if self._reap_worker_no_throw(worker_id): + log.info("Successfully reaped worker=[%s]", worker_id) + reaped_worker_ids.add(worker_id) + else: + log.error( + "Error reaping worker=[%s]. Will retry on next watchdog.", worker_id + ) + self.clear_timers(reaped_worker_ids) + + def _get_scopes(self, timer_requests): + return [r.scope_id for r in timer_requests] + + def start(self) -> None: + log.info( + "Starting %s..." 
+ " max_interval=%s," + " daemon=%s", + type(self).__name__, self._max_interval, self._daemon + ) + self._watchdog_thread = threading.Thread( + target=self._watchdog_loop, daemon=self._daemon + ) + log.info("Starting watchdog thread...") + self._watchdog_thread.start() + + def stop(self) -> None: + log.info("Stopping %s", type(self).__name__) + self._stop_signaled = True + if self._watchdog_thread: + log.info("Stopping watchdog thread...") + self._watchdog_thread.join(self._max_interval) + self._watchdog_thread = None + else: + log.info("No watchdog thread running, doing nothing") + + +_timer_client: Optional[TimerClient] = None + + +def configure(timer_client: TimerClient): + """ + Configures a timer client. Must be called before using ``expires``. + """ + global _timer_client + _timer_client = timer_client + log.info("Timer client configured to: %s", type(_timer_client).__name__) + + +@contextmanager +def expires( + after: float, scope: Optional[str] = None, client: Optional[TimerClient] = None +): + """ + Acquires a countdown timer that expires in ``after`` seconds from now, + unless the code-block that it wraps is finished within the timeframe. + When the timer expires, this worker is eligible to be reaped. The + exact meaning of "reaped" depends on the client implementation. In + most cases, reaping means to terminate the worker process. + Note that the worker is NOT guaranteed to be reaped at exactly + ``time.now() + after``, but rather the worker is "eligible" for being + reaped and the ``TimerServer`` that the client talks to will ultimately + make the decision when and how to reap the workers with expired timers. + + Usage:: + + torch.distributed.elastic.timer.configure(LocalTimerClient()) + with expires(after=10): + torch.distributed.all_reduce(...) + """ + if client is None: + if _timer_client is None: + raise RuntimeError("Configure timer client before using countdown timers.") + client = _timer_client + if scope is None: + # grab the caller file + lineno + caller = getframeinfo(stack()[1][0]) + scope = f"{caller.filename}#{caller.lineno}" + expiration = time.time() + after + client.acquire(scope, expiration) + try: + yield + finally: + client.release(scope) diff --git a/venv/lib/python3.10/site-packages/torch/distributed/elastic/timer/file_based_local_timer.py b/venv/lib/python3.10/site-packages/torch/distributed/elastic/timer/file_based_local_timer.py new file mode 100644 index 0000000000000000000000000000000000000000..26ebce33dcb5b5062667cbb5fc9630b93eee4c79 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/elastic/timer/file_based_local_timer.py @@ -0,0 +1,333 @@ +# Copyright (c) Meta Platforms, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +import io +import json +import logging +import os +import select +import signal +import sys +import threading +import time +from typing import Callable, Dict, List, Optional, Set, Tuple + +from torch.distributed.elastic.timer.api import TimerClient, TimerRequest + +__all__ = ["FileTimerClient", "FileTimerRequest", "FileTimerServer"] + +log = logging.getLogger(__name__) + +class FileTimerRequest(TimerRequest): + """ + Data object representing a countdown timer acquisition and release + that is used between the ``FileTimerClient`` and ``FileTimerServer``. + A negative ``expiration_time`` should be interpreted as a "release" + request. 
+ ``signal`` is the signal to reap the worker process from the server + process. + """ + + __slots__ = ["version", "worker_pid", "scope_id", "expiration_time", "signal"] + + def __init__(self, worker_pid: int, scope_id: str, expiration_time: float, signal: int = 0) -> None: + self.version = 1 + self.worker_pid = worker_pid + self.scope_id = scope_id + self.expiration_time = expiration_time + self.signal = signal + + def __eq__(self, other) -> bool: + if isinstance(other, FileTimerRequest): + return ( + self.version == other.version + and self.worker_pid == other.worker_pid + and self.scope_id == other.scope_id + and self.expiration_time == other.expiration_time + and self.signal == other.signal + ) + return False + + def to_json(self) -> str: + return json.dumps( + { + "version": self.version, + "pid": self.worker_pid, + "scope_id": self.scope_id, + "expiration_time": self.expiration_time, + "signal": self.signal + }, + ) + + +class FileTimerClient(TimerClient): + """ + Client side of ``FileTimerServer``. This client is meant to be used + on the same host that the ``FileTimerServer`` is running on and uses + pid to uniquely identify a worker. + This client uses a named_pipe to send timer requests to the + ``FileTimerServer``. This client is a producer while the + ``FileTimerServer`` is a consumer. Multiple clients can work with + the same ``FileTimerServer``. + + Args: + + file_path: str, the path of a FIFO special file. ``FileTimerServer`` + must have created it by calling os.mkfifo(). + + signal: signal, the signal to use to kill the process. Using a + negative or zero signal will not kill the process. + """ + def __init__(self, file_path: str, signal=(signal.SIGKILL if sys.platform != "win32" else + signal.CTRL_C_EVENT)) -> None: # type: ignore[attr-defined] + super().__init__() + self._file_path = file_path + self.signal = signal + + def _open_non_blocking(self) -> Optional[io.TextIOWrapper]: + try: + fd = os.open(self._file_path, os.O_WRONLY | os.O_NONBLOCK) + return os.fdopen(fd, "wt") + except Exception: + return None + + def _send_request(self, request: FileTimerRequest) -> None: + # The server may have crashed or may haven't started yet. + # In such case, calling open() in blocking model blocks the client. + # To avoid such issue, open it in non-blocking mode, and an OSError will + # be raised if the server is not there. + file = self._open_non_blocking() + if file is None: + raise BrokenPipeError("Could not send the FileTimerRequest because FileTimerServer is not available.") + with file: + json_request = request.to_json() + # Write request with no greater than select.PIPE_BUF is guarantee to be atomic. + if len(json_request) > select.PIPE_BUF: + raise RuntimeError( + f"FileTimerRequest larger than {select.PIPE_BUF} bytes " + f"is not supported: {json_request}" + ) + file.write(json_request + "\n") + + def acquire(self, scope_id: str, expiration_time: float) -> None: + self._send_request( + request=FileTimerRequest( + worker_pid=os.getpid(), + scope_id=scope_id, + expiration_time=expiration_time, + signal=self.signal + ), + ) + + def release(self, scope_id: str) -> None: + self._send_request( + request=FileTimerRequest( + worker_pid=os.getpid(), + scope_id=scope_id, + expiration_time=-1, + signal=0 + ), + ) + + +class FileTimerServer: + """ + Server that works with ``FileTimerClient``. Clients are expected to be + running on the same host as the process that is running this server. 
+ Each host in the job is expected to start its own timer server locally + and each server instance manages timers for local workers (running on + processes on the same host). + + Args: + + file_path: str, the path of a FIFO special file to be created. + + max_interval: float, max interval in seconds for each watchdog loop. + + daemon: bool, running the watchdog thread in daemon mode or not. + A daemon thread will not block a process to stop. + log_event: Callable[[Dict[str, str]], None], an optional callback for + logging the events in JSON format. + """ + + def __init__( + self, + file_path: str, + max_interval: float = 10, + daemon: bool = True, + log_event: Optional[Callable[[str, Optional[FileTimerRequest]], None]] = None + ) -> None: + self._file_path = file_path + self._max_interval = max_interval + self._daemon = daemon + self._timers: Dict[Tuple[int, str], FileTimerRequest] = {} + self._stop_signaled = False + self._watchdog_thread: Optional[threading.Thread] = None + if os.path.exists(self._file_path): + os.remove(self._file_path) + os.mkfifo(self._file_path) + # For test only. Count the number of requests received. + self._request_count = 0 + # For test only. Process all requests and stop the server. + self._run_once = False + self._log_event = log_event if log_event is not None else lambda name, request: None + + + def start(self) -> None: + log.info( + "Starting %s..." + " max_interval=%s," + " daemon=%s", + type(self).__name__, self._max_interval, self._daemon + ) + self._watchdog_thread = threading.Thread(target=self._watchdog_loop, daemon=self._daemon) + log.info("Starting watchdog thread...") + self._watchdog_thread.start() + self._log_event("watchdog started", None) + + def stop(self) -> None: + log.info("Stopping %s", type(self).__name__) + self._stop_signaled = True + if self._watchdog_thread: + log.info("Stopping watchdog thread...") + self._watchdog_thread.join(self._max_interval) + self._watchdog_thread = None + else: + log.info("No watchdog thread running, doing nothing") + if os.path.exists(self._file_path): + os.remove(self._file_path) + self._log_event("watchdog stopped", None) + + def run_once(self) -> None: + self._run_once = True + if self._watchdog_thread: + log.info("Stopping watchdog thread...") + self._watchdog_thread.join() + self._watchdog_thread = None + else: + log.info("No watchdog thread running, doing nothing") + if os.path.exists(self._file_path): + os.remove(self._file_path) + + def _watchdog_loop(self) -> None: + # Open the pipe in blocking mode blocks the server thread. + # This is fine for the following reasons: + # 1. No client case usually does not happen. + # 2. We are running the watchdog loop in a separate daemon + # thread, which will not block the process to stop. + with open(self._file_path) as fd: + while not self._stop_signaled: + try: + run_once = self._run_once + self._run_watchdog(fd) + if run_once: + break + except Exception: + log.exception("Error running watchdog") + + def _run_watchdog(self, fd: io.TextIOWrapper) -> None: + timer_requests = self._get_requests(fd, self._max_interval) + self.register_timers(timer_requests) + now = time.time() + reaped_worker_pids = set() + for worker_pid, expired_timers in self.get_expired_timers(now).items(): + log.info("Reaping worker_pid=[%s]. Expired timers: %s", worker_pid, self._get_scopes(expired_timers)) + reaped_worker_pids.add(worker_pid) + # In case we have multiple expired timers, we find the first timer + # with a valid signal (>0) in the expiration time order. 
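+            # Timers acquired with a non-positive signal never trigger a
+            # kill: if no expired timer for this pid carries a positive
+            # signal, the worker is left alive, but its timers are still
+            # cleared because the pid was already added to reaped_worker_pids.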
+ expired_timers.sort(key=lambda timer: timer.expiration_time) + signal = 0 + expired_timer = None + for timer in expired_timers: + self._log_event("timer expired", timer) + if timer.signal > 0: + signal = timer.signal + expired_timer = timer + break + if signal <= 0: + log.info("No signal specified with worker=[%s]. Do not reap it.", worker_pid) + continue + if self._reap_worker(worker_pid, signal): + log.info("Successfully reaped worker=[%s] with signal=%s", worker_pid, signal) + self._log_event("kill worker process", expired_timer) + else: + log.error("Error reaping worker=[%s]. Will retry on next watchdog.", worker_pid) + self.clear_timers(reaped_worker_pids) + + def _get_scopes(self, timer_requests: List[FileTimerRequest]) -> List[str]: + return [r.scope_id for r in timer_requests] + + def _get_requests(self, fd: io.TextIOWrapper, max_interval: float) -> List[FileTimerRequest]: + start = time.time() + requests = [] + while not self._stop_signaled or self._run_once: + # For named pipe, readline() is blocking when at least one writer opens. + # It returns only when flush() is called at the writer side. + # Note that flush() is automatically called inside close(). + # After the last writer closes, readline() is not blocking. + # It will return an empty string when it's at end-of-file. + # Since the client side always opens the pipe, writes a message and closes + # the pipe immediately, the readline() call below is not blocking for long. + json_request = fd.readline() + if len(json_request) == 0: + if self._run_once: + break + time.sleep(min(max_interval, 1)) + else: + request = json.loads(json_request) + pid = request["pid"] + scope_id = request["scope_id"] + expiration_time = request["expiration_time"] + signal = request["signal"] + requests.append( + FileTimerRequest( + worker_pid=pid, scope_id=scope_id, expiration_time=expiration_time, signal=signal + ) + ) + now = time.time() + if now - start > max_interval: + break + return requests + + def register_timers(self, timer_requests: List[FileTimerRequest]) -> None: + for request in timer_requests: + pid = request.worker_pid + scope_id = request.scope_id + expiration_time = request.expiration_time + self._request_count += 1 + + key = (pid, scope_id) + # negative expiration is a proxy for a release call + if expiration_time < 0: + if key in self._timers: + del self._timers[key] + else: + self._timers[key] = request + + def clear_timers(self, worker_pids: Set[int]) -> None: + for (pid, scope_id) in list(self._timers.keys()): + if pid in worker_pids: + del self._timers[(pid, scope_id)] + + def get_expired_timers(self, deadline: float) -> Dict[int, List[FileTimerRequest]]: + # pid -> [timer_requests...] + expired_timers: Dict[int, List[FileTimerRequest]] = {} + for request in self._timers.values(): + if request.expiration_time <= deadline: + expired_scopes = expired_timers.setdefault(request.worker_pid, []) + expired_scopes.append(request) + return expired_timers + + def _reap_worker(self, worker_pid: int, signal: int) -> bool: + try: + os.kill(worker_pid, signal) + return True + except ProcessLookupError: + log.info("Process with pid=%s does not exist. 
Skipping", worker_pid) + return True + except Exception: + log.exception("Error terminating pid=%s", worker_pid) + return False diff --git a/venv/lib/python3.10/site-packages/torch/distributed/elastic/timer/local_timer.py b/venv/lib/python3.10/site-packages/torch/distributed/elastic/timer/local_timer.py new file mode 100644 index 0000000000000000000000000000000000000000..05f467c807a5bc61bb0a3c6853cd17243636e1cb --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributed/elastic/timer/local_timer.py @@ -0,0 +1,125 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. +import logging +import multiprocessing as mp +import os +import signal +import time +from queue import Empty +from typing import Any, Dict, List, Set, Tuple + +from .api import RequestQueue, TimerClient, TimerRequest, TimerServer + +__all__ = ['LocalTimerClient', 'MultiprocessingRequestQueue', 'LocalTimerServer'] + +log = logging.getLogger(__name__) + +class LocalTimerClient(TimerClient): + """ + Client side of ``LocalTimerServer``. This client is meant to be used + on the same host that the ``LocalTimerServer`` is running on and uses + pid to uniquely identify a worker. This is particularly useful in situations + where one spawns a subprocess (trainer) per GPU on a host with multiple + GPU devices. + """ + + def __init__(self, mp_queue): + super().__init__() + self._mp_queue = mp_queue + + def acquire(self, scope_id, expiration_time): + pid = os.getpid() + acquire_request = TimerRequest(pid, scope_id, expiration_time) + self._mp_queue.put(acquire_request) + + def release(self, scope_id): + pid = os.getpid() + release_request = TimerRequest(pid, scope_id, -1) + self._mp_queue.put(release_request) + + +class MultiprocessingRequestQueue(RequestQueue): + """ + A ``RequestQueue`` backed by python ``multiprocessing.Queue`` + """ + + def __init__(self, mp_queue: mp.Queue): + super().__init__() + self._mp_queue = mp_queue + + def size(self) -> int: + return self._mp_queue.qsize() + + def get(self, size, timeout: float) -> List[TimerRequest]: + requests = [] + wait = timeout + for _ in range(0, size): + start = time.time() + + try: + r = self._mp_queue.get(block=True, timeout=wait) + except Empty: + break + + requests.append(r) + wait = wait - (time.time() - start) + if wait <= 0: + break + + return requests + + +class LocalTimerServer(TimerServer): + """ + Server that works with ``LocalTimerClient``. Clients are expected to be + subprocesses to the parent process that is running this server. Each host + in the job is expected to start its own timer server locally and each + server instance manages timers for local workers (running on processes + on the same host). 
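+
+    A minimal wiring sketch (illustrative; ``mp_queue`` must be the same
+    queue that the worker processes use to build their ``LocalTimerClient``)::
+
+        import multiprocessing as mp
+
+        mp_queue = mp.get_context("spawn").Queue()
+        server = LocalTimerServer(mp_queue, max_interval=0.01)
+        server.start()
+        # in each worker process:
+        #   configure(LocalTimerClient(mp_queue))
+        #   with expires(after=60):
+        #       ...  # the guarded code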
+ """ + + def __init__( + self, mp_queue: mp.Queue, max_interval: float = 60, daemon: bool = True + ): + super().__init__(MultiprocessingRequestQueue(mp_queue), max_interval, daemon) + self._timers: Dict[Tuple[Any, str], TimerRequest] = {} + + def register_timers(self, timer_requests: List[TimerRequest]) -> None: + for request in timer_requests: + pid = request.worker_id + scope_id = request.scope_id + expiration_time = request.expiration_time + + # negative expiration is a proxy for a release call + if expiration_time < 0: + self._timers.pop((pid, scope_id), None) + else: + self._timers[(pid, scope_id)] = request + + def clear_timers(self, worker_ids: Set[int]) -> None: + for (pid, scope_id) in list(self._timers.keys()): + if pid in worker_ids: + self._timers.pop((pid, scope_id)) + + def get_expired_timers(self, deadline: float) -> Dict[Any, List[TimerRequest]]: + # pid -> [timer_requests...] + expired_timers: Dict[Any, List[TimerRequest]] = {} + for request in self._timers.values(): + if request.expiration_time <= deadline: + expired_scopes = expired_timers.setdefault(request.worker_id, []) + expired_scopes.append(request) + return expired_timers + + def _reap_worker(self, worker_id: int) -> bool: + try: + os.kill(worker_id, signal.SIGKILL) + return True + except ProcessLookupError: + log.info("Process with pid=%s does not exist. Skipping", worker_id) + return True + except Exception: + log.exception("Error terminating pid=%s", worker_id) + return False diff --git a/venv/lib/python3.10/site-packages/torch/optim/__init__.py b/venv/lib/python3.10/site-packages/torch/optim/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..878842bfa496e3bfd88f85dfd816d2ca92b3a838 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/optim/__init__.py @@ -0,0 +1,39 @@ +""" +:mod:`torch.optim` is a package implementing various optimization algorithms. + +Most commonly used methods are already supported, and the interface is general +enough, so that more sophisticated ones can also be easily integrated in the +future. +""" + +from .adadelta import Adadelta +from .adagrad import Adagrad +from .adam import Adam +from .adamw import AdamW +from .sparse_adam import SparseAdam +from .adamax import Adamax +from .asgd import ASGD +from .sgd import SGD +from .radam import RAdam +from .rprop import Rprop +from .rmsprop import RMSprop +from .optimizer import Optimizer +from .nadam import NAdam +from .lbfgs import LBFGS +from . import lr_scheduler +from . 
import swa_utils + +del adadelta # noqa: F821 +del adagrad # noqa: F821 +del adam # noqa: F821 +del adamw # noqa: F821 +del sparse_adam # noqa: F821 +del adamax # noqa: F821 +del asgd # noqa: F821 +del sgd # noqa: F821 +del radam # noqa: F821 +del rprop # noqa: F821 +del rmsprop # noqa: F821 +del optimizer # noqa: F821 +del nadam # noqa: F821 +del lbfgs # noqa: F821 diff --git a/venv/lib/python3.10/site-packages/torch/optim/adagrad.py b/venv/lib/python3.10/site-packages/torch/optim/adagrad.py new file mode 100644 index 0000000000000000000000000000000000000000..ce17e2d7ad33168008b1d2629b0dbaad478c3d9b --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/optim/adagrad.py @@ -0,0 +1,384 @@ +import torch +from torch import Tensor + +from .optimizer import (Optimizer, _use_grad_for_differentiable, _get_value, _view_as_real, + _default_to_fused_or_foreach, _get_scalar_dtype, _differentiable_doc, + _foreach_doc, _maximize_doc) +from typing import List, Optional + +__all__ = ["Adagrad", "adagrad"] + + +class Adagrad(Optimizer): + def __init__( + self, + params, + lr=1e-2, + lr_decay=0, + weight_decay=0, + initial_accumulator_value=0, + eps=1e-10, + foreach: Optional[bool] = None, + *, + maximize: bool = False, + differentiable: bool = False, + ): + if not 0.0 <= lr: + raise ValueError(f"Invalid learning rate: {lr}") + if not 0.0 <= lr_decay: + raise ValueError(f"Invalid lr_decay value: {lr_decay}") + if not 0.0 <= weight_decay: + raise ValueError(f"Invalid weight_decay value: {weight_decay}") + if not 0.0 <= initial_accumulator_value: + raise ValueError( + f"Invalid initial_accumulator_value value: {initial_accumulator_value}" + ) + if not 0.0 <= eps: + raise ValueError(f"Invalid epsilon value: {eps}") + + defaults = dict( + lr=lr, + lr_decay=lr_decay, + eps=eps, + weight_decay=weight_decay, + initial_accumulator_value=initial_accumulator_value, + foreach=foreach, + maximize=maximize, + differentiable=differentiable, + ) + super().__init__(params, defaults) + + for group in self.param_groups: + for p in group["params"]: + state = self.state[p] + state["step"] = torch.tensor(0.0, dtype=_get_scalar_dtype()) + init_value = ( + complex(initial_accumulator_value, initial_accumulator_value) + if torch.is_complex(p) + else initial_accumulator_value + ) + state["sum"] = torch.full_like( + p, init_value, memory_format=torch.preserve_format + ) + + def __setstate__(self, state): + super().__setstate__(state) + for group in self.param_groups: + group.setdefault("foreach", None) + group.setdefault("maximize", False) + group.setdefault("differentiable", False) + + state_values = list(self.state.values()) + step_is_tensor = (len(state_values) != 0) and torch.is_tensor( + state_values[0]["step"] + ) + if not step_is_tensor: + for s in state_values: + s["step"] = torch.tensor(float(s["step"]), dtype=_get_scalar_dtype()) + + def share_memory(self): + for group in self.param_groups: + for p in group["params"]: + state = self.state[p] + state["sum"].share_memory_() + + def _init_group(self, group, params_with_grad, grads, state_sums, state_steps): + has_sparse_grad, has_complex = False, False + for p in group["params"]: + if p.grad is not None: + has_sparse_grad |= p.grad.is_sparse + has_complex |= torch.is_complex(p) + params_with_grad.append(p) + grads.append(p.grad) + state = self.state[p] + state_sums.append(state["sum"]) + state_steps.append(state["step"]) + + return has_sparse_grad, has_complex + + @_use_grad_for_differentiable + def step(self, closure=None): + """Perform a single optimization step. 
+ + Args: + closure (Callable, optional): A closure that reevaluates the model + and returns the loss. + """ + loss = None + + if closure is not None: + with torch.enable_grad(): + loss = closure() + + for group in self.param_groups: + params_with_grad = [] + grads = [] + state_sums = [] + state_steps = [] + + has_sparse_grad, has_complex = self._init_group(group, params_with_grad, grads, state_sums, state_steps) + + adagrad( + params_with_grad, + grads, + state_sums, + state_steps, + lr=group["lr"], + weight_decay=group["weight_decay"], + lr_decay=group["lr_decay"], + eps=group["eps"], + has_sparse_grad=has_sparse_grad, + foreach=group["foreach"], + maximize=group["maximize"], + differentiable=group["differentiable"], + has_complex=has_complex, + ) + + return loss + + +Adagrad.__doc__ = r"""Implements Adagrad algorithm. + + .. math:: + \begin{aligned} + &\rule{110mm}{0.4pt} \\ + &\textbf{input} : \gamma \text{ (lr)}, \: \theta_0 \text{ (params)}, \: f(\theta) + \text{ (objective)}, \: \lambda \text{ (weight decay)}, \\ + &\hspace{12mm} \tau \text{ (initial accumulator value)}, \: \eta\text{ (lr decay)}\\ + &\textbf{initialize} : state\_sum_0 \leftarrow 0 \\[-1.ex] + &\rule{110mm}{0.4pt} \\ + &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do} \\ + &\hspace{5mm}g_t \leftarrow \nabla_{\theta} f_t (\theta_{t-1}) \\ + &\hspace{5mm} \tilde{\gamma} \leftarrow \gamma / (1 +(t-1) \eta) \\ + &\hspace{5mm} \textbf{if} \: \lambda \neq 0 \\ + &\hspace{10mm} g_t \leftarrow g_t + \lambda \theta_{t-1} \\ + &\hspace{5mm}state\_sum_t \leftarrow state\_sum_{t-1} + g^2_t \\ + &\hspace{5mm}\theta_t \leftarrow + \theta_{t-1}- \tilde{\gamma} \frac{g_t}{\sqrt{state\_sum_t}+\epsilon} \\ + &\rule{110mm}{0.4pt} \\[-1.ex] + &\bf{return} \: \theta_t \\[-1.ex] + &\rule{110mm}{0.4pt} \\[-1.ex] + \end{aligned} + + For further details regarding the algorithm we refer to `Adaptive Subgradient Methods for Online Learning + and Stochastic Optimization`_. + """ + fr""" + Args: + params (iterable): iterable of parameters to optimize or dicts defining + parameter groups + lr (float, optional): learning rate (default: 1e-2) + lr_decay (float, optional): learning rate decay (default: 0) + weight_decay (float, optional): weight decay (L2 penalty) (default: 0) + eps (float, optional): term added to the denominator to improve + numerical stability (default: 1e-10) + {_foreach_doc} + {_maximize_doc} + {_differentiable_doc} + + .. _Adaptive Subgradient Methods for Online Learning and Stochastic + Optimization: http://jmlr.org/papers/v12/duchi11a.html + + """ + + +def adagrad( + params: List[Tensor], + grads: List[Tensor], + state_sums: List[Tensor], + state_steps: List[Tensor], + # kwonly args with defaults are not supported by functions compiled with torchscript issue #70627 + # setting these as kwargs for now as functional API is compiled by torch/distributed/optim + has_sparse_grad: bool = None, + foreach: Optional[bool] = None, + differentiable: bool = False, + has_complex: bool = False, + *, + lr: float, + weight_decay: float, + lr_decay: float, + eps: float, + maximize: bool, +): + r"""Functional API that performs Adagrad algorithm computation. + + See :class:`~torch.optim.Adagrad` for details. 
+ """ + if not all(isinstance(t, torch.Tensor) for t in state_steps): + raise RuntimeError( + "API has changed, `state_steps` argument must contain a list of singleton tensors" + ) + + if foreach is None: + _, foreach = _default_to_fused_or_foreach(params, differentiable, use_fused=False) + + if foreach and torch.jit.is_scripting(): + raise RuntimeError("torch.jit.script not supported with foreach optimizers") + + if foreach and not torch.jit.is_scripting(): + func = _multi_tensor_adagrad + else: + func = _single_tensor_adagrad + + func( + params, + grads, + state_sums, + state_steps, + lr=lr, + weight_decay=weight_decay, + lr_decay=lr_decay, + eps=eps, + has_sparse_grad=has_sparse_grad, + maximize=maximize, + differentiable=differentiable, + has_complex=has_complex, + ) + + +def _make_sparse(grad, grad_indices, values): + size = grad.size() + if grad_indices.numel() == 0 or values.numel() == 0: + return torch.empty_like(grad) + return torch.sparse_coo_tensor(grad_indices, values, size) + + +def _single_tensor_adagrad( + params: List[Tensor], + grads: List[Tensor], + state_sums: List[Tensor], + state_steps: List[Tensor], + *, + lr: float, + weight_decay: float, + lr_decay: float, + eps: float, + has_sparse_grad: bool, + maximize: bool, + differentiable: bool, + has_complex: bool, +): + + for (param, grad, state_sum, step_t) in zip(params, grads, state_sums, state_steps): + # update step + step_t += 1 + step = _get_value(step_t) + grad = grad if not maximize else -grad + + if weight_decay != 0: + if grad.is_sparse: + raise RuntimeError( + "weight_decay option is not compatible with sparse gradients" + ) + grad = grad.add(param, alpha=weight_decay) + + clr = lr / (1 + (step - 1) * lr_decay) + + if grad.is_sparse: + grad = grad.coalesce() # the update is non-linear so indices must be unique + grad_indices = grad._indices() + grad_values = grad._values() + + state_sum.add_(_make_sparse(grad, grad_indices, grad_values.pow(2))) + std = state_sum.sparse_mask(grad) + std_values = std._values().sqrt_().add_(eps) + param.add_( + _make_sparse(grad, grad_indices, grad_values / std_values), alpha=-clr + ) + else: + is_complex = torch.is_complex(param) + if is_complex: + grad = torch.view_as_real(grad) + state_sum = torch.view_as_real(state_sum) + param = torch.view_as_real(param) + state_sum.addcmul_(grad, grad, value=1) + if differentiable: + std = state_sum.sqrt() + eps + else: + std = state_sum.sqrt().add_(eps) + param.addcdiv_(grad, std, value=-clr) + if is_complex: + param = torch.view_as_complex(param) + state_sum = torch.view_as_complex(state_sum) + + +def _multi_tensor_adagrad( + params: List[Tensor], + grads: List[Tensor], + state_sums: List[Tensor], + state_steps: List[Tensor], + *, + lr: float, + weight_decay: float, + lr_decay: float, + eps: float, + has_sparse_grad: bool, + maximize: bool, + differentiable: bool, + has_complex: bool, +): + + assert not differentiable, "_foreach ops don't support autograd" + + # Foreach functions will throw errors if given empty lists + if len(params) == 0: + return + + grouped_tensorlists = Optimizer._group_tensors_by_device_and_dtype([params, grads, state_sums, state_steps]) + for ((device_params, device_grads, device_state_sums, device_state_steps), _) in grouped_tensorlists.values(): + device_has_sparse_grad = has_sparse_grad and any(grad.is_sparse for grad in device_grads) + + if device_has_sparse_grad: + _single_tensor_adagrad( + device_params, + device_grads, + device_state_sums, + device_state_steps, + lr=lr, + weight_decay=weight_decay, + 
lr_decay=lr_decay, + eps=eps, + has_sparse_grad=True, + maximize=False, + differentiable=differentiable, + has_complex=has_complex, + ) + continue + + # Handle complex parameters + if has_complex: + _view_as_real(device_params, device_grads, device_state_sums) + + if maximize: + device_grads = torch._foreach_neg(device_grads) + + # Update steps + # If steps are on CPU, foreach will fall back to the slow path, which is a for-loop calling t.add(1) over + # and over. 1 will then be wrapped into a Tensor over and over again, which is slower than if we just + # wrapped it once now. The alpha is required to assure we go to the right overload. + if device_state_steps[0].is_cpu: + torch._foreach_add_(device_state_steps, torch.tensor(1.0, device='cpu'), alpha=1.0) + else: + torch._foreach_add_(device_state_steps, 1) + + if weight_decay != 0: + # Re-use the intermediate memory (device_grads) already allocated for maximize + if maximize: + torch._foreach_add_(device_grads, device_params, alpha=weight_decay) + else: + device_grads = torch._foreach_add(device_grads, device_params, alpha=weight_decay) + + minus_clr = [-lr / (1 + (_get_value(step) - 1) * lr_decay) for step in device_state_steps] + + torch._foreach_addcmul_(device_state_sums, device_grads, device_grads, value=1) + + std = torch._foreach_sqrt(device_state_sums) + torch._foreach_add_(std, eps) + + if weight_decay != 0 or maximize: + # Again, re-use the intermediate memory (device_grads) already allocated + torch._foreach_mul_(device_grads, minus_clr) + numerator = device_grads + else: + numerator = torch._foreach_mul(device_grads, minus_clr) + + torch._foreach_addcdiv_(device_params, numerator, std) diff --git a/venv/lib/python3.10/site-packages/torch/optim/adagrad.pyi b/venv/lib/python3.10/site-packages/torch/optim/adagrad.pyi new file mode 100644 index 0000000000000000000000000000000000000000..4557ece1417f9bf3d3c56497355e9147cc4dedbd --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/optim/adagrad.pyi @@ -0,0 +1,12 @@ +from .optimizer import Optimizer, ParamsT + +class Adagrad(Optimizer): + def __init__( + self, + params: ParamsT, + lr: float = ..., + lr_decay: float = ..., + weight_decay: float = ..., + initial_accumulator_value: float = ..., + eps: float = ..., + ) -> None: ... 
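A minimal usage sketch of the Adagrad optimizer added above, assuming a toy linear-regression model; the model, data, and hyperparameter values are illustrative only and are not part of this patch. It exercises the standard zero_grad/backward/step loop and notes how lr_decay enters the update.

import torch

model = torch.nn.Linear(10, 1)
opt = torch.optim.Adagrad(model.parameters(), lr=1e-2, lr_decay=1e-4, eps=1e-10)

x = torch.randn(32, 10)   # placeholder inputs
y = torch.randn(32, 1)    # placeholder targets

for _ in range(5):
    opt.zero_grad()
    loss = torch.nn.functional.mse_loss(model(x), y)
    loss.backward()
    opt.step()
    # Per _single_tensor_adagrad above, the effective step size at the t-th
    # step (t counted from 1) is clr = lr / (1 + (t - 1) * lr_decay).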
diff --git a/venv/lib/python3.10/site-packages/torch/optim/adam.py b/venv/lib/python3.10/site-packages/torch/optim/adam.py new file mode 100644 index 0000000000000000000000000000000000000000..386bbc2705a0f7d4eec5cb6106b9c02ab75435bb --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/optim/adam.py @@ -0,0 +1,660 @@ +from typing import List, Optional, Union, Tuple + +import torch +from torch import Tensor +from .optimizer import (Optimizer, ParamsT, _use_grad_for_differentiable, _get_value, + _stack_if_compiling, _dispatch_sqrt, _default_to_fused_or_foreach, + _get_scalar_dtype, _capturable_doc, _differentiable_doc, _foreach_doc, + _fused_doc, _maximize_doc, _view_as_real) +from torch.utils._foreach_utils import _get_fused_kernels_supported_devices + +__all__ = ['Adam', 'adam'] + + +class Adam(Optimizer): + def __init__(self, + params: ParamsT, + lr: Union[float, Tensor] = 1e-3, + betas: Tuple[float, float] = (0.9, 0.999), + eps: float = 1e-8, + weight_decay: float = 0, + amsgrad: bool = False, + *, + foreach: Optional[bool] = None, + maximize: bool = False, + capturable: bool = False, + differentiable: bool = False, + fused: Optional[bool] = None): + if not 0.0 <= lr: + raise ValueError(f"Invalid learning rate: {lr}") + if isinstance(lr, Tensor) and foreach and not capturable: + raise ValueError("lr as a Tensor is not supported for capturable=False and foreach=True") + if not 0.0 <= eps: + raise ValueError(f"Invalid epsilon value: {eps}") + if not 0.0 <= betas[0] < 1.0: + raise ValueError(f"Invalid beta parameter at index 0: {betas[0]}") + if not 0.0 <= betas[1] < 1.0: + raise ValueError(f"Invalid beta parameter at index 1: {betas[1]}") + if not 0.0 <= weight_decay: + raise ValueError(f"Invalid weight_decay value: {weight_decay}") + + defaults = dict(lr=lr, betas=betas, eps=eps, + weight_decay=weight_decay, amsgrad=amsgrad, + maximize=maximize, foreach=foreach, capturable=capturable, + differentiable=differentiable, fused=fused) + super().__init__(params, defaults) + + if fused: + if differentiable: + raise RuntimeError("`fused` does not support `differentiable`") + self._step_supports_amp_scaling = True + # TODO(crcrpar): [low prec params & their higher prec copy] + # Support AMP with FP16/BF16 model params which would need + # higher prec copy of params to do update math in higher prec to + # alleviate the loss of information. 
+ fused_supported_devices = _get_fused_kernels_supported_devices() + if not all( + p.device.type in fused_supported_devices and + torch.is_floating_point(p) for pg in self.param_groups for p in pg['params'] + ): + raise RuntimeError("`fused=True` requires all the params to be floating point Tensors of " + f"supported devices: {fused_supported_devices}.") + if foreach: + raise RuntimeError("`fused` and `foreach` cannot be `True` together.") + + def __setstate__(self, state): + super().__setstate__(state) + for group in self.param_groups: + group.setdefault('amsgrad', False) + group.setdefault('maximize', False) + group.setdefault('foreach', None) + group.setdefault('capturable', False) + group.setdefault('differentiable', False) + fused = group.setdefault('fused', None) + for p in group["params"]: + p_state = self.state.get(p, []) + if len(p_state) != 0 and not torch.is_tensor(p_state['step']): + step_val = float(p_state["step"]) + p_state["step"] = (torch.tensor(step_val, dtype=_get_scalar_dtype(is_fused=fused), device=p.device) + if group['capturable'] or group['fused'] + else torch.tensor(step_val, dtype=_get_scalar_dtype())) + + def _init_group( + self, + group, + params_with_grad, + grads, + exp_avgs, + exp_avg_sqs, + max_exp_avg_sqs, + state_steps + ): + has_complex = False + for p in group['params']: + if p.grad is not None: + has_complex |= torch.is_complex(p) + params_with_grad.append(p) + if p.grad.is_sparse: + raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead') + grads.append(p.grad) + + state = self.state[p] + # Lazy state initialization + if len(state) == 0: + # note(crcrpar): [special device hosting for step] + # Deliberately host `step` on CPU if both capturable and fused are off. + # This is because kernel launches are costly on CUDA and XLA. + state['step'] = ( + torch.zeros((), dtype=_get_scalar_dtype(is_fused=group['fused']), device=p.device) + if group['capturable'] or group['fused'] + else torch.tensor(0.0, dtype=_get_scalar_dtype()) + ) + # Exponential moving average of gradient values + state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format) + # Exponential moving average of squared gradient values + state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format) + if group['amsgrad']: + # Maintains max of all exp. moving avg. of sq. grad. values + state['max_exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format) + + exp_avgs.append(state['exp_avg']) + exp_avg_sqs.append(state['exp_avg_sq']) + + if group['amsgrad']: + max_exp_avg_sqs.append(state['max_exp_avg_sq']) + if group['differentiable'] and state['step'].requires_grad: + raise RuntimeError('`requires_grad` is not supported for `step` in differentiable mode') + + # Foreach without capturable does not support a tensor lr + if group['foreach'] and torch.is_tensor(group['lr']) and not group['capturable']: + raise RuntimeError('lr as a Tensor is not supported for capturable=False and foreach=True') + + state_steps.append(state['step']) + return has_complex + + @_use_grad_for_differentiable + def step(self, closure=None): + """Perform a single optimization step. + + Args: + closure (Callable, optional): A closure that reevaluates the model + and returns the loss. 
+ """ + self._cuda_graph_capture_health_check() + + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + for group in self.param_groups: + params_with_grad = [] + grads = [] + exp_avgs = [] + exp_avg_sqs = [] + max_exp_avg_sqs = [] + state_steps = [] + beta1, beta2 = group['betas'] + + has_complex = self._init_group( + group, + params_with_grad, + grads, + exp_avgs, + exp_avg_sqs, + max_exp_avg_sqs, + state_steps) + + adam( + params_with_grad, + grads, + exp_avgs, + exp_avg_sqs, + max_exp_avg_sqs, + state_steps, + amsgrad=group['amsgrad'], + has_complex=has_complex, + beta1=beta1, + beta2=beta2, + lr=group['lr'], + weight_decay=group['weight_decay'], + eps=group['eps'], + maximize=group['maximize'], + foreach=group['foreach'], + capturable=group['capturable'], + differentiable=group['differentiable'], + fused=group['fused'], + grad_scale=getattr(self, "grad_scale", None), + found_inf=getattr(self, "found_inf", None), + ) + + return loss + + +Adam.__doc__ = r"""Implements Adam algorithm. + + .. math:: + \begin{aligned} + &\rule{110mm}{0.4pt} \\ + &\textbf{input} : \gamma \text{ (lr)}, \beta_1, \beta_2 + \text{ (betas)},\theta_0 \text{ (params)},f(\theta) \text{ (objective)} \\ + &\hspace{13mm} \lambda \text{ (weight decay)}, \: \textit{amsgrad}, + \:\textit{maximize} \\ + &\textbf{initialize} : m_0 \leftarrow 0 \text{ ( first moment)}, + v_0\leftarrow 0 \text{ (second moment)},\: \widehat{v_0}^{max}\leftarrow 0\\[-1.ex] + &\rule{110mm}{0.4pt} \\ + &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do} \\ + + &\hspace{5mm}\textbf{if} \: \textit{maximize}: \\ + &\hspace{10mm}g_t \leftarrow -\nabla_{\theta} f_t (\theta_{t-1}) \\ + &\hspace{5mm}\textbf{else} \\ + &\hspace{10mm}g_t \leftarrow \nabla_{\theta} f_t (\theta_{t-1}) \\ + &\hspace{5mm}\textbf{if} \: \lambda \neq 0 \\ + &\hspace{10mm} g_t \leftarrow g_t + \lambda \theta_{t-1} \\ + &\hspace{5mm}m_t \leftarrow \beta_1 m_{t-1} + (1 - \beta_1) g_t \\ + &\hspace{5mm}v_t \leftarrow \beta_2 v_{t-1} + (1-\beta_2) g^2_t \\ + &\hspace{5mm}\widehat{m_t} \leftarrow m_t/\big(1-\beta_1^t \big) \\ + &\hspace{5mm}\widehat{v_t} \leftarrow v_t/\big(1-\beta_2^t \big) \\ + &\hspace{5mm}\textbf{if} \: amsgrad \\ + &\hspace{10mm}\widehat{v_t}^{max} \leftarrow \mathrm{max}(\widehat{v_t}^{max}, + \widehat{v_t}) \\ + &\hspace{10mm}\theta_t \leftarrow \theta_{t-1} - \gamma \widehat{m_t}/ + \big(\sqrt{\widehat{v_t}^{max}} + \epsilon \big) \\ + &\hspace{5mm}\textbf{else} \\ + &\hspace{10mm}\theta_t \leftarrow \theta_{t-1} - \gamma \widehat{m_t}/ + \big(\sqrt{\widehat{v_t}} + \epsilon \big) \\ + &\rule{110mm}{0.4pt} \\[-1.ex] + &\bf{return} \: \theta_t \\[-1.ex] + &\rule{110mm}{0.4pt} \\[-1.ex] + \end{aligned} + + For further details regarding the algorithm we refer to `Adam: A Method for Stochastic Optimization`_. + """ + fr""" + Args: + params (iterable): iterable of parameters to optimize or dicts defining + parameter groups + lr (float, Tensor, optional): learning rate (default: 1e-3). A tensor LR + is not yet supported for all our implementations. Please use a float + LR if you are not also specifying fused=True or capturable=True. 
+ betas (Tuple[float, float], optional): coefficients used for computing + running averages of gradient and its square (default: (0.9, 0.999)) + eps (float, optional): term added to the denominator to improve + numerical stability (default: 1e-8) + weight_decay (float, optional): weight decay (L2 penalty) (default: 0) + amsgrad (bool, optional): whether to use the AMSGrad variant of this + algorithm from the paper `On the Convergence of Adam and Beyond`_ + (default: False) + {_foreach_doc} + {_maximize_doc} + {_capturable_doc} + {_differentiable_doc} + {_fused_doc} + .. _Adam\: A Method for Stochastic Optimization: + https://arxiv.org/abs/1412.6980 + .. _On the Convergence of Adam and Beyond: + https://openreview.net/forum?id=ryQu7f-RZ + + """ + + +def adam(params: List[Tensor], + grads: List[Tensor], + exp_avgs: List[Tensor], + exp_avg_sqs: List[Tensor], + max_exp_avg_sqs: List[Tensor], + state_steps: List[Tensor], + # kwonly args with defaults are not supported by functions compiled with torchscript issue #70627 + # setting this as kwarg for now as functional API is compiled by torch/distributed/optim + foreach: Optional[bool] = None, + capturable: bool = False, + differentiable: bool = False, + fused: Optional[bool] = None, + grad_scale: Optional[Tensor] = None, + found_inf: Optional[Tensor] = None, + has_complex: bool = False, + *, + amsgrad: bool, + beta1: float, + beta2: float, + lr: Union[float, Tensor], + weight_decay: float, + eps: float, + maximize: bool): + r"""Functional API that performs Adam algorithm computation. + + See :class:`~torch.optim.Adam` for details. + """ + # Respect when the user inputs False/True for foreach or fused. We only want to change + # the default when neither have been user-specified. Note that we default to foreach + # and pass False to use_fused. This is not a mistake--we want to give the fused impl + # bake-in time before making it the default, even if it is typically faster. + if fused is None and foreach is None: + _, foreach = _default_to_fused_or_foreach(params, differentiable, use_fused=False) + # Do not flip on foreach for the unsupported case where lr is a Tensor and capturable=False. 
+ if foreach and isinstance(lr, Tensor) and not capturable: + foreach = False + if fused is None: + fused = False + if foreach is None: + foreach = False + + # this check is slow during compilation, so we skip it + # if it's strictly needed we can add this check back in dynamo + if not torch._utils.is_compiling() and not all(isinstance(t, torch.Tensor) for t in state_steps): + raise RuntimeError("API has changed, `state_steps` argument must contain a list of singleton tensors") + + if foreach and torch.jit.is_scripting(): + raise RuntimeError('torch.jit.script not supported with foreach optimizers') + if fused and torch.jit.is_scripting(): + raise RuntimeError("torch.jit.script not supported with fused optimizers") + + if fused and not torch.jit.is_scripting(): + func = _fused_adam + elif foreach and not torch.jit.is_scripting(): + func = _multi_tensor_adam + else: + func = _single_tensor_adam + + func(params, + grads, + exp_avgs, + exp_avg_sqs, + max_exp_avg_sqs, + state_steps, + amsgrad=amsgrad, + has_complex=has_complex, + beta1=beta1, + beta2=beta2, + lr=lr, + weight_decay=weight_decay, + eps=eps, + maximize=maximize, + capturable=capturable, + differentiable=differentiable, + grad_scale=grad_scale, + found_inf=found_inf) + + +def _single_tensor_adam(params: List[Tensor], + grads: List[Tensor], + exp_avgs: List[Tensor], + exp_avg_sqs: List[Tensor], + max_exp_avg_sqs: List[Tensor], + state_steps: List[Tensor], + grad_scale: Optional[Tensor], + found_inf: Optional[Tensor], + *, + amsgrad: bool, + has_complex: bool, + beta1: float, + beta2: float, + lr: Union[float, Tensor], + weight_decay: float, + eps: float, + maximize: bool, + capturable: bool, + differentiable: bool): + + assert grad_scale is None and found_inf is None + + if torch.jit.is_scripting(): + # this assert is due to JIT being dumb and not realizing that the ops below + # have overloads to handle both float and Tensor lrs, so we just assert it's + # a float since most people using JIT are using floats + assert isinstance(lr, float) + + for i, param in enumerate(params): + grad = grads[i] if not maximize else -grads[i] + exp_avg = exp_avgs[i] + exp_avg_sq = exp_avg_sqs[i] + step_t = state_steps[i] + + # If compiling, the compiler will handle cudagraph checks, see note [torch.compile x capturable] + if not torch._utils.is_compiling() and capturable: + assert ( + (param.is_cuda and step_t.is_cuda) or (param.is_xla and step_t.is_xla) + ), "If capturable=True, params and state_steps must be CUDA or XLA tensors." + + # update step + step_t += 1 + + if weight_decay != 0: + grad = grad.add(param, alpha=weight_decay) + + if torch.is_complex(param): + grad = torch.view_as_real(grad) + exp_avg = torch.view_as_real(exp_avg) + exp_avg_sq = torch.view_as_real(exp_avg_sq) + if amsgrad: + max_exp_avg_sqs[i] = torch.view_as_real(max_exp_avg_sqs[i]) + param = torch.view_as_real(param) + + # Decay the first and second moment running average coefficient + exp_avg.lerp_(grad, 1 - beta1) + exp_avg_sq.mul_(beta2).addcmul_(grad, grad.conj(), value=1 - beta2) + + if capturable or differentiable: + step = step_t + + bias_correction1 = 1 - beta1 ** step + bias_correction2 = 1 - beta2 ** step + + step_size = lr / bias_correction1 + step_size_neg = step_size.neg() + + bias_correction2_sqrt = bias_correction2.sqrt() + + if amsgrad: + # Maintains the maximum of all 2nd moment running avg. 
till now + if differentiable: + max_exp_avg_sq = max_exp_avg_sqs[i].clone() + else: + max_exp_avg_sq = max_exp_avg_sqs[i] + + max_exp_avg_sqs[i].copy_(torch.maximum(max_exp_avg_sq, exp_avg_sq)) + + # Uses the max. for normalizing running avg. of gradient + # Folds in (admittedly ugly) 1-elem step_size math here to avoid extra param-set-sized read+write + # (can't fold it into addcdiv_ below because addcdiv_ requires value is a Number, not a Tensor) + denom = (max_exp_avg_sqs[i].sqrt() / (bias_correction2_sqrt * step_size_neg)).add_(eps / step_size_neg) + else: + denom = (exp_avg_sq.sqrt() / (bias_correction2_sqrt * step_size_neg)).add_(eps / step_size_neg) + + param.addcdiv_(exp_avg, denom) + else: + step = _get_value(step_t) + + bias_correction1 = 1 - beta1 ** step + bias_correction2 = 1 - beta2 ** step + + step_size = lr / bias_correction1 + + bias_correction2_sqrt = _dispatch_sqrt(bias_correction2) + + if amsgrad: + # Maintains the maximum of all 2nd moment running avg. till now + torch.maximum(max_exp_avg_sqs[i], exp_avg_sq, out=max_exp_avg_sqs[i]) + + # Use the max. for normalizing running avg. of gradient + denom = (max_exp_avg_sqs[i].sqrt() / bias_correction2_sqrt).add_(eps) + else: + denom = (exp_avg_sq.sqrt() / bias_correction2_sqrt).add_(eps) + + param.addcdiv_(exp_avg, denom, value=-step_size) + + # Lastly, switch back to complex view + if amsgrad and torch.is_complex(params[i]): + max_exp_avg_sqs[i] = torch.view_as_complex(max_exp_avg_sqs[i]) + + +def _multi_tensor_adam(params: List[Tensor], + grads: List[Tensor], + exp_avgs: List[Tensor], + exp_avg_sqs: List[Tensor], + max_exp_avg_sqs: List[Tensor], + state_steps: List[Tensor], + grad_scale: Optional[Tensor], + found_inf: Optional[Tensor], + *, + amsgrad: bool, + has_complex: bool, + beta1: float, + beta2: float, + lr: Union[float, Tensor], + weight_decay: float, + eps: float, + maximize: bool, + capturable: bool, + differentiable: bool): + if len(params) == 0: + return + + if isinstance(lr, Tensor) and not capturable: + raise RuntimeError("lr as a Tensor is not supported for capturable=False and foreach=True") + + # If compiling, the compiler will handle cudagraph checks, see note [torch.compile x capturable] + if not torch._utils.is_compiling() and capturable: + assert all(p.is_cuda and step.is_cuda for p, step in zip(params, state_steps)), \ + "If capturable=True, params and state_steps must be CUDA tensors." + + assert grad_scale is None and found_inf is None + + assert not differentiable, "_foreach ops don't support autograd" + + grouped_tensors = Optimizer._group_tensors_by_device_and_dtype( + [params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps]) + for (( + device_params, + device_grads, + device_exp_avgs, + device_exp_avg_sqs, + device_max_exp_avg_sqs, + device_state_steps, + ), _) in grouped_tensors.values(): + + # Handle complex parameters + if has_complex: + if amsgrad: + _view_as_real(device_params, device_grads, device_exp_avgs, device_exp_avg_sqs, device_max_exp_avg_sqs) + else: + _view_as_real(device_params, device_grads, device_exp_avgs, device_exp_avg_sqs) + + if maximize: + device_grads = torch._foreach_neg(device_grads) + + # Update steps + # If steps are on CPU, foreach will fall back to the slow path, which is a for-loop calling t.add(1) over + # and over. 1 will then be wrapped into a Tensor over and over again, which is slower than if we just + # wrapped it once now. The alpha is required to assure we go to the right overload. 
+ if device_state_steps[0].is_cpu: + torch._foreach_add_(device_state_steps, torch.tensor(1.0, device='cpu'), alpha=1.0) + else: + torch._foreach_add_(device_state_steps, 1) + + if weight_decay != 0: + # Re-use the intermediate memory (device_grads) already allocated for maximize + if maximize: + torch._foreach_add_(device_grads, device_params, alpha=weight_decay) + else: + device_grads = torch._foreach_add(device_grads, device_params, alpha=weight_decay) + + # Decay the first and second moment running average coefficient + torch._foreach_lerp_(device_exp_avgs, device_grads, 1 - beta1) + + torch._foreach_mul_(device_exp_avg_sqs, beta2) + torch._foreach_addcmul_(device_exp_avg_sqs, device_grads, device_grads, 1 - beta2) + + # Delete the local intermediate since it won't be used anymore to save on peak memory + del device_grads + + if capturable: + bias_correction1 = torch._foreach_pow(beta1, device_state_steps) + bias_correction2 = torch._foreach_pow(beta2, device_state_steps) + # foreach_sub doesn't allow a scalar as the first arg + torch._foreach_sub_(bias_correction1, 1) + torch._foreach_sub_(bias_correction2, 1) + # we do not negate bias_correction1 as it'll need to be negated later anyway + torch._foreach_neg_(bias_correction2) + + # foreach_div doesn't allow a scalar as the first arg + torch._foreach_div_(bias_correction1, lr) + torch._foreach_reciprocal_(bias_correction1) + + torch._foreach_sqrt_(bias_correction2) + + # Re-assign for clarity as we maintain minimal intermediates: we'll have + # step_size = - lr / (1 - beta1 ^ t) where t = num_steps + # bias_correction2_sqrt = sqrt(1 - beta2 ^ t) + step_size = bias_correction1 + bias_correction2_sqrt = bias_correction2 + + if amsgrad: + # Maintains the maximum of all 2nd moment running avg. till now + torch._foreach_maximum_(device_max_exp_avg_sqs, device_exp_avg_sqs) # type: ignore[assignment] + + # Set intermediate to the max. for normalizing running avg. of gradient when amsgrad + exp_avg_sq_sqrt = torch._foreach_sqrt(device_max_exp_avg_sqs) + else: + exp_avg_sq_sqrt = torch._foreach_sqrt(device_exp_avg_sqs) + + torch._foreach_div_(exp_avg_sq_sqrt, bias_correction2_sqrt) + torch._foreach_add_(exp_avg_sq_sqrt, eps) + torch._foreach_div_(exp_avg_sq_sqrt, step_size) + + # at this point, exp_avg_sq_sqrt = - (1 - beta^t) * [sqrt(exp_avg_sq / (1 - beta2^t)) + eps] / lr + torch._foreach_addcdiv_(device_params, device_exp_avgs, exp_avg_sq_sqrt) + else: + bias_correction1 = [1 - beta1 ** _get_value(step) for step in device_state_steps] + bias_correction2 = [1 - beta2 ** _get_value(step) for step in device_state_steps] + + step_size = _stack_if_compiling([(lr / bc) * -1 for bc in bias_correction1]) + + bias_correction2_sqrt = [_dispatch_sqrt(bc) for bc in bias_correction2] + + if amsgrad: + # Maintains the maximum of all 2nd moment running avg. till now + torch._foreach_maximum_(device_max_exp_avg_sqs, device_exp_avg_sqs) + + # Use the max. for normalizing running avg. 
of gradient + exp_avg_sq_sqrt = torch._foreach_sqrt(device_max_exp_avg_sqs) + else: + exp_avg_sq_sqrt = torch._foreach_sqrt(device_exp_avg_sqs) + + torch._foreach_div_(exp_avg_sq_sqrt, bias_correction2_sqrt) + torch._foreach_add_(exp_avg_sq_sqrt, eps) + torch._foreach_addcdiv_(device_params, device_exp_avgs, exp_avg_sq_sqrt, step_size) + + +def _fused_adam( + params: List[Tensor], + grads: List[Tensor], + exp_avgs: List[Tensor], + exp_avg_sqs: List[Tensor], + max_exp_avg_sqs: List[Tensor], + state_steps: List[Tensor], + grad_scale: Optional[Tensor], + found_inf: Optional[Tensor], + *, + amsgrad: bool, + has_complex: bool, # Needed for consistency. + beta1: float, + beta2: float, + lr: Union[float, Tensor], + weight_decay: float, + eps: float, + maximize: bool, + capturable: bool, # Needed for consistency. + differentiable: bool, +) -> None: + if not params: + return + if differentiable: + raise RuntimeError("Adam with fused=True does not support differentiable=True") + + grad_scale_dict = {grad_scale.device: grad_scale} if grad_scale is not None else None + found_inf_dict = {found_inf.device: found_inf} if found_inf is not None else None + + # We only shuffle around the lr when it is a Tensor and on CUDA, otherwise, we prefer + # treating it as a scalar. + lr_dict = {lr.device: lr} if isinstance(lr, Tensor) and str(lr.device) != "cpu" else None + + grouped_tensors = Optimizer._group_tensors_by_device_and_dtype( + [params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps]) + for (device, _), ((device_params, + device_grads, + device_exp_avgs, + device_exp_avg_sqs, + device_max_exp_avg_sqs, + device_state_steps,), _) in grouped_tensors.items(): + device_grad_scale, device_found_inf = None, None + if grad_scale is not None: + if device not in grad_scale_dict: + grad_scale_dict[device] = grad_scale.to(device, non_blocking=True) + device_grad_scale = grad_scale_dict[device] + if found_inf is not None: + if found_inf not in found_inf_dict: + found_inf_dict[device] = found_inf.to(device, non_blocking=True) + device_found_inf = found_inf_dict[device] + if lr_dict is not None and device not in lr_dict: + lr_dict[device] = lr.to(device=device, non_blocking=True) + lr = lr_dict[device] + torch._foreach_add_(device_state_steps, 1) + torch._fused_adam_( + device_params, + device_grads, + device_exp_avgs, + device_exp_avg_sqs, + device_max_exp_avg_sqs, + device_state_steps, + amsgrad=amsgrad, + lr=lr, + beta1=beta1, + beta2=beta2, + weight_decay=weight_decay, + eps=eps, + maximize=maximize, + grad_scale=device_grad_scale, + found_inf=device_found_inf, + ) + if device_found_inf is not None: + torch._foreach_sub_(device_state_steps, [device_found_inf] * len(device_state_steps)) diff --git a/venv/lib/python3.10/site-packages/torch/optim/adamax.pyi b/venv/lib/python3.10/site-packages/torch/optim/adamax.pyi new file mode 100644 index 0000000000000000000000000000000000000000..d38cfaefe388cd703f4a816459f572216226bf4f --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/optim/adamax.pyi @@ -0,0 +1,13 @@ +from typing import Tuple + +from .optimizer import Optimizer, ParamsT + +class Adamax(Optimizer): + def __init__( + self, + params: ParamsT, + lr: float = ..., + betas: Tuple[float, float] = ..., + eps: float = ..., + weight_decay: float = ..., + ) -> None: ... 
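Before the AdamW variant below, a self-contained reference sketch of the per-parameter math that _single_tensor_adam performs in its default branch (capturable=False, amsgrad=False, weight_decay=0). The function name, tensor values, and hyperparameters here are illustrative, not part of the patch.

import math
import torch

def adam_reference_step(param, grad, exp_avg, exp_avg_sq, step,
                        lr=1e-3, beta1=0.9, beta2=0.999, eps=1e-8):
    # Decay the first and second moment running averages.
    exp_avg.lerp_(grad, 1 - beta1)
    exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
    # Bias corrections use the 1-based step count.
    bias_correction1 = 1 - beta1 ** step
    bias_correction2 = 1 - beta2 ** step
    step_size = lr / bias_correction1
    denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(eps)
    param.addcdiv_(exp_avg, denom, value=-step_size)

p, g = torch.zeros(3), torch.ones(3)
m, v = torch.zeros(3), torch.zeros(3)
adam_reference_step(p, g, m, v, step=1)  # first update moves p by roughly -lr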
diff --git a/venv/lib/python3.10/site-packages/torch/optim/adamw.py b/venv/lib/python3.10/site-packages/torch/optim/adamw.py new file mode 100644 index 0000000000000000000000000000000000000000..f97e66e6f32dcc2e64a177eca2fc1e7b20139aa0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/optim/adamw.py @@ -0,0 +1,688 @@ +import torch +from torch import Tensor +from .optimizer import (Optimizer, _use_grad_for_differentiable, _get_value, _dispatch_sqrt, + _stack_if_compiling, _get_scalar_dtype, _capturable_doc, _differentiable_doc, + _foreach_doc, _fused_doc, _maximize_doc, _default_to_fused_or_foreach, + ParamsT, _view_as_real) +from typing import List, Optional, Tuple, Union +from torch.utils._foreach_utils import _get_fused_kernels_supported_devices + +__all__ = ["AdamW", "adamw"] + + +class AdamW(Optimizer): + def __init__( + self, + params: ParamsT, + lr: Union[float, Tensor] = 1e-3, + betas: Tuple[float, float] = (0.9, 0.999), + eps: float = 1e-8, + weight_decay: float = 1e-2, + amsgrad: bool = False, + *, + maximize: bool = False, + foreach: Optional[bool] = None, + capturable: bool = False, + differentiable: bool = False, + fused: Optional[bool] = None, + ): + if not 0.0 <= lr: + raise ValueError(f"Invalid learning rate: {lr}") + if isinstance(lr, Tensor) and foreach and not capturable: + raise ValueError("lr as a Tensor is not supported for capturable=False and foreach=True") + if not 0.0 <= eps: + raise ValueError(f"Invalid epsilon value: {eps}") + if not 0.0 <= betas[0] < 1.0: + raise ValueError(f"Invalid beta parameter at index 0: {betas[0]}") + if not 0.0 <= betas[1] < 1.0: + raise ValueError(f"Invalid beta parameter at index 1: {betas[1]}") + if not 0.0 <= weight_decay: + raise ValueError(f"Invalid weight_decay value: {weight_decay}") + defaults = dict( + lr=lr, + betas=betas, + eps=eps, + weight_decay=weight_decay, + amsgrad=amsgrad, + foreach=foreach, + maximize=maximize, + capturable=capturable, + differentiable=differentiable, + fused=fused, + ) + super().__init__(params, defaults) + + if fused: + if differentiable: + raise RuntimeError("`fused` does not support `differentiable`") + self._step_supports_amp_scaling = True + # TODO(crcrpar): [low prec params & their higher prec copy] + # Suppor AMP with FP16/BF16 model params which would need + # higher prec copy of params to do update math in higher prec to + # alleviate the loss of information. 
+ fused_supported_devices = _get_fused_kernels_supported_devices() + if not all( + p.device.type in fused_supported_devices and + torch.is_floating_point(p) + for pg in self.param_groups for p in pg['params'] + ): + raise RuntimeError("`fused=True` requires all the params to be floating point Tensors of " + f"supported devices: {fused_supported_devices}.") + if foreach: + raise RuntimeError("`fused` and `foreach` cannot be `True` together.") + + def __setstate__(self, state): + super().__setstate__(state) + for group in self.param_groups: + group.setdefault("amsgrad", False) + group.setdefault("maximize", False) + group.setdefault("foreach", None) + group.setdefault("capturable", False) + group.setdefault("differentiable", False) + fused = group.setdefault("fused", None) + for p in group["params"]: + p_state = self.state.get(p, []) + if len(p_state) != 0 and not torch.is_tensor(p_state['step']): + step_val = float(p_state["step"]) + p_state["step"] = (torch.tensor(step_val, dtype=_get_scalar_dtype(is_fused=fused), device=p.device) + if group['capturable'] or group['fused'] + else torch.tensor(step_val, dtype=_get_scalar_dtype())) + + def _init_group( + self, + group, + params_with_grad, + grads, + amsgrad, + exp_avgs, + exp_avg_sqs, + max_exp_avg_sqs, + state_steps, + ): + has_complex = False + for p in group["params"]: + if p.grad is None: + continue + has_complex |= torch.is_complex(p) + params_with_grad.append(p) + if p.grad.is_sparse: + raise RuntimeError("AdamW does not support sparse gradients") + grads.append(p.grad) + + state = self.state[p] + + # State initialization + if len(state) == 0: + # note(crcrpar): Deliberately host `step` on CPU if both capturable and fused are off. + # This is because kernel launches are costly on CUDA and XLA. + state["step"] = ( + torch.zeros((), dtype=_get_scalar_dtype(is_fused=group["fused"]), device=p.device) + if group["capturable"] or group["fused"] + else torch.tensor(0.0, dtype=_get_scalar_dtype()) + ) + # Exponential moving average of gradient values + state["exp_avg"] = torch.zeros_like( + p, memory_format=torch.preserve_format + ) + # Exponential moving average of squared gradient values + state["exp_avg_sq"] = torch.zeros_like( + p, memory_format=torch.preserve_format + ) + if amsgrad: + # Maintains max of all exp. moving avg. of sq. grad. values + state["max_exp_avg_sq"] = torch.zeros_like( + p, memory_format=torch.preserve_format + ) + + exp_avgs.append(state["exp_avg"]) + exp_avg_sqs.append(state["exp_avg_sq"]) + + if group['amsgrad']: + max_exp_avg_sqs.append(state["max_exp_avg_sq"]) + if group['differentiable'] and state['step'].requires_grad: + raise RuntimeError('`requires_grad` is not supported for `step` in differentiable mode') + + # Foreach without capturable does not support a tensor lr + if group['foreach'] and isinstance(group['lr'], Tensor) and not group['capturable']: + raise RuntimeError('lr as a Tensor is not supported for capturable=False and foreach=True') + + state_steps.append(state["step"]) + return has_complex + + @_use_grad_for_differentiable + def step(self, closure=None): + """Perform a single optimization step. + + Args: + closure (Callable, optional): A closure that reevaluates the model + and returns the loss. 
+ """ + self._cuda_graph_capture_health_check() + + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + for group in self.param_groups: + params_with_grad = [] + grads = [] + exp_avgs = [] + exp_avg_sqs = [] + max_exp_avg_sqs = [] + state_steps = [] + amsgrad = group["amsgrad"] + beta1, beta2 = group["betas"] + + has_complex = self._init_group( + group, + params_with_grad, + grads, + amsgrad, + exp_avgs, + exp_avg_sqs, + max_exp_avg_sqs, + state_steps, + ) + + adamw( + params_with_grad, + grads, + exp_avgs, + exp_avg_sqs, + max_exp_avg_sqs, + state_steps, + amsgrad=amsgrad, + beta1=beta1, + beta2=beta2, + lr=group["lr"], + weight_decay=group["weight_decay"], + eps=group["eps"], + maximize=group["maximize"], + foreach=group["foreach"], + capturable=group["capturable"], + differentiable=group["differentiable"], + fused=group["fused"], + grad_scale=getattr(self, "grad_scale", None), + found_inf=getattr(self, "found_inf", None), + has_complex=has_complex, + ) + + return loss + + +AdamW.__doc__ = r"""Implements AdamW algorithm. + + .. math:: + \begin{aligned} + &\rule{110mm}{0.4pt} \\ + &\textbf{input} : \gamma \text{(lr)}, \: \beta_1, \beta_2 + \text{(betas)}, \: \theta_0 \text{(params)}, \: f(\theta) \text{(objective)}, + \: \epsilon \text{ (epsilon)} \\ + &\hspace{13mm} \lambda \text{(weight decay)}, \: \textit{amsgrad}, + \: \textit{maximize} \\ + &\textbf{initialize} : m_0 \leftarrow 0 \text{ (first moment)}, v_0 \leftarrow 0 + \text{ ( second moment)}, \: \widehat{v_0}^{max}\leftarrow 0 \\[-1.ex] + &\rule{110mm}{0.4pt} \\ + &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do} \\ + + &\hspace{5mm}\textbf{if} \: \textit{maximize}: \\ + &\hspace{10mm}g_t \leftarrow -\nabla_{\theta} f_t (\theta_{t-1}) \\ + &\hspace{5mm}\textbf{else} \\ + &\hspace{10mm}g_t \leftarrow \nabla_{\theta} f_t (\theta_{t-1}) \\ + &\hspace{5mm} \theta_t \leftarrow \theta_{t-1} - \gamma \lambda \theta_{t-1} \\ + &\hspace{5mm}m_t \leftarrow \beta_1 m_{t-1} + (1 - \beta_1) g_t \\ + &\hspace{5mm}v_t \leftarrow \beta_2 v_{t-1} + (1-\beta_2) g^2_t \\ + &\hspace{5mm}\widehat{m_t} \leftarrow m_t/\big(1-\beta_1^t \big) \\ + &\hspace{5mm}\widehat{v_t} \leftarrow v_t/\big(1-\beta_2^t \big) \\ + &\hspace{5mm}\textbf{if} \: amsgrad \\ + &\hspace{10mm}\widehat{v_t}^{max} \leftarrow \mathrm{max}(\widehat{v_t}^{max}, + \widehat{v_t}) \\ + &\hspace{10mm}\theta_t \leftarrow \theta_t - \gamma \widehat{m_t}/ + \big(\sqrt{\widehat{v_t}^{max}} + \epsilon \big) \\ + &\hspace{5mm}\textbf{else} \\ + &\hspace{10mm}\theta_t \leftarrow \theta_t - \gamma \widehat{m_t}/ + \big(\sqrt{\widehat{v_t}} + \epsilon \big) \\ + &\rule{110mm}{0.4pt} \\[-1.ex] + &\bf{return} \: \theta_t \\[-1.ex] + &\rule{110mm}{0.4pt} \\[-1.ex] + \end{aligned} + + For further details regarding the algorithm we refer to `Decoupled Weight Decay Regularization`_. + """ + fr""" + Args: + params (iterable): iterable of parameters to optimize or dicts defining + parameter groups + lr (float, Tensor, optional): learning rate (default: 1e-3). A tensor LR + is not yet supported for all our implementations. Please use a float + LR if you are not also specifying fused=True or capturable=True. 
+ betas (Tuple[float, float], optional): coefficients used for computing + running averages of gradient and its square (default: (0.9, 0.999)) + eps (float, optional): term added to the denominator to improve + numerical stability (default: 1e-8) + weight_decay (float, optional): weight decay coefficient (default: 1e-2) + amsgrad (bool, optional): whether to use the AMSGrad variant of this + algorithm from the paper `On the Convergence of Adam and Beyond`_ + (default: False) + {_maximize_doc} + {_foreach_doc} + {_capturable_doc} + {_differentiable_doc} + {_fused_doc} + .. _Decoupled Weight Decay Regularization: + https://arxiv.org/abs/1711.05101 + .. _On the Convergence of Adam and Beyond: + https://openreview.net/forum?id=ryQu7f-RZ + + """ + + +def adamw( + params: List[Tensor], + grads: List[Tensor], + exp_avgs: List[Tensor], + exp_avg_sqs: List[Tensor], + max_exp_avg_sqs: List[Tensor], + state_steps: List[Tensor], + # kwonly args with defaults are not supported by functions compiled with torchscript issue #70627 + # setting this as kwarg for now as functional API is compiled by torch/distributed/optim + foreach: Optional[bool] = None, + capturable: bool = False, + differentiable: bool = False, + fused: Optional[bool] = None, + grad_scale: Optional[Tensor] = None, + found_inf: Optional[Tensor] = None, + has_complex: bool = False, + *, + amsgrad: bool, + beta1: float, + beta2: float, + lr: Union[float, Tensor], + weight_decay: float, + eps: float, + maximize: bool, +): + r"""Functional API that performs AdamW algorithm computation. + + See :class:`~torch.optim.AdamW` for details. + """ + if not torch._utils.is_compiling() and not all(isinstance(t, torch.Tensor) for t in state_steps): + raise RuntimeError( + "API has changed, `state_steps` argument must contain a list of singleton tensors" + ) + + # Respect when the user inputs False/True for foreach or fused. We only want to change + # the default when neither have been user-specified. Note that we default to foreach + # and pass False to use_fused. This is not a mistake--we want to give the fused impl + # bake-in time before making it the default, even if it is typically faster. + if fused is None and foreach is None: + _, foreach = _default_to_fused_or_foreach(params, differentiable, use_fused=False) + # Do not flip on foreach for the unsupported case where lr is a Tensor and capturable=False. 
+ if foreach and isinstance(lr, Tensor) and not capturable: + foreach = False + if fused is None: + fused = False + if foreach is None: + foreach = False + + if foreach and torch.jit.is_scripting(): + raise RuntimeError("torch.jit.script not supported with foreach optimizers") + if fused and torch.jit.is_scripting(): + raise RuntimeError("torch.jit.script not supported with fused optimizers") + + if fused and not torch.jit.is_scripting(): + func = _fused_adamw + elif foreach and not torch.jit.is_scripting(): + func = _multi_tensor_adamw + else: + func = _single_tensor_adamw + + func( + params, + grads, + exp_avgs, + exp_avg_sqs, + max_exp_avg_sqs, + state_steps, + amsgrad=amsgrad, + beta1=beta1, + beta2=beta2, + lr=lr, + weight_decay=weight_decay, + eps=eps, + maximize=maximize, + capturable=capturable, + differentiable=differentiable, + grad_scale=grad_scale, + found_inf=found_inf, + has_complex=has_complex, + ) + + +def _single_tensor_adamw( + params: List[Tensor], + grads: List[Tensor], + exp_avgs: List[Tensor], + exp_avg_sqs: List[Tensor], + max_exp_avg_sqs: List[Tensor], + state_steps: List[Tensor], + grad_scale: Optional[Tensor], + found_inf: Optional[Tensor], + *, + amsgrad: bool, + beta1: float, + beta2: float, + lr: Union[Tensor, float], + weight_decay: float, + eps: float, + maximize: bool, + capturable: bool, + differentiable: bool, + has_complex: bool, +): + + assert grad_scale is None and found_inf is None + + if torch.jit.is_scripting(): + # this assert is due to JIT being dumb and not realizing that the ops below + # have overloads to handle both float and Tensor lrs, so we just assert it's + # a float since most people using JIT are using floats + assert isinstance(lr, float) + + for i, param in enumerate(params): + grad = grads[i] if not maximize else -grads[i] + exp_avg = exp_avgs[i] + exp_avg_sq = exp_avg_sqs[i] + step_t = state_steps[i] + + # If compiling, the compiler will handle cudagraph checks, see note [torch.compile x capturable] + if not torch._utils.is_compiling() and capturable: + assert ( + (param.is_cuda and step_t.is_cuda) or (param.is_xla and step_t.is_xla) + ), "If capturable=True, params and state_steps must be CUDA or XLA tensors." + + if torch.is_complex(param): + grad = torch.view_as_real(grad) + exp_avg = torch.view_as_real(exp_avg) + exp_avg_sq = torch.view_as_real(exp_avg_sq) + if amsgrad: + max_exp_avg_sqs[i] = torch.view_as_real(max_exp_avg_sqs[i]) + param = torch.view_as_real(param) + + # update step + step_t += 1 + + # Perform stepweight decay + param.mul_(1 - lr * weight_decay) + + # Decay the first and second moment running average coefficient + exp_avg.lerp_(grad, 1 - beta1) + exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2) + + if capturable or differentiable: + step = step_t + + bias_correction1 = 1 - beta1 ** step + bias_correction2 = 1 - beta2 ** step + + step_size = lr / bias_correction1 + step_size_neg = step_size.neg() + + bias_correction2_sqrt = bias_correction2.sqrt() + + if amsgrad: + # Maintains the maximum of all 2nd moment running avg. till now + if differentiable: + max_exp_avg_sq = max_exp_avg_sqs[i].clone() + else: + max_exp_avg_sq = max_exp_avg_sqs[i] + + max_exp_avg_sqs[i].copy_(torch.maximum(max_exp_avg_sq, exp_avg_sq)) + + # Uses the max. for normalizing running avg. 
of gradient + # Folds in (admittedly ugly) 1-elem step_size math here to avoid extra param-set-sized read+write + # (can't fold it into addcdiv_ below because addcdiv_ requires value is a Number, not a Tensor) + denom = ( + max_exp_avg_sqs[i].sqrt() / (bias_correction2_sqrt * step_size_neg) + ).add_(eps / step_size_neg) + else: + denom = ( + exp_avg_sq.sqrt() / (bias_correction2_sqrt * step_size_neg) + ).add_(eps / step_size_neg) + + param.addcdiv_(exp_avg, denom) + else: + step = _get_value(step_t) + + bias_correction1 = 1 - beta1 ** step + bias_correction2 = 1 - beta2 ** step + + step_size = lr / bias_correction1 + + bias_correction2_sqrt = _dispatch_sqrt(bias_correction2) + + if amsgrad: + # Maintains the maximum of all 2nd moment running avg. till now + torch.maximum(max_exp_avg_sqs[i], exp_avg_sq, out=max_exp_avg_sqs[i]) + + # Use the max. for normalizing running avg. of gradient + denom = (max_exp_avg_sqs[i].sqrt() / bias_correction2_sqrt).add_(eps) + else: + denom = (exp_avg_sq.sqrt() / bias_correction2_sqrt).add_(eps) + + param.addcdiv_(exp_avg, denom, value=-step_size) + + # Lastly, switch back to complex view + if amsgrad and torch.is_complex(params[i]): + max_exp_avg_sqs[i] = torch.view_as_complex(max_exp_avg_sqs[i]) + + +def _multi_tensor_adamw( + params: List[Tensor], + grads: List[Tensor], + exp_avgs: List[Tensor], + exp_avg_sqs: List[Tensor], + max_exp_avg_sqs: List[Tensor], + state_steps: List[Tensor], + grad_scale: Optional[Tensor], + found_inf: Optional[Tensor], + *, + amsgrad: bool, + beta1: float, + beta2: float, + lr: Union[Tensor, float], + weight_decay: float, + eps: float, + maximize: bool, + capturable: bool, + differentiable: bool, + has_complex: bool, +): + if len(params) == 0: + return + + if isinstance(lr, Tensor) and not capturable: + raise RuntimeError("lr as a Tensor is not supported for capturable=False and foreach=True") + + # If compiling, the compiler will handle cudagraph checks, see note [torch.compile x capturable] + if not torch._utils.is_compiling() and capturable: + assert all( + p.is_cuda and step.is_cuda for p, step in zip(params, state_steps) + ), "If capturable=True, params and state_steps must be CUDA tensors." + + assert not differentiable, "_foreach ops don't support autograd" + + assert grad_scale is None and found_inf is None + + grouped_tensors = Optimizer._group_tensors_by_device_and_dtype([ + params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps]) + for (( + device_params, + device_grads, + device_exp_avgs, + device_exp_avg_sqs, + device_max_exp_avg_sqs, + device_state_steps, + ), _) in grouped_tensors.values(): + if has_complex: + if amsgrad: + _view_as_real(device_params, device_grads, device_exp_avgs, device_exp_avg_sqs, device_max_exp_avg_sqs) + else: + _view_as_real(device_params, device_grads, device_exp_avgs, device_exp_avg_sqs) + + if maximize: + device_grads = torch._foreach_neg(device_grads) + + # Update steps + # If steps are on CPU, foreach will fall back to the slow path, which is a for-loop calling t.add(1) over + # and over. 1 will then be wrapped into a Tensor over and over again, which is slower than if we just + # wrapped it once now. The alpha is required to assure we go to the right overload. 
+ if device_state_steps[0].is_cpu: + torch._foreach_add_(device_state_steps, torch.tensor(1.0, device='cpu'), alpha=1.0) + else: + torch._foreach_add_(device_state_steps, 1) + + # Perform stepweight decay + if weight_decay != 0: + torch._foreach_mul_(device_params, 1 - lr * weight_decay) + + # Decay the first and second moment running average coefficient + torch._foreach_lerp_(device_exp_avgs, device_grads, 1 - beta1) + + torch._foreach_mul_(device_exp_avg_sqs, beta2) + torch._foreach_addcmul_(device_exp_avg_sqs, device_grads, device_grads, 1 - beta2) + + # Delete the local intermediate since it won't be used anymore to save on peak memory + del device_grads + + if capturable: + bias_correction1 = torch._foreach_pow(beta1, device_state_steps) + bias_correction2 = torch._foreach_pow(beta2, device_state_steps) + # foreach_sub doesn't allow a scalar as the first arg + torch._foreach_sub_(bias_correction1, 1) + torch._foreach_sub_(bias_correction2, 1) + # we do not negate bias_correction1 as it'll need to be negated later anyway + torch._foreach_neg_(bias_correction2) + + # foreach_div doesn't allow a scalar as the first arg + torch._foreach_div_(bias_correction1, lr) + torch._foreach_reciprocal_(bias_correction1) + + torch._foreach_sqrt_(bias_correction2) + + # Re-assign for clarity as we maintain minimal intermediates: we'll have + # step_size = - lr / (1 - beta1 ^ t) where t = num_steps + # bias_correction2_sqrt = sqrt(1 - beta2 ^ t) + step_size = bias_correction1 + bias_correction2_sqrt = bias_correction2 + + if amsgrad: + # Maintains the maximum of all 2nd moment running avg. till now + torch._foreach_maximum_(device_max_exp_avg_sqs, device_exp_avg_sqs) + + # Use the max. for normalizing running avg. of gradient + exp_avg_sq_sqrt = torch._foreach_sqrt(device_max_exp_avg_sqs) + else: + exp_avg_sq_sqrt = torch._foreach_sqrt(device_exp_avg_sqs) + + torch._foreach_div_(exp_avg_sq_sqrt, bias_correction2_sqrt) + torch._foreach_add_(exp_avg_sq_sqrt, eps) + torch._foreach_div_(exp_avg_sq_sqrt, step_size) + + # at this point, exp_avg_sq_sqrt = - (1 - beta^t) * [sqrt(exp_avg_sq / (1 - beta2^t)) + eps] / lr + torch._foreach_addcdiv_(device_params, device_exp_avgs, exp_avg_sq_sqrt) + else: + bias_correction1 = [1 - beta1 ** _get_value(step) for step in device_state_steps] + bias_correction2 = [1 - beta2 ** _get_value(step) for step in device_state_steps] + + step_size = _stack_if_compiling([(lr / bc) * -1 for bc in bias_correction1]) + + bias_correction2_sqrt = [_dispatch_sqrt(bc) for bc in bias_correction2] + + if amsgrad: + # Maintains the maximum of all 2nd moment running avg. till now + torch._foreach_maximum_(device_max_exp_avg_sqs, device_exp_avg_sqs) + + # Use the max. for normalizing running avg. of gradient + exp_avg_sq_sqrt = torch._foreach_sqrt(device_max_exp_avg_sqs) + else: + exp_avg_sq_sqrt = torch._foreach_sqrt(device_exp_avg_sqs) + + torch._foreach_div_(exp_avg_sq_sqrt, bias_correction2_sqrt) + torch._foreach_add_(exp_avg_sq_sqrt, eps) + torch._foreach_addcdiv_(device_params, device_exp_avgs, exp_avg_sq_sqrt, step_size) + + +def _fused_adamw( + params: List[Tensor], + grads: List[Tensor], + exp_avgs: List[Tensor], + exp_avg_sqs: List[Tensor], + max_exp_avg_sqs: List[Tensor], + state_steps: List[Tensor], + grad_scale: Optional[Tensor], + found_inf: Optional[Tensor], + *, + amsgrad: bool, + beta1: float, + beta2: float, + lr: Union[float, Tensor], + weight_decay: float, + eps: float, + maximize: bool, + capturable: bool, # Needed for consistency. 
+    differentiable: bool,
+    has_complex: bool,
+) -> None:
+    if not params:
+        return
+    if differentiable:
+        raise RuntimeError("Adam with fused=True does not support differentiable=True")
+
+    grad_scale_dict = {grad_scale.device: grad_scale} if grad_scale is not None else None
+    found_inf_dict = {found_inf.device: found_inf} if found_inf is not None else None
+
+    # We only shuffle around the lr when it is a Tensor and on CUDA, otherwise, we prefer
+    # treating it as a scalar.
+    lr_dict = {lr.device: lr} if isinstance(lr, Tensor) and str(lr.device) != "cpu" else None
+
+    grouped_tensors = Optimizer._group_tensors_by_device_and_dtype(
+        [params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps])
+    for (device, _), ((device_params,
+                       device_grads,
+                       device_exp_avgs,
+                       device_exp_avg_sqs,
+                       device_max_exp_avg_sqs,
+                       device_state_steps,), _) in grouped_tensors.items():
+        device_grad_scale, device_found_inf = None, None
+        if grad_scale is not None:
+            if device not in grad_scale_dict:
+                grad_scale_dict[device] = grad_scale.to(device, non_blocking=True)
+            device_grad_scale = grad_scale_dict[device]
+        if found_inf is not None:
+            if device not in found_inf_dict:
+                found_inf_dict[device] = found_inf.to(device, non_blocking=True)
+            device_found_inf = found_inf_dict[device]
+        if lr_dict is not None and device not in lr_dict:
+            lr_dict[device] = lr.to(device=device, non_blocking=True)
+            lr = lr_dict[device]
+        torch._foreach_add_(device_state_steps, 1)
+        torch._fused_adamw_(
+            device_params,
+            device_grads,
+            device_exp_avgs,
+            device_exp_avg_sqs,
+            device_max_exp_avg_sqs,
+            device_state_steps,
+            amsgrad=amsgrad,
+            lr=lr,
+            beta1=beta1,
+            beta2=beta2,
+            weight_decay=weight_decay,
+            eps=eps,
+            maximize=maximize,
+            grad_scale=device_grad_scale,
+            found_inf=device_found_inf,
+        )
+        if device_found_inf is not None:
+            torch._foreach_sub_(device_state_steps, [device_found_inf] * len(device_state_steps))
diff --git a/venv/lib/python3.10/site-packages/torch/optim/nadam.pyi b/venv/lib/python3.10/site-packages/torch/optim/nadam.pyi
new file mode 100644
index 0000000000000000000000000000000000000000..f62e188b3d72b1d9021e8f0f6619b8bde4349cb1
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/optim/nadam.pyi
@@ -0,0 +1,15 @@
+from typing import Tuple
+
+from .optimizer import Optimizer, ParamsT
+
+class NAdam(Optimizer):
+    def __init__(
+        self,
+        params: ParamsT,
+        lr: float = ...,
+        betas: Tuple[float, float] = ...,
+        eps: float = ...,
+        weight_decay: float = ...,
+        momentum_decay: float = ...,
+        decoupled_weight_decay: bool = ...,
+    ) -> None: ...
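The AdamW kernels added above all implement the same decoupled-weight-decay update, and the single-tensor path is the easiest one to replay by hand. The sketch below is an editor's illustration, not part of the vendored files: it re-derives one optimizer step on a toy parameter (all variable names are invented) and compares it against torch.optim.AdamW; with default settings on CPU the final check should print True.

import torch

torch.manual_seed(0)
param = torch.randn(5, requires_grad=True)
grad = torch.randn(5)
lr, beta1, beta2, eps, weight_decay = 1e-2, 0.9, 0.999, 1e-8, 1e-2

# Reference update: the non-capturable branch of _single_tensor_adamw at step t = 1.
with torch.no_grad():
    ref = param.clone()
    exp_avg = torch.zeros_like(ref)
    exp_avg_sq = torch.zeros_like(ref)
    t = 1
    ref.mul_(1 - lr * weight_decay)                               # decoupled weight decay
    exp_avg.lerp_(grad, 1 - beta1)                                # first moment
    exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)  # second moment
    bias_correction1 = 1 - beta1 ** t
    bias_correction2 = 1 - beta2 ** t
    step_size = lr / bias_correction1
    denom = (exp_avg_sq.sqrt() / bias_correction2 ** 0.5).add_(eps)
    ref.addcdiv_(exp_avg, denom, value=-step_size)

# The same step through the optimizer itself.
opt = torch.optim.AdamW([param], lr=lr, betas=(beta1, beta2), eps=eps, weight_decay=weight_decay)
param.grad = grad.clone()
opt.step()

print(torch.allclose(param.detach(), ref))  # expected: True

The foreach and fused variants in the same file reorganize this arithmetic for throughput (batched _foreach_* ops, a single fused kernel) but are meant to produce the same parameter values.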
diff --git a/venv/lib/python3.10/site-packages/torch/optim/optimizer.py b/venv/lib/python3.10/site-packages/torch/optim/optimizer.py new file mode 100644 index 0000000000000000000000000000000000000000..cbbdf1c8f40f2dd54da892851579431fff94cbd0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/optim/optimizer.py @@ -0,0 +1,912 @@ +import math +import functools +import warnings +from collections import OrderedDict, defaultdict +from copy import deepcopy +from itertools import chain +from typing import ( + Any, + Callable, + DefaultDict, + Dict, + Hashable, + Iterable, + List, + Optional, + Set, + Tuple, + TypeVar, + Union, + cast, + overload, +) +from typing_extensions import ParamSpec, Self, TypeAlias + +import torch +import torch.utils.hooks as hooks +from torch.utils.hooks import RemovableHandle +from torch.utils._foreach_utils import ( + Indices, + TensorListList, + _get_foreach_kernels_supported_devices, + _get_fused_kernels_supported_devices, +) +from torch._utils import is_compiling +from torch.utils._foreach_utils import _group_tensors_by_device_and_dtype + +Args: TypeAlias = Tuple[Any, ...] +Kwargs: TypeAlias = Dict[str, Any] +StateDict: TypeAlias = Dict[str, Any] + +GlobalOptimizerPreHook: TypeAlias = Callable[["Optimizer", Args, Kwargs], Optional[Tuple[Args, Kwargs]]] +GlobalOptimizerPostHook: TypeAlias = Callable[["Optimizer", Args, Kwargs], None] + +__all__ = ['Optimizer', 'register_optimizer_step_pre_hook', 'register_optimizer_step_post_hook'] +_global_optimizer_pre_hooks: Dict[int, GlobalOptimizerPreHook] = OrderedDict() +_global_optimizer_post_hooks: Dict[int, GlobalOptimizerPostHook] = OrderedDict() +_foreach_supported_types = [torch.Tensor, torch.nn.parameter.Parameter] + +class _RequiredParameter: + """Singleton class representing a required parameter for an Optimizer.""" + def __repr__(self) -> str: + return "" + +required = _RequiredParameter() + + +def _use_grad_for_differentiable(func): + def _use_grad(self, *args, **kwargs): + import torch._dynamo + prev_grad = torch.is_grad_enabled() + try: + # Note on graph break below: + # we need to graph break to ensure that aot respects the no_grad annotation. + # This is important for perf because without this, functionalization will generate an epilogue + # which updates the mutated parameters of the optimizer which is *not* visible to inductor, as a result, + # inductor will allocate for every parameter in the model, which is horrible. + # With this, aot correctly sees that this is an inference graph, and functionalization will generate + # an epilogue which is appended to the graph, which *is* visible to inductor, as a result, inductor sees that + # step is in place and is able to avoid the extra allocation. + # In the future, we will either 1) continue to graph break on backward, so this graph break does not matter + # or 2) have a fully fused forward and backward graph, which will have no_grad by default, and we can remove this + # graph break to allow the fully fused fwd-bwd-optimizer graph to be compiled. 
+ # see https://github.com/pytorch/pytorch/issues/104053 + torch.set_grad_enabled(self.defaults['differentiable']) + torch._dynamo.graph_break() + ret = func(self, *args, **kwargs) + finally: + torch._dynamo.graph_break() + torch.set_grad_enabled(prev_grad) + return ret + functools.update_wrapper(_use_grad, func) + return _use_grad + +def _get_value(x): + # item is significantly faster than a cpu tensor in eager mode + if not torch.jit.is_scripting() and is_compiling(): + return x + else: + return x.item() + +def _stack_if_compiling(x): + if not torch.jit.is_scripting() and is_compiling(): + return torch.stack(x) + else: + return x + +def _dispatch_sqrt(x: float): # float annotation is needed because of torchscript type inference + if not torch.jit.is_scripting() and isinstance(x, torch.Tensor): + return x.sqrt() + else: + return math.sqrt(x) + +# For any optimizer with a faster implementation, we attempt to default to the +# fastest + stablest whenever possible. For foreach, the requirements are to have +# native params all on CUDA. For fused, there's currently the additional requirement +# that the tensors' dtypes must be floating point. Neither alternative supports +# torch.jit.script nor differentiable, so we fall back to the single tensor +# implementation in those cases. +def _default_to_fused_or_foreach(params: List[torch.Tensor], + differentiable: bool, + use_fused: bool = False) -> Tuple[bool, bool]: + if torch.jit.is_scripting() or differentiable: + return False, False + + fused_supported_devices = _get_fused_kernels_supported_devices() + foreach_supported_devices = _get_foreach_kernels_supported_devices() + fused = use_fused and all( + p is None or (type(p) in _foreach_supported_types and + p.device.type in fused_supported_devices and + torch.is_floating_point(p)) for p in params + ) + foreach = not fused and all( + p is None or (type(p) in _foreach_supported_types and + p.device.type in foreach_supported_devices) for p in params + ) + return fused, foreach + +def _view_as_real(params, *state_and_grads): + for i, p in enumerate(params): + if torch.is_complex(p): + params[i] = torch.view_as_real(params[i]) + for s in state_and_grads: + s[i] = torch.view_as_real(s[i]) + +def _get_scalar_dtype(is_fused=None): + if is_fused: + return torch.float32 + return torch.float64 if torch.get_default_dtype() == torch.float64 else torch.float32 + +# Common doc strings among optimizers +_foreach_doc = r"""foreach (bool, optional): whether foreach implementation of optimizer + is used. If unspecified by the user (so foreach is None), we will try to use + foreach over the for-loop implementation on CUDA, since it is usually + significantly more performant. Note that the foreach implementation uses + ~ sizeof(params) more peak memory than the for-loop version due to the intermediates + being a tensorlist vs just one tensor. If memory is prohibitive, batch fewer + parameters through the optimizer at a time or switch this flag to False (default: None)""" + +_fused_doc = r"""fused (bool, optional): whether the fused implementation (CUDA only) is used. + Currently, `torch.float64`, `torch.float32`, `torch.float16`, and `torch.bfloat16` + are supported. (default: None) + + .. note:: The foreach and fused implementations are typically faster than the for-loop, + single-tensor implementation. Thus, if the user has not specified BOTH flags + (i.e., when foreach = fused = None), we will attempt defaulting to the foreach + implementation when the tensors are all on CUDA. 
For example, if the user specifies + True for fused but nothing for foreach, we will run the fused implementation. If + the user specifies False for foreach but nothing for fused (or False for fused but + nothing for foreach), we will run the for-loop implementation. If the user specifies + True for both foreach and fused, we will prioritize fused over foreach, as it is + typically faster. We attempt to use the fastest, so the hierarchy goes fused -> + foreach -> for-loop. HOWEVER, since the fused implementation is relatively new, + we want to give it sufficient bake-in time, so we default to foreach and NOT + fused when the user has not specified either flag.""" + +_capturable_doc = r"""capturable (bool, optional): whether this instance is safe to + capture in a CUDA graph. Passing True can impair ungraphed performance, + so if you don't intend to graph capture this instance, leave it False + (default: False)""" + +_differentiable_doc = r"""differentiable (bool, optional): whether autograd should + occur through the optimizer step in training. Otherwise, the step() + function runs in a torch.no_grad() context. Setting to True can impair + performance, so leave it False if you don't intend to run autograd + through this instance (default: False)""" + +_maximize_doc = r"""maximize (bool, optional): maximize the objective with respect to the + params, instead of minimizing (default: False)""" + + +def register_optimizer_step_pre_hook(hook: GlobalOptimizerPreHook) -> RemovableHandle: + r"""Register a pre hook common to all optimizers. The hook should have the following + signature:: + + hook(optimizer, args, kwargs) -> None or modified args and kwargs + + Args: + hook (Callable): A user defined hook which is registered on all optimizers. + + Returns: + :class:`torch.utils.hooks.RemovableHandle`: + a handle that can be used to remove the added hook by calling + ``handle.remove()`` + """ + handle = hooks.RemovableHandle(_global_optimizer_pre_hooks) + _global_optimizer_pre_hooks[handle.id] = hook + return handle + + +def register_optimizer_step_post_hook(hook: GlobalOptimizerPostHook) -> RemovableHandle: + r"""Register a post hook common to all optimizers. The hook should have the following + signature:: + + hook(optimizer, args, kwargs) -> None + + Args: + hook (Callable): A user defined hook which is registered on all optimizers. + + Returns: + :class:`torch.utils.hooks.RemovableHandle`: + a handle that can be used to remove the added hook by calling + ``handle.remove()`` + """ + handle = hooks.RemovableHandle(_global_optimizer_post_hooks) + _global_optimizer_post_hooks[handle.id] = hook + return handle + +ParamsT: TypeAlias = Union[Iterable[torch.Tensor], Iterable[Dict[str, Any]]] + +_P = ParamSpec("_P") +R = TypeVar("R") +T = TypeVar("T") + + +class Optimizer: + r"""Base class for all optimizers. + + .. warning:: + Parameters need to be specified as collections that have a deterministic + ordering that is consistent between runs. Examples of objects that don't + satisfy those properties are sets and iterators over values of dictionaries. + + Args: + params (iterable): an iterable of :class:`torch.Tensor` s or + :class:`dict` s. Specifies what Tensors should be optimized. + defaults: (dict): a dict containing default values of optimization + options (used when a parameter group doesn't specify them). 
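As a concrete illustration of the two accepted forms of ``params`` described above, here is a short sketch (editor's example, not part of the vendored file); the model layout and the learning rates are invented, and any optimizer subclass such as SGD behaves the same way.

import torch
import torch.nn as nn

model = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 2))

# Form 1: a plain iterable of Tensors; every parameter shares the defaults.
opt_a = torch.optim.SGD(model.parameters(), lr=0.1)

# Form 2: an iterable of dicts; each dict becomes one param_group and may
# override the defaults (here the last Linear gets a smaller learning rate).
opt_b = torch.optim.SGD(
    [
        {"params": model[0].parameters()},
        {"params": model[2].parameters(), "lr": 0.01},
    ],
    lr=0.1,
)
print(len(opt_b.param_groups))  # 2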
+ """ + + OptimizerPreHook: TypeAlias = Callable[[Self, Args, Kwargs], Optional[Tuple[Args, Kwargs]]] # type: ignore[misc] + OptimizerPostHook: TypeAlias = Callable[[Self, Args, Kwargs], None] # type: ignore[misc] + + _optimizer_step_pre_hooks: Dict[int, OptimizerPreHook] + _optimizer_step_post_hooks: Dict[int, OptimizerPostHook] + _optimizer_state_dict_pre_hooks: 'OrderedDict[int, Callable[["Optimizer"], None]]' + _optimizer_state_dict_post_hooks: 'OrderedDict[int, Callable[["Optimizer", StateDict], Optional[StateDict]]]' + _optimizer_load_state_dict_pre_hooks: 'OrderedDict[int, Callable[["Optimizer", StateDict], Optional[StateDict]]]' + _optimizer_load_state_dict_post_hooks: 'OrderedDict[int, Callable[["Optimizer"], None]]' + + def __init__(self, params: ParamsT, defaults: Dict[str, Any]) -> None: + torch._C._log_api_usage_once("python.optimizer") + self.defaults = defaults + self._optimizer_step_pre_hooks = OrderedDict() + self._optimizer_step_post_hooks = OrderedDict() + self._optimizer_state_dict_pre_hooks = OrderedDict() + self._optimizer_state_dict_post_hooks = OrderedDict() + self._optimizer_load_state_dict_pre_hooks = OrderedDict() + self._optimizer_load_state_dict_post_hooks = OrderedDict() + + self._patch_step_function() + + if isinstance(params, torch.Tensor): + if self.__class__.__name__ == 'SparseAdam': + warnings.warn(("Passing in a raw Tensor as ``params`` to SparseAdam " + "is deprecated. In the future, this will raise an error. " + "Please wrap your Tensor in an iterable instead."), + FutureWarning) + params = [params] + else: + raise TypeError("params argument given to the optimizer should be " + "an iterable of Tensors or dicts, but got " + + torch.typename(params)) + + self.state: DefaultDict[torch.Tensor, Any] = defaultdict(dict) + self.param_groups: List[Dict[str, Any]] = [] + + param_groups = list(params) + if len(param_groups) == 0: + raise ValueError("optimizer got an empty parameter list") + if not isinstance(param_groups[0], dict): + param_groups = [{'params': param_groups}] + + for param_group in param_groups: + self.add_param_group(cast(dict, param_group)) + + # Allows _cuda_graph_capture_health_check to rig a poor man's TORCH_WARN_ONCE in python, + # which I don't think exists + # https://github.com/pytorch/pytorch/issues/72948 + self._warned_capturable_if_run_uncaptured = True + + def __getstate__(self) -> Dict[str, Any]: + return { + 'defaults': self.defaults, + 'state': self.state, + 'param_groups': self.param_groups, + } + + def __setstate__(self, state: Dict[str, Any]) -> None: + self.__dict__.update(state) + if '_optimizer_step_pre_hooks' not in self.__dict__: + self._optimizer_step_pre_hooks = OrderedDict() + if '_optimizer_step_post_hooks' not in self.__dict__: + self._optimizer_step_post_hooks = OrderedDict() + if '_optimizer_state_dict_pre_hooks' not in self.__dict__: + self._optimizer_state_dict_pre_hooks = OrderedDict() + if '_optimizer_state_dict_post_hooks' not in self.__dict__: + self._optimizer_state_dict_post_hooks = OrderedDict() + if '_optimizer_load_state_dict_pre_hooks' not in self.__dict__: + self._optimizer_load_state_dict_pre_hooks = OrderedDict() + if '_optimizer_load_state_dict_post_hooks' not in self.__dict__: + self._optimizer_load_state_dict_post_hooks = OrderedDict() + self._patch_step_function() # To support multiprocessing pickle/unpickle + self.defaults.setdefault('differentiable', False) + + def __repr__(self) -> str: + format_string = self.__class__.__name__ + ' (' + for i, group in enumerate(self.param_groups): + 
format_string += '\n' + format_string += f'Parameter Group {i}\n' + for key in sorted(group.keys()): + if key != 'params': + format_string += f' {key}: {group[key]}\n' + format_string += ')' + return format_string + + # Currently needed by Adam and AdamW + def _cuda_graph_capture_health_check(self) -> None: + # Note [torch.compile x capturable] + # If we are compiling, we try to take the capturable path automatically by + # setting the flag to True during tracing. Due to this, we skip all the checks + # normally required for determining whether we can use CUDA graphs and + # shunt the responsibility to torch.inductor. This saves time during tracing + # since the checks are slow without sacrificing UX since inductor will warn + # later if CUDA graphs cannot be enabled, e.g., + # https://github.com/pytorch/pytorch/blob/d3ba8901d8640eb16f88b2bfef9df7fa383d4b47/torch/_inductor/compile_fx.py#L390. + # Thus, when compiling, inductor will determine if cudagraphs + # can be enabled based on whether there is input mutation or CPU tensors. + if not is_compiling() and torch.backends.cuda.is_built() and torch.cuda.is_available(): + capturing = torch.cuda.is_current_stream_capturing() + + if capturing and not all(group['capturable'] for group in self.param_groups): + raise RuntimeError("Attempting CUDA graph capture of step() for an instance of " + + self.__class__.__name__ + + " but param_groups' capturable is False.") + + if ( + (not getattr(self, "_warned_capturable_if_run_uncaptured", False)) + and all(group['capturable'] for group in self.param_groups) + and (not capturing) + ): + warnings.warn( + "This instance was constructed with capturable=True or some of all the param_groups came with capturable=True, " + "but step() is running without CUDA graph capture. If you never intend to graph-capture this " + "instance, capturable=True can impair performance, and you should set capturable=False." + ) + self._warned_capturable_if_run_uncaptured = True + + def _optimizer_step_code(self) -> None: + """Entry point for `torch.profile.profiler`. + + When python tracing is enabled the profiler will hook into this + function at the CPython level to inspect the optimizer's parameters and + param groups. It is called it after `step()` since many optimizers + lazily initialize state. + + This is a workaround due to lack of a proper step hook on the optimizer, + and will be removed if it exists. + """ + pass + + @staticmethod + def profile_hook_step(func: Callable[_P, R]) -> Callable[_P, R]: + + @functools.wraps(func) + def wrapper(*args: _P.args, **kwargs: _P.kwargs) -> R: + self, *_ = args + self = cast(Optimizer, self) + profile_name = f"Optimizer.step#{self.__class__.__name__}.step" + with torch.autograd.profiler.record_function(profile_name): + # call optimizer step pre hooks + for pre_hook in chain(_global_optimizer_pre_hooks.values(), self._optimizer_step_pre_hooks.values()): + result = pre_hook(self, args, kwargs) + if result is not None: + if isinstance(result, tuple) and len(result) == 2: + args, kwargs = result # type: ignore[assignment] + else: + raise RuntimeError( + f"{func} must return None or a tuple of (new_args, new_kwargs), but got {result}." 
+ ) + + out = func(*args, **kwargs) + self._optimizer_step_code() + + # call optimizer step post hooks + for post_hook in chain(self._optimizer_step_post_hooks.values(), _global_optimizer_post_hooks.values()): + post_hook(self, args, kwargs) + + return out + + return wrapper + + @staticmethod + def _group_tensors_by_device_and_dtype( + tensorlistlist: TensorListList, + with_indices: bool = False, + ) -> Union[ + Dict[Tuple[None, None], Tuple[TensorListList, Indices]], + Dict[Tuple[torch.device, torch.dtype], Tuple[TensorListList, Indices]], + ]: + """Groups a list of lists of tensors by device and dtype. + Skips this step if we are compiling since this will occur during inductor lowering.""" + if is_compiling(): + return {(None, None): (tensorlistlist, list(range(len(tensorlistlist[0]))))} + else: + return _group_tensors_by_device_and_dtype(tensorlistlist, with_indices) + + def _patch_step_function(self) -> None: + self._zero_grad_profile_name = f"Optimizer.zero_grad#{self.__class__.__name__}.zero_grad" + hooked = getattr(self.__class__.step, "hooked", None) + if not hooked: + self.__class__.step = self.profile_hook_step(self.__class__.step) # type: ignore[assignment] + self.__class__.step.hooked = True # type: ignore[attr-defined] + + def register_step_pre_hook(self, hook: OptimizerPreHook) -> RemovableHandle: + r"""Register an optimizer step pre hook which will be called before + optimizer step. It should have the following signature:: + + hook(optimizer, args, kwargs) -> None or modified args and kwargs + + The ``optimizer`` argument is the optimizer instance being used. If + args and kwargs are modified by the pre-hook, then the transformed + values are returned as a tuple containing the new_args and new_kwargs. + + Args: + hook (Callable): The user defined hook to be registered. + + Returns: + :class:`torch.utils.hooks.RemovableHandle`: + a handle that can be used to remove the added hook by calling + ``handle.remove()`` + """ + handle = hooks.RemovableHandle(self._optimizer_step_pre_hooks) + self._optimizer_step_pre_hooks[handle.id] = hook + return handle + + def register_step_post_hook(self, hook: OptimizerPostHook) -> RemovableHandle: + r"""Register an optimizer step post hook which will be called after optimizer step. + It should have the following signature:: + + hook(optimizer, args, kwargs) -> None + + The ``optimizer`` argument is the optimizer instance being used. + + Args: + hook (Callable): The user defined hook to be registered. + + Returns: + :class:`torch.utils.hooks.RemovableHandle`: + a handle that can be used to remove the added hook by calling + ``handle.remove()`` + """ + handle = hooks.RemovableHandle(self._optimizer_step_post_hooks) + self._optimizer_step_post_hooks[handle.id] = hook + return handle + + + def register_state_dict_pre_hook( + self, hook: Callable[["Optimizer"], None], prepend: bool = False + ) -> RemovableHandle: + r"""Register a state dict pre-hook which will be called before + :meth:`~torch.optim.Optimizer.state_dict` is called. It should have the + following signature:: + + hook(optimizer) -> None + + The ``optimizer`` argument is the optimizer instance being used. + The hook will be called with argument ``self`` before calling ``state_dict`` on ``self``. + The registered hook can be used to perform pre-processing before the ``state_dict`` + call is made. + + Args: + hook (Callable): The user defined hook to be registered. 
+ prepend (bool): If True, the provided pre ``hook`` will be fired before + all the already registered pre-hooks on ``state_dict``. Otherwise, + the provided ``hook`` will be fired after all the already registered + pre-hooks. (default: False) + + Returns: + :class:`torch.utils.hooks.RemoveableHandle`: + a handle that can be used to remove the added hook by calling + ``handle.remove()`` + """ + handle = hooks.RemovableHandle(self._optimizer_state_dict_pre_hooks) + self._optimizer_state_dict_pre_hooks[handle.id] = hook + if prepend: + self._optimizer_state_dict_pre_hooks.move_to_end(handle.id, last=False) + return handle + + + def register_state_dict_post_hook( + self, + hook: Callable[["Optimizer", StateDict], Optional[StateDict]], + prepend: bool = False, + ) -> RemovableHandle: + r"""Register a state dict post-hook which will be called after + :meth:`~torch.optim.Optimizer.state_dict` is called. It should have the + following signature:: + + hook(optimizer, state_dict) -> state_dict or None + + The hook will be called with arguments ``self`` and ``state_dict`` after generating + a ``state_dict`` on ``self``. The hook may modify the state_dict inplace or optionally + return a new one. The registered hook can be used to perform post-processing + on the ``state_dict`` before it is returned. + + Args: + hook (Callable): The user defined hook to be registered. + prepend (bool): If True, the provided post ``hook`` will be fired before + all the already registered post-hooks on ``state_dict``. Otherwise, + the provided ``hook`` will be fired after all the already registered + post-hooks. (default: False) + + Returns: + :class:`torch.utils.hooks.RemoveableHandle`: + a handle that can be used to remove the added hook by calling + ``handle.remove()`` + """ + handle = hooks.RemovableHandle(self._optimizer_state_dict_post_hooks) + self._optimizer_state_dict_post_hooks[handle.id] = hook + if prepend: + self._optimizer_state_dict_post_hooks.move_to_end(handle.id, last=False) + return handle + + + @torch._disable_dynamo + def state_dict(self) -> StateDict: + r"""Returns the state of the optimizer as a :class:`dict`. + + It contains two entries: + + * ``state``: a Dict holding current optimization state. Its content + differs between optimizer classes, but some common characteristics + hold. For example, state is saved per parameter, and the parameter + itself is NOT saved. ``state`` is a Dictionary mapping parameter ids + to a Dict with state corresponding to each parameter. + * ``param_groups``: a List containing all parameter groups where each + parameter group is a Dict. Each parameter group contains metadata + specific to the optimizer, such as learning rate and weight decay, + as well as a List of parameter IDs of the parameters in the group. + + NOTE: The parameter IDs may look like indices but they are just IDs + associating state with param_group. When loading from a state_dict, + the optimizer will zip the param_group ``params`` (int IDs) and the + optimizer ``param_groups`` (actual ``nn.Parameter`` s) in order to + match state WITHOUT additional verification. + + A returned state dict might look something like: + + .. code-block:: text + + { + 'state': { + 0: {'momentum_buffer': tensor(...), ...}, + 1: {'momentum_buffer': tensor(...), ...}, + 2: {'momentum_buffer': tensor(...), ...}, + 3: {'momentum_buffer': tensor(...), ...} + }, + 'param_groups': [ + { + 'lr': 0.01, + 'weight_decay': 0, + ... + 'params': [0] + }, + { + 'lr': 0.001, + 'weight_decay': 0.5, + ... 
+ 'params': [1, 2, 3] + } + ] + } + + """ + + for pre_hook in self._optimizer_state_dict_pre_hooks.values(): + pre_hook(self) + + # Save order indices instead of Tensors + param_mappings: Dict[int, int] = {} + start_index = 0 + + def pack_group(group: Dict[str, Any]) -> Dict[str, Any]: + nonlocal start_index + packed = {k: v for k, v in group.items() if k != 'params'} + param_mappings.update({id(p): i for i, p in enumerate(group['params'], start_index) + if id(p) not in param_mappings}) + packed['params'] = [param_mappings[id(p)] for p in group['params']] + start_index += len(packed['params']) + return packed + param_groups = [pack_group(g) for g in self.param_groups] + # Remap state to use order indices as keys + packed_state = {(param_mappings[id(k)] if isinstance(k, torch.Tensor) else k): v + for k, v in self.state.items()} + + state_dict = { + 'state': packed_state, + 'param_groups': param_groups, + } + + for post_hook in self._optimizer_state_dict_post_hooks.values(): + hook_result = post_hook(self, state_dict) + if hook_result is not None: + state_dict = hook_result + return state_dict + + @staticmethod + def _process_value_according_to_param_policy( + param: torch.Tensor, + value: torch.Tensor, + param_id: int, + param_groups: List[Dict[Any, Any]], + key: Hashable = None, + ) -> torch.Tensor: + # Floating-point types are a bit special here. They are the only ones + # that are assumed to always match the type of params. + # Make sure state['step'] is not casted https://github.com/pytorch/pytorch/issues/74424 + # UNLESS fused or capturable, see note [special device hosting for step] + fused = False + capturable = False + assert param_groups is not None + for pg in param_groups: + if param_id in pg["params"]: + fused = pg["fused"] if "fused" in pg else False + capturable = pg["capturable"] if "capturable" in pg else False + break + if key == "step": + if capturable or fused: + return value.to(dtype=torch.float32, device=param.device) + else: + return value + else: + if param.is_floating_point(): + return value.to(dtype=param.dtype, device=param.device) + else: + return value.to(device=param.device) + + + def register_load_state_dict_pre_hook( + self, + hook: Callable[["Optimizer", StateDict], Optional[StateDict]], + prepend: bool = False, + ) -> RemovableHandle: + r"""Register a load_state_dict pre-hook which will be called before + :meth:`~torch.optim.Optimizer.load_state_dict` is called. It should have the + following signature:: + + hook(optimizer, state_dict) -> state_dict or None + + The ``optimizer`` argument is the optimizer instance being used and the + ``state_dict`` argument is a shallow copy of the ``state_dict`` the user + passed in to ``load_state_dict``. The hook may modify the state_dict inplace + or optionally return a new one. If a state_dict is returned, it will be used + to be loaded into the optimizer. + + The hook will be called with argument ``self`` and ``state_dict`` before + calling ``load_state_dict`` on ``self``. The registered hook can be used to + perform pre-processing before the ``load_state_dict`` call is made. + + Args: + hook (Callable): The user defined hook to be registered. + prepend (bool): If True, the provided pre ``hook`` will be fired before + all the already registered pre-hooks on ``load_state_dict``. Otherwise, + the provided ``hook`` will be fired after all the already registered + pre-hooks. 
(default: False) + + Returns: + :class:`torch.utils.hooks.RemoveableHandle`: + a handle that can be used to remove the added hook by calling + ``handle.remove()`` + """ + handle = hooks.RemovableHandle(self._optimizer_load_state_dict_pre_hooks) + self._optimizer_load_state_dict_pre_hooks[handle.id] = hook + if prepend: + self._optimizer_load_state_dict_pre_hooks.move_to_end(handle.id, last=False) + return handle + + + def register_load_state_dict_post_hook( + self, hook: Callable[["Optimizer"], None], prepend: bool = False + ) -> RemovableHandle: + r"""Register a load_state_dict post-hook which will be called after + :meth:`~torch.optim.Optimizer.load_state_dict` is called. It should have the + following signature:: + + hook(optimizer) -> None + + The ``optimizer`` argument is the optimizer instance being used. + + The hook will be called with argument ``self`` after calling + ``load_state_dict`` on ``self``. The registered hook can be used to + perform post-processing after ``load_state_dict`` has loaded the + ``state_dict``. + + Args: + hook (Callable): The user defined hook to be registered. + prepend (bool): If True, the provided post ``hook`` will be fired before + all the already registered post-hooks on ``load_state_dict``. Otherwise, + the provided ``hook`` will be fired after all the already registered + post-hooks. (default: False) + + Returns: + :class:`torch.utils.hooks.RemoveableHandle`: + a handle that can be used to remove the added hook by calling + ``handle.remove()`` + """ + handle = hooks.RemovableHandle(self._optimizer_load_state_dict_post_hooks) + self._optimizer_load_state_dict_post_hooks[handle.id] = hook + if prepend: + self._optimizer_load_state_dict_post_hooks.move_to_end(handle.id, last=False) # type: ignore[attr-defined] + return handle + + + @torch._disable_dynamo + def load_state_dict(self, state_dict: StateDict) -> None: + r"""Loads the optimizer state. + + Args: + state_dict (dict): optimizer state. Should be an object returned + from a call to :meth:`state_dict`. 
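A minimal round trip through the two methods documented above may make the workflow concrete; this is an editor's sketch rather than part of the vendored file, and the checkpoint path and SGD hyperparameters are arbitrary.

import torch
import torch.nn as nn

model = nn.Linear(3, 1)
opt = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)

# One step so the optimizer owns per-parameter state (momentum buffers).
model(torch.randn(2, 3)).sum().backward()
opt.step()

# Save: 'state' is keyed by integer ids, 'param_groups' holds the hyperparameters.
torch.save(opt.state_dict(), "opt.pt")

# Load into a fresh optimizer built over the same parameters, in the same order.
opt2 = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
opt2.load_state_dict(torch.load("opt.pt"))
print(opt2.state_dict()["param_groups"][0]["momentum"])  # 0.9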
+ """ + # shallow copy, to be consistent with module API + state_dict = state_dict.copy() + + for pre_hook in self._optimizer_load_state_dict_pre_hooks.values(): + hook_result = pre_hook(self, state_dict) + if hook_result is not None: + state_dict = hook_result + + # Validate the state_dict + groups = self.param_groups + + # Deepcopy as we write into saved_groups later to update state + saved_groups = deepcopy(state_dict['param_groups']) + + if len(groups) != len(saved_groups): + raise ValueError("loaded state dict has a different number of " + "parameter groups") + param_lens = (len(g['params']) for g in groups) + saved_lens = (len(g['params']) for g in saved_groups) + if any(p_len != s_len for p_len, s_len in zip(param_lens, saved_lens)): + raise ValueError("loaded state dict contains a parameter group " + "that doesn't match the size of optimizer's group") + + # Update the state + id_map = dict(zip(chain.from_iterable(g['params'] for g in saved_groups), + chain.from_iterable(g['params'] for g in groups))) + + def _cast(param, value, param_id=None, param_groups=None, key=None): + r"""Make a deep copy of value, casting all tensors to device of param.""" + if isinstance(value, torch.Tensor): + return Optimizer._process_value_according_to_param_policy(param, value, param_id, param_groups, key) + elif isinstance(value, dict): + return {k: _cast(param, v, param_id=param_id, param_groups=param_groups, key=k) for k, v in value.items()} + elif isinstance(value, Iterable): + return type(value)(_cast(param, v, param_id=param_id, param_groups=param_groups) for v in value) # type: ignore[call-arg] + else: + return value + + # Copy state assigned to params (and cast tensors to appropriate types). + # State that is not assigned to params is copied as is (needed for + # backward compatibility). + state: DefaultDict[torch.Tensor, Dict[Any, Any]] = defaultdict(dict) + for k, v in state_dict['state'].items(): + if k in id_map: + param = id_map[k] + state[param] = _cast(param, v, param_id=k, param_groups=state_dict['param_groups']) + else: + state[k] = v + + # Update parameter groups, setting their 'params' value + def update_group(group: Dict[str, Any], new_group: Dict[str, Any]) -> Dict[str, Any]: + new_group['params'] = group['params'] + return new_group + param_groups = [ + update_group(g, ng) for g, ng in zip(groups, saved_groups)] + self.__setstate__({'state': state, 'param_groups': param_groups}) + + for post_hook in self._optimizer_load_state_dict_post_hooks.values(): + post_hook(self) + + + @torch._disable_dynamo + def zero_grad(self, set_to_none: bool = True) -> None: + r"""Resets the gradients of all optimized :class:`torch.Tensor` s. + + Args: + set_to_none (bool): instead of setting to zero, set the grads to None. + This will in general have lower memory footprint, and can modestly improve performance. + However, it changes certain behaviors. For example: + 1. When the user tries to access a gradient and perform manual ops on it, + a None attribute or a Tensor full of 0s will behave differently. + 2. If the user requests ``zero_grad(set_to_none=True)`` followed by a backward pass, ``.grad``\ s + are guaranteed to be None for params that did not receive a gradient. + 3. ``torch.optim`` optimizers have a different behavior if the gradient is 0 or None + (in one case it does the step with a gradient of 0 and in the other it skips + the step altogether). 
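The difference between the two ``set_to_none`` modes listed above is easy to observe on a toy module; the sketch below is an editor's illustration, not part of the vendored file, and assumes nothing beyond a fresh ``nn.Linear``.

import torch
import torch.nn as nn

model = nn.Linear(3, 1)
opt = torch.optim.SGD(model.parameters(), lr=0.1)

model(torch.randn(2, 3)).sum().backward()
print(model.weight.grad is None)                       # False: grads populated

opt.zero_grad(set_to_none=True)                        # default: grads become None
print(model.weight.grad is None)                       # True

model(torch.randn(2, 3)).sum().backward()
opt.zero_grad(set_to_none=False)                       # grads kept, filled with zeros
print(torch.count_nonzero(model.weight.grad).item())   # 0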
+ """ + foreach = self.defaults.get('foreach', False) or self.defaults.get('fused', False) + + if not hasattr(self, "_zero_grad_profile_name"): + self._patch_step_function() + + per_device_and_dtype_grads: Optional[DefaultDict[torch.device, DefaultDict[torch.dtype, List[torch.Tensor]]]] + if foreach: + per_device_and_dtype_grads = defaultdict(lambda: defaultdict(list)) + else: + per_device_and_dtype_grads = None + + with torch.autograd.profiler.record_function(self._zero_grad_profile_name): + for group in self.param_groups: + for p in group['params']: + if p.grad is not None: + if set_to_none: + p.grad = None + else: + if p.grad.grad_fn is not None: + p.grad.detach_() + else: + p.grad.requires_grad_(False) + if (not foreach or p.grad.is_sparse): + p.grad.zero_() + else: + assert per_device_and_dtype_grads is not None + per_device_and_dtype_grads[p.grad.device][p.grad.dtype].append(p.grad) + if foreach: + assert per_device_and_dtype_grads is not None + for per_dtype_grads in per_device_and_dtype_grads.values(): + for grads in per_dtype_grads.values(): + torch._foreach_zero_(grads) + + @overload + def step(self, closure: None = ...) -> None: + ... + + @overload + def step(self, closure: Callable[[], float]) -> float: + ... + + def step(self, closure: Optional[Callable[[], float]] = None) -> Optional[float]: + r"""Performs a single optimization step (parameter update). + + Args: + closure (Callable): A closure that reevaluates the model and + returns the loss. Optional for most optimizers. + + .. note:: + Unless otherwise specified, this function should not modify the + ``.grad`` field of the parameters. + """ + raise NotImplementedError + + @torch._disable_dynamo + def add_param_group(self, param_group: Dict[str, Any]) -> None: + r"""Add a param group to the :class:`Optimizer` s `param_groups`. + + This can be useful when fine tuning a pre-trained network as frozen layers can be made + trainable and added to the :class:`Optimizer` as training progresses. + + Args: + param_group (dict): Specifies what Tensors should be optimized along with group + specific optimization options. + """ + if not isinstance(param_group, dict): + raise TypeError(f"param_group must be a dict, but got {type(param_group)}") + + params = param_group['params'] + if isinstance(params, torch.Tensor): + param_group['params'] = [params] + elif isinstance(params, set): + raise TypeError('optimizer parameters need to be organized in ordered collections, but ' + 'the ordering of tensors in sets will change between runs. 
Please use a list instead.') + else: + param_group['params'] = list(params) + + for param in param_group['params']: + if not isinstance(param, torch.Tensor): + raise TypeError("optimizer can only optimize Tensors, " + "but one of the params is " + torch.typename(param)) + if not self.defaults.get('differentiable', None) and not (param.is_leaf or param.retains_grad): + raise ValueError("can't optimize a non-leaf Tensor") + + for name, default in self.defaults.items(): + if default is required and name not in param_group: + raise ValueError(f"parameter group didn't specify a value of required optimization parameter {name}") + else: + param_group.setdefault(name, default) + + params = param_group['params'] + if len(params) != len(set(params)): + warnings.warn("optimizer contains a parameter group with duplicate parameters; " + "in future, this will cause an error; " + "see github.com/pytorch/pytorch/issues/40967 for more information", stacklevel=3) + + param_set: Set[torch.Tensor] = set() + for group in self.param_groups: + param_set.update(set(group['params'])) + + if not param_set.isdisjoint(set(param_group['params'])): + raise ValueError("some parameters appear in more than one parameter group") + + self.param_groups.append(param_group) diff --git a/venv/lib/python3.10/site-packages/torch/optim/rprop.pyi b/venv/lib/python3.10/site-packages/torch/optim/rprop.pyi new file mode 100644 index 0000000000000000000000000000000000000000..fd0c6ba209161be0102b9b13a80e956d7d4b8f3e --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/optim/rprop.pyi @@ -0,0 +1,12 @@ +from typing import Tuple + +from .optimizer import Optimizer, ParamsT + +class Rprop(Optimizer): + def __init__( + self, + params: ParamsT, + lr: float = ..., + etas: Tuple[float, float] = ..., + step_sizes: Tuple[float, float] = ..., + ) -> None: ... diff --git a/venv/lib/python3.10/site-packages/torch/optim/sgd.pyi b/venv/lib/python3.10/site-packages/torch/optim/sgd.pyi new file mode 100644 index 0000000000000000000000000000000000000000..ba1bcd60a1b89c4e02ce8851766d2f21e4b7050e --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/optim/sgd.pyi @@ -0,0 +1,12 @@ +from .optimizer import Optimizer, ParamsT + +class SGD(Optimizer): + def __init__( + self, + params: ParamsT, + lr: float = ..., + momentum: float = ..., + dampening: float = ..., + weight_decay: float = ..., + nesterov: bool = ..., + ) -> None: ...
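The ``add_param_group`` docstring above mentions the unfreeze-during-fine-tuning workflow; the sketch below illustrates it (editor's example, not part of the diffs; the backbone/head split and the learning rates are invented).

import torch
import torch.nn as nn

backbone = nn.Linear(8, 8)
head = nn.Linear(8, 2)

# Start by optimizing only the head; the backbone stays frozen.
for p in backbone.parameters():
    p.requires_grad_(False)
opt = torch.optim.SGD(head.parameters(), lr=0.1, momentum=0.9)

# Later in training: unfreeze the backbone and give it its own group with a
# smaller learning rate. Options left unspecified (momentum here) are filled
# in from the optimizer defaults.
for p in backbone.parameters():
    p.requires_grad_(True)
opt.add_param_group({"params": backbone.parameters(), "lr": 0.01})

print([g["lr"] for g in opt.param_groups])  # [0.1, 0.01]
print(opt.param_groups[1]["momentum"])      # 0.9 (inherited from the defaults)

Both groups are then visible to state_dict()/load_state_dict(), so checkpoints taken after the unfreeze carry the new group as well.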