python_code | repo_name | file_path
---|---|---|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import collections
import copy
import enum
import inspect
import io
import logging
from itertools import chain
from typing import (
Any,
Callable,
Dict,
List,
Optional,
Set,
Type,
Union,
)
import torch
import torch.distributed as dist
from torch.distributed.algorithms.join import Join, Joinable, JoinHook
from torch.distributed.optim.utils import functional_optim_map
from torch.optim import Optimizer
logger = logging.getLogger(__name__)
__all__ = ["ZeroRedundancyOptimizer"]
# Credits: classy_vision/generic/distributed_util.py
def _recursive_copy_to_device(
value: Any,
non_blocking: bool,
device: torch.device,
) -> Any:
r"""
Recursively searches lists, tuples, dicts and copies tensors to device if
possible. Non-tensor values are passed as-is in the result.
.. note:: These are all copies, so if two entries reference the same
object, then after this call there will be two separate copies of that
object on the device.
"""
if isinstance(value, torch.Tensor):
return value.to(device, non_blocking=non_blocking)
if isinstance(value, (list, tuple)):
values = [_recursive_copy_to_device(val, non_blocking=non_blocking, device=device) for val in value]
return values if isinstance(value, list) else tuple(values)
if isinstance(value, collections.abc.Mapping):
return {
key: _recursive_copy_to_device(val, non_blocking=non_blocking, device=device) for key, val in value.items()
}
return value
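# Illustrative sketch (not part of the upstream module): the helper above is
# typically used to move a nested optimizer state onto the CPU; the nested
# structure below is hypothetical and only for demonstration.
def _example_copy_state_to_cpu() -> Any:
    state = {
        "step": torch.tensor(3),
        "exp_avg": [torch.randn(4), torch.randn(4)],
        "lr": 0.01,  # non-tensor values are passed through unchanged
    }
    return _recursive_copy_to_device(
        state, non_blocking=False, device=torch.device("cpu")
    )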
def _is_trainable(param: torch.Tensor) -> bool:
r"""
Returns whether a parameter is trainable, where trainability is equivalent to
requiring a gradient.
"""
return param.requires_grad
def _broadcast_object(
obj: Any, src_rank: int,
group: object = dist.group.WORLD,
device: torch.device = torch.device("cpu")
) -> Any:
r"""
Broadcasts an object to the given group, sending the object if called from
the source rank and receiving the object otherwise.
Arguments:
obj: object to broadcast; only used if called on the source rank.
src_rank (int): source rank.
group (``ProcessGroup``, optional): group used for the broadcast
(default: ``dist.group.WORLD``).
device (``torch.device``, optional): device to send from or receive
to (default: ``torch.device("cpu")``).
Returns:
The broadcasted object.
"""
if dist.get_rank() == src_rank:
# Send the object
buffer = io.BytesIO()
torch.save(obj, buffer)
data = bytearray(buffer.getbuffer())
length_tensor = torch.LongTensor([len(data)]).to(device)
data_send_tensor = torch.ByteTensor(data).to(device)
dist.broadcast(length_tensor, src=src_rank, group=group, async_op=False)
dist.broadcast(data_send_tensor, src=src_rank, group=group, async_op=False)
else:
# Receive the object
length_tensor = torch.LongTensor([0]).to(device)
dist.broadcast(length_tensor, src=src_rank, group=group, async_op=False)
data_recv_tensor = torch.empty([int(length_tensor.item())], dtype=torch.uint8, device=device)
dist.broadcast(data_recv_tensor, src=src_rank, group=group, async_op=False)
buffer = io.BytesIO(data_recv_tensor.cpu().numpy())
obj = torch.load(buffer, map_location=device)
return obj
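# Illustrative sketch (not part of the upstream module): `_broadcast_object()`
# is a collective, so every rank in the group must call it; only the source
# rank's argument is actually sent. Assumes `init_process_group()` has
# already been called; `cfg` is a hypothetical object to broadcast.
def _example_broadcast_config(cfg: Any) -> Any:
    # Rank 0 sends `cfg`; all other ranks discard their local argument and
    # receive rank 0's serialized copy instead.
    return _broadcast_object(
        cfg,
        src_rank=0,
        group=dist.group.WORLD,
        device=torch.device("cpu"),
    )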
def _get_global_rank(group: Any, rank: int) -> int:
r"""
Returns the global rank for the given group and rank.
"""
return (rank if group is dist.group.WORLD
else dist.distributed_c10d._get_global_rank(group, rank))
class _ZeROJoinHook(JoinHook):
def __init__(self, zero):
assert isinstance(zero, ZeroRedundancyOptimizer), \
"ZeRO join hook requires passing in a ZeroRedundancyOptimizer " \
"instance as the state"
self.zero = zero
super().__init__()
def main_hook(self):
"""
Performs an optimizer step, which updates the joined process's shard of
the parameters and broadcasts those parameters.
"""
self.zero.step()
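# Illustrative sketch (not part of the upstream module): with uneven inputs
# across ranks, `Join` shadows ZeRO's collectives through the hook above.
# Assumes `ddp_model` and `zero_optim` are an already-constructed DDP model
# and ZeroRedundancyOptimizer, and `local_batches` is this rank's data.
def _example_train_with_join(ddp_model: Any, zero_optim: Any, local_batches: Any) -> None:
    with Join([ddp_model, zero_optim]):
        for inputs in local_batches:
            zero_optim.zero_grad()
            ddp_model(inputs).sum().backward()
            zero_optim.step()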
class _DDPBucketAssignment():
r"""
This represents a :class:`DistributedDataParallel` bucket assignment,
meaning a (possibly non-strict) subset of the parameters corresponding to
a DDP bucket assigned to a rank to update.
Attributes:
bucket_index (int): index of the bucket determined by the DDP gradient
bucket all-reduce order.
parameters (List[torch.Tensor]): model parameters in the bucket
assigned to this rank.
offset (int): offset into the :class:`GradBucket` 's :meth:`parameters`
giving the index of the first element in the passed-in
``parameters``; this equivalently indexes into the
:class:`GradBucket` 's :meth:`gradients`.
device (torch.device): device on which the parameters are stored.
tensor (torch.Tensor): flattened tensor giving the data of the
parameter subset assigned to the rank.
"""
def __init__(
self,
bucket_index: int,
parameters: List[torch.Tensor],
offset: int,
):
self.bucket_index = bucket_index
self.parameters = parameters
self.offset = offset
if len(self.parameters) == 0:
raise ValueError("Empty bucket assignment")
# DDP guarantees all parameters in the bucket have the same device
self.device: torch.device = self.parameters[0].device
self.tensor: Optional[torch.Tensor] = None
class _OverlapStatus(enum.IntEnum):
r"""
This defines the three possible statuses that
:class:`ZeroRedundancyOptimizer` can be in when overlapping with
:class:`DistributedDataParallel`.
``UNINITIALIZED``: The ZeRO instance is effectively uninitialized and
is waiting for DDP to finalize its bucketing.
``DDP_HAS_REBUILT_BUCKETS``: DDP has rebuilt its buckets, meaning that
its bucketing is finalized. The ZeRO instance can now collect the
necessary information about the DDP bucketing.
``INITIALIZED``: The ZeRO instance is fully initialized and can now
optimize parameters.
"""
UNINITIALIZED = 0
DDP_HAS_REBUILT_BUCKETS = 1
INITIALIZED = 2
class _OverlapInfo():
r"""
This contains the information needed by :class:`ZeroRedundancyOptimizer`
to overlap with :class:`DistributedDataParallel`.
Arguments:
world_size (int): world size of the process group being used.
Attributes:
shard_buckets (bool): if ``True``, then the assignment of each
:class:`DistributedDataParallel` bucket is partitioned across
possibly multiple :class:`ZeroRedundancyOptimizer` instances (i.e.
across possibly multiple ranks) to approximate uniformity following
a threshold given by the total parameter size divided by the world
size; if ``False``, then each bucket is wholly assigned to a single
:class:`ZeroRedundancyOptimizer` instance (i.e. to a single rank);
this should be set to the value passed into the hook constructor.
status (_OverlapStatus): current status; see :class:`_OverlapStatus`
for more information.
params_per_bucket (List[List[torch.Tensor]]): ``params_per_bucket[i]``
gives the model parameters in the ``i``th bucket.
params_per_rank (List[List[torch.Tensor]]): ``params_per_rank[i]``
gives the model parameters assigned to the ``i``th rank, where the
parameters are grouped by increasing bucket indices.
offsets (Dict[int, int]): maps from bucket index to the offset in
``self.params_per_rank[rank]`` giving the index of the first
parameter in that bucket, where ``rank`` is this process's own
rank; the keys of this :class:`dict` are the bucket indices
assigned to this rank.
num_bucket_assignments (int): total number of bucket assignments across
all ranks; this is equal to the number of
:class:`DistributedDataParallel` gradient buckets if
``shard_buckets=False`` and possibly greater otherwise.
total_size (int, optional): total size of all buckets (i.e. sum of
``param.numel()`` for all ``param`` across all buckets) if
``shard_buckets=True``; otherwise, ``None``.
broadcast_handles (List[Work]): :class:`list` of async work handles for
the parameter broadcasts.
bucket_index_to_future (Dict[int, torch.futures.Future]):
:class:`dict` mapping bucket index to the corresponding all-reduce
future.
bucket_index_to_bucket (Dict[int, dist.GradBucket]): :class:`dict`
mapping bucket index to the corresponding bucket.
bucket_indices_seen (List[int]): :class:`list` of the bucket indices
seen on this iteration.
"""
def __init__(self, world_size: int) -> None:
self.status: _OverlapStatus = _OverlapStatus.UNINITIALIZED
self.shard_buckets: bool = False
# Modified per bucket reconstruction
self.params_per_bucket: List[List[torch.Tensor]] = []
self.params_per_rank: List[List[torch.Tensor]] = \
[[] for _ in range(world_size)]
self.offsets: Dict[int, int] = {}
self.assigned_ranks_per_bucket: List[Set[int]] = []
self.num_bucket_assignments: int = 0
self.total_size: Optional[int] = None
# Modified per iteration
self.broadcast_handles: List[Any] = []
self.bucket_indices_seen: List[int] = []
# Used by `hook_with_zero_step()`
self.bucket_index_to_future: Dict[int, torch.futures.Future] = {}
self.bucket_index_to_bucket: Dict[int, dist.GradBucket] = {}
def wait_for_broadcasts(self) -> None:
r"""
Waits for all parameter broadcasts. This should be called once all
broadcasts have been scheduled, meaning ``self.broadcast_handles`` is
filled. This clears ``self.broadcast_handles`` in preparation for the
next iteration.
"""
assert len(self.broadcast_handles) == self.num_bucket_assignments, \
f"Missing at least one broadcast handle on rank {dist.get_rank()}"
_ = list(map(lambda x: x.wait(), self.broadcast_handles))
self.broadcast_handles.clear()
def clear_per_iter_info(self) -> None:
r"""
Clears the data structures that are modified per-iteration. This should
be called at the end of an iteration.
"""
self.bucket_indices_seen.clear()
self.bucket_index_to_future.clear()
self.bucket_index_to_bucket.clear()
class ZeroRedundancyOptimizer(Optimizer, Joinable):
r"""
This class wraps an arbitrary :class:`optim.Optimizer
<torch.optim.Optimizer>` and shards its states across ranks in the group as
described by ZeRO_. The local optimizer instance in each rank is only
responsible for updating approximately ``1 / world_size`` parameters and
hence only needs to keep ``1 / world_size`` optimizer states. After
parameters are updated locally, each rank will broadcast its parameters to
all other peers to keep all model replicas in the same state.
``ZeroRedundancyOptimizer`` can be used in conjunction with
:class:`torch.nn.parallel.DistributedDataParallel` to reduce per-rank peak
memory consumption.
``ZeroRedundancyOptimizer`` uses a sorted-greedy algorithm to pack a number
of parameters at each rank. Each parameter belongs to a single rank and is
not divided among ranks. The partition is arbitrary and might not match
the parameter registration or usage order.
Arguments:
params (``Iterable``): an ``Iterable`` of :class:`torch.Tensor` s
or :class:`dict` s giving all parameters, which will be sharded
across ranks.
Keyword Args:
optimizer_class (:class:`torch.optim.Optimizer`): the class of the local
optimizer.
process_group (``ProcessGroup``, optional): ``torch.distributed``
``ProcessGroup`` (default: ``dist.group.WORLD`` initialized by
:meth:`torch.distributed.init_process_group`).
parameters_as_bucket_view (bool, optional): if ``True``, parameters are
packed into buckets to speed up communication, and ``param.data``
fields point to bucket views at different offsets; if ``False``,
each individual parameter is communicated separately, and each
``param.data`` stays intact (default: ``False``).
overlap_with_ddp (bool, optional): if ``True``, :meth:`step` is
overlapped with :class:`DistributedDataParallel` 's gradient
synchronization; this requires (1) either a functional optimizer
for the ``optimizer_class`` argument or one with a functional
equivalent and (2) registering a DDP communication hook
constructed from one of the functions in ``ddp_zero_hook.py``;
parameters are packed into buckets matching those in
:class:`DistributedDataParallel`, meaning that the
``parameters_as_bucket_view`` argument is ignored.
If ``False``, :meth:`step` runs disjointly after the backward pass
(per normal).
(default: ``False``)
**defaults: any trailing arguments, which are forwarded to the local
optimizer.
Example::
>>> import torch.nn as nn
>>> from torch.distributed.optim import ZeroRedundancyOptimizer
>>> from torch.nn.parallel import DistributedDataParallel as DDP
>>> # xdoctest: +SKIP
>>> model = nn.Sequential(*[nn.Linear(2000, 2000).to(rank) for _ in range(20)])
>>> ddp = DDP(model, device_ids=[rank])
>>> opt = ZeroRedundancyOptimizer(
>>> ddp.parameters(),
>>> optimizer_class=torch.optim.Adam,
>>> lr=0.01
>>> )
>>> ddp(inputs).sum().backward()
>>> opt.step()
.. warning::
Currently, ``ZeroRedundancyOptimizer`` requires that all of the
passed-in parameters are the same dense type.
.. warning::
If you pass ``overlap_with_ddp=True``, be wary of the following: Given
the way that overlapping :class:`DistributedDataParallel` with
:class:`ZeroRedundancyOptimizer` is currently implemented, the first
two or three training iterations do not perform parameter updates in
the optimizer step, depending on whether ``static_graph=False`` or
``static_graph=True``, respectively. This is because ZeRO needs
information about the gradient bucketing strategy used by
:class:`DistributedDataParallel`, which is not finalized until the
second forward pass if ``static_graph=False`` or until the third
forward pass if ``static_graph=True``. To adjust for this, one option
is to prepend dummy inputs.
.. warning:: ZeroRedundancyOptimizer is experimental and subject to change.
.. _ZeRO: https://arxiv.org/abs/1910.02054
"""
def __init__(
self,
params,
optimizer_class: Type[Optimizer],
process_group: Optional[Any] = None,
parameters_as_bucket_view: bool = False,
overlap_with_ddp: bool = False,
**defaults: Any,
):
# Perform type and assumption checks on the input parameters
params = self._verify_and_init_params(params)
self._verify_same_dense_param_type()
# NOTE: The parent constructor uses `add_param_group()` which is
# partially overloaded in ZeroRedundancyOptimizer, so we use the
# `initialized` flag to dissociate the behaviour of `add_param_group()`
# between the parent and child.
self.initialized = False
Optimizer.__init__(self, params, defaults)
Joinable.__init__(self)
# Now, all parameters are held in both `self._all_params` and
# `self.param_groups`
# Internal data structures (`_cache` indicates lazily evaluated)
self._param_to_rank_cache: Dict[torch.Tensor, int] = {}
self._param_to_index_cache: Dict[torch.Tensor, int] = {}
self._partition_parameters_cache: List[List[Dict]] = []
self._index_to_param_cache: List[torch.Tensor] = []
self._device_to_params_per_rank_cache: Dict[torch.device, List[List[torch.Tensor]]] = {}
self._bucket_assignments_per_rank_cache: List[Dict[int, _DDPBucketAssignment]] = []
self._is_trainable_mask = self._get_is_trainable_mask()
# Default device for collective communication and buckets
self._default_device = self._all_params[0].device
self.process_group = process_group if process_group is not None else dist.group.WORLD
self.world_size: int = dist.get_world_size(self.process_group)
self.rank: int = dist.get_rank(self.process_group)
self.global_rank: int = _get_global_rank(self.process_group, self.rank)
self._overlap_with_ddp: bool = overlap_with_ddp
self._optim_defaults = defaults
self._optim_constructor = self._get_optimizer_constructor(optimizer_class)
# If `overlap_with_ddp=True`, local optimizer initialization is delayed
# to run time after the necessary information has been collected
if not overlap_with_ddp:
self._init_local_optimizer()
else:
self._overlap_info: _OverlapInfo = _OverlapInfo(self.world_size)
if parameters_as_bucket_view:
logger.warning(
"`parameters_as_bucket_view=True` will be ignored since "
"`overlap_with_ddp=True`; instead, a different bucketing "
"strategy will be used"
)
# `self._buckets` is used if `parameters_as_bucket_view=True`, in
# which case parameter data is flattened into contiguous bucket tensors
self.parameters_as_bucket_view = parameters_as_bucket_view
self._buckets: List[List[torch.Tensor]] = []
self._build_param_buckets()
# Optional consolidated optimizer state, only populated if this rank
# is the target in `consolidate_state_dict()`
self._all_state_dicts: List[Dict[str, Any]] = []
self.initialized = True
def _clear_cache(self) -> None:
r"""
Clears the cached data structures giving partition information.
"""
self._partition_parameters_cache.clear()
self._param_to_rank_cache.clear()
self._index_to_param_cache.clear()
self._param_to_index_cache.clear()
self._device_to_params_per_rank_cache.clear()
self._bucket_assignments_per_rank_cache.clear()
def add_param_group(self, param_group: dict) -> None:
r"""
Add a parameter group to the :class:`Optimizer` 's ``param_groups``.
This can be useful when fine tuning a pre-trained network, as frozen
layers can be made trainable and added to the :class:`Optimizer` as
training progresses.
Arguments:
param_group (dict): specifies the parameters to be optimized and
group-specific optimization options.
.. warning:: This method handles updating the shards on all partitions
but needs to be called on all ranks. Calling this on a subset of
the ranks will cause the training to hang because communication
primitives are called depending on the managed parameters and
expect all the ranks to participate on the same set of parameters.
"""
if self.initialized and self._overlap_with_ddp:
raise RuntimeError(
"ZeroRedundancyOptimizer with `overlap_with_ddp=True` only "
"supports a single parameter group"
)
super().add_param_group(param_group)
# NOTE: The rest of the method assumes that the call to the parent's
# `add_param_group()` appends the new parameter group and preserves
# the previous parameter-group ordering
if self.initialized:
# Force a re-partitioning of the parameters
self._clear_cache()
param_groups = self._partition_parameters()[self.rank]
# NOTE: All parameters in the old parameter groups should be
# assigned to the same ranks so that the local optimizers do not
# need to be reinitialized
# Add the parameters assigned to this rank from the new parameter
# group to the local optimizer, if any
if len(param_groups) == len(self.optim.param_groups) + 1:
self.optim.add_param_group(param_groups[-1])
# Update the bucketing strategy accordingly
if self.parameters_as_bucket_view:
self._build_param_buckets()
def consolidate_state_dict(self, to: int = 0) -> None:
r"""
Consolidate a list of ``state_dict`` s (one per rank) on the target
rank.
Arguments:
to (int): the rank that receives the optimizer states (default: 0).
Raises:
RuntimeError: if ``overlap_with_ddp=True`` and this method is
called before this :class:`ZeroRedundancyOptimizer` instance
has been fully initialized, which happens once
:class:`DistributedDataParallel` gradient buckets have been
rebuilt.
.. warning:: This needs to be called on all ranks.
"""
self._check_overlap_initialized()
# Sync the exposed `param_groups` attributes to the local optimizer in
# case they have been updated
self._sync_param_groups(self.param_groups, self.optim.param_groups)
# Pull the sharded state from all ranks and store them in rank order
empty_messenger = torch.tensor([0], dtype=torch.uint8, device=self._default_device)
# NOTE: We wastefully use `broadcast()` (e.g. instead of `gather()`)
# due to compatibility issues with NCCL backend; a possible follow-up
# is to move all sharded state management to RPC RRef
self._all_state_dicts = []
for rank in range(self.world_size):
global_rank = _get_global_rank(self.process_group, rank)
if self.rank == to:
# Consolidate all local `state_dict`s on this rank, storing on
# CPU to save GPU memory
if rank == self.rank:
# Directly append own optimizer state
self._all_state_dicts.append(
_recursive_copy_to_device(self.optim.state_dict(), non_blocking=True, device=torch.device("cpu"),)
)
else:
# Receive the optimizer state from the source rank
local_state_dict = _broadcast_object(
empty_messenger,
src_rank=global_rank,
group=self.process_group,
device=self._default_device,
)
self._all_state_dicts.append(
_recursive_copy_to_device(local_state_dict, non_blocking=True, device=torch.device("cpu"))
)
else:
if rank == self.rank:
# Send the optimizer state to the target rank
_ = _broadcast_object(
self.optim.state_dict(),
src_rank=self.global_rank,
group=self.process_group,
device=self._default_device,
)
elif rank != to:
# Discard the received object; `broadcast()` is used for
# compatibility reasons
_ = _broadcast_object(
empty_messenger,
src_rank=global_rank,
group=self.process_group,
device=self._default_device,
)
def _verify_params_per_rank(
self,
params_per_rank: List[List[torch.Tensor]],
) -> None:
r"""
Verifies ``params_per_rank`` for :meth:`_partition_parameters`,
checking that ``params_per_rank`` has length equal to the world size
and that it does not contain any parameters not passed into the
:class:`ZeroRedundancyOptimizer` constructor.
The parameters in ``params_per_rank`` being a strict subset of those
passed into the constructor is valid since some parameters may be
frozen.
Raises:
ValueError: if ``params_per_rank`` does not have length equal to
the world size or if it contains a parameter that was not
passed into the :class:`ZeroRedundancyOptimizer` constructor.
"""
if len(params_per_rank) != self.world_size:
raise ValueError(
"`params_per_rank` must have length equal to the world size"
)
all_params_set = set(self._all_params)
for params in params_per_rank:
for param in params:
if param not in all_params_set:
raise ValueError(
"Passing a new parameter in `params_per_rank` that "
"was not passed into the ZeroRedundancyOptimizer "
"constructor"
)
def _partition_param_group(
self,
param_group: Dict[str, Any],
params_per_rank: List[List[torch.Tensor]]
) -> None:
r"""
Partitions the parameter group ``param_group`` according to
``params_per_rank`` by modifying ``self._partition_parameters_cache``.
This method should only be used as a subroutine for
:meth:`_partition_parameters`.
Arguments:
param_group (dict[str, Any]): a parameter group as normally defined
in an optimizer state.
params_per_rank (list[list[torch.Tensor]]): a :class:`list` of
length world size containing :class:`list` s of parameters to
assign to each rank.
"""
for rank, params in enumerate(params_per_rank):
rank_param_group = copy.copy(param_group)
rank_param_group["params"] = params
self._partition_parameters_cache[rank].append(rank_param_group)
def _partition_parameters(
self,
params_per_rank: Optional[List[List[torch.Tensor]]] = None,
) -> List[List[Dict]]:
r"""
Partitions parameters across distributed data parallel ranks.
Arguments:
params_per_rank (list[list[torch.Tensor]], optional): a
:class:`list` of length world size containing :class:`list` s
of parameters to assign to each rank; this provides a way to
specify a partition manually.
If ``None``, the parameters are partitioned according to an
internal algorithm.
(default: ``None``)
Returns:
A :class:`list` where each element of the list contains the
``param_groups`` for a rank (which itself is a :class:`list` of
:class:`dict`); element 0 corresponds to rank 0, etc.; each rank
stores the ``param_groups`` for all ranks for the collective
communication in :meth:`step`.
Raises:
ValueError: see :meth:`_verify_params_per_rank`.
RuntimeError: if ``params_per_rank`` is not ``None`` and this
:class:`ZeroRedundancyOptimizer` instance is using more than
one parameter group.
"""
if params_per_rank is None:
# Partition the parameters optimizing for uniformity
if len(self._partition_parameters_cache) == 0:
self._partition_parameters_cache = [[] for _ in range(self.world_size)]
sizes = [0] * self.world_size
for param_group in self.param_groups:
param_group_params_per_rank: List[List] = [[] for _ in range(self.world_size)]
# Sort the parameters by size (largest first)
params_sorted = sorted(param_group["params"], key=lambda t: t.numel(), reverse=True)
for param in params_sorted:
# Greedily add the parameter to rank with smallest size so far
rank = self._get_min_index(sizes)
param_group_params_per_rank[rank].append(param)
sizes[rank] += param.numel()
# Apply the constructed partition of the parameter group
self._partition_param_group(param_group, param_group_params_per_rank)
return self._partition_parameters_cache
# Partition the parameters according to `params_per_rank`
assert len(self._partition_parameters_cache) == 0, \
"Specifying `params_per_rank` should only be done when the " \
"parameters have not been partitioned yet"
if len(self.param_groups) != 1:
raise RuntimeError(
"Specifying `params_per_rank` only supports a single "
"parameter group"
)
self._verify_params_per_rank(params_per_rank)
self._partition_parameters_cache = [[] for _ in range(self.world_size)]
# Apply the passed-in partition of the parameter group
param_group = self.param_groups[0]
self._partition_param_group(param_group, params_per_rank)
return self._partition_parameters_cache
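# Standalone illustrative sketch (not part of the upstream module) of the
# sorted-greedy rule used above: give each parameter, largest first, to the
# rank with the smallest total size so far. The sizes and world size in the
# example comment are made up.
def _example_greedy_partition(param_numels: List[int], world_size: int) -> List[List[int]]:
    sizes = [0] * world_size
    assignment: List[List[int]] = [[] for _ in range(world_size)]
    for numel in sorted(param_numels, reverse=True):
        rank = sizes.index(min(sizes))  # least-loaded rank so far
        assignment[rank].append(numel)
        sizes[rank] += numel
    return assignment
# e.g. _example_greedy_partition([100, 60, 50, 40], 2) == [[100, 40], [60, 50]]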
@property
def _param_to_rank(self) -> Dict[torch.Tensor, int]:
r"""
:class:`dict` mapping parameters to their assigned data parallel rank
in the partition.
"""
if len(self._param_to_rank_cache) == 0:
for rank, param_groups in enumerate(self._partition_parameters()):
for param_group in param_groups:
for param in param_group["params"]:
self._param_to_rank_cache[param] = rank
return self._param_to_rank_cache
@property
def _param_to_index(self) -> Dict[torch.Tensor, int]:
r"""
:class:`dict` mapping parameters to their indices in the global
optimizer state.
NOTE: This assumes that the global optimizer state's indexing (in
``state_dict``) follows a linear ordering over the parameter groups.
"""
if len(self._param_to_index_cache) == 0:
self._param_to_index_cache = {
p: i for i, p in enumerate(chain(*(g["params"] for g in self.param_groups)))
}
return self._param_to_index_cache
@property
def _index_to_param(self) -> List[torch.Tensor]:
r"""
List mapping parameter indices in the global optimizer scheme to the
actual params.
"""
if len(self._index_to_param_cache) == 0:
self._index_to_param_cache = list(chain(*(g["params"] for g in self.param_groups)))
return self._index_to_param_cache
def _broadcast_params_from_rank(self, rank: int):
r"""
Broadcasts the shard of parameters from a given rank to all other
ranks asynchronously.
Arguments:
rank (int): the source rank.
Returns:
A :class:`list` of async work handles for the ``broadcast()`` s
performed to synchronize the parameters.
"""
assert not self._overlap_with_ddp, \
"`_broadcast_params_from_rank()` should not be used if " \
"`overlap_with_ddp=True`; instead, the broadcasting should " \
"happen in the DDP communication hook"
handles = []
if self.parameters_as_bucket_view:
for dev_i_buckets in self._buckets:
bucket = dev_i_buckets[rank]
global_rank = _get_global_rank(self.process_group, rank)
handles.append(
dist.broadcast(tensor=bucket, src=global_rank,
group=self.process_group, async_op=True)
)
else:
param_groups = self._partition_parameters()[rank]
global_rank = _get_global_rank(self.process_group, rank)
for param_group in param_groups:
for param in param_group["params"]:
handles.append(
dist.broadcast(tensor=param.data, src=global_rank,
group=self.process_group, async_op=True)
)
return handles
def _sync_params(self):
r"""
Syncs all parameter shards across the ranks.
This rank sends its shard of the parameters to all other ranks and
receives a shard from each other rank. This is done using
``broadcast()``. Parameters are sent bucket-by-bucket if
``parameters_as_bucket_view=True`` and sent parameter-by-parameter
otherwise.
"""
handles = []
for rank in range(self.world_size):
handles.extend(self._broadcast_params_from_rank(rank))
_ = list(map(lambda x: x.wait(), handles))
@property
def _device_to_params_per_rank(
self
) -> Dict[torch.device, List[List[torch.Tensor]]]:
r"""
:class:`dict` mapping each device to a :class:`list` of the per-rank parameter
lists filtered to only include the parameters stored on that device.
Each per-rank parameter list gives the parameters assigned to that rank
to update.
This is used for constructing the parameter buckets if
``parameters_as_bucket_view=True``.
Let ``dev_i`` denote the ``i``th device for this rank. Then:
``dev_0`` maps to a list containing:
rank 0's assigned parameters stored on ``dev_0``,
rank 1's assigned parameters stored on ``dev_0``,
...
``dev_1`` maps to a list containing:
rank 0's assigned parameters stored on ``dev_1``,
rank 1's assigned parameters stored on ``dev_1``,
...
...
"""
assert self.parameters_as_bucket_view, \
"`_device_to_params_per_rank` should only be used if " \
"`parameters_as_bucket_view=True`"
if len(self._device_to_params_per_rank_cache) == 0:
for rank, param_groups in enumerate(self._partition_parameters()):
for param_group in param_groups:
for param in param_group["params"]:
device = param.device
if device not in self._device_to_params_per_rank_cache:
self._device_to_params_per_rank_cache[device] = [[] for _ in range(self.world_size)]
self._device_to_params_per_rank_cache[device][rank].append(param)
return self._device_to_params_per_rank_cache
def _get_min_index(
self,
values: List[int],
disallowed_indices: Optional[Set[int]] = None,
) -> int:
r"""
Returns ``values.index(min(values))`` using only a single pass, excluding
any indices in ``disallowed_indices`` if provided.
Arguments:
values (List[int]): :class:`list` of values.
disallowed_indices (Optional[Set[int]]): indices that are
disallowed from being the returned min index.
"""
min_index = -1
min_value = float("inf")
for i, value in enumerate(values):
if disallowed_indices and i in disallowed_indices:
continue
if value < min_value:
min_value = value
min_index = i
assert min_index >= 0, "All indices are disallowed"
return min_index
def _assign_bucket_subset_to_rank(
self,
bucket_index: int,
bucket_params: List[torch.Tensor],
bucket_offset: int,
assigned_rank: int,
assigned_ranks_per_bucket: List[Set[int]],
) -> None:
r"""
Assigns the model parameters given by ``bucket_params``, representing a
(possibly non-strict) subset of the parameters corresponding to a
:class:`DistributedDataParallel` bucket, to the rank with the least
size assigned so far and collects relevant information.
Arguments:
bucket_index (int): index of the :class:`DistributedDataParallel`
gradient bucket.
bucket_params (List[torch.Tensor]): subset of the parameters
corresponding to the bucket to assign.
bucket_offset (int): offset giving the index of the first element
in ``bucket_params`` in the bucket's full parameter list.
assigned_rank (int): rank to assign to.
assigned_ranks_per_bucket (List[Set[int]]): :class:`set` of ranks
assigned to each bucket.
"""
overlap_info = self._overlap_info
if len(bucket_params) == 0:
raise ValueError(
"Empty bucket assignment"
)
params_per_rank = overlap_info.params_per_rank
offsets = overlap_info.offsets
self._bucket_assignments_per_rank_cache[assigned_rank][bucket_index] = \
_DDPBucketAssignment(bucket_index, bucket_params, bucket_offset)
if self.global_rank == assigned_rank:
offsets[bucket_index] = len(params_per_rank[assigned_rank])
params_per_rank[assigned_rank].extend(bucket_params)
assigned_ranks_per_bucket[bucket_index].add(assigned_rank)
self._overlap_info.num_bucket_assignments += 1
@property
def _bucket_assignments_per_rank(
self
) -> List[Dict[int, _DDPBucketAssignment]]:
r"""
:class:`list` of length world size consisting of :class:`dict` s
mapping bucket indices to :class:`_DDPBucketAssignment` s for each
rank.
"""
assert self._overlap_with_ddp, "`_bucket_assignments_per_rank` should " \
"only be used if `overlap_with_ddp=True`"
if len(self._bucket_assignments_per_rank_cache) > 0:
return self._bucket_assignments_per_rank_cache
overlap_info = self._overlap_info
assert overlap_info.status == _OverlapStatus.INITIALIZED
self._bucket_assignments_per_rank_cache = [{} for _ in range(self.world_size)]
params_per_bucket = overlap_info.params_per_bucket
if overlap_info.shard_buckets:
# Define the assignment threshold to approximate uniformity
assert overlap_info.total_size is not None, \
"`total_size` was not computed"
threshold = overlap_info.total_size / self.world_size # type: ignore[operator]
size_per_rank = [0 for _ in range(self.world_size)]
num_buckets = len(params_per_bucket)
overlap_info.assigned_ranks_per_bucket = [set() for _ in range(num_buckets)]
assigned_ranks_per_bucket = overlap_info.assigned_ranks_per_bucket
if not overlap_info.shard_buckets:
# Assign each DDP bucket entirely to a single rank
for bucket_index, bucket_params in enumerate(params_per_bucket):
assert len(bucket_params) > 0, "Empty bucket"
assigned_rank = self._get_assigned_rank(bucket_index)
self._assign_bucket_subset_to_rank(
bucket_index,
bucket_params,
0,
assigned_rank,
assigned_ranks_per_bucket,
)
else:
# Assign each DDP bucket to possibly multiple ranks
# Specifically, sort the DDP buckets by increasing size, and for
# each bucket, iteratively assign the maximal unassigned subset
# with size less than `threshold` to the rank with the least total
# size so far -- each such assignment is represented by a
# `_DDPBucketAssignment` instance and only contains parameters from
# a single DDP bucket
params_per_bucket_enum = sorted(
enumerate(params_per_bucket),
key=lambda x: sum(p.numel() for p in x[1])
)
for bucket_index, bucket_params in params_per_bucket_enum:
assert len(bucket_params) > 0, "Empty bucket"
bucket_offset = 0
assignment_size = 0
for param_index, param in enumerate(bucket_params):
param_numel = param.numel()
if assignment_size + param_numel >= threshold and param_index > bucket_offset:
assigned_rank = self._get_min_index(size_per_rank, assigned_ranks_per_bucket[bucket_index])
# Include up to but not including the parameter that
# exceeded the threshold
self._assign_bucket_subset_to_rank(
bucket_index,
bucket_params[bucket_offset:param_index],
bucket_offset,
assigned_rank,
assigned_ranks_per_bucket,
)
size_per_rank[assigned_rank] += assignment_size
bucket_offset = param_index
assignment_size = 0
assignment_size += param_numel
# Assign the remainder of the bucket so that no assignment
# spans across two buckets
assigned_rank = self._get_min_index(size_per_rank, assigned_ranks_per_bucket[bucket_index])
self._assign_bucket_subset_to_rank(
bucket_index,
bucket_params[bucket_offset:],
bucket_offset,
assigned_rank,
assigned_ranks_per_bucket,
)
size_per_rank[assigned_rank] += assignment_size
return self._bucket_assignments_per_rank_cache
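# Standalone illustrative sketch (not part of the upstream module) of the
# threshold rule used when `shard_buckets=True`: walk a bucket's parameters
# in order and start a new assignment whenever adding the next parameter
# would reach the threshold. The numbers in the example comment are made up.
def _example_split_bucket(param_numels: List[int], threshold: int) -> List[List[int]]:
    splits: List[List[int]] = [[]]
    running = 0
    for numel in param_numels:
        if running + numel >= threshold and len(splits[-1]) > 0:
            splits.append([])
            running = 0
        splits[-1].append(numel)
        running += numel
    return splits
# e.g. _example_split_bucket([3, 3, 3, 3], threshold=8) == [[3, 3], [3, 3]]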
def _local_step(
self,
gradients: Optional[List[Optional[torch.Tensor]]] = None,
closure: Optional[Callable[[], float]] = None,
**kwargs: Any,
) -> Optional[float]:
r"""
Performs a single optimizer step without syncing parameters across
ranks.
Arguments:
gradients (list[Optional[torch.Tensor]], optional): a :class:`list`
of length equal to the number of parameters assigned to this
rank containing gradient tensors or ``None`` as its elements;
a ``None`` in the :class:`list` indicates that the
corresponding parameter should not be updated.
If the argument itself is ``None``, then all parameters are
updated, and the gradients are assumed to be already populated.
(default: ``None``)
closure (Callable): a closure that re-evaluates the model and
returns the loss; optional for most optimizers and should be
``None`` if ``gradients`` is not ``None``; (default: ``None``)
Returns:
Optional loss depending on the underlying local optimizer.
.. warning::
The argument ``gradients`` should only be specified (i.e. not
``None``) if ``overlap_with_ddp=True``, in which case
:class:`ZeroRedundancyOptimizer` wraps a functional optimizer.
"""
Join.notify_join_context(self)
# Check if the model trainability has changed
is_trainable_mask = self._get_is_trainable_mask()
if is_trainable_mask != self._is_trainable_mask:
if self._overlap_with_ddp:
raise RuntimeError(
"ZeroRedundancyOptimizer with `overlap_with_ddp=True` "
"does not support changing parameter trainability at run "
"time"
)
logger.warning(
"ZeroRedundancyOptimizer detected that the trainable "
"parameters changed; rebuilding the parameter buckets if "
"enabled"
)
self._build_param_buckets()
self._is_trainable_mask = is_trainable_mask
# Sync the exposed `param_groups` attributes to the local optimizer in
# case they have been updated
self._sync_param_groups(self.param_groups, self.optim.param_groups)
# Run the optimizer step on this shard only
if gradients is None:
loss = self.optim.step(**kwargs) if closure is None \
else self.optim.step(closure=closure, **kwargs)
else:
assert self._overlap_with_ddp, "Specifying `gradients` should not " \
"be used when `overlap_with_ddp=False`"
assert closure is None, "`closure` is not supported when using " \
"a local functional optimizer"
loss = self.optim.step(gradients=gradients)
# Sync any updated attributes in the local optimizer to the exposed
# `param_groups`
self._sync_param_groups(self.optim.param_groups, self.param_groups)
return loss
def step(
self,
closure: Optional[Callable[[], float]] = None,
**kwargs: Any,
) -> Optional[float]:
r"""
Performs a single optimizer step and syncs parameters across all ranks.
Arguments:
closure (Callable): a closure that re-evaluates the model and
returns the loss; optional for most optimizers.
Returns:
Optional loss depending on the underlying local optimizer.
.. note:: Any extra parameters are passed to the base optimizer as-is.
"""
if self._overlap_with_ddp:
logger.warning(
"`step()` should not be included in the training loop when "
"`overlap_with_ddp=True`"
)
return None
# Perform the local optimizer step
loss = self._local_step(closure=closure, **kwargs)
# Sync all of the updated parameter shards across the ranks
self._sync_params()
return loss
def join_hook(self, **kwargs):
r"""
Returns the ZeRO join hook, which enables training on uneven inputs by
shadowing the collective communications in the optimizer step.
Gradients must be properly set before this hook is called.
Arguments:
kwargs (dict): a :class:`dict` containing any keyword arguments
to modify the behavior of the join hook at run time; all
:class:`Joinable` instances sharing the same join context
manager are forwarded the same value for ``kwargs``.
This hook does not support any keyword arguments; i.e. ``kwargs`` is
unused.
"""
return _ZeROJoinHook(self)
@property
def join_device(self) -> torch.device:
return self._default_device
@property
def join_process_group(self) -> Any:
return self.process_group
def load_state_dict(self, state_dict: Dict[str, Any]) -> None:
r"""
Load the state pertaining to the given rank from the input
``state_dict``, updating the local optimizer as needed.
Arguments:
state_dict (dict): optimizer state; should be an object returned
from a call to :meth:`state_dict`.
Raises:
RuntimeError: if ``overlap_with_ddp=True`` and this method is
called before this :class:`ZeroRedundancyOptimizer` instance
has been fully initialized, which happens once
:class:`DistributedDataParallel` gradient buckets have been
rebuilt.
"""
self._check_overlap_initialized()
for index, value in state_dict["state"].items():
param = self._index_to_param[index]
if self._param_to_rank[param] != self.rank:
# Clear any state irrelevant to this rank
state_dict["state"][index] = None
else:
# Load the parameter state to the local optimizer
self.optim.state[param] = _recursive_copy_to_device(value, non_blocking=True, device=param.device)
# Force zero-dimensional tensors (like Adam "step") on CPU
for state_name, state_value in self.optim.state[param].items():
if torch.is_tensor(state_value) and state_value.dim() == 0:
self.optim.state[param][state_name] = state_value.cpu()
super().load_state_dict(state_dict)
# Sync the input state with the exposed and local optimizer states
self._sync_param_groups(state_dict["param_groups"], self.param_groups)
self._sync_param_groups(self.param_groups, self.optim.param_groups)
def state_dict(self) -> Dict[str, Any]:
r"""
Returns the last global optimizer state known to this rank.
.. warning::
If the state has not been consolidated to this rank, this raises a
runtime error, and even if it has, the state may not be up-to-date,
depending on when :meth:`consolidate_state_dict` was last called.
Raises:
RuntimeError: if ``overlap_with_ddp=True`` and this method is
called before this :class:`ZeroRedundancyOptimizer` instance
has been fully initialized, which happens once
:class:`DistributedDataParallel` gradient buckets have been
rebuilt; or if this method is called without a preceding call
to :meth:`consolidate_state_dict`.
"""
self._check_overlap_initialized()
if len(self._all_state_dicts) == 0:
raise RuntimeError(
"Optimizer state has not been consolidated on this rank. "
f"Please call `consolidate_state_dict(to={self.rank})` on "
"all ranks beforehand if you meant to save the global state."
)
# Get the possibly-stale global optimizer state that uses global
# parameter indexing
state_dict = super().state_dict()
# Update the global optimizer state with local state information,
# factoring in the translation from local to global indexing
for rank, local_state_dict in enumerate(self._all_state_dicts):
local_param_groups = local_state_dict["param_groups"]
global_param_groups = self._partition_parameters()[rank]
assert len(local_param_groups) == len(global_param_groups), \
"Mismatch between number of local and global parameter groups"
for local_param_group, global_param_group in zip(local_param_groups, global_param_groups):
# `local_param_group` stores local indices, while
# `global_param_group` stores the tensors directly
local_param_indices = local_param_group["params"]
global_params = global_param_group["params"]
assert len(local_param_indices) == len(global_params), \
"Mismatch between number of local and global parameters in parameter group"
for local_param_index, global_param in zip(local_param_indices, global_params):
# Update the global parameter state, if any
if local_param_index in local_state_dict["state"]:
global_param_index = self._param_to_index[global_param]
state_dict["state"][global_param_index] = local_state_dict["state"][local_param_index]
# Sort the parameters in the state
state_dict["state"] = dict(sorted(state_dict["state"].items()))
return state_dict
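# Illustrative sketch (not part of the upstream module) of how
# `consolidate_state_dict()` and `state_dict()` fit together when saving a
# checkpoint; the choice of rank 0 and the `path` argument are assumptions.
def _example_save_zero_checkpoint(zero_optim: "ZeroRedundancyOptimizer", path: str) -> None:
    # Every rank must participate in the consolidation collectives ...
    zero_optim.consolidate_state_dict(to=0)
    # ... but only the target rank holds the consolidated state and saves it.
    if dist.get_rank() == 0:
        torch.save(zero_optim.state_dict(), path)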
@staticmethod
def _sync_param_groups(
src_param_groups: List[Dict[Any, Any]],
dst_param_groups: List[Dict[Any, Any]],
) -> None:
r"""
Syncs the attributes from the source parameter groups to the
destination parameter groups.
Example attributes include learning rate or scheduler attributes. The
two lists should have the same length (i.e. contain the same number of
parameter groups).
Arguments:
src_param_groups (list[dict]): parameter groups giving the
attribute settings to copy.
dst_param_groups (list[dict]): parameter groups giving the
attribute settings to set.
"""
assert len(src_param_groups) == len(dst_param_groups), \
"Mismatch between number of source and destination parameter groups"
for src_param_group, dst_param_group in zip(src_param_groups, dst_param_groups):
# Sync all attributes except the parameters
for attr in filter(lambda x: x != "params", src_param_group.keys()):
dst_param_group[attr] = src_param_group[attr]
def _build_param_buckets(self) -> None:
r"""
Builds parameter buckets if ``parameters_as_bucket_view=True`` so
that for each device that stores this rank's parameters, there is a
bucket (represented as a tensor) containing all of the parameters on
that device that are assigned to a given rank in the parameter update
partition.
This method is called in the constructor and any time parameter
trainability is changed.
.. warning::
The current implementation assumes that all of the parameters in a
bucket are of the same dense type when allocating the bucket's
tensor.
.. warning::
If the model parameters are stored across more than one device,
then the storage partitioning must be the same across all
processes in order for parameter synchronization to work.
"""
if not self.parameters_as_bucket_view or self._overlap_with_ddp:
return
# `self._buckets[i][j]` are the parameters stored on device i and
# assigned to rank j
num_devices = len(self._device_to_params_per_rank)
self._buckets = [[] for _ in range(num_devices)] # type: ignore[assignment]
for dev_i, (device, params_per_rank) in enumerate(self._device_to_params_per_rank.items()):
for params in params_per_rank:
bucket_size = 0
dtype = None
trainable_params = []
for param in params:
if not _is_trainable(param):
# Clone in case the parameter was previously part of
# a bucket to avoid the data from being destroyed
param.data = param.data.detach().clone()
else:
bucket_size += param.numel()
trainable_params.append(param)
dtype = param.dtype # assumes all same dtype
if bucket_size == 0:
# Create a dummy bucket if there are no parameters
bucket = torch.zeros(1, device=device)
else:
# Construct the bucket (assuming all dense and same dtype)
bucket = torch.empty(bucket_size, dtype=dtype, device=device)
offset = 0
for param in trainable_params:
offset_next = offset + param.numel()
bucket[offset:offset_next].copy_(param.data.flatten())
param.data = bucket[offset:offset_next].view_as(param.data)
offset = offset_next
self._buckets[dev_i].append(bucket) # type: ignore[arg-type]
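# Standalone illustrative sketch (not part of the upstream module) of the
# "bucket view" trick used above: flatten two tensors into one contiguous
# buffer and repoint their `.data` at views of it, so that a single
# `broadcast()` of `bucket` would synchronize both. Shapes are arbitrary.
def _example_bucket_views() -> torch.Tensor:
    a, b = torch.randn(2, 3), torch.randn(4)
    bucket = torch.empty(a.numel() + b.numel())
    bucket[:a.numel()].copy_(a.flatten())
    bucket[a.numel():].copy_(b.flatten())
    a.data = bucket[:a.numel()].view_as(a)
    b.data = bucket[a.numel():].view_as(b)
    return bucket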
def _build_ddp_param_buckets(self) -> None:
r"""
For each DDP bucket with parameters assigned to this rank, flattens the
data of those parameters into a single tensor and saves the tensor to
the ``tensor`` attribute in the corresponding
:class:`_DDPBucketAssignment` instance stored in
``self._bucket_assignments_per_rank``.
:class:`DistributedDataParallel` guarantees that the parameters
corresponding to a gradient bucket have the same device and the same
dtype.
"""
for bucket_assignments in self._bucket_assignments_per_rank:
for bucket_assignment in bucket_assignments.values():
params = bucket_assignment.parameters
bucket_size = 0
dtype = None
for param in params:
assert _is_trainable(param), "Model parameter " \
"corresponding to a gradient in a DDP bucket should " \
"require a gradient"
bucket_size += param.numel()
dtype = param.dtype # assumes all same dtype
assert bucket_size > 0, "Empty bucket"
# Construct the bucket tensor (assuming all dense and same dtype)
tensor = torch.empty(bucket_size, dtype=dtype, device=bucket_assignment.device)
offset = 0
for param in params:
offset_next = offset + param.numel()
tensor[offset:offset_next].copy_(param.data.flatten())
param.data = tensor[offset:offset_next].view_as(param.data)
offset = offset_next
bucket_assignment.tensor = tensor
def _verify_and_init_params(
self, params: Any,
) -> Union[List[torch.Tensor], List[dict]]:
r"""
Verifies the type of ``params`` and initializes ``self._all_params``
as a :class:`list` of all parameters if ``params`` is valid.
Arguments:
params (Any): Candidate parameter list or parameter groups to
verify.
Raises:
TypeError: ``params`` has an invalid type.
ValueError: ``params`` is empty.
Returns:
The persistent form of ``params`` to be passed into the parent
:class:`Optimizer` constructor -- i.e. returns ``params`` as a
:class:`list` to ensure that it can be iterated over again.
"""
if isinstance(params, torch.Tensor):
raise TypeError("`params` argument should be an iterable of "
f"Tensors, but got {torch.typename(params)}")
try:
all_params = list(params)
except TypeError:
raise TypeError("`params` argument should be an iterable of Tensors"
f" or dicts, but got {torch.typename(params)}")
if len(all_params) == 0:
raise ValueError("ZeroRedundancyOptimizer got an empty parameter "
"list")
all_tensors = True
all_dicts = True
for param in all_params:
all_tensors &= isinstance(param, torch.Tensor)
all_dicts &= isinstance(param, dict)
if not all_tensors and not all_dicts:
raise TypeError("`params` argument should be an iterable of "
"Tensors or dicts")
# Ensure that `self._all_params` contains a list of all parameters
if all_tensors:
self._all_params = all_params
elif all_dicts:
self._all_params = []
# `all_params` contains parameter groups (not parameters)
for param_group in all_params:
if "params" not in param_group:
raise ValueError(
"Each parameter group passed-in via `params` must "
"have a 'params' key mapping to the parameters in "
"the group"
)
self._all_params.extend(param_group["params"])
return all_params
def _verify_same_dense_param_type(self) -> None:
r"""
Verifies that all parameters are of the same dense type.
The method assumes that ``self._all_params`` has been initialized
and is non-empty.
Raises:
ValueError: ``params`` contains sparse parameters or parameters
of varying dense types.
NOTE: This method can be removed once support for sparse parameters
and varying parameter types is added.
"""
typename = torch.typename(self._all_params[0])
if self._all_params[0].is_sparse:
raise ValueError("ZeroRedundancyOptimizer only supports using "
"the same dense type for all parameters but got "
f"{typename}")
for param in self._all_params[1:]:
other_typename = torch.typename(param)
if other_typename != typename:
raise ValueError("ZeroRedundancyOptimizer only supports "
"using the same dense type for all "
f"parameters but got both {typename} and "
f"{other_typename}")
def _get_is_trainable_mask(self) -> List[bool]:
r"""
Returns a boolean mask indicating if each parameter is trainable
(``requires_grad``) or not.
"""
return list(map(_is_trainable, self._all_params))
def _init_local_optimizer(self) -> None:
r"""
Initializes this rank's local optimizer, responsible for its subset of
the parameters.
The local optimizer is saved in ``self.optim``.
"""
assert self._optim_constructor is not None, \
"The local optimizer class has not been set"
param_groups = self._partition_parameters()[self.rank]
# `overlap_with_ddp=True` requires a local functional optimizer
if self._overlap_with_ddp:
# Functional optimizers only support a single parameter group and
# require passing in the parameters as a list
assert len(param_groups) == 1, "Initializing the local " \
"functional optimizer with more than one parameter group"
params = param_groups[0]["params"]
# Try to pass `_allow_empty_param_list=True` to avoid erroring
if "_allow_empty_param_list" in inspect.signature(self._optim_constructor).parameters:
self.optim: Any = self._optim_constructor(params, **self._optim_defaults, _allow_empty_param_list=True)
else:
logger.warning(
f"{self._optim_constructor} does not support the argument "
"`_allow_empty_param_list`; ZeroRedundancyOptimizer may "
"error due to an empty parameter list"
)
self.optim: Any = self._optim_constructor(params, **self._optim_defaults) # type: ignore[no-redef]
# Log information about the DDP and ZeRO bucketing
if dist.get_debug_level() != dist.DebugLevel.OFF:
local_numel = sum(p.numel() for p in params)
num_assigned_buckets = len(self._bucket_assignments_per_rank[self.global_rank])
logger.info(
f"rank {self.global_rank} with {local_numel} parameters "
f"across {num_assigned_buckets} buckets"
)
if self.global_rank == 0:
logger.info(
f"{len(self._overlap_info.params_per_bucket)} DDP "
f"buckets and "
f"{self._overlap_info.num_bucket_assignments} bucket "
"assignments"
)
else:
# NOTE: Passing `param_groups` into the local optimizer constructor
# bypasses the empty parameter list check
self.optim: Optimizer = self._optim_constructor(param_groups, **self._optim_defaults) # type: ignore[no-redef]
# TODO: Manually add `self.param_groups` if using a functional
# optimizer; remove this if/when the functional optimizers support
# multiple parameter groups
if self._overlap_with_ddp and not hasattr(self.optim, "param_groups"):
assert hasattr(self.optim, "param_group"), \
"The functional optimizer should set at least one of the " \
"attributes `param_group` or `param_groups`"
self.optim.param_groups = [self.optim.param_group] # type: ignore[attr-defined]
self._sync_param_groups(self.optim.param_groups, self.param_groups)
def _init_zero_for_overlap(self) -> None:
r"""
Performs a delayed initialization of the local optimizer and the
supporting data structures.
"""
assert self._overlap_with_ddp, \
"`_init_zero_for_overlap()` should only be called when " \
"`overlap_with_ddp=True`"
self._overlap_info.status = _OverlapStatus.INITIALIZED
self._clear_cache()
self._partition_parameters(self._overlap_info.params_per_rank)
self._build_ddp_param_buckets()
self._init_local_optimizer()
def _get_assigned_rank(self, bucket_index: int) -> int:
r"""
Returns the single rank assigned to a :class:`DistributedDataParallel`
gradient bucket.
Arguments:
bucket_index (int): index of the :class:`DistributedDataParallel`
bucket for which to get the assigned rank.
"""
assert not self._overlap_info.shard_buckets, \
"The bucket assignment requires global bucket information and " \
"will be computed later; there should be no need to use this " \
"method"
return bucket_index % self.world_size
def _check_overlap_initialized(self):
r"""
Checks that the delayed initialization has occurred (see
:meth:`_init_zero_for_overlap`) if ``overlap_with_ddp=True``, and
raises a ``RuntimeError`` if not. This should preface methods that
should not be run before that delayed initialization.
Raises:
RuntimeError: if ``overlap_with_ddp=True`` and
:meth:`_init_zero_for_overlap` has not been called.
"""
if self._overlap_with_ddp \
and self._overlap_info.status != _OverlapStatus.INITIALIZED:
raise RuntimeError(
"This method should not be called until this "
"ZeroRedundancyOptimizer instance has been fully "
"initialized"
)
def _get_optimizer_constructor(self, optimizer_class: Any) -> Any:
r"""
Returns the proper optimizer constructor, performing the necessary
validation and transformation depending on ``overlap_with_ddp``.
Returns:
- ``optimizer_class`` if ``overlap_with_ddp=False`` and
``optimizer_class`` is not a functional optimizer.
- ``optimizer_class`` if ``overlap_with_ddp=True`` and
``optimizer_class`` is already a functional optimizer.
- The functional equivalent of ``optimizer_class`` if
``overlap_with_ddp=True`` and ``optimizer_class`` is not
already a functional optimizer (assuming the equivalent
exists).
Raises:
ValueError:
- if ``overlap_with_ddp=True`` but ``optimizer_class`` is
neither a functional optimizer nor translatable to a
functional optimizer.
- if ``overlap_with_ddp=False`` and ``optimizer_class`` is a
functional optimizer.
"""
functional_optims = functional_optim_map.values()
if not self._overlap_with_ddp:
if optimizer_class in functional_optims:
# Using a functional optimizer is only supported when
# `overlap_with_ddp=True`
raise ValueError(
f"Passing in a functional optimizer {optimizer_class} "
"when `overlap_with_ddp=False`"
)
else:
return optimizer_class
else:
if optimizer_class in functional_optims:
# Already a functional optimizer
return optimizer_class
elif optimizer_class in functional_optim_map:
# Translate the passed-in optimizer class to its functional
# equivalent if `overlap_with_ddp=True`
optim_constructor = functional_optim_map[optimizer_class]
logger.info(
f"Using the functional optimizer {optim_constructor} "
f"instead of {optimizer_class} since "
"`overlap_with_ddp=True`"
)
return optim_constructor
else:
raise ValueError(
"Using `ddp_with_overlap=True` requires using a "
"functional optimizer, but there is no supported functional "
f"optimizer equivalent for {optimizer_class}"
)
| pytorch-master | torch/distributed/optim/zero_redundancy_optimizer.py |
from typing import List, Dict, Optional, Tuple
import torch
import torch.optim._functional as F
from torch import Tensor
__all__ : List[str] = []
# Define a TorchScript-compatible functional Adamax optimizer that we use
# in a functional way. Instead of reading `param.grad` when updating
# parameters, we explicitly let the distributed optimizer pass gradients to
# the `step` function. This separates the gradients from the parameters and
# allows a multithreaded trainer to update the parameters without races on
# accumulating into the same `.grad`.
# NOTE: This should only be used by distributed optimizer internals and is
# not meant to be exposed to the user.
@torch.jit.script
class _FunctionalAdamax(object):
def __init__(
self,
params: List[Tensor],
lr: float = 1e-3,
betas: Tuple[float, float] = (0.9, 0.999),
eps: float = 1e-8,
weight_decay: float = 0.0,
foreach: bool = False,
maximize: bool = False,
_allow_empty_param_list: bool = False,
):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
if not 0.0 <= weight_decay:
raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
self.defaults = {
"lr": lr,
"eps": eps,
"beta1": betas[0],
"beta2": betas[1],
"weight_decay": weight_decay,
}
self.foreach = foreach
self.maximize = maximize
self.state = torch.jit.annotate(Dict[torch.Tensor, Dict[str, torch.Tensor]], {})
if len(params) == 0 and not _allow_empty_param_list:
raise ValueError("optimizer got an empty parameter list")
# NOTE: we only have one param_group and don't allow user to add additional
# param group as it's not a common use case.
self.param_group = {"params": params}
def step(self, gradients: List[Optional[Tensor]]):
params = self.param_group['params']
params_with_grad = []
grads = []
exp_avgs = []
exp_infs = []
state_steps: List[Tensor] = []
if len(params) != len(gradients):
raise ValueError(
"the gradients passed in does not equal to the size of the parameters!"
+ f"Params length: {len(params)}. "
+ f"Gradients length: {len(gradients)}"
)
for param, gradient in zip(self.param_group['params'], gradients):
if gradient is not None:
params_with_grad.append(param)
grads.append(gradient)
# Lazy state initialization
if param not in self.state:
self.state[param] = {}
state = self.state[param]
state['step'] = torch.tensor(0.0)
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(param, memory_format=torch.preserve_format)
# Exponential moving average of squared gradient values
state['exp_inf'] = torch.zeros_like(param, memory_format=torch.preserve_format)
state = self.state[param]
exp_avgs.append(state['exp_avg'])
exp_infs.append(state['exp_inf'])
state_steps.append(state['step'])
with torch.no_grad():
F.adamax(params_with_grad,
grads,
exp_avgs,
exp_infs,
state_steps,
eps=self.defaults['eps'],
beta1=self.defaults['beta1'],
beta2=self.defaults['beta2'],
lr=self.defaults['lr'],
weight_decay=self.defaults['weight_decay'],
foreach=self.foreach,
maximize=self.maximize)
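# Editorial note (illustrative sketch, not part of the original module):
# unlike a regular optimizer, the functional optimizer receives gradients
# explicitly instead of reading ``param.grad``:
#
# >>> # xdoctest: +SKIP("illustrative only")
# >>> param = torch.randn(4, requires_grad=True)
# >>> opt = _FunctionalAdamax([param], lr=1e-3)
# >>> grad = torch.randn(4)
# >>> opt.step([grad])  # updates ``param`` in place using the supplied gradient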
| pytorch-master | torch/distributed/optim/functional_adamax.py |
from collections import abc, defaultdict
import logging
from typing import Dict, List, Optional, Union
import torch
from torch.cuda import FloatTensor # type: ignore[attr-defined]
from torch.cuda.amp.grad_scaler import GradScaler, OptState, _MultiDeviceReplicator
from torch.distributed.distributed_c10d import ProcessGroup
import torch.distributed as dist
from torch.optim.sgd import SGD
def _refresh_per_optimizer_state():
return {"stage": OptState.READY, "found_inf_per_device": {}}
def _is_supported_device(tensor: torch.Tensor):
return tensor.is_cuda or tensor.device.type in ("xla", "cpu")
class _GeneralMultiDeviceReplicator(_MultiDeviceReplicator):
"""
Lazily serves tensor to request device. This class extends
_MultiDeviceReplicator to allow support for "cpu" as a device.
"""
def __init__(self, master_tensor: torch.Tensor) -> None:
assert _is_supported_device(master_tensor)
self.master = master_tensor
self._per_device_tensors: Dict[torch.device, torch.Tensor] = {}
class ShardedGradScaler(GradScaler):
"""
ShardedGradScaler helps perform gradient scaling in a shard aware manner. It extends
functionality from GradScaler:
* Supports PyTorch DDP and FSDP implementations
* Supports CPU offloaded tensors (as used in fully sharded data parallel [FSDP])
* Supports the custom Mixed Precision loss dtype (fp16, bf16) that FSDP returns
* Syncs inf/NaN for scaled gradient tensors on any torch.device (where tensors are placed) across
nodes
Example::
# Creates a ShardedGradScaler once at the beginning of training.
scaler = ShardedGradScaler()
for epoch in epochs:
for input, target in data:
optimizer.zero_grad()
output = model(input)
loss = loss_fn(output, target)
# Scales loss. Calls backward() on scaled loss to create scaled gradients.
scaler.scale(loss).backward()
# scaler.step() first unscales gradients of the optimizer's params.
# If gradients don't contain infs/NaNs, optimizer.step() is then called,
# otherwise, optimizer.step() is skipped.
scaler.step(optimizer)
# Updates the scale for next iteration.
scaler.update()
See :class:`GradScaler` for explanation of scaling/unscaling and more use cases.
Args:
init_scale (float, optional, default=2.**16): Initial scale factor.
growth_factor (float, optional, default=2.0): Factor by which the scale is multiplied during
:meth:`update` if no inf/NaN gradients occur for ``growth_interval`` consecutive iterations.
backoff_factor (float, optional, default=0.5): Factor by which the scale is multiplied during
:meth:`update` if inf/NaN gradients occur in an iteration.
growth_interval (int, optional, default=2000): Number of consecutive iterations without inf/NaN gradients
that must occur for the scale to be multiplied by ``growth_factor``.
enabled (bool, optional): If ``False``, disables gradient scaling. :meth:`step` simply
invokes the underlying ``optimizer.step()``, and other methods become no-ops.
Default: ``True``
process_group (ProcessGroup, optional, default=torch.distributed.group.WORLD):
process group for sharding
"""
def __init__(
self,
init_scale: float = 2.0 ** 16,
backoff_factor: float = 0.5,
growth_factor: float = 2.0,
growth_interval: int = 2000,
enabled: bool = True,
process_group: Optional[ProcessGroup] = dist.group.WORLD,
):
super().__init__(
init_scale=init_scale,
backoff_factor=backoff_factor,
growth_factor=growth_factor,
growth_interval=growth_interval,
enabled=enabled,
)
if self._enabled:
self.process_group = process_group
self._per_optimizer_states = defaultdict(_refresh_per_optimizer_state)
def scale(self, outputs: Union[torch.Tensor, List[torch.Tensor]]) -> Union[torch.Tensor, List[torch.Tensor]]:
if not self._enabled:
return outputs
if isinstance(outputs, torch.Tensor):
assert _is_supported_device(outputs)
if self._scale is None:
self._lazy_init_scale_growth_tracker(outputs.device)
assert self._scale is not None
scaled_output = outputs * self._scale.to(device=outputs.device, non_blocking=True)
# Here we ensure the return dtype is the same as the outputs dtype.
# For the FSDP + Mixed Precision use case, the loss output is in the Mixed Precision
# format (fp16, bf16) and so the scaled loss should be of the same dtype.
return scaled_output.type(outputs.dtype)
stash: List[_GeneralMultiDeviceReplicator] = []
def apply_scale(val: Union[torch.Tensor, abc.Iterable]) -> Union[torch.Tensor, abc.Iterable]:
if isinstance(val, torch.Tensor):
assert _is_supported_device(val)
if len(stash) == 0:
if self._scale is None:
self._lazy_init_scale_growth_tracker(val.device)
assert self._scale is not None
stash.append(_GeneralMultiDeviceReplicator(self._scale))
scaled_val = val * stash[0].get(val.device)
# Here we ensure the return dtype is the same as the outputs dtype.
# For the FSDP + Mixed Precision use case, the loss output is in the Mixed Precision
# format (fp16, bf16) and so the scaled loss should be of the same dtype.
return scaled_val.type(val.dtype)
elif isinstance(val, abc.Iterable):
iterator = map(apply_scale, val)
if isinstance(val, (list, tuple)):
return type(val)(iterator)
else:
return iterator
else:
raise ValueError("outputs must be a Tensor or an iterable of Tensors")
return apply_scale(outputs) # type: ignore[return-value]
def _foreach_non_finite_check_and_unscale_cpu_(
self, grads: List, found_inf: torch.Tensor, inv_scale: torch.Tensor
) -> None:
if len(grads) == 0:
return
assert inv_scale.numel() == 1, "inv_scale must be a 1-element tensor."
assert found_inf.numel() == 1, "found_inf must be a 1-element tensor."
expected_device = grads[0].device
for grad in grads:
for tensor in grad:
if tensor.device != expected_device:
logging.error("tensor device is %s and expected device is %s" % (tensor.device, expected_device))
raise ValueError("Gradients must be on the same device.")
# check for non_overlapping_and_dense doesn't exist in the python world
# as remarked here https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/native/cuda/AmpKernels.cu#L108
# we assume tensor is not MTA(multi tensor apply) safe. iterate through each item regardless of dtype
if torch.isinf(tensor).any().item() or torch.isnan(tensor).any().item():
found_inf.data = torch.tensor([1.0])
break
else:
tensor.data *= inv_scale.item()
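# Editorial note (worked example, assumed for illustration): with
# ``inv_scale = 1 / 2.0`` a clean gradient of ``[2., 4.]`` is rescaled in
# place to ``[1., 2.]`` and ``found_inf`` stays ``0.0``; if any entry were
# inf/NaN, ``found_inf`` would be set to ``1.0`` and unscaling of the
# remaining entries of that gradient would stop.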
def _unscale_grads_(
self, optimizer: SGD, inv_scale: torch.Tensor, found_inf: torch.Tensor, allow_fp16: bool = True
) -> Dict[torch.device, torch.Tensor]:
per_device_inv_scale = _GeneralMultiDeviceReplicator(inv_scale)
per_device_found_inf = _GeneralMultiDeviceReplicator(found_inf)
# To set up _amp_foreach_non_finite_check_and_unscale_, split grads by device and dtype.
# There could be thousands of grads, so we'd like to iterate through them just once.
# However, we don't know their devices or dtypes in advance.
# https://stackoverflow.com/questions/5029934/defaultdict-of-defaultdict
# Google says mypy struggles with defaultdicts type annotations.
per_device_and_dtype_grads = defaultdict(lambda: defaultdict(list)) # type: ignore[var-annotated]
with torch.no_grad():
for group in optimizer.param_groups:
for param in group["params"]:
if param.grad is None:
continue
if (not allow_fp16) and param.grad.dtype == torch.float16:
raise ValueError("Attempting to unscale FP16 gradients.")
if param.grad.is_sparse:
# is_coalesced() == False means the sparse grad has values with duplicate indices.
# coalesce() deduplicates indices and adds all values that have the same index.
# For scaled fp16 values, there's a good chance coalescing will cause overflow,
# so we should check the coalesced _values().
if param.grad.dtype is torch.float16:
# coalesce is not supported in torch.float16
param_grad_fp32 = param.grad.type(torch.float32).coalesce()
param.grad = param_grad_fp32.type(torch.float16)
to_unscale = param.grad._values()
else:
to_unscale = param.grad
per_device_and_dtype_grads[to_unscale.device][to_unscale.dtype].append(to_unscale)
for device, per_dtype_grads in per_device_and_dtype_grads.items():
for grads in per_dtype_grads.values():
if grads[0].device.type == "cpu":
self._foreach_non_finite_check_and_unscale_cpu_(
grads,
per_device_found_inf.get(device),
per_device_inv_scale.get(device),
)
else:
torch._amp_foreach_non_finite_check_and_unscale_(
grads,
per_device_found_inf.get(device),
per_device_inv_scale.get(device),
)
return per_device_found_inf._per_device_tensors
def unscale_(self, optimizer: SGD) -> None:
if not self._enabled:
return
self._check_scale_growth_tracker("unscale_")
optimizer_state = self._per_optimizer_states[id(optimizer)]
if optimizer_state["stage"] is OptState.UNSCALED:
raise RuntimeError("unscale_() has already been called on this optimizer since the last update().")
elif optimizer_state["stage"] is OptState.STEPPED:
raise RuntimeError("unscale_() is being called after step().")
# FP32 division can be imprecise for certain compile options, so we carry out the reciprocal in FP64.
assert self._scale is not None
inv_scale = self._scale.double().reciprocal().float()
found_inf = torch.full((1,), 0.0, dtype=torch.float32, device=self._scale.device)
optimizer_state["found_inf_per_device"] = self._unscale_grads_(optimizer, inv_scale, found_inf, True)
optimizer_state["stage"] = OptState.UNSCALED
# Synchronize the detected inf across the ranks
optimizer_state = self._per_optimizer_states[id(optimizer)]
future_handles = []
for v in optimizer_state["found_inf_per_device"].values():
if v.device.type == "cpu":
v_on_cuda = v.cuda()
future_handles.append(dist.all_reduce(v_on_cuda, async_op=True, group=self.process_group).get_future())
v.copy_(v_on_cuda.cpu())
else:
future_handles.append(dist.all_reduce(v, async_op=True, group=self.process_group).get_future())
# Make sure that the calls are done before moving out.
if future_handles:
torch.futures.wait_all(future_handles)
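# Editorial note (illustrative sketch, not part of the original module):
# explicit ``unscale_`` mirrors the standard ``torch.cuda.amp.GradScaler``
# pattern and is typically used to clip gradients in their true scale:
#
# >>> # xdoctest: +SKIP("undefined variables")
# >>> scaler.scale(loss).backward()
# >>> scaler.unscale_(optimizer)  # gradients are now unscaled
# >>> torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm)
# >>> scaler.step(optimizer)
# >>> scaler.update()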
def step(self, optimizer: SGD, *args, **kwargs) -> Optional[float]:
return super().step(optimizer, *args, **kwargs)
def _amp_update_scale_cpu_(self, found_inf) -> None:
"""
If found_inf is 1.0 (True), then scale is multiplied by backoff_factor and growth_tracker is set to zero.
Otherwise, scale is multiplied by the growth factor when the growth interval is reached.
"""
if found_inf.item() >= 1.0:
self._scale *= self._backoff_factor # type: ignore[arg-type]
self._growth_tracker = 0
else:
successful = self._growth_tracker + 1 # type: ignore[operator]
if successful == self._growth_interval: # type: ignore[arg-type]
self._scale *= self._growth_factor # type: ignore[arg-type]
self._growth_tracker = 0
else:
self._growth_tracker = successful
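# Editorial note (worked example, assumed for illustration): with the
# defaults ``init_scale=2.**16``, ``backoff_factor=0.5``, ``growth_factor=2.0``
# and ``growth_interval=2000``, an iteration that finds inf/NaN halves the
# scale (2**16 -> 2**15) and resets the growth tracker, while 2000 consecutive
# clean iterations double it again (2**15 -> 2**16), matching what
# ``torch._amp_update_scale_`` does on non-CPU devices.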
def update(self, new_scale: Optional[Union[float, FloatTensor]] = None) -> None:
"""
Updates the scale factor.
If any optimizer steps were skipped the scale is multiplied by ``backoff_factor``
to reduce it. If ``growth_interval`` unskipped iterations occurred consecutively,
the scale is multiplied by ``growth_factor`` to increase it.
Passing ``new_scale`` sets the new scale value manually. (``new_scale`` is not
used directly, it's used to fill GradScaler's internal scale tensor. So if
``new_scale`` was a tensor, later in-place changes to that tensor will not further
affect the scale GradScaler uses internally.)
Args:
new_scale (float or :class:`torch.cuda.FloatTensor`, optional, default=None): New scale factor.
.. warning::
:meth:`update` should only be called at the end of the iteration, after ``scaler.step(optimizer)`` has
been invoked for all optimizers used this iteration.
"""
if not self._enabled:
return
_scale, _growth_tracker = self._check_scale_growth_tracker("update") # type: ignore[var-annotated]
if new_scale is not None:
# Accept a new user-defined scale.
if isinstance(new_scale, float):
self._scale.fill_(new_scale) # type: ignore[union-attr]
else:
reason = "new_scale should be a float or a 1-element torch.cuda.FloatTensor with requires_grad=False."
assert isinstance(new_scale, torch.cuda.FloatTensor), reason # type: ignore[attr-defined]
assert new_scale.numel() == 1, reason
assert new_scale.requires_grad is False, reason
self._scale.copy_(new_scale) # type: ignore[union-attr]
else:
# Consume shared inf/nan data collected from optimizers to update the scale.
# If all found_inf tensors are on the same device as self._scale, this operation is asynchronous.
found_infs = [
found_inf.to(device=_scale.device, non_blocking=True)
for state in self._per_optimizer_states.values()
for found_inf in state["found_inf_per_device"].values()
]
assert len(found_infs) > 0, "No inf checks were recorded prior to update."
found_inf_combined = found_infs[0]
if len(found_infs) > 1:
for i in range(1, len(found_infs)):
found_inf_combined += found_infs[i]
if _scale.device.type == "cpu":
self._amp_update_scale_cpu_(found_inf_combined)
else:
torch._amp_update_scale_(
self._scale, # type: ignore[arg-type]
self._growth_tracker, # type: ignore[arg-type]
found_inf_combined,
self._growth_factor, # type: ignore[arg-type]
self._backoff_factor, # type: ignore[arg-type]
self._growth_interval, # type: ignore[arg-type]
)
# To prepare for next iteration, clear the data collected from optimizers this iteration.
self._per_optimizer_states = defaultdict(_refresh_per_optimizer_state)
| pytorch-master | torch/distributed/fsdp/sharded_grad_scaler.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
# Copyright (c) Tongzhou Wang
# Licensed under the MIT License.
import contextlib
from typing import Any, Dict, Generator, List
import torch.nn as nn
from torch.distributed.utils import _replace_by_prefix
from .flat_param import FlatParamHandle
FLAT_PARAM = "flat_param"
FPW_MODULE = "_fpw_module"
__all__ = ["FlattenParamsWrapper"]
def _post_state_dict_hook(
module: nn.Module, state_dict: Dict[str, Any], prefix: str, *args: Any
) -> Dict[str, Any]:
"""
_post_state_dict_hook() is called after the state_dict() is executed
and before returning the state_dict to the users.
This API post-processes the keys of the state_dict to remove the
FlattenParamsWrapper internal prefix.
"""
# Move everything from FPW_MODULE up one level.
_replace_by_prefix(state_dict, prefix + f"{FPW_MODULE}.", prefix)
return state_dict
def _pre_load_state_dict_hook(
state_dict: Dict[str, Any],
prefix: str,
*args: Any,
) -> None:
"""
_pre_load_state_dict_hook() is called before the _load_from_state_dict() is
executed. This API pre-processes the keys of the state_dict to add the
FlattenParamsWrapper internal prefix.
"""
# Push everything down to FPW_MODULE level.
_replace_by_prefix(state_dict, prefix, prefix + f"{FPW_MODULE}.")
# The flat_param_* keys actually need to move one level up.
flat_param_key = prefix + f"{FPW_MODULE}.{FLAT_PARAM}"
for k in list(state_dict.keys()):
if k.startswith(flat_param_key):
last_part = k.split(".")[-1]
assert last_part.startswith(
FLAT_PARAM
), f"Expected key to contain flat_param, but key name is {k}"
_replace_by_prefix(state_dict, k, prefix + last_part)
class FlattenParamsWrapper(nn.Module):
"""
This is a wrapper for flattening parameters in a ``nn.Module`` 's subtree
into a single flattened parameter and is based on [1]. This is used for
:class:`FullyShardedDataParallel` 's recursive wrapping.
[1] https://github.com/SsnL/PyTorch-Reparam-Module
Args:
module (nn.Module): Module to wrap.
params (List[nn.Parameter]): Parameters in ``module`` 's subtree to
flatten into a single flattened parameter.
Attributes:
flat_param (Optional[FlatParameter]): The flattened parameter.
``flat_param`` is ``None`` either when (1) this wrapper manages no
parameters or (2) the wrapped module's parameters are unflattened.
_fpw_module (nn.Module): The wrapped module.
_flat_param_handle (FlatParamHandle): A handle for the flattened
parameter; only present if this wrapper manages parameters.
"""
def __init__(
self,
module: nn.Module,
params: List[nn.Parameter],
) -> None:
super().__init__()
self._fpw_module = module
self.flat_param = None
# Register hooks to clean parameter names for state dict (even if this
# wrapper itself manages no parameters since it must clean names from
# submodules)
self._register_state_dict_hook(_post_state_dict_hook)
self._register_load_state_dict_pre_hook(_pre_load_state_dict_hook)
if len(params) == 0:
return
self._flat_param_handle = FlatParamHandle(params, module)
# Defining `self.flat_param` registers the `FlatParameter` and makes it
# visible to `named_parameters()`
self.flat_param = self._flat_param_handle.flat_param
assert getattr(self, FPW_MODULE) is self._fpw_module
assert getattr(self, FLAT_PARAM) is self.flat_param
@property
def has_params(self) -> bool:
"""Returns whether this wrapper manages any parameters."""
return hasattr(self, "_flat_param_handle")
@property
def handle(self) -> FlatParamHandle:
assert hasattr(self, "_flat_param_handle"), \
"Accessing the handle of a `FlattenParamsWrapper` that does not " \
"manage any parameters"
return self._flat_param_handle
@property
def module(self) -> Any:
"""Returns the wrapped module (like DDP)."""
return self._fpw_module
@contextlib.contextmanager
def unflatten_as_params(self) -> Generator:
"""
Assumes that the flattened parameter is unsharded. When in the context,
unflattens the original parameters as ``nn.Parameter`` views into the
flattened parameter and de-registers the flattened parameter. After the
context, restores the original parameters as ``Tensor`` views into the
flattened parameter and re-registers the flattened parameter.
"""
if getattr(self, "flat_param", None) is None:
yield
else:
# De-register the `FlatParameter` from this wrapper to hide it from
# `named_parameters()` (though it still exists in memory)
del self.flat_param
try:
with self._flat_param_handle.unflatten_as_params():
yield
finally:
# Re-register the `FlatParameter`
self.flat_param = self._flat_param_handle.flat_param
def __getattr__(self, name: str) -> Any:
"""Forward missing attributes of this wrapper to the wrapped module."""
try:
return super().__getattr__(name) # defer to `nn.Module`'s logic
except AttributeError:
return getattr(self.module, name) # fall back to the wrapped module
def __getitem__(self, key: int) -> Any:
"""Forward indexing calls to the wrapped module in case the wrapped
module is an ``nn.Sequential``."""
return self.module.__getitem__(key)
def forward(self, *inputs: Any, **kwinputs: Any) -> Any:
if self.flat_param is not None:
self._flat_param_handle._unflatten(as_params=False)
return self.module(*inputs, **kwinputs)
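# Editorial note (illustrative sketch, not part of the original module):
# minimal usage of the wrapper, assuming all of a linear layer's parameters
# are flattened together:
#
# >>> # xdoctest: +SKIP("illustrative only")
# >>> lin = nn.Linear(4, 4)
# >>> fpw = FlattenParamsWrapper(lin, list(lin.parameters()))
# >>> fpw.has_params
# True
# >>> out = fpw(torch.randn(2, 4))  # forward runs the wrapped module
# >>> with fpw.unflatten_as_params():
# ...     pass  # original parameters temporarily visible as ``nn.Parameter``s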
| pytorch-master | torch/distributed/fsdp/flatten_params_wrapper.py |
import collections
import contextlib
import copy
import functools
import itertools
import math
import traceback
import warnings
from contextlib import contextmanager
from dataclasses import dataclass
from enum import Enum, auto
from typing import (
Any,
Callable,
Dict,
Generator,
Iterable,
Iterator,
List,
Mapping,
NamedTuple,
Optional,
Set,
Tuple,
Union,
cast,
)
import torch
import torch.distributed as dist
import torch.distributed.algorithms._checkpoint.checkpoint_wrapper as checkpoint_wrapper
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.distributed import ProcessGroup
from torch.distributed._shard.sharded_tensor import (
Shard,
ShardedTensor,
init_from_local_shards,
)
from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import (
_CHECKPOINT_PREFIX,
)
from torch.distributed.algorithms._comm_hooks import (
LOW_PRECISION_HOOKS,
default_hooks,
)
from torch.distributed.distributed_c10d import _get_default_group
from torch.distributed.utils import (
_replace_by_prefix,
_sync_params_and_buffers,
_to_kwargs,
)
from torch.nn.parameter import Parameter
from ._optim_utils import (
_broadcast_pos_dim_tensor_states,
_broadcast_processed_optim_state_dict,
_flatten_full_optim_state_dict,
_get_flat_param_to_fsdp_module,
_get_param_id_to_param,
_get_param_to_param_id,
_OptimStateKey,
_process_pos_dim_tensor_state,
_rekey_sharded_optim_state_dict,
_unflatten_optim_state,
)
from ._utils import (
_apply_to_modules,
_apply_to_tensors,
_contains_batchnorm,
_override_batchnorm_mixed_precision,
)
from .flat_param import FlatParameter, FlatParamHandle
from .flatten_params_wrapper import (
FLAT_PARAM,
FPW_MODULE,
FlattenParamsWrapper,
)
from .wrap import (
ParamExecOrderWrapPolicy,
_or_policy,
_recursive_wrap,
_wrap_batchnorm_individually,
)
_TORCHDISTX_AVAIL = True
try:
from torchdistx import deferred_init, fake
except ImportError:
_TORCHDISTX_AVAIL = False
_TORCH_FX_AVAIL = True
if not hasattr(torch, "fx"):
_TORCH_FX_AVAIL = False
if _TORCH_FX_AVAIL:
from ._symbolic_trace import (
TracingConfig,
_init_execution_info,
_patch_tracer,
)
__all__ = [
"FullyShardedDataParallel", "ShardingStrategy", "MixedPrecision",
"CPUOffload", "BackwardPrefetch", "StateDictType", "StateDictConfig",
"FullStateDictConfig", "LocalStateDictConfig", "ShardedStateDictConfig",
"OptimStateKeyType", "TrainingState_", "p_assert", "clean_tensor_name",
]
FSDP_WRAPPED_MODULE = "_fsdp_wrapped_module"
FSDP_PREFIX = FSDP_WRAPPED_MODULE + "." + FPW_MODULE + "."
_PARAM_BROADCAST_BUCKET_SIZE = int(250 * 1024 * 1024)
def _default_meta_device_init_fn(module):
"""
Default initializer for modules initialized on the meta device.
"""
# TODO: move module to device_id here once device_id is available.
module.to_empty(device=torch.cuda.current_device())
try:
with torch.no_grad():
module.reset_parameters()
except BaseException as e:
warnings.warn(
f"Unable to call reset_parameters() for module on meta device with error {str(e)}. "
"Please ensure your module implements a ``reset_parameters`` function."
)
raise e
class ShardingStrategy(Enum):
"""
This specifies the sharding strategy to be used for distributed training by
:class:`FullyShardedDataParallel`.
FULL_SHARD: Parameters, gradients, and optimizer states are sharded. For
the parameters, this algorithm all-gathers before the forward,
reshards after the forward, all-gathers before the backward
computation, and reshards after the backward computation. The
gradients are synchronized and sharded via reduce-scatter after
the backward computation. The sharded optimizer states are
updated locally.
SHARD_GRAD_OP: Gradients and optimizer states are sharded during
computation, and additionally parameters are sharded outside
computation. For the parameters, this algorithm all-gathers
before the forward, does not reshard after the forward, and
only reshards after the backward computation. The gradients
are synchronized and sharded via reduce-scatter after the
backward computation. The sharded optimizer states are
updated locally. Inside ``no_sync()``, the parameters are
not resharded after the backward computation.
NO_SHARD: Parameters, gradients, and optimizer states are not sharded but
instead replicated across ranks, similar to PyTorch's
``DistributedDataParallel`` API. The gradients are synchronized
via all-reduce after the backward computation. The unsharded
optimizer states are updated locally.
HYBRID_SHARD (future support): Apply ``FULL_SHARD`` intra-node and
``NO_SHARD`` inter-node.
"""
FULL_SHARD = auto()
SHARD_GRAD_OP = auto()
NO_SHARD = auto()
# TODO
# HYBRID_SHARD = auto()
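# Editorial note (illustrative sketch, not part of the original module):
# the strategy is selected via the constructor, e.g. to keep parameters
# unsharded between forward and backward:
#
# >>> # xdoctest: +SKIP("undefined variables")
# >>> model = FullyShardedDataParallel(
# ...     my_module, sharding_strategy=ShardingStrategy.SHARD_GRAD_OP
# ... )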
@dataclass
class MixedPrecision:
"""
A config to enable mixed precision training with FullyShardedDataParallel.
This class can be constructed with three flags:
``param_dtype`` controls the precision of model parameters, inputs, and
therefore the precision under which computation happens. After forward
and backward passes, FSDP parameters point to full precision shards
that are kept in memory. Full precision parameters are always
checkpointed.
``reduce_dtype`` controls the precision under which gradient reduction
would occur, which can potentially be different than ``param_dtype``
for use cases such as communication efficiency.
``buffer_dtype`` controls the precision that buffers are cast to. Note
that buffers are unsharded and are cast in the first forward pass, and
remain in their reduced precision state even after forward/backward
passes. However, when taking checkpoints with ``state_dict``, buffers
are checkpointed in their full precision (and then restored back to
their reduced precision) as expected. Note that this checkpoint
support is currently limited to ``StateDictType.FULL_STATE_DICT``.
.. note:: In ``summon_full_params``, parameters are summoned in full
precision but buffers are not.
.. note:: Parameters and buffers are checkpointed in full precision. For
buffers, this is only guaranteed to work for ``StateDictType.FULL_STATE_DICT``.
.. note:: This API is experimental and subject to change.
.. note:: Specification of reduced precision types must be explicit, in that
if, for example, ``param_dtype`` is not specified, it will not be cast by
FSDP. Thus, a config such as ``MixedPrecision(reduce_dtype=torch.float16)``
will not cast buffers or parameters. Note that if a ``MixedPrecision``
config is specified without a ``reduce_dtype``, gradient communication
would occur in the ``param_dtype`` precision, if given, otherwise, in the
original parameter precision.
"""
# maintain a tensor of this dtype that the fp32 param shard will be cast to.
# Will control the precision of model params, inputs, and thus compute as
# well.
param_dtype: Optional[torch.dtype] = None
# Gradient communication precision.
reduce_dtype: Optional[torch.dtype] = None
# Buffer precision.
# TODO: buffer + param are usually of the same type, if user specifies
# param but not buffer, should we automatically make buffer be the same?
buffer_dtype: Optional[torch.dtype] = None
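# Editorial note (illustrative sketch, not part of the original module):
# a typical fp16 configuration casts parameters, gradient reduction, and
# buffers to half precision:
#
# >>> # xdoctest: +SKIP("undefined variables")
# >>> mp = MixedPrecision(
# ...     param_dtype=torch.float16,
# ...     reduce_dtype=torch.float16,
# ...     buffer_dtype=torch.float16,
# ... )
# >>> model = FullyShardedDataParallel(my_module, mixed_precision=mp)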
@dataclass
class CPUOffload:
"""
CPU offloading config. Currently, only parameter and gradient CPU
offload are supported.
offload_params: Offloading parameters to CPUs when these parameters are
not used for computation on GPUs. This implicitly enables
gradient offloading to CPUs in order for parameters and
gradients to be on the same device to work with the optimizer.
"""
offload_params: bool = False
class BackwardPrefetch(Enum):
"""
Specify where to prefetch next layer's full parameters
during backward pass.
BACKWARD_PRE: prefetch right before current layer's backward computation
starts; this approach will increase backward communication
and computation overlapping and potentially improve training
performance, but it may increase the peak memory usage as
the prefetched full parameters will be kept in the GPU memory
until next layer's backward computation is done.
BACKWARD_POST: prefetch right after current layer's backward computation finishes,
this approach will not increase peak memory as prefetching happens
after current layer's full parameters are freed.
It could potentially improve backward communication and computation
overlapping as it avoids all_gather and reduce_scatter blocking
each other in the single NCCL stream. However, based on our experiments,
for some models, the post-backward hook firing order is not always
the reverse of the forward computation order, so this
approach may prefetch full parameters for layers ahead of the next layer;
this 'ahead' all_gather could delay the next layer's all_gather in the
single NCCL stream and delay the next layer's computation. So it may
cause some performance regression for some models.
"""
BACKWARD_PRE = auto()
BACKWARD_POST = auto()
# TODO, BACKWARD_PRE_CPU, prefetch full parameters and keep them in the CPU memory
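# Editorial note (illustrative sketch, not part of the original module):
# prefetching is enabled through the FSDP constructor, e.g.:
#
# >>> # xdoctest: +SKIP("undefined variables")
# >>> model = FullyShardedDataParallel(
# ...     my_module, backward_prefetch=BackwardPrefetch.BACKWARD_PRE
# ... )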
class TrainingState_(Enum):
"""
Simple enum to indicate what state FSDP is in. Used for asserting
to make sure APIs are called in the correct state.
.. note::
``BACKWARD_PRE`` and ``BACKWARD_POST`` states are used to ensure we
receive backward hooks in the correct order. It is used to catch
unexpected order of hooks being called (likely due to our
hook registration logic or autograd engine logic changes).
"""
IDLE = auto()
FORWARD = auto()
BACKWARD_PRE = auto()
BACKWARD_POST = auto()
SUMMON_FULL_PARAMS = auto()
class StateDictType(Enum):
"""
This enum indicates which type of ``state_dict`` the FSDP module is
currently processing (returning or loading).
The default value is FULL_STATE_DICT to comply with the PyTorch convention.
.. note::
FSDP currently supports three types of ``state_dict``:
1. ``state_dict/load_state_dict``: this pair of APIs return and load
the non-sharded, unflattened parameters. The semantics are the
same as using DDP.
2. ``_local_state_dict/_load_local_state_dict``: this pair of APIs return
and load local sharded, flattened parameters. The values returned
by ``_local_state_dict`` can be directly used by FSDP and are only
meaningful to FSDP (because parameters are flattened). Note that
these APIs are meant for use via the :func:`state_dict_type`
context manager as follows:
>>> # xdoctest: +SKIP("undefined variables")
>>> with fsdp.state_dict_type(StateDictType.LOCAL_STATE_DICT):
... state = fsdp.state_dict() # gets the local state dict
3. ``_sharded_state_dict/_load_sharded_state_dict``: this pair of APIs
return and load sharded, unflattened parameters. The ``state_dict``
returned by ``sharded_state_dict`` can be used by all other parallel
schemes (resharding may be required).
"""
FULL_STATE_DICT = auto()
LOCAL_STATE_DICT = auto()
SHARDED_STATE_DICT = auto()
@dataclass
class StateDictConfig:
"""
``StateDictConfig`` is the base class for all state_dict configuration classes.
Users should instantiate a child version (e.g. ``FullStateDictConfig``) in
order to configure settings for the particular type of ``state_dict``
implementation FSDP will use.
"""
pass
@dataclass
class FullStateDictConfig(StateDictConfig):
"""
``FullStateDictConfig`` is a config class meant to be used with
``StateDictType.FULL_STATE_DICT``. Currently, it accepts two parameters,
``offload_to_cpu`` and ``rank0_only`` which can be configured to offload
the full ``state_dict`` to CPU and to materialize the ``state_dict`` on
rank 0 only. When used, it is recommended to enable both of these flags
together to optimize memory savings when taking checkpoints. Note that
this config class is meant to be used via the :func:`state_dict_type`
context manager as follows:
>>> # xdoctest: +SKIP("undefined variables")
>>> fsdp = FSDP(model, auto_wrap_policy=...)
>>> cfg = FullStateDictConfig(offload_to_cpu=True, rank0_only=True)
>>> with FullyShardedDataParallel.state_dict_type(fsdp, StateDictType.FULL_STATE_DICT, cfg):
>>> state = fsdp.state_dict()
>>> # state will be empty on non rank 0 and contain CPU tensors on rank 0.
>>> # To reload checkpoint for inference, finetuning, transfer learning, etc:
>>> model = model_fn() # Initialize model on CPU in preparation for wrapping with FSDP
>>> if dist.get_rank() == 0:
>>> # Load checkpoint only on rank 0 to avoid memory redundancy
>>> state_dict = torch.load("my_checkpoint.pt")
>>> model.load_state_dict(state_dict)
>>> # All ranks initialize FSDP module as usual. ``sync_module_states`` argument
>>> # communicates loaded checkpoint states from rank 0 to rest of the world.
>>> fsdp = FSDP(model, device_id=torch.cuda.current_device(), auto_wrap_policy=..., sync_module_states=True)
>>> # After this point, all ranks have FSDP model with loaded checkpoint.
"""
offload_to_cpu: bool = False
rank0_only: bool = False
@dataclass
class LocalStateDictConfig(StateDictConfig):
pass
@dataclass
class ShardedStateDictConfig(StateDictConfig):
pass
_state_dict_type_to_config = {
StateDictType.FULL_STATE_DICT: FullStateDictConfig,
StateDictType.LOCAL_STATE_DICT: LocalStateDictConfig,
StateDictType.SHARDED_STATE_DICT: ShardedStateDictConfig,
}
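# Editorial note (illustrative sketch, not part of the original module):
# each ``StateDictType`` pairs with its config class via the mapping above;
# following the pattern shown for ``FullStateDictConfig``, a sharded state
# dict would be requested roughly as:
#
# >>> # xdoctest: +SKIP("undefined variables")
# >>> with FullyShardedDataParallel.state_dict_type(
# ...     fsdp_model, StateDictType.SHARDED_STATE_DICT, ShardedStateDictConfig()
# ... ):
# ...     sharded_state = fsdp_model.state_dict()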
class OptimStateKeyType(Enum):
PARAM_NAME = auto()
PARAM_ID = auto()
class _ExecOrderWarnStatus(Enum):
"""Used internally for execution order validation."""
NONE = auto() # no deviation yet
WARNING = auto() # deviated this iteration; currently issuing warnings
WARNED = auto() # deviated in a previous iteration
class _ExecOrderData():
"""
This contains the data used for validating execution order across ranks.
Attributes:
_all_flat_params (List[FlatParameter]): A :class:`list` of all
flattened parameters contained in the FSDP module hierarchy with
the list index implicitly giving a unique parameter index.
_param_to_unflat_param_names (Dict[FlatParameter, List[str]]): A
mapping from flattened parameter to the comprising unflattened
parameters' names.
is_first_iter (bool): Whether executing in the first iteration or not.
param_order (List[int]): Order that parameters participate in the
forward pass; constructed on the first iteration and validated
against in subsequent iterations.
index (int): Index tracking the position in ``param_order``
when validating the forward pass execution order in subsequent
iterations.
warn_status (_ExecOrderWarnStatus): To avoid flooding the console, we
only issue warnings throughout the first deviating iteration and no
longer check thereafter; this tracks the warning status.
"""
def __init__(self) -> None:
self._all_flat_params: List[FlatParameter] = []
self._param_to_unflat_param_names: Dict[FlatParameter, List[str]] = {}
# Modified in the first iteration:
self.is_first_iter: bool = True
self.param_order: List[int] = []
# Modified in the subsequent iterations:
self.index: int = 0
self.warn_status: _ExecOrderWarnStatus = _ExecOrderWarnStatus.NONE
def init(self, root_module: "FullyShardedDataParallel"):
assert root_module._is_root, "This data structure should only be " \
"initialized on an FSDP root module"
# Save all `FlatParameter`s in `root_module`'s hierarchy to
# `_all_flat_params` instead of re-materializing each time to avoid the
# result depending on the calling context (e.g. when some parameters
# have been rebuilt)
self._all_flat_params = [
param for param in root_module.parameters()
if isinstance(param, FlatParameter)
]
self._param_to_unflat_param_names = cast(
Dict[FlatParameter, List[str]],
_get_param_to_unflat_param_names(root_module)
)
def get_param_index(self, param: FlatParameter) -> int:
"""Returns a unique non-negative parameter index for ``param`` if it is
valid or -1 otherwise. Critically, this index assignment must be the
same across ranks."""
assert isinstance(param, FlatParameter), \
f"Expects `param` is a `FlatParameter` but got {type(param)}"
for i, p in enumerate(self._all_flat_params):
if p is param:
return i
return -1
def get_param(self, param_index: int) -> Optional[FlatParameter]:
"""Returns the parameter corresponding to ``param_index`` or ``None``
if the index is invalid."""
for i, p in enumerate(self._all_flat_params):
if i == param_index:
return p
return None
def get_unflat_param_names(self, param_index: int) -> List[str]:
"""Returns a :class:`list` of unflattened parameter names comprising
the flattened parameter with index ``param_index`` or an empty
:class:`list` if ``param_index`` is invalid."""
param = self.get_param(param_index)
if param is None:
return []
assert param in self._param_to_unflat_param_names, \
"Internal data structures out of sync; check `init()`"
return self._param_to_unflat_param_names[param]
def reset(self):
"""Called in :meth:`_wait_for_post_backward` to reset data for the next
iteration."""
self.is_first_iter = False
self.index = 0
# `reset()` marks the end of an iteration, so transition if needed
if self.warn_status == _ExecOrderWarnStatus.WARNING:
self.warn_status = _ExecOrderWarnStatus.WARNED
class FullyShardedDataParallel(nn.Module):
"""
A wrapper for sharding Module parameters across data parallel workers. This
is inspired by `Xu et al.`_ as well as the ZeRO Stage 3 from DeepSpeed_.
FullyShardedDataParallel is commonly shortened to FSDP.
.. _`Xu et al.`: https://arxiv.org/abs/2004.13336
.. _DeepSpeed: https://www.deepspeed.ai/
Example::
>>> # xdoctest: +SKIP("undefined variables")
>>> import torch
>>> from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
>>> torch.cuda.set_device(device_id)
>>> sharded_module = FSDP(my_module)
>>> optim = torch.optim.Adam(sharded_module.parameters(), lr=0.0001)
>>> x = sharded_module(x, y=3, z=torch.Tensor([1]))
>>> loss = x.sum()
>>> loss.backward()
>>> optim.step()
.. warning::
The optimizer must be initialized *after* the module has been wrapped,
since FSDP will shard parameters in-place and this will break any
previously initialized optimizers.
.. warning::
If the destination CUDA device has ID ``dev_id``, either (1)
``module`` should already be placed on that device, (2) the device
should be set using ``torch.cuda.set_device(dev_id)``, or (3)
``dev_id`` should be passed into the ``device_id`` constructor
argument. This FSDP instance's compute device will be that destination
device. For (1) and (3), the FSDP initialization always occurs on GPU.
For (2), the FSDP initialization happens on ``module`` 's current
device, which may be CPU.
.. warning::
FSDP currently does not support gradient accumulation outside
``no_sync()`` when using CPU offloading. Trying to do so yields
incorrect results since FSDP will use the newly-reduced gradient
instead of accumulating with any existing gradient.
.. warning::
Changing the original parameter variable names after construction will
lead to undefined behavior.
.. warning::
Passing in the ``sync_module_states=True`` flag requires the module to be put
on GPU or to use the ``device_id`` argument to specify a CUDA device that
FSDP will move module to. This is because ``sync_module_states=True``
requires GPU communication.
.. warning::
As of PyTorch 1.12, FSDP only offers limited support for shared parameters
(for example, setting one ``Linear`` layer's weight to another's). In
particular, modules that share parameters must be wrapped as part of the
same FSDP unit. If enhanced shared parameter support is needed for your
use case, please ping https://github.com/pytorch/pytorch/issues/77724
.. note::
Inputs into the FSDP ``forward`` function will be moved to the compute device
(the same device the FSDP module is on) before running ``forward``, so the user
does not have to manually move inputs from CPU to GPU.
Args:
module (nn.Module):
module to be wrapped with FSDP.
process_group (Optional[ProcessGroup]):
process group for sharding
sharding_strategy (Optional[ShardingStrategy]):
Configures the sharding algorithm; different sharding algorithms trade
off memory savings against communication overhead. ``FULL_SHARD``
will be chosen if ``sharding_strategy`` is not specified.
cpu_offload (Optional[CPUOffload]):
CPU offloading config. Currently, only parameter and gradient CPU
offload is supported. It can be enabled via passing in
``cpu_offload=CPUOffload(offload_params=True)``. Note that this
currently implicitly enables gradient offloading to CPU in order for
params and grads to be on same device to work with optimizer. This
API is subject to change. Default is ``None`` in which case there
will be no offloading.
auto_wrap_policy (Optional[Callable[[nn.Module, bool, int], bool]]):
A callable specifying a policy to recursively wrap layers with FSDP.
Note that this policy currently will only apply to child modules of
the passed-in module. The remaining modules are always wrapped in
the returned FSDP root instance.
``size_based_auto_wrap_policy`` written in ``torch.distributed.fsdp.wrap`` is
an example of ``auto_wrap_policy`` callable, this policy wraps layers
with the number of parameters larger than 100M. ``transformer_auto_wrap_policy``
written in ``torch.distributed.fsdp.wrap`` is an example of ``auto_wrap_policy``
callable for transformer-like model architectures. Users can supply the customized
``auto_wrap_policy`` callable that should accept following arguments:
``module: nn.Module``, ``recurse: bool``, ``unwrapped_params: int``, and return
a ``bool`` specifying whether the passed-in ``module`` should be wrapped
(if ``recurse=False``) or whether we should recurse down the subgraph of ``module``
children (if ``recurse=True``). Extra customized arguments could be added to
the customized ``auto_wrap_policy`` callable as well. It is a good practice to
print out the sharded model and check whether the sharded model is what
the application wants and then adjust accordingly.
Example::
>>> def custom_auto_wrap_policy(
>>> module: nn.Module,
>>> recurse: bool,
>>> unwrapped_params: int,
>>> # These are customizable for this policy function.
>>> min_num_params: int = int(1e8),
>>> ) -> bool:
>>> return unwrapped_params >= min_num_params
>>> # Configure a custom min_num_params
>>> my_auto_wrap_policy = functools.partial(custom_auto_wrap_policy, min_num_params=1e5)
backward_prefetch (Optional[BackwardPrefetch]):
This is an experimental feature that is subject to change in the
near future. It allows users to enable two different backward_prefetch
algorithms to help backward communication and computation overlapping.
The pros and cons of each algorithm are explained in the ``BackwardPrefetch`` class.
mixed_precision (Optional[MixedPrecision]): A ``MixedPrecision`` instance
describing the mixed precision training config to be used. ``MixedPrecision``
supports configuring parameter, buffer, and gradient communication dtype. Note
that only floating point data is cast to the reduced precision. This allows
users potential memory saving and training speedup while trading off
accuracy during model training. If ``None``, no mixed precision is applied.
Note that if ``mixed_precision`` is enabled for FSDP model that
contains ``BatchNorm`` with ``auto_wrap_policy``, FSDP will take
care to disable mixed precision for ``BatchNorm`` units by wrapping
them separately in their own FSDP unit with ``mixed_precision=None``.
This is done because several ``BatchNorm`` kernels do not implement
reduced type support at the moment. If individually wrapping the model,
users must take care to set ``mixed_precision=None`` for
``BatchNorm`` units.
(Default: ``None``)
ignored_modules (Optional[Iterable[torch.nn.Module]]): Modules whose
own parameters and child modules' parameters and buffers are
ignored by this instance. None of the modules directly in
``ignored_modules`` should be :class:`FullyShardedDataParallel`
instances, and any child modules that are already-constructed
:class:`FullyShardedDataParallel` instances will not be ignored if
they are nested under this instance. This argument may be used to
avoid sharding specific parameters at module granularity when using an
``auto_wrap_policy`` or if parameters' sharding is not managed by
FSDP. (Default: ``None``)
param_init_fn (Optional[Callable[[nn.Module], None]]):
A ``Callable[torch.nn.Module] -> None`` that
specifies how modules that are currently on the meta device should be initialized
onto an actual device. Note that as of v1.12, we detect modules on the meta
device via ``is_meta`` check and apply a default initialization that calls
``reset_parameters`` method on the passed in ``nn.Module`` if ``param_init_fn``
is not specified, otherwise we run ``param_init_fn`` to initialize the passed
in ``nn.Module``. In particular, this means that if ``is_meta=True`` for any
parameters of modules that will be wrapped with FSDP and ``param_init_fn``
is not specified, we assume your module properly implements a ``reset_parameters()``
and will throw errors if not. Note that additionally, we offer support for modules
initialized with torchdistX's (https://github.com/pytorch/torchdistX)
``deferred_init`` API. In this case, deferred modules would be initialized
by a default initialization function that calls torchdistX's
``materialize_module``, or the passed in ``param_init_fn``, if it is not
``None``. The same ``Callable`` is applied to initialize all meta modules.
Note that this initialization function is applied before doing any FSDP sharding
logic.
Example::
>>> # xdoctest: +SKIP("undefined variables")
>>> module = MyModule(device="meta")
>>> def my_init_fn(module):
>>> # responsible for initializing a module, such as with reset_parameters
>>> ...
>>> fsdp_model = FSDP(module, param_init_fn=my_init_fn, auto_wrap_policy=size_based_auto_wrap_policy)
>>> print(next(fsdp_model.parameters()).device) # current CUDA device
>>> # With torchdistX
>>> module = deferred_init.deferred_init(MyModule, device="cuda")
>>> # Will initialize via deferred_init.materialize_module().
>>> fsdp_model = FSDP(module, auto_wrap_policy=size_based_auto_wrap_policy)
device_id (Optional[Union[int, torch.device]]): An ``int`` or ``torch.device``
describing the CUDA device the FSDP module should be moved to, determining where
initialization such as sharding takes place. If this argument is not specified
and ``module`` is on CPU, we will move ``module`` to current CUDA device for faster
initialization and move ``module`` back to CPU before returning.
If specified, resulting FSDP instances will reside on this device.
Note that if ``device_id`` is specified but ``module`` is already
on a different CUDA device, an error will be thrown. (Default: ``None``)
sync_module_states (bool): If ``True``, each individually wrapped FSDP unit will broadcast
module parameters from rank 0 to ensure they are the same across all ranks after
initialization. This helps ensure model parameters are the same across ranks
before starting training, but adds communication overhead to ``__init__``, as at least
one broadcast is triggered per individually wrapped FSDP unit.
This can also help load checkpoints taken by ``state_dict`` and to be loaded by
``load_state_dict`` in a memory efficient way. See documentation for
:class:`FullStateDictConfig` for an example of this. (Default: ``False``)
"""
def __init__(
self,
module: nn.Module,
process_group: Optional[ProcessGroup] = None,
sharding_strategy: Optional[ShardingStrategy] = None,
cpu_offload: Optional[CPUOffload] = None,
auto_wrap_policy: Optional[Callable] = None,
backward_prefetch: Optional[BackwardPrefetch] = None,
mixed_precision: Optional[MixedPrecision] = None,
ignored_modules: Optional[Iterable[torch.nn.Module]] = None,
param_init_fn: Optional[Callable[[nn.Module], None]] = None,
device_id: Optional[Union[int, torch.device]] = None,
sync_module_states: bool = False,
forward_prefetch: bool = False,
):
if isinstance(auto_wrap_policy, ParamExecOrderWrapPolicy):
self._init_param_exec_order_wrap_policy(
module=module,
process_group=process_group,
sharding_strategy=sharding_strategy,
cpu_offload=cpu_offload,
auto_wrap_policy=auto_wrap_policy,
backward_prefetch=backward_prefetch,
mixed_precision=mixed_precision,
ignored_modules=ignored_modules,
param_init_fn=param_init_fn,
device_id=device_id,
sync_module_states=sync_module_states,
)
return
torch._C._log_api_usage_once("torch.distributed.fsdp")
super().__init__()
self._handles: List[FlatParamHandle] = []
# Validate the ignored modules and derive the ignored parameters/buffers
ignored_modules = self._get_ignored_modules(module, ignored_modules)
self._ignored_modules = ignored_modules
ignored_params, ignored_param_names = \
self._get_ignored_params(module, ignored_modules)
buffer_names = self._get_buffer_names(module)
# Compute the names to ignore for full state dict cloning (i.e. those
# of the ignored modules' parameters and of all modules' buffers)
self._ignored_param_names = ignored_param_names
self._buffer_names = buffer_names
# NOTE: Since the names are computed at construction time, if the user
# changes them later, then FSDP will not properly ignore them. However,
# the `FlatParameter` implementation already relies on this assumption.
# We do this at construction time since we want the fully prefixed
# parameter names matching the keys in the model state dict (namely,
# including the wrapped module's name in the prefix), which may be done
# most non-intrusively here before flattening.
# if auto_wrap_policy is specified, submodules should not be
# already wrapped, otherwise we'd attempt to double wrap them resulting
# in errors.
if auto_wrap_policy is not None:
self._check_wrapped(
module,
check_fn=lambda mod: not isinstance(mod, FullyShardedDataParallel),
err_fn=lambda mod: f"Expected {mod} to NOT be FullyShardedDataParallel if auto_wrap is enabled.",
)
if mixed_precision is not None and _contains_batchnorm(module):
_override_batchnorm_mixed_precision(module)
policy_to_use = functools.partial(
_or_policy,
policies=[_wrap_batchnorm_individually, auto_wrap_policy]
)
warnings.warn(
"Mixed precision was specified for FSDP module with"
" batchnorm submodules wrapped via ``auto_wrap_policy``."
" BatchNorm units will be wrapped as a separate FSDP unit,"
" with mixed_precision disabled (i.e. set to ``None``)"
" as several BatchNorm kernels would raise errors when"
" operating on reduced precision inputs."
)
else:
policy_to_use = auto_wrap_policy
_recursive_wrap(
module,
auto_wrap_policy=policy_to_use,
wrapper_cls=FullyShardedDataParallel,
ignored_modules=ignored_modules,
ignored_params=ignored_params,
# Note that we have the recursive_wrap skip wrapping for
# the outermost (this) module otherwise it will result in a
# double-wrap causing issues.
only_wrap_children=True,
# FSDP arguments follow.
process_group=process_group,
sharding_strategy=sharding_strategy,
cpu_offload=cpu_offload,
backward_prefetch=backward_prefetch,
forward_prefetch=forward_prefetch,
mixed_precision=mixed_precision,
param_init_fn=param_init_fn,
device_id=device_id,
sync_module_states=sync_module_states,
)
self.process_group = process_group or _get_default_group()
self.rank = self.process_group.rank()
self.world_size = self.process_group.size()
if device_id is not None:
self.device_id = (
device_id if isinstance(device_id, torch.device)
else torch.device(device_id)
)
# If user passed in something like torch.device("cuda"),
# device index of current device is unclear, make it explicit.
if self.device_id == torch.device("cuda"):
warnings.warn(
f"Passed in {self.device_id} does not have explicit index, "
f"setting it to current index: {torch.cuda.current_device()}. "
"If this is not correct, please explicitly call torch.cuda.set_device()"
"before FSDP initialization or pass in explicit device index as device_id argument."
)
self.device_id = torch.device("cuda", torch.cuda.current_device())
else:
self.device_id = None
is_meta_module = any(p.is_meta for p in module.parameters())
is_torchdistX_deferred_init = (
not is_meta_module and _TORCHDISTX_AVAIL
and any(fake.is_fake(p) for p in module.parameters())
)
def _run_param_init_fn():
# Call user-specified initialization function.
if not callable(param_init_fn):
raise ValueError(
f"Expected {param_init_fn} to be callable, but got {type(param_init_fn)}"
)
param_init_fn(module)
if is_meta_module:
if param_init_fn is not None:
_run_param_init_fn()
else:
# Call default initialization function that is dependent on
# reset_parameters.
_default_meta_device_init_fn(module)
elif is_torchdistX_deferred_init:
assert _TORCHDISTX_AVAIL, "Got torchdistX initialized module but torchdistX lib is not available."
if param_init_fn is not None:
_run_param_init_fn()
else:
# Call default torchdistX initialization function. Omit re-initialization of FSDP submodules
# which is unnecessary.
check_fn = lambda k: not isinstance(k, FullyShardedDataParallel) # noqa: E731
deferred_init.materialize_module(module, check_fn=check_fn)
# Check that module was placed onto a single device.
module_devices = set(
p.device for p in module.parameters() if p not in ignored_params and not isinstance(p, FlatParameter)
)
if len(module_devices) > 1:
raise RuntimeError(
f"FSDP only supports single device modules, but got params on {module_devices}"
)
# Move module appropriately depending on device_id and whether module is on CPU.
self._move_module_if_needed(module)
# device for computation, if module is on GPU, use module.device;
# if module is on CPU, use current device;
self.compute_device = _get_default_cuda_device(module)
# if device_id is specified, ensure it is the same
assert (
self.device_id is None or self.compute_device == self.device_id
), f"Inconsistent compute_device and device_id: {self.compute_device} vs {self.device_id}"
# Enum to indicate if we're in the forward/backward pass, idle, etc.
self.training_state = TrainingState_.IDLE
# setting two factors to avoid underflow and overflow
self.gradient_predivide_factor: float = self._get_gradient_predivide_factor(
self.world_size
)
self.gradient_postdivide_factor: float = (
self.world_size / self.gradient_predivide_factor
)
self.cpu_offload = cpu_offload or CPUOffload()
self.backward_prefetch = backward_prefetch
self.forward_prefetch = forward_prefetch
self.sharding_strategy = sharding_strategy or ShardingStrategy.FULL_SHARD
self.mixed_precision = mixed_precision
# Original buffer type (mapping since all buffers may not be of same type). In
# the case of mixed precision training, this is used to restore buffers
# to their original type (which may not be the same as that of the
# parameters in the model) when checkpointing.
self._orig_buffer_dtypes: Dict[str, torch.dtype] = {}
# Only handle params which are not already sharded. This enables
# sharding individual layers of a Module, with an outer wrapper to
# shard any leftover parameters.
params = [
p for p in module.parameters()
if p not in ignored_params and not isinstance(p, FlatParameter)
]
if sync_module_states:
if params != [] and params[0].device == torch.device("cpu"):
raise ValueError(
"Module has CPU parameters, but sync_module_states=True is specified."
"This only works for GPU module, please specify `device_id` argument or move"
" module to GPU before init."
)
# Collect buffers we have to synchronize, avoiding buffers that have already
# been synchronized to avoid redundant synchronization.
bufs_to_sync = []
for buf in module.buffers():
if not getattr(buf, '_fsdp_has_been_sync', False):
buf._fsdp_has_been_sync = True
bufs_to_sync.append(buf.detach())
states_to_sync = [param.detach() for param in params]
states_to_sync.extend(bufs_to_sync)
_sync_params_and_buffers(
process_group=self.process_group,
module_states=states_to_sync,
# Same bucket size as DDP
broadcast_bucket_size=_PARAM_BROADCAST_BUCKET_SIZE,
src=0,
)
self._fsdp_wrapped_module = FlattenParamsWrapper(module, params)
assert getattr(self, FSDP_WRAPPED_MODULE) is self._fsdp_wrapped_module
self.params = []
if self._fsdp_wrapped_module.has_params:
self.params.append(self._fsdp_wrapped_module.flat_param)
self._register_param_handle(self._fsdp_wrapped_module.handle)
# Shard module parameters in place
self._shard_parameters()
# Check that the sharding logic was applied to all parameters by
# checking that the original module parameters have been replaced by
# `Tensor` views and are no longer `nn.Parameter`s
for n, p in self.named_parameters():
if p not in ignored_params and not isinstance(p, FlatParameter):
raise RuntimeError(
f"found unflattened parameter: {n} ; {p.size()} {p.__class__}"
)
self._reset_lazy_init()
# Flag indicating if we require gradient reduction in the backward
# pass (set to `False` in the `no_sync()` context manager)
self._require_backward_grad_sync: bool = True
self._state_dict_type = StateDictType.FULL_STATE_DICT
self._state_dict_config = FullStateDictConfig()
# FSDP currently provides three different state_dicts. The actual
# state_dict that will be saved/loaded is decided by
# self._state_dict_type. And the main logic of each state_dict is
# implemented in the hook. Therefore, for each hook (post-save and
# pre-load), there is a dispatcher dictionary to dispatch the execution
# flow to the correct implementation.
self._register_state_dict_hook(self._post_state_dict_hook)
self._post_state_dict_hook_fn = {
StateDictType.FULL_STATE_DICT: self._full_post_state_dict_hook,
StateDictType.LOCAL_STATE_DICT: self._local_post_state_dict_hook,
StateDictType.SHARDED_STATE_DICT: self._sharded_post_state_dict_hook,
}
self._register_load_state_dict_pre_hook(
self._pre_load_state_dict_hook, with_module=True
)
self._pre_load_state_dict_hook_fn = {
StateDictType.FULL_STATE_DICT: self._full_pre_load_state_dict_hook,
StateDictType.LOCAL_STATE_DICT: self._local_pre_load_state_dict_hook,
StateDictType.SHARDED_STATE_DICT: self._sharded_pre_load_state_dict_hook,
}
self.register_load_state_dict_post_hook(
self._post_load_state_dict_hook
)
self._post_load_state_dict_hook_fn = {
StateDictType.FULL_STATE_DICT: self._full_post_load_state_dict_hook,
StateDictType.LOCAL_STATE_DICT: self._local_post_load_state_dict_hook,
StateDictType.SHARDED_STATE_DICT: self._sharded_post_load_state_dict_hook,
}
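        # For example, the post-save hook dispatches roughly as
        # `self._post_state_dict_hook_fn[self._state_dict_type](state_dict, prefix)`
        # (see `_post_state_dict_hook` below).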
# Flag to guard against preparing gradients multiple times per backward pass.
self._pre_backward_hook_has_run = False
# Used for prefetching all gather full params in post backward hook
self._need_rebuild_full_params = False
# If specified, offload parameter shard to CPU.
if self.cpu_offload.offload_params:
for p in self.params:
self._offload_to_cpu(p)
# For validating execution order across ranks
self._exec_order_data = _ExecOrderData()
        # Set the communication hook to the default
self.communication_hook = self._get_default_comm_hook()
self.communication_hook_state = self._get_default_comm_hook_state()
self._hook_registered = False
def _init_param_exec_order_wrap_policy(self, *args, **kwargs) -> None:
auto_wrap_policy = kwargs["auto_wrap_policy"]
module = kwargs["module"]
assert hasattr(auto_wrap_policy, "tracing_config")
if not _TORCH_FX_AVAIL:
assert (
auto_wrap_policy.tracing_config is None
), "tracing_config should be None when torch.fx is not enabled"
elif isinstance(
auto_wrap_policy.tracing_config,
TracingConfig
):
tracer = auto_wrap_policy.tracing_config.tracer
execution_info = _init_execution_info(module)
for m in module.modules():
assert not isinstance(
m, FullyShardedDataParallel
), "The input module of _patch_tracer should not contain FSDP modules"
with _patch_tracer(
tracer=tracer,
root_module=module,
execution_info=execution_info,
):
try:
tracer.trace(module, auto_wrap_policy.tracing_config.concrete_args)
except BaseException as e:
raise RuntimeError(
"tracer.trace failed inside _init_param_exec_order_wrap_policy"
f" with the error: {e}."
)
else:
assert (
auto_wrap_policy.tracing_config is None
), "tracing_config should either be an instance of TracingConfig or be None"
# The initial FSDP wrapping is done with auto_wrap_policy.init_policy
kwargs["auto_wrap_policy"] = auto_wrap_policy.init_policy
self.__init__(*args, **kwargs)
self._param_exec_order_policy: bool = True
# self._param_exec_order_prep_stage is set to True before we get the execution order
self._param_exec_order_prep_stage: bool = True
        # A list that stores the flattened parameters in parameter execution order
self._fsdp_params_exec_order: List[FlatParameter] = []
if _TORCH_FX_AVAIL and isinstance(
auto_wrap_policy.tracing_config,
TracingConfig
):
# Initialize a dict that maps each module to its parent FSDP wrap
module_to_fsdp: Dict[nn.Module, FullyShardedDataParallel] = dict()
for wrap in self.fsdp_modules(self):
module_to_fsdp[wrap.module] = wrap
# Set self._fsdp_params_exec_order based on execution_info.module_forward_order.
# TODO (linjianma): self._fsdp_params_exec_order will be set based on
# the parameter execution order rather than module_forward_order,
# once the non-recursive wrapping policy is fully implemented.
for m in execution_info.module_forward_order:
if m in module_to_fsdp:
for flat_param in module_to_fsdp[m].params:
self._fsdp_params_exec_order.append(flat_param)
self._param_exec_order_prep_stage = False
for m in self.modules():
if m is not self and isinstance(m, FullyShardedDataParallel):
                # Assignment by reference, so each child FSDP wrapper has access
                # to the _fsdp_params_exec_order of the root module
m._fsdp_params_exec_order = self._fsdp_params_exec_order
m._param_exec_order_policy = self._param_exec_order_policy
m._param_exec_order_prep_stage = self._param_exec_order_prep_stage
def _move_module_if_needed(self, module) -> None:
"""
        Moves the module to ``self.device_id`` if it is on CPU and ``device_id``
        is specified. If ``device_id`` is not specified and the module is on CPU,
        logs a warning recommending the ``device_id`` argument to speed up
        initialization.
"""
# Move module to device specified. Note that this is done prior to
# setting compute_device to ensure that they align.
if self.device_id is not None:
param = None
try:
# Get the next unflat param
param_gen = module.parameters()
while True:
param = next(param_gen)
if not isinstance(param, FlatParameter):
break
if param.device == torch.device("cpu"):
module = module.to(self.device_id)
except StopIteration:
# this FSDP instance manages no parameters.
pass
# For GPU modules, module device should match device_id.
if (
param is not None
and not isinstance(param, FlatParameter)
and param.device != self.device_id
):
raise RuntimeError(
f"Module on rank {self.rank} is given device_id argument "
f"{self.device_id}, but is on {param.device}. "
" Either move module before FSDP init or omit device_id argument."
)
else:
# device_id argument is not specified
# If module is on CPU, log a warning asking user to use `device_id` for faster
# GPU init.
try:
# Get the next unflat param
param_gen = module.parameters()
while True:
param = next(param_gen)
if not isinstance(param, FlatParameter):
break
if param.device == torch.device("cpu"):
warnings.warn(
"Module is put on CPU and will thus have flattening and sharding"
" run on CPU, which is less efficient than on GPU. We recommend passing in "
"`device_id` argument which will enable FSDP to put module on GPU device,"
" module must also be on GPU device to work with `sync_module_states=True` flag"
" which requires GPU communication."
)
except StopIteration:
# this FSDP instance manages no parameters
pass
def _init_reshard_after_forward(self):
if self.sharding_strategy == ShardingStrategy.FULL_SHARD:
# Free full params and keep shard only after forward
self.reshard_after_forward = True
elif self.sharding_strategy == ShardingStrategy.SHARD_GRAD_OP:
# Keep full params in the GPU memory until backward
# computation is done
self.reshard_after_forward = False
elif self.sharding_strategy == ShardingStrategy.NO_SHARD:
# self.reshard_after_forward is not used when NO_SHARD
# is set, just setting it as False here
self.reshard_after_forward = False
else:
raise RuntimeError(
"sharding_strategy only supports FULL_SHARD, SHARD_GRAD_OP and NO_SHARD right now."
)
def _get_ignored_modules(
self,
root_module: torch.nn.Module,
_ignored_modules: Any,
) -> Set[torch.nn.Module]:
"""
        Checks that ``_ignored_modules`` is an iterable of ``torch.nn.Module`` s
        without any :class:`FullyShardedDataParallel` instances and does not
        contain the top-level ``module`` itself, and then returns the modules
        and their children as a :class:`set`, excluding nested
        :class:`FullyShardedDataParallel` instances.
We include the child modules of modules in ``_ignored_modules`` to be
more intuitive since ignoring a module should ignore its child modules
as well, and we exclude :class:`FullyShardedDataParallel` instances
since ``self`` may be the intended root instance that manages them.
"""
if _ignored_modules is None:
return set()
msg_prefix = "`ignored_modules` should be an iterable of " \
"`torch.nn.Module`s "
try:
ignored_root_modules = set(_ignored_modules)
except TypeError:
raise TypeError(msg_prefix + f"but got {type(_ignored_modules)}")
for module in ignored_root_modules:
if not isinstance(module, torch.nn.Module):
raise TypeError(
msg_prefix + f"but got an iterable with {type(module)}"
)
if isinstance(module, FullyShardedDataParallel):
raise ValueError(
"`ignored_modules` should not include FSDP modules"
)
# Include child modules and exclude nested FSDP modules
ignored_modules = set(
child for module in ignored_root_modules
for child in module.modules()
if not isinstance(child, FullyShardedDataParallel) and
not isinstance(child, FlattenParamsWrapper)
)
if root_module in ignored_modules:
warnings.warn(
"Trying to ignore the top-level module passed into the FSDP "
"constructor itself will result in all parameters being "
f"ignored and is not supported: {module}"
)
for submodule in root_module.modules():
if isinstance(submodule, FullyShardedDataParallel):
assert hasattr(submodule, "_ignored_modules")
ignored_modules.update(submodule._ignored_modules)
return ignored_modules
def _get_ignored_params(
self,
root_module: torch.nn.Module,
ignored_modules: Set[torch.nn.Module],
) -> Tuple[Set[torch.nn.Parameter], Set[str]]:
"""
Returns the parameters of the modules in ``ignored_modules``,
excluding any :class:`FlatParameter` s and their fully prefixed names,
both as :class:`set` s.
Args:
root_module (torch.nn.Module): Top-level module passed into the
FSDP constructor from which to derive the fully prefixed names.
ignored_modules (Set[torch.nn.Module]): Modules to ignore.
"""
ignored_params = set(
p for m in ignored_modules for p in m.parameters()
if not isinstance(p, FlatParameter)
)
param_to_unflat_param_names = _get_param_to_unflat_param_names(
root_module, dedup_shared_params=False,
)
ignored_param_names = set()
for param in ignored_params:
unflat_param_names = param_to_unflat_param_names[param]
clean_names = []
for k in unflat_param_names:
clean_names.append(clean_tensor_name(k))
ignored_param_names.update(clean_names)
return ignored_params, ignored_param_names
def _get_buffer_names(self, root_module: torch.nn.Module) -> Set[str]:
"""
Returns the fully prefixed names of all buffers in the module hierarchy
        rooted at ``root_module`` as a :class:`set`.
Args:
root_module (torch.nn.Module): Top-level module passed into the
FSDP constructor from which to derive the fully prefixed names.
"""
def module_fn(module, prefix, buffer_names):
# For FSDP modules, only add the entry when considering the
# contained `FlattenParamsWrapper` to avoid duplication
if not isinstance(module, FullyShardedDataParallel):
for buffer_name, _ in module.named_buffers(recurse=False):
prefixed_buffer_name = clean_tensor_name(prefix + buffer_name)
buffer_names.add(prefixed_buffer_name)
def return_fn(buffer_names, *args):
return buffer_names
buffer_names: Set[str] = set()
return _apply_to_modules(
root_module, module_fn, return_fn, buffer_names,
)
@classmethod
def _check_wrapped(cls, begin_module, check_fn, err_fn):
for _, mod in begin_module.named_modules():
if not check_fn(mod):
raise ValueError(err_fn(mod))
def _register_param_handle(self, handle: FlatParamHandle) -> None:
"""Registers the parameter handle to this FSDP instance."""
if handle not in self._handles:
self._handles.append(handle)
@property
def module(self) -> nn.Module:
"""Make model.module accessible, just like DDP. Return the
underlying module without the flatten_params_wrapper
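        Example (a minimal sketch; ``MyModel`` is a placeholder module)::
            >>> # xdoctest: +SKIP("undefined variables")
            >>> fsdp_model = FSDP(MyModel())
            >>> inner = fsdp_model.module  # the original MyModel, without the wrapper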
"""
assert isinstance(self._fsdp_wrapped_module, FlattenParamsWrapper)
return self._fsdp_wrapped_module.module
    def check_is_root(self) -> bool:
        """Returns whether this instance is the root of its FSDP hierarchy, lazily initializing if needed."""
self._lazy_init()
assert self._is_root is not None
return self._is_root
def _use_param_exec_order_policy(self) -> bool:
return (
hasattr(self, "_param_exec_order_policy")
and self._param_exec_order_policy
)
def _is_param_exec_order_prep_stage(self) -> bool:
is_prep_stage = (
hasattr(self, "_param_exec_order_prep_stage")
and self._param_exec_order_prep_stage
)
if not is_prep_stage:
for p in self.parameters():
assert (
not hasattr(p, "_params_exec_order_hook_handle")
), "When not in execution order prep stage, all _params_exec_order_hook_handle should be removed."
return is_prep_stage
@staticmethod
def fsdp_modules(
module: nn.Module,
root_only: bool = False,
) -> List["FullyShardedDataParallel"]:
"""
Returns all nested FSDP instances, possibly including ``module`` itself
and only including FSDP root modules if ``root_only=True``.
Args:
module (torch.nn.Module): Root module, which may or may not be an
``FSDP`` module.
root_only (bool): Whether to return only FSDP root modules.
(Default: ``False``)
Returns:
List[FullyShardedDataParallel]: FSDP modules that are nested in
the input ``module``.
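        Example (a minimal sketch; ``model`` is assumed to be an already
        FSDP-wrapped module)::
            >>> # xdoctest: +SKIP("undefined variables")
            >>> all_fsdp_modules = FSDP.fsdp_modules(model)
            >>> root_fsdp_modules = FSDP.fsdp_modules(model, root_only=True)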
"""
return [
submodule for submodule in module.modules()
if isinstance(submodule, FullyShardedDataParallel) and
(not root_only or submodule.check_is_root())
]
def apply(self, fn: Callable[[nn.Module], None]) -> "FullyShardedDataParallel":
r"""Applies ``fn`` recursively to every submodule (as returned by ``.children()``)
as well as self. Typical use includes initializing the parameters of a model
(see also :ref:`nn-init-doc`).
Compared to ``torch.nn.Module.apply``, this version additionally gathers
the full parameters before applying ``fn``. It should not be called from
within another ``summon_full_params`` context.
Args:
fn (:class:`Module` -> None): function to be applied to each submodule
Returns:
Module: self
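        Example (a minimal sketch; ``MyModel`` and ``init_weights`` are
        placeholders)::
            >>> # xdoctest: +SKIP("undefined variables")
            >>> def init_weights(m):
            >>>     if isinstance(m, nn.Linear):
            >>>         nn.init.xavier_uniform_(m.weight)
            >>> fsdp_model = FSDP(MyModel())
            >>> fsdp_model.apply(init_weights)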
"""
uninitialized = self._is_root is None
self._assert_state(TrainingState_.IDLE)
with self._summon_full_params(recurse=False, writeback=True):
ret = super().apply(fn)
# Reset lazy init that might be called by _summon_full_params, since
# it could have set is_root incorrectly for non-root FSDP instances.
if uninitialized and self._is_root:
for module in self.fsdp_modules(self):
module._reset_lazy_init()
return ret
# setting two factors 'self.gradient_predivide_factor'
# and 'self.gradient_postdivide_factor' to avoid underflow and overflow
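    # For example, world_size=8 gives a predivide factor of 4.0 and a postdivide
    # factor of 8 / 4 = 2.0, i.e. the division is split roughly as
    # sqrt(world_size) rounded to a power of two at each step.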
def _get_gradient_predivide_factor(self, world_size: int) -> float:
factor: int = 1
while world_size % factor == 0 and world_size / factor > factor:
factor *= 2
return float(factor)
def _offload_to_cpu(self, p):
"""
Offloads parameter to CPU from self.compute_device. If the parameter is
already on CPU then this is a noop.
"""
cpu_device = torch.device("cpu")
if p.device == cpu_device:
return
with torch.no_grad():
p.data = p.to(cpu_device)
def _mixed_precision_enabled_for_params(self) -> bool:
"""
Whether user explicitly enabled mixed precision for
parameters or not.
"""
return (
self.mixed_precision is not None
and self.mixed_precision.param_dtype is not None
)
def _mixed_precision_enabled_for_buffers(self) -> bool:
"""
Whether user explicitly enabled mixed precision for
buffers or not.
"""
return (
self.mixed_precision is not None
and self.mixed_precision.buffer_dtype is not None
)
def _mixed_precision_enabled_for_reduce(self) -> bool:
"""
Whether user explicitly enabled mixed precision for
gradient reduction or not.
"""
return (
self.mixed_precision is not None
and self.mixed_precision.reduce_dtype is not None
)
def _low_precision_hook_enabled(self) -> bool:
"""
        Whether a low precision hook is registered or not.
"""
return (
self.communication_hook is not None
and self.communication_hook in LOW_PRECISION_HOOKS
)
def _cast_fp_inputs_to_precision(
self, dtype: torch.dtype, *args: Any, **kwargs: Any
) -> Tuple[Any, Any]:
"""
Casts floating point tensors in args and kwargs to precision given by dtype.
requires_grad field is respected.
"""
def cast_fn(x: torch.Tensor) -> torch.Tensor:
if not torch.is_floating_point(x):
return x
y = x.to(dtype)
# Explicitly copy over requires_grad context since this is happening
# within torch.no_grad.
if x.is_leaf:
y.requires_grad = x.requires_grad
return y
with torch.no_grad():
return (
_apply_to_tensors(cast_fn, args),
_apply_to_tensors(cast_fn, kwargs)
)
@torch.no_grad()
def _cast_param_shards_to_dtype(self):
"""
        Allocates a mixed precision parameter shard and casts parameter shards to
        reduced precision by copying into this mixed precision shard. Note that
if we are CPU offloading, this also implicitly loads the parameter shard
back to GPU.
"""
assert (
self._mixed_precision_enabled_for_params()
), "Expected to only be called when mixed precision for parameters is enabled."
with torch.cuda.stream(self._streams["mixed_precision_params"]):
for p in self.params:
assert p._mp_shard is not None
_alloc_storage(data=p._mp_shard, size=p._local_shard.size())
# Cast is done by copy
p._mp_shard.copy_(
# no-op if not CPU offloading, otherwise nonblocking because
# p._local_shard is pinned in _init_param_attributes.
p._local_shard.to(p._mp_shard.device, non_blocking=True)
)
# Point p to the mp shard
p.data = p._mp_shard
# Block current stream on this copy work.
torch.cuda.current_stream().wait_stream(self._streams["mixed_precision_params"])
@torch.no_grad()
def _free_mp_shard(self, params: List[FlatParameter]):
"""
        Deallocate storage for the parameters' mixed precision shards.
"""
assert (
self._mixed_precision_enabled_for_params()
), "Expected to only be called when mixed precision for parameters is enabled."
current_stream = torch.cuda.current_stream()
for p in params:
# mp_shard should always be allocated.
assert p._mp_shard is not None
# Shard is allocated in "mixed_precision_stream" and then we block
# current stream on this stream, so don't free it until work in the
# current stream is completed.
p._mp_shard.record_stream(current_stream)
_free_storage(p._mp_shard)
def _cast_buffers(
self,
device: Optional[torch.device] = None,
dtype: Optional[Dict[str, torch.dtype]] = None,
memo: Optional[Set] = None,
recurse: bool = True,
) -> None:
"""Move all buffers to the given *device* and *dtype*.
If *device* is not given, then it will default to
``self.compute_device``, otherwise buffer will be moved to ``device``.
In the case of nested FSDP instances, we will respect the child instance's
``compute_device`` configuration.
        If *dtype* is given, it must be a mapping of buffer name to buffer dtype,
        and it is currently only used to restore buffers to their original
        dtypes during checkpointing. If *dtype* is not given and we are in
        mixed precision training, the buffer will be cast to buffer_dtype;
        otherwise the buffer is not cast.
Args:
device (torch.device, Optional):
device to cast buffers to (defaults to compute_device)
dtype: (Dict[str, torch.dtype], Optional):
Mapping of buffer name to their dtype to cast to.
memo (Set, Optional):
set of modules that have already been processed
recurse (bool, Optional):
Whether to call _cast_buffers recursively on nested FSDP
instances (default is True).
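        Example (internal usage, mirroring ``_lazy_init()`` and ``state_dict()``)::
            >>> # xdoctest: +SKIP("undefined variables")
            >>> # Cast buffers to their mixed precision dtype after lazy init
            >>> self._cast_buffers(recurse=True)
            >>> # Restore the original dtypes right before taking a checkpoint
            >>> self._cast_buffers(dtype=self._orig_buffer_dtypes, recurse=False)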
"""
if memo is None:
memo = set()
for module in self.modules():
if module is not self and isinstance(module, FullyShardedDataParallel) and recurse:
# Allow any child FSDP instances to handle their own buffers.
module._cast_buffers(device=device, dtype=dtype, memo=memo, recurse=recurse)
elif module not in memo:
memo.add(module)
for name, buf in module.named_buffers(recurse=False):
if buf is None:
continue
buf = buf.to(device=device or self.compute_device)
if name not in self._orig_buffer_dtypes:
self._orig_buffer_dtypes[name] = buf.dtype
                    # If given, cast the buffer to the given dtype. This is used to
                    # support mixed precision for buffers
                    # (given by self.mixed_precision.buffer_dtype) and also to
                    # restore the buffer dtype to the original precision for
                    # state_dict() calls.
                    # Note that non-floating point buffers are not cast.
if torch.is_floating_point(buf):
# We are restoring the original buffer type in
# preparation for checkpoint.
if dtype:
buf = buf.to(dtype=dtype[name])
# Note that we don't pass in self.mixed_precision.buffer_dtype
# recursively into _cast_buffers, as we want to respect
# mp config for child FSDP instances.
elif self._mixed_precision_enabled_for_buffers():
buf = buf.to(self.mixed_precision.buffer_dtype)
setattr(module, name, buf)
@torch.no_grad()
def _shard_parameters(self) -> None:
"""
At initialization we wrap a module with full parameters and shard the
parameters in-place. Sharding is implemented by viewing each parameter
as a 1D Tensor and retaining only a single slice, where the slice size
is determined by the number of data parallel workers.
After this initial sharding is complete, the user can initialize a
``torch.optim.Optimizer`` in the usual way, i.e.::
.. code-block:: python
optim = torch.optim.Adam(sharded_module.parameters(), lr=0.0001)
The optimizer will see only a single slice of parameters and will thus
allocate less memory for optimizer state, avoiding redundancy across
data parallel workers.
"""
for handle in self._handles:
p = handle.flat_param
            assert not p._is_sharded, "Param should not have been sharded yet."
assert (
p.is_floating_point()
), "Autograd does not support operations for integer type."
# Sharding is done only when world_size is larger than 1 and
# sharding_strategy!=NO_SHARD.
p._is_sharded = ( # type: ignore[attr-defined]
self.world_size > 1
and self.sharding_strategy != ShardingStrategy.NO_SHARD
)
if not p._is_sharded: # type: ignore[attr-defined]
continue
# Save the original storage and free it later on.
# Since we're modifying the tensor's storage directly,
# make sure the tensor is the sole occupant of the storage.
assert (
p.storage_offset() == 0
), "The tensor is not the sole occupant of the storage."
orig_storage = p.storage()
# Replace p with the relevant shard.
local_shard, numel_padded = FlatParamHandle._get_shard(p, self.rank, self.world_size)
p.set_(local_shard) # type: ignore[call-overload]
handle.init_shard_metadata(local_shard.numel(), numel_padded, self.rank)
# Free storage that contains the original full data.
if orig_storage.size() > 0:
orig_storage.resize_(0) # type: ignore[attr-defined]
def __getattr__(self, name: str) -> Any:
"""Forward missing attributes to wrapped module."""
try:
return super().__getattr__(name) # defer to nn.Module's logic
except AttributeError:
return getattr(self._fsdp_wrapped_module, name)
def __getitem__(self, key: int) -> Any:
"""Forward indexing calls in case the module is a nn.Sequential."""
return self._fsdp_wrapped_module.__getitem__(key) # type: ignore[operator]
def _reset_lazy_init(self) -> None:
"""
Reset instance so :func:`_lazy_init` will run on the next forward.
"""
self._is_root: Optional[bool] = None
self._streams: Dict[str, torch.cuda.Stream] = {}
self._fsdp_graph_order: List[nn.Module] = []
self._my_fsdp_idx_in_graph: Optional[int] = None
self._pre_backward_hook_full_params_prefetched: bool = False
self._forward_full_params_prefetched: bool = False
for p in self.params:
if hasattr(p, "_local_shard"):
# reset attributes that are added in _init_param_attributes, as
# part of _lazy_init
del p._local_shard # type: ignore[attr-defined]
# set 'self.reshard_after_forward' flag based on self.sharding_strategy
self._init_reshard_after_forward()
def _lazy_init(self) -> None:
"""
Performs initialization lazily, typically right before the first
        forward pass. The laziness is needed to ensure that the parameter
        device/dtype and the FSDP hierarchy have been finalized.
This method's actual logic only runs on the root FSDP instance, which
performs initialization for all non-root FSDP instances to avoid
partial initialization.
"""
if self._is_root is not None:
return # no-op: already initialized
# The following logic is only run on the root FSDP instance
self._is_root = True
self._assert_state(TrainingState_.IDLE)
self._init_streams()
self._cast_buffers(recurse=True)
for param in self.params:
self._init_param_attributes(param)
# Do not reshard the root's parameters at the end of the forward pass
# with the intention that they are immediately used in the backward
# pass gradient computation (though this may not be true)
self.reshard_after_forward = False
self._exec_order_data.init(self)
# Initialize non-root FSDP instances and share attributes from the root
# to non-root instances (e.g. streams for overlapping)
for fsdp_module in self.fsdp_modules(self):
if fsdp_module is not self:
# Relax the assert for non-root FSDP instances in case the
# nested initialized module is wrapped again in FSDP later (e.g.
# after training to run inference)
assert fsdp_module._is_root is None or not fsdp_module._is_root, (
"Non-root FSDP instance's `_is_root` should not have been "
"set yet or should have been set to `False`"
)
fsdp_module._is_root = False
fsdp_module._streams = self._streams
fsdp_module._fsdp_graph_order = self._fsdp_graph_order
fsdp_module._exec_order_data = self._exec_order_data
for param in fsdp_module.params:
fsdp_module._init_param_attributes(param)
@torch.no_grad()
def _init_param_attributes(self, p: FlatParameter) -> None:
"""
We manage several attributes on each Parameter instance. The first is
set by :func:`_shard_parameters`:
``_is_sharded``: ``True`` if the Parameter is sharded or ``False``
if the Parameter is intentionally not sharded (in which case we
            will all-reduce grads for this param). Currently, ``_is_sharded``
            is ``False`` only when ``world_size == 1`` or the sharding strategy
            is ``NO_SHARD``.
A few attributes are set here:
``_local_shard``: a single shard of the parameter. This is needed to
recover the shard after rebuilding full parameter in forward
and backward.
``_full_param_padded``: the full weight (padded to be evenly
divisible by ``world_size``), used for computation in the
forward and backward pass. It is initialized with the
appropriate size and then has its storage freed. This will be
resized in place and only materialized (via all-gather) as needed.
Another attribute is set by :func:`_register_post_backward_hooks`:
``_shard_bwd_hook``: it holds the parameter's AccumulateGrad object
and the registered post hook handle.
"""
assert hasattr(p, "_is_sharded"), "Parameters should have been sharded during construction."
# If _local_shard has been set in the first lazy init and
# current parameter is pointed to _local_shard, no need to
# set the _local_shard again.
if hasattr(p, "_local_shard"):
# If CPU offloading, p._local_shard should have been placed on CPU
# during its first lazy construction.
if self.cpu_offload.offload_params:
assert p._local_shard.device == torch.device( # type: ignore[attr-defined]
"cpu"
), (
"Expected p._local_shard to be on CPU, " # type: ignore[attr-defined]
f"but it's on {p._local_shard.device}" # type: ignore[attr-defined]
)
return
# A single shard of the parameters. Also makes p._local_shard to be on
# CPU if we are CPU offloading, since p.data would be on CPU during
# init.
if self.cpu_offload.offload_params:
assert p.device == torch.device("cpu"), (
"Expected param to be on CPU when cpu_offloading is enabled. "
"If CPU offloading is enabled correctly, you may be "
"accidentally moving the model to CUDA after FSDP initialization."
)
p._local_shard = p.data # type: ignore[attr-defined]
# If CPU offloading, pin the memory to enable faster CPU -> GPU device
# transfer.
if self.cpu_offload.offload_params:
assert p._local_shard.device == torch.device("cpu") # type: ignore[attr-defined]
p._local_shard.pin_memory() # type: ignore[attr-defined]
# When offloading parameters, also move the grad shard to CPU during
# backward pass. In this case, it's important to pre-allocate the
# CPU grad shard in pinned memory so that we can do a non-blocking
# transfer.
p._cpu_grad = torch.zeros_like( # type: ignore[attr-defined]
p, device=torch.device("cpu")
).pin_memory()
# If mixed_precision, maintain reduced precision param shard on
# compute_device for computation in fwd/bwd. We resize storage to 0 here
# and rematerialize before building the full param when needed. After
# fwd/bwd, it is freed and we only hold on to the full precision shard.
# As a result, this reduced precision shard is not allocated if we are
# not in the forward/backward pass.
if (
self._mixed_precision_enabled_for_params()
):
p._mp_shard = torch.zeros_like(
p._local_shard,
device=self.compute_device,
dtype=self.mixed_precision.param_dtype
)
_free_storage(p._mp_shard)
# We also maintain a full-sized parameter of type self.compute_dtype.
# We resize the storage to size 0 at init (here) and only materialize
# as needed. The storage may contain padding elements so that it is
# evenly divisible by world_size, although these padding elements will
# be removed before the relevant computation.
if p._is_sharded: # type: ignore[attr-defined]
# We set p._full_param_padded's dtype to the desired parameter dtype
# in the case of mixed precision. This is so that when we all_gather
# into full_param_padded it can occur without issues and result in
# full_param_padded having the expected param_dtype.
full_param_dtype = (
p.dtype if not self._mixed_precision_enabled_for_params()
else self.mixed_precision.param_dtype
)
p._full_param_padded = torch.zeros( # type: ignore[attr-defined]
p.numel() * self.world_size,
device=self.compute_device,
dtype=full_param_dtype,
)
_free_storage(p._full_param_padded) # type: ignore[attr-defined]
# Track whether the `FlatParameter`'s post-backward hook has been
# called for validation in `_wait_for_post_backward()`
p._post_backward_called = False
def _init_streams(self) -> None:
"""Initializes CUDA streams for overlapping data transfer and
computation. This should only be called on the root FSDP instance."""
assert self._is_root
if torch.cuda.is_available():
# Stream for all-gathering parameters.
self._streams["all_gather"] = torch.cuda.Stream()
# Stream for overlapping grad reduction with the backward pass.
self._streams["post_backward"] = torch.cuda.Stream()
# Stream to move main params to self.mixed_precision.param_dtype
# for forward pass.
if self._mixed_precision_enabled_for_params():
self._streams["mixed_precision_params"] = torch.cuda.Stream()
def _wait_for_previous_optim_step(self) -> None:
"""
The root :class:`FullyShardedDataParallel` instance needs to
synchronize with the default stream to ensure that the previous
optimizer step is done.
"""
if not torch.cuda.is_available() or not self._is_root:
return
if self._mixed_precision_enabled_for_params():
self._streams["mixed_precision_params"].wait_stream(
torch.cuda.current_stream()
)
self._streams["all_gather"].wait_stream(torch.cuda.current_stream())
def _need_prefetch_full_params(self, state: TrainingState_) -> bool:
allowed_states = (
TrainingState_.FORWARD, TrainingState_.BACKWARD_PRE, TrainingState_.BACKWARD_POST
)
assert state in allowed_states, f"state needs to be in the set of {allowed_states}"
valid_fsdp_graph_and_index = (
self._fsdp_graph_order is not None
and self._my_fsdp_idx_in_graph is not None
)
if state == TrainingState_.FORWARD:
return (
self.forward_prefetch
and valid_fsdp_graph_and_index
and self._my_fsdp_idx_in_graph < len(self._fsdp_graph_order) - 1
and self._fsdp_graph_order[self._my_fsdp_idx_in_graph + 1].training_state
!= TrainingState_.FORWARD
)
elif state == TrainingState_.BACKWARD_PRE:
return (
self.backward_prefetch == BackwardPrefetch.BACKWARD_PRE
and valid_fsdp_graph_and_index
and self._my_fsdp_idx_in_graph > 0
and self._fsdp_graph_order[self._my_fsdp_idx_in_graph - 1].training_state
!= TrainingState_.BACKWARD_POST
)
else:
return (
self.backward_prefetch == BackwardPrefetch.BACKWARD_POST
and valid_fsdp_graph_and_index
and self._my_fsdp_idx_in_graph > 0
and self._fsdp_graph_order[self._my_fsdp_idx_in_graph - 1].training_state
!= TrainingState_.BACKWARD_POST
and self._fsdp_graph_order[
self._my_fsdp_idx_in_graph - 1
]._need_rebuild_full_params
)
@staticmethod
@contextlib.contextmanager
def state_dict_type(
module: nn.Module,
state_dict_type: StateDictType,
state_dict_config: Optional[StateDictConfig] = None,
) -> Generator:
"""
A context manager to set the ``state_dict_type`` of all the descendant
        FSDP modules of the target module. The target module does not have to
        be an FSDP module. If the target module is an FSDP module, its
        ``state_dict_type`` will also be changed.
.. note:: This API should be called for only the top-level (root)
module.
.. note:: This API enables users to transparently use the conventional
``state_dict`` API to take model checkpoints in cases where the
root FSDP module is wrapped by another ``nn.Module``. For example,
the following will ensure ``state_dict`` is called on all non-FSDP
instances, while dispatching into `local_state_dict` implementation
for FSDP:
Example::
>>> # xdoctest: +SKIP("undefined variables")
>>> model = DDP(FSDP(...))
>>> with FSDP.state_dict_type(model, StateDictType.LOCAL_STATE_DICT):
>>> checkpoint = model.state_dict()
Args:
module (torch.nn.Module): Root module.
state_dict_type (StateDictType): the desired ``state_dict_type`` to set.
"""
prev_state_dict_type = None
prev_state_dict_config = None
        # Use the default config if a state_dict config is not set.
if state_dict_config is None:
state_dict_config = _state_dict_type_to_config[state_dict_type]()
for submodule in FullyShardedDataParallel.fsdp_modules(module):
if prev_state_dict_type is None:
prev_state_dict_type = submodule._state_dict_type
if prev_state_dict_config is None:
prev_state_dict_config = submodule._state_dict_config
if prev_state_dict_type != submodule._state_dict_type:
raise RuntimeError("All FSDP module should the same state_dict_type.")
if type(prev_state_dict_config) != type(submodule._state_dict_config):
raise RuntimeError(
"All FSDP modules should have the same type of state_dict_config."
)
expected_state_dict_config_type = _state_dict_type_to_config[state_dict_type]
if expected_state_dict_config_type != type(state_dict_config):
raise RuntimeError(
f"Expected state_dict_config of type {expected_state_dict_config_type} but got {type(state_dict_config)}"
)
submodule._state_dict_type = state_dict_type
submodule._state_dict_config = state_dict_config
try:
yield
finally:
assert prev_state_dict_type is not None # Avoid mypy warning
assert prev_state_dict_config is not None # Avoid mypy warning
for submodule in FullyShardedDataParallel.fsdp_modules(module):
submodule._state_dict_type = prev_state_dict_type
submodule._state_dict_config = prev_state_dict_config
@property
def _param_fqns(self) -> Iterator[Tuple[str, str, str]]:
for param_name, module_name in (
self._fsdp_wrapped_module.handle.parameter_module_names()
):
module_name = module_name.replace(f"{FPW_MODULE}.", "")
module_name = module_name.replace(f"{FPW_MODULE}", "")
if module_name:
module_name = f"{module_name}."
# Activation checkpoint adds a prefix that has to be
# removed as well.
module_name = module_name.replace(
f"{checkpoint_wrapper._CHECKPOINT_PREFIX}.", ""
)
fqn = f"{module_name}{param_name}"
yield fqn, param_name, module_name
def _full_post_state_dict_hook(
self,
state_dict: Dict[str, Any],
prefix: str,
) -> Dict[str, Any]:
"""
Hook that runs after model.state_dict() is called before returning result to
user. For FSDP, we may have to clone the tensors in state_dict as params go
back to sharded version after _summon_full_params ends, and also remove
"_fsdp_wrapped_module" prefix.
"""
_replace_by_prefix(state_dict, prefix + f"{FSDP_WRAPPED_MODULE}.", prefix)
self._assert_state([TrainingState_.SUMMON_FULL_PARAMS])
# Return early for trivial cases
if not state_dict or not self._fsdp_wrapped_module.has_params:
return state_dict
# If the `FlatParameter` is registered, then this rank only needed to
# participate in the all-gather but does not actually save the state
# dict (e.g. when `rank0_only=True` and `self.rank != 0`)
if hasattr(self._fsdp_wrapped_module, "flat_param"):
return state_dict
offload_to_cpu = self._state_dict_config.offload_to_cpu
cpu_device = torch.device("cpu")
# Loop only the parameters saved in self._fsdp_wrapped_module to avoid
# processing buffers.
for fqn, param_name, module_name in self._param_fqns:
fqn = f"{prefix}{fqn}"
clean_key = fqn
clean_prefix = clean_tensor_name(prefix)
# Strip prefix out of key if needed as buffer names and param names
# do not have prefix considered as they are not computed in `state_dict`
# call.
if clean_key.startswith(clean_prefix):
clean_key = clean_key[len(clean_prefix):]
# Clone non-ignored parameters before exiting the
# `_summon_full_params()` context
assert fqn in state_dict, (
f"FSDP assumes {fqn} is in the state_dict but the state_dict "
f"only has {state_dict.keys()}. prefix={prefix}, "
f"module_name={module_name} param_name={param_name} rank={self.rank}."
)
if clean_key not in self._ignored_param_names and \
not getattr(state_dict[fqn], "_has_been_cloned", False):
try:
state_dict[fqn] = state_dict[fqn].clone().detach()
state_dict[fqn]._has_been_cloned = True # type: ignore[attr-defined]
except BaseException as e:
warnings.warn(
f"Failed to clone() tensor with name {fqn}. This may mean "
"that this state_dict entry could point to invalid memory "
"regions after returning from state_dict() call if this "
"parameter is managed by FSDP. Please check clone "
f"implementation of {fqn}. Error: {str(e)}"
)
# Offload the buffer to CPU if needed -- we do not do this in
# `_summon_full_params()` since without care, that would free
# the original buffer's GPU memory and require reallocating
# that memory later; this only affects the state dict's buffer
# variable and leaves the original buffer's GPU memory intact
if offload_to_cpu:
for clean_key in self._buffer_names:
# This is a hack to support activation checkpoint.
clean_key = clean_key.replace(
f"{checkpoint_wrapper._CHECKPOINT_PREFIX}.", ""
)
fqn = f"{prefix}{clean_key}"
if state_dict[fqn].device != cpu_device:
state_dict[fqn] = state_dict[fqn].to(cpu_device)
return state_dict
def _local_post_state_dict_hook(
self,
state_dict: Dict[str, Any],
prefix: str,
) -> Dict[str, Any]:
"""
        This hook creates a ShardedTensor from the local flat_param and replaces
        state_dict[f"{prefix}{FLAT_PARAM}"] with the ShardedTensor. No copy
        happens; the underlying storage is the same.
"""
_replace_by_prefix(state_dict, f"{prefix}{FSDP_WRAPPED_MODULE}.", prefix)
if not self._fsdp_wrapped_module.has_params:
return state_dict
# state_dict[f"{prefix}{FLAT_PARAM}"] exists and has the same tensor
# value as the flat_param but it is a pure Tensor because
# nn.Module.state_dict() will detach the parameter. Therefore, we need
# to get flat_param from the FlattenParamsWrapper to get the metadata.
flat_param = getattr(self._fsdp_wrapped_module, FLAT_PARAM, None)
# Construct a ShardedTensor from the flat_param.
full_numel = flat_param._unsharded_size.numel()
shard_offset = flat_param.numel() * self.rank
valid_data_size = flat_param.numel() - flat_param._shard_numel_padded
if valid_data_size > 0 and flat_param._shard_numel_padded > 0:
flat_param = flat_param.narrow(0, 0, valid_data_size)
local_shards = [
Shard.from_tensor_and_offsets(flat_param, [shard_offset], self.rank)
]
state_dict[f"{prefix}{FLAT_PARAM}"] = init_from_local_shards(
local_shards, full_numel, process_group=self.process_group
) # type: ignore[assignment]
return state_dict
@torch.no_grad()
def _sharded_post_state_dict_hook(
self,
state_dict: Dict[str, Any],
prefix: str,
) -> Dict[str, Any]:
"""
        The hook replaces the unflattened, unsharded parameter in the state_dict
        with an unflattened, sharded parameter (a ShardedTensor).
"""
_replace_by_prefix(state_dict, f"{prefix}{FSDP_WRAPPED_MODULE}.", prefix)
if not self._fsdp_wrapped_module.has_params:
return state_dict
assert self.training_state != TrainingState_.SUMMON_FULL_PARAMS, (
"Inside _sharded_post_load_state_dict_hook, the training_state must "
"not be SUMMON_FULL_PARAMS."
)
with self._summon_full_params(recurse=False, writeback=False):
for fqn, _, _ in self._param_fqns:
# Create a ShardedTensor for the unflattened, non-sharded parameter.
param = functools.reduce(getattr, fqn.split("."), self.module)
local_shard = param.chunk(self.world_size)[self.rank].clone()
offsets = [0 for _ in param.size()]
offsets[0] = math.ceil(param.size()[0] / self.world_size) * self.rank
local_shards = [
Shard.from_tensor_and_offsets(local_shard, offsets, self.rank)
]
fqn = f"{prefix}{fqn}"
state_dict[fqn] = init_from_local_shards(
local_shards, param.size(), process_group=self.process_group
) # type: ignore[assignment]
state_dict.pop(f"{prefix}{FLAT_PARAM}")
return state_dict
@staticmethod
def _post_state_dict_hook(
module: nn.Module,
state_dict: Dict[str, Any],
prefix: str,
*args: Any,
) -> Dict[str, Any]:
"""
_post_state_dict_hook() is called after the state_dict() of this
FSDP module is executed. ``self._state_dict_type`` is used to decide
what postprocessing will be done.
"""
self = cast(FullyShardedDataParallel, module)
processed_state_dict = self._post_state_dict_hook_fn[self._state_dict_type](state_dict, prefix)
# Restore buffers, which currently are in their full precision type,
# back to their mixed precision type. This is because buffers are cast
# during lazy_init() and stay at their mixed precision type before/after
# forward/backward. As a result state_dict() should maintain this.
if (
self._is_root
and self._mixed_precision_enabled_for_buffers()
):
self._cast_buffers(recurse=True)
return processed_state_dict
def state_dict(self, *args, **kwargs):
"""
This is the entry point of all three FSDP ``state_dict`` APIs: full,
local, and sharded. For the full state dict
(``StateDictType.FULL_STATE_DICT``), FSDP attempts to unshard the model
on all ranks, which may result in an OOM error if the full model cannot
fit on a single GPU. In that case, users may pass in a
:class:`FullStateDictConfig` to only save the checkpoint on rank 0 and/
or to offload it to CPU memory layer by layer, enabling much larger
checkpoints. If the full model cannot fit in CPU memory, then users may
instead take a local state dict (``StateDictType.LOCAL_STATE_DICT``)
that only saves the local shard of the model. The sharded state dict
(``StateDictType.SHARDED_STATE_DICT``) saves the model parameters as
``ShardedTensor`` s. The ``state_dict`` type can be configured using
the :meth:`state_dict_type` context manager.
Example::
>>> # xdoctest: +SKIP("undefined variables")
>>> import torch
>>> from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
>>> from torch.distributed.fsdp import StateDictType
>>> torch.cuda.set_device(device_id)
>>> my_module = nn.Linear(...)
>>> sharded_module = FSDP(my_module)
>>> full_state_dict_config = FullStateDictConfig(offload_to_cpu=True, rank0_only=True)
>>> with FSDP.state_dict_type(sharded_module, StateDictType.FULL_STATE_DICT, full_state_dict_config):
>>> full_dict = sharded_module.state_dict()
>>> full_dict.keys()
>>> odict_keys(['weight', 'bias'])
>>> # using local state dict
>>> with FSDP.state_dict_type(sharded_module, StateDictType.LOCAL_STATE_DICT):
>>> local_dict = sharded_module.state_dict()
>>> local_dict.keys()
>>> odict_keys(['flat_param', 'inner.flat_param'])
.. warning:: This needs to be called on all ranks, since synchronization
primitives may be used.
"""
# TODO (rohan-varma): separate these out once a state_dict pre-hook
# is available.
if torch.cuda.is_available():
torch.cuda.synchronize()
self._lazy_init()
if self._state_dict_type == StateDictType.FULL_STATE_DICT:
# Get config args
full_state_dict_config = (
self._state_dict_config if self._state_dict_config is not None
else FullStateDictConfig()
)
rank0_only = full_state_dict_config.rank0_only
offload_to_cpu = full_state_dict_config.offload_to_cpu
summon_ctx = (
self._summon_full_params(
recurse=False, writeback=False, offload_to_cpu=offload_to_cpu, rank0_only=rank0_only
)
if self.training_state != TrainingState_.SUMMON_FULL_PARAMS else
contextlib.suppress()
)
with summon_ctx:
# Since buffers are not sharded and stay casted, restore them to their
# original user module specified types for checkpoint. We take care to
# recast in post_state_dict_hook for consistency with the fact that
# buffers stay casted after forward/backward. We must have the
# call here instead of above because _summon_full_params itself
# calls _lazy_init() which would cast the buffers.
if (
self._is_root
and self._mixed_precision_enabled_for_buffers()
):
self._cast_buffers(
dtype=self._orig_buffer_dtypes, recurse=False
)
state_dict = super().state_dict(*args, **kwargs)
# TODO: support offload to CPU in post state dict hook.
if not rank0_only or self.rank == 0:
return state_dict
else:
return {}
elif (
self._state_dict_type == StateDictType.LOCAL_STATE_DICT or
self._state_dict_type == StateDictType.SHARDED_STATE_DICT
):
if (
self._fsdp_wrapped_module.flat_param is not None and
not self._fsdp_wrapped_module.flat_param._is_sharded
):
raise RuntimeError(
"sharded_state_dict/local_state_dict can only be called "
"when parameters are flatten and sharded."
)
return super().state_dict(*args, **kwargs)
else:
raise ValueError(f"Unknown StateDictType {self._state_dict_type}.")
def _local_state_dict(self, *args: Any, **kwargs: Any) -> Any:
"""
Returns the local state of the module. Parameters are flattened and
sharded, so the resulting state_dict can only be loaded after the module
has been wrapped with FSDP.
"""
with self.state_dict_type(self, StateDictType.LOCAL_STATE_DICT):
return self.state_dict(*args, **kwargs)
def _full_post_load_state_dict_hook(self, *args, **kwargs) -> None:
# We should exit summon_full_params context.
self._assert_state([TrainingState_.SUMMON_FULL_PARAMS])
assert getattr(self, '_full_param_ctx', None) is not None
self._full_param_ctx.__exit__(None, None, None)
self._full_param_ctx = None
def _sharded_state_dict(self, *args: Any, **kwargs: Any) -> Any:
"""
Returns the sharded states of the module. Parameters are unflattened and
sharded, so the resulting state_dict can be used with any parallelism
        (e.g., DDP, model parallelism, and single trainer) after a valid
resharding.
"""
        with self.state_dict_type(self, StateDictType.SHARDED_STATE_DICT):
            return self.state_dict(*args, **kwargs)
def _full_pre_load_state_dict_hook(
self,
state_dict: Dict[str, Any],
prefix: str,
) -> None:
# We do not expect to be calling pre-hooks twice without post-hook
# call in between.
assert getattr(self, '_full_param_ctx', None) is None
# Note that it needs writeback=True to persist.
self._full_param_ctx = self._summon_full_params(
recurse=False, writeback=True
)
self._full_param_ctx.__enter__()
_replace_by_prefix(state_dict, prefix, prefix + f"{FSDP_WRAPPED_MODULE}.")
def _local_post_load_state_dict_hook(self, *args, **kwargs) -> None:
pass
def _local_pre_load_state_dict_hook(
self,
state_dict: Dict[str, Any],
prefix: str,
) -> None:
"""
This hook finds the local flat_param for this FSDP module from the
state_dict. The flat_param should be a ShardedTensor. This hook converts
        the ShardedTensor to a tensor. No copy happens unless padding is required.
"""
_replace_by_prefix(state_dict, prefix, f"{prefix}{FSDP_WRAPPED_MODULE}.")
fqn = f"{prefix}{FSDP_WRAPPED_MODULE}.{FLAT_PARAM}"
if fqn not in state_dict:
assert getattr(self._fsdp_wrapped_module, FLAT_PARAM, None) is None, (
"No flat parameter in state_dict but self._fsdp_wrapped_module.flat_param is not None"
)
return
load_tensor = state_dict[fqn]
assert isinstance(
load_tensor, ShardedTensor
), "Tensors in local_state_dict should be ShardedTensor."
# Convert the ShardedTensor to a Tensor.
shards = load_tensor.local_shards()
        assert len(shards), "load_local_state_dict assumes one shard per ShardedTensor."
load_tensor = cast(torch.Tensor, shards[0].tensor)
        # Get the metadata of the flat_param to decide whether to pad the loaded
# tensor.
flat_param = self._fsdp_wrapped_module.flat_param
assert flat_param is not None
if flat_param._shard_numel_padded not in (0, flat_param.numel()):
assert load_tensor.numel() < flat_param.numel(), (
f"Local shard size = {flat_param.numel()} and the tensor in "
f"the state_dict is {load_tensor.numel()}."
)
load_tensor = F.pad(load_tensor, [0, flat_param._shard_numel_padded])
state_dict[fqn] = load_tensor
def _sharded_post_load_state_dict_hook(self, *args, **kwargs) -> None:
pass
def _sharded_pre_load_state_dict_hook(
self,
state_dict: Dict[str, Any],
prefix: str,
) -> None:
"""
The hook combines the unflattened, sharded parameters (ShardedTensor) to
a new FlatParameter and shards the new FlatParameter to the local chunk.
"""
_replace_by_prefix(state_dict, prefix, prefix + f"{FSDP_WRAPPED_MODULE}.")
if not self._fsdp_wrapped_module.has_params:
return
if not self._fsdp_wrapped_module.flat_param._is_sharded:
raise RuntimeError(
"load_sharded_state_dict can only be called when parameters "
"are flatten and sharded."
)
nonsharded_tensors = []
# TODO: Reduce the communication by using only one _all_gather_base to
# gather all the parameters in this layer. This can be achieved by
        # concatenating all the local shards and then appending the padding.
# https://github.com/pytorch/pytorch/issues/77461
for (param_name, _, module_name) in self._fsdp_wrapped_module.handle.flat_param._param_infos:
module_name = module_name.replace(f"{FPW_MODULE}.", "")
module_name = module_name.replace(f"{FPW_MODULE}", "")
if module_name:
module_name = f"{module_name}."
fqn = f"{prefix}{FSDP_WRAPPED_MODULE}.{module_name}{param_name}"
param = state_dict.pop(fqn)
# All-gather the param (ShardedTensor)
shards = param.local_shards()
local_tensor = cast(torch.Tensor, shards[0].tensor).flatten()
dim_0_size = param.size()[0]
param_numel = param.size().numel()
chunk_size = (
math.ceil(dim_0_size / self.world_size) * param_numel // dim_0_size
)
num_padding = chunk_size - local_tensor.numel()
if num_padding > 0:
local_tensor = F.pad(local_tensor, [0, num_padding])
tensor = torch.empty(
chunk_size * self.world_size, dtype=local_tensor.dtype
).cuda()
dist._all_gather_base(tensor, local_tensor, group=self.process_group)
tensor = tensor.narrow(0, 0, param_numel).reshape(param.size())
nonsharded_tensors.append(tensor)
# Create a new flat_param from the loaded, non-sharded tensors.
flat_param = self._fsdp_wrapped_module.flat_param
loaded_flat_param = FlatParamHandle.flatten_params(nonsharded_tensors, requires_grad=False)
# Get the chunk from the loaded flat_param for the local rank.
loaded_flat_param, num_to_pad = FlatParamHandle._get_shard(
loaded_flat_param, self.rank, self.world_size,
)
assert flat_param.numel() == loaded_flat_param.numel(), (
f"The loaded local chunk has different numel({flat_param.numel()}) "
f"from the local chunk {flat_param.numel()}."
)
assert flat_param._shard_numel_padded == num_to_pad, (
f"The loaded local chunk has different padding({num_to_pad}) "
f"from the local chunk {flat_param._shard_numel_padded}."
)
state_dict[f"{prefix}_fsdp_wrapped_module.flat_param"] = loaded_flat_param
@staticmethod
def _pre_load_state_dict_hook(
module: nn.Module,
state_dict: Dict[str, Any],
prefix: str,
*args: Any,
) -> None:
"""
        ``_pre_load_state_dict_hook`` is called before ``self._load_from_state_dict()``
        is called. ``self._state_dict_type`` is used to decide what preprocessing
        will be done.
"""
# Code that is common for all state_dict impls
self = cast(FullyShardedDataParallel, module)
if torch.cuda.is_available():
torch.cuda.synchronize()
# Dispatch into state_dict specific implementation of pre-hook.
self._pre_load_state_dict_hook_fn[self._state_dict_type](state_dict, prefix)
@staticmethod
def _post_load_state_dict_hook(module: nn.Module, *args: Any) -> None:
# Code that is common for all state_dict impls
self = cast(FullyShardedDataParallel, module)
# Dispatch into state_dict type specific implementation of post-hook for
# loading state_dict.
self._post_load_state_dict_hook_fn[self._state_dict_type]()
def load_state_dict(
self,
state_dict: Mapping[str, Any],
*args,
**kwargs,
) -> NamedTuple:
"""
The entry point of all three FSDP ``load_state_dict`` APIs. By default,
calling ``load_state_dict`` on an FSDP module will result in FSDP
attempting to load a "full" state_dict, i.e. a state_dict consisting of
full, unsharded, unflattened original module parameters. This requires
FSDP to load the full parameter context on each rank which could result
in GPU OOM. As a result, :func:`state_dict_type` API is available to
configure between ``load_state_dict`` implementations. User can thus use
``with self.state_dict_type(self, StateDictType.LOCAL_STATE_DICT)`` context
manager to load a local state dict checkpoint that will restore only
local shards of the module. Currently, the only supported
implementations are ``StateDictType.LOCAL_STATE_DICT`` and
``StateDictType.FULL_STATE_DICT`` (default). Please see :func:`state_dict`
for documentation around creating an FSDP checkpoint.
Example::
>>> # xdoctest: +SKIP("undefined variables")
>>> import torch
>>> from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
>>> from torch.distributed.fsdp import StateDictType
>>> torch.cuda.set_device(device_id)
>>> my_module = nn.Linear(...)
>>> sharded_module = FSDP(my_module)
>>> checkpoint = torch.load(PATH)
>>> full_state_dict = checkpoint['full_state_dict']
>>> with FSDP.state_dict_type(sharded_module, StateDictType.FULL_STATE_DICT):
>>> sharded_module.load_state_dict(full_state_dict)
>>> full_dict.keys()
>>> odict_keys(['weight', 'bias'])
>>> # using local state dict
>>> local_state_dict = checkpoint['local_state_dict']
>>> with FSDP.state_dict_type(sharded_module, StateDictType.LOCAL_STATE_DICT):
>>> sharded_module.load_state_dict(local_state_dict)
>>> local_dict.keys()
>>> odict_keys(['flat_param', 'inner.flat_param'])
.. warning:: This needs to be called on all ranks, since synchronization
primitives may be used.
"""
return super().load_state_dict(state_dict, *args)
def _load_local_state_dict(
self,
state_dict: Mapping[str, Any],
*args,
) -> NamedTuple:
"""
Load states from a flattened, sharded state dictionary.
"""
with self.state_dict_type(self, StateDictType.LOCAL_STATE_DICT):
return self.load_state_dict(state_dict, *args)
def _load_sharded_state_dict(
self,
state_dict: Union[Dict[str, torch.Tensor], "OrderedDict[str, torch.Tensor]"],
strict: bool = True,
) -> NamedTuple:
"""
Load states from a unflattened, sharded state dictionary.
"""
        with self.state_dict_type(self, StateDictType.SHARDED_STATE_DICT):
            return self.load_state_dict(state_dict, strict)
def forward(self, *args: Any, **kwargs: Any) -> Any:
with torch.autograd.profiler.record_function("FullyShardedDataParallel.forward"):
self._lazy_init()
self._wait_for_previous_optim_step()
# Start of a forward pass.
self.training_state = TrainingState_.FORWARD
if self._is_root:
# TODO: disabling side stream for tensor copies for now, investigate
# perf with it on / off.
# Place inputs on compute_device. This is a noop if inputs are already
# on compute_device. Note that when device_id is specified,
# device_id == self.compute_device is guaranteed.
# TODO: for mixed precision, move inputs to right device + cast might
# be done in one go for performance.
args, kwargs = _to_kwargs(args, kwargs, self.compute_device.index, False)
args = args[0]
kwargs = kwargs[0]
# Cast inputs to their mixed precision type.
if (
self._is_root
and self._mixed_precision_enabled_for_params()
):
input_dtype = self.mixed_precision.param_dtype
args, kwargs = self._cast_fp_inputs_to_precision(
input_dtype, *args, **kwargs
)
            # Only rebuild the full params if they were not prefetched by previous layers
if not self._forward_full_params_prefetched:
self._rebuild_full_params()
self._forward_full_params_prefetched = False
# Wait for all_gather full parameters to finish before computation
torch.cuda.current_stream().wait_stream(self._streams["all_gather"])
# Prefetch next layer's full params in forward pass
if self._need_prefetch_full_params(self.training_state):
                # This guarantees that prefetching starts only after all
                # previous computations have finished, so all-gathering the
                # next layer's parameters overlaps only with this layer's
                # computation. This prevents over-prefetching, where multiple
                # layers' parameters are prefetched before their computation.
self._streams["all_gather"].wait_stream(torch.cuda.current_stream())
self._fsdp_graph_order[self._my_fsdp_idx_in_graph + 1]._rebuild_full_params()
self._fsdp_graph_order[self._my_fsdp_idx_in_graph + 1]._forward_full_params_prefetched = True
# Register backward hooks to reshard params and reduce-scatter grads.
# These need to be re-registered every forward pass in some cases where grad_fn
# is mutated.
self._register_post_backward_hooks()
outputs = self._fsdp_wrapped_module(*args, **kwargs)
if self not in self._fsdp_graph_order:
self._my_fsdp_idx_in_graph = len(self._fsdp_graph_order)
self._fsdp_graph_order.append(self)
if self.reshard_after_forward:
self._free_full_params()
if (
self._mixed_precision_enabled_for_params()
):
self._free_mp_shard(self.params)
# Switch to original local shards of params. We maintain this invariant throughout
# the code, i.e., ``p.data == p._local_shard`` after each function. This
# also ensures that after the first forward, the optimizer state will be
# initialized with the correct dtype and (sharded) size, since optimizer
# state is typically initialized lazily in ``optim.step()``. Note that
# when CPU offload is enabled, _use_param_local_shard implicitly
# offloads the local shard to CPU by making p.data point to
# p._local_shard, which would reside on CPU.
self._use_param_local_shard()
# Register pre-backward hooks to all-gather the params for the backward
# pass (if output's grad was needed). This won't register anything if
# we are in eval mode.
outputs = self._register_pre_backward_hooks(outputs)
# Done with a forward pass.
self.training_state = TrainingState_.IDLE
return outputs
@torch.no_grad()
def _write_back_current_shard(self, full_params):
"""
Writes back full_params into self.params.
"""
for p, (full_param, _) in zip(self.params, full_params):
if not p._is_sharded: # type: ignore[attr-defined]
continue # Already copied because no sharding.
# TODO: Might be able to refactor to use _get_shard.
chunks = full_param.chunk(self.world_size) # type: ignore[attr-defined]
assert len(chunks) > self.rank
chunk = chunks[self.rank]
p._local_shard.copy_(chunk) # type: ignore[attr-defined]
@contextlib.contextmanager
def _summon_full_params(
self,
recurse: bool = True,
writeback: bool = True,
rank0_only: bool = False,
offload_to_cpu: bool = False,
):
if writeback and rank0_only:
raise ValueError(
"writeback=True and rank0_only=True is not supported, as model "
"parameter shapes will be different across ranks, and writing "
"to them can lead to inconsistencies across ranks when the "
"context is exited."
)
if offload_to_cpu and not rank0_only:
warnings.warn(
"offload_to_cpu and rank0_only=False will result in "
"full parameters being redundantly copied to CPU memory for "
"GPUs that reside on the same machine, which may incur the risk of "
"CPU OOM. It is recommended to use ``offload_to_cpu`` with "
"rank0_only=True."
)
def _free_full_params_and_use_local_shard(params_to_free):
# We may not always be able to free the full param, for example in
# the case where world_size == 1 and the shard actually points to
# the full parameter.
for (param, can_free) in params_to_free:
if can_free:
current_stream = torch.cuda.current_stream()
# Don't let PyTorch reuse this memory until all work in the
# current stream is complete
param.record_stream(current_stream)
_free_storage(param)
# when CPU offload is enabled, _use_param_local_shard implicitly
# offloads the local shard to CPU by making p.data point to
# p._local_shard, which would reside on CPU.
self._use_param_local_shard()
if recurse:
with contextlib.ExitStack() as stack:
# Summon all params for any nested FSDP instances.
for module in self.fsdp_modules(self):
stack.enter_context(
module._summon_full_params(
recurse=False,
writeback=writeback,
rank0_only=rank0_only,
offload_to_cpu=offload_to_cpu,
)
)
# Yield to the caller, with full params in all nested instances.
yield
# Exiting from the ExitStack will re-shard params.
return
else:
torch.cuda.synchronize()
self._lazy_init()
self._assert_state([TrainingState_.IDLE])
# Set the state so that we assert when trying to go into
# forward/backward.
self.training_state = TrainingState_.SUMMON_FULL_PARAMS
# Even if rank0_only = True, we need to materialize all params here
# and free them right after as full param materialization requires
# collective comm.
currently_local_params = self._rebuild_full_params()
# Wait for all_gather to finish before computation
torch.cuda.current_stream().wait_stream(self._streams["all_gather"])
my_rank = dist.get_rank(self.process_group)
if offload_to_cpu and (not rank0_only or my_rank == 0):
for p in self.params:
if p._is_sharded:
with torch.no_grad():
# Note that we avoid using p._full_param_padded
# directly here as we may not be using that param
# as the full_param from _rebuild_full_params (e.g., in
# mixed precision).
for p, (full_param, _) in zip(
self.params, currently_local_params
):
full_param = full_param.to(torch.device("cpu"))
self._update_p_data(p, output_tensor=full_param)
if rank0_only and my_rank != 0:
_free_full_params_and_use_local_shard(currently_local_params)
try:
yield
finally:
self.training_state = TrainingState_.IDLE
else:
# FSDP now has the full flattened parameter. Unflatten it to get the
# full parameters.
with contextlib.ExitStack() as stack:
# Invariant: rank == 0 or !rank0_only
stack.enter_context(self._fsdp_wrapped_module.unflatten_as_params())
try:
yield
finally:
if offload_to_cpu and (not rank0_only or my_rank == 0):
for p in self.params:
if p._is_sharded:
with torch.no_grad():
# Note that we avoid using
# p._full_param_padded directly here as
# we may not be using that param
# as the full_param from
# _rebuild_full_params (e.g., in mixed
# precision).
for p, (full_param, _) in zip(
self.params, currently_local_params
):
full_param = full_param.to(self.compute_device)
self._update_p_data(
p, output_tensor=full_param,
)
if writeback:
self._write_back_current_shard(currently_local_params)
stack.close()
_free_full_params_and_use_local_shard(currently_local_params)
self.training_state = TrainingState_.IDLE
@staticmethod
@contextlib.contextmanager
def summon_full_params(
module,
recurse: bool = True,
writeback: bool = True,
rank0_only: bool = False,
offload_to_cpu: bool = False,
) -> Generator:
r""" A context manager to expose full params for FSDP instances.
Can be useful *after* forward/backward for a model to get
the params for additional processing or checking. It can take a non-FSDP
module and will summon full params for all contained FSDP modules as
well as their children, depending on the ``recurse`` argument.
.. note:: This can be used on inner FSDPs.
.. note:: This can *not* be used within a forward or backward pass. Nor
can forward and backward be started from within this context.
.. note:: Parameters will revert to their local shards after the context
manager exits; storage behavior is the same as in the forward pass.
.. note:: The full parameters can be modified, but only the portion
corresponding to the local param shard will persist after the
context manager exits (unless ``writeback=False``, in which case
changes will be discarded). In the case where FSDP does not shard
the parameters, currently only when ``world_size == 1``, or ``NO_SHARD``
config, the modification is persisted regardless of ``writeback``.
.. note:: This method works on modules which are not FSDP themselves but
may contain multiple independent FSDP units. In that case, the given
arguments will apply to all contained FSDP units.
.. warning:: Note that ``rank0_only=True`` in conjunction with
``writeback=True`` is not currently supported and will raise an
error. This is because model parameter shapes would be different
across ranks within the context, and writing to them can lead to
inconsistency across ranks when the context is exited.
.. warning:: Note that ``offload_to_cpu`` and ``rank0_only=False`` will
result in full parameters being redundantly copied to CPU memory for
GPUs that reside on the same machine, which may incur the risk of
CPU OOM. It is recommended to use ``offload_to_cpu`` with
``rank0_only=True``.
Args:
recurse (bool, Optional): recursively summon all params for nested
FSDP instances (default: True).
writeback (bool, Optional): if ``False``, modifications to params are
discarded after the context manager exits;
disabling this can be slightly more efficient (default: True)
rank0_only (bool, Optional): if ``True``, full parameters are
materialized on only global rank 0. This means that within the
context, only rank 0 will have full parameters and the other
ranks will have sharded parameters. Note that setting
``rank0_only=True`` with ``writeback=True`` is not supported,
as model parameter shapes will be different across ranks
within the context, and writing to them can lead to
inconsistency across ranks when the context is exited.
offload_to_cpu (bool, Optional): If ``True``, full parameters are
offloaded to CPU. Note that this offloading currently only
occurs if the parameter is sharded (which is only not the case
for world_size = 1 or ``NO_SHARD`` config). It is recommended
to use ``offload_to_cpu`` with ``rank0_only=True`` to avoid
redundant copies of model parameters being offloaded to the same CPU memory.
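Example (a minimal sketch; assumes ``model`` is already wrapped with
FSDP and defined elsewhere)::
    >>> # xdoctest: +SKIP("undefined variables")
    >>> from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
    >>> with FSDP.summon_full_params(model, writeback=False):
    >>>     for name, param in model.named_parameters():
    >>>         print(name, param.shape)  # full (unsharded) shapes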
"""
# Note that we specify root_only as FSDP roots will handle summoning
# child FSDP instances based on recurse argument.
fsdp_modules = FullyShardedDataParallel.fsdp_modules(
module, root_only=True
)
# Summon all params for all FSDP instances
with contextlib.ExitStack() as stack:
for module in fsdp_modules:
stack.enter_context(
module._summon_full_params(
recurse=recurse,
writeback=writeback,
rank0_only=rank0_only,
offload_to_cpu=offload_to_cpu,
)
)
# Yield to the caller, with full params in all FSDP instances.
yield
# Exiting from the ExitStack will reshard all params.
return
def named_buffers(
self,
*args,
**kwargs,
) -> Iterator[Tuple[str, torch.Tensor]]:
"""
Overrides :meth:`named_buffers()` to intercept buffer names and
remove all occurrences of the FSDP-specific flattened buffer prefix
when inside the :meth:`summon_full_params` context manager.
"""
in_summon_full_params = self.training_state == TrainingState_.SUMMON_FULL_PARAMS
for buffer_name, buffer in super().named_buffers(*args, **kwargs):
if in_summon_full_params:
# Remove any instances of the FSDP-specific prefix; there can
# be multiple in the case of nested FSDP modules
buffer_name = buffer_name.replace(FSDP_PREFIX, "")
yield (buffer_name, buffer)
def named_parameters(
self,
*args,
**kwargs,
) -> Iterator[Tuple[str, torch.nn.Parameter]]:
"""
Overrides :meth:`named_parameters()` to intercept parameter names and
remove all occurrences of the FSDP-specific flattened parameter prefix
when inside the :meth:`summon_full_params` context manager.
"""
# Determine which logic to use based on the context at call time
in_summon_full_params = self.training_state == TrainingState_.SUMMON_FULL_PARAMS
for param_name, param in super().named_parameters(*args, **kwargs):
if in_summon_full_params:
# Remove any instances of the FSDP-specific prefix; there can
# be multiple in the case of nested FSDP modules
param_name = param_name.replace(FSDP_PREFIX, "")
yield (param_name, param)
def _register_pre_backward_hooks(self, outputs: Any) -> Any:
"""Register pre-backward hook to run before the wrapped module's
backward. Hooks should be attached to all outputs from the forward.
Returns:
outputs: new outputs with hooks registered if they require gradient.
"""
# Reset before each backward pass
self._need_rebuild_full_params = False
if not torch.is_grad_enabled():
return outputs # don't register hooks if grad isn't enabled
if self._is_root:
# This actually means that only root instance has
# _post_backward_callback_queued defined. Accidentally accessing this field
# will assert on all other instances, giving us a nice bug checker.
self._post_backward_callback_queued = False
# Reset before each backward pass
self._pre_backward_hook_has_run = False
def _pre_backward_hook(*unused: Any) -> None:
# Run ``_pre_backward_hook`` only once per backward pass
if self._pre_backward_hook_has_run:
return
with torch.autograd.profiler.record_function("FullyShardedDataParallel._pre_backward_hook"):
# try to queue final backward callback only once for root, so
# that final backward callback is attached to the outermost
# backward graph task and called after all the backward
# calls are completed.
if self._is_root:
self._queue_wait_for_post_backward()
if self._pre_backward_hook_full_params_prefetched:
# Always wait for all_gather before rebuilding full params, just
# in case full params have already been prefetched in previous layer's
# pre-backward hook.
torch.cuda.current_stream().wait_stream(self._streams["all_gather"])
# Start of a backward pass (the first pre-backward hook to run in this backward pass).
self._assert_state([TrainingState_.IDLE])
self.training_state = TrainingState_.BACKWARD_PRE
# All-gather full parameters, moving them to compute device if
# necessary.
self._rebuild_full_params()
self._pre_backward_hook_full_params_prefetched = False
# Wait for all_gather to finish before computation
torch.cuda.current_stream().wait_stream(self._streams["all_gather"])
# Prefetch the next layer's full params in the backward pass.
# Since this is prefetching, there is no need to wait on the all_gather stream.
if self._need_prefetch_full_params(self.training_state):
self._fsdp_graph_order[self._my_fsdp_idx_in_graph - 1]._rebuild_full_params() # type: ignore[operator]
self._fsdp_graph_order[self._my_fsdp_idx_in_graph - 1]._pre_backward_hook_full_params_prefetched = True
self._pre_backward_hook_has_run = True
# Prepare p.grad so that it is in the right shape, device, accumulated values, etc.
self._prep_grads_for_backward()
def _register_hook(t: torch.Tensor) -> torch.Tensor:
if t.requires_grad:
t.register_hook(_pre_backward_hook)
self._need_rebuild_full_params = True
return t
# Attach hooks to Tensor outputs.
outputs = _apply_to_tensors(_register_hook, outputs)
return outputs
def _register_post_backward_hooks(self) -> None:
"""
Register backward hooks to reshard params and reduce-scatter grads.
This is called during forward pass. The goal is to attach a hook
on each of the parameter's gradient generating function (``grad_acc``
below) so that the hook is called *after* all gradients for that
param are computed.
Goals:
1. We want the hook to fire once and only once *after* all gradients
are accumulated for a param.
2. If it fires more than once, we end up incorrectly sharding the grad
multiple times (which could make the gradient dimension too small).
3. If it fires once but too early, or does not fire at all, we leave the
gradients unsharded (which could make the gradient dimension too large).
Due to multiple-pass forward, this function can be called on
the same parameter multiple times in a single forward pass. If we register
the hook multiple times, the hook ends up being called multiple times. We
could try to register a new hook every time and delete the previously
registered one. However, for reasons that remain unclear despite extensive
debugging, in mixed precision mode we get two different ``grad_acc``
objects below during different calls of this function (in the same
forward pass). If we keep the last one, the hook ends up firing too
early. In full precision mode, we luckily get the *same* ``grad_acc``
object, so deleting and re-registering still ensures the hook fires
once after all gradients are generated.
Empirically, keeping the first hook registered per forward pass seems to
work best. We do need to remove the hook at the end of the
backward pass. Otherwise, the next forward pass will not register
a new hook, which is needed for a new forward pass.
"""
if not torch.is_grad_enabled():
return # don't register grad hooks if grad isn't enabled
for p in self.params:
if p.requires_grad:
if hasattr(p, "_shard_bwd_hook"):
continue
# Register a hook on the first call, empirically, autograd
# fires it at the end for this param, which makes sense.
p_tmp = p.expand_as(p) # Get a grad_fn on p_tmp.
assert (
p_tmp.grad_fn is not None
), "p_tmp grad_fn should not be None, it is used to access \
p's AccumulateGrad object and register post hook on it."
grad_acc = p_tmp.grad_fn.next_functions[0][
0
] # Gets its AccumulateGrad object.
handle = grad_acc.register_hook(
functools.partial(self._post_backward_hook, p)
)
p._shard_bwd_hook = (grad_acc, handle) # type: ignore[attr-defined]
@torch.no_grad()
def _post_backward_hook(self, param: Parameter, *unused: Any) -> None:
"""
At the start of :func:`_post_backward_hook`, ``param.grad`` contains the
full gradient for the local batch. The reduce-scatter op will replace
``param.grad`` with a single shard of the summed gradient across all
GPUs. This shard will align with the current GPU rank. For example::
before reduce_scatter:
param.grad (GPU #0): [1, 2, 3, 4]
param.grad (GPU #1): [5, 6, 7, 8]
after reduce_scatter:
param.grad (GPU #0): [6, 8] # 1+5, 2+6
param.grad (GPU #1): [10, 12] # 3+7, 4+8
The local GPU's ``optim.step`` is responsible for updating a single
shard of params, also corresponding to the current GPU's rank. This
alignment is created by :func:`_shard_parameters`, which ensures that
the local optimizer only sees the relevant parameter shard.
"""
p_assert(
hasattr(param, '_post_backward_called'),
"Expected flag _post_backward_called to exist on param."
)
param._post_backward_called = True
with torch.autograd.profiler.record_function("FullyShardedDataParallel._post_backward_hook"):
# First hook callback will see PRE state. If we have multiple params,
# then subsequent hook callbacks will see POST state.
self._assert_state([TrainingState_.BACKWARD_PRE, TrainingState_.BACKWARD_POST])
self.training_state = TrainingState_.BACKWARD_POST
if self._use_param_exec_order_policy() and self._param_exec_order_prep_stage:
# In self._fsdp_params_exec_order, the parameters are ordered based on
# the execution order in the backward pass in the first iteration.
self._fsdp_params_exec_order.append(param)
if param.grad is None:
return
if param.grad.requires_grad:
raise RuntimeError(
"FSDP only works with gradients that don't require gradients"
)
if (
self._require_backward_grad_sync
or self.sharding_strategy == ShardingStrategy.FULL_SHARD
):
self._free_full_params(cast(List[FlatParameter], [param]))
if self._mixed_precision_enabled_for_params():
# Noop if reshard_after_forward=True because we'd free the param
# shard when rebuilding the full params in the pre_backward_hook.
self._free_mp_shard(cast(List[FlatParameter], [param]))
# Switch to local shard after backward. Note that
# when CPU offload is enabled, _use_param_local_shard implicitly
# offloads the local shard to CPU by making p.data point to
# p._local_shard, which would reside on CPU.
self._use_param_local_shard(cast(List[FlatParameter], [param]))
# Prefetch the previous layer's full params in this post-backward hook.
# If the next layer's backward computation is done and its full params are
# already freed, there is no need to prefetch those full params again.
# Only prefetch full params if any of the next layer's outputs requires grad.
if self._need_prefetch_full_params(self.training_state):
self._fsdp_graph_order[self._my_fsdp_idx_in_graph - 1]._rebuild_full_params() # type: ignore[operator]
# Next layer's computation will start right after this all_gather,
# Wait for all_gather to finish before computation.
torch.cuda.current_stream().wait_stream(self._streams["all_gather"])
if not self._require_backward_grad_sync:
return
# Wait for all work in the current stream to finish, then start the
# reductions in post_backward stream.
self._streams["post_backward"].wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(self._streams["post_backward"]):
orig_grad_data = param.grad.data
if (
self._mixed_precision_enabled_for_reduce() and not self._low_precision_hook_enabled()
):
# Cast gradient to precision in which it should be communicated.
# If a low precision hook is registered and reduce_dtype is specified
# in `MixedPrecision`, communication hook will take care of
# casting to lower precision and back.
# TODO: Make this a communication hook when communication hooks
# are implemented for FSDP. Note that this is a noop if the
# reduce_dtype matches the param dtype.
param.grad.data = param.grad.data.to(self.mixed_precision.reduce_dtype)
if self.gradient_predivide_factor > 1 and self.communication_hook is None:
# Average grad by pre-division factor. Together pre- and post-division factors
# lead to an overall averaging by world_size, required for consistency with PyTorch DDP.
# This is a two-step process to avoid potential underflow and overflow.
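# Illustration (hypothetical factors, not necessarily the defaults): with
# world_size == 64, the pre- and post-division factors might each be 8, so
# dividing by 8 here and by 8 after the reduce-scatter averages by 64
# overall while keeping intermediate values in a safer numeric range.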
param.grad.div_(self.gradient_predivide_factor)
grad = param.grad.data
if param._is_sharded: # type: ignore[attr-defined]
# We clear `param.grad` to permit repeated gradient
# computations when this FSDP module is called multiple times.
# This is to avoid a race among multiple re-entrant backward
# passes. For example, the second backward pass computation
# precedes ahead of the first backward pass reduction, which is
# possible since the reduction is in a different stream and is
# async. Then, the first backward pass may be incorrectly
# reducing the second backward pass's `param.grad`.
# The reduced gradients are accumulated in
# `param._saved_grad_shard`, and the gradient reductions can
# happen in arbitrary order, though we tolerate this due to the
# (approximate) commutativity of floating-point addition.
param.grad = None
grad_flatten = torch.flatten(grad)
chunks = list(grad_flatten.chunk(self.world_size))
num_pad = self.world_size * chunks[0].numel() - grad.numel()
input_flattened = F.pad(grad_flatten, [0, num_pad])
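# Illustration (hypothetical sizes): with world_size == 4 and a flattened
# grad of 10 elements, chunk() yields sizes [3, 3, 3, 1], so
# num_pad = 4 * 3 - 10 = 2 and input_flattened has 12 elements, i.e. one
# equal-sized 3-element shard per rank for the reduce-scatter.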
output = torch.zeros_like(chunks[0])
dist._reduce_scatter_base(
output, input_flattened, group=self.process_group
)
if self.gradient_postdivide_factor > 1:
# Average grad by the post-division factor. Together pre- and post-division factors
# lead to an overall averaging by world_size, required for consistency with PyTorch DDP.
# This is a two-step process to avoid potential underflow and overflow.
output.div_(self.gradient_postdivide_factor)
self._cast_grad_to_param_dtype(output, param)
# To support gradient accumulation outside `no_sync()`, we save
# the gradient data to `param._saved_grad_shard` before the
# backward pass, accumulate gradients into it here, and set
# `param.grad` with the accumulated value at the end of the
# backward pass in preparation for the optimizer step.
accumulate_grad = hasattr(param, "_saved_grad_shard")
if accumulate_grad:
p_assert(
param._saved_grad_shard.shape == output.shape, # type: ignore[attr-defined]
"Shape mismatch when accumulating gradients: " # type: ignore[attr-defined]
f"existing grad shape={param._saved_grad_shard.shape} "
f"new grad shape={output.shape}" # type: ignore[attr-defined]
)
p_assert(
param._saved_grad_shard.device == output.device, # type: ignore[attr-defined]
"Device mismatch when accumulating gradients: " # type: ignore[attr-defined]
f"existing grad device={param._saved_grad_shard.device} "
f"new grad device={output.device}" # type: ignore[attr-defined]
)
param._saved_grad_shard += output # type: ignore[attr-defined]
else:
param._saved_grad_shard = output # type: ignore[attr-defined]
grad = param._saved_grad_shard # type: ignore[attr-defined]
else:
# Currently the way for _is_sharded to be False is if
# world_size == 1 or sharding_strategy is NO_SHARD.
assert (
self.world_size == 1 or self.sharding_strategy == ShardingStrategy.NO_SHARD
), "Currently the way for _is_sharded to be False is \
world_size == 1 or sharding_stratagy is set to be NO_SHARD"
if self.sharding_strategy == ShardingStrategy.NO_SHARD:
# if a communication hook was not registered,
# then a default hook (`all_reduce`) will be used
self.communication_hook(self.communication_hook_state, param.grad)
self._cast_grad_to_param_dtype(param.grad, param)
# Regardless of sharding or not, offload the grad to CPU if we are
# offloading params. This is so param and grad reside on same device
# which is needed for the optimizer step.
if self.cpu_offload.offload_params:
# We specify non_blocking=True
# and ensure the appropriate synchronization is done by waiting
# streams in _wait_for_post_backward.
param._cpu_grad.copy_( # type: ignore[attr-defined]
grad.detach(), non_blocking=True
)
# Don't let this memory get reused until after the transfer.
grad.data.record_stream(torch.cuda.current_stream())
# After _post_backward_hook returns, orig_grad_data will eventually
# go out of scope, at which point it could otherwise be freed for
# further reuse by the main stream while the div/reduce_scatter/copy
# are underway in the post_backward stream. See:
# github.com/NVIDIA/apex/blob/master/apex/parallel/distributed.py
orig_grad_data.record_stream(self._streams["post_backward"])
def _cast_grad_to_param_dtype(
self,
grad: torch.Tensor,
param: FlatParameter,
):
"""
Casts gradient ``grad`` back to the full parameter dtype so that the
optimizer step runs with that dtype. This performs an actual cast if
1. parameters were in reduced precision during the forward since then
gradients would be in that reduced precision, or
2. parameters were not in reduced precision but gradients were in
reduced precision for communication.
However, if a low precision communication hook is registered, then this
dtype cast happens in the hook instead.
"""
self._assert_state(TrainingState_.BACKWARD_POST)
if (
not self._low_precision_hook_enabled()
and (
self._mixed_precision_enabled_for_params()
or self._mixed_precision_enabled_for_reduce()
)
):
low_prec_grad_data = grad.data
grad.data = grad.data.to(dtype=param.dtype)
# Do not let the low precision gradient memory get reused until
# the cast to full parameter precision completes
low_prec_grad_data.record_stream(torch.cuda.current_stream())
def _queue_wait_for_post_backward(self) -> None:
"""Try to queue a `wait_for_post_backward` callback.
Only called on root and only queue one callback at the beginning of
outer most backward.
"""
assert (
self._is_root
), "_queue_wait_for_post_backward can only be called on root."
if not self._post_backward_callback_queued:
self._assert_state([TrainingState_.IDLE])
self._post_backward_callback_queued = True
Variable._execution_engine.queue_callback(self._wait_for_post_backward)
@torch.no_grad()
def _wait_for_post_backward(self) -> None:
"""Wait for post-backward to finish. Only called on root instance."""
assert self._is_root, "_wait_for_post_backward can only be called on root."
# Check if the root module has params and if any of them has
# the `requires_grad` field set. If `requires_grad=False` for
# all the params, the post_backward hook will not fire and the
# state will remain in `TrainingState_.BACKWARD_PRE`.
if any([p.requires_grad for p in self.params]):
self._assert_state(TrainingState_.BACKWARD_POST)
else:
self._assert_state(TrainingState_.BACKWARD_PRE)
if self._require_backward_grad_sync:
torch.cuda.current_stream().wait_stream(self._streams["post_backward"])
if self.cpu_offload.offload_params:
# We need to wait for the non-blocking GPU ->
# CPU grad transfers to finish. We need to do this for GPU -> CPU
# copies because when grad is on CPU, it won't wait for any CUDA
# stream to finish GPU -> CPU copies unless we explicitly block the
# host-side with synchronize().
torch.cuda.current_stream().synchronize()
# A backward pass is done, clean up below.
self._exec_order_data.reset()
def _finalize_params(fsdp_module: FullyShardedDataParallel) -> None:
"""Helper used below on all fsdp modules."""
for p in fsdp_module.params:
if p.requires_grad:
if hasattr(p, "_shard_bwd_hook"):
assert len(p._shard_bwd_hook) == 2, ( # type: ignore[attr-defined]
"p._shard_bwd_hook fields are not valid."
)
p._shard_bwd_hook[1].remove() # type: ignore[attr-defined]
delattr(p, "_shard_bwd_hook")
# Preserve the gradient accumulation state if not
# synchronizing: `p.grad` remains the unsharded gradient
# accumulated from prior `no_sync()` iterations, and
# `p._saved_grad_shard` remains the sharded gradient from
# the last synchronized iteration
if not self._require_backward_grad_sync:
continue
# Set `p.grad` as needed to ensure optimizer correctness
# since optimizers operate on the `grad` attribute
if hasattr(p, "_cpu_grad"):
p_assert(
p.device == torch.device("cpu"),
f"Device mismatch: p={p.device} " # type: ignore[attr-defined]
f"p._cpu_grad={p._cpu_grad}"
)
p.grad = p._cpu_grad # type: ignore[attr-defined]
elif hasattr(p, "_saved_grad_shard"):
p_assert(
p.device == p._saved_grad_shard.device, # type: ignore[attr-defined]
f"Device mismatch: p={p.device} " # type: ignore[attr-defined]
f"p._saved_grad_shard={p._saved_grad_shard.device}"
)
p.grad = p._saved_grad_shard # type: ignore[attr-defined]
else:
p_assert(
not p._is_sharded or not p._post_backward_called,
"All sharded parameters that received gradient should "
"use `_saved_grad_shard`"
)
if hasattr(p, "_saved_grad_shard"):
delattr(p, "_saved_grad_shard")
p_assert(
hasattr(p, '_post_backward_called'),
"Expected flag _post_backward_called to be set on param."
)
# Reset _post_backward_called in preparation for the next iteration.
p._post_backward_called = False
# Update root and nested FSDP's hooks and flags.
for m in self.modules(): # includes self
if isinstance(m, FullyShardedDataParallel):
if any(p.requires_grad for p in m.parameters()):
# Check if the module has params and if any of them has
# the `requires_grad` field set. If `requires_grad=False` for
# all the params, the post_backward hook will not fire and the
# state will remain in `TrainingState_.BACKWARD_PRE`.
managed_param_requires_grad = any(p.requires_grad for p in m.params)
if managed_param_requires_grad:
p_assert(
all(hasattr(p, '_post_backward_called') for p in m.params),
"Expected all params to have flag _post_backward_called set!"
)
post_backward_hook_called = any(p._post_backward_called for p in m.params)
if post_backward_hook_called:
m._assert_state(TrainingState_.BACKWARD_POST)
else:
# The post-backward hook was not called, meaning the param
# did not have a gradient computed. It was either unused
# in the forward pass or unused in the loss computation, so it did
# not get a gradient.
m._assert_state([TrainingState_.BACKWARD_PRE, TrainingState_.IDLE])
else:
m._assert_state(TrainingState_.BACKWARD_PRE)
else:
# When `m` and its children have no non-ignored params or
# have non-ignored params but none with `requires_grad==True`,
# there are two cases:
# 1. output tensors are `requires_grad==True`. In this case,
# pre-backward hook is still registered, so it is in BACKWARD_PRE state.
# 2. output tensors are `requires_grad==False`. In this case,
# pre-backward hook is not registered, so it is in IDLE state.
m._assert_state([TrainingState_.BACKWARD_PRE, TrainingState_.IDLE])
_finalize_params(m)
m._pre_backward_hook_has_run = False
m.training_state = TrainingState_.IDLE
if m._is_root:
# reset this flag for cases like "one forward pass + multiple backward passes"
self._post_backward_callback_queued = False
if self._use_param_exec_order_policy() and self._param_exec_order_prep_stage:
self._param_exec_order_policy_second_iter_init()
def _param_exec_order_policy_second_iter_init(self) -> None:
self._param_exec_order_prep_stage = False
# Reverse so that the parameters in self._fsdp_params_exec_order are ordered
# based on the execution order in the forward pass.
self._fsdp_params_exec_order.reverse()
for m in self.modules():
if m is not self and isinstance(m, FullyShardedDataParallel):
assert hasattr(
m, "_param_exec_order_policy"
), "Non-root FSDP modules should also have _param_exec_order_policy attribute"
assert hasattr(
m, "_param_exec_order_prep_stage"
), "Non-root FSDP modules should also have _param_exec_order_prep_stage attribute"
m._param_exec_order_prep_stage = False
# TODO (linjianma): Construct a fsdp_wrap_map whose keys are all children modules with a FSDP wrap,
# and values are its FSDP wraps. These children FSDP wraps will be detached from the root FSDP module
# and will be used to schedule the parameters (rebuild_full_params and reshard).
# TODO (linjianma): Remove all internal FSDP wraps from the root FSDP module.
# TODO (linjianma): Based on self._fsdp_params_exec_order, get the information
# needed to patch the forward() function of each key in the fsdp_wrap_map. The rules are as follows:
# 1: Before each forward(), rebuild_full_params of all parameters that are currently sharded and
# will be used in the forward, and reshard all parameters that are currently full and will not be
# used in the next forward()
# 2: After each forward(), reshard all parameters just used in the forward, and rebuild_full_params of
# all parameters that will be used next.
# TODO (linjianma): Patch the forward of each model in the keys
# of fsdp_wrap_map based on the information above.
def _update_p_data(self, p, output_tensor: torch.Tensor) -> None:
"""
Helper function to update p.data pointer.
Args:
output_tensor (torch.Tensor): this tensor contains the data we just gathered.
"""
p.data = output_tensor
# Trim any padding and reshape to match original size.
p.data = p.data[:p._unsharded_size.numel()].view(p._unsharded_size) # type: ignore[attr-defined]
@torch.no_grad()
def _rebuild_full_params(self) -> List[Tuple[torch.Tensor, bool]]:
"""
Gather all shards of params.
"""
# _summon_full_params must do a full precision rebuild even under mixed
# precision, because it is used for e.g. checkpoint where we'd like to
# checkpoint in full precision.
force_full_precision = (self.training_state == TrainingState_.SUMMON_FULL_PARAMS)
# full param output tensors and a flag indicating whether
# _summon_full_params can free them or not. It is possible that we can't
# free the full param, which currently occurs when the returned
# parameter points to the unsharded param when world_size == 1, or when
# we're returning the full parameter and reshard_after_forward=False
# (because we need to ensure p._full_param_padded stays intact)
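# Illustration: with world_size == 1, a param's entry is (p.data, False)
# since p.data aliases the unsharded parameter and must not be freed,
# whereas a freshly all-gathered full param is appended as (tensor, True).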
output_tensors: List[Tuple[torch.Tensor, bool]] = []
with torch.cuda.stream(self._streams["all_gather"]):
for p in self.params:
mixed_precision_cast_ran = (
self._mixed_precision_enabled_for_params()
and not force_full_precision
)
if mixed_precision_cast_ran:
self._cast_param_shards_to_dtype()
# TODO: remove below
for p in self.params:
assert p.dtype == self.mixed_precision.param_dtype
# We can skip moving params to GPU if mixed precision, as p.data
# would then be pointing to p._mp_shard which is already on
# self.compute_device.
if self.cpu_offload.offload_params and not mixed_precision_cast_ran:
# Move params to GPU if needed. Note that we don't use
# self._full_param_padded.device here because the attr is
# not set always, i.e. when world_size=1 and
# p._is_sharded = False. However when it is set, the
# device is always self.compute_device.
p.data = p.data.to(self.compute_device, non_blocking=True)
# Check the validity of this `_rebuild_full_params()` call in
# terms of execution order (regardless of if FSDP actually
# needs to all-gather or not)
self._check_rebuild_full_params(p)
# e.g., when world_size == 1
if not p._is_sharded: # type: ignore[attr-defined]
if mixed_precision_cast_ran:
# p.data should be the same type as p._mp_shard, and it
# is safe to free.
assert p.data.dtype == p._mp_shard.dtype
# Safe to free because p.data points to the mp shard.
output_tensors.append((p.data, True))
else:
# p.data points to the unsharded parameter, so not safe to
# free.
output_tensors.append((p.data, False))
continue
# If full param has been rebuilt or has not been freed, no need to call all gather
elif (
p._full_param_padded.storage().size() # type: ignore[attr-defined]
== p._full_param_padded.size().numel() # type: ignore[attr-defined]
):
# Check that the full param is in the expected precision, if
# training with mixed precision
if mixed_precision_cast_ran:
if p._full_param_padded.dtype != self.mixed_precision.param_dtype:
raise ValueError(
"_rebuild_full_params: Expected full param to be "
f"of type {self.mixed_precision.param_dtype}, "
f"but got {p._full_param_padded.dtype}!"
)
# output is full_param_padded which can be freed depending
# on reshard_after_forward (this path is exercised by tests
# in test_fsdp_summon_full_params).
output_tensors.append((p._full_param_padded, self.reshard_after_forward))
self._update_p_data(p, output_tensor=p._full_param_padded) # type: ignore[attr-defined]
continue
else:
# If full param has not been rebuilt or has been freed, call all gather
p_data = p.data # type: ignore[attr-defined]
p_full_size = p._full_param_padded.size() # type: ignore[attr-defined]
assert (
p_full_size.numel() == p_data.numel() * self.world_size
), "Param full size should be equal to its shard size multiply world_size."
assert (
p._full_param_padded.storage().size() == 0 # type: ignore[attr-defined]
), "Full param's storage should have been freed before if all gather is needed." # type: ignore[attr-defined]
if (
self._mixed_precision_enabled_for_params()
and force_full_precision
):
# p._full_param_padded has the reduced precision type,
# but we need full precision rebuild as we're in
# _summon_full_params. Note that this is why
# _summon_full_params collects locally used params from
# _rebuild_full_params instead of relying on
# p._full_param_padded, as it may not always be
# allocated such as during mixed precision.
output_tensor = p_data.new_zeros(p_full_size)
else:
# Allocate based on full size from all shards.
_alloc_storage(p._full_param_padded, size=p_full_size) # type: ignore[attr-defined]
output_tensor = p._full_param_padded # type: ignore[attr-defined]
# Fill output_tensor with (p.data for each shard in self.world_size)
dist._all_gather_base(
output_tensor, p_data, group=self.process_group
)
# The full parameter, which can be freed. Note that we
# append here before update_p_data so as to not save the
# tensor with its padding trimmed, which causes issues with
# writeback in _summon_full_params.
output_tensors.append((output_tensor, True))
# Set p.data = output_tensor (with padding trimmed)
self._update_p_data(p, output_tensor=output_tensor)
# We can free the reduced precision shard as we have the
# full precision parameter.
if (
self._mixed_precision_enabled_for_params()
):
self._free_mp_shard(cast(List[FlatParameter], [p]))
return output_tensors
def _check_rebuild_full_params(self, param: FlatParameter):
"""
Checks the validity of a call to :meth:`_rebuild_full_params` in terms
of the execution order. If on the first iteration, this uses an
all-gather to check that all ranks are running ``forward()`` with the
same parameter, erroring if not, and on subsequent iterations, if the
forward order differs from that of the first iteration (meaning that we
can no longer guarantee correct execution since all-gathers may be
mismatched), then we issue a warning to the user. This only issues
warnings on the first deviating iteration and stops checking
thereafter.
Only the :meth:`_rebuild_full_params` calls in the forward pass are
checked since a correct forward order should imply a correct
pre-backward order for typical cases.
Executing in ``no_sync()`` does not affect this check for
``FULL_SHARD`` and ``SHARD_GRAD_OP``: (1) Being in ``no_sync()`` in the
first iteration does not yield a different forward
:meth:`_rebuild_full_params()` sequence, and (2) being in ``no_sync()``
in a later iteration does not give false positive warnings since the
forward :meth:`_rebuild_full_params()` sequence still matches the first
iteration sequence (for ``FULL_SHARD``) or the first iteration
sequence's prefix (for ``SHARD_GRAD_OP``).
"""
# Only check when rebuilding the full parameters in the forward pass,
# and skip the check (1) when in eval mode since then there is not a
# safe point at which to reset the execution order data and (2) if
# world size is 1 since then there is no chance of desynchronization
if self.training_state != TrainingState_.FORWARD or \
not self.training or self.world_size == 1:
return
eod = self._exec_order_data
param_index = eod.get_param_index(param)
if not eod.is_first_iter:
# Only issue warnings on the first deviating iteration and stop
# checking thereafter to avoid flooding the console
if eod.warn_status == _ExecOrderWarnStatus.WARNED:
return
# However, we may issue multiple warnings on the first deviating
# iteration to help debugging, where either:
# 1. This iteration sees an extra `_rebuild_full_params()` in
# `forward()` compared to the first iteration
msg_prefix = curr_param_order = None # non-`None` means we warn
if eod.index >= len(eod.param_order):
msg_prefix = "Expected to not rebuild any more parameters " \
"in `forward()` for this module but trying to rebuild " \
"parameters for "
curr_param_order = eod.param_order + [param_index]
else:
expected_param_index = eod.param_order[eod.index]
# 2. This iteration sees the same number of
# `_rebuild_full_params()` (so far) but the current parameter
# differs
if param_index != expected_param_index:
expected_param_names = eod.get_unflat_param_names(expected_param_index)
assert len(expected_param_names) > 0, \
"Expected parameter should always be valid"
msg_prefix = "Expected to rebuild parameters in " \
f"`forward()` for {expected_param_names} but " \
"instead trying to rebuild parameters for "
curr_param_order = eod.param_order[:eod.index - 1] + [param_index]
to_issue_warning = msg_prefix is not None
if to_issue_warning:
assert curr_param_order is not None
param_names = eod.get_unflat_param_names(param_index)
is_added_param = len(param_names) == 0
if is_added_param:
msg_suffix = "a newly-added parameter since construction time"
else:
msg_suffix = f"{param_names}"
sub_msg = msg_prefix + msg_suffix
first_iter_param_names = [
eod.get_unflat_param_names(index) for index in eod.param_order
]
curr_iter_param_names = [
eod.get_unflat_param_names(index) for index in curr_param_order
]
warnings.warn(
"Forward order differs from that of the first iteration "
f"on rank {self.rank} -- collectives are unchecked and may "
"give incorrect results or hang\n" + sub_msg + "\n" +
f"First iteration's forward order: {first_iter_param_names}"
"\nThis iteration's forward order (so far): "
f"{curr_iter_param_names}"
)
eod.warn_status = _ExecOrderWarnStatus.WARNING
eod.index += 1
else:
# Use `compute_device` instead of the parameter's device in case it
# is offloaded on CPU and we are using NCCL backend, which requires
# communicated tensors be on GPU
device = self.compute_device
indices = torch.zeros(self.world_size, dtype=torch.int32, device=device)
index = torch.tensor([param_index], dtype=torch.int32, device=device)
dist._all_gather_base(indices, index, group=self.process_group)
# Check that all ranks plan to all-gather the same parameter index
for (r1, i1), (r2, i2) in itertools.combinations(
((rank, indices[rank]) for rank in range(self.world_size)), 2,
):
if not torch.equal(i1, i2):
r1_param_names = eod.get_unflat_param_names(i1)
r2_param_names = eod.get_unflat_param_names(i2)
raise RuntimeError(
f"Forward order differs across ranks: rank {r1} is "
"rebuilding full parameters in `forward()` for "
f"{r1_param_names} while rank {r2} is rebuilding full "
f"parameters in `forward()` for {r2_param_names}"
)
eod.param_order.append(param_index)
@torch.no_grad()
def _prep_grads_for_backward(self) -> None:
"""Make sure p.grad has the correct size/device, otherwise set it to None."""
for p in self.params:
if p.grad is not None and (
p.grad.size() != p._unsharded_size # type: ignore[attr-defined]
or p.grad.device != p.device
):
offloaded: bool = p.grad.device != p.device
if offloaded:
assert self.cpu_offload.offload_params, \
"`p.grad.device` and `p.device` should be the same " \
"if not offloading parameters to CPU"
prev_iter_outside_no_sync: bool = \
p.grad.size() == p._local_shard.shape # type: ignore[attr-defined]
# As long as the previous iteration was outside `no_sync()`,
# then we must save the gradient in `_saved_grad_shard`, even
# if the current iteration is inside `no_sync()`. This is to
# prepare for the next iteration outside `no_sync()`, which may
# try to accumulate gradients. FSDP accumulates gradients in
# the separate variable `p._saved_grad_shard` to leave `p.grad`
# for the per-iteration gradient.
if prev_iter_outside_no_sync:
# FSDP currently does not support gradient accumulation
# outside `no_sync()` when using CPU offloading (see the
# warning in the class's docstring).
if not offloaded:
p._saved_grad_shard = p.grad.data # type: ignore[attr-defined]
p.grad = None
@torch.no_grad()
def _free_full_params(self, params: Optional[List[FlatParameter]] = None) -> None:
"""
Free up storage for full parameters.
"""
if params is None:
params = self.params
current_stream = torch.cuda.current_stream()
for p in params:
# e.g., world_size == 1 or self.sharding_strategy == NO_SHARD
if not p._is_sharded: # type: ignore[attr-defined]
if (
self._mixed_precision_enabled_for_params()
):
self._free_mp_shard(cast(List[FlatParameter], [p]))
continue
# Don't let PyTorch reuse this memory until all work in the current
# stream is complete.
p._full_param_padded.record_stream(current_stream) # type: ignore[attr-defined]
# There may be external references to the Tensor Storage that we
# can't modify, such as references that are created by
# ctx.save_for_backward in the forward pass. Thus when we
# unshard parameters, we should reuse the original Tensor
# Storage object and unshard it in-place. For now, just resize
# the Storage to 0 to save memory.
_free_storage(p._full_param_padded) # type: ignore[attr-defined]
@torch.no_grad()
def _use_param_local_shard(
self, params: Optional[List[FlatParameter]] = None
) -> None:
"""Use local shard for a list of params. Also implicitly offloads
parameters back to CPU if we are CPU offloading."""
if params is None:
params = self.params
for p in params:
if self.cpu_offload.offload_params:
# Ensure local_shard resides in CPU if we are offloading params.
assert p._local_shard.device == torch.device( # type: ignore[attr-defined]
"cpu"
), "Expected p._local_shard to be on CPU"
p.data = p._local_shard # type: ignore[attr-defined]
def _assert_state(self, state: Union[TrainingState_, List[TrainingState_]]) -> None:
"""Assert we are in the given state."""
# Since assert can be turned off and this error checking
# is really important, we use explicit error checking
# and raise a ValueError if needed.
if isinstance(state, TrainingState_):
state = [state]
if self.training_state not in state:
msg = (
f"expected to be in states {state} but current state "
f"is {self.training_state}"
)
# In case we are failing in the context of an autograd hook, asserting
# may not generate a useful message, so print it as well to be sure.
if self.rank == 0:
print(f"Asserting FSDP instance is: {self}")
print(f"ERROR: {msg}")
traceback.print_stack()
raise ValueError(msg)
@contextmanager
def no_sync(self) -> Generator:
"""
A context manager to disable gradient synchronizations across FSDP
instances. Within this context, gradients will be accumulated in module
variables, which will later be synchronized in the first
forward-backward pass after exiting the context. This should only be
used on the root FSDP instance and will recursively apply to all
children FSDP instances.
.. note:: This likely results in higher memory usage because FSDP will
accumulate the full model gradients (instead of gradient shards)
until the eventual sync.
.. note:: When used with CPU offloading, the gradients will not be
offloaded to CPU when inside the context manager. Instead, they
will only be offloaded right after the eventual sync.
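Example (a minimal sketch; assumes ``fsdp_model``, ``optim``, ``x1``, and
``x2`` are defined elsewhere)::
    >>> # xdoctest: +SKIP("undefined variables")
    >>> with fsdp_model.no_sync():
    >>>     fsdp_model(x1).sum().backward()  # grads accumulated locally, no sync
    >>> fsdp_model(x2).sum().backward()  # grads synchronized in this backward
    >>> optim.step()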
"""
self._lazy_init()
assert self._is_root, "`no_sync()` on inner FSDP instances is not supported"
self._assert_state(TrainingState_.IDLE)
old_flags = []
for m in self.modules():
if isinstance(m, FullyShardedDataParallel):
old_flags.append((m, m._require_backward_grad_sync))
m._require_backward_grad_sync = False
try:
yield
finally:
for m, old_flag in old_flags:
assert not m._require_backward_grad_sync, (
"`_require_backward_grad_sync` was incorrectly set to "
"`True` while in the `no_sync()` context manager"
)
m._require_backward_grad_sync = old_flag
@property
def params_with_grad(self) -> List[Parameter]:
"""
Recursively returns a list of all module parameters that have a gradient.
"""
return [p for p in self.parameters() if p.grad is not None]
@torch.no_grad()
def clip_grad_norm_(
self, max_norm: Union[float, int], norm_type: Union[float, int] = 2.0
) -> None:
"""
Clip all gradients at this point in time. The norm is computed over all
gradients together, as if they were concatenated into a single vector.
Gradients are modified in-place.
Args:
max_norm (float or int): max norm of the gradients
norm_type (float or int): type of the used p-norm. Can be ``'inf'``
for infinity norm.
Returns:
Total norm of the parameters (viewed as a single vector).
.. note:: This is analogous to ``torch.nn.utils.clip_grad_norm_`` but
handles the partitioning and multiple devices per rank under the
hood. The default torch util is not applicable here, because each
rank only has a partial view of all the grads in the model, so
calling it for FSDP models would lead to different scaling being
applied per subset of model parameters.
.. warning:: This needs to be called on all ranks, since synchronization
primitives will be used.
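Example (a minimal sketch; assumes ``fsdp_model``, ``optim``, and ``batch``
are defined elsewhere)::
    >>> # xdoctest: +SKIP("undefined variables")
    >>> loss = fsdp_model(batch).sum()
    >>> loss.backward()
    >>> fsdp_model.clip_grad_norm_(max_norm=1.0)  # must be called on all ranks
    >>> optim.step()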
"""
self._lazy_init()
self._wait_for_previous_optim_step()
assert self._is_root, "clip_grad_norm should only be called on the root (parent) instance"
self._assert_state(TrainingState_.IDLE)
max_norm = float(max_norm)
norm_type = float(norm_type)
# Compute the norm over this shard's gradients and sync it across workers
local_norm = _calc_grad_norm(self.params_with_grad, norm_type).cuda() # type: ignore[arg-type]
if norm_type == math.inf:
total_norm = local_norm
dist.all_reduce(total_norm, op=torch.distributed.ReduceOp.MAX, group=self.process_group)
else:
total_norm = local_norm ** norm_type
dist.all_reduce(total_norm, group=self.process_group)
total_norm = total_norm ** (1.0 / norm_type)
if self.cpu_offload:
total_norm = total_norm.cpu()
clip_coef = torch.tensor(max_norm, dtype=total_norm.dtype, device=total_norm.device) / (total_norm + 1e-6)
if clip_coef < 1:
# multiply by clip_coef, aka, (max_norm/total_norm).
for p in self.params_with_grad:
assert p.grad is not None
p.grad.detach().mul_(clip_coef.to(p.grad.device))
@staticmethod
def full_optim_state_dict(
model: torch.nn.Module,
optim: torch.optim.Optimizer,
optim_input: Optional[Union[
List[Dict[str, Any]], Iterable[torch.nn.Parameter],
]] = None,
rank0_only: bool = True,
group=None,
) -> Dict[str, Any]:
"""
Consolidates the full optimizer state on rank 0 and returns it
as a :class:`dict` following the convention of
:meth:`torch.optim.Optimizer.state_dict`, i.e. with keys ``"state"``
and ``"param_groups"``. The flattened parameters in ``FSDP`` modules
contained in ``model`` are mapped back to their unflattened parameters.
.. warning:: This needs to be called on all ranks since synchronization
primitives are used. However, if ``rank0_only=True``, then the
state dict is only populated on rank 0, and all other ranks return
an empty :class:`dict`.
.. warning:: Unlike ``torch.optim.Optimizer.state_dict()``, this method
uses full parameter names as keys instead of parameter IDs.
.. warning:: If you do not pass ``model.parameters()`` as the first
argument to the optimizer, then you should pass that same value to
this method as ``optim_input``.
.. note:: Like in :meth:`torch.optim.Optimizer.state_dict`, the tensors
contained in the optimizer state dict are not cloned, so there may
be aliasing surprises. For best practices, consider saving the
returned optimizer state dict immediately, e.g. using
``torch.save()``.
Args:
model (torch.nn.Module): Root module (which may or may not be a
:class:`FullyShardedDataParallel` instance) whose parameters
were passed into the optimizer ``optim``.
optim (torch.optim.Optimizer): Optimizer for ``model`` 's
parameters.
optim_input (Optional[Union[List[Dict[str, Any]], Iterable[torch.nn.Parameter]]]):
Input passed into the optimizer ``optim`` representing either a
:class:`list` of parameter groups or an iterable of parameters;
if ``None``, then this method assumes the input was
``model.parameters()``. (Default: ``None``)
rank0_only (bool): If ``True``, saves the populated :class:`dict`
only on rank 0; if ``False``, saves it on all ranks. (Default:
``True``)
group (dist.ProcessGroup): Model's process group or ``None`` if using
the default process group. (Default: ``None``)
Returns:
Dict[str, Any]: A :class:`dict` containing the optimizer state for
``model`` 's original unflattened parameters and including keys
"state" and "param_groups" following the convention of
:meth:`torch.optim.Optimizer.state_dict`. If ``rank0_only=True``,
then nonzero ranks return an empty :class:`dict`.
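Example (a minimal sketch; assumes ``model``, ``optim``, and ``PATH`` are
defined elsewhere)::
    >>> # xdoctest: +SKIP("undefined variables")
    >>> from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
    >>> full_osd = FSDP.full_optim_state_dict(model, optim)  # populated on rank 0 only by default
    >>> if torch.distributed.get_rank() == 0:
    >>>     torch.save(full_osd, PATH)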
"""
osd = optim.state_dict()
osd_state, osd_param_groups = osd["state"], osd["param_groups"]
rank = dist.get_rank(group)
to_save = not rank0_only or rank == 0
full_osd: Dict = {"state": {}, "param_groups": []} if to_save else {}
full_osd_state = full_osd["state"] if to_save else None
# Construct the local mapping between unflattened parameter names
# (`_OptimStateKey`s) and parameter IDs and broadcast rank 0's mapping
param_to_unflat_param_names: Dict[torch.nn.Parameter, List[str]] = \
_get_param_to_unflat_param_names(model)
flat_param_id_to_param: List[torch.nn.Parameter] = \
_get_param_id_to_param(model, optim_input)
optim_state_key_to_flat_param_id: Dict[_OptimStateKey, int] = {} # local
r0_flat_param_id_to_optim_state_key: Dict[int, _OptimStateKey] = collections.OrderedDict() # rank 0
for flat_param_id, param in enumerate(flat_param_id_to_param):
# Do not include parameters without state to avoid empty mappings
# just like in normal `torch.optim.Optimizer.state_dict()`
if flat_param_id not in osd_state:
continue
optim_state_key = _OptimStateKey(
unflat_param_names=tuple(param_to_unflat_param_names[param]),
is_flat_param=isinstance(param, FlatParameter),
)
if rank == 0:
r0_flat_param_id_to_optim_state_key[flat_param_id] = optim_state_key
optim_state_key_to_flat_param_id[optim_state_key] = flat_param_id
obj_list = [r0_flat_param_id_to_optim_state_key] if rank == 0 else [None]
dist.broadcast_object_list(obj_list, src=0, group=group)
r0_flat_param_id_to_optim_state_key = obj_list[0]
# Ensure that all ranks have at least the optimizer states needed by
# rank 0's optimizer
missing_keys: List[_OptimStateKey] = []
for r0_optim_state_key in r0_flat_param_id_to_optim_state_key.values():
if r0_optim_state_key not in optim_state_key_to_flat_param_id:
# A parameter from rank 0's optimizer does not exist for this
# rank's optimizer
missing_keys.append(r0_optim_state_key)
continue
flat_param_id = optim_state_key_to_flat_param_id[r0_optim_state_key]
assert flat_param_id >= 0 and flat_param_id < len(flat_param_id_to_param), \
"Check the `flat_param_id_to_param` construction"
device = torch.device("cuda", torch.cuda.current_device())
num_missing = torch.tensor([len(missing_keys)], dtype=torch.int32, device=device)
dist.all_reduce(num_missing, group=group)
if num_missing.item() > 0:
obj_list = [None for _ in range(dist.get_world_size(group))]
dist.all_gather_object(obj_list, missing_keys, group=group)
error_msg = (
"FSDP currently requires each rank to have at least the "
"optimizer states needed by rank 0's optimizer but some ranks "
"are missing some of those states"
)
for rank, keys in enumerate(obj_list):
if len(keys) > 0:
error_msg += (
f"\nRank {rank} is missing states for the parameters: "
f"{[key.unflat_param_names for key in keys]}"
)
raise RuntimeError(error_msg)
# Iterate in rank 0's flattened parameter ID order to ensure aligned
# all-gathers across ranks
flat_param_to_fsdp_module = _get_flat_param_to_fsdp_module(model)
for r0_optim_state_key in r0_flat_param_id_to_optim_state_key.values():
flat_param_id = optim_state_key_to_flat_param_id[r0_optim_state_key]
param = flat_param_id_to_param[flat_param_id]
if r0_optim_state_key.is_flat_param:
fsdp_module = flat_param_to_fsdp_module[param]
unflat_state = _unflatten_optim_state(
param, osd_state[flat_param_id], fsdp_module, to_save,
)
if to_save:
assert len(unflat_state) == len(r0_optim_state_key.unflat_param_names)
for unflat_param_name, unflat_param_state in zip(
r0_optim_state_key.unflat_param_names, unflat_state,
):
full_osd_state[unflat_param_name] = unflat_param_state
elif to_save:
assert len(r0_optim_state_key.unflat_param_names) == 1
unflat_param_name = r0_optim_state_key.unflat_param_names[0]
full_osd_state[unflat_param_name] = copy.copy(osd_state[flat_param_id])
for state_name, value in full_osd_state[unflat_param_name].items():
if torch.is_tensor(value):
full_osd_state[unflat_param_name][state_name] = value.cpu()
if not to_save:
return {}
# Handle the "param_groups" part of the optimizer state dict
full_osd_param_groups = full_osd["param_groups"] # alias
for flat_param_group in osd_param_groups:
unflat_param_group = copy.deepcopy(flat_param_group)
param_group_params = [
flat_param_id_to_param[flat_param_id]
for flat_param_id in flat_param_group["params"]
]
nested_unflat_param_names = [
param_to_unflat_param_names[param]
for param in param_group_params
]
unflat_param_group["params"] = [
unflat_param_name
for unflat_param_names in nested_unflat_param_names
for unflat_param_name in unflat_param_names
] # flatten the list of lists
full_osd_param_groups.append(unflat_param_group)
return full_osd
@staticmethod
def shard_full_optim_state_dict(
full_optim_state_dict: Dict[str, Any],
model: torch.nn.Module,
optim_input: Optional[Union[
List[Dict[str, Any]], Iterable[torch.nn.Parameter],
]] = None,
) -> Dict[str, Any]:
"""
Shards the full optimizer state dict ``full_optim_state_dict`` by
remapping the state to flattened parameters instead of unflattened
parameters and restricting to only this rank's part of the optimizer
state. The first argument should be the return value of
:meth:`full_optim_state_dict`.
Example::
>>> # xdoctest: +SKIP("undefined variables")
>>> from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
>>> model, optim = ...
>>> full_osd = FSDP.full_optim_state_dict(model, optim)
>>> torch.save(full_osd, PATH)
>>> # Define new model with possibly different world size
>>> new_model, new_optim = ...
>>> full_osd = torch.load(PATH)
>>> sharded_osd = FSDP.shard_full_optim_state_dict(full_osd, new_model)
>>> new_optim.load_state_dict(sharded_osd)
.. warning:: If you do not pass ``model.parameters()`` as the first
argument to the optimizer, then you should pass that same value to
this method as ``optim_input``.
.. note:: Both :meth:`shard_full_optim_state_dict` and
:meth:`scatter_full_optim_state_dict` may be used to get the
sharded optimizer state dict to load. Assuming that the full
optimizer state dict resides in CPU memory, the former requires
each rank to have the full dict in CPU memory, where each rank
individually shards the dict without any communication, while the
latter requires only rank 0 to have the full dict in CPU memory,
where rank 0 moves each shard to GPU memory (for NCCL) and
communicates it to ranks appropriately. Hence, the former has
higher aggregate CPU memory cost, while the latter has higher
communication cost.
Args:
full_optim_state_dict (Dict[str, Any]): Optimizer state dict
corresponding to the unflattened parameters and holding the
full non-sharded optimizer state.
model (torch.nn.Module): Root module (which may or may not be a
:class:`FullyShardedDataParallel` instance) whose parameters
correspond to the optimizer state in ``full_optim_state_dict``.
optim_input (Optional[Union[List[Dict[str, Any]], Iterable[torch.nn.Parameter]]]):
Input passed into the optimizer representing either a
:class:`list` of parameter groups or an iterable of parameters;
if ``None``, then this method assumes the input was
``model.parameters()``. (Default: ``None``)
Returns:
Dict[str, Any]: The full optimizer state dict now remapped to
flattened parameters instead of unflattened parameters and
restricted to only include this rank's part of the optimizer state.
"""
sharded_osd = _flatten_full_optim_state_dict(
full_optim_state_dict, model, True,
)
return _rekey_sharded_optim_state_dict(sharded_osd, model, optim_input)
@staticmethod
def scatter_full_optim_state_dict(
full_optim_state_dict: Optional[Dict[str, Any]],
model: torch.nn.Module,
optim_input: Optional[Union[
List[Dict[str, Any]], Iterable[torch.nn.Parameter],
]] = None,
group: Optional[Any] = None,
) -> Dict[str, Any]:
"""
Scatters the full optimizer state dict from rank 0 to all other ranks,
returning the sharded optimizer state dict on each rank. The return
value is the same as :meth:`shard_full_optim_state_dict`, and on rank
0, the first argument should be the return value of
:meth:`full_optim_state_dict`.
Example::
>>> # xdoctest: +SKIP("undefined variables")
>>> from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
>>> model, optim = ...
>>> full_osd = FSDP.full_optim_state_dict(model, optim) # only non-empty on rank 0
>>> # Define new model with possibly different world size
>>> new_model, new_optim, new_group = ...
>>> sharded_osd = FSDP.scatter_full_optim_state_dict(full_osd, new_model, group=new_group)
>>> new_optim.load_state_dict(sharded_osd)
.. note:: Both :meth:`shard_full_optim_state_dict` and
:meth:`scatter_full_optim_state_dict` may be used to get the
sharded optimizer state dict to load. Assuming that the full
optimizer state dict resides in CPU memory, the former requires
each rank to have the full dict in CPU memory, where each rank
individually shards the dict without any communication, while the
latter requires only rank 0 to have the full dict in CPU memory,
where rank 0 moves each shard to GPU memory (for NCCL) and
communicates it to ranks appropriately. Hence, the former has
higher aggregate CPU memory cost, while the latter has higher
communication cost.
Args:
full_optim_state_dict (Optional[Dict[str, Any]]): Optimizer state
dict corresponding to the unflattened parameters and holding
the full non-sharded optimizer state if on rank 0; the argument
is ignored on nonzero ranks.
model (torch.nn.Module): Root module (which may or may not be a
:class:`FullyShardedDataParallel` instance) whose parameters
correspond to the optimizer state in ``full_optim_state_dict``.
optim_input (Optional[Union[List[Dict[str, Any]], Iterable[torch.nn.Parameter]]]):
Input passed into the optimizer representing either a
:class:`list` of parameter groups or an iterable of parameters;
if ``None``, then this method assumes the input was
``model.parameters()``. (Default: ``None``)
group (dist.ProcessGroup): Model's process group or ``None`` if
using the default process group. (Default: ``None``)
Returns:
Dict[str, Any]: The full optimizer state dict now remapped to
flattened parameters instead of unflattened parameters and
restricted to only include this rank's part of the optimizer state.
"""
# Try to use the passed-in process group, the model's process group,
# or the default process group (i.e. `None`) in that priority order
if group is None and hasattr(model, "process_group"):
group = model.process_group
rank = dist.get_rank(group)
world_size = dist.get_world_size(group)
# Check for a valid broadcast device, preferring GPU when available
using_nccl = dist.distributed_c10d._check_for_nccl_backend(group)
broadcast_device = torch.device("cuda") if torch.cuda.is_available() \
else torch.device("cpu")
if using_nccl and not torch.cuda.is_available():
raise RuntimeError("NCCL requires a GPU for collectives")
# Flatten the optimizer state dict and construct a copy with the
# positive-dimension tensors' shapes in place of the tensors themselves
# since those tensors will be broadcast separately to avoid copying
if rank == 0:
if full_optim_state_dict is None:
raise ValueError("Rank 0 must pass in the full optimizer state dict")
flat_osd = _flatten_full_optim_state_dict(full_optim_state_dict, model, False)
processed_osd = _process_pos_dim_tensor_state(flat_osd, world_size)
# Broadcast the optim state dict without positive-dimension tensor
# state and the FSDP parameter IDs from rank 0 to all ranks
processed_osd = _broadcast_processed_optim_state_dict(
processed_osd if rank == 0 else None, rank, group,
)
# Broadcast positive-dimension tensor state (both sharded tensors for
# FSDP parameters and unsharded tensors for non-FSDP parameters)
sharded_osd = _broadcast_pos_dim_tensor_states(
processed_osd, flat_osd if rank == 0 else None, rank, world_size,
group, broadcast_device,
)
# Rekey the optimizer state dict to use parameter IDs according to this
# rank's `optim_input`
sharded_osd = _rekey_sharded_optim_state_dict(sharded_osd, model, optim_input)
return sharded_osd
@staticmethod
def rekey_optim_state_dict(
optim_state_dict: Dict[str, Any],
optim_state_key_type: OptimStateKeyType,
model: torch.nn.Module,
optim_input: Optional[Union[
List[Dict[str, Any]], Iterable[torch.nn.Parameter],
]] = None,
) -> Dict[str, Any]:
"""
Re-keys the optimizer state dict ``optim_state_dict`` to use the key
type ``optim_state_key_type``. This can be used to achieve
compatibility between optimizer state dicts from models with FSDP
instances and ones without.
To re-key an FSDP full optimizer state dict (i.e. from
:meth:`full_optim_state_dict`) to use parameter IDs and be loadable to
a non-wrapped model::
>>> # xdoctest: +SKIP("undefined variables")
>>> wrapped_model, wrapped_optim = ...
>>> full_osd = FSDP.full_optim_state_dict(wrapped_model, wrapped_optim)
>>> nonwrapped_model, nonwrapped_optim = ...
>>> rekeyed_osd = FSDP.rekey_optim_state_dict(full_osd, OptimStateKeyType.PARAM_ID, nonwrapped_model)
>>> nonwrapped_optim.load_state_dict(rekeyed_osd)
To re-key a normal optimizer state dict from a non-wrapped model to be
loadable to a wrapped model::
>>> # xdoctest: +SKIP("undefined variables")
>>> nonwrapped_model, nonwrapped_optim = ...
>>> osd = nonwrapped_optim.state_dict()
>>> rekeyed_osd = FSDP.rekey_optim_state_dict(osd, OptimStateKeyType.PARAM_NAME, nonwrapped_model)
>>> wrapped_model, wrapped_optim = ...
>>> sharded_osd = FSDP.shard_full_optim_state_dict(rekeyed_osd, wrapped_model)
>>> wrapped_optim.load_state_dict(sharded_osd)
Returns:
Dict[str, Any]: The optimizer state dict re-keyed using the
parameter keys specified by ``optim_state_key_type``.
"""
assert optim_state_key_type in \
(OptimStateKeyType.PARAM_NAME, OptimStateKeyType.PARAM_ID)
osd = optim_state_dict # alias
# Validate that the existing parameter keys are uniformly typed
uses_param_name_mask = [
type(param_key) is str for param_key in osd["state"]
]
uses_param_id_mask = [
type(param_key) is int for param_key in osd["state"]
]
if (any(uses_param_name_mask) and not all(uses_param_name_mask)) or \
(any(uses_param_id_mask) and not all(uses_param_id_mask)):
error_msg = f"Invalid parameter keys: {osd['state'].keys()}"
raise ValueError(error_msg)
# Return directly if the existing key type matches the target key type
if (optim_state_key_type == OptimStateKeyType.PARAM_NAME and
all(uses_param_name_mask)) or \
(optim_state_key_type == OptimStateKeyType.PARAM_ID and
all(uses_param_id_mask)):
return osd
# Otherwise, actually perform the re-keying
new_osd = {}
if optim_state_key_type == OptimStateKeyType.PARAM_NAME: # ID -> name
param_id_to_param = _get_param_id_to_param(model, optim_input)
param_to_param_name = _get_param_to_param_name(model)
param_id_to_param_name: List[str] = [
param_to_param_name[param] for param in param_id_to_param
]
new_osd["state"] = {
param_id_to_param_name[param_id]: param_state
for param_id, param_state in osd["state"].items()
}
new_osd["param_groups"] = copy.deepcopy(osd["param_groups"])
for param_group in new_osd["param_groups"]:
param_group["params"] = sorted([
param_id_to_param_name[param_id]
for param_id in param_group["params"]
])
return new_osd
elif optim_state_key_type == OptimStateKeyType.PARAM_ID: # name -> ID
param_name_to_param = _get_param_name_to_param(model)
param_to_param_id = _get_param_to_param_id(model, optim_input)
# Because not all model parameters may be passed as the optimizer
# input, we may need to drop some parameters from this mapping
param_name_to_param_id = {
param_name: param_to_param_id[param]
for param_name, param in param_name_to_param.items()
if param in param_to_param_id
}
new_osd["state"] = {
param_name_to_param_id[param_name]: param_state
for param_name, param_state in osd["state"].items()
}
new_osd["param_groups"] = copy.deepcopy(osd["param_groups"])
for param_group in new_osd["param_groups"]:
param_group["params"] = sorted([
param_name_to_param_id[param_name]
for param_name in param_group["params"]
])
return new_osd
return new_osd # should never reach here
def _get_default_comm_hook(self) -> Any:
r"""
Returns a default communication hook based on a sharding strategy.
"""
if self.sharding_strategy != ShardingStrategy.NO_SHARD:
return None
else:
return default_hooks.allreduce_hook
def _get_default_comm_hook_state(self) -> Any:
r"""
Returns a default communication hook state based on a sharding strategy.
"""
if self.sharding_strategy != ShardingStrategy.NO_SHARD:
return None
else:
return default_hooks.DefaultState(process_group=self.process_group)
def register_comm_hook(self, state: object, hook: callable):
"""
        Registers a communication hook, which gives users a flexible way to
        specify how FSDP aggregates gradients across multiple workers.
This hook can be used to implement several algorithms like
`GossipGrad <https://arxiv.org/abs/1803.05880>`_ and gradient compression
which involve different communication strategies for
parameter syncs while training with :class:`FullyShardedDataParallel`.
.. warning::
            FSDP only supports communication hooks for a ``NO_SHARD`` strategy at this time.
            If other strategies are used, an error will be raised.
        .. warning::
            The FSDP communication hook should be registered only once and before running
            the initial forward pass.
Args:
state (object): Passed to the hook to maintain any state information during the training process.
Examples include error feedback in gradient compression,
peers to communicate with next in `GossipGrad <https://arxiv.org/abs/1803.05880>`_, etc.
It is locally stored by each worker
and shared by all the gradient tensors on the worker.
hook (Callable): Callable with the following signature:
``hook: Callable[torch.Tensor] -> None``:
This function takes in a Python tensor, which represents
the full, flattened, unsharded gradient with respect to all variables
corresponding to the model this FSDP unit is wrapping
(that are not wrapped by other FSDP sub-units).
It then performs all necessary processing and returns ``None``.
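        Example (a minimal sketch; ``fsdp_model`` is a hypothetical ``NO_SHARD``
        FSDP instance, and the two-argument ``(state, grad)`` call convention
        mirroring the default ``allreduce_hook`` is an assumption here)::
            >>> # xdoctest: +SKIP("undefined variables")
            >>> def custom_allreduce_hook(state, grad: torch.Tensor) -> None:
            ...     # All-reduce the flattened, unsharded gradient and average it
            ...     torch.distributed.all_reduce(grad, group=state)
            ...     grad.div_(torch.distributed.get_world_size(state))
            >>> fsdp_model.register_comm_hook(fsdp_model.process_group, custom_allreduce_hook)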
"""
if not self.check_is_root():
raise AssertionError("register_comm_hook can only be called on a root instance.")
if self.sharding_strategy != ShardingStrategy.NO_SHARD:
raise NotImplementedError(
"Communication hooks are currently only available for a NO_SHARD strategy."
)
else:
# register same hook for root and all submodules
for submodule in self.fsdp_modules(self):
assert not submodule._hook_registered, "communication hook can be only registered once"
submodule._hook_registered = True
assert submodule.communication_hook == self._get_default_comm_hook(),\
f"communication hook should be default, but it is {submodule.communication_hook.__name__} instead"
submodule.communication_hook_state = state
submodule.communication_hook = hook
def _get_default_cuda_device(module: nn.Module) -> torch.device:
"""Try to infer CUDA device from module parameters."""
try:
compute_device = next(module.parameters()).device
if compute_device.type == "cuda":
return compute_device
        # If the module has no parameters, `next()` raises StopIteration; in
        # that case, fall back to the current CUDA device instead of raising.
except StopIteration:
pass
# Fall back to current CUDA device
return torch.device("cuda", torch.cuda.current_device())
def _free_storage(data: torch.Tensor) -> None:
"""Free underlying storage of a Tensor."""
if data.storage().size() > 0:
# Since we're modifying the Tensor's Storage directly, make sure the Tensor
# is the sole occupant of the Storage.
assert (
data.storage_offset() == 0
), "The tensor is not the sole occupant of the storage."
data.storage().resize_(0) # type: ignore[attr-defined]
@torch.no_grad()
def _alloc_storage(data: torch.Tensor, size: torch.Size) -> None:
"""Allocate storage for a tensor."""
if data.storage().size() == size.numel(): # no need to reallocate
return
assert (
data.storage().size() == 0
), "Then tensor storage should have been resized to be 0."
data.storage().resize_(size.numel()) # type: ignore[attr-defined]
def p_assert(cond: Any, s: Any) -> None:
"""This is used as an alternate to ``assert`` when in the backward context
to print the error message ``s`` since otherwise, it is swallowed."""
if not cond:
print(s)
traceback.print_stack()
raise AssertionError
def _calc_grad_norm(parameters: List[torch.nn.Parameter], p: float) -> torch.Tensor:
r"""Calculate gradient norm of an iterable of parameters.
Returns:
        Total norm of the parameters' gradients (viewed as a single vector).
"""
    parameters = [param for param in parameters if param.grad is not None]
if len(parameters) == 0:
return torch.tensor(0.0)
if p == math.inf:
local_norm = torch.tensor(max(par.grad.detach().abs().max() for par in parameters))
else:
# Compute the norm in full precision no matter what
local_norm = torch.linalg.vector_norm(
torch.stack(
[
torch.linalg.vector_norm(par.grad.detach(), p, dtype=torch.float32)
for par in parameters
]
),
p,
)
    local_norm = local_norm.to(dtype=parameters[0].dtype)
return local_norm
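# A minimal illustrative sketch (not part of the FSDP API): `_calc_grad_norm`
# treats all gradients as one concatenated vector, so for gradients [3.0] and
# [4.0] the 2-norm is 5.0:
#   >>> a = torch.nn.Parameter(torch.zeros(1)); a.grad = torch.tensor([3.0])
#   >>> b = torch.nn.Parameter(torch.zeros(1)); b.grad = torch.tensor([4.0])
#   >>> _calc_grad_norm([a, b], 2.0)
#   tensor(5.)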
def _get_param_to_unflat_param_names(
model: torch.nn.Module,
dedup_shared_params: bool = True,
) -> Dict[torch.nn.Parameter, List[str]]:
"""
Constructs a mapping from flattened parameter (including non-FSDP-module
parameters) to its unflattened parameter names. For non-FSDP-module
parameters, these mapped-to lists always contain a single element. The
unflattened parameter names should match the keys of the model state dict.
    For shared parameters, only the first parameter name is included by default
    (following the ``torch.nn.Module.parameters()`` order); see
    ``dedup_shared_params``.
Args:
model (torch.nn.Module): Root module (which may or may not be a
:class:`FullyShardedDataParallel` instance).
dedup_shared_params (bool): If ``True``, only includes the first
list of unflattened parameter names corresponding to a parameter
in the module walk order; if ``False``, then includes all of the
unflattened parameter names.
"""
def module_fn(module, prefix, param_to_unflat_param_names):
# For FSDP modules, only add the entry when considering the contained
# `FlattenParamsWrapper` to avoid duplication
if not isinstance(module, FullyShardedDataParallel):
for param_name, param in module.named_parameters(recurse=False):
module_prefixed_param_names = (
param._prefixed_param_names if isinstance(param, FlatParameter)
else [param_name]
) # prefixed from `module`
fully_prefixed_param_names = [
clean_tensor_name(prefix + name)
for name in module_prefixed_param_names
] # fully prefixed from the top level including `prefix`
                # If this parameter has already been visited, then it is a
                # shared parameter; in that case, only take the first parameter name
is_shared_param = param in param_to_unflat_param_names
if not is_shared_param:
param_to_unflat_param_names[param] = fully_prefixed_param_names
elif not dedup_shared_params:
param_to_unflat_param_names[param].extend(fully_prefixed_param_names)
def return_fn(param_to_unflat_param_names):
return param_to_unflat_param_names
param_to_unflat_param_names: Dict[torch.nn.Parameter, List[str]] = {}
return _apply_to_modules(
model, module_fn, return_fn, param_to_unflat_param_names,
)
def _get_param_to_param_name(
model: torch.nn.Module,
) -> Dict[torch.nn.Parameter, str]:
"""
Constructs a mapping from parameters to their parameter names. ``model``
should not contain any :class:`FullyShardedDataParallel` instances, which
means that none of the parameters should be ``FlatParameter`` s. As a
result, compared to :meth:`_get_param_to_unflat_param_names`, the mapped
values may be flattened from singleton :class:`list` s to the contained
names themselves.
Args:
model (torch.nn.Module): Root module, which should not contain any
:class:`FullyShardedDataParallel` instances.
"""
param_to_param_names = _get_param_to_unflat_param_names(model)
for param_names in param_to_param_names.values():
assert len(param_names) > 0, "`_get_param_to_unflat_param_names()` " \
"should not construct empty lists"
if len(param_names) > 1:
raise RuntimeError(
"Each parameter should only map to one parameter name but got "
f"{len(param_names)}: {param_names}"
)
param_to_param_name = {
param: param_names[0]
for param, param_names in param_to_param_names.items()
}
return param_to_param_name
def _get_param_name_to_param(
model: torch.nn.Module,
) -> Dict[str, torch.nn.Parameter]:
"""Constructs the inverse mapping of :meth:`_get_param_to_param_name`."""
param_to_param_name = _get_param_to_param_name(model)
return dict(zip(param_to_param_name.values(), param_to_param_name.keys()))
def clean_tensor_name(tensor_name: str) -> str:
"""Cleans the parameter or buffer name by removing any FSDP-related
prefixes."""
    # A fully prefixed FSDP tensor name may not contain both prefixes together
    # (i.e. `FSDP_PREFIX`), so we call `replace()` on each prefix separately
tensor_name = tensor_name.replace(FSDP_WRAPPED_MODULE + ".", "")
tensor_name = tensor_name.replace(FPW_MODULE + ".", "")
# TODO: Explicitly replacing checkpoint_wrapper prefix is not ideal,
# as it increases coupling between CheckpointWrapper and FSDP. This is also not
    # scalable to additional wrapped modules; we should come up with a general
    # solution for this issue.
tensor_name = tensor_name.replace(_CHECKPOINT_PREFIX + ".", "")
return tensor_name
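# A minimal illustrative sketch, assuming the wrapper prefixes are
# "_fsdp_wrapped_module" and "_fpw_module" (an assumption, not verified here):
#   >>> clean_tensor_name("_fsdp_wrapped_module._fpw_module.lin.weight")
#   'lin.weight'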
| pytorch-master | torch/distributed/fsdp/fully_sharded_data_parallel.py |
import copy
import functools
from typing import (
Any,
Dict,
Iterable,
Iterator,
List,
NamedTuple,
Optional,
Sequence,
Tuple,
Union,
)
import torch
import torch.distributed as dist
# Import the entire FSDP file to avoid circular imports
import torch.distributed.fsdp.fully_sharded_data_parallel as FSDP
from torch.distributed.fsdp.flat_param import (
FlatParameter,
FlatParamHandle,
)
class _ConsolidatedOptimState:
"""
This holds the consolidated optimizer state on the target rank. Positive-
dimension tensor state is communicated across ranks, while zero-dimension
    tensor state and non-tensor state are taken directly from the target rank.
PyTorch version 1.12 moved to using zero-dimension tensors for scalar
    values, but user-implemented optimizers may still use float (i.e. a
non-tensor). Thus, we support both and handle them identically.
Attributes:
tensor_state (Dict[str, torch.Tensor]): Mapping from positive-dimension
tensor state name to the unsharded flattened tensor representing
the state.
zero_dim_tensor_state (Dict[str, torch.Tensor]): Mapping from zero-
dimension tensor state name to its value.
non_tensor_state (Dict[str, Any]): Mapping from non-tensor state
name to its value.
"""
    tensor_state: Dict[str, torch.Tensor]
    zero_dim_tensor_state: Dict[str, torch.Tensor]
    non_tensor_state: Dict[str, Any]
    def __init__(self) -> None:
        # Use per-instance dicts so that consolidated state is not accidentally
        # shared across instances through mutable class-level defaults
        self.tensor_state = {}
        self.zero_dim_tensor_state = {}
        self.non_tensor_state = {}
class _PosDimTensorInfo(NamedTuple):
"""
    Metadata for positive-dimension tensors used internally for
:meth:`scatter_full_optim_state_dict`.
Attributes:
shape (torch.Size): Sharded tensor shape (which is equal to the
unsharded tensor shape if the tensor is optimizer state for a
non-FSDP parameter and is hence not sharded).
dtype (torch.dtype): Data type of the tensor.
"""
shape: torch.Size
dtype: torch.dtype
class _OptimStateKey(NamedTuple):
"""
This represents an optimizer state key that may be used commonly across
ranks. It is based on the unflattened parameter names rather than parameter
    IDs to make it independent of each rank's own optimizer construction.
"""
unflat_param_names: Tuple[str, ...]
is_flat_param: bool
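# A minimal illustrative sketch (hypothetical parameter names): a key for a
# `FlatParameter` covering two original parameters versus a key for a regular
# non-FSDP parameter.
#   >>> flat_key = _OptimStateKey(("lin.weight", "lin.bias"), is_flat_param=True)
#   >>> nonflat_key = _OptimStateKey(("head.weight",), is_flat_param=False)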
def _unflatten_optim_state(
flat_param: FlatParameter,
flat_param_state: Dict[str, Any],
fsdp_module,
to_save: bool,
) -> List[Dict[str, Any]]:
"""
Unflattens the optimizer state, consisting of the "state" part and the
"param_groups" part. Unflattening the "state" part involves consolidating
the state on the target rank and remapping from flattened to unflattened
parameter IDs, and the "param_groups" part only involves remapping from
flattened to unflattened parameter IDs.
Args:
flat_param (FlatParameter): The flattened parameter.
flat_param_state (Dict[str, Any]): Entry for the flattened parameter
in the "state" part of the optimizer state dict.
fsdp_module (FullyShardedDataParallel): FSDP module that owns
``flat_param``, i.e. holds it in ``self.params``.
to_save (bool): Whether to save the state on this rank.
Returns:
List[Dict[str, Any]]: A :class:`list` holding the entries in the
"state" part of the optimizer state dict corresponding to the
unflattened parameters comprising the flattened parameter
``flat_param`` if on the target rank or an empty :class:`list`
otherwise. The final optimizer state dict will need to map these
entries using the proper unflattened parameter IDs.
"""
consolidated_state = _communicate_optim_state(
flat_param, flat_param_state, fsdp_module, to_save,
)
unflat_param_state = _unflatten_communicated_optim_state(
flat_param,
consolidated_state,
) if to_save else []
return unflat_param_state
def _communicate_optim_state(
flat_param: FlatParameter,
flat_param_state: Dict[str, Any],
fsdp_module,
to_save: bool,
) -> _ConsolidatedOptimState:
"""
Communicates the optimizer state for a flattened parameter ``flat_param``
across ranks so that the target rank holds the entire non-sharded optimizer
state.
If ``N`` is the number of tensor optimizer states in the optimizer state
dict, then the communication complexity is 0 if ``N = 0`` and ``N + 1``
otherwise (where the plus 1 comes from all-gathering the padding per rank).
Args:
flat_param (FlatParameter): The flattened parameter.
flat_param_state (Dict[str, Any]): The entry in the "state" part of the
optimizer state dict corresponding to the flattened parameter.
fsdp_module (FullyShardedDataParallel): FSDP module that owns
``flat_param``, i.e. holds it in ``self.params``.
to_save (bool): Whether to save the state on this rank.
Returns:
        _ConsolidatedOptimState: Consolidated optimizer state for
``flat_param``; the state is not populated for non-target ranks.
"""
state = _ConsolidatedOptimState()
tensor_state, zero_dim_tensor_state, non_tensor_state = \
state.tensor_state, state.zero_dim_tensor_state, state.non_tensor_state
group = fsdp_module.process_group
tensor_buffer = None # initialize lazily in case it is not needed
for state_name, value in flat_param_state.items():
# Positive-dimension tensor state: communicate across ranks
if torch.is_tensor(value) and value.dim() > 0:
# If the parameter is not sharded (e.g. world size of 1), then
# neither is the positive-dimension tensor state, so no need to
# communicate it -- we take the target rank's value
if not flat_param._is_sharded: # type: ignore[attr-defined]
tensor_state[state_name] = value.cpu()
continue
if tensor_buffer is None:
# Assume that positive-dimension tensor optimizer state
# has the same shape as the sharded flattened parameter
buffer_size = flat_param._full_param_padded.size() # type: ignore[attr-defined]
tensor_buffer = value.new_zeros(*buffer_size)
dist._all_gather_base(tensor_buffer, value, group=group)
torch.cuda.synchronize()
if to_save:
unpadded_numel = flat_param._unsharded_size.numel() # type: ignore[attr-defined]
tensor_state[state_name] = tensor_buffer[:unpadded_numel].cpu()
# Zero-dimension tensor state and non-tensor state: take this rank's
# value directly
elif to_save:
if _is_zero_dim_tensor(value):
zero_dim_tensor_state[state_name] = value.cpu()
else:
non_tensor_state[state_name] = value
return state
def _unflatten_communicated_optim_state(
flat_param: FlatParameter,
state: _ConsolidatedOptimState,
) -> List[Dict[str, Any]]:
"""
Unflattens the communicated optimizer state (given by ``tensor_state``,
``non_tensor_state``, and ``zero_dim_tensor_state``) for a single flattened
parameter ``flat_param``. This should only be called on the target rank.
Args:
flat_param (FlatParameter): The flattened parameter.
state (_ConsolidatedOptimState): Consolidated optimizer state.
Returns:
List[Dict[str, Any]]: A :class:`list` holding the entries in the
"state" part of the optimizer state dict corresponding to the
unflattened parameters comprising the flattened parameter
``flat_param``. The final optimizer state dict will need to map these
entries using the proper unflattened parameter IDs.
"""
unflat_param_state: List[Dict[str, Any]] = []
flat_param_views: Dict[str, Iterator] = {}
num_unflat_params = flat_param._num_params
tensor_state, zero_dim_tensor_state, non_tensor_state = \
state.tensor_state, state.zero_dim_tensor_state, state.non_tensor_state
for _ in range(num_unflat_params):
unflat_state_param = {}
# Add positive-dimension tensor state: unflatten with views
for state_name, flat_tensor in tensor_state.items():
views_generated = state_name in flat_param_views
if not views_generated:
views = FlatParamHandle._get_unflat_views(flat_param, flat_tensor)
flat_param_views[state_name] = views
else:
views = flat_param_views[state_name]
unflat_state_param[state_name] = next(views)
# Add zero-dimension tensor state: take the target rank's value
for state_name, zero_dim_tensor in zero_dim_tensor_state.items():
unflat_state_param[state_name] = zero_dim_tensor
# Add non-tensor state: take the target rank's value
for state_name, non_tensor in non_tensor_state.items():
unflat_state_param[state_name] = non_tensor
unflat_param_state.append(unflat_state_param)
return unflat_param_state
def _flatten_full_optim_state_dict(
full_optim_state_dict: Dict[str, Any],
model: torch.nn.Module,
shard_state: bool,
) -> Dict[str, Any]:
"""
Flattens the full optimizer state dict, still keying by unflattened
parameter names. If ``shard_state=True``, then FSDP-managed
``FlatParameter`` 's optimizer states are sharded, and otherwise, they are
kept unsharded.
Returns:
Dict[str, Any]: The flattened optimizer state dict.
"""
full_osd = full_optim_state_dict
if "state" not in full_osd or "param_groups" not in full_osd:
raise ValueError(
"`full_optim_state_dict` must have the keys \"state\" and "
"\"param_groups\" to be a valid optimizer state dict"
)
flat_param_to_fsdp_module = _get_flat_param_to_fsdp_module(model)
param_to_unflat_param_names = FSDP._get_param_to_unflat_param_names(model)
# Construct the "state" part
flat_osd_state: Dict[_OptimStateKey, Any] = {}
full_osd_state = full_osd["state"]
for param, unflat_param_names in param_to_unflat_param_names.items():
if isinstance(param, FlatParameter): # flatten FSDP parameters' states
assert param in flat_param_to_fsdp_module, \
"Check the `flat_param_to_fsdp_module` construction\n" \
f"param: {param}"
fsdp_module = flat_param_to_fsdp_module[param]
flat_state = _flatten_optim_state(
full_osd_state, unflat_param_names, fsdp_module, param,
shard_state,
)
key = _OptimStateKey(tuple(unflat_param_names), True)
flat_osd_state[key] = flat_state
else: # do not flatten non-FSDP parameters' states
assert len(unflat_param_names) == 1
unflat_param_name = unflat_param_names[0]
if unflat_param_name not in full_osd_state:
# The state dict may not have an entry for a parameter if it
# was not passed into the optimizer (e.g. if it is not an
# FSDP-managed parameter)
continue
key = _OptimStateKey(tuple(unflat_param_names), False)
flat_osd_state[key] = copy.copy(full_osd_state[unflat_param_name])
# Construct the "param_groups" part -- copy as is since it will be
# rekeyed later according to the target rank's `optim_input`
flat_osd_param_groups = copy.deepcopy(full_osd["param_groups"])
return {"state": flat_osd_state, "param_groups": flat_osd_param_groups}
def _flatten_optim_state(
unflat_osd_state: Dict[str, Dict[str, Any]],
unflat_param_names: List[str],
fsdp_module,
flat_param: FlatParameter,
shard_state: bool,
) -> Dict[str, Any]:
"""
    Flattens the optimizer state in ``unflat_osd_state`` for a single
flattened parameter ``flat_param`` in ``fsdp_module`` corresponding to
the unflattened parameter names in ``unflat_param_names``.
Args:
unflat_osd_state (Dict[str, Dict[str, Any]]): The "state" part of the
optimizer state dict corresponding to the unflattened parameters.
unflat_param_names (List[str]): A :class:`list` of unflattened
parameter names corresponding to the flattened parameter
``flat_param``.
fsdp_module (FullyShardedDataParallel): FSDP module owning the
flattened parameter.
flat_param (FlatParameter): The flattened parameter.
shard_state (bool): Whether to shard flattened positive-dimension
tensor state; if ``False``, then the full flattened tensor is
            kept in the returned :class:`dict`.
Returns:
Dict[str, Any]: A :class:`dict` mapping state names to their values for
a particular flattened parameter. The sharded optimizer state dict's
"state" part will map a key to this returned value.
"""
num_unflat_params = len(unflat_param_names)
assert num_unflat_params > 0, \
"Expects at least one unflattened parameter corresponding to the " \
"flattened parameter"
unflat_param_shapes = flat_param._shapes
num_unflat_param_shapes = len(unflat_param_shapes)
assert num_unflat_params == num_unflat_param_shapes, \
f"Expects {num_unflat_params} shapes but got {num_unflat_param_shapes}"
# Check if these unflattened parameters have any optimizer state
has_state = [
bool(unflat_param_name in unflat_osd_state)
for unflat_param_name in unflat_param_names
]
# If none of the unflattened parameters comprising this flattened parameter
# have any state, then we do not want an entry in the optimizer state dict
if not any(has_state):
return {} # no need to flatten any state
# There may still be some unflattened parameters with state and some
# without
unflat_param_states = [
unflat_osd_state[unflat_param_name]
if unflat_param_name in unflat_osd_state else None
for unflat_param_name in unflat_param_names
]
# Check that the unflattened parameters have the same state names
state_names = None
for unflat_param_state in unflat_param_states:
if unflat_param_state is None:
continue
if state_names is None:
state_names = set(unflat_param_state.keys())
else:
if state_names != set(unflat_param_state.keys()):
raise ValueError(
"Differing optimizer state names for the unflattened "
f"parameters: {unflat_param_names}"
)
assert state_names is not None
# Flatten the state
flat_state: Dict[str, Any] = {}
for state_name in state_names:
state_values = [
unflat_param_state[state_name]
if unflat_param_state is not None else None
for unflat_param_state in unflat_param_states
]
non_none_state_values = [v for v in state_values if v is not None]
are_pos_dim_tensors = are_zero_dim_tensors = are_non_tensors = True
for v in non_none_state_values:
are_pos_dim_tensors &= torch.is_tensor(v) and v.dim() > 0
are_zero_dim_tensors &= _is_zero_dim_tensor(v)
are_non_tensors &= not torch.is_tensor(v)
types = set(type(v) for v in non_none_state_values)
if len(types) != 1 or not (
are_pos_dim_tensors or are_zero_dim_tensors or are_non_tensors
):
raise ValueError(
f"Differing optimizer state types for state {state_name}, "
f"values {non_none_state_values}, and unflattened parameter "
f"names {unflat_param_names}"
)
if are_pos_dim_tensors:
flat_tensor = _flatten_tensor_optim_state(
state_name, state_values, unflat_param_names,
unflat_param_shapes, flat_param,
)
if shard_state:
# Shard the flattened tensor immediately to minimize max memory
# usage
sharded_flat_tensor, _ = FlatParamHandle._get_shard(
flat_tensor, fsdp_module.rank, fsdp_module.world_size,
)
flat_state[state_name] = sharded_flat_tensor
else:
flat_state[state_name] = flat_tensor
elif are_zero_dim_tensors:
flat_state[state_name] = _flatten_zero_dim_tensor_optim_state(
state_name, state_values, unflat_param_names,
)
else:
assert are_non_tensors
flat_state[state_name] = _flatten_non_tensor_optim_state(
state_name, state_values, unflat_param_names,
)
return flat_state
def _flatten_tensor_optim_state(
state_name: str,
pos_dim_tensors: List[torch.Tensor],
unflat_param_names: List[str],
unflat_param_shapes: Sequence[torch.Size],
flat_param: FlatParameter,
) -> torch.Tensor:
"""
Flattens the positive-dimension tensor optimizer state given by the values
    ``pos_dim_tensors`` for the state ``state_name`` for a single flattened
    parameter ``flat_param`` corresponding to the unflattened parameter names
    ``unflat_param_names`` and unflattened parameter shapes
``unflat_param_shapes``. This flattens each unflattened parameter's tensor
state into one tensor.
NOTE: We use zero tensors for any unflattened parameters without state
since some value is required to fill those entries. This assumes that the
zero tensor is mathematically equivalent to having no state, which is true
for Adam's "exp_avg" and "exp_avg_sq" but may not be true for all
optimizers.
Args:
state_name (str): Optimizer state name.
pos_dim_tensors (List[torch.Tensor]): Positive-dimension tensor
optimizer state values for the unflattened parameters corresponding
to the single flattened parameter.
unflat_param_names (List[str]): A :class:`list` of unflattened
parameter names corresponding to the single flattened parameter.
        unflat_param_shapes (Sequence[torch.Size]): Unflattened parameter shapes
corresponding to the single flattened parameter.
flat_param (FlatParameter): The flattened parameter.
Returns:
torch.Tensor: A flattened tensor containing the optimizer state
corresponding to ``state_name`` constructed by concatenating the
unflattened parameter tensor states in ``pos_dim_tensors`` (using zero
tensors for any unflattened parameters without the state).
"""
non_none_tensors = [t for t in pos_dim_tensors if t is not None]
# Check that all are tensors with the same dtype
dtypes = set(t.dtype for t in non_none_tensors)
if len(dtypes) != 1:
raise ValueError(
"All unflattened parameters comprising a single flattened "
"parameter must have positive-dimension tensor state with the "
f"same dtype but got dtypes {dtypes} for state {state_name} and "
f"unflattened parameter names {unflat_param_names}"
)
dtype = next(iter(dtypes))
# Check that each tensor state matches its parameter's shape
for tensor, shape in zip(pos_dim_tensors, unflat_param_shapes):
if tensor is None and len(shape) == 0:
raise ValueError(
"Flattening a zero-dimension parameter is not supported"
)
elif tensor is not None and tensor.shape != shape:
raise ValueError(
"Tensor optimizer state does not have same shape as its "
f"parameter: {tensor.shape} {shape}"
)
# Flatten the tensor states: we do not need to add any padding since the
    # flattened optimizer state tensor is sharded via `_get_shard()`, which pads
# the shard as needed (just like for the flattened parameter)
cpu_device = torch.device("cpu")
tensors = [
torch.flatten(state_value.to(cpu_device)) if state_value is not None
else torch.flatten(torch.zeros(
size=shape, dtype=dtype, device=cpu_device,
))
for state_value, shape
in zip(pos_dim_tensors, unflat_param_shapes)
]
flat_tensor = torch.cat(tensors)
flat_param_shape = flat_param._unsharded_size # type: ignore[attr-defined]
assert flat_tensor.shape == flat_param_shape, \
f"tensor optim state: {flat_tensor.shape} " \
f"flattened parameter: {flat_param_shape}"
return flat_tensor
def _flatten_zero_dim_tensor_optim_state(
state_name: str,
zero_dim_tensors: List[torch.Tensor],
unflat_param_names: List[str],
) -> torch.Tensor:
"""
Flattens the zero-dimension tensor optimizer state given by the values
``zero_dim_tensors`` for the state ``state_name`` for a single flattened
parameter corresponding to the unflattened parameter names
``unflat_param_names`` by enforcing that all tensors are the same and using
that common value.
NOTE: The requirement that the tensors are the same across all unflattened
parameters comprising the flattened parameter is needed to maintain the
invariant that FSDP performs the same computation as its non-sharded
equivalent. This means that none of the unflattened parameters can be
missing this state since imposing a value may differ from having no value.
For example, for Adam's "step", no value means maximum bias correction,
while having some positive value means less bias correction.
Args:
state_name (str): Optimizer state name.
zero_dim_tensors (List[torch.Tensor]): Zero-dimension optimizer state
for the unflattened parameters corresponding to the single
flattened parameter.
unflat_param_names (List[str]): A :class:`list` of unflattened
parameter names corresponding to the single flattened parameter.
Returns:
torch.Tensor: A zero-dimensional tensor giving the value of the state
``state_name`` for all unflattened parameters corresponding to the
names ``unflat_param_names``.
"""
non_none_tensors = [t for t in zero_dim_tensors if t is not None]
# Enforce that all have the same value and dtype
values_set = set(t.item() if t is not None else None for t in zero_dim_tensors)
dtypes = set(t.dtype if t is not None else None for t in zero_dim_tensors)
if len(non_none_tensors) != len(zero_dim_tensors) or \
len(values_set) != 1 or len(dtypes) != 1:
raise ValueError(
"All unflattened parameters comprising a single flattened "
"parameter must have scalar state with the same value and dtype "
f"but got values {values_set} and dtypes {dtypes} for state "
f"{state_name} and unflattened parameter names "
f"{unflat_param_names}"
)
value = next(iter(values_set))
dtype = next(iter(dtypes))
return torch.tensor(value, dtype=dtype, device=torch.device("cpu"))
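# A minimal illustrative sketch (hypothetical names): two unflattened
# parameters sharing the same Adam "step" value flatten to a single
# zero-dimension tensor.
#   >>> steps = [torch.tensor(3.0), torch.tensor(3.0)]
#   >>> _flatten_zero_dim_tensor_optim_state("step", steps, ["lin.weight", "lin.bias"])
#   tensor(3.)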
def _flatten_non_tensor_optim_state(
state_name: str,
non_tensors: List[Any],
unflat_param_names: List[str],
) -> Any:
"""
Flattens the non-tensor optimizer state given by the values ``non_tensors``
for the state ``state_name`` for a single flattened parameter corresponding
to the unflattened parameter names ``unflat_param_names`` by enforcing that
all values are the same and using that common value.
See the note in :func:`_flatten_zero_dim_tensor_optim_state`.
Args:
state_name (str): Optimizer state name.
non_tensors (List[Any]): Non-tensor optimizer state for the unflattened
parameters corresponding to the single flattened parameter.
unflat_param_names (List[str]): A :class:`list` of unflattened
parameter names corresponding to the single flattened parameter.
Returns:
Any: A non-tensor giving the value of the state ``state_name`` for all
unflattened parameters corresponding to the names
``unflat_param_names``.
"""
non_none_non_tensors = [nt for nt in non_tensors if nt is not None]
# Enforce that all have the same value (same type already checked)
non_tensor_set = set(non_tensors)
if len(non_none_non_tensors) != len(non_tensors) or \
len(non_tensor_set) != 1:
raise ValueError(
"All unflattened parameters comprising a single flattened "
"parameter must have scalar state with the same value and dtype "
f"but got values {non_tensor_set} for state {state_name} and "
f"unflattened parameter names {unflat_param_names}"
)
non_tensor = next(iter(non_tensor_set))
return non_tensor
def _process_pos_dim_tensor_state(
flat_optim_state_dict: Dict[str, Any],
world_size: int,
) -> Dict[str, Any]:
"""
Processes positive-dimension tensor states in ``flat_optim_state_dict`` by
replacing them with metadata. This is done so the processed optimizer state
dict can be broadcast from rank 0 to all ranks without copying those tensor
states, and thus, this is meant to only be called on rank 0.
Args:
flat_optim_state_dict (Dict[str, Any]): Flattened optimizer state dict
with the positive-dimension tensor states unsharded.
Returns:
Dict[str, Any]: The flattened optimizer state dict with positive-
dimension tensor states replaced by metadata.
"""
flat_osd = flat_optim_state_dict # alias
no_tensor_osd: Dict[str, Any] = {"state": {}}
for key, param_state in flat_osd["state"].items():
no_tensor_osd["state"][key] = {}
for state_name, value in param_state.items():
is_pos_dim_tensor_state = torch.is_tensor(value) and value.dim() > 0
if not is_pos_dim_tensor_state:
no_tensor_osd["state"][key][state_name] = value
continue
if key.is_flat_param: # FSDP parameter
sharded_size = FlatParamHandle._get_sharded_size(value, rank=0, world_size=world_size)
assert len(sharded_size) == 1, f"{sharded_size}"
info = _PosDimTensorInfo(sharded_size, value.dtype)
else: # non-FSDP parameter
info = _PosDimTensorInfo(value.shape, value.dtype)
no_tensor_osd["state"][key][state_name] = info
no_tensor_osd["param_groups"] = flat_osd["param_groups"]
return no_tensor_osd
def _broadcast_processed_optim_state_dict(
processed_optim_state_dict: Optional[Dict[str, Any]],
rank: int,
group,
) -> Dict[str, Any]:
"""
Broadcasts the processed optimizer state dict from rank 0 to all ranks.
Args:
processed_optim_state_dict (Optional[Dict[str, Any]]): The flattened
optimizer state dict with positive-dimension tensor states replaced
with metadata if on rank 0; ignored otherwise.
Returns:
Dict[str, Any]: The processed optimizer state dict.
"""
    # Broadcast the processed optimizer state dict from rank 0 to all ranks
obj_list = [processed_optim_state_dict] if rank == 0 \
else [None]
dist.broadcast_object_list(obj_list, src=0, group=group)
processed_optim_state_dict = obj_list[0] # type: ignore[assignment]
assert processed_optim_state_dict is not None
# Keep zero-dimension tensors on CPU
return processed_optim_state_dict
def _broadcast_pos_dim_tensor_states(
processed_optim_state_dict: Dict[str, Any],
flat_optim_state_dict: Optional[Dict[str, Any]],
rank: int,
world_size: int,
group,
broadcast_device: torch.device,
) -> Dict[str, Any]:
"""
Takes ``processed_optim_state_dict``, which has metadata in place of
positive-dimension tensor states, and broadcasts those tensor states from
rank 0 to all ranks. For tensor states corresponding to FSDP parameters,
rank 0 shards the tensor and broadcasts shard-by-shard, and for tensor
states corresponding to non-FSDP parameters, rank 0 broadcasts the full
tensor.
Args:
processed_optim_state_dict (Dict[str, Any]): The flattened optimizer
state dict with positive-dimension tensor states replaced with
metadata; this should be returned by
:meth:`_process_pos_dim_tensor_state` and non-empty on all ranks.
flat_optim_state_dict (Optional[Dict[str, Any]]): The flattened
unsharded optimizer state dict with the actual positive-dimension
tensor states if on rank 0; ignored on nonzero ranks.
Returns:
Dict[str, Any]: The optimizer state dict with the positive-dimension
tensor state correctly populated via ``broadcast()`` s from rank 0.
"""
assert rank != 0 or flat_optim_state_dict is not None, \
"Expects rank 0 to pass in the flattened optimizer state dict"
no_tensor_osd = processed_optim_state_dict # alias
flat_osd = flat_optim_state_dict # alias
for key, param_state in no_tensor_osd["state"].items():
for state_name, value in param_state.items():
is_pos_dim_tensor_state = isinstance(value, _PosDimTensorInfo)
if not is_pos_dim_tensor_state:
continue
if rank == 0:
assert flat_osd is not None
unsharded_tensor = flat_osd["state"][key][state_name]
else:
unsharded_tensor = None
shape, dtype = value.shape, value.dtype
if key.is_flat_param: # FSDP parameter
_broadcast_sharded_pos_dim_tensor_state(
unsharded_tensor, param_state, state_name, shape, dtype,
broadcast_device, rank, world_size, group,
) # modify `param_state` destructively
else: # non-FSDP parameter
_broadcast_unsharded_pos_dim_tensor_state(
unsharded_tensor, param_state, state_name, shape, dtype,
broadcast_device, rank, group,
) # modify `param_state` destructively
return no_tensor_osd
def _broadcast_sharded_pos_dim_tensor_state(
unsharded_tensor: Optional[torch.Tensor],
param_state: Dict[str, Any],
state_name: str,
shape: torch.Size,
dtype: torch.dtype,
broadcast_device: torch.device,
rank: int,
world_size: int,
group,
) -> None:
"""
Broadcasts positive-dimension tensor state for the state ``state_name``
corresponding to an FSDP parameter shard-by-shard, only to be saved on the
relevant rank. This modifies ``param_state`` destructively.
Args:
unsharded_tensor (Optional[torch.Tensor]): Unsharded tensor from which
to broadcast shards if on rank 0; ignored otherwise.
shape (torch.Size): Shape of the sharded tensor; same on all ranks.
"""
get_shard: Optional[functools.partial[Tuple[torch.Tensor, int]]] = None
if rank == 0:
assert unsharded_tensor is not None, \
"Expects rank 0 to pass in the unsharded tensor"
get_shard = functools.partial(
FlatParamHandle._get_shard,
unsharded_tensor,
)
for target_rank in range(1, world_size):
if rank == 0:
assert get_shard is not None
sharded_tensor = get_shard(target_rank, world_size)[0].to(broadcast_device)
else:
sharded_tensor = torch.zeros(
shape, requires_grad=False, dtype=dtype,
device=broadcast_device,
)
dist.broadcast(sharded_tensor, src=0, group=group)
# Only keep the shard on the target rank and keep it on the broadcast
# device, which is typically GPU
if rank == target_rank:
param_state[state_name] = sharded_tensor
else:
del sharded_tensor
# Lastly, shard on rank 0
if rank != 0:
return
param_state[state_name] = get_shard(0, world_size)[0].to(broadcast_device) # type: ignore[misc]
def _broadcast_unsharded_pos_dim_tensor_state(
unsharded_tensor: Optional[torch.Tensor],
param_state: Dict[str, Any],
state_name: str,
shape: torch.Size,
dtype: torch.dtype,
broadcast_device: torch.device,
rank: int,
group,
) -> None:
"""
Broadcasts positive-dimension tensor state for the state ``state_name``
corresponding to an unsharded non-FSDP parameter from rank 0 to all ranks.
This modifies ``param_state`` destructively.
Args:
unsharded_tensor (Optional[torch.Tensor]): Unsharded tensor to
broadcast if on rank 0; ignored otherwise.
"""
if rank == 0:
assert unsharded_tensor is not None, \
"Expects rank 0 to pass in the unsharded tensor"
assert shape == unsharded_tensor.shape, \
f"Shape mismatch: {shape} {unsharded_tensor.shape}"
assert dtype == unsharded_tensor.dtype, \
f"dtype mismatch: {dtype} {unsharded_tensor.dtype}"
unsharded_tensor = unsharded_tensor.to(broadcast_device)
else:
unsharded_tensor = torch.zeros(
shape, requires_grad=False, dtype=dtype, device=broadcast_device,
)
dist.broadcast(unsharded_tensor, src=0, group=group)
# Keep the tensor on the broadcast device, which is typically GPU
param_state[state_name] = unsharded_tensor
def _rekey_sharded_optim_state_dict(
sharded_osd: Dict[str, Any],
model: torch.nn.Module,
optim_input: Optional[Union[
List[Dict[str, Any]], Iterable[torch.nn.Parameter],
]] = None,
) -> Dict[str, Any]:
"""
Rekeys the optimizer state dict from unflattened parameter names to
flattened parameter IDs according to the calling rank's ``optim_input``,
which may be different across ranks. In particular, the unflattened
parameter names are represented as :class:`_OptimStateKey` s.
"""
param_to_flat_param_id = _get_param_to_param_id(model, optim_input)
param_to_unflat_param_names = FSDP._get_param_to_unflat_param_names(model)
# All parameter keys in `param_to_flat_param_id` should be in
# `param_to_unflat_param_names` -- strict inequality follows when not all
# parameters are passed to the optimizer via `optim_input`
assert len(param_to_flat_param_id) <= len(param_to_unflat_param_names)
unflat_param_names_to_flat_param_id: Dict[Tuple[str, ...], int] = {} # for "state"
unflat_param_name_to_flat_param_id: Dict[str, int] = {} # for "param_groups"
for param, unflat_param_names in param_to_unflat_param_names.items():
if param not in param_to_flat_param_id:
# This parameter was not passed to the optimizer via `optim_input`
continue
flat_param_id = param_to_flat_param_id[param]
unflat_param_names_to_flat_param_id[tuple(unflat_param_names)] = flat_param_id
for unflat_param_name in unflat_param_names:
unflat_param_name_to_flat_param_id[unflat_param_name] = flat_param_id
sharded_osd_state = sharded_osd["state"]
rekeyed_osd_state = {}
for key, param_state in sharded_osd_state.items():
flat_param_id = unflat_param_names_to_flat_param_id[key.unflat_param_names]
rekeyed_osd_state[flat_param_id] = param_state
rekeyed_osd_param_groups: List[Dict[str, Any]] = []
for unflat_param_group in sharded_osd["param_groups"]:
flat_param_group = copy.deepcopy(unflat_param_group)
flat_param_ids = sorted(set(
unflat_param_name_to_flat_param_id[unflat_param_name]
for unflat_param_name in unflat_param_group["params"]
))
flat_param_group["params"] = flat_param_ids
rekeyed_osd_param_groups.append(flat_param_group)
return {"state": rekeyed_osd_state, "param_groups": rekeyed_osd_param_groups}
def _get_flat_param_to_fsdp_module(model: torch.nn.Module):
"""
Constructs a mapping from FSDP flattened parameters to their owning FSDP
modules and ensures that all FSDP modules are initialized.
Args:
        model (torch.nn.Module): Root module (which may or may not be a
:class:`FullyShardedDataParallel` instance).
Returns:
Dict[FlatParameter, FullyShardedDataParallel]: Mapping from FSDP
flattened parameters to their owning FSDP modules.
"""
flat_param_to_fsdp_module = {}
for module in model.modules():
if isinstance(module, FSDP.FullyShardedDataParallel):
module._lazy_init()
for param in module.params: # may have none
flat_param_to_fsdp_module[param] = module
return flat_param_to_fsdp_module
def _get_param_id_to_param(
model: torch.nn.Module,
optim_input: Optional[Union[
List[Dict[str, Any]], Iterable[torch.nn.Parameter],
]] = None,
) -> List[torch.nn.Parameter]:
"""
Constructs a mapping from parameter IDs to parameters. This may be used
both for models with ``FlatParameter`` s and without.
NOTE: We critically assume that, whether the optimizer input is a list of
parameters or a list of parameter groups, :class:`torch.optim.Optimizer`
enumerates the parameter IDs in order. In other words, for a parameter list
input, the parameter IDs should be in that list order, and for a parameter
groups input, the parameter IDs should be in order within each parameter
group and in order across parameter groups.
Args:
model (torch.nn.Module): Model whose parameters are passed into the
optimizer.
optim_input (Optional[Union[List[Dict[str, Any]],
Iterable[torch.nn.Parameter]]]): Input passed into the optimizer
representing either a :class:`list` of parameter groups or an
iterable of parameters; if ``None``, then this method assumes the
input was ``model.parameters()``. (Default: ``None``)
Returns:
List[torch.nn.Parameter]: Mapping from parameter IDs to parameters,
where the parameter ID is implicitly the index in the :class:`list`.
"""
# Assume the standard case of passing `model.parameters()` to the optimizer
# if `optim_input` is not specified
if optim_input is None:
return list(model.parameters())
try:
params = list(optim_input)
except TypeError:
raise TypeError(
"Optimizer input should be an iterable of Tensors or dicts, "
f"but got {optim_input}"
)
if len(params) == 0:
raise ValueError("Optimizer input should not be empty")
# Check if the optimizer input represents tensors or parameter groups
all_tensors = True
all_dicts = True
for param in params:
all_tensors &= isinstance(param, torch.Tensor)
all_dicts &= isinstance(param, dict)
if not all_tensors and not all_dicts:
raise TypeError(
"Optimizer input should be an iterable of Tensors or dicts"
)
if all_tensors:
return params # type: ignore[return-value]
assert all_dicts
param_id_to_param = []
for param_group in params:
has_params_key = "params" in param_group # type: ignore[operator]
assert has_params_key, \
"A parameter group should map \"params\" to a list of the " \
"parameters in the group"
for param in param_group["params"]: # type: ignore[index]
# Implicitly map `flat_param_id` (current length of the list) to
# `param`
param_id_to_param.append(param)
return param_id_to_param # type: ignore[return-value]
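# A minimal illustrative sketch of the parameter-ID ordering when parameter
# groups are passed as `optim_input` (`lin` is a hypothetical module):
#   >>> lin = torch.nn.Linear(2, 2)
#   >>> groups = [{"params": [lin.weight]}, {"params": [lin.bias]}]
#   >>> id_to_param = _get_param_id_to_param(lin, groups)
#   >>> id_to_param[0] is lin.weight and id_to_param[1] is lin.bias
#   True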
def _get_param_to_param_id(
model: torch.nn.Module,
optim_input: Optional[Union[
List[Dict[str, Any]], Iterable[torch.nn.Parameter],
]] = None,
) -> Dict[torch.nn.Parameter, int]:
"""Constructs the inverse mapping of :func:`_get_param_id_to_param`."""
param_id_to_param = _get_param_id_to_param(model, optim_input)
return {
param: param_id for param_id, param in enumerate(param_id_to_param)
}
def _get_unflat_to_flat_param_ids(
flat_to_unflat_param_ids: Dict[int, List[int]],
) -> List[int]:
"""
Inverts the mapping ``flat_to_unflat_param_ids`` to be from unflattened
parameter ID to flattened parameter ID, where the unflattened parameter ID
is the index in the returned :class:`list`. There may be multiple
unflattened parameter IDs mapping to the same flattened parameter ID.
Args:
flat_to_unflat_param_ids (Dict[int, List[int]]): A mapping from
flattened parameter ID to a :class:`list` of corresponding
unflattened parameter IDs.
Returns:
List[int]: A mapping from unflattened parameter ID to flattened
parameter ID, where the unflattened parameter ID is the index in the
:class:`list`.
"""
# Construct as a dict and then convert to list
unflat_to_flat_param_ids = {}
for flat_param_id, unflat_param_ids in flat_to_unflat_param_ids.items():
for unflat_param_id in unflat_param_ids:
assert unflat_param_id not in unflat_to_flat_param_ids, \
"`flat_to_unflat_param_ids` has the unflattened parameter " \
f"ID {unflat_param_id} mapped to multiple flattened " \
"parameter IDs"
unflat_to_flat_param_ids[unflat_param_id] = flat_param_id
num_unflat_param_ids = len(unflat_to_flat_param_ids)
unflat_param_ids_set = set(unflat_to_flat_param_ids.keys())
assert unflat_param_ids_set == set(range(num_unflat_param_ids)), \
"The set of unflattened parameter IDs should be {0, ..., " + \
str(num_unflat_param_ids - 1) + "} but got " + \
f"{unflat_param_ids_set}"
return [
unflat_to_flat_param_ids[unflat_param_id]
for unflat_param_id in range(num_unflat_param_ids)
]
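# A minimal illustrative sketch: flattened parameter 0 covers unflattened
# parameters 0 and 1, and flattened parameter 1 covers unflattened parameter 2.
#   >>> _get_unflat_to_flat_param_ids({0: [0, 1], 1: [2]})
#   [0, 0, 1]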
def _is_zero_dim_tensor(x: Any) -> bool:
return torch.is_tensor(x) and x.dim() == 0
| pytorch-master | torch/distributed/fsdp/_optim_utils.py |
from .flat_param import FlatParameter
from .fully_sharded_data_parallel import (
BackwardPrefetch,
CPUOffload,
FullStateDictConfig,
FullyShardedDataParallel,
LocalStateDictConfig,
MixedPrecision,
OptimStateKeyType,
ShardingStrategy,
StateDictType,
)
from .wrap import ParamExecOrderWrapPolicy
| pytorch-master | torch/distributed/fsdp/__init__.py |
import contextlib
from itertools import accumulate, chain
from typing import (
Dict,
Generator,
Iterator,
List,
NamedTuple,
Optional,
Sequence,
Set,
Tuple,
Union,
)
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
__all__ = [
"FlatParameter", "FlatParamHandle", "FlatParamShardMetadata",
"ParamInfo", "SharedParamInfo",
]
class ParamInfo(NamedTuple):
"""Information for an original module parameter."""
param_name: str # unprefixed
module: nn.Module
module_name: str
class SharedParamInfo(NamedTuple):
"""
Additional information for a shared parameter.
For each shared parameter, we designate one module and its parameter
variable to be the primary owner, determined as the first one encountered
in the parameter walk. These are prefixed with "prim". The primary module
and parameter do not have their own :class:`SharedParamInfo` instance.
"""
param_name: str # unprefixed
module: nn.Module
module_name: str
prim_param_name: str # unprefixed
prim_module: nn.Module
prim_module_name: str
class FlatParamShardMetadata(NamedTuple):
"""
This holds metadata specific to this rank's shard of the flattened
parameter.
Attributes:
param_names (Tuple[str, ...]): Prefixed parameter names of this rank's
shard of the parameters; see :class:`FlatParameter`.
param_shapes (Tuple[torch.Size, ...]): Parameter shapes of this rank's
shard of the parameters; see :class:`FlatParameter`.
param_numels (Tuple[int, ...]): Parameter numels of this rank's shard
of the parameters; see :class:`FlatParameter`.
param_offsets (Tuple[Tuple[int, int], ...]): [start, end] offsets (in
units of numels) giving this rank's part of each flattened
original module parameter.
"""
param_names: Tuple[str, ...]
param_shapes: Tuple[torch.Size, ...]
param_numels: Tuple[int, ...]
param_offsets: Tuple[Tuple[int, int], ...]
class FlatParameter(nn.Parameter):
"""
This is the flattened parameter used by :class:`FullyShardedDataParallel`.
It is comprised of one or more original parameters, which are flattened
and concatenated to construct the flattened parameter.
Under the current design, this parameter logically represents both the
unsharded and sharded flattened parameter, and its data changes storages
dynamically.
- In the :class:`FullyShardedDataParallel` constructor, the parameter
is initialized as unsharded and then sharded in-place.
- At runtime, the parameter is lazily (re)-initialized. The sharded
parameter data is saved in ``self._local_shard``, and a new ``Tensor``
``self._full_param_padded`` is created, which is the all-gather
destination and owns the unsharded parameter storage thereafter. (See
:meth:`FullyShardedDataParallel._init_param_attributes`.)
- Throughout runtime, the parameter data changes storages as needed,
e.g. to the sharded flattened parameter, reduced-precision sharded
flattened parameter, or the unsharded flattened parameter.
Attributes:
_is_sharded (bool): Whether the flattened parameter is *ever* sharded
across ranks (not whether it is *currently* sharded).
_unsharded_size (torch.Size): Unsharded flattened parameter's size.
_param_infos (Tuple[ParamInfo, ...]): Each parameter's parameter info
entry; see :class:`ParamInfo`.
_numels (Tuple[int, ...]): Each parameter's numel.
_shapes (Tuple[torch.Size, ...]): Each parameter's shape.
_prefixed_param_names (Tuple[str, ...]): Each parameter's name prefixed
with the parent module names starting from the module passed to
construct this flattened parameter via :class:`FlatParamHandle`;
the prefixed names are guaranteed to be unique within the subtree
rooted in that module.
_num_params (int): Number of original parameters flattened into this
flattened parameter; this is the length of ``_param_infos``,
``_numels``, ``_shapes``, and ``_prefixed_param_names``.
_shared_param_infos (Tuple[SharedParamInfo, ...]): Shared parameter
info entries; see :class:`SharedParamInfo`.
_shard_param_offsets (List[Tuple[int, int])): [start, end] offsets (in
units of numel) giving this rank's part of each flattened original
module parameter; for any parameter ``p`` that is not sharded
across ranks, this will be [0, ``p.numel()``-1].
_shard_indices (Tuple[int, int]): [start, end] indices (in units of
parameters) for this rank's shard of the original model parameters,
where the parameters follow the order in which they were originally
flattened; this indexes appropriately into any data structure that
follows the flattening order (e.g. ``_param_infos``, ``_numels``,
etc.).
_shard_numel_padded (int): Numel padded for this rank's sharded
flattened parameter.
_local_shard (Tensor): Sharded flattened parameter with padding.
_full_param_padded (Tensor): Unsharded flattened parameter with
padding.
_shard_bwd_hook (Tuple[AccumulateGrad, RemovableHandle]): Flattened
parameter's :class:`AccumulateGrad` object and post-backward hook
handle.
_mp_shard (Tensor): Reduced-precision flattened parameter with padding.
_cpu_grad (Tensor): Sharded gradient with padding stored on CPU.
_saved_grad_shard (Tensor): Sharded gradient with padding from previous
iterations for gradient accumulation without :meth:`no_sync`.
"""
def init_metadata(
self,
param_infos: List[ParamInfo],
numels: List[int],
shapes: List[torch.Size],
prefixed_param_names: List[str],
shared_param_infos: List[SharedParamInfo],
) -> None:
"""
Initializes attributes holding metadata about the original parameters
comprising the flattened parameter.
We expose this method separate from the constructor to keep the
constructor only responsible for the flattened parameter's tensor data.
This method should only be called once per model, while the constructor
may be called multiple times, e.g. when reloading from a checkpoint, in
which case only the tensor data needs to be passed to the constructor.
Since :meth:`load_state_dict` is implemented via :meth:`copy_`, the
metadata is correctly assumed to be unchanged.
Args:
See the Attributes in the class docstring.
"""
assert len(param_infos) == len(numels)
assert len(param_infos) == len(shapes)
assert len(param_infos) == len(prefixed_param_names)
self._num_params = len(param_infos)
self._param_infos = tuple(param_infos)
self._numels = tuple(numels)
self._shapes = tuple(shapes)
self._prefixed_param_names = tuple(prefixed_param_names)
self._shared_param_infos = tuple(shared_param_infos)
self._is_sharded = False
self._unsharded_size = self.size()
class FlatParamHandle:
"""
This handle manages a flattened parameter (:class:`FlatParameter`).
Args:
params (Sequence[nn.Parameter]): The parameters to use for the
flattened parameter.
module (nn.Module): A module that is the root of the subtree containing
all parameters in ``params``; for non-recursive wrapping, this must
be the top-level module, while for recursive wrapping, this may not
necessarily be the top-level module.
"""
def __init__(
self,
params: Sequence[nn.Parameter],
module: nn.Module,
) -> None:
super().__init__()
self._init_flat_param(module, params)
self._unflatten(as_params=False)
def _init_flat_param(
self,
module: nn.Module,
params: Sequence[Optional[nn.Parameter]],
) -> None:
"""
Initializes the flattened parameter ``self.flat_param`` by flattening
the parameters in ``params`` into a single :class:`FlatParameter` and
saves relevant metadata. Shared parameters are only included in the
flattened parameter once.
This checks that all comprising parameters have the same dtype and
``requires_grad`` and does not support nested construction of
:class:`FlatParameter` s.
Args:
See the Args in the class docstring.
"""
params_set = set(params)
params_set.discard(None)
assert len(params_set) > 0, \
"Cannot initialize a `FlatParameter` from an empty parameter list"
param_infos: List[ParamInfo] = []
numels: List[int] = []
shapes: List[torch.Size] = []
prefixed_param_names: List[str] = []
shared_param_infos: List[SharedParamInfo] = []
shared_param_memo: Dict[nn.Parameter, Tuple[nn.Module, str, str]] = {}
params_to_flatten: List[nn.Parameter] = []
dtype: Optional[torch.dtype] = None
requires_grad: Optional[bool] = None
for submodule_name, submodule in module.named_modules():
for param_name, param in submodule.named_parameters(recurse=False):
if param not in params_set:
continue
if param in shared_param_memo:
prim_module, prim_module_name, prim_param_name = shared_param_memo[param]
shared_param_infos.append(SharedParamInfo(
param_name, submodule, submodule_name, prim_param_name,
prim_module, prim_module_name,
))
else:
if isinstance(param, FlatParameter):
raise ValueError("`FlatParameter` does not support nesting")
if dtype is not None and param.dtype != dtype:
raise ValueError(
"`FlatParameter` requires uniform dtype but got "
f"{dtype} and {param.dtype}"
)
if requires_grad is not None and param.requires_grad != requires_grad:
raise ValueError("`FlatParameter` requires uniform `requires_grad`")
dtype = param.dtype
requires_grad = param.requires_grad
shared_param_memo[param] = (submodule, submodule_name, param_name)
params_to_flatten.append(param)
param_infos.append(ParamInfo(param_name, submodule, submodule_name))
numels.append(param.numel())
shapes.append(param.shape)
prefixed_param_name = submodule_name + "." + param_name \
if submodule_name else param_name
prefixed_param_names.append(prefixed_param_name)
assert requires_grad is not None
self.flat_param = FlatParamHandle.flatten_params(params_to_flatten, requires_grad)
self.flat_param.init_metadata(
param_infos, numels, shapes, prefixed_param_names, shared_param_infos,
)
@staticmethod
def flatten_params(
params: Sequence[torch.Tensor],
requires_grad: bool,
) -> FlatParameter:
"""
Flattens the parameters in ``params`` into a single
:class:`FlatParameter`. This should be the only way used to construct
:class:`FlatParameter` s.
We expose this factory method for checkpointing (e.g. sharded state
dict). The flattened parameter's metadata should only be initialized
once (see :meth:`init_metadata`), but its tensor data may be reloaded.
"""
with torch.no_grad():
flat_params = [
p.detach().reshape(-1) if isinstance(p, nn.Parameter)
else p.reshape(-1) for p in params
]
flat_param_data = torch.cat(flat_params, dim=0)
flat_param = FlatParameter(flat_param_data, requires_grad=requires_grad)
return flat_param
@staticmethod
def _get_unflat_views(
flat_param: FlatParameter,
tensor: Optional[torch.Tensor] = None,
) -> Iterator[Tensor]:
"""
Returns unflattened ``Tensor`` views into ``tensor`` if it is not
``None`` or ``flat_param`` otherwise, where the unflattening is based
on ``flat_param`` 's metadata.
In other words, to get views into the unsharded flattened parameter,
pass ``tensor`` as ``None``, but to get views into tensor optimizer
state, pass ``tensor`` as the optimizer state tensor.
"""
if tensor is None:
tensor = flat_param
assert tensor.numel() == flat_param._unsharded_size.numel(), \
f"Expects {flat_param._unsharded_size.numel()} numel but got " \
f"{tensor.numel()} numel"
views = (
subtensor.view(shape) for (subtensor, shape) in
zip(torch.split(tensor, flat_param._numels, dim=0), flat_param._shapes) # type: ignore[arg-type]
)
return views
def _unflatten(self, as_params: bool) -> None:
"""
Unflattens the unsharded flattened parameter by setting the original
module parameter variables to be views into it.
Args:
as_params (bool): If ``True``, then registers the original
parameters as ``nn.Parameter`` s; if ``False``, then registers
the original parameters only as ``Tensor`` s. ``False`` should
be used during forward/backward computation and when hiding the
original parameters from :meth:`nn.Module.named_parameters`.
"""
views = self._get_unflat_views(self.flat_param)
for view, (param_name, module, _) in zip(views, self.flat_param._param_infos):
if hasattr(module, param_name):
delattr(module, param_name)
if as_params:
module.register_parameter(param_name, nn.Parameter(view))
else:
setattr(module, param_name, view)
for (param_name, module, _, prim_param_name, prim_module, _) in self.flat_param._shared_param_infos:
if hasattr(module, param_name):
delattr(module, param_name)
assert hasattr(prim_module, prim_param_name)
param: Union[Tensor, nn.Parameter] = getattr(prim_module, prim_param_name)
if as_params:
assert isinstance(param, nn.Parameter)
module.register_parameter(param_name, param)
else:
setattr(module, param_name, param)
@contextlib.contextmanager
def unflatten_as_params(self) -> Generator:
"""
Assumes the flattened parameter is unsharded. When in the context,
unflattens the original parameters as ``nn.Parameter`` views into the
flattened parameter, and after the context, restores the original
parameters as ``Tensor`` views into the flattened parameter.
"""
self._unflatten(as_params=True)
try:
yield
finally:
self._unflatten(as_params=False)
def init_shard_metadata(
self,
sharded_flat_param_numel: int,
numel_padded: int,
rank: int,
) -> None:
"""
Initializes shard-related metadata for this rank's shard of the
flattened parameter: ``_shard_param_offsets``, ``_shard_indices``, and
``_shard_numel_padded``.
Args:
sharded_flat_param_numel (int): Numel of each rank's sharded
flattened parameter with padding (i.e. including
``numel_padded``).
numel_padded (int): Numel padded for this rank's sharded flattened
parameter.
rank (int): Caller's rank.
"""
if numel_padded > sharded_flat_param_numel:
raise ValueError(
f"Sharded flattened parameter with {sharded_flat_param_numel} "
f"numel cannot have {numel_padded} numel padded"
)
start = sharded_flat_param_numel * rank
end = sharded_flat_param_numel * (rank + 1) - 1 # inclusive
self.flat_param._shard_param_offsets, self.flat_param._shard_indices = ( # type: ignore[attr-defined]
self._get_shard_metadata(start, end)
)
self.flat_param._shard_numel_padded = numel_padded # type: ignore[attr-defined]
def _get_shard_metadata(
self,
start: int,
end: int,
) -> Tuple[Tuple[Tuple[int, int], ...], Tuple[int, int]]:
"""
Computes the shard metadata based on ``start`` and ``end``, which give
the closed interval of the unsharded flattened parameter specifying the
shard.
Args:
start (int): Start index (in units of numel) of this rank's shard
of the flattened parameter.
end (int): End index (in units of numel and inclusive) of this
rank's shard of the flattened parameter.
Return:
Tuple[Tuple[Tuple[int, int], ...], Tuple[int, int]]: See
``_shard_param_offsets`` and ``_shard_indices`` in
:class:`FlatParameter` 's docstring.
"""
flat_param_offsets = self._get_flat_param_offsets()
# Indices of the original parameters in this rank's sharded flattened
# parameter
shard_param_indices_range = [] # elements will be consecutive
# [start, end] offsets giving this rank's part of the flattened
# original module parameter (which will be [0, `p.numel()`-1] for any
# parameter that is not sharded across ranks)
shard_param_offsets = []
for i, (param_start, param_end) in enumerate(flat_param_offsets):
if start > param_end or end < param_start:
continue
if start <= param_start:
intra_param_start = 0
else:
intra_param_start = start - param_start
intra_param_end = min(param_end, end) - param_start
shard_param_indices_range.append(i)
shard_param_offsets.append((intra_param_start, intra_param_end)) # both inclusive
if len(shard_param_indices_range) == 0:
shard_param_indices = (0, 0)
assert len(shard_param_offsets) == 0
else:
shard_param_indices = (
shard_param_indices_range[0], shard_param_indices_range[-1],
)
assert len(shard_param_offsets) == \
shard_param_indices[-1] - shard_param_indices[0] + 1
return tuple(shard_param_offsets), shard_param_indices
@staticmethod
def _get_unpadded_shard(
tensor: Tensor,
rank: int,
world_size: int,
) -> Tuple[Tensor, int]:
"""
Returns the shard of ``tensor`` without any padding for the given
``rank`` and ``world_size`` and the numel to pad for that shard.
If ``tensor`` is already flattened or may be viewed in the flattened
shape (which is true in the expected usage), then this method does not
allocate any new tensor memory.
"""
chunks = torch.flatten(tensor).chunk(world_size)
if len(chunks) < (rank + 1):
# This rank gets an empty chunk fully padded with zeros since there
# are not enough chunks across ranks
chunk = chunks[0].new_empty(0)
else:
chunk = chunks[rank]
numel_to_pad = chunks[0].numel() - chunk.numel()
assert numel_to_pad >= 0, "Chunk's size should be at most the first chunk's size"
return chunk, numel_to_pad
@staticmethod
def _get_shard(
tensor: Tensor,
rank: int,
world_size: int,
) -> Tuple[Tensor, int]:
"""
Returns the shard of ``tensor`` with padding for the given ``rank`` and
``world_size`` and the numel padded for that shard.
This method allocates new memory (via :meth:`clone`) since the
unsharded ``tensor`` may be deallocated after this method returns.
"""
chunk, numel_to_pad = FlatParamHandle._get_unpadded_shard(tensor, rank, world_size)
shard = chunk.clone()
if numel_to_pad > 0:
shard = F.pad(shard, [0, numel_to_pad])
return shard, numel_to_pad
@staticmethod
def _get_sharded_size(tensor: Tensor, rank: int, world_size: int) -> torch.Size:
"""
Returns the shape of ``tensor`` after sharding including padding. This
requires ``tensor`` to have 1D shape and ensures that the returned
shape is 1D.
"""
assert len(tensor.shape) == 1, f"{tensor.shape}"
unpadded_sharded_tensor, numel_to_pad = (
FlatParamHandle._get_unpadded_shard(tensor, rank, world_size)
)
unpadded_sharded_size = unpadded_sharded_tensor.size()
assert len(unpadded_sharded_size) == 1, f"{unpadded_sharded_size}"
return torch.Size([unpadded_sharded_size[0] + numel_to_pad])
def _get_flat_param_offsets(self) -> List[Tuple[int, int]]:
"""Returns [start, end] offsets of each original parameter's flattened
data in the unsharded flattened parameter (without padding)."""
cumulative_sum = list(accumulate(self.flat_param._numels))
starts = [0] + cumulative_sum[:-1]
ends = [end - 1 for end in cumulative_sum] # inclusive
param_offsets = list(zip(starts, ends))
return param_offsets
def shard_metadata(
self,
) -> FlatParamShardMetadata:
"""Returns shard-related metadata specific to this rank's shard of the
flattened parameter."""
assert hasattr(self.flat_param, "_shard_indices") and \
hasattr(self.flat_param, "_shard_param_offsets"), \
"Shard metadata has not been initialized"
shard_param_start_index = self.flat_param._shard_indices[0] # type: ignore[attr-defined]
shard_param_end_index = self.flat_param._shard_indices[1] # type: ignore[attr-defined]
sl = slice(shard_param_start_index, shard_param_end_index + 1) \
if shard_param_start_index <= shard_param_end_index else slice(0, 0)
return FlatParamShardMetadata(
self.flat_param._prefixed_param_names[sl],
self.flat_param._shapes[sl],
self.flat_param._numels[sl],
self.flat_param._shard_param_offsets[:], # type: ignore[attr-defined]
)
def _get_modules(self) -> Set[nn.Module]:
"""Returns a :class:`set` of the modules whose parameters are included
in this handle's flattened parameter."""
return set(pi.module for pi in self.flat_param._param_infos).union(
set(spi.module for spi in self.flat_param._shared_param_infos)
)
def parameter_module_names(self) -> Iterator[Tuple[str, str]]:
shared_param_infos = [
ParamInfo(param_name, module, module_name)
for (param_name, module, module_name, _, _, _)
in self.flat_param._shared_param_infos
]
for param_name, _, module_name in chain(
self.flat_param._param_infos, shared_param_infos
):
yield (param_name, module_name)
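# Illustrative sketch (not part of the original file), assuming a single process:
# build a FlatParamHandle over a tiny module, then compute rank 0's padded shard
# as if there were 4 ranks. The function name is hypothetical.
def _example_flat_param_handle() -> None:
    lin = nn.Linear(3, 3, bias=True)  # 9 weight elements + 3 bias elements
    handle = FlatParamHandle(list(lin.parameters()), lin)
    flat_param = handle.flat_param
    assert flat_param.numel() == 12
    # 12 elements chunked across 4 ranks -> 3 elements per rank, no padding needed.
    shard, numel_to_pad = FlatParamHandle._get_shard(flat_param, rank=0, world_size=4)
    assert shard.numel() == 3 and numel_to_pad == 0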
| pytorch-master | torch/distributed/fsdp/flat_param.py |
import contextlib
import functools
from dataclasses import dataclass, field
from typing import Any, Callable, Dict, Generator, List, Optional, Tuple
import torch
__all__ = ["TracingConfig"]
@dataclass
class TracingConfig:
"""
Configurations used in ``ParamExecOrderWrapPolicy`` for symbolic tracing of
a model.
Args:
tracer (torch.fx.Tracer): An instance of ``torch.fx.Tracer`` that will
be used to perform symbolic tracing. ``tracer`` is default to be
``torch.fx.Tracer()``, but can also be instance of some child class
of ``torch.fx.Tracer``. For example, one may want to use
``HFTracer`` for models in Transformers: .. _Transformers:
https://huggingface.co/docs/transformers/index
concrete_args (Optional[Dict[str, Any]]): Concrete arguments that should
not be treated as ``torch.fx.Proxy`` when tracing the forward
function. ``concrete_args`` allows one to partially specialize the
forward function, including removing control flow or data
structures. ``concrete_args`` is also the argument used in
:meth:`~torch.fx.Tracer.trace`.
"""
tracer: torch.fx.Tracer = torch.fx.Tracer()
concrete_args: Optional[Dict[str, Any]] = None
@dataclass
class _ExecutionInfo:
"""
Contains the execution order information in the model forward pass.
Attributes:
current_module: record the module that is currently being traced.
module_forward_order: a list of modules, where the ordering is based on
when their forward function is called. ``module_forward_order``
            also records how many times each module's forward is called and is
            used to check the forward order across iterations.
param_exec_order: a list of parameters ordered based on their execution
order.
module_to_execution_infos: a dict that maps each module to a list of
tuples each containing a module and a list of named parameters.
``module_execution_info_dict`` is used as the parameter execution
order info. For a given module, each tuple: 1. either contains this
module and part of its ``named_parameters`` that will be executed
together, 2. or contains one of its child modules and all of the
child module's ``named_parameters``. The list of tuples is ordered
based on the parameter execution order.
"""
current_module: torch.nn.Module
module_forward_order: List[torch.nn.Module]
module_to_execution_infos: Dict[
torch.nn.Module,
List[Tuple[torch.nn.Module, List[Tuple[str, torch.nn.Parameter]]]],
]
param_exec_order: List[torch.nn.Parameter] = field(default_factory=list)
def _init_execution_info(root_module: torch.nn.Module) -> _ExecutionInfo:
"""
Create an instance of _ExecutionInfo with initialization based on
``root_module``.
Args:
root_module (torch.nn.Module): the module to get the execution
information via ``tracer.trace()`` inside ``_patch_tracer``.
"""
return _ExecutionInfo(
current_module=root_module,
module_forward_order=[root_module],
module_to_execution_infos={root_module: []},
)
def _patched_create_proxy(
create_proxy: Callable,
execution_info: _ExecutionInfo,
prefixed_param_name_to_param: Dict[str, torch.nn.Parameter],
kind: str,
target: torch.fx.node.Target,
args: Tuple[Any, ...],
kwargs: Dict[str, Any],
name: Optional[str] = None,
type_expr: Optional[Any] = None,
    proxy_factory_fn: Optional[Callable[[torch.fx.Node], torch.fx.Proxy]] = None,
) -> torch.fx.Proxy:
"""
Override of :meth:`~torch.fx.Tracer.create_proxy`. ``Tracer.create_proxy``
is called in symbolic tracing for each leaf function/method/module. This
override intercepts the recording of each of these operations to update
``execution_info.module_to_execution_infos``.
Args:
create_proxy (Callable):
The ``create_proxy`` function to be patched.
execution_info (_ExecutionInfo):
Used to record the execution information.
prefixed_param_name_to_param (Dict[str, torch.nn.Parameter]):
A dict that maps each prefixed parameter name to the parameter.
kind (str):
The type of the target method. One of 'call_function',
'call_method', 'get_attr', 'call_module', 'placeholder', or
'output'. The semantics of these opcodes are described in the
``torch.fx.Graph`` docstring. This is the input to ``create_proxy``.
target (torch.fx.node.Target):
Contains the string name of the method. This is the input to
``create_proxy``.
args (Tuple[Any, ...]):
Arguments of the method. This is the input to ``create_proxy``.
kwargs (Dict[str, Any]):
Keyword arguments of the method. This is the input to
``create_proxy``.
name (Optional[str]):
An optional string name for the ``Node`` created in
``create_proxy``. This is the input to ``create_proxy``.
type_expr (Optional[Any]):
An optional type annotation representing the Python type the output
of a node will have. This is the input to ``create_proxy``.
proxy_factory_fn (Callable[[torch.fx.Node], torch.fx.Proxy]):
An alternative proxy constructor used in ``create_proxy``. This is
the input to ``create_proxy``.
"""
proxy = create_proxy(kind, target, args, kwargs, name, type_expr, proxy_factory_fn)
module = execution_info.current_module
if kind in ["call_function", "call_method"]:
if args is not None:
named_params: List[Tuple[str, torch.nn.Parameter]] = []
for arg in args:
if isinstance(arg, torch.fx.Proxy) and arg.node.target in prefixed_param_name_to_param:
param = prefixed_param_name_to_param[arg.node.target]
named_params.append((arg.node.target, param))
if param not in set(execution_info.param_exec_order):
execution_info.param_exec_order.append(param)
if named_params:
execution_info.module_to_execution_infos[module].append((module, named_params))
elif kind == "call_module":
named_params = list(module.named_parameters())
if named_params:
execution_info.module_to_execution_infos[module].append(
(module, named_params)
)
for (_, p) in named_params:
if p not in set(execution_info.param_exec_order):
execution_info.param_exec_order.append(p)
return proxy
def _patched_call_module(
call_module: Callable,
execution_info: _ExecutionInfo,
module: torch.nn.Module,
forward: Callable[..., Any],
args: Tuple[Any, ...],
kwargs: Dict[str, Any],
) -> Any:
"""
Override of :meth:`~torch.fx.Tracer.call_module`. ``Tracer.call_module`` is
called in symbolic tracing for each non-root module. This override
intercepts the recording of each operation to update
``execution_info.module_forward_order`` and
``execution_info.module_to_execution_infos``.
Args:
call_module (Callable):
The ``call_module`` function to be patched.
execution_info (_ExecutionInfo):
            Used to record the execution information.
module (torch.nn.Module):
The module for which a call is being emitted.
forward (Callable[..., Any]):
The ``forward()`` method of the ``torch.nn.Module`` to be invoked.
args (Tuple[Any, ...]):
``args`` of the module callsite.
kwargs (Dict[str, Any]):
``kwargs`` of the module callsite.
"""
execution_info.module_forward_order.append(module)
named_params = list(module.named_parameters())
if named_params:
execution_info.module_to_execution_infos[execution_info.current_module].append(
(module, list(module.named_parameters()))
)
# Stores away current_module for restoration later
prev_current_module = execution_info.current_module
execution_info.current_module = module
# Note that if the forward of module is called multiple times, this will record
# the execution info of the last forward pass.
execution_info.module_to_execution_infos[module] = []
output = call_module(module, forward, args, kwargs)
execution_info.current_module = prev_current_module
return output
@contextlib.contextmanager
def _patch_tracer(
tracer: torch.fx.Tracer,
root_module: torch.nn.Module,
execution_info: _ExecutionInfo,
) -> Generator:
"""
Within the context manager, patches the input tracer so that during
``tracer.trace()``, the forward order of all modules and the parameter
execution information are recorded. The patches of the input tracer will be
removed after the context manager exits.
Args:
tracer (torch.fx.Tracer): the input ``tracer`` whose member functions
will be patched within the context manager.
root_module (torch.nn.Module): the top-level module to be traced
and should not contain any FSDP modules.
execution_info (_ExecutionInfo): used to record the execution order
information when performing ``tracer.trace()`` within the context
manager.
"""
original_call_module = tracer.call_module
original_create_proxy = tracer.create_proxy
tracer.call_module = functools.partial(
_patched_call_module, original_call_module, execution_info
)
prefixed_param_name_to_param = dict(root_module.named_parameters())
tracer.create_proxy = functools.partial(
_patched_create_proxy, original_create_proxy, execution_info, prefixed_param_name_to_param
)
try:
yield
finally:
tracer.call_module = original_call_module
tracer.create_proxy = original_create_proxy
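# Illustrative sketch (not part of the original file): how the helpers above are
# meant to compose in a single process; the model and function name are
# hypothetical and assume this toy model traces successfully.
def _example_trace_execution_order() -> None:
    model = torch.nn.Sequential(
        torch.nn.Linear(4, 4), torch.nn.ReLU(), torch.nn.Linear(4, 2)
    )
    tracer = torch.fx.Tracer()
    execution_info = _init_execution_info(model)
    with _patch_tracer(tracer, model, execution_info):
        tracer.trace(model)
    # Parameters are recorded in the order they are used in the forward pass.
    assert len(execution_info.param_exec_order) == len(list(model.parameters()))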
| pytorch-master | torch/distributed/fsdp/_symbolic_trace.py |
import bisect
import itertools
import math
from typing import Any, Dict, List, Tuple, Optional
import torch
import torch.distributed as dist
import torch.nn.functional as F
from torch.distributed import distributed_c10d
from torch.distributed._shard.sharded_tensor import ShardedTensor
from torch.distributed._shard.sharding_spec import (
ChunkShardingSpec,
EnumerableShardingSpec,
ShardingSpec,
)
def _sharding_spec_to_offsets(
sharding_spec: ShardingSpec, tensor_numel: int, world_size: int
) -> List[int]:
r"""
Translates the sharding spec to a list of offsets along dim 0. If the
sharding spec is ChunkShardingSpec, only the ``dim`` is used and the
placement is not used.
"""
offsets: List[int] = []
if isinstance(sharding_spec, EnumerableShardingSpec):
for shard in sharding_spec.shards:
offsets.append(shard.shard_offsets[0])
elif isinstance(sharding_spec, ChunkShardingSpec):
assert sharding_spec.dim == 0
chunk_size = math.ceil(tensor_numel / world_size)
if chunk_size == 1:
offsets = [
rank if rank < tensor_numel else tensor_numel
for rank in range(world_size)
]
else:
offsets = [chunk_size if rank > 0 else 0 for rank in range(world_size)]
offsets = list(itertools.accumulate(offsets))
else:
raise ValueError(f"Un-recognized sharding spec type {type(sharding_spec)}.")
return offsets
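# Worked example (illustrative, not part of the original file): for a 1-D tensor
# with 10 elements and a ChunkShardingSpec over world_size=4, chunk_size is
# ceil(10 / 4) = 3, so the returned dim-0 offsets are [0, 3, 6, 9]; when
# chunk_size == 1 (e.g. 10 elements over world_size=16), ranks past the tensor
# length are clamped to the tensor's numel.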
def _offsets_to_split_sizes(
input_offsets: List[int],
output_offsets: List[int],
tensor_numel: int,
world_size: int,
my_rank: int,
) -> Tuple[List[int], List[int]]:
r"""
Given the shard offsets for each rank of the input tensor and output tensor,
this API returns the corresponding split sizes that can be passed to
all_to_all_single().
"""
def _get_interval(offsets):
if my_rank != world_size - 1:
return offsets[my_rank], offsets[my_rank + 1] - 1
else:
return offsets[my_rank], tensor_numel - 1
def _offsets_to_sizes(offsets, begin, end):
sizes = []
for i, offset in enumerate(offsets):
next_offset = offsets[i + 1] if i < len(offsets) - 1 else end + 1
sizes.append(
(next_offset - offset)
- max(begin - offset, 0)
- max(next_offset - end - 1, 0)
)
return sizes
def _convert(from_offsets, to_offsets, split_sizes):
begin, end = _get_interval(from_offsets)
to_begin_rank = bisect.bisect(to_offsets, begin) - 1
to_end_rank = bisect.bisect(to_offsets, end) - 1
_split_sizes = _offsets_to_sizes(
to_offsets[to_begin_rank : to_end_rank + 1], begin, end
)
split_sizes[to_begin_rank : to_end_rank + 1] = _split_sizes
input_split_sizes = [0 for _ in range(world_size)]
output_split_sizes = [0 for _ in range(world_size)]
_convert(input_offsets, output_offsets, input_split_sizes)
_convert(output_offsets, input_offsets, output_split_sizes)
return input_split_sizes, output_split_sizes
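# Illustrative sketch (not part of the original file): reshard a 16-element 1-D
# tensor from even chunks of 4 (offsets [0, 4, 8, 12]) to uneven chunks (offsets
# [0, 6, 10, 14]), viewed from rank 1. The function name is hypothetical.
def _example_offsets_to_split_sizes() -> None:
    input_split, output_split = _offsets_to_split_sizes(
        input_offsets=[0, 4, 8, 12],
        output_offsets=[0, 6, 10, 14],
        tensor_numel=16,
        world_size=4,
        my_rank=1,
    )
    # Rank 1 owns input elements [4, 7]: 2 go to output rank 0 and 2 to rank 1.
    assert input_split == [2, 2, 0, 0]
    # Rank 1 receives output elements [6, 9]: 2 from input rank 1 and 2 from rank 2.
    assert output_split == [0, 2, 2, 0]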
def _reshard_flatten_tensor(
input_tensor: ShardedTensor,
output_spec: ShardingSpec,
world_size: int,
my_rank: int,
device: torch.device,
process_group: Optional[dist.ProcessGroup],
) -> torch.Tensor:
"""
    Reshards a sharded flattened tensor; this is used by FSDP to implement
    sharded state_dict, but the functionality is not supported by ShardedTensor.
This API is designed to be used for FSDP; therefore this API supports only
1-D ShardedTensor (hence the naming, reshard_flatten_tensor).
This API uses the ChunkShardingSpec and EnumerableShardingSpec from
torch.distributed.sharding_spec but ignores the placement field in
ChunkShardingSpec, as the placement requires the callees understand the
number of GPUs per node. The API simply uses the semantics of the sharding
specs.
Args:
input_tensor (ShardedTensor): the original ShardedTensor. Must be 1D.
        output_spec (ShardingSpec): the sharding spec for the output tensor.
world_size (int): total trainer count.
my_rank (int): the rank for this trainer.
Returns:
The local shard for the new ShardedTensor.
"""
input_spec = input_tensor.sharding_spec()
size = input_tensor.size()
if isinstance(size, int):
raise ValueError("The input tensor has no dimensions.")
tensor_numel = size.numel()
input_offsets = _sharding_spec_to_offsets(input_spec, tensor_numel, world_size)
output_offsets = _sharding_spec_to_offsets(output_spec, tensor_numel, world_size)
input_split_sizes, output_split_sizes = _offsets_to_split_sizes(
input_offsets, output_offsets, tensor_numel, world_size, my_rank
)
output_size = sum(output_split_sizes)
local_shard = torch.empty(output_size, dtype=input_tensor.dtype, device=device)
dist.all_to_all_single(
local_shard,
input_tensor.local_shards()[0].tensor,
input_split_sizes=input_split_sizes,
output_split_sizes=output_split_sizes,
group=process_group,
)
return local_shard
def _all_gather_sharded_tensor(
sharded_tensor: ShardedTensor, pg: Optional[dist.ProcessGroup] = None
) -> torch.Tensor:
if pg is None:
pg = distributed_c10d._get_default_group()
world_size = dist.get_world_size(pg)
shards = sharded_tensor.local_shards()
local_tensor = shards[0].tensor.flatten()
dim_0_size = sharded_tensor.size()[0] # type: ignore[index]
tensor_numel = sharded_tensor.size().numel() # type: ignore[union-attr]
chunk_size = math.ceil(dim_0_size / world_size) * tensor_numel // dim_0_size
num_padding = chunk_size - local_tensor.numel()
if num_padding > 0:
local_tensor = F.pad(local_tensor, [0, num_padding])
tensor = torch.empty(chunk_size * world_size, dtype=local_tensor.dtype).cuda()
dist._all_gather_base(tensor, local_tensor, group=pg)
return tensor.narrow(0, 0, tensor_numel).reshape(sharded_tensor.size())
def _gather_state_dict(
state_dict: Dict[str, Any],
pg: Optional[dist.ProcessGroup] = None,
) -> Dict[str, Any]:
"""
    Given a state_dict, this API gathers all the ShardedTensors in the
    state_dict and creates a new state_dict in which those values are replaced
    by the gathered (full) tensors on every rank.
"""
new_state_dict = {}
for key, tensor in state_dict.items():
if isinstance(tensor, ShardedTensor):
"""
# TODO: It is unclear why the following implementation cause a
# timeout in some unittests on AWS servers but not other environment.
output_tensor = (
torch.empty(tensor.shape, dtype=tensor.dtype).cuda()
if curr_rank == output_rank
else None
)
tensor.gather(output_rank, output_tensor)
"""
output_tensor = _all_gather_sharded_tensor(tensor, pg)
tensor = output_tensor
new_state_dict[key] = tensor
return new_state_dict
| pytorch-master | torch/distributed/fsdp/shard_utils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import contextlib
from dataclasses import dataclass
from typing import (
Any,
Callable,
Dict,
Generator,
Optional,
Set,
Tuple,
Type,
cast,
)
import torch.nn as nn
from torch.nn.modules.batchnorm import _BatchNorm
__all__ = [
"always_wrap_policy",
"lambda_auto_wrap_policy",
"transformer_auto_wrap_policy",
"size_based_auto_wrap_policy",
"enable_wrap",
"wrap",
"ParamExecOrderWrapPolicy",
]
def always_wrap_policy(*args, **kwargs) -> bool:
"""
A simple wrapper policy that always returns ``True``,
i.e. when passed as the `auto_wrap_policy` into FSDP,
this will result in all submodules being wrapped as
distinct FSDP instances.
"""
return True
def lambda_auto_wrap_policy(
module: nn.Module,
recurse: bool,
unwrapped_params: int,
lambda_fn: Callable
) -> bool:
"""
    A convenient auto wrap policy to wrap submodules based on an arbitrary user
    function. If ``lambda_fn(submodule) == True``, the submodule will be wrapped
    as a ``wrapper_cls`` unit.
Return if a module should be wrapped during auto wrapping.
The first three parameters are required by :func:`_recursive_wrap`.
Args:
module (nn.Module):
The module to be considered in this decision.
recurse (bool):
Indicate if this is called to make a decision on whether we
should recurse down a subgraph of the module structure.
If False, it means this function is called to make a decision
on whether we should wrap the said module.
unwrapped_params (int):
The number of parameters yet to be wrapped in this module.
lambda_fn (Callable[nn.Module] -> bool):
If this returns ``True``, this module will be wrapped by
wrapper_cls individually.
"""
if recurse:
# always recurse
return True
else:
        # if not recursing, decide whether we should wrap for the leaf node or remainder
return lambda_fn(module)
def transformer_auto_wrap_policy(
module: nn.Module,
recurse: bool,
unwrapped_params: int,
transformer_layer_cls: Set[Type[nn.Module]],
) -> bool:
"""
A convenient auto wrap policy for transformer models. If the submodule
is an instance of transformer_layer_cls, the submodule will be wrapped
as a FSDP unit. Otherwise, all the other remainder submodules are wrapped
    by the outermost FSDP unit. Right now, FSDP requires submodules that share
    weights to be wrapped in the same FSDP unit; this auto wrap policy can
    conveniently wrap the shared embeddings into the same FSDP unit for
    transformer models. In the near future, FSDP will support submodules that
    share weights to be wrapped in separate FSDP units.
Return if a module should be wrapped during FSDP auto wrapping.
The first three parameters are required by :func:`_recursive_wrap`.
Args:
module (nn.Module):
The module to be considered in this decision.
recurse (bool):
Indicate if this is called to make a decision on whether we
should recurse down a subgraph of the module structure.
If False, it means this function is called to make a decision
on whether we should wrap the said module.
unwrapped_params (int):
The number of parameters yet to be wrapped in this module.
        transformer_layer_cls (Set[Type[nn.Module]]):
            Submodules that are instances of one of the ``transformer_layer_cls``
            classes will be wrapped as separate FSDP units.
"""
if recurse:
# always recurse
return True
else:
        # if not recursing, decide whether we should wrap for the leaf node or remainder
return isinstance(module, tuple(transformer_layer_cls))
def _wrap_batchnorm_individually(
module: nn.Module,
recurse: bool,
*args,
**kwargs,
) -> bool:
"""
A policy that wraps ``BatchNorm`` instances in their own FSDP unit.
"""
if recurse:
# always recurse
return True
else:
# if not recursing, decide whether we should wrap based on whether it is a
# BN layer or not.
return isinstance(module, _BatchNorm)
def _or_policy(
module: nn.Module,
recurse: bool,
unwrapped_params: int,
policies,
) -> bool:
"""
A policy that wraps ``module`` if any policy in the passed in iterable of
``policies`` returns ``True``.
"""
return any(
policy(module, recurse, unwrapped_params) for policy in policies
)
def size_based_auto_wrap_policy(
module: nn.Module,
recurse: bool,
unwrapped_params: int,
# These are customizable for this policy function.
min_num_params: int = int(1e8),
force_leaf_modules: Optional[Set[Type[nn.Module]]] = None,
exclude_wrap_modules: Optional[Set[Type[nn.Module]]] = None,
) -> bool:
"""A size based auto_wrap_policy function for FSDP API.
Return if a module should be wrapped during FSDP auto wrapping.
The first three parameters are used by :func:`_recursive_wrap`. If
    you write a custom version of this policy function, your version needs to
    at least accept the first three parameters and is then free to do whatever
    it wants in the function.
Args:
module (nn.Module):
The module to be considered in this decision.
recurse (bool):
Indicate if this is called to make a decision on whether we
should recurse down a subgraph of the module structure.
If False, it means this function is called to make a decision
on whether we should wrap the said module.
unwrapped_params (int):
The number of parameters yet to be wrapped in this module.
min_num_params (int):
            Customizable policy input. It controls the size threshold for how
            big a module must be to be considered for wrapping.
force_leaf_modules (Set[Type[nn.Module]]): set of module types to
keep as leaves, i.e., their children will never be wrapped.
exclude_wrap_modules (Set[Type[nn.Module]]):
Customizable set of module types to be excluded in wrapping.
"""
force_leaf_modules = (
size_based_auto_wrap_policy.FORCE_LEAF_MODULES # type: ignore[attr-defined]
if force_leaf_modules is None
else force_leaf_modules
)
exclude_wrap_modules = (
size_based_auto_wrap_policy.EXCLUDE_WRAP_MODULES # type: ignore[attr-defined]
if exclude_wrap_modules is None
else exclude_wrap_modules
)
is_large = unwrapped_params >= min_num_params
if recurse:
# We should recurse if the module is big enough but not in force_leaf_modules list.
return is_large and not isinstance(module, tuple(force_leaf_modules))
else:
# If we are not recursing, determine if we should wrap.
return is_large and not isinstance(module, tuple(exclude_wrap_modules))
# Set those defaults to the size_based_auto_wrap_policy function. Make them easy to be imported.
size_based_auto_wrap_policy.EXCLUDE_WRAP_MODULES = {nn.ModuleList, nn.ModuleDict} # type: ignore[attr-defined]
size_based_auto_wrap_policy.FORCE_LEAF_MODULES = {nn.MultiheadAttention} # type: ignore[attr-defined]
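# Illustrative sketch (not part of the original file): extra policy arguments are
# typically bound with functools.partial before the policy is handed to FSDP as
# auto_wrap_policy; here the bound policy is just called directly.
def _example_size_based_policy() -> None:
    import functools
    policy = functools.partial(size_based_auto_wrap_policy, min_num_params=100)
    big = nn.Linear(32, 32)  # 32 * 32 + 32 = 1056 parameters
    small = nn.Linear(2, 2)  # 2 * 2 + 2 = 6 parameters
    assert policy(module=big, recurse=False, unwrapped_params=1056)
    assert not policy(module=small, recurse=False, unwrapped_params=6)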
@contextlib.contextmanager
def enable_wrap(
*, wrapper_cls: Any, **wrapper_kwargs: Any
) -> Generator[None, None, None]:
"""
Context manager to wrap modules using a wrapper.
Useful for when you'd like to apply the same configuration arguments to all
child modules that you wrap. A particularly important use case is wrapping
large layers so that they get sharded (in-place) during initialization, to
avoid running out of system memory. Large layers can indicate that they
should be sharded via the ``wrap`` annotation and this context manager can
provide the exact configuration for these nested instances.
Usage::
with enable_wrap(wrapper_cls, **params):
# Wraps layer in FSDP by default if within context
self.l1 = wrap(torch.nn.Linear(5, 5))
Args:
wrapper_cls:
Class that `wrap` annotation will `wrap` modules with, such as
`FullyShardedDataParallel`.
**wrapper_kwargs:
Configuration settings that will be passed to all ``wrap``
instances inside the context
"""
kwargs = {
**{"wrapper_cls": wrapper_cls},
**wrapper_kwargs,
}
with _ConfigAutoWrap(**kwargs):
yield
def wrap(module: nn.Module, **wrap_overrides: Any) -> nn.Module:
"""
Annotate that a module should be wrapped. Annotated modules will only be
wrapped if inside of an :func:`enable_wrap` context manager. This allows
a module to be initialized both with and without a wrapper without code
change.
The class that this function wraps the passed in ``nn.Module`` with is the
passed in ``wrapper_cls`` argument into ``enable_wrap``. Both
``enable_wrap`` and ``wrap`` can take in kwargs specifying how to construct
the ``wrapper_cls`` instance. In the case of duplicate kwargs in
``enable_wrap`` and ``wrap``, the argument passed into ``wrap`` will be
respected.
Usage::
with enable_wrap(wrapper_cls=FSDP, **fsdp_config):
# Wraps layer in FSDP by default if within context
self.l1 = wrap(torch.nn.Linear(5, 5))
Args:
module (nn.Module): module to wrap (if in :func:`enable_wrap` context)
**wrap_overrides: configuration overrides that will take priority over
the values provided by the :func:`enable_wrap` context
"""
if _ConfigAutoWrap.in_autowrap_context:
assert _ConfigAutoWrap.wrapper_cls is not None
wrap_overrides = {**_ConfigAutoWrap.kwargs, **wrap_overrides}
return _wrap(
module,
_ConfigAutoWrap.wrapper_cls,
**wrap_overrides,
)
return module
@dataclass
class ParamExecOrderWrapPolicy:
"""
This is the class used for the wrapping policy that wraps parameters and performs
the communication scheduling based on the parameter execution order in the forward pass
(also called non-recursive wrapping policy).
    The policy contains multiple wraps. Each wrap contains original parameters that will be executed together,
    and the wrap transfers these parameters into one ``FlatParameter``. In both the forward and backward passes,
    the sharded parameters in each wrap will be gathered just before these parameters are used in the passes.
    These parameters will then be resharded once they have been used.
TODO (linjianma): For now, the parameters contained in each wrap of ``ParamExecOrderWrapPolicy``
are the parameters in each wrap of the ``init_policy`` (a recursive wrapping policy).
Later we will wrap parameters based on bucket size.
Args:
init_policy (Callable):
The initial recursive wrapping policy used to guide the wrapping of
this policy. If tracing_config is none, in the first forward and
backward iteration, ``init_policy`` is used to record parameter
execution order. Otherwise, init_policy is only used in FSDP
constructor for module level wrapping.
The default ``always_wrap_policy`` might not be the best choice for every model. For example, for
transformer based models, setting ``transformer_auto_wrap_policy`` as the ``init_policy`` will guarantee
wrapping each transformer layer into one FSDP unit, and can be easily combined with checkpointing
within each transformer layer.
tracing_config (Optional[TracingConfig]):
The configuration used to perform symbolic tracing at FSDP
constructor to get the module and parameter execution order. The
type of ``tracing_config`` needs to be either ``None`` or
``TracingConfig``. If set as ``None``, then symbolic tracing is not
enabled, and one forward as well as backward iteration are needed to
get the parameter execution order.
    .. warning:: Note that not all modules can be successfully traced when
    ``tracing_config`` is not None and symbolic tracing is enabled. The two
    cases below may fail to trace: 1. when there is a data-dependent
branch, 2. when the forward pass contains operators that don't support
``torch.fx.Proxy`` as the input type (e.g. ``arange``, ``zeros``, ``ones``,
``full``, ``full_like``, ``eye``, ``empty``, ``tensor``). For those cases,
users can set ``tracing_config = None`` to disable symbolic tracing.
"""
init_policy: Callable = always_wrap_policy
tracing_config: Any = None
def _wrap(module: nn.Module, wrapper_cls: Callable, **kwargs) -> nn.Module:
assert wrapper_cls is not None
if hasattr(module, '_wrap_overrides'):
# If module has a _wrap_overrides attribute, we force overriding the
# FSDP config with these attributes for this module. Currently this
# is only used to disable mixed precision for BatchNorm when
# auto_wrapping.
overrides = {**kwargs, **module._wrap_overrides} # type: ignore[arg-type]
return wrapper_cls(module, **overrides)
return wrapper_cls(module, **kwargs)
def _recursive_wrap(
module: nn.Module,
auto_wrap_policy: Callable,
wrapper_cls: Callable,
ignored_modules: Set[nn.Module],
ignored_params: Set[nn.Parameter],
only_wrap_children: bool = False,
**kwargs: Any
) -> Tuple[nn.Module, int]:
"""
Automatically wrap child modules of *module* that meet the given
criteria with :func:`auto_wrap`. Does not rely on _ConfigAutoWrap.
Args:
module (nn.Module):
module to recursively wrap
auto_wrap_policy (Callable):
A callable specifying a policy to recursively wrap layers with FSDP.
ignored_modules (Set[torch.nn.Module]): Modules to ignore when
wrapping.
ignored_params (Set[torch.nn.Parameter]): Parameters to ignore when
wrapping; these should be the parameters contained in the modules
in ``ignored_modules``.
Returns:
(nn.Module, int):
            Wrapped module and the number of parameters wrapped recursively.
"""
assert auto_wrap_policy is not None, "Must specify auto_wrap_policy."
assert wrapper_cls is not None, "Must specify wrapper_cls"
# Make sure no child is already wrapped.
for _, child in module.named_modules():
if child in ignored_modules:
continue
try:
assert not isinstance(child, cast(type, wrapper_cls))
except TypeError:
# wrapper_cls is a function as opposed to a class type, just bypass above check.
pass
# We count all params, assuming none of them are already wrapped.
num_params = sum(
p.numel() for p in module.parameters() if p not in ignored_params
)
assert auto_wrap_policy is not None
if auto_wrap_policy(module=module, recurse=True, unwrapped_params=num_params):
total_wrapped_params = 0
# Iterate through the children, recursively wrap if necessary
for name, child in module.named_children():
if child in ignored_modules:
continue
wrapped_child, num_wrapped_params = _recursive_wrap(
module=child,
auto_wrap_policy=auto_wrap_policy,
wrapper_cls=wrapper_cls,
ignored_modules=ignored_modules,
ignored_params=ignored_params,
**kwargs,
)
setattr(module, name, wrapped_child)
# Keep track of how many parameters have been wrapped
total_wrapped_params += num_wrapped_params
# decide if we need to wrap the current module,
# since the left over parameters exceed the number of params to wrap
remainder = num_params - total_wrapped_params
if not only_wrap_children and auto_wrap_policy(
module=module, recurse=False, unwrapped_params=remainder
):
# Leaf node or final wrapping of the remainder both happen here.
return _wrap(module, wrapper_cls, **kwargs), num_params
else:
return module, total_wrapped_params
return module, 0
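# Illustrative sketch (not part of the original file): exercise _recursive_wrap
# with a trivial wrapper class instead of FSDP so that no process group is
# needed. The wrapper and function names are hypothetical.
def _example_recursive_wrap() -> None:
    import functools
    class _Marker(nn.Module):
        def __init__(self, module: nn.Module, **kwargs: Any) -> None:
            super().__init__()
            self.module = module
    model = nn.Sequential(nn.Linear(8, 8), nn.Linear(8, 8))
    policy = functools.partial(size_based_auto_wrap_policy, min_num_params=10)
    wrapped, num_wrapped = _recursive_wrap(
        module=model,
        auto_wrap_policy=policy,
        wrapper_cls=_Marker,
        ignored_modules=set(),
        ignored_params=set(),
    )
    # Each Linear (72 params) is wrapped; the root's 0 leftover params are not.
    assert isinstance(wrapped[0], _Marker) and isinstance(wrapped[1], _Marker)
    assert not isinstance(wrapped, _Marker)
    assert num_wrapped == 144  # 2 * (8 * 8 + 8)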
class _ConfigAutoWrap:
"""
Helper class to wrap modules based on default config args via a context manager.
See :func:`enable_wrap` for more information.
"""
in_autowrap_context: bool = False # Context flag
wrapper_cls: Optional[Callable] = None # The wrapper class
kwargs: Dict[str, Any] = {} # Wrapper's args
def __init__(self, **kwargs: Dict[str, Any]):
self.kwargs = kwargs
@staticmethod
def enable_autowrap_context(kwargs: Any) -> None:
if _ConfigAutoWrap.in_autowrap_context:
raise NotImplementedError(
"You are already within an autowrap context and we currently do not supported nested autowrap."
)
_ConfigAutoWrap.in_autowrap_context = True
# Get and save the wrapper cls for the context.
assert (
"wrapper_cls" in kwargs.keys()
), "Expected to pass in wrapper_cls arg into _ConfigAutoWrap."
_ConfigAutoWrap.wrapper_cls = cast(Callable, kwargs["wrapper_cls"])
del kwargs["wrapper_cls"]
# Save the rest.
_ConfigAutoWrap.kwargs = kwargs
@staticmethod
def disable_autowrap_context() -> None:
_ConfigAutoWrap.in_autowrap_context = False
_ConfigAutoWrap.wrapper_cls = None
_ConfigAutoWrap.kwargs = {}
def __enter__(self) -> None:
self.enable_autowrap_context(self.kwargs)
def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:
self.disable_autowrap_context()
| pytorch-master | torch/distributed/fsdp/wrap.py |
from collections import OrderedDict
import dataclasses
from typing import Any, Callable, Dict, List, Set, Tuple, Union
import torch
from torch.nn.modules.batchnorm import _BatchNorm
from torch.nn.parallel.scatter_gather import _is_namedtuple # type: ignore[attr-defined]
from torch.nn.utils.rnn import PackedSequence
"""Useful functions to deal with tensor types with other python container types."""
def _contains_batchnorm(module):
return any(
isinstance(mod, _BatchNorm) for mod in module.modules()
)
def _override_batchnorm_mixed_precision(module):
for mod in module.modules():
if isinstance(mod, _BatchNorm):
mod._wrap_overrides = {"mixed_precision": None} # type: ignore[assignment]
def _apply_to_tensors(
fn: Callable, container: Union[torch.Tensor, Dict, List, Tuple, Set, OrderedDict, PackedSequence]
) -> Any:
"""Recursively apply to all tensor in different kinds of container types."""
def apply(x: Union[torch.Tensor, Dict, List, Tuple, Set, OrderedDict, PackedSequence]) -> Any:
if torch.is_tensor(x):
return fn(x)
elif hasattr(x, "__dataclass_fields__"):
dc = dataclasses.replace(x)
for f in dataclasses.fields(dc):
name = f.name
setattr(dc, name, apply(getattr(dc, name)))
return dc
elif isinstance(x, OrderedDict):
od = x.__class__()
for key, value in x.items():
od[key] = apply(value)
return od
elif isinstance(x, PackedSequence):
apply(x.data)
return x
elif isinstance(x, dict):
return {key: apply(value) for key, value in x.items()}
elif _is_namedtuple(x):
res = (apply(el) for el in x)
return type(x)(*res)
elif isinstance(x, (list, tuple, set)):
return type(x)(apply(el) for el in x)
else:
return x
return apply(container)
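# Illustrative sketch (not part of the original file): double every tensor in a
# nested container while leaving non-tensor leaves untouched. The function name
# is hypothetical.
def _example_apply_to_tensors() -> None:
    container = {"a": [torch.ones(2), "keep-me"], "b": (torch.zeros(1),)}
    doubled = _apply_to_tensors(lambda t: t * 2, container)
    assert torch.equal(doubled["a"][0], torch.ones(2) * 2)
    assert doubled["a"][1] == "keep-me"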
def _apply_to_modules(
root_module: torch.nn.Module,
module_fn: Callable,
return_fn: Callable,
*args,
**kwargs,
):
"""
Performs a pre-order traversal of the modules in the hierarchy rooted at
``root_module``, applying ``module_fn`` at each module and finally
returning a value using ``return_fn``. The traversal constructs the full
module prefix name (e.g. "module.submodule." just like in model state dict)
and makes that available to ``module_fn``.
"""
def f(module: torch.nn.Module, prefix: str, *args, **kwargs):
# Call the module function before recursing over children (pre-order)
module_fn(module, prefix, *args, **kwargs)
for submodule_name, submodule in module.named_children():
if submodule is not None:
new_prefix = prefix + submodule_name + "."
f(submodule, new_prefix, *args, **kwargs)
f(root_module, "", *args, **kwargs)
return return_fn(*args, **kwargs)
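# Illustrative sketch (not part of the original file): use _apply_to_modules to
# collect prefixed parameter names of a small model. The function name is
# hypothetical.
def _example_collect_prefixed_param_names() -> None:
    model = torch.nn.Sequential(torch.nn.Linear(2, 2), torch.nn.Linear(2, 2))
    def module_fn(module, prefix, names):
        for param_name, _ in module.named_parameters(recurse=False):
            names.append(prefix + param_name)
    def return_fn(names):
        return names
    names = _apply_to_modules(model, module_fn, return_fn, [])
    assert names == ["0.weight", "0.bias", "1.weight", "1.bias"]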
| pytorch-master | torch/distributed/fsdp/_utils.py |
from dataclasses import dataclass
from typing import List, Union, Optional
from functools import reduce
from torch.distributed.remote_device import _remote_device
@dataclass
class ShardMetadata(object):
"""
Represents a shard of the overall Tensor including its
offsets, lengths and device placement.
Args:
shard_offsets(List[int]): Offsets in the original tensor indicating
the start offsets for this shard. Should have the same rank as
the original tensor.
shard_sizes(List[int]): Integers indicating the size of each
dimension for this shard. Should have the same rank as the
original tensor.
placement(:class:`torch.distributed._remote_device`):
Specifies the placement of this shard.
"""
__slots__ = ['shard_offsets', 'shard_sizes', 'placement']
shard_offsets: List[int]
shard_sizes: List[int]
placement: Optional[_remote_device]
def __init__(
self,
shard_offsets: List[int],
shard_sizes: List[int],
placement: Optional[Union[str, _remote_device]] = None
):
self.shard_offsets = shard_offsets
self.shard_sizes = shard_sizes
if isinstance(placement, str):
self.placement = _remote_device(placement)
else:
self.placement = placement
if len(self.shard_offsets) != len(self.shard_sizes):
raise ValueError(
f'shard_offsets and shard_sizes should have '
f'the same number of elements, found {len(self.shard_offsets)} '
                f'and {len(self.shard_sizes)} respectively')
for i in range(len(self.shard_offsets)):
if self.shard_offsets[i] < 0:
raise ValueError('shard_offsets should be >=0')
if self.shard_sizes[i] < 0:
raise ValueError('shard_sizes should be >= 0')
def __hash__(self):
def _hash_reduce(a, b):
return (a << 8) + hash(b)
res = reduce(_hash_reduce, self.shard_offsets, 37)
res = reduce(_hash_reduce, self.shard_sizes, res)
res = _hash_reduce(res, self.placement)
return res
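# Illustrative sketch (not part of the original file): metadata for rank 0's shard
# of a 16 x 4 tensor that is row-sharded across 4 ranks (4 rows per rank). The
# placement string follows the "rank:<idx>/<device>" convention.
def _example_shard_metadata() -> ShardMetadata:
    return ShardMetadata(
        shard_offsets=[0, 0],
        shard_sizes=[4, 4],
        placement="rank:0/cuda:0",
    )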
| pytorch-master | torch/distributed/_shard/metadata.py |
import functools
from inspect import signature
from .common_op_utils import _basic_validation
"""
Common utilities to register ops on ShardedTensor, ReplicatedTensor
and PartialTensor.
"""
def _register_op(op, func, op_table):
"""
Performs basic validation and registers the provided op in the given
op_table.
"""
if len(signature(func).parameters) != 4:
raise TypeError(
f'Custom sharded op function expects signature: '
f'(types, args, kwargs, process_group), but received '
f'signature: {signature(func)}')
op_table[op] = func
def _decorator_func(wrapped_func, op, op_table):
"""
Decorator function to register the given ``op`` in the provided
``op_table``
"""
@functools.wraps(wrapped_func)
def wrapper(types, args, kwargs, process_group):
_basic_validation(op, args, kwargs)
return wrapped_func(types, args, kwargs, process_group)
_register_op(op, wrapper, op_table)
return wrapper
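# Illustrative sketch (not part of the original file): a tensor subclass would
# typically build its own registration decorator on top of _decorator_func; the
# names below are hypothetical.
#
#     _MY_OPS: dict = {}
#
#     def my_op_impl(op):
#         return functools.partial(_decorator_func, op=op, op_table=_MY_OPS)
#
#     @my_op_impl(torch.add)
#     def sharded_add(types, args, kwargs, process_group):
#         ...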
| pytorch-master | torch/distributed/_shard/op_registry_utils.py |
import abc
import torch.nn as nn
class Sharder(abc.ABC):
"""
    This is an interface which allows the user to create more advanced
    sharding strategies that cannot easily be composed from the
    `ShardingSpec`.
:class:`torch.distributed._shard.sharding_plan.ShardingPlan` could
take an object of the `Sharder` and call `shard` to shard the module,
then replace the original module with sharded module returned.
"""
@abc.abstractmethod
def shard(self, module: nn.Module) -> nn.Module:
"""
Shard a module base on the implementation of this method, and
return the sharded version of the module.
Args:
module (:class:`torch.nn.Module`):
The module to apply sharding to.
Returns:
A :class:`torch.nn.Module` object that represents a module
that's already been sharded.
"""
pass
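# Illustrative sketch (not part of the original file): a hypothetical no-op
# Sharder showing the expected interface.
#
#     class NoOpSharder(Sharder):
#         def shard(self, module: nn.Module) -> nn.Module:
#             return module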
| pytorch-master | torch/distributed/_shard/sharder.py |
from .api import (
_replicate_tensor,
_shard_tensor,
load_with_process_group,
shard_module,
shard_parameter,
)
| pytorch-master | torch/distributed/_shard/__init__.py |
import torch
from torch.utils._pytree import tree_map
from typing import Optional
def _basic_validation(op, args=(), kwargs=None):
"""
    Common validation across all ops goes in here.
"""
from torch.distributed._shard.partial_tensor import _PartialTensor
from torch.distributed._shard.replicated_tensor import ReplicatedTensor
from torch.distributed._shard.sharded_tensor import ShardedTensor
if len(args) == 0 and (kwargs is None or len(kwargs) == 0):
raise ValueError(f" No input for '{op.__name__}'!")
# Validate types
has_distributed_tensor = False
def is_distributed_tensor(e):
nonlocal has_distributed_tensor
if isinstance(e, ReplicatedTensor) or isinstance(e, _PartialTensor) or isinstance(e, ShardedTensor):
has_distributed_tensor = True
tree_map(is_distributed_tensor, args)
tree_map(is_distributed_tensor, kwargs)
if not has_distributed_tensor:
raise TypeError(
f"torch function '{op.__name__}', with args: {args} and "
f"kwargs: {kwargs} are called without any distributed tensor!"
)
# Validate all distributed tensors use the same PG.
cur_pg: Optional[torch.distributed.ProcessGroup] = None
def validate_pg(e):
nonlocal cur_pg
if isinstance(e, ReplicatedTensor) or isinstance(e, _PartialTensor) or isinstance(e, ShardedTensor):
if cur_pg is not None and e._process_group is not cur_pg:
raise RuntimeError(
'All distributed tensors should use the '
'same ProcessGroup if used together in an op.'
)
cur_pg = e._process_group
tree_map(validate_pg, args)
tree_map(validate_pg, kwargs)
def _register_default_op(op, decorator):
@decorator(op)
def tensor_default_op(types, args=(), kwargs=None, pg=None):
"""
Handles ``__torch_function__`` dispatch for the default tensor ops that
behave the same as ``torch.Tensor`` such as ``torch.Tensor.shape`` or
``torch.Tensor.dtype``. We simply lower to the real op call with
DisableTorchFunction context like ``torch.Tensor.__torch_function__``
to avoid recursions.
"""
if kwargs is None:
kwargs = {}
with torch._C.DisableTorchFunction():
return op(*args, **kwargs)
| pytorch-master | torch/distributed/_shard/common_op_utils.py |
import torch
import torch.distributed as dist
from torch.distributed._shard.sharded_tensor.api import ShardedTensor
from torch.distributed import distributed_c10d
from torch.overrides import get_default_nowrap_functions
_REPLICATED_WITH_NON_TENSOR_ALLOWLIST = [
# List of ops where if parameters are a combination of ReplicatedTensors
# and non-tensors, we can still return a ReplicatedTensor as the result.
torch.unsqueeze,
torch.Tensor.unsqueeze,
torch.Tensor.__getitem__,
]
class ReplicatedTensor(torch.Tensor):
"""
ReplicatedTensor represents a tensor which is replicated across the `world_size` and
has the same value on each rank.
    ReplicatedTensor is a :class:`~torch.Tensor` subclass, and it can be used together
    with ShardedTensor/Tensor to express different types of computation. The
    inter-op rules are defined as follows (using torch.add as an example op):
ReplicatedTensor + ReplicatedTensor = ReplicatedTensor
ReplicatedTensor + torch.Tensor = torch.Tensor
ReplicatedTensor + ShardedTensor = ShardedTensor
ReplicatedTensor + other type (i.e. Scalar) = other type
    NOTE: We do not guarantee equal content of ReplicatedTensor across nodes after its
    construction. Although we define proper inter-op rules to make sure ReplicatedTensor
    stays the same, there's no enforcement of it (i.e. if you manually modify content on
    some ranks, the modified value will not automatically get synced to other nodes). If
    you wish to manually validate that tensors are the same across ranks, use `validate()`.
"""
_process_group: distributed_c10d.ProcessGroup
__slots__ = ["_process_group"]
def __new__(cls, data=None, process_group=None):
if data is None:
data = torch.empty(0)
r = torch.Tensor._make_subclass(cls, data, data.requires_grad) # type: ignore[arg-type]
r._process_group = ( # type: ignore[attr-defined]
process_group
if process_group is not None
else distributed_c10d._get_default_group()
)
return r
def __deepcopy__(self, memo):
if id(self) in memo:
return memo[id(self)]
else:
result = type(self)(self.data.clone(memory_format=torch.preserve_format), self._process_group)
memo[id(self)] = result
return result
def __repr__(self):
return f"ReplicatedTensor({super(ReplicatedTensor, self).__repr__()})"
@classmethod
def __torch_function__(cls, func, types, args=(), kwargs=None):
if kwargs is None:
kwargs = {}
        # We re-dispatch the execution to ShardedTensor's __torch_function__ if we
        # find ShardedTensor operands. We also check whether all args/kwargs are
        # ReplicatedTensor operands; we have to do this to ensure we do not convert
        # results back to ReplicatedTensor when not all operands are replicated.
all_replicated = True
replicated_with_non_tensor = True
replicated_pg = None
def dispatch_arg(arg):
            # This function returns a tuple: the first element represents whether the op
            # has been executed, the second element represents the result of the execution.
nonlocal replicated_pg, all_replicated, replicated_with_non_tensor
if isinstance(arg, ShardedTensor):
# redispatch to ShardedTensor
# TODO: handle ShardedTensor/PartialTensor inter-op with ReplicatedTensor
return True, arg.__torch_function__(func, types, args, kwargs)
if isinstance(arg, ReplicatedTensor):
if replicated_pg is None:
replicated_pg = arg._process_group
elif replicated_pg != arg._process_group:
raise RuntimeError(
f"ReplicatedTensor operands must be in the same process group "
f"in torch function '{func.__name__}', but found at least two "
f"ReplicatedTensor operands in different process groups! ")
elif isinstance(arg, torch.Tensor):
replicated_with_non_tensor = False
all_replicated = False
else:
all_replicated = False
return False, None
for arg in args:
redispatched, res = dispatch_arg(arg)
if redispatched:
return res
if kwargs is not None:
for k, v in kwargs.items():
redispatched, res = dispatch_arg(v)
if redispatched:
return res
        # We can't call super().__torch_function__() as it implicitly converts the result
        # back to tensor subclasses, whereas in our case we need to control the output type
        # based on the inter-op rules we defined.
with torch._C.DisableTorchFunction():
rs = func(*args, **kwargs)
if func in get_default_nowrap_functions():
return rs
result_not_replicated = isinstance(rs, torch.Tensor) and not isinstance(rs, ReplicatedTensor)
should_convert_to_replicated = all_replicated or (
replicated_with_non_tensor and func in _REPLICATED_WITH_NON_TENSOR_ALLOWLIST
)
if result_not_replicated and should_convert_to_replicated:
            # If all operands are ReplicatedTensors and the call did not get dispatched to
            # ShardedTensor's __torch_function__, the result is a torch.Tensor, so we convert
            # it and return a ReplicatedTensor according to our inter-op rules.
rs = rs.as_subclass(ReplicatedTensor) # type: ignore[arg-type]
# propagate the process_group field to result
rs._process_group = replicated_pg # type: ignore[attr-defined]
return rs
def validate(self) -> bool:
"""
        Validate that the ReplicatedTensor is legit by all-gathering tensors from all
        ranks in ``self._process_group`` and checking that they are the same.
        If some ranks have different values, a ValueError is raised.
        Returns:
            True if validation succeeds.
"""
world_size = dist.get_world_size(self._process_group)
current_rank = dist.get_rank(self._process_group)
tensors_on_rank = [torch.empty_like(self) for _ in range(world_size)]
dist.all_gather(tensors_on_rank, self, group=self._process_group)
# validate and check if all tensors are equal
for rank, tensor in enumerate(tensors_on_rank):
if not torch.allclose(self, tensor):
raise ValueError(
f"ReplicatedTensor have different values on rank {current_rank} and {rank}")
return True
def __setstate__(self, state):
with torch._C.DisableTorchFunction():
self.data = state
self.requires_grad = state.requires_grad
from torch.distributed._shard.api import _get_current_process_group
self._process_group = _get_current_process_group()
def __getstate__(self):
return self.data
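# Illustrative usage sketch, not upstream code. ReplicatedTensor requires a process
# group; the single-rank gloo setup below (MASTER_ADDR/MASTER_PORT env defaults,
# world_size=1) is an assumption made so the example can run in one process.
if __name__ == "__main__":
    import os
    os.environ.setdefault("MASTER_ADDR", "localhost")
    os.environ.setdefault("MASTER_PORT", "29500")
    dist.init_process_group("gloo", rank=0, world_size=1)
    replica = ReplicatedTensor(torch.ones(4))
    out = replica + replica  # ReplicatedTensor + ReplicatedTensor -> ReplicatedTensor
    mixed = replica + torch.ones(4)  # ReplicatedTensor + plain Tensor -> plain Tensor
    assert isinstance(out, ReplicatedTensor)
    replica.validate()  # trivially succeeds with world_size == 1
    dist.destroy_process_group()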
| pytorch-master | torch/distributed/_shard/replicated_tensor.py |
from contextlib import contextmanager
import torch
import torch.distributed as dist
import torch.nn as nn
from torch.distributed import distributed_c10d
from torch.distributed._shard.sharded_tensor import (
ShardedTensor,
_PartialTensor
)
from .replicated_tensor import ReplicatedTensor
from .sharding_spec import (
ShardingSpec,
ChunkShardingSpec
)
from .sharding_plan import (
ShardingPlan
)
from .sharder import Sharder
def _shard_tensor(
tensor: torch.Tensor, sharding_spec: ShardingSpec, src_rank=0, process_group=None
) -> ShardedTensor:
"""
Given a :class:`torch.Tensor`, it shards that tensor according to the provided
``sharding_spec``. ``src_rank`` denotes the source rank which would be
used as the ground truth of the data which would be scattered as shards
across the rest of the ranks.
Args:
        tensor (:class:`torch.Tensor`): Tensor that needs to be sharded.
sharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`): The specification
describing how to shard the Tensor.
Keyword args:
src_rank (int, optional): The source rank which is used as the ground truth of
the data for the parameter that would be sharded and scattered
across the rest of the ranks.
Default: 0.
process_group (ProcessGroup, optional): The process group to work on. If None,
the default process group will be used.
Returns:
A :class:`ShardedTensor` sharded from the given tensor.
.. warning::
Only :class:`torch.distributed._shard.sharding_spec.ChunkShardingSpec` is
currently supported as the ``sharding_spec``.
"""
if not tensor.is_contiguous():
raise ValueError('input tensor is not a contiguous Tensor')
pg = process_group if process_group is not None else distributed_c10d._get_default_group()
world_size = dist.get_world_size(pg)
current_rank = dist.get_rank(pg)
# Validate src_rank and sharding_spec are same across all ranks.
gathered_list = [None] * world_size
dist.all_gather_object(gathered_list, (src_rank, sharding_spec), group=pg)
for idx, entry in enumerate(gathered_list):
if src_rank != entry[0]: # type: ignore[index]
raise ValueError(
f'src_rank={src_rank} on rank: {current_rank} does not ' # type: ignore[index]
f'match with src_rank={entry[0]} on rank: {idx}')
if sharding_spec != entry[1]: # type: ignore[index]
raise ValueError(
f'sharding_spec={sharding_spec} on rank: {current_rank} does not ' # type: ignore[index]
f'match with sharding_spec={entry[1]} on rank: {idx}')
st = sharding_spec.shard(tensor, src_rank=src_rank, process_group=process_group)
return st
def shard_parameter(
module: torch.nn.Module,
param_name: str,
sharding_spec: ShardingSpec,
src_rank=0,
process_group=None):
"""
Given a :class:`torch.nn.Module`, a ``param_name`` for a parameter in that
module, it shards that parameter according to the provided
``sharding_spec``. ``src_rank`` denotes the source rank which would be
used as the ground truth of the data which would be scattered as shards
across the rest of the ranks.
This method replaces ``module.param_name`` with a
    :class:`torch.distributed._shard.sharded_tensor.ShardedTensor`
Args:
module (:class:`torch.nn.Module`): Module whose parameter needs to be sharded.
param_name (str): Name of the parameter of ``module`` that needs to be sharded.
sharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`): The specification
describing how to shard the Tensor.
Keyword args:
src_rank (int, optional): The source rank which is used as the ground truth of
the data for the parameter that would be sharded and scattered
across the rest of the ranks.
Default: 0.
process_group (ProcessGroup, optional): The process group to work on. If None,
the default process group will be used.
.. warning::
Only :class:`torch.distributed._shard.sharding_spec.ChunkShardingSpec` is
currently supported as the ``sharding_spec``.
"""
# Perform some validation first.
if not hasattr(module, param_name):
raise AttributeError(f'{module._get_name()} has no attribute `{param_name}`')
tensor = getattr(module, param_name)
if not isinstance(tensor, torch.Tensor):
raise ValueError(f'Expected {type(module).__name__}.{param_name} to be a Tensor, but found {type(tensor).__name__}')
if not tensor.is_contiguous():
raise ValueError(f'param: {param_name} is not a contiguous Tensor')
st = _shard_tensor(tensor, sharding_spec, src_rank, process_group)
# Replace param with ShardedTensor.
module.register_parameter(param_name, nn.Parameter(st))
def _replicate_tensor(tensor: torch.Tensor, process_group=None) -> ReplicatedTensor:
"""
Given a :class:`torch.Tensor`, mark it as a ReplicatedTensor where all
ranks have the same value.
Args:
tensor (:class:`torch.Tensor`): the tensor to be marked as replicated.
Keyword args:
process_group (ProcessGroup, optional): The process group to replicate on.
If None, the default process group will be used.
Returns:
A :class:`ReplicatedTensor` from the given tensor.
"""
return ReplicatedTensor(tensor, process_group=process_group)
# Tracks the current process group in the load context manager.
_CURRENT_PROCESS_GROUP = None
@contextmanager
def load_with_process_group(process_group):
"""
Context manager to set the process group with which to load a ShardedTensor/ReplicatedTensor.
"""
global _CURRENT_PROCESS_GROUP
if _CURRENT_PROCESS_GROUP is not None:
raise RuntimeError(
'ProcessGroup already set by previous "load_with_process_group" '
'context manager')
_CURRENT_PROCESS_GROUP = process_group
try:
yield process_group
finally:
_CURRENT_PROCESS_GROUP = None
def _get_current_process_group():
"""
Retrieves the current process group set by ``load_with_process_group``.
If not set, it just returns the default group.
"""
global _CURRENT_PROCESS_GROUP
if _CURRENT_PROCESS_GROUP is None:
return distributed_c10d._get_default_group()
else:
return _CURRENT_PROCESS_GROUP
def _reshard_output(
module: torch.nn.Module,
resharding_spec: ShardingSpec) -> torch.nn.Module:
"""
Hook a module with output resharding in the forward pass according
to the given ``resharding_spec``.
Args:
module (:class:`torch.nn.Module`): Module whose output needs to be resharded.
resharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`):
The specification describing how the output of the module will be resharded.
Returns:
A :class:`torch.nn.Module` object with reshard API hooked.
"""
def hook_func(_module, _input, output):
if isinstance(output, ShardedTensor) or isinstance(output, _PartialTensor):
return output.reshard(resharding_spec)
return output
module.register_forward_hook(hook_func)
return module
def _collect_local_shard(module: torch.nn.Module) -> torch.nn.Module:
"""
Hook a module with local shards collection in the forward pass.
This API is typically used to convert a sharded representation back to data parallel
representation. In particular, it returns the local tensor for this Shard. If the
size along the sharding dimension for the local tensor is 1, this dimension is removed
from the final result. For example a [4, 16] ShardedTensor across 4 ranks is typically
a local Tensor of size [16] across each rank and not [1, 16] across each rank.
Args:
module (:class:`torch.nn.Module`): Module whose output is ShardedTensor and the
local tensor value needs to be returned.
Returns:
A :class:`torch.nn.Module` object with collection API hooked.
"""
def hook_func(_module, _input, output):
if isinstance(output, ShardedTensor):
local_tensor = output.local_tensor()
# Squeeze the # of dimensions manually, only applicable to ChunkShardingSpec
sharding_spec = output._sharding_spec
if isinstance(sharding_spec, ChunkShardingSpec) \
and local_tensor.size(sharding_spec.dim) == 1: # type: ignore[attr-defined, arg-type]
local_tensor = local_tensor.squeeze(
output._sharding_spec.dim # type: ignore[attr-defined]
)
return local_tensor
module.register_forward_hook(hook_func)
return module
def shard_module(
module: nn.Module,
plan: ShardingPlan,
src_rank=0,
process_group=None
):
"""
    Shards a given module according to the provided sharding plan. This method
    first shards all the parameters according to the given plan. Then, if
    `output_plan` and `return_local_tensor` are specified in the plan, it
    will tag the output of modules according to `output_plan` and convert the
    module's output back to data parallel according to `return_local_tensor`.
Needs to be called on all ranks in an SPMD fashion.
Args:
        module (:class:`torch.nn.Module`): The module to apply sharding to.
        plan (:class:`torch.distributed._shard.sharding_plan.ShardingPlan`):
            The ShardingPlan that specifies the param name to ShardingSpec mapping
            to apply to each parameter.
Keyword args:
src_rank (int, optional): The source rank which is used as the ground truth of
the data for the module that would be sharded and scattered across the rest
of the ranks.
Default: 0.
process_group (ProcessGroup, optional): The process group to work on. If None,
the default process group will be used.
"""
    # record Sharder paths for a sanity check on the plan to ensure items in the plan
    # do not conflict with the submodule tree that the Sharder is working with
sharder_paths = []
for name, spec in plan.plan.items():
if isinstance(spec, Sharder):
sharder_paths.append(name)
# shard the parameter according to the ShardingPlan
for name, spec in plan.plan.items():
if isinstance(spec, ShardingSpec):
# if found a sharding spec, try to shard the parameter
module_path, _, param_name = name.rpartition(".")
for sharder_path in sharder_paths:
if module_path.startswith(sharder_path):
raise RuntimeError(f"ShardingPlan is in-valid, trying to shard a parameter: {name},"
f" but there's already a Sharder entry for module {sharder_path},"
f" parameter sharding should not conflict with the submodule tree"
f" that a Sharder is working with!")
mod = module.get_submodule(module_path)
shard_parameter(
mod,
param_name,
spec,
src_rank=src_rank,
process_group=process_group
)
elif isinstance(spec, Sharder):
parent_mod_path, _, mod_name = name.rpartition(".")
if name == "":
raise KeyError("Module path must not be empty for custom sharder!")
mod = module.get_submodule(name)
parent_mod = module.get_submodule(parent_mod_path)
sharded_mod = spec.shard(mod)
# swap this submodule with the sharded module
            setattr(parent_mod, mod_name, sharded_mod)
else:
raise TypeError(f"Only `ShardingSpec` and `Sharder` are supported to shard '{name}'")
    # reshard output if there's an entry in `output_plan` for this module
if plan.output_plan is not None:
for module_path, output_spec in plan.output_plan.items():
if isinstance(output_spec, ShardingSpec):
mod = module.get_submodule(module_path)
_reshard_output(mod, output_spec)
else:
raise TypeError(f"Only `ShardingSpec` is supported as output_plan for '{module_path}'")
    # convert the output back to data parallel for the modules that appear in
    # `return_local_tensor` of the plan; we call `_collect_local_shard`
    # to collect the local tensor for the output of those modules
if plan.return_local_tensor is not None:
for module_path in plan.return_local_tensor:
mod = module.get_submodule(module_path)
_collect_local_shard(mod)
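# Illustrative usage sketch, not upstream code: sharding a single Linear layer's
# weight row-wise across two ranks with ``shard_module``. The submodule name "fc",
# the world size of 2, and the torchrun launch are assumptions for this example;
# run it with e.g. ``torchrun --nproc_per_node=2 example.py`` on a 2-GPU host.
if __name__ == "__main__":
    dist.init_process_group("nccl")
    rank = dist.get_rank()
    torch.cuda.set_device(rank)
    class _ToyModel(nn.Module):
        def __init__(self):
            super().__init__()
            self.fc = nn.Linear(16, 8)
        def forward(self, x):
            return self.fc(x)
    model = _ToyModel().cuda(rank)
    spec = ChunkShardingSpec(
        dim=0,
        placements=["rank:0/cuda:0", "rank:1/cuda:1"],
    )
    # After this call ``fc.weight`` is a ShardedTensor and each rank only holds
    # its local [4, 16] shard of the original [8, 16] weight.
    shard_module(model, ShardingPlan(plan={"fc.weight": spec}))
    dist.destroy_process_group()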
| pytorch-master | torch/distributed/_shard/api.py |
import functools
from typing import Callable, Dict, TYPE_CHECKING
import torch
import torch.distributed as dist
import torch.distributed._shard.sharding_spec as shard_spec
from torch.distributed import distributed_c10d
from torch.distributed.nn.functional import (
reduce_scatter,
)
from torch.distributed._shard.common_op_utils import _register_default_op
from torch.distributed._shard.op_registry_utils import _decorator_func
from torch.utils._pytree import tree_map
if TYPE_CHECKING:
# Only include ShardedTensor when do type checking, exclude it
# from run-time to resolve circular dependency.
from torch.distributed._shard.sharded_tensor import ShardedTensor
# Custom PartialTensor ops
_PARTIAL_TENSOR_OPS: Dict[Callable, Callable] = {}
def _custom_partial_tensor_op(func):
"""
    Decorator for a custom partial tensor op.
Args:
func(Callable): Torch function for which we want to provide a PartialTensor
implementation (ex: torch.nn.functional.linear)
"""
return functools.partial(
_decorator_func,
op=func,
op_table=_PARTIAL_TENSOR_OPS
)
class _PartialTensor(torch.Tensor):
"""
PartialTensor is an abstraction to represent Tensors that need
aggregation across multiple devices and multiple processes.
PartialTensor is initialized in an SPMD like fashion where each rank
initializes the PartialTensor. The PartialTensor object on each rank
then only stores the local partial shard, process group and the
aggregation way to get a full tensor.
    PartialTensor doesn't provide any Tensor-like operations but is a
wrapper providing the Tensor representing the local partial shard.
We assume the size of each local tensor to be exactly the same.
Users can apply custom distributed sharded computations on top of
this primitive.
Args:
local_partial_shard (Tensor): Partial result stored across ranks.
process_group (ProcessGroup): The process group to aggregate on.
reduce_op (distributed_c10d.ReduceOp): Way to aggregate the partial result.
Default: ``distributed_c10d.ReduceOp.SUM``
Examples:
>>> # All tensors below are of torch.int64 type.
>>> # We have 2 process groups, 2 ranks.
>>> # xdoctest: +SKIP
>>> tensor = torch.arange(2, dtype=torch.int64) + 1 + 2 * rank
>>> tensor = torch.cat([tensor, tensor + 2])
>>> tensor
tensor([1, 2, 3, 4]) # Rank 0
tensor([3, 4, 5, 6]) # Rank 1
>>> partial_tensor = _PartialTensor(tensor, distributed_c10d.ReduceOp.MAX)
>>> sharding_dim = 0
>>> collect_spec = shard_spec.ChunkShardingSpec(
dim=sharding_dim,
placements=[
"rank:0/cuda:0",
"rank:1/cuda:1",
],
)
>>> complete_tensor = partial_tensor.reshard(collect_spec)
>>> complete_tensor
ShardedTensor(
ShardedTensorMetadata(
shards_metadata=[
ShardMetadata(shard_offsets=[0], shard_sizes=[2], placement=rank:0/cuda:0),
ShardMetadata(shard_offsets=[2], shard_sizes=[2], placement=rank:1/cuda:1)],
size=torch.Size([4])
)
>>> complete_tensor.local_tensor()
tensor([3, 4]) # Rank 0
tensor([5, 6]) # Rank 1
        >>> # All tensors below are of torch.int64 type.
>>> # We have 2 process groups, 2 ranks.
>>> tensor = torch.tensor([1, 2]) + 2 * rank
>>> tensor = torch.cat([tensor, tensor + 2])
>>> tensor
tensor([1, 2, 3, 4]) # Rank 0
tensor([3, 4, 5, 6]) # Rank 1
>>> partial_tensor = _PartialTensor(tensor)
>>> complete_tensor = partial_tensor.reshard(collect_spec)
>>> complete_tensor
ShardedTensor(
ShardedTensorMetadata(
shards_metadata=[
ShardMetadata(shard_offsets=[0], shard_sizes=[2], placement=rank:0/cuda:0),
ShardMetadata(shard_offsets=[2], shard_sizes=[2], placement=rank:1/cuda:1)],
size=torch.Size([4])
)
>>> complete_tensor.local_tensor()
tensor([4, 6]) # Rank 0
tensor([8, 10]) # Rank 1
"""
_process_group: distributed_c10d.ProcessGroup
_local_shard: torch.Tensor
_reduce_op: distributed_c10d.ReduceOp
__slots__ = ["_process_group", "_local_shard", "_reduce_op"]
def __new__(cls, local_shard, process_group=None, reduce_op=distributed_c10d.ReduceOp.SUM):
r = torch.Tensor._make_wrapper_subclass( # type: ignore[attr-defined]
cls,
local_shard.size(),
dtype=local_shard.dtype,
layout=local_shard.layout,
pin_memory=local_shard.is_pinned(),
requires_grad=local_shard.requires_grad) # type: ignore[arg-type]
r._process_group = ( # type: ignore[attr-defined]
process_group
if process_group is not None
else distributed_c10d._get_default_group()
)
r._reduce_op = reduce_op
r._local_shard = local_shard
return r
def __post_init__(self):
if not isinstance(self._reduce_op, distributed_c10d.ReduceOp):
raise ValueError(
"reduce_op needs to be a member of distributed_c10d.ReduceOp."
)
def reshard(self, resharding_spec: shard_spec.ShardingSpec) -> "ShardedTensor":
"""
The reshard happens in two steps logically:
1. Aggregate all the shards of the partial tensor.
2. Shard this tensor according to the provided spec.
In reality, for the sake of performance, we consolidate all partial tensors
        across multiple ranks and convert to a sharded tensor in one step.
Args:
resharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`):
The specification describing how we reshard the aggregated local result.
Returns:
A :class:`ShardedTensor` filled with local aggregated result.
"""
from torch.distributed._shard.sharded_tensor.api import ShardedTensor
if not isinstance(resharding_spec, shard_spec.ChunkShardingSpec):
raise NotImplementedError("Only ChunkShardingSpec supported for reshard.")
if self._local_shard.is_complex():
raise NotImplementedError("Only real partial tensor supported for reshard.")
sharding_dim = int(resharding_spec.dim) # type: ignore[attr-defined]
chunk_mode_res = self._local_shard.size(sharding_dim) % self._process_group.size()
local_shard = self._local_shard
# Add padding when the size is not divisible by the world size.
if chunk_mode_res != 0:
padding = [0] * (local_shard.dim() * 2)
padding[-1] = self._process_group.size() - chunk_mode_res
local_shard = torch.nn.functional.pad(
local_shard,
tuple(padding),
"constant",
0,
)
current_rank = dist.get_rank(self._process_group) # type: ignore[attr-defined]
rank_idx = None
rearrange_local_shards = False
indices = [0] * self._process_group.size()
for idx, placement in enumerate(resharding_spec.placements): # type: ignore[attr-defined]
if placement.rank() == current_rank: # type: ignore[index, union-attr]
rank_idx = idx # type: ignore[attr-defined]
if placement.rank() != idx: # type: ignore[index, union-attr]
rearrange_local_shards = True
indices[placement.rank()] = idx # type: ignore[index, union-attr]
local_shards = local_shard.chunk(self._process_group.size(), dim=sharding_dim)
if rearrange_local_shards:
# Need to re-arrange original shard_dim of output_tensor_list.
local_shards = [local_shards[idx] for idx in indices] # type: ignore[call-overload]
local_result = reduce_scatter(
torch.empty_like(local_shards[0]),
list(local_shards),
op=self._reduce_op,
group=self._process_group,
)
sharded_tensor_size = self._local_shard.size()
# Remove padding when the size is not divisible by the world size.
if chunk_mode_res != 0:
uneven_local_shards = self._local_shard.chunk(
self._process_group.size(), dim=sharding_dim
)
expected_size = uneven_local_shards[rank_idx].size() # type: ignore[index]
if local_result.size() != expected_size:
local_result = local_result.narrow(
sharding_dim,
0,
expected_size[sharding_dim],
)
return ShardedTensor._init_from_local_tensor(
local_result,
resharding_spec,
sharded_tensor_size,
process_group=self._process_group,
)
@classmethod
def __torch_function__(cls, func, types, args=(), kwargs=None):
# Find process_group
process_group = None
def find_process_group(e):
nonlocal process_group
if process_group is None and isinstance(e, _PartialTensor):
process_group = e._process_group
tree_map(find_process_group, args)
tree_map(find_process_group, kwargs)
if func in _PARTIAL_TENSOR_OPS:
return _PARTIAL_TENSOR_OPS[func](types, args, kwargs, process_group)
# Need to disable all dispatch to print args and kwargs appropriately.
guard = torch._C._DisableTorchDispatch() # type: ignore[attr-defined]
try:
with torch._C.DisableTorchFunction():
raise RuntimeError(
f"torch function '{func.__name__}', with args: {args} and "
f"kwargs: {kwargs} not supported for PartialTensor!")
finally:
del guard
@classmethod
def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
raise RuntimeError(
f"A {cls.__name__} object is being used from c++ "
f"while calling {func.__module__}.{func.__name__} "
"but the there is no custom __torch_dispatch__ implementation for it."
)
def __repr__(self):
return f"PartialTensor({super(_PartialTensor, self).__repr__()})"
def _transpose_impl(types, args=(), kwargs=None, process_group=None):
partial_tensor = args[0]
input = partial_tensor._local_shard
dim0 = args[1]
dim1 = args[2]
return _PartialTensor(
torch.transpose(input, dim0, dim1),
process_group,
partial_tensor._reduce_op
)
@_custom_partial_tensor_op(torch.Tensor.transpose)
def partial_transpose(types, args=(), kwargs=None, process_group=None):
return _transpose_impl(types, args, kwargs, process_group)
@_custom_partial_tensor_op(torch.transpose)
def partial_torch_transpose(types, args=(), kwargs=None, process_group=None):
return _transpose_impl(types, args, kwargs, process_group)
@_custom_partial_tensor_op(torch.cat)
def partial_cat(types, args=(), kwargs=None, process_group=None):
input_list = args[0]
if len(input_list) == 0:
raise RuntimeError('Empty list of tensors to torch.cat!')
local_shards = []
for idx, input in enumerate(input_list):
if not isinstance(input, _PartialTensor):
raise RuntimeError('All inputs need to be an instance of _PartialTensor')
if idx == 0:
reduce_op = input._reduce_op
elif reduce_op != input._reduce_op:
raise RuntimeError(
                'All _PartialTensor reduce_ops need to be the same, found: '
                f'{reduce_op} and {input._reduce_op}'
)
local_shards.append(input._local_shard)
if kwargs is None:
dim = 0
else:
if 'out' in kwargs:
raise RuntimeError('"out" kwarg is not supported!')
dim = kwargs['dim'] if 'dim' in kwargs else 0
return _PartialTensor(torch.cat(local_shards, dim), process_group, input._reduce_op)
# Tensor properties access
_register_default_op(torch.Tensor.requires_grad.__get__, _custom_partial_tensor_op) # type: ignore[attr-defined]
_register_default_op(torch.Tensor.shape.__get__, _custom_partial_tensor_op) # type: ignore[attr-defined]
_register_default_op(torch.Tensor.dtype.__get__, _custom_partial_tensor_op) # type: ignore[attr-defined]
_register_default_op(torch.Tensor.layout.__get__, _custom_partial_tensor_op) # type: ignore[attr-defined]
_register_default_op(torch.Tensor.size, _custom_partial_tensor_op)
_register_default_op(torch.Tensor.dim, _custom_partial_tensor_op)
_register_default_op(torch.Tensor.ndim.__get__, _custom_partial_tensor_op) # type: ignore[attr-defined]
_register_default_op(torch.Tensor.is_contiguous, _custom_partial_tensor_op)
_register_default_op(torch.Tensor.contiguous, _custom_partial_tensor_op)
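# Illustrative sketch, not upstream code: registering one more custom op for
# _PartialTensor, following the same pattern as the transpose handlers above.
# ``torch.Tensor.squeeze`` is chosen only as an example of a shape-only op that can
# be applied to the local shard without touching the pending reduction.
@_custom_partial_tensor_op(torch.Tensor.squeeze)
def _partial_squeeze(types, args=(), kwargs=None, process_group=None):
    partial_tensor = args[0]
    squeezed = partial_tensor._local_shard.squeeze(*args[1:], **(kwargs or {}))
    # Keep the same process group and reduce_op so a later reshard still works.
    return _PartialTensor(squeezed, process_group, partial_tensor._reduce_op)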
| pytorch-master | torch/distributed/_shard/partial_tensor.py |
import torch
from torch.distributed._shard.metadata import ShardMetadata
from typing import Sequence
def narrow_tensor_by_index(tensor: torch.Tensor, offsets: Sequence[int], sizes: Sequence[int]) -> torch.Tensor:
"""
Narrow the tensor according to ``offsets`` and ``sizes``.
"""
narrowed_tensor = tensor
for idx, (offset, size) in enumerate(zip(offsets, sizes)):
if size < tensor.size(idx):
            # Narrow to get the shard for this rank. We don't want autograd to
            # record the narrow op here, and 'local_shard' should be a leaf
            # variable in the autograd graph.
narrowed_tensor = narrowed_tensor.narrow(
idx,
offset,
size
)
return narrowed_tensor
def narrow_tensor(tensor: torch.Tensor, metadata: ShardMetadata) -> torch.Tensor:
"""
Narrow the tensor according to the metadata
"""
return narrow_tensor_by_index(tensor, metadata.shard_offsets, metadata.shard_sizes)
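# Illustrative usage sketch, not upstream code: narrowing a 4x4 tensor down to the
# 2x2 block starting at offset (1, 2), i.e. the region a shard with those offsets
# and sizes would cover.
if __name__ == "__main__":
    full = torch.arange(16).reshape(4, 4)
    block = narrow_tensor_by_index(full, offsets=[1, 2], sizes=[2, 2])
    # ``block`` is a view of ``full``; no data is copied.
    assert block.shape == torch.Size([2, 2])
    assert torch.equal(block, full[1:3, 2:4])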
| pytorch-master | torch/distributed/_shard/_utils.py |
import io
from dataclasses import dataclass, field
from typing import Dict, List, Tuple, Union, Optional, Sequence, Any
import torch
from torch.distributed._shard.sharded_tensor import (
ShardedTensor,
)
from torch.distributed._shard.sharded_tensor.metadata import TensorProperties
@dataclass
class ChunkStorageMetadata:
"""
    Each chunk is expected to have the same properties as the TensorStorageMetadata that includes it.
"""
offsets: torch.Size
sizes: torch.Size
@dataclass
class TensorStorageMetadata:
properties: TensorProperties
size: torch.Size
chunks: List[ChunkStorageMetadata]
@dataclass
class BytesStorageMetadata:
pass
TENSOR_TYPE = Union[torch.Tensor, ShardedTensor]
STORAGE_TYPES = Union[TensorStorageMetadata, BytesStorageMetadata]
STATE_DICT_TYPE = Dict[str, Any]
@dataclass
class Metadata:
# Keys are the same from the `state_dict` used.
state_dict_metadata: Dict[str, STORAGE_TYPES]
storage_data: Any = None
@dataclass
class BytesWriteRequest:
bytes: io.BytesIO
storage_key: str
@dataclass
class BytesReadRequest:
bytes: io.BytesIO
storage_key: str
fqn: str
@dataclass
class TensorWriteRequest:
tensor: torch.Tensor
storage_key: str
@dataclass
class TensorReadRequest:
tensor: torch.Tensor
storage_key: str
# offset and length w.r.t. to the storage identified by ``storage_key``
offsets: Tuple[int, ...]
lengths: Tuple[int, ...]
@dataclass(frozen=True)
class MetadataIndex:
"""
This class represents a lookup key for items in a state dict or Metadata.
"""
fqn: str
"""Fully Qualified Name of the object"""
offset: Optional[torch.Size] = None
"""If the object is a tensor, offset into the tensor we're looking for"""
index: Optional[int] = field(hash=False, compare=False, default=None)
"""
    Index hint used when searching for a tensor chunk to speed up lookups (optional).
    A common representation of a sharded tensor is a list of chunks, so to
    find the index in such a list you need to linearly search it.
    When constructing an instance of MetadataIndex that points to that list,
    one can provide the index as a hint; it will be probed first, before
    the linear search, making lookups significantly faster.
"""
def __init__(self, fqn: str, offset: Optional[Sequence[int]] = None, index: Optional[int] = None):
# We must use object.__setattr__ due to frozen=True
object.__setattr__(self, "fqn", fqn)
object.__setattr__(self, "index", index)
if offset is not None:
object.__setattr__(self, "offset", torch.Size(offset))
| pytorch-master | torch/distributed/_shard/checkpoint/metadata.py |
import os
import operator
import pickle
from typing import List, Optional, Union, cast
import torch
from torch import Tensor
from torch.futures import Future
from pathlib import Path
from .metadata import (
BytesReadRequest,
BytesWriteRequest,
Metadata,
TensorReadRequest,
TensorWriteRequest,
)
from .storage import StorageReader, StorageWriter
from torch.distributed._shard._utils import narrow_tensor_by_index
class FileSystemWriter(StorageWriter):
"""
Basic implementation of StorageWriter using file IO.
This implementation makes the following assumptions and simplifications:
* The checkpoint path is an empty or non-existing directory.
* File creation is atomic
    The checkpoint consists of one file per write request plus
a `.metadata` file with the serialized metadata.
"""
def __init__(self, path: Union[str, os.PathLike]) -> None:
"""
Initialize the writer pointing to `path`
Args:
            path: directory where the checkpoint will be written to.
"""
super().__init__()
self.path = Path(path)
def write_bytes(self, requests: List[BytesWriteRequest]) -> Future[None]:
for req in requests:
with (self.path / req.storage_key).open("wb") as w:
w.write(req.bytes.getbuffer())
os.fsync(w.fileno())
fut: Future[None] = Future()
fut.set_result(None)
return fut
def write_tensors(self, requests: List[TensorWriteRequest]) -> Future[None]:
for req in requests:
            # The following couple of lines are a simple implementation to get
            # things going.
            #
            # At load time, to enable resharding, we use a (sub)view of the tensor.
            # Since the storage of the tensor might not be contiguous, we need to
            # preserve the original view to calculate the correct sub-view at load.
#
# `torch.save` saves both the view and storage, it is a good option
# for unblocking. There are two drawbacks:
# 1. `torch.save` is pickle based, and pickle is not known for its
# compatibility, we should consider replacing it with a more
# stable option.
# 2. pickle is not streamable.
with (self.path / req.storage_key).open("wb") as w:
torch.save(req.tensor, w)
os.fsync(w.fileno())
fut: Future[None] = Future()
fut.set_result(None)
return fut
def prepare(self) -> None:
self.path.mkdir(parents=True, exist_ok=True)
def finish(self, metadata: Metadata) -> None:
with (self.path / ".metadata.tmp").open("wb") as metadata_file:
pickle.dump(metadata, metadata_file)
os.fsync(metadata_file.fileno())
(self.path / ".metadata.tmp").rename(self.path / ".metadata")
class FileSystemReader(StorageReader):
def __init__(self, path: Union[str, os.PathLike]) -> None:
super().__init__()
self.path = Path(path)
def read_tensors(self, requests: List[TensorReadRequest]) -> Future[None]:
"""
Very basic implementation that read from file system.
"""
        # Sort the requests by storage key and try to reuse the loaded tensors
requests.sort(key=operator.attrgetter("storage_key"))
cached_storage_key = None
view_cached: Optional[Tensor] = None
for req in requests:
if cached_storage_key != req.storage_key or \
(view_cached is not None and view_cached.device != req.tensor.device):
with (self.path / req.storage_key).open("rb") as storage:
view_cached = cast(Tensor, torch.load(storage, map_location=req.tensor.device))
cached_storage_key = req.storage_key
view_to_copy: Tensor = cast(Tensor, view_cached)
            # FileSystemWriter writes the tensor as-is during save.
            # At load time, we load the Tensor (with its original view),
            # narrow it along all dimensions, and copy_ it to the
            # target tensor, which will be the same size.
view_to_copy = narrow_tensor_by_index(view_to_copy, req.offsets, req.lengths)
assert (
view_to_copy.size() == req.tensor.size()
), f"The {req.storage_key} src/dst size does not match."
assert (
view_to_copy.device == req.tensor.device
), f"cannot load across devices {view_to_copy.device} vs {req.tensor.device}"
req.tensor.copy_(view_to_copy)
fut: Future = Future()
fut.set_result(None)
return fut
def read_bytes(self, requests: List[BytesReadRequest]) -> Future[None]:
for req in requests:
with (self.path / req.storage_key).open("rb") as storage:
req.bytes.write(storage.read())
fut: Future = Future()
fut.set_result(None)
return fut
    # Implementing the abstract function in StorageReader
def read_metadata(self) -> Metadata:
with (self.path / ".metadata").open("rb") as metadata_file:
return pickle.load(metadata_file)
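# Illustrative sketch, not upstream code: a single-process round trip through the
# filesystem writer/reader using only the byte and metadata paths. The temporary
# directory and the storage key "extra_state" are made up for this example.
if __name__ == "__main__":
    import io
    import tempfile
    with tempfile.TemporaryDirectory() as tmp:
        writer = FileSystemWriter(tmp)
        writer.prepare()
        payload = io.BytesIO()
        torch.save({"step": 7}, payload)
        writer.write_bytes([BytesWriteRequest(bytes=payload, storage_key="extra_state")]).wait()
        writer.finish(Metadata(state_dict_metadata={}))
        reader = FileSystemReader(tmp)
        assert isinstance(reader.read_metadata(), Metadata)
        out = io.BytesIO()
        reader.read_bytes(
            [BytesReadRequest(bytes=out, storage_key="extra_state", fqn="extra_state")]
        ).wait()
        out.seek(0)
        assert torch.load(out)["step"] == 7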
| pytorch-master | torch/distributed/_shard/checkpoint/filesystem.py |
import io
from typing import Any, Dict, List, Tuple, Optional, Union
import torch
import torch.distributed as dist
from torch import Tensor
from torch.distributed._shard.sharded_tensor import (
ShardedTensor,
)
from .metadata import (
Metadata,
BytesWriteRequest,
TensorWriteRequest,
)
from .resharding import (
_prepare_sharded_tensor_write,
_prepare_tensor_write,
_prepare_bytes_write
)
from .storage import (
StorageWriter,
)
from .utils import _DistWrapper
# -------------- private functions --------------
def _prepare(
state_dict: Dict[str, Any],
write_replicated_data: bool,
) -> Tuple[Metadata, List[BytesWriteRequest], List[TensorWriteRequest]]:
"""
Build the serialization plan for a given state_dict
Args:
state_dict: The instance to plan for.
Returns:
A tuple with the following values:
metadata: Metadata
The storage metadata describing Tensor and ShardedTensors
instances found in `state_dict`. See `Metadata` for the schema.
bytes_write_requests: List[BytesWriteRequest]
List of ByteIO write requests that should be performed by the writer.
tensor_write_requests: List[TensorWriteRequest]
List of Tensor write requests that should be performed by the writer.
"""
metadata = Metadata(state_dict_metadata={})
tensor_write_requests: List[TensorWriteRequest] = []
bytes_write_requests: List[BytesWriteRequest] = []
storage_key_to_fqn: Dict[str, str] = dict()
storage_md = {}
for fqn, obj in state_dict.items():
if isinstance(obj, ShardedTensor):
st_write_reqs, st_md, storage_data = _prepare_sharded_tensor_write(fqn, obj, fqn, storage_key_to_fqn)
tensor_write_requests += st_write_reqs
metadata.state_dict_metadata[fqn] = st_md
storage_md.update(storage_data)
elif isinstance(obj, Tensor):
write_reqs, tensor_md, storage_data = _prepare_tensor_write(obj, fqn, storage_key_to_fqn)
if write_replicated_data:
tensor_write_requests += write_reqs
metadata.state_dict_metadata[fqn] = tensor_md
storage_md.update(storage_data)
else:
bytes_io = io.BytesIO()
# This produces incomplete MD for rank > 0 since we won't populate bytes_io.
# This is ok since only rank == 0 uses this data
if write_replicated_data:
torch.save(obj, bytes_io)
byte_write_reqs, bytes_md, storage_data = _prepare_bytes_write(bytes_io, fqn, storage_key_to_fqn)
if write_replicated_data:
bytes_write_requests += byte_write_reqs
metadata.state_dict_metadata[fqn] = bytes_md
storage_md.update(storage_data)
metadata.storage_data = storage_md
return (metadata, bytes_write_requests, tensor_write_requests)
def save_state_dict(
state_dict: Dict[str, Any],
storage_writer: StorageWriter,
process_group: Optional[dist.ProcessGroup] = None,
coordinator_rank: int = 0,
no_dist: bool = False
) -> None:
"""
Save a distributed model in SPMD style.
This function is different from ``torch.save()`` as it handles
``ShardedTensor`` by having each rank only save their local shards.
To produce a state_dict with ShardedTensor instances you must call
``_register_state_dict_hook`` on the top module with value
`torch.distributed._shard.sharded_tensor.state_dict_hook` prior to
calling `state_dict()` on the top module.
    There are no guarantees of backwards compatibility across PyTorch versions
    for saved state_dicts.
    If using the `process_group` argument, make sure that only its ranks
    call `save_state_dict` and that all data in the state_dict belongs to it.
    This function can also be used to save a state_dict without an initialized
    process group by passing ``no_dist=True``. This can be used to produce a
    checkpoint that can be consumed by ``load_state_dict`` in a SPMD fashion.
Args:
state_dict (Dict[str, Any]) : A state_dict
        storage_writer (StorageWriter): Instance of StorageWriter used to perform writes.
process_group (ProcessGroup): ProcessGroup to be used for cross-rank synchronization
coordinator_rank (int): Rank to use to coordinate the checkpoint, rank0 is used by default
no_dist (bool): Don't attempt to save in SPMD style. Default to False
Example:
>>> # xdoctest: +SKIP
>>> my_model = MyModule()
>>> # We must call this function prior to state_dict()
>>> my_model._register_state_dict_hook(state_dict_hook)
>>> model_state_dict = my_model.state_dict()
>>> fs_storage_writer = torch.distributed._shard.checkpoint.FileSystemWriter("/checkpoint/1")
>>> torch.distributed._shard.checkpoint.save_state_dict(
>>> state_dict=model_state_dict,
        >>>     storage_writer=fs_storage_writer,
>>> )
.. note:: save_state_dict uses collectives to coordinate writes across ranks.
For NCCL-based process groups, internal tensor representations of objects
must be moved to the GPU device before communication takes place. In this
case, the device used is given by ``torch.cuda.current_device()`` and it
is the user's responsibility to ensure that this is set so that each rank
has an individual GPU, via ``torch.cuda.set_device()``
"""
distW = _DistWrapper(process_group, not no_dist, coordinator_rank)
distW.broadcast("prepare", storage_writer.prepare)
metadata = None
def write_step():
nonlocal metadata
(
metadata,
bytes_write_requests,
tensor_write_requests,
) = _prepare(state_dict, distW.is_coordinator)
combined_writes: List[Union[TensorWriteRequest, BytesWriteRequest]] = []
combined_writes.extend(tensor_write_requests)
combined_writes.extend(bytes_write_requests)
storage_writer.prepare_storage(combined_writes)
bytes_futures = storage_writer.write_bytes(bytes_write_requests)
tensor_futures = storage_writer.write_tensors(tensor_write_requests)
torch.futures.wait_all([bytes_futures, tensor_futures])
def finish_checkpoint(_):
assert metadata is not None
storage_writer.finish(metadata=metadata)
distW.all_reduce("checkpoitn write", write_step, finish_checkpoint)
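# Illustrative sketch, not upstream code: saving a plain (non-sharded) state_dict
# from a single process with ``no_dist=True``. The checkpoint directory is a
# temporary one created just for this example; no process group is required here.
if __name__ == "__main__":
    import tempfile
    from torch.distributed._shard.checkpoint import FileSystemWriter
    model = torch.nn.Linear(4, 2)
    with tempfile.TemporaryDirectory() as tmp:
        save_state_dict(
            state_dict=model.state_dict(),
            storage_writer=FileSystemWriter(tmp),
            no_dist=True,
        )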
| pytorch-master | torch/distributed/_shard/checkpoint/state_dict_saver.py |
from .metadata import (
BytesReadRequest,
BytesWriteRequest,
TensorStorageMetadata,
BytesStorageMetadata,
ChunkStorageMetadata,
Metadata,
TensorReadRequest,
TensorWriteRequest,
)
from .state_dict_loader import load_state_dict
from .state_dict_saver import save_state_dict
from .storage import StorageReader, StorageWriter
from .filesystem import FileSystemReader, FileSystemWriter
from .api import CheckpointException
| pytorch-master | torch/distributed/_shard/checkpoint/__init__.py |
import hashlib
import io
from typing import List, Tuple, Dict
import torch
from torch import Tensor
from torch.distributed._shard.sharded_tensor import (
ShardedTensor,
)
from torch.distributed._shard.sharding_spec import (
ShardMetadata,
)
from torch.distributed._shard.sharding_spec._internals import (
_check_shard_metadata_pair_overlap,
)
from torch.distributed._shard.sharded_tensor.shard import Shard
from torch.distributed._shard.sharded_tensor.metadata import TensorProperties
from .metadata import (
BytesWriteRequest,
TensorReadRequest,
TensorWriteRequest,
ChunkStorageMetadata,
TensorStorageMetadata,
BytesStorageMetadata,
MetadataIndex,
)
def _trim(tensor: torch.Tensor) -> torch.Tensor:
tensor = tensor.detach()
if tensor.storage().size() != tensor.numel():
return tensor.clone()
return tensor
def _create_storage_key(
storage_key_to_fqn: Dict[str, str],
fqn: str
) -> str:
"""
Compute the storage key from the Fully Qualified Name
Storage keys must respect the following properties:
1) Globally unique name across all objects and ranks.
    2) Suitable for usage with common storage systems (i.e., alphanumeric only)
"""
storage_key = hashlib.sha256(bytes(fqn, "utf-8")).hexdigest()
counter = 0
while storage_key in storage_key_to_fqn:
storage_key = hashlib.sha256(bytes(f"{fqn}{counter}", "utf-8")).hexdigest()
counter += 1
storage_key_to_fqn[storage_key] = fqn
return storage_key
# This constant is used as the separator character between tensor name and shard name
STORAGE_KEY_SEPARATOR = "$"
def _shards_get_overlap_region_wrt_saved_tensor(
saved_shard: ShardMetadata, current_shard: ShardMetadata
) -> List[Tuple[int, int, int, int]]:
"""
Return the overlapping region between saved_shard and current_shard.
    The returned list has the same number of elements as the tensor's dimension.
For each element, we produce a tuple with the following contents:
(dimension, `saved_shard` offset, `current_shard` offset, length)
Offsets are relative to each shard.
"""
narrows = []
for dim, (
saved_shard_offset,
current_shard_offset,
saved_shard_size,
current_shard_size,
) in enumerate(
zip(
saved_shard.shard_offsets,
current_shard.shard_offsets,
saved_shard.shard_sizes,
current_shard.shard_sizes,
)
):
min_range_end = min(
saved_shard_offset + saved_shard_size,
current_shard_offset + current_shard_size,
)
length = min_range_end - max(current_shard_offset, saved_shard_offset)
if saved_shard_offset > current_shard_offset:
offset_for_saved_tensor = 0
offset_for_current_tensor = saved_shard_offset - current_shard_offset
else:
offset_for_saved_tensor = current_shard_offset - saved_shard_offset
offset_for_current_tensor = 0
narrows.append(
(dim, offset_for_saved_tensor, offset_for_current_tensor, length)
)
return narrows
def _chunk_to_shard_md(chunk_md: ChunkStorageMetadata) -> ShardMetadata:
return ShardMetadata(
shard_offsets=list(chunk_md.offsets),
shard_sizes=list(chunk_md.sizes)
)
def _shard_md_to_chunk(chunk_md: ShardMetadata) -> ChunkStorageMetadata:
return ChunkStorageMetadata(
offsets=torch.Size(chunk_md.shard_offsets),
sizes=torch.Size(chunk_md.shard_sizes),
)
def _compute_sharded_tensor_md(
tensor: ShardedTensor,
) -> TensorStorageMetadata:
smd = [_shard_md_to_chunk(sm) for sm in tensor.metadata().shards_metadata]
return TensorStorageMetadata(
properties=tensor.metadata().tensor_properties,
size=torch.Size(tensor.metadata().size),
chunks=smd,
)
def _get_shard_key(shard: ShardMetadata) -> str:
"""
    Compute a unique key for a shard.
    This key is unique vis-a-vis other shards of the owning ShardedTensor.
"""
return "_".join(str(i) for i in shard.shard_offsets)
def _get_shard_storage_key(
tensor_storage_key: str,
shard: ShardMetadata,
storage_key_to_fqn: Dict[str, str]
) -> str:
shard_key = f"{tensor_storage_key}{STORAGE_KEY_SEPARATOR}{_get_shard_key(shard)}"
return _create_storage_key(storage_key_to_fqn, shard_key)
def _prepare_sharded_tensor_write(
fqn: str,
sharded_tensor: ShardedTensor,
storage_key: str,
storage_key_to_fqn: Dict[str, str]
) -> Tuple[List[TensorWriteRequest], TensorStorageMetadata, Dict[MetadataIndex, str]]:
"""
Prepare sharded tensor write.
Args:
fqn: The FQN of ``sharded_tensor`` in the state_dict.
sharded_tensor: The sharded tensor to persist.
storage_key: The identifier for `sharded_tensor`.
storage_key_to_fqn: dict used to produce storage keys
Returns:
A 3-element tuple with the following values:
List of ``TensorWriteRequest`` for the tensor
            Metadata describing the tensor.
Dictionary describing storage information for this tensor
NB `storage_key` is used to compose the key names of the local shards.
"""
write_requests = []
shard_to_storage_key: Dict[str, str] = dict()
storage_md = {}
for shard_md in sharded_tensor.metadata().shards_metadata:
shard_storage_key = _get_shard_storage_key(storage_key, shard_md, storage_key_to_fqn)
shard_to_storage_key[_get_shard_key(shard_md)] = shard_storage_key
storage_md[MetadataIndex(fqn, shard_md.shard_offsets)] = shard_storage_key
for shard in sharded_tensor.local_shards():
tensor = shard.tensor.detach()
shard_storage_key = shard_to_storage_key[_get_shard_key(shard.metadata)]
wr = TensorWriteRequest(
tensor=_trim(tensor),
storage_key=shard_storage_key,
)
write_requests.append(wr)
return write_requests, _compute_sharded_tensor_md(sharded_tensor), storage_md
def _prepare_sharded_tensor_read(
fqn: str,
storage_metadata: Dict[MetadataIndex, str],
metadata: TensorStorageMetadata,
sharded_tensor_out: ShardedTensor
) -> List[TensorReadRequest]:
"""
Prepare sharded tensor read.
Args:
fqn: The FQN of ``sharded_tensor`` in the state_dict.
storage_metadata: Dictionary describing checkpoint storage.
metadata: Metadata describing the persisted sharded tensor. Normally,
this is generated by func::`_prepare_sharded_tensor_write`.
sharded_tensor_out: The ShardedTensor being read.
Returns:
        A list of :class:`TensorReadRequest`. When fulfilled,
`sharded_tensor_out`'s local shards load from the persisted sharded
tensor.
"""
return _prepare_generic_tensor_read(
fqn,
metadata.chunks,
sharded_tensor_out.local_shards(),
storage_metadata)
def _prepare_generic_tensor_read(
fqn: str,
checkpoint_shards: List[ChunkStorageMetadata],
local_shards: List[Shard],
storage_metadata: Dict[MetadataIndex, str]
) -> List[TensorReadRequest]:
read_reqs = []
# this is a naive quadratic algo that can be optimized later
for shard in local_shards:
# scan all mds looking for chunks
for storage_md in checkpoint_shards:
shard_md_from_storage = _chunk_to_shard_md(storage_md)
# do they overlap?
if not _check_shard_metadata_pair_overlap(
shard.metadata, shard_md_from_storage
):
continue
storage_key = storage_metadata[MetadataIndex(fqn, storage_md.offsets)]
target_tensor = shard.tensor.detach()
offsets = []
lengths = []
for (
dim,
offset_for_saved_tensor,
offset_for_current_tensor,
length,
) in _shards_get_overlap_region_wrt_saved_tensor(
saved_shard=shard_md_from_storage, current_shard=shard.metadata
):
                # Note that we do NOT want to make any tensor copy.
                # All operations must be view only.
target_tensor = torch.narrow(
target_tensor, dim, offset_for_current_tensor, length
)
offsets.append(offset_for_saved_tensor)
lengths.append(length)
read_reqs.append(
TensorReadRequest(
tensor=target_tensor,
storage_key=storage_key,
offsets=tuple(offsets),
lengths=tuple(lengths),
)
)
return read_reqs
def _compute_tensor_md(tensor: Tensor) -> TensorStorageMetadata:
return TensorStorageMetadata(
properties=TensorProperties.create_from_tensor(tensor),
size=tensor.size(),
chunks=[ChunkStorageMetadata(
offsets=torch.Size([0] * len(tensor.shape)),
sizes=tensor.size(),
)]
)
def _prepare_tensor_write(
tensor: Tensor, fqn: str, storage_key_to_fqn: Dict[str, str]
) -> Tuple[List[TensorWriteRequest], TensorStorageMetadata, Dict[MetadataIndex, str]]:
storage_key = _create_storage_key(storage_key_to_fqn, fqn)
storage_md = {MetadataIndex(fqn, [0] * len(tensor.shape)): storage_key}
write_reqs = [
TensorWriteRequest(
tensor=_trim(tensor),
storage_key=storage_key,
)
]
return (write_reqs, _compute_tensor_md(tensor), storage_md)
def _compute_bytes_md(bytes: io.BytesIO) -> BytesStorageMetadata:
return BytesStorageMetadata(
)
def _prepare_bytes_write(
bytes: io.BytesIO, fqn: str, storage_key_to_fqn: Dict[str, str]
) -> Tuple[List[BytesWriteRequest], BytesStorageMetadata, Dict[MetadataIndex, str]]:
storage_key = _create_storage_key(storage_key_to_fqn, fqn)
storage_md = {MetadataIndex(fqn): storage_key}
write_reqs = [
BytesWriteRequest(
bytes=bytes,
storage_key=storage_key,
)
]
return (write_reqs, _compute_bytes_md(bytes), storage_md)
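# Illustrative sketch, not upstream code: computing the overlap between a saved
# shard and a differently laid out shard currently in memory. The offsets and
# sizes below are made up to show a 1-D resharding scenario.
if __name__ == "__main__":
    saved = ShardMetadata(shard_offsets=[0], shard_sizes=[4])    # covers [0, 4)
    current = ShardMetadata(shard_offsets=[2], shard_sizes=[4])  # covers [2, 6)
    narrows = _shards_get_overlap_region_wrt_saved_tensor(
        saved_shard=saved, current_shard=current
    )
    # Overlap is [2, 4): offset 2 inside the saved shard, offset 0 inside the
    # current shard, length 2.
    assert narrows == [(0, 2, 0, 2)]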
| pytorch-master | torch/distributed/_shard/checkpoint/resharding.py |
from typing import Dict, Tuple, Any
import traceback as tb
WRAPPED_EXCEPTION = Tuple[BaseException, tb.StackSummary]
def _wrap_exception(exc: BaseException) -> WRAPPED_EXCEPTION:
return (exc, tb.extract_tb(exc.__traceback__))
def _is_wrapped_exception(obj: Any) -> bool:
if not isinstance(obj, tuple):
return False
if len(obj) != 2:
return False
return isinstance(obj[0], BaseException) and isinstance(obj[1], tb.StackSummary)
class CheckpointException(BaseException):
"""
Exception raised if failure was detected as part of a checkpoint load or save.
"""
def __init__(self, msg: str, failures: Dict[int, WRAPPED_EXCEPTION]):
super().__init__(msg, failures)
self._failures = failures
@property
def failures(self) -> Dict[int, WRAPPED_EXCEPTION]:
"""
Returns:
Dict of failed nodes and their associated exception.
Keys are node ranks and values are exceptions
"""
return self._failures
def __str__(self):
str = f"CheckpointException ranks:{self._failures.keys()}\n"
for rank, exc_pair in self._failures.items():
exc, trace = exc_pair
str += f"Traceback (most recent call last): (RANK {rank})\n"
if trace is not None:
str += "".join(tb.format_list(trace))
str += "".join(tb.format_exception_only(type(exc), value=exc))
return str
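# Illustrative sketch, not upstream code: wrapping a per-rank failure and surfacing
# it as a CheckpointException, the way the checkpoint entry points report errors
# coming from individual ranks. The rank number 1 is made up for the example.
if __name__ == "__main__":
    try:
        raise ValueError("disk full")
    except ValueError as e:
        failure = _wrap_exception(e)
    exc = CheckpointException("write step failed", {1: failure})
    assert 1 in exc.failures and _is_wrapped_exception(exc.failures[1])
    print(exc)  # prints the per-rank traceback summary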
| pytorch-master | torch/distributed/_shard/checkpoint/api.py |
import io
from typing import Any, Dict, List, Tuple, Optional, cast
from torch.distributed._shard.metadata import ShardMetadata
from torch.distributed._shard.sharded_tensor.shard import Shard
import torch
import torch.distributed as dist
from torch import Tensor
from torch.distributed._shard.sharded_tensor import (
ShardedTensor,
)
from .metadata import (
BytesReadRequest,
BytesStorageMetadata,
TensorReadRequest,
TensorStorageMetadata,
Metadata,
MetadataIndex,
)
from .resharding import (
_prepare_generic_tensor_read,
)
from .storage import (
StorageReader,
)
from .utils import _DistWrapper
def _create_shard_metadata(size: torch.Size) -> ShardMetadata:
return ShardMetadata(
shard_offsets=[0] * len(size),
shard_sizes=list(size),
)
def _create_shard_for(tensor: Tensor) -> Shard:
return Shard(
tensor=tensor,
metadata=_create_shard_metadata(tensor.size()),
)
def _reshard_and_prepare_read_request(
state_dict: Dict[str, Any], metadata_from_storage: Metadata
) -> Tuple[List[BytesReadRequest], List[TensorReadRequest]]:
"""
Use the loaded metadata and the current state dict to map the saved tensors to current tensor
"""
tensor_read_requests = []
bytes_read_requests = []
storage_md = cast(Dict[MetadataIndex, str], metadata_from_storage.storage_data)
for fqn, obj in state_dict.items():
md = metadata_from_storage.state_dict_metadata[fqn]
if isinstance(obj, ShardedTensor):
local_shards = obj.local_shards()
elif isinstance(obj, torch.Tensor):
local_shards = [_create_shard_for(obj)]
else:
if isinstance(md, BytesStorageMetadata):
bytes_io = io.BytesIO()
brr = BytesReadRequest(
bytes=bytes_io,
storage_key=storage_md[MetadataIndex(fqn)],
fqn=fqn
)
bytes_read_requests.append(brr)
else:
raise ValueError(
f"Invalid checkpoint metadata for {fqn}, " +
f"expected BytesStorageMetadata but found {type(md)}"
)
continue
if isinstance(md, TensorStorageMetadata):
checkpoint_shards = md.chunks
else:
raise ValueError(
f"Invalid checkpoint metadata for {fqn}, " +
f"expected TensorStorageMetadata but found {type(md)}"
)
tensor_read_requests += _prepare_generic_tensor_read(fqn, checkpoint_shards, local_shards, storage_md)
return (bytes_read_requests, tensor_read_requests)
def load_state_dict(
state_dict: Dict[str, Any],
storage_reader: StorageReader,
process_group: Optional[dist.ProcessGroup] = None,
coordinator_rank: int = 0,
no_dist: bool = False
) -> None:
"""
Load a distributed state_dict in SPMD style.
Each rank will try to read the least amount of data necessary
    to fulfill the requested `state_dict`.
When loading ShardedTensor instances, each rank only
reads data for their local shards.
All tensors in ``state_dict`` must be allocated on their
destination device prior to calling this function.
All non-tensor data is loaded using `torch.load()` and modified in place
on state_dict.
    Users must call `load_state_dict` on the root module to ensure load
    post-processing and non-tensor data properly propagate.
    This function can be used for local inference to load a checkpoint
    produced by ``save_state_dict`` without having a process group initialized,
    by passing ``no_dist=True`` and by using Tensors instead of ShardedTensors.
Args:
        state_dict (Dict[str, Any]) : The state_dict to load. Note that this
            state_dict will be updated in place.
storage_reader (StorageReader): StorageReader used to load data from.
process_group (ProcessGroup): ProcessGroup to be used for cross-rank synchronization
coordinator_rank (int): Rank to use to coordinate the checkpoint, rank0 is used by default
no_dist (bool): Don't attempt to load in SPMD style. Default to False
Returns:
None.
Examples
>>> # xdoctest: +SKIP
>>> my_model = MyModule()
>>> optimizer = Adagrad(my_model.parameters())
>>> model_state_dict = my_model.state_dict()
>>> fs_storage_loader = torch.distributed._shard.checkpoint.FileSystemLoader("/checkpoint/1")
>>> torch.distributed._shard.checkpoint.load_state_dict(
>>> state_dict=model_state_dict,
>>> storage_reader=fs_storage_loader,
>>> )
>>> # module.load_state_dict() function might have customized steps
>>> # to flush the state_dict, must call it to
>>> # ensure correct behavior.
>>> my_model.load_state_dict(model_state_dict)
.. note:: load_state_dict uses collectives to coordinate reads across ranks.
For NCCL-based process groups, internal tensor representations of objects
must be moved to the GPU device before communication takes place. In this
case, the device used is given by ``torch.cuda.current_device()`` and it
is the user's responsibility to ensure that this is set so that each rank
has an individual GPU, via ``torch.cuda.set_device()``
"""
distW = _DistWrapper(process_group, not no_dist, coordinator_rank)
def load_model():
metadata = storage_reader.read_metadata()
bytes_read_requests, tensor_read_requests = _reshard_and_prepare_read_request(
state_dict=state_dict, metadata_from_storage=metadata
)
bytes_futures = storage_reader.read_bytes(bytes_read_requests)
tensor_futures = storage_reader.read_tensors(tensor_read_requests)
bytes_futures.wait()
        # Additional steps are required to convert the bytes back to their original type.
        # Note that this is NOT in place; it creates a new object and replaces
        # what's in the state dict.
for req in bytes_read_requests:
# Ensure the BytesIO is rewound
req.bytes.seek(0)
state_dict[req.fqn] = torch.load(req.bytes)
tensor_futures.wait()
distW.all_gather("checkpoint read", load_model)
| pytorch-master | torch/distributed/_shard/checkpoint/state_dict_loader.py |
from typing import List, Callable, Optional, Union, TypeVar, Dict, Any, cast
import torch.distributed as dist
from .api import (
CheckpointException,
_wrap_exception,
_is_wrapped_exception,
WRAPPED_EXCEPTION
)
import torch
from torch.distributed._shard.sharded_tensor import (
ShardedTensor,
)
from torch.distributed._shard.sharded_tensor.shard import Shard
from .metadata import (
STATE_DICT_TYPE,
MetadataIndex,
)
T = TypeVar('T')
R = TypeVar('R')
def _get_failure_dict(results: List[Union[T, WRAPPED_EXCEPTION]]) -> Dict[int, WRAPPED_EXCEPTION]:
return cast(Dict[int, WRAPPED_EXCEPTION], {i: err for i, err in enumerate(results) if _is_wrapped_exception(err)})
class _DistWrapper:
"""
This is a wrapper around a ProcessGroup that provides a series of features around object collectives.
It works without distributed being initialized, where most collectives turn into no-ops.
All variants that take functions are exception robust, meaning that if one or more
ranks raise errors, all ranks will observe those.
"""
def __init__(self, group: Optional[dist.ProcessGroup], use_dist: bool, coordinator_rank: int):
self.group = group
self.use_dist = use_dist
self.coordinator_rank = coordinator_rank
if self.use_dist:
self.rank = dist.get_rank(group)
self.is_coordinator = self.rank == coordinator_rank
else:
self.rank = 0
self.is_coordinator = True
def get_rank(self) -> int:
return self.rank
def get_world_size(self) -> int:
if self.use_dist:
return dist.get_world_size(self.group)
return 1
def broadcast_object(self, object: Optional[T]) -> T:
"""
Same as c10d::broadcast_object_list but works without distributed enabled.
"""
object_list = [object]
if self.use_dist:
dist.broadcast_object_list(
object_list=object_list,
group=self.group,
src=self.coordinator_rank)
return cast(T, object_list[0])
def gather_object(self, object: T) -> Optional[List[T]]:
"""
Same as c10d::gather_object but works without distributed enabled.
"""
if self.use_dist:
gather_objs = cast(List[T], [None] * dist.get_world_size(self.group)) if self.is_coordinator else None
dist.gather_object(
obj=object,
object_gather_list=gather_objs if self.is_coordinator else None,
dst=self.coordinator_rank,
group=self.group
)
result = gather_objs
else:
result = [object]
return result
def all_gather_object(self, object: T) -> List[T]:
"""
Same as c10d::all_gather_object but works without distributed enabled.
"""
if self.use_dist:
gather_objs = cast(List[T], [None] * dist.get_world_size(self.group))
dist.all_gather_object(
object_list=gather_objs,
obj=object,
group=self.group
)
else:
gather_objs = [object]
return gather_objs
def scatter_object(self, object_list: Optional[List[T]]) -> T:
"""
Same as c10d::scatter_object but works without distributed enabled.
"""
if self.use_dist:
gather_result = cast(List[T], [None])
dist.scatter_object_list(
scatter_object_output_list=gather_result,
scatter_object_input_list=object_list if self.is_coordinator else None,
src=self.coordinator_rank,
group=self.group
)
local_reply = gather_result[0]
else:
assert object_list is not None
local_reply = object_list[0]
return local_reply
def reduce_scatter(
self,
step: str,
map_fun: Callable[[], T],
reduce_fun: Callable[[List[T]], List[R]]
) -> R:
"""
Compute a value on each rank, then do centralized reduce on a single rank, followed by a scatter.
This method operates in the following way:
Run ``map_fun`` on all ranks
Gather results on the coordinator rank
Call ``reduce_fun`` on all those values
Scatter to each rank its part of the result.
"""
local_data: Union[WRAPPED_EXCEPTION, T]
try:
local_data = map_fun()
except BaseException as e:
local_data = _wrap_exception(e)
all_data = self.gather_object(local_data)
all_results: Optional[List[Union[R, CheckpointException]]] = None
if self.is_coordinator:
assert all_data is not None
node_failures = _get_failure_dict(all_data)
if len(node_failures) == 0:
try:
# N.B. why can't mypy cast List[R] to List[Union[R, WRAPPED_EXCEPTION]]?
all_results = cast(List[Union[R, CheckpointException]], reduce_fun(cast(List[T], all_data)))
except BaseException as e:
node_failures[self.rank] = _wrap_exception(e)
if len(node_failures) > 0:
all_results = [CheckpointException(step, node_failures)] * self.get_world_size()
result = self.scatter_object(all_results)
if isinstance(result, CheckpointException):
raise result
return result
def all_reduce(
self,
step: str,
map_fun: Callable[[], T],
reduce_fun: Callable[[List[T]], R]
) -> R:
"""
Compute a value on each rank, then do centralized reduce on a single rank, followed by a broadcast.
This method operates in the following way:
Run ``map_fun`` on all ranks
Gather results on the coordinator rank
Call ``reduce_fun`` on all those values
Broadcast the reduced value to all ranks.
"""
local_data: Union[T, WRAPPED_EXCEPTION]
try:
local_data = map_fun()
except BaseException as e:
local_data = _wrap_exception(e)
all_data = self.gather_object(local_data)
result: Optional[Union[R, CheckpointException]] = None
if self.is_coordinator:
assert all_data is not None
node_failures = _get_failure_dict(all_data)
if len(node_failures) == 0:
try:
result = reduce_fun(cast(List[T], all_data))
except BaseException as e:
node_failures[self.rank] = _wrap_exception(e)
if len(node_failures) > 0:
result = CheckpointException(step, node_failures)
final_result = self.broadcast_object(result)
if isinstance(final_result, CheckpointException):
raise final_result
return cast(R, final_result)
def all_gather(
self,
step: str,
map_fun: Callable[[], T],
) -> List[T]:
"""
Compute a value on each rank, then all_gather them.
This method operates in the following way:
Run ``map_fun`` on all ranks
all_gather the values to all ranks
"""
result: Union[T, WRAPPED_EXCEPTION]
try:
result = map_fun()
except BaseException as e:
result = _wrap_exception(e)
all_results = self.all_gather_object(result)
node_failures = _get_failure_dict(all_results)
if len(node_failures) > 0:
raise CheckpointException(step, node_failures)
return cast(List[T], all_results)
def broadcast(
self,
step: str,
map_fun: Callable[[], T],
) -> T:
"""
Compute a value on the coordinator rank and broadcast it.
This method operates in the following way:
Run ``map_fun`` on the coordinator rank
Broadcast the value to all ranks.
"""
result: Optional[Union[T, CheckpointException]] = None
if self.is_coordinator:
try:
result = map_fun()
except BaseException as e:
result = CheckpointException(step, {self.rank: _wrap_exception(e)})
final_result = self.broadcast_object(result)
if isinstance(final_result, CheckpointException):
raise final_result
return cast(T, final_result)
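# Illustrative sketch, not part of the original file: how the exception-robust
# collectives above are typically driven. ``_plan_writes`` and ``_merge_plans``
# are hypothetical callables standing in for real planning logic; if any rank
# raises inside them, every rank observes a CheckpointException instead of
# deadlocking on the collective.
def _example_all_reduce_usage() -> Dict[str, int]:
    dist_wrapper = _DistWrapper(
        group=None, use_dist=dist.is_initialized(), coordinator_rank=0
    )

    def _plan_writes() -> Dict[str, int]:
        # Computed independently on every rank.
        return {"rank": dist_wrapper.get_rank()}

    def _merge_plans(all_plans: List[Dict[str, int]]) -> Dict[str, int]:
        # Runs only on the coordinator; the reduced value is broadcast back.
        return {"num_ranks": len(all_plans)}

    return dist_wrapper.all_reduce("plan", _plan_writes, _merge_plans)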
def _find_shard(tensor: ShardedTensor, index: MetadataIndex) -> Shard:
if index.offset is None:
raise ValueError(f"Cannot lookup {index.fqn} since its a ShardedTensor and no offset was provided")
shards = tensor.local_shards()
# index fast path
if index.index is not None:
if len(shards) > index.index and torch.Size(shards[index.index].metadata.shard_offsets) == index.offset:
return shards[index.index]
for shard in shards:
if torch.Size(shard.metadata.shard_offsets) == index.offset:
return shard
raise ValueError(f"Could not find shard at '{index.offset}' for FQN: '{index.fqn}'")
def find_state_dict_object(state_dict: STATE_DICT_TYPE, index: MetadataIndex) -> Any:
if index.fqn not in state_dict:
raise ValueError(f"Could not find FQN: '{index.fqn}'")
obj = state_dict[index.fqn]
if isinstance(obj, ShardedTensor):
return _find_shard(obj, index).tensor
if index.offset is not None:
raise ValueError(f"FQN: '{index.fqn}' is not a ShardedTensor, can't find by offset")
return obj
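# Illustrative sketch, not part of the original file: how the lookup helpers
# above are used. The MetadataIndex keyword arguments (``fqn``, ``offset``)
# mirror the attribute accesses in ``_find_shard``; the state_dict keys
# ("bias", "weight") are hypothetical.
def _example_find_state_dict_object(state_dict: STATE_DICT_TYPE) -> Any:
    # A plain tensor is found by FQN alone.
    bias = find_state_dict_object(state_dict, MetadataIndex(fqn="bias"))
    # A ShardedTensor additionally needs the offsets of the requested local shard.
    weight_shard = find_state_dict_object(
        state_dict, MetadataIndex(fqn="weight", offset=torch.Size([0, 0]))
    )
    return bias, weight_shard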
| pytorch-master | torch/distributed/_shard/checkpoint/utils.py |
import abc
from typing import List, Union
from torch.futures import Future
from .metadata import (
BytesReadRequest,
BytesWriteRequest,
Metadata,
TensorReadRequest,
TensorWriteRequest,
)
class StorageWriter(abc.ABC):
"""
Interface used by ``save_state_dict`` to write to storage.
A subclass should expect the following sequence of calls by ``save_state_dict``
1) (called once globally) prepare()
2) prepare_storage() with the writes that will be used with (3) and (4).
3) write_bytes
4) write_tensors.
5) Wait for the (3) and (4) futures. If either fails, abort the checkpoint.
6) (called once globally) finish().
There's a single process that executes methods that are called once globally.
The writes from (3) and (4) are initiated before any waiting is done.
The last call to finish() has the semantics of committing the checkpoint.
"""
@abc.abstractmethod
def prepare(self) -> None:
"""
Initialize storage to receive the checkpoint.
This method is called once globally per checkpoint before any other method.
This is in contrast to ``prepare_storage`` which is called on each process
in parallel.
Returns:
None
"""
pass
@abc.abstractmethod
def write_bytes(self, requests: List[BytesWriteRequest]) -> Future[None]:
"""
Initiate writes for all requests in `requests`.
Writing can happen asynchronously and/or concurrently. A blocking
implementation is valid.
Args:
requests (List[BytesWriteRequest]): A list of requests to write
Returns:
A future that completes once all writes have finished.
"""
pass
@abc.abstractmethod
def write_tensors(self, requests: List[TensorWriteRequest]) -> Future[None]:
"""
Initiate writes for all requests in `requests`.
Writing can happen asynchronously and/or concurrently. A blocking
implementation is valid.
Implementors are responsible for any device to host transfers required
to copy.
Args:
requests (List[TensorWriteRequest]): A list of requests to write
Returns:
A future that completes once all writes have finished.
"""
pass
@abc.abstractmethod
def finish(self, metadata: Metadata) -> None:
"""
Writes the metadata and marks the current checkpoint as successful.
This method is called once globally after all data has been written
and is used to persist the metadata and commit the checkpoint.
The `metadata` object includes a global view of the checkpoint and,
while how it is persisted is up to the implementation, it must be
recoverable by the matching StorageReader implementation.
The actual format/schema used for serializing `metadata` is
considered an implementation detail.
Args:
metadata (Metadata): metadata for the new checkpoint
Returns:
None
"""
pass
def prepare_storage(self, storage_writes: List[Union[TensorWriteRequest, BytesWriteRequest]]) -> None:
"""
Prepare the underlying storage for upcoming writes.
This is an optional override intended for advanced scenarios where
a storage layer wants to do some work ahead of the writing itself.
This method is called on each process in parallel before any writes are performed.
The default implementation does nothing.
Args:
storage_writes (List[Union[TensorWriteRequest, BytesWriteRequest]]): A list of
all writes that will be submitted.
Returns:
None
"""
pass
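# Illustrative sketch, not part of the original file: a minimal blocking
# StorageWriter that satisfies the call sequence documented above. Each write
# request is treated as opaque and simply dropped; a real implementation would
# persist the request payloads and the metadata.
class _ExampleNoopStorageWriter(StorageWriter):
    def prepare(self) -> None:
        # Called once globally, e.g. to create the destination directory.
        pass

    def write_bytes(self, requests: List[BytesWriteRequest]) -> Future[None]:
        # A blocking implementation finishes its work before returning, so the
        # future can be resolved immediately.
        fut: Future[None] = Future()
        fut.set_result(None)
        return fut

    def write_tensors(self, requests: List[TensorWriteRequest]) -> Future[None]:
        fut: Future[None] = Future()
        fut.set_result(None)
        return fut

    def finish(self, metadata: Metadata) -> None:
        # Committing the checkpoint amounts to persisting ``metadata`` here.
        pass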
class StorageReader(abc.ABC):
"""
Interface used by ``load_state_dict`` to read from storage.
A subclass should expect the following sequence of calls by ``load_state_dict``:
1) read_metadata() - on all ranks
2) read_bytes
3) read_tensors
The reads from (2) and (3) are initiated before any waiting is done.
Implementors must ensure host/device synchronization as part of
completion of both read requests.
"""
@abc.abstractmethod
def read_bytes(self, requests: List[BytesReadRequest]) -> Future[None]:
"""
Initiate reads for all requests in `requests`.
Reads can happen asynchronously and/or concurrently. A blocking
implementation is valid.
Args:
requests (List[BytesReadRequest]): A list of requests to read.
Returns:
A future that completes once all reads have finished.
"""
pass
@abc.abstractmethod
def read_tensors(self, requests: List[TensorReadRequest]) -> Future[None]:
"""
Initiate reads for all requests in `requests`.
Reads can happen asynchronously and/or concurrently. A blocking
implementation is valid.
Implementors must not assume that the original device
at write time will be the same at read time.
If an implementation uses asynchronous copies to device, it must
ensure proper synchronization W.R.T. the returned future.
Args:
requests (List[TensorReadRequest]): A list of requests to read.
Returns:
A future that completes once all reads have finished.
"""
pass
@abc.abstractmethod
def read_metadata(self) -> Metadata:
"""
Reads the checkpoint metadata.
Returns:
The metadata object associated with the checkpoint being loaded.
"""
pass
| pytorch-master | torch/distributed/_shard/checkpoint/storage.py |
from dataclasses import dataclass, field
from enum import Enum
from typing import List
import torch
from torch.distributed._shard.metadata import ShardMetadata
class MEM_FORMAT_ENCODING(Enum):
TORCH_CONTIGUOUS_FORMAT = 0
TORCH_CHANNELS_LAST = 1
TORCH_PRESERVE_FORMAT = 2
@dataclass
class TensorProperties(object):
""" Properties used to create :class:`Tensor` """
# Regular tensor fields
dtype: torch.dtype = field(default=torch.get_default_dtype())
layout: torch.layout = field(default=torch.strided)
requires_grad: bool = False
memory_format: torch.memory_format = field(default=torch.contiguous_format)
pin_memory: bool = False
def __getstate__(self):
# torch.memory_format cannot be pickled, so encode it as an enum for serialization.
memory_format = self.memory_format
if memory_format == torch.contiguous_format:
mem_format_encoding = MEM_FORMAT_ENCODING.TORCH_CONTIGUOUS_FORMAT
elif memory_format == torch.channels_last:
mem_format_encoding = MEM_FORMAT_ENCODING.TORCH_CHANNELS_LAST
elif memory_format == torch.preserve_format:
mem_format_encoding = MEM_FORMAT_ENCODING.TORCH_PRESERVE_FORMAT
else:
raise RuntimeError(f'Invalid torch.memory_format: {memory_format}')
return (
self.dtype,
self.layout,
self.requires_grad,
mem_format_encoding,
self.pin_memory,
)
def __setstate__(
self,
state,
):
(self.dtype, self.layout, self.requires_grad, mem_format_encoding, self.pin_memory) = state
if mem_format_encoding == MEM_FORMAT_ENCODING.TORCH_CONTIGUOUS_FORMAT:
memory_format = torch.contiguous_format
elif mem_format_encoding == MEM_FORMAT_ENCODING.TORCH_CHANNELS_LAST:
memory_format = torch.channels_last
elif mem_format_encoding == MEM_FORMAT_ENCODING.TORCH_PRESERVE_FORMAT:
memory_format = torch.preserve_format
else:
raise RuntimeError(f'Invalid torch.memory_format encoding: {mem_format_encoding}')
self.memory_format = memory_format
@staticmethod
def create_from_tensor(tensor: torch.Tensor) -> "TensorProperties":
return TensorProperties(
dtype=tensor.dtype,
layout=tensor.layout,
requires_grad=tensor.requires_grad,
memory_format=torch.contiguous_format,
pin_memory=tensor.is_pinned()
)
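# Illustrative sketch, not part of the original file: the custom
# __getstate__/__setstate__ above exist because torch.memory_format itself is
# not picklable, so a pickle round trip should preserve every field, including
# the memory format.
def _example_tensor_properties_roundtrip() -> "TensorProperties":
    import pickle

    props = TensorProperties.create_from_tensor(torch.empty(2, 2))
    restored = pickle.loads(pickle.dumps(props))
    assert restored.memory_format == props.memory_format
    assert restored.dtype == props.dtype
    return restored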
@dataclass
class ShardedTensorMetadata(object):
"""
Represents metadata for :class:`ShardedTensor`
"""
# Metadata about each shard of the Tensor
shards_metadata: List[ShardMetadata] = field(default_factory=list)
# Size of each dim of the overall Tensor.
size: torch.Size = field(default=torch.Size([]))
tensor_properties: TensorProperties = field(default=TensorProperties())
| pytorch-master | torch/distributed/_shard/sharded_tensor/metadata.py |
# coding=utf-8
import copy
import functools
from typing import List
import torch
import torch.distributed._shard.sharding_spec as shard_spec
from torch.distributed._shard.partial_tensor import _PartialTensor
from .api import (
_CUSTOM_SHARDED_OPS,
_SHARDED_OPS,
Shard,
ShardedTensorBase,
ShardedTensor,
ShardedTensorMetadata,
TensorProperties,
)
from .metadata import ShardMetadata # noqa: F401
from torch.distributed._shard.op_registry_utils import _decorator_func
def empty(sharding_spec: shard_spec.ShardingSpec,
*size,
dtype=None,
layout=torch.strided,
requires_grad=False,
pin_memory=False,
memory_format=torch.contiguous_format,
process_group=None,
init_rrefs=False) -> ShardedTensor:
"""
Returns a :class:`ShardedTensor` filled with uninitialized data.
Needs to be called on all ranks in an SPMD fashion.
Args:
sharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`): The specification
describing how to shard the Tensor.
size (int...): a sequence of integers defining the shape of the output
tensor. Can be a variable number of arguments or a collection like a list or tuple.
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: if ``None``, uses a global default (see :func:`torch.set_default_tensor_type`).
layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
Default: ``torch.strided``.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
pin_memory (bool, optional): If set, returned tensor would be allocated in
the pinned memory. Works only for CPU tensors. Default: ``False``.
memory_format (:class:`torch.memory_format`, optional): the desired memory format of
returned Tensor. Default: ``torch.contiguous_format``.
process_group (ProcessGroup, optional): The process group to work on. If None,
the default process group will be used.
init_rrefs (bool, optional): Whether or not to initialize
:class:`torch.distributed.rpc.RRef`s pointing to remote shards.
Need to initialize the RPC Framework if specified as ``True``.
Default: ``False``.
Returns:
A :class:`ShardedTensor` object on each rank
"""
return ShardedTensor(
sharding_spec,
*size,
dtype=dtype,
layout=layout,
requires_grad=requires_grad,
pin_memory=pin_memory,
memory_format=memory_format,
process_group=process_group,
init_rrefs=init_rrefs,
)
def ones(sharding_spec: shard_spec.ShardingSpec,
*size,
dtype=None,
layout=torch.strided,
requires_grad=False,
pin_memory=False,
memory_format=torch.contiguous_format,
process_group=None,
init_rrefs=False) -> ShardedTensor:
"""
Returns a :class:`ShardedTensor` filled with the scalar value 1.
Needs to be called on all ranks in an SPMD fashion.
Args:
sharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`): The specification
describing how to shard the Tensor.
size (int...): a sequence of integers defining the shape of the output
tensor. Can be a variable number of arguments or a collection like a list or tuple.
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: if ``None``, uses a global default (see :func:`torch.set_default_tensor_type`).
layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
Default: ``torch.strided``.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
pin_memory (bool, optional): If set, returned tensor would be allocated in
the pinned memory. Works only for CPU tensors. Default: ``False``.
process_group (ProcessGroup, optional): The process group to work on. If None,
the default process group will be used.
init_rrefs (bool, optional): Whether or not to initialize
:class:`torch.distributed.rpc.RRef`s pointing to remote shards.
Need to initialize the RPC Framework if specified as ``True``.
Default: ``False``.
Returns:
A :class:`ShardedTensor` object on each rank
"""
return full(
sharding_spec,
size,
fill_value=1,
dtype=dtype,
layout=layout,
requires_grad=requires_grad,
pin_memory=pin_memory,
memory_format=memory_format,
process_group=process_group,
init_rrefs=init_rrefs
)
def zeros(sharding_spec: shard_spec.ShardingSpec,
*size,
dtype=None,
layout=torch.strided,
requires_grad=False,
pin_memory=False,
memory_format=torch.contiguous_format,
process_group=None,
init_rrefs=False) -> ShardedTensor:
"""
Returns a :class:`ShardedTensor` filled with the scalar value 0.
Needs to be called on all ranks in an SPMD fashion.
Args:
sharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`): The specification
describing how to shard the Tensor.
size (int...): a sequence of integers defining the shape of the output
tensor. Can be a variable number of arguments or a collection like a list or tuple.
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: if ``None``, uses a global default (see :func:`torch.set_default_tensor_type`).
layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
Default: ``torch.strided``.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
pin_memory (bool, optional): If set, returned tensor would be allocated in
the pinned memory. Works only for CPU tensors. Default: ``False``.
process_group (ProcessGroup, optional): The process group to work on. If None,
the default process group will be used.
init_rrefs (bool, optional): Whether or not to initialize
:class:`torch.distributed.rpc.RRef`s pointing to remote shards.
Need to initialize the RPC Framework if specified as ``True``.
Default: ``False``.
Returns:
A :class:`ShardedTensor` object on each rank
"""
return full(
sharding_spec,
size,
fill_value=0,
dtype=dtype,
layout=layout,
requires_grad=requires_grad,
pin_memory=pin_memory,
memory_format=memory_format,
process_group=process_group,
init_rrefs=init_rrefs
)
def full(sharding_spec: shard_spec.ShardingSpec,
size,
fill_value,
*,
dtype=None,
layout=torch.strided,
requires_grad=False,
pin_memory=False,
memory_format=torch.contiguous_format,
process_group=None,
init_rrefs=False) -> ShardedTensor:
"""
Creates a :class:`ShardedTensor` filled with fill_value. The tensor’s dtype
is inferred from fill_value. If dtype is specified, it will override the
inferred type from fill_value. Needs to be called on all ranks in an SPMD fashion.
Args:
sharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`): The specification
describing how to shard the Tensor.
size (int...): a list, tuple, or `torch.Size` of integers defining the shape of the
output tensor.
fill_value (Scalar): the value to fill the output tensor with.
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: if ``None``, uses a global default (see :func:`torch.set_default_tensor_type`).
layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
Default: ``torch.strided``.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
pin_memory (bool, optional): If set, returned tensor would be allocated in
the pinned memory. Works only for CPU tensors. Default: ``False``.
process_group (ProcessGroup, optional): The process group to work on. If None,
the default process group will be used.
init_rrefs (bool, optional): Whether or not to initialize
:class:`torch.distributed.rpc.RRef`s pointing to remote shards.
Need to initialize the RPC Framework if specified as ``True``.
Default: ``False``.
Returns:
A :class:`ShardedTensor` object on each rank
"""
sharded_tensor = ShardedTensor(
sharding_spec,
*size,
dtype=dtype,
layout=layout,
requires_grad=requires_grad,
pin_memory=pin_memory,
memory_format=memory_format,
process_group=process_group,
init_rrefs=init_rrefs,
)
torch.nn.init.constant_(sharded_tensor, fill_value) # type: ignore[arg-type]
return sharded_tensor
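# Illustrative sketch, not part of the original file: factory functions such as
# ones(), zeros() and full() above construct a ShardedTensor and then fill its
# local shards via torch.nn.init. A two-rank ChunkShardingSpec is assumed, and
# the default process group must already be initialized on every rank.
def _example_factory_usage() -> ShardedTensor:
    spec = shard_spec.ChunkShardingSpec(
        dim=0,
        placements=["rank:0/cuda:0", "rank:1/cuda:1"],
    )
    # Equivalent to ones(spec, 10, 5) up to the fill value and dtype.
    return full(spec, (10, 5), fill_value=3.14, dtype=torch.float64)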
def rand(sharding_spec: shard_spec.ShardingSpec,
*size,
dtype=None,
layout=torch.strided,
requires_grad=False,
pin_memory=False,
memory_format=torch.contiguous_format,
process_group=None,
init_rrefs=False) -> ShardedTensor:
"""
Creates a :class:`ShardedTensor` filled with random numbers from a uniform distribution
on the interval :math:`[0, 1)`. The shape of the tensor is defined by the
variable argument `size`. Needs to be called on all ranks in an SPMD fashion.
Args:
sharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`): The specification
describing how to shard the Tensor.
size (int...): a list, tuple, or `torch.Size` of integers defining the shape of the
output tensor.
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: if ``None``, uses a global default (see :func:`torch.set_default_tensor_type`).
layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
Default: ``torch.strided``.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
pin_memory (bool, optional): If set, returned tensor would be allocated in
the pinned memory. Works only for CPU tensors. Default: ``False``.
process_group (ProcessGroup, optional): The process group to work on. If None,
the default process group will be used.
init_rrefs (bool, optional): Whether or not to initialize
:class:`torch.distributed.rpc.RRef`s pointing to remote shards.
Need to initialize the RPC Framework if specified as ``True``.
Default: ``False``.
Returns:
A :class:`ShardedTensor` object on each rank
"""
sharded_tensor = ShardedTensor(
sharding_spec,
*size,
dtype=dtype,
layout=layout,
requires_grad=requires_grad,
pin_memory=pin_memory,
memory_format=memory_format,
process_group=process_group,
init_rrefs=init_rrefs,
)
torch.nn.init.uniform_(sharded_tensor, 0, 1) # type: ignore[arg-type]
return sharded_tensor
def randn(sharding_spec: shard_spec.ShardingSpec,
*size,
dtype=None,
layout=torch.strided,
requires_grad=False,
pin_memory=False,
memory_format=torch.contiguous_format,
process_group=None,
init_rrefs=False) -> ShardedTensor:
"""
Creates a :class:`ShardedTensor` filled with random numbers from a normal distribution
with mean `0` and variance `1` (also called the standard normal distribution). The shape
of the tensor is defined by the variable argument `size`. Needs to be called on all ranks
in an SPMD fashion.
Args:
sharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`): The specification
describing how to shard the Tensor.
size (int...): a list, tuple, or `torch.Size` of integers defining the shape of the
output tensor.
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: if ``None``, uses a global default (see :func:`torch.set_default_tensor_type`).
layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
Default: ``torch.strided``.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
pin_memory (bool, optional): If set, returned tensor would be allocated in
the pinned memory. Works only for CPU tensors. Default: ``False``.
process_group (ProcessGroup, optional): The process group to work on. If None,
the default process group will be used.
init_rrefs (bool, optional): Whether or not to initialize
:class:`torch.distributed.rpc.RRef`s pointing to remote shards.
Need to initialize the RPC Framework if specified as ``True``.
Default: ``False``.
Returns:
A :class:`ShardedTensor` object on each rank
"""
sharded_tensor = ShardedTensor(
sharding_spec,
*size,
dtype=dtype,
layout=layout,
requires_grad=requires_grad,
pin_memory=pin_memory,
memory_format=memory_format,
process_group=process_group,
init_rrefs=init_rrefs,
)
torch.nn.init.normal_(sharded_tensor, 0, 1) # type: ignore[arg-type]
return sharded_tensor
def init_from_local_shards(
local_shards: List[Shard],
*global_size,
process_group=None,
init_rrefs=False) -> ShardedTensor:
"""
Creates a :class:`ShardedTensor` from local shards and the global metadata.
Needs to be called on all ranks in an SPMD fashion.
Args:
local_shards (List[:class:`torch.distributed._shard.sharded_tensor.Shard`]): A list
of shards that represent the local shards on this rank.
global_size (int...): a list, tuple, or `torch.Size` of integers defining the
shape of the overall sharded tensor.
Keyword args:
process_group (ProcessGroup, optional): The process group to work on. If None,
the default process group will be used.
init_rrefs (bool, optional): Whether or not to initialize
:class:`torch.distributed.rpc.RRef`s pointing to remote shards.
Need to initialize the RPC Framework if specified as ``True``.
Default: ``False``.
Returns:
A :class:`ShardedTensor` object handle on this rank
Examples:
Suppose we want to construct a sharded tensor on two ranks with global size (10, 5),
where each shard has a (5, 5) local tensor; we can do it as below:
on rank 0:
>>> # xdoctest: +SKIP("not distributed")
>>> local_shard_metadata = ShardMetadata(
>>> shard_offsets=[0, 0],
>>> shard_lengths=[5, 5],
>>> placement="rank:0/cuda:0"
>>> )
>>> local_shards = [Shard(torch.randn(5, 5), local_shard_metadata)]
>>> sharded_tensor = init_from_local_shards(local_shards, [10, 5])
on rank 1:
>>> # xdoctest: +SKIP("not distributed")
>>> local_shard_metadata = ShardMetadata(
>>> shard_offsets=[5, 0],
>>> shard_lengths=[5, 5],
>>> placement="rank:1/cuda:1"
>>> )
>>> local_shards = [Shard(torch.randn(5, 5), local_shard_metadata)]
>>> sharded_tensor = init_from_local_shards(local_shards, [10, 5])
"""
return ShardedTensor._init_from_local_shards(
local_shards,
*global_size,
process_group=process_group,
init_rrefs=init_rrefs
)
def state_dict_hook(module, destination, prefix, local_metadata):
"""
Hook to add ShardedTensor to Module's ``state_dict``. Needs to be
registered to the Module using
:meth:`torch.nn.Module._register_state_dict_hook`.
"""
for submodule_name, submodule in module.named_modules():
for attr_name, attr in submodule.__dict__.items():
if isinstance(attr, ShardedTensor):
mod_prefix = prefix + submodule_name
key = mod_prefix + ('.' if mod_prefix else '') + attr_name
destination[key] = attr
def pre_load_state_dict_hook(module, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):
"""
Pre-load state dict hook to add ShardedTensor to the module.
"""
for submodule_name, submodule in module.named_modules():
for attr_name, attr in submodule.__dict__.items():
mod_prefix = prefix + submodule_name
key = mod_prefix + ('.' if mod_prefix else '') + attr_name
if key in state_dict:
if isinstance(state_dict[key], ShardedTensor):
setattr(submodule, attr_name, state_dict[key])
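# Illustrative sketch, not part of the original file: wiring the two hooks
# above into a module so ShardedTensor attributes are saved into and restored
# from its state_dict. The private torch.nn.Module registration APIs used here
# (``_register_state_dict_hook`` and ``_register_load_state_dict_pre_hook``
# with ``with_module=True``) are assumed to be available.
def _example_register_sharded_hooks(module: torch.nn.Module) -> torch.nn.Module:
    module._register_state_dict_hook(state_dict_hook)
    module._register_load_state_dict_pre_hook(
        pre_load_state_dict_hook, with_module=True
    )
    return module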
def custom_sharded_op_impl(func):
"""
Provides a way for users to write their own custom sharded operator. This
can be used to override existing ShardedTensor operators or write a new
one not supported by ShardedTensor. If the operator in question is covered
by ``__torch_function__`` dispatch and has a ShardedTensor as any of its
parameters, the function provided will be invoked for that operator.
Example::
>>> @custom_sharded_op_impl(torch.nn.functional.linear)
>>> def my_custom_sharded_linear(types, args, kwargs, process_group):
>>> ...
>>> # xdoctest: +SKIP("Undefined variables")
>>> input = torch.rand(10, 32)
>>> weight = sharded_tensor.rand(32, 16)
>>> bias = torch.rand(16)
>>> # This will call 'my_custom_sharded_linear'
>>> torch.nn.functional.linear(input, weight, bias)
The types, args and kwargs parameters are the same parameters that are
passed to ``__torch_function__`` dispatch API
(https://pytorch.org/docs/stable/notes/extending.html#extending-torch).
There is an additional ``process_group`` parameter which is the
process_group used for the ShardedTensor and can be used by
implementations for communications within a sharded implementation.
Args:
func(Callable): Torch function for which we want to provide a sharded
implementation (ex: torch.nn.functional.linear)
"""
return functools.partial(
_decorator_func,
op=func,
op_table=_CUSTOM_SHARDED_OPS
)
def _sharded_op_impl(func):
"""
Decorator to register a default sharded op.
"""
return functools.partial(
_decorator_func,
op=func,
op_table=_SHARDED_OPS
)
# Import all builtin sharded ops
from ._ops import * # noqa: F403
| pytorch-master | torch/distributed/_shard/sharded_tensor/__init__.py |
import copy
from typing import List, Tuple
import torch
import torch.distributed as dist
from torch._C._distributed_c10d import (
ProcessGroup,
)
import torch.distributed._shard.sharding_spec as shard_spec
from torch.distributed._shard.sharding_spec._internals import (
get_split_size,
get_chunked_dim_size,
)
from torch.distributed.nn.functional import (
all_to_all,
all_to_all_single,
)
from torch.distributed._shard.metadata import ShardMetadata
from .shard import Shard
def get_idx_from_placements(placements, current_rank) -> int:
"""
Return the position of the current rank in the given placements.
Args:
placements(List[Union[_remote_device, str]]):
Specifies the placement of each shard of the Tensor. The size of
the list represents the number of shards to be created. This could
be a list of
:class:`torch.distributed._remote_device`'s. This list
could also contain a string which represents remote
device as accepted by
:class:`torch.distributed._remote_device`
current_rank (int): rank of the current process.
Returns:
An int which contains the position of the current rank in the placement list.
"""
for idx, placement in enumerate(placements): # type: ignore[attr-defined]
if current_rank == placement.rank(): # type: ignore[union-attr]
return idx
raise RuntimeError('current_rank not in the placement.')
def build_reshard_metadata(
st_size: torch.Size,
sharding_spec: shard_spec.ShardingSpec,
world_size: int,
) -> Tuple[List[ShardMetadata], List[int]]:
"""
Based on the given sharding spec, we calculate the offsets and local shard sizes.
We then build a ShardMetadata on top of the calculation result.
Args:
st_size (torch.Size): The size of the sharded tensor.
sharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`): The
specification describing how the tensor is sharded.
world_size (int): number of ranks.
Returns:
A Tuple of the following:
A List[`ShardMetadata`] which contains the metadata for the shard, including
offsets, lengths and device placement.
A List[int] which contains the ranks in the order of placement.
"""
shard_dim = int(sharding_spec.dim) # type: ignore[attr-defined]
shards_metadata = [None] * world_size
ranks = []
offsets = [0] * len(st_size)
split_size = get_split_size(st_size[shard_dim], world_size)
for idx, placement in enumerate(sharding_spec.placements): # type: ignore[attr-defined]
ranks.append(placement.rank())
sharded_dim_size = get_chunked_dim_size(st_size[shard_dim], split_size, idx)
local_tensor_size = list(st_size)
local_tensor_size[shard_dim] = sharded_dim_size
shards_metadata[placement.rank()] = ShardMetadata( # type: ignore[call-overload]
shard_offsets=copy.deepcopy(offsets),
shard_sizes=local_tensor_size,
placement=placement,
)
offsets[shard_dim] += sharded_dim_size
return shards_metadata, ranks # type: ignore[return-value]
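# Illustrative sketch, not part of the original file: what build_reshard_metadata
# produces for a (10, 4) tensor chunk-sharded along dim 0 across two ranks. With
# the assumed ChunkShardingSpec below, the returned shard metadata carries
# offsets [0, 0] and [5, 0], shard sizes [5, 4], and ranks [0, 1].
def _example_build_reshard_metadata() -> Tuple[List[ShardMetadata], List[int]]:
    spec = shard_spec.ChunkShardingSpec(
        dim=0,
        placements=["rank:0/cuda:0", "rank:1/cuda:1"],
    )
    return build_reshard_metadata(torch.Size([10, 4]), spec, world_size=2)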
def reshuffle_local_shard(
local_shard: torch.Tensor,
st_size: torch.Size,
sharding_spec: shard_spec.ShardingSpec,
resharding_spec: shard_spec.ShardingSpec,
pg: ProcessGroup,
) -> Tuple[List[Shard], List[ShardMetadata]]:
"""
Reshuffle the local shard directly when the reshard dim is the same as the original
sharding dim. Logically we do this in two steps:
1. Collect all shards based on the original sharding spec.
2. Reshard the tensor based on the given resharding spec.
In reality, we consolidate the two steps into one by sending the local tensor to
the new shard directly based on the resharding spec.
Args:
local_shard (Tensor): Local tensor stored in the current rank.
st_size (torch.Size): The size of the sharded tensor.
sharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`): The
specification describing how the tensor is sharded originally.
resharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`): The
specification describing how the tensor will be resharded.
pg (ProcessGroup): The process group to aggregate on.
Returns:
A Tuple of the following:
A List[`Shard`] which contains the local tensor and its metadata.
A List[`ShardMetadata`] which contains the metadata for the shard, including
offsets, lengths and device placement.
"""
current_rank = dist.get_rank(pg)
world_size = dist.get_world_size(pg)
# Build shards_metadata first.
shards_metadata, ranks = build_reshard_metadata(
st_size, resharding_spec, world_size
)
# Get input split size for all2all.
reshard_dim = int(resharding_spec.dim) # type: ignore[attr-defined]
split_size = get_split_size(st_size[reshard_dim], world_size)
input_split_sizes = [0] * world_size
idx = get_idx_from_placements(sharding_spec.placements, current_rank) # type: ignore[attr-defined]
new_rank = resharding_spec.placements[idx].rank() # type: ignore[union-attr, attr-defined]
input_split_sizes[new_rank] = local_shard.size(reshard_dim)
# Get output split size for all2all.
output_split_sizes = [0] * world_size
new_idx = ranks.index(current_rank)
sharded_dim_size = get_chunked_dim_size(st_size[reshard_dim], split_size, new_idx)
output_split_sizes[new_rank] = sharded_dim_size
# Get gathered_input for all2all.
local_shard = local_shard.transpose(0, reshard_dim).contiguous()
gathered_input_size = list(local_shard.size())
gathered_input_size[0] = sharded_dim_size
gathered_input = torch.empty(gathered_input_size, device=local_shard.device, dtype=local_shard.dtype)
# all2all.
local_shard = all_to_all_single(
gathered_input,
local_shard,
input_split_sizes=input_split_sizes,
output_split_sizes=output_split_sizes,
group=pg,
)
local_tensor = local_shard.transpose(0, reshard_dim).contiguous()
local_shards = [Shard(local_tensor, shards_metadata[current_rank])]
return local_shards, shards_metadata
def reshard_local_shard(
local_tensor: torch.Tensor,
st_size: torch.Size,
sharding_spec: shard_spec.ShardingSpec,
resharding_spec: shard_spec.ShardingSpec,
pg: ProcessGroup,
) -> Tuple[List[Shard], List[ShardMetadata]]:
"""
Reshard a sharded tensor given the ``resharding_spec``. When the reshard dim is
different from the original sharding dim, we need to do two steps logically:
1. Collect all shards based on the original sharding spec.
2. Reshard the tensor based on the given resharding spec.
In reality, we consolidate the two steps into one by sending each rank the new
shard based on the resharding spec.
Args:
local_tensor (Tensor): Local tensor stored in the current rank.
st_size (torch.Size): The size of the sharded tensor.
sharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`): The
specification describing how the tensor is sharded originally.
resharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`): The
specification describing how the tensor will be resharded.
pg (ProcessGroup): The process group to aggregate on.
Returns:
A Tuple of the following:
A List[`Shard`] which contains the local tensor and its metadata.
A List[`ShardMetadata`] which contains the metadata for the shard, including
offsets, lengths and device placement.
"""
current_rank = dist.get_rank(pg)
world_size = dist.get_world_size(pg)
current_sharding_dim = int(sharding_spec.dim) # type: ignore[attr-defined]
reshard_dim = int(resharding_spec.dim) # type: ignore[attr-defined]
# Build shards_metadata first.
shards_metadata, ranks = build_reshard_metadata(
st_size, resharding_spec, world_size
)
# Compute expected size
input_split_sizes = []
for metadata in shards_metadata:
input_split_sizes.append(metadata.shard_sizes[reshard_dim])
rearrange_input = any(ranks[i] > ranks[i + 1] for i in range(len(ranks) - 1))
if rearrange_input:
# Need to re-arrange reshard_dim of local_tensor before all2all.
indices: List[int] = []
for metadata in shards_metadata:
offset_start_idx = metadata.shard_offsets[reshard_dim]
split_size = metadata.shard_sizes[reshard_dim]
indices += range(offset_start_idx, offset_start_idx + split_size)
local_tensor = local_tensor.index_select(
reshard_dim, torch.tensor(indices, device=local_tensor.device)
)
# Because reshard_dim != original shard_dim, we need to compute the
# size of the tensor coming from each rank.
output_tensor_list = [torch.tensor(1)] * world_size
split_size = get_split_size(st_size[current_sharding_dim], world_size)
rearrange_output_list = False
indices = []
for idx, placement in enumerate(sharding_spec.placements): # type: ignore[attr-defined]
sharded_dim_size = get_chunked_dim_size(
st_size[current_sharding_dim], split_size, idx
)
output_tensor_size = list(st_size)
output_tensor_size[current_sharding_dim] = sharded_dim_size
output_tensor_size[reshard_dim] = input_split_sizes[current_rank]
output_tensor_list[
placement.rank()
] = torch.empty( # type: ignore[union-attr, index]
output_tensor_size, device=local_tensor.device, dtype=local_tensor.dtype
)
indices.append(placement.rank()) # type: ignore[union-attr, index, arg-type]
if idx != placement.rank(): # type: ignore[union-attr]
rearrange_output_list = True
# Perform autograd enabled all2all.
input_tensor_list = torch.split(local_tensor, input_split_sizes, dim=reshard_dim)
input_tensor_list = [tensor.contiguous() for tensor in input_tensor_list]
output_tensor_list = all_to_all(
output_tensor_list,
input_tensor_list,
group=pg,
)
if rearrange_output_list:
# Need to re-arrange original shard_dim of output_tensor_list.
output_tensor_list = [output_tensor_list[idx] for idx in indices] # type: ignore[call-overload]
local_tensor = torch.cat(output_tensor_list, dim=current_sharding_dim)
local_shards = [Shard(local_tensor, shards_metadata[current_rank])]
return local_shards, shards_metadata
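# Illustrative sketch, not part of the original file: callers pick between the
# two helpers above depending on whether the sharding dimension changes. ``st``
# is assumed to be a ShardedTensor exposing ``sharding_spec()``, ``size()`` and
# ``local_shards()``; ``new_spec`` is the target sharding spec.
def _example_reshard_dispatch(st, new_spec, pg: ProcessGroup):
    local_tensor = st.local_shards()[0].tensor
    if int(st.sharding_spec().dim) == int(new_spec.dim):
        # Same dim: whole shards only move between ranks.
        return reshuffle_local_shard(
            local_tensor, st.size(), st.sharding_spec(), new_spec, pg
        )
    # Different dim: every rank exchanges slices with every other rank.
    return reshard_local_shard(
        local_tensor, st.size(), st.sharding_spec(), new_spec, pg
    )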
| pytorch-master | torch/distributed/_shard/sharded_tensor/reshard.py |
from __future__ import annotations # type: ignore[attr-defined]
from dataclasses import dataclass
from typing import (
Callable,
Dict,
List,
Optional,
Sequence,
Tuple,
cast,
)
import copy
from functools import reduce
import weakref
import threading
import torch
import torch.distributed as dist
from torch.distributed import rpc
from torch.distributed import distributed_c10d
from torch.distributed._shard.metadata import ShardMetadata
import torch.distributed._shard.sharding_spec as shard_spec
from torch.distributed._shard.sharding_spec.api import (
_dispatch_custom_op,
_has_custom_op,
)
from torch.distributed._shard.sharding_spec._internals import (
check_tensor,
validate_non_overlapping_shards_metadata,
)
from .metadata import TensorProperties, ShardedTensorMetadata
from .shard import Shard
from .reshard import reshuffle_local_shard, reshard_local_shard
from .utils import (
_flatten_tensor_size,
_parse_and_validate_remote_device,
_validate_output_tensor_for_gather,
build_metadata_from_local_shards,
build_global_metadata
)
from torch.distributed.remote_device import _remote_device
from torch.utils._pytree import tree_map
# Tracking for sharded tensor objects.
_sharded_tensor_lock = threading.Lock()
_sharded_tensor_current_id = 0
_sharded_tensor_map: Dict[int, 'weakref.ReferenceType[ShardedTensor]'] = {}
# Default sharded ops
_SHARDED_OPS: Dict[Callable, Callable] = {}
# Customized user ops
_CUSTOM_SHARDED_OPS: Dict[Callable, Callable] = {}
def _register_remote_shards(sharded_tensor_id: int, rrefs: List[rpc.RRef[Shard]], rpc_rank: int):
with _sharded_tensor_lock:
if sharded_tensor_id not in _sharded_tensor_map:
raise RuntimeError(
f'Could not find sharded_tensor_id: {sharded_tensor_id} in map: {_sharded_tensor_map.keys()}')
sharded_tensor = _sharded_tensor_map[sharded_tensor_id]()
if sharded_tensor is None:
raise RuntimeError('ShardedTensor weakref has been deallocated')
else:
sharded_tensor._register_remote_shards(rrefs, rpc_rank)
class ShardedTensorBase(torch.Tensor):
_sharding_spec: shard_spec.ShardingSpec
_metadata: ShardedTensorMetadata
_local_shards: List[Shard]
def __new__(cls, sharding_spec: shard_spec.ShardingSpec, *size, **kwargs):
# Use __new__ to construct a wrapper tensor, for recording tensor
# properties and logging purposes.
torch._C._log_api_usage_once("torch.distributed._shard.sharded_tensor")
# check sharding spec and build sharded tensor metadata
if not isinstance(sharding_spec, shard_spec.ShardingSpec):
raise ValueError(f"Expecting ShardingSpec but got: {type(sharding_spec)}")
sizes = _flatten_tensor_size(size)
dtype = kwargs["dtype"]
layout = kwargs["layout"]
pin_memory = kwargs["pin_memory"]
requires_grad = kwargs["requires_grad"]
if dtype is None:
dtype = torch.get_default_dtype()
tensor_properties = TensorProperties(
dtype, layout, requires_grad, pin_memory=pin_memory
)
sharded_tensor_metadata = sharding_spec.build_metadata(
sizes, tensor_properties=tensor_properties
)
r = torch.Tensor._make_wrapper_subclass( # type: ignore[attr-defined]
cls,
sizes,
dtype=dtype,
layout=layout,
pin_memory=pin_memory,
requires_grad=requires_grad,
)
# set sharding spec
r._sharding_spec = sharding_spec
# set metadata
r._metadata = sharded_tensor_metadata
# set local shards
r._local_shards = []
return r
def metadata(self) -> ShardedTensorMetadata:
"""
Returns a :class:`ShardedTensorMetadata` object corresponding to the
metadata for the entire tensor.
"""
return self._metadata
def local_shards(self) -> List[Shard]:
"""
Returns a list of :class:`Shard` corresponding to the
local shards for this rank. Returns an empty list if the current rank
does not host any shards for this Tensor.
"""
return self._local_shards
@classmethod
def _init_from_local_shards_and_global_metadata(
cls,
local_shards: List[Shard],
sharded_tensor_metadata: ShardedTensorMetadata,
sharding_spec=None,
) -> "ShardedTensor":
"""
Initialize a ShardedTensorBase with local shards and a global
ShardedTensorMetadata built on each rank.
Warning: This API is experimental and subject to change. It does
not do cross-rank validations, and fully relies on the user
for the correctness of sharded_tensor_metadata on each rank.
"""
shards_metadata = sharded_tensor_metadata.shards_metadata
tensor_properties = sharded_tensor_metadata.tensor_properties
if len(shards_metadata) == 0:
raise ValueError("shards_metadata must not be empty!")
if tensor_properties.layout != torch.strided:
raise ValueError("Only torch.strided layout is currently supported")
if sharding_spec is None:
spec = shard_spec._infer_sharding_spec_from_shards_metadata(shards_metadata)
else:
spec = sharding_spec
sharded_tensor_base = ShardedTensor.__new__(
ShardedTensor,
spec,
sharded_tensor_metadata.size,
dtype=tensor_properties.dtype,
layout=tensor_properties.layout,
pin_memory=tensor_properties.pin_memory,
requires_grad=tensor_properties.requires_grad,
)
def _raise_if_mismatch(expected, actual, prop_name, rank, is_property=False):
tensor_property_or_metadata = (
"tensor property" if is_property else "local ShardMetadata"
)
if expected != actual:
raise ValueError(
f"Local shards' tensor {prop_name} property is incompatible with "
f"{tensor_property_or_metadata} on rank {rank}: "
f"{tensor_property_or_metadata} {prop_name}={expected}, "
f"local shard tensor {prop_name}={actual}."
)
for shard in local_shards:
shard_meta = shard.metadata
local_shard_tensor = shard.tensor
placement = shard_meta.placement
assert placement is not None, "Must specify placement for `Shard`!"
rank = placement.rank()
local_device = placement.device()
_raise_if_mismatch(
tensor_properties.layout,
local_shard_tensor.layout,
"layout",
rank,
True,
)
if not local_shard_tensor.is_contiguous():
raise ValueError(
"Only torch.contiguous_format memory_format is currently supported"
)
_raise_if_mismatch(
shard_meta.shard_sizes,
list(local_shard_tensor.size()),
"size",
rank,
)
_raise_if_mismatch(
tensor_properties.pin_memory,
local_shard_tensor.is_pinned(),
"pin_memory",
rank,
True,
)
_raise_if_mismatch(local_device, local_shard_tensor.device, "device", rank)
_raise_if_mismatch(
tensor_properties.dtype,
local_shard_tensor.dtype,
"dtype",
rank,
True,
)
_raise_if_mismatch(
tensor_properties.requires_grad,
local_shard_tensor.requires_grad,
"requires_grad",
rank,
True,
)
# check if shards_metadata have overlap shards
validate_non_overlapping_shards_metadata(shards_metadata)
# check if the shards_metadata is compatible with overall size of the sharded tensor.
check_tensor(shards_metadata, list(sharded_tensor_metadata.size))
# done validation, add local_shards
sharded_tensor_base._local_shards = local_shards
return sharded_tensor_base
@classmethod
def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
raise RuntimeError(
f"A {cls.__name__} object is being used from c++ while calling {func.__module__}.{func.__name__} "
"but the there is no custom __torch_dispatch__ implementation for it."
)
class ShardedTensor(ShardedTensorBase):
"""
ShardedTensor is a torch.Tensor subclass to represent Tensors that are sharded
across multiple devices and multiple processes.
ShardedTensor is initialized in an SPMD like fashion where each rank
initializes the ShardedTensor. The ShardedTensor object on each rank
then only stores the local shard for the Tensor and provides global
metadata for all the shards.
ShardedTensor doesn't provide any Tensor-like operations but is a wrapper
providing the Tensor representing the local shard and the global metadata.
Using these, users can build their custom distributed._sharded computations
on top of this primitive. The local shards are all initialized using the
creation op of the factory function used to build the ShardedTensor,
e.g. torch.ones or torch.empty.
Args:
sharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`): The specification
describing how to shard the Tensor.
size (int...): a sequence of integers defining the shape of the output
tensor. Can be a variable number of arguments or a collection like a list or tuple.
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: if ``None``, uses a global default (see :func:`torch.set_default_tensor_type`).
layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
Default: ``torch.strided``.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
pin_memory (bool, optional): If set, returned tensor would be allocated in
the pinned memory. Works only for CPU tensors. Default: ``False``.
memory_format (:class:`torch.memory_format`, optional): the desired memory format of
returned Tensor. Default: ``torch.contiguous_format``.
init_rrefs (bool, optional): Whether or not to initialize
:class:`torch.distributed.rpc.RRef`s pointing to remote shards.
Need to initialize the RPC Framework if specified as ``True``.
Default: ``False``.
.. note:: ShardedTensor uses collectives to do various operations, i.e. it
uses all_gather to do cross rank validations. For NCCL-based process
groups, internal tensor representations of objects must be moved to the
GPU device before communication takes place. In this case, the device
used is given by ``torch.cuda.current_device()`` and it is the user's
responsibility to ensure that this is set so that each rank has an
individual GPU, via ``torch.cuda.set_device()``
"""
def __new__(cls, sharding_spec: shard_spec.ShardingSpec, *size, **kwargs):
self = super(ShardedTensor, cls).__new__(cls, sharding_spec, *size, **kwargs)
return self
def __init__(
self,
sharding_spec: shard_spec.ShardingSpec,
*size,
dtype=None,
layout=torch.strided,
requires_grad=False,
pin_memory=False,
memory_format=torch.contiguous_format,
process_group=None,
init_rrefs=False,
):
# prepare initialization, initialize fields like
# _process_group, _local_shards, etc.
self._prepare_init(process_group=process_group, init_rrefs=init_rrefs)
if layout != torch.strided:
raise ValueError('Only torch.strided layout is currently supported')
if memory_format != torch.contiguous_format:
raise ValueError('Only torch.contiguous_format memory_format is currently supported')
self._metadata.tensor_properties.memory_format = memory_format
current_rank = dist.get_rank(self._process_group)
for shard_metadata in self._metadata.shards_metadata:
rank, device = _parse_and_validate_remote_device(self._process_group, shard_metadata.placement)
if rank == current_rank:
local_tensor = _create_tensor_from_params(
shard_metadata.shard_sizes,
local_device=device,
tensor_properties=self._metadata.tensor_properties
)
self._local_shards.append(Shard(local_tensor, shard_metadata))
# do post initialization (i.e. register sharded_tensor_id, initialize_rpc)
self._post_init()
def _prepare_init(self, process_group=None, init_rrefs=False):
self._init_rrefs = init_rrefs
self._sharded_tensor_id = None
self._process_group = (
process_group
if process_group is not None
else distributed_c10d._get_default_group()
)
self._remote_shards: Dict[int, List[rpc.RRef[Shard]]] = {}
def _post_init(self):
# Initialize RPC if available.
if self._init_rrefs:
with _sharded_tensor_lock:
global _sharded_tensor_current_id, _sharded_tensor_map
self._sharded_tensor_id = _sharded_tensor_current_id
_sharded_tensor_map[self._sharded_tensor_id] = weakref.ref(self)
_sharded_tensor_current_id += 1
if not rpc._is_current_rpc_agent_set():
raise RuntimeError(
'RPC Framework needs to be initialized using'
' torch.distributed.rpc.init_rpc if init_rrefs is set to True')
self._init_rpc()
def __del__(self):
# Clean up the global map.
with _sharded_tensor_lock:
global _sharded_tensor_current_id, _sharded_tensor_map
if (
hasattr(self, "_sharded_tensor_id")
and self._sharded_tensor_id in _sharded_tensor_map
):
_sharded_tensor_map.pop(self._sharded_tensor_id) # type: ignore[call-overload]
def _init_rpc(self):
# Validate PG and RPC ranks match.
pg_rank = dist.get_rank()
rpc_rank = rpc.get_worker_info().id
if pg_rank != rpc_rank:
raise ValueError(
f'Default ProcessGroup and RPC ranks must be '
f'the same for ShardedTensor, found process group rank: '
f'{pg_rank} and RPC rank: {rpc_rank}'
)
self._remote_shards = {}
# Gather all the sharded tensor ids.
worker_infos = rpc._get_current_rpc_agent().get_worker_infos()
rank_to_name = {}
name_to_rank = {}
for worker_info in worker_infos:
rank_to_name[worker_info.id] = worker_info.name
name_to_rank[worker_info.name] = worker_info.id
all_tensor_ids = rpc.api._all_gather(self._sharded_tensor_id)
# Share the local shards to the entire world.
futs = []
rpc_rank = rpc.get_worker_info().id
for rank in range(dist.get_world_size()):
# Skip self.
if rank == dist.get_rank():
continue
if len(self.local_shards()) != 0:
rrefs: List[rpc.RRef[Shard]] = [rpc.RRef(shard) for shard in self.local_shards()]
fut = rpc.rpc_async(
rank,
_register_remote_shards,
args=(all_tensor_ids[rank_to_name[rank]], rrefs, rpc_rank))
futs.append(fut)
torch.futures.wait_all(futs)
# Barrier for all RPCs to finish on all ranks.
rpc.api._all_gather(None)
def _get_preferred_device(self) -> torch.device:
"""
Return the preferred device to be used when creating tensors for collectives.
This method takes into account the associated process group
"""
if dist.get_backend(self._process_group) == dist.Backend.NCCL:
return torch.device(torch.cuda.current_device())
return torch.device("cpu")
def gather( # type: ignore[override]
self,
dst: int = 0,
out: Optional[torch.Tensor] = None,
) -> None:
"""
Creates a full :class:`Tensor` on rank ``dst`` by gathering all shards of the
sharded tensor.
The API needs to be called on all ranks in SPMD fashion. All ranks should have
the same ``dst``. ``out`` should be a tensor of the same size as the overall
size of the sharded tensor on ``dst`` and ``None`` on all other ranks.
Args:
dst(int): The rank where full tensor is constructed.
Default: 0
out (:class:`torch.Tensor`, optional): The output full tensor.
Must be provided ONLY on the ``dst`` rank.
Default: ``None``
"""
def shard_size(shard_md):
return reduce((lambda x, y: x * y), shard_md.shard_sizes) # type: ignore[attr-defined]
rank = dist.get_rank(self._process_group)
full_size = self.metadata().size
_validate_output_tensor_for_gather(rank, dst, full_size, out)
local_shards = self.local_shards()
world_size = dist.get_world_size(self._process_group)
rank_sizes = [0 for _ in range(world_size)]
max_rank_size = 0
shard_placement: Dict[ShardMetadata, Tuple[int, int]] = dict()
# collect sizes
for shard_md in self.metadata().shards_metadata:
shard_rank = cast(_remote_device, shard_md.placement).rank()
assert shard_rank is not None
shard_placement[shard_md] = (shard_rank, rank_sizes[shard_rank])
rank_sizes[shard_rank] += shard_size(shard_md)
max_rank_size = max(max_rank_size, rank_sizes[shard_rank])
gather_list: Optional[List[torch.Tensor]]
if rank == dst:
assert out is not None
gather_list = [torch.empty((max_rank_size,), device=out.device) for _ in range(world_size)]
else:
gather_list = None
with torch.no_grad():
data = torch.empty(max_rank_size, device=self._get_preferred_device())
for shard in local_shards:
src = shard.tensor.flatten()
shard_offset = shard_placement[shard.metadata][1]
data[shard_offset: shard_offset + src.numel()].copy_(src)
dist.gather(
tensor=data,
gather_list=gather_list,
dst=dst,
group=self._process_group,
)
if rank != dst:
return
# In _validate_output_tensor_for_gather, we raise if out == None and rank == dst
out = cast(torch.Tensor, out)
assert gather_list is not None
full_size = self.metadata().size
dims = len(full_size)
for shard_md in self.metadata().shards_metadata:
rank, rank_offset = shard_placement[shard_md]
tensor = gather_list[rank]
tensor = tensor[rank_offset : rank_offset + shard_size(shard_md)]
tensor = tensor.view(shard_md.shard_sizes)
out_narrow_view = out
for dim in range(dims):
out_narrow_view = out_narrow_view.narrow(
dim,
shard_md.shard_offsets[dim],
shard_md.shard_sizes[dim],
)
out_narrow_view.copy_(tensor)
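    # Illustrative sketch, not part of the original file, of the SPMD contract
    # documented above: every rank calls gather() with the same ``dst`` and
    # only the destination rank pre-allocates ``out`` with the full global
    # size, e.g.
    #
    #   out = torch.empty(st.size(), device="cuda") if rank == dst else None
    #   st.gather(dst=dst, out=out)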
def cpu(
self,
memory_format=torch.preserve_format,
process_group=None
) -> ShardedTensor:
"""
Returns a copy of this object in CPU memory.
If this ShardedTensor is already on CPU memory, then no copy is
performed and original object is returned.
        .. note:: When moving a ShardedTensor from GPU to CPU, the ShardedTensor might
            need to be managed by a different type of ProcessGroup (e.g. ProcessGroupGloo);
            it is the user's responsibility to explicitly pass in a new process_group that
            is compatible with CPU.
"""
# TODO: make this a __torch_function__ op once ShardedTensor becomes a
# torch.Tensor subclass, see https://github.com/pytorch/pytorch/issues/75402
if memory_format != torch.preserve_format and \
memory_format != torch.contiguous_format:
raise RuntimeError("Only `torch.contiguous_format` or "
"`torch.preserve_format` is supported!")
all_on_cpu = True
for meta in self.metadata().shards_metadata:
all_on_cpu &= (meta.placement.device().type == "cpu") # type: ignore[union-attr]
# if every shard is already on CPU, return the original object
if all_on_cpu:
return self
# if not, returns a copy of this object on CPU
list_shards: List[Shard] = []
# move all local shards to cpu, and change metadata
for shard in self._local_shards:
cpu_tensor = shard.tensor.cpu(memory_format=memory_format) # type: ignore[call-arg]
metadata = copy.deepcopy(shard.metadata)
metadata.placement._device = torch.device("cpu") # type: ignore[union-attr]
list_shards.append(
Shard(cpu_tensor, metadata)
)
st_meta = copy.deepcopy(self.metadata())
for meta in st_meta.shards_metadata:
if meta.placement.device().type != "cpu": # type: ignore[union-attr]
meta.placement._device = torch.device("cpu") # type: ignore[union-attr]
pg = self._process_group if process_group is None else process_group
st_cpu = ShardedTensor._init_from_local_shards_and_global_metadata(
list_shards,
sharded_tensor_metadata=st_meta,
process_group=pg,
init_rrefs=self._init_rrefs
)
return st_cpu
def cuda(
self,
device=None,
non_blocking=False,
memory_format=torch.preserve_format,
process_group=None
) -> ShardedTensor:
"""
        Returns a copy of this object in CUDA memory. If the original ShardedTensor
        is on CPU, we move the local shard to the current GPU device of each
        process in an SPMD fashion.
        If this ShardedTensor is already in CUDA memory and the local shards on each rank
        are already on the current device, we still return a new ShardedTensor object with
        new metadata, but no underlying data movement is performed.
        .. note:: When moving a ShardedTensor from CPU to GPU, the ShardedTensor might
            need to be managed by a different type of ProcessGroup (e.g. ProcessGroupNCCL);
            it is the user's responsibility to explicitly pass in a new process_group that
            is compatible with GPU.
"""
if memory_format != torch.preserve_format and \
memory_format != torch.contiguous_format:
raise RuntimeError("Only `torch.contiguous_format` or "
"`torch.preserve_format` is supported!")
if device is not None:
device = torch.device(device) if isinstance(device, str) else device
assert isinstance(device, torch.device) and device.index == torch.cuda.current_device(), \
'''Only device without device id (e.g. "cpu" or "cuda") is expected for ShardedTensor!'''
current_device = torch.device(torch.cuda.current_device())
# returns a copy of ShardedTensor on CUDA current device
list_shards: List[Shard] = []
# move all local shards to current device, and change metadata
# if local shards already on the current device, there's no
# real data movement, only the metadata are copied.
for shard in self._local_shards:
cuda_tensor = shard.tensor.cuda(
device=current_device,
non_blocking=non_blocking,
memory_format=memory_format
) # type: ignore[call-arg]
metadata = copy.deepcopy(shard.metadata)
metadata.placement._device = current_device # type: ignore[union-attr]
list_shards.append(
Shard(cuda_tensor, metadata)
)
st_meta = copy.deepcopy(self.metadata())
for meta in st_meta.shards_metadata:
if meta.placement.device().type != "cuda": # type: ignore[union-attr]
meta.placement._device = current_device # type: ignore[union-attr]
pg = self._process_group if process_group is None else process_group
# we need to use `init_from_local_shards` to communicate between ranks
# and update the sharding spec/shards metadata.
st_cuda = ShardedTensor._init_from_local_shards_and_global_metadata(
list_shards,
sharded_tensor_metadata=st_meta,
process_group=pg,
init_rrefs=self._init_rrefs
)
return st_cuda
def to(self, *args, **kwargs) -> ShardedTensor:
current_device = self._local_shards[0].tensor.device
current_dtype = self.dtype
device_to = current_device
dtype_to = current_dtype
if len(args) == 1:
if isinstance(args[0], torch.dtype):
dtype_to = args[0]
elif isinstance(args[0], torch.device):
device_to = args[0]
elif isinstance(args[0], (str, int)):
device_to = torch.device(args[0])
elif isinstance(args[0], torch.Tensor):
dtype_to = args[0].dtype
device_to = args[0].device
else:
raise RuntimeError(f"ShardedTensor.to() have wrong arguments: {args}")
elif len(args) == 2:
device_to, dtype_to = args
else:
dtype_to = kwargs.get("dtype", current_dtype)
device_to = kwargs.get("device", current_device)
device_to = torch.device(device_to) if isinstance(device_to, (str, int)) else device_to
if device_to.type == "cuda":
# if device_to set to cuda, set to current device even
# if user specify the device index.
current_idx = torch.cuda.current_device()
if device_to.index != current_idx:
import warnings
warnings.warn("ShardedTensor.to only move tensor to its current device"
"If you want to put to different device, use `reshard` instead.")
device_to = torch.device(current_idx)
copy_tensor = kwargs.get("copy", False)
non_blocking = kwargs.get("non_blocking", False)
memory_format = kwargs.get("memory_format", torch.preserve_format)
process_group = kwargs.get("process_group", None)
if not copy_tensor and dtype_to == current_dtype and device_to == current_device:
# already have correct dtype and device, return itself
return self
        # otherwise, return a copy of the ShardedTensor with the requested dtype/device
list_shards: List[Shard] = []
for shard in self._local_shards:
new_tensor = shard.tensor.to( # type: ignore[call-overload]
device=device_to,
dtype=dtype_to,
non_blocking=non_blocking,
copy=copy_tensor,
memory_format=memory_format
)
metadata = copy.deepcopy(shard.metadata)
if metadata.placement is not None:
metadata.placement._device = device_to
list_shards.append(Shard(new_tensor, metadata))
# update metadata
st_meta = copy.deepcopy(self.metadata())
st_meta.tensor_properties.dtype = dtype_to
for meta in st_meta.shards_metadata:
meta.placement._device = device_to # type: ignore[union-attr]
pg = self._process_group if process_group is None else process_group
# we need to use `init_from_local_shards` to communicate between ranks
# and update the sharding spec/shards metadata.
st_to = ShardedTensor._init_from_local_shards_and_global_metadata(
list_shards,
sharded_tensor_metadata=st_meta,
process_group=pg,
init_rrefs=self._init_rrefs
)
return st_to
@classmethod
def _init_from_local_shards(
cls,
local_shards: List[Shard],
*global_size,
process_group=None,
init_rrefs=False,
):
        # STEP 1: Validate the ShardMetadatas locally
process_group = (
process_group
if process_group is not None
else distributed_c10d._get_default_group()
)
current_rank = dist.get_rank(process_group)
world_size = dist.get_world_size(process_group)
local_sharded_tensor_metadata: Optional[ShardedTensorMetadata] = None
global_tensor_size = _flatten_tensor_size(global_size)
if len(local_shards) > 0:
local_sharded_tensor_metadata = \
build_metadata_from_local_shards(local_shards, global_tensor_size, current_rank, process_group)
# STEP 2. Validate metadata across ranks, and build a global sharded tensor
# metadata by gathering local ShardedTensorMetadata
gathered_metadatas: List[Optional[ShardedTensorMetadata]] = []
if world_size > 1:
gathered_metadatas = [None for _ in range(world_size)]
dist.all_gather_object(
gathered_metadatas,
local_sharded_tensor_metadata,
group=process_group
)
else:
gathered_metadatas = [local_sharded_tensor_metadata]
global_sharded_tensor_metadata = build_global_metadata(gathered_metadatas)
tensor_properties = global_sharded_tensor_metadata.tensor_properties
# STEP 3: Validation done, create the actual ShardedTensor and populate fields
# prepare initialization
spec = shard_spec._infer_sharding_spec_from_shards_metadata(
global_sharded_tensor_metadata.shards_metadata
)
sharded_tensor = cls.__new__(cls,
spec,
global_sharded_tensor_metadata.size,
dtype=tensor_properties.dtype,
layout=tensor_properties.layout,
pin_memory=tensor_properties.pin_memory,
requires_grad=tensor_properties.requires_grad)
sharded_tensor._prepare_init(process_group=process_group, init_rrefs=init_rrefs)
# attach local_shards to the ShardedTensor created
sharded_tensor._local_shards = local_shards
# run post initialization, i.e. map registration, rpc initialization
sharded_tensor._post_init()
return sharded_tensor
@classmethod
def _init_from_local_tensor(
cls,
local_tensor: torch.Tensor,
sharding_spec: shard_spec.ShardingSpec,
*global_size: Sequence[int],
process_group: dist.ProcessGroup = None,
init_rrefs=False,
) -> "ShardedTensor":
"""
Initialize a ShardedTensor given only one local tensor, global sharded tensor
size and sharding spec on each rank.
Args:
local_tensor (Tensor): Single tensor of local shard stored in each rank.
sharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`):
The specification describing how to shard the Tensor.
global_size (Sequence[int]): Size of the sharded tensor.
process_group (ProcessGroup, optional): The process group to aggregate on.
Default: None
init_rrefs (bool, optional): Whether or not to initialize
:class:`torch.distributed.rpc.RRef`s pointing to remote shards.
Need to initialize the RPC Framework if specified as ``True``.
Default: ``False``.
Returns:
A :class:`ShardedTensor` sharded based on the given sharding_spec with local
tensor stored in the current rank.
Examples:
>>> # All tensors below are of torch.int64 type.
>>> # We have 2 process groups, 2 ranks.
>>> # xdoctest: +SKIP
>>> tensor = torch.arange(2, dtype=torch.int64) + 1 + 2 * rank
            >>> local_tensor = torch.unsqueeze(torch.cat([tensor, tensor + 2]), 0)
>>> local_tensor
tensor([[1, 2, 3, 4]]) # Rank 0
tensor([[3, 4, 5, 6]]) # Rank 1
>>> sharding_dim = 0
>>> sharding_spec = ChunkShardingSpec(
dim=sharding_dim,
placements=[
"rank:0/cuda:0",
"rank:1/cuda:1",
],
)
>>> st = ShardedTensor._init_from_local_tensor(local_tensor, sharding_spec, [2, 4])
>>> st
ShardedTensor(
ShardedTensorMetadata(
shards_metadata=[
ShardMetadata(shard_offsets=[0, 0], shard_sizes=[1, 4], placement=rank:0/cuda:0),
ShardMetadata(shard_offsets=[1, 0], shard_sizes=[1, 4], placement=rank:1/cuda:1),
],
size=torch.Size([2, 4])
)
>>> st.local_tensor()
tensor([1, 2, 3, 4]) # Rank 0
tensor([3, 4, 5, 6]) # Rank 1
        Warning: This API is experimental and subject to change. It does not perform
            full cross-rank validation; we only validate the local shard on the
            current rank and fully rely on the user to ensure the local tensor is
            sharded according to the sharding spec.
"""
if not local_tensor.is_contiguous():
raise ValueError('local_tensor is not a contiguous Tensor.')
global_tensor_size = _flatten_tensor_size(global_size)
tensor_properties = TensorProperties(
dtype=local_tensor.dtype,
layout=local_tensor.layout,
requires_grad=local_tensor.requires_grad,
memory_format=torch.contiguous_format,
pin_memory=local_tensor.is_pinned())
sharded_tensor_metadata = sharding_spec.build_metadata(
global_tensor_size,
tensor_properties
)
process_group = (
process_group
if process_group is not None
else distributed_c10d._get_default_group()
)
current_rank = dist.get_rank(process_group)
local_shards: List[Shard] = []
for shard_metadata in sharded_tensor_metadata.shards_metadata:
rank, device = _parse_and_validate_remote_device(process_group, shard_metadata.placement)
if rank == current_rank:
local_shards.append(Shard(local_tensor, shard_metadata))
        # TODO: figure out how the API should behave when some ranks have no shard
# see https://github.com/pytorch/pytorch/issues/7313
return ShardedTensor._init_from_local_shards_and_global_metadata(
local_shards,
sharded_tensor_metadata,
process_group=process_group,
init_rrefs=init_rrefs,
sharding_spec=sharding_spec,
)
@classmethod
def _init_from_local_shards_and_global_metadata( # type: ignore[override]
cls,
local_shards: List[Shard],
sharded_tensor_metadata: ShardedTensorMetadata,
process_group=None,
init_rrefs=False,
sharding_spec=None,
) -> "ShardedTensor":
"""
Initialize a ShardedTensor with local shards and a global
ShardedTensorMetadata built on each rank.
        Warning: This API is experimental and subject to change. It does
            not perform cross-rank validation, and fully relies on the user
            for the correctness of ``sharded_tensor_metadata`` on each rank.
        """
process_group = (
process_group
if process_group is not None
else distributed_c10d._get_default_group()
)
current_rank = dist.get_rank(process_group)
shards_metadata = sharded_tensor_metadata.shards_metadata
local_shard_metadatas = []
# collect local shard metadatas from the global sharded_tensor_metadata
for shard_metadata in shards_metadata: # type: ignore[attr-defined]
rank, local_device = _parse_and_validate_remote_device(process_group, shard_metadata.placement)
if current_rank == rank:
local_shard_metadatas.append(shard_metadata)
if len(local_shards) != len(local_shard_metadatas):
raise RuntimeError(
f'Number of local shards ({len(local_shards)}) does not match number of local '
f'shards metadata in sharded_tensor_metadata ({len(local_shard_metadatas)}) '
f'on rank ({current_rank}) '
)
sharded_tensor = super(
ShardedTensor, cls
)._init_from_local_shards_and_global_metadata(
local_shards, sharded_tensor_metadata, sharding_spec=sharding_spec
)
sharded_tensor._prepare_init(process_group=process_group, init_rrefs=init_rrefs)
# run post initialization, i.e. map registration, rpc initialization
sharded_tensor._post_init()
return sharded_tensor
def sharding_spec(self) -> shard_spec.ShardingSpec:
"""
Returns the ShardingSpec for the tensor.
"""
return self._sharding_spec
def reshard(self, resharding_spec: shard_spec.ShardingSpec) -> ShardedTensor:
"""
        Reshard a sharded tensor given the ``resharding_spec``. For now, we only support
        a single local shard.
        If ``resharding_spec`` is the same as the original one, this becomes a no-op.
        If ``resharding_spec`` only shares the same sharding dim with the original one,
        we swap the local shards directly.
For more generic cases, we merge different shards across different ranks and split
the local shards based on the ``resharding_spec`` via `all_to_all` collective API.
Args:
resharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`): The
specification describing how the tensor is sharded.
Returns:
A :class:`ShardedTensor` object whose local shards are resharded.
Examples:
            >>> # We have 4 ranks.
>>> # xdoctest: +SKIP
>>> tensor = torch.arange(4, dtype=torch.int64) + 1 + 2 * rank
>>> tensor = torch.stack([tensor, tensor])
>>> tensor
tensor([[1, 2, 3, 4], [1, 2, 3, 4]]) # Rank 0
tensor([[3, 4, 5, 6], [3, 4, 5, 6]]) # Rank 1
tensor([[5, 6, 7, 8], [5, 6, 7, 8]]) # Rank 2
tensor([[7, 8, 9, 10], [7, 8, 9, 10]]) # Rank 3
>>> sharding_dim = 0
>>> spec = ChunkShardingSpec(
dim=sharding_dim,
placements=[
"rank:0/cuda:0",
"rank:1/cuda:1",
"rank:2/cuda:2",
"rank:3/cuda:3",
],
)
>>> current_offsets = [0] * 2
>>> current_offsets[0] = rank * 2
>>> shard_metadata = ShardMetadata(
shard_offsets=copy.deepcopy(current_offsets),
shard_sizes=tensor.size(),
placement=spec.placements[rank],
)
>>> local_shards = [
Shard(
tensor=tensor,
metadata=shard_metadata,
)
]
>>> st = ShardedTensor._init_from_local_shards(local_shards, tensor.size())
>>> sharding_dim = 1
>>> resharding_spec = ChunkShardingSpec(
dim=sharding_dim,
placements=[
"rank:0/cuda:0",
"rank:1/cuda:1",
"rank:2/cuda:2",
"rank:3/cuda:3",
],
)
>>> st.reshard(resharding_spec)
>>> tensor = st.local_shards()[0].tensor
>>> tensor
tensor([[1], [1], [3], [3], [5], [5], [7], [7]]) # Rank 0
tensor([[2], [2], [4], [4], [6], [6], [8], [8]]) # Rank 1
tensor([[3], [3], [5], [5], [7], [7], [9], [9]]) # Rank 2
tensor([[4], [4], [6], [6], [8], [8], [10], [10]]) # Rank 3
"""
if (
not isinstance(resharding_spec, shard_spec.ChunkShardingSpec) or
not isinstance(self._sharding_spec, shard_spec.ChunkShardingSpec)
):
raise NotImplementedError("Only ChunkShardingSpec supported for reshard.")
if (len(self.local_shards()) != 1):
raise NotImplementedError("Only single local shard supported for reshard.")
if self._sharding_spec.dim == resharding_spec.dim: # type: ignore[attr-defined]
if self._sharding_spec.placements == resharding_spec.placements: # type: ignore[attr-defined]
return self
else:
local_shards, shards_metadata = reshuffle_local_shard(
self.local_tensor(),
self.size(), # type: ignore[arg-type]
self._sharding_spec,
resharding_spec,
self._process_group,
)
else:
local_shards, shards_metadata = reshard_local_shard(
self.local_tensor(),
self.size(), # type: ignore[arg-type]
self._sharding_spec,
resharding_spec,
self._process_group,
)
self._local_shards = local_shards
self._metadata.shards_metadata = shards_metadata
self._sharding_spec = resharding_spec
return self
def local_tensor(self) -> torch.Tensor:
"""
        Return the local tensor for a sharded_tensor. For now, we only support a single local shard.
Returns:
A :class:`torch.Tensor` of the local shard.
"""
if len(self.local_shards()) != 1:
raise NotImplementedError("Only single local shard is supported.")
return self.local_shards()[0].tensor
@classmethod
def __torch_function__(cls, func, types, args=(), kwargs=None):
def dispatch(st: ShardedTensor, func: Callable):
# Dispatch to custom user provided op first if it exists.
if func in _CUSTOM_SHARDED_OPS:
return _CUSTOM_SHARDED_OPS[func](types, args, kwargs, st._process_group)
# Dispatch to custom sharding spec op if it has one.
if _has_custom_op(st._sharding_spec, func):
return _dispatch_custom_op(
st._sharding_spec,
func,
types,
args,
kwargs,
st._process_group
)
if func in _SHARDED_OPS:
return _SHARDED_OPS[func](types, args, kwargs, st._process_group)
raise RuntimeError(
f"torch function '{func.__name__}', with args: {args} and "
f"kwargs: {kwargs} not supported for ShardedTensor!")
# Find ShardedTensor instance to get process_group and sharding_spec.
st_instance = None
def find_sharded_tensor(e):
nonlocal st_instance
if st_instance is None and isinstance(e, ShardedTensor):
st_instance = e
tree_map(find_sharded_tensor, args)
tree_map(find_sharded_tensor, kwargs)
if st_instance is not None:
return dispatch(st_instance, func)
raise RuntimeError(
f"torch function '{func.__name__}', with args: {args} and "
f"kwargs: {kwargs} not supported for ShardedTensor!")
def is_pinned(self) -> bool: # type: ignore[override]
"""
Returns True if the sharded tensor (each local shard) resides in pinned memory.
"""
return self._metadata.tensor_properties.pin_memory
def _register_remote_shards(self, remote_shards: List[rpc.RRef[Shard]], rpc_rank: int):
self._remote_shards[rpc_rank] = remote_shards
def remote_shards(self) -> Dict[int, List[rpc.RRef[Shard]]]:
"""
        Returns a Dict[int, List[RRef]] with keys being the RPC rank and values
        being lists of RRefs to the shards on that rank. The RPC framework needs
        to be initialized for this functionality.
        Raises an exception if the ShardedTensor was created with ``init_rrefs=False``.
"""
if not self._init_rrefs:
raise RuntimeError(
'ShardedTensor created with init_rrefs=False, no RRefs to remote shards available'
)
return self._remote_shards
def __hash__(self):
return id(self)
def __repr__(self):
return f'ShardedTensor({self._metadata})'
@dataclass
class ProcessGroupState:
"""
        State used for serialization and deserialization of the process group.
"""
local_rank: int
global_rank: int
local_world_size: int
global_world_size: int
def __getstate__(self):
pg_state = ShardedTensor.ProcessGroupState(
distributed_c10d.get_rank(self._process_group),
distributed_c10d.get_rank(),
distributed_c10d.get_world_size(self._process_group),
distributed_c10d.get_world_size(),
)
return self._local_shards, self._metadata, pg_state, self._sharding_spec, self._init_rrefs
def __setstate__(self, state):
self._sharded_tensor_id = None
if not distributed_c10d.is_initialized():
raise RuntimeError(
'Need to initialize default process group using '
'"init_process_group" before loading ShardedTensor')
self._local_shards, self._metadata, pg_state, self._sharding_spec, self._init_rrefs = state
# Setup process group
from torch.distributed._shard.api import _get_current_process_group
self._process_group = _get_current_process_group()
# Validate process group.
local_rank = distributed_c10d.get_rank(self._process_group)
if pg_state.local_rank != local_rank:
raise RuntimeError(
f'Local rank at save time was {pg_state.local_rank}, but at '
f'load time was {local_rank}')
global_rank = distributed_c10d.get_rank()
if pg_state.global_rank != global_rank:
raise RuntimeError(
f'Global rank at save time was {pg_state.global_rank}, but at '
f'load time was {global_rank}')
local_world_size = distributed_c10d.get_world_size(self._process_group)
if pg_state.local_world_size != local_world_size:
raise RuntimeError(
f'Local world size at save time was {pg_state.local_world_size}, '
f'but at load time was {local_world_size}')
global_world_size = distributed_c10d.get_world_size()
if pg_state.global_world_size != global_world_size:
raise RuntimeError(
f'Global world size at save time was {pg_state.global_world_size}, '
f'but at load time was {global_world_size}')
self._post_init()
def _create_tensor_from_params(*size, local_device, tensor_properties: TensorProperties):
""" Helper to construct tensor from size, device and common params. """
dtype = tensor_properties.dtype
layout = tensor_properties.layout
requires_grad = tensor_properties.requires_grad
memory_format = tensor_properties.memory_format
pin_memory = tensor_properties.pin_memory
return torch.empty(
*size, dtype=dtype, layout=layout,
device=local_device, requires_grad=requires_grad,
memory_format=memory_format, pin_memory=pin_memory
)
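# A minimal usage sketch of the ``gather`` API above. It assumes an initialized
# process group with one GPU per rank (rank ``r`` using ``cuda:r``); the spec,
# sizes, and function name are illustrative only, not part of the library API.
def _example_gather_to_rank0():
    import torch.distributed as dist
    from torch.distributed._shard import sharded_tensor
    from torch.distributed._shard.sharding_spec import ChunkShardingSpec
    spec = ChunkShardingSpec(
        dim=0,
        placements=[f"rank:{r}/cuda:{r}" for r in range(dist.get_world_size())],
    )
    st = sharded_tensor.rand(spec, 8, 4)
    # ``out`` must be provided only on the destination rank, with the overall
    # (un-sharded) size of the sharded tensor.
    out = torch.empty(8, 4, device="cuda:0") if dist.get_rank() == 0 else None
    st.gather(dst=0, out=out)
    return out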
| pytorch-master | torch/distributed/_shard/sharded_tensor/api.py |
from dataclasses import dataclass
from typing import List
import torch
from torch.distributed._shard.metadata import ShardMetadata
from torch.distributed.remote_device import _remote_device
@dataclass
class Shard(object):
"""
Container which holds the data for a shard as a Tensor and also
the associated metadata for that shard.
Args:
tensor(torch.Tensor): Local tensor for the shard.
metadata(:class `torch.distributed._shard.sharded_tensor.ShardMetadata`):
The metadata for the shard, including offsets, lengths and device placement.
"""
__slots__ = ['tensor', 'metadata']
tensor: torch.Tensor
metadata: ShardMetadata
def __post_init__(self):
# verification between local tensor and metadata
if list(self.tensor.size()) != self.metadata.shard_sizes:
            raise ValueError(
                "Shard tensor size does not match metadata.shard_sizes! "
                f"Found shard tensor size: {list(self.tensor.size())}, "
                f"metadata.shard_sizes: {self.metadata.shard_sizes}, "
            )
placement_device = self.metadata.placement
if placement_device is not None and placement_device.device() != self.tensor.device:
raise ValueError(
f"Local shard tensor device does not match with local Shard's placement! "
f"Found local shard tensor device: {self.tensor.device}, "
f"local shard metadata placement device: {placement_device.device()}"
)
@classmethod
def from_tensor_and_offsets(cls, tensor: torch.Tensor, shard_offsets: List[int], rank: int):
"""
Creates a Shard of a ShardedTensor from a local torch.Tensor, shard_offsets and rank.
Args:
tensor(torch.Tensor): Local tensor for the shard.
            shard_offsets(List[int]): List of integers specifying the offset
                of the shard on each dimension.
            rank(int): The rank for the shard.
"""
shard_sizes = list(tensor.size())
placement = _remote_device(f"rank:{rank}/{str(tensor.device)}")
shard_meta = ShardMetadata(
shard_offsets=shard_offsets,
shard_sizes=shard_sizes,
placement=placement
)
return Shard(tensor, shard_meta)
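# A minimal usage sketch of ``Shard.from_tensor_and_offsets``: build a shard from
# a local tensor, its offsets in the global tensor, and the owning rank. The
# sizes, offsets, and function name here are illustrative only.
def _example_build_shard(rank: int) -> Shard:
    # A 2x4 local tensor that covers rows [rank * 2, rank * 2 + 2) of the global tensor.
    local_tensor = torch.zeros(2, 4)
    return Shard.from_tensor_and_offsets(local_tensor, shard_offsets=[rank * 2, 0], rank=rank)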
| pytorch-master | torch/distributed/_shard/sharded_tensor/shard.py |
import collections.abc
import copy
from typing import Optional, List, Sequence
import torch
from torch.distributed import distributed_c10d
from torch.distributed import rpc
from torch.distributed._shard.sharding_spec._internals import (
check_tensor,
validate_non_overlapping_shards_metadata,
)
from torch.distributed._shard.metadata import ShardMetadata
from .metadata import TensorProperties, ShardedTensorMetadata
from .shard import Shard
def _parse_and_validate_remote_device(pg, remote_device):
if remote_device is None:
raise ValueError("remote device is None")
worker_name = remote_device.worker_name()
rank = remote_device.rank()
device = remote_device.device()
# Validate rank, skip validation if rank is not part of process group.
if not distributed_c10d._rank_not_in_group(pg):
if rank is not None and (rank < 0 or rank >= distributed_c10d.get_world_size(pg)):
raise ValueError(f'Invalid rank: {rank}')
if worker_name is not None:
if not rpc._is_current_rpc_agent_set():
raise RuntimeError(f'RPC framework needs to be initialized for using worker names: {worker_name}')
workers = rpc._get_current_rpc_agent().get_worker_infos()
for worker in workers:
if worker.name == worker_name:
return worker.id, device
raise ValueError(f'Invalid worker name: {worker_name}')
return rank, device
def _validate_output_tensor_for_gather(
my_rank: int,
dst_rank: int,
size: torch.Size,
dst_tensor: Optional[torch.Tensor],
) -> None:
if dst_rank == my_rank:
if dst_tensor is None:
raise ValueError(
f"Argument ``dst_tensor`` must be specified on destination rank {dst_rank}"
)
        if tuple(size) != tuple(dst_tensor.size()):
            raise ValueError(
                f"Argument ``dst_tensor`` has size {tuple(dst_tensor.size())}, "
                f"but should be {tuple(size)}"
            )
    elif dst_tensor is not None:
raise ValueError(
"Argument ``dst_tensor`` must NOT be specified "
"on non-destination ranks."
)
def _flatten_tensor_size(size) -> torch.Size:
"""
Checks if tensor size is valid, then flatten/return a torch.Size object.
"""
if len(size) == 1 and isinstance(size[0], collections.abc.Sequence):
dims = list(*size)
else:
dims = list(size)
for dim in dims:
if not isinstance(dim, int):
raise TypeError(f'size has to be a sequence of ints, found: {dims}')
return torch.Size(dims)
def _raise_if_mismatch(expected, actual, prop_name, ranks, is_local=True):
if is_local:
assert isinstance(ranks, int)
if expected != actual:
raise ValueError(f"Local shards' tensor {prop_name} property need to be the same on rank:{ranks}! "
f"Found one local shard tensor {prop_name}={expected}, "
f"the other local shard tensor {prop_name}={actual}.")
else:
        # cross-rank comparison failure; the ranks list should contain exactly two ranks
assert len(ranks) == 2
if expected != actual:
raise ValueError(f"ShardedTensor {prop_name} property does not match from different ranks! "
f"Found {prop_name}={expected} on rank:{ranks[0]}, "
f"and {prop_name}={actual} on rank:{ranks[1]}.")
def build_metadata_from_local_shards(
local_shards: List[Shard],
global_size: torch.Size,
current_rank: int,
pg: distributed_c10d.ProcessGroup
) -> ShardedTensorMetadata:
assert len(local_shards) > 0, "must have local shards!"
local_shard_metadatas: List[ShardMetadata] = []
first_shard_dtype = local_shards[0].tensor.dtype
first_shard_layout = local_shards[0].tensor.layout
first_shard_requires_grad = local_shards[0].tensor.requires_grad
first_shard_is_pinned = local_shards[0].tensor.is_pinned()
# 1). Validate local tensors and associated metadatas
for i, local_shard in enumerate(local_shards):
local_shard_tensor = local_shard.tensor
local_shard_meta = local_shard.metadata
local_shard_metadatas.append(local_shard_meta)
rank, local_device = _parse_and_validate_remote_device(pg, local_shard_meta.placement)
if local_shard_tensor.layout != torch.strided or local_shard_tensor.layout != first_shard_layout:
raise ValueError(
f'Only torch.strided layout is currently supported, but found '
f'{local_shard_tensor.layout} on rank:{current_rank}!'
)
if not local_shard_tensor.is_contiguous():
raise ValueError('Only torch.contiguous_format memory_format is currently supported!')
if rank != current_rank:
raise ValueError(
f"Local shard metadata's rank does not match with the rank in its process group! "
f'Found current rank in the process group: {current_rank}, '
f"local ShardMetadata placement's rank: {rank}"
)
if local_shard_tensor.device != local_device:
raise ValueError(
f"Local shard tensor device does not match with local Shard's placement! "
f"Found local shard tensor device: {local_shard_tensor.device}, "
f"local shard metadata placement device: {local_device}"
)
_raise_if_mismatch(local_shard_meta.shard_sizes, list(local_shard_tensor.size()), "size", current_rank)
_raise_if_mismatch(local_shard_tensor.is_pinned(), first_shard_is_pinned, "pin_memory", current_rank)
_raise_if_mismatch(local_shard_tensor.dtype, first_shard_dtype, "dtype", current_rank)
_raise_if_mismatch(local_shard_tensor.requires_grad, first_shard_requires_grad, "requires_grad", current_rank)
# 2). Build a "local" ShardedTensorMetadata with all local shards on this rank, then
# do all_gather to collect local_sharded_tensor_metadata from all ranks
local_tensor_properties = TensorProperties(
dtype=first_shard_dtype,
layout=first_shard_layout,
requires_grad=first_shard_requires_grad,
memory_format=torch.contiguous_format,
pin_memory=first_shard_is_pinned
)
local_sharded_tensor_metadata = ShardedTensorMetadata(
shards_metadata=local_shard_metadatas,
size=global_size,
tensor_properties=local_tensor_properties)
return local_sharded_tensor_metadata
def build_global_metadata(gathered_metadatas: Sequence[Optional[ShardedTensorMetadata]]):
global_sharded_tensor_metadata = None
global_metadata_rank = 0
for rank, rank_metadata in enumerate(gathered_metadatas):
if rank_metadata is None:
continue
if global_sharded_tensor_metadata is None:
global_sharded_tensor_metadata = copy.deepcopy(rank_metadata)
global_metadata_rank = rank
else:
_raise_if_mismatch(global_sharded_tensor_metadata.size,
rank_metadata.size,
"global_size",
[global_metadata_rank, rank],
is_local=False)
# don't need to check layout and memory format as we already checked in local shards validation stage
_raise_if_mismatch(global_sharded_tensor_metadata.tensor_properties.dtype,
rank_metadata.tensor_properties.dtype,
"dtype",
[global_metadata_rank, rank],
is_local=False)
_raise_if_mismatch(global_sharded_tensor_metadata.tensor_properties.requires_grad,
rank_metadata.tensor_properties.requires_grad,
"requires_grad",
[global_metadata_rank, rank],
is_local=False)
_raise_if_mismatch(global_sharded_tensor_metadata.tensor_properties.pin_memory,
rank_metadata.tensor_properties.pin_memory,
"pin_memory",
[global_metadata_rank, rank],
is_local=False)
# pass all validations, extend shards metadata
global_sharded_tensor_metadata.shards_metadata.extend(rank_metadata.shards_metadata)
if global_sharded_tensor_metadata is not None:
# check if shards_metadata have overlap shards
validate_non_overlapping_shards_metadata(global_sharded_tensor_metadata.shards_metadata)
# check if the shards_metadata is compatible with global size of the sharded tensor.
check_tensor(global_sharded_tensor_metadata.shards_metadata, global_sharded_tensor_metadata.size)
else:
raise ValueError("ShardedTensor have no local shards on all ranks!")
return global_sharded_tensor_metadata
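# A small sketch of how ``_flatten_tensor_size`` normalizes sizes: it accepts either
# a flat sequence of ints or a single nested sequence, mirroring ``torch.empty(*size)``
# style calling conventions. The function name here is illustrative only.
def _example_flatten_size():
    assert _flatten_tensor_size((2, 3, 4)) == torch.Size([2, 3, 4])
    assert _flatten_tensor_size(([2, 3, 4],)) == torch.Size([2, 3, 4])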
| pytorch-master | torch/distributed/_shard/sharded_tensor/utils.py |
import torch
from torch.distributed._shard.sharded_tensor import (
_sharded_op_impl,
)
# This is used by `_apply()` within module.py to set new
# parameters after applying a certain method. We should follow
# the future behavior of overwriting the existing tensor
# instead of doing an in-place change using `.data = `.
@_sharded_op_impl(torch._has_compatible_shallow_copy_type)
def tensor_has_compatible_shallow_copy_type(types, args=(), kwargs=None, pg=None):
return False
| pytorch-master | torch/distributed/_shard/sharded_tensor/_ops/misc_ops.py |
import copy
import torch
from torch.distributed._shard.sharded_tensor import (
_sharded_op_impl,
Shard,
ShardedTensor,
)
from ._common import (
_register_sharded_op_on_local_shards,
)
from torch.distributed._shard.common_op_utils import _register_default_op
# Tensor properties access
_register_default_op(torch.Tensor.shape.__get__, _sharded_op_impl) # type: ignore[attr-defined]
_register_default_op(torch.Tensor.dtype.__get__, _sharded_op_impl) # type: ignore[attr-defined]
_register_default_op(torch.Tensor.layout.__get__, _sharded_op_impl) # type: ignore[attr-defined]
_register_default_op(torch.Tensor.size, _sharded_op_impl)
_register_default_op(torch.Tensor.dim, _sharded_op_impl)
_register_default_op(torch.Tensor.ndim.__get__, _sharded_op_impl) # type: ignore[attr-defined]
_register_default_op(torch.Tensor.is_contiguous, _sharded_op_impl)
_register_default_op(torch.Tensor.contiguous, _sharded_op_impl)
# __reduce_ex__ to dispatch to get_state/set_state
_register_default_op(torch.Tensor.__reduce_ex__, _sharded_op_impl)
# autograd related properties
_register_default_op(torch.Tensor.requires_grad.__get__, _sharded_op_impl) # type: ignore[attr-defined]
# TODO: set grad with a ShardedTensor that consists of all local grads
_register_default_op(torch.Tensor.grad.__get__, _sharded_op_impl) # type: ignore[union-attr]
_register_default_op(torch.Tensor.grad_fn.__get__, _sharded_op_impl) # type: ignore[attr-defined]
_register_default_op(torch.Tensor.is_leaf.__get__, _sharded_op_impl) # type: ignore[attr-defined]
# The device property is ambiguous: from a global perspective,
# ShardedTensor.device consists of multiple devices (which might even span hosts).
# We choose to return the current device of the local tensor to represent
# the device property on each rank.
@_sharded_op_impl(torch.Tensor.device.__get__)
def tensor_device(types, args=(), kwargs=None, pg=None):
self_st = args[0]
# Validate types
if not isinstance(self_st, ShardedTensor):
raise TypeError("input needs to be a ShardedTensor")
return self_st.local_shards()[0].tensor.device
def sharded_type_as_check(*args, **kwargs):
"""
Perform extra checks for the sharded_type_as op such as the input needs to
be either a Tensor or ShardedTensor.
Args: same as ``torch.Tensor.type_as``.
Return: None
"""
if len(args) < 2:
raise ValueError("Needs to give a tensor to cast type as!")
if not isinstance(args[1], torch.Tensor) and not isinstance(args[1], ShardedTensor):
raise ValueError("Needs to give a Tensor or ShardedTensor to cast type as!")
def same_dtype(*args, **kwargs):
"""
When the dtype is the same, return the original ShardedTensor.
Args: same as ``torch.Tensor.type_as``.
Return (bool): Whether to return early or not.
"""
return args[0].dtype == args[1].dtype
def sharded_type_as(args, kwargs, pg):
"""
Handles ``__torch_function__`` dispatch for the ``torch.Tensor.type_as`` op.
Args: same as ``torch.Tensor.type_as``.
Return:
new_local_shards (List[Shard]): Local shards for the new sharded tensor.
st_meta (ShardedTensorMetadata): Metadata of the new sharded tensor.
"""
st = args[0]
tensor = args[1]
if isinstance(tensor, ShardedTensor):
tensor = tensor.local_tensor()
new_local_shards = []
for shard in st.local_shards():
new_local_shards.append(Shard(shard.tensor.type_as(tensor), shard.metadata))
st_meta = copy.deepcopy(st._metadata)
st_meta.tensor_properties.dtype = tensor.dtype
return new_local_shards, st_meta
_register_sharded_op_on_local_shards(
torch.Tensor.type_as,
early_stop_func=same_dtype,
extra_check=sharded_type_as_check,
customized_func=sharded_type_as,
)
def sharded_deepcopy(args, kwargs, pg):
# NOTE: we directly implement deepcopy magic method
# instead of using the default tensor.__deepcopy__
# and implement clone(). This is because the default
# tensor deepcopy copies every attribute, but the
# process_group in ShardedTensor cannot be deep copied.
self_st = args[0]
new_local_shards = copy.deepcopy(self_st.local_shards())
new_metadata = copy.deepcopy(self_st.metadata())
return new_local_shards, new_metadata
_register_sharded_op_on_local_shards(
torch.Tensor.__deepcopy__,
customized_func=sharded_deepcopy,
)
@_sharded_op_impl(torch.Tensor.copy_)
def sharded_inplace_copy(types, args, kwargs, pg):
    # NOTE: in-place ops don't need to rewrap
kwargs = {} if kwargs is None else kwargs
self_st = args[0]
new_st = args[1]
nonblocking = kwargs.get("non_blocking", False)
for local_shard, new_shard in zip(self_st.local_shards(), new_st.local_shards()):
if local_shard.metadata != new_shard.metadata:
raise RuntimeError(
"inplace copy can only happen between two ShardedTensor with same metadata!"
)
for local_shard, new_shard in zip(self_st.local_shards(), new_st.local_shards()):
local_shard.tensor.copy_(new_shard.tensor, nonblocking)
return self_st
def sharded_clone(args, kwargs, pg):
self_st = args[0]
desire_memory_format = kwargs.get("memory_format", None)
if desire_memory_format and desire_memory_format != torch.preserve_format:
raise RuntimeError("Only support torch.preserve_format for ShardedTensor!")
cloned_local_shards = [
Shard(
local_shard.tensor.clone(memory_format=desire_memory_format),
metadata=copy.deepcopy(local_shard.metadata),
)
for local_shard in self_st.local_shards()
]
new_metadata = copy.deepcopy(self_st.metadata())
return cloned_local_shards, new_metadata
_register_sharded_op_on_local_shards(
torch.Tensor.clone,
customized_func=sharded_clone,
)
def sharded_detach(args, kwargs, pg):
self_st = args[0]
detached_local_shards = [
Shard(
local_shard.tensor.detach(),
metadata=copy.deepcopy(local_shard.metadata),
)
for local_shard in self_st.local_shards()
]
new_metadata = copy.deepcopy(self_st.metadata())
new_metadata.tensor_properties.requires_grad = False
return detached_local_shards, new_metadata
_register_sharded_op_on_local_shards(
torch.Tensor.detach,
customized_func=sharded_detach,
)
@_sharded_op_impl(torch.Tensor.requires_grad_)
def tensor_requires_grad_set(types, args=(), kwargs=None, pg=None):
self_st = args[0]
# Validate types
if not isinstance(self_st, ShardedTensor):
raise TypeError("input needs to be a ShardedTensor")
if kwargs is None:
kwargs = {}
requires_grad = args[1] if len(args) > 1 else kwargs.get("requires_grad", True)
if requires_grad == self_st.requires_grad:
return self_st
for local_shard in self_st.local_shards():
local_shard.tensor.requires_grad_(requires_grad)
# update the wrapper class property
with torch._C.DisableTorchFunction():
self_st.requires_grad_(requires_grad)
# update the metadata in the meanwhile
self_st._metadata.tensor_properties.requires_grad = requires_grad
return self_st
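# A minimal usage sketch of the registrations above: common ``torch.Tensor`` methods
# dispatch shard-by-shard on a ShardedTensor via ``__torch_function__``. It assumes an
# initialized process group with one GPU per rank; the spec, sizes, and function name
# are illustrative only.
def _example_tensor_ops():
    import torch.distributed as dist
    from torch.distributed._shard import sharded_tensor
    from torch.distributed._shard.sharding_spec import ChunkShardingSpec
    spec = ChunkShardingSpec(
        dim=0,
        placements=[f"rank:{r}/cuda:{r}" for r in range(dist.get_world_size())],
    )
    st = sharded_tensor.rand(spec, 16, 8)
    detached = st.detach()       # handled by ``sharded_detach`` above
    cloned = st.clone()          # handled by ``sharded_clone`` above
    replica = copy.deepcopy(st)  # handled by ``sharded_deepcopy`` above
    st.requires_grad_(True)      # handled by ``tensor_requires_grad_set`` above
    return detached, cloned, replica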
| pytorch-master | torch/distributed/_shard/sharded_tensor/_ops/tensor_ops.py |
import functools
from torch.distributed._shard.sharded_tensor import (
_sharded_op_impl,
Shard,
ShardedTensor,
)
from torch.distributed._shard.common_op_utils import _basic_validation
def _sharded_op_common(op, early_stop_func, extra_check):
"""
    Inject sharded tensor op registration with common logic executed before
    the op-specific behavior runs on either the local shards or a local tensor.
Example::
>>> # xdoctest: +SKIP("Undefined variables")
>>> op = torch.transpose
>>> @_sharded_op_impl(op)
>>> @_sharded_op_common(op, early_stop_func, extra_check)
>>> def sharded_tensor_op(types, args, kwargs, process_group):
>>> ...
>>>
>>> st = sharded_tensor.rand(32, 16)
>>> st.transpose(1, 2)
>>> # This will call '_sharded_op_common'
Args:
op: The op to be registered and applied to all shards of the st.
early_stop_func (Callable, optional): the func for early stop.
Default: if ``None``, no early stop.
extra_check (Callable, optional): the func for extra condition check.
Default: if ``None``, no extra check.
    Return:
        func (Callable): decorator that wraps a sharded op implementation
            (e.g. for torch.transpose) with the common validation and early-stop logic.
"""
def decorator_sharded_func(wrapped_func):
@functools.wraps(wrapped_func)
def wrapper(types, args=(), kwargs=None, pg=None):
_basic_validation(op, args, kwargs)
st = args[0]
if kwargs is None:
kwargs = {}
if extra_check:
extra_check(*args, **kwargs)
if early_stop_func:
early_stop = early_stop_func(*args, **kwargs)
if early_stop:
return st
return wrapped_func(types, args, kwargs, pg)
return wrapper
return decorator_sharded_func
def _register_sharded_op_on_local_shards(
op, early_stop_func=None, extra_check=None, customized_func=None
):
"""
Handles ``__torch_function__`` dispatch for ops which are performed on
each shard of the sharded tensor such as elementwise op like
``torch.nn.functional.gelu`` or ``torch.nn.functional.relu``.
For more complicated ops, a customized func can be used to generate
the new shards and sharded tensor size.
This function expects that the original ShardingSpec for the ShardedTensor
is preserved irrespective of whether or not a customized function is used.
Args:
op: The op to be registered and applied to all shards of the st.
early_stop_func (Callable, optional): the func for early stop.
Default: if ``None``, no early stop.
extra_check (Callable, optional): the func for extra condition check.
Default: if ``None``, no extra check.
customized_func (Callable, optional): the func for customized logic
to generate new shards and sharded tensor size.
Default: if ``None``, we simply lower to the real op call with
all local shards of the st.
Return:
func (Callable): registered implementation for sharded op for
``__torch_function__`` dispatch.
"""
@_sharded_op_impl(op)
@_sharded_op_common(op, early_stop_func, extra_check)
def sharded_tensor_op_on_local_shards(types, args=(), kwargs=None, pg=None):
st = args[0]
st_metadata = st.metadata()
local_shards = st.local_shards()
local_shards_new = []
if customized_func:
local_shards_new, st_metadata = customized_func(args, kwargs, pg)
else:
for local_shard in local_shards:
args = (local_shard.tensor, *args[1:])
local_shards_new.append(
Shard(op(*args, **kwargs), local_shard.metadata)
)
return ShardedTensor._init_from_local_shards_and_global_metadata(
local_shards_new,
st_metadata,
process_group=pg,
init_rrefs=st._init_rrefs,
sharding_spec=st.sharding_spec()
)
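# A minimal sketch of how a new shard-wise op could be registered with the helper
# above. ``torch.sigmoid`` is an illustrative choice (see elementwise_ops.py for the
# ops registered by default); calling this function performs the registration.
def _example_register_sigmoid():
    import torch
    _register_sharded_op_on_local_shards(torch.sigmoid)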
| pytorch-master | torch/distributed/_shard/sharded_tensor/_ops/_common.py |
import torch.distributed._shard.sharded_tensor._ops.chunk
import torch.distributed._shard.sharded_tensor._ops.elementwise_ops
import torch.distributed._shard.sharded_tensor._ops.math_ops
import torch.distributed._shard.sharded_tensor._ops.matrix_ops
import torch.distributed._shard.sharded_tensor._ops.tensor_ops
import torch.distributed._shard.sharded_tensor._ops.misc_ops
from .binary_cmp import equal, allclose
from .init import kaiming_uniform_, normal_, uniform_, constant_
# Import all ChunkShardingSpec ops
from torch.distributed._shard.sharding_spec.chunk_sharding_spec_ops.linear import sharded_linear
from torch.distributed._shard.sharding_spec.chunk_sharding_spec_ops.embedding import sharded_embedding
from torch.distributed._shard.sharding_spec.chunk_sharding_spec_ops.embedding_bag import sharded_embedding_bag
from torch.distributed._shard.sharding_spec.chunk_sharding_spec_ops.softmax import sharded_softmax
import torch.distributed._shard.sharding_spec.chunk_sharding_spec_ops.math_ops
import torch.distributed._shard.sharding_spec.chunk_sharding_spec_ops.matrix_ops
| pytorch-master | torch/distributed/_shard/sharded_tensor/_ops/__init__.py |
import torch
import torch.distributed as dist
import torch.distributed.distributed_c10d as distributed_c10d
from torch.distributed._shard.sharded_tensor import (
ShardedTensor,
_sharded_op_impl
)
def _communicate_result(result, pg):
    # Aggregate results from all ranks via all_reduce.
if result:
result_tensor = torch.ones(1, device=torch.device(torch.cuda.current_device()))
else:
result_tensor = torch.zeros(1, device=torch.device(torch.cuda.current_device()))
dist.all_reduce(result_tensor, group=pg)
expected_result = torch.ones(1, device=torch.device(torch.cuda.current_device())) * dist.get_world_size(pg)
return torch.equal(result_tensor, expected_result)
def binary_cmp(cmp_fun, types, args, kwargs=None, process_group=None):
if len(args) != 2:
raise ValueError(f'Expected two arguments for torch.{cmp_fun.__name__}')
result = True
st1 = args[0]
st2 = args[1]
if not(isinstance(st1, ShardedTensor) and isinstance(st2, ShardedTensor)):
raise TypeError(f'Both arguments to torch.{cmp_fun.__name__} need to be of type ShardedTensor')
# Verify same PG
if st1._process_group != st2._process_group:
return False
if distributed_c10d._rank_not_in_group(st1._process_group) or distributed_c10d._rank_not_in_group(st2._process_group):
return distributed_c10d._rank_not_in_group(st1._process_group) == distributed_c10d._rank_not_in_group(st2._process_group)
# Verify metadata
if st1.metadata() != st2.metadata():
return _communicate_result(False, st1._process_group)
# Verify number of local shards
st1_local_shards = st1.local_shards()
st2_local_shards = st2.local_shards()
if len(st1_local_shards) != len(st2_local_shards):
return _communicate_result(False, st1._process_group)
# kwargs must be dict-like
if kwargs is None:
kwargs = {}
# Verify each local shard
for idx in range(len(st1_local_shards)):
if st1_local_shards[idx].metadata != st2_local_shards[idx].metadata:
return _communicate_result(False, st1._process_group)
if not cmp_fun(st1_local_shards[idx].tensor, st2_local_shards[idx].tensor, **kwargs):
return _communicate_result(False, st1._process_group)
return _communicate_result(True, st1._process_group)
@_sharded_op_impl(torch.equal)
def equal(types, args, kwargs, process_group):
return binary_cmp(torch.equal, types, args, kwargs, process_group)
@_sharded_op_impl(torch.allclose)
def allclose(types, args, kwargs, process_group):
return binary_cmp(torch.allclose, types, args, kwargs, process_group)
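# A minimal usage sketch of the comparison ops above: ``torch.equal``/``torch.allclose``
# compare metadata and every local shard, then all-reduce the result so that every rank
# returns the same answer. Assumes an initialized (NCCL) process group with one GPU per
# rank; the spec, sizes, and function name are illustrative only.
def _example_binary_cmp():
    from torch.distributed._shard import sharded_tensor
    from torch.distributed._shard.sharding_spec import ChunkShardingSpec
    spec = ChunkShardingSpec(
        dim=0,
        placements=[f"rank:{r}/cuda:{r}" for r in range(dist.get_world_size())],
    )
    st1 = sharded_tensor.ones(spec, 8, 4)
    st2 = sharded_tensor.ones(spec, 8, 4)
    return torch.equal(st1, st2), torch.allclose(st1, st2)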
| pytorch-master | torch/distributed/_shard/sharded_tensor/_ops/binary_cmp.py |
import torch
from torch.distributed._shard.sharded_tensor import (
_sharded_op_impl,
ShardedTensor,
)
from torch.distributed._shard.sharding_spec import ChunkShardingSpec
def register_chunk_op(op):
@_sharded_op_impl(op)
def sharded_chunk(types, args=(), kwargs=None, pg=None):
"""
Handles ``__torch_function__`` dispatch for the chunk op.
        If we chunk along a non-sharding dim, we simply chunk the local tensor
        directly and create a list of ShardedTensors based on the results.
        Warning: Chunking along the sharding dim is not supported.
Args: same as ``torch.chunk``.
Return:
List[ShardedTensor]: Chunk results as a list of ShardedTensor.
"""
st = args[0]
chunk_num = args[1]
dim = kwargs.get("dim")
dim = dim if dim else 0
# Validate types
if not isinstance(st, ShardedTensor):
raise TypeError(
f"torch function '{op.__name__}', with args: {args} and "
f"kwargs: {kwargs} are called for non ShardedTensor!"
)
spec = st.sharding_spec()
if not isinstance(spec, ChunkShardingSpec):
raise NotImplementedError("Only ChunkShardingSpec is supported for chunk.")
if spec.dim == dim or st.dim() + spec.dim == dim or st.dim() + dim == spec.dim: # type: ignore[operator]
raise NotImplementedError("Chunk by sharding dim is not supported.")
local_tensor = st.local_tensor()
st_size = st.size()
dim = dim if dim > 0 else st.dim() + dim
results = []
for chunk_tensor in local_tensor.chunk(chunk_num, dim=dim):
new_st_size = (*st_size[:dim], chunk_tensor.size(dim), *st_size[dim + 1 :]) # type: ignore[index]
results.append(
ShardedTensor._init_from_local_tensor(
chunk_tensor.contiguous(),
st.sharding_spec(),
new_st_size,
process_group=pg,
)
)
return results
chunk_ops = [
torch.chunk,
torch.Tensor.chunk,
]
for op in chunk_ops:
register_chunk_op(op)
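# A minimal usage sketch of the chunk registration above: chunking is only supported
# along a non-sharding dimension, where each local shard is chunked and each chunk is
# re-wrapped as a ShardedTensor with the same sharding spec. Assumes an initialized
# process group with one GPU per rank; the spec, sizes, and function name are
# illustrative only.
def _example_chunk():
    import torch.distributed as dist
    from torch.distributed._shard import sharded_tensor
    spec = ChunkShardingSpec(
        dim=0,
        placements=[f"rank:{r}/cuda:{r}" for r in range(dist.get_world_size())],
    )
    st = sharded_tensor.rand(spec, 8, 6)
    # Sharded along dim 0, so chunking along dim 1 is allowed.
    return torch.chunk(st, 3, dim=1)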
| pytorch-master | torch/distributed/_shard/sharded_tensor/_ops/chunk.py |
import torch
from ._common import (
_register_sharded_op_on_local_shards,
)
_register_sharded_op_on_local_shards(torch.nn.functional.gelu)
_register_sharded_op_on_local_shards(torch.nn.functional.relu)
_register_sharded_op_on_local_shards(torch.nn.functional.dropout)
_register_sharded_op_on_local_shards(torch.Tensor.tanh)
_register_sharded_op_on_local_shards(torch.nan_to_num)
| pytorch-master | torch/distributed/_shard/sharded_tensor/_ops/elementwise_ops.py |
import torch
from torch import Tensor
from torch.distributed._shard.sharded_tensor import ShardedTensor, _sharded_op_impl
from torch.distributed._shard.replicated_tensor import ReplicatedTensor
from torch.distributed._shard._utils import narrow_tensor
def binary_math_op_impl(op, types, args=(), kwargs=None, pg=None):
"""
Handles ``__torch_function__`` dispatch for the binary math ops
such as `torch.add`, `torch.mul`, `torch.div`, etc.
    This method computes a binary op between a ShardedTensor and a ReplicatedTensor or a scalar.
"""
if len(args) != 2:
raise ValueError("Only support binary math op on ShardedTensor for now!")
lhs = args[0]
rhs = args[1]
# Validate types
if isinstance(lhs, ReplicatedTensor):
assert isinstance(rhs, ShardedTensor)
st_size = rhs.size()
st_meta = rhs.local_shards()[0].metadata
if st_size != lhs.size():
# try to broadcast replicated tensor
lhs = lhs.expand(st_size)
replica_part = narrow_tensor(lhs, st_meta)
res = op(replica_part, rhs.local_tensor())
return ShardedTensor._init_from_local_tensor(
res,
rhs.sharding_spec(),
rhs.size(), # type: ignore[arg-type]
process_group=pg,
)
elif isinstance(rhs, ReplicatedTensor):
assert isinstance(lhs, ShardedTensor)
st_size = lhs.size()
st_meta = lhs.local_shards()[0].metadata
if st_size != rhs.size():
# try to broadcast replicated tensor
rhs = rhs.expand(st_size)
replica_part = narrow_tensor(rhs, st_meta)
res = op(lhs.local_tensor(), replica_part)
return ShardedTensor._init_from_local_tensor(
res,
lhs.sharding_spec(),
lhs.size(), # type: ignore[arg-type]
process_group=pg,
)
elif isinstance(lhs, (int, float)):
assert isinstance(rhs, ShardedTensor)
res = op(lhs, rhs.local_tensor())
return ShardedTensor._init_from_local_tensor(
res,
rhs.sharding_spec(),
rhs.size(), # type: ignore[arg-type]
process_group=pg,
)
elif isinstance(rhs, (int, float)):
assert isinstance(lhs, ShardedTensor)
res = op(lhs.local_tensor(), rhs)
return ShardedTensor._init_from_local_tensor(
res,
lhs.sharding_spec(),
lhs.size(), # type: ignore[arg-type]
process_group=pg,
)
else:
raise RuntimeError(
f"torch function '{op.__name__}', with args: {args} and "
f"kwargs: {kwargs} not supported yet for ShardedTensor!"
)
def register_math_op(op):
@_sharded_op_impl(op)
def binary_math_op(types, args=(), kwargs=None, pg=None):
return binary_math_op_impl(op, types, args, kwargs, pg)
binary_ops = [
# add
torch.add,
Tensor.add,
Tensor.__add__,
Tensor.__radd__,
# sub
torch.sub,
Tensor.sub,
Tensor.__sub__,
Tensor.__rsub__,
# mul
torch.mul,
Tensor.mul,
Tensor.__mul__,
Tensor.__rmul__,
# div
torch.div,
Tensor.div,
Tensor.__div__,
Tensor.__rdiv__,
]
for op in binary_ops:
register_math_op(op)
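# A minimal usage sketch of the binary math registrations above: an op between a
# ShardedTensor and a scalar is applied to the local shard and re-wrapped with the
# same sharding spec. Assumes an initialized process group with one GPU per rank and
# exactly one local shard per rank; the spec, sizes, and function name are illustrative.
def _example_scalar_math():
    import torch.distributed as dist
    from torch.distributed._shard import sharded_tensor
    from torch.distributed._shard.sharding_spec import ChunkShardingSpec
    spec = ChunkShardingSpec(
        dim=0,
        placements=[f"rank:{r}/cuda:{r}" for r in range(dist.get_world_size())],
    )
    st = sharded_tensor.ones(spec, 8, 4)
    doubled = st * 2   # dispatches through ``Tensor.__mul__`` registered above
    shifted = st + 1   # dispatches through ``Tensor.__add__`` registered above
    return doubled, shifted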
| pytorch-master | torch/distributed/_shard/sharded_tensor/_ops/math_ops.py |
import torch
import torch.distributed._shard.sharded_tensor as sharded_tensor
from torch.distributed._shard.sharded_tensor import (
_sharded_op_impl,
)
def validate_param(param, param_name):
if param is None:
raise ValueError(f"param: {param_name} shouldn't be None!")
@_sharded_op_impl(torch.nn.init.uniform_)
def uniform_(types, args=(), kwargs=None, pg=None):
r"""
Fills the Tensor in sharded_tensor.local_shards with values drawn from the uniform
distribution :math:`\mathcal{U}(a, b)`.
Args:
sharded_tensor: tensor sharded across devices
a: the lower bound of the uniform distribution
b: the upper bound of the uniform distribution
"""
validate_param(kwargs, "kwargs")
sharded_tensor = kwargs["tensor"]
validate_param(sharded_tensor, "sharded_tensor")
a = kwargs['a']
validate_param(a, "a")
b = kwargs['b']
validate_param(b, "b")
for shard in sharded_tensor.local_shards():
torch.nn.init.uniform_(shard.tensor, a=a, b=b)
return sharded_tensor
@_sharded_op_impl(torch.nn.init.normal_)
def normal_(types, args=(), kwargs=None, pg=None):
r"""
Fills the Tensors in sharded_tensor.local_shards with values drawn from the normal
distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`.
Args:
sharded_tensor: tensor sharded across devices
mean: the mean of the normal distribution
std: the standard deviation of the normal distribution
"""
validate_param(kwargs, "kwargs")
sharded_tensor = kwargs["tensor"]
validate_param(sharded_tensor, "sharded_tensor")
mean = kwargs['mean']
validate_param(mean, "mean")
std = kwargs['std']
validate_param(std, "std")
for shard in sharded_tensor.local_shards():
torch.nn.init.normal_(shard.tensor, mean=mean, std=std)
return sharded_tensor
@_sharded_op_impl(torch.nn.init.kaiming_uniform_)
def kaiming_uniform_(types, args=(), kwargs=None, pg=None):
r"""
Fills the Tensors in sharded_tensor.local_shards with values according to the method
described in `Delving deep into rectifiers: Surpassing human-level
performance on ImageNet classification` - He, K. et al. (2015), using a
uniform distribution. The resulting tensor will have values sampled from
:math:`\mathcal{U}(-\text{bound}, \text{bound})` where
.. math::
\text{bound} = \text{gain} \times \sqrt{\frac{3}{\text{fan\_mode}}}
Also known as He initialization.
Args:
sharded_tensor: tensor sharded across devices
a: the negative slope of the rectifier used after this layer (only
used with ``'leaky_relu'``)
mode: either ``'fan_in'`` (default) or ``'fan_out'``. Choosing ``'fan_in'``
preserves the magnitude of the variance of the weights in the
forward pass. Choosing ``'fan_out'`` preserves the magnitudes in the
backwards pass.
nonlinearity: the non-linear function (`nn.functional` name),
recommended to use only with ``'relu'`` or ``'leaky_relu'`` (default).
"""
validate_param(kwargs, "kwargs")
sharded_tensor = kwargs["tensor"]
validate_param(sharded_tensor, "sharded_tensor")
a = kwargs['a']
validate_param(a, "a")
mode = kwargs['mode']
validate_param(mode, "mode")
nonlinearity = kwargs['nonlinearity']
validate_param(nonlinearity, "nonlinearity")
for shard in sharded_tensor.local_shards():
torch.nn.init.kaiming_uniform_(shard.tensor, a=a, mode=mode, nonlinearity=nonlinearity)
return sharded_tensor
@_sharded_op_impl(torch.nn.init.constant_)
def constant_(types, args=(), kwargs=None, pg=None):
r"""
    Fills the input ShardedTensor with the value ``val``.
Args:
sharded_tensor: tensor sharded across devices
val: the value to fill the tensor with
"""
validate_param(kwargs, "kwargs")
sharded_tensor = kwargs["tensor"]
validate_param(sharded_tensor, "sharded_tensor")
val = kwargs['val']
validate_param(val, "val")
for shard in sharded_tensor.local_shards():
torch.nn.init.constant_(shard.tensor, val=val)
return sharded_tensor
tensor_like_creation_op_map = {
torch.full_like: sharded_tensor.full,
torch.empty_like: sharded_tensor.empty,
torch.zeros_like: sharded_tensor.zeros,
torch.ones_like: sharded_tensor.ones,
torch.rand_like: sharded_tensor.rand,
torch.randn_like: sharded_tensor.randn,
}
# tensor ops that behave the same as the default tensor
def register_tensor_creation_op(op):
@_sharded_op_impl(op)
def tensor_creation_op(types, args=(), kwargs=None, pg=None):
"""
Handles ``__torch_function__`` dispatch for tensor creation ops that
takes a ShardedTensor as argument, such as ``torch.zeros_like`` or
``torch.full_like``.
"""
creation_op = tensor_like_creation_op_map.get(op, None)
if creation_op is None:
raise RuntimeError(f"Tensor creation {op} not supported!")
if kwargs is None:
kwargs = {}
st = args[0]
new_st = creation_op(st.sharding_spec(), st.size(), *args[1:], **kwargs) # type: ignore[operator]
return new_st
register_tensor_creation_op(torch.full_like)
register_tensor_creation_op(torch.empty_like)
register_tensor_creation_op(torch.zeros_like)
register_tensor_creation_op(torch.ones_like)
register_tensor_creation_op(torch.rand_like)
register_tensor_creation_op(torch.randn_like)
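# A minimal usage sketch of the registrations above: the standard ``torch.nn.init``
# functions and the ``*_like`` creation ops operate shard-by-shard on a ShardedTensor.
# Assumes an initialized process group with one GPU per rank; the spec, sizes, and
# function name are illustrative only.
def _example_init_ops():
    import torch.distributed as dist
    from torch.distributed._shard.sharding_spec import ChunkShardingSpec
    spec = ChunkShardingSpec(
        dim=0,
        placements=[f"rank:{r}/cuda:{r}" for r in range(dist.get_world_size())],
    )
    st = sharded_tensor.empty(spec, 16, 8)
    torch.nn.init.uniform_(st, a=0.0, b=1.0)  # fills every local shard in place
    zeros = torch.zeros_like(st)              # creates a new ShardedTensor via sharded_tensor.zeros
    return st, zeros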
| pytorch-master | torch/distributed/_shard/sharded_tensor/_ops/init.py |
import copy
import torch
from torch.distributed._shard.sharded_tensor import (
Shard,
ShardedTensor,
)
from ._common import (
_register_sharded_op_on_local_shards,
)
def sharded_type_as_check(*args, **kwargs):
"""
Perform extra checks for the sharded_type_as op such as the input needs to
be either a Tensor or ShardedTensor.
Args: same as ``torch.Tensor.type_as``.
Return: None
"""
if len(args) < 2:
raise ValueError("Needs to give a tensor to cast type as!")
if not isinstance(args[1], torch.Tensor) and not isinstance(args[1], ShardedTensor):
raise ValueError("Needs to give a Tensor or ShardedTensor to cast type as!")
def same_dtype(*args, **kwargs):
"""
When the dtype is the same, return the original ShardedTensor.
Args: same as ``torch.Tensor.type_as``.
Return (bool): Whether to return early or not.
"""
return args[0].dtype == args[1].dtype
def sharded_type_as(args, kwargs, pg):
"""
Handles ``__torch_function__`` dispatch for the ``torch.Tensor.type_as`` op.
Args: same as ``torch.Tensor.type_as``.
Return:
new_local_shards (List[Shard]): Local shards for the new sharded tensor.
st_meta (ShardedTensorMetadata): Metadata of the new sharded tensor.
"""
st = args[0]
tensor = args[1]
if isinstance(tensor, ShardedTensor):
tensor = tensor.local_tensor()
new_local_shards = []
for shard in st.local_shards():
new_local_shards.append(Shard(shard.tensor.type_as(tensor), shard.metadata))
st_meta = copy.deepcopy(st._metadata)
st_meta.tensor_properties.dtype = tensor.dtype
return new_local_shards, st_meta
_register_sharded_op_on_local_shards(
torch.Tensor.type_as,
early_stop_func=same_dtype,
extra_check=sharded_type_as_check,
customized_func=sharded_type_as,
)
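# A short usage sketch (illustrative only): with `st` assumed to be a ShardedTensor,
# casting through ``type_as`` dispatches to sharded_type_as above and produces a new
# ShardedTensor whose local shards and metadata carry the target dtype.
def _example_type_as_sketch(st):
    target = torch.zeros(1, dtype=torch.float64)
    # Early-stops (returns `st` unchanged) when the dtypes already match.
    return st.type_as(target)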
| pytorch-master | torch/distributed/_shard/sharded_tensor/_ops/matrix_ops.py |
from .api import (
ShardingPlan,
ShardingPlanner
)
| pytorch-master | torch/distributed/_shard/sharding_plan/__init__.py |
import abc
import torch.nn as nn
from dataclasses import dataclass
from typing import Dict, List, Optional, Union
from torch.distributed._shard.sharder import Sharder
from torch.distributed._shard.sharding_spec import ShardingSpec
@dataclass
class ShardingPlan(object):
"""
    Representation of a sharding plan, which describes how to shard a module
    across hosts. `plan` is used to shard module parameters according to the spec provided;
    `output_plan` and `return_local_tensor` are optional, and are used to specify the output
    layout of a module with a spec, and when to convert back to a data parallel fashion.
Args:
plan (Dict[str, Union[:class:`torch.distributed._shard.sharding_spec.ShardingSpec`,
              :class:`torch.distributed._shard.sharder.Sharder`]]):
            a dict that describes how to shard a module; there are currently two ways to shard a module:
1. directly shard a module parameter by a `ShardingSpec`, keyed by the name of
a parameter to a `ShardingSpec`.
2. shard a submodule by applying a `Sharder` on it, keyed by the name of a module
to a `Sharder` object.
        output_plan (Dict[str, :class:`torch.distributed._shard.sharding_spec.ShardingSpec`], optional):
            a dict that specifies the layout of a module's output which produces a ShardedTensor,
            keyed by the name of the module to ShardingSpec ("" in the key means the root module).
Default: `None`
        return_local_tensor (List[str], optional): a list of strings; each element enables
            a module's sharded output to be returned as a Tensor from its local shards to
            ensure further processing in a data parallel fashion. ("" in the list means the
            root module).
Default: None
Example:
        Suppose we want to shard a module with two linear layers and then run it with DDP. We also
        want to convert the output of the second linear layer back to DDP. We can do that as follows:
>>> class MyModule(nn.Module):
>>> def __init__(self):
>>> super().__init__()
>>> self.fc1 = nn.Linear()
>>> self.gelu = nn.GELU()
>>> self.fc2 = nn.Linear()
        >>>         self.relu = nn.ReLU()
>>>
>>> def forward(self, input):
>>> return self.relu(self.fc2(self.gelu(self.fc1(input))))
        >>> # xdoctest: +SKIP("Undefined spec1, spec2")
>>> sharding_plan = ShardingPlan(
>>> plan={
>>> "fc1.weight": spec1,
>>> "fc2.weight": spec2
>>> },
>>> output_plan={
>>> "fc2": output_spec
>>> },
>>> return_local_tensor=["fc2"]
>>> )
"""
plan: Dict[str, Union[ShardingSpec, Sharder]]
output_plan: Optional[Dict[str, ShardingSpec]] = None
return_local_tensor: Optional[List[str]] = None
class ShardingPlanner(abc.ABC):
"""
    Default ShardingPlanner interface; it can be extended to
    implement advanced sharding strategies.
"""
@abc.abstractmethod
def build_plan(self, module: nn.Module) -> ShardingPlan:
"""
        Given an nn.Module, define how to shard the module across
        ranks and return a ShardingPlan.
Args:
module (:class:`torch.nn.Module`):
The module to apply sharding to.
Returns:
A :class:`torch.distributed._shard.sharding_plan.ShardingPlan` object that
represents how to shard the module.
"""
pass
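# A minimal sketch of the interface above (illustrative only): a planner that maps every
# 2-D parameter to one ShardingSpec supplied at construction time. The class name and the
# "2-D parameters only" policy are assumptions, not part of the original file.
class _UniformSpecPlannerSketch(ShardingPlanner):
    def __init__(self, spec: ShardingSpec):
        self._spec = spec

    def build_plan(self, module: nn.Module) -> ShardingPlan:
        # Walk the module and assign the single spec to each 2-D parameter name.
        param_plan: Dict[str, Union[ShardingSpec, Sharder]] = {}
        for name, param in module.named_parameters():
            if param.dim() == 2:
                param_plan[name] = self._spec
        return ShardingPlan(plan=param_plan)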
| pytorch-master | torch/distributed/_shard/sharding_plan/api.py |
from .api import (
DevicePlacementSpec,
EnumerableShardingSpec,
PlacementSpec,
ShardingSpec,
_infer_sharding_spec_from_shards_metadata,
)
from .chunk_sharding_spec import (
ChunkShardingSpec,
)
from torch.distributed._shard.metadata import ShardMetadata
| pytorch-master | torch/distributed/_shard/sharding_spec/__init__.py |
from typing import List
from torch.distributed._shard.metadata import ShardMetadata
def _check_shard_metadata_pair_overlap(shard1: ShardMetadata, shard2: ShardMetadata):
"""
Checks if two shards overlap.
"""
# For each dim of each shard, check if one shard resides on the other
# end of second shard with respect to that dim. As an example for a 2D
# shard, we would check if one shard is above or on the left of the
# other shard.
ndims = len(shard1.shard_offsets)
for i in range(ndims):
if shard1.shard_offsets[i] >= shard2.shard_offsets[i] + shard2.shard_sizes[i]:
return False
if shard2.shard_offsets[i] >= shard1.shard_offsets[i] + shard1.shard_sizes[i]:
return False
return True
def validate_non_overlapping_shards_metadata(shards: List[ShardMetadata]):
"""
Ensures none of the shards overlap with each other.
Args:
shards(List[ShardMetadata]): List of :class:`ShardMetadata` objects representing
each shard.
Raises:
``ValueError`` if there's overlap in any two shards.
"""
# TODO: evaluate optimizing this if needed.
for i in range(len(shards)):
for j in range(i + 1, len(shards)):
if _check_shard_metadata_pair_overlap(shards[i], shards[j]):
raise ValueError(f'Shards {shards[i]} and {shards[j]} overlap')
def check_tensor(shards_metadata, tensor_dims) -> None:
"""
Checks if the shards_metadata is compatible with the provided tensor dims.
Args:
shards_metadata(List[ShardMetadata]): List of :class:`ShardMetadata`
objects representing each shard of the tensor.
tensor_dims(Sequence of int): Dimensions of tensor to verify
Raises:
``ValueError`` if not compatible.
"""
# If the tensor's volume matches the total volume of all shards and
# all shard boundaries are within tensor dims, we have a compatible
# sharding spec for this tensor. Note that we have already verified
# we don't have overlapping shards.
tensor_rank = len(tensor_dims)
shards_rank = len(shards_metadata[0].shard_offsets)
if tensor_rank != shards_rank:
raise ValueError(f'Rank of tensor is {tensor_rank}, but shards rank is {shards_rank}')
total_shard_volume = 0
for shard in shards_metadata:
shard_volume = 1
for i, shard_length in enumerate(shard.shard_sizes):
shard_volume *= shard_length
if shard.shard_offsets[i] + shard.shard_sizes[i] > tensor_dims[i]:
raise ValueError(
f'Shard offset {shard.shard_offsets[i]} and length '
f'{shard.shard_sizes[i]} exceeds tensor dim: {tensor_dims[i]} for shard {shard}')
total_shard_volume += shard_volume
tensor_volume = 1
for size in tensor_dims:
tensor_volume *= size
if total_shard_volume != tensor_volume:
# TODO: Can we improve this error message to point out the gaps?
raise ValueError(
f'Total volume of shards: {total_shard_volume} '
f'does not match tensor volume: {tensor_volume}, in other words '
f'all the individual shards do not cover the entire tensor')
def get_split_size(dim_size, chunks):
"""
Computes the split size inline with ``torch.chunk``
Args:
dim_size(int): Size of the dimension being chunked.
chunks(int): Number of chunks to create for ``dim_size``.
Returns:
An int indicating the split size to use.
"""
return (dim_size + chunks - 1) // chunks
def get_chunked_dim_size(dim_size, split_size, idx):
"""
Computes the dim size of the chunk for provided ``idx`` given ``dim_size``
and ``split_size``.
Args:
dim_size(int): Size of the dimension being chunked.
split_size(int): The chunk size for each chunk of ``dim_size``.
idx(int): The index of chunk whose dim size is being requested.
Returns:
An int indicating the dim size of the chunk.
"""
return max(min(dim_size, split_size * (idx + 1)) - split_size * idx, 0)
def get_chunk_sharding_params(sharding_dim_size, world_size, spec, rank):
"""
Generate the start pos and offset length for the current rank for
chunk sharding.
Args:
sharding_dim_size(int): The dimension length which we shard on.
world_size(int): number of ranks.
spec (:class:`torch.distributed._shard.sharding_spec.ChunkShardingSpec`):
sharding spec.
        rank(int): rank of the current CUDA process.
Returns:
start_pos(int): start position of sharded tensor on the given rank.
chunk_size(int): chunk size of sharded tensor on the given rank.
"""
split_size = get_split_size(sharding_dim_size, world_size)
current_offsets = 0
start_pos = current_offsets
for idx, placement in enumerate(spec.placements):
chunk_size = get_chunked_dim_size(sharding_dim_size, split_size, idx)
if rank == placement.rank():
start_pos = current_offsets
break
current_offsets += chunk_size
return start_pos, chunk_size
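# A small worked example of the chunking helpers above (illustrative only, not used by
# the library): for dim_size=10 and chunks=4, get_split_size gives 3 and the per-chunk
# sizes are [3, 3, 3, 1], matching torch.chunk semantics.
def _example_chunking_arithmetic_sketch():
    dim_size, chunks = 10, 4
    split_size = get_split_size(dim_size, chunks)  # (10 + 3) // 4 == 3
    sizes = [get_chunked_dim_size(dim_size, split_size, idx) for idx in range(chunks)]
    assert split_size == 3
    assert sizes == [3, 3, 3, 1]
    return sizes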
| pytorch-master | torch/distributed/_shard/sharding_spec/_internals.py |
from abc import ABC, abstractmethod
from dataclasses import dataclass
import functools
from typing import Callable, Dict, List, TYPE_CHECKING
import torch
from ._internals import (
check_tensor,
get_chunked_dim_size,
get_split_size,
validate_non_overlapping_shards_metadata
)
from torch.distributed._shard.metadata import ShardMetadata
import torch.distributed._shard.sharded_tensor.metadata as sharded_tensor_meta
from torch.distributed._shard.op_registry_utils import _decorator_func
if TYPE_CHECKING:
    # Only include ShardedTensor when doing type checking, exclude it
# from run-time to resolve circular dependency.
from torch.distributed._shard.sharded_tensor import ShardedTensor
class PlacementSpec(ABC):
"""
Base class representing the placement of an entity. Subclasses of this
class can be used to specify customized placements which might not be
covered by existing APIs.
"""
pass
@dataclass
class DevicePlacementSpec(PlacementSpec):
"""
Associates placement of an entity with a single device.
Args:
device(:class:`torch.distributed._remote_device`): The device to place the entity on.
"""
device: torch.distributed._remote_device
def __post_init__(self):
if not isinstance(self.device, torch.distributed._remote_device):
self.device = torch.distributed._remote_device(self.device)
class ShardingSpec(ABC):
"""
Base class representing sharding specifications.
"""
@abstractmethod
def build_metadata(self,
tensor_sizes: torch.Size,
tensor_properties: sharded_tensor_meta.TensorProperties,
) -> sharded_tensor_meta.ShardedTensorMetadata:
"""
        Given a global tensor size, define how to shard a tensor of this shape
        across ranks and return a ShardedTensorMetadata.
Args:
tensor_sizes (:class:`torch.Size`):
The tensor shape to shard on, a `torch.Size` object that represents the
tensor shape to be sharded according to the ShardingSpec.
            tensor_properties(:class:`torch.distributed._shard.sharded_tensor.TensorProperties`):
Tensor properties used to create a ShardedTensor.
Returns:
A :class:`ShardedTensorMetadata` object that encodes the information about
the layout of the ShardedTensor and its properties.
"""
@abstractmethod
def shard(self, tensor: torch.Tensor, src_rank: int = 0, process_group=None) -> "ShardedTensor":
"""
        Given a global tensor on src_rank, shard this tensor
        across ranks within the process group and return a ShardedTensor.
Args:
tensor (:class:`torch.Tensor`): Tensor needs to be sharded.
Keyword args:
src_rank (int, optional): The source rank which is used as the ground truth of
the data for the parameter that would be sharded and scattered
across the rest of the ranks.
Default: 0.
process_group (ProcessGroup, optional): The process group to work on. If None,
the default process group will be used.
Returns:
A :class:`ShardedTensor` sharded from the given tensor.
"""
# Ops customized for a particular ShardingSpec.
_CUSTOM_SHARDING_SPEC_OPS: Dict[str, Dict[Callable, Callable]] = {}
def _has_custom_op(sharding_spec, op):
"""
Returns whether or not the ShardingSpec has a custom op implementation.
"""
class_name = type(sharding_spec).__qualname__
return class_name in _CUSTOM_SHARDING_SPEC_OPS and op in _CUSTOM_SHARDING_SPEC_OPS[class_name]
def _dispatch_custom_op(sharding_spec, op: Callable, types, args, kwargs, process_group):
"""
Calls the custom op for this ShardingSpec if it exists.
"""
class_name = type(sharding_spec).__qualname__
if not _has_custom_op(sharding_spec, op):
raise RuntimeError(f'Custom op: {op} not registered for {class_name}')
func = _CUSTOM_SHARDING_SPEC_OPS[class_name][op]
return func(types, args, kwargs, process_group)
def custom_sharding_spec_op(sharding_spec_class, func):
"""
Decorator to allow custom registration of ops.
Args:
sharding_spec_class(type): The ShardingSpec for which we need to add this custom op.
func(Callable): The op to override (ex: torch.bmm)
"""
class_name = sharding_spec_class.__qualname__
if class_name not in _CUSTOM_SHARDING_SPEC_OPS:
_CUSTOM_SHARDING_SPEC_OPS[class_name] = {}
return functools.partial(
_decorator_func,
op=func,
op_table=_CUSTOM_SHARDING_SPEC_OPS[class_name]
)
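# A registration sketch for the decorator above (illustrative only): the spec class is
# passed in and assumed to be a ShardingSpec subclass; the override simply forwards to
# the dense op and exists to show the expected (types, args, kwargs, pg) signature.
def _example_register_custom_op_sketch(my_sharding_spec_class):
    @custom_sharding_spec_op(my_sharding_spec_class, torch.nn.functional.linear)
    def _my_sharded_linear(types, args=(), kwargs=None, pg=None):
        # A real implementation would compute on local shards and communicate over pg.
        return torch.nn.functional.linear(*args, **(kwargs or {}))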
@dataclass
class EnumerableShardingSpec(ShardingSpec):
"""
    This is a type of ShardingSpec that allows users to specify a generic
    sharding scheme by enumerating exactly how each shard is laid out.
Args:
shards(List[ShardMetadata]): List of :class:`ShardMetadata` objects representing
each shard. Note that none of the shards should overlap.
"""
shards: List[ShardMetadata]
def __post_init__(self):
if len(self.shards) == 0:
raise ValueError(f'Empty shard list provided: {self.shards}')
# Validate each shard has same rank.
rank = -1
for shard in self.shards:
if rank != -1 and rank != len(shard.shard_offsets):
raise ValueError(f'Found inconsistent ranks for shards: {rank} and {len(shard.shard_offsets)}')
rank = len(shard.shard_offsets)
validate_non_overlapping_shards_metadata(self.shards)
def build_metadata(self,
tensor_sizes: torch.Size,
tensor_properties: sharded_tensor_meta.TensorProperties,
) -> sharded_tensor_meta.ShardedTensorMetadata:
# check if shards form a valid tensor
check_tensor(self.shards, tensor_sizes)
return sharded_tensor_meta.ShardedTensorMetadata(
self.shards,
tensor_sizes,
tensor_properties
)
def shard(self, tensor: torch.Tensor, src_rank: int = 0, process_group=None) -> "ShardedTensor":
# TODO: figure out a generic and efficient way to scatter the shards for EnumerableShardingSpec
raise NotImplementedError("EnumerableShardingSpec.shard not implemented yet!")
def _infer_sharding_spec_from_shards_metadata(shards_metadata):
"""
Infer the sharding spec from the metadata of each shard of a ShardedTensor.
If the tensor is sharded only on one dimension, we can then verify whether it's
a ChunkShardingSpec or not. The way to verify it is to first get the total length
and perform a chunk sharding with the given placements to see if we can have the
same chunk size as the given shards_metadata. If not, we assume it's enum sharded.
Args:
shards_metadata (List[ShardMetadata]): List of Metadata of local shards.
Returns:
A :class:`torch.distributed._shard.sharding_spec.ShardingSpec` object of sharding
spec for one sharded tensor.
"""
placements = []
chunk_sharding_dim = None
chunk_offset_list = []
shard_size_list = []
# collect local shard metadatas from the global sharded_tensor_metadata
for shard_metadata in shards_metadata: # type: ignore[attr-defined]
placements.append(shard_metadata.placement)
local_offsets = shard_metadata.shard_offsets
chunk_offset_list.append(sum(local_offsets))
shard_size_list.append(shard_metadata.shard_sizes)
shard_dims = [idx for idx, e in enumerate(local_offsets) if e != 0]
# If the offset is [0, 0, ..., 0] (all zeros),
        # we cannot decide how the tensor is sharded.
if len(shard_dims) == 0:
continue
        # If the offset is [0, N, ..., 0, M, 0, ..., 0],
# we are sure it's sharded by more than one dimension.
if len(shard_dims) != 1:
chunk_sharding_dim = None
break
        # If the offset is [0, 0, ..., 0, M, 0, ..., 0], aka, it's sharded by just
# one dimension, we need to make sure all ranks share the same dimension.
if not chunk_sharding_dim:
chunk_sharding_dim = shard_dims[0]
elif chunk_sharding_dim != shard_dims[0]:
chunk_sharding_dim = None
break
if chunk_sharding_dim is not None:
# Ensure we infer the correct placement order from offsets
placements = [
x for _, x in sorted(zip(chunk_offset_list, placements), key=lambda e: e[0])
]
from .chunk_sharding_spec import ChunkShardingSpec
chunk_spec = ChunkShardingSpec(
dim=chunk_sharding_dim,
placements=placements,
)
shard_sizes = sorted([x[chunk_sharding_dim] for x in shard_size_list])
shard_total_length = sum(shard_sizes)
chunks = len(placements)
split_size = get_split_size(shard_total_length, chunks)
chunk_shard_sizes = sorted(
[
get_chunked_dim_size(shard_total_length, split_size, idx)
for idx in range(len(placements))
]
)
if shard_sizes == chunk_shard_sizes:
return chunk_spec
return EnumerableShardingSpec(shards_metadata)
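# A small sketch of the inference above (illustrative only): two evenly sized row-wise
# shards of a (4, 8) tensor are recognized as a dim-0 ChunkShardingSpec. The device
# placements are assumptions; no process group is needed to build the metadata.
def _example_infer_chunk_spec_sketch():
    shards_metadata = [
        ShardMetadata(shard_offsets=[0, 0], shard_sizes=[2, 8], placement="rank:0/cuda:0"),
        ShardMetadata(shard_offsets=[2, 0], shard_sizes=[2, 8], placement="rank:1/cuda:1"),
    ]
    # For evenly chunked shards this comes back as a ChunkShardingSpec with dim=0;
    # otherwise an EnumerableShardingSpec would be returned.
    return _infer_sharding_spec_from_shards_metadata(shards_metadata)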
| pytorch-master | torch/distributed/_shard/sharding_spec/api.py |
from dataclasses import dataclass
import torch
import torch.distributed._shard.sharded_tensor.metadata as sharded_tensor_meta
from torch.distributed._shard.metadata import ShardMetadata
from torch.distributed._shard.sharded_tensor.shard import Shard
from torch.distributed._shard.sharded_tensor.utils import (
_parse_and_validate_remote_device
)
from torch.distributed._shard._utils import narrow_tensor
import torch.distributed as dist
import torch.distributed.distributed_c10d as distributed_c10d
from typing import List, Union, TYPE_CHECKING
from ._internals import (
get_chunked_dim_size,
get_split_size,
)
from .api import ShardingSpec
if TYPE_CHECKING:
    # Only include ShardedTensor when doing type checking, exclude it
# from run-time to resolve circular dependency.
from torch.distributed._shard.sharded_tensor import ShardedTensor
@dataclass
class ChunkShardingSpec(ShardingSpec):
"""
    This is a type of ShardingSpec that defines the placement as being sharded
    across multiple devices. In particular, it represents sharding a Tensor
    along a single dimension into equal chunks (similar to :meth:`torch.chunk`).
    The semantics of how a tensor is partitioned are in line with
    :meth:`torch.chunk`, where ``dim`` in torch.chunk corresponds to the
    specified ``dim`` and ``chunks`` in torch.chunk is the number of elements
    in the specified placement list.
Args:
dim (int or str):
The dimension to shard on, could be an integer representing the
dimension or a string in case of named tensors where dimensions are
named. Note that named tensor support is not added yet.
        placements(List[Union[_remote_device, str]]):
Specifies the placement of each shard of the Tensor. The size of
the list represents the number of shards to be created. This could
be a list of
:class:`torch.distributed._remote_device`'s. This list
could also contain a string which represents remote
device as accepted by
:class:`torch.distributed._remote_device`
"""
ShardingDim = Union[int, str]
dim: ShardingDim
placements: List[Union[torch.distributed._remote_device, str]]
def __post_init__(self):
self._verify_dim(self.dim)
for i, remote_device in enumerate(self.placements):
if not isinstance(remote_device, torch.distributed._remote_device):
self.placements[i] = torch.distributed._remote_device(remote_device)
@staticmethod
def _verify_dim(dim):
# Validate the sharding spec.
# TODO: support named dimension
if isinstance(dim, str):
raise NotImplementedError(
"ChunkShardingSpec does not support named dimension yet!"
)
if not isinstance(dim, int):
raise ValueError(
f"Sharding dim needs to be an integer, found: {dim}"
)
def build_metadata(self,
tensor_sizes: torch.Size,
tensor_properties: sharded_tensor_meta.TensorProperties,
) -> sharded_tensor_meta.ShardedTensorMetadata:
tensor_num_dim = len(tensor_sizes)
self._verify_dim(self.dim)
if self.dim >= tensor_num_dim or self.dim < -tensor_num_dim: # type: ignore[operator]
raise ValueError(f"Invalid sharding dim: {self.dim}")
shards_metadata = []
sharding_dim_size = tensor_sizes[self.dim] # type: ignore[index]
chunks = len(self.placements)
split_size = get_split_size(sharding_dim_size, chunks)
for idx, placement in enumerate(self.placements):
# generate ShardMetadata for each placement device
chunked_dim_size = get_chunked_dim_size(sharding_dim_size, split_size, idx)
if chunked_dim_size > 0:
shard_size = list(tensor_sizes)
current_offsets = [0] * tensor_num_dim
current_offsets[self.dim] = split_size * idx # type: ignore[index]
shard_size[self.dim] = chunked_dim_size # type: ignore[index]
shard_metadata = ShardMetadata(
shard_offsets=current_offsets,
shard_sizes=shard_size,
placement=placement,
)
shards_metadata.append(shard_metadata)
# current_offsets[self.dim] += chunked_dim_size # type: ignore[index]
return sharded_tensor_meta.ShardedTensorMetadata(
shards_metadata,
tensor_sizes,
tensor_properties
)
def shard(self, tensor: torch.Tensor, src_rank: int = 0, process_group=None) -> "ShardedTensor":
# relative imports to avoid circular dependency
from torch.distributed._shard.sharded_tensor import (
ShardedTensor
)
tensor_properties = sharded_tensor_meta.TensorProperties(
dtype=tensor.dtype,
layout=tensor.layout,
requires_grad=tensor.requires_grad,
memory_format=torch.contiguous_format,
pin_memory=tensor.is_pinned()
)
current_rank = dist.get_rank(process_group)
tensor_meta = self.build_metadata(tensor.size(), tensor_properties)
local_shards = []
local_tensor = None
local_metadata = None
tensors_to_scatter = [None] * dist.get_world_size(process_group)
sharding_dim_size = tensor.size()[self.dim] # type: ignore[index]
chunks = len(self.placements)
split_size = get_split_size(sharding_dim_size, chunks)
scatter_shape = list(tensor.size())
scatter_shape[self.dim] = split_size # type: ignore[index]
for shard_meta in tensor_meta.shards_metadata:
rank, device = _parse_and_validate_remote_device(process_group, shard_meta.placement)
if current_rank == src_rank:
# Reshape to get shard for this rank and we don't want autograd
# recording here for the narrow op and 'local_shard' should be a
# leaf variable in the autograd graph.
narrowed_tensor = narrow_tensor(tensor, shard_meta)
if shard_meta.shard_sizes[self.dim] < split_size: # type: ignore[index]
                    # for the last shard that might be smaller than the other shards
# resize the narrowed tensor to the same size and use it for
# the scatter collective as dist.scatter requires same size
# inputs on every rank
tensor_to_scatter = narrowed_tensor.detach().clone().resize_(scatter_shape)
else:
tensor_to_scatter = narrowed_tensor.detach().clone().contiguous()
tensors_to_scatter[rank] = tensor_to_scatter
if current_rank == rank:
local_tensor = torch.empty(
scatter_shape, dtype=tensor.dtype, layout=tensor.layout, device=device)
local_metadata = shard_meta
# each rank should have local_tensor and local_metadata initialized if we build
# the metadata list in a correct way.
assert local_tensor is not None
assert local_metadata is not None
# Scatter the shards to all ranks in the pg
# scatter takes the global rank as ``src``
src_for_scatter = src_rank
if process_group is not None and process_group is not distributed_c10d._get_default_group():
src_for_scatter = distributed_c10d._get_global_rank(process_group, src_for_scatter)
dist.scatter(
local_tensor,
scatter_list=tensors_to_scatter if current_rank == src_rank else None,
src=src_for_scatter,
group=process_group
)
if list(local_tensor.size()) != local_metadata.shard_sizes:
# detach again after receiving to ensure local shards remain a leaf node
local_tensor = local_tensor.resize_(local_metadata.shard_sizes).detach()
# Sync requires_grad to local_shard.
local_tensor.requires_grad = tensor.requires_grad
local_shards.append(Shard(tensor=local_tensor, metadata=local_metadata))
st = ShardedTensor._init_from_local_shards_and_global_metadata(
local_shards,
tensor_meta,
process_group=process_group)
# Manually set sharding_spec
st._sharding_spec = self
return st
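# A metadata-building sketch (illustrative only): build_metadata needs no process group,
# so this shows, for an assumed (16, 17) tensor chunked over 4 ranks on dim 0, the four
# (4, 17) shards produced by the spec above. Device placements are assumptions.
def _example_build_metadata_sketch():
    spec = ChunkShardingSpec(
        dim=0,
        placements=[f"rank:{r}/cuda:{r}" for r in range(4)],
    )
    properties = sharded_tensor_meta.TensorProperties(
        dtype=torch.float32,
        layout=torch.strided,
        requires_grad=False,
        memory_format=torch.contiguous_format,
        pin_memory=False,
    )
    meta = spec.build_metadata(torch.Size([16, 17]), properties)
    # Each of the 4 shards covers 4 rows: offsets [0, 0], [4, 0], [8, 0], [12, 0].
    return [m.shard_offsets for m in meta.shards_metadata]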
| pytorch-master | torch/distributed/_shard/sharding_spec/chunk_sharding_spec.py |
# coding=utf-8
import torch
import torch.distributed as dist
from ._common import (
_communicate_size_to_each_rank,
_handle_col_wise_sharding_base,
_handle_row_wise_lookup_distribute,
_handle_max_norm_col_wise,
)
from torch.distributed._shard.sharding_spec import ChunkShardingSpec
from torch.distributed._shard.sharding_spec.api import custom_sharding_spec_op
from torch.distributed._shard.sharded_tensor import (
ShardedTensor
)
@custom_sharding_spec_op(ChunkShardingSpec, torch.nn.functional.embedding)
def sharded_embedding(types, args, kwargs, pg):
"""
Handles ``__torch_function__`` dispatch for ``torch.nn.functional.embedding``.
This method computes a sharded embedding lookup and has the following limitations:
1. Supports only sharding of ``weight``.
2. Supports only ``ChunkShardingSpec``.
3. Supports only a single local shard per rank.
4. Supports all specs except for scale_grad_by_freq, sparse, etc.
Based on the dimension that the weight is sharded on, there are two
algorithms:
ROWWISE SHARDING
================
For row-wise sharding the weight is sharded on dimension 0.
The overall algorithm can be best explained with an example. Let's assume
the dims for input are (4 x 6) and W are (10 x 17) and W is sharded across
    4 GPUs creating 3 shards of (3 x 17) and 1 shard of (1 x 17).
The algorithm is as follows:
1. First the input is flattened to 1D and gets sorted so that we can distribute
them to the corresponding rank. For example if the given input is
tensor([[6, 5, 2, 9, 6, 3],
[3, 1, 2, 4, 7, 6],
[4, 0, 4, 9, 8, 9],
[8, 6, 6, 4, 6, 1]])
Then we have the 1D array like:
tensor([6, 5, 2, 9, 6, 3, 3, 1, 2, 4, 7, 6, 4, 0, 4, 9, 8, 9, 8, 6, 6, 4, 6, 1])
And sort it:
tensor([0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 7, 8, 8, 9, 9, 9])
We also record the indices so that we can recover back.
    2. Next we perform the split by searching the index of the chunking
       boundary. So the above array will be split into 4 parts:
       tensor([[0, 1, 1, 2, 2], [3, 3, 4, 4, 4, 4, 5],
        [6, 6, 6, 6, 6, 6, 7, 8, 8], [9, 9, 9]])
Rearrangement may be needed if the rank order is different from
its index in the placement.
3. Next, we communicate the length of each part to each rank via all2all
so that each rank now knows what input it will get from all other ranks.
    4. Before we send out the array to other ranks, we need to do the modulo operation
       so that each rank can use it for the embedding lookup.
       The above tensor will look like the below after taking the modulo by 3:
       tensor([[0, 1, 1, 2, 2], [0, 0, 1, 1, 1, 1, 2],
        [0, 0, 0, 0, 0, 0, 1, 2, 2], [0, 0, 0]])
    5. Now, each rank receives a matrix (size may vary) and does the lookup. We then use
all2all to send the result back to each rank.
6. We use the recorded indices to recover the sorted positions and reshape the
matrix to (4 x 6 x 17), which is what we need.
COLWISE SHARDING
================
For col-wise sharding the weight is sharded on dimension 1.
The overall algorithm can be best explained with an example. Let's assume
the dims for input are (4 x 6) and W are (16 x 17) and W is sharded across
4 GPUs creating 3 shards of (16 x 5) and 1 shard of (16 x 2).
The algorithm is as follows:
1. First the input is broadcasted to all ranks, since this is SPMD we
actually do an all_gather for all the inputs resulting in 4 (4 x 6)
inputs on each rank.
    2. Next we perform a local embedding lookup operation by applying each
       input (4 x 6) to the local shard (16 x 5) ((16 x 2) for the last).
       This results in 4 (5 x 6 x 4) ((2 x 6 x 4) for the last) matrices
       on each rank. We transpose dim 0 and dim 2.
3. Next, we concat these 4 matrices and perform an all2all to share the
appropriate (5 x 6 x 4) or (2 x 6 x 4) matrices to each rank.
4. Now, each rank receives a (17 x 6 x 4) matrix which is basically the
size of the result we need.
    5. If placements are not in order, any appropriate rearrangement of columns
       is done for the (17 x 6 x 4) matrix and finally we transpose
       dim 0 and dim 2 again.
6. If max_norm is specified, we manually sum up the norm and renorm. Because
the renorm must be in place, we need to override the local_shard to mimic
this behavior.
"""
# Validate input params
_validate_embedding_param(args, kwargs)
input = args[0]
weight = args[1]
max_norm = kwargs.get("max_norm")
norm_type = kwargs.get("norm_type")
padding_idx = kwargs.get("padding_idx")
local_shard = weight.local_tensor().contiguous()
sharding_dim = weight._sharding_spec.dim
world_size = dist.get_world_size(pg)
rank = dist.get_rank(pg)
if sharding_dim == 1:
output, local_shard = _handle_col_wise_sharding(
input, world_size, weight, local_shard, max_norm, norm_type, padding_idx, pg
)
weight.local_shards()[0].tensor = local_shard
return output
elif sharding_dim == 0:
return _handle_row_wise_sharding(
input,
world_size,
weight,
local_shard,
max_norm,
norm_type,
padding_idx,
rank,
pg,
)
else:
raise RuntimeError(
f"nn.Embedding weight sharded on dim {sharding_dim} not supported!"
)
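# A single-process sketch of the row-wise ID bucketing described in the docstring above
# (illustrative only): it reproduces steps 1, 2 and 4 for the (10 x 17) weight split as
# [3, 3, 3, 1] rows over 4 ranks, with plain tensor ops and no collectives.
def _example_row_wise_bucketing_sketch():
    inp = torch.tensor([[6, 5, 2, 9, 6, 3],
                        [3, 1, 2, 4, 7, 6],
                        [4, 0, 4, 9, 8, 9],
                        [8, 6, 6, 4, 6, 1]])
    input_sorted, _ = torch.sort(torch.reshape(inp, (-1,)))
    # Row ranges owned by each rank: [0, 3), [3, 6), [6, 9), [9, 10).
    boundaries = [0, 3, 6, 9, 10]
    buckets = []
    for start, end in zip(boundaries[:-1], boundaries[1:]):
        lo = torch.searchsorted(input_sorted, start).item()
        hi = torch.searchsorted(input_sorted, end).item()
        # Local row index on the owning rank (modulo of the max shard size, 3).
        buckets.append(torch.remainder(input_sorted[lo:hi], 3))
    return buckets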
def _validate_embedding_param(args, kwargs):
"""
Validate input params of sharded embedding op.
Args:
        input: list of IDs used for lookup.
        weight: sharded weight tensor.
kwargs: same as normal Embedding.
Return: None.
"""
input = args[0]
weight = args[1]
max_norm = kwargs.get("max_norm")
norm_type = kwargs.get("norm_type")
scale_grad_by_freq = kwargs.get("scale_grad_by_freq")
sparse = kwargs.get("sparse")
padding_idx = kwargs.get("padding_idx")
# Validate types
if not isinstance(input, torch.Tensor):
raise TypeError("input need to be torch.Tensor")
if not isinstance(weight, ShardedTensor):
raise TypeError("weight needs to be ShardedTensor")
weight_size = weight.size()
if len(weight_size) != 2:
raise ValueError("Weight needs to have exactly 2 dims")
    if int(torch.min(input).item()) < 0:
        raise ValueError(
            f"Index out of range in Input {int(torch.min(input).item())} {weight_size[1]}"
        )
    if int(torch.max(input).item()) >= weight_size[0]:
        raise ValueError(
            f"Index out of range in Input {int(torch.max(input).item())} {weight_size[1]}"
        )
if scale_grad_by_freq:
raise RuntimeError(
'nn.Embedding weight sharded with flag on "scale_grad_by_freq" not supported!'
)
if sparse:
raise RuntimeError(
'nn.Embedding weight sharded with flag on "sparse" not supported!'
)
if max_norm and max_norm <= 0.0:
raise ValueError('"max_norm" must be larger than zero!')
if not isinstance(weight._sharding_spec, ChunkShardingSpec):
raise ValueError("Only ChunkShardingSpec supported for ShardedTensor ops!")
if len(weight.local_shards()) != 1:
raise ValueError("Only one local shard supported!")
def _handle_col_wise_sharding(
input, world_size, weight, local_shard, max_norm, norm_type, padding_idx, pg
):
"""
Entry-point function to handle the logic of col-wise sharding of weight
for embedding. (Detailed explanations of the logic can be found in
the comment for sharded_embedding.)
Args:
        input: list of IDs used for lookup and aggregation.
        world_size: number of ranks.
        weight: sharded weight tensor.
        local_shard: col-wise sharded local weight used for lookup.
max_norm: If given, each embedding vector with norm larger
than max_norm is renormalized to have norm max_norm.
Note: this will modify weight in-place.
norm_type: The p in the p-norm to compute for the max_norm option.
padding_idx: If specified, the entries at padding_idx do
not contribute to the gradient; therefore, the embedding
vector at padding_idx is not updated during training,
i.e. it remains as a fixed “pad”.
pg: process group.
Returns: final result of lookup.
"""
gathered_inputs = None
if max_norm is not None:
# max_norm changes the weight in-place
local_shard, gathered_inputs = _handle_max_norm_col_wise(
max_norm, norm_type, local_shard, input, world_size, pg
)
output = _handle_col_wise_sharding_base(
torch.nn.functional.embedding,
len(input.size()),
input,
world_size,
weight,
local_shard,
pg,
padding_idx=padding_idx,
gathered_inputs=gathered_inputs,
)
return (output, local_shard)
def _handle_row_wise_sharding(
input, world_size, weight, local_shard, max_norm, norm_type, padding_idx, rank, pg
):
"""
Entry-point function to handle the logic of row-wise sharding of weight
for embedding. (Detailed explanations of the logic can be found in
the comment for sharded_embedding.)
Args:
        input: list of IDs used for lookup and aggregation.
        world_size: number of ranks.
        weight: sharded weight tensor.
        local_shard: row-wise sharded local weight used for lookup.
max_norm: If given, each embedding vector with norm larger
than max_norm is renormalized to have norm max_norm.
Note: this will modify weight in-place.
norm_type: The p in the p-norm to compute for the max_norm option.
padding_idx: If specified, the entries at padding_idx do
not contribute to the gradient; therefore, the embedding
vector at padding_idx is not updated during training,
i.e. it remains as a fixed “pad”.
        rank: rank of the current CUDA process.
pg: process group.
Returns: final result of lookup.
"""
# flatten the ids across all input and sort
input_size = input.size()
input_1d = torch.reshape(input, (-1,)).contiguous()
input_sorted, indices_1d = torch.sort(input_1d)
rearrange_indices_1d = torch.argsort(indices_1d)
    input_sorted = input_sorted.contiguous()
(
input_sorted,
input_split_sizes,
sharded_dim_size_max,
_,
rearrange_indices_1d_second_order,
padding_idx,
) = _handle_row_wise_lookup_distribute(
input_sorted, input, world_size, weight, rank, padding_idx
)
# Get the input split size to be sent from each rank to the current rank.
# We can then infer the output split size.
output_split_sizes = _communicate_size_to_each_rank(
input_split_sizes, world_size, input, pg
)
# Input sent from each rank to the current rank may have different sizes.
gathered_input = torch.empty(
sum(output_split_sizes), dtype=torch.int64, device=input.device
)
# Perform the modular operation of the 1D tensor to be sent to each rank.
input_sorted = torch.remainder(input_sorted, sharded_dim_size_max)
# Perform alltoall
dist.all_to_all_single(
gathered_input,
input_sorted,
input_split_sizes=input_split_sizes,
output_split_sizes=output_split_sizes,
group=pg,
)
# If input is None, passing in max_norm causes
# errors in CUDA.
if max_norm is not None and gathered_input.size(0) == 0:
max_norm = None
# Perform local embedding look up.
gathered_input_embeddings = torch.nn.functional.embedding(
gathered_input,
local_shard,
padding_idx=padding_idx,
max_norm=max_norm,
norm_type=norm_type,
)
# Gather all lookup result appropriately by performing alltoall again
gathered_output = torch.empty(
input_sorted.size(0), weight.size(1), device=input.device
)
dist.all_to_all_single(
gathered_output,
gathered_input_embeddings,
input_split_sizes=output_split_sizes,
output_split_sizes=input_split_sizes,
group=pg,
)
# Rearrange the results to its original shape.
if rearrange_indices_1d_second_order is not None:
gathered_output = gathered_output[rearrange_indices_1d_second_order]
gathered_output = gathered_output[rearrange_indices_1d]
# Return the appropriate local result.
return torch.reshape(gathered_output, (*input_size, weight.size(1)))
| pytorch-master | torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/embedding.py |
from typing import List
import torch
import torch.distributed as dist
from torch.autograd import Function
from torch.distributed.nn.functional import (
_all_gather_base,
all_to_all_single,
)
from torch.distributed._shard.partial_tensor import _PartialTensor
from torch.distributed._shard.sharded_tensor import (
ShardedTensor,
)
from torch.distributed._shard.sharding_spec import ChunkShardingSpec
from torch.distributed._shard.sharding_spec.api import custom_sharding_spec_op
from torch.distributed._shard.sharding_spec._internals import (
get_split_size,
get_chunked_dim_size,
get_chunk_sharding_params,
)
from ._common import (
_result_distribute_with_col_rearrange,
)
@custom_sharding_spec_op(ChunkShardingSpec, torch.nn.functional.linear)
def sharded_linear(types, args, kwargs, pg):
"""
Handles ``__torch_function__`` dispatch for ``torch.nn.functional.linear``.
This method computes a sharded linear and has the following limitations:
1. Supports only sharding of ``weight``.
2. Supports only ``ChunkShardingSpec``.
3. Supports only a single local shard per rank.
4. Tailored for Megatron-LM style model(tensor) parallelism. Further API
calls are needed if a fully synced local tensor is needed.
Megatron-LM paper link: https://arxiv.org/abs/1909.08053
Based on the dimension that the weight is sharded on, there are two
algorithms:
ROWWISE SHARDING
================
For row-wise sharding the weight is sharded on dimension 1, but this is
row-wise since the actual computation for the linear layer involves
transposing the weight: :math:`y = xA^T + b`
The overall algorithm can be best explained with an example. Let's assume
the dims for x are (13 x 16) and A are (17 x 16) and A is sharded across
4 GPUs creating shards of (17 x 4). The algorithm is as follows:
1. First the input is split on the column dimension to create shards of
(13 x 4) and communicated to all other ranks. Since we are running in
an SPMD mode with each rank having distinct input, this is done via
an all2all run on all ranks.
2. Now each (13 x 4) shard on each GPU is multiplied with the local shard
(4 x 17) (transposed) resulting in a (13 x 17) matrix which is the same
size that we need for the global result which would be (13 x 16)
multiplied by (16 x 17). But the final result needs to be aggregated
across the rest of the ranks.
    3. Here we just return the partial result. One can call the API
       aggregate_partial_tensor_list to get the aggregated final result.
       The API uses a reduce_scatter operation ensuring each rank
       aggregates its own result. This is essentially a sum operation across
       all the (13 x 17) local computations we did for each rank.
    4. For the partial result, we only add 1 / n of the bias term to the partial
       result, where n is the number of GPUs.
COLWISE SHARDING
================
For col-wise sharding the weight is sharded on dimension 0, but this is
col-wise since the actual computation for the linear layer involves
transposing the weight: :math:`y = xA^T + b`
The overall algorithm can be best explained with an example. Let's assume
the dims for x are (13 x 17) and A are (16 x 17) and A is sharded across
4 GPUs creating shards of (4 x 17). The algorithm is as follows:
1. First the input is broadcasted to all ranks, since this is SPMD we
actually do an all_gather for all the inputs resulting in 4 (13 x 17)
inputs on each rank.
2. Next we perform local matmuls by multiplying each input (13 x 17)
with the local shard (17 x 4) (transposed). This results in 4 (13 x 4)
matrices on each rank.
3. Next, we stack them into a (4 x 13 x 4) tensor and build a sharded
tensor across 4 ranks.
    4. To merge them into a fully-synced local tensor, one can call the API
       merge_sharded_local_results.
       This API concats these 4 matrices and performs an all2all to share the
       appropriate (13 x 4) matrices to each rank. Specifically, each rank
       receives a (13 x 16) matrix which is basically the size of the result.
5. If placements are not in order any appropriate rearrangement of rows
are done for the (13 x 16) matrix and finally the bias term is added.
"""
# Validate input params
_validate_linear_op_param(args, kwargs)
input = args[0]
weight = args[1]
bias = args[2]
local_shard = weight.local_tensor()
local_shard_t = local_shard.t()
sharding_dim = weight._sharding_spec.dim
world_size = dist.get_world_size(pg)
rank = dist.get_rank(pg)
if sharding_dim == 1 and isinstance(input, ShardedTensor):
return _handle_row_wise_sharding_sharded_tensor(
input, world_size, weight, local_shard_t, bias, pg
)
elif sharding_dim == 1 and isinstance(input, torch.Tensor):
return _handle_row_wise_sharding_tensor(
input, world_size, weight, rank, local_shard_t, bias, pg
)
elif sharding_dim == 0:
return _handle_col_wise_sharding(
input, world_size, weight, rank, local_shard_t, bias, pg
)
else:
raise RuntimeError(
f"nn.Linear weight sharded on dim {sharding_dim} not supported!"
)
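# A single-process numerical sketch of the two shardings described above (illustrative
# only): the docstring's shapes are simplified to a square (16 x 16) weight so one x / A
# pair covers both cases, and the 4 "ranks" are emulated with plain tensor slicing.
def _example_sharded_linear_math_sketch():
    torch.manual_seed(0)
    world_size = 4
    x, a, b = torch.randn(13, 16), torch.randn(16, 16), torch.randn(16)
    dense = torch.nn.functional.linear(x, a, b)
    # Col-wise: A is split along dim 0, so each "rank" produces a distinct slice of columns.
    col_a, col_b = torch.chunk(a, world_size, dim=0), torch.chunk(b, world_size)
    col_result = torch.cat(
        [x.matmul(ai.t()) + bi for ai, bi in zip(col_a, col_b)], dim=-1
    )
    # Row-wise: A is split along dim 1; partial products are summed, each adding bias / n.
    row_a, row_x = torch.chunk(a, world_size, dim=1), torch.chunk(x, world_size, dim=1)
    row_result = sum(xi.matmul(ai.t()) + b / world_size for xi, ai in zip(row_x, row_a))
    assert torch.allclose(dense, col_result, atol=1e-5)
    assert torch.allclose(dense, row_result, atol=1e-5)
    return dense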
def _validate_linear_op_param(args, kwargs):
"""
Validate input params of sharded linear op.
Args:
input: input of the linear layer.
        weight: sharded weight tensor.
kwargs: same as normal Linear.
Return: None.
"""
input = args[0]
weight = args[1]
bias = args[2]
# Validate types
if not isinstance(input, torch.Tensor) and not isinstance(input, ShardedTensor):
raise TypeError("input needs to be either torch.Tensor or ShardedTensor")
if type(bias) != torch.Tensor and type(bias) != torch.nn.Parameter:
raise TypeError("bias needs to be torch.Tensor")
if not isinstance(weight, ShardedTensor):
raise TypeError("weight needs to be ShardedTensor")
if len(input.size()) < 1: # type: ignore[arg-type]
raise ValueError("Input needs to have at least 1 dim")
weight_size = weight.size()
if len(weight_size) != 2:
raise ValueError("Weight needs to have exactly 2 dims")
if len(bias.size()) != 1:
raise ValueError("Bias needs to have exactly 1 dim")
if input.size()[-1] != weight_size[1]: # type: ignore[index]
raise ValueError(
f"Input dim: {input.size()[-1]} does not match " # type: ignore[index]
f"appropriate weight dim: {weight_size[1]}"
)
if not isinstance(weight._sharding_spec, ChunkShardingSpec):
raise ValueError("Only ChunkShardingSpec supported for ShardedTensor ops!")
if len(weight.local_shards()) != 1:
raise ValueError("Only one local shard supported!")
def _handle_col_wise_sharding(input, world_size, weight, rank, local_shard_t, bias, pg):
"""
Entry-point function to handle the logic of col-wise sharding of weight
for Linear. (Detailed explanations of the logic can be found in the
comment for sharded_linear.)
    When the local tensor only has one dimension, we add one more dimension
    for resharding. We need to do a squeeze manually to reduce the dimension later on.
    For example, if we have:
        input: size[15]
        weight: size[15, 16]
        world_size: 4
    In each rank, we will have 4 * [4] tensors. We then stack them into a [4, 4]
    tensor and generate a sharded tensor sharded by dim 1.
    For the remaining situations, we simply concatenate local tensors. No more actions
    are needed afterward.
Args:
input: matrix to be multiplied with the sharded weight.
world_size: number of ranks.
        weight: sharded weight tensor.
        rank: rank of the current CUDA process.
        local_shard_t: transposed local shard of the weight used for the matmul.
bias: bias term of linear op.
pg: process group.
Returns:
A :class:`ShardedTensor` object which filled with local intermediate results.
"""
# allgather the inputs first.
out_size = list(input.size())
out_size[0] = input.size(0) * dist.get_world_size(pg)
output = torch.empty(out_size, device=input.device, dtype=input.dtype)
output = _all_gather_base(output, input, group=pg)
# Adjust bias and perform local matmul.
(start_pos, chunk_size) = get_chunk_sharding_params(
bias.size(0), world_size, weight._sharding_spec, rank
)
local_bias = _BiasTensorNarrow.apply(
world_size, start_pos, chunk_size, weight, pg, bias
)
if output.dim() == 1:
output = output.view(dist.get_world_size(pg), -1)
if output.dim() <= 2:
# Use fused version if possible.
result = torch.addmm(local_bias, output, local_shard_t)
else:
result = output.matmul(local_shard_t) + local_bias
# Build ShardedTensor as result.
st_size = list(result.size())
st_size[-1] = weight.size(0)
new_sharding_spec = ChunkShardingSpec(
dim=-1,
placements=weight.sharding_spec().placements
)
return ShardedTensor._init_from_local_tensor(
result,
new_sharding_spec,
*st_size, # type: ignore[arg-type]
process_group=pg,
)
def _handle_row_wise_sharding_tensor(
input, world_size, weight, rank, local_shard_t, bias, pg
):
"""
Entry-point function to handle the logic of row-wise sharding of weight
for Linear. (Detailed explanations of the logic can be found in the
comment for sharded_linear.)
Args:
input: matrix to be multiplied with the sharded weight.
world_size: number of ranks.
        weight: sharded weight tensor.
        rank: rank of the current CUDA process.
        local_shard_t: transposed local shard of the weight used for the matmul.
bias: bias term of linear op.
pg: process group.
Returns:
A :class:`_PartialTensor` object which stores the partial local result.
"""
# alltoall to gather all the appropriate inputs.
input_t = input.transpose(0, -1).contiguous()
input_t_size = input_t.size()
# Compute expected size
split_size = get_split_size(input_t_size[0], world_size)
input_split_sizes = [0] * world_size
rearrange_rows = False
for idx, placement in enumerate(weight._sharding_spec.placements):
sharded_dim_size = get_chunked_dim_size(input_t_size[0], split_size, idx)
input_split_sizes[placement.rank()] = sharded_dim_size
if placement.rank() != idx:
rearrange_rows = True
if rearrange_rows:
# Need to re-arrange rows of input_t for all2all.
indices: List[List[int]] = [[0]] * world_size
        # When we do the chunk split, we always ensure the first N - 1 chunks get the
        # max chunk size and then the Nth chunk gets the rest. So input_split_sizes like
        # [3, 3, 3, 4] are not possible. The expected split size will be [4, 4, 4, 1].
sharded_dim_size_max = max(input_split_sizes)
for idx, placement in enumerate(weight._sharding_spec.placements):
split_size = input_split_sizes[placement.rank()]
offset_start_idx = idx * sharded_dim_size_max
indices[placement.rank()] = list(
range(offset_start_idx, offset_start_idx + split_size)
)
indices_flatten = list(idx for indice in indices for idx in indice)
input_t = input_t.index_select(
0, torch.tensor(indices_flatten, device=input_t.device)
)
gathered_input_size = [input_split_sizes[rank] * world_size] + list(
input_t_size[1:]
)
gathered_input = torch.empty(gathered_input_size, device=input_t.device, dtype=input_t.dtype)
# Perform autograd enabled alltoall
all_to_all_single(
gathered_input, input_t, input_split_sizes=input_split_sizes, group=pg
)
# Reshape gathered_input appropriately for matmul
shard_size = local_shard_t.size()[0]
reshaped_inputs = [
torch.narrow(gathered_input, 0, r * shard_size, shard_size).transpose(0, -1)
for r in range(world_size)
]
reshaped_input = torch.cat(reshaped_inputs)
if reshaped_input.dim() == 1:
reshaped_input = reshaped_input.view(-1, local_shard_t.size(0))
# Perform appropriate local matmul
if reshaped_input.dim() <= 2:
result = torch.addmm(_BiasTensorPartial.apply(world_size, bias), reshaped_input, local_shard_t)
else:
result = reshaped_input.matmul(local_shard_t) + _BiasTensorPartial.apply(world_size, bias)
# Return the partial local result.
return _PartialTensor(result, pg)
def _handle_row_wise_sharding_sharded_tensor(
input, world_size, weight, local_shard_t, bias, pg
):
"""
Entry-point function to handle the logic of row-wise sharding of weight
for Linear when the input is a sharded tensor. (Detailed explanations
of the logic can be found in the comment for sharded_linear.)
Args:
input: matrix to be multiplied with the sharded weight.
world_size: number of ranks.
        weight: sharded weight tensor.
        local_shard_t: transposed local shard of the weight used for the matmul.
bias: bias term of linear op.
pg: process group.
Returns:
A :class:`_PartialTensor` object which stores the partial local result.
"""
local_input = input.local_tensor()
if input.sharding_spec().dim not in (-1, len(input.size()) - 1):
raise NotImplementedError(
"The case when the input does not come from col-wise sharded "
"linear is not supported for row-wise sharded linear."
)
# Use fused version if possible.
if local_input.dim() <= 2:
result = torch.addmm(_BiasTensorPartial.apply(world_size, bias), local_input, local_shard_t)
else:
result = local_input.matmul(local_shard_t) + _BiasTensorPartial.apply(world_size, bias)
# Return the partial local result.
return _PartialTensor(result, pg)
class _BiasTensorNarrow(Function):
"""
    Since we now return the intermediate results in a col-wise sharding, we
    need to narrow the bias term in the forward pass; while doing backward, we
    need to gather all gradients of the narrowed bias across all ranks.
"""
@staticmethod
def forward(ctx, world_size, start_pos, chunk_size, weight, pg, bias):
ctx.weight = weight
ctx.pg = pg
ctx.world_size = world_size
return torch.narrow(bias, 0, start_pos, chunk_size)
@staticmethod
def backward(ctx, grad_output):
results = [grad_output.clone()] * ctx.world_size
return (None, None, None, None, None) + (
_result_distribute_with_col_rearrange(
results, grad_output, ctx.world_size, ctx.weight, ctx.pg
),
)
class _BiasTensorPartial(Function):
"""
    Since we now only return partial results in a row-wise sharding, we need to
    divide the bias term by the world size in the forward pass; while doing backward,
    we need to skip this division op.
"""
@staticmethod
def forward(ctx, world_size, bias):
ctx.world_size = world_size
return torch.div(bias, world_size)
@staticmethod
def backward(ctx, grad_output):
return (None, grad_output)
| pytorch-master | torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/linear.py |
# coding=utf-8
from typing import List
import torch
import torch.distributed as dist
from torch.distributed._shard.sharding_spec import ChunkShardingSpec
from torch.distributed._shard.sharded_tensor._ops._common import _sharded_op_common
from torch.distributed._shard.sharded_tensor import (
ShardedTensor,
)
from torch.distributed._shard.sharding_spec._internals import (
get_split_size,
get_chunked_dim_size,
)
from torch.distributed.nn.functional import (
all_gather,
all_to_all_single,
)
from torch.distributed._shard.sharding_spec.api import custom_sharding_spec_op
def _chunk_sharding_spec_check(spec, op):
"""
For the given op implementation check if the sharding spec is ChunkShardingSpec.
"""
if not isinstance(spec, ChunkShardingSpec):
raise NotImplementedError(
f"Only ChunkShardingSpec supported for '{op.__name__}'."
)
def _register_sharded_op_on_local_tensor(
op, early_stop_func=None, extra_check=None, customized_func=None
):
"""
    Handles ``__torch_function__`` dispatch for ops which are performed on
    the single local tensor of the sharded tensor, such as
    ``torch.nn.functional.softmax`` or ``torch.Tensor.view``.
For more complicated ops, a customized func can be used to generate
the new local tensor, sharding spec and sharded tensor size.
Args:
op: The op to be registered and applied to all shards of the st.
early_stop_func (Callable, optional): the func for early stop.
Default: if ``None``, no early stop.
extra_check (Callable, optional): the func for extra condition check.
Default: if ``None``, no extra check.
customized_func (Callable, optional): the func for customized logic
to generate the new local tensor, sharding spec and sharded tensor size.
Default: if ``None``, we simply lower to the real op call with
the single local tensor of the st.
Return:
func (Callable): registered implementation for sharded op for
``__torch_function__`` dispatch.
"""
@custom_sharding_spec_op(ChunkShardingSpec, op)
@_sharded_op_common(op, early_stop_func, extra_check)
def sharded_tensor_op_on_local_tensor(types, args=(), kwargs=None, pg=None):
st = args[0]
sharding_spec = st.sharding_spec()
if len(st.local_shards()) != 1:
raise TypeError(
f"torch function '{op.__name__}', with args: {args} and "
f"kwargs: {kwargs} only supported for single local tensor!"
)
st_size = st.size()
if customized_func:
local_tensor, sharding_spec, st_size = customized_func(args, kwargs, pg)
else:
args = (st.local_tensor(), *args[1:])
local_tensor = op(*args, **kwargs)
return ShardedTensor._init_from_local_tensor(
local_tensor.contiguous(),
sharding_spec,
st_size, # type: ignore[arg-type]
process_group=pg,
init_rrefs=st._init_rrefs,
)
def _handle_col_wise_sharding_base(
op_func,
col_dim,
input,
world_size,
weight,
local_shard,
pg,
gathered_inputs=None,
mode=None,
gathered_per_sample_weights=None,
gathered_offsets=None,
padding_idx=None,
):
"""
For col-wise sharding of weight, lots of logic are common.
So we extract the common logic and put in this function:
Step 1. To get input from each rank and
Step 2. To perform the op on the concatenated tensor.
Step 3. To distribute results to each rank with col rearrangement.
Step 4. To concatenate all results from all ranks.
Args:
op_func: operator which is applied to the input tensor.
col_dim: dim of result tensor after the operation.
input: tensor to be applied op on.
world_size: number of ranks.
        weight: sharded weight tensor.
local_shard: col-wise sharded weight tensor.
pg: process group.
gathered_inputs: list of inputs from all ranks. If specified, we
don't need to communicate with each rank any more.
mode: aggregation mode of EmbeddingBag.
gathered_per_sample_weights: per_sample_weights across all ranks.
gathered_offsets: offsets across all ranks.
padding_idx: If specified, the entries at padding_idx do
not contribute to the gradient; therefore, the embedding
vector at padding_idx is not updated during training,
i.e. it remains as a fixed “pad”.
Note that the embedding vector at padding_idx is
excluded from the reduction.
Return: final result of input being applied with the op.
"""
if gathered_inputs is None:
# allgather the inputs first.
gathered_inputs = all_gather(input, group=pg)
# run the operator's function for all the inputs.
results = []
for i, inp in enumerate(gathered_inputs):
if op_func == torch.nn.functional.embedding_bag:
result = op_func(
inp,
local_shard,
offsets=gathered_offsets[i] if gathered_offsets is not None else None,
mode=mode,
per_sample_weights=gathered_per_sample_weights[i]
if gathered_per_sample_weights is not None
else None,
padding_idx=padding_idx,
)
elif op_func == torch.nn.functional.embedding:
result = op_func(
inp,
local_shard,
padding_idx=padding_idx,
)
else:
result = op_func(inp, local_shard)
results.append(torch.transpose(result, 0, col_dim))
# Distribute results to each rank with col rearrangement.
output = _result_distribute_with_col_rearrange(
results, input, world_size, weight, pg
)
# transpose the output and return result.
return torch.transpose(output, 0, col_dim)
def _result_distribute_with_col_rearrange(
results, input, world_size, weight, pg
):
"""
For col-wise sharding of weight, we need to distribute
    results to each rank. We do that in this function.
Note that, if the index in the Sharding Spec is not equal to
the rank number, we need to do the rearrangement based on the
order given by the Sharding Spec (placement).
Args:
results: results from ops applied to inputs from all ranks.
We need to distribute them back to their original ranks.
input: tensor to be applied op to.
world_size: number of ranks.
        weight: sharded weight tensor.
pg: process group.
Return: column rearranged result.
"""
# Process results and outputs for all2all.
sharding_dim = weight._sharding_spec.dim
sharding_dim_size = weight.size(sharding_dim)
dims = list(results[0].size())
dims[0] = sharding_dim_size
combined_results = torch.cat(results)
output = torch.empty(*dims, device=combined_results.device, dtype=combined_results.dtype)
# Compute output splits
split_size = get_split_size(sharding_dim_size, world_size)
output_split_sizes = [0] * world_size
for idx, placement in enumerate(weight._sharding_spec.placements):
output_split_sizes[placement.rank()] = get_chunked_dim_size(
sharding_dim_size, split_size, idx
)
# distribute the outputs using all2all.
output = all_to_all_single(
output, combined_results, output_split_sizes=output_split_sizes, group=pg
)
# Check if we need to rearrange columns appropriately for output.
rearrange_columns = any(
[
idx != placement.rank()
for idx, placement in enumerate(weight._sharding_spec.placements)
]
)
if not rearrange_columns:
return output
indices = []
for placement in weight._sharding_spec.placements:
dim_size = output_split_sizes[placement.rank()]
start = sum(
[
split_size if i < placement.rank() else 0
for i, split_size in enumerate(output_split_sizes)
]
)
indices += list(range(start, start + dim_size))
return output.index_select(0, torch.tensor(indices, device=output.device))
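# An index-only sketch of the rearrangement above (illustrative only): with an assumed
# out-of-order placement [rank1, rank0] and output splits of sizes [2, 3], the rows of
# the all2all output (grouped as rank0's rows first) are reordered into placement order.
def _example_col_rearrange_indices_sketch():
    placement_ranks = [1, 0]          # placements[i].rank() for i = 0, 1
    output_split_sizes = [2, 3]       # rows owned by rank0 and rank1 in the output
    indices = []
    for r in placement_ranks:
        start = sum(s for i, s in enumerate(output_split_sizes) if i < r)
        indices += list(range(start, start + output_split_sizes[r]))
    # rank1's rows (2, 3, 4) come first, then rank0's rows (0, 1).
    assert indices == [2, 3, 4, 0, 1]
    return indices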
def _handle_row_wise_lookup_distribute(
input_sorted, input, world_size, weight, rank, padding_idx
):
"""
In the circumstance of row-wise sharding of weight, we need to distribute
the sorted lookup IDs of embedding/embeddingBag to each rank.
If the index in the placement is not equal to the rank number, we need to
do the rearrangement based on the order given by the Sharding Spec (placement).
    In addition, we do two things for padding_idx. The first thing is to only
    set it if it's within the range of the current rank, and the other thing
    is to take it modulo sharded_dim_size_max.
Args:
input_sorted: sorted lookup IDs of embedding/embeddingBag.
input: tensor to be applied op on.
world_size: number of ranks.
        weight: sharded weight tensor.
        rank: rank of the current CUDA process.
padding_idx: If specified, the entries at padding_idx do
not contribute to the gradient and reduction.
Return:
input_sorted: sorted lookup IDs of embedding/embeddingBag
Rearrangement performed if it is needed.
input_split_sizes: size of IDs to be assigned to each rank.
sharded_dim_size_max: the max size of the row each rank gets.
input_split_rearrange_indices: indices of row rearrangement.
rearrange_indices_1d_second_order: reverse indices of row
rearrangement, which will be used to restore the original
order.
padding_idx: Same as input if padding_idx is within the range
of the given rank; otherwise, None is returned. It is
            also taken modulo sharded_dim_size_max.
"""
# Decide which rank the input goes to by check the sharding range.
split_size = get_split_size(weight.size(0), world_size)
rearrange_rows = False
indices_flatten = None
input_split_sizes: List[int] = [0] * world_size
input_split_start_indices: List[int] = [0] * world_size
start_row_idx_rank = None
end_row_idx_rank = None
    # When we do the chunk split, we always ensure the first N - 1 chunks get the
    # max chunk size and then the Nth chunk gets the rest. So input_split_sizes like
    # [3, 3, 3, 4] are not possible. The expected split size will be [4, 4, 4, 1].
sharded_dim_size_max = get_chunked_dim_size(weight.size(0), split_size, 0)
for idx, placement in enumerate(weight._sharding_spec.placements):
sharded_dim_size = get_chunked_dim_size(weight.size(0), split_size, idx)
start_row_idx = idx * sharded_dim_size_max
end_row_idx = start_row_idx + sharded_dim_size
start_idx = torch.searchsorted(input_sorted, start_row_idx).item()
end_idx = torch.searchsorted(input_sorted, end_row_idx).item()
input_split_sizes[placement.rank()] = int(end_idx - start_idx)
input_split_start_indices[placement.rank()] = int(start_idx)
if placement.rank() != idx:
rearrange_rows = True
# Store the range of the current rank.
if placement.rank() == rank:
start_row_idx_rank = start_row_idx
end_row_idx_rank = end_row_idx
# Take padding_idx modulo sharded_dim_size_max if it is within the range.
if padding_idx is not None:
if padding_idx < start_row_idx_rank or padding_idx >= end_row_idx_rank:
padding_idx = None
else:
padding_idx = padding_idx % sharded_dim_size_max
rearrange_indices_1d_second_order = None
if rearrange_rows:
# Need to re-arrange the 1D tensor to be sent via all2all.
indices: List[List[int]] = [[0]] * world_size
for placement in weight._sharding_spec.placements:
split_length = input_split_sizes[placement.rank()]
offset_idx = input_split_start_indices[placement.rank()]
indices[placement.rank()] = list(
range(offset_idx, offset_idx + split_length)
)
indices_flatten = list(idx for indice in indices for idx in indice)
input_sorted = input_sorted.index_select(
0, torch.tensor(indices_flatten, device=input.device)
)
rearrange_indices_1d_second_order = torch.argsort(torch.Tensor(indices_flatten))
return (
input_sorted,
input_split_sizes,
sharded_dim_size_max,
torch.tensor(indices_flatten, device=input.device) if rearrange_rows else None,
rearrange_indices_1d_second_order,
padding_idx,
)
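# Editor's note: the following is an illustrative sketch, not part of the original
# module. It shows the searchsorted-based bucketing above for a hypothetical,
# in-order placement: 16 rows sharded over 4 ranks -> row ranges [0, 4), [4, 8), ...
def _example_row_wise_bucketing():
    import torch
    input_sorted = torch.tensor([0, 1, 3, 4, 5, 5, 9, 13, 15])
    world_size = 4
    sharded_dim_size_max = 4  # the first N - 1 chunks are maxed out
    input_split_sizes = []
    for idx in range(world_size):
        start_row_idx = idx * sharded_dim_size_max
        end_row_idx = start_row_idx + sharded_dim_size_max
        start = torch.searchsorted(input_sorted, start_row_idx).item()
        end = torch.searchsorted(input_sorted, end_row_idx).item()
        input_split_sizes.append(int(end - start))
    # input_split_sizes -> [3, 3, 1, 2]: {0, 1, 3} go to rank 0, {4, 5, 5} to rank 1, ...
    padding_idx = 5
    # padding_idx is kept only on the rank that owns row 5 (rank 1), taken modulo:
    local_padding_idx = padding_idx % sharded_dim_size_max  # -> 1
    return input_split_sizes, local_padding_idx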
def _communicate_size_to_each_rank(
input_size_list, output_size, input, pg, tensor_type=torch.int
):
"""
In the circumstance of row-wise sharding of weight, we need to first
communicate the input length to each rank because each rank gets a
different one.
Args:
input_size_list: list of sizes to be sent to each rank.
output_size: length of the output tensor.
input: tensor to be applied op on.
pg: process group.
tensor_type: dtype of tensor.
Return: A list of communication results (int).
"""
input_size_list_tensor = torch.tensor(
input_size_list, dtype=tensor_type, device=input.device
)
output_size_list_tensor = torch.empty(
output_size, dtype=tensor_type, device=input.device
)
dist.all_to_all_single(
output_size_list_tensor,
input_size_list_tensor,
group=pg,
)
return output_size_list_tensor.tolist()
def _communicate_list_to_each_rank(
input_tensor_list, output_lists, input, pg, tensor_type=torch.int64
):
"""
In the circumstance of row-wise sharding of weight, we need to
communicate a list of input tensors to each rank. Because the
input could be a list of list, we need to first convert the list
to a tensor.
Args:
input_tensor_list: list of tensors to be sent to each rank.
output_lists: list of sizes to be obtained from each rank.
input: tensor to be applied op on.
pg: process group.
tensor_type: dtype of tensor.
Return: A list of communication results (tensors).
"""
output_tensor_list = []
for output_list in output_lists:
output_tensor_list.append(
torch.empty(output_list, dtype=tensor_type, device=input.device)
)
dist.all_to_all(
output_tensor_list,
input_tensor_list,
group=pg,
)
return output_tensor_list
def _handle_max_norm_col_wise(
max_norm,
norm_type,
local_shard,
input,
world_size,
pg,
):
"""
For col-wise sharding of weight, we need to aggregate the
norm across all ranks before we can perform the proper re-norm.
Note that the max_norm logic is only applied to the embedding
indices that are looked up and not the whole shard.
Args:
max_norm: If given, each embedding vector with norm larger
than max_norm is renormalized to have norm max_norm.
Note: this will modify weight in-place.
norm_type: The p in the p-norm to compute for the max_norm option.
local_shard: col-wise sharded local weight used for lookup.
input: tensor to be applied op to.
world_size: number of ranks.
pg: process group.
Return:
local_shard_norm_renormed: local_shard re-normed to max_norm if the norm is larger
than it.
gathered_inputs: list of inputs from all ranks.
"""
norm_type = norm_type if norm_type is not None else 2.0
# allgather the inputs first.
gathered_inputs = [torch.zeros_like(input) for _ in range(world_size)]
dist.all_gather(gathered_inputs, input, group=pg)
unique_inp = torch.unique(torch.cat(gathered_inputs))
local_shard_sum = torch.sum(
torch.pow(torch.abs(local_shard), norm_type), dim=1, dtype=local_shard.dtype
)
# For col-wise sharding, we need to aggregate the powered sum
# from each rank first and then calculate the norm.
dist.all_reduce(local_shard_sum, group=pg)
local_shard_norm = torch.pow(local_shard_sum, 1.0 / norm_type)
max_norm_tensor = torch.full(
(local_shard.size(0),),
float("inf"),
dtype=local_shard.dtype,
device=input.device,
)
max_norm_tensor[unique_inp] = max_norm
local_shard_t = local_shard.t().contiguous()
normalized_tensor = torch.where(
local_shard_norm > max_norm_tensor, max_norm_tensor, local_shard_norm
)
# Make sure divisor is not zero.
local_shard_norm[local_shard_norm == 0.0] = 1.0
local_shard_norm_renormed = (
torch.div(torch.mul(local_shard_t, normalized_tensor), local_shard_norm)
.t()
.contiguous()
)
return local_shard_norm_renormed, gathered_inputs
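# Editor's note: the following is an illustrative sketch, not part of the original
# module. It simulates the cross-rank norm aggregation above on a single process:
# the all_reduce of the powered row sums is replaced by a plain Python sum over two
# hypothetical column shards of one weight.
def _example_max_norm_col_wise():
    import torch
    torch.manual_seed(0)
    weight = torch.randn(6, 8)
    shards = [weight[:, :5], weight[:, 5:]]   # col-wise shards of the same weight
    looked_up = torch.tensor([1, 4])          # rows actually used by the lookup
    max_norm, norm_type = 1.0, 2.0
    # "all_reduce" of the powered row sums across the two shards:
    row_pow_sum = sum(torch.pow(torch.abs(s), norm_type).sum(dim=1) for s in shards)
    row_norm = torch.pow(row_pow_sum, 1.0 / norm_type)
    target = torch.full((weight.size(0),), float("inf"))
    target[looked_up] = max_norm              # only looked-up rows are capped
    new_norm = torch.where(row_norm > target, target, row_norm)
    safe_norm = torch.where(row_norm == 0.0, torch.ones_like(row_norm), row_norm)
    renormed = [s * (new_norm / safe_norm).unsqueeze(1) for s in shards]
    # Looked-up rows of the reassembled weight now have norm <= max_norm.
    full = torch.cat(renormed, dim=1)
    assert torch.all(full[looked_up].norm(dim=1) <= max_norm + 1e-6)
    return renormed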
| pytorch-master | torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/_common.py |
| pytorch-master | torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/__init__.py |
# coding=utf-8
from typing import List, cast
import torch
import torch.distributed as dist
from torch._C._distributed_c10d import (
ReduceOp,
)
from ._common import (
_communicate_list_to_each_rank,
_communicate_size_to_each_rank,
_handle_col_wise_sharding_base,
_handle_row_wise_lookup_distribute,
_handle_max_norm_col_wise,
)
from torch.distributed._shard.sharding_spec import ChunkShardingSpec
from torch.distributed._shard.sharding_spec.api import custom_sharding_spec_op
from torch.distributed._shard.sharded_tensor import (
ShardedTensor
)
@custom_sharding_spec_op(ChunkShardingSpec, torch.nn.functional.embedding_bag)
def sharded_embedding_bag(types, args, kwargs, pg):
"""
Handles ``__torch_function__`` dispatch for ``torch.nn.functional.embedding_bag``.
This method computes a sharded embedding bag aggregation and has the following limitations:
1. Supports only sharding of ``weight``.
2. Supports only ``ChunkShardingSpec``.
3. Supports only a single local shard per rank.
4. Supports all specs except for scale_grad_by_freq, sparse, etc.
Based on the dimension that the weight is sharded on, there are two
algorithms:
ROWWISE SHARDING
================
For row-wise sharding the weight is sharded on dimension 0.
The overall algorithm can be best explained with an example. Let's assume
the dims for input are (4 x 6) and W are (16 x 17) and W is sharded across
4 GPUs creating 4 shard of (4 x 17).
The algorithm is as follows:
1. First, if the input is a 2D tensor, we sort by row. (If it's a 1D tensor, we sort
the tensor per interval defined by the offsets.)
For example, if the given input is generated within [0, 16) like
tensor([[ 3, 7, 7, 9, 2, 1],
[ 0, 0, 14, 5, 3, 12],
[ 4, 5, 5, 9, 5, 13],
[10, 3, 0, 7, 13, 9]])
Then we have the sorted 2D tensor like:
tensor([[ 1, 2, 3, 7, 7, 9],
[ 0, 0, 3, 5, 12, 14],
[ 4, 5, 5, 5, 9, 13],
[ 0, 3, 7, 9, 10, 13]])
Note that if a placement is not equal to its rank, we rearrange accordingly.
2. Based on sorted result, we now have the offset like the following:
[tensor([0, 3, 5, 6]), tensor([0, 3, 4, 4]),
tensor([0, 0, 4, 5]), tensor([0, 2, 3, 5])]
Note that embedding bag does allow an offset index equal to the length of
the input, or repeated offsets. For these cases, it returns a zero tensor.
3. Next, we rearrange the sorted tensor into different ranks by first
flattening it and grouping by ranks. Finally, we get a list of 1D tensors.
So the given tensor now becomes:
[tensor([1, 2, 3, 0, 0, 3, 0, 3]), tensor([7, 7, 5, 4, 5, 5, 5, 7]),
tensor([9, 9, 9, 10]), tensor([12, 14, 13, 13])]
4. We sync the offsets with the IDs. The offsets now become:
[tensor([0, 3, 6, 6]), tensor([0, 2, 3, 7]),
tensor([0, 1, 1, 2]), tensor([0, 0, 2, 3])]
5. Before we send the arrays out to the other ranks, we apply the modulo operation
so that each rank can use the IDs for its local embedding look-up.
The ID tensor list above looks like the following after taking it modulo 4:
[tensor([1, 2, 3, 0, 0, 3, 0, 3]), tensor([3, 3, 1, 0, 1, 1, 1, 3]),
tensor([1, 1, 1, 2]), tensor([0, 2, 1, 1])]
6. The example above only happens on one rank and each rank does a very similar thing
with its own rearranged IDs and offsets list. We then send the IDs and offsets to the
corresponding rank. Each rank does the look-up and aggregation on its local shard.
We then use reduce_scatter to send the result back to each rank and perform the
aggregation simultaneously.
7. For "Mean" mode we need to divide by either the column size (2D) or the interval
length defined by the offsets. We also mask the non-existent rows with negative
infinity so that negative values do not get wiped out in the "Max" mode.
COLWISE SHARDING
================
For col-wise sharding the weight is sharded on dimension 1.
The overall algorithm can be best explained with an example. Let's assume
the dims for input are (4 x 6) and W are (16 x 17) and W is sharded across
4 GPUs creating 3 shards of (16 x 5) and 1 shard of (16 x 2).
The algorithm is as follows:
1. First the input is broadcasted to all ranks, since this is SPMD we
actually do an all_gather for all the inputs resulting in 4 (4 x 6)
inputs on each rank.
2. Next we perform local embedding bag operation under the given mode by
applying each input (4 x 6) to the local shard (16 x 5) ((16 x 2) for the last).
This results in 4 (5 x 4) ((2 x 4) for the last) matrices on each rank.
We transpose the aggregation result.
3. Next, we concatenate these 4 matrices and perform an all2all to share the
appropriate (5 x 4) or (2 x 4) matrices to each rank.
4. Now, each rank receives a (17 x 4) matrix which is basically the
size of the result we need.
5. If placements are not in order, any appropriate rearrangement of columns
is done for the (17 x 4) matrix and finally we transpose the output again.
6. If max_norm is specified, we manually sum up the norm and renorm. Because
the renorm must be in place, we need to override the local_shard to mimic
this behavior.
"""
# Validate input params
_validate_embedding_bag_param(args, kwargs)
input = args[0]
weight = args[1]
offsets = kwargs.get("offsets")
per_sample_weights = kwargs.get("per_sample_weights")
mode = kwargs.get("mode")
max_norm = kwargs.get("max_norm")
norm_type = kwargs.get("norm_type")
include_last_offset = kwargs.get("include_last_offset")
padding_idx = kwargs.get("padding_idx")
local_shard = weight.local_tensor().contiguous()
sharding_dim = weight._sharding_spec.dim
world_size = dist.get_world_size(pg)
rank = dist.get_rank(pg)
if include_last_offset:
offsets = offsets[:-1]
if sharding_dim == 1:
output, local_shard = _handle_col_wise_sharding(
input,
world_size,
weight,
local_shard,
offsets,
per_sample_weights,
mode,
max_norm,
norm_type,
padding_idx,
pg,
)
weight.local_shards()[0].tensor = local_shard
return output
elif sharding_dim == 0:
return _handle_row_wise_sharding(
input,
world_size,
weight,
local_shard,
offsets,
per_sample_weights,
mode,
max_norm,
norm_type,
padding_idx,
rank,
pg,
)
else:
raise RuntimeError(
f"nn.EmbeddingBag weight sharded on dim {sharding_dim} not supported!"
)
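# Editor's note: the following is an illustrative, single-process sketch, not part of
# the original module. It checks the COLWISE idea from the docstring above: running
# embedding_bag against each column shard and concatenating along the embedding dim
# gives the same result as the full lookup (max_norm is left out since it mutates
# the weight in place).
def _example_col_wise_equivalence():
    import torch
    import torch.nn.functional as F
    torch.manual_seed(0)
    weight = torch.randn(16, 17)
    shards = [weight[:, 0:5], weight[:, 5:10], weight[:, 10:15], weight[:, 15:17]]
    inp = torch.randint(0, 16, (4, 6))
    full = F.embedding_bag(inp, weight, mode="sum")
    per_shard = [F.embedding_bag(inp, s, mode="sum") for s in shards]
    assert torch.allclose(torch.cat(per_shard, dim=1), full, atol=1e-6)
    return full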
def _validate_embedding_bag_param(args, kwargs):
"""
Validate input params of sharded embeddingBag op.
Args:
input: list of ID used for lookup and aggregation.
weight: sharded weight tensor.
kwargs: same as normal EmbeddingBag.
Return: None.
"""
input = args[0]
weight = args[1]
offsets = kwargs.get("offsets")
per_sample_weights = kwargs.get("per_sample_weights")
mode = kwargs.get("mode")
max_norm = kwargs.get("max_norm")
norm_type = kwargs.get("norm_type")
scale_grad_by_freq = kwargs.get("scale_grad_by_freq")
sparse = kwargs.get("sparse")
include_last_offset = kwargs.get("include_last_offset")
padding_idx = kwargs.get("padding_idx")
# Validate types
if not isinstance(input, torch.Tensor):
raise TypeError("input needs to be a torch.Tensor")
if offsets is not None and not isinstance(offsets, torch.Tensor):
raise TypeError("offsets needs to be a torch.Tensor")
if per_sample_weights is not None and not isinstance(
per_sample_weights, torch.Tensor
):
raise TypeError("per_sample_weights needs to be a torch.Tensor")
if not isinstance(weight, ShardedTensor):
raise TypeError("weight needs to be a ShardedTensor")
if len(input.size()) > 2:
raise ValueError("Input more than 2 dims not supported")
weight_size = weight.size()
if len(weight_size) != 2:
raise ValueError("Weight needs to have exactly 2 dims")
if int(torch.min(input).item()) < 0:
raise ValueError(
f"Index out of range in Input {int(torch.min(input).item())} {weight_size[1]}"
)
if int(torch.max(input).item()) >= weight_size[0]:
raise ValueError(
f"Index out of range in Input {int(torch.max(input).item())} {weight_size[1]}"
)
if offsets is not None and len(input.size()) != 1:
raise ValueError("Input dimension needs to be exactly 1 dim")
if len(input.size()) == 1 and offsets is None:
raise ValueError("offsets is required for 1D input")
if per_sample_weights is not None and per_sample_weights.size() != input.size():
raise ValueError(
f"per_sample_weights size {per_sample_weights.size()} not equal to input size {input.size()}"
)
if mode is None:
mode = "mean"
if mode not in ["sum", "mean", "max"]:
raise ValueError(f"mode '{mode}' is not supported")
if scale_grad_by_freq:
raise RuntimeError(
'nn.EmbeddingBag weight sharded with flag on "scale_grad_by_freq" not supported!'
)
if sparse:
raise RuntimeError(
'nn.EmbeddingBag weight sharded with flag on "sparse" not supported!'
)
if include_last_offset and offsets is None:
raise ValueError('offsets is required for flag "include_last_offset"!')
if include_last_offset and cast(List[int], offsets)[-1] != input.size(0):
raise ValueError(
'offsets need to have the input size in the end when the flag "include_last_offset" is on!'
)
if max_norm and max_norm <= 0.0:
raise ValueError('"max_norm" must be larger than zero!')
if not isinstance(weight._sharding_spec, ChunkShardingSpec):
raise ValueError("Only ChunkShardingSpec supported for ShardedTensor ops!")
if len(weight.local_shards()) != 1:
raise ValueError("Only one local shard supported!")
def _handle_col_wise_sharding(
input,
world_size,
weight,
local_shard,
offsets,
per_sample_weights,
mode,
max_norm,
norm_type,
padding_idx,
pg,
):
"""
Entry-point function to handle the logic of col-wise sharding of weight
for embeddingBag. (Detailed explanations of the logic can be found in
the comment for sharded_embedding_bag.)
Args:
input: list of ID used for lookup and aggregation.
world_size: number of ranks.
weight: sharded weight tensor.
local_shard: col-wise sharded local weight used for lookup.
offsets: list of start positions of each bag for 1D input.
per_sample_weights: weights for weighted sum mode.
mode: aggregation method of each bag.
max_norm: If given, each embedding vector with norm larger
than max_norm is renormalized to have norm max_norm.
Note: this will modify weight in-place.
norm_type: The p in the p-norm to compute for the max_norm option.
padding_idx: If specified, the entries at padding_idx do
not contribute to the gradient; therefore, the embedding
vector at padding_idx is not updated during training,
i.e. it remains as a fixed “pad”.
Note that the embedding vector at padding_idx is
excluded from the reduction.
pg: process group.
Return:
output: final result of lookup and aggregation.
local_shard: col-wise sharded local weight used for lookup.
If max_norm, this will be the renormed weight.
"""
# allgather the special input of embedding bag first.
gathered_per_sample_weights = None
if per_sample_weights is not None:
gathered_per_sample_weights = [
torch.zeros_like(per_sample_weights) for _ in range(world_size)
]
dist.all_gather(gathered_per_sample_weights, per_sample_weights, group=pg)
gathered_offsets = None
if offsets is not None:
gathered_offsets = [torch.zeros_like(offsets) for _ in range(world_size)]
dist.all_gather(gathered_offsets, offsets, group=pg)
gathered_inputs = None
if max_norm is not None:
# max_norm changes the weight in-place
local_shard, gathered_inputs = _handle_max_norm_col_wise(
max_norm, norm_type, local_shard, input, world_size, pg
)
output = _handle_col_wise_sharding_base(
torch.nn.functional.embedding_bag,
1,
input,
world_size,
weight,
local_shard,
pg,
mode=mode,
gathered_per_sample_weights=gathered_per_sample_weights,
gathered_offsets=gathered_offsets,
padding_idx=padding_idx,
gathered_inputs=gathered_inputs,
)
return (output, local_shard)
def _handle_row_wise_sharding(
input,
world_size,
weight,
local_shard,
offsets,
per_sample_weights,
mode,
max_norm,
norm_type,
padding_idx,
rank,
pg,
):
"""
Entry-point function to handle the logic of row-wise sharding of weight
for embeddingBag. (Detailed explanations of the logic can be found in
the comment for sharded_embedding_bag.)
Args:
input: list of ID used for lookup and aggregation.
world_size: number of ranks.
weight: sharded weight tensor.
local_shard: row-wise sharded local weight used for lookup.
offsets: list of start positions of each bag for 1D input.
per_sample_weights: weights for weighted sum mode.
mode: aggregation method of each bag.
max_norm: If given, each embedding vector with norm larger
than max_norm is renormalized to have norm max_norm.
Note: this will modify weight in-place.
norm_type: The p in the p-norm to compute for the max_norm option.
padding_idx: If specified, the entries at padding_idx do
not contribute to the gradient; therefore, the embedding
vector at padding_idx is not updated during training,
i.e. it remains as a fixed “pad”.
Note that the embedding vector at padding_idx is
excluded from the reduction.
rank: rank of the CUDA process.
pg: process group.
Returns:
gathered_output: final result of lookup and aggregation.
"""
# We sort each interval defined by offset. If 2D, each interval is a row.
input_size = input.size()
(
input_split_sorted_list,
input_split_sorted_indices,
split_sizes_1d,
split_sizes_1d_with_padding,
) = _input_split_sort(input, offsets, padding_idx)
# Within each interval of the sorted list, we first need to distribute
# each ID to different bucket(rank) and also ensure the rearrangement
# has been done in case the placement idx not equal to rank.
# We then perform some simple stats on each interval for the next step
# If user specifies per_sample_weights we need to rearrange them
# to be sync with IDs and then distribute them to each rank
(
input_combined,
input_combined_split_sizes,
offsets_rearrange_list,
offsets_rearrange_sizes,
per_sample_weights,
sharded_dim_size_max,
padding_idx,
) = _sorted_input_distribute_prepare(
input_split_sorted_list,
input_split_sorted_indices,
world_size,
input,
weight,
per_sample_weights,
rank,
padding_idx,
)
# Send ID/offsets/per_sample_weights to different bucket(rank).
(
gathered_input,
output_offsets_tensor_list,
output_split_sizes,
gathered_per_sample_weights,
) = _distribute_input(
input_combined,
input_combined_split_sizes,
offsets_rearrange_list,
offsets_rearrange_sizes,
sharded_dim_size_max,
world_size,
input,
per_sample_weights,
pg,
)
# Perform the embedding bag look-up and aggregation
results = []
for i, inp in enumerate(gathered_input):
per_sample_weights = (
gathered_per_sample_weights[i]
if gathered_per_sample_weights is not None
else None
)
# If the input is empty, passing in max_norm causes
# errors in CUDA.
if max_norm is not None and inp.size(0) == 0:
max_norm = None
# Perform local embedding look up and aggregation.
result = torch.nn.functional.embedding_bag(
inp,
local_shard,
offsets=output_offsets_tensor_list[i],
mode=mode if mode != "mean" else "sum",
per_sample_weights=per_sample_weights,
max_norm=max_norm,
norm_type=norm_type,
padding_idx=padding_idx,
)
if mode != "max":
results.append(result)
# For the max case, if there is no look-up from some ranks,
# it will return all zeros for those rows. For that case, we need
# to set the rows to -inf; otherwise, in the final
# aggregation negative values would be rounded up to zero.
elif inp.size(0) == 0:
result[:] = -float("Inf")
results.append(result)
else:
for idx, current_offset in enumerate(output_offsets_tensor_list[i]):
next_offset = current_offset
if idx == len(output_offsets_tensor_list[i]) - 1:
next_offset = output_split_sizes[i]
else:
next_offset = output_offsets_tensor_list[i][idx + 1]
# When there is no interval in the current rank or all IDs
# are equal to padding_idx, we then need to ensure they
# don't contribute to the final result.
if (current_offset == next_offset) or (
padding_idx is not None
and not torch.any(
torch.ne(inp[current_offset:next_offset], padding_idx)
)
):
result[idx] = -float("Inf")
results.append(result)
# Gather all the aggregated results appropriately by using reduce_scatter.
row_size = input.size(0) if len(input_size) > 1 else len(split_sizes_1d)
gathered_output = torch.empty(row_size, weight.size(1), device=input.device)
op = ReduceOp.SUM if mode != "max" else ReduceOp.MAX
dist.reduce_scatter(gathered_output, results, op=op, group=pg)
# For Mean, we cannot do the division until the very end because the sum of means
# is not equal to the mean of the sum (the divisors are different).
if mode == "mean":
split_sizes_1d_tensor = torch.tensor(
split_sizes_1d_with_padding, dtype=torch.float, device=input.device
)
# Make sure divisor is not zero.
split_sizes_1d_tensor[split_sizes_1d_tensor == 0.0] = 1.0
return (
torch.div(gathered_output.t().contiguous(), split_sizes_1d_tensor)
.t()
.contiguous()
)
# Return the appropriate local result.
return gathered_output
def _input_split_sort(input, offsets, padding_idx):
"""
In the circumstance of row-wise sharding of weight, we need to distribute
the sorted lookup IDs of embeddingBag to each rank by range. The constraint
here is that we cannot directly sort the whole input, because we have to
keep each interval separate since the result is aggregated per interval.
If the index in the placement is not equal to the rank number, we need to
do the rearrangement based on the order given by the Sharding Spec (placement).
We also calculate the split_size with padding_idx excluded per interval
so that we can use it as the divisor to calculate the mean correctly.
Args:
input: tensor to be applied op on.
offsets: start index of each interval in the 1D case.
padding_idx: the embedding vector at padding_idx is
excluded from the reduction.
Return:
input_split_sorted_list: list of ID positions sorted per interval.
input_split_sorted_indices: sorted indices for per_sample_weights
rearrangements.
split_sizes_1d: size of each split for 1D input, because it can
differ in this scenario.
split_sizes_1d_with_padding: size of each split for 1D input with
padding_idx excluded. This is for the divisor of `mean` mode.
"""
input_size = input.size()
input_split_sorted_list = []
split_sizes_1d = []
split_sizes_1d_with_padding = []
padding_idx = padding_idx if padding_idx is not None else -1
# For 2D tensor, we just first sort and then append row by row into a list.
if len(input_size) > 1:
indice_offset = 0
sorted_input, input_split_sorted_indices = torch.sort(input)
for i in range(0, sorted_input.size(0)):
input_split_sorted_list.append(sorted_input[i])
input_split_sorted_indices[i] += indice_offset
indice_offset += input.size(1)
split_sizes_1d_with_padding.append(
torch.sum(torch.ne(sorted_input[i], padding_idx)).item()
)
input_split_sorted_indices = torch.reshape(input_split_sorted_indices, (-1,))
# Split 1D input tensor based on the given offsets.
else:
input_split_sorted_indices_list = []
offset_len = len(offsets)
split_size = offsets[1:offset_len] - offsets[0:-1]
split_sizes_1d = split_size.tolist()
if torch.sum(split_size) < input.size(0):
split_sizes_1d.append(input.size(0) - offsets[-1].item())
indice_offset = 0
for idx, split_result in enumerate(torch.split(input, split_sizes_1d)):
split_result_sorted, indices = torch.sort(split_result)
input_split_sorted_list.append(split_result_sorted)
split_sizes_1d_with_padding.append(
torch.sum(torch.ne(split_result_sorted, padding_idx)).item()
)
input_split_sorted_indices_list.append(indices + indice_offset)
indice_offset += split_sizes_1d[idx]
input_split_sorted_indices = torch.cat(input_split_sorted_indices_list)
return (
input_split_sorted_list,
input_split_sorted_indices,
split_sizes_1d,
split_sizes_1d_with_padding,
)
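# Editor's note: the following is an illustrative sketch, not part of the original
# module. It walks the 1D branch above on a small example: sort each bag defined by
# `offsets` and count its non-padding entries (later used as the "mean" divisor).
def _example_input_split_sort_1d():
    import torch
    inp = torch.tensor([7, 2, 2, 9, 0, 5, 5, 1])
    offsets = torch.tensor([0, 3, 5])    # bags: [7, 2, 2], [9, 0], [5, 5, 1]
    padding_idx = 5
    split_sizes = (offsets[1:] - offsets[:-1]).tolist()
    if sum(split_sizes) < inp.size(0):
        split_sizes.append(inp.size(0) - offsets[-1].item())
    sorted_bags, sizes_no_padding = [], []
    for bag in torch.split(inp, split_sizes):
        bag_sorted, _ = torch.sort(bag)
        sorted_bags.append(bag_sorted)
        sizes_no_padding.append(int(torch.sum(torch.ne(bag_sorted, padding_idx)).item()))
    # sorted_bags -> [2, 2, 7], [0, 9], [1, 5, 5]; sizes_no_padding -> [3, 2, 1]
    return sorted_bags, sizes_no_padding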
def _sorted_input_distribute_prepare(
input_split_sorted_list,
input_split_sorted_indices,
world_size,
input,
weight,
per_sample_weights,
rank,
padding_idx,
):
"""
In the circumstance of row-wise sharding of weight, we need to distribute
the sorted lookup IDs of embeddingBag to each rank by range. After sorting
per interval, we need to distribute each position to the corresponding
rank and we need to sync this change to offsets and per_sample_weights.
Also, we perform rearrangements, if the order in Sharding Spec is not
same as the rank sequence.
In addition, in the row-wise sharding, we do two things for padding_idx:
we only keep it if it is within the range of the current rank, and we
take it modulo sharded_dim_size_max.
Args:
input_split_sorted_list: list of ID positions sorted per interval.
input_split_sorted_indices: sorted indices for per_sample_weights
rearrangements.
input: tensor to be applied op on.
world_size: number of ranks.
weight: sharded weight tensor.
per_sample_weights: weights for weighted sum mode.
rank: rank of the CUDA process.
padding_idx: If specified, the entries at padding_idx do
not contribute to the gradient and reduction.
Returns:
input_combined: list of ID to be sent to each rank.
input_combined_split_sizes: # of bags sent to each rank.
offsets_rearrange_list: list of starting position of each bag.
offsets_rearrange_sizes: # of bag offsets sent to each rank.
per_sample_weights: weights for weighted sum mode.
sharded_dim_size_max: the max size of the row each rank gets.
padding_idx: padding_idx taken modulo sharded_dim_size_max if it is
within the range; otherwise, None is returned.
"""
input_sorted_list = []
input_split_sizes_list = []
input_split_sizes_rolling_sum = []
rearrange_indices_list = []
input_split_rearrange_indices_combined = None
split_sizes_rolling_sum = 0
for idx, split_result_sorted in enumerate(input_split_sorted_list):
split_result_sorted = split_result_sorted.contiguous()
(
input_sorted,
input_split_sizes,
sharded_dim_size_max,
input_split_rearrange_indices,
_,
padding_idx_modular,
) = _handle_row_wise_lookup_distribute(
split_result_sorted, input, world_size, weight, rank, padding_idx
)
rearrange_indices_list.append(
input_split_rearrange_indices + split_sizes_rolling_sum
if input_split_rearrange_indices is not None
else None
)
input_sorted_list.append(input_sorted)
input_split_sizes_list.append(input_split_sizes)
input_split_sizes_rolling_sum.append(split_sizes_rolling_sum)
split_sizes_rolling_sum += sum(input_split_sizes)
# padding_idx cannot be directly overridden in the for loop because the
# later iteration will wipe out the modularized padding_idx.
padding_idx = padding_idx_modular
if not (any(x is None for x in rearrange_indices_list)):
input_split_rearrange_indices_combined = torch.cat(rearrange_indices_list)
# Flatten each interval into a big 1D tensor.
input_combined = torch.cat(input_sorted_list)
# Rearrange the 1D tensor to move the IDs of look-up within each
# interval to the corresponding sharding rank. We also rearrange
# the offsets to be in sync with IDs.
input_combined_rearrange_indices = []
offsets_rearrange_list = []
offsets_rearrange_sizes = []
input_combined_split_sizes = []
# Calculate the indices for rearrangements
for rank in range(0, world_size):
offsets_rearrange = []
offset = 0
for idx, input_split_sizes in enumerate(input_split_sizes_list):
offsets_rearrange.append(offset)
split_length = input_split_sizes[rank]
offset_idx = input_split_sizes_rolling_sum[idx] + sum(
[
split_size if i < rank else 0
for i, split_size in enumerate(input_split_sizes)
]
)
input_combined_rearrange_indices += list(
range(offset_idx, offset_idx + split_length)
)
offset += split_length
offsets_rearrange_list.append(offsets_rearrange)
offsets_rearrange_sizes.append(len(offsets_rearrange))
input_combined_split_sizes.append(offset)
# Perform the actual rearrangements of IDs
input_combined = input_combined.index_select(
0, torch.tensor(input_combined_rearrange_indices, device=input.device)
)
# If per_sample_weights exists, we need to sync the shift which
# we applied to the position IDs for look-up.
if per_sample_weights is not None:
# Rearrange per interval.
per_sample_weights = torch.reshape(per_sample_weights, (-1,))
per_sample_weights = per_sample_weights[input_split_sorted_indices]
if input_split_rearrange_indices_combined is not None:
per_sample_weights = per_sample_weights[
input_split_rearrange_indices_combined
]
# Rearrange across different ranks.
per_sample_weights = per_sample_weights.index_select(
0,
torch.tensor(input_combined_rearrange_indices, device=input.device),
)
return (
input_combined,
input_combined_split_sizes,
offsets_rearrange_list,
offsets_rearrange_sizes,
per_sample_weights,
sharded_dim_size_max,
padding_idx,
)
def _distribute_input(
input_combined,
input_combined_split_sizes,
offsets_rearrange_list,
offsets_rearrange_sizes,
sharded_dim_size_max,
world_size,
input,
per_sample_weights,
pg,
):
"""
In the circumstance of row-wise sharding of weight, we need to distribute
the sorted lookup IDs of embeddingBag, offsets and per_sample_weights to
each rank by range. To reduce the number of communications, we consolidate the
communication of tensors which share the same dtype.
Args:
input_combined: list of ID to be sent to each rank.
input_combined_split_sizes: # of bags sent to each rank.
offsets_rearrange_list: list of starting position of each bag.
offsets_rearrange_sizes: # of bag offsets sent to each rank.
sharded_dim_size_max: the max size of the row each rank gets.
world_size: number of ranks.
input: tensor to be applied op on.
per_sample_weights: weights for weighted sum mode.
pg: process group.
Returns:
gathered_input: list of tensors of IDs for lookup and aggregation.
output_offsets_tensor_list: list of tensors of offsets which specifies the
boundary of each bag.
output_split_sizes: list of size of IDs sent from each rank.
gathered_per_sample_weights: per_sample_weights from each rank.
"""
# Communicate the length of offset and ID split size to each rank
# To save the # of communications, we interleave the sizes into one list.
input_size_list = offsets_rearrange_sizes + input_combined_split_sizes
input_size_list[::2] = offsets_rearrange_sizes
input_size_list[1::2] = input_combined_split_sizes
output_size_list = _communicate_size_to_each_rank(
input_size_list, world_size * 2, input, pg
)
# Perform the modulo operation on the 1D tensor to be sent to each rank.
input_combined = torch.remainder(input_combined, sharded_dim_size_max)
input_combined_list = list(torch.split(input_combined, input_combined_split_sizes))
# Convert each offset list to a tensor and combine it with the input
# so we only perform one communication to each rank.
input_tensor_list = []
output_tensor_size_list = []
for idx, input_list in enumerate(offsets_rearrange_list):
input_tensor_list.append(
torch.cat(
(
torch.tensor(input_list, dtype=torch.int64, device=input.device),
input_combined_list[idx],
)
)
)
output_tensor_size_list.append(
output_size_list[2 * idx] + output_size_list[2 * idx + 1]
)
output_tensor_list = _communicate_list_to_each_rank(
input_tensor_list, output_tensor_size_list, input, pg
)
output_tensor_list = list(
torch.split(torch.cat(output_tensor_list), output_size_list)
)
output_offsets_tensor_list = output_tensor_list[::2]
gathered_input = output_tensor_list[1::2]
output_split_sizes = output_size_list[1::2]
# If user specifies per_sample_weights we need to communicate
# them to the corresponding rank.
gathered_per_sample_weights = None
if per_sample_weights is not None:
# Split the 1D tensor per_sample_weights to be sent to each rank.
per_sample_weights_list = list(
torch.split(per_sample_weights, input_combined_split_sizes)
)
gathered_per_sample_weights = _communicate_list_to_each_rank(
per_sample_weights_list,
output_split_sizes,
input,
pg,
tensor_type=per_sample_weights.dtype,
)
return (
gathered_input,
output_offsets_tensor_list,
output_split_sizes,
gathered_per_sample_weights,
)
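# Editor's note: the following is an illustrative sketch, not part of the original
# module. It shows the interleaving used above to piggyback offsets and IDs in a
# single size exchange: sizes are woven as [off_0, ids_0, off_1, ids_1, ...] and
# unwoven again with the [::2] / [1::2] slices.
def _example_size_interleaving():
    offsets_sizes = [4, 4]           # per-rank number of bag offsets
    ids_sizes = [8, 5]               # per-rank number of lookup IDs
    interleaved = [0] * (2 * len(offsets_sizes))
    interleaved[::2] = offsets_sizes
    interleaved[1::2] = ids_sizes    # -> [4, 8, 4, 5]
    # The receiving side slices the flat list back apart the same way:
    assert interleaved[::2] == offsets_sizes and interleaved[1::2] == ids_sizes
    return interleaved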
| pytorch-master | torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/embedding_bag.py |
import torch
from torch import Tensor
from torch.distributed._shard.sharded_tensor import (
ShardedTensor,
)
from torch.distributed._shard.sharding_spec import ChunkShardingSpec
from torch.distributed._shard.sharding_spec.api import custom_sharding_spec_op
from torch.distributed._shard.sharded_tensor._ops.math_ops import binary_math_op_impl
from ._common import (
_chunk_sharding_spec_check,
)
def register_math_op(op):
@custom_sharding_spec_op(ChunkShardingSpec, op)
def binary_math_op(types, args=(), kwargs=None, pg=None):
"""
Handles ``__torch_function__`` dispatch for the binary math ops
such as `torch.add`, `torch.mul`, `torch.div`, etc.
This method computes on ShardedTensor
"""
if len(args) != 2:
raise ValueError("Only support binary math op on ShardedTensor for now!")
lhs = args[0]
rhs = args[1]
# Validate types
if isinstance(lhs, ShardedTensor) and isinstance(rhs, ShardedTensor):
lhs_spec = lhs.sharding_spec()
rhs_spec = rhs.sharding_spec()
_chunk_sharding_spec_check(lhs_spec, op)
_chunk_sharding_spec_check(rhs_spec, op)
if lhs.size() == rhs.size() and lhs_spec.dim == rhs_spec.dim: # type: ignore[attr-defined]
# perform local element-wise math op
res = op(lhs.local_tensor(), rhs.local_tensor())
return ShardedTensor._init_from_local_tensor(
res,
lhs_spec,
lhs.size(), # type: ignore[arg-type]
process_group=pg)
else:
raise RuntimeError("Implicit broadcasting not supported yet!")
else:
# Try dispatch to ShardingSpec agnostic ops.
return binary_math_op_impl(op, types, args, kwargs, pg)
binary_ops = [
# add
torch.add,
Tensor.add,
Tensor.__add__,
Tensor.__radd__,
# sub
torch.sub,
Tensor.sub,
Tensor.__sub__,
Tensor.__rsub__,
# mul
torch.mul,
Tensor.mul,
Tensor.__mul__,
Tensor.__rmul__,
# div
torch.div,
Tensor.div,
Tensor.__div__,
Tensor.__rdiv__,
]
for op in binary_ops:
register_math_op(op)
| pytorch-master | torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/math_ops.py |
import copy
import math
import torch
import torch.distributed as dist
from torch.distributed._shard.sharded_tensor import (
ShardedTensor,
)
from torch.distributed._shard.sharding_spec._internals import (
get_chunk_sharding_params,
)
from torch.distributed.nn.functional import (
all_reduce,
)
from ._common import (
_chunk_sharding_spec_check,
_register_sharded_op_on_local_tensor,
)
def transpose_same_dim(*args, **kwargs):
"""
When the dim0 and dim1 of transpose are the same, return the original ShardedTensor.
Args: same as ``torch.Tensor.transpose``.
Return (bool): Whether to return early or not.
"""
return args[1] == args[2]
def sharded_transpose_check(*args, **kwargs):
"""
Perform extra checks for the sharded_transpose op such as the input needs to
be at least 2 and the sharding spec needs to be a ChunkShardingSpec.
Args: same as ``torch.Tensor.transpose``.
Return: None
"""
if len(args) < 3:
raise ValueError("Needs at least two dimensions for transpose op!")
_chunk_sharding_spec_check(args[0].sharding_spec(), torch.Tensor.transpose)
def sharded_transpose(args, kwargs, pg):
"""
Handles ``__torch_function__`` dispatch for the ``torch.Tensor.transpose`` op.
Returns a new sharded tensor with the given dimensions transposed.
During the transpose, we keep the original sharding dim if the sharding
dim is neither dim0 nor dim1. Otherwise, we swap the sharding
dim with the other input of transpose.
Args: (same as ``torch.Tensor.transpose``.)
dim0 (Int): the first dimension to be transposed.
dim1 (Int): the second dimension to be transposed.
Returns:
new_local_shards (List[Shard]): Local shards for the new sharded tensor.
st_meta (ShardedTensorMetadata): Metadata of the new sharded tensor.
"""
def _swap_meta_data(data, idx0, idx1):
"""
Swap the item at idx0 and idx1 in the data list.
"""
data[idx0], data[idx1] = data[idx1], data[idx0]
st = args[0]
dim0 = args[1]
dim1 = args[2]
sharding_spec = copy.deepcopy(st.sharding_spec())
if sharding_spec.dim == dim0:
sharding_spec.dim = dim1
elif sharding_spec.dim == dim1:
sharding_spec.dim = dim0
st_size = list(st.size())
_swap_meta_data(st_size, dim0, dim1)
local_tensor = st.local_tensor().transpose(dim0, dim1).contiguous()
return local_tensor, sharding_spec, tuple(st_size)
_register_sharded_op_on_local_tensor(
torch.transpose,
early_stop_func=transpose_same_dim,
extra_check=sharded_transpose_check,
customized_func=sharded_transpose,
)
_register_sharded_op_on_local_tensor(
torch.Tensor.transpose,
early_stop_func=transpose_same_dim,
extra_check=sharded_transpose_check,
customized_func=sharded_transpose,
)
def sharded_masked_fill_check(*args, **kwargs):
"""
Perform extra checks for the ``torch.Tensor.masked_fill`` op.
Ensure the mask size is broadcastable with the size of
the sharded tensor.
Args: same as ``torch.Tensor.masked_fill``.
Return: None
"""
st = args[0]
mask = args[1]
if st.dim() < mask.dim():
raise ValueError(
"mask dim must not greater than the dim of the sharded tensor."
)
for idx in range(-1, -mask.dim() - 1, -1):
if mask.size(idx) != st.size(idx) and mask.size(idx) != 1:
raise ValueError(
f"The size of mask {mask.dim() + idx} must match the size of "
f"sharded tensor {st.dim() + idx} at non-singleton dimension {mask.dim() + idx}"
)
def sharded_masked_fill(args, kwargs, pg):
"""
Handles ``__torch_function__`` dispatch for the ``torch.Tensor.masked_fill`` op.
We first narrow down the mask to the size of local tensor if the mask
contains the sharding dim and then apply the mask to the local tensor.
Args: same as ``torch.Tensor.masked_fill``.
Return:
local_tensor (Tensor): New local tensor to build the sharded tensor.
sharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`):
sharding spec of the new sharded tensor.
new_st_size (torch.Size): Size of the new sharded tensor.
"""
st = args[0]
mask = args[1]
value = args[2]
current_rank = dist.get_rank(pg) # type: ignore[attr-defined]
sharding_dim = st.sharding_spec().dim # type: ignore[attr-defined]
narrow_idx = None
for idx in range(-1, -mask.dim() - 1, -1):
if st.dim() + idx == sharding_dim and mask.size(idx) != 1:
narrow_idx = idx
if narrow_idx is not None:
rank_idx = None
for idx, placement in enumerate(st._sharding_spec.placements): # type: ignore[attr-defined]
if placement.rank() == current_rank: # type: ignore[index]
rank_idx = idx # type: ignore[attr-defined]
shard_metadata = st.metadata().shards_metadata[rank_idx] # type: ignore[index]
mask = mask.narrow(
narrow_idx,
shard_metadata.shard_offsets[sharding_dim],
shard_metadata.shard_sizes[sharding_dim],
)
local_tensor = st.local_tensor().masked_fill(mask, value)
return local_tensor, st.sharding_spec(), st.size()
_register_sharded_op_on_local_tensor(
torch.Tensor.masked_fill,
extra_check=sharded_masked_fill_check,
customized_func=sharded_masked_fill,
)
def sharded_view_check(*args, **kwargs):
"""
Perform extra checks for the ``torch.Tensor.view`` op.
Args: same as ``torch.Tensor.view``.
Return: None
"""
st = args[0]
shape = args[1:]
if len(shape) == 0:
raise ValueError("Missing *shape for sharded view op.")
if len(shape) <= st.sharding_spec().dim:
raise NotImplementedError(
f"Shape having dim {len(shape)} is not supported "
f"for sharded tensor sharded on dim {st.sharding_spec().dim}."
)
st_size = math.prod(st.size()) # type: ignore[attr-defined]
shape_size = math.prod(shape) # type: ignore[attr-defined]
neg_sum = sum(i for i in shape if i < 0)
if shape_size > st_size or st_size % shape_size:
raise ValueError(
f"Shape '{list(shape)}' is invalid for sharded tensor size {st_size}."
)
if neg_sum < -1:
raise ValueError("Only one dimension can be inferred for sharded view op.")
def sharded_view(args, kwargs, pg):
"""
Handles ``__torch_function__`` dispatch for the ``torch.Tensor.view`` op.
For now we always keep the sharding dim after view. For example, if
a sharded tensor with size [16, 5] and sharded by 0. If we now view
it as [4, 2, 2, 5], it will still be sharded by dim 0.
Args: same as ``torch.Tensor.view``.
Return:
local_tensor (Tensor): New local tensor to build the sharded tensor.
sharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`):
sharding spec of the new sharded tensor.
new_st_size (torch.Size): Size of the new sharded tensor.
"""
st = args[0]
shape = args[1:]
try:
infer_idx = shape.index(-1)
except ValueError:
infer_idx = None
# Infer the dim which is specified with -1.
if infer_idx is not None:
st_size = math.prod(st.size()) # type: ignore[attr-defined]
shape_size = -1 * math.prod(shape) # type: ignore[attr-defined]
shape = (*shape[:infer_idx], st_size // shape_size, *shape[infer_idx + 1 :])
if st.size() == shape:
return st.local_tensor(), st.sharding_spec(), shape
sharding_dim = st.sharding_spec().dim
sharding_spec = st.sharding_spec()
# When the sharding dim is negative, we need to ensure the new
# sharded tensor is still sharded by the original dimension.
if sharding_dim < 0:
sharding_spec = copy.deepcopy(sharding_spec)
sharding_dim = st.dim() + sharding_dim
sharding_spec.dim = sharding_dim
world_size = dist.get_world_size(pg)
if shape[sharding_dim] % world_size:
raise NotImplementedError(
f"Case when dim '({shape[sharding_dim]})' is not divisible "
"by world_size is not supported."
)
new_local_tensor_size = (
*shape[:sharding_dim],
shape[sharding_dim] // world_size,
*shape[sharding_dim + 1 :],
)
new_local_tensor = st.local_tensor().view(*new_local_tensor_size)
return new_local_tensor, sharding_spec, shape
_register_sharded_op_on_local_tensor(
torch.Tensor.view,
extra_check=sharded_view_check,
customized_func=sharded_view,
)
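# Editor's note: the following is an illustrative sketch, not part of the original
# module. It redoes the size bookkeeping of the view op above on plain Python values:
# infer the -1 dimension, then divide the sharding dim by a hypothetical world_size.
def _example_view_size_math():
    import math
    global_size = (16, 5)                  # sharded on dim 0 across 4 ranks
    world_size, sharding_dim = 4, 0
    shape = (4, -1, 2, 5)                  # requested view with one inferred dim
    st_size = math.prod(global_size)       # 80
    infer_idx = shape.index(-1)
    shape_size = -1 * math.prod(shape)     # 40
    shape = (*shape[:infer_idx], st_size // shape_size, *shape[infer_idx + 1:])
    # shape -> (4, 2, 2, 5); each rank's local tensor is viewed as (1, 2, 2, 5)
    local_size = (
        *shape[:sharding_dim],
        shape[sharding_dim] // world_size,
        *shape[sharding_dim + 1:],
    )
    return shape, local_size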
def sharded_bmm_check(*args, **kwargs):
"""
Perform extra checks for the sharded_bmm op, for example, st2 needs to
be a sharded tensor and both tensors need to be sharded by dim 0, etc.
Args: same as ``torch.bmm``.
Return: None
"""
if len(args) < 2:
raise TypeError("Needs two tensors to perform torch.bmm.")
st = args[0]
st2 = args[1]
# Validate types
if not isinstance(st2, ShardedTensor):
raise TypeError("st2 needs to be a ShardedTensor for torch.bmm.")
_chunk_sharding_spec_check(st2.sharding_spec(), torch.bmm)
if st.dim() != 3 or st2.dim() != 3:
raise TypeError("both st and st2 need to be a 3D ShardedTensor")
if (
st.sharding_spec().dim != st2.sharding_spec().dim # type: ignore[attr-defined]
or st.sharding_spec().dim != 0
):
raise NotImplementedError(
"Only support performing bmm on tensors sharded on dim 0 now."
)
if st.sharding_spec().placements != st2.sharding_spec().placements: # type: ignore[attr-defined]
raise NotImplementedError(
"Both st and st2 need to have same placements for bmm."
)
def sharded_bmm(args, kwargs, pg):
"""
Handles ``__torch_function__`` dispatch for the sharded_bmm op.
Warning: For now we only support the case when both tensors are sharded
by dim 0 so that no communication is needed.
Args: same as ``torch.bmm``.
Return:
local_tensor (Tensor): New local tensor to build the sharded tensor.
sharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`):
sharding spec of the new sharded tensor.
new_st_size (torch.Size): Size of the new sharded tensor.
"""
st = args[0]
st2 = args[1]
local_tensor = torch.bmm(st.local_tensor(), st2.local_tensor())
new_st_size = (*st.size()[:-1], st2.size(-1))
return local_tensor, st.sharding_spec(), new_st_size
_register_sharded_op_on_local_tensor(
torch.Tensor.bmm,
extra_check=sharded_bmm_check,
customized_func=sharded_bmm,
)
_register_sharded_op_on_local_tensor(
torch.bmm,
extra_check=sharded_bmm_check,
customized_func=sharded_bmm,
)
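# Editor's note: the following is an illustrative, single-process sketch, not part of
# the original module. It checks the rule behind the bmm op above: when both operands
# are sharded on the batch dim (dim 0), each rank's torch.bmm on its local chunks is
# exactly the matching chunk of the full result, so no communication is needed.
def _example_bmm_dim0_sharding():
    import torch
    torch.manual_seed(0)
    a, b = torch.randn(4, 3, 5), torch.randn(4, 5, 2)
    full = torch.bmm(a, b)
    chunks = [torch.bmm(ac, bc) for ac, bc in zip(a.chunk(2, dim=0), b.chunk(2, dim=0))]
    assert torch.allclose(torch.cat(chunks, dim=0), full)
    return full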
def sharded_layer_norm_check(*args, **kwargs):
"""
Perform extra checks for the ``nn.LayerNorm`` op.
Ensure the normalized shape is compatible with
the size of the sharded tensor.
Args: same as ``torch.nn.LayerNorm``.
Return: None
"""
st = args[0]
normalized_shape = args[1]
if st.dim() < len(normalized_shape):
raise ValueError(
"normalized_shape dim must not be greater than "
"the dim of the sharded tensor."
)
for idx in range(-1, -len(normalized_shape) - 1, -1):
if normalized_shape[idx] != st.size(idx):
raise ValueError(
f"Given normalized_shape=[{normalized_shape[idx]}], expected input with shape "
f"[*, {normalized_shape[idx]}], but got input of size {list(st.size())}."
)
def sharded_layer_norm(args, kwargs, pg):
"""
Handles ``__torch_function__`` dispatch for the ``torch.nn.LayerNorm`` op.
We gather all shards from local shards and perform a global normalization.
We then scatter the result back to each rank.
Args: same as ``torch.nn.LayerNorm``.
Return:
local_tensor (Tensor): New local tensor to build the sharded tensor.
sharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`):
sharding spec of the new sharded tensor.
new_st_size (torch.Size): Size of the new sharded tensor.
"""
st = args[0]
normalized_shape = args[1]
sharding_dim = st.sharding_spec().dim # type: ignore[attr-defined]
sharding_dim = sharding_dim if sharding_dim >= 0 else st.dim() + sharding_dim
local_tensor = st.local_tensor()
# If sharding dim is smaller than shape start, we just perform a local norm.
shape_start = st.dim() - len(normalized_shape)
if shape_start > sharding_dim:
args = (local_tensor, *args[1:])
local_tensor = torch.nn.functional.layer_norm(*args, **kwargs)
return local_tensor, st.sharding_spec(), st.size()
elementwise_affine = kwargs.get("elementwise_affine", False)
eps = kwargs.get("eps", 1e-05)
norm_dims = tuple(i for i in range(-1, -len(normalized_shape) - 1, -1))
local_size = math.prod(local_tensor.size()[shape_start:]) # type: ignore[attr-defined]
st_size = math.prod(st.size()[shape_start:]) # type: ignore[attr-defined]
local_mean = torch.mul(local_tensor.mean(norm_dims, keepdim=True), local_size)
global_mean = torch.div(all_reduce(local_mean), st_size)
local_variant_sq = torch.square(local_tensor - global_mean).sum(
norm_dims, keepdim=True
)
global_variant = torch.div(all_reduce(local_variant_sq), st_size)
denom = torch.rsqrt(global_variant + eps)
local_tensor = torch.mul(local_tensor - global_mean, denom)
if elementwise_affine:
weight = kwargs["weight"]
bias = kwargs["bias"]
current_rank = dist.get_rank(pg) # type: ignore[attr-defined]
world_size = dist.get_world_size(pg)
(start_pos, chunk_size) = get_chunk_sharding_params(
bias.size(0), world_size, st.sharding_spec(), current_rank
)
local_tensor = torch.addmm(
torch.narrow(bias, 0, start_pos, chunk_size),
local_tensor,
torch.narrow(weight, sharding_dim - shape_start, start_pos, chunk_size),
)
return local_tensor, st.sharding_spec(), st.size()
_register_sharded_op_on_local_tensor(
torch.nn.LayerNorm,
extra_check=sharded_layer_norm_check,
customized_func=sharded_layer_norm,
)
_register_sharded_op_on_local_tensor(
torch.nn.functional.layer_norm,
extra_check=sharded_layer_norm_check,
customized_func=sharded_layer_norm,
)
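# Editor's note: the following is an illustrative, single-process sketch, not part of
# the original module. It checks the normalization math above (without
# elementwise_affine), simulating the all_reduce of per-shard sums with a Python sum
# over two hypothetical column shards.
def _example_global_layer_norm():
    import torch
    import torch.nn.functional as F
    torch.manual_seed(0)
    full = torch.randn(4, 6)
    normalized_shape = (6,)
    shards = list(full.chunk(2, dim=1))    # "col-wise sharded" on the normalized dim
    eps = 1e-05
    st_size = 6
    local_sums = [s.sum(dim=-1, keepdim=True) for s in shards]
    global_mean = sum(local_sums) / st_size           # simulated all_reduce + divide
    local_var_sums = [
        torch.square(s - global_mean).sum(dim=-1, keepdim=True) for s in shards
    ]
    global_var = sum(local_var_sums) / st_size
    denom = torch.rsqrt(global_var + eps)
    normed = torch.cat([(s - global_mean) * denom for s in shards], dim=1)
    assert torch.allclose(normed, F.layer_norm(full, normalized_shape, eps=eps), atol=1e-5)
    return normed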
| pytorch-master | torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/matrix_ops.py |
import torch
from ._common import (
_register_sharded_op_on_local_tensor,
)
def sharded_softmax(args, kwargs, pg):
input = args[0]
dim = kwargs['dim']
sharding_dim = input.sharding_spec().dim
ndims = input.dim()
if dim == sharding_dim or dim + ndims == sharding_dim or sharding_dim + ndims == dim:
exp = torch.exp(input.local_tensor())
exp_sum = exp.sum(dim=dim).unsqueeze(dim=dim)
exp_sum = torch.distributed.nn.functional.all_reduce(exp_sum, group=pg)
smax = torch.div(exp, exp_sum)
else:
smax = torch.nn.functional.softmax(input.local_tensor(), dim=dim)
return smax, input.sharding_spec(), input.size()
_register_sharded_op_on_local_tensor(
torch.nn.functional.softmax,
customized_func=sharded_softmax,
)
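# Editor's note: the following is an illustrative, single-process sketch, not part of
# the original module. It checks the trick above: when softmax runs along the sharding
# dim, exp(local) divided by the all-reduced sum of exp equals the softmax of the
# full row (the all_reduce is simulated by summing over two hypothetical shards).
def _example_sharded_softmax():
    import torch
    torch.manual_seed(0)
    full = torch.randn(3, 8)
    shards = list(full.chunk(2, dim=1))               # sharded along dim 1
    exp_sums = [torch.exp(s).sum(dim=1, keepdim=True) for s in shards]
    global_sum = sum(exp_sums)                        # simulated all_reduce
    smax = torch.cat([torch.exp(s) / global_sum for s in shards], dim=1)
    assert torch.allclose(smax, torch.softmax(full, dim=1), atol=1e-6)
    return smax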
| pytorch-master | torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/softmax.py |
from typing import Iterator, Tuple, Union
from .api import ShardedOptimizer
import torch.nn as nn
from torch.distributed._shard.sharded_tensor import (
ShardedTensor
)
def named_params_with_sharded_tensor(
module: nn.Module,
prefix: str = '',
recurse: bool = True,
) -> Iterator[Tuple[str, Union[nn.Parameter, ShardedTensor]]]:
r"""Returns an iterator over module parameters (together with the
ShardedTensor parameters), yielding both the name of the parameter
as well as the parameter itself. This is typically passed to a
:class:`torch.distributed._shard.sharded_optim.ShardedOptimizer`
Args:
prefix (str): prefix to prepend to all parameter names.
recurse (bool): if True, then yields parameters of this module
and all submodules. Otherwise, yields only parameters that
are direct members of this module.
Yields:
(str, Union[Tensor, ShardedTensor]): Tuple containing
the name and parameter (or ShardedTensor parameter)
Example::
>>> # xdoctest: +SKIP
>>> model = torch.nn.Linear(*linear_size)
>>> shard_parameter(model, "weight", spec)
>>> for name, param in named_params_with_sharded_tensor(model):
>>> if name in ['weight']:
>>> print(param.size())
"""
modules = module.named_modules(prefix=prefix) if recurse else [(prefix, module)]
memo = set()
for mod_prefix, mod in modules:
# find all sharded tensor params
for name, val in vars(mod).items():
if isinstance(val, ShardedTensor) and val not in memo:
memo.add(val)
name = mod_prefix + ('.' if mod_prefix else '') + name
yield name, val
# find all nn.Parameters
for name, val in module.named_parameters():
yield name, val
| pytorch-master | torch/distributed/_shard/sharded_optim/__init__.py |
from typing import List, Union, Mapping, Dict, Any
import torch.optim as optim
from torch import Tensor
from torch.distributed._shard.sharded_tensor import ShardedTensor
class ShardedOptimizer(optim.Optimizer):
def __init__(
self,
named_params: Mapping[str, Union[Tensor, ShardedTensor]],
optimizer_class,
*optimizer_args,
**optimizer_kwargs
):
"""
ShardedOptimizer collects all tensors and local shard tensors of
ShardedTensor, then use these tensors as ``params`` for optimizers
Args:
named_params (Dict[str, Union[Tensor, ShardedTensor]]) : a Dict
of parameters, where key is the parameter key, value is either
Tensor or ShardedTensor parameter.
optimizer_class (torch.optim.Optimizer): the Optimizer to use
locally, i.e. torch.optim.SGD, torch.optim.Adagrad, etc.
*optimizer_args: the arguments to initialize the optimizer.
**optimizer_kwargs: the key-word arguments to initialize the optimizer.
"""
tensors: List[Tensor] = []
for value in named_params.values():
if isinstance(value, ShardedTensor):
for local_shard in value.local_shards():
tensors.append(local_shard.tensor)
else:
tensors.append(value)
self.named_params = named_params
self._optim = optimizer_class(tensors, *optimizer_args, **optimizer_kwargs)
self.param_groups = self._optim.param_groups
self.state = self._optim.state
def zero_grad(self, set_to_none: bool = False): # type: ignore[override]
r"""Sets the gradients of all optimized :class:`torch.Tensor` s to zero.
Args:
set_to_none (bool): instead of setting to zero, set the grads to None.
This will in general have lower memory footprint, and can modestly improve performance.
However, it changes certain behaviors. For example:
1. When the user tries to access a gradient and perform manual ops on it,
a None attribute or a Tensor full of 0s will behave differently.
2. If the user requests ``zero_grad(set_to_none=True)`` followed by a backward pass, ``.grad``\ s
are guaranteed to be None for params that did not receive a gradient.
3. ``torch.optim`` optimizers have a different behavior if the gradient is 0 or None
(in one case it does the step with a gradient of 0 and in the other it skips
the step altogether).
"""
self._optim.zero_grad(set_to_none)
def step(self, closure=None):
r"""Performs a single optimization step (parameter update).
Args:
closure (Callable): A closure that reevaluates the model and
returns the loss. Optional for most optimizers.
.. note::
Unless otherwise specified, this function should not modify the
``.grad`` field of the parameters.
"""
self._optim.step(closure)
def state_dict(self) -> Dict[str, Any]:
"""
Returned state and param_groups will contain parameter keys
instead of parameter indices like torch.optim.Optimizer.
This allows for advanced functionality like optimizer re-sharding to be implemented.
"""
# TODO: implement state_dict
raise NotImplementedError("ShardedOptimizer state_dict not implemented yet!")
def load_state_dict(self, state_dict: Mapping[str, Any]):
r"""Loads the ShardedOptimizer state.
Args:
state_dict (dict): ShardedOptimizer state. Should be an object returned
from a call to :meth:`state_dict`.
"""
# TODO: implement load_state_dict
raise NotImplementedError("ShardedOptimizer load_state_dict not implemented yet!")
def add_param_group(self, param_group: Any):
r"""Add a new param group
"""
# TODO: implement add_param_group
raise NotImplementedError("ShardedOptimizer add_param_group not implemented yet!")
| pytorch-master | torch/distributed/_shard/sharded_optim/api.py |
import argparse
import io
import os
import random
import shlex
import subprocess
import time
import numpy as np
import torch
import torch.nn as nn
import torch.distributed as dist
import torch.distributed.autograd as dist_autograd
import torch.distributed.rpc as rpc
import torch.multiprocessing as mp
import torch.optim as optim
from torch.distributed.optim import DistributedOptimizer
from torch.distributed.rpc import RRef, TensorPipeRpcBackendOptions
from torch.distributed.rpc.backend_registry import BackendType
from torch.nn.parallel import DistributedDataParallel as DDP
# Config
NUM_TRAINERS = 8
NUM_PS = 8
NUM_EMBEDDINGS = 300
EMBEDDING_DIM = 64
WARMUP_CYCLES = 5
class HybridModel(torch.nn.Module):
r"""
The model consists of a sparse part and a dense part. The dense part is an
nn.Linear module that is replicated across all trainers using
DistributedDataParallel. The sparse part has nn.EmbeddingBags stored on multiple
parameter servers.
The model holds a Remote Reference to the embedding tables on the parameter
servers.
"""
def __init__(self, emb_rref_list, device):
super(HybridModel, self).__init__()
self.emb_rref_list = emb_rref_list
fc1 = torch.nn.Linear(512, 256)
fc2 = torch.nn.Linear(256, 128)
relu = torch.nn.ReLU()
fc3 = torch.nn.Linear(128, 64)
fc4 = torch.nn.Linear(64, 32)
fc5 = torch.nn.Linear(32, 8)
sec = nn.Sequential(fc1, fc2, relu, fc3, fc4, fc5)
self.ddp = DDP(sec.to(device), device_ids=[device])
self.device = device
def forward(self, indices, offsets):
emb_lookups = []
for emb_rref in self.emb_rref_list:
emb_lookups.append(
emb_rref.rpc_sync().forward(
indices, offsets
) # embedding_sum(input, offsets)
)
emb_lookups_cat = torch.cat(emb_lookups, dim=1)
# Make sure the combined PS dimension is always bigger than or equal to the FC input
assert NUM_PS * EMBEDDING_DIM >= 512
dim_normalizer = int(NUM_PS * EMBEDDING_DIM / 512)
emb_lookups_reshaped = emb_lookups_cat.reshape(
[emb_lookups_cat.shape[0] * dim_normalizer, 512]
)
return self.ddp(emb_lookups_reshaped)
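# Editor's note: the following is an illustrative shape check, not part of the
# original script. With NUM_PS = 8 and EMBEDDING_DIM = 64 the concatenated lookup is
# already 512 wide, so dim_normalizer == 1 and the reshape keeps the batch dimension;
# a hypothetical 16 parameter servers would fold each row into two 512-wide rows.
def _example_reshape_shapes(batch_size=4, num_ps=8, embedding_dim=64):
    import torch
    emb_lookups_cat = torch.zeros(batch_size, num_ps * embedding_dim)
    dim_normalizer = num_ps * embedding_dim // 512
    reshaped = emb_lookups_cat.reshape(batch_size * dim_normalizer, 512)
    return reshaped.shape  # torch.Size([4, 512]) for the defaults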
def _retrieve_embedding_parameters(emb_rref):
return [RRef(p) for p in emb_rref.local_value().parameters()]
def _print_header():
_print_cont("\n")
_print_cont("%10s" % "")
for p in [50, 75, 90, 95]:
_print_cont("%14s%10s" % ("sec/epoch", "epoch/sec"))
_print_cont("\n")
def _print_benchmark(prefix, nelem, measurements):
measurements = sorted(measurements)
_print_cont("%8s:" % prefix)
for p in [50, 75, 90, 95]:
v = np.percentile(measurements, p)
_print_cont(" p%02d: %1.3fs %6d/s" % (p, v, nelem / v))
_print_cont("\n")
def _print_cont(msg):
print(msg, end="", flush=True)
def _run_printable(cmd):
proc = subprocess.run(shlex.split(cmd), capture_output=True) # type: ignore[call-overload]
assert proc.returncode == 0
buffer = io.BytesIO()
torch.save(proc.stdout.decode("utf-8"), buffer)
input_tensor = torch.ByteTensor(list(buffer.getvalue()))
input_length = torch.IntTensor([input_tensor.size(0)])
output = []
buffer = io.BytesIO(np.asarray(input_tensor).tobytes())
output.append(torch.load(buffer))
return output
def _run_trainer(emb_rref_list, rank):
r"""
Each trainer runs a forward pass which involves an embedding lookup on the
8 parameter servers and running nn.Linear locally. During the backward pass,
DDP is responsible for aggregating the gradients for the dense part
(nn.Linear) and distributed autograd ensures gradient updates are
propagated to the parameter servers.
"""
# Setup the model.
model = HybridModel(emb_rref_list, rank)
# Retrieve all model parameters as rrefs for DistributedOptimizer.
# Retrieve parameters from all embedding tables for the current trainer.
model_parameter_rrefs = []
for ind, emb_rref in enumerate(emb_rref_list):
ps_name = "ps{}".format(ind)
model_parameter_rrefs.extend(
rpc.rpc_sync(ps_name, _retrieve_embedding_parameters, args=(emb_rref,))
)
# model.parameters() only includes local parameters.
for param in model.parameters():
model_parameter_rrefs.append(RRef(param))
# Setup distributed optimizer
opt = DistributedOptimizer(optim.SGD, model_parameter_rrefs, lr=0.05)
criterion = torch.nn.CrossEntropyLoss()
def get_next_batch(rank):
for _ in range(10):
num_indices = random.randint(20, 50)
indices = torch.LongTensor(num_indices).random_(0, NUM_EMBEDDINGS)
# Generate offsets.
offsets = []
start = 0
batch_size = 0
while start < num_indices:
offsets.append(start)
start += random.randint(1, 10)
batch_size += 1
offsets_tensor = torch.LongTensor(offsets)
target = torch.LongTensor(batch_size).random_(8).cuda(rank)
yield indices, offsets_tensor, target
measurements = []
# Include warm-up cycles during training
for epoch in range(100 + WARMUP_CYCLES):
start = time.time()
batch_size = 0
# create distributed autograd context
for indices, offsets, target in get_next_batch(rank):
batch_size += len(target)
with dist_autograd.context() as context_id:
output = model(indices, offsets)
loss = criterion(output, target)
# Run distributed backward pass
dist_autograd.backward(context_id, [loss])
# Run distributed optimizer. Gradients are propagated all the way to the parameter servers.
opt.step(context_id)
# Not necessary to zero grads as each iteration creates a different
# distributed autograd context which hosts different grads
measurements.append(time.time() - start)
# print("Training done for epoch {}".format(epoch))
# Throw away warm-up measurements
measurements = measurements[WARMUP_CYCLES:]
return rank, measurements, batch_size
def run_worker(rank, world_size):
r"""
A wrapper function that initializes RPC, runs the rank-specific logic
(master, trainer, or parameter server), and shuts down RPC.
"""
# Using different port numbers in TCP init_method for init_rpc and
# init_process_group to avoid port conflicts.
rpc_backend_options = TensorPipeRpcBackendOptions()
rpc_backend_options.init_method = "tcp://localhost:29500"
# Rank 16. Master
if rank == (NUM_TRAINERS + NUM_PS):
rpc.init_rpc(
"master", rank=rank,
backend=BackendType.TENSORPIPE, # type: ignore[attr-defined]
world_size=world_size
)
# Build the Embedding tables on the Parameter Servers.
emb_rref_list = []
index = 0
while index < NUM_PS:
ps_name = "ps{}".format(index)
emb_rref = rpc.remote(
ps_name,
torch.nn.EmbeddingBag,
args=(NUM_EMBEDDINGS, EMBEDDING_DIM),
kwargs={"mode": "sum"},
)
emb_rref_list.append(emb_rref)
index += 1
# Run training loop on the trainers.
futs = []
for trainer_rank in range(NUM_TRAINERS):
trainer_name = "trainer{}".format(trainer_rank)
fut = rpc.rpc_async(
trainer_name, _run_trainer, args=(emb_rref_list, trainer_rank)
)
futs.append(fut)
_print_header()
measurements_all_trainers = []
batch_size_all_trainers = 0
# Wait for all training to finish.
for fut in futs:
rank, measurements, batch_size = fut.wait()
_print_benchmark("Trainer{}".format(rank), batch_size, measurements)
batch_size_all_trainers += batch_size
measurements_all_trainers.append(measurements)
_print_benchmark("All", batch_size_all_trainers, measurements_all_trainers)
# Rank 0-7. Trainers
elif rank >= 0 and rank < NUM_TRAINERS:
# Initialize process group for Distributed DataParallel on trainers.
dist.init_process_group(
backend=dist.Backend.GLOO,
rank=rank,
world_size=NUM_TRAINERS,
init_method="tcp://localhost:29501",
)
# Initialize RPC. Trainer just waits for RPCs from master.
trainer_name = "trainer{}".format(rank)
rpc.init_rpc(
trainer_name,
rank=rank,
world_size=world_size,
rpc_backend_options=rpc_backend_options,
)
# Rank 8-15. Parameter Servers
elif rank >= NUM_TRAINERS and rank < NUM_TRAINERS + NUM_PS:
ps_name = "ps{}".format(rank - NUM_TRAINERS)
rpc.init_rpc(
ps_name,
rank=rank,
world_size=world_size,
backend=BackendType.TENSORPIPE, # type: ignore[attr-defined]
rpc_backend_options=rpc_backend_options,
)
# Parameter servers do nothing; they only serve RPCs from the trainers.
pass
# block until all rpcs finish
rpc.shutdown()
if __name__ == "__main__":
""" Initializing the distributed environment. """
output = _run_printable("nvidia-smi topo -m")
print("-------------------------------------------")
print(" Info ")
print("-------------------------------------------")
print("")
print("* PyTorch version: {}".format(torch.__version__))
print("* CUDA version: {}".format(torch.version.cuda))
print("")
print("------------ nvidia-smi topo -m -----------")
print("")
print(output[0])
print("-------------------------------------------")
print("PyTorch Distributed Benchmark (DDP and RPC)")
print("-------------------------------------------")
# Cmd arguments to enable automated runs (e.g. Chronos, SSH, etc).
parser = argparse.ArgumentParser(description="PyTorch DDP and RPC Benchmark")
parser.add_argument(
"--master-addr", type=str, default="localhost", help="Address of master node."
)
parser.add_argument("--master-port", type=str, default="29500", help="Master port.")
parser.add_argument(
"--number-trainers",
type=int,
default=NUM_TRAINERS,
help="Number of Trainer Nodes.",
)
parser.add_argument(
"--number-ps", type=int, default=NUM_PS, help="Number of Parameter Servers."
)
parser.add_argument(
"--number-embeddings",
type=int,
default=NUM_EMBEDDINGS,
help="Number of test embeddings to be generated.",
)
parser.add_argument(
"--embedding-dim",
type=int,
default=EMBEDDING_DIM,
help="Number of embedding dimentions.",
)
parser.add_argument(
"--warmup-cycles",
type=int,
default=WARMUP_CYCLES,
help="Number of cycles to warm-up each process before running the benchmark.",
)
args = parser.parse_args()
os.environ["MASTER_ADDR"] = args.master_addr
os.environ["MASTER_PORT"] = args.master_port
NUM_TRAINERS = args.number_trainers
NUM_PS = args.number_ps
NUM_EMBEDDINGS = args.number_embeddings
EMBEDDING_DIM = args.embedding_dim
WARMUP_CYCLES = args.warmup_cycles
# Defaults:
# 8 trainers (rank 0-7),
# 8 parameter servers (rank 8-15),
# 1 master (rank 16).
world_size = NUM_TRAINERS + NUM_PS + 1 # Trainers + PS + Master
mp.spawn(run_worker, args=(world_size,), nprocs=world_size, join=True)
| pytorch-master | torch/distributed/benchmarks/benchmark_ddp_rpc.py |
import functools
def async_execution(fn):
r"""
A decorator for a function indicating that the return value of the function
is guaranteed to be a :class:`~torch.futures.Future` object and this
function can run asynchronously on the RPC callee. More specifically, the
callee extracts the :class:`~torch.futures.Future` returned by the wrapped
function and installs subsequent processing steps as a callback to that
:class:`~torch.futures.Future`. The installed callback will read the value
from the :class:`~torch.futures.Future` when completed and send the
value back as the RPC response. That also means the returned
:class:`~torch.futures.Future` only exists on the callee side and is never
sent through RPC. This decorator is useful when the wrapped function's
(``fn``) execution needs to pause and resume due to, e.g., containing
:meth:`~torch.distributed.rpc.rpc_async` or waiting for other signals.
.. note:: To enable asynchronous execution, applications must pass the
function object returned by this decorator to RPC APIs. If RPC detected
attributes installed by this decorator, it knows that this function
returns a ``Future`` object and will handle that accordingly.
However, this does not mean this decorator has to be the outermost one when
defining a function. For example, when combined with ``@staticmethod``
or ``@classmethod``, ``@rpc.functions.async_execution`` needs to be the
inner decorator to allow the target function to be recognized as a static
or class function. This target function can still execute asynchronously
because, when accessed, the static or class method preserves attributes
installed by ``@rpc.functions.async_execution``.
Example::
The returned :class:`~torch.futures.Future` object can come from
:meth:`~torch.distributed.rpc.rpc_async`,
:meth:`~torch.futures.Future.then`, or :class:`~torch.futures.Future`
constructor. The example below shows directly using the
:class:`~torch.futures.Future` returned by
:meth:`~torch.futures.Future.then`.
>>> from torch.distributed import rpc
>>>
>>> # omitting setup and shutdown RPC
>>>
>>> # On all workers
>>> @rpc.functions.async_execution
>>> def async_add_chained(to, x, y, z):
>>> # This function runs on "worker1" and returns immediately when
>>> # the callback is installed through the `then(cb)` API. In the
>>> # mean time, the `rpc_async` to "worker2" can run concurrently.
>>> # When the return value of that `rpc_async` arrives at
>>> # "worker1", "worker1" will run the lambda function accordingly
>>> # and set the value for the previously returned `Future`, which
>>> # will then trigger RPC to send the result back to "worker0".
>>> return rpc.rpc_async(to, torch.add, args=(x, y)).then(
>>> lambda fut: fut.wait() + z
>>> )
>>>
>>> # On worker0
>>> # xdoctest: +SKIP
>>> ret = rpc.rpc_sync(
>>> "worker1",
>>> async_add_chained,
>>> args=("worker2", torch.ones(2), 1, 1)
>>> )
>>> print(ret) # prints tensor([3., 3.])
When combined with TorchScript decorators, this decorator must be the
outermost one.
>>> from torch import Tensor
>>> from torch.futures import Future
>>> from torch.distributed import rpc
>>>
>>> # omitting setup and shutdown RPC
>>>
>>> # On all workers
>>> @torch.jit.script
>>> def script_add(x: Tensor, y: Tensor) -> Tensor:
>>> return x + y
>>>
>>> @rpc.functions.async_execution
>>> @torch.jit.script
>>> def async_add(to: str, x: Tensor, y: Tensor) -> Future[Tensor]:
>>> return rpc.rpc_async(to, script_add, (x, y))
>>>
>>> # On worker0
>>> ret = rpc.rpc_sync(
>>> "worker1",
>>> async_add,
>>> args=("worker2", torch.ones(2), 1)
>>> )
>>> print(ret) # prints tensor([2., 2.])
When combined with static or class method, this decorator must be the
inner one.
>>> from torch.distributed import rpc
>>>
>>> # omitting setup and shutdown RPC
>>>
>>> # On all workers
>>> class AsyncExecutionClass:
>>>
>>> @staticmethod
>>> @rpc.functions.async_execution
>>> def static_async_add(to, x, y, z):
>>> return rpc.rpc_async(to, torch.add, args=(x, y)).then(
>>> lambda fut: fut.wait() + z
>>> )
>>>
>>> @classmethod
>>> @rpc.functions.async_execution
>>> def class_async_add(cls, to, x, y, z):
>>> ret_fut = torch.futures.Future()
>>> rpc.rpc_async(to, torch.add, args=(x, y)).then(
>>> lambda fut: ret_fut.set_result(fut.wait() + z)
>>> )
>>> return ret_fut
>>>
>>> @rpc.functions.async_execution
>>> def bound_async_add(self, to, x, y, z):
>>> return rpc.rpc_async(to, torch.add, args=(x, y)).then(
>>> lambda fut: fut.wait() + z
>>> )
>>>
>>> # On worker0
>>> ret = rpc.rpc_sync(
>>> "worker1",
>>> AsyncExecutionClass.static_async_add,
>>> args=("worker2", torch.ones(2), 1, 2)
>>> )
>>> print(ret) # prints tensor([4., 4.])
>>>
>>> ret = rpc.rpc_sync(
>>> "worker1",
>>> AsyncExecutionClass.class_async_add,
>>> args=("worker2", torch.ones(2), 1, 2)
>>> )
>>> print(ret) # prints tensor([4., 4.])
This decorator also works with RRef helpers, i.e.,
:meth:`torch.distributed.rpc.RRef.rpc_sync`,
:meth:`torch.distributed.rpc.RRef.rpc_async`, and
:meth:`torch.distributed.rpc.RRef.remote`.
>>> from torch.distributed import rpc
>>>
>>> # reuse the AsyncExecutionClass class above
>>> rref = rpc.remote("worker1", AsyncExecutionClass)
>>> ret = rref.rpc_sync().static_async_add("worker2", torch.ones(2), 1, 2)
>>> print(ret) # prints tensor([4., 4.])
>>>
>>> rref = rpc.remote("worker1", AsyncExecutionClass)
>>> ret = rref.rpc_async().static_async_add("worker2", torch.ones(2), 1, 2).wait()
>>> print(ret) # prints tensor([4., 4.])
>>>
>>> rref = rpc.remote("worker1", AsyncExecutionClass)
>>> ret = rref.remote().static_async_add("worker2", torch.ones(2), 1, 2).to_here()
>>> print(ret) # prints tensor([4., 4.])
"""
@functools.wraps(fn)
def wrapper(*args, **kwargs):
return fn(*args, **kwargs)
# Can't declare and use attributes of function objects (mypy#2087)
wrapper._wrapped_async_rpc_function = fn # type: ignore[attr-defined]
return wrapper
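# A minimal sketch of the marker attribute installed by the decorator. The
# rpc APIs check for it via ``hasattr(func, "_wrapped_async_rpc_function")``
# before dispatching (illustrative only; assumes torch and rpc are imported):
#
#   >>> @async_execution
#   ... def async_add(to, x, y):
#   ...     return rpc.rpc_async(to, torch.add, args=(x, y))
#   >>> hasattr(async_add, "_wrapped_async_rpc_function")
#   True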
| pytorch-master | torch/distributed/rpc/functions.py |
from typing import Dict, List, Optional, Union
import torch
from torch._C._distributed_rpc import _TensorPipeRpcBackendOptionsBase
from . import constants as rpc_constants
DeviceType = Union[int, str, torch.device]
def _to_device(device: DeviceType) -> torch.device:
device = torch.device(device)
if device.type != "cuda":
raise ValueError(
"`set_devices` expect a list of CUDA devices, but got "
f"device type {device.type}."
)
return device
def _to_device_map(
device_map: Dict[DeviceType, DeviceType]
) -> Dict[torch.device, torch.device]:
full_device_map: Dict[torch.device, torch.device] = {}
reverse_map: Dict[torch.device, torch.device] = {}
for k, v in device_map.items():
k, v = torch.device(k), torch.device(v)
if v in reverse_map:
raise ValueError(
"`device_map` only supports 1-to-1 mapping, "
f"trying to map {k} and {reverse_map[v]} to {v}"
)
full_device_map[k] = v
reverse_map[v] = k
return full_device_map
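# A minimal sketch of the normalization performed above (hypothetical values;
# both integers and strings are coerced to torch.device objects):
#
#   >>> _to_device_map({0: 1, "cuda:1": "cuda:2"})
#   {device(type='cuda', index=0): device(type='cuda', index=1),
#    device(type='cuda', index=1): device(type='cuda', index=2)}
#   >>> _to_device_map({0: 1, 1: 1})  # not invertible -> raises ValueError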
def _to_device_list(devices: List[DeviceType]) -> List[torch.device]:
return list(map(_to_device, devices))
class TensorPipeRpcBackendOptions(_TensorPipeRpcBackendOptionsBase):
r"""
The backend options for
:class:`~torch.distributed.rpc.TensorPipeAgent`, derived from
:class:`~torch.distributed.rpc.RpcBackendOptions`.
Args:
num_worker_threads (int, optional): The number of threads in the
thread-pool used by
:class:`~torch.distributed.rpc.TensorPipeAgent` to execute
requests (default: 16).
rpc_timeout (float, optional): The default timeout, in seconds,
for RPC requests (default: 60 seconds). If the RPC has not
completed in this timeframe, an exception indicating so will
be raised. Callers can override this timeout for individual
RPCs in :meth:`~torch.distributed.rpc.rpc_sync` and
:meth:`~torch.distributed.rpc.rpc_async` if necessary.
init_method (str, optional): The URL to initialize the distributed
store used for rendezvous. It takes any value accepted for the
same argument of :meth:`~torch.distributed.init_process_group`
(default: ``env://``).
device_maps (Dict[str, Dict], optional): Device placement mappings from
this worker to the callee. Key is the callee worker name and value
the dictionary (``Dict`` of ``int``, ``str``, or ``torch.device``)
that maps this worker's devices to the callee worker's devices.
(default: ``None``)
devices (List[int, str, or ``torch.device``], optional): all local
CUDA devices used by the RPC agent. By default, it will be initialized
to all local devices from its own ``device_maps`` and corresponding
devices from its peers' ``device_maps``. When processing CUDA RPC
requests, the agent will properly synchronize CUDA streams for
all devices in this ``List``.
"""
def __init__(
self,
*,
num_worker_threads: int = rpc_constants.DEFAULT_NUM_WORKER_THREADS,
rpc_timeout: float = rpc_constants.DEFAULT_RPC_TIMEOUT_SEC,
init_method: str = rpc_constants.DEFAULT_INIT_METHOD,
device_maps: Optional[Dict[str, Dict[DeviceType, DeviceType]]] = None,
devices: Optional[List[DeviceType]] = None,
_transports: Optional[List] = None,
_channels: Optional[List] = None,
):
full_device_maps = (
{}
if device_maps is None
else {k: _to_device_map(v) for k, v in device_maps.items()}
)
full_device_list = [] if devices is None else _to_device_list(devices)
super().__init__(
num_worker_threads,
_transports,
_channels,
rpc_timeout,
init_method,
full_device_maps,
full_device_list,
)
def set_device_map(self, to: str, device_map: Dict[DeviceType, DeviceType]):
r"""
Set device mapping between each RPC caller and callee pair. This
function can be called multiple times to incrementally add
device placement configurations.
Args:
worker_name (str): Callee name.
device_map (Dict of int, str, or torch.device): Device placement
mappings from this worker to the callee. This map must be
invertible.
Example::
>>> # both workers
>>> def add(x, y):
>>> print(x) # tensor([1., 1.], device='cuda:1')
>>> return x + y, (x + y).to(2)
>>>
>>> # on worker 0
>>> options = TensorPipeRpcBackendOptions(
>>> num_worker_threads=8,
>>> device_maps={"worker1": {0: 1}}
>>> # maps worker0's cuda:0 to worker1's cuda:1
>>> )
>>> options.set_device_map("worker1", {1: 2})
>>> # maps worker0's cuda:1 to worker1's cuda:2
>>>
>>> # xdoctest: +SKIP
>>> rpc.init_rpc(
>>> "worker0",
>>> rank=0,
>>> world_size=2,
>>> backend=rpc.BackendType.TENSORPIPE,
>>> rpc_backend_options=options
>>> )
>>>
>>> x = torch.ones(2)
>>> rets = rpc.rpc_sync("worker1", add, args=(x.to(0), 1))
>>> # The first argument will be moved to cuda:1 on worker1. When
>>> # sending the return value back, it will follow the inverse of
>>> # the device map, and hence will be moved back to cuda:0 and
>>> # cuda:1 on worker0
>>> print(rets[0]) # tensor([2., 2.], device='cuda:0')
>>> print(rets[1]) # tensor([2., 2.], device='cuda:1')
"""
full_device_map = _to_device_map(device_map)
curr_device_maps = super().device_maps
if to in curr_device_maps:
for k, v in full_device_map.items():
if k in curr_device_maps[to] and v != curr_device_maps[to][k]:
raise ValueError(
"`set_device_map` only supports 1-to-1 mapping, trying"
f" to map {k} to {v} and {curr_device_maps[to][k]}"
)
super()._set_device_map(to, full_device_map)
def set_devices(self, devices: List[DeviceType]):
r"""
Set local devices used by the TensorPipe RPC agent. When processing
CUDA RPC requests, the TensorPipe RPC agent will properly synchronize
CUDA streams for all devices in this ``List``.
Args:
devices (List of int, str, or torch.device): local devices used by
the TensorPipe RPC agent.
"""
self.devices = _to_device_list(devices)
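# A minimal usage sketch for set_devices (illustrative; the device strings are
# hypothetical and only need to be valid CUDA device specifiers):
#
#   >>> opts = TensorPipeRpcBackendOptions(num_worker_threads=8)
#   >>> opts.set_devices(["cuda:0", "cuda:1"])
#   >>> opts.devices
#   [device(type='cuda', index=0), device(type='cuda', index=1)]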
| pytorch-master | torch/distributed/rpc/options.py |
import collections
import copyreg
import io
import pickle
import sys
import threading
import traceback
from enum import Enum
import torch
import torch.distributed as dist
from torch._C._distributed_rpc import _get_current_rpc_agent
# Thread local tensor tables to store tensors while pickling torch.Tensor
# objects
_thread_local_tensor_tables = threading.local()
_pickler = pickle.Pickler
_unpickler = pickle.Unpickler
class RPCExecMode(Enum):
SYNC = "sync"
ASYNC = "async"
ASYNC_JIT = "async_jit"
REMOTE = "remote"
class _InternalRPCPickler:
r"""
This class provides serialize() and deserialize() interfaces to serialize
data to be "binary string + tensor table" format
So for RPC python UDF function and args, non tensor data will be serialized
into regular binary string, tensor data will be put into thread local tensor
tables, this serialization format is consistent with builtin operator and args
using JIT pickler. This format will make tensor handling in C++ much easier,
e.g. attach tensor to distributed autograd graph in C++
"""
def __init__(self):
# Ignore type error because dispatch_table is defined in third-party package
self._dispatch_table = copyreg.dispatch_table.copy() # type: ignore[attr-defined]
self._dispatch_table[torch.Tensor] = self._tensor_reducer
# Used for registering customized picklers.
self._class_reducer_dict = {}
def _register_reducer(self, obj_class, reducer):
# For the same class, only register the reducer once.
if obj_class not in self._class_reducer_dict:
self._class_reducer_dict[obj_class] = reducer
@classmethod
def _tensor_receiver(cls, tensor_index):
global _thread_local_tensor_tables
return _thread_local_tensor_tables.recv_tables[tensor_index]
def _tensor_reducer(self, tensor):
global _thread_local_tensor_tables
_thread_local_tensor_tables.send_tables.append(tensor)
tensor_index = len(_thread_local_tensor_tables.send_tables) - 1
return (_InternalRPCPickler._tensor_receiver, (tensor_index,))
@classmethod
def _py_rref_receiver(cls, rref_fork_data):
return dist.rpc.PyRRef._deserialize(rref_fork_data)
def _py_rref_reducer(self, py_rref):
rref_fork_data = py_rref._serialize()
return (_InternalRPCPickler._py_rref_receiver, (rref_fork_data,))
def _rref_reducer(self, rref):
return self._py_rref_reducer(rref)
@classmethod
def _script_module_receiver(cls, script_module_serialized):
"""
Given a serialized representation of a ScriptModule created with torch.jit.save,
loads and returns the ScriptModule.
"""
f = io.BytesIO(script_module_serialized)
m = torch.jit.load(f)
return m
def _script_module_reducer(self, script_module):
"""
Serializes a ScriptModule.
"""
f = io.BytesIO()
torch.jit.save(script_module, f)
return (_InternalRPCPickler._script_module_receiver, (f.getvalue(),))
def serialize(self, obj):
r"""
Serialize non tensor data into binary string, tensor data into
tensor table
"""
f = io.BytesIO()
p = _pickler(f)
p.dispatch_table = self._dispatch_table
# The rpc api can accept user picklers inheriting from _InternalRPCPickler to serialize RRefs.
# User picklers may have a different initialization function from _InternalRPCPickler,
# but all user picklers should call serialize() and use _rref_reducer to pickle RRefs
# in Python. Also, when _internal_rpc_pickler is imported into rpc/api.py, rpc.RRef is not
# compiled yet, so it is not a good place to access rpc.RRef inside the _InternalRPCPickler
# constructor; hence RRef's dispatch table entries are registered here.
#
# The return value of a `rpc.remote(..)` call is of type `rpc.PyRRef`.
# The deserialized RRef object on an RPC receiver side is of type `rpc.PyRRef`.
# Ignore type error because dispatch_table is defined in third-party package
p.dispatch_table[dist.rpc.PyRRef] = self._py_rref_reducer # type: ignore[index]
# An RRef created locally by RRef Python constructor is type of `rpc.RRef`.
# Ignore type error because dispatch_table is defined in third-party package
p.dispatch_table[dist.rpc.RRef] = self._rref_reducer # type: ignore[index]
# Add dispatch pickling for ScriptModule or its subclass.
if isinstance(obj, torch.jit.ScriptModule):
# Ignore type error because dispatch_table is defined in third-party package
p.dispatch_table[obj.__class__] = self._script_module_reducer # type: ignore[index]
# Install customized picklers.
for class_name in self._class_reducer_dict.keys():
p.dispatch_table[class_name] = self._class_reducer_dict[class_name] # type: ignore[index]
# save _thread_local_tensor_tables.send_tables if it is in nested call
global _thread_local_tensor_tables
if hasattr(_thread_local_tensor_tables, "send_tables"):
old_send_tables = _thread_local_tensor_tables.send_tables
else:
old_send_tables = None
_thread_local_tensor_tables.send_tables = []
p.dump(obj)
# restore _thread_local_tensor_tables.send_tables if return
# from nested call, otherwise clean up the table
tensors = _thread_local_tensor_tables.send_tables
if old_send_tables is not None:
_thread_local_tensor_tables.send_tables = old_send_tables
else:
del _thread_local_tensor_tables.send_tables
return (f.getvalue(), tensors)
def deserialize(self, binary_data, tensor_table):
r"""
Deserialize binary string + tensor table to original obj
"""
# save _thread_local_tensor_tables.recv_tables if it is in nested call
global _thread_local_tensor_tables
if hasattr(_thread_local_tensor_tables, "recv_tables"):
old_recv_tables = _thread_local_tensor_tables.recv_tables
else:
old_recv_tables = None
_thread_local_tensor_tables.recv_tables = tensor_table
try:
unpickler = _unpickler(io.BytesIO(binary_data))
ret = unpickler.load()
except AttributeError as e:
# Occurs when function is not found on module/class during
# unpickling.
except_str = (
str(e)
+ """ Default RPC pickler does not serialize
function code. Ensure that UDFs are defined on both caller and
callee modules."""
)
ret = AttributeError(except_str)
# Ensure the stack trace gets preserved
ret.__cause__ = e
# restore _thread_local_tensor_tables.recv_tables if return
# from nested call, otherwise clean up the table
if old_recv_tables is not None:
_thread_local_tensor_tables.recv_tables = old_recv_tables
else:
del _thread_local_tensor_tables.recv_tables
return ret
# Create _internal_rpc_pickler only once to initialize _dispatch_table only once
_internal_rpc_pickler = _InternalRPCPickler()
def serialize(obj):
return _internal_rpc_pickler.serialize(obj)
def deserialize(binary_data, tensor_table):
return _internal_rpc_pickler.deserialize(binary_data, tensor_table)
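# A minimal round-trip sketch of the "binary string + tensor table" format
# using the module-level helpers above (illustrative only; no RPC agent is
# required for local serialization):
#
#   >>> payload, tensor_table = serialize({"w": torch.ones(2), "step": 3})
#   >>> deserialize(payload, tensor_table)
#   {'w': tensor([1., 1.]), 'step': 3}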
def _run_function(python_udf):
r"""
This function is exclusively called from C++.
See ``torch/csrc/distributed/rpc/python_rpc_handler.cpp``.
Runs a Python UDF and returns its return value.
Wraps any exception in ``RemoteException`` if the function raises.
"""
try:
if isinstance(python_udf, AttributeError):
raise python_udf
result = python_udf.func(*python_udf.args, **python_udf.kwargs)
except Exception as e:
# except str = exception info + traceback string
except_str = (
f"On {_get_current_rpc_agent().get_worker_info()}:\n"
f"{repr(e)}\n{traceback.format_exc()}"
)
print(except_str, file=sys.stderr)
result = RemoteException(except_str, type(e))
return result
def _handle_exception(result):
if isinstance(result, RemoteException):
raise result.exception_type(result.msg.encode("utf-8").decode("unicode_escape"))
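# A minimal sketch of the UDF execution / exception propagation pair
# (illustrative only; the failure branch of _run_function assumes an RPC
# agent is running because it queries the current worker info):
#
#   >>> udf = PythonUDF(func=divmod, args=(7, 2), kwargs={})
#   >>> _run_function(udf)
#   (3, 1)
#   >>> bad = PythonUDF(func=divmod, args=(1, 0), kwargs={})
#   >>> _handle_exception(_run_function(bad))   # re-raises ZeroDivisionError locally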
def _build_rpc_profiling_key(
exec_type, func_name, current_worker_name, dst_worker_name
):
"""
Builds the key that RPC calls are profiled with using the autograd profiler.
This will be the name of the corresponding Event recorded in the profiler.
Args:
exec_type (RPCExecMode): Type of RPC/RRef call
func_name (str): Name of function being profiled.
current_worker_name (str): Name of current worker.
dst_worker_name (str): Name of the destination worker.
Returns:
String representing profiling key
"""
profile_key = "rpc_{rpc_type}#{func_name}({current_worker} -> {dst_worker})".format(
rpc_type=exec_type.value,
func_name=func_name,
current_worker=current_worker_name,
dst_worker=dst_worker_name,
)
return profile_key
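# Example of the key format produced above (hypothetical worker names):
#
#   >>> _build_rpc_profiling_key(RPCExecMode.SYNC, "torch.add", "worker0", "worker1")
#   'rpc_sync#torch.add(worker0 -> worker1)'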
def _start_record_function(exec_type, func_name, current_worker_name, dest_worker_name):
"""
This function should be called from RPC/RRef functions to create a
RecordFunction object for profiling. This function also runs the before
callbacks that start the profiling, though the user is responsible for
running the appropriate callbacks when the function to be profiled finishes.
Args:
exec_type (RPCExecMode): Type of RPC/RRef call
func_name (str): Name of function being profiled.
current_worker_name (str): Name of current worker.
dest_worker_name (str): Name of the destination worker.
Returns:
An instance of `torch.autograd._RecordFunction`.
"""
assert torch.autograd._profiler_enabled(), "Autograd profiler should be enabled."
profile_key = "rpc_{}#{}({} -> {})".format(
exec_type.value, str(func_name), current_worker_name, dest_worker_name
)
rf = torch.autograd._RecordFunction() # type: ignore[attr-defined]
torch.autograd._run_before_callbacks(rf, profile_key) # type: ignore[attr-defined]
return rf
PythonUDF = collections.namedtuple("PythonUDF", ["func", "args", "kwargs"])
RemoteException = collections.namedtuple("RemoteException", ["msg", "exception_type"])
| pytorch-master | torch/distributed/rpc/internal.py |
from datetime import timedelta
from torch._C._distributed_rpc import (
_DEFAULT_INIT_METHOD,
_DEFAULT_NUM_WORKER_THREADS,
_DEFAULT_RPC_TIMEOUT_SEC,
_UNSET_RPC_TIMEOUT,
)
# For any RpcAgent.
DEFAULT_RPC_TIMEOUT_SEC: float = _DEFAULT_RPC_TIMEOUT_SEC
DEFAULT_INIT_METHOD: str = _DEFAULT_INIT_METHOD
DEFAULT_SHUTDOWN_TIMEOUT: float = 0
# For TensorPipeAgent.
DEFAULT_NUM_WORKER_THREADS: int = _DEFAULT_NUM_WORKER_THREADS
# Ensure that we don't time out when there are long periods of time without
# any operations against the underlying ProcessGroup.
DEFAULT_PROCESS_GROUP_TIMEOUT: timedelta = timedelta(milliseconds=2 ** 31 - 1)
# Value indicating that timeout is not set for RPC call, and the default should be used.
UNSET_RPC_TIMEOUT: float = _UNSET_RPC_TIMEOUT
| pytorch-master | torch/distributed/rpc/constants.py |
from datetime import timedelta
import logging
import os
import threading
import warnings
from typing import Generator, Tuple
from urllib.parse import urlparse
import torch
import torch.distributed as dist
logger = logging.getLogger(__name__)
_init_counter = 0
_init_counter_lock = threading.Lock()
def is_available():
return hasattr(torch._C, "_rpc_init")
if is_available() and not torch._C._rpc_init():
raise RuntimeError("Failed to initialize torch.distributed.rpc")
if is_available():
from torch._C._distributed_c10d import Store
from torch._C._distributed_rpc import (
_disable_jit_rref_pickle,
_enable_jit_rref_pickle,
_disable_server_process_global_profiler,
_enable_server_process_global_profiler,
_set_and_start_rpc_agent,
_reset_current_rpc_agent,
_delete_all_user_and_unforked_owner_rrefs,
_destroy_rref_context,
_set_profiler_node_id,
_is_current_rpc_agent_set,
_rref_context_get_debug_info,
_cleanup_python_rpc_handler,
_invoke_rpc_builtin,
_invoke_rpc_python_udf,
_invoke_rpc_torchscript,
_invoke_remote_builtin,
_invoke_remote_python_udf,
_invoke_remote_torchscript,
_set_rpc_timeout,
_get_current_rpc_agent,
get_rpc_timeout,
enable_gil_profiling,
RpcBackendOptions,
_TensorPipeRpcBackendOptionsBase,
RpcAgent,
PyRRef,
TensorPipeAgent,
RemoteProfilerManager,
WorkerInfo,
_DEFAULT_INIT_METHOD,
_DEFAULT_NUM_WORKER_THREADS,
_UNSET_RPC_TIMEOUT,
_DEFAULT_RPC_TIMEOUT_SEC,
) # noqa: F401
from . import api, backend_registry, functions
from .api import * # noqa: F401,F403
import numbers
import torch.distributed.autograd as dist_autograd
from .backend_registry import BackendType
from .options import TensorPipeRpcBackendOptions # noqa: F401
from .server_process_global_profiler import (
_server_process_global_profile,
)
rendezvous_iterator: Generator[Tuple[Store, int, int], None, None]
def init_rpc(
name,
backend=None,
rank=-1,
world_size=None,
rpc_backend_options=None,
):
r"""
Initializes RPC primitives such as the local RPC agent
and distributed autograd, which immediately makes the current
process ready to send and receive RPCs.
Args:
name (str): a globally unique name of this node. (e.g.,
``Trainer3``, ``ParameterServer2``, ``Master``, ``Worker1``)
The name can only contain numbers, letters, underscores, colons,
and/or dashes, and must be shorter than 128 characters.
backend (BackendType, optional): The type of RPC backend
implementation. The only supported value is
``BackendType.TENSORPIPE`` (the default).
See :ref:`rpc-backends` for more information.
rank (int): a globally unique id/rank of this node.
world_size (int): The number of workers in the group.
rpc_backend_options (RpcBackendOptions, optional): The options
passed to the RpcAgent constructor. It must be an agent-specific
subclass of :class:`~torch.distributed.rpc.RpcBackendOptions`
and contains agent-specific initialization configurations. By
default, for all agents, it sets the default timeout to 60
seconds and performs the rendezvous with an underlying process
group initialized using ``init_method = "env://"``,
meaning that environment variables ``MASTER_ADDR`` and
``MASTER_PORT`` need to be set properly. See
:ref:`rpc-backends` for more information and find which options
are available.
"""
torch._C._log_api_usage_once("torch.distributed.init_rpc")
if backend is not None and not isinstance(
backend, backend_registry.BackendType
):
raise TypeError("Argument backend must be a member of BackendType")
if rpc_backend_options is not None and not isinstance(
rpc_backend_options, RpcBackendOptions
):
raise TypeError(
"Argument rpc_backend_options must be an instance of RpcBackendOptions"
)
# Try to detect the backend from the options
if backend is None and rpc_backend_options is not None:
for candidate_backend in BackendType:
if isinstance(
rpc_backend_options,
type(
backend_registry.construct_rpc_backend_options(
candidate_backend
)
),
):
backend = candidate_backend
break
else:
raise TypeError(
f"Could not infer backend for options {rpc_backend_options}"
)
# Ignore type error because mypy doesn't handle dynamically generated type objects (#4865)
if backend != BackendType.TENSORPIPE: # type: ignore[attr-defined]
logger.warning(
f"RPC was initialized with no explicit backend but with options " # type: ignore[attr-defined]
f"corresponding to {backend}, hence that backend will be used "
f"instead of the default {BackendType.TENSORPIPE}. To silence this "
f"warning pass `backend={backend}` explicitly."
)
if backend is None:
backend = BackendType.TENSORPIPE # type: ignore[attr-defined]
if rpc_backend_options is None:
# default construct a set of RPC backend options.
rpc_backend_options = backend_registry.construct_rpc_backend_options(
backend
)
# Create store, performs rendezvous for static RPC group.
if not world_size:
# If world_size is not set in the constructor and also not set via environment
# variables, the store will be created for the dynamic group setting.
store = dist._create_store_from_options(rpc_backend_options, rank)
else:
# This rendezvous state sometimes is destroyed before all processes
# finishing handshaking. To avoid that issue, we make it global to
# keep it alive.
global rendezvous_iterator
rendezvous_iterator = dist.rendezvous(
rpc_backend_options.init_method, rank=rank, world_size=world_size
)
store, _, _ = next(rendezvous_iterator)
# Use same timeout as RPC.
store.set_timeout(timedelta(seconds=rpc_backend_options.rpc_timeout))
# Use a PrefixStore to distinguish multiple invocations.
with _init_counter_lock:
global _init_counter
store = dist.PrefixStore(str("rpc_prefix_{}".format(_init_counter)), store)
_init_counter += 1
# Initialize autograd before RPC since _init_rpc_backend guarantees all
# processes sync via the store. If we initialize autograd after RPC,
# there could be a race where some nodes might have initialized autograd
# and others might not have. As a result, a node calling
# torch.distributed.autograd.backward() would run into errors since
# other nodes might not have been initialized.
dist_autograd._init(rank)
_set_profiler_node_id(rank)
# Initialize RPC.
_init_rpc_backend(backend, store, name, rank, world_size, rpc_backend_options)
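# A minimal two-process usage sketch (worker names are hypothetical; assumes
# MASTER_ADDR and MASTER_PORT are set in the environment):
#
#   >>> # On rank 0
#   >>> rpc.init_rpc("worker0", rank=0, world_size=2)
#   >>> # On rank 1
#   >>> rpc.init_rpc("worker1", rank=1, world_size=2)
#   >>> # ... issue rpc_sync / rpc_async / remote calls ...
#   >>> rpc.shutdown()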
def _validate_rpc_args(backend, store, name, rank, world_size, rpc_backend_options):
type_mapping = {
backend: backend_registry.BackendType,
store: dist.Store,
name: str,
rank: numbers.Integral,
# world_size can be None for a dynamic group
world_size: (numbers.Integral, type(None)),
rpc_backend_options: RpcBackendOptions,
}
for arg, arg_type in type_mapping.items():
if not isinstance(arg, arg_type): # type: ignore[arg-type]
raise RuntimeError(
"Argument {} must be of type {} but got type {}".format(
arg, arg_type, type(arg)
)
)
def _init_rpc_backend(
backend=BackendType.TENSORPIPE, # type: ignore[attr-defined]
store=None,
name=None,
rank=-1,
world_size=None,
rpc_backend_options=None,
):
_validate_rpc_args(backend, store, name, rank, world_size, rpc_backend_options)
if _is_current_rpc_agent_set():
raise RuntimeError("RPC is already initialized")
# Initialize RPC.
rpc_agent = backend_registry.init_backend(
backend,
store=store,
name=name,
rank=rank,
world_size=world_size,
rpc_backend_options=rpc_backend_options,
)
api._init_rpc_states(rpc_agent)
@api._require_initialized
def _get_debug_info():
info = _rref_context_get_debug_info()
info.update(api._get_current_rpc_agent().get_debug_info())
info.update(dist_autograd._get_debug_info())
return info
| pytorch-master | torch/distributed/rpc/__init__.py |
__all__ = ["shutdown", "get_worker_info", "remote", "rpc_sync",
"rpc_async", "RRef", "AllGatherStates", "method_factory", "new_method"]
import collections
import contextlib
import functools
import inspect
import logging
import threading
from typing import Dict, Generic, TypeVar, Set, Any
import torch
from torch.futures import Future
from torch._C._distributed_rpc import (
PyRRef,
RemoteProfilerManager,
WorkerInfo,
TensorPipeAgent,
get_rpc_timeout,
_cleanup_python_rpc_handler,
_delete_all_user_and_unforked_owner_rrefs,
_destroy_rref_context,
_get_current_rpc_agent,
_invoke_remote_builtin,
_invoke_remote_python_udf,
_invoke_remote_torchscript,
_invoke_rpc_builtin,
_invoke_rpc_python_udf,
_invoke_rpc_torchscript,
_is_current_rpc_agent_set,
_reset_current_rpc_agent,
_set_and_start_rpc_agent,
)
from .internal import (
PythonUDF,
RPCExecMode,
_internal_rpc_pickler,
_build_rpc_profiling_key,
)
from .constants import DEFAULT_SHUTDOWN_TIMEOUT, UNSET_RPC_TIMEOUT
from ._utils import _group_membership_management, _update_group_membership
logger = logging.getLogger(__name__)
# NB: Ignoring RRef leaks during shutdown. Without this, applications have to
# make sure there are no references to any RRef in the application code and
# that Python GC has done its job to delete those RRefs. This could result in
# bad debugging experiences, especially for large applications. Therefore, by
# default, we are going to ignore RRef leaks during shutdown. This is usually
# fine as shutdown means applications have done training and no longer care
# about states.
#
# To enable RRef leak checking, set this _ignore_rref_leak to False
_ignore_rref_leak = True
_default_pickler = _internal_rpc_pickler
@contextlib.contextmanager
def _use_rpc_pickler(rpc_pickler):
r"""
rpc_pickler: (.internal._InternalRPCPickler) Overrides the default RPC pickler
"""
global _default_pickler
_default_pickler = rpc_pickler
try:
yield
finally:
_default_pickler = _internal_rpc_pickler
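# A minimal usage sketch for the pickler override (illustrative; `my_custom_pickler`
# is a hypothetical instance of a subclass of .internal._InternalRPCPickler):
#
#   >>> with _use_rpc_pickler(my_custom_pickler):
#   ...     rpc_sync("worker1", torch.add, args=(torch.ones(2), 1))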
def _require_initialized(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
if not _is_current_rpc_agent_set():
raise RuntimeError(
"RPC has not been initialized. Call "
"torch.distributed.rpc.init_rpc first."
)
return func(*args, **kwargs)
return wrapper
class AllGatherStates(object):
def __init__(self):
# Each `gathered_objects` is an empty dict at the beginning.
# The leader worker is elected as the first worker in a sorted worker
# name list. Whenever there is a worker entering `_all_gather()`, it
# runs `_gather_to_leader()` on the leader to add its own name and
# data obj to this dict. The leader also adds its own name to the dict
# on calling `_all_gather()`.
# Once `set(gathered_objects.keys()) == _ALL_WORKER_NAMES`, the leader
# will broadcast the gathered dict to all follower workers and set their
# `gathered_objects` field and the `proceed_signal` field.
self.gathered_objects = {}
# All workers wait on this signal until it receives all gathered
# objects.
self.proceed_signal = threading.Event()
# States used by `def _all_gather()`.
# `_ALL_WORKER_NAMES` is initialized when the RPC layer is initialized.
_ALL_WORKER_NAMES: Set[Any] = set()
_all_gather_dict_lock = threading.RLock()
_all_gather_sequence_id: Dict[str, int] = {}
_all_gather_sequence_id_to_states: collections.defaultdict = collections.defaultdict(AllGatherStates)
def _init_rpc_states(agent):
worker_infos = agent.get_worker_infos()
global _ALL_WORKER_NAMES
_ALL_WORKER_NAMES = {worker_info.name for worker_info in worker_infos}
# NB: backend implementation might have already set the rpc_agent.
if not _is_current_rpc_agent_set():
_set_and_start_rpc_agent(agent)
def _gather_to_leader(sequence_id, worker_name, obj, worker_names=None):
with _all_gather_dict_lock:
if not worker_names:
worker_names = _ALL_WORKER_NAMES
assert (
worker_name in worker_names
), f"{worker_name} is not expected by leader."
states = _all_gather_sequence_id_to_states[sequence_id]
assert (
worker_name not in states.gathered_objects
), f"{worker_name} reported intent sequence id {sequence_id} twice. "
states.gathered_objects[worker_name] = obj
if worker_names == set(states.gathered_objects.keys()):
states.proceed_signal.set()
def _broadcast_to_followers(sequence_id, objects_map):
with _all_gather_dict_lock:
states = _all_gather_sequence_id_to_states[sequence_id]
assert (
not states.proceed_signal.is_set()
), "Termination signal sequence id {} got set twice.".format(sequence_id)
states.gathered_objects = objects_map
states.proceed_signal.set()
_thread_local_var = threading.local()
@contextlib.contextmanager
def _wait_all():
r"""
A context manager that collects all futures returned by ``rpc_async`` and
waits on them at the context manager's exit, relieving the user of the need
to explicitly call wait.
Example::
>>> # On worker 0:
>>> import torch
>>> import torch.distributed.rpc as rpc
>>> # xdoctest: +SKIP
>>> rpc.init_rpc("worker0", rank=0, world_size=2)
>>> with rpc._wait_all():
>>> fut_1 = rpc.rpc_async(dst, torch.add, (torch.ones(2, 2), 1))
>>> fut_2 = rpc.rpc_async(dst, torch.add, (torch.ones(2, 2), 1))
>>> #fut_1 and fut_2 are waited on
"""
_thread_local_var.future_list = []
try:
yield
finally:
try:
torch.futures.wait_all(_thread_local_var.future_list)
finally:
del _thread_local_var.future_list
@_require_initialized
def _all_gather(obj, worker_names=None, timeout=UNSET_RPC_TIMEOUT):
r"""
This is similar to torch.distributed.all_gather(), but uses RPC. It
picks the worker with the smallest name (alphabetic order) as the leader.
Then all followers send their data ``obj`` to the leader. After the leader
has received all, it will broadcast the results back to all followers. This
function blocks until all workers have received the gathered results.
"""
if not worker_names:
assert (
_ALL_WORKER_NAMES is not None
), "`_ALL_WORKER_NAMES` is not initialized for `def _all_gather`."
worker_names = _ALL_WORKER_NAMES
leader_name = sorted(worker_names)[0]
self_name = _get_current_rpc_agent().get_worker_info().name
with _all_gather_dict_lock:
concat_names = "".join(sorted(worker_names))
sequence_num = _all_gather_sequence_id.get(concat_names, 0)
_all_gather_sequence_id[concat_names] = sequence_num + 1
sequence_id = concat_names + str(sequence_num)
is_leader = leader_name == self_name
if timeout == UNSET_RPC_TIMEOUT:
# Timeout is specified by agent for RPC calls
rpc_timeout = get_rpc_timeout()
# No timeout for signal
signal_timeout = None
elif timeout == DEFAULT_SHUTDOWN_TIMEOUT:
# No timeout for RPC
rpc_timeout = timeout
# No timeout for signal
signal_timeout = None
else:
# Signal and RPC timeout use the same timeout
signal_timeout = rpc_timeout = timeout
# Phase 1: Followers send their objects to the leader
if is_leader:
_gather_to_leader(sequence_id, self_name, obj, worker_names)
else:
rpc_sync(
leader_name,
_gather_to_leader,
args=(sequence_id, self_name, obj, worker_names),
timeout=rpc_timeout,
)
with _all_gather_dict_lock:
states = _all_gather_sequence_id_to_states[sequence_id]
# Timeout is either set by function parameter or None (which is indefinite)
states.proceed_signal.wait(timeout=signal_timeout)
# Phase 2: Leader broadcasts gathered results to all followers
# Leader's signal is the first to be unblocked, after receiving all
# followers' data objects.
if is_leader:
worker_name_to_response_future_dict = dict()
for follower_name in worker_names - {leader_name}:
fut = rpc_async(
follower_name,
_broadcast_to_followers,
args=(sequence_id, states.gathered_objects),
timeout=rpc_timeout
)
worker_name_to_response_future_dict[follower_name] = fut
errors = []
for follower_name, fut in worker_name_to_response_future_dict.items():
try:
fut.wait()
except RuntimeError as ex:
errors.append((follower_name, ex))
if errors:
raise RuntimeError(
f"Followers {[e[0] for e in errors]} timed out in _all_gather "
f"after {rpc_timeout:.2f} seconds. The first exception is {errors[0][1]}"
)
# Clean up for the states using the sequence_id
with _all_gather_dict_lock:
states = _all_gather_sequence_id_to_states.pop(sequence_id)
return states.gathered_objects
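# A minimal usage sketch for the internal gather helper (illustrative;
# assumes RPC is initialized on workers named "worker0" and "worker1" and
# that every worker makes the same call):
#
#   >>> gathered = _all_gather({"id": get_worker_info().id})
#   >>> sorted(gathered.keys())
#   ['worker0', 'worker1']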
@_require_initialized
def _barrier(worker_names):
r"""
Synchronizes local and remote RPC processes.
This will block until all local and remote RPC processes specified under worker_names
reach this method to wait for all outstanding work to complete.
Args:
worker_names (List[str]): The set of workers to synchronize.
"""
try:
_all_gather(None, set(worker_names))
except RuntimeError as ex:
logger.error(
f"Failed to complete barrier, got error {ex}"
)
@_require_initialized
def _wait_all_workers(timeout=DEFAULT_SHUTDOWN_TIMEOUT):
r"""
Block until all local and remote RPC processes reach this method and wait
for all outstanding work to complete. Every RPC process must call this
method before exit to perform a graceful shutdown. This should be used to
terminate the RPC framework, and there is no guarantee that the RPC
framework will work after this method returns.
"""
try:
_all_gather(None, timeout=timeout)
except RuntimeError as ex:
logger.error(
f"Failed to respond to 'Shutdown Proceed' in time, got error {ex}"
)
raise ex
@_require_initialized
def shutdown(graceful=True, timeout=DEFAULT_SHUTDOWN_TIMEOUT):
r"""
Perform a shutdown of the RPC agent, and then destroy the RPC agent. This
stops the local agent from accepting outstanding requests, and shuts
down the RPC framework by terminating all RPC threads. If ``graceful=True``,
this will block until all local and remote RPC processes reach this method
and wait for all outstanding work to complete. Otherwise, if
``graceful=False``, this is a local shutdown, and it does not wait for other
RPC processes to reach this method.
.. warning::
For :class:`~torch.futures.Future` objects returned by
:meth:`~torch.distributed.rpc.rpc_async`, ``future.wait()`` should not
be called after ``shutdown()``.
Args:
graceful (bool): Whether to do a graceful shutdown or not. If True,
this will 1) wait until there is no pending system
messages for ``UserRRefs`` and delete them; 2) block
until all local and remote RPC processes have reached
this method and wait for all outstanding work to
complete.
Example::
Make sure that ``MASTER_ADDR`` and ``MASTER_PORT`` are set properly
on both workers. Refer to :meth:`~torch.distributed.init_process_group`
API for more details. For example,
export MASTER_ADDR=localhost
export MASTER_PORT=5678
Then run the following code in two different processes:
>>> # xdoctest: +SKIP
>>> # On worker 0:
>>> import torch
>>> import torch.distributed.rpc as rpc
>>> rpc.init_rpc("worker0", rank=0, world_size=2)
>>> # do some work
>>> result = rpc.rpc_sync("worker1", torch.add, args=(torch.ones(1), 1))
>>> # ready to shutdown
>>> rpc.shutdown()
>>> # On worker 1:
>>> import torch.distributed.rpc as rpc
>>> rpc.init_rpc("worker1", rank=1, world_size=2)
>>> # wait for worker 0 to finish work, and then shutdown.
>>> rpc.shutdown()
"""
if graceful:
try:
agent = _get_current_rpc_agent()
if not isinstance(agent, TensorPipeAgent) or agent.is_static_group:
_wait_all_workers(timeout)
_delete_all_user_and_unforked_owner_rrefs()
agent.join(shutdown=True, timeout=timeout)
else:
# This is a dynamic group so we need to grab the token for the operation
my_worker_info = agent.get_worker_info()
my_name = my_worker_info.name
with _group_membership_management(agent.store, my_name, False):
all_worker_infos = agent.get_worker_infos()
for worker in all_worker_infos:
if worker.name != my_name:
rpc_sync(worker.name, _update_group_membership, args=(my_worker_info, [], {}, False))
agent.join(shutdown=True, timeout=timeout)
finally:
# In case of errors, continue to complete the local shutdown.
_finalize_shutdown()
else:
_finalize_shutdown()
def _finalize_shutdown():
try:
# This raises a `TORCH_CHECK()` exception on RRef leak detected.
_destroy_rref_context(_ignore_rref_leak)
finally:
_get_current_rpc_agent().shutdown()
# clean up python rpc handler in shutdown(), see comments in
# PythonRpcHandler::cleanup(), call it in python API because the
# cleanup() function has python dependency, it assumes python
# interpreter exists.
# No matter if RRef leak exception is raised, this clean-up code
# must run to avoid destruction segfault in Python 3.5.
#
# future.wait() should not be called after shutdown().
# pythonRpcHandler is cleaned up in shutdown(), after
# shutdown(), python objects returned from rpc python call can not be
# resolved.
_cleanup_python_rpc_handler()
_reset_current_rpc_agent()
@_require_initialized
def get_worker_info(worker_name=None):
r"""
Get :class:`~torch.distributed.rpc.WorkerInfo` of a given worker name.
Use this :class:`~torch.distributed.rpc.WorkerInfo` to avoid passing an
expensive string on every invocation.
Args:
worker_name (str): the string name of a worker. If ``None``, return the
``WorkerInfo`` of the current worker. (default ``None``)
Returns:
:class:`~torch.distributed.rpc.WorkerInfo` instance for the given
``worker_name`` or :class:`~torch.distributed.rpc.WorkerInfo` of the
current worker if ``worker_name`` is ``None``.
"""
if worker_name is not None:
return _get_current_rpc_agent().get_worker_info(worker_name)
else:
return _get_current_rpc_agent().get_worker_info()
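# A minimal usage sketch (hypothetical worker name; assumes RPC is initialized):
#
#   >>> info = get_worker_info("worker1")
#   >>> rref = remote(info, torch.add, args=(torch.ones(2), 1))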
def _to_worker_info(to):
if isinstance(to, WorkerInfo):
return to
elif isinstance(to, str) or isinstance(to, int):
return get_worker_info(to)
else:
raise ValueError("Cannot get WorkerInfo from name {}".format(to))
def _rref_typeof_on_owner(rref, blocking=True):
rref_type = type(rref.local_value())
if blocking:
return rref_type
else:
# Wrap result into a completed Future. This is so that if ``blocking=False``
# is specified, we return a future regardless of if this call is on user
# or owner.
future = Future[type]()
future.set_result(rref_type)
return future
def _rref_typeof_on_user(rref, timeout=UNSET_RPC_TIMEOUT, blocking=True):
fut = rpc_async(
rref.owner(),
_rref_typeof_on_owner,
args=(rref,),
timeout=timeout
)
if blocking:
return fut.wait()
else:
return fut
T = TypeVar("T")
GenericWithOneTypeVar = Generic[T]
try:
# Combine the implementation class and the type class.
class RRef(PyRRef, Generic[T]):
pass
except TypeError:
# TypeError: metaclass conflict: the metaclass of a derived class
# must be a (non-strict) subclass of the metaclasses of all its bases
# Mypy doesn't understand __class__ (mypy bug #4177)
class RRefMeta(PyRRef.__class__, GenericWithOneTypeVar.__class__): # type: ignore[name-defined, misc, valid-type]
pass
# Combine the implementation class and the type class.
# Types for classes expecting a certain generic parameter (mypy bug #7791)
class RRef(PyRRef, GenericWithOneTypeVar, metaclass=RRefMeta): # type: ignore[misc, no-redef, valid-type]
pass
# Install docstrings from `PyRRef` to `RRef`.
#
# This is for the fact that pybind11 generates the parameter
# `self` as type `rpc.PyRRef`, so a `:inherited-members:`
# under `.. autoclass:: RRef` does not work.
# We have to do the following to replace `rpc.PyRRef` with `rpc.RRef`.
#
def method_factory(method_name, docstring):
def method(self, *args, **kwargs):
return getattr(super(RRef, self), method_name)(*args, **kwargs)
if method.__doc__:
method.__doc__ = docstring
return method
for method_name, method in inspect.getmembers(PyRRef):
# Ignore magic methods, except "__str__".
if method_name.startswith("_") and method_name != "__str__":
continue
# Get pybind11 generated docstring.
# It's like,
"""
to_here(self: torch.distributed.rpc.PyRRef, timeout: float=-1.0) -> object
Blocking call that copies the value of the RRef from the owner
to the local node and returns it. If the current node is the
owner, returns a reference to the local value.
"""
docstring = getattr(method, "__doc__", None)
assert docstring is not None, "RRef user-facing methods should all have docstrings."
# Do surgery on pybind11 generated docstrings.
docstring = docstring.replace("torch.distributed.rpc.PyRRef", "torch.distributed.rpc.RRef")
# Attach user-facing RRef method with modified docstring.
new_method = method_factory(method_name, docstring)
setattr(RRef, method_name, new_method)
@_require_initialized
def remote(to, func, args=None, kwargs=None, timeout=UNSET_RPC_TIMEOUT):
r"""
Make a remote call to run ``func`` on worker ``to`` and return an
:class:`~torch.distributed.rpc.RRef` to the result value immediately.
Worker ``to`` will be the owner of the returned
:class:`~torch.distributed.rpc.RRef`, and the worker calling ``remote`` is
a user. The owner manages the global reference count of its
:class:`~torch.distributed.rpc.RRef`, and the owner
:class:`~torch.distributed.rpc.RRef` is only destructed when globally there
are no living references to it.
Args:
to (str or WorkerInfo or int): name/rank/``WorkerInfo`` of the destination worker.
func (Callable): a callable function, such as Python callables, builtin
operators (e.g. :meth:`~torch.add`) and annotated
TorchScript functions.
args (tuple): the argument tuple for the ``func`` invocation.
kwargs (dict): is a dictionary of keyword arguments for the ``func``
invocation.
timeout (float, optional): timeout in seconds for this remote call. If the
creation of this
:class:`~torch.distributed.rpc.RRef` on worker
``to`` is not successfully processed on this
worker within this timeout, then the next time
there is an attempt to use the RRef (such as
``to_here()``), a timeout will be raised
indicating this failure. A value of 0 indicates
an infinite timeout, i.e. a timeout error will
never be raised. If not provided, the default
value set during initialization or with
``_set_rpc_timeout`` is used.
Returns:
A user :class:`~torch.distributed.rpc.RRef` instance to the result
value. Use the blocking API :meth:`torch.distributed.rpc.RRef.to_here`
to retrieve the result value locally.
.. warning ::
The ``remote`` API does not copy storages of argument tensors until
sending them over the wire, which could be done by a different thread
depending on the RPC backend type. The caller should make sure that the
contents of those tensors stay intact until the returned RRef is
confirmed by the owner, which can be checked using the
:meth:`torch.distributed.rpc.RRef.confirmed_by_owner` API.
.. warning ::
Errors such as timeouts for the ``remote`` API are handled on a
best-effort basis. This means that when remote calls initiated by
``remote`` fail, such as with a timeout error, we take a best-effort
approach to error handling. This means that errors are handled and set
on the resulting RRef on an asynchronous basis. If the RRef has not been
used by the application before this handling (such as ``to_here`` or
fork call), then future uses of the ``RRef`` will appropriately raise
errors. However, it is possible that the user application will use the
``RRef`` before the errors are handled. In this case, errors may not be
raised as they have not yet been handled.
Example::
Make sure that ``MASTER_ADDR`` and ``MASTER_PORT`` are set properly
on both workers. Refer to :meth:`~torch.distributed.init_process_group`
API for more details. For example,
export MASTER_ADDR=localhost
export MASTER_PORT=5678
Then run the following code in two different processes:
>>> # xdoctest: +SKIP
>>> # On worker 0:
>>> import torch
>>> import torch.distributed.rpc as rpc
>>> rpc.init_rpc("worker0", rank=0, world_size=2)
>>> rref1 = rpc.remote("worker1", torch.add, args=(torch.ones(2), 3))
>>> rref2 = rpc.remote("worker1", torch.add, args=(torch.ones(2), 1))
>>> x = rref1.to_here() + rref2.to_here()
>>> rpc.shutdown()
>>> # On worker 1:
>>> import torch.distributed.rpc as rpc
>>> rpc.init_rpc("worker1", rank=1, world_size=2)
>>> rpc.shutdown()
Below is an example of running a TorchScript function using RPC.
>>> # On both workers:
>>> @torch.jit.script
>>> def my_script_add(t1, t2):
>>> return torch.add(t1, t2)
>>> # On worker 0:
>>> import torch.distributed.rpc as rpc
>>> rpc.init_rpc("worker0", rank=0, world_size=2)
>>> rref = rpc.remote("worker1", my_script_add, args=(torch.ones(2), 3))
>>> rref.to_here()
>>> rpc.shutdown()
>>> # On worker 1:
>>> import torch.distributed.rpc as rpc
>>> rpc.init_rpc("worker1", rank=1, world_size=2)
>>> rpc.shutdown()
"""
torch._C._log_api_usage_once("torch.distributed.rpc_remote")
qualified_name = torch.jit._builtins._find_builtin(func)
dst_worker_info = _to_worker_info(to)
should_profile = _get_should_profile()
ctx_manager = _enable_rpc_profiler(should_profile, qualified_name, func, RPCExecMode.REMOTE, dst_worker_info)
with ctx_manager as rf:
args = args if args else ()
kwargs = kwargs if kwargs else {}
is_async_exec = hasattr(func, "_wrapped_async_rpc_function")
if is_async_exec:
wrapped = func._wrapped_async_rpc_function
if isinstance(wrapped, torch.jit.ScriptFunction):
func = wrapped
if qualified_name is not None:
rref = _invoke_remote_builtin(dst_worker_info, qualified_name, timeout, *args, **kwargs)
elif isinstance(func, torch.jit.ScriptFunction):
rref = _invoke_remote_torchscript(
dst_worker_info.name,
torch._jit_internal._qualified_name(func),
timeout,
is_async_exec,
*args,
**kwargs,
)
else:
(pickled_python_udf, tensors) = _default_pickler.serialize(
PythonUDF(func, args, kwargs)
)
rref = _invoke_remote_python_udf(
dst_worker_info,
pickled_python_udf,
tensors,
timeout,
is_async_exec
)
# attach profiling information
if should_profile:
assert torch.autograd._profiler_enabled()
assert rf is not None
fut = rf._call_end_callbacks_on_future(rref._get_future())
rref._set_profiling_future(fut)
return rref
def _invoke_rpc(to, func, rpc_type, args=None, kwargs=None, rpc_timeout=UNSET_RPC_TIMEOUT):
if not callable(func):
raise TypeError("function should be callable.")
qualified_name = torch.jit._builtins._find_builtin(func)
dst_worker_info = _to_worker_info(to)
should_profile = _get_should_profile()
ctx_manager = _enable_rpc_profiler(should_profile, qualified_name, func, rpc_type, dst_worker_info)
with ctx_manager as rf:
args = args if args else ()
kwargs = kwargs if kwargs else {}
is_async_exec = hasattr(func, "_wrapped_async_rpc_function")
if is_async_exec:
wrapped = func._wrapped_async_rpc_function
if isinstance(wrapped, torch.jit.ScriptFunction):
func = wrapped
if qualified_name is not None:
fut = _invoke_rpc_builtin(
dst_worker_info,
qualified_name,
rpc_timeout,
*args,
**kwargs
)
elif isinstance(func, torch.jit.ScriptFunction):
fut = _invoke_rpc_torchscript(
dst_worker_info.name,
torch._jit_internal._qualified_name(func),
args,
kwargs,
rpc_timeout,
is_async_exec
)
else:
(pickled_python_udf, tensors) = _default_pickler.serialize(
PythonUDF(func, args, kwargs)
)
fut = _invoke_rpc_python_udf(
dst_worker_info,
pickled_python_udf,
tensors,
rpc_timeout,
is_async_exec
)
if should_profile:
assert torch.autograd._profiler_enabled()
assert rf is not None
# Schedule profiling callbacks to run when the future completes.
# This returns a future that is completed when the original future
# completes and the profiling callbacks have been completed as well,
# to guarantee that fut.wait() completes the profiling. This new
# future will contain the same value as the original future.
fut = rf._call_end_callbacks_on_future(fut)
return fut
@_require_initialized
def rpc_sync(to, func, args=None, kwargs=None, timeout=UNSET_RPC_TIMEOUT):
r"""
Make a blocking RPC call to run function ``func`` on worker ``to``. RPC
messages are sent and received in parallel to execution of Python code. This
method is thread-safe.
Args:
to (str or WorkerInfo or int): name/rank/``WorkerInfo`` of the destination worker.
func (Callable): a callable function, such as Python callables, builtin
operators (e.g. :meth:`~torch.add`) and annotated
TorchScript functions.
args (tuple): the argument tuple for the ``func`` invocation.
kwargs (dict): a dictionary of keyword arguments for the ``func``
invocation.
timeout (float, optional): timeout in seconds to use for this RPC. If
the RPC does not complete in this amount of
time, an exception indicating it has
timed out will be raised. A value of 0
indicates an infinite timeout, i.e. a timeout
error will never be raised. If not provided,
the default value set during initialization
or with ``_set_rpc_timeout`` is used.
Returns:
Returns the result of running ``func`` with ``args`` and ``kwargs``.
Example::
Make sure that ``MASTER_ADDR`` and ``MASTER_PORT`` are set properly
on both workers. Refer to :meth:`~torch.distributed.init_process_group`
API for more details. For example,
export MASTER_ADDR=localhost
export MASTER_PORT=5678
Then run the following code in two different processes:
>>> # xdoctest: +SKIP
>>> # On worker 0:
>>> import torch
>>> import torch.distributed.rpc as rpc
>>> rpc.init_rpc("worker0", rank=0, world_size=2)
>>> ret = rpc.rpc_sync("worker1", torch.add, args=(torch.ones(2), 3))
>>> rpc.shutdown()
>>> # On worker 1:
>>> import torch.distributed.rpc as rpc
>>> rpc.init_rpc("worker1", rank=1, world_size=2)
>>> rpc.shutdown()
Below is an example of running a TorchScript function using RPC.
>>> # On both workers:
>>> @torch.jit.script
>>> def my_script_add(t1, t2):
>>> return torch.add(t1, t2)
>>> # On worker 0:
>>> import torch.distributed.rpc as rpc
>>> rpc.init_rpc("worker0", rank=0, world_size=2)
>>> ret = rpc.rpc_sync("worker1", my_script_add, args=(torch.ones(2), 3))
>>> rpc.shutdown()
>>> # On worker 1:
>>> import torch.distributed.rpc as rpc
>>> rpc.init_rpc("worker1", rank=1, world_size=2)
>>> rpc.shutdown()
"""
torch._C._log_api_usage_once("torch.distributed.rpc_sync")
fut = _invoke_rpc(to, func, RPCExecMode.SYNC, args, kwargs, timeout)
return fut.wait()
@_require_initialized
def rpc_async(to, func, args=None, kwargs=None, timeout=UNSET_RPC_TIMEOUT):
r"""
Make a non-blocking RPC call to run function ``func`` on worker ``to``. RPC
messages are sent and received in parallel to execution of Python code. This
method is thread-safe. This method will immediately return a
:class:`~torch.futures.Future` that can be awaited on.
Args:
to (str or WorkerInfo or int): name/rank/``WorkerInfo`` of the destination worker.
func (Callable): a callable function, such as Python callables, builtin
operators (e.g. :meth:`~torch.add`) and annotated
TorchScript functions.
args (tuple): the argument tuple for the ``func`` invocation.
kwargs (dict): is a dictionary of keyword arguments for the ``func``
invocation.
timeout (float, optional): timeout in seconds to use for this RPC. If
the RPC does not complete in this amount of
time, an exception indicating it has
timed out will be raised. A value of 0
indicates an infinite timeout, i.e. a timeout
error will never be raised. If not provided,
the default value set during initialization
or with ``_set_rpc_timeout`` is used.
Returns:
Returns a :class:`~torch.futures.Future` object that can be waited
on. When completed, the return value of ``func`` on ``args`` and
``kwargs`` can be retrieved from the :class:`~torch.futures.Future`
object.
.. warning ::
Using GPU tensors as arguments or return values of ``func`` is not
supported since we don't support sending GPU tensors over the wire. You
need to explicitly copy GPU tensors to CPU before using them as
arguments or return values of ``func``.
.. warning ::
The ``rpc_async`` API does not copy storages of argument tensors until
sending them over the wire, which could be done by a different thread
depending on the RPC backend type. The caller should make sure that the
contents of those tensors stay intact until the returned
:class:`~torch.futures.Future` completes.
Example::
Make sure that ``MASTER_ADDR`` and ``MASTER_PORT`` are set properly
on both workers. Refer to :meth:`~torch.distributed.init_process_group`
API for more details. For example,
export MASTER_ADDR=localhost
export MASTER_PORT=5678
Then run the following code in two different processes:
>>> # xdoctest: +SKIP
>>> # On worker 0:
>>> import torch
>>> import torch.distributed.rpc as rpc
>>> rpc.init_rpc("worker0", rank=0, world_size=2)
>>> fut1 = rpc.rpc_async("worker1", torch.add, args=(torch.ones(2), 3))
>>> fut2 = rpc.rpc_async("worker1", min, args=(1, 2))
>>> result = fut1.wait() + fut2.wait()
>>> rpc.shutdown()
>>> # On worker 1:
>>> import torch.distributed.rpc as rpc
>>> rpc.init_rpc("worker1", rank=1, world_size=2)
>>> rpc.shutdown()
Below is an example of running a TorchScript function using RPC.
>>> # On both workers:
>>> @torch.jit.script
>>> def my_script_add(t1, t2):
>>> return torch.add(t1, t2)
>>> # On worker 0:
>>> import torch.distributed.rpc as rpc
>>> rpc.init_rpc("worker0", rank=0, world_size=2)
>>> fut = rpc.rpc_async("worker1", my_script_add, args=(torch.ones(2), 3))
>>> ret = fut.wait()
>>> rpc.shutdown()
>>> # On worker 1:
>>> import torch.distributed.rpc as rpc
>>> rpc.init_rpc("worker1", rank=1, world_size=2)
>>> rpc.shutdown()
"""
torch._C._log_api_usage_once("torch.distributed.rpc_async")
fut = _invoke_rpc(to, func, RPCExecMode.ASYNC, args, kwargs, timeout)
if hasattr(_thread_local_var, "future_list"):
_thread_local_var.future_list.append(fut)
return fut
def _get_should_profile():
# The legacy profiler must be enabled; RPC profiling is not supported with
# the Kineto profiler.
ActiveProfilerType = torch._C._autograd.ActiveProfilerType
return (
torch.autograd._profiler_enabled() and
torch._C._autograd._profiler_type() == ActiveProfilerType.LEGACY # type: ignore[attr-defined]
)
def _enable_rpc_profiler(should_profile, qualified_name, func, rpc_type, dst_worker_info):
ctx_manager = contextlib.suppress()
if should_profile:
# Create appropriate string representation based on type of func
# (builtin, script, python)
if qualified_name is None:
func_name = (
torch._jit_internal._qualified_name(func)
if isinstance(func, torch.jit.ScriptFunction)
else func.__qualname__
)
else:
func_name = qualified_name
# Build RPC profiling key.
rpc_profiling_key = _build_rpc_profiling_key(
rpc_type,
func_name,
get_worker_info().name,
dst_worker_info.name,
)
RemoteProfilerManager.set_current_profiling_key(rpc_profiling_key)
# Mypy doesn't support re-def of a variable not in the same block (#1174)
ctx_manager = torch.autograd.profiler.record_function(rpc_profiling_key) # type: ignore[assignment]
return ctx_manager
| pytorch-master | torch/distributed/rpc/api.py |
from functools import partial
from . import functions
from . import rpc_async
import torch
from .constants import UNSET_RPC_TIMEOUT
from torch.futures import Future
def _local_invoke(rref, func_name, args, kwargs):
return getattr(rref.local_value(), func_name)(*args, **kwargs)
@functions.async_execution
def _local_invoke_async_execution(rref, func_name, args, kwargs):
return getattr(rref.local_value(), func_name)(*args, **kwargs)
def _invoke_rpc(rref, rpc_api, func_name, timeout, *args, **kwargs):
def _rref_type_cont(rref_fut):
rref_type = rref_fut.value()
_invoke_func = _local_invoke
# Bypass ScriptModules when checking for async function attribute.
bypass_type = issubclass(rref_type, torch.jit.ScriptModule) or issubclass(
rref_type, torch._C.ScriptModule
)
if not bypass_type:
func = getattr(rref_type, func_name)
if hasattr(func, "_wrapped_async_rpc_function"):
_invoke_func = _local_invoke_async_execution
return rpc_api(
rref.owner(),
_invoke_func,
args=(rref, func_name, args, kwargs),
timeout=timeout
)
rref_fut = rref._get_type(timeout=timeout, blocking=False)
if rpc_api != rpc_async:
rref_fut.wait()
return _rref_type_cont(rref_fut)
else:
# A note on the control flow below:
# rpc_async returns a Future pointing to the return value of `func_name`, i.e. a `Future[T]`.
# Calling _rref_type_cont from the `then` lambda would wrap that again, i.e. `then` would return a `Future[Future[T]]`.
# To avoid that, we instead return a Future that is completed with the result of the async call.
result: Future = Future()
def _wrap_rref_type_cont(fut):
try:
_rref_type_cont(fut).then(_complete_op)
except BaseException as ex:
result.set_exception(ex)
def _complete_op(fut):
try:
result.set_result(fut.value())
except BaseException as ex:
result.set_exception(ex)
rref_fut.then(lambda fut: _wrap_rref_type_cont(fut))
return result
# This class manages proxied RPC API calls for RRefs. It is entirely used from
# C++ (see python_rpc_handler.cpp).
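# A minimal usage sketch (for illustration only; ``rref`` and ``x`` are
# placeholders, and we assume the RRef owns an object with a ``forward``
# method): user code reaches RRefProxy indirectly through the RRef proxy
# helpers:
#
#   result = rref.rpc_sync().forward(x)     # blocking proxied call
#   fut = rref.rpc_async().forward(x)       # returns a torch.futures.Future
#   out_rref = rref.remote().forward(x)     # returns another RRef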
class RRefProxy:
def __init__(self, rref, rpc_api, timeout=UNSET_RPC_TIMEOUT):
self.rref = rref
self.rpc_api = rpc_api
self.rpc_timeout = timeout
def __getattr__(self, func_name):
return partial(_invoke_rpc, self.rref, self.rpc_api, func_name, self.rpc_timeout)
| pytorch-master | torch/distributed/rpc/rref_proxy.py |
#!/usr/bin/python3
import itertools
import torch
from torch.autograd.profiler_legacy import profile
from typing import List
from . import (
_disable_server_process_global_profiler,
_enable_server_process_global_profiler,
)
__all__: List[str] = []
class _server_process_global_profile(profile):
"""
It has the same API as the ``torch.autograd.profiler.profile`` class,
except that it enables profiling on all threads running RPC server request callbacks.
Context manager that manages autograd profiler state and holds a summary of results.
Under the hood it just records events of functions being executed in C++ and
exposes those events to Python. You can wrap any code into it and it will
only report runtime of PyTorch functions.
Note: the profiler is thread local and is automatically propagated into the async tasks.
Args:
enabled (bool, optional): Setting this to False makes this context manager a no-op.
Default: ``True``.
use_cuda (bool, optional): Enables timing of CUDA events as well using the cudaEvent API.
Adds approximately 4us of overhead to each tensor operation.
Default: ``False``
record_shapes (bool, optional): If shapes recording is set, information
about input dimensions will be collected. This allows one to see which
dimensions have been used under the hood and further group by them
using prof.key_averages(group_by_input_shape=True). Please note that
shape recording might skew your profiling data. It is recommended to
use separate runs with and without shape recording to validate the timing.
Most likely the skew will be negligible for bottom most events (in a case
of nested function calls). But for higher level functions the total
self cpu time might be artificially increased because of the shape
collection.
profile_memory (bool, optional): Whether to report memory usage, default: ``False``
.. warning::
Enabling memory profiling incurs additional profiler overhead
.. warning::
Due to some CUDA multiprocessing limitations (multiprocessing-cuda-note_),
one cannot use the profiler with ``use_cuda = True`` to benchmark
DataLoaders with ``num_workers > 0``. If you wish to benchmark data loading,
please use ``use_cuda = False`` or ``num_workers = 0``.
Example:
>>> # xdoctest: +SKIP
>>> # On worker 0:
>>> import torch
>>> import torch.distributed.rpc as rpc
>>> rpc.init_rpc("worker0", rank=0, world_size=2)
>>> x, y = torch.tensor(1), torch.tensor(2)
>>> outer_profile_rref = rpc.remote(dst_worker_name, rpc._server_process_global_profile)
>>> outer_profile_rref.rpc_sync().__enter__()
>>> rpc.rpc_sync(dst_worker_name, torch.add, (x, y))
>>> inner_profile_rref = rpc.remote(dst_worker_name, rpc._server_process_global_profile)
>>> inner_profile_rref.rpc_sync().__enter__()
>>> rpc.rpc_sync(dst_worker_name, torch.sub, (x, y))
>>> inner_profile_rref.rpc_sync().__exit__(None, None, None)
>>> outer_profile_rref.rpc_sync().__exit__(None, None, None)
>>> print(inner_profile_rref.rpc_sync().key_averages())
--------- --------------- --------------- --------------- --------------- --------------- ---------------
Name Self CPU total % Self CPU total CPU total % CPU total CPU time avg Number of Calls
--------- --------------- --------------- --------------- --------------- --------------- ---------------
sub 85.06% 76.275us 100.00% 89.667us 89.667us 1
empty 14.94% 13.392us 14.94% 13.392us 13.392us 1
--------- --------------- --------------- --------------- --------------- --------------- ---------------
Self CPU time total: 89.667us
>>> print(outer_profile_rref.rpc_sync().key_averages())
--------- --------------- --------------- --------------- --------------- --------------- ---------------
Name Self CPU total % Self CPU total CPU total % CPU total CPU time avg Number of Calls
--------- --------------- --------------- --------------- --------------- --------------- ---------------
sub 35.65% 76.275us 41.91% 89.667us 89.667us 1
empty 12.67% 27.101us 12.67% 27.101us 13.551us 2
add 51.68% 110.550us 58.09% 124.259us 124.259us 1
--------- --------------- --------------- --------------- --------------- --------------- ---------------
Self CPU time total: 213.926us
>>> rpc.shutdown()
>>> # On worker 1:
>>> import torch.distributed.rpc as rpc
>>> rpc.init_rpc("worker1", rank=1, world_size=2)
>>> # wait for worker 0 to finish work, and then shutdown.
>>> rpc.shutdown()
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def __enter__(self):
"""
Turn on server-side process-global profiling.
This enables thread-local profiler on all RPC threads running server-side request callbacks.
"""
if not self.enabled:
return
if self.entered: # type: ignore[has-type]
raise RuntimeError("autograd profiler traces are not reentrant")
self.entered = True
profiler_kind = (
torch.autograd.ProfilerState.CUDA
if self.use_cuda
else torch.autograd.ProfilerState.CPU
)
profiler_config = torch.autograd.ProfilerConfig(
profiler_kind,
self.record_shapes,
self.profile_memory,
False,
False,
False,
torch.profiler._ExperimentalConfig())
_enable_server_process_global_profiler(profiler_config)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""
Turn off server-side process-global profiling.
Aggregate all profiling events recorded by RPC threads.
These attributes are assigned on exiting context.
Attributes:
function_events (torch.autograd.profiler.EventList). A list with helper
methods, such as 1) showing record items in a pretty-print table,
2) averaging by grouping on keys, and more.
process_global_function_events (List[torch.autograd.profiler.FunctionEvent]).
It's a list of ``FunctionEvent`` elements. Every element is a profiling result
of an RPC request handling within the profiling range.
"""
if not self.enabled:
return
process_global_events = _disable_server_process_global_profiler()
# Every element in this list is a thread profiling result from an RPC request handling.
process_global_function_events = []
for thread_local_events in process_global_events:
# Parse from ``Event``s to ``FunctionEvent``s.
thread_local_function_events = torch.autograd.profiler_legacy._parse_legacy_records(
thread_local_events
)
thread_local_function_events.sort(
key=lambda function_event: [
function_event.time_range.start,
-(function_event.time_range.end),
]
)
process_global_function_events.append(thread_local_function_events)
flattened_function_events = list(
itertools.chain(*process_global_function_events)
)
self.function_events = torch.autograd.profiler_util.EventList(
flattened_function_events,
use_cuda=self.use_cuda,
profile_memory=self.profile_memory,
)
self.function_events._build_tree()
self.process_global_function_events = process_global_function_events
return False
| pytorch-master | torch/distributed/rpc/server_process_global_profiler.py |
from contextlib import contextmanager
from typing import cast
import logging
from . import api
from . import TensorPipeAgent
logger = logging.getLogger(__name__)
@contextmanager
def _group_membership_management(store, name, is_join):
token_key = "RpcGroupManagementToken"
join_or_leave = "join" if is_join else "leave"
my_token = f"Token_for_{name}_{join_or_leave}"
while True:
# Retrieve token from store to signal start of rank join/leave critical section
returned = store.compare_set(token_key, "", my_token).decode()
if returned == my_token:
# Yield to the function this context manager wraps
yield
# Finished, now exit and release token
# Update from store to signal end of rank join/leave critical section
store.set(token_key, "")
# Others will wait for this token to be set before they execute
store.set(my_token, "Done")
break
else:
# Store will wait for the token to be released
try:
store.wait([returned])
except RuntimeError:
logger.error(f"Group membership token {my_token} timed out waiting for {returned} to be released.")
raise
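# A hedged sketch of how the token protocol above is typically used (the
# worker name below is hypothetical); the context manager serializes
# join/leave updates across ranks through the store:
#
#   with _group_membership_management(store, "worker1", is_join=True):
#       ...  # perform the group-membership update while holding the token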
def _update_group_membership(worker_info, my_devices, reverse_device_map, is_join):
agent = cast(TensorPipeAgent, api._get_current_rpc_agent())
ret = agent._update_group_membership(worker_info, my_devices, reverse_device_map, is_join)
return ret
| pytorch-master | torch/distributed/rpc/_utils.py |
__all__ = ["init_backend", "backend_registered", "construct_rpc_backend_options", "register_backend", "BackendType", "BackendValue"]
import collections
import enum
from typing import cast, Dict, List, Set, Tuple
import torch
import torch.distributed as dist
from ._utils import _group_membership_management, _update_group_membership
from . import api
from . import constants as rpc_constants
BackendValue = collections.namedtuple(
"BackendValue", ["construct_rpc_backend_options_handler", "init_backend_handler"]
)
def _backend_type_repr(self):
return "BackendType." + self.name
_backend_type_doc = """
An enum class of available backends.
PyTorch ships with a builtin ``BackendType.TENSORPIPE`` backend.
Additional ones can be registered using the
:func:`~torch.distributed.rpc.backend_registry.register_backend` function.
"""
# Create an enum type, `BackendType`, with empty members.
# Can't handle Function Enum API (mypy bug #9079)
BackendType = enum.Enum(value="BackendType", names=dict()) # type: ignore[misc]
# Unable to assign a function as a method (mypy bug #2427)
BackendType.__repr__ = _backend_type_repr # type: ignore[assignment]
if BackendType.__doc__:
BackendType.__doc__ = _backend_type_doc
def backend_registered(backend_name):
"""
Checks if backend_name is registered as an RPC backend.
Args:
backend_name (str): string to identify the RPC backend.
Returns:
True if the backend has been registered with ``register_backend``, else
False.
"""
return backend_name in BackendType.__members__.keys()
def register_backend(
backend_name, construct_rpc_backend_options_handler, init_backend_handler
):
"""Registers a new RPC backend.
Args:
backend_name (str): backend string to identify the handler.
construct_rpc_backend_options_handler (function):
Handler that is invoked when
rpc_backend.construct_rpc_backend_options(**dict) is called.
init_backend_handler (function): Handler that is invoked when the
`_init_rpc_backend()` function is called with a backend.
This returns the agent.
"""
global BackendType
if backend_registered(backend_name):
raise RuntimeError("RPC backend {}: already registered".format(backend_name))
# Create a new enum type, `BackendType`, with extended members.
existing_enum_dict = {member.name: member.value for member in BackendType}
extended_enum_dict = dict(
{
backend_name: BackendValue(
construct_rpc_backend_options_handler=construct_rpc_backend_options_handler,
init_backend_handler=init_backend_handler,
)
},
**existing_enum_dict
)
# Can't handle Function Enum API (mypy bug #9079)
BackendType = enum.Enum(value="BackendType", names=extended_enum_dict) # type: ignore[misc]
# Unable to assign a function as a method (mypy bug #2427)
BackendType.__repr__ = _backend_type_repr # type: ignore[assignment]
if BackendType.__doc__:
BackendType.__doc__ = _backend_type_doc
return BackendType[backend_name]
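# Illustrative sketch only: registering a hypothetical backend. The handler
# names below are assumptions for this example and do not exist in this
# module; their signatures mirror the TensorPipe handlers defined further down.
#
#   def my_construct_options_handler(rpc_timeout, init_method, **kwargs):
#       ...  # return a backend-specific RpcBackendOptions object
#
#   def my_init_backend_handler(store, name, rank, world_size, rpc_backend_options):
#       ...  # construct and return the RpcAgent for this backend
#
#   MY_BACKEND = register_backend(
#       "MY_BACKEND", my_construct_options_handler, my_init_backend_handler
#   )
#   assert backend_registered("MY_BACKEND")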
def construct_rpc_backend_options(
backend,
rpc_timeout=rpc_constants.DEFAULT_RPC_TIMEOUT_SEC,
init_method=rpc_constants.DEFAULT_INIT_METHOD,
**kwargs
):
return backend.value.construct_rpc_backend_options_handler(
rpc_timeout, init_method, **kwargs
)
def init_backend(backend, *args, **kwargs):
return backend.value.init_backend_handler(*args, **kwargs)
def _init_process_group(store, rank, world_size):
# Initialize ProcessGroup.
process_group_timeout = rpc_constants.DEFAULT_PROCESS_GROUP_TIMEOUT
# We're using a bunch of private APIs here since `new_group` requires the
# default group to be initialized.
group = dist.ProcessGroupGloo(store, rank, world_size, process_group_timeout)
assert group is not None, "Failed to initialize default ProcessGroup."
if (rank != -1) and (rank != group.rank()):
raise RuntimeError(
"rank argument {} doesn't match pg rank {}".format(rank, group.rank())
)
if (world_size != -1) and (world_size != group.size()):
raise RuntimeError(
"world_size argument {} doesn't match pg size {}".format(
world_size, group.size()
)
)
return group
def _tensorpipe_construct_rpc_backend_options_handler(
rpc_timeout,
init_method,
num_worker_threads=rpc_constants.DEFAULT_NUM_WORKER_THREADS,
_transports=None,
_channels=None,
**kwargs
):
from . import TensorPipeRpcBackendOptions
return TensorPipeRpcBackendOptions(
rpc_timeout=rpc_timeout,
init_method=init_method,
num_worker_threads=num_worker_threads,
_transports=_transports,
_channels=_channels,
)
def _tensorpipe_validate_devices(devices, device_count):
return all(
d.type == "cpu" or (d.type == "cuda" and 0 <= d.index < device_count)
for d in devices
)
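# For example (a sketch; the device count is hypothetical): with
# device_count=2, [torch.device("cpu"), torch.device("cuda:1")] validates to
# True, while [torch.device("cuda:3")] validates to False because index 3 is
# not smaller than 2.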
# detect if any worker has invalid device_map configurations, and return
# reverse device maps
def _tensorpipe_exchange_and_check_all_device_maps(
my_name, my_device_count, my_device_maps, my_devices, group
):
gathered: List[Tuple[
str, int, Dict[str, Dict[torch.device, torch.device]], List[torch.device]
]] = [("", 0, {}, []) for _ in range(group.size())]
dist.all_gather_object(
gathered, (my_name, my_device_count, my_device_maps, my_devices), group
)
all_names = [name for name, _, _, _ in gathered]
all_device_counts = {name: count for name, count, _, _ in gathered}
all_device_maps = {name: map_ for name, _, map_, _ in gathered}
all_devices = {name: devices for name, _, _, devices in gathered}
_validate_device_maps(all_names, all_device_counts, all_device_maps, all_devices)
# Passed all checks; construct the reverse mapping and get the list of devices handled by this agent
reverse_device_maps = _create_reverse_mapping(my_name, all_names, all_device_maps)
my_devices = _create_device_list(my_devices, my_device_maps, reverse_device_maps)
return reverse_device_maps, my_devices
def _validate_device_maps(all_names, all_device_counts, all_device_maps, all_devices, is_static_group=True):
for node in all_names:
devices = all_devices[node]
if len(set(devices)) != len(devices):
raise ValueError(
f"Node {node} has duplicated devices\n"
f"devices = {devices}"
)
if not _tensorpipe_validate_devices(devices, all_device_counts[node]):
raise ValueError(
f"Node {node} has devices with invalid indices\n"
f"devices = {devices}\n"
f"device count = {all_device_counts[node]}"
)
for source_node in all_names:
# For dynamic group (non-static) do not check the target node name since it may not have joined yet
if is_static_group and not set(all_device_maps[source_node].keys()).issubset(all_names):
raise ValueError(
f"Node {source_node} has invalid target node names in its device maps\n"
f"device maps = {all_device_maps[source_node].keys()}\n"
f"node names = {all_names}"
)
for target_node, map_ in all_device_maps[source_node].items():
if len(set(map_.values())) != len(map_):
raise ValueError(
f"Node {source_node} has duplicated target devices "
f"in its device map for {target_node}\n"
f"device map = {map_}"
)
if all_devices[source_node]:
if not set(map_.keys()).issubset(all_devices[source_node]):
raise ValueError(
f"Node {source_node} has unexpected source devices "
f"in its device map for {target_node}\n"
f"device map = {map_}\n"
f"devices = {all_devices[source_node]}"
)
elif not _tensorpipe_validate_devices(
map_.keys(), all_device_counts[source_node]
):
raise ValueError(
f"Node {source_node} has source devices with invalid indices "
f"in its device map for {target_node}\n"
f"device map = {map_}\n"
f"device count = {all_device_counts[source_node]}"
)
if all_devices.get(target_node, []):
if not set(map_.values()).issubset(all_devices[target_node]):
raise ValueError(
f"Node {source_node} has unexpected target devices "
f"in its device map for {target_node}\n"
f"device map = {map_}\n"
f"devices = {all_devices[target_node]}"
)
elif target_node in all_device_counts and not _tensorpipe_validate_devices(
map_.values(), all_device_counts[target_node]
):
raise ValueError(
f"Node {source_node} has target devices with invalid indices "
f"in its device map for {target_node}\n"
f"device map = {map_}\n"
f"device count = {all_device_counts[target_node]}"
)
def _create_device_list(my_devices, my_device_maps, reverse_device_maps):
if not my_devices:
devices_set: Set[torch.device] = set()
for _, map_ in my_device_maps.items():
devices_set.update(map_.keys())
for _, map_ in reverse_device_maps.items():
devices_set.update(map_.keys())
devices_set.discard(torch.device("cpu"))
my_devices = list(devices_set)
my_devices = sorted(my_devices, key=lambda d: d.index)
return my_devices
def _create_reverse_mapping(my_name, all_names, all_device_maps):
reverse_device_maps: Dict[str, Dict[torch.device, torch.device]] = {}
for node in all_names:
if my_name in all_device_maps[node]:
reverse_device_maps[node] = {
v: k for k, v in all_device_maps[node][my_name].items()
}
return reverse_device_maps
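# A small worked example (worker names are hypothetical): if my_name is
# "workerA" and workerB's device map towards workerA is {cuda:0 -> cuda:1},
# then the mapping returned here is {"workerB": {cuda:1: cuda:0}}, i.e. the
# keys and values of workerB's map are swapped from workerA's point of view.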
def _get_device_infos():
from . import TensorPipeAgent
agent = cast(TensorPipeAgent, api._get_current_rpc_agent())
opts = agent._get_backend_options()
device_count = torch.cuda.device_count()
if torch.cuda.is_available() and opts.devices:
torch.cuda.init()
return device_count, opts.device_maps, opts.devices
def _set_devices_and_reverse_device_map(agent):
from . import TensorPipeAgent
agent = cast(TensorPipeAgent, agent)
# Group state is retrieved from local agent
# On initialization, tensorpipe agent retrieves information from all existing workers, so group state is valid
my_worker_info = agent.get_worker_info()
my_name = my_worker_info.name
all_worker_infos = agent.get_worker_infos()
# One round to get device_maps of all workers and construct reverse device maps
all_device_counts, all_device_maps, all_devices, all_names = {}, {}, {}, []
for worker_info in all_worker_infos:
worker_name = worker_info.name
if worker_name != my_name:
# TODO: make async?
device_count, device_map, devices = api.rpc_sync(worker_name, _get_device_infos)
else:
opts = agent._get_backend_options()
device_count, device_map, devices = torch.cuda.device_count(), opts.device_maps, opts.devices
all_device_counts[worker_name] = device_count
all_device_maps[worker_name] = device_map
all_devices[worker_name] = devices
all_names.append(worker_name)
_validate_device_maps(all_names, all_device_counts, all_device_maps, all_devices, is_static_group=False)
reverse_device_maps = _create_reverse_mapping(my_name, all_names, all_device_maps)
# Perform RPC call to all workers, including itself, to include newly joined worker information and device maps
for worker_name in all_names:
# Set device list for each worker
all_devices[worker_name] = _create_device_list(all_devices[worker_name], all_device_maps[worker_name], reverse_device_maps)
api.rpc_sync(worker_name, _update_group_membership,
args=(my_worker_info, all_devices[worker_name], reverse_device_maps, True))
def _tensorpipe_init_backend_handler(store, name, rank, world_size, rpc_backend_options):
from . import TensorPipeAgent
from . import TensorPipeRpcBackendOptions
if not isinstance(store, dist.Store):
raise TypeError("`store` must be a c10d::Store. {}".format(store))
if not isinstance(
rpc_backend_options, TensorPipeRpcBackendOptions
):
raise TypeError(
"`rpc_backend_options` must be a `TensorPipeRpcBackendOptions`. {}".format(
rpc_backend_options
)
)
device_count = torch.cuda.device_count()
is_static_group = bool(world_size)
# world_size is specified so this is a static group (ranks cannot join and leave)
if is_static_group:
# The agent's join method is required to behave like a barrier and perform
# collective operations, for which it relies on a process group, instead of
# re-implementing this on top of RPCs.
group = _init_process_group(store, rank, world_size)
reverse_device_maps, devices = _tensorpipe_exchange_and_check_all_device_maps(
name,
device_count,
rpc_backend_options.device_maps,
rpc_backend_options.devices,
group,
)
if torch.cuda.is_available() and devices:
# It's necessary to initialize PyTorch CUDA states here (e.g.,
# CUDACachingAllocator). If this is missing, we could hit errors like
# "allocator not initialized", because other processes might send
# CUDA-related RPC request to this process before user code in this
# process initializes its PyTorch CUDA states.
torch.cuda.init()
# TODO: add try-except and destroy _agent in all processes if any fails.
agent = TensorPipeAgent(
store,
name,
rank,
world_size,
rpc_backend_options,
reverse_device_maps,
devices,
)
api._init_rpc_states(agent)
# Run one dummy round of RPC to initialize channels/transports. Without
# this, it's easy to hit timeout in rpc.shutdown() if there is no other RPC
# on that process before rpc.shutdown(), as the agent initialization can
# take longer than 5s.
api._all_gather(None, timeout=rpc_backend_options.rpc_timeout)
# Need a barrier here to make sure no peers leave before the rank0 finishes
# _all_gather
group.barrier().wait()
return agent
# initialization for dynamic rpc (ranks can join and leave)
else:
with _group_membership_management(store, name, True):
# Construct TPAgent with empty reverse_device_map and devices
# these properties will be updated after initialization
agent = TensorPipeAgent(
store,
name,
rank,
world_size,
rpc_backend_options,
{},
[],
)
api._init_rpc_states(agent)
try:
# Notify all workers in the group that this rank has joined, and set devices and reverse_device_map.
# This is a synchronous operation that completes once all existing ranks are updated.
_set_devices_and_reverse_device_map(agent)
except Exception:
api.shutdown()
raise
return agent
register_backend(
"TENSORPIPE",
_tensorpipe_construct_rpc_backend_options_handler,
_tensorpipe_init_backend_handler,
)
| pytorch-master | torch/distributed/rpc/backend_registry.py |
import torch
def is_available():
return hasattr(torch._C, "_faulty_agent_init")
if is_available() and not torch._C._faulty_agent_init():
raise RuntimeError("Failed to initialize torch.distributed.rpc._testing")
if is_available():
# Registers FAULTY_TENSORPIPE RPC backend.
from . import faulty_agent_backend_registry
from torch._C._distributed_rpc_testing import (
FaultyTensorPipeRpcBackendOptions,
FaultyTensorPipeAgent,
)
| pytorch-master | torch/distributed/rpc/_testing/__init__.py |
#!/usr/bin/env python3
import torch.distributed as dist
import torch.distributed.rpc as rpc
def _faulty_tensorpipe_construct_rpc_backend_options_handler(
rpc_timeout,
init_method,
num_worker_threads,
messages_to_fail,
messages_to_delay,
num_fail_sends,
**kwargs
):
from . import FaultyTensorPipeRpcBackendOptions
return FaultyTensorPipeRpcBackendOptions(
num_worker_threads=num_worker_threads,
rpc_timeout=rpc_timeout,
init_method=init_method,
messages_to_fail=messages_to_fail,
messages_to_delay=messages_to_delay,
num_fail_sends=num_fail_sends,
)
def _faulty_tensorpipe_init_backend_handler(
store, name, rank, world_size, rpc_backend_options
):
from . import FaultyTensorPipeAgent
from . import FaultyTensorPipeRpcBackendOptions
from torch.distributed.rpc import api
if not isinstance(store, dist.Store):
raise TypeError("`store` must be a c10d::Store. {}".format(store))
if not isinstance(
rpc_backend_options, FaultyTensorPipeRpcBackendOptions
):
raise TypeError(
"`rpc_backend_options` must be a `FaultyTensorPipeRpcBackendOptions`. {}".format(
rpc_backend_options
)
)
agent = FaultyTensorPipeAgent(
store,
name,
rank,
world_size,
rpc_backend_options,
{}, # reverse_device_map
[], # devices
)
api._init_rpc_states(agent)
return agent
rpc.backend_registry.register_backend(
"FAULTY_TENSORPIPE",
_faulty_tensorpipe_construct_rpc_backend_options_handler,
_faulty_tensorpipe_init_backend_handler,
)
| pytorch-master | torch/distributed/rpc/_testing/faulty_agent_backend_registry.py |
import torch
import warnings
from typing import Any
__all__ = ["detect_anomaly", "set_detect_anomaly"]
class detect_anomaly(object):
r"""Context-manager that enable anomaly detection for the autograd engine.
This does two things:
- Running the forward pass with detection enabled will allow the backward
pass to print the traceback of the forward operation that created the failing
backward function.
- Any backward computation that generates a "nan" value will raise an error.
.. warning::
This mode should be enabled only for debugging as the different tests
will slow down your program execution.
Example:
>>> import torch
>>> from torch import autograd
>>> class MyFunc(autograd.Function):
... @staticmethod
... def forward(ctx, inp):
... return inp.clone()
... @staticmethod
... def backward(ctx, gO):
... # Error during the backward pass
... raise RuntimeError("Some error in backward")
... return gO.clone()
>>> def run_fn(a):
... out = MyFunc.apply(a)
... return out.sum()
>>> inp = torch.rand(10, 10, requires_grad=True)
>>> out = run_fn(inp)
>>> out.backward()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/your/pytorch/install/torch/_tensor.py", line 93, in backward
torch.autograd.backward(self, gradient, retain_graph, create_graph)
File "/your/pytorch/install/torch/autograd/__init__.py", line 90, in backward
allow_unreachable=True) # allow_unreachable flag
File "/your/pytorch/install/torch/autograd/function.py", line 76, in apply
return self._forward_cls.backward(self, *args)
File "<stdin>", line 8, in backward
RuntimeError: Some error in backward
>>> with autograd.detect_anomaly():
... inp = torch.rand(10, 10, requires_grad=True)
... out = run_fn(inp)
... out.backward()
Traceback of forward call that caused the error:
File "tmp.py", line 53, in <module>
out = run_fn(inp)
File "tmp.py", line 44, in run_fn
out = MyFunc.apply(a)
Traceback (most recent call last):
File "<stdin>", line 4, in <module>
File "/your/pytorch/install/torch/_tensor.py", line 93, in backward
torch.autograd.backward(self, gradient, retain_graph, create_graph)
File "/your/pytorch/install/torch/autograd/__init__.py", line 90, in backward
allow_unreachable=True) # allow_unreachable flag
File "/your/pytorch/install/torch/autograd/function.py", line 76, in apply
return self._forward_cls.backward(self, *args)
File "<stdin>", line 8, in backward
RuntimeError: Some error in backward
"""
def __init__(self) -> None:
self.prev = torch.is_anomaly_enabled()
warnings.warn('Anomaly Detection has been enabled. '
'This mode will increase the runtime '
'and should only be enabled for debugging.', stacklevel=2)
def __enter__(self) -> None:
torch.set_anomaly_enabled(True)
def __exit__(self, *args: Any) -> None:
torch.set_anomaly_enabled(self.prev)
class set_detect_anomaly(object):
r"""Context-manager that sets the anomaly detection for the autograd engine on or off.
``set_detect_anomaly`` will enable or disable the autograd anomaly detection
based on its argument :attr:`mode`.
It can be used as a context-manager or as a function.
See ``detect_anomaly`` above for details of the anomaly detection behaviour.
Args:
mode (bool): Flag whether to enable anomaly detection (``True``),
or disable (``False``).
"""
def __init__(self, mode: bool) -> None:
self.prev = torch.is_anomaly_enabled()
torch.set_anomaly_enabled(mode)
def __enter__(self) -> None:
pass
def __exit__(self, *args: Any) -> None:
torch.set_anomaly_enabled(self.prev)
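# A short usage sketch for set_detect_anomaly (illustrative only; ``loss``
# stands in for some scalar computed by the user's model): it can be used
# either as a plain function call or as a context manager.
#
#   torch.autograd.set_detect_anomaly(True)     # enable globally
#   ...
#   torch.autograd.set_detect_anomaly(False)    # disable again
#
#   with torch.autograd.set_detect_anomaly(True):
#       loss.backward()                          # anomaly checks active here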
| pytorch-master | torch/autograd/anomaly_mode.py |
import torch
from typing import Callable, Any
class saved_tensors_hooks():
"""Context-manager that sets a pair of pack / unpack hooks for saved tensors.
Use this context-manager to define how intermediary results of an operation
should be packed before saving, and unpacked on retrieval.
In that context, the ``pack_hook`` function will be called every time an
operation saves a tensor for backward (this includes intermediary results
saved using
:func:`~torch.autograd.function._ContextMethodMixin.save_for_backward` but
also those recorded by a PyTorch-defined operation). The output of
``pack_hook`` is then stored in the computation graph instead of the
original tensor.
The ``unpack_hook`` is called when the saved tensor needs to be accessed,
namely when executing :func:`torch.Tensor.backward()` or
:func:`torch.autograd.grad()`. It takes as argument the *packed* object
returned by ``pack_hook`` and should return a tensor which has the same
content as the original tensor (passed as input to the corresponding
``pack_hook``).
The hooks should have the following signatures:
pack_hook(tensor: Tensor) -> Any
unpack_hook(Any) -> Tensor
where the return value of ``pack_hook`` is a valid input to ``unpack_hook``.
In general, you want ``unpack_hook(pack_hook(t))`` to be equal to ``t`` in terms
of value, size, dtype and device.
Example::
>>> def pack_hook(x):
... print("Packing", x)
... return x
>>>
>>> def unpack_hook(x):
... print("Unpacking", x)
... return x
>>>
>>> a = torch.ones(5, requires_grad=True)
>>> b = torch.ones(5, requires_grad=True) * 2
>>> with torch.autograd.graph.saved_tensors_hooks(pack_hook, unpack_hook):
... y = a * b
Packing tensor([1., 1., 1., 1., 1.], requires_grad=True)
Packing tensor([2., 2., 2., 2., 2.], grad_fn=<MulBackward0>)
>>> y.sum().backward()
Unpacking tensor([1., 1., 1., 1., 1.], requires_grad=True)
Unpacking tensor([2., 2., 2., 2., 2.], grad_fn=<MulBackward0>)
.. warning ::
Performing an in-place operation on the input to either hook may lead
to undefined behavior.
.. warning ::
Only one pair of hooks is allowed at a time. When recursively nesting this
context-manager, only the inner-most pair of hooks will be applied.
"""
def __init__(self, pack_hook: Callable[[torch.Tensor], Any], unpack_hook: Callable[[Any], torch.Tensor]):
self.pack_hook = pack_hook
self.unpack_hook = unpack_hook
def __enter__(self):
torch._C._autograd._push_saved_tensors_default_hooks(self.pack_hook, self.unpack_hook)
def __exit__(self, *args: Any):
torch._C._autograd._pop_saved_tensors_default_hooks()
class save_on_cpu(saved_tensors_hooks):
"""Context-manager under which tensors saved by the forward pass will be
stored on CPU, then retrieved for backward.
When performing operations within this context manager, intermediary
results saved in the graph during the forward pass will be moved to CPU,
then copied back to the original device when needed for the backward pass.
If the graph was already on CPU, no tensor copy is performed.
Use this context-manager to trade compute for GPU memory usage (e.g.
when your model doesn't fit in GPU memory during training).
Args:
pin_memory (bool): If ``True`` tensors will be saved to CPU pinned memory
during packing and copied to GPU asynchronously during unpacking.
Defaults to ``False``.
Also see :ref:`cuda-memory-pinning`.
Example::
>>> # xdoctest: +REQUIRES(env:CUDAHOME)
>>> a = torch.randn(5, requires_grad=True, device="cuda")
>>> b = torch.randn(5, requires_grad=True, device="cuda")
>>> c = torch.randn(5, requires_grad=True, device="cuda")
>>>
>>> def f(a, b, c):
... prod_1 = a * b # a and b are saved on GPU
... with torch.autograd.graph.save_on_cpu():
... prod_2 = prod_1 * c # prod_1 and c are saved on CPU
... y = prod_2 * a # prod_2 and a are saved on GPU
... return y
>>>
>>> y = f(a, b, c)
>>> del a, b, c # for illustration only
>>> # the content of a, b, and prod_2 are still alive on GPU
>>> # the content of prod_1 and c only live on CPU
>>> y.sum().backward() # all CPU tensors are moved back to GPU, for backward
>>> # all intermediary tensors are released (deleted) after the call to backward
"""
def __init__(self, pin_memory=False):
def pack_to_cpu(tensor):
if not pin_memory:
return (tensor.device, tensor.cpu())
packed = torch.empty(
tensor.size(),
dtype=tensor.dtype,
layout=tensor.layout,
pin_memory=(torch.cuda.is_available() and not tensor.is_sparse))
packed.copy_(tensor)
return (tensor.device, packed)
def unpack_from_cpu(packed):
device, tensor = packed
return tensor.to(device, non_blocking=pin_memory)
super().__init__(pack_to_cpu, unpack_from_cpu)
| pytorch-master | torch/autograd/graph.py |
import torch
from .grad_mode import _DecoratorContextManager
from collections import namedtuple
from typing import Any
__all__ = ["UnpackedDualTensor", "enter_dual_level", "exit_dual_level", "make_dual", "unpack_dual", "dual_level"]
# Global variable used to make the python API simpler to use
_current_level = -1
def enter_dual_level():
r"""Function that can be used to enter a new forward grad level.
This level can be used to make and unpack dual Tensors to compute
forward gradients.
This function also updates the current level that is used by default
by the other functions in this API.
"""
global _current_level
new_level = torch._C._enter_dual_level()
if new_level != _current_level + 1:
raise RuntimeError("Entering a new forward AD level but the current level "
"is not valid. Make sure you did not modified it directly.")
_current_level = new_level
return new_level
def exit_dual_level(*, level=None):
r"""Function that can be used to exit a forward grad level.
This function deletes all the gradients associated with this
level. Only deleting the latest entered level is allowed.
This function also updates the current level that is used by default
by the other functions in this API.
"""
global _current_level
if level is None:
level = _current_level
if level != _current_level:
raise RuntimeError("Trying to exit a forward AD level that was not the last one "
"that was created. This is not supported.")
torch._C._exit_dual_level(level=level)
_current_level = level - 1
def make_dual(tensor, tangent, *, level=None):
r"""Associates a tensor value with a forward gradient, the tangent, to create a
"dual tensor", which is used to compute forward AD gradients.
The result is a new tensor aliased to :attr:`tensor` with :attr:`tangent` embedded
as an attribute as-is if it has the same storage layout or copied otherwise.
The tangent attribute can be recovered with :func:`unpack_dual`.
This function is backward differentiable.
Given a function `f` whose jacobian is `J`, it allows one to compute the Jacobian-vector product (`jvp`)
between `J` and a given vector `v` as follows.
Example::
>>> # xdoctest: +SKIP("Undefined variables")
>>> with dual_level():
... inp = make_dual(x, v)
... out = f(inp)
... y, jvp = unpack_dual(out)
Please see the `forward-mode AD tutorial <https://pytorch.org/tutorials/intermediate/forward_ad_usage.html>`__
for detailed steps on how to use this API.
"""
if level is None:
level = _current_level
if level < 0:
raise RuntimeError("Trying to create a dual Tensor for forward AD but no level "
"exists, make sure to enter_dual_level() first.")
if not (tensor.is_floating_point() or tensor.is_complex()):
raise ValueError(f"Expected primal to be floating point or complex, but got: {tensor.dtype}")
if not (tangent.is_floating_point() or tangent.is_complex()):
raise ValueError(f"Expected tangent to be floating point or complex, but got: {tangent.dtype}")
return torch._VF._make_dual(tensor, tangent, level=level)
_UnpackedDualTensor = namedtuple('_UnpackedDualTensor', ['primal', 'tangent'])
class UnpackedDualTensor(_UnpackedDualTensor):
r"""Namedtuple returned by :func:`unpack_dual` containing the primal and tangent components of the dual tensor.
See :func:`unpack_dual` for more details."""
pass
def unpack_dual(tensor, *, level=None):
r"""Unpacks a "dual tensor" to get both its Tensor value and its forward AD gradient.
The result is a namedtuple ``(primal, tangent)`` where ``primal`` is a view of
:attr:`tensor`'s primal and ``tangent`` is :attr:`tensor`'s tangent as-is.
Neither of these tensors can be dual tensor of level :attr:`level`.
This function is backward differentiable.
Example::
>>> # xdoctest: +SKIP("Undefined variables")
>>> with dual_level():
... inp = make_dual(x, x_t)
... out = f(inp)
... y, jvp = unpack_dual(out)
... jvp = unpack_dual(out).tangent
Please see the `forward-mode AD tutorial <https://pytorch.org/tutorials/intermediate/forward_ad_usage.html>`__
for detailed steps on how to use this API.
"""
if level is None:
level = _current_level
if level < 0:
return UnpackedDualTensor(tensor, None)
primal, dual = torch._VF._unpack_dual(tensor, level=level)
return UnpackedDualTensor(primal, dual)
class dual_level(_DecoratorContextManager):
r"""Context-manager that enables forward AD. All forward AD computation must
be performed in a ``dual_level`` context.
.. Note::
The ``dual_level`` context appropriately enters and exits the dual level to
control the current forward AD level, which is used by default by the other
functions in this API.
We currently don't plan to support nested ``dual_level`` contexts, however, so
only a single forward AD level is supported. To compute higher-order
forward grads, one can use `functorch's jvp <https://github.com/pytorch/functorch#jvp>`__.
Example::
>>> # xdoctest: +SKIP("Undefined variables")
>>> x = torch.tensor([1])
>>> x_t = torch.tensor([1])
>>> with dual_level():
... inp = make_dual(x, x_t)
... # Do computations with inp
... out = your_fn(inp)
... _, grad = unpack_dual(out)
>>> grad is None
False
>>> # After exiting the level, the grad is deleted
>>> _, grad_after = unpack_dual(out)
>>> grad is None
True
Please see the `forward-mode AD tutorial <https://pytorch.org/tutorials/intermediate/forward_ad_usage.html>`__
for detailed steps on how to use this API.
"""
def __init__(self):
super().__init__()
def __enter__(self):
return enter_dual_level()
def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
exit_dual_level()
| pytorch-master | torch/autograd/forward_ad.py |
"""
``torch.autograd`` provides classes and functions implementing automatic
differentiation of arbitrary scalar valued functions. It requires minimal
changes to the existing code - you only need to declare :class:`Tensor` s
for which gradients should be computed with the ``requires_grad=True`` keyword.
As of now, we only support autograd for floating point :class:`Tensor` types (
half, float, double and bfloat16) and complex :class:`Tensor` types (cfloat, cdouble).
"""
import torch
import warnings
from torch.types import _TensorOrTensors, _size
from typing import Any, Callable, List, Optional, Sequence, Tuple, Union, cast
from .variable import Variable
from .function import Function, NestedIOFunction
from .gradcheck import gradcheck, gradgradcheck
from .grad_mode import no_grad, enable_grad, set_grad_enabled, inference_mode
from .anomaly_mode import detect_anomaly, set_detect_anomaly
from ..overrides import has_torch_function, handle_torch_function, is_tensor_like
from . import functional
from . import forward_ad
from . import graph
from .. import _vmap_internals
__all__ = ['Variable', 'Function', 'backward', 'grad_mode']
_OptionalTensor = Optional[torch.Tensor]
_ShapeorNestedShape = Union[_size, Sequence[_size], torch.Tensor]
def _calculate_shape(output: torch.Tensor, grad: torch.Tensor,
is_grads_batched: bool) -> Tuple[_ShapeorNestedShape, _ShapeorNestedShape]:
# is_same_size ensures that both tensors are either nested or non nested
if output.is_nested:
if is_grads_batched:
raise RuntimeError("Batched grads are not supported with Nested Tensor.")
out_shape = output._nested_tensor_size()
grad_shape = grad._nested_tensor_size()
return out_shape, grad_shape
reg_out_shape = output.shape
reg_grad_shape = grad.shape if not is_grads_batched else grad.shape[1:]
return reg_out_shape, reg_grad_shape
def _make_grads(outputs: Sequence[torch.Tensor], grads: Sequence[_OptionalTensor],
is_grads_batched: bool) -> Tuple[_OptionalTensor, ...]:
new_grads: List[_OptionalTensor] = []
for out, grad in zip(outputs, grads):
if isinstance(grad, torch.Tensor):
first_grad = grad if not is_grads_batched else grad[0]
if not torch.is_same_size(out, first_grad):
out_shape, grad_shape = _calculate_shape(out, first_grad, is_grads_batched)
if is_grads_batched:
raise RuntimeError("If `is_grads_batched=True`, we interpret the first "
"dimension of each grad_output as the batch dimension. "
"The sizes of the remaining dimensions are expected to match "
"the shape of corresponding output, but a mismatch "
"was detected: grad_output["
+ str(grads.index(grad)) + "] has a shape of "
+ str(grad_shape) + " and output["
+ str(outputs.index(out)) + "] has a shape of "
+ str(out_shape) + ". "
"If you only want some tensors in `grad_output` to be considered "
"batched, consider using vmap.")
else:
raise RuntimeError("Mismatch in shape: grad_output["
+ str(grads.index(grad)) + "] has a shape of "
+ str(grad_shape) + " and output["
+ str(outputs.index(out)) + "] has a shape of "
+ str(out_shape) + ".")
if out.dtype.is_complex != grad.dtype.is_complex:
raise RuntimeError("For complex Tensors, both grad_output and output"
" are required to have the same dtype."
" Mismatch in dtype: grad_output["
+ str(grads.index(grad)) + "] has a dtype of "
+ str(grad.dtype) + " and output["
+ str(outputs.index(out)) + "] has a dtype of "
+ str(out.dtype) + ".")
new_grads.append(grad)
elif grad is None:
if out.requires_grad:
if out.numel() != 1:
raise RuntimeError("grad can be implicitly created only for scalar outputs")
new_grads.append(torch.ones_like(out, memory_format=torch.preserve_format))
else:
new_grads.append(None)
else:
raise TypeError("gradients can be either Tensors or None, but got " +
type(grad).__name__)
return tuple(new_grads)
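# A brief sketch of the implicit-gradient behavior above (the shapes are
# illustrative): for a scalar output that requires grad and a grad of None,
# _make_grads returns a ones_like tensor, while a non-scalar output with a
# None grad raises "grad can be implicitly created only for scalar outputs".
#
#   out = (torch.randn(3, requires_grad=True) * 2).sum()   # scalar output
#   (g,) = _make_grads((out,), (None,), is_grads_batched=False)
#   assert torch.equal(g, torch.ones_like(out))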
def _tensor_or_tensors_to_tuple(tensors: Optional[_TensorOrTensors], length: int) -> Tuple[_OptionalTensor, ...]:
if tensors is None:
return (None, ) * length
if isinstance(tensors, torch.Tensor):
return (tensors, )
return tuple(tensors)
def backward(
tensors: _TensorOrTensors,
grad_tensors: Optional[_TensorOrTensors] = None,
retain_graph: Optional[bool] = None,
create_graph: bool = False,
grad_variables: Optional[_TensorOrTensors] = None,
inputs: Optional[_TensorOrTensors] = None,
) -> None:
r"""Computes the sum of gradients of given tensors with respect to graph
leaves.
The graph is differentiated using the chain rule. If any of ``tensors``
are non-scalar (i.e. their data has more than one element) and require
gradient, then the Jacobian-vector product would be computed, in this
case the function additionally requires specifying ``grad_tensors``.
It should be a sequence of matching length, that contains the "vector"
in the Jacobian-vector product, usually the gradient of the differentiated
function w.r.t. corresponding tensors (``None`` is an acceptable value for
all tensors that don't need gradient tensors).
This function accumulates gradients in the leaves - you might need to zero
``.grad`` attributes or set them to ``None`` before calling it.
See :ref:`Default gradient layouts<default-grad-layouts>`
for details on the memory layout of accumulated gradients.
.. note::
Using this method with ``create_graph=True`` will create a reference cycle
between the parameter and its gradient which can cause a memory leak.
We recommend using ``autograd.grad`` when creating the graph to avoid this.
If you have to use this function, make sure to reset the ``.grad`` fields of your
parameters to ``None`` after use to break the cycle and avoid the leak.
.. note::
If you run any forward ops, create ``grad_tensors``, and/or call ``backward``
in a user-specified CUDA stream context, see
:ref:`Stream semantics of backward passes<bwd-cuda-stream-semantics>`.
.. note::
When ``inputs`` are provided and a given input is not a leaf,
the current implementation will call its grad_fn (even though it is not strictly needed to get these gradients).
It is an implementation detail on which the user should not rely.
See https://github.com/pytorch/pytorch/pull/60521#issuecomment-867061780 for more details.
Args:
tensors (Sequence[Tensor] or Tensor): Tensors of which the derivative will be
computed.
grad_tensors (Sequence[Tensor or None] or Tensor, optional): The "vector" in
the Jacobian-vector product, usually gradients w.r.t. each element of
corresponding tensors. None values can be specified for scalar Tensors or
ones that don't require grad. If a None value would be acceptable for all
grad_tensors, then this argument is optional.
retain_graph (bool, optional): If ``False``, the graph used to compute the grad
will be freed. Note that in nearly all cases setting this option to ``True``
is not needed and often can be worked around in a much more efficient
way. Defaults to the value of ``create_graph``.
create_graph (bool, optional): If ``True``, graph of the derivative will
be constructed, allowing to compute higher order derivative products.
Defaults to ``False``.
inputs (Sequence[Tensor] or Tensor, optional): Inputs w.r.t. which the gradient
will be accumulated into ``.grad``. All other Tensors will be ignored. If
not provided, the gradient is accumulated into all the leaf Tensors that
were used to compute :attr:`tensors`.
"""
if grad_variables is not None:
warnings.warn("'grad_variables' is deprecated. Use 'grad_tensors' instead.")
if grad_tensors is None:
grad_tensors = grad_variables
else:
raise RuntimeError("'grad_tensors' and 'grad_variables' (deprecated) "
"arguments both passed to backward(). Please only "
"use 'grad_tensors'.")
if inputs is not None and len(inputs) == 0:
raise RuntimeError("'inputs' argument to backward() cannot be empty.")
tensors = (tensors,) if isinstance(tensors, torch.Tensor) else tuple(tensors)
inputs = (inputs,) if isinstance(inputs, torch.Tensor) else \
tuple(inputs) if inputs is not None else tuple()
grad_tensors_ = _tensor_or_tensors_to_tuple(grad_tensors, len(tensors))
grad_tensors_ = _make_grads(tensors, grad_tensors_, is_grads_batched=False)
if retain_graph is None:
retain_graph = create_graph
# The reason we repeat the same comment below is that
# some Python versions print out the first line of a multi-line function
# call in the traceback and some print out the last line
Variable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass
tensors, grad_tensors_, retain_graph, create_graph, inputs,
allow_unreachable=True, accumulate_grad=True) # Calls into the C++ engine to run the backward pass
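# A minimal usage sketch for the public entry point above (tensor values are
# illustrative): for a scalar loss this is equivalent to ``loss.backward()``.
#
#   x = torch.randn(3, requires_grad=True)
#   loss = (x * 2).sum()
#   torch.autograd.backward(loss)
#   assert torch.equal(x.grad, torch.full_like(x, 2.0))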
def grad(
outputs: _TensorOrTensors,
inputs: _TensorOrTensors,
grad_outputs: Optional[_TensorOrTensors] = None,
retain_graph: Optional[bool] = None,
create_graph: bool = False,
only_inputs: bool = True,
allow_unused: bool = False,
is_grads_batched: bool = False
) -> Tuple[torch.Tensor, ...]:
r"""Computes and returns the sum of gradients of outputs with respect to
the inputs.
    ``grad_outputs`` should be a sequence of length matching ``outputs``,
    containing the "vector" in the vector-Jacobian product, usually the pre-computed
    gradients w.r.t. each of the outputs. If an output doesn't require_grad,
    then the gradient can be ``None``.
.. note::
If you run any forward ops, create ``grad_outputs``, and/or call ``grad``
in a user-specified CUDA stream context, see
:ref:`Stream semantics of backward passes<bwd-cuda-stream-semantics>`.
.. note::
``only_inputs`` argument is deprecated and is ignored now (defaults to ``True``).
To accumulate gradient for other parts of the graph, please use
``torch.autograd.backward``.
Args:
outputs (sequence of Tensor): outputs of the differentiated function.
inputs (sequence of Tensor): Inputs w.r.t. which the gradient will be
returned (and not accumulated into ``.grad``).
grad_outputs (sequence of Tensor): The "vector" in the vector-Jacobian product.
Usually gradients w.r.t. each output. None values can be specified for scalar
Tensors or ones that don't require grad. If a None value would be acceptable
            for all grad_outputs, then this argument is optional. Default: None.
retain_graph (bool, optional): If ``False``, the graph used to compute the grad
will be freed. Note that in nearly all cases setting this option to ``True``
is not needed and often can be worked around in a much more efficient
way. Defaults to the value of ``create_graph``.
create_graph (bool, optional): If ``True``, graph of the derivative will
be constructed, allowing to compute higher order derivative products.
Default: ``False``.
allow_unused (bool, optional): If ``False``, specifying inputs that were not
used when computing outputs (and therefore their grad is always zero)
is an error. Defaults to ``False``.
is_grads_batched (bool, optional): If ``True``, the first dimension of each
tensor in ``grad_outputs`` will be interpreted as the batch dimension.
Instead of computing a single vector-Jacobian product, we compute a
batch of vector-Jacobian products for each "vector" in the batch.
We use the vmap prototype feature as the backend to vectorize calls
to the autograd engine so that this computation can be performed in a
single call. This should lead to performance improvements when compared
to manually looping and performing backward multiple times. Note that
due to this feature being experimental, there may be performance
cliffs. Please use ``torch._C._debug_only_display_vmap_fallback_warnings(True)``
to show any performance warnings and file an issue on github if warnings exist
for your use case. Defaults to ``False``.
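    A minimal sketch of typical usage (unlike ``backward``, the gradients are
    returned rather than accumulated into ``.grad``; values here are purely
    illustrative)::
        >>> x = torch.randn(3, requires_grad=True)
        >>> y = (x ** 2).sum()
        >>> (gx,) = torch.autograd.grad(y, x)
        >>> torch.allclose(gx, 2 * x)
        True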
"""
t_outputs = cast(Tuple[torch.Tensor, ...], (outputs,) if is_tensor_like(outputs) else tuple(outputs))
t_inputs = cast(Tuple[torch.Tensor, ...], (inputs,) if is_tensor_like(inputs) else tuple(inputs))
overridable_args = t_outputs + t_inputs
if has_torch_function(overridable_args):
return handle_torch_function(
grad,
overridable_args,
t_outputs,
t_inputs,
grad_outputs=grad_outputs,
retain_graph=retain_graph,
create_graph=create_graph,
only_inputs=only_inputs,
allow_unused=allow_unused,
is_grads_batched=is_grads_batched,
)
if not only_inputs:
warnings.warn("only_inputs argument is deprecated and is ignored now "
"(defaults to True). To accumulate gradient for other "
"parts of the graph, please use torch.autograd.backward.")
grad_outputs_ = _tensor_or_tensors_to_tuple(grad_outputs, len(t_outputs))
grad_outputs_ = _make_grads(t_outputs, grad_outputs_, is_grads_batched=is_grads_batched)
if retain_graph is None:
retain_graph = create_graph
    # The reason we repeat the same comment several times below is that
    # some Python versions print out the first line of a multi-line function
    # call in the traceback and some print out the last line
if is_grads_batched:
def vjp(gO):
return Variable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass
t_outputs, gO, retain_graph, create_graph, t_inputs,
allow_unused, accumulate_grad=False) # Calls into the C++ engine to run the backward pass
return _vmap_internals._vmap(vjp, 0, 0, allow_none_pass_through=True)(grad_outputs_)
else:
return Variable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass
t_outputs, grad_outputs_, retain_graph, create_graph, t_inputs,
allow_unused, accumulate_grad=False) # Calls into the C++ engine to run the backward pass
# This function applies in case of gradient checkpointing for memory
# optimization. Currently, gradient checkpointing is supported only if the
# execution engine is invoked through torch.autograd.backward() and its
# inputs argument is not passed. It is not supported for torch.autograd.grad().
# This is because if inputs are specified, the gradient won't be calculated for
# anything else e.g. model parameters like weights, bias etc.
#
# This function returns whether checkpointing is valid, i.e. whether the engine was
# entered through torch.autograd.backward (valid) or torch.autograd.grad (not valid).
# The implementation works by maintaining a thread-local variable in
# torch/csrc/autograd/engine.cpp that looks at the NodeTask on the stack: before a
# NodeTask is executed in evaluate_function, it checks whether reentrant backwards
# is imperative or not.
# See https://github.com/pytorch/pytorch/pull/4594 for more discussion/context
def _is_checkpoint_valid():
return Variable._execution_engine.is_checkpoint_valid()
def variable(*args, **kwargs):
raise RuntimeError("torch.autograd.variable(...) is deprecated, use torch.tensor(...) instead")
# Monkey patching variable.Variable to fix FX codegen. FX generates a call by roughly doing
# f"{fn.__module__}.{fn.__name__}(...). This yields torch.autograd.variable.Variable(...) in the
# output of an FX graph. Unfortunately the module name torch.autograd.variable is shadowed by the
# deprecated function - variable(...).
variable.Variable = Variable # type: ignore[attr-defined]
if not torch._C._autograd_init():
raise RuntimeError("autograd initialization failed")
# Import all native method/classes
from torch._C._autograd import (DeviceType, ProfilerActivity, ProfilerState, ProfilerConfig, ProfilerEvent,
_enable_profiler_legacy, _disable_profiler_legacy, _profiler_enabled,
_enable_record_function, _set_empty_test_observer, kineto_available,
_record_function_with_args_enter, _record_function_with_args_exit,
_supported_activities, _add_metadata_json, SavedTensor,
_push_saved_tensors_default_hooks, _pop_saved_tensors_default_hooks)
from torch._C._autograd import (_ProfilerResult, _KinetoEvent, _kineto_step,
_prepare_profiler, _enable_profiler, _disable_profiler)
from . import profiler
def _register_py_tensor_class_for_device(device, cls):
if not isinstance(cls, type):
raise RuntimeError("cls isn't a typeinfo object")
torch._C._register_py_class_for_device(device, cls)
| pytorch-master | torch/autograd/__init__.py |
import torch
from torch._six import with_metaclass
class VariableMeta(type):
def __instancecheck__(cls, other):
return isinstance(other, torch.Tensor)
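    # Any ``torch.Tensor`` counts as an instance of ``Variable``: Variable and
    # Tensor were merged, and this metaclass keeps legacy
    # ``isinstance(obj, Variable)`` checks working.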
# mypy doesn't understand torch._six.with_metaclass
class Variable(with_metaclass(VariableMeta, torch._C._LegacyVariableBase)): # type: ignore[misc]
pass
from torch._C import _ImperativeEngine as ImperativeEngine
Variable._execution_engine = ImperativeEngine()
| pytorch-master | torch/autograd/variable.py |
import torch
from typing import Tuple, List
from . import forward_ad as fwAD
from torch._vmap_internals import _vmap
# Utility functions
def _as_tuple_nocheck(x):
if isinstance(x, tuple):
return x
elif isinstance(x, list):
return tuple(x)
else:
return x,
def _as_tuple(inp, arg_name=None, fn_name=None):
# Ensures that inp is a tuple of Tensors
# Returns whether or not the original inp was a tuple and the tupled version of the input
if arg_name is None and fn_name is None:
return _as_tuple_nocheck(inp)
is_inp_tuple = True
if not isinstance(inp, tuple):
inp = (inp,)
is_inp_tuple = False
for i, el in enumerate(inp):
if not isinstance(el, torch.Tensor):
if is_inp_tuple:
raise TypeError("The {} given to {} must be either a Tensor or a tuple of Tensors but the"
" value at index {} has type {}.".format(arg_name, fn_name, i, type(el)))
else:
raise TypeError("The {} given to {} must be either a Tensor or a tuple of Tensors but the"
" given {} has type {}.".format(arg_name, fn_name, arg_name, type(el)))
return is_inp_tuple, inp
def _tuple_postprocess(res, to_unpack):
# Unpacks a potentially nested tuple of Tensors
# to_unpack should be a single boolean or a tuple of two booleans.
# It is used to:
# - invert _as_tuple when res should match the inp given to _as_tuple
# - optionally remove nesting of two tuples created by multiple calls to _as_tuple
if isinstance(to_unpack, tuple):
assert len(to_unpack) == 2
if not to_unpack[1]:
res = tuple(el[0] for el in res)
if not to_unpack[0]:
res = res[0]
else:
if not to_unpack:
res = res[0]
return res
def _grad_preprocess(inputs, create_graph, need_graph):
# Preprocess the inputs to make sure they require gradient
# inputs is a tuple of Tensors to preprocess
# create_graph specifies if the user wants gradients to flow back to the Tensors in inputs
# need_graph specifies if we internally want gradients to flow back to the Tensors in res
# Note that we *always* create a new Tensor object to be able to see the difference between
# inputs given as arguments and the same Tensors automatically captured by the user function.
# Check this issue for more details on how that can happen: https://github.com/pytorch/pytorch/issues/32576
res = []
for inp in inputs:
if create_graph and inp.requires_grad:
# Create at least a new Tensor object in a differentiable way
if not inp.is_sparse:
# Use .view_as() to get a shallow copy
res.append(inp.view_as(inp))
else:
# We cannot use view for sparse Tensors so we clone
res.append(inp.clone())
else:
res.append(inp.detach().requires_grad_(need_graph))
return tuple(res)
def _grad_postprocess(inputs, create_graph):
# Postprocess the generated Tensors to avoid returning Tensors with history when the user did not
# request it.
if isinstance(inputs[0], torch.Tensor):
if not create_graph:
return tuple(inp.detach() for inp in inputs)
else:
return inputs
else:
return tuple(_grad_postprocess(inp, create_graph) for inp in inputs)
def _validate_v(v, other, is_other_tuple):
# This assumes that other is the correct shape, and v should match
# Both are assumed to be tuples of Tensors
if len(other) != len(v):
if is_other_tuple:
raise RuntimeError("v is a tuple of invalid length: should be {} but got {}.".format(len(other), len(v)))
else:
raise RuntimeError("The given v should contain a single Tensor.")
for idx, (el_v, el_other) in enumerate(zip(v, other)):
if el_v.size() != el_other.size():
prepend = ""
if is_other_tuple:
prepend = "Entry {} in ".format(idx)
raise RuntimeError("{}v has invalid size: should be {} but got {}.".format(
prepend, el_other.size(), el_v.size()))
def _check_requires_grad(inputs, input_type, strict):
# Used to make all the necessary checks to raise nice errors in strict mode.
if not strict:
return
if input_type not in ["outputs", "grad_inputs", "jacobian", "hessian"]:
raise RuntimeError("Invalid input_type to _check_requires_grad")
for i, inp in enumerate(inputs):
if inp is None:
# This can only be reached for grad_inputs.
raise RuntimeError("The output of the user-provided function is independent of input {}."
" This is not allowed in strict mode.".format(i))
if not inp.requires_grad:
if input_type == "hessian":
raise RuntimeError("The hessian of the user-provided function with respect to input {}"
" is independent of the input. This is not allowed in strict mode."
" You should ensure that your function is thrice differentiable and that"
" the hessian depends on the inputs.".format(i))
elif input_type == "jacobian":
raise RuntimeError("While computing the hessian, found that the jacobian of the user-provided"
" function with respect to input {} is independent of the input. This is not"
" allowed in strict mode. You should ensure that your function is twice"
" differentiable and that the jacobian depends on the inputs (this would be"
" violated by a linear function for example).".format(i))
elif input_type == "grad_inputs":
raise RuntimeError("The gradient with respect to input {} is independent of the inputs of the"
" user-provided function. This is not allowed in strict mode.".format(i))
else:
raise RuntimeError("Output {} of the user-provided function does not require gradients."
" The outputs must be computed in a differentiable manner from the input"
" when running in strict mode.".format(i))
def _autograd_grad(outputs, inputs, grad_outputs=None, create_graph=False, retain_graph=None, is_grads_batched=False):
    # Version of autograd.grad that accepts `None` in outputs and does not compute gradients for them.
# This has the extra constraint that inputs has to be a tuple
assert isinstance(outputs, tuple)
if grad_outputs is None:
grad_outputs = (None,) * len(outputs)
assert isinstance(grad_outputs, tuple)
assert len(outputs) == len(grad_outputs)
new_outputs: Tuple[torch.Tensor, ...] = tuple()
new_grad_outputs: Tuple[torch.Tensor, ...] = tuple()
for out, grad_out in zip(outputs, grad_outputs):
if out is not None and out.requires_grad:
new_outputs += (out,)
new_grad_outputs += (grad_out,)
if len(new_outputs) == 0:
# No differentiable output, we don't need to call the autograd engine
return (None,) * len(inputs)
else:
return torch.autograd.grad(new_outputs, inputs, new_grad_outputs, allow_unused=True,
create_graph=create_graph, retain_graph=retain_graph,
is_grads_batched=is_grads_batched)
def _fill_in_zeros(grads, refs, strict, create_graph, stage):
# Used to detect None in the grads and depending on the flags, either replace them
# with Tensors full of 0s of the appropriate size based on the refs or raise an error.
    # strict and create_graph allow us to detect when it is appropriate to raise an error
    # stage tells us which backward call we are considering, so that we can give a good error message
if stage not in ["back", "back_trick", "double_back", "double_back_trick"]:
raise RuntimeError("Invalid stage argument '{}' to _fill_in_zeros".format(stage))
res: Tuple[torch.Tensor, ...] = tuple()
for i, grads_i in enumerate(grads):
if grads_i is None:
if strict:
if stage == "back":
raise RuntimeError("The output of the user-provided function is independent of "
"input {}. This is not allowed in strict mode.".format(i))
elif stage == "back_trick":
raise RuntimeError("The gradient with respect to the input is independent of entry {}"
" in the grad_outputs when using the double backward trick to compute"
" forward mode gradients. This is not allowed in strict mode.".format(i))
elif stage == "double_back":
raise RuntimeError("The jacobian of the user-provided function is independent of "
"input {}. This is not allowed in strict mode.".format(i))
else:
raise RuntimeError("The hessian of the user-provided function is independent of "
"entry {} in the grad_jacobian. This is not allowed in strict "
"mode as it prevents from using the double backward trick to "
"replace forward mode AD.".format(i))
grads_i = torch.zeros_like(refs[i])
else:
if strict and create_graph and not grads_i.requires_grad:
if "double" not in stage:
raise RuntimeError("The jacobian of the user-provided function is independent of "
"input {}. This is not allowed in strict mode when create_graph=True.".format(i))
else:
raise RuntimeError("The hessian of the user-provided function is independent of "
"input {}. This is not allowed in strict mode when create_graph=True.".format(i))
res += (grads_i,)
return res
# Public API
def vjp(func, inputs, v=None, create_graph=False, strict=False):
r"""Function that computes the dot product between a vector ``v`` and the
Jacobian of the given function at the point given by the inputs.
Args:
func (function): a Python function that takes Tensor inputs and returns
a tuple of Tensors or a Tensor.
inputs (tuple of Tensors or Tensor): inputs to the function ``func``.
v (tuple of Tensors or Tensor): The vector for which the vector
Jacobian product is computed. Must be the same size as the output
of ``func``. This argument is optional when the output of ``func``
contains a single element and (if it is not provided) will be set
as a Tensor containing a single ``1``.
create_graph (bool, optional): If ``True``, both the output and result
will be computed in a differentiable way. Note that when ``strict``
            is ``False``, the result may not require gradients or may be
disconnected from the inputs. Defaults to ``False``.
strict (bool, optional): If ``True``, an error will be raised when we
detect that there exists an input such that all the outputs are
independent of it. If ``False``, we return a Tensor of zeros as the
vjp for said inputs, which is the expected mathematical value.
Defaults to ``False``.
Returns:
output (tuple): tuple with:
func_output (tuple of Tensors or Tensor): output of ``func(inputs)``
vjp (tuple of Tensors or Tensor): result of the dot product with
the same shape as the inputs.
Example:
>>> def exp_reducer(x):
... return x.exp().sum(dim=1)
>>> inputs = torch.rand(4, 4)
>>> v = torch.ones(4)
>>> # xdoctest: +IGNORE_WANT("non-deterministic")
>>> vjp(exp_reducer, inputs, v)
(tensor([5.7817, 7.2458, 5.7830, 6.7782]),
tensor([[1.4458, 1.3962, 1.3042, 1.6354],
[2.1288, 1.0652, 1.5483, 2.5035],
[2.2046, 1.1292, 1.1432, 1.3059],
[1.3225, 1.6652, 1.7753, 2.0152]]))
>>> vjp(exp_reducer, inputs, v, create_graph=True)
(tensor([5.7817, 7.2458, 5.7830, 6.7782], grad_fn=<SumBackward1>),
tensor([[1.4458, 1.3962, 1.3042, 1.6354],
[2.1288, 1.0652, 1.5483, 2.5035],
[2.2046, 1.1292, 1.1432, 1.3059],
[1.3225, 1.6652, 1.7753, 2.0152]], grad_fn=<MulBackward0>))
>>> def adder(x, y):
... return 2 * x + 3 * y
>>> inputs = (torch.rand(2), torch.rand(2))
>>> v = torch.ones(2)
>>> vjp(adder, inputs, v)
(tensor([2.4225, 2.3340]),
(tensor([2., 2.]), tensor([3., 3.])))
"""
with torch.enable_grad():
is_inputs_tuple, inputs = _as_tuple(inputs, "inputs", "vjp")
inputs = _grad_preprocess(inputs, create_graph=create_graph, need_graph=True)
outputs = func(*inputs)
is_outputs_tuple, outputs = _as_tuple(outputs, "outputs of the user-provided function", "vjp")
_check_requires_grad(outputs, "outputs", strict=strict)
if v is not None:
_, v = _as_tuple(v, "v", "vjp")
v = _grad_preprocess(v, create_graph=create_graph, need_graph=False)
_validate_v(v, outputs, is_outputs_tuple)
else:
if len(outputs) != 1 or outputs[0].nelement() != 1:
raise RuntimeError("The vector v can only be None if the "
"user-provided function returns "
"a single Tensor with a single element.")
enable_grad = True if create_graph else torch.is_grad_enabled()
with torch.set_grad_enabled(enable_grad):
grad_res = _autograd_grad(outputs, inputs, v, create_graph=create_graph)
vjp = _fill_in_zeros(grad_res, inputs, strict, create_graph, "back")
# Cleanup objects and return them to the user
outputs = _grad_postprocess(outputs, create_graph)
vjp = _grad_postprocess(vjp, create_graph)
return _tuple_postprocess(outputs, is_outputs_tuple), _tuple_postprocess(vjp, is_inputs_tuple)
def jvp(func, inputs, v=None, create_graph=False, strict=False):
r"""Function that computes the dot product between the Jacobian of
the given function at the point given by the inputs and a vector ``v``.
Args:
func (function): a Python function that takes Tensor inputs and returns
a tuple of Tensors or a Tensor.
inputs (tuple of Tensors or Tensor): inputs to the function ``func``.
v (tuple of Tensors or Tensor): The vector for which the Jacobian
vector product is computed. Must be the same size as the input of
``func``. This argument is optional when the input to ``func``
contains a single element and (if it is not provided) will be set
as a Tensor containing a single ``1``.
create_graph (bool, optional): If ``True``, both the output and result
will be computed in a differentiable way. Note that when ``strict``
            is ``False``, the result may not require gradients or may be
disconnected from the inputs. Defaults to ``False``.
strict (bool, optional): If ``True``, an error will be raised when we
detect that there exists an input such that all the outputs are
independent of it. If ``False``, we return a Tensor of zeros as the
jvp for said inputs, which is the expected mathematical value.
Defaults to ``False``.
Returns:
output (tuple): tuple with:
func_output (tuple of Tensors or Tensor): output of ``func(inputs)``
jvp (tuple of Tensors or Tensor): result of the dot product with
the same shape as the output.
Note:
``autograd.functional.jvp`` computes the jvp by using the backward of
the backward (sometimes called the double backwards trick). This is not
the most performant way of computing the jvp. Please consider using
`functorch's jvp <https://github.com/pytorch/functorch#jvp>`_
or the :ref:`low-level forward-mode AD API <forward-mode-ad>` instead.
Example:
>>> def exp_reducer(x):
... return x.exp().sum(dim=1)
>>> inputs = torch.rand(4, 4)
>>> v = torch.ones(4, 4)
>>> # xdoctest: +IGNORE_WANT("non-deterministic")
>>> jvp(exp_reducer, inputs, v)
(tensor([6.3090, 4.6742, 7.9114, 8.2106]),
tensor([6.3090, 4.6742, 7.9114, 8.2106]))
>>> jvp(exp_reducer, inputs, v, create_graph=True)
(tensor([6.3090, 4.6742, 7.9114, 8.2106], grad_fn=<SumBackward1>),
tensor([6.3090, 4.6742, 7.9114, 8.2106], grad_fn=<SqueezeBackward1>))
>>> def adder(x, y):
... return 2 * x + 3 * y
>>> inputs = (torch.rand(2), torch.rand(2))
>>> v = (torch.ones(2), torch.ones(2))
>>> jvp(adder, inputs, v)
(tensor([2.2399, 2.5005]),
tensor([5., 5.]))
"""
with torch.enable_grad():
is_inputs_tuple, inputs = _as_tuple(inputs, "inputs", "jvp")
inputs = _grad_preprocess(inputs, create_graph=create_graph, need_graph=True)
if v is not None:
_, v = _as_tuple(v, "v", "jvp")
v = _grad_preprocess(v, create_graph=create_graph, need_graph=False)
_validate_v(v, inputs, is_inputs_tuple)
else:
if len(inputs) != 1 or inputs[0].nelement() != 1:
raise RuntimeError("The vector v can only be None if the input to "
"the user-provided function is a single Tensor "
"with a single element.")
outputs = func(*inputs)
is_outputs_tuple, outputs = _as_tuple(outputs, "outputs of the user-provided function", "jvp")
_check_requires_grad(outputs, "outputs", strict=strict)
# The backward is linear so the value of grad_outputs is not important as
# it won't appear in the double backward graph. We only need to ensure that
# it does not contain inf or nan.
grad_outputs = tuple(torch.zeros_like(out, requires_grad=True) for out in outputs)
grad_inputs = _autograd_grad(outputs, inputs, grad_outputs, create_graph=True)
_check_requires_grad(grad_inputs, "grad_inputs", strict=strict)
if create_graph:
with torch.enable_grad():
grad_res = _autograd_grad(grad_inputs, grad_outputs, v, create_graph=create_graph)
jvp = _fill_in_zeros(grad_res, outputs, strict, create_graph, "back_trick")
else:
grad_res = _autograd_grad(grad_inputs, grad_outputs, v, create_graph=create_graph)
jvp = _fill_in_zeros(grad_res, outputs, strict, create_graph, "back_trick")
# Cleanup objects and return them to the user
outputs = _grad_postprocess(outputs, create_graph)
jvp = _grad_postprocess(jvp, create_graph)
return _tuple_postprocess(outputs, is_outputs_tuple), _tuple_postprocess(jvp, is_outputs_tuple)
def _construct_standard_basis_for(tensors: Tuple[torch.Tensor, ...], tensor_numels: Tuple[int, ...]) -> Tuple[torch.Tensor, ...]:
# This function:
    # - constructs an N=sum(tensor_numels) standard basis, i.e. an NxN identity matrix.
# - Splits the identity matrix into chunks with each chunk size determined by `tensor_numels`.
# - Each chunk corresponds to one tensor. The chunk has the same dtype and
# device as the tensor
#
# For example, with tensor_numels = [1, 2, 1], this function returns:
# ( tensor([[1], tensor([[0, 0], tensor([[0],
# [0], [1, 0], [0],
# [0], [0, 1], [0],
# [0]]) , [0, 0]]) , [1]]) )
#
# Precondition: tensor_numels == tuple(tensor.numel() for tensor in tensors)
# Precondition: tensors always has at least one element.
#
# See NOTE: [Computing jacobian with vmap and grad for multiple tensors]
# for context behind this function. All the pre-conditions are guarded for
# in torch.autograd.functional.jacobian.
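    # For example, _construct_standard_basis_for((torch.randn(2), torch.randn(3)), (2, 3))
    # returns chunks of shapes (5, 2) and (5, 3); concatenated along dim=1 they
    # form the 5x5 identity matrix.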
assert len(tensors) == len(tensor_numels)
assert len(tensors) > 0
total_numel = sum(tensor_numels)
chunks = tuple(tensor.new_zeros(total_numel, tensor_numel)
for tensor, tensor_numel in zip(tensors, tensor_numels))
diag_start_idx = 0
for chunk, numel in zip(chunks, tensor_numels):
chunk.diagonal(diag_start_idx).fill_(1)
diag_start_idx -= numel
return chunks
def _jacfwd(func, inputs, strict=False, vectorize=False):
if strict:
raise RuntimeError('torch.autograd.functional.jacobian: `strict=True` '
'and `strategy="forward-mode"` are not supported together (yet). '
'Please either set `strict=False` or '
'`strategy="reverse-mode"`.')
is_inputs_tuple, inputs = _as_tuple(inputs, "inputs", "jacobian")
output_info = []
if vectorize:
# See NOTE: [Computing jacobian with vmap and grad for multiple outputs]
input_numels = tuple(input.numel() for input in inputs)
# Step 1: Prepare tangents
tangents = _construct_standard_basis_for(inputs, input_numels)
# Step 2: Compute vmap over computation with dual tensors
def jvp(tangents):
with fwAD.dual_level():
dual_inputs = tuple(
fwAD.make_dual(input, tangent.view_as(input)) for input, tangent in zip(inputs, tangents))
_is_outputs_tuple, dual_outputs = _as_tuple(func(*dual_inputs), "outputs")
output_info.append(_is_outputs_tuple)
jv = []
primal_outs = []
for dual_out in dual_outputs:
primal, tangent = fwAD.unpack_dual(dual_out)
primal_outs.append(primal)
if tangent is not None:
jv.append(tangent)
else:
jv.append(torch.zeros_like(primal))
output_info.append(primal_outs)
return tuple(jv)
outputs_before_split = _vmap(jvp)(tangents)
is_outputs_tuple, outputs = output_info
# Step 3: for each of the output tangents, split along dim 0
jacobian_input_output = []
for jac, output_i in zip(outputs_before_split, outputs):
jacobian_output_i_output = []
for jac, input_j in zip(jac.split(input_numels, dim=0), inputs):
# We need to transpose the Jacobian because in forward AD, the
# batch dimension represents that of the inputs
jacobian_input_i_output_j = jac.permute(*range(1, jac.ndim), 0) \
.reshape(tuple([*output_i.shape, *input_j.shape])) # noqa: C409
jacobian_output_i_output.append(jacobian_input_i_output_j)
jacobian_input_output.append(jacobian_output_i_output)
# Omit [Step 4] because everything is already transposed w/ forward AD
return _tuple_postprocess(jacobian_input_output, (is_outputs_tuple, is_inputs_tuple))
else:
        raise NotImplementedError("Computing Jacobian using forward-AD or forward-over-reverse Hessian is "
"only implemented for `vectorize=True`.")
def jacobian(func, inputs, create_graph=False, strict=False, vectorize=False, strategy="reverse-mode"):
r"""Function that computes the Jacobian of a given function.
Args:
func (function): a Python function that takes Tensor inputs and returns
a tuple of Tensors or a Tensor.
inputs (tuple of Tensors or Tensor): inputs to the function ``func``.
create_graph (bool, optional): If ``True``, the Jacobian will be
computed in a differentiable manner. Note that when ``strict`` is
            ``False``, the result may not require gradients or may be disconnected
from the inputs. Defaults to ``False``.
strict (bool, optional): If ``True``, an error will be raised when we
detect that there exists an input such that all the outputs are
independent of it. If ``False``, we return a Tensor of zeros as the
jacobian for said inputs, which is the expected mathematical value.
Defaults to ``False``.
vectorize (bool, optional): This feature is experimental.
Please consider using
`functorch's jacrev or jacfwd <https://github.com/pytorch/functorch#what-are-the-transforms>`_
instead if you are looking for something less experimental and more performant.
When computing the jacobian, usually we invoke
``autograd.grad`` once per row of the jacobian. If this flag is
``True``, we perform only a single ``autograd.grad`` call with
``batched_grad=True`` which uses the vmap prototype feature.
Though this should lead to performance improvements in many cases,
because this feature is still experimental, there may be performance
cliffs. See :func:`torch.autograd.grad`'s ``batched_grad`` parameter for
more information.
strategy (str, optional): Set to ``"forward-mode"`` or ``"reverse-mode"`` to
determine whether the Jacobian will be computed with forward or reverse
            mode AD. Currently, ``"forward-mode"`` requires ``vectorize=True``.
Defaults to ``"reverse-mode"``. If ``func`` has more outputs than
inputs, ``"forward-mode"`` tends to be more performant. Otherwise,
prefer to use ``"reverse-mode"``.
Returns:
Jacobian (Tensor or nested tuple of Tensors): if there is a single
input and output, this will be a single Tensor containing the
Jacobian for the linearized inputs and output. If one of the two is
a tuple, then the Jacobian will be a tuple of Tensors. If both of
them are tuples, then the Jacobian will be a tuple of tuple of
Tensors where ``Jacobian[i][j]`` will contain the Jacobian of the
``i``\th output and ``j``\th input and will have as size the
concatenation of the sizes of the corresponding output and the
corresponding input and will have same dtype and device as the
corresponding input. If strategy is ``forward-mode``, the dtype will be
that of the output; otherwise, the input.
Example:
>>> def exp_reducer(x):
... return x.exp().sum(dim=1)
>>> inputs = torch.rand(2, 2)
>>> # xdoctest: +IGNORE_WANT("non-deterministic")
>>> jacobian(exp_reducer, inputs)
tensor([[[1.4917, 2.4352],
[0.0000, 0.0000]],
[[0.0000, 0.0000],
[2.4369, 2.3799]]])
>>> jacobian(exp_reducer, inputs, create_graph=True)
tensor([[[1.4917, 2.4352],
[0.0000, 0.0000]],
[[0.0000, 0.0000],
[2.4369, 2.3799]]], grad_fn=<ViewBackward>)
>>> def exp_adder(x, y):
... return 2 * x.exp() + 3 * y
>>> inputs = (torch.rand(2), torch.rand(2))
>>> jacobian(exp_adder, inputs)
(tensor([[2.8052, 0.0000],
[0.0000, 3.3963]]),
tensor([[3., 0.],
[0., 3.]]))
"""
assert strategy in ("forward-mode", "reverse-mode"), (
'Expected strategy to be either "forward-mode" or "reverse-mode". Hint: If your '
'function has more outputs than inputs, "forward-mode" tends to be more performant. '
'Otherwise, prefer to use "reverse-mode".')
if strategy == "forward-mode":
if create_graph:
raise NotImplementedError('torch.autograd.functional.jacobian: `create_graph=True` '
'and `strategy="forward-mode"` are not supported together (yet). '
'Please either set `create_graph=False` or '
'`strategy="reverse-mode"`.')
return _jacfwd(func, inputs, strict, vectorize)
with torch.enable_grad():
is_inputs_tuple, inputs = _as_tuple(inputs, "inputs", "jacobian")
inputs = _grad_preprocess(inputs, create_graph=create_graph, need_graph=True)
outputs = func(*inputs)
is_outputs_tuple, outputs = _as_tuple(outputs,
"outputs of the user-provided function",
"jacobian")
_check_requires_grad(outputs, "outputs", strict=strict)
if vectorize:
if strict:
raise RuntimeError('torch.autograd.functional.jacobian: `strict=True` '
                               'and `vectorize=True` are not supported together. '
'Please either set `strict=False` or '
'`vectorize=False`.')
# NOTE: [Computing jacobian with vmap and grad for multiple outputs]
#
# Let's consider f(x) = (x**2, x.sum()) and let x = torch.randn(3).
# It turns out we can compute the jacobian of this function with a single
# call to autograd.grad by using vmap over the correct grad_outputs.
#
# Firstly, one way to compute the jacobian is to stack x**2 and x.sum()
# into a 4D vector. E.g., use g(x) = torch.stack([x**2, x.sum()])
#
# To get the first row of the jacobian, we call
# >>> autograd.grad(g(x), x, grad_outputs=torch.tensor([1, 0, 0, 0]))
# To get the 2nd row of the jacobian, we call
# >>> autograd.grad(g(x), x, grad_outputs=torch.tensor([0, 1, 0, 0]))
# and so on.
#
# Using vmap, we can vectorize all 4 of these computations into one by
# passing the standard basis for R^4 as the grad_output.
# vmap(partial(autograd.grad, g(x), x))(torch.eye(4)).
#
# Now, how do we compute the jacobian *without stacking the output*?
# We can just split the standard basis across the outputs. So to
# compute the jacobian of f(x), we'd use
# >>> autograd.grad(f(x), x, grad_outputs=_construct_standard_basis_for(...))
# The grad_outputs looks like the following:
# ( torch.tensor([[1, 0, 0],
# [0, 1, 0],
# [0, 0, 1],
# [0, 0, 0]]),
# torch.tensor([[0],
# [0],
# [0],
# [1]]) )
#
# But we're not done yet!
# >>> vmap(partial(autograd.grad(f(x), x, grad_outputs=...)))
# returns a Tensor of shape [4, 3]. We have to remember to split the
# jacobian of shape [4, 3] into two:
# - one of shape [3, 3] for the first output
# - one of shape [ 3] for the second output
# Step 1: Construct grad_outputs by splitting the standard basis
output_numels = tuple(output.numel() for output in outputs)
grad_outputs = _construct_standard_basis_for(outputs, output_numels)
flat_outputs = tuple(output.reshape(-1) for output in outputs)
# Step 2: Call vmap + autograd.grad
def vjp(grad_output):
vj = list(_autograd_grad(flat_outputs, inputs, grad_output, create_graph=create_graph, is_grads_batched=True))
for el_idx, vj_el in enumerate(vj):
if vj_el is not None:
continue
vj[el_idx] = torch.zeros_like(inputs[el_idx]).expand((sum(output_numels),) + inputs[el_idx].shape)
return tuple(vj)
jacobians_of_flat_output = vjp(grad_outputs)
# Step 3: The returned jacobian is one big tensor per input. In this step,
# we split each Tensor by output.
jacobian_input_output = []
for jac, input_i in zip(jacobians_of_flat_output, inputs):
jacobian_input_i_output = []
for jac, output_j in zip(jac.split(output_numels, dim=0), outputs):
jacobian_input_i_output_j = jac.view(output_j.shape + input_i.shape)
jacobian_input_i_output.append(jacobian_input_i_output_j)
jacobian_input_output.append(jacobian_input_i_output)
# Step 4: Right now, `jacobian` is a List[List[Tensor]].
# The outer List corresponds to the number of inputs,
# the inner List corresponds to the number of outputs.
# We need to exchange the order of these and convert to tuples
# before returning.
jacobian_output_input = tuple(zip(*jacobian_input_output))
jacobian_output_input = _grad_postprocess(jacobian_output_input, create_graph)
return _tuple_postprocess(jacobian_output_input, (is_outputs_tuple, is_inputs_tuple))
jacobian: Tuple[torch.Tensor, ...] = tuple()
for i, out in enumerate(outputs):
# mypy complains that expression and variable have different types due to the empty list
jac_i: Tuple[List[torch.Tensor]] = tuple([] for _ in range(len(inputs))) # type: ignore[assignment]
for j in range(out.nelement()):
vj = _autograd_grad((out.reshape(-1)[j],), inputs,
retain_graph=True, create_graph=create_graph)
for el_idx, (jac_i_el, vj_el, inp_el) in enumerate(zip(jac_i, vj, inputs)):
if vj_el is not None:
if strict and create_graph and not vj_el.requires_grad:
msg = ("The jacobian of the user-provided function is "
"independent of input {}. This is not allowed in "
"strict mode when create_graph=True.".format(i))
raise RuntimeError(msg)
jac_i_el.append(vj_el)
else:
if strict:
msg = ("Output {} of the user-provided function is "
"independent of input {}. This is not allowed in "
"strict mode.".format(i, el_idx))
raise RuntimeError(msg)
jac_i_el.append(torch.zeros_like(inp_el))
jacobian += (tuple(torch.stack(jac_i_el, dim=0).view(out.size() # type: ignore[operator]
+ inputs[el_idx].size()) for (el_idx, jac_i_el) in enumerate(jac_i)), )
jacobian = _grad_postprocess(jacobian, create_graph)
return _tuple_postprocess(jacobian, (is_outputs_tuple, is_inputs_tuple))
def hessian(func, inputs, create_graph=False, strict=False, vectorize=False, outer_jacobian_strategy="reverse-mode"):
r"""Function that computes the Hessian of a given scalar function.
Args:
func (function): a Python function that takes Tensor inputs and returns
a Tensor with a single element.
inputs (tuple of Tensors or Tensor): inputs to the function ``func``.
create_graph (bool, optional): If ``True``, the Hessian will be computed in
            a differentiable manner. Note that when ``strict`` is ``False``, the result may not
            require gradients or may be disconnected from the inputs.
Defaults to ``False``.
strict (bool, optional): If ``True``, an error will be raised when we detect that there exists an input
such that all the outputs are independent of it. If ``False``, we return a Tensor of zeros as the
hessian for said inputs, which is the expected mathematical value.
Defaults to ``False``.
vectorize (bool, optional): This feature is experimental.
Please consider using
`functorch <https://github.com/pytorch/functorch#what-are-the-transforms>`_
instead if you are looking for something less experimental and more performant.
When computing the hessian, usually we invoke
``autograd.grad`` once per row of the hessian. If this flag is
``True``, we use the vmap prototype feature as the backend to
vectorize calls to ``autograd.grad`` so we only invoke it once
instead of once per row. This should lead to performance
improvements in many use cases, however, due to this feature
being incomplete, there may be performance cliffs. Please
use `torch._C._debug_only_display_vmap_fallback_warnings(True)`
to show any performance warnings and file us issues if
warnings exist for your use case. Defaults to ``False``.
outer_jacobian_strategy (str, optional): The Hessian is computed by
computing the Jacobian of a Jacobian. The inner Jacobian is always
computed in reverse-mode AD. Setting strategy to ``"forward-mode"``
or ``"reverse-mode"`` determines whether the outer Jacobian will be
computed with forward or reverse mode AD. Currently, computing the outer
Jacobian in ``"forward-mode"`` requires ``vectorized=True``. Defaults
to ``"reverse-mode"``.
Returns:
Hessian (Tensor or a tuple of tuple of Tensors): if there is a single input,
this will be a single Tensor containing the Hessian for the input.
If it is a tuple, then the Hessian will be a tuple of tuples where
``Hessian[i][j]`` will contain the Hessian of the ``i``\th input
            and ``j``\th input, with size the concatenation of the sizes of the ``i``\th
            and ``j``\th inputs. ``Hessian[i][j]`` will have the same
dtype and device as the corresponding ``i``\th input.
Example:
>>> def pow_reducer(x):
... return x.pow(3).sum()
>>> inputs = torch.rand(2, 2)
>>> # xdoctest: +IGNORE_WANT("non-deterministic")
>>> hessian(pow_reducer, inputs)
tensor([[[[5.2265, 0.0000],
[0.0000, 0.0000]],
[[0.0000, 4.8221],
[0.0000, 0.0000]]],
[[[0.0000, 0.0000],
[1.9456, 0.0000]],
[[0.0000, 0.0000],
[0.0000, 3.2550]]]])
>>> hessian(pow_reducer, inputs, create_graph=True)
tensor([[[[5.2265, 0.0000],
[0.0000, 0.0000]],
[[0.0000, 4.8221],
[0.0000, 0.0000]]],
[[[0.0000, 0.0000],
[1.9456, 0.0000]],
[[0.0000, 0.0000],
[0.0000, 3.2550]]]], grad_fn=<ViewBackward>)
>>> def pow_adder_reducer(x, y):
... return (2 * x.pow(2) + 3 * y.pow(2)).sum()
>>> inputs = (torch.rand(2), torch.rand(2))
>>> hessian(pow_adder_reducer, inputs)
((tensor([[4., 0.],
[0., 4.]]),
tensor([[0., 0.],
[0., 0.]])),
(tensor([[0., 0.],
[0., 0.]]),
tensor([[6., 0.],
[0., 6.]])))
"""
is_inputs_tuple, inputs = _as_tuple(inputs, "inputs", "hessian")
assert outer_jacobian_strategy in ("forward-mode", "reverse-mode"), (
'Expected strategy to be either "forward-mode" or "reverse-mode".')
def ensure_single_output_function(*inp):
out = func(*inp)
is_out_tuple, t_out = _as_tuple(out, "outputs of the user-provided function", "hessian")
_check_requires_grad(t_out, "outputs", strict=strict)
if is_out_tuple or not isinstance(out, torch.Tensor):
raise RuntimeError("The function given to hessian should return a single Tensor")
if out.nelement() != 1:
raise RuntimeError("The Tensor returned by the function given to hessian should contain a single element")
return out.squeeze()
def jac_func(*inp):
if outer_jacobian_strategy == "forward-mode":
            # _grad_preprocess requires create_graph=True and inputs that require grad,
            # or else the input will be detached
inp = tuple(t.requires_grad_(True) for t in inp)
jac = jacobian(ensure_single_output_function, inp, create_graph=True)
_check_requires_grad(jac, "jacobian", strict=strict)
return jac
res = jacobian(jac_func, inputs, create_graph=create_graph, strict=strict, vectorize=vectorize,
strategy=outer_jacobian_strategy)
return _tuple_postprocess(res, (is_inputs_tuple, is_inputs_tuple))
def vhp(func, inputs, v=None, create_graph=False, strict=False):
r"""Function that computes the dot product between a vector ``v`` and the
Hessian of a given scalar function at the point given by the inputs.
Args:
func (function): a Python function that takes Tensor inputs and returns
a Tensor with a single element.
inputs (tuple of Tensors or Tensor): inputs to the function ``func``.
v (tuple of Tensors or Tensor): The vector for which the vector Hessian
product is computed. Must be the same size as the input of
``func``. This argument is optional when ``func``'s input contains
a single element and (if it is not provided) will be set as a
Tensor containing a single ``1``.
create_graph (bool, optional): If ``True``, both the output and result
will be computed in a differentiable way. Note that when ``strict``
            is ``False``, the result may not require gradients or may be
disconnected from the inputs.
Defaults to ``False``.
strict (bool, optional): If ``True``, an error will be raised when we
detect that there exists an input such that all the outputs are
independent of it. If ``False``, we return a Tensor of zeros as the
vhp for said inputs, which is the expected mathematical value.
Defaults to ``False``.
Returns:
output (tuple): tuple with:
func_output (tuple of Tensors or Tensor): output of ``func(inputs)``
vhp (tuple of Tensors or Tensor): result of the dot product with the
same shape as the inputs.
Example:
>>> def pow_reducer(x):
... return x.pow(3).sum()
>>> inputs = torch.rand(2, 2)
>>> v = torch.ones(2, 2)
>>> # xdoctest: +IGNORE_WANT("non-deterministic")
>>> vhp(pow_reducer, inputs, v)
(tensor(0.5591),
tensor([[1.0689, 1.2431],
[3.0989, 4.4456]]))
>>> vhp(pow_reducer, inputs, v, create_graph=True)
(tensor(0.5591, grad_fn=<SumBackward0>),
tensor([[1.0689, 1.2431],
[3.0989, 4.4456]], grad_fn=<MulBackward0>))
>>> def pow_adder_reducer(x, y):
... return (2 * x.pow(2) + 3 * y.pow(2)).sum()
>>> inputs = (torch.rand(2), torch.rand(2))
>>> v = (torch.zeros(2), torch.ones(2))
>>> vhp(pow_adder_reducer, inputs, v)
(tensor(4.8053),
(tensor([0., 0.]),
tensor([6., 6.])))
"""
with torch.enable_grad():
is_inputs_tuple, inputs = _as_tuple(inputs, "inputs", "vhp")
inputs = _grad_preprocess(inputs, create_graph=create_graph, need_graph=True)
if v is not None:
_, v = _as_tuple(v, "v", "vhp")
v = _grad_preprocess(v, create_graph=create_graph, need_graph=False)
_validate_v(v, inputs, is_inputs_tuple)
else:
if len(inputs) != 1 or inputs[0].nelement() != 1:
raise RuntimeError("The vector v can only be None if the input to the user-provided function "
"is a single Tensor with a single element.")
outputs = func(*inputs)
is_outputs_tuple, outputs = _as_tuple(outputs, "outputs of the user-provided function", "vhp")
_check_requires_grad(outputs, "outputs", strict=strict)
if is_outputs_tuple or not isinstance(outputs[0], torch.Tensor):
raise RuntimeError("The function given to vhp should return a single Tensor")
if outputs[0].nelement() != 1:
raise RuntimeError("The Tensor returned by the function given to vhp should contain a single element")
jac = _autograd_grad(outputs, inputs, create_graph=True)
_check_requires_grad(jac, "jacobian", strict=strict)
enable_grad = True if create_graph else torch.is_grad_enabled()
with torch.set_grad_enabled(enable_grad):
grad_res = _autograd_grad(jac, inputs, v, create_graph=create_graph)
vhp = _fill_in_zeros(grad_res, inputs, strict, create_graph, "double_back")
outputs = _grad_postprocess(outputs, create_graph)
vhp = _grad_postprocess(vhp, create_graph)
return _tuple_postprocess(outputs, is_outputs_tuple), _tuple_postprocess(vhp, is_inputs_tuple)
def hvp(func, inputs, v=None, create_graph=False, strict=False):
r"""Function that computes the dot product between the Hessian of a given scalar
function and a vector ``v`` at the point given by the inputs.
Args:
func (function): a Python function that takes Tensor inputs and returns
a Tensor with a single element.
inputs (tuple of Tensors or Tensor): inputs to the function ``func``.
v (tuple of Tensors or Tensor): The vector for which the Hessian vector
product is computed. Must be the same size as the input of
``func``. This argument is optional when ``func``'s input contains
a single element and (if it is not provided) will be set as a
Tensor containing a single ``1``.
create_graph (bool, optional): If ``True``, both the output and result will be
computed in a differentiable way. Note that when ``strict`` is
            ``False``, the result may not require gradients or may be disconnected
from the inputs. Defaults to ``False``.
strict (bool, optional): If ``True``, an error will be raised when we
detect that there exists an input such that all the outputs are
independent of it. If ``False``, we return a Tensor of zeros as the
hvp for said inputs, which is the expected mathematical value.
Defaults to ``False``.
Returns:
output (tuple): tuple with:
func_output (tuple of Tensors or Tensor): output of ``func(inputs)``
hvp (tuple of Tensors or Tensor): result of the dot product with
the same shape as the inputs.
Example:
>>> def pow_reducer(x):
... return x.pow(3).sum()
>>> inputs = torch.rand(2, 2)
>>> v = torch.ones(2, 2)
>>> # xdoctest: +IGNORE_WANT("non-deterministic")
>>> hvp(pow_reducer, inputs, v)
(tensor(0.1448),
tensor([[2.0239, 1.6456],
[2.4988, 1.4310]]))
>>> hvp(pow_reducer, inputs, v, create_graph=True)
(tensor(0.1448, grad_fn=<SumBackward0>),
tensor([[2.0239, 1.6456],
[2.4988, 1.4310]], grad_fn=<MulBackward0>))
>>> def pow_adder_reducer(x, y):
... return (2 * x.pow(2) + 3 * y.pow(2)).sum()
>>> inputs = (torch.rand(2), torch.rand(2))
>>> v = (torch.zeros(2), torch.ones(2))
>>> hvp(pow_adder_reducer, inputs, v)
(tensor(2.3030),
(tensor([0., 0.]),
tensor([6., 6.])))
Note:
This function is significantly slower than `vhp` due to backward mode AD constraints.
        If your function is twice continuously differentiable, then hvp = vhp.t(). So if you
        know that your function satisfies this condition, you should use vhp instead, which is
much faster with the current implementation.
"""
with torch.enable_grad():
is_inputs_tuple, inputs = _as_tuple(inputs, "inputs", "hvp")
inputs = _grad_preprocess(inputs, create_graph=create_graph, need_graph=True)
if v is not None:
_, v = _as_tuple(v, "v", "hvp")
v = _grad_preprocess(v, create_graph=create_graph, need_graph=False)
_validate_v(v, inputs, is_inputs_tuple)
else:
if len(inputs) != 1 or inputs[0].nelement() != 1:
raise RuntimeError("The vector v can only be None if the input to the user-provided function "
"is a single Tensor with a single element.")
outputs = func(*inputs)
is_outputs_tuple, outputs = _as_tuple(outputs, "outputs of the user-provided function", "hvp")
_check_requires_grad(outputs, "outputs", strict=strict)
if is_outputs_tuple or not isinstance(outputs[0], torch.Tensor):
raise RuntimeError("The function given to hvp should return a single Tensor")
if outputs[0].nelement() != 1:
raise RuntimeError("The Tensor returned by the function given to hvp should contain a single element")
jac = _autograd_grad(outputs, inputs, create_graph=True)
_check_requires_grad(jac, "jacobian", strict=strict)
grad_jac = tuple(torch.zeros_like(inp, requires_grad=True) for inp in inputs)
double_back = _autograd_grad(jac, inputs, grad_jac, create_graph=True)
_check_requires_grad(jac, "hessian", strict=strict)
enable_grad = True if create_graph else torch.is_grad_enabled()
with torch.set_grad_enabled(enable_grad):
grad_res = _autograd_grad(double_back, grad_jac, v, create_graph=create_graph)
hvp = _fill_in_zeros(grad_res, inputs, strict, create_graph, "double_back_trick")
outputs = _grad_postprocess(outputs, create_graph)
hvp = _grad_postprocess(hvp, create_graph)
return _tuple_postprocess(outputs, is_outputs_tuple), _tuple_postprocess(hvp, is_inputs_tuple)
| pytorch-master | torch/autograd/functional.py |
import sys
import torch
import functools
import inspect
from typing import Any, Callable, TypeVar, cast
__all__ = ['no_grad', 'enable_grad', 'set_grad_enabled',
'inference_mode']
# Used for annotating the decorator usage of 'no_grad' and 'enable_grad'.
# See https://mypy.readthedocs.io/en/latest/generics.html#declaring-decorators
FuncType = Callable[..., Any]
F = TypeVar('F', bound=FuncType)
class _DecoratorContextManager:
"""Allow a context manager to be used as a decorator"""
def __call__(self, func: F) -> F:
if inspect.isgeneratorfunction(func):
return self._wrap_generator(func)
@functools.wraps(func)
def decorate_context(*args, **kwargs):
with self.clone():
return func(*args, **kwargs)
return cast(F, decorate_context)
def _wrap_generator(self, func):
"""Wrap each generator invocation with the context manager"""
@functools.wraps(func)
def generator_context(*args, **kwargs):
gen = func(*args, **kwargs)
# Generators are suspended and unsuspended at `yield`, hence we
# make sure the grad mode is properly set every time the execution
# flow returns into the wrapped generator and restored when it
# returns through our `yield` to our caller (see PR #49017).
try:
# Issuing `None` to a generator fires it up
with self.clone():
response = gen.send(None)
while True:
try:
# Forward the response to our caller and get its next request
request = yield response
except GeneratorExit:
# Inform the still active generator about its imminent closure
with self.clone():
gen.close()
raise
except BaseException:
# Propagate the exception thrown at us by the caller
with self.clone():
response = gen.throw(*sys.exc_info())
else:
# Pass the last request to the generator and get its response
with self.clone():
response = gen.send(request)
# We let the exceptions raised above by the generator's `.throw` or
# `.send` methods bubble up to our caller, except for StopIteration
except StopIteration as e:
# The generator informed us that it is done: take whatever its
# returned value (if any) was and indicate that we're done too
# by returning it (see docs for python's return-statement).
return e.value
return generator_context
def __enter__(self) -> None:
raise NotImplementedError
def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
raise NotImplementedError
def clone(self):
        # override this method if your child class takes __init__ parameters
return self.__class__()
class no_grad(_DecoratorContextManager):
r"""Context-manager that disabled gradient calculation.
Disabling gradient calculation is useful for inference, when you are sure
that you will not call :meth:`Tensor.backward()`. It will reduce memory
consumption for computations that would otherwise have `requires_grad=True`.
In this mode, the result of every computation will have
`requires_grad=False`, even when the inputs have `requires_grad=True`.
This context manager is thread local; it will not affect computation
in other threads.
    Also functions as a decorator. (Make sure to instantiate with parentheses.)
.. note::
No-grad is one of several mechanisms that can enable or
        disable gradients locally; see :ref:`locally-disable-grad-doc` for
more information on how they compare.
.. note::
This API does not apply to :ref:`forward-mode AD <forward-mode-ad>`.
If you want to disable forward AD for a computation, you can unpack
your dual tensors.
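        A minimal sketch of that workaround using ``torch.autograd.forward_ad``
        (illustrative only)::
            >>> import torch.autograd.forward_ad as fwAD
            >>> with fwAD.dual_level():
            ...     x = fwAD.make_dual(torch.randn(3), torch.ones(3))
            ...     primal = fwAD.unpack_dual(x).primal  # the tangent is dropped
            ...     fwAD.unpack_dual(primal).tangent is None
            True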
Example::
>>> # xdoctest: +SKIP
>>> x = torch.tensor([1.], requires_grad=True)
>>> with torch.no_grad():
... y = x * 2
>>> y.requires_grad
False
>>> @torch.no_grad()
... def doubler(x):
... return x * 2
>>> z = doubler(x)
>>> z.requires_grad
False
"""
def __init__(self) -> None:
if not torch._jit_internal.is_scripting():
super().__init__()
self.prev = False
def __enter__(self) -> None:
self.prev = torch.is_grad_enabled()
torch.set_grad_enabled(False)
def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
torch.set_grad_enabled(self.prev)
class enable_grad(_DecoratorContextManager):
r"""Context-manager that enables gradient calculation.
Enables gradient calculation, if it has been disabled via :class:`~no_grad`
or :class:`~set_grad_enabled`.
This context manager is thread local; it will not affect computation
in other threads.
    Also functions as a decorator. (Make sure to instantiate with parentheses.)
.. note::
enable_grad is one of several mechanisms that can enable or
        disable gradients locally; see :ref:`locally-disable-grad-doc` for
more information on how they compare.
.. note::
This API does not apply to :ref:`forward-mode AD <forward-mode-ad>`.
Example::
>>> # xdoctest: +SKIP
>>> x = torch.tensor([1.], requires_grad=True)
>>> with torch.no_grad():
... with torch.enable_grad():
... y = x * 2
>>> y.requires_grad
True
>>> y.backward()
>>> x.grad
tensor([2.])
>>> @torch.enable_grad()
... def doubler(x):
... return x * 2
>>> with torch.no_grad():
... z = doubler(x)
>>> z.requires_grad
True
"""
def __enter__(self) -> None:
self.prev = torch.is_grad_enabled()
torch._C._set_grad_enabled(True)
def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
torch._C._set_grad_enabled(self.prev)
class set_grad_enabled(_DecoratorContextManager):
r"""Context-manager that sets gradient calculation to on or off.
``set_grad_enabled`` will enable or disable grads based on its argument :attr:`mode`.
It can be used as a context-manager or as a function.
This context manager is thread local; it will not affect computation
in other threads.
Args:
mode (bool): Flag whether to enable grad (``True``), or disable
(``False``). This can be used to conditionally enable
gradients.
.. note::
set_grad_enabled is one of several mechanisms that can enable or
        disable gradients locally; see :ref:`locally-disable-grad-doc` for
more information on how they compare.
.. note::
This API does not apply to :ref:`forward-mode AD <forward-mode-ad>`.
Example::
>>> # xdoctest: +SKIP
>>> x = torch.tensor([1.], requires_grad=True)
>>> is_train = False
>>> with torch.set_grad_enabled(is_train):
... y = x * 2
>>> y.requires_grad
False
>>> _ = torch.set_grad_enabled(True)
>>> y = x * 2
>>> y.requires_grad
True
>>> _ = torch.set_grad_enabled(False)
>>> y = x * 2
>>> y.requires_grad
False
"""
def __init__(self, mode: bool) -> None:
self.prev = torch.is_grad_enabled()
torch._C._set_grad_enabled(mode)
self.mode = mode
def __enter__(self) -> None:
pass
def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
torch._C._set_grad_enabled(self.prev)
def clone(self):
return self.__class__(self.mode)
class inference_mode(_DecoratorContextManager):
r"""Context-manager that enables or disables inference mode
InferenceMode is a new context manager analogous to :class:`~no_grad`
to be used when you are certain your operations will have no interactions
with autograd (e.g., model training). Code run under this mode gets better
performance by disabling view tracking and version counter bumps. Note that
unlike some other mechanisms that locally enable or disable grad,
entering inference_mode also disables to :ref:`forward-mode AD <forward-mode-ad>`.
This context manager is thread local; it will not affect computation
in other threads.
    Also functions as a decorator. (Make sure to instantiate with parentheses.)
.. note::
Inference mode is one of several mechanisms that can enable or
        disable gradients locally; see :ref:`locally-disable-grad-doc` for
more information on how they compare.
Args:
mode (bool): Flag whether to enable or disable inference mode
Example::
>>> import torch
>>> x = torch.ones(1, 2, 3, requires_grad=True)
>>> with torch.inference_mode():
... y = x * x
>>> y.requires_grad
False
>>> # xdoctest: +SKIP("want string isnt quite right")
>>> y._version
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
RuntimeError: Inference tensors do not track version counter.
>>> @torch.inference_mode()
... def func(x):
... return x * x
>>> out = func(x)
>>> out.requires_grad
False
"""
def __init__(self, mode=True):
if not torch._jit_internal.is_scripting():
super().__init__()
# Holds a python binding to a RAII guard that can enable or disable
# inference mode
self._inference_mode_raii_guard = None
self.mode = mode
def __enter__(self):
self._inference_mode_raii_guard = torch._C._InferenceMode(self.mode)
def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
del self._inference_mode_raii_guard
def clone(self):
return self.__class__(self.mode)
| pytorch-master | torch/autograd/grad_mode.py |
import itertools
import torch
from torch.autograd import DeviceType
from collections import defaultdict, namedtuple
from operator import attrgetter
from typing import Dict, List, Tuple, Optional
import bisect
import math
class EventList(list):
"""A list of Events (for pretty printing)"""
def __init__(self, *args, **kwargs):
use_cuda = kwargs.pop('use_cuda', True)
profile_memory = kwargs.pop('profile_memory', False)
with_flops = kwargs.pop('with_flops', False)
super(EventList, self).__init__(*args, **kwargs)
self._use_cuda = use_cuda
self._profile_memory = profile_memory
self._tree_built = False
self._with_flops = with_flops
def _build_tree(self):
self._populate_cpu_children()
self._remove_dup_nodes()
self._set_backward_stacktraces()
self._tree_built = True
def __str__(self):
return self.table()
def _remove_dup_nodes(self):
while True:
to_delete = set()
for idx in range(len(self)):
if (self[idx].cpu_parent is not None and
self[idx].cpu_parent.name == self[idx].name and
len(self[idx].cpu_parent.cpu_children) == 1):
self[idx].cpu_parent.cpu_children = self[idx].cpu_children
self[idx].cpu_parent.kernels = self[idx].kernels # lift kernels up
for ch in self[idx].cpu_children:
ch.cpu_parent = self[idx].cpu_parent
to_delete.add(idx)
if len(to_delete) == 0:
break
new_evts = [ev for ind, ev in enumerate(self) if ind not in to_delete]
self.clear()
self.extend(new_evts)
def _populate_cpu_children(self):
"""Populates child events into each underlying FunctionEvent object.
One event is a child of another if [s1, e1) is inside [s2, e2), where
s1 and e1 are the start and end of the child event's interval and s2 and
e2 are the start and end of the parent event's interval.
Example: in the event list [[0, 10], [1, 3], [3, 4]], the interval [0, 10]
would be the parent of the two other intervals.
If two intervals intersect only partially, this function will not record a
parent-child relationship between them.
"""
# Some events can be async (i.e. start and end on different threads),
# since it's generally undefined how to attribute children ranges to
# async ranges, we do not use them when calculating nested ranges and stats
sync_events = [evt for evt in self if not evt.is_async and evt.device_type == DeviceType.CPU]
events = sorted(
sync_events,
key=attrgetter("thread"),
)
# Group by both thread and node_id, so that events that happen to have
# the same thread_id but are from different nodes aren't incorrectly
# grouped together.
threads = itertools.groupby(
events, key=lambda event: (event.thread, event.node_id)
)
# For each thread we keep a stack of current nested parents.
# We maintain the invariant that each interval is a subset of all other
# intervals lower in the stack.
#
# First we sort the intervals by their start time. Then we iterate over them.
# Every time we see a new interval we pop parents from the top of the
# stack until the invariant is restored. Then, if the stack is not empty,
# a parent-child relationship is recorded.
# Finally we push the new interval onto the stack.
#
# The algorithm has O(N * log(N)) complexity, where N is the number of
# intervals.
for thread_id, thread_events in threads:
thread_events_ = sorted(
thread_events,
key=lambda event: [event.time_range.start, -event.time_range.end],
)
current_events: List[FunctionEvent] = []
cur_end = 0
for event in thread_events_:
while len(current_events) > 0:
parent = current_events[-1]
if event.time_range.start >= parent.time_range.end or \
event.time_range.end > parent.time_range.end:
# this can't be a parent
current_events.pop()
else:
parent.append_cpu_child(event)
assert (
event.cpu_parent is None
), "There is already a CPU parent event for {}".format(
event.key
)
event.set_cpu_parent(parent)
break
current_events.append(event)
def _set_backward_stacktraces(self):
def bw_parent(evt):
if evt is None:
return None
elif evt.scope == 1: # BACKWARD_FUNCTION
return evt
else:
return bw_parent(evt.cpu_parent)
fwd_stacks = {}
for evt in self:
if bw_parent(evt) is None and evt.stack is not None:
t = (evt.sequence_nr, evt.thread)
if t not in fwd_stacks:
fwd_stacks[t] = evt.stack
for evt in self:
p = bw_parent(evt)
if p is not None:
assert p.fwd_thread is not None
t = (p.sequence_nr, p.fwd_thread)
if t in fwd_stacks:
evt.stack = fwd_stacks[t]
else:
evt.stack = []
@property
def self_cpu_time_total(self):
return sum([event.self_cpu_time_total for event in self])
def table(self, sort_by=None, row_limit=100, max_src_column_width=75, header=None, top_level_events_only=False):
"""Prints an EventList as a nicely formatted table.
Args:
sort_by (str, optional): Attribute used to sort entries. By default
they are printed in the same order as they were registered.
Valid keys include: ``cpu_time``, ``cuda_time``, ``cpu_time_total``,
``cuda_time_total``, ``cpu_memory_usage``, ``cuda_memory_usage``,
``self_cpu_memory_usage``, ``self_cuda_memory_usage``, ``count``.
top_level_events_only(bool, optional): Boolean flag to determine the
selection of events to display. If True, the profiler will only
display top-level events, such as the top-level invocation of a Python
`lstm`, a Python `add`, or other functions; nested events such as
low-level cpu/cuda ops are omitted to keep the profiler output readable.
Returns:
A string containing the table.
"""
return _build_table(
self,
sort_by=sort_by,
row_limit=row_limit,
max_src_column_width=max_src_column_width,
header=header,
profile_memory=self._profile_memory,
with_flops=self._with_flops,
top_level_events_only=top_level_events_only)
def export_chrome_trace(self, path):
"""Exports an EventList as a Chrome tracing tools file.
The checkpoint can be later loaded and inspected under ``chrome://tracing`` URL.
Args:
path (str): Path where the trace will be written.
"""
import os
with open(path, 'w') as f:
chrome_events = []
next_id = 0
# Use file IO over using json.dump since JSON dumping is very slow and
# this technique is proven to give a 4x speedup.
f.write("[")
for evt in self:
if evt.trace_name is None:
continue
f.write(
'{"name": "%s", '
'"ph": "X", '
'"ts": %s, '
'"dur": %s, '
'"tid": %s, '
'"pid": "CPU functions", '
'"args": {}}, '
% (
evt.trace_name,
evt.time_range.start,
evt.time_range.elapsed_us(),
evt.thread
if not evt.is_remote
else f'" node_id:{evt.node_id}, thread_id:{evt.thread} "',
)
)
for k in evt.kernels:
# 's' and 'f' draw Flow arrows from
# the CPU launch to the GPU kernel
f.write('{"name": "%s", '
'"ph": "s", '
'"ts": %s, '
'"tid": %s, '
'"pid": "CPU functions", '
'"id": %s, '
'"cat": "cpu_to_cuda", '
'"args": {}}, ' % (evt.trace_name, evt.time_range.start,
evt.thread, next_id))
# Note: use torch.profiler to get device kernel trace
next_id += 1
if len(self) > 0:
# remove trailing whitespace and comma
f.seek(f.tell() - 2, os.SEEK_SET)
f.truncate()
f.write("]")
def supported_export_stacks_metrics(self):
return ["self_cpu_time_total", "self_cuda_time_total"]
def export_stacks(self, path: str, metric: str):
if metric not in self.supported_export_stacks_metrics():
raise ValueError("metric should be one of: " + str(self.supported_export_stacks_metrics()))
translate_table = str.maketrans(" ;\t\n", "____")
with open(path, 'w') as f:
for evt in self:
if evt.stack and len(evt.stack) > 0:
metric_value = getattr(evt, metric)
if int(metric_value) > 0:
stack_str = ""
for entry in reversed(evt.stack):
stack_str += entry.translate(translate_table)
stack_str += ";"
stack_str = stack_str[:-1] + " " + str(int(metric_value))
f.write(stack_str + "\n")
def key_averages(self, group_by_input_shapes=False, group_by_stack_n=0):
"""Averages all function events over their keys.
Args:
group_by_input_shapes: group entries by
(event name, input shapes) rather than just event name.
This is useful to see which input shapes contribute to the runtime
the most, and it may help with size-specific optimizations or with
choosing the best candidates for quantization (i.e. fitting a roofline model).
group_by_stack_n: group by top n stack trace entries
Returns:
An EventList containing FunctionEventAvg objects.
"""
assert self._tree_built
stats: Dict[Tuple[str, ...], FunctionEventAvg] = defaultdict(FunctionEventAvg)
def get_key(event, group_by_input_shapes, group_by_stack_n) -> Tuple[str, ...]:
key = [str(event.key), str(event.node_id), str(event.device_type), str(event.is_legacy)]
if group_by_input_shapes:
key.append(str(event.input_shapes))
if group_by_stack_n > 0:
key += event.stack[:group_by_stack_n]
return tuple(key)
for evt in self:
stats[get_key(evt, group_by_input_shapes, group_by_stack_n)].add(evt)
avg_list = EventList(
stats.values(),
use_cuda=self._use_cuda,
profile_memory=self._profile_memory,
with_flops=self._with_flops)
for evt in avg_list:
evt.stack = evt.stack[:group_by_stack_n]
if not group_by_input_shapes:
evt.input_shapes = ""
return avg_list
def total_average(self):
"""Averages all events.
Returns:
A FunctionEventAvg object.
"""
total_stat = FunctionEventAvg()
for evt in self:
total_stat += evt
total_stat.key = 'Total'
return total_stat
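# --- Illustrative usage sketch (not part of the original file). An EventList is
# normally produced by a profiler run rather than built by hand; under that
# assumption, the hypothetical helper below shows a typical
# key_averages()/table()/export_chrome_trace() flow with torch.autograd.profiler.
def _demo_event_list_usage():
    x = torch.randn(32, 32)
    with torch.autograd.profiler.profile() as prof:
        torch.mm(x, x)
    averages = prof.key_averages()  # an EventList of FunctionEventAvg objects
    print(averages.table(sort_by="self_cpu_time_total", row_limit=5))
    prof.export_chrome_trace("trace.json")  # inspect under chrome://tracing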
def _format_time(time_us):
"""Defines how to format time in FunctionEvent"""
US_IN_SECOND = 1000.0 * 1000.0
US_IN_MS = 1000.0
if time_us >= US_IN_SECOND:
return '{:.3f}s'.format(time_us / US_IN_SECOND)
if time_us >= US_IN_MS:
return '{:.3f}ms'.format(time_us / US_IN_MS)
return '{:.3f}us'.format(time_us)
def _format_time_share(time_us, total_time_us):
"""Defines how to format time in FunctionEvent"""
if total_time_us == 0:
assert time_us == 0, "Expected time_us == 0 but got {}".format(time_us)
return "NaN"
return '{:.2f}%'.format(time_us * 100.0 / total_time_us)
def _format_memory(nbytes):
"""Returns a formatted memory size string"""
KB = 1024
MB = 1024 * KB
GB = 1024 * MB
if (abs(nbytes) >= GB):
return '{:.2f} Gb'.format(nbytes * 1.0 / GB)
elif (abs(nbytes) >= MB):
return '{:.2f} Mb'.format(nbytes * 1.0 / MB)
elif (abs(nbytes) >= KB):
return '{:.2f} Kb'.format(nbytes * 1.0 / KB)
else:
return str(nbytes) + ' b'
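# Illustrative sketch (not part of the original file): expected outputs of the
# two formatting helpers above (times are in microseconds, sizes in bytes).
# The demo function name is hypothetical.
def _demo_formatting_helpers():
    assert _format_time(1500) == '1.500ms'           # 1500 us
    assert _format_time(2500000) == '2.500s'         # 2.5e6 us
    assert _format_memory(2048) == '2.00 Kb'         # 2 KiB
    assert _format_memory(3 * 1024 * 1024) == '3.00 Mb'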
def _attr_formatter(name):
return property(lambda self: _format_time(getattr(self, name)))
class FormattedTimesMixin(object):
"""Helpers for FunctionEvent and FunctionEventAvg.
The subclass should define `*_time_total` and `count` attributes.
"""
cpu_time_str = _attr_formatter('cpu_time')
cuda_time_str = _attr_formatter('cuda_time')
cpu_time_total_str = _attr_formatter('cpu_time_total')
cuda_time_total_str = _attr_formatter('cuda_time_total')
self_cpu_time_total_str = _attr_formatter('self_cpu_time_total')
self_cuda_time_total_str = _attr_formatter('self_cuda_time_total')
@property
def cpu_time(self):
return 0.0 if self.count == 0 else 1.0 * self.cpu_time_total / self.count # type: ignore[attr-defined]
@property
def cuda_time(self):
return 0.0 if self.count == 0 else 1.0 * self.cuda_time_total / self.count # type: ignore[attr-defined]
class Interval(object):
def __init__(self, start, end):
self.start = start
self.end = end
def elapsed_us(self):
return self.end - self.start
Kernel = namedtuple('Kernel', ['name', 'device', 'duration'])
class FunctionEvent(FormattedTimesMixin):
"""Profiling information about a single function."""
def __init__(
self, id, name, thread, start_us, end_us, fwd_thread=None, input_shapes=None,
stack=None, scope=0, cpu_memory_usage=0, cuda_memory_usage=0, is_async=False,
is_remote=False, sequence_nr=-1, node_id=-1, device_type=DeviceType.CPU, device_index=0,
is_legacy=False, flops=None, trace_name=None):
self.id: int = id
self.node_id: int = node_id
self.name: str = name
self.trace_name: str = trace_name
self.time_range: Interval = Interval(start_us, end_us)
self.thread: int = thread
self.fwd_thread: Optional[int] = fwd_thread
self.kernels: List[Kernel] = []
self.count: int = 1
self.cpu_children: List[FunctionEvent] = []
self.cpu_parent: Optional[FunctionEvent] = None
self.input_shapes: Tuple[int, ...] = input_shapes
self.stack: List = stack
self.scope: int = scope
self.cpu_memory_usage: int = cpu_memory_usage
self.cuda_memory_usage: int = cuda_memory_usage
self.is_async: bool = is_async
self.is_remote: bool = is_remote
self.sequence_nr: int = sequence_nr
self.device_type: DeviceType = device_type
self.device_index: int = device_index
self.is_legacy: bool = is_legacy
self.flops: Optional[int] = flops
def append_kernel(self, name, device, duration):
assert self.device_type == DeviceType.CPU
self.kernels.append(Kernel(name, device, duration))
def append_cpu_child(self, child):
"""Append a CPU child of type FunctionEvent.
One is supposed to append only direct children to the event to have
correct self cpu time being reported.
"""
assert(self.device_type == DeviceType.CPU)
assert(isinstance(child, FunctionEvent))
assert(child.device_type == DeviceType.CPU)
self.cpu_children.append(child)
def set_cpu_parent(self, parent):
"""Set the immediate CPU parent of type FunctionEvent
One profiling FunctionEvent should have only one CPU parent such that
the child's range interval is completely inside the parent's. We use
this connection to determine whether the event comes from a top-level op or not.
"""
assert(self.device_type == DeviceType.CPU)
assert(isinstance(parent, FunctionEvent))
assert(parent.device_type == DeviceType.CPU)
self.cpu_parent = parent
# Note: async events don't have children, are not used when computing 'self'
# metrics of other events, have only total cpu time
@property
def self_cpu_memory_usage(self):
if self.is_async or self.device_type != DeviceType.CPU:
return 0
return self.cpu_memory_usage - sum(
[child.cpu_memory_usage for child in self.cpu_children]
)
@property
def self_cuda_memory_usage(self):
if self.is_async or self.device_type != DeviceType.CPU:
return 0
return self.cuda_memory_usage - sum(
[child.cuda_memory_usage for child in self.cpu_children]
)
@property
def self_cpu_time_total(self):
if self.is_async or self.device_type != DeviceType.CPU:
return 0
return self.cpu_time_total - sum(
[child.cpu_time_total for child in self.cpu_children]
)
@property
def cuda_time_total(self):
if self.is_async:
return 0
if self.device_type == DeviceType.CPU:
if not self.is_legacy:
# account for the kernels in the children ops
return (sum(kinfo.duration for kinfo in self.kernels) +
sum(ch.cuda_time_total for ch in self.cpu_children))
else:
# each legacy cpu event has a single (fake) kernel
return sum(kinfo.duration for kinfo in self.kernels)
else:
assert self.device_type == DeviceType.CUDA
return self.time_range.elapsed_us()
@property
def self_cuda_time_total(self):
if self.is_async:
return 0
if self.device_type == DeviceType.CPU:
return self.cuda_time_total - \
sum([child.cuda_time_total for child in self.cpu_children])
else:
assert(self.device_type == DeviceType.CUDA)
return self.cuda_time_total
@property
def cpu_time_total(self):
if self.device_type == DeviceType.CPU:
return self.time_range.elapsed_us()
else:
return 0
@property
def key(self):
return self.name
def __repr__(self):
return (
'<FunctionEvent id={} name={} device_type={} node_id={} cpu_time={} start_us={} end_us={} '
'cpu_children={} cuda_time={} name={} thread={} input_shapes={} '
'cpu_memory_usage={} cuda_memory_usage={} is_async={} is_remote={} seq_nr={} is_legacy={}>'.format(
self.id,
self.name,
self.device_type,
self.node_id,
self.cpu_time_str,
self.time_range.start,
self.time_range.end,
str([child.id for child in self.cpu_children]),
self.cuda_time_str,
self.name,
self.thread,
str(self.input_shapes),
self.cpu_memory_usage,
self.cuda_memory_usage,
self.is_async,
self.is_remote,
self.sequence_nr,
self.is_legacy,
)
)
class FunctionEventAvg(FormattedTimesMixin):
"""Used to average stats over multiple FunctionEvent objects."""
def __init__(self):
self.key: Optional[str] = None
self.count: int = 0
self.node_id: int = 0
self.is_async: bool = False
self.is_remote: bool = False
self.cpu_time_total: int = 0
self.cuda_time_total: int = 0
self.self_cpu_time_total: int = 0
self.self_cuda_time_total: int = 0
self.input_shapes: Optional[List[List[int]]] = None
self.stack: Optional[List] = None
self.scope: Optional[int] = None
self.cpu_memory_usage: int = 0
self.cuda_memory_usage: int = 0
self.self_cpu_memory_usage: int = 0
self.self_cuda_memory_usage: int = 0
self.cpu_children: Optional[List[FunctionEvent]] = None
self.cpu_parent: Optional[FunctionEvent] = None
self.device_type: DeviceType = DeviceType.CPU
self.is_legacy: bool = False
self.flops: int = 0
def add(self, other):
if self.key is None:
# First function being recorded as part of FunctionEventAvg, propagate
# fields.
self.key = other.key
self.node_id = other.node_id
self.is_async = other.is_async
self.is_remote = other.is_remote
self.cpu_parent = other.cpu_parent
self.cpu_children = other.cpu_children
self.input_shapes = other.input_shapes
self.stack = other.stack
self.scope = other.scope
self.device_type = other.device_type
self.is_legacy = other.is_legacy
assert isinstance(other, (FunctionEvent, FunctionEventAvg))
assert other.key == self.key
self.cpu_time_total += other.cpu_time_total
self.cuda_time_total += other.cuda_time_total
self.self_cpu_time_total += other.self_cpu_time_total
self.self_cuda_time_total += other.self_cuda_time_total
self.cpu_memory_usage += other.cpu_memory_usage
self.cuda_memory_usage += other.cuda_memory_usage
self.self_cpu_memory_usage += other.self_cpu_memory_usage
self.self_cuda_memory_usage += other.self_cuda_memory_usage
self.count += other.count
if self.flops is None:
self.flops = other.flops
elif other.flops is not None:
self.flops += other.flops
return self
def __iadd__(self, other):
return self.add(other)
def __repr__(self):
return (
'<FunctionEventAvg key={} self_cpu_time={} cpu_time={} '
' self_cuda_time={} cuda_time={} input_shapes={} '
'cpu_memory_usage={} cuda_memory_usage={}>'.format(
self.key,
self.self_cpu_time_total_str,
self.cpu_time_str,
self.self_cuda_time_total_str,
self.cuda_time_str,
str(self.input_shapes),
self.cpu_memory_usage,
self.cuda_memory_usage,
)
)
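# Illustrative sketch (not part of the original file): averaging two CPU
# FunctionEvents that share the same key with FunctionEventAvg.add; the demo
# function name is hypothetical.
def _demo_function_event_avg():
    fe1 = FunctionEvent(id=0, name="aten::add", thread=0, start_us=0, end_us=10)
    fe2 = FunctionEvent(id=1, name="aten::add", thread=0, start_us=20, end_us=50)
    avg = FunctionEventAvg()
    avg.add(fe1).add(fe2)
    assert avg.count == 2
    assert avg.cpu_time_total == 40  # 10 us + 30 us
    assert avg.cpu_time == 20.0      # average us per call
    return avg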
class StringTable(defaultdict):
def __missing__(self, key):
# manage cases like 't' (demangled to 'unsigned short') separately;
# for now we simply check the length to avoid unexpected results for
# short sequences
self[key] = torch._C._demangle(key) if len(key) > 1 else key
return self[key]
class MemRecordsAcc:
"""Acceleration structure for accessing mem_records in interval"""
def __init__(self, mem_records):
self._mem_records = mem_records
self._start_uses = []
self._indices = []
if len(mem_records) > 0:
tmp = sorted([(r[0].start_us(), i) for i, r in enumerate(mem_records)])
self._start_uses, self._indices = zip(*tmp)
def in_interval(self, start_us, end_us):
start_idx = bisect.bisect_left(self._start_uses, start_us)
end_idx = bisect.bisect_right(self._start_uses, end_us)
for i in range(start_idx, end_idx):
yield self._mem_records[self._indices[i]]
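# Illustrative sketch (not part of the original file): querying MemRecordsAcc with
# fake records; each record only needs to expose start_us() at index 0. The class
# and function names below are hypothetical.
class _FakeMemEvent:
    def __init__(self, t_us):
        self._t_us = t_us
    def start_us(self):
        return self._t_us
def _demo_mem_records_acc():
    records = [(_FakeMemEvent(t), None) for t in (5, 15, 25)]
    acc = MemRecordsAcc(records)
    # Records whose start time falls in [10, 30]: the ones at 15 us and 25 us.
    return [r[0].start_us() for r in acc.in_interval(10, 30)]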
def _filter_stack_entry(entry):
filtered_entries = [
("autograd/__init__", "_make_grads"),
("autograd/__init__", "backward"),
("torch/tensor", "backward"),
("_internal/common_utils", "prof_callable"),
("_internal/common_utils", "prof_func_call"),
("_internal/common_utils", "prof_meth_call"),
]
return all([not (f[0] in entry and f[1] in entry) for f in filtered_entries])
MEMORY_EVENT_NAME = "[memory]"
OUT_OF_MEMORY_EVENT_NAME = "[OutOfMemory]"
def _filter_name(name):
# ignoring the following utility ops
filtered_out_names = [
MEMORY_EVENT_NAME, # used only for the top-level memory events
OUT_OF_MEMORY_EVENT_NAME,
"profiler::_record_function_enter",
"profiler::_record_function_enter_new",
"profiler::_record_function_exit",
"aten::is_leaf",
"aten::output_nr",
"aten::_version",
]
return name in filtered_out_names
# Demangles and optionally rewrites the provided event name,
# with_wildcard - whether to replace certain numbered event names
# with a wildcard name to aggregate them together in the profiler table
# output
def _rewrite_name(name, with_wildcard=False):
string_table = StringTable()
name = string_table[name]
if with_wildcard:
if name.startswith("ProfilerStep#"):
name = "ProfilerStep*"
return name
def _build_table(
events,
sort_by=None,
header=None,
row_limit=100,
max_src_column_width=75,
with_flops=False,
profile_memory=False,
top_level_events_only=False):
"""Prints a summary of events (which can be a list of FunctionEvent or FunctionEventAvg)."""
if len(events) == 0:
return ""
has_cuda_time = any([event.self_cuda_time_total > 0 for event in events])
has_cuda_mem = any([event.self_cuda_memory_usage > 0 for event in events])
has_input_shapes = any(
[(event.input_shapes is not None and len(event.input_shapes) > 0) for event in events])
if sort_by is not None:
events = EventList(sorted(
events, key=lambda evt: getattr(evt, sort_by), reverse=True
), use_cuda=has_cuda_time, profile_memory=profile_memory, with_flops=with_flops)
MAX_NAME_COLUMN_WIDTH = 55
name_column_width = max([len(evt.key) for evt in events]) + 4
name_column_width = min(name_column_width, MAX_NAME_COLUMN_WIDTH)
MAX_SHAPES_COLUMN_WIDTH = 80
shapes_column_width = max([len(str(evt.input_shapes)) for evt in events]) + 4
shapes_column_width = min(shapes_column_width, MAX_SHAPES_COLUMN_WIDTH)
DEFAULT_COLUMN_WIDTH = 12
flops_column_width = DEFAULT_COLUMN_WIDTH
src_column_width = None
stacks = []
for evt in events:
if evt.stack is not None and len(evt.stack) > 0:
stacks.append(evt.stack)
has_stack = len(stacks) > 0
if has_stack:
src_column_width = max([max([len(entry) for entry in stack]) for stack in stacks]) + 4
src_column_width = min(src_column_width, max_src_column_width)
headers = [
'Name',
'Self CPU %',
'Self CPU',
'CPU total %',
'CPU total',
'CPU time avg',
]
if has_cuda_time:
headers.extend([
'Self CUDA',
'Self CUDA %',
'CUDA total',
'CUDA time avg',
])
if profile_memory:
headers.extend([
'CPU Mem',
'Self CPU Mem',
])
if has_cuda_mem:
headers.extend([
'CUDA Mem',
'Self CUDA Mem',
])
headers.append(
'# of Calls'
)
# Only append Node ID if any event has a valid (>= 0) Node ID
append_node_id = any([evt.node_id != -1 for evt in events])
if append_node_id:
headers.append('Node ID')
# Have to use a list because nonlocal is Py3 only...
SPACING_SIZE = 2
row_format_lst = [""]
header_sep_lst = [""]
line_length_lst = [-SPACING_SIZE]
MAX_STACK_ENTRY = 5
def add_column(padding, text_dir='>'):
row_format_lst[0] += '{: ' + text_dir + str(padding) + '}' + (' ' * SPACING_SIZE)
header_sep_lst[0] += '-' * padding + (' ' * SPACING_SIZE)
line_length_lst[0] += padding + SPACING_SIZE
def auto_scale_flops(flops):
flop_headers = [
'FLOPs',
'KFLOPs',
'MFLOPs',
'GFLOPs',
'TFLOPs',
'PFLOPs',
]
assert flops > 0
log_flops = max(0, min(math.log10(flops) / 3, float(len(flop_headers) - 1)))
assert log_flops >= 0 and log_flops < len(flop_headers)
return (pow(10, (math.floor(log_flops) * -3.0)), flop_headers[int(log_flops)])
add_column(name_column_width)
for _ in headers[1:]:
add_column(DEFAULT_COLUMN_WIDTH)
if has_input_shapes:
headers.append('Input Shapes')
add_column(shapes_column_width)
if has_stack:
headers.append('Source Location')
add_column(src_column_width, text_dir='<')
if with_flops:
# Auto-scaling of flops header
raw_flops = []
for evt in events:
if evt.flops > 0:
raw_flops.append(evt.flops)
if len(raw_flops) != 0:
(flops_scale, flops_header) = auto_scale_flops(min(raw_flops))
headers.append('Total {}'.format(flops_header))
add_column(flops_column_width)
else:
with_flops = False # can't find any valid flops
row_format = row_format_lst[0]
header_sep = header_sep_lst[0]
line_length = line_length_lst[0]
add_column = None # type: ignore[assignment]
# Have to use a list because nonlocal is Py3 only...
result = []
def append(s):
result.append(s)
result.append('\n') # Yes, newline after the end as well
sum_self_cpu_time_total = sum([event.self_cpu_time_total for event in events])
sum_self_cuda_time_total = 0
for evt in events:
if evt.device_type == DeviceType.CPU:
# in legacy profiler, kernel info is stored in cpu events
if evt.is_legacy:
sum_self_cuda_time_total += evt.self_cuda_time_total
elif evt.device_type == DeviceType.CUDA:
# in the kineto profiler, there are events with the correct device type (e.g. CUDA)
sum_self_cuda_time_total += evt.self_cuda_time_total
# Actual printing
if header is not None:
append('=' * line_length)
append(header)
if top_level_events_only:
append('=' * line_length)
append('This report only displays top-level ops statistics')
append(header_sep)
append(row_format.format(*headers))
append(header_sep)
def trim_path(path, src_column_width):
if len(path) > src_column_width:
offset = len(path) - src_column_width
path = path[offset:]
if len(path) > 3:
path = "..." + path[3:]
return path
event_limit = 0
for evt in events:
if event_limit == row_limit:
break
if top_level_events_only and evt.cpu_parent is not None:
continue
else:
event_limit += 1
name = evt.key
if len(name) >= MAX_NAME_COLUMN_WIDTH - 3:
name = name[:(MAX_NAME_COLUMN_WIDTH - 3)] + "..."
row_values = [
name,
# Self CPU total %, 0 for async events.
_format_time_share(evt.self_cpu_time_total, sum_self_cpu_time_total),
evt.self_cpu_time_total_str, # Self CPU total
# CPU total %, 0 for async events.
_format_time_share(evt.cpu_time_total, sum_self_cpu_time_total) if not evt.is_async else 0,
evt.cpu_time_total_str, # CPU total
evt.cpu_time_str, # CPU time avg
]
if has_cuda_time:
row_values.extend([
evt.self_cuda_time_total_str,
# CUDA time total %
_format_time_share(evt.self_cuda_time_total, sum_self_cuda_time_total),
evt.cuda_time_total_str,
evt.cuda_time_str, # Cuda time avg
])
if profile_memory:
row_values.extend([
# CPU Mem Total
_format_memory(evt.cpu_memory_usage),
# Self CPU Mem Total
_format_memory(evt.self_cpu_memory_usage),
])
if has_cuda_mem:
row_values.extend([
# CUDA Mem Total
_format_memory(evt.cuda_memory_usage),
# Self CUDA Mem Total
_format_memory(evt.self_cuda_memory_usage),
])
row_values.append(
evt.count, # Number of calls
)
if append_node_id:
row_values.append(evt.node_id)
if has_input_shapes:
row_values.append(str(evt.input_shapes)[:shapes_column_width])
if with_flops:
if evt.flops <= 0:
row_values.append("--")
else:
row_values.append('{0:8.3f}'.format(evt.flops * flops_scale))
if has_stack:
src_field = ""
if len(evt.stack) > 0:
src_field = trim_path(evt.stack[0], src_column_width)
row_values.append(src_field)
append(row_format.format(*row_values))
if has_stack:
empty_headers = [""] * (len(headers) - 1)
for entry in evt.stack[1:MAX_STACK_ENTRY]:
append(row_format.format(*(empty_headers + [trim_path(entry, src_column_width)])))
empty_headers.append("")
append(row_format.format(*empty_headers))
append(header_sep)
append("Self CPU time total: {}".format(_format_time(sum_self_cpu_time_total)))
if has_cuda_time:
append("Self CUDA time total: {}".format(_format_time(sum_self_cuda_time_total)))
return ''.join(result)
| pytorch-master | torch/autograd/profiler_util.py |
import torch
import torch.cuda
from torch.autograd.profiler_util import (
EventList, FunctionEvent, MEMORY_EVENT_NAME,
_filter_name, _filter_stack_entry, _rewrite_name
)
from torch.autograd import (
DeviceType, ProfilerConfig, ProfilerState,
_disable_profiler_legacy, _enable_profiler_legacy,
)
import itertools
from warnings import warn
class profile(object):
"""DEPRECATED: use torch.profiler instead"""
def __init__(
self,
enabled=True,
*,
use_cuda=False,
record_shapes=False,
with_flops=False,
profile_memory=False,
with_stack=False,
with_modules=False):
self.enabled: bool = enabled
if not self.enabled:
return
self.use_cuda = use_cuda
self.function_events = None
self.entered = False
self.record_shapes = record_shapes
self.with_flops = with_flops
self.record_shapes |= self.with_flops
self.profile_memory = profile_memory
self.with_stack = with_stack
self.with_modules = with_modules
if self.use_cuda and not torch.cuda.is_available():
warn("CUDA is not available, disabling CUDA profiling")
self.use_cuda = False
if self.use_cuda:
self.profiler_kind = ProfilerState.CUDA
else:
self.profiler_kind = ProfilerState.CPU
def config(self):
return ProfilerConfig(
self.profiler_kind,
self.record_shapes,
self.profile_memory,
self.with_stack,
self.with_flops,
self.with_modules,
# avoid exposing _ExperimentalConfig in the legacy public API
torch._C._autograd._ExperimentalConfig(),
)
def __enter__(self):
if not self.enabled:
return
if self.entered:
raise RuntimeError("Profiler context manager is not reentrant")
self.entered = True
self._start_trace()
return self
def _start_trace(self):
_enable_profiler_legacy(self.config())
def __exit__(self, exc_type, exc_val, exc_tb):
if not self.enabled:
return
if self.use_cuda:
torch.cuda.synchronize()
records = _disable_profiler_legacy()
parsed_results = _parse_legacy_records(records)
self.function_events = EventList(
parsed_results,
use_cuda=self.use_cuda,
profile_memory=self.profile_memory,
with_flops=self.with_flops)
self.function_events._build_tree()
return False
def __repr__(self):
if self.function_events is None:
return '<unfinished profiler_legacy.profile>'
return repr(self.function_events)
def __str__(self):
if self.function_events is None:
return '<unfinished profiler_legacy.profile>'
return str(self.function_events)
def _check_finish(self):
if self.function_events is None:
raise RuntimeError("Profiler didn't finish running")
def table(self, sort_by=None, row_limit=100, max_src_column_width=75, header=None, top_level_events_only=False):
self._check_finish()
assert self.function_events is not None
return self.function_events.table(
sort_by=sort_by, row_limit=row_limit, max_src_column_width=max_src_column_width, header=header,
top_level_events_only=top_level_events_only
)
table.__doc__ = EventList.table.__doc__
def export_chrome_trace(self, path):
self._check_finish()
assert self.function_events is not None
return self.function_events.export_chrome_trace(path)
export_chrome_trace.__doc__ = EventList.export_chrome_trace.__doc__
def export_stacks(self, path: str, metric: str = "self_cpu_time_total"):
self._check_finish()
assert self.function_events is not None, "Expected profiling results"
assert self.with_stack, "export_stacks() requires with_stack=True"
return self.function_events.export_stacks(path, metric)
def key_averages(self, group_by_input_shape=False, group_by_stack_n=0):
self._check_finish()
assert self.function_events is not None, "Expected profiling results"
return self.function_events.key_averages(group_by_input_shape, group_by_stack_n)
key_averages.__doc__ = EventList.key_averages.__doc__
def total_average(self):
self._check_finish()
assert self.function_events is not None, "Expected profiling results"
return self.function_events.total_average()
total_average.__doc__ = EventList.total_average.__doc__
@property
def self_cpu_time_total(self):
""" Returns total time spent on CPU obtained as a sum of
all self times across all the events.
"""
self._check_finish()
assert self.function_events is not None
return self.function_events.self_cpu_time_total
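# Illustrative usage sketch (not part of the original file): typical use of the
# deprecated legacy profiler defined above; torch.profiler is the recommended
# replacement. The demo function name is hypothetical.
def _demo_legacy_profile():
    x = torch.randn(16, 16)
    with profile(use_cuda=False) as prof:
        torch.mm(x, x)
    # Summarize the collected events, sorted by self CPU time.
    return prof.table(sort_by="self_cpu_time_total", row_limit=5)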
def _parse_legacy_records(thread_records):
def _get_record_key(record):
"""
Returns a tuple to be used by _parse_legacy_records for correlating start and
end records.
"""
return (record.handle(), record.node_id())
next_id = 0
start_record = None
functions = []
record_stack = []
# '__start_profile' is not guaranteed to be first, so we must find it here
for record in itertools.chain(*thread_records):
name = record.name()
if start_record is None and name == '__start_profile':
start_record = record
assert start_record is not None and not start_record.is_remote()
for thread_record_list in thread_records:
# accumulated memory allocations per handle
cpu_memory_allocs = {}
cuda_memory_allocs = {}
# ranges per handle
range_starts = {}
filtered_handles = set()
prev_record = None
for record in thread_record_list:
record_key = _get_record_key(record)
if (_filter_name(record.name()) or
record_key in filtered_handles):
filtered_handles.add(record_key)
continue
if record.kind() == 'push':
# workaround to reduce double logging from operator
# wrappers and redispatch
if prev_record is not None:
duplicate = (
prev_record.name() == record.name()
and prev_record.kind() == record.kind()
and prev_record.node_id() == record.node_id()
)
if duplicate:
filtered_handles.add(record_key)
continue
range_starts[record_key] = record
cpu_memory_allocs[record_key] = 0
cuda_memory_allocs[record_key] = 0
elif record.kind() == 'pop':
assert (
record_key in range_starts
), """Expected record with key {} to exist in range_starts.
This means that the pop event did not have a corresponding push.""".format(
record_key
)
start = range_starts[record_key]
cpu_memory_usage = cpu_memory_allocs[record_key]
cuda_memory_usage = cuda_memory_allocs[record_key]
is_async = start.is_async() or (
start.thread_id() != record.thread_id()
)
is_remote_event = record.is_remote()
start_flops = start.flops()
fe = FunctionEvent(
id=record.handle(),
node_id=record.node_id(),
name=_rewrite_name(name=start.name(), with_wildcard=True),
trace_name=_rewrite_name(name=start.name(), with_wildcard=False),
thread=start.thread_id(),
start_us=start_record.cpu_elapsed_us(start),
end_us=start_record.cpu_elapsed_us(record),
fwd_thread=start.fwd_thread_id(),
input_shapes=start.shapes(),
stack=[entry for entry in start.stack() if _filter_stack_entry(entry)],
scope=start.scope(),
cpu_memory_usage=cpu_memory_usage,
cuda_memory_usage=cuda_memory_usage,
is_async=is_async,
is_remote=is_remote_event,
sequence_nr=start.sequence_nr(),
device_type=DeviceType.CPU,
is_legacy=True,
flops=start_flops,
)
# note: async events have only cpu total time
if not is_async and start.has_cuda():
duration = start.cuda_elapsed_us(record)
if duration > 0:
fe.append_kernel(
start.name(),
start.device(),
duration)
functions.append(fe)
del range_starts[record_key]
del cpu_memory_allocs[record_key]
del cuda_memory_allocs[record_key]
elif record.kind() == 'memory_alloc':
num_open_handles_cpu = len(cpu_memory_allocs)
num_open_handles_cuda = len(cuda_memory_allocs)
assert num_open_handles_cpu == num_open_handles_cuda
for handle in cpu_memory_allocs.keys():
cpu_memory_allocs[handle] += record.cpu_memory_usage()
for handle in cuda_memory_allocs.keys():
cuda_memory_allocs[handle] += record.cuda_memory_usage()
if num_open_handles_cpu == 0:
# output event as a top-level memory event
fe = FunctionEvent(
id=0,
name=MEMORY_EVENT_NAME,
trace_name=None,
thread=0,
start_us=0,
end_us=0,
stack=[],
cpu_memory_usage=record.cpu_memory_usage(),
cuda_memory_usage=record.cuda_memory_usage(),
is_legacy=True,
)
functions.append(fe)
prev_record = record
# Sort functions by start time then by end time ascending.
# This ensures that--in the case of nested events which
# have the same start time (which may happen due to the
# granularity of the given clock tick)--we always show
# the outermost nested call first. This adds stability
# in how FunctionEvents appear
functions.sort(key=lambda evt: [evt.time_range.start, -evt.time_range.end])
return functions
| pytorch-master | torch/autograd/profiler_legacy.py |
import torch
from torch.types import _TensorOrTensors
import torch.testing
from torch.overrides import is_tensor_like
import collections
from itertools import product
import warnings
from typing import Callable, Union, Optional, Iterable, List, Tuple, Dict
from torch._vmap_internals import vmap, _vmap
import functools
# Note: `get_*_jacobian` functions are added here even though we didn't intend to make them public
# since they have been exposed from before we added `__all__` and we already maintain BC for them
# We should eventually deprecate them and remove them from `__all__`
__all__ = ["gradcheck", "gradgradcheck", "GradcheckError", "get_numerical_jacobian",
"get_analytical_jacobian", "get_numerical_jacobian_wrt_specific_input"]
class GradcheckError(RuntimeError):
r"""Error raised by :func:`gradcheck` and :func:`gradgradcheck`"""
pass
def _is_float_or_complex_tensor(obj):
return is_tensor_like(obj) and (obj.is_floating_point() or obj.is_complex())
def _allocate_jacobians_with_inputs(input_tensors: Tuple, numel_output) -> Tuple[torch.Tensor, ...]:
# Makes zero-filled tensors from inputs. If `numel_output` is not None, for
# each tensor in `input_tensors`, returns a new zero-filled tensor with height
# of `t.numel` and width of `numel_output`. Otherwise, for each tensor, returns
# a 1-d tensor with size `(t.numel,)`. Each new tensor will be strided and have
# the same dtype and device as those of the corresponding input.
out: List[torch.Tensor] = []
for t in input_tensors:
if _is_float_or_complex_tensor(t) and t.requires_grad:
out.append(t.new_zeros((t.numel(), numel_output), layout=torch.strided))
return tuple(out)
def _allocate_jacobians_with_outputs(output_tensors: Tuple, numel_input, dtype=None,
device=None) -> Tuple[torch.Tensor, ...]:
# Makes zero-filled tensors from outputs. If `numel_input` is not None, for each
# tensor in `output_tensors`, returns a new zero-filled tensor with height of
# `numel_input` and width of `t.numel`. Otherwise, for each tensor, returns a
# 1-d tensor with size (t.numel,).
out: List[torch.Tensor] = []
options = {"dtype": dtype, "device": device, "layout": torch.strided}
for t in output_tensors:
if _is_float_or_complex_tensor(t):
out.append(t.new_zeros((numel_input, t.numel()), **options))
return tuple(out)
def _iter_tensors(x: Union[torch.Tensor, Iterable[torch.Tensor]],
only_requiring_grad: bool = False) -> Iterable[torch.Tensor]:
if is_tensor_like(x):
# mypy doesn't narrow type of `x` to torch.Tensor
if x.requires_grad or not only_requiring_grad: # type: ignore[union-attr]
yield x # type: ignore[misc]
elif isinstance(x, collections.abc.Iterable) and not isinstance(x, str):
for elem in x:
for result in _iter_tensors(elem, only_requiring_grad):
yield result
def _iter_tensor(x_tensor):
# (Only used for slow gradcheck) Returns a generator that yields the following
# elements at each iteration:
# 1) a tensor: the same tensor is returned across all iterations. The tensor
# is not the same as the original x_tensor as given as input - it is
# prepared so that it can be modified in-place. Depending on whether the
# input tensor is strided, sparse, or dense, the returned tensor may or may
# not share storage with x_tensor.
# 2) a tuple of indices that can be used with advanced indexing (yielded in
# dictionary order)
# 3) flattened index that will be used to index into the Jacobian tensor
#
# For a tensor t with size (2, 2), _iter_tensor yields:
# `x, (0, 0), 0`, `x, (0, 1), 1`, `x, (1, 0), 2`, `x, (1, 1), 3`
#
# where x is the t.data of the original tensor. Perturbing the entry of x
# at index (1, 1) yields the 3rd column of the overall Jacobian matrix.
if x_tensor.is_sparse:
def get_stride(size):
dim = len(size)
tmp = 1
stride = [0] * dim
for i in reversed(range(dim)):
stride[i] = tmp
tmp *= size[i]
return stride
x_nnz = x_tensor._nnz()
x_size = list(x_tensor.size())
x_indices = x_tensor._indices().t()
x_values = x_tensor._values()
x_stride = get_stride(x_size)
# Use .data here to get around the version check
x_values = x_values.data
for i in range(x_nnz):
x_value = x_values[i]
for x_idx in product(*[range(m) for m in x_values.size()[1:]]):
indices = x_indices[i].tolist() + list(x_idx)
d_idx = sum(indices[k] * x_stride[k] for k in range(len(x_size)))
yield x_value, x_idx, d_idx
elif x_tensor.layout == torch._mkldnn: # type: ignore[attr-defined]
for d_idx, x_idx in enumerate(product(*[range(m) for m in x_tensor.size()])):
# this is really inefficient, but without indexing implemented, there's
# not really a better way than converting back and forth
x_tensor_dense = x_tensor.to_dense()
yield x_tensor_dense, x_idx, d_idx
else:
# Use .data here to get around the version check
x_tensor = x_tensor.data
for d_idx, x_idx in enumerate(product(*[range(m) for m in x_tensor.size()])):
yield x_tensor, x_idx, d_idx
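# Illustrative sketch (not part of the original file): what _iter_tensor yields for
# a dense 2x2 tensor, matching the comment above; the demo name is hypothetical.
def _demo_iter_tensor():
    t = torch.zeros(2, 2)
    # Yields ((0, 0), 0), ((0, 1), 1), ((1, 0), 2), ((1, 1), 3)
    return [(idx, d_idx) for _, idx, d_idx in _iter_tensor(t)]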
def _get_numerical_jacobian(fn, inputs, outputs=None, target=None, eps=1e-3,
is_forward_ad=False) -> List[Tuple[torch.Tensor, ...]]:
"""Computes the numerical Jacobian of `fn(inputs)` with respect to `target`. If
not specified, targets are the input. Returns M * N Jacobians where N is the
number of tensors in target that require grad and M is the number of non-integral
outputs.
Args:
fn: the function to compute the jacobian for
inputs: inputs to `fn`
outputs: provide precomputed outputs to avoid one extra invocation of fn
target: the Tensors wrt whom Jacobians are calculated (default=`inputs`)
eps: the magnitude of the perturbation during finite differencing
(default=`1e-3`)
is_forward_ad: if this numerical jacobian is computed to be checked wrt
forward AD gradients (this is used for error checking only)
Returns:
A list of M N-tuples of tensors
Note that `target` may not even be part of `input` to `fn`, so please be
**very careful** not to clone `target` here.
"""
jacobians: List[Tuple[torch.Tensor, ...]] = []
if outputs is None:
outputs = _as_tuple(fn(*_as_tuple(inputs)))
if not is_forward_ad and any(o.is_complex() for o in outputs):
raise ValueError("Expected output to be non-complex. get_numerical_jacobian no "
"longer supports functions that return complex outputs.")
if target is None:
target = inputs
inp_indices = [i for i, a in enumerate(target) if is_tensor_like(a) and a.requires_grad]
for i, (inp, inp_idx) in enumerate(zip(_iter_tensors(target, True), inp_indices)):
jacobians += [get_numerical_jacobian_wrt_specific_input(fn, inp_idx, inputs, outputs, eps,
input=inp, is_forward_ad=is_forward_ad)]
return jacobians
def get_numerical_jacobian(fn, inputs, target=None, eps=1e-3, grad_out=1.0):
"""Deprecated API to compute the numerical Jacobian for a given fn and its inputs.
Args:
fn: the function to compute the Jacobian for (must take inputs as a tuple)
inputs: inputs to `fn`
target: the Tensors wrt whom Jacobians are calculated (default=`input`)
eps: the magnitude of the perturbation during finite differencing
(default=`1e-3`)
Returns:
A list of Jacobians of `fn` (restricted to its first output) with respect to
each input or target, if provided.
Note that `target` may not even be part of `input` to `fn`, so please be
**very careful** not to clone `target` here.
"""
warnings.warn("get_numerical_jacobian was part of PyTorch's private API and not "
"meant to be exposed. We are deprecating it and it will be removed "
"in a future version of PyTorch. If you have a specific use for "
"this or feature request for this to be a stable API, please file "
"us an issue at https://github.com/pytorch/pytorch/issues/new")
if grad_out != 1.0: # grad_out param is only kept for backward compatibility reasons
raise ValueError("Expected grad_out to be 1.0. get_numerical_jacobian no longer "
"supports values of grad_out != 1.0.")
def fn_pack_inps(*inps):
return fn(inps)
jacobians = _get_numerical_jacobian(fn_pack_inps, inputs, None, target, eps)
return tuple(jacobian_for_each_output[0] for jacobian_for_each_output in jacobians)
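# Illustrative sketch (not part of the original file): calling the deprecated
# public API above on a simple elementwise function. Note that `fn` receives the
# whole inputs tuple and that a deprecation warning is emitted; the demo name is
# hypothetical.
def _demo_get_numerical_jacobian():
    x = torch.randn(3, dtype=torch.double, requires_grad=True)
    (jac,) = get_numerical_jacobian(lambda inputs: inputs[0] * 2, (x,))
    return jac  # approximately 2 * torch.eye(3, dtype=torch.double)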
def _compute_numerical_gradient(fn, entry, v, norm_v, nbhd_checks_fn):
# Performs finite differencing by perturbing `entry` in-place by `v` and
# returns the gradient of each of the outputs wrt to x at idx.
orig = entry.clone()
entry.copy_(orig - v)
outa = fn()
entry.copy_(orig + v)
outb = fn()
entry.copy_(orig)
def compute(a, b):
nbhd_checks_fn(a, b)
ret = (b - a) / (2 * norm_v)
return ret.detach().reshape(-1)
return tuple(compute(a, b) for (a, b) in zip(outa, outb))
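# Illustrative sketch (not part of the original file): the central-difference rule
# that _compute_numerical_gradient applies per perturbed entry, written out for a
# scalar function. The helper name is hypothetical.
def _demo_central_difference(f, x, eps=1e-3):
    # d/dx f(x) ~ (f(x + eps) - f(x - eps)) / (2 * eps)
    return (f(x + eps) - f(x - eps)) / (2 * eps)
# _demo_central_difference(lambda x: x ** 2, 3.0) is approximately 6.0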
def _compute_numerical_jvps_wrt_specific_input(jvp_fn, delta, input_is_complex,
is_forward_ad=False) -> List[torch.Tensor]:
# Computing the jacobian only works for real delta
# For details on the algorithm used here, refer:
# Section 3.5.3 https://arxiv.org/pdf/1701.00392.pdf
# s = fn(z) where z = x for real valued input
# and z = x + yj for complex valued input
jvps: List[torch.Tensor] = []
ds_dx_tup = jvp_fn(delta[0] if isinstance(delta, tuple) else delta)
if input_is_complex: # C -> R
ds_dy_tup = jvp_fn(delta[1] * 1j) if isinstance(delta, tuple) else jvp_fn(delta * 1j)
for ds_dx, ds_dy in zip(ds_dx_tup, ds_dy_tup):
assert(not ds_dx.is_complex())
# conjugate wirtinger derivative
conj_w_d = ds_dx + ds_dy * 1j
jvps.append(conj_w_d)
else:
for ds_dx in ds_dx_tup: # R -> R or (R -> C for the forward AD case)
assert(is_forward_ad or not ds_dx.is_complex())
jvps.append(ds_dx)
return jvps
def _combine_jacobian_cols(jacobians_cols: Dict[int, List[torch.Tensor]], outputs, input,
numel) -> Tuple[torch.Tensor, ...]:
# jacobian_cols maps column_idx -> output_idx -> single column of jacobian Tensor
# we return a list that maps output_idx -> full jacobian Tensor
jacobians = _allocate_jacobians_with_outputs(outputs, numel, dtype=input.dtype if input.dtype.is_complex else None)
for i, jacobian in enumerate(jacobians):
for k, v in jacobians_cols.items():
jacobian[k] = v[i]
return jacobians
def _prepare_input(input: torch.Tensor, maybe_perturbed_input: Optional[torch.Tensor],
fast_mode=False) -> torch.Tensor:
# Prepares the inputs to be passed into the function while including the new
# modified input.
if input.layout == torch._mkldnn: # type: ignore[attr-defined] # no attr _mkldnn
# Convert back to mkldnn
if maybe_perturbed_input is not None:
return maybe_perturbed_input.to_mkldnn()
else:
return input
elif input.layout == torch.sparse_coo:
if fast_mode and maybe_perturbed_input is not None:
# entry is already a "cloned" version of the original tensor
# thus changes to entry are not reflected in the input
return maybe_perturbed_input
else:
return input
else:
# We cannot use entry (input.data) if we want gradgrad to work because
# fn (in the gradgrad case) needs to compute grad wrt input
return input
def _check_outputs_same_dtype_and_shape(output1, output2, eps, idx=None) -> None:
# Check that the returned outputs don't have different dtype or shape when you
# perturb the input
on_index = "on index {idx} " if idx is not None else ""
assert output1.shape == output2.shape, \
(f"Expected `func` to return outputs with the same shape"
f" when inputs are perturbed {on_index}by {eps}, but got:"
f" shapes {output1.shape} and {output2.shape}.")
assert output1.dtype == output2.dtype, \
(f"Expected `func` to return outputs with the same dtype"
f" when inputs are perturbed {on_index}by {eps}, but got:"
f" dtypes {output1.dtype} and {output2.dtype}.")
def get_numerical_jacobian_wrt_specific_input(fn, input_idx, inputs, outputs, eps,
input=None, is_forward_ad=False) -> Tuple[torch.Tensor, ...]:
# Computes the numerical jacobians wrt to a single input. Returns N jacobian
# tensors, where N is the number of outputs. We use a dictionary for
# jacobian_cols because indices aren't necessarily consecutive for sparse inputs
# When we perturb only a single element of the input tensor at a time, the jvp
# is equivalent to a single col of the Jacobian matrix of fn.
jacobian_cols: Dict[int, List[torch.Tensor]] = {}
input = inputs[input_idx] if input is None else input
assert input.requires_grad
for x, idx, d_idx in _iter_tensor(input):
wrapped_fn = _with_prepare_inputs(fn, inputs, input_idx, x)
input_to_perturb = x[idx]
nbhd_checks_fn = functools.partial(_check_outputs_same_dtype_and_shape, idx=idx, eps=eps)
jvp_fn = _get_numerical_jvp_fn(wrapped_fn, input_to_perturb, eps, nbhd_checks_fn)
jacobian_cols[d_idx] = _compute_numerical_jvps_wrt_specific_input(jvp_fn, eps, x.is_complex(), is_forward_ad)
return _combine_jacobian_cols(jacobian_cols, outputs, input, input.numel())
def _get_analytical_jacobian_forward_ad(fn, inputs, outputs, *, check_grad_dtypes=False,
all_u=None) -> Tuple[Tuple[torch.Tensor, ...], ...]:
"""Computes the analytical Jacobian using forward mode AD of `fn(inputs)` using forward mode AD with respect
to `target`. Returns N * M Jacobians where N is the number of tensors in target that require grad and
M is the number of non-integral outputs.
Contrary to other functions here, this function requires "inputs" to actually be used by the function.
The computed value is expected to be wrong if the function captures the inputs by side effect instead of
using the passed ones (many torch.nn tests do this).
Args:
fn: the function to compute the jacobian for
inputs: inputs to `fn`
outputs: provide precomputed outputs to avoid one extra invocation of fn
check_grad_dtypes: if True, will check that the gradient dtype are valid
all_u (optional): if provided, the Jacobian will be right multiplied with this vector
Returns:
A tuple of M N-tuples of tensors
"""
# To avoid early import issues
fwAD = torch.autograd.forward_ad
tensor_inputs = tuple(i for i in inputs if is_tensor_like(i) and i.requires_grad)
if any(i.is_complex() for i in tensor_inputs):
raise ValueError("Expected inputs to be non-complex for _get_analytical_jacobian_forward_ad.")
if all_u:
jacobians = tuple(_allocate_jacobians_with_outputs(outputs, 1) for i in tensor_inputs)
else:
jacobians = tuple(_allocate_jacobians_with_outputs(outputs, i.numel()) for i in tensor_inputs)
with fwAD.dual_level():
fw_grads = []
dual_inputs = []
for i, inp in enumerate(inputs):
if is_tensor_like(inp) and inp.requires_grad:
if inp.layout == torch._mkldnn: # type: ignore[attr-defined]
raise ValueError("MKLDNN inputs are not support for forward AD gradcheck.")
inp = fwAD.make_dual(inp.detach(), torch.zeros_like(inp))
# If inp is a differentiable view, the dual might not be the tangent given to
# make_dual, so read it explicitly from the dual tensor
fw_grads.append(fwAD.unpack_dual(inp)[1])
dual_inputs.append(inp)
if all_u:
# Do the full reduction in one pass
# To be consistent with numerical evaluation, we actually compute one reduction per input
for i, (fw_grad, u) in enumerate(zip(fw_grads, all_u)):
fw_grad.copy_(u.view_as(fw_grad))
raw_outputs = _as_tuple(fn(*dual_inputs))
dual_outputs = filter(_is_float_or_complex_tensor, raw_outputs)
for index_o, d_o in enumerate(dual_outputs):
val, res = fwAD.unpack_dual(d_o)
if check_grad_dtypes and res is not None and val.is_complex() != res.is_complex():
raise GradcheckError('Forward AD gradient has dtype mismatch.')
# Remove extra dimension of size 1 corresponding to the reduced input
jacobians[i][index_o].squeeze_(0)
if res is None:
jacobians[i][index_o].zero_()
else:
jacobians[i][index_o].copy_(res.reshape(-1))
fw_grad.zero_()
else:
# Reconstruct the full Jacobian column by column
for i, fw_grad in enumerate(fw_grads):
for lin_idx, grad_idx in enumerate(product(*[range(m) for m in fw_grad.size()])):
fw_grad[grad_idx] = 1.
raw_outputs = _as_tuple(fn(*dual_inputs))
dual_outputs = filter(_is_float_or_complex_tensor, raw_outputs)
for index_o, d_o in enumerate(dual_outputs):
val, res = fwAD.unpack_dual(d_o)
if check_grad_dtypes and val.is_complex() != res.is_complex():
raise GradcheckError('Forward AD gradient has dtype mismatch.')
if res is None:
jacobians[i][index_o][lin_idx].zero_()
else:
jacobians[i][index_o][lin_idx].copy_(res.reshape(-1))
fw_grad[grad_idx] = 0.
return jacobians
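# Illustrative sketch (not part of the original file): the forward-mode AD
# primitives (dual_level / make_dual / unpack_dual) that the function above builds
# on, used here to compute a single JVP of y = x ** 2. The demo name is
# hypothetical.
def _demo_forward_ad_jvp():
    fwAD = torch.autograd.forward_ad
    x = torch.tensor([3.0])
    tangent = torch.ones_like(x)
    with fwAD.dual_level():
        dual_x = fwAD.make_dual(x, tangent)
        y, jvp = fwAD.unpack_dual(dual_x ** 2)
    return y, jvp  # tensor([9.]), tensor([6.])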
def _get_input_to_perturb(input):
# Prepare the input so that it can be modified in-place and do certain
# operations that require the tensor to have strides. If fast_mode=False,
# _iter_tensor would handle the below cases:
if input.layout == torch._mkldnn: # type: ignore[attr-defined] # no attr _mkldnn
# Convert to dense so we can perform operations that require strided tensors
input_to_perturb = input.to_dense()
elif input.layout == torch.sparse_coo:
# Clone because input may require grad, and copy_ calls resize_,
# which is not allowed for .data
input_to_perturb = input.clone()
else:
input_to_perturb = input.data
return input_to_perturb
def _with_prepare_inputs(fn, inputs, input_idx, input_to_perturb, fast_mode=False):
# Wraps `fn` so that its inputs are already supplied
def wrapped_fn():
inp = tuple(_prepare_input(a, input_to_perturb if i == input_idx else None, fast_mode)
if is_tensor_like(a) else a for i, a in enumerate(_as_tuple(inputs)))
return tuple(a.clone() for a in _as_tuple(fn(*inp)))
return wrapped_fn
def _get_numerical_jvp_fn(wrapped_fn, input_to_perturb, eps, nbhd_checks_fn):
# Wraps jvp_fn so that certain arguments are already supplied
def jvp_fn(delta):
return _compute_numerical_gradient(wrapped_fn, input_to_perturb, delta, eps, nbhd_checks_fn)
return jvp_fn
def _reshape_tensor_or_tuple(u, shape):
# We don't need to reshape when input corresponding to u is sparse
if isinstance(u, tuple):
if u[0].layout != torch.sparse_coo:
return (u[0].reshape(shape), u[1].reshape(shape))
else:
if u.layout != torch.sparse_coo:
return u.reshape(shape)
return u
def _mul_tensor_or_tuple(u, k):
if isinstance(u, tuple):
return (k * u[0], k * u[1])
else:
return k * u
def _get_numerical_jvp_wrt_specific_input(fn, input_idx, inputs, u, eps, is_forward_ad=False) -> List[torch.Tensor]:
input = inputs[input_idx]
input_to_perturb = _get_input_to_perturb(input)
wrapped_fn = _with_prepare_inputs(fn, inputs, input_idx, input_to_perturb, True)
nbhd_checks_fn = functools.partial(_check_outputs_same_dtype_and_shape, eps=eps)
jvp_fn = _get_numerical_jvp_fn(wrapped_fn, input_to_perturb, eps, nbhd_checks_fn)
u = _reshape_tensor_or_tuple(u, input_to_perturb.shape)
u = _mul_tensor_or_tuple(u, eps)
return _compute_numerical_jvps_wrt_specific_input(jvp_fn, u, input.is_complex(), is_forward_ad)
def _get_numerical_vJu(fn, inputs, inp_indices, func_out, all_u, all_v, eps, is_forward_ad):
# Note that all_v can also be None, in that case, this function only computes Ju.
reduced_jacobians: List[List[torch.Tensor]] = []
for i, (inp_idx, u) in enumerate(zip(inp_indices, all_u)):
all_Ju = _get_numerical_jvp_wrt_specific_input(fn, inp_idx, inputs, u, eps, is_forward_ad)
# Filter out the Ju for non floating point outputs
filtered_Ju = []
func_out = _as_tuple(func_out)
assert len(all_Ju) == len(func_out)
for Ju, output in zip(all_Ju, func_out):
if _is_float_or_complex_tensor(output):
filtered_Ju.append(Ju)
else:
# TODO: handle the other Ju
pass
if all_v is not None:
jacobian_scalars: List[torch.Tensor] = []
for v, Ju in zip(all_v, filtered_Ju):
jacobian_scalars.append(_dot_with_type_promotion(v, Ju))
reduced_jacobians.append(jacobian_scalars)
else:
reduced_jacobians.append(filtered_Ju)
return reduced_jacobians
def _check_jacobians_equal(j1, j2, atol):
# Check whether the max difference between two Jacobian tensors are within some
# tolerance `atol`.
for j1_x, j2_x in zip(j1, j2):
if j1_x.numel() != 0 and (j1_x - j2_x).abs().max() > atol:
return False
return True
def _stack_and_check_tensors(list_of_list_of_tensors, inputs,
numel_outputs) -> Tuple[Tuple[torch.Tensor, ...], bool, bool]:
# For the ith tensor in the inner list checks whether it has the same size and
# dtype as the ith differentiable input.
out_jacobians = _allocate_jacobians_with_inputs(inputs, numel_outputs)
diff_input_list = list(_iter_tensors(inputs, True))
correct_grad_sizes = True
correct_grad_types = True
for i, tensor_list in enumerate(list_of_list_of_tensors):
inp = diff_input_list[i]
out_jacobian = out_jacobians[i]
for j, tensor in enumerate(tensor_list):
if tensor is not None and tensor.size() != inp.size():
correct_grad_sizes = False
elif tensor is not None and tensor.dtype != inp.dtype:
correct_grad_types = False
if tensor is None:
out_jacobian[:, j].zero_()
else:
dense = tensor.to_dense() if not tensor.layout == torch.strided else tensor
assert out_jacobian[:, j].numel() == dense.numel()
out_jacobian[:, j] = dense.reshape(-1)
return out_jacobians, correct_grad_sizes, correct_grad_types
FAILED_NONDET_MSG = """\n
NOTE: If your op relies on non-deterministic operations i.e., it is listed here:
https://pytorch.org/docs/stable/generated/torch.use_deterministic_algorithms.html
this failure might be expected.
If you are adding a new operator, please file an issue and then use one of the
workarounds. The workaround depends on how your test invokes gradcheck/gradgradcheck.
If the test
- manually invokes gradcheck/gradgradcheck, then call gradcheck/gradgradcheck
with `nondet_tol=<tol>` as a keyword argument.
- is OpInfo-based (e.g., in test_ops_gradients.py), then modify the OpInfo for the test
to have `gradcheck_nondet_tol=<tol>`.
- is a Module test (e.g., in common_nn.py), then modify the corresponding
module_test entry to have `gradcheck_nondet_tol=<tol>`
"""
def _check_analytical_jacobian_attributes(inputs, output, nondet_tol, check_grad_dtypes,
fast_mode=False, v=None) -> Tuple[torch.Tensor, ...]:
# This is used by both fast and slow mode:
# - For slow mode, vjps[i][j] is the jth row of the Jacobian wrt the ith
# input.
# - For fast mode, vjps[i][0] is a linear combination of the rows
# of the Jacobian wrt the ith input
diff_input_list = list(_iter_tensors(inputs, True))
def vjp_fn(grad_output):
return torch.autograd.grad(output, diff_input_list, grad_output,
retain_graph=True, allow_unused=True)
# Compute everything twice to check for nondeterminism (which we call reentrancy)
if fast_mode:
vjps1 = _get_analytical_vjps_wrt_specific_output(vjp_fn, output.clone(), v)
vjps2 = _get_analytical_vjps_wrt_specific_output(vjp_fn, output.clone(), v)
else:
vjps1 = _compute_analytical_jacobian_rows(vjp_fn, output.clone())
vjps2 = _compute_analytical_jacobian_rows(vjp_fn, output.clone())
output_numel = output.numel() if not fast_mode else 1
jacobians1, types_ok, sizes_ok = _stack_and_check_tensors(vjps1, inputs, output_numel)
jacobians2, _, _ = _stack_and_check_tensors(vjps2, inputs, output_numel)
reentrant = _check_jacobians_equal(jacobians1, jacobians2, nondet_tol)
if not types_ok and check_grad_dtypes:
raise GradcheckError('Gradient has dtype mismatch')
if not sizes_ok:
raise GradcheckError('Analytical gradient has incorrect size')
if not reentrant:
raise GradcheckError('Backward is not reentrant, i.e., running backward with '
'same input and grad_output multiple times gives different values, '
'although analytical gradient matches numerical gradient. '
f'The tolerance for nondeterminism was {nondet_tol}.' +
FAILED_NONDET_MSG)
return jacobians1
def _get_analytical_vJu_backward_mode(inputs, outputs, nondet_tol, check_grad_dtypes, all_v, all_u):
reduced_jacobians: List[List[torch.Tensor]] = []
for output, v in zip(outputs, all_v):
all_vJ = _check_analytical_jacobian_attributes(inputs, output, nondet_tol, check_grad_dtypes,
fast_mode=True, v=v)
jacobian_scalars: List[torch.Tensor] = []
for vJ, u in zip(all_vJ, all_u):
# vJ is kept 2-d so that we can reuse the error checking logic from slow mode;
# squeeze it back to 1-d here before taking the dot product with u.
vJ = vJ.T.squeeze(0)
if vJ.is_complex(): # C -> R
tv = torch.view_as_real(vJ.resolve_conj())
tr = tv.select(-1, 0)
ti = tv.select(-1, 1)
jacobian_scalars.append(tr.dot(u[0]) + 1j * ti.dot(u[1]))
else: # R -> R
jacobian_scalars.append(vJ.dot(u))
reduced_jacobians.append(jacobian_scalars)
return reduced_jacobians
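# Illustrative sketch (hypothetical example, not an exact trace of the code above):
# for a real-to-real function f(x) = x ** 2 with x of shape (3,), the Jacobian is
# J = diag(2 * x), so the reduced scalar computed for a single input/output pair is
#     v^T J u = sum(v * 2 * x * u)
# which fast-mode gradcheck then compares against a finite-difference estimate of v^T (J u).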
def get_analytical_jacobian(inputs, output, nondet_tol=0.0, grad_out=1.0):
# Replicates the behavior of the old get_analytical_jacobian before the refactor
# This shares much of its code with _check_analytical_jacobian_attributes
warnings.warn("get_analytical_jacobian was part of PyTorch's private API and not "
"meant to be exposed. We are deprecating it and it will be removed "
"in a future version of PyTorch. If you have a specific use for "
"this or feature request for this to be a stable API, please file "
"us an issue at https://github.com/pytorch/pytorch/issues/new")
if grad_out != 1.0: # grad_out param is only kept for backward compatibility reasons
raise ValueError("Expected grad_out to be 1.0. get_analytical_jacobian no longer "
"supports values of grad_out != 1.0.")
if output.is_complex():
raise ValueError("Expected output to be non-complex. get_analytical_jacobian no "
"longer supports functions that return complex outputs.")
diff_input_list = list(_iter_tensors(inputs, True))
def vjp_fn(grad_output):
return torch.autograd.grad(output, diff_input_list, grad_output,
retain_graph=True, allow_unused=True)
# Compute everything twice to check for nondeterminism (which we call reentrancy)
vjps1 = _compute_analytical_jacobian_rows(vjp_fn, output.clone())
vjps2 = _compute_analytical_jacobian_rows(vjp_fn, output.clone())
output_numel = output.numel()
jacobians1, sizes_ok, types_ok = _stack_and_check_tensors(vjps1, inputs, output_numel)
jacobians2, _, _ = _stack_and_check_tensors(vjps2, inputs, output_numel)
reentrant = _check_jacobians_equal(jacobians1, jacobians2, nondet_tol)
return jacobians1, reentrant, sizes_ok, types_ok
def _get_analytical_jacobian(inputs, outputs, input_idx, output_idx):
# Computes the analytical Jacobian in slow mode for a single input-output pair.
# Forgoes performing checks on dtype, shape, and reentrancy.
jacobians = _check_analytical_jacobian_attributes(inputs, outputs[output_idx],
nondet_tol=float('inf'), check_grad_dtypes=False)
return jacobians[input_idx]
def _compute_analytical_jacobian_rows(vjp_fn, sample_output) -> List[List[Optional[torch.Tensor]]]:
# Computes Jacobian row-by-row using backward function `vjp_fn` = v^T J
# NB: this function does not assume vjp_fn(v) to return tensors with the same
# number of elements for different v. This is checked when we later combine the
# rows into a single tensor.
grad_out_base = torch.zeros_like(sample_output, memory_format=torch.legacy_contiguous_format)
flat_grad_out = grad_out_base.view(-1)
# jacobians_rows[i][j] is the jth row of the Jacobian wrt the ith input
jacobians_rows: List[List[Optional[torch.Tensor]]] = []
for j in range(flat_grad_out.numel()):
flat_grad_out.zero_()
flat_grad_out[j] = 1.0
grad_inputs = vjp_fn(grad_out_base)
for i, d_x in enumerate(grad_inputs):
if j == 0:
jacobians_rows.append([])
jacobians_rows[i] += [d_x.clone() if isinstance(d_x, torch.Tensor) else None]
return jacobians_rows
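# Illustrative sketch (hypothetical example): the row-by-row construction above
# amounts to one autograd.grad call per one-hot grad_output, e.g. for y = 3 * x:
# >>> x = torch.randn(2, dtype=torch.double, requires_grad=True)
# >>> y = 3 * x
# >>> torch.autograd.grad(y, x, torch.tensor([1., 0.], dtype=torch.double), retain_graph=True)
# (tensor([3., 0.], dtype=torch.float64),)  # first row of the Jacobian wrt x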
def _get_analytical_vjps_wrt_specific_output(vjp_fn, sample_output, v) -> List[List[Optional[torch.Tensor]]]:
vjps: List[List[Optional[torch.Tensor]]] = []
grad_inputs = vjp_fn(v.reshape(sample_output.shape))
for vjp in grad_inputs:
vjps.append([vjp.clone() if isinstance(vjp, torch.Tensor) else None])
return vjps
def _check_inputs(tupled_inputs, check_sparse_nnz) -> bool:
if not check_sparse_nnz and any(t.is_sparse or t.is_sparse_csr for t in tupled_inputs if isinstance(t, torch.Tensor)):
raise GradcheckError('gradcheck expects all tensor inputs are dense when check_sparse_nnz is set to False.')
# Make sure that gradients are saved for at least one input
any_input_requiring_grad = False
for idx, inp in enumerate(tupled_inputs):
if is_tensor_like(inp) and inp.requires_grad:
if not (inp.dtype == torch.float64 or inp.dtype == torch.complex128):
warnings.warn(
f'Input #{idx} requires gradient and '
'is not a double precision floating point or complex. '
'This check will likely fail if all the inputs are '
'not of double precision floating point or complex. ')
if inp.is_sparse:
content = inp._values()
elif inp.is_sparse_csr:
content = inp.values()
else:
content = inp
# TODO: To cover more problematic cases, replace stride = 0 check with
# "any overlap in memory" once we have a proper function to check it.
if content.layout is not torch._mkldnn: # type: ignore[attr-defined]
if not all(st > 0 or sz <= 1 for st, sz in zip(content.stride(), content.size())):
raise RuntimeError(
f'The {idx}th input has a dimension with stride 0. gradcheck only '
'supports inputs that are non-overlapping to be able to '
'compute the numerical gradients correctly. You should call '
'.contiguous on the input before passing it to gradcheck.')
any_input_requiring_grad = True
inp.retain_grad()
if not any_input_requiring_grad:
raise ValueError(
'gradcheck expects at least one input tensor to require gradient, '
'but none of them have requires_grad=True.')
return True
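# Illustrative sketch (hypothetical example): the stride-0 check above rejects
# overlapping-memory inputs such as expanded views, e.g.
# >>> base = torch.randn(1, dtype=torch.double, requires_grad=True)
# >>> gradcheck(torch.sin, (base.expand(3),))  # raises: dimension with stride 0
# Calling .contiguous() on such an input before gradcheck avoids the error.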
def _check_outputs(outputs) -> None:
if any(t.layout == torch.sparse_coo for t in outputs if isinstance(t, torch.Tensor)):
# it is easier to call to_dense() on the sparse output than
# to modify analytical jacobian
raise ValueError('Sparse output is not supported at gradcheck yet. '
'Please call to_dense() on the output of fn for gradcheck.')
if any(t.layout == torch._mkldnn for t in outputs if isinstance(t, torch.Tensor)): # type: ignore[attr-defined]
raise ValueError('MKLDNN output is not supported at gradcheck yet. '
'Please call to_dense() on the output of fn for gradcheck.')
def _check_no_differentiable_outputs(func, inputs, func_out, eps) -> bool:
# When there are no differentiable outputs, numerical gradient for a function is
# expected to be zero.
jacobians_all_inputs_outputs = _get_numerical_jacobian(func, inputs, func_out, eps=eps)
for jacobians_all_outputs_and_fixed_input in jacobians_all_inputs_outputs:
for jacobian in jacobians_all_outputs_and_fixed_input:
if torch.ne(jacobian, 0).sum() > 0:
raise GradcheckError('Numerical gradient for function expected to be zero')
return True
def _check_no_differentiable_outputs_fast(func, func_out, all_inputs, inputs_indices,
all_u, eps, nondet_tol):
for inp_idx, u in zip(inputs_indices, all_u):
jvps = _get_numerical_jvp_wrt_specific_input(func, inp_idx, all_inputs, u, eps)
for jvp in jvps:
if jvp.numel() == 0:
continue
if (jvp - torch.zeros_like(jvp)).abs().max() > nondet_tol:
raise GradcheckError('Numerical gradient for function expected to be zero')
return True
FAILED_BATCHED_GRAD_MSG = """
gradcheck or gradgradcheck failed while testing batched gradient computation.
This could have been invoked in a number of ways (via a test that calls
gradcheck/gradgradcheck directly or via an autogenerated test).
If you are adding a new operator, please file an issue and then use one of the
workarounds. The workaround depends on how your test invokes gradcheck/gradgradcheck.
If the test
- manually invokes gradcheck/gradgradcheck, then call gradcheck/gradgradcheck
with `check_batched_grad=False` as a keyword argument.
- is OpInfo-based (e.g., in test_ops_gradients.py), then modify the OpInfo for the test
to have `check_batched_grad=False` and/or `check_batched_gradgrad=False`.
If you're modifying an existing operator that supports batched grad computation,
or wish to make a new operator work with batched grad computation, please read
the following.
To compute batched grads (e.g., jacobians, hessians), we vmap over the backward
computation. The most common failure case is if there is a 'vmap-incompatible
operation' in the backward pass. Please see
NOTE: [How to write vmap-compatible backward formulas]
in the codebase for an explanation of how to fix this.
""".strip()
FAILED_BATCHED_GRAD_MSG_FWD_AD = """
gradcheck failed while testing batched gradient computation with forward-mode AD.
This test is enabled automatically when both `check_batched_grad=True`
and `check_forward_ad=True`, but can be disabled in the following ways
depending on how the test was invoked (via a test that calls gradcheck
directly or via an autogenerated test).
If you are adding a new operator, please file an issue and then use one of the
workarounds. The workaround depends on how your test invokes gradcheck/gradgradcheck.
If the test
- manually invokes gradcheck/gradgradcheck, then call gradcheck/gradgradcheck
with `check_batched_forward_grad=False` as a keyword argument.
- is OpInfo-based (e.g., in test_ops_gradients.py), then modify the OpInfo for the test
to have `check_batched_forward_grad=False`
"""
def _get_failed_batched_grad_test_msg(output_idx, input_idx, res, exp, is_forward_ad=False):
return f"""
For output {output_idx} and input {input_idx}:
{FAILED_BATCHED_GRAD_MSG_FWD_AD if is_forward_ad else FAILED_BATCHED_GRAD_MSG}
Got:
{res}
Expected:
{exp}
""".strip()
def _test_batched_grad_forward_ad(func, inputs) -> bool:
fwAD = torch.autograd.forward_ad # To avoid early import issues (do we need this?)
assert isinstance(inputs, tuple)
for input_idx, current_input in enumerate(inputs):
if not (is_tensor_like(current_input) and current_input.requires_grad):
continue
def jvp(tangent: torch.Tensor):
with fwAD.dual_level():
dual = fwAD.make_dual(current_input.detach(), tangent)
inputs_with_dual = tuple(dual if idx == input_idx else (inp.detach() if is_tensor_like(inp) else inp)
for idx, inp in enumerate(inputs))
dual_outputs = _as_tuple(func(*inputs_with_dual))
ret = []
for dual_output in dual_outputs:
if dual_output is None:
continue
primal_out, tangent_out = fwAD.unpack_dual(dual_output)
if tangent_out is not None:
ret.append(tangent_out)
else:
ret.append(torch.zeros([], dtype=primal_out.dtype, device=primal_out.device).expand(primal_out.shape))
return tuple(ret)
if not _is_float_or_complex_tensor(current_input):
continue
tangents = [torch.randn_like(current_input) for _ in range(2)]
expected = [jvp(t) for t in tangents]
expected = [torch.stack(shards) for shards in zip(*expected)]
try:
result = _vmap(jvp)(torch.stack(tangents))
except RuntimeError as ex:
# Rethrow to provide a better error message
raise GradcheckError(
f'While computing batched gradients, got: {ex}\n\n{FAILED_BATCHED_GRAD_MSG_FWD_AD}')
for input_idx, (res, exp) in enumerate(zip(result, expected)):
if torch.allclose(res, exp):
continue
raise GradcheckError(_get_failed_batched_grad_test_msg(input_idx, input_idx, res, exp, is_forward_ad=True))
return True
def _test_batched_grad(input, output, output_idx) -> bool:
# NB: _test_batched_grad compares two autograd.grad invocations with a single
# vmap(autograd.grad) invocation. It's not exactly a "gradcheck" in the
# sense that we're not comparing an analytical jacobian with a numeric one,
# but it is morally similar (we could have computed a full analytic jac
# via vmap, but that is potentially slow)
diff_input_list = list(_iter_tensors(input, True))
grad = functools.partial(torch.autograd.grad, output, diff_input_list, retain_graph=True, allow_unused=True)
def vjp(v):
results = grad(v)
results = tuple(grad if grad is not None else
torch.zeros([], dtype=inp.dtype, device=inp.device).expand(inp.shape)
for grad, inp in zip(results, diff_input_list))
return results
grad_outputs = [torch.randn_like(output) for _ in range(2)]
expected = [vjp(gO) for gO in grad_outputs]
expected = [torch.stack(shards) for shards in zip(*expected)]
# Squash warnings since these are expected to happen in most cases
# NB: this doesn't work for CUDA tests: https://github.com/pytorch/pytorch/issues/50209
with warnings.catch_warnings():
warnings.filterwarnings("ignore", message="There is a performance drop")
warnings.filterwarnings("ignore", message="Please use functorch.vmap")
try:
result = vmap(vjp)(torch.stack(grad_outputs))
except RuntimeError as ex:
# It's OK that we're not raising the error at the correct callsite.
# That's because the callsite is always going to be inside the Python
# autograd.grad call rather than in the C++ traceback pointing to the
# offending line in the backward formula.
raise GradcheckError(
f'While computing batched gradients, got: {ex}\n\n{FAILED_BATCHED_GRAD_MSG}')
for input_idx, (res, exp) in enumerate(zip(result, expected)):
if torch.allclose(res, exp):
continue
raise GradcheckError(_get_failed_batched_grad_test_msg(output_idx, input_idx, res, exp))
return True
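# Illustrative sketch (hypothetical symbols g1, g2): conceptually, the check above
# asserts that a single batched backward pass agrees with looping over backward passes:
#     vmap(vjp)(torch.stack([g1, g2]))  ==  torch.stack([vjp(g1), vjp(g2)])
# for randomly drawn grad_outputs g1 and g2.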
def _test_backward_mul_by_grad_output(outputs, inputs, check_sparse_nnz) -> bool:
# Tests that backward is multiplied by grad_output
diff_input_list: List[torch.Tensor] = list(_iter_tensors(inputs, True))
if not diff_input_list:
raise GradcheckError("no Tensors requiring grad found in input")
grads_input = torch.autograd.grad(outputs, diff_input_list,
[torch.zeros_like(o, memory_format=torch.legacy_contiguous_format) for o in outputs],
allow_unused=True)
for gi, di in zip(grads_input, diff_input_list):
if gi is None:
continue
if isinstance(gi, torch.Tensor) and gi.layout != torch.strided:
if gi.layout != di.layout:
raise GradcheckError('grad is incorrect layout (' + str(gi.layout) + ' is not ' + str(di.layout) + ')')
if gi.layout == torch.sparse_coo:
if gi.sparse_dim() != di.sparse_dim():
raise GradcheckError('grad is sparse tensor, but has incorrect sparse_dim')
if gi.dense_dim() != di.dense_dim():
raise GradcheckError('grad is sparse tensor, but has incorrect dense_dim')
gi = gi.to_dense()
di = di.to_dense()
if check_sparse_nnz:
if not torch.allclose(gi, torch.zeros_like(gi)):
raise GradcheckError('backward not multiplied by grad_output')
elif not gi.eq(0).all():
raise GradcheckError('backward not multiplied by grad_output')
if gi.dtype != di.dtype or gi.device != di.device or gi.is_sparse != di.is_sparse:
raise GradcheckError("grad is incorrect type")
if gi.size() != di.size():
raise GradcheckError('grad is incorrect size')
return True
def _test_undefined_forward_mode(func, outputs, inputs):
fwAD = torch.autograd.forward_ad
inp_tensors_idx, inp_tensors = _get_inp_tensors(inputs)
all_v, all_u, all_u_dense = _make_vectors(inp_tensors, outputs, use_forward_ad=True)
tensor_inputs = tuple(i for i in inputs if is_tensor_like(i) and i.requires_grad)
with fwAD.dual_level():
fw_grads = []
dual_inputs = []
tensor_indices = set()
for i, inp in enumerate(inputs):
if is_tensor_like(inp) and inp.requires_grad:
if inp.layout == torch._mkldnn: # type: ignore[attr-defined]
raise ValueError("MKLDNN inputs are not supported for forward AD gradcheck.")
inp = fwAD.make_dual(inp.detach(), torch.zeros_like(inp))
# If inp is a differentiable view, the dual might not be the tangent given to
# make_dual, so read it explicitly from the dual tensor
fw_grads.append(fwAD.unpack_dual(inp)[1])
tensor_indices.add(i)
dual_inputs.append(inp)
for i, (fw_grad, u) in enumerate(zip(fw_grads, all_u)):
fw_grad.copy_(u.view_as(fw_grad))
for idx, inp in enumerate(inputs):
if idx not in tensor_indices:
continue
dual_inp_obj = dual_inputs[idx]
# case 1 (Materialized Zero Tensor Tangent)
dual_inputs[idx] = fwAD.make_dual(inp.detach(), torch.zeros_like(inp))
raw_outputs = _as_tuple(func(*dual_inputs))
dual_outputs1 = filter(_is_float_or_complex_tensor, raw_outputs)
# case 2 (Efficient Zero Tensor Tangent since we don't make a dual object and pass a regular tensor)
dual_inputs[idx] = inp.detach()
raw_outputs = _as_tuple(func(*dual_inputs))
dual_outputs2 = filter(_is_float_or_complex_tensor, raw_outputs)
# reset
dual_inputs[idx] = dual_inp_obj
for index_o, (d_o1, d_o2) in enumerate(zip(dual_outputs1, dual_outputs2)):
val1, res1 = fwAD.unpack_dual(d_o1)
val2, res2 = fwAD.unpack_dual(d_o2)
if not (res1 is None or res2 is None):
if not torch.allclose(res1, res2):
raise GradcheckError(f"Mismatch in tangent values for output with index {index_o} "
f"when input {inp} has an undefined tangent value. "
f"Got {res1} but expected {res2}.")
return True
def _test_undefined_backward_mode(func, outputs, inputs) -> bool:
diff_input_list: List[torch.Tensor] = list(_iter_tensors(inputs, True))
if not diff_input_list:
raise GradcheckError("no Tensors requiring grad found in input")
def warn_bc_breaking():
warnings.warn((
'Backwards compatibility: New undefined gradient support checking '
'feature is enabled by default, but it may break existing callers '
'of this function. If this is true for you, you can call this '
'function with "check_undefined_grad=False" to disable the feature'))
def check_undefined_grad_support(output_to_check):
grads_output = [torch.zeros_like(o, memory_format=torch.legacy_contiguous_format) for o in output_to_check]
try:
grads_input = torch.autograd.grad(output_to_check, diff_input_list,
grads_output, allow_unused=True)
except RuntimeError:
warn_bc_breaking()
raise GradcheckError((
'Expected backward function to handle undefined output grads. '
'Please look at "Notes about undefined output gradients" in '
'"tools/autograd/derivatives.yaml"'))
for gi, i in zip(grads_input, diff_input_list):
if (gi is not None) and (not gi.eq(0).all()):
warn_bc_breaking()
raise GradcheckError((
'Expected all input grads to be undefined or zero when all output grads are undefined '
'or zero. Please look at "Notes about undefined output gradients" in '
'"tools/autograd/derivatives.yaml"'))
return True
# All backward functions must work properly if all output grads are undefined
outputs_to_check = [[
torch._C._functions.UndefinedGrad()(o) for o in _differentiable_outputs(func(*inputs))
# This check filters out Tensor-likes that aren't instances of Tensor.
if isinstance(o, torch.Tensor)
]]
# If there are multiple output grads, we should be able to undef one at a time without error
if len(outputs_to_check[0]) > 1:
for undef_grad_idx in range(len(outputs)):
output_to_check = _differentiable_outputs(func(*inputs))
outputs_to_check.append([
torch._C._functions.UndefinedGrad()(o) if idx == undef_grad_idx else o
for idx, o in enumerate(output_to_check)])
return all(check_undefined_grad_support(output) for output in outputs_to_check)
def _as_tuple(x):
if isinstance(x, tuple):
return x
elif isinstance(x, list):
return tuple(x)
else:
return x,
def _differentiable_outputs(x):
return tuple(o for o in _as_tuple(x) if o.requires_grad)
def _get_notallclose_msg(analytical, numerical, output_idx, input_idx, complex_indices,
test_imag=False, is_forward_ad=False) -> str:
out_is_complex = (not is_forward_ad) and complex_indices and output_idx in complex_indices
inp_is_complex = is_forward_ad and complex_indices and input_idx in complex_indices
part = "imaginary" if test_imag else "real"
element = "inputs" if is_forward_ad else "outputs"
prefix = "" if not (out_is_complex or inp_is_complex) else \
f"While considering the {part} part of complex {element} only, "
mode = "computed with forward mode " if is_forward_ad else ""
return prefix + 'Jacobian %smismatch for output %d with respect to input %d,\n' \
'numerical:%s\nanalytical:%s\n' % (mode, output_idx, input_idx, numerical, analytical)
def _transpose(matrix_of_tensors):
# returns list of tuples
return list(zip(*matrix_of_tensors))
def _real_and_imag_output(fn):
# returns new functions real(fn), and imag(fn) where real(fn) and imag(fn) behave the same as
# the original fn, except torch.real or torch.imag are applied to the complex outputs
def apply_to_c_outs(fn, fn_to_apply):
def wrapped_fn(*inputs):
outs = _as_tuple(fn(*inputs))
return tuple(fn_to_apply(o) if o.is_complex() else o for o in outs)
return wrapped_fn
return apply_to_c_outs(fn, torch.real), apply_to_c_outs(fn, torch.imag)
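# Illustrative sketch (hypothetical function): for fn(x) = x * (1 + 1j), the pair
# returned above behaves like
#     real_fn(x) -> torch.real(x * (1 + 1j))
#     imag_fn(x) -> torch.imag(x * (1 + 1j))
# and gradcheck is then run on each real-valued function separately.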
def _real_and_imag_input(fn, complex_inp_indices, tupled_inputs):
# Returns new functions that take real inputs instead of complex ones: writing the
# complex input as x + y * 1j, it builds real_fn: inp -> fn(inp + y * 1j) and
# imag_fn: inp -> fn(x + inp * 1j), where in each case the other part is held constant.
# We do not use 0 for the constant part, to make sure we always call the user function with a valid input.
def apply_to_c_inps(fn, fn_to_apply):
def wrapped_fn(*inputs):
new_inputs = list(inputs)
for should_be_complex in complex_inp_indices:
new_inputs[should_be_complex] = fn_to_apply(new_inputs[should_be_complex],
tupled_inputs[should_be_complex])
return _as_tuple(fn(*new_inputs))
return wrapped_fn
real_fn = apply_to_c_inps(fn, lambda inp, orig: inp + orig.imag * 1j)
imag_fn = apply_to_c_inps(fn, lambda inp, orig: orig.real + inp * 1j)
return real_fn, imag_fn
def _gradcheck_real_imag(gradcheck_fn, func, func_out, tupled_inputs, outputs, eps, rtol,
atol, check_grad_dtypes, check_forward_ad, check_backward_ad, nondet_tol,
check_undefined_grad):
complex_out_indices = [i for i, o in enumerate(outputs) if o.is_complex()]
has_any_complex_output = any(o.is_complex() for o in _as_tuple(func_out))
if check_backward_ad:
if has_any_complex_output:
real_fn, imag_fn = _real_and_imag_output(func)
imag_func_out = imag_fn(*tupled_inputs)
imag_outputs = _differentiable_outputs(imag_func_out)
gradcheck_fn(imag_fn, imag_func_out, tupled_inputs, imag_outputs, eps,
rtol, atol, check_grad_dtypes, nondet_tol,
complex_indices=complex_out_indices, test_imag=True)
real_func_out = real_fn(*tupled_inputs)
real_outputs = _differentiable_outputs(real_func_out)
gradcheck_fn(real_fn, real_func_out, tupled_inputs, real_outputs, eps,
rtol, atol, check_grad_dtypes, nondet_tol, complex_indices=complex_out_indices)
else:
gradcheck_fn(func, func_out, tupled_inputs, outputs, eps,
rtol, atol, check_grad_dtypes, nondet_tol)
if check_forward_ad:
complex_inp_indices = [i for i, inp in enumerate(tupled_inputs) if is_tensor_like(inp) and inp.is_complex()]
if complex_inp_indices:
real_fn, imag_fn = _real_and_imag_input(func, complex_inp_indices, tupled_inputs)
imag_inputs = [inp.imag if is_tensor_like(inp) and inp.is_complex() else inp for inp in tupled_inputs]
imag_func_out = imag_fn(*imag_inputs)
diff_imag_func_out = _differentiable_outputs(imag_func_out)
gradcheck_fn(imag_fn, imag_func_out, imag_inputs, diff_imag_func_out, eps,
rtol, atol, check_grad_dtypes, nondet_tol,
complex_indices=complex_inp_indices, test_imag=True, use_forward_ad=True)
real_inputs = [inp.real if is_tensor_like(inp) and inp.is_complex() else inp for inp in tupled_inputs]
real_func_out = real_fn(*real_inputs)
diff_real_func_out = _differentiable_outputs(real_func_out)
gradcheck_fn(real_fn, real_func_out, real_inputs, diff_real_func_out, eps,
rtol, atol, check_grad_dtypes, nondet_tol, complex_indices=complex_inp_indices,
use_forward_ad=True)
if check_undefined_grad:
_test_undefined_forward_mode(imag_fn, imag_func_out, imag_inputs)
_test_undefined_forward_mode(real_fn, real_func_out, real_inputs)
else:
gradcheck_fn(func, func_out, tupled_inputs, outputs, eps,
rtol, atol, check_grad_dtypes, nondet_tol, use_forward_ad=True)
if check_undefined_grad:
_test_undefined_forward_mode(func, outputs, tupled_inputs)
def _slow_gradcheck(func, func_out, tupled_inputs, outputs, eps, rtol, atol, check_grad_dtypes,
nondet_tol, *, use_forward_ad=False, complex_indices=None, test_imag=False):
func_out = _as_tuple(func_out)
if not outputs:
return _check_no_differentiable_outputs(func, tupled_inputs, func_out, eps)
numerical = _transpose(_get_numerical_jacobian(func, tupled_inputs, func_out, eps=eps, is_forward_ad=use_forward_ad))
# Note: [numerical vs analytical output length]
# The numerical path returns Jacobian quantities for all outputs, even if requires_grad of that
# output is False. This behavior is necessary for _check_no_differentiable_outputs to work.
numerical = [nj for o, nj in zip(func_out, numerical) if o.requires_grad]
if use_forward_ad:
analytical_forward = _get_analytical_jacobian_forward_ad(func, tupled_inputs, func_out, check_grad_dtypes=check_grad_dtypes)
for i, n_per_out in enumerate(numerical):
for j, n in enumerate(n_per_out):
a = analytical_forward[j][i]
if not _allclose_with_type_promotion(a, n.to(a.device), rtol, atol):
raise GradcheckError(_get_notallclose_msg(a, n, i, j, complex_indices, test_imag,
is_forward_ad=True))
else:
for i, o in enumerate(outputs):
analytical = _check_analytical_jacobian_attributes(tupled_inputs, o, nondet_tol, check_grad_dtypes)
for j, (a, n) in enumerate(zip(analytical, numerical[i])):
if not _allclose_with_type_promotion(a, n.to(a.device), rtol, atol):
raise GradcheckError(_get_notallclose_msg(a, n, i, j, complex_indices, test_imag))
return True
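# Illustrative sketch (notation only, not an exact trace): the numerical Jacobian
# compared above is built from central differences, one input element at a time:
#     d f / d x_k  ~=  (f(x + eps * e_k) - f(x - eps * e_k)) / (2 * eps)
# where e_k is the kth standard basis vector. Each resulting entry is then compared
# element-wise against the analytical Jacobian via _allclose_with_type_promotion.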
def _dot_with_type_promotion(u, v):
assert u.dim() == 1 and v.dim() == 1
return (u * v).sum()
def _allclose_with_type_promotion(a, b, rtol, atol):
promoted_type = torch.promote_types(a.dtype, b.dtype)
a = a.to(dtype=promoted_type)
b = b.to(dtype=promoted_type)
return torch.allclose(a, b, rtol, atol)
def _to_real_dtype(dtype):
if dtype == torch.complex128:
return torch.float64
elif dtype == torch.complex64:
return torch.float32
else:
return dtype
def _vec_from_tensor(x, generator, downcast_complex=False):
# Create a random vector with the same number of elements as x and the same
# dtype/device. If x is complex and downcast_complex is False, we create a
# complex tensor with only real component.
if x.layout == torch.sparse_coo:
# For sparse, create a random sparse vec with random values in the same
# indices. Make sure size is set so that it isn't inferred to be smaller.
x_values = x._values()
dtype = _to_real_dtype(x.dtype) if downcast_complex else x.dtype
values = torch.rand(x_values.numel(), generator=generator) \
.to(dtype=dtype, device=x.device) \
.reshape(x_values.shape)
values /= values.norm()
vec = torch.sparse_coo_tensor(x._indices(), values, x.size())
else:
dtype = _to_real_dtype(x.dtype) if downcast_complex else x.dtype
vec = torch.rand(x.numel(), generator=generator).to(dtype=dtype, device=x.device)
vec /= vec.norm()
return vec
def _get_inp_tensors(tupled_inputs):
inp_idx_tup = [(i, t) for i, t in enumerate(tupled_inputs) if is_tensor_like(t) and t.requires_grad]
return [tup[0] for tup in inp_idx_tup], [tup[1] for tup in inp_idx_tup]
def _adjusted_atol(atol, u, v):
# In slow gradcheck, we compare A and B element-wise, i.e., for some a, b we
# allow: |a - b| < atol + rtol * b. But since we now compare q1 = v^T A u and
# q2 = v^T B u, we must allow |q1 - q2| < v^T E u + rtol * v^T B u, where E is
# the correctly sized matrix in which each entry is atol.
#
# We see that atol needs to be scaled by v^T 1 u (where 1 is the all-ones matrix of
# the appropriate size): v^T 1 u = \sum_{i} \sum_{j} v_i * u_j = (\sum_{i} v_i)(\sum_{j} u_j)
# TODO: properly handle case when u is tuple instead of only taking first element
u = u[0] if isinstance(u, tuple) else u
sum_u = torch.sparse.sum(u) if u.layout == torch.sparse_coo else u.sum()
sum_v = 1. if v is None else torch.sparse.sum(v) if v.layout == torch.sparse_coo else v.sum()
return atol * float(sum_u) * float(sum_v)
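# Illustrative sketch (hypothetical numbers): with atol = 1e-5, u = [0.6, 0.8] and
# v = [1.0], the adjusted tolerance is
#     atol * sum(u) * sum(v) = 1e-5 * 1.4 * 1.0 = 1.4e-5
# so fast mode allows |v^T A u - v^T B u| up to 1.4e-5 plus the usual rtol term.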
FAST_FAIL_SLOW_OK_MSG = """
Fast gradcheck failed but element-wise differences are small. This means that the
test might've passed in slow_mode!
If you are adding a new operator, please file an issue and then use one of the
workarounds. The workaround depends on how your test invokes gradcheck/gradgradcheck:
If the test
- manually invokes gradcheck/gradgradcheck, then call gradcheck/gradgradcheck
with `fast_mode=False` as a keyword argument.
- is OpInfo-based (e.g., in test_ops_gradients.py), then modify the OpInfo for the test
to have `gradcheck_fast_mode=False`
- is a Module test (e.g., in common_nn.py), then modify the corresponding
module_test entry to have `gradcheck_fast_mode=False`
""".strip()
def _run_slow_mode_and_get_error(func, tupled_inputs, outputs, input_idx, output_idx, rtol, atol, is_forward_ad):
# Compute jacobians in slow mode for better error message
slow_numerical = _get_numerical_jacobian(func, tupled_inputs, outputs, is_forward_ad=is_forward_ad)[input_idx][output_idx]
if is_forward_ad:
def new_fn(inp):
new_inputs = list(tupled_inputs)
new_inputs[input_idx] = inp
return _as_tuple(func(*new_inputs))[output_idx]
slow_analytical = _get_analytical_jacobian_forward_ad(new_fn, (tupled_inputs[input_idx],), (outputs[output_idx],))[0][0]
else:
slow_analytical = _get_analytical_jacobian(tupled_inputs, outputs, input_idx, output_idx)
# Assume jacobians are non-empty and have the same shape
slow_max_diff = (slow_numerical - slow_analytical).abs().max()
slow_allclose = torch.allclose(slow_analytical, slow_numerical, rtol, atol)
msg = ("\nThe above quantities relating the numerical and analytical jacobians are computed \n"
"in fast mode. See: https://github.com/pytorch/pytorch/issues/53876 for more background \n"
"about fast mode. Below, we recompute numerical and analytical jacobians in slow mode:\n\n"
f"Numerical:\n {slow_numerical}\n"
f"Analytical:\n{slow_analytical}\n\n"
f"The max per-element difference (slow mode) is: {slow_max_diff}.\n")
if slow_allclose:
# Slow gradcheck would've passed!
msg += FAST_FAIL_SLOW_OK_MSG
return msg
def _to_flat_dense_if_sparse(tensor):
if tensor.layout == torch.sparse_coo:
return tensor.to_dense().reshape(-1)
else:
return tensor
def _make_vectors(inp_tensors, outputs, *, use_forward_ad):
# Use our own generator to avoid messing with the user's RNG state
g_cpu = torch.Generator()
all_u = []
all_u_dense = []
for inp in inp_tensors:
ur = _vec_from_tensor(inp, g_cpu, True)
ur_dense = _to_flat_dense_if_sparse(ur)
if inp.is_complex():
ui = _vec_from_tensor(inp, g_cpu, True)
all_u.append((ur, ui))
ui_dense = _to_flat_dense_if_sparse(ui)
all_u_dense.append((ur_dense, ui_dense))
else:
all_u.append(ur)
all_u_dense.append(ur_dense)
all_v = None if use_forward_ad else [_vec_from_tensor(out, g_cpu) for out in outputs]
return all_v, all_u, all_u_dense
def _check_analytical_numerical_equal(all_analytical, all_numerical, complex_indices, tupled_inputs, outputs,
func, all_v, all_u, rtol, atol, test_imag, *, is_forward_ad=False):
for i, all_numerical_for_input_i in enumerate(all_numerical):
for j, n in enumerate(all_numerical_for_input_i):
# Forward AD generates the transpose of what this function expects
if is_forward_ad:
a = all_analytical[i][j]
else:
a = all_analytical[j][i]
n = n.to(device=a.device)
updated_atol = _adjusted_atol(atol, all_u[i], all_v[j] if all_v else None)
if not _allclose_with_type_promotion(a, n.to(a.device), rtol, updated_atol):
jacobians_str = _run_slow_mode_and_get_error(func, tupled_inputs, outputs, i, j, rtol, atol, is_forward_ad)
raise GradcheckError(_get_notallclose_msg(a, n, j, i, complex_indices, test_imag, is_forward_ad) + jacobians_str)
def _fast_gradcheck(func, func_out, inputs, outputs, eps, rtol,
atol, check_grad_dtypes, nondet_tol, *, use_forward_ad=False, complex_indices=None, test_imag=False):
# See https://github.com/pytorch/pytorch/issues/53876 for details
inp_tensors_idx, inp_tensors = _get_inp_tensors(inputs)
# Backward mode computes v^T * J (VJP)
# Since we computed J * u (JVP) through the finite difference method, we perform an equality check
# between (v^T * J) * u and v^T * (J * u)
# ----
# Forward mode computes J * u (JVP)
# Since we already compute the JVP through the finite difference method,
# we don't need v for the correctness check here, as asserted below
all_v, all_u, all_u_dense = _make_vectors(inp_tensors, outputs, use_forward_ad=use_forward_ad)
numerical_vJu = _get_numerical_vJu(func, inputs, inp_tensors_idx, func_out, all_u, all_v, eps, is_forward_ad=use_forward_ad)
# TODO: replicate https://github.com/pytorch/pytorch/pull/77743 for fast gradcheck as well
if use_forward_ad:
assert all_v is None
analytical_vJu = _get_analytical_jacobian_forward_ad(func, inputs, _as_tuple(func_out),
all_u=all_u, check_grad_dtypes=check_grad_dtypes)
else:
if not outputs:
_check_no_differentiable_outputs_fast(func, func_out, inputs, inp_tensors_idx, all_u, eps, nondet_tol)
analytical_vJu = _get_analytical_vJu_backward_mode(inputs, outputs, nondet_tol, check_grad_dtypes, all_v, all_u_dense)
_check_analytical_numerical_equal(analytical_vJu, numerical_vJu, complex_indices,
inputs, outputs, func, all_v, all_u, rtol, atol, test_imag, is_forward_ad=use_forward_ad)
return True
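# Illustrative sketch (notation only): fast mode checks the scalar identity
#     v^T (J u) == (v^T J) u
# for random unit vectors u and v. The left-hand side is estimated numerically,
#     J u ~= (f(x + eps * u) - f(x - eps * u)) / (2 * eps),
# and the right-hand side analytically with a single backward (or forward-AD) call,
# so only a few directional derivatives are evaluated instead of the full Jacobian.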
# Note [VarArg of Tensors]
# ~~~~~~~~~~~~~~~~~~~~~~~~
# 'func' accepts a vararg of tensors, which isn't expressible in the type system at the moment.
# If https://mypy.readthedocs.io/en/latest/additional_features.html?highlight=callable#extended-callable-types is accepted,
# the '...' first argument of Callable can be replaced with VarArg(Tensor).
# For now, we permit any input.
def gradcheck(
func: Callable[..., Union[_TensorOrTensors]], # See Note [VarArg of Tensors]
inputs: _TensorOrTensors,
*,
eps: float = 1e-6,
atol: float = 1e-5,
rtol: float = 1e-3,
raise_exception: bool = True,
check_sparse_nnz: bool = False,
nondet_tol: float = 0.0,
check_undefined_grad: bool = True,
check_grad_dtypes: bool = False,
check_batched_grad: bool = False,
check_batched_forward_grad: bool = False,
check_forward_ad: bool = False,
check_backward_ad: bool = True,
fast_mode: bool = False,
) -> bool:
r"""Check gradients computed via small finite differences against analytical
gradients w.r.t. tensors in :attr:`inputs` that are of floating point or complex type
and with ``requires_grad=True``.
The check between numerical and analytical gradients uses :func:`~torch.allclose`.
For most of the complex functions we consider for optimization purposes, no notion of
Jacobian exists. Instead, gradcheck verifies if the numerical and analytical values of
the Wirtinger and Conjugate Wirtinger derivatives are consistent. Because the gradient
computation is done under the assumption that the overall function has a real-valued
output, we treat functions with complex output in a special way. For these functions,
gradcheck is applied to two real-valued functions corresponding to taking the real
components of the complex outputs for the first, and taking the imaginary components
of the complex outputs for the second. For more details, check out
:ref:`complex_autograd-doc`.
.. note::
The default values are designed for :attr:`input` of double precision.
This check will likely fail if :attr:`input` is of less precision, e.g.,
``FloatTensor``.
.. warning::
If any checked tensor in :attr:`input` has overlapping memory, i.e.,
different indices pointing to the same memory address (e.g., from
:func:`torch.expand`), this check will likely fail because the numerical
gradients computed by point perturbation at such indices will change
values at all other indices that share the same memory address.
Args:
func (function): a Python function that takes Tensor inputs and returns
a Tensor or a tuple of Tensors
inputs (tuple of Tensor or Tensor): inputs to the function
eps (float, optional): perturbation for finite differences
atol (float, optional): absolute tolerance
rtol (float, optional): relative tolerance
raise_exception (bool, optional): indicating whether to raise an exception if
the check fails. The exception gives more information about the
exact nature of the failure. This is helpful when debugging gradchecks.
check_sparse_nnz (bool, optional): if True, gradcheck allows for SparseTensor input,
and for any SparseTensor at input, gradcheck will perform check at nnz positions only.
nondet_tol (float, optional): tolerance for non-determinism. When running
identical inputs through the differentiation, the results must either match
exactly (default, 0.0) or be within this tolerance.
check_undefined_grad (bool, optional): if True, check if undefined output grads
are supported and treated as zeros, for ``Tensor`` outputs.
check_batched_grad (bool, optional): if True, check if we can compute
batched gradients using prototype vmap support. Defaults to False.
check_batched_forward_grad (bool, optional): if True, checks if we can compute
batched forward gradients using forward ad and prototype vmap support. Defaults to False.
check_forward_ad (bool, optional): if True, check that the gradients computed with forward
mode AD match the numerical ones. Defaults to False.
check_backward_ad (bool, optional): if False, do not perform any checks that rely on
backward mode AD to be implemented. Defaults to True.
fast_mode (bool, optional): Fast mode for gradcheck and gradgradcheck is currently only
implemented for R to R functions. If none of the inputs and outputs are complex,
a faster implementation of gradcheck that no longer computes the entire jacobian
is run; otherwise, we fall back to the slow implementation.
Returns:
True if all differences satisfy allclose condition
"""
assert check_forward_ad or check_backward_ad, \
"Expected at least one of check_forward_ad or check_backward_ad to be True"
assert not (check_batched_grad and not check_backward_ad), (
"Setting check_batched_grad=True requires check_backward_ad to be True")
assert not (check_batched_forward_grad and not check_forward_ad), (
"Setting check_batched_forward_grad=True requires check_forward_ad to be True")
args = locals().copy()
args.pop("raise_exception")
if not raise_exception:
try:
return _gradcheck_helper(**args)
except GradcheckError as e:
return False
else:
return _gradcheck_helper(**args)
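# Illustrative usage sketch (hypothetical example):
# >>> x = torch.randn(3, dtype=torch.double, requires_grad=True)
# >>> torch.autograd.gradcheck(torch.sin, (x,), eps=1e-6, atol=1e-4)
# True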
def _gradcheck_helper(func, inputs, eps, atol, rtol, check_sparse_nnz, nondet_tol, check_undefined_grad,
check_grad_dtypes, check_batched_grad, check_batched_forward_grad, check_forward_ad,
check_backward_ad, fast_mode):
tupled_inputs = _as_tuple(inputs)
_check_inputs(tupled_inputs, check_sparse_nnz)
func_out = func(*tupled_inputs)
outputs = _differentiable_outputs(func_out)
_check_outputs(outputs)
gradcheck_fn = _fast_gradcheck if fast_mode else _slow_gradcheck
_gradcheck_real_imag(gradcheck_fn, func, func_out, tupled_inputs, outputs, eps,
rtol, atol, check_grad_dtypes, check_forward_ad=check_forward_ad,
check_backward_ad=check_backward_ad, nondet_tol=nondet_tol,
check_undefined_grad=check_undefined_grad)
if check_batched_forward_grad:
_test_batched_grad_forward_ad(func, tupled_inputs)
# Short circuit because remaining tests rely on backward AD to be implemented
if not check_backward_ad:
return True
for i, o in enumerate(outputs):
if check_batched_grad:
_test_batched_grad(tupled_inputs, o, i)
_test_backward_mul_by_grad_output(outputs, tupled_inputs, check_sparse_nnz)
if check_undefined_grad and check_backward_ad:
_test_undefined_backward_mode(func, outputs, tupled_inputs)
return True
def gradgradcheck(
func: Callable[..., _TensorOrTensors], # See Note [VarArg of Tensors]
inputs: _TensorOrTensors,
grad_outputs: Optional[_TensorOrTensors] = None,
*,
eps: float = 1e-6,
atol: float = 1e-5,
rtol: float = 1e-3,
gen_non_contig_grad_outputs: bool = False,
raise_exception: bool = True,
nondet_tol: float = 0.0,
check_undefined_grad: bool = True,
check_grad_dtypes: bool = False,
check_batched_grad: bool = False,
check_fwd_over_rev: bool = False,
check_rev_over_rev: bool = True,
fast_mode: bool = False,
) -> bool:
r"""Check gradients of gradients computed via small finite differences
against analytical gradients w.r.t. tensors in :attr:`inputs` and
:attr:`grad_outputs` that are of floating point or complex type and with
``requires_grad=True``.
This function checks that backpropagating through the gradients computed
with the given :attr:`grad_outputs` is correct.
The check between numerical and analytical gradients uses :func:`~torch.allclose`.
.. note::
The default values are designed for :attr:`input` and
:attr:`grad_outputs` of double precision. This check will likely fail if
they are of less precision, e.g., ``FloatTensor``.
.. warning::
If any checked tensor in :attr:`input` and :attr:`grad_outputs` has
overlapping memory, i.e., different indices pointing to the same memory
address (e.g., from :func:`torch.expand`), this check will likely fail
because the numerical gradients computed by point perturbation at such
indices will change values at all other indices that share the same
memory address.
Args:
func (function): a Python function that takes Tensor inputs and returns
a Tensor or a tuple of Tensors
inputs (tuple of Tensor or Tensor): inputs to the function
grad_outputs (tuple of Tensor or Tensor, optional): The gradients with
respect to the function's outputs.
eps (float, optional): perturbation for finite differences
atol (float, optional): absolute tolerance
rtol (float, optional): relative tolerance
gen_non_contig_grad_outputs (bool, optional): if :attr:`grad_outputs` is
``None`` and :attr:`gen_non_contig_grad_outputs` is ``True``, the
randomly generated gradient outputs are made to be noncontiguous
raise_exception (bool, optional): indicating whether to raise an exception if
the check fails. The exception gives more information about the
exact nature of the failure. This is helpful when debugging gradchecks.
nondet_tol (float, optional): tolerance for non-determinism. When running
identical inputs through the differentiation, the results must either match
exactly (default, 0.0) or be within this tolerance. Note that a small amount
of nondeterminism in the gradient will lead to larger inaccuracies in
the second derivative.
check_undefined_grad (bool, optional): if True, check if undefined output grads
are supported and treated as zeros
check_batched_grad (bool, optional): if True, check if we can compute
batched gradients using prototype vmap support. Defaults to False.
fast_mode (bool, optional): if True, run a faster implementation of gradgradcheck that
no longer computes the entire jacobian.
Returns:
True if all differences satisfy allclose condition
"""
assert check_fwd_over_rev or check_rev_over_rev, \
"Expected at least one of check_fwd_over_rev or check_rev_over_rev to be True"
assert not (check_undefined_grad and not check_rev_over_rev), \
"Setting check_undefined_grad=True requires check_rev_over_rev to be True"
assert not (check_batched_grad and not check_rev_over_rev), (
"Setting check_batched_grad=True requires check_rev_over_rev to be True")
# TODO: do we want to test this too?
# assert not (check_batched_forward_grad and not check_fwd_over_rev), (
# "Setting check_batched_forward_grad=True requires check_fwd_over_rev to be True")
tupled_inputs = _as_tuple(inputs)
if grad_outputs is None:
# If grad_outputs is not specified, create random Tensors of the same shape, type, and device as the outputs
outputs = _differentiable_outputs(func(*tupled_inputs))
tupled_grad_outputs = tuple(
torch.testing.make_tensor(
x.shape,
dtype=x.dtype if x.is_floating_point() or x.is_complex() else torch.double,
device=x.device,
low=-1,
high=1,
requires_grad=True,
noncontiguous=gen_non_contig_grad_outputs,
)
for x in outputs
)
else:
tupled_grad_outputs = _as_tuple(grad_outputs)
num_outputs = len(tupled_grad_outputs)
# NB: We need to save the requires_grad information about the inputs here because gradcheck detaches inputs
# before running forward mode AD
diff_input_args_indices = set(i for i, x in enumerate(tupled_inputs) if is_tensor_like(x) and x.requires_grad)
diff_grad_output_indices = set(i for i, x in enumerate(tupled_grad_outputs) if x.requires_grad)
def new_func(*args):
# Restore the requires_grad information
input_args = tuple(x.requires_grad_() if i in diff_input_args_indices else x for i, x in enumerate(args[:-num_outputs]))
outputs = _differentiable_outputs(func(*input_args))
grad_outputs = tuple(x.requires_grad_() if i in diff_grad_output_indices else x for i, x in enumerate(args[-num_outputs:]))
diff_input_args = tuple(x for i, x in enumerate(input_args) if i in diff_input_args_indices)
grad_inputs = torch.autograd.grad(outputs, diff_input_args, grad_outputs, create_graph=True,
allow_unused=True)
grad_inputs = tuple(g for g in grad_inputs if g is not None)
return grad_inputs
return gradcheck(
new_func, tupled_inputs + tupled_grad_outputs, eps=eps, atol=atol, rtol=rtol, raise_exception=raise_exception,
nondet_tol=nondet_tol, check_undefined_grad=check_undefined_grad,
check_grad_dtypes=check_grad_dtypes, check_batched_grad=check_batched_grad, fast_mode=fast_mode,
check_forward_ad=check_fwd_over_rev, check_backward_ad=check_rev_over_rev)
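# Illustrative usage sketch (hypothetical example):
# >>> x = torch.randn(2, dtype=torch.double, requires_grad=True)
# >>> torch.autograd.gradgradcheck(lambda t: (t ** 3).sum(), (x,))
# True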
| pytorch-master | torch/autograd/gradcheck.py |
from typing import Any, Dict, List, Optional
from warnings import warn
import torch
import torch.cuda
from torch._C._autograd import _ExperimentalConfig
from torch.autograd import (
_disable_profiler,
_enable_profiler,
_kineto_step,
_prepare_profiler,
_ProfilerResult,
_supported_activities,
DeviceType,
kineto_available,
ProfilerActivity,
ProfilerConfig,
ProfilerState,
)
from torch.autograd.profiler_util import (
_filter_name,
_filter_stack_entry,
_rewrite_name,
EventList,
FunctionEvent,
MEMORY_EVENT_NAME,
MemRecordsAcc,
OUT_OF_MEMORY_EVENT_NAME,
)
from torch.futures import Future
try:
# Available in Python >= 3.2
from contextlib import ContextDecorator
except ImportError:
import functools
class ContextDecorator(object): # type: ignore[no-redef]
def __enter__(self):
raise NotImplementedError
def __exit__(self, exc_type, exc_val, exc_tb):
raise NotImplementedError
def __call__(self, func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
with self:
return func(*args, **kwargs)
return wrapped
class profile(object):
"""Context manager that manages autograd profiler state and holds a summary of results.
Under the hood it just records events of functions being executed in C++ and
exposes those events to Python. You can wrap any code in it and it will
only report the runtime of PyTorch functions.
Note: the profiler is thread local and is automatically propagated into async tasks.
Args:
enabled (bool, optional): Setting this to False makes this context manager a no-op.
use_cuda (bool, optional): Enables timing of CUDA events as well using the cudaEvent API.
Adds approximately 4us of overhead to each tensor operation.
record_shapes (bool, optional): If shapes recording is set, information
about input dimensions will be collected. This allows one to see which
dimensions have been used under the hood and further group by them
using prof.key_averages(group_by_input_shape=True). Please note that
shape recording might skew your profiling data. It is recommended to
use separate runs with and without shape recording to validate the timing.
Most likely the skew will be negligible for bottommost events (in the case
of nested function calls). But for higher-level functions the total
self cpu time might be artificially increased because of the shape
collection.
with_flops (bool, optional): If with_flops is set, the profiler will estimate
the FLOPs (floating point operations) value using the operator's input shape.
This allows one to estimate the hardware performance. Currently,
this option only works for the matrix multiplication and 2D convolution operators.
profile_memory (bool, optional): track tensor memory allocation/deallocation.
with_stack (bool, optional): record source information (file and line number) for the ops.
with_modules (bool): record module hierarchy (including function names)
corresponding to the callstack of the op. e.g. If module A's forward calls
module B's forward which contains an aten::add op,
then aten::add's module hierarchy is A.B
Note that this support exists, at the moment, only for TorchScript models
and not eager mode models.
use_kineto (bool, optional): experimental, enable profiling with Kineto profiler.
use_cpu (bool, optional): profile CPU events; setting to ``False`` requires
``use_kineto=True`` and can be used to lower the overhead for GPU-only profiling.
experimental_config (_ExperimentalConfig) : A set of experimental options
used by profiler libraries like Kineto. Note, backward compatibility is not guaranteed.
.. warning::
Enabling memory profiling or source attribution incurs additional profiler
overhead
.. warning::
This context manager should not be called recursively, i.e. no nested
instances are allowed
.. warning::
Due to some CUDA multiprocessing limitations (multiprocessing-cuda-note_),
one cannot use the profiler with ``use_cuda = True`` to benchmark
DataLoaders with ``num_workers > 0``. If you wish to benchmark data loading,
please use ``use_cuda = False`` or ``num_workers = 0``.
Example:
>>> # xdoctest: +SKIP
>>> x = torch.randn((1, 1), requires_grad=True)
>>> with torch.autograd.profiler.profile() as prof:
>>> for _ in range(100): # any normal python code, really!
>>> y = x ** 2
>>> y.backward()
>>> # NOTE: some columns were removed for brevity
>>> print(prof.key_averages().table(sort_by="self_cpu_time_total"))
----------------------------------- --------------- --------------- ---------------
Name Self CPU total CPU time avg Number of Calls
----------------------------------- --------------- --------------- ---------------
mul 32.048ms 32.048ms 200
pow 27.041ms 27.041ms 200
PowBackward0 9.727ms 55.483ms 100
torch::autograd::AccumulateGrad 9.148ms 9.148ms 100
torch::autograd::GraphRoot 691.816us 691.816us 100
----------------------------------- --------------- --------------- ---------------
"""
def __init__(
self,
enabled=True,
*,
use_cuda=False,
record_shapes=False,
with_flops=False,
profile_memory=False,
with_stack=False,
with_modules=False,
use_kineto=False,
use_cpu=True,
experimental_config=None):
self.enabled: bool = enabled
if not self.enabled:
return
self.use_cuda = use_cuda
self.function_events: Optional[EventList] = None
self.entered = False
self.record_shapes = record_shapes
self.with_flops = with_flops
self.record_shapes |= self.with_flops
self.profile_memory = profile_memory
self.with_stack = with_stack
self.with_modules = with_modules
self.use_cpu = use_cpu
if experimental_config is None:
experimental_config = _ExperimentalConfig()
self.experimental_config = experimental_config
self.kineto_results: Optional[_ProfilerResult] = None
if not self.use_cpu:
assert use_kineto, \
"Device-only events supported only with Kineto (use_kineto=True)"
if self.use_cuda and not torch.cuda.is_available():
warn("CUDA is not available, disabling CUDA profiling")
self.use_cuda = False
self.kineto_activities = set()
if self.use_cpu:
self.kineto_activities.add(ProfilerActivity.CPU)
self.profiler_kind = ProfilerState.KINETO
if self.use_cuda:
if (not use_kineto or ProfilerActivity.CUDA not in
_supported_activities()):
assert self.use_cpu, "Legacy CUDA profiling requires use_cpu=True"
self.profiler_kind = ProfilerState.KINETO_GPU_FALLBACK
else:
self.kineto_activities.add(ProfilerActivity.CUDA)
assert len(self.kineto_activities) > 0, \
"No activities specified for the profiler"
def config(self):
return ProfilerConfig(
self.profiler_kind,
self.record_shapes,
self.profile_memory,
self.with_stack,
self.with_flops,
self.with_modules,
self.experimental_config)
def __enter__(self):
if not self.enabled:
return
if self.entered:
raise RuntimeError("Profiler context manager is not reentrant")
self._prepare_trace()
self._start_trace()
return self
def _prepare_trace(self):
self.entered = True
_prepare_profiler(self.config(), self.kineto_activities)
def _start_trace(self):
self.entered = True
_enable_profiler(self.config(), self.kineto_activities)
def __exit__(self, exc_type, exc_val, exc_tb):
if not self.enabled:
return
if self.use_cuda:
torch.cuda.synchronize()
self.kineto_results = _disable_profiler()
parsed_results = self._parse_kineto_results(self.kineto_results)
self.function_events = EventList(
parsed_results,
use_cuda=self.use_cuda,
profile_memory=self.profile_memory,
with_flops=self.with_flops)
self.function_events._build_tree()
return False
def __repr__(self):
if self.function_events is None:
return '<unfinished torch.autograd.profile>'
return repr(self.function_events)
def __str__(self):
if self.function_events is None:
return '<unfinished torch.autograd.profile>'
return str(self.function_events)
def _check_finish(self):
if self.function_events is None:
raise RuntimeError("Profiler didn't finish running")
def table(self, sort_by=None, row_limit=100, max_src_column_width=75, header=None, top_level_events_only=False):
self._check_finish()
assert self.function_events is not None
return self.function_events.table(
sort_by=sort_by, row_limit=row_limit, max_src_column_width=max_src_column_width, header=header,
top_level_events_only=top_level_events_only
)
table.__doc__ = EventList.table.__doc__
def export_chrome_trace(self, path):
self._check_finish()
if kineto_available():
self.kineto_results.save(path) # type: ignore[union-attr]
else:
return self.function_events.export_chrome_trace(path) # type: ignore[union-attr]
export_chrome_trace.__doc__ = EventList.export_chrome_trace.__doc__
def export_stacks(self, path: str, metric: str = "self_cpu_time_total"):
self._check_finish()
assert self.function_events is not None, "Expected profiling results"
assert self.with_stack, "export_stacks() requires with_stack=True"
return self.function_events.export_stacks(path, metric)
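# Illustrative usage sketch (hypothetical file name): export_stacks requires
# with_stack=True and writes aggregated stack traces with the chosen metric in a
# plain-text format that external flame-graph tooling can consume, e.g.
# >>> with torch.autograd.profiler.profile(with_stack=True) as prof:
# ...     y = torch.randn(8, 8).mm(torch.randn(8, 8))
# >>> prof.export_stacks("profiler_stacks.txt", "self_cpu_time_total")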
def key_averages(self, group_by_input_shape=False, group_by_stack_n=0):
self._check_finish()
assert self.function_events is not None, "Expected profiling results"
return self.function_events.key_averages(group_by_input_shape, group_by_stack_n)
key_averages.__doc__ = EventList.key_averages.__doc__
def total_average(self):
self._check_finish()
assert self.function_events is not None, "Expected profiling results"
return self.function_events.total_average()
total_average.__doc__ = EventList.total_average.__doc__
@property
def self_cpu_time_total(self):
""" Returns total time spent on CPU obtained as a sum of
all self times across all the events.
"""
self._check_finish()
assert self.function_events is not None
return self.function_events.self_cpu_time_total
def _parse_kineto_results(self, result):
# result.events() has most of the events - PyTorch op-level and device-level events
trace_start_us = result.trace_start_us()
mem_records = [[evt, False] for evt in result.events() if evt.name() == MEMORY_EVENT_NAME]
oom_records = [evt for evt in result.events() if evt.name() == OUT_OF_MEMORY_EVENT_NAME]
mem_records_acc = MemRecordsAcc(mem_records)
def _cpu_memory_usage(mem_record):
return mem_record.nbytes() if \
mem_record.device_type() in [DeviceType.CPU, DeviceType.MKLDNN, DeviceType.IDEEP] \
else 0
def _cuda_memory_usage(mem_record):
return mem_record.nbytes() if \
mem_record.device_type() in [DeviceType.CUDA, DeviceType.HIP] \
else 0
# Create and return FunctionEvent list
function_events = []
cuda_corr_map: Dict[int, List[FunctionEvent]] = {}
max_evt_id = 0
for kineto_event in result.events():
if _filter_name(kineto_event.name()):
continue
rel_start_us = kineto_event.start_us() - trace_start_us
rel_end_us = rel_start_us + kineto_event.duration_us()
abs_end_us = kineto_event.start_us() + kineto_event.duration_us()
cpu_memory_usage = 0
cuda_memory_usage = 0
if kineto_event.device_type() == DeviceType.CPU:
# find the corresponding memory allocation events
for mem_record in mem_records_acc.in_interval(kineto_event.start_us(), abs_end_us):
cpu_memory_usage += _cpu_memory_usage(mem_record[0])
cuda_memory_usage += _cuda_memory_usage(mem_record[0])
mem_record[1] = True
is_async = kineto_event.is_async() or (
kineto_event.start_thread_id() != kineto_event.end_thread_id()
)
fe = FunctionEvent(
id=kineto_event.correlation_id(),
name=_rewrite_name(name=kineto_event.name(), with_wildcard=True),
trace_name=_rewrite_name(name=kineto_event.name(), with_wildcard=False),
thread=kineto_event.start_thread_id(),
start_us=rel_start_us,
end_us=rel_end_us,
fwd_thread=kineto_event.fwd_thread_id(),
input_shapes=kineto_event.shapes(),
stack=[entry for entry in kineto_event.stack() if _filter_stack_entry(entry)],
scope=kineto_event.scope(),
cpu_memory_usage=cpu_memory_usage,
cuda_memory_usage=cuda_memory_usage,
is_async=is_async,
sequence_nr=kineto_event.sequence_nr(),
device_type=kineto_event.device_type(),
device_index=kineto_event.device_index(),
flops=kineto_event.flops(),
)
max_evt_id = fe.id if fe.id > max_evt_id else max_evt_id
if fe.device_type == DeviceType.CPU and not fe.is_async:
# Check if we have CUDA time as a fallback
cuda_time = kineto_event.cuda_elapsed_us()
if cuda_time > 0:
fe.append_kernel(
fe.name,
fe.device_index,
cuda_time)
fe.is_legacy = True
function_events.append(fe)
corr_id = kineto_event.linked_correlation_id()
if corr_id > 0:
if corr_id not in cuda_corr_map:
cuda_corr_map[corr_id] = []
cuda_corr_map[corr_id].append(fe)
# associate CUDA kernels and CUDA runtime (CPU) with CPU events
for fe in function_events:
if (fe.device_type == DeviceType.CPU and not fe.is_async and
fe.id in cuda_corr_map):
for f_evt in cuda_corr_map[fe.id]:
if f_evt.device_type == DeviceType.CUDA:
fe.append_kernel(
f_evt.name,
f_evt.device_index,
f_evt.time_range.end - f_evt.time_range.start)
elif f_evt.device_type == DeviceType.CPU:
# make sure that 'thread' of a CPU Kineto (e.g. CUDA Runtime) event is associated
# with the 'thread' of the corresponding linked PyTorch event to properly track
# parents and children
f_evt.thread = fe.thread
def createFunctionEventForMemoryEvents(evt):
rel_start_us = evt.start_us() - trace_start_us
fe = FunctionEvent(
id=max_evt_id,
name=evt.name(),
trace_name=None, # not outputting in the trace
thread=evt.start_thread_id(),
start_us=rel_start_us,
end_us=rel_start_us, # no duration
fwd_thread=evt.start_thread_id(),
input_shapes=[],
stack=[],
scope=0, # RecordScope::FUNCTION
cpu_memory_usage=_cpu_memory_usage(evt),
cuda_memory_usage=_cuda_memory_usage(evt),
is_async=False,
sequence_nr=-1,
device_type=DeviceType.CPU,
device_index=0,
)
return fe
# output top-level memory events
for mem_record in mem_records:
if not mem_record[1]:
max_evt_id += 1
fe = createFunctionEventForMemoryEvents(mem_record[0])
function_events.append(fe)
for oom_record in oom_records:
max_evt_id += 1
fe = createFunctionEventForMemoryEvents(oom_record)
function_events.append(fe)
function_events.sort(key=lambda evt: [evt.time_range.start, -evt.time_range.end])
return function_events
class record_function(ContextDecorator):
"""Context manager/function decorator that adds a label to a block of
Python code (or function) when running autograd profiler. It is
useful when tracing the code profile.
Args:
name (str): Label assigned to the block of code.
node_id (int): ID of node, for distributed profiling. Unset in
non-distributed cases.
Example:
>>> x = torch.randn((1, 1), requires_grad=True)
>>> with torch.autograd.profiler.profile() as prof:
... y = x ** 2
... with torch.autograd.profiler.record_function("label-z"): # label the block
... z = y ** 3
... y.backward()
...
>>> # xdoctest: +IGNORE_WANT
>>> # NOTE: some columns were removed for brevity
>>> print(prof.key_averages().table(sort_by="self_cpu_time_total"))
----------------------------------- --------------- --------------- ---------------
Name Self CPU total % CPU time avg Number of Calls
----------------------------------- --------------- --------------- ---------------
pow 60.77% 47.470us 3
mul 21.73% 25.465us 2
PowBackward0 12.03% 121.891us 1
torch::autograd::AccumulateGrad 2.70% 6.324us 1
label-z 2.13% 12.421us 1
torch::autograd::GraphRoot 0.64% 1.503us 1
----------------------------------- --------------- --------------- ---------------
Self CPU time total: 234.344us
CUDA time total: 0.000us
"""
def __init__(self, name: str, args: Optional[str] = None):
self.name: str = name
self.args: Optional[str] = args
# Whether or not we should run record function's end callbacks when exiting.
self.run_callbacks_on_exit: bool = True
# Stores underlying RecordFunction as a tensor. TODO: move to custom
# class (https://github.com/pytorch/pytorch/issues/35026).
self.handle: torch.Tensor = torch.zeros(1)
def __enter__(self):
self.handle = torch.ops.profiler._record_function_enter(self.name, self.args)
return self
def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any):
if self.run_callbacks_on_exit:
torch.ops.profiler._record_function_exit(self.handle)
def _call_end_callbacks_on_future(self, fut: Future[Any]) -> Future[Any]:
"""
_call_end_callbacks_on_future is meant to be used for profiling async
calls that return a future. Calling this function will extend recording
beyond this scope, until the future is satisfied. It is useful for profiling
the end-to-end time of asynchronous calls. This function should only be called
once to attach the callback onto the future, and will throw if called multiple
times.
Args:
fut: (torch._C.Future): future for which to schedule
callback for.
Returns:
A future that completes with the value of the passed-in future once
the profiling callbacks have run.
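Example (a minimal sketch; ``fut`` is assumed to be a pending
``torch.futures.Future`` produced elsewhere, e.g. by an asynchronous RPC call)::
    >>> # xdoctest: +SKIP("requires a pending future")
    >>> with torch.autograd.profiler.profile() as prof:
    ...     with torch.autograd.profiler.record_function("async-label") as rf:
    ...         fut = rf._call_end_callbacks_on_future(fut)
    >>> fut.wait()  # the "async-label" range ends only when the future completes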
"""
# Throw if we have already attached a callback onto the future.
if not self.run_callbacks_on_exit:
raise RuntimeError("_call_end_callbacks_on_future can only be called once.")
# We are scheduling to run this RecordFunction's end callbacks when the
# passed in future completes, so don't run end callbacks on exit.
self.run_callbacks_on_exit = False
profiled_future = torch.ops.profiler._call_end_callbacks_on_jit_fut(self.handle, fut)
return profiled_future
class emit_itt(object):
"""Context manager that makes every autograd operation emit an ITT range.
It is useful when running the program under Intel(R) VTune Profiler::
vtune <--vtune_flags> <regular command here>
The Instrumentation and Tracing Technology (ITT) API enables your application to generate and
control the collection of trace data during its execution across different Intel tools.
This context manager is used to annotate Intel(R) VTune Profiler traces. With its help,
you will be able to see labeled ranges in the Intel(R) VTune Profiler GUI.
.. warning::
This context manager should not be called recursively, i.e. at most one
instance should be enabled at any given time.
Args:
enabled (bool, optional): Setting ``enabled=False`` makes this context manager a no-op.
Default: ``True``.
record_shapes (bool, optional): If ``record_shapes=True``, the itt range wrapping
each autograd op will append information about the sizes of Tensor arguments received
by that op, in the following format:
``[[arg0.size(0), arg0.size(1), ...], [arg1.size(0), arg1.size(1), ...], ...]``
Non-tensor arguments will be represented by ``[]``.
Arguments will be listed in the order they are received by the backend op.
Please note that this order may not match the order in which those arguments were passed
on the Python side. Also note that shape recording may increase the overhead of itt range creation.
Default: ``False``
Example:
>>> # xdoctest: +SKIP("Undefined variables")
>>> with torch.autograd.profiler.emit_itt():
... model(x)
"""
def __init__(self, enabled=True, record_shapes=False):
self.enabled = enabled
self.entered = False
self.record_shapes = record_shapes
def __enter__(self):
if not self.enabled:
return
if self.entered:
raise RuntimeError("ITT annotation context manager is not reentrant")
self.entered = True
_enable_profiler(
ProfilerConfig(
ProfilerState.ITT,
self.record_shapes,
False,
False,
False,
False,
_ExperimentalConfig()),
set()
)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if not self.enabled:
return
_disable_profiler()
return False
class emit_nvtx(object):
"""Context manager that makes every autograd operation emit an NVTX range.
It is useful when running the program under nvprof::
nvprof --profile-from-start off -o trace_name.prof -- <regular command here>
Unfortunately, there's no way to force nvprof to flush the data it collected
to disk, so for CUDA profiling one has to use this context manager to annotate
nvprof traces and wait for the process to exit before inspecting them.
Then, either NVIDIA Visual Profiler (nvvp) can be used to visualize the timeline, or
:func:`torch.autograd.profiler.load_nvprof` can load the results for inspection
e.g. in Python REPL.
.. warning::
This context manager should not be called recursively, i.e. at most one
instance should be enabled at any given time.
Args:
enabled (bool, optional): Setting ``enabled=False`` makes this context manager a no-op.
Default: ``True``.
record_shapes (bool, optional): If ``record_shapes=True``, the nvtx range wrapping
each autograd op will append information about the sizes of Tensor arguments received
by that op, in the following format:
``[[arg0.size(0), arg0.size(1), ...], [arg1.size(0), arg1.size(1), ...], ...]``
Non-tensor arguments will be represented by ``[]``.
Arguments will be listed in the order they are received by the backend op.
Please note that this order may not match the order in which those arguments were passed
on the Python side. Also note that shape recording may increase the overhead of nvtx range creation.
Default: ``False``
Example:
>>> # xdoctest: +SKIP("undefined variables")
>>> with torch.cuda.profiler.profile():
... model(x) # Warmup CUDA memory allocator and profiler
... with torch.autograd.profiler.emit_nvtx():
... model(x)
**Forward-backward correlation**
When viewing a profile created using :class:`emit_nvtx` in the Nvidia Visual Profiler,
correlating each backward-pass op with the corresponding forward-pass op can be difficult.
To ease this task, :class:`emit_nvtx` appends sequence number information to the ranges it
generates.
During the forward pass, each function range is decorated with ``seq=<N>``. ``seq`` is a running
counter, incremented each time a new backward Function object is created and stashed for backward.
Thus, the ``seq=<N>`` annotation associated with each forward function range tells you that
if a backward Function object is created by this forward function,
the backward object will receive sequence number N.
During the backward pass, the top-level range wrapping each C++ backward Function's
``apply()`` call is decorated with ``stashed seq=<M>``. ``M`` is the sequence number that
the backward object was created with. By comparing ``stashed seq`` numbers in backward with ``seq``
numbers in forward, you can track down which forward op created each backward Function.
Any functions executed during the backward pass are also decorated with ``seq=<N>``. During
default backward (with ``create_graph=False``) this information is irrelevant, and in fact,
``N`` may simply be 0 for all such functions. Only the top-level ranges associated with
backward Function objects' ``apply()`` methods are useful, as a way to correlate these Function
objects with the earlier forward pass.
**Double-backward**
If, on the other hand, a backward pass with ``create_graph=True`` is underway (in other words,
if you are setting up for a double-backward), each function's execution during backward
is given a nonzero, useful ``seq=<N>``. Those functions may themselves create Function objects
to be executed later during double-backward, just as the original functions in the forward pass did.
The relationship between backward and double-backward is conceptually the same as the relationship
between forward and backward: The functions still emit current-sequence-number-tagged ranges,
the Function objects they create still stash those sequence numbers, and during the eventual
double-backward, the Function objects' ``apply()`` ranges are still tagged with ``stashed seq``
numbers, which can be compared to `seq` numbers from the backward pass.
.. warning::
The sequence number is thread-local, and some forward functions don't create an associated
backward Function object (instead delegating that to sub-functions further down the call chain).
For these reasons, the correspondence of stashed sequence numbers in
backward Function ``apply()`` ranges with `seq` numbers in forward-pass ranges is
not guaranteed to be 1 to 1. The sequence numbers alone may not be enough to fully
disambiguate which forward function created which
backward Function object. You may need to make a judgment based on analytic knowledge of what
the expected correspondence should be.
"""
def __init__(self, enabled=True, record_shapes=False):
self.enabled = enabled
self.entered = False
self.record_shapes = record_shapes
def __enter__(self):
if not self.enabled:
return
if self.entered:
raise RuntimeError("NVTX annotation context manager is not reentrant")
self.entered = True
torch.cuda.synchronize()
_enable_profiler(
ProfilerConfig(
ProfilerState.NVTX,
self.record_shapes,
False,
False,
False,
False,
_ExperimentalConfig()),
set()
)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if not self.enabled:
return
torch.cuda.synchronize()
_disable_profiler()
return False
def load_nvprof(path):
"""Opens an nvprof trace file and parses autograd annotations.
Args:
path (str): path to nvprof trace
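Example (illustrative; ``trace_name.prof`` is assumed to be a trace previously
recorded with ``nvprof`` while :class:`emit_nvtx` was active)::
    >>> # xdoctest: +SKIP("requires an nvprof trace file")
    >>> events = torch.autograd.profiler.load_nvprof("trace_name.prof")
    >>> print(events.table(sort_by="cuda_time_total"))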
"""
return EventList(parse_nvprof_trace(path))
class EnforceUnique(object):
"""Raises an error if a key is seen more than once."""
def __init__(self):
self.seen = set()
def see(self, *key):
if key in self.seen:
raise RuntimeError('duplicate key: ' + str(key))
self.seen.add(key)
def parse_nvprof_trace(path):
import sqlite3
conn = sqlite3.connect(path)
conn.row_factory = sqlite3.Row
# Parse strings table
strings = {}
for r in conn.execute("SELECT _id_ as id, value FROM StringTable"):
strings[r["id"]] = torch._C._demangle(r["value"])
# First, find all functions and create FunctionEvents for them
marker_query = """
SELECT
start.id AS marker_id, start.name, start.timestamp AS start_time, end.timestamp AS end_time
FROM
CUPTI_ACTIVITY_KIND_MARKER AS start INNER JOIN CUPTI_ACTIVITY_KIND_MARKER AS end
ON start.id = end.id
WHERE
start.name != 0 AND end.name = 0
"""
functions = []
functions_map = {}
unique = EnforceUnique()
for row in conn.execute(marker_query):
unique.see(row['marker_id'])
evt = FunctionEvent(id=row['marker_id'],
node_id=0, # missing a node_id when calling FunctionEvent. This is just to ensure
# that pytorch doesn't crash when creating a FunctionEvent() object
name=strings[row['name']],
start_us=row['start_time'],
end_us=row['end_time'],
thread=0) # TODO: find in sqlite database
functions.append(evt)
functions_map[evt.id] = evt
# Now, correlate all kernels with FunctionEvents
kernel_query = """
SELECT
start.id AS marker_id, start.name, start.timestamp, end.timestamp,
runtime._id_ AS runtime_id, runtime.cbid, runtime.start AS runtime_start, runtime.end AS runtime_end,
kernel.start AS kernel_start, kernel.end AS kernel_end, kernel.name AS kernel_name
FROM
CUPTI_ACTIVITY_KIND_MARKER AS start
INNER JOIN CUPTI_ACTIVITY_KIND_MARKER AS end
ON start.id = end.id
INNER JOIN CUPTI_ACTIVITY_KIND_RUNTIME as runtime
ON (start.timestamp < runtime.start AND runtime.end < end.timestamp)
INNER JOIN CUPTI_ACTIVITY_KIND_CONCURRENT_KERNEL AS kernel
ON kernel.correlationId = runtime.correlationId
"""
unique = EnforceUnique()
for row in conn.execute(kernel_query):
unique.see(row['marker_id'], row['runtime_id'])
# 211 is cudaKernelLaunch for cuda >= 9.2
assert (row['cbid'] == 211)
evt = functions_map[row['marker_id']]
evt.append_kernel(row['kernel_name'],
0,
row['kernel_end'] - row['kernel_start'])
functions.sort(key=lambda evt: evt.time_range.start)
return functions
def kineto_step():
""" Notify kineto so it is aware of iteration boundaries for asynchronous
trace requests.
"""
_kineto_step()
| pytorch-master | torch/autograd/profiler.py |
import torch
import torch._C as _C
from torch._C import _functions
import torch.utils.hooks as hooks
from torch._six import with_metaclass
import functools
import warnings
from collections import OrderedDict
from typing import Any, List, Optional
# Formerly known as: _ContextMethodMixin
class FunctionCtx(object):
def save_for_backward(self, *tensors: torch.Tensor):
r"""Saves given tensors for a future call to :func:`~Function.backward`.
``save_for_backward`` should be called at most once, only from inside the
:func:`forward` method, and only with tensors.
All tensors intended to be used in the backward pass should be saved
with ``save_for_backward`` (as opposed to directly on ``ctx``) to prevent
incorrect gradients and memory leaks, and enable the application of saved
tensor hooks. See :class:`torch.autograd.graph.saved_tensors_hooks`.
Note that if intermediary tensors, tensors that are neither inputs
nor outputs of :func:`forward`, are saved for backward, your custom Function
may not support double backward.
Custom Functions that do not support double backward should decorate their
:func:`backward` method with ``@once_differentiable`` so that performing
double backward raises an error. If you'd like to support double backward,
you can either recompute intermediaries based on the inputs during backward
or return the intermediaries as the outputs of the custom Function. See the
`double backward tutorial <https://pytorch.org/tutorials/intermediate/custom_function_double_backward_tutorial.html>`_
for more details.
In :func:`backward`, saved tensors can be accessed through the :attr:`saved_tensors`
attribute. Before returning them to the user, a check is made to ensure
they weren't used in any in-place operation that modified their content.
Arguments can also be ``None``. This is a no-op.
See :ref:`extending-autograd` for more details on how to use this method.
Example::
>>> class Func(Function):
>>> @staticmethod
>>> def forward(ctx, x: torch.Tensor, y: torch.Tensor, z: int):
>>> w = x * z
>>> out = x * y + y * z + w * y
>>> ctx.save_for_backward(x, y, w, out)
>>> ctx.z = z # z is not a tensor
>>> return out
>>>
>>> @staticmethod
>>> @once_differentiable
>>> def backward(ctx, grad_out):
>>> x, y, w, out = ctx.saved_tensors
>>> z = ctx.z
>>> gx = grad_out * (y + y * z)
>>> gy = grad_out * (x + z + w)
>>> gz = None
>>> return gx, gy, gz
>>>
>>> a = torch.tensor(1., requires_grad=True, dtype=torch.double)
>>> b = torch.tensor(2., requires_grad=True, dtype=torch.double)
>>> c = 4
>>> d = Func.apply(a, b, c)
"""
self.to_save = tensors
def save_for_forward(self, *tensors: torch.Tensor):
r"""Saves given tensors for a future call to :func:`~Function.jvp`.
``save_for_forward`` should be called at most once, only from inside the
:func:`forward` method, and only with tensors.
In :func:`jvp`, saved objects can be accessed through the :attr:`saved_tensors`
attribute.
Arguments can also be ``None``. This is a no-op.
See :ref:`extending-autograd` for more details on how to use this method.
Example::
>>> # xdoctest: +SKIP
>>> class Func(torch.autograd.Function):
>>> @staticmethod
>>> def forward(ctx, x: torch.Tensor, y: torch.Tensor, z: int):
>>> ctx.save_for_backward(x, y)
>>> ctx.save_for_forward(x, y)
>>> ctx.z = z
>>> return x * y * z
>>>
>>> @staticmethod
>>> def jvp(ctx, x_t, y_t, _):
>>> x, y = ctx.saved_tensors
>>> z = ctx.z
>>> return z * (y * x_t + x * y_t)
>>>
>>> @staticmethod
>>> def vjp(ctx, grad_out):
>>> x, y = ctx.saved_tensors
>>> z = ctx.z
>>> return z * grad_out * y, z * grad_out * x, None
>>>
>>> a = torch.tensor(1., requires_grad=True, dtype=torch.double)
>>> t = torch.tensor(1., dtype=torch.double)
>>> b = torch.tensor(2., requires_grad=True, dtype=torch.double)
>>> c = 4
>>>
>>> with fwAD.dual_level():
>>> a_dual = fwAD.make_dual(a, t)
>>> d = Func.apply(a_dual, b, c)
"""
for tensor in tensors:
assert isinstance(tensor, torch.Tensor) or tensor is None, (
"save_for_forward expects all arguments to be tensors; you should "
"save non-tensors as attributes on ctx.")
self.saved_for_forward = tensors
def mark_dirty(self, *args: torch.Tensor):
r"""Marks given tensors as modified in an in-place operation.
**This should be called at most once, only from inside the**
:func:`forward` **method, and all arguments should be inputs.**
Every tensor that's been modified in-place in a call to :func:`forward`
should be given to this function, to ensure correctness of our checks.
It doesn't matter whether the function is called before or after
modification.
Examples::
>>> class Inplace(Function):
>>> @staticmethod
>>> def forward(ctx, x):
>>> x_npy = x.numpy() # x_npy shares storage with x
>>> x_npy += 1
>>> ctx.mark_dirty(x)
>>> return x
>>>
>>> @staticmethod
>>> @once_differentiable
>>> def backward(ctx, grad_output):
>>> return grad_output
>>>
>>> a = torch.tensor(1., requires_grad=True, dtype=torch.double).clone()
>>> b = a * a
>>> Inplace.apply(a) # This would lead to wrong gradients!
>>> # but the engine would not know unless we mark_dirty
>>> # xdoctest: +SKIP
>>> b.backward() # RuntimeError: one of the variables needed for gradient
>>> # computation has been modified by an inplace operation
"""
self.dirty_tensors = args
def mark_shared_storage(self, *pairs):
warnings.warn(
'mark_shared_storage is deprecated. '
'Tensors with shared storages are automatically tracked. Note '
'that calls to `set_()` are not tracked')
def mark_non_differentiable(self, *args: torch.Tensor):
r"""Marks outputs as non-differentiable.
**This should be called at most once, only from inside the**
:func:`forward` **method, and all arguments should be tensor outputs.**
This will mark outputs as not requiring gradients, increasing the
efficiency of backward computation. You still need to accept a gradient
for each output in :meth:`~Function.backward`, but it's always going to
be a zero tensor with the same shape as the corresponding
output.
This is used e.g. for indices returned from a sort. See example::
>>> class Func(Function):
>>> @staticmethod
>>> def forward(ctx, x):
>>> sorted, idx = x.sort()
>>> ctx.mark_non_differentiable(idx)
>>> ctx.save_for_backward(x, idx)
>>> return sorted, idx
>>>
>>> @staticmethod
>>> @once_differentiable
>>> def backward(ctx, g1, g2): # still need to accept g2
>>> x, idx = ctx.saved_tensors
>>> grad_input = torch.zeros_like(x)
>>> grad_input.index_add_(0, idx, g1)
>>> return grad_input
"""
self.non_differentiable = args
def set_materialize_grads(self, value: bool):
r"""Sets whether to materialize output grad tensors. Default is ``True``.
**This should be called only from inside the** :func:`forward` **method**
If ``True``, undefined output grad tensors will be expanded to tensors full
of zeros prior to calling the :func:`backward` method.
Example::
>>> class SimpleFunc(Function):
>>> @staticmethod
>>> def forward(ctx, x):
>>> return x.clone(), x.clone()
>>>
>>> @staticmethod
>>> @once_differentiable
>>> def backward(ctx, g1, g2):
>>> return g1 + g2 # No check for None necessary
>>>
>>> # We modify SimpleFunc to handle non-materialized grad outputs
>>> class Func(Function):
>>> @staticmethod
>>> def forward(ctx, x):
>>> ctx.set_materialize_grads(False)
>>> ctx.save_for_backward(x)
>>> return x.clone(), x.clone()
>>>
>>> @staticmethod
>>> @once_differentiable
>>> def backward(ctx, g1, g2):
>>> x, = ctx.saved_tensors
>>> grad_input = torch.zeros_like(x)
>>> if g1 is not None: # We must check for None now
>>> grad_input += g1
>>> if g2 is not None:
>>> grad_input += g2
>>> return grad_input
>>>
>>> a = torch.tensor(1., requires_grad=True)
>>> b, _ = Func.apply(a) # induces g2 to be undefined
"""
self.materialize_grads = value
# DO NOT USE: This is only defined to be able to load old serialized models
_ContextMethodMixin = FunctionCtx
class _HookMixin(object):
@staticmethod
def _register_hook(backward_hooks, hook):
if backward_hooks is None:
backward_hooks = OrderedDict()
handle = hooks.RemovableHandle(backward_hooks)
backward_hooks[handle.id] = hook
return backward_hooks, handle
class BackwardCFunction(_C._FunctionBase, FunctionCtx, _HookMixin):
def apply(self, *args):
# _forward_cls is defined by derived class
# The user should define either backward or vjp but never both.
backward_fn = self._forward_cls.backward # type: ignore[attr-defined]
vjp_fn = self._forward_cls.vjp # type: ignore[attr-defined]
if backward_fn is not Function.backward and vjp_fn is not Function.vjp:
raise RuntimeError("Implementing both 'backward' and 'vjp' for a custom "
"Function is not allowed. You should only implement one "
"of them.")
user_fn = vjp_fn if vjp_fn is not Function.vjp else backward_fn
return user_fn(self, *args)
def apply_jvp(self, *args):
# _forward_cls is defined by derived class
return self._forward_cls.jvp(self, *args) # type: ignore[attr-defined]
class FunctionMeta(type):
"""Function metaclass.
This metaclass sets up the following properties:
_backward_cls: The Function class corresponding to the differentiated
version of this function (which is generated on the fly by this
metaclass).
"""
def __init__(cls, name, bases, attrs):
backward_fn = type(name + 'Backward', (BackwardCFunction,), {'_forward_cls': cls})
cls._backward_cls = backward_fn
super(FunctionMeta, cls).__init__(name, bases, attrs)
# mypy doesn't understand `with_metaclass` from torch._six
class Function(with_metaclass(FunctionMeta, _C._FunctionBase, FunctionCtx, _HookMixin)): # type: ignore[misc]
r"""Base class to create custom `autograd.Function`
To create a custom `autograd.Function`, subclass this class and implement
the :meth:`forward` and :meth:`backward` static methods. Then, to use your custom
op in the forward pass, call the class method ``apply``. Do not call
:meth:`forward` directly.
To ensure correctness and best performance, make sure you are calling the
correct methods on ``ctx`` and validating your backward function using
:func:`torch.autograd.gradcheck`.
See :ref:`extending-autograd` for more details on how to use this class.
Examples::
>>> class Exp(Function):
>>> @staticmethod
>>> def forward(ctx, i):
>>> result = i.exp()
>>> ctx.save_for_backward(result)
>>> return result
>>>
>>> @staticmethod
>>> def backward(ctx, grad_output):
>>> result, = ctx.saved_tensors
>>> return grad_output * result
>>>
>>> # Use it by calling the apply method:
>>> # xdoctest: +SKIP
>>> output = Exp.apply(input)
"""
def __init__(self, *args, **kwargs):
cls = self.__class__
warnings.warn(f"{cls} should not be instantiated. Methods on autograd functions"
"are all static, so you should invoke them on the class itself. "
"Instantiating an autograd function will raise an "
"error in a future version of PyTorch.", DeprecationWarning)
def __call__(self, *args, **kwargs):
raise RuntimeError(
"Legacy autograd function with non-static forward method is deprecated. "
"Please use new-style autograd function with static forward method. "
"(Example: https://pytorch.org/docs/stable/autograd.html#torch.autograd.Function)")
# for the tracer
is_traceable = False
@staticmethod
def forward(ctx: Any, *args: Any, **kwargs: Any) -> Any:
r"""Performs the operation.
This function is to be overridden by all subclasses.
It must accept a context ctx as the first argument, followed by any
number of arguments (tensors or other types).
The context can be used to store arbitrary data that can be then
retrieved during the backward pass. Tensors should not be stored
directly on `ctx` (though this is not currently enforced for
backward compatibility). Instead, tensors should be saved either with
:func:`ctx.save_for_backward` if they are intended to be used in
``backward`` (equivalently, ``vjp``) or :func:`ctx.save_for_forward`
if they are intended to be used in ``jvp``.
"""
raise NotImplementedError("You must implement the forward function for custom"
" autograd.Function.")
@staticmethod
def backward(ctx: Any, *grad_outputs: Any) -> Any:
r"""Defines a formula for differentiating the operation with backward mode
automatic differentiation (alias to the vjp function).
This function is to be overridden by all subclasses.
It must accept a context :attr:`ctx` as the first argument, followed by
as many outputs as the :func:`forward` returned (None will be passed in
for non-tensor outputs of the forward function),
and it should return as many tensors, as there were inputs to
:func:`forward`. Each argument is the gradient w.r.t the given output,
and each returned value should be the gradient w.r.t. the
corresponding input. If an input is not a Tensor or is a Tensor not
requiring grads, you can just pass None as a gradient for that input.
The context can be used to retrieve tensors saved during the forward
pass. It also has an attribute :attr:`ctx.needs_input_grad` as a tuple
of booleans representing whether each input needs gradient. E.g.,
:func:`backward` will have ``ctx.needs_input_grad[0] = True`` if the
first input to :func:`forward` needs gradient computed w.r.t. the
output.
"""
raise NotImplementedError("You must implement either the backward or vjp method for "
"your custom autograd.Function to use it with backward "
"mode AD.")
# vjp and backward are alias of each other
vjp = backward
@staticmethod
def jvp(ctx: Any, *grad_inputs: Any) -> Any:
r"""Defines a formula for differentiating the operation with forward mode
automatic differentiation.
This function is to be overridden by all subclasses.
It must accept a context :attr:`ctx` as the first argument, followed by
as many inputs as the :func:`forward` got (None will be passed in
for non-tensor inputs of the forward function),
and it should return as many tensors as there were outputs to
:func:`forward`. Each argument is the gradient w.r.t the given input,
and each returned value should be the gradient w.r.t. the
corresponding output. If an output is not a Tensor or the function is not
differentiable with respect to that output, you can just pass None as a
gradient for that output.
You can use the :attr:`ctx` object to pass any value from the forward to this
function.
"""
raise NotImplementedError("You must implement the jvp function for custom "
"autograd.Function to use it with forward mode AD.")
def once_differentiable(fn):
@functools.wraps(fn)
def wrapper(ctx, *args):
with torch.no_grad():
outputs = fn(ctx, *args)
if not torch.is_grad_enabled():
return outputs
# If any of the inputs have requires_grad=True, we force the outputs
# to have requires_grad=True but point to a grad_fn which throws an
# error message during (double) back-propagation.
# XXX: this is only an approximation of requires_grad - there's no way
# to figure out if fn didn't use ctx.saved_tensors and as a result
# some Tensors might require grad, even if no args do.
# Unfortunately, this leads to unexpected error messages ("no nodes
# require computing gradients"), but I don't have a better idea.
# These functions would raise an error in backward anyway.
requires_grad = any(isinstance(arg, torch.Tensor) and arg.requires_grad
for arg in args)
if not requires_grad:
return outputs
if not isinstance(outputs, tuple):
outputs = (outputs,)
err_fn = _functions.DelayedError(
b"trying to differentiate twice a function that was marked "
b"with @once_differentiable", len(outputs))
# Create aliases of each output that has requires_grad=True. We need
# at least one of the inputs to err_fn to require grad so that the
# output will have a grad_fn.
def fake_requires_grad(var):
if var is not None:
var = var.detach()
var.requires_grad = True
return var
return err_fn(*[fake_requires_grad(v) for v in outputs])
return wrapper
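# A minimal usage sketch (illustrative, not a new API): decorating ``backward`` with
# ``@once_differentiable`` makes a second differentiation through the Function raise
# an error instead of silently producing wrong gradients.
#
#   >>> class Exp(torch.autograd.Function):
#   ...     @staticmethod
#   ...     def forward(ctx, x):
#   ...         y = x.exp()
#   ...         ctx.save_for_backward(y)
#   ...         return y
#   ...     @staticmethod
#   ...     @once_differentiable
#   ...     def backward(ctx, grad_output):
#   ...         y, = ctx.saved_tensors
#   ...         return grad_output * y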
def traceable(fn_cls):
r"""Marks Function as traceable for the JIT.
Traceable functions have additional restrictions - they can't pass any
data-dependent values to backward (e.g. Prod passes the output, which makes
it non-traceable), and their backward should be implemented entirely in terms
of operations on autograd Tensors in all cases.
DON'T USE THIS DECORATOR. IT IS FOR INTERNAL USE ONLY AND SHOULD BE HANDLED WITH
CARE (or can give incorrect results otherwise).
"""
fn_cls.is_traceable = True
return fn_cls
class InplaceFunction(Function):
def __init__(self, inplace=False):
super(InplaceFunction, self).__init__()
self.inplace = inplace
def _nested_map(condition, fn, condition_msg=None):
def _map(obj):
if condition(obj):
return fn(obj)
elif obj is None:
return None
elif isinstance(obj, (list, tuple)):
mapped = (_map(x) for x in obj)
if hasattr(obj, '_fields'):
# obj is namedtuple
return type(obj)(*mapped)
return type(obj)(mapped)
elif isinstance(obj, dict):
return {x : _map(obj[x]) for x in obj}
else:
raise ValueError("Auto nesting doesn't know how to process "
"an input object of type " + torch.typename(obj) +
(". Accepted types: " + condition_msg +
", or lists/tuples of them"
if condition_msg else ""))
return _map
def _jit_unwrap_structured(obj):
if hasattr(obj, "_jit_unwrap"):
return obj._jit_unwrap()
return obj
def _iter_filter(condition, allow_unknown=False, condition_msg=None,
conversion=None):
def _iter(obj):
if conversion is not None:
obj = conversion(obj)
if condition(obj):
yield obj
elif obj is None:
return
elif isinstance(obj, (list, tuple)):
for o in obj:
for var in _iter(o):
yield var
elif isinstance(obj, dict):
# We only accept primitive key types, so we needn't inspect them
for o in obj.values():
for var in _iter(o):
yield var
elif allow_unknown:
yield obj
else:
raise ValueError("Auto nesting doesn't know how to process "
"an input object of type " + torch.typename(obj) +
(". Accepted types: " + condition_msg +
", or lists/tuples of them"
if condition_msg else ""))
return _iter
def _unflatten(input, proto):
# unflatten a list or tuple input into a nested list/tuple structure
# specified by proto
def unflatten_helper(input, proto):
res: List[Optional[torch.Tensor]] = []
if hasattr(proto, "_jit_wrap"):
return proto._jit_wrap(input)
if not isinstance(proto, (list, tuple)):
return input[0], input[1:]
for e in proto:
if e is None:
res.append(e)
else:
res_e, input = unflatten_helper(input, e)
res.append(res_e)
return type(proto)(res), input
return unflatten_helper(input, proto)[0]
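# For example, with flat input [a, b, c] and proto ((x, y), z) (where the proto
# leaves are any non-list/tuple objects), _unflatten returns ((a, b), c).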
_iter_jit_values = _iter_filter(lambda o: o is None or isinstance(o, torch._C.Value),
condition_msg="jit's Values or None")
_iter_tensors = _iter_filter(lambda x: isinstance(x, torch.Tensor), condition_msg="Tensors",
conversion=_jit_unwrap_structured)
_iter_tensors_permissive = _iter_filter(lambda x: isinstance(x, torch.Tensor),
allow_unknown=True,
condition_msg="Tensors (permissive)")
_iter_None_tensors = _iter_filter(lambda o: o is None or isinstance(o, torch.Tensor),
condition_msg="Tensors or None")
_map_tensor_data = _nested_map(lambda x: isinstance(x, torch.Tensor), lambda o: o.data,
condition_msg="Tensors")
class NestedIOFunction(Function):
# The 'type: ignore' statements are needed here because these functions are declared as '@staticmethod' in the
# superclass (Function) but are instance methods here, which mypy reports as incompatible.
def _do_forward(self, *input):
self._nested_input = input
flat_input = tuple(_iter_tensors(input))
flat_output = super(NestedIOFunction, self)._do_forward(*flat_input)
nested_output = self._nested_output
nested_tensors = _unflatten(flat_output, self._nested_output)
return nested_tensors
def _do_backward(self, gradients, retain_variables):
self.retain_variables = retain_variables
result = super(NestedIOFunction, self)._do_backward(gradients, retain_variables)
if not retain_variables:
del self._nested_output
del self._to_save_nested
return result
def backward(self, *gradients: Any) -> Any: # type: ignore[override]
nested_gradients = _unflatten(gradients, self._nested_output)
result = self.backward_extended(*nested_gradients) # type: ignore[func-returns-value]
return tuple(_iter_None_tensors(result))
__call__ = _do_forward
def forward(self, *args: Any) -> Any: # type: ignore[override]
nested_tensors = _map_tensor_data(self._nested_input)
result = self.forward_extended(*nested_tensors) # type: ignore[func-returns-value]
del self._nested_input
self._nested_output = result
return tuple(_iter_tensors(result))
def save_for_backward(self, *args: Any) -> None:
self.to_save = tuple(_iter_tensors(args))
self._to_save_nested = args
@property
def saved_tensors(self):
flat_tensors = super(NestedIOFunction, self).saved_tensors
return _unflatten(flat_tensors, self._to_save_nested)
def mark_dirty(self, *args: Any, **kwargs: Any) -> None:
self.dirty_tensors = tuple(_iter_tensors((args, kwargs)))
def mark_non_differentiable(self, *args: Any, **kwargs: Any) -> None:
self.non_differentiable = tuple(_iter_tensors((args, kwargs)))
def forward_extended(self, *input: Any) -> None:
raise NotImplementedError
def backward_extended(self, *grad_output: Any) -> None:
raise NotImplementedError
| pytorch-master | torch/autograd/function.py |
from .tensor import * # noqa: F403
| pytorch-master | torch/autograd/_functions/__init__.py |
from functools import reduce
import torch
import torch._utils
from ..function import Function
class Type(Function):
@staticmethod
def forward(ctx, i, dest_type):
ctx.input_type = type(i)
ctx.input_device = -1 if not i.is_cuda else i.get_device()
return i.type(dest_type)
@staticmethod
def backward(ctx, grad_output):
if ctx.input_device == -1:
return grad_output.type(ctx.input_type), None
else:
with torch.cuda.device(ctx.input_device):
return grad_output.type(ctx.input_type), None
# TODO: deprecate this
class Resize(Function):
@staticmethod
def forward(ctx, tensor, sizes):
ctx.sizes = sizes
ctx.numel = reduce(lambda x, y: x * y, sizes, 1)
if tensor.numel() != ctx.numel:
raise RuntimeError(("requested resize to {} ({} elements in total), "
"but the given tensor has a size of {} ({} elements). "
"autograd's resize can only change the shape of a given "
"tensor, while preserving the number of elements. ").format(
'x'.join(map(str, sizes)), ctx.numel,
'x'.join(map(str, tensor.size())), tensor.numel()))
ctx.input_sizes = tensor.size()
if tensor.is_quantized:
tensor.copy_(tensor)
return tensor.contiguous().view(*sizes)
if tensor.is_contiguous():
result = tensor.new(tensor).contiguous().view(*sizes)
return result
else:
return tensor.contiguous().view(*sizes)
@staticmethod
def backward(ctx, grad_output):
assert grad_output.numel() == ctx.numel
return grad_output.contiguous().view(ctx.input_sizes), None
| pytorch-master | torch/autograd/_functions/tensor.py |
from functools import reduce
def maybe_view(tensor, size, check_same_size=True):
if check_same_size and tensor.size() == size:
return tensor
return tensor.contiguous().view(size)
def maybe_unexpand(tensor, old_size, check_same_size=True):
if check_same_size and tensor.size() == old_size:
return tensor
num_unsqueezed = tensor.dim() - len(old_size)
expanded_dims = [dim for dim, (expanded, original)
in enumerate(zip(tensor.size()[num_unsqueezed:], old_size))
if expanded != original]
for _ in range(num_unsqueezed):
tensor = tensor.sum(0, keepdim=False)
for dim in expanded_dims:
tensor = tensor.sum(dim, keepdim=True)
return tensor
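# Example (maybe_unexpand): maybe_unexpand(torch.ones(2, 3), torch.Size([3])) sums over
# the broadcasted leading dimension and returns a tensor of shape [3] filled with 2.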
# Check whether the op enables broadcasting, and whether it is supported by ONNX.
# If dims1 and dims2 are different, then broadcast is True.
# We always assume the combination of dims1 and dims2 is broadcastable.
# The following types of broadcasting are supported in ONNX:
# 1) Only one element in dims2, such as dims2 = [1, 1]
# 2) dims2 is suffix of dims1, such as dims1 = [2, 3, 4], and dims2 = [3, 4]
# Details can be found here: https://github.com/onnx/onnx/blob/master/docs/Operators.md#Gemm
def check_onnx_broadcast(dims1, dims2):
broadcast = False
supported = True
len1 = len(dims1)
len2 = len(dims2)
numel1 = reduce(lambda x, y: x * y, dims1)
numel2 = reduce(lambda x, y: x * y, dims2)
if len1 < len2:
broadcast = True
if numel2 != 1:
supported = False
elif len1 > len2:
broadcast = True
if numel2 != 1 and dims1[len1 - len2:] != dims2:
supported = False
else:
if dims1 != dims2:
broadcast = True
if numel2 != 1:
supported = False
if not supported:
raise ValueError("Numpy style broadcasting is not supported in ONNX. "
"Input dims are: {}, {}".format(dims1, dims2))
return broadcast
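# A few illustrative cases of the rules above (hypothetical shapes):
#   check_onnx_broadcast([2, 3, 4], [3, 4])  # dims2 is a suffix of dims1 -> returns True
#   check_onnx_broadcast([2, 3, 4], [1, 1])  # single-element dims2 -> returns True
#   check_onnx_broadcast([2, 3, 4], [2, 4])  # unsupported pattern -> raises ValueError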
| pytorch-master | torch/autograd/_functions/utils.py |
from typing import Callable, Any, Tuple, List, Dict, Type, NamedTuple
from torch.utils._pytree import PyTree, TreeSpec, LeafSpec
from collections import namedtuple
FlattenFuncSpec = Callable[[PyTree, TreeSpec], List]
SUPPORTED_NODES: Dict[Type[Any], Any] = {}
def register_pytree_flatten_spec(typ: Any, flatten_fn_spec: FlattenFuncSpec) -> None:
SUPPORTED_NODES[typ] = flatten_fn_spec
def tree_flatten_spec(pytree: PyTree, spec: TreeSpec) -> List[Any]:
if isinstance(spec, LeafSpec):
return [pytree]
if spec.type not in SUPPORTED_NODES:
raise RuntimeError(
f"{type(pytree)} does not have a flatten_fn_spec associated with it. Please register one with"
"torch.fx._pytree.register_pytree_flatten_spec. If you have serialized your model, make"
"sure that any custom pytrees have been registered before loading it.")
flatten_fn_spec = SUPPORTED_NODES[spec.type]
child_pytrees = flatten_fn_spec(pytree, spec)
result = []
for child, child_spec in zip(child_pytrees, spec.children_specs):
flat = tree_flatten_spec(child, child_spec)
result += flat
return result
def _dict_flatten_spec(d: Dict[Any, Any], spec: TreeSpec) -> List[Any]:
return list([d[k] for k in spec.context])
def _list_flatten_spec(d: List[Any], spec: TreeSpec) -> List[Any]:
return [d[i] for i in range(len(spec.children_specs))]
def _tuple_flatten_spec(d: Tuple[Any], spec: TreeSpec) -> List[Any]:
return [d[i] for i in range(len(spec.children_specs))]
def _namedtuple_flatten_spec(d: NamedTuple, spec: TreeSpec) -> List[Any]:
return [d[i] for i in range(len(spec.children_specs))]
register_pytree_flatten_spec(dict, _dict_flatten_spec)
register_pytree_flatten_spec(list, _list_flatten_spec)
register_pytree_flatten_spec(tuple, _tuple_flatten_spec)
register_pytree_flatten_spec(namedtuple, _namedtuple_flatten_spec)
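# A minimal usage sketch (illustrative; relies only on the registrations above):
#   >>> from torch.utils._pytree import tree_flatten
#   >>> _, spec = tree_flatten({"a": 1, "b": [2, 3]})
#   >>> tree_flatten_spec({"a": 10, "b": [20, 30]}, spec)
#   [10, 20, 30]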
| pytorch-master | torch/fx/_pytree.py |
import torch
import torch.nn as nn
import torch.overrides
from torch.nn.modules.module import _addindent
from torch.package import PackageImporter, PackageExporter
import linecache
from typing import Type, Dict, List, Any, Union, Optional, Set
from .graph import Graph, _PyTreeCodeGen, _is_from_torch, _custom_builtins, PythonCode
from ._compatibility import compatibility
from torch.package import Importer, sys_importer
import copy
import itertools
import sys
import traceback
from pathlib import Path
import os
import warnings
# Normal exec loses the source code; however, we can work with
# the linecache module to recover it.
# Using _exec_with_source will add it to our local cache
# and then tools like TorchScript will be able to get source info.
class _EvalCacheLoader(object):
def __init__(self):
self.eval_cache = {}
self.next_id = 0
def cache(self, src: str, globals: Dict[str, Any]):
"""Store the source in a private cache, and add a lazy entry in linecache
that allows the source to be retrieved by 'filename'.
Args:
src (str): The module source to cache
globals (dict): The module globals
Returns:
str: The cache key (and dummy filename) generated for src.
"""
key = self._get_key()
self.eval_cache[key] = src
# Don't mutate globals so that this loader is only used
# to populate linecache, and doesn't interact with other modules
# that might check `__loader__`
globals_copy = globals.copy()
globals_copy['__file__'] = key
globals_copy['__name__'] = key
globals_copy['__loader__'] = self
linecache.lazycache(key, globals_copy)
return key
# Part of the loader protocol (PEP 302)
# linecache will use this method when trying to find source code
def get_source(self, module_name) -> Optional[str]:
if module_name in self.eval_cache:
return self.eval_cache[module_name]
return None
def _get_key(self):
key = f'<eval_with_key>.{self.next_id}'
self.next_id += 1
return key
_loader = _EvalCacheLoader()
def _exec_with_source(src: str, globals: Dict[str, Any]):
key = _loader.cache(src, globals)
exec(compile(src, key, 'exec'), globals)
def _forward_from_src(src: str, globals: Dict[str, Any]):
# avoid mutating the passed in dict
globals_copy = globals.copy()
_exec_with_source(src, globals_copy)
forward_fn = globals_copy['forward']
del globals_copy['forward']
return forward_fn
def _format_import_statement(name: str, obj: Any, importer: Importer) -> str:
if name in _custom_builtins:
return _custom_builtins[name].import_str
if _is_from_torch(name):
return 'import torch'
module_name, attr_name = importer.get_name(obj)
return f'from {module_name} import {attr_name} as {name}'
def _format_import_block(globals: Dict[str, Any], importer: Importer):
import_strs: Set[str] = set()
for name, obj in globals.items():
import_strs.add(_format_import_statement(name, obj, importer))
return '\n'.join(import_strs)
@compatibility(is_backward_compatible=True)
def reduce_graph_module(body: Dict[Any, Any], import_block: str) -> torch.nn.Module:
# BC: attribute name was changed from `code` to `_code` to facilitate
# making `code` into a property and adding a docstring to it
fn_src = body.get('_code') or body['code']
forward = _forward_from_src(import_block + fn_src, {})
return _deserialize_graph_module(forward, body)
@compatibility(is_backward_compatible=True)
def reduce_package_graph_module(
importer: PackageImporter, body: Dict[Any, Any], generated_module_name: str
) -> torch.nn.Module:
forward = importer.import_module(generated_module_name).forward
return _deserialize_graph_module(forward, body)
@compatibility(is_backward_compatible=True)
def reduce_deploy_graph_module(
importer: PackageImporter, body: Dict[Any, Any], import_block: str
) -> torch.nn.Module:
ns = dict()
ns["__builtins__"] = importer.patched_builtins
fn_src = body.get('_code')
assert fn_src is not None
forward = _forward_from_src(import_block + fn_src, ns)
return _deserialize_graph_module(forward, body)
def _deserialize_graph_module(forward, body: Dict[Any, Any]) -> torch.nn.Module:
"""
Deserialize a GraphModule given the dictionary of the original module,
using the code to reconstruct the graph. We delete the actual graph before
saving the dictionary so that changes to the in-memory graph format do not
get serialized.
"""
# We create a dummy class here because symbolic_trace pulls the forward()
# function off of the class, rather than the instance
class CodeOnlyModule(torch.nn.Module):
def __init__(self, body):
super().__init__()
self.__dict__ = body
# Try to retrieve the forward source in a backward-compatible way
CodeOnlyModule.forward = forward
tracer_cls = body.get('_tracer_cls')
if tracer_cls is None:
from ._symbolic_trace import Tracer
tracer_cls = Tracer
graphmodule_cls_name = body.get('_graphmodule_cls_name', 'GraphModule')
# This is a workaround for a mypy linter issue related to
# passing base class as an argument - https://github.com/python/mypy/issues/5865.
cls_tracer : Any = tracer_cls
class KeepModules(cls_tracer):
# we shouldn't trace into any of the submodules,
# because they were not traced in the original GraphModule
def is_leaf_module(self, _: torch.nn.Module, __: str) -> bool:
return True
com = CodeOnlyModule(body)
tracer_extras = body.get('_tracer_extras', {})
graph = KeepModules().trace(com, **tracer_extras)
# Manually set Tracer class on the reconstructed Graph, to avoid
# referencing the private local subclass KeepModules.
graph._tracer_cls = tracer_cls
gm = GraphModule(com, graph, class_name=graphmodule_cls_name)
# The GraphModule constructor only retains attributes referenced by the graph.
# In this case, our goal is to return a GraphModule as close to identical as
# possible to the one put into the package. If any additional attributes were present in body,
# we should keep them.
for k, v in body.items():
if not hasattr(gm, k):
setattr(gm, k, v)
return gm
# copy an attribute value with qualified name 'target' from 'from_module' to 'to_module'
# This installs empty Modules where none exist yet if they are subpaths of target
def _copy_attr(from_module: torch.nn.Module, to_module: torch.nn.Module, target: str):
*prefix, field = target.split('.')
for item in prefix:
f = getattr(from_module, item)
t = getattr(to_module, item, None)
if f is t:
# we have already installed one of its parents
# (e.g. target = root.linear.weight, but we have already installed root.linear)
# once we install a parent, we no longer need to copy the children
# since all the needed properties will already be present
return
if t is None:
t = torch.nn.Module()
setattr(to_module, item, t)
from_module, to_module = f, t
orig = getattr(from_module, field)
# If it is a tensor and not a parameter attribute of a module, it should be a named buffer.
# So, we register it as a named buffer in the target module.
if isinstance(orig, torch.Tensor) and not isinstance(orig, torch.nn.Parameter):
to_module.register_buffer(field, orig)
else:
setattr(to_module, field, orig)
# Assign attribute 'from_obj' to the qualified name 'target' on 'to_module
# This installs empty Modules where none exist yet if they are subpaths of target
def _assign_attr(from_obj: Any, to_module: torch.nn.Module, target: str):
*prefix, field = target.split('.')
for item in prefix:
t = getattr(to_module, item, None)
if t is None:
t = torch.nn.Module()
setattr(to_module, item, t)
to_module = t
# If it is a tensor and not a parameter attribute of a module, it should be a named buffer.
# So, we register it as a named buffer in the target module.
if isinstance(from_obj, torch.Tensor) and not isinstance(from_obj, torch.nn.Parameter):
to_module.register_buffer(field, from_obj)
else:
setattr(to_module, field, from_obj)
class _WrappedCall:
def __init__(self, cls, cls_call):
self.cls = cls
self.cls_call = cls_call
# Previously, if an error occurred when valid
# symbolically-traced code was run with an invalid input, the
# user would see the source of the error as coming from
# `File "<eval_with_key_N">`, where N is some number. We use
# this function to generate a more informative error message. We
# return the traceback itself, a message explaining that the
# error occurred in a traced Module's generated forward
# function, and five lines of context surrounding the faulty
# line
@staticmethod
def _generate_error_message(frame_summary: traceback.FrameSummary) -> str:
# auxiliary variables (for readability)
err_lineno = frame_summary.lineno
assert err_lineno is not None
line = frame_summary.line
assert line is not None
err_line_len = len(line)
all_src_lines = linecache.getlines(frame_summary.filename)
# constituent substrings of the error message
tb_repr = traceback.format_exc()
custom_msg = ("Call using an FX-traced Module, "
f"line {err_lineno} of the traced Module's "
"generated forward function:")
before_err = "".join(all_src_lines[err_lineno - 2 : err_lineno])
marker = "~" * err_line_len + "~~~ <--- HERE"
err_and_after_err = "\n".join(all_src_lines[err_lineno : err_lineno + 2])
# joined message
return "\n".join([tb_repr, custom_msg, before_err, marker, err_and_after_err])
def __call__(self, obj, *args, **kwargs):
try:
if self.cls_call is not None:
return self.cls_call(obj, *args, **kwargs)
else:
return super(self.cls, obj).__call__(*args, **kwargs) # type: ignore[misc]
except Exception as e:
assert e.__traceback__
topmost_framesummary: traceback.FrameSummary = \
traceback.StackSummary.extract(traceback.walk_tb(e.__traceback__))[-1] # type: ignore[arg-type]
if "eval_with_key" in topmost_framesummary.filename:
print(_WrappedCall._generate_error_message(topmost_framesummary),
file=sys.stderr)
raise e.with_traceback(None)
else:
raise e
@compatibility(is_backward_compatible=True)
class GraphModule(torch.nn.Module):
"""
GraphModule is an nn.Module generated from an fx.Graph. GraphModule has a
``graph`` attribute, as well as ``code`` and ``forward`` attributes generated
from that ``graph``.
.. warning::
When ``graph`` is reassigned, ``code`` and ``forward`` will be automatically
regenerated. However, if you edit the contents of the ``graph`` without reassigning
the ``graph`` attribute itself, you must call ``recompile()`` to update the generated
code.
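Example (a minimal sketch; ``MyModule`` is a hypothetical ``nn.Module``)::
    >>> # xdoctest: +SKIP("illustrative only")
    >>> gm = torch.fx.symbolic_trace(MyModule())
    >>> print(gm.code)      # Python source generated from ``gm.graph``
    >>> # ... edit gm.graph in place ...
    >>> gm.recompile()      # regenerate ``code`` and ``forward`` after in-place edits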
"""
def __new__(cls: 'Type[GraphModule]', *args, **kwargs):
# each instance of a graph module needs its own forward method
# so create a new singleton class for each instance.
# it is a subclass of the user-defined class, the only difference
# is an extra layer to install the forward method
# address issue described at https://github.com/pytorch/pytorch/issues/63883
# in other words, traverse class hierarchy to fix the redundant class definition problem
for t in cls.__mro__:
c = t.__qualname__.split('.')[-1]
if c != 'GraphModuleImpl':
cls = t
break
class GraphModuleImpl(cls): # type: ignore[misc, valid-type]
pass
return super().__new__(GraphModuleImpl)
@compatibility(is_backward_compatible=True)
def __init__(self,
root: Union[torch.nn.Module, Dict[str, Any]],
graph: Graph,
class_name: str = 'GraphModule'):
"""
Construct a GraphModule.
Args:
root (Union[torch.nn.Module, Dict[str, Any]]):
``root`` can either be an nn.Module instance or a Dict mapping strings to any attribute type.
In the case that ``root`` is a Module, any references to Module-based objects (via qualified
name) in the Graph's Nodes' ``target`` field will be copied over from the respective place
within ``root``'s Module hierarchy into the GraphModule's module hierarchy.
In the case that ``root`` is a dict, the qualified name found in a Node's ``target`` will be
looked up directly in the dict's keys. The object mapped to by the Dict will be copied
over into the appropriate place within the GraphModule's module hierarchy.
graph (Graph): ``graph`` contains the nodes this GraphModule should use for code generation
class_name (str): ``class_name`` denotes the name of this GraphModule for debugging purposes. If it's unset, all
error messages will report as originating from ``GraphModule``. It may be helpful to set this
to ``root``'s original name or a name that makes sense within the context of your transform.
"""
super().__init__()
self.__class__.__name__ = class_name
if isinstance(root, torch.nn.Module):
if hasattr(root, 'training'):
self.training = root.training
for node in graph.nodes:
if node.op in ['get_attr', 'call_module']:
assert isinstance(node.target, str)
_copy_attr(root, self, node.target)
elif isinstance(root, dict):
targets_to_copy = []
for node in graph.nodes:
if node.op in ['get_attr', 'call_module']:
assert isinstance(node.target, str)
if node.target not in root:
raise RuntimeError('Node ' + str(node) + ' referenced target ' + node.target +
' but that target was not provided in ``root``!')
targets_to_copy.append(node.target)
# Sort targets in ascending order of the # of atoms.
# This will ensure that less deeply nested attributes are assigned
# before more deeply nested attributes. For example, foo.bar
# will be assigned before foo.bar.baz. Otherwise, we might assign
# the user-provided ``foo.bar`` and wipe out the previously-assigned
# ``foo.bar.baz``
targets_to_copy.sort(key=lambda t: t.count('.'))
for target_to_copy in targets_to_copy:
_assign_attr(root[target_to_copy], self, target_to_copy)
else:
raise RuntimeError('Unsupported type ' + str(root) + ' passed for root!')
self.graph = graph
# Store the Tracer class responsible for creating a Graph separately as part of the
# GraphModule state, except when the Tracer is defined in a local namespace.
# Locally defined Tracers are not pickleable. This is needed because torch.package will
# serialize a GraphModule without retaining the Graph, and needs to use the correct Tracer
# to re-create the Graph during deserialization.
self._tracer_cls = None
if self.graph._tracer_cls and '<locals>' not in self.graph._tracer_cls.__qualname__:
self._tracer_cls = self.graph._tracer_cls
self._tracer_extras = {}
if self.graph._tracer_extras:
self._tracer_extras = self.graph._tracer_extras
# TorchScript breaks trying to compile the graph setter because of the
# continued string literal. Issue here: https://github.com/pytorch/pytorch/issues/44842
#
# Shouldn't be an issue since these methods shouldn't be used in TorchScript anyway
__jit_unused_properties__ = ['graph']
@property
def graph(self) -> Graph:
"""
Return the ``Graph`` underlying this ``GraphModule``
"""
return self._graph
@graph.setter
def graph(self, g : Graph) -> None:
"""
Set the underlying ``Graph`` for this ``GraphModule``. This will internally
recompile the ``GraphModule`` so that the generated ``forward()`` function
corresponds to ``g``
"""
assert isinstance(g, Graph), f'Expected a Graph instance, but got {type(g)}'
self._graph = g
g.owning_module = self
self.recompile()
@compatibility(is_backward_compatible=False)
def to_folder(self, folder: Union[str, os.PathLike], module_name : str = "FxModule"):
"""Dumps out module to ``folder`` with ``module_name`` so that it can be
imported with ``from <folder> import <module_name>``
Args:
folder (Union[str, os.PathLike]): The folder to write the code out to
module_name (str): Top-level name to use for the ``Module`` while
writing out the code
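Example (illustrative; ``gm`` is assumed to be an existing ``GraphModule``)::
    >>> # xdoctest: +SKIP("writes to disk")
    >>> gm.to_folder("exported", module_name="ExportedModule")
    >>> # later, assuming the "exported" folder is importable from the working directory:
    >>> from exported import ExportedModule
    >>> reloaded = ExportedModule()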
"""
folder = Path(folder)
Path(folder).mkdir(exist_ok=True)
torch.save(self.state_dict(), folder / 'state_dict.pt')
tab = " " * 4
custom_builtins = '\n'.join([v.import_str for v in _custom_builtins.values()])
model_str = f"""
import torch
{custom_builtins}
from torch.nn import *
class {module_name}(torch.nn.Module):
def __init__(self):
super().__init__()
"""
def _gen_model_repr(module_name: str, module: torch.nn.Module) -> Optional[str]:
safe_reprs = [nn.Linear, nn.Conv1d, nn.Conv2d, nn.Conv3d, nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d]
if type(module) in safe_reprs:
return f"{module.__repr__()}"
else:
return None
blobified_modules = []
for module_name, module in self.named_children():
module_str = _gen_model_repr(module_name, module)
if module_str is None:
module_file = folder / f'{module_name}.pt'
torch.save(module, module_file)
blobified_modules.append(module_name)
module_repr = module.__repr__().replace('\r', ' ').replace('\n', ' ')
module_str = f"torch.load(r'{module_file}') # {module_repr}"
model_str += f"{tab*2}self.{module_name} = {module_str}\n"
for buffer_name, buffer in self._buffers.items():
if buffer is None:
continue
model_str += f"{tab*2}self.register_buffer('{buffer_name}', torch.empty({list(buffer.shape)}, dtype={buffer.dtype}))\n"
for param_name, param in self._parameters.items():
if param is None:
continue
model_str += f"{tab*2}self.{param_name} = torch.nn.Parameter(torch.empty({list(param.shape)}, dtype={param.dtype}))\n"
model_str += f"{tab*2}self.load_state_dict(torch.load(r'{folder}/state_dict.pt'))\n"
model_str += f"{_addindent(self.code, 4)}\n"
module_file = folder / 'module.py'
module_file.write_text(model_str)
init_file = folder / '__init__.py'
init_file.write_text('from .module import *')
if len(blobified_modules) > 0:
warnings.warn("Was not able to save the following children modules as reprs -"
f"saved as pickled files instead: {blobified_modules}")
@compatibility(is_backward_compatible=True)
def add_submodule(self, target: str, m: torch.nn.Module) -> bool:
"""
Adds the given submodule to ``self``.
This installs empty Modules where none exist yet if they are
subpaths of ``target``.
Args:
target: The fully-qualified string name of the new submodule
(See example in ``nn.Module.get_submodule`` for how to
specify a fully-qualified string.)
m: The submodule itself; the actual object we want to
install in the current Module
Return:
bool: Whether or not the submodule could be inserted. For
this method to return True, each object in the chain
denoted by ``target`` must either a) not exist yet,
or b) reference an ``nn.Module`` (not a parameter or
other attribute)
"""
*prefix, field = target.split('.')
mod: torch.nn.Module = self
for item in prefix:
submod = getattr(mod, item, None)
if submod is None:
submod = torch.nn.Module()
setattr(mod, item, submod)
if not isinstance(submod, torch.nn.Module):
return False
mod = submod
mod.add_module(field, m)
return True
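    # Illustrative sketch of ``add_submodule`` installing intermediate empty Modules
    # (hypothetical target names, assuming ``gm`` is an existing GraphModule):
    #
    #     ok = gm.add_submodule("foo.bar.baz", torch.nn.Linear(4, 4))
    #     assert ok and isinstance(gm.get_submodule("foo.bar.baz"), torch.nn.Linear)
    #     # ``gm.foo`` and ``gm.foo.bar`` are now empty ``torch.nn.Module`` containers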
@compatibility(is_backward_compatible=True)
def delete_submodule(self, target: str) -> bool:
"""
Deletes the given submodule from ``self``.
The module will not be deleted if ``target`` is not a valid
target.
Args:
            target: The fully-qualified string name of the submodule to delete.
(See example in ``nn.Module.get_submodule`` for how to
specify a fully-qualified string.)
Returns:
bool: Whether or not the target string referenced a
submodule we want to delete. A return value of ``False``
means that the ``target`` was not a valid reference to
a submodule.
"""
atoms = target.split(".")
path, target_submod = atoms[:-1], atoms[-1]
mod: torch.nn.Module = self
# Get the parent module
for item in path:
if not hasattr(mod, item):
return False
mod = getattr(mod, item)
if not isinstance(mod, torch.nn.Module):
return False
if not hasattr(mod, target_submod):
return False
if not isinstance(getattr(mod, target_submod), torch.nn.Module):
return False
delattr(mod, target_submod)
return True
@compatibility(is_backward_compatible=True)
def delete_all_unused_submodules(self) -> None:
"""
Deletes all unused submodules from ``self``.
A Module is considered "used" if any one of the following is
true:
1. It has children that are used
2. Its forward is called directly via a ``call_module`` node
3. It has a non-Module attribute that is used from a
``get_attr`` node
This method can be called to clean up an ``nn.Module`` without
manually calling ``delete_submodule`` on each unused submodule.
"""
used: List[str] = []
for node in self.graph.nodes:
if node.op == "call_module" or node.op == "get_attr":
# A list of strings representing the different parts
                # of the path. For example, `foo.bar.baz` gives us
# ["foo", "bar", "baz"]
fullpath = node.target.split(".")
# If we're looking at multiple parts of a path, join
                # them with a dot. Otherwise, return that single
# element without doing anything to it.
def join_fn(x: str, y: str) -> str:
return '.'.join([x, y] if y else [x])
# Progressively collect all the names of intermediate
# modules. For example, if we have the target
# `foo.bar.baz`, we'll add `foo`, `foo.bar`, and
# `foo.bar.baz` to the list.
for path in itertools.accumulate(fullpath, join_fn):
used.append(path)
# For a `call_module` node, also register all recursive submodules
# as used
if node.op == "call_module":
try:
submod = self.get_submodule(node.target)
for submod_name, _ in submod.named_modules():
if submod_name != '':
used.append('.'.join([node.target, submod_name]))
except AttributeError:
# Node referenced nonexistent submodule, don't need to
# worry about GCing anything
pass
to_delete = [name for name, _ in self.named_modules()
if name not in used]
for name in to_delete:
self.delete_submodule(name)
@property
def code(self) -> str:
"""
Return the Python code generated from the ``Graph`` underlying this
``GraphModule``.
"""
if not hasattr(self, '_code'):
raise RuntimeError('Code has not been generated! Please report a bug to PyTorch')
return self._code
@compatibility(is_backward_compatible=True)
def recompile(self) -> PythonCode:
"""
Recompile this GraphModule from its ``graph`` attribute. This should be
called after editing the contained ``graph``, otherwise the generated
code of this ``GraphModule`` will be out of date.
"""
if isinstance(self._graph._codegen, _PyTreeCodeGen):
self._in_spec = self._graph._codegen.pytree_info.in_spec
self._out_spec = self._graph._codegen.pytree_info.out_spec
python_code = self._graph.python_code(root_module='self')
self._code = python_code.src
cls = type(self)
cls.forward = _forward_from_src(self._code, python_code.globals)
# Determine whether this class explicitly defines a __call__ implementation
# to wrap. If it does, save it in order to have wrapped_call invoke it.
# If it does not, wrapped_call can use a dynamic call to super() instead.
# In most cases, super().__call__ should be torch.nn.Module.__call__.
# We do not want to hold a reference to Module.__call__ here; doing so will
# bypass patching of torch.nn.Module.__call__ done while symbolic tracing.
cls_call = cls.__call__ if "__call__" in vars(cls) else None
if '_wrapped_call' not in vars(cls):
cls._wrapped_call = _WrappedCall(cls, cls_call) # type: ignore[attr-defined]
def call_wrapped(self, *args, **kwargs):
return self._wrapped_call(self, *args, **kwargs)
cls.__call__ = call_wrapped
return python_code
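    # Sketch of the edit-then-recompile workflow this method supports (illustrative;
    # ``my_module`` and the node rewrite shown are hypothetical):
    #
    #     gm = torch.fx.symbolic_trace(my_module)
    #     for node in gm.graph.nodes:
    #         if node.op == 'call_function' and node.target is torch.add:
    #             node.target = torch.mul
    #     gm.recompile()  # regenerate ``forward`` so it matches the edited graph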
# Passing Tracer as argument allows subclasses extending fx.GraphModule
    # to define their own Tracer (extending fx.Tracer).
def __reduce_deploy__(self, importer: Importer):
dict_without_graph = self.__dict__.copy()
dict_without_graph['_graphmodule_cls_name'] = self.__class__.__name__
del dict_without_graph['_graph']
python_code = self.recompile()
import_block = _format_import_block(python_code.globals, importer)
return (reduce_deploy_graph_module, (dict_without_graph, import_block))
def __reduce_package__(self, exporter: PackageExporter):
dict_without_graph = self.__dict__.copy()
dict_without_graph['_graphmodule_cls_name'] = self.__class__.__name__
del dict_without_graph['_graph']
generated_module_name = f'fx-generated._{exporter.get_unique_id()}'
python_code = self.recompile()
import_block = _format_import_block(python_code.globals, exporter.importer)
module_code = import_block + self.code
exporter.save_source_string(generated_module_name, module_code)
return (reduce_package_graph_module, (dict_without_graph, generated_module_name))
def __reduce__(self):
"""
Serialization of GraphModule. We serialize only the generated code, not
the underlying ``Graph``. This is because ``Graph`` does not have on-disk
backward-compatibility guarantees, whereas Python source code does.
On the deserialization side, we symbolically trace through the generated
code to regenerate the underlying ``Graph``
"""
dict_without_graph = self.__dict__.copy()
python_code = self.recompile()
import_block = _format_import_block(python_code.globals, sys_importer)
del dict_without_graph['_graph']
return (reduce_graph_module, (dict_without_graph, import_block))
# because __reduce__ is defined for serialization,
# we need to define deepcopy otherwise it will call __reduce__
# and cause symbolic tracing to occur every time we try to copy the object
def __deepcopy__(self, memo):
fake_mod = torch.nn.Module()
fake_mod.__dict__ = copy.deepcopy(self.__dict__)
return GraphModule(fake_mod, fake_mod.__dict__['_graph'])
def __copy__(self):
return GraphModule(self, self.graph)
@compatibility(is_backward_compatible=False)
def nested_str(self) -> str:
"""
Return the Python code generated for current GraphModule and its children GraphModules
"""
module_code = self.code
module_code = module_code.lstrip('\n')
module_code = f"class {self._get_name()}(torch.nn.Module):\n" + module_code
module_code = _addindent(module_code, 4)
submodule_code_list = [""]
for submodule in self.children():
if isinstance(submodule, GraphModule):
                submodule_code_list.append(submodule.nested_str())  # recurse into child GraphModules
submodule_code = "\n".join(submodule_code_list)
submodule_code = _addindent(submodule_code, 4)
return module_code + submodule_code
def __str__(self) -> str:
orig_str = super().__str__()
return '\n'.join([orig_str, self._code])
def _replicate_for_data_parallel(self):
new_gm = self.__copy__()
new_gm._is_replica = True
return new_gm
# workarounds for issues in __torch_function__
# WAR for __torch_function__ not handling tensor lists,
# fix is in https://github.com/pytorch/pytorch/pull/34725
# orig_cat = torch.cat
# def patched_cat(*args, **kwargs):
# tensors = args[0]
# for t in tensors:
# if isinstance(t, Proxy):
# return t.__torch_function__(patched_cat, (), args, kwargs)
# return orig_cat(*args, **kwargs)
# patched_cat.__module__ = 'torch'
# patched_cat.__name__ = 'cat'
# torch.cat = patched_cat
| pytorch-master | torch/fx/graph_module.py |
import torch
import inspect
import numbers
import types
import typing
import enum
import warnings
from typing import Any, Callable, Dict, List, Optional, Tuple, NamedTuple, cast, TYPE_CHECKING
from torch._jit_internal import boolean_dispatched
from ._compatibility import compatibility
from torch._ops import OpOverloadPacket, OpOverload
if TYPE_CHECKING:
from .node import Argument
@compatibility(is_backward_compatible=False)
class ArgsKwargsPair(NamedTuple):
"""
Simple named tuple for wrapping args/kwargs pairs.
"""
args: Tuple[Any, ...]
kwargs: Dict[str, Any]
_manual_overrides : Dict[Callable, List[inspect.Signature]] = {}
def _nonzero_schemas():
signatures = []
def nonzero(self):
pass
signatures.append(inspect.signature(nonzero))
def nonzero(self, *, as_tuple : bool): # type: ignore[no-redef]
pass
signatures.append(inspect.signature(nonzero))
return signatures
_manual_overrides[torch.nonzero] = _nonzero_schemas()
class _FakeGlobalNamespace:
def __getattr__(self, name):
if name == 'torch':
return torch
raise RuntimeError('Expected a torch namespace lookup')
_type_eval_globals = {'Tensor' : torch.Tensor, 'Device' : torch.device, 'Layout' : torch.layout,
'number' : numbers.Number, 'Future' : torch.jit.Future,
'AnyEnumType' : enum.Enum, 'QScheme' : torch.qscheme,
'__torch__': _FakeGlobalNamespace(), 'NoneType': type(None),
't': typing.TypeVar('t')}
for k in dir(typing):
_type_eval_globals[k] = getattr(typing, k)
def _torchscript_type_to_python_type(ts_type : 'torch._C.JitType') -> Any:
"""
Convert a TorchScript type to a Python type (including subtypes) via
eval'ing the annotation_str. _type_eval_globals sets up expressions
like "List" and "Future" to map to actual types (typing.List and jit.Future)
"""
return eval(ts_type.annotation_str, _type_eval_globals)
def _torchscript_schema_to_signature(ts_schema : torch._C.FunctionSchema) -> inspect.Signature:
parameters : List[inspect.Parameter] = []
for arg in ts_schema.arguments:
arg_type = _torchscript_type_to_python_type(arg.type)
default = arg.default_value if arg.has_default_value() else inspect.Parameter.empty
# TODO: Figure out if this is safe. It seems like when generating the type signatures for
# PythonArgParser, we emit signatures with `input` instead of `self` as the first tensor
# argument name. Downstream, if someone converts that positional argument to a keyword
# argument, the name mismatch will break things, so here we're going to normalize the
# name to "input"
name = arg.name if arg.name != 'self' else 'input'
kind = inspect.Parameter.KEYWORD_ONLY if arg.kwarg_only else inspect.Parameter.POSITIONAL_OR_KEYWORD
parameters.append(inspect.Parameter(name=name, kind=kind, default=default, annotation=arg_type))
return_types = [_torchscript_type_to_python_type(ret.type) for ret in ts_schema.returns]
if len(return_types) == 0:
return_type = None
elif len(return_types) == 1:
return_type = return_types[0]
else:
return_type = tuple(return_types)
return inspect.Signature(parameters, return_annotation=return_type)
@compatibility(is_backward_compatible=False)
def check_for_mutable_operation(target : Callable, args : Tuple['Argument', ...], kwargs : Dict[str, 'Argument']):
signatures, schemas = get_signature_for_torch_op(target, return_schemas=True)
if signatures and schemas:
matched_schemas = []
# Iterate through all of the schema until we find one that matches
# If one matches, populate `new_args_and_kwargs` with the new args/kwargs
# values. If none matches, `new_args_and_kwargs` will be None
for candidate_signature, schema in zip(signatures, schemas):
try:
candidate_signature.bind(*args, **kwargs)
matched_schemas.append((candidate_signature, schema))
except TypeError as e:
continue
def throw_if_mutable(schema):
if schema.is_mutable:
raise RuntimeError(f'Tried to trace mutable operation {schema}. FX only supports functional '
f'code, so operations that mutate operands in-place (e.g. via `out` arguments) '
f'are not supported')
if len(matched_schemas) == 0:
# Did not match any schema. Cannot check for mutation
pass
elif len(matched_schemas) == 1:
# Matched exactly one schema, unambiguous
_, schema_to_check = matched_schemas[0]
throw_if_mutable(schema_to_check)
pass
else:
# Ambiguous schema match. Since mutability checking is best effort,
# do nothing.
pass
@compatibility(is_backward_compatible=False)
def get_signature_for_torch_op(op : Callable, return_schemas : bool = False):
"""
Given an operator on the `torch` namespace, return a list of `inspect.Signature`
    objects corresponding to the overloads of that op. May return `None` if a signature
could not be retrieved.
Args:
op (Callable): An operator on the `torch` namespace to look up a signature for
Returns:
Optional[List[inspect.Signature]]: A list of signatures for the overloads of this
operator, or None if the operator signatures could not be retrieved. If
return_schemas=True, returns a tuple containing the optional Python signatures
and the optional TorchScript Function signature
"""
if isinstance(op, OpOverload):
schemas = [op._schema]
elif isinstance(op, OpOverloadPacket):
schemas = [getattr(op, overload)._schema for overload in op.overloads()]
else:
override = _manual_overrides.get(op)
if override:
return (override, None) if return_schemas else None
aten_fn = torch.jit._builtins._find_builtin(op)
if aten_fn is None:
return (None, None) if return_schemas else None
schemas = torch._C._jit_get_schemas_for_operator(aten_fn)
signatures = [_torchscript_schema_to_signature(schema) for schema in schemas]
return (signatures, schemas) if return_schemas else signatures
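# Sketch of looking up overload signatures for a ``torch`` namespace op (illustrative):
#
#     sigs = get_signature_for_torch_op(torch.add)
#     if sigs is not None:
#         for sig in sigs:           # one inspect.Signature per overload
#             print(sig)
#     # With return_schemas=True a (signatures, schemas) pair is returned instead:
#     sigs, schemas = get_signature_for_torch_op(torch.add, return_schemas=True)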
@compatibility(is_backward_compatible=False)
def create_type_hint(x):
try:
if isinstance(x, list) or isinstance(x, tuple):
# todo(chilli): Figure out the right way for mypy to handle this
if isinstance(x, list):
def ret_type(x):
return List[x] # type: ignore[valid-type]
else:
def ret_type(x):
return Tuple[x, ...]
if len(x) == 0:
return ret_type(Any)
base_type = x[0]
for t in x:
if issubclass(t, base_type):
continue
elif issubclass(base_type, t):
base_type = t
else:
return ret_type(Any)
return ret_type(base_type)
except Exception as e:
# We tried to create a type hint for list but failed.
warnings.warn(f"We were not able to successfully create type hint from the type {x}")
pass
return x
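# Illustrative behaviour of ``create_type_hint`` when given containers of types
# (the inputs below are examples, not exhaustive):
#
#     create_type_hint([int, int])    # -> List[int]
#     create_type_hint((int, bool))   # -> Tuple[int, ...]  (bool is a subclass of int)
#     create_type_hint([int, str])    # -> List[Any]        (no common subclass relation)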
@compatibility(is_backward_compatible=False)
def type_matches(signature_type : Any, argument_type : Any):
sig_origin_type = getattr(signature_type, '__origin__', signature_type)
if signature_type is argument_type:
return True
# Union types in signature. Given type needs to match one of the
# contained types in the Union
if sig_origin_type is typing.Union and signature_type != argument_type:
sig_contained = signature_type.__args__
return any(type_matches(c, argument_type) for c in sig_contained)
if signature_type is List[int] and argument_type is int:
# int can be promoted to List[int]
return True
if getattr(signature_type, '__origin__', None) in {list, List}:
sig_el_type = signature_type.__args__[0]
if not inspect.isclass(sig_el_type):
warnings.warn(
f"Does not support nested parametric types, got {signature_type}. Please file a bug.")
return False
if getattr(argument_type, '__origin__', None) in {list, List}:
return issubclass(argument_type.__args__[0], sig_el_type)
def is_homogeneous_tuple(t):
if not getattr(t, '__origin__', None) in {tuple, Tuple}:
return False
contained = t.__args__
if t.__args__ == ((),): # Tuple[()].__args__ == ((),) for some reason
return True
return all((c is Ellipsis) or issubclass(c, sig_el_type) for c in contained)
# Tuple[T] is accepted for List[T] parameters
return is_homogeneous_tuple(argument_type)
# Dtype is an int in schemas
if signature_type is int and argument_type is torch.dtype:
return True
if signature_type is numbers.Number and argument_type in {int, float}:
return True
if inspect.isclass(argument_type) and inspect.isclass(signature_type):
return issubclass(argument_type, signature_type)
return False
@compatibility(is_backward_compatible=False)
def normalize_function(
target: Callable, args: Tuple[Any], kwargs : Optional[Dict[str, Any]] = None, arg_types : Optional[Tuple[Any]] = None,
kwarg_types : Optional[Dict[str, Any]] = None,
normalize_to_only_use_kwargs : bool = False) -> Optional[ArgsKwargsPair]:
"""
Returns normalized arguments to PyTorch functions. This means that
`args/kwargs` will be matched up to the functional's
signature and return exclusively kwargs in positional order if
`normalize_to_only_use_kwargs` is True.
Also populates default values. Does not support positional-only
parameters or varargs parameters (*args, **kwargs). Does not support modules.
May require `arg_types` and `kwarg_types` in order to disambiguate overloads.
Args:
target (Callable): Function that we are normalizing
args (Tuple[Any]): Tuple of args to the function
kwargs (Optional[Dict[str, Any]]): Dict of kwargs to the function
arg_types (Optional[Tuple[Any]]): Tuple of arg types for the args
kwarg_types (Optional[Dict[str, Any]]): Dict of arg types for the kwargs
normalize_to_only_use_kwargs (bool): Whether to normalize to only use kwargs.
Returns:
Returns normalized_args_and_kwargs, or `None` if not successful.
"""
if kwargs is None:
kwargs = {}
new_args_and_kwargs = None
if not isinstance(target, types.BuiltinFunctionType) and not (
isinstance(target, OpOverloadPacket) or isinstance(target, OpOverload)
):
target_for_analysis = target
if target in boolean_dispatched:
# HACK: `boolean_dispatch` as used in `torch.nn.functional` makes it so that we have
# a 2-way dispatch based on a boolean value. Here we check that the `true` and `false`
# branches of the dispatch have exactly the same signature. If they do, use the `true`
# branch signature for analysis. Otherwise, leave this un-normalized
assert not isinstance(target, str)
dispatched = boolean_dispatched[target]
if_true, if_false = dispatched['if_true'], dispatched['if_false']
if inspect.signature(if_true).parameters != inspect.signature(if_false).parameters:
return None
target_for_analysis = if_true
assert callable(target_for_analysis)
sig = inspect.signature(inspect.unwrap(target_for_analysis))
new_args_and_kwargs = _args_kwargs_to_normalized_args_kwargs(sig, args, kwargs, normalize_to_only_use_kwargs)
else:
assert callable(target)
torch_op_schemas = get_signature_for_torch_op(target)
matched_schemas = []
if torch_op_schemas:
# Iterate through all of the schema until we find one that matches
# If one matches, populate `new_args_and_kwargs` with the new args/kwargs
# values. If none matches, `new_args_and_kwargs` will be None
for candidate_signature in torch_op_schemas:
try:
candidate_signature.bind(*args, **kwargs)
matched_schemas.append(candidate_signature)
except TypeError as e:
continue
if len(matched_schemas) == 0:
# Did not match any schema. Cannot normalize
pass
elif len(matched_schemas) == 1:
# Matched exactly one schema, unambiguous
new_args_and_kwargs = _args_kwargs_to_normalized_args_kwargs(matched_schemas[0], args, kwargs,
normalize_to_only_use_kwargs)
else:
if arg_types is not None or kwarg_types is not None:
arg_types = arg_types if arg_types else cast(Tuple[Any], ())
kwarg_types = kwarg_types if kwarg_types else {}
for candidate_signature in torch_op_schemas:
sig_matches = True
try:
bound_types = candidate_signature.bind(*arg_types, **kwarg_types)
for arg_name, arg_type in bound_types.arguments.items():
param = candidate_signature.parameters[arg_name]
sig_matches = sig_matches and type_matches(param.annotation, arg_type)
except TypeError as e:
sig_matches = False
if sig_matches:
new_args_and_kwargs = _args_kwargs_to_normalized_args_kwargs(candidate_signature, args, kwargs,
normalize_to_only_use_kwargs)
break
else:
# Matched more than one schema. In this situation, the caller must provide the types of
# the arguments of the overload they expect.
schema_printouts = '\n'.join(str(schema) for schema in matched_schemas)
raise RuntimeError(f'Tried to normalize arguments to {torch.typename(target)} but '
f'the schema match was ambiguous! Please provide argument types to '
f'the normalize_arguments() call. Available schemas:\n{schema_printouts}')
return new_args_and_kwargs
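# Sketch of argument normalization (illustrative; exact results depend on the
# introspected signature of the target):
#
#     x = torch.randn(3)
#     pair = normalize_function(torch.nn.functional.relu, (x,), {},
#                               normalize_to_only_use_kwargs=True)
#     if pair is not None:
#         # e.g. args == () and kwargs == {'input': x, 'inplace': False}
#         print(pair.args, pair.kwargs)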
@compatibility(is_backward_compatible=False)
def normalize_module(
root: torch.nn.Module, target: str, args: Tuple[Any], kwargs : Optional[Dict[str, Any]] = None,
normalize_to_only_use_kwargs : bool = False) -> Optional[ArgsKwargsPair]:
"""
Returns normalized arguments to PyTorch modules. This means that
`args/kwargs` will be matched up to the functional's
signature and return exclusively kwargs in positional order if
`normalize_to_only_use_kwargs` is True.
Also populates default values. Does not support positional-only
parameters or varargs parameters (*args, **kwargs).
Args:
root (nn.Module): root module upon which we query modules
target (Callable): Function that we are normalizing
args (Tuple[Any]): Tuple of args to the function
kwargs (Optional[Dict[str, Any]]): Dict of kwargs to the function
normalize_to_only_use_kwargs (bool): Whether to normalize to only use kwargs.
Returns:
Returns normalized_args_and_kwargs, or `None` if not successful.
"""
try:
submod = root.get_submodule(target)
except AttributeError:
raise RuntimeError(f"Tried to normalize node with target {target} but root did not "
f"have that target!")
if hasattr(submod.__class__, '__name__'):
classname = submod.__class__.__name__
if getattr(torch.nn, classname, None) == submod.__class__:
sig = inspect.signature(inspect.unwrap(submod.forward))
if kwargs is None:
kwargs = {}
new_args_and_kwargs = _args_kwargs_to_normalized_args_kwargs(sig, args, kwargs,
normalize_to_only_use_kwargs)
return new_args_and_kwargs
return None
def _args_kwargs_to_normalized_args_kwargs(sig : inspect.Signature, args : Tuple[Any, ...],
kwargs : Dict[str, Any],
normalize_to_only_use_kwargs : bool) -> Optional[ArgsKwargsPair]:
"""
Given a call target, args, and kwargs, return the arguments normalized into
an ArgsKwargsPair, or None if the type signature is not supported by
this normalization.
Args:
        sig (inspect.Signature): Signature object for the target
args (Tuple): Arguments that appear at the callsite for `target`
kwargs (Dict): Keyword arguments that appear at the callsite for `target`
normalize_to_only_use_kwargs (bool): Whether to normalize to only use kwargs.
Returns:
Optional[ArgsKwargsPair]: Normalized args and kwargs for `target`, or `None` if
this target is not supported.
"""
# Don't currently support positional-only
# or varargs (*args, **kwargs) signatures
supported_parameter_types = {
inspect.Parameter.POSITIONAL_OR_KEYWORD, inspect.Parameter.KEYWORD_ONLY}
if any(p.kind not in supported_parameter_types for p in sig.parameters.values()):
return None
bound_args = sig.bind(*args, **kwargs)
bound_args.apply_defaults()
new_kwargs : Dict[str, Any] = {}
new_args : List[Any] = []
for i, param in enumerate(sig.parameters):
if not normalize_to_only_use_kwargs and i < len(args):
new_args.append(bound_args.arguments[param])
else:
new_kwargs[param] = bound_args.arguments[param]
return ArgsKwargsPair(tuple(new_args), new_kwargs)
| pytorch-master | torch/fx/operator_schemas.py |
import dis
import torch
import inspect
import operator
import traceback
from .graph import magic_methods, reflectable_magic_methods, Graph
from typing import Tuple, Dict, Optional, Iterable, Any, Iterator, Callable
from .node import Target, Node, Argument, base_types, map_aggregate
from ._compatibility import compatibility
from .operator_schemas import check_for_mutable_operation
import torch.fx.traceback as fx_traceback
__all__ = ['TracerBase', 'GraphAppendingTracer', 'TraceError', 'Proxy', 'Attribute', 'ParameterProxy']
@compatibility(is_backward_compatible=True)
class TracerBase:
graph: Graph
record_stack_traces : bool = False
# Feature flag for mutable schema checking
    # Enable by default in 1.12
check_mutable_operations : bool = False
# Feature flag for assert tracing
trace_asserts : bool = False
# Feature flag for proxying accesses to buffer values
proxy_buffer_attributes : bool = False
# Name of the function to be traced. It will only be used when
# ``root`` is an instance of ``nn.Module``
traced_func_name: str = "forward"
@compatibility(is_backward_compatible=True)
def create_node(self, kind : str, target : Target,
args : Tuple[Argument, ...], kwargs : Dict[str, Argument], name : Optional[str] = None,
type_expr : Optional[Any] = None) -> Node:
"""
Inserts a graph node given target, args, kwargs, and name.
This method can be overridden to do extra checking, validation, or
modification of values used in node creation. For example, one might
want to disallow in-place operations from being recorded.
"""
if kind == 'call_function' and self.check_mutable_operations:
check_for_mutable_operation(target, args, kwargs)
return self.graph.create_node(kind, target, args, kwargs, name, type_expr)
@compatibility(is_backward_compatible=True)
def proxy(self, node: Node) -> 'Proxy':
return Proxy(node, self)
@compatibility(is_backward_compatible=True)
def create_proxy(self, kind: str, target: Target, args: Tuple[Any, ...], kwargs: Dict[str, Any],
name: Optional[str] = None, type_expr : Optional[Any] = None,
proxy_factory_fn: Callable[[Node], 'Proxy'] = None):
'''
Create a Node from the given arguments, then return the Node
wrapped in a Proxy object.
If kind = 'placeholder', then we're creating a Node that
represents the parameter of a function. If we need to encode
a default parameter, we use the ``args`` tuple. ``args`` is
otherwise empty for ``placeholder`` Nodes.
'''
args_ = self.create_arg(args)
kwargs_ = self.create_arg(kwargs)
assert isinstance(args_, tuple)
assert isinstance(kwargs_, dict)
node = self.create_node(kind, target, args_, kwargs_, name, type_expr)
if not proxy_factory_fn:
proxy = self.proxy(node)
else:
proxy = proxy_factory_fn(node)
# Optionally set stack trace on the created Node for debugging purposes
if fx_traceback.is_stack_trace_overridden():
stacks = fx_traceback.format_stack()
proxy.node.stack_trace = '\n'.join(reversed(stacks))
elif self.record_stack_traces:
user_frame = self._find_user_frame()
if user_frame:
walk_stack_gen = traceback.walk_stack(user_frame)
summary = traceback.StackSummary.extract(walk_stack_gen) # type: ignore[arg-type]
tb_lines = summary.format()
proxy.node.stack_trace = ''.join(tb_lines)
return proxy
def _find_user_frame(self):
"""
Find the Python stack frame executing the user code during
symbolic tracing.
"""
# We have to do a little dance here. Basically, walk up the callstack and
# record the first frame not in the FX source. This is the frame executing
# the user code during tracing.
frame = inspect.currentframe()
fx_files = ['torch/fx/proxy.py', 'torch/fx/_symbolic_trace.py']
while frame:
frame = frame.f_back
if frame and all(not frame.f_code.co_filename.endswith(file) for file in fx_files):
break
if not frame:
return None
return frame
@compatibility(is_backward_compatible=True)
def create_arg(self, a: Any) -> Argument:
"""
A method that lowers the objects seen as arguments during symbolic evaluation
into Argument types that can be stored in IR.
        Can be overridden to support more trace-specific types.
"""
if not isinstance(a, Proxy) and hasattr(a, '__fx_create_arg__'):
return a.__fx_create_arg__(self)
# aggregates
elif isinstance(a, tuple) and hasattr(a, '_fields'):
# NamedTuple constructors don't seem to like getting a generator
# expression as an argument to their constructor, so build this
# intermediate tuple and unpack it into the NamedTuple constructor
args = tuple(self.create_arg(elem) for elem in a)
return type(a)(*args) # type: ignore[arg-type]
elif isinstance(a, (tuple, list)):
return type(a)(self.create_arg(elem) for elem in a)
elif isinstance(a, dict):
r = {}
for k, v in a.items():
# Check for invalid dict keys. We do not want a Proxy to appear
# anywhere within the key. Since keys can be collection types,
# we iterate through the key with map_aggregate
k = self.create_arg(k)
def no_node(arg):
if isinstance(arg, Node):
raise RuntimeError("Keys for dictionaries used as an argument cannot contain a "
"Node. Got key: {k}")
map_aggregate(k, no_node)
r[k] = self.create_arg(v)
return r
elif isinstance(a, slice):
return slice(self.create_arg(a.start), self.create_arg(a.stop), self.create_arg(a.step))
if isinstance(a, Proxy):
# base case: we unwrap the Proxy object
return a.node
elif isinstance(a, base_types) or a is None or a is ...:
return a
raise NotImplementedError(f"argument of type: {type(a)}")
@compatibility(is_backward_compatible=True)
def to_bool(self, obj: 'Proxy') -> bool:
"""Called when a proxy object is being converted to a boolean, such as
when used in control flow. Normally we don't know what to do because
we don't know the value of the proxy, but a custom tracer can attach more
information to the graph node using create_node and can choose to return a value.
"""
raise TraceError('symbolically traced variables cannot be used as inputs to control flow')
@compatibility(is_backward_compatible=True)
def iter(self, obj: 'Proxy') -> Iterator:
"""Called when a proxy object is being iterated over, such as
when used in control flow. Normally we don't know what to do because
we don't know the value of the proxy, but a custom tracer can attach more
information to the graph node using create_node and can choose to return an iterator.
"""
raise TraceError('Proxy object cannot be iterated. This can be '
'attempted when the Proxy is used in a loop or'
' as a *args or **kwargs function argument. '
'See the torch.fx docs on pytorch.org for a '
'more detailed explanation of what types of '
'control flow can be traced, and check out the'
' Proxy docstring for help troubleshooting '
'Proxy iteration errors')
@compatibility(is_backward_compatible=True)
def keys(self, obj: 'Proxy') -> Any:
"""Called when a proxy object is has the keys() method called.
This is what happens when ** is called on a proxy. This should return an
iterator it ** is suppose to work in your custom tracer.
"""
return Attribute(obj, 'keys')()
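# Sketch of a tracer that overrides ``to_bool`` instead of raising (illustrative
# subclass; ``torch.fx.Tracer`` extends TracerBase, and the "assume True" policy
# shown here is just one possible choice):
#
#     class PermissiveTracer(torch.fx.Tracer):
#         def to_bool(self, obj: 'Proxy') -> bool:
#             # Record the conversion in the graph, then assume the branch is taken.
#             self.create_proxy('call_function', bool, (obj,), {})
#             return True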
# used in Proxy object when just appending to the graph while not tracing.
@compatibility(is_backward_compatible=True)
class GraphAppendingTracer(TracerBase):
def __init__(self, graph: Graph):
super().__init__()
self.graph = graph
@compatibility(is_backward_compatible=False)
def assert_fn(x):
assert x
@compatibility(is_backward_compatible=True)
class TraceError(ValueError):
pass
@compatibility(is_backward_compatible=True)
class Proxy:
"""
``Proxy`` objects are ``Node`` wrappers that flow through the
program during symbolic tracing and record all the operations
(``torch`` function calls, method calls, operators) that they touch
into the growing FX Graph.
If you're doing graph transforms, you can wrap your own ``Proxy``
method around a raw ``Node`` so that you can use the overloaded
operators to add additional things to a ``Graph``.
``Proxy`` objects cannot be iterated. In other words, the symbolic
tracer will throw an error if a ``Proxy`` is used in a loop or as
an ``*args``/``**kwargs`` function argument.
There are two main ways around this:
1. Factor out the untraceable logic into a top-level function and
use ``fx.wrap`` on it.
2. If the control flow is static (i.e. the loop trip count is
based on some hyperparameter), the code can be kept in its original
position and refactored into something like::
for i in range(self.some_hyperparameter):
indexed_item = proxied_value[i]
For a more detailed description into the Proxy internals, check out
the "Proxy" section in `torch/fx/OVERVIEW.md`
"""
@compatibility(is_backward_compatible=True)
def __init__(self, node: Node, tracer: 'Optional[TracerBase]' = None):
if tracer is None:
# This allows you to create a Proxy object around a raw Node
tracer = GraphAppendingTracer(node.graph)
self.tracer = tracer
self.node = node
def __repr__(self) -> str:
return f'Proxy({self.node.name})'
def __getattr__(self, k) -> 'Attribute':
# note: not added to the graph yet, if this is a method call
# we peephole optimize to the method invocation
return Attribute(self, k)
def __call__(self, *args, **kwargs) -> 'Proxy':
return self.tracer.create_proxy('call_method', '__call__', (self,) + args, kwargs)
def __iter__(self) -> Iterable['Proxy']:
frame = inspect.currentframe()
assert frame is not None
calling_frame = frame.f_back
assert calling_frame is not None
inst = list(dis.get_instructions(calling_frame.f_code))[calling_frame.f_lasti // 2]
if inst.opname == 'UNPACK_SEQUENCE':
return (self[i] for i in range(inst.argval)) # type: ignore[index]
return self.tracer.iter(self)
def __bool__(self) -> bool:
if self.tracer.trace_asserts:
# check if this boolean is used in an assertion, bytecode pattern for assertions
# is pretty stable for Python 3.7--3.9
frame = inspect.currentframe()
assert frame is not None
calling_frame = frame.f_back
assert calling_frame is not None
insts = list(dis.get_instructions(calling_frame.f_code))
cur = calling_frame.f_lasti // 2
inst = insts[cur]
if inst.opname == 'POP_JUMP_IF_TRUE':
first = insts[cur + 1]
assert inst.arg is not None
last = insts[inst.arg // 2 - 1]
starts_with_assert = (first.opname == 'LOAD_GLOBAL' and first.argval == 'AssertionError'
or first.opname == 'LOAD_ASSERTION_ERROR')
if starts_with_assert and last.opname == 'RAISE_VARARGS':
self.tracer.create_proxy('call_function', assert_fn, (self,), {})
return True
return self.tracer.to_bool(self)
@compatibility(is_backward_compatible=True)
def keys(self):
return self.tracer.keys(self)
def __len__(self):
raise RuntimeError("'len' is not supported in symbolic tracing by default. If you want "
"this call to be recorded, please call torch.fx.wrap('len') at "
"module scope")
@classmethod
def __torch_function__(cls, orig_method, types, args=None, kwargs=None):
args = args if args else ()
kwargs = kwargs if kwargs else {}
tracers : Dict[Any, None] = {}
def find_tracer(a):
if isinstance(a, cls):
tracers[a.tracer] = None
torch.fx.node.map_aggregate(args, find_tracer)
torch.fx.node.map_aggregate(kwargs, find_tracer)
if len(tracers) > 1:
raise RuntimeError(f'Found multiple different tracers {list(tracers.keys())} while '
f'trying to trace operations {orig_method}')
tracer = next(iter(tracers.keys()))
if isinstance(orig_method, torch._C.ScriptMethod):
args = (orig_method.owner,) + args
return tracer.create_proxy('call_method', orig_method.name, args, kwargs)
if torch.overrides.is_tensor_method_or_property(orig_method):
return tracer.create_proxy('call_method', orig_method.__name__, args, kwargs)
else:
return tracer.create_proxy('call_function', orig_method, args, kwargs,
name=tracer.graph._target_to_str(orig_method.__name__))
@compatibility(is_backward_compatible=True)
class Attribute(Proxy):
@compatibility(is_backward_compatible=True)
def __init__(self, root: Proxy, attr: str):
self.root = root
self.attr = attr
self.tracer = root.tracer
self._node: Optional[Node] = None
@property
def node(self):
# the node for attributes is added lazily, since most will just be method calls
        # which do not rely on the getattr call
if self._node is None:
self._node = self.tracer.create_proxy('call_function', getattr, (self.root, self.attr), {}).node
return self._node
def __call__(self, *args, **kwargs):
return self.tracer.create_proxy('call_method', self.attr, (self.root,) + args, kwargs)
@compatibility(is_backward_compatible=False)
class ParameterProxy(Proxy):
"""
A special proxy which lets "shape", "size", "dim", and a few other
attribute accesses pass through to the underlying module parameter object,
so that conditional tests on these attributes will not throw exception during tracing
"""
def __init__(self, tracer: TracerBase, node: Node, name, param):
super().__init__(node, tracer)
assert(isinstance(param, torch.nn.Parameter))
self.param = param
self.name = name
def __repr__(self) -> str:
return f'ParameterProxy({self.name})'
@property
def shape(self):
return self.param.shape
def size(self):
return self.param.size()
def dim(self):
return self.param.dim()
@property
def ndim(self):
return self.param.ndim
def numel(self):
return self.param.numel()
def nelement(self):
return self.param.nelement()
for method in magic_methods:
def _scope(method):
def impl(*args, **kwargs):
tracer = args[0].tracer
target = getattr(operator, method)
return tracer.create_proxy('call_function', target, args, kwargs)
impl.__name__ = method
as_magic = f'__{method.strip("_")}__'
setattr(Proxy, as_magic, impl)
_scope(method)
def _define_reflectable(orig_method_name):
method_name = f'__r{orig_method_name.strip("_")}__'
def impl(self, rhs):
target = getattr(operator, orig_method_name)
return self.tracer.create_proxy('call_function', target, (rhs, self), {})
impl.__name__ = method_name
impl.__qualname__ = method_name
setattr(Proxy, method_name, impl)
for orig_method_name in reflectable_magic_methods:
_define_reflectable(orig_method_name)
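# Sketch of using Proxy to append to an existing Graph outside of tracing
# (illustrative; node and variable names are arbitrary):
#
#     g = torch.fx.Graph()
#     x = g.placeholder('x')          # raw Node
#     px = torch.fx.Proxy(x)          # implicitly uses a GraphAppendingTracer
#     y = px + 1                      # records call_function(operator.add) in ``g``
#     g.output(y.node)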
| pytorch-master | torch/fx/proxy.py |
import traceback
from contextlib import contextmanager
from typing import Optional, List
from ._compatibility import compatibility
__all__ = ['override_stack_trace', 'append_stack_trace', 'format_stack', 'is_stack_trace_overridden']
current_stack: List[str] = []
is_overridden = False
@compatibility(is_backward_compatible=False)
@contextmanager
def override_stack_trace():
global is_overridden
saved_is_overridden = is_overridden
try:
is_overridden = True
yield
finally:
is_overridden = saved_is_overridden
@compatibility(is_backward_compatible=False)
@contextmanager
def append_stack_trace(stack : Optional[str]):
"""
    The content of ``stack`` here is an entire stack trace as a string
"""
global current_stack
if is_overridden and stack:
try:
current_stack.append(stack)
yield
finally:
current_stack.pop()
else:
yield
@compatibility(is_backward_compatible=False)
def format_stack() -> List[str]:
if is_overridden:
return current_stack
else:
# fallback to traceback.format_stack()
return traceback.format_stack()
@compatibility(is_backward_compatible=False)
def is_stack_trace_overridden() -> bool:
return is_overridden
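# Sketch of how a graph transformation might preserve an original node's stack
# trace (illustrative; ``old_node`` is a hypothetical existing Node):
#
#     with override_stack_trace():
#         with append_stack_trace(old_node.stack_trace):
#             # While inside these context managers, format_stack() returns the
#             # appended trace instead of traceback.format_stack().
#             ...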
| pytorch-master | torch/fx/traceback.py |
from .node import Node, Argument, Target, map_arg, _type_repr, _get_qualified_name
import torch.utils._pytree as pytree
from . import _pytree as fx_pytree
from ._compatibility import compatibility
import contextlib
from typing import TYPE_CHECKING, Callable, Any, List, Dict, NamedTuple, Optional, Tuple, Set, FrozenSet, Type
from dataclasses import dataclass
from contextlib import contextmanager
import copy
import torch
import keyword
import re
import builtins
import math
import warnings
import inspect
if TYPE_CHECKING:
from .graph_module import GraphModule # noqa: F401
from ._symbolic_trace import Tracer # noqa: F401
# Mapping of builtins to their `typing` equivalent.
_origin_type_map = {
list: List,
dict: Dict,
set: Set,
frozenset: FrozenSet,
tuple: Tuple,
}
# Signature for functions that transform the body (`list[str]`) of the
# generated code
TransformCodeFunc = Callable[[List[str]], List[str]]
class _CustomBuiltin(NamedTuple):
"""Additional objs that we add to every graph's globals.
The repr() for some standard library objects is not valid Python code without
an import. For common objects of this sort, we bundle them in the globals of
every FX graph.
"""
# How to import this object from the standard library.
import_str: str
# The actual object, produced from that import string.
obj: Any
_custom_builtins: Dict[str, _CustomBuiltin] = {}
def _register_custom_builtin(name: str, import_str: str, obj: Any):
_custom_builtins[name] = _CustomBuiltin(import_str, obj)
_register_custom_builtin('inf', 'from math import inf', math.inf)
_register_custom_builtin('nan', 'from math import nan', math.nan)
_register_custom_builtin('NoneType', 'NoneType = type(None)', type(None))
_register_custom_builtin('torch', 'import torch', torch)
_register_custom_builtin('device', 'from torch import device', torch.device)
_register_custom_builtin('fx_pytree', 'import torch.fx._pytree as fx_pytree', fx_pytree)
_register_custom_builtin('pytree', 'import torch.utils._pytree as pytree', pytree)
def _is_magic(x: str) -> bool:
return x.startswith('__') and x.endswith('__')
def _snake_case(s: str) -> str:
"""
Transforms the given string ``s`` to a Python-style variable name
Examples:
``mod.snake_case`` -> ``mod.snake_case``
``mod.pascalCase``-> ``mod.pascal_case``
``mod.ALL_CAPS`` -> ``mod.all_caps``
"""
chars = []
prev_lower = False
for c in s:
if prev_lower and c.isupper():
chars.append('_')
chars.append(c.lower())
prev_lower = c.islower()
return ''.join(chars)
def _is_from_torch(obj: Any) -> bool:
module_name = getattr(obj, '__module__', None)
if module_name is not None:
base_module = module_name.partition('.')[0]
return base_module == 'torch'
name = getattr(obj, '__name__', None)
# exclude torch because torch.torch.torch.torch works. idk mang
if name is not None and name != 'torch':
for guess in [torch, torch.nn.functional]:
if getattr(guess, name, None) is obj:
return True
return False
class _Namespace:
"""A context for associating names uniquely with objects.
The following invariants are enforced:
- Each object gets a single name.
- Each name is unique within a given namespace.
- Names generated do not shadow builtins, unless the object is indeed that builtin.
"""
def __init__(self):
self._obj_to_name: Dict[Any, str] = {}
self._unassociated_names = set()
self._used_names: Dict[str, int] = {}
self._illegal_char_regex = re.compile('[^0-9a-zA-Z_]+')
self._name_suffix_regex = re.compile(r"(.*)_(\d+)$")
def create_name(self, candidate: str, obj: Optional[Any]) -> str:
"""Create a unique name.
Arguments:
candidate: used as the basis for the unique name, relevant to the user.
obj: If not None, an object that will be associated with the unique name.
"""
if obj is not None and obj in self._obj_to_name:
return self._obj_to_name[obj]
# delete all characters that are illegal in a Python identifier
candidate = self._illegal_char_regex.sub('_', candidate)
if candidate[0].isdigit():
candidate = f'_{candidate}'
match = self._name_suffix_regex.match(candidate)
if match is None:
base = candidate
num = None
else:
base, num_str = match.group(1, 2)
num = int(num_str)
candidate = base if num is None else f'{base}_{num}'
num = num if num else 0
while candidate in self._used_names or self._is_illegal_name(candidate, obj):
num += 1
candidate = f'{base}_{num}'
self._used_names.setdefault(candidate, 0)
if obj is None:
self._unassociated_names.add(candidate)
else:
self._obj_to_name[obj] = candidate
return candidate
def associate_name_with_obj(self, name: str, obj: Any):
"""Associate a unique name with an object.
Neither `name` nor `obj` should be associated already.
"""
assert obj not in self._obj_to_name
assert name in self._unassociated_names
self._obj_to_name[obj] = name
self._unassociated_names.remove(name)
def _is_illegal_name(self, name: str, obj: Any) -> bool:
# 1. keywords are never allowed as names.
if name in keyword.kwlist:
return True
# 2. Can't shadow a builtin name, unless you *are* that builtin.
if name in builtins.__dict__:
return obj is not builtins.__dict__[name]
# 3. Can't shadow our custom builtins either
if name in _custom_builtins:
return obj is not _custom_builtins[name].obj
return False
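# Illustrative behaviour of name uniquification (internal API, shown only as a sketch):
#
#     ns = _Namespace()
#     ns.create_name('x', None)     # -> 'x'
#     ns.create_name('x', None)     # -> 'x_1'  (base name already used)
#     ns.create_name('for', None)   # -> 'for_1' (keywords are illegal names)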
@compatibility(is_backward_compatible=True)
@dataclass
class PythonCode:
"""
Represents all the information necessary to exec or save a graph as Python code.
"""
# Python source code for the forward function definition.
src: str
    # Values in global scope during execution of `src_def`.
globals: Dict[str, Any]
def _format_target(base: str, target: str) -> str:
elems = target.split('.')
r = base
for e in elems:
if not e.isidentifier():
r = f'getattr({r}, "{e}")'
else:
r = f'{r}.{e}'
return r
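# Illustrative behaviour of ``_format_target``: path elements that are not valid
# identifiers fall back to getattr() in the generated code:
#
#     _format_target('self', 'layers.0.conv')
#     # -> 'getattr(self.layers, "0").conv'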
class _InsertPoint:
def __init__(self, graph, new_insert):
self.graph = graph
self.orig_insert, graph._insert = graph._insert, new_insert
def __enter__(self):
pass
def __exit__(self, type, value, tb):
self.graph._insert = self.orig_insert
class _node_list:
def __init__(self, graph: 'Graph', direction: str = '_next'):
assert direction in ['_next', '_prev']
self.graph = graph
self.direction = direction
def __len__(self):
return self.graph._len
def __iter__(self):
root, direction = self.graph._root, self.direction
cur = getattr(root, direction)
while cur is not root:
if not cur._erased:
yield cur
cur = getattr(cur, direction)
def __reversed__(self):
return _node_list(self.graph, '_next' if self.direction == '_prev' else '_prev')
class _PyTreeInfo(NamedTuple):
"""
Contains extra info stored when we're using Pytrees
"""
orig_args: List[str]
in_spec: pytree.TreeSpec
out_spec: Optional[pytree.TreeSpec]
@compatibility(is_backward_compatible=False)
class CodeGen(object):
def __init__(self):
self._body_transformer: Optional[TransformCodeFunc] = None
def gen_fn_def(self, free_vars: List[str], maybe_return_annotation: str) -> str:
"""
Given the free variables and a return annotation, generates the beginning of the FX function.
By default, `gen_fn_def(['a', 'b'], '') == 'def forward(a, b):'`
"""
# If the original function didn't have self as its first argument, we
# would have added it.
if len(free_vars) == 0 or free_vars[0] != 'self':
free_vars.insert(0, 'self')
return f"def forward({', '.join(free_vars)}){maybe_return_annotation}:"
def generate_output(self, output_args: Argument) -> str:
"""
Given the output arguments, generates the return statement of the FX function.
Note: The returned statement should not be indented.
"""
return f'return {repr(output_args)}'
def process_inputs(self, *args: Any) -> Any:
"""
Transforms the inputs so that the graph can take them as arguments, as
non-default codegen may result in the inputs to the function being
different from the inputs to the graph.
If the graph was directly runnable, this invariant should hold true
`f.graph.process_outputs(f.graph(*f.graph.process_inputs(*inputs))) == f(*inputs)`
"""
return args
def process_outputs(self, outputs: Any) -> Any:
"""
Transforms the outputs of the graph to be identical to the codegen.
See ``process_inputs`` for more details.
"""
return outputs
def additional_globals(self) -> List[Tuple[str, Any]]:
"""
        If your codegen uses extra global values, add tuples of (identifier, reference to the value) here.
        For example, return [('List', typing.List)] if you need ``List`` in the global context.
"""
return []
def _gen_python_code(self, nodes, root_module: str, namespace: _Namespace) -> PythonCode:
free_vars: List[str] = []
body: List[str] = []
globals_: Dict[str, Any] = {}
wrapped_fns: Dict[str, None] = {}
# Wrap string in list to pass by reference
maybe_return_annotation : List[str] = ['']
def add_global(name_hint: str, obj: Any):
"""Add an obj to be tracked as a global.
We call this for names that reference objects external to the
Graph, like functions or types.
Returns: the global name that should be used to reference 'obj' in generated source.
"""
if _is_from_torch(obj) and obj != torch.device: # to support registering torch.device
# HACK: workaround for how torch custom ops are registered. We
# can't import them like normal modules so they must retain their
# fully qualified name.
return _get_qualified_name(obj)
# normalize the name hint to get a proper identifier
global_name = namespace.create_name(name_hint, obj)
if global_name in globals_:
assert globals_[global_name] is obj
return global_name
globals_[global_name] = obj
return global_name
# Pre-fill the globals table with registered builtins.
for name, (_, obj) in _custom_builtins.items():
add_global(name, obj)
def type_repr(o : Any):
if o == ():
# Empty tuple is used for empty tuple type annotation Tuple[()]
return '()'
typename = _type_repr(o)
if hasattr(o, '__origin__'):
# This is a generic type, e.g. typing.List[torch.Tensor]
origin_type = _origin_type_map.get(o.__origin__, o.__origin__)
origin_typename = add_global(_type_repr(origin_type), origin_type)
if hasattr(o, '__args__'):
# Assign global names for each of the inner type variables.
args = [type_repr(arg) for arg in o.__args__]
if len(args) == 0:
# Bare type, such as `typing.Tuple` with no subscript
# This code-path used in Python < 3.9
return origin_typename
return f'{origin_typename}[{",".join(args)}]'
else:
# Bare type, such as `typing.Tuple` with no subscript
# This code-path used in Python 3.9+
return origin_typename
# Common case: this is a regular module name like 'foo.bar.baz'
return add_global(typename, o)
def _format_args(args: Tuple[Argument, ...], kwargs: Dict[str, Argument]) -> str:
def _get_repr(arg):
# Handle NamedTuples (if it has `_fields`) via add_global.
if isinstance(arg, tuple) and hasattr(arg, '_fields'):
qualified_name = _get_qualified_name(type(arg))
global_name = add_global(qualified_name, type(arg))
return f"{global_name}{repr(tuple(arg))}"
return repr(arg)
args_s = ', '.join(_get_repr(a) for a in args)
kwargs_s = ', '.join(f'{k} = {_get_repr(v)}' for k, v in kwargs.items())
if args_s and kwargs_s:
return f'{args_s}, {kwargs_s}'
return args_s or kwargs_s
# Run through reverse nodes and record the first instance of a use
# of a given node. This represents the *last* use of the node in the
# execution order of the program, which we will use to free unused
# values
node_to_last_use : Dict[Node, Node] = {}
user_to_last_uses : Dict[Node, List[Node]] = {}
def register_last_uses(n : Node, user : Node):
if n not in node_to_last_use:
node_to_last_use[n] = user
user_to_last_uses.setdefault(user, []).append(n)
for node in reversed(nodes):
map_arg(node.args, lambda n: register_last_uses(n, node))
map_arg(node.kwargs, lambda n: register_last_uses(n, node))
def delete_unused_values(user : Node):
"""
Delete values after their last use. This ensures that values that are
not used in the remainder of the code are freed and the memory usage
of the code is optimal.
"""
if user.op == 'placeholder':
return
if user.op == 'output':
body.append('\n')
return
nodes_to_delete = user_to_last_uses.get(user, [])
if len(nodes_to_delete):
to_delete_str = ' = '.join([repr(n) for n in nodes_to_delete] + ['None'])
body.append(f'; {to_delete_str}\n')
else:
body.append('\n')
def emit_node(node : Node):
maybe_type_annotation = '' if node.type is None else f' : {type_repr(node.type)}'
if node.op == 'placeholder':
assert isinstance(node.target, str)
maybe_default_arg = '' if not node.args else f' = {repr(node.args[0])}'
free_vars.append(f'{node.target}{maybe_type_annotation}{maybe_default_arg}')
raw_name = node.target.replace('*', '')
if raw_name != repr(node):
body.append(f'{repr(node)} = {raw_name}\n')
return
elif node.op == 'call_method':
assert isinstance(node.target, str)
body.append(
f'{repr(node)}{maybe_type_annotation} = {_format_target(repr(node.args[0]), node.target)}'
f'({_format_args(node.args[1:], node.kwargs)})')
return
elif node.op == 'call_function':
assert callable(node.target)
# pretty print operators
if node.target.__module__ == '_operator' and node.target.__name__ in magic_methods:
assert isinstance(node.args, tuple)
body.append(f'{repr(node)}{maybe_type_annotation} = '
f'{magic_methods[node.target.__name__].format(*(repr(a) for a in node.args))}')
return
# pretty print inplace operators; required for jit.script to work properly
# not currently supported in normal FX graphs, but generated by torchdynamo
if node.target.__module__ == '_operator' and node.target.__name__ in inplace_methods:
body.append(f'{inplace_methods[node.target.__name__].format(*(repr(a) for a in node.args))}; '
f'{repr(node)}{maybe_type_annotation} = {repr(node.args[0])}')
return
qualified_name = _get_qualified_name(node.target)
global_name = add_global(qualified_name, node.target)
# special case for getattr: node.args could be 2-argument or 3-argument
# 2-argument: attribute access; 3-argument: fall through to attrib function call with default value
if global_name == 'getattr' and \
isinstance(node.args, tuple) and \
isinstance(node.args[1], str) and \
node.args[1].isidentifier() and \
len(node.args) == 2:
body.append(f'{repr(node)}{maybe_type_annotation} = {_format_target(repr(node.args[0]), node.args[1])}')
return
body.append(f'{repr(node)}{maybe_type_annotation} = {global_name}({_format_args(node.args, node.kwargs)})')
if node.meta.get('is_wrapped', False):
wrapped_fns.setdefault(global_name)
return
elif node.op == 'call_module':
assert isinstance(node.target, str)
body.append(f'{repr(node)}{maybe_type_annotation} = '
f'{_format_target(root_module, node.target)}({_format_args(node.args, node.kwargs)})')
return
elif node.op == 'get_attr':
assert isinstance(node.target, str)
body.append(f'{repr(node)}{maybe_type_annotation} = {_format_target(root_module, node.target)}')
return
elif node.op == 'output':
if node.type is not None:
maybe_return_annotation[0] = f" -> {type_repr(node.type)}"
body.append(self.generate_output(node.args[0]))
return
raise NotImplementedError(f'node: {node.op} {node.target}')
for node in nodes:
# NOTE: emit_node does not emit a string with newline. It depends
# on delete_unused_values to append one
emit_node(node)
delete_unused_values(node)
if len(body) == 0:
# If the Graph has no non-placeholder nodes, no lines for the body
# have been emitted. To continue to have valid Python code, emit a
# single pass statement
body.append('pass\n')
if len(wrapped_fns) > 0:
wrap_name = add_global('wrap', torch.fx.wrap)
wrap_stmts = '\n'.join([f'{wrap_name}("{name}")' for name in wrapped_fns])
else:
wrap_stmts = ''
if self._body_transformer:
body = self._body_transformer(body)
for name, value in self.additional_globals():
add_global(name, value)
prologue = self.gen_fn_def(free_vars, maybe_return_annotation[0])
code = ''.join(body)
code = '\n'.join(' ' + line for line in code.split('\n'))
fn_code = f"""
{wrap_stmts}
{prologue}
{code}"""
return PythonCode(fn_code, globals_)
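# Sketch of customizing code generation by subclassing CodeGen (illustrative;
# Graph.__init__ below initializes ``_codegen``, and assigning a subclass to it
# before recompiling is one way to install custom codegen; ``gm`` is hypothetical):
#
#     class MarkedCodeGen(CodeGen):
#         def generate_output(self, output_args):
#             # Same return statement, with a trailing marker comment.
#             return f'return {repr(output_args)}  # generated by MarkedCodeGen'
#
#     gm.graph._codegen = MarkedCodeGen()
#     gm.recompile()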
# Ideally, we'd like to refactor all of the pytree logic into this codegen
# class. Unfortunately, there are 3 areas we currently need extra logic in FX.
# 1. In the initial symbolic trace, the pytree logic is tied up with `concrete_args`.
# 2. In the FX graph, we need to access 2 attributes - in_spec and out_spec.
# Since we can't access .graph within the FX forward, we need to copy the attribute to the module.
# 3. We currently can't register the pytree imports with `add_global` - not sure why.
class _PyTreeCodeGen(CodeGen):
def __init__(self, pytree_info: _PyTreeInfo):
super().__init__()
self.pytree_info: _PyTreeInfo = pytree_info
def process_inputs(self, *inputs: Any) -> Any:
flat_args, _ = pytree.tree_flatten(inputs)
return flat_args
def process_outputs(self, out: Any) -> Any:
if self.pytree_info is None:
return out
if not isinstance(out, list):
out = [out]
assert(self.pytree_info.out_spec is not None)
return pytree.tree_unflatten(out, self.pytree_info.out_spec)
def gen_fn_def(self, free_vars, maybe_return_annotation):
if self.pytree_info is None:
return super().gen_fn_def(free_vars, maybe_return_annotation)
function_args = self.pytree_info.orig_args
has_orig_self = (function_args[0] == 'self')
if has_orig_self:
free_vars.insert(0, 'self')
function_definition = super().gen_fn_def(function_args[:], maybe_return_annotation)
if len(free_vars) > 0: # pytree has placeholders in it
function_definition += f"""
{', '.join(free_vars)}, = fx_pytree.tree_flatten_spec([{', '.join(function_args)}], self._in_spec)"""
return function_definition
def generate_output(self, output_args):
if self.pytree_info:
return f'return pytree.tree_unflatten({repr(output_args)}, self._out_spec)'
else:
return super().generate_output(output_args)
@compatibility(is_backward_compatible=True)
class Graph:
"""
``Graph`` is the main data structure used in the FX Intermediate Representation.
It consists of a series of ``Node`` s, each representing callsites (or other
syntactic constructs). The list of ``Node`` s, taken together, constitute a
valid Python function.
For example, the following code
.. code-block:: python
import torch
import torch.fx
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
return torch.topk(torch.sum(self.linear(x + self.linear.weight).relu(), dim=-1), 3)
m = MyModule()
gm = torch.fx.symbolic_trace(m)
Will produce the following Graph::
print(gm.graph)
.. code-block:: text
graph(x):
%linear_weight : [#users=1] = self.linear.weight
%add_1 : [#users=1] = call_function[target=operator.add](args = (%x, %linear_weight), kwargs = {})
%linear_1 : [#users=1] = call_module[target=linear](args = (%add_1,), kwargs = {})
%relu_1 : [#users=1] = call_method[target=relu](args = (%linear_1,), kwargs = {})
%sum_1 : [#users=1] = call_function[target=torch.sum](args = (%relu_1,), kwargs = {dim: -1})
%topk_1 : [#users=1] = call_function[target=torch.topk](args = (%sum_1, 3), kwargs = {})
return topk_1
For the semantics of operations represented in the ``Graph``, please see :class:`Node`.
"""
@compatibility(is_backward_compatible=True)
def __init__(self, owning_module: Optional["GraphModule"] = None, tracer_cls: Optional[Type["Tracer"]] = None,
tracer_extras: Optional[Dict[str, Any]] = None):
"""
Construct an empty Graph.
"""
self._root : Node = Node(self, '', 'root', '', (), {})
self._used_names : Dict[str, int] = {} # base name -> number
self._insert = self._root.prepend
self._len = 0
self._graph_namespace = _Namespace()
self._owners = 0
self._owning_module = owning_module
self._tracer_cls = tracer_cls
self._tracer_extras = tracer_extras
self._codegen = CodeGen()
@property
def owning_module(self):
"""
Return the module that owns this ``Graph``, if there is one,
``None`` if there is no owning module or if there are multiple owning
modules.
"""
return self._owning_module
@owning_module.setter
def owning_module(self, mod: Optional["GraphModule"]):
if mod:
self._owning_module = mod if not self._owners else None
self._owners += 1
@property
def nodes(self) -> _node_list:
"""
Get the list of Nodes that constitute this Graph.
Note that this ``Node`` list representation is a doubly-linked list. Mutations
during iteration (e.g. delete a Node, add a Node) are safe.
Returns:
A doubly-linked list of Nodes. Note that ``reversed`` can be called on
this list to switch iteration order.
"""
return _node_list(self)
@compatibility(is_backward_compatible=True)
def graph_copy(self, g : 'Graph', val_map : Dict[Node, Node], return_output_node=False) -> 'Optional[Argument]':
"""
Copy all nodes from a given graph into ``self``.
Args:
g (Graph): The source graph from which to copy Nodes.
val_map (Dict[Node, Node]): a dictionary that will be populated with a mapping
from nodes in ``g`` to nodes in ``self``. Note that ``val_map`` can be passed
in with values in it already to override copying of certain values.
Returns:
The value in ``self`` that is now equivalent to the output value in ``g``,
if ``g`` had an ``output`` node. ``None`` otherwise.
"""
for node in g.nodes:
if node in val_map:
continue
if node.op == 'output':
rv = map_arg(node.args[0], lambda n: val_map[n])
return rv if not return_output_node else (rv, node)
val_map[node] = self.node_copy(node, lambda n : val_map[n])
return None
def __deepcopy__(self, memo=None) -> 'Graph':
"""
Explicitly implement __deepcopy__ to prevent excessive recursion depth
from the default implementation. This uses graph_copy to copy the nodes
iteratively rather than recursively. It also populates the
memoization table to prevent unnecessary copies (e.g. references to
nodes or other parts of the Graph from a custom GraphModule implementation).
"""
memo = memo if memo else {}
g = Graph(tracer_cls=self._tracer_cls)
output_vals = g.graph_copy(self, val_map=memo, return_output_node=True)
g._codegen = copy.deepcopy(self._codegen)
assert isinstance(output_vals, tuple)
output_val, old_output_val = output_vals
g.output(output_val, type_expr=getattr(old_output_val, 'type', None))
return g
@compatibility(is_backward_compatible=True)
def create_node(self, op: str, target: 'Target',
args: Optional[Tuple['Argument', ...]] = None,
kwargs: Optional[Dict[str, 'Argument']] = None,
name: Optional[str] = None,
type_expr: Optional[Any] = None) -> Node:
"""
Create a ``Node`` and add it to the ``Graph`` at the current insert-point.
Note that the current insert-point can be set via :meth:`Graph.inserting_before`
and :meth:`Graph.inserting_after`.
Args:
op (str): the opcode for this Node. One of 'call_function', 'call_method', 'get_attr',
'call_module', 'placeholder', or 'output'. The semantics of these opcodes are
described in the ``Graph`` docstring.
args (Optional[Tuple[Argument, ...]]): the tuple of positional arguments to this node.
kwargs (Optional[Dict[str, Argument]]): the kwargs of this Node
name (Optional[str]): an optional string name for the ``Node``.
This will influence the name of the value assigned to in the
Python generated code.
type_expr (Optional[Any]): an optional type annotation representing the
Python type the output of this node will have.
Returns:
The newly-created and inserted node.
"""
assert op in ('call_function', 'call_method', 'get_attr', 'call_module', 'placeholder', 'output')
args = () if args is None else args
kwargs = {} if kwargs is None else kwargs
assert isinstance(args, tuple), "args must be a tuple"
assert isinstance(kwargs, dict), "kwargs must be a dict"
candidate = name if name is not None else self._target_to_str(target)
name = self._graph_namespace.create_name(candidate, None)
n = Node(self, name, op, target, args, kwargs, type_expr)
self._graph_namespace.associate_name_with_obj(name, n)
self._insert(n)
self._len += 1
return n
@compatibility(is_backward_compatible=False)
def process_inputs(self, *args):
"""
Processes args so that they can be passed to the FX graph.
"""
return self._codegen.process_inputs(*args)
@compatibility(is_backward_compatible=False)
def process_outputs(self, out):
return self._codegen.process_outputs(out)
@compatibility(is_backward_compatible=True)
def erase_node(self, to_erase : Node) -> None:
"""
Erases a ``Node`` from the ``Graph``. Throws an exception if
there are still users of that node in the ``Graph``.
Args:
to_erase (Node): The ``Node`` to erase from the ``Graph``.
"""
if len(to_erase.users) > 0:
raise RuntimeError(f'Tried to erase Node {to_erase} but it still had {len(to_erase.users)} '
f'users in the graph: {to_erase.users}!')
to_erase._remove_from_list()
to_erase._erased = True # iterators may retain handles to erased nodes
self._len -= 1
# Null out this Node's argument nodes so that the Nodes referred to
# can update their ``users`` accordingly
new_args = map_arg(to_erase.args, lambda n: None)
assert isinstance(new_args, tuple)
to_erase.args = new_args
new_kwargs = map_arg(to_erase.kwargs, lambda n: None)
assert isinstance(new_kwargs, dict)
to_erase.kwargs = new_kwargs
@compatibility(is_backward_compatible=True)
def inserting_before(self, n: Optional[Node] = None):
"""Set the point at which create_node and companion methods will insert into the graph.
When used within a 'with' statement, this will temporarily set the insert point and
then restore it when the with statement exits::
with g.inserting_before(n):
... # inserting before node n
... # insert point restored to what it was previously
g.inserting_before(n) # set the insert point permanently
Args:
n (Optional[Node]): The node before which to insert. If None this will insert at
the beginning of the entire graph.
Returns:
A resource manager that will restore the insert point on ``__exit__``.
"""
if n is None:
return self.inserting_after(self._root)
assert n.graph == self, "Node to insert before is not in graph."
return _InsertPoint(self, n.prepend)
@compatibility(is_backward_compatible=True)
def inserting_after(self, n: Optional[Node] = None):
"""Set the point at which create_node and companion methods will insert into the graph.
When used within a 'with' statement, this will temporarily set the insert point and
then restore it when the with statement exits::
with g.inserting_after(n):
... # inserting after node n
... # insert point restored to what it was previously
g.inserting_after(n) # set the insert point permanently
Args:
n (Optional[Node]): The node after which to insert. If None this will insert at
the end of the entire graph.
Returns:
A resource manager that will restore the insert point on ``__exit__``.
"""
if n is None:
return self.inserting_before(self._root)
assert n.graph == self, "Node to insert after is not in graph."
return _InsertPoint(self, n.append)
@compatibility(is_backward_compatible=True)
def placeholder(self, name: str, type_expr: Optional[Any] = None,
default_value : Any = inspect.Signature.empty) -> Node:
"""
Insert a ``placeholder`` node into the Graph. A ``placeholder`` represents
a function input.
Args:
name (str): A name for the input value. This corresponds to the name
of the positional argument to the function this ``Graph`` represents.
type_expr (Optional[Any]): an optional type annotation representing the
Python type the output of this node will have. This is needed in some
cases for proper code generation (e.g. when the function is used
subsequently in TorchScript compilation).
default_value (Any): The default value this function argument should take
on. NOTE: because `None` is a valid default value, `inspect.Signature.empty`
is used as the sentinel; pass it to specify that the parameter does _not_
have a default value.
.. note::
The same insertion point and type expression rules apply for this method
as ``Graph.create_node``.
"""
args = () if default_value is inspect.Signature.empty else (default_value,)
return self.create_node('placeholder', name, args=args, type_expr=type_expr)
@compatibility(is_backward_compatible=True)
def get_attr(self, qualified_name: str, type_expr: Optional[Any] = None) -> Node:
"""
Insert a ``get_attr`` node into the Graph. A ``get_attr`` ``Node`` represents the
fetch of an attribute from the ``Module`` hierarchy.
Args:
qualified_name (str): the fully-qualified name of the attribute to be retrieved.
For example, if the traced Module has a submodule named ``foo``, which has a
submodule named ``bar``, which has an attribute named ``baz``, the qualified
name ``foo.bar.baz`` should be passed as ``qualified_name``.
type_expr (Optional[Any]): an optional type annotation representing the
Python type the output of this node will have.
Returns:
The newly-created and inserted ``get_attr`` node.
.. note::
The same insertion point and type expression rules apply for this method
as ``Graph.create_node``.
"""
def _get_attr_reference_exists(mod: torch.nn.Module, qualified_name: str) -> bool:
module_path, _, name = qualified_name.rpartition(".")
try:
submod: torch.nn.Module = mod.get_submodule(module_path)
except AttributeError:
warnings.warn(f"Failed to fetch module {module_path}!")
return False
if not hasattr(submod, name):
return False
res = getattr(submod, name)
if (not isinstance(res, torch.nn.Module)
and not isinstance(res, torch.nn.Parameter)
and name not in submod._buffers):
return False
return True
if (self.owning_module and
not _get_attr_reference_exists(self.owning_module, qualified_name)):
warnings.warn("Attempted to insert a get_attr Node with no "
"underlying reference in the owning "
"GraphModule! Call "
"GraphModule.add_submodule to add the "
"necessary submodule, "
"GraphModule.add_parameter to add the "
"necessary Parameter, or "
"nn.Module.register_buffer to add the "
"necessary buffer", stacklevel=2)
return self.create_node('get_attr', qualified_name, type_expr=type_expr)
@compatibility(is_backward_compatible=True)
def call_module(self,
module_name: str,
args: Optional[Tuple['Argument', ...]] = None,
kwargs: Optional[Dict[str, 'Argument']] = None,
type_expr: Optional[Any] = None) -> Node:
"""
Insert a ``call_module`` ``Node`` into the ``Graph``. A ``call_module`` node
represents a call to the forward() function of a ``Module`` in the ``Module``
hierarchy.
Args:
module_name (str): The qualified name of the ``Module`` in the ``Module``
hierarchy to be called. For example, if the traced ``Module`` has a
submodule named ``foo``, which has a submodule named ``bar``, the
qualified name ``foo.bar`` should be passed as ``module_name`` to
call that module.
args (Optional[Tuple[Argument, ...]]): The positional arguments to be passed
to the called method. Note that this should *not* include a ``self`` argument.
kwargs (Optional[Dict[str, Argument]]): The keyword arguments to be passed
to the called method
type_expr (Optional[Any]): an optional type annotation representing the
Python type the output of this node will have.
Returns:
The newly-created and inserted ``call_module`` node.
.. note::
The same insertion point and type expression rules apply for this method
as :meth:`Graph.create_node`.
"""
if (self.owning_module and
self.owning_module.get_submodule(module_name) is None):
warnings.warn("Attempted to insert a call_module Node with "
"no underlying reference in the owning "
"GraphModule! Call "
"GraphModule.add_submodule to add the "
"necessary submodule")
return self.create_node('call_module', module_name, args, kwargs, type_expr=type_expr)
@compatibility(is_backward_compatible=True)
def call_method(self,
method_name: str,
args: Optional[Tuple['Argument', ...]] = None,
kwargs: Optional[Dict[str, 'Argument']] = None,
type_expr: Optional[Any] = None) -> Node:
"""
Insert a ``call_method`` ``Node`` into the ``Graph``. A ``call_method`` node
represents a call to a given method on the 0th element of ``args``.
Args:
method_name (str): The name of the method to apply to the self argument.
For example, if args[0] is a ``Node`` representing a ``Tensor``,
then to call ``relu()`` on that ``Tensor``, pass ``relu`` to ``method_name``.
args (Optional[Tuple[Argument, ...]]): The positional arguments to be passed
to the called method. Note that this *should* include a ``self`` argument.
kwargs (Optional[Dict[str, Argument]]): The keyword arguments to be passed
to the called method
type_expr (Optional[Any]): an optional type annotation representing the
Python type the output of this node will have.
Returns:
The newly created and inserted ``call_method`` node.
.. note::
The same insertion point and type expression rules apply for this method
as :meth:`Graph.create_node`.
"""
return self.create_node('call_method', method_name, args, kwargs, type_expr=type_expr)
@compatibility(is_backward_compatible=True)
def call_function(self,
the_function: Callable[..., Any],
args: Optional[Tuple['Argument', ...]] = None,
kwargs: Optional[Dict[str, 'Argument']] = None,
type_expr: Optional[Any] = None) -> Node:
"""
Insert a ``call_function`` ``Node`` into the ``Graph``. A ``call_function`` node
represents a call to a Python callable, specified by ``the_function``.
Args:
the_function (Callable[..., Any]): The function to be called. Can be any PyTorch
operator, Python function, or member of the ``builtins`` or ``operator``
namespaces.
args (Optional[Tuple[Argument, ...]]): The positional arguments to be passed
to the called function.
kwargs (Optional[Dict[str, Argument]]): The keyword arguments to be passed
to the called function
type_expr (Optional[Any]): an optional type annotation representing the
Python type the output of this node will have.
Returns:
The newly created and inserted ``call_function`` node.
.. note::
The same insertion point and type expression rules apply for this method
as :meth:`Graph.create_node`.
"""
return self.create_node('call_function', the_function, args, kwargs, type_expr=type_expr)
@compatibility(is_backward_compatible=True)
def node_copy(self, node: Node, arg_transform: Callable[[Node], 'Argument'] = lambda x: x) -> Node:
"""
Copy a node from one graph into another. ``arg_transform`` needs to transform arguments from
the graph of node to the graph of self. Example::
# Copying all the nodes in `g` into `new_graph`
g : torch.fx.Graph = ...
new_graph = torch.fx.Graph()
value_remap = {}
for node in g.nodes:
value_remap[node] = new_graph.node_copy(node, lambda n : value_remap[n])
Args:
node (Node): The node to copy into ``self``.
arg_transform (Callable[[Node], Argument]): A function that transforms
``Node`` arguments in node's ``args`` and ``kwargs`` into the
equivalent argument in ``self``. In the simplest case, this should
retrieve a value out of a table mapping Nodes in the original
graph to ``self``.
"""
args = map_arg(node.args, arg_transform)
kwargs = map_arg(node.kwargs, arg_transform)
assert isinstance(args, tuple)
assert isinstance(kwargs, dict)
result_node = self.create_node(node.op, node.target, args, kwargs, node.name, node.type)
result_node.meta = copy.copy(node.meta)
return result_node
@compatibility(is_backward_compatible=True)
def output(self, result: 'Argument', type_expr: Optional[Any] = None):
"""
Insert an ``output`` ``Node`` into the ``Graph``. An ``output`` node represents
a ``return`` statement in Python code. ``result`` is the value that should
be returned.
Args:
result (Argument): The value to be returned.
type_expr (Optional[Any]): an optional type annotation representing the
Python type the output of this node will have.
.. note::
The same insertion point and type expression rules apply for this method
as ``Graph.create_node``.
"""
return self.create_node(op='output', target='output', args=(result,), type_expr=type_expr)
def _target_to_str(self, target : Target) -> str:
if callable(target):
op = target.__name__
else:
assert isinstance(target, str)
op = target
if _is_magic(op):
op = op[2:-2]
op = _snake_case(op)
return op
@compatibility(is_backward_compatible=True)
def python_code(self, root_module: str) -> PythonCode:
"""
Turn this ``Graph`` into valid Python code.
Args:
root_module (str): The name of the root module on which to look-up
qualified name targets. This is usually 'self'.
Returns:
A PythonCode object, consisting of two fields:
src: the Python source code representing the object
globals: a dictionary of global names in `src` -> the objects that they reference.
"""
# NOTE: [Graph Namespaces]
#
# There are two types of symbols in generated Python source code:
# locals and globals.
# Locals are locally defined by the output of a node in the Graph.
# Globals are references to external objects, like functions or types.
#
# When generating Python code, we need to make sure to name things
# appropriately. In particular:
# - All names should be unique, to avoid weird shadowing bugs.
# - These names need to be consistent, e.g. an object should always be
# referenced by the same name.
#
# To do this, we create a new namespace just for this source. All names
# that get printed must come from this namespace.
#
# Why can't we re-use node.name? Because it was generated within the
# namespace `self._graph_namespace`. In order to provide uniqueness
# over both locals (node.name) *and* globals, we create a completely
# new namespace to put all identifiers in.
namespace = _Namespace()
# Override Node's repr to generate a valid name within our namespace.
# Since repr() is designed to produce a valid Python expression, it
# makes sense to re-use it. This way, it's easy to print something like
# Tuple[Node, Node] by simply calling repr() on it. Node's __repr__ is
# implemented cooperatively to allow this.
def node_repr(n: Node):
return namespace.create_name(n.name, n)
@contextmanager
def override_node_repr(graph: Graph):
orig_repr_fns = {}
for node in graph.nodes:
orig_repr_fns[node] = node._repr_fn
node._repr_fn = node_repr
try:
yield None
finally:
# restore the original repr functions
for node in graph.nodes:
node._repr_fn = orig_repr_fns[node]
with override_node_repr(self):
return self._python_code(root_module, namespace)
def _python_code(self, root_module: str, namespace: _Namespace) -> PythonCode:
return self._codegen._gen_python_code(self.nodes, root_module, namespace)
def __str__(self) -> str:
"""
Return a human-readable (not machine-readable) string representation
of this Graph
"""
placeholder_names : List[str] = []
# This is a one-element array just so ``format_node`` can modify the closed
# over value
maybe_return_typename : List[str] = ['']
node_strs = [node.format_node(placeholder_names) for node in self.nodes]
param_str = ', '.join(placeholder_names)
s = f'graph({param_str}){maybe_return_typename[0]}:'
for node_str in node_strs:
if node_str:
s += '\n ' + node_str
return s
@compatibility(is_backward_compatible=True)
def print_tabular(self):
"""
Prints the intermediate representation of the graph in tabular
format. Note that this API requires the ``tabulate`` module to be
installed.
"""
try:
from tabulate import tabulate
except ImportError:
print("`print_tabular` relies on the library `tabulate`, "
"which could not be found on this machine. Run `pip "
"install tabulate` to install the library.")
node_specs = [[n.op, n.name, n.target, n.args, n.kwargs]
for n in self.nodes]
print(tabulate(node_specs,
headers=['opcode', 'name', 'target', 'args', 'kwargs']))
@compatibility(is_backward_compatible=True)
def lint(self):
"""
Runs various checks on this Graph to make sure it is well-formed. In
particular:
- Checks Nodes have correct ownership (owned by this graph)
- Checks Nodes appear in topological order
- If this Graph has an owning GraphModule, checks that targets
exist in that GraphModule
"""
# Check topo order
def check_arg(arg : Node, n : Optional[Node] = None) -> None:
context_str = f' of Node \'{n}\' ' if n else ' '
if arg.graph is not self:
raise RuntimeError(f'Argument \'{arg}\'{context_str}does not belong to this Graph, '
f'but was used as an argument! If you are copying nodes from another graph, make '
f'sure to use ``arg_transform`` on node_copy() to remap values\n{self}')
if arg not in seen_values:
raise RuntimeError(f'Argument \'{arg}\'{context_str}was used before it has been '
f'defined! Please check that Nodes in the graph are topologically ordered\n{self}')
seen_names : Set[str] = set()
seen_values : Set[Node] = set()
for node in self.nodes:
if node.op not in ['placeholder', 'call_method', 'call_module', 'call_function', 'get_attr', 'output']:
raise RuntimeError(f'Node {node} had unknown opcode {node.op}!')
if node.graph is not self:
raise RuntimeError(f'Node \'{node}\' does not belong to this Graph!')
map_arg(node.args, lambda arg: check_arg(arg, node))
map_arg(node.kwargs, lambda arg: check_arg(arg, node))
seen_values.add(node)
if node.name in seen_names:
raise RuntimeError(f'Node redefined name {node.name}!')
seen_names.add(node.name)
# Check targets are legit
if self.owning_module:
for node in self.nodes:
if node.op == 'call_function':
if not callable(node.target):
raise ValueError(f'Node {node} target {node.target} has type {torch.typename(node.target)} but '
'a Callable is expected')
else:
if not isinstance(node.target, str):
raise ValueError(f'Node {node} target {node.target} has type {torch.typename(node.target)} but '
'a str is expected')
if node.op in ['get_attr', 'call_module']:
target_atoms = node.target.split('.')
m_itr = self.owning_module
for i, atom in enumerate(target_atoms):
new_m_itr = getattr(m_itr, atom, None)
seen_qualname = '.'.join(target_atoms[:i])
if new_m_itr is None:
raise RuntimeError(f'Node {node} target {node.target} references nonexistent attribute '
f'{atom} of {seen_qualname}')
if (node.op == "call_module"
and not isinstance(new_m_itr, torch.nn.Module)):
raise RuntimeError(f'Node {node} target {node.target} {atom} of {seen_qualname} does '
'not reference an nn.Module')
elif (node.op == "get_attr"
and not isinstance(new_m_itr, torch.nn.Module)
and not isinstance(new_m_itr, torch.nn.Parameter)
and atom not in m_itr._buffers):
warnings.warn(f'Node {node} target {node.target} {atom} of {seen_qualname} does '
'not reference an nn.Module, nn.Parameter, or buffer, which is '
'what \'get_attr\' Nodes typically target')
else:
m_itr = new_m_itr
@compatibility(is_backward_compatible=True)
def eliminate_dead_code(self):
"""
Remove all dead code from the graph, based on each node's number of
users, and whether the nodes have any side effects. The graph must be
topologically sorted before calling.
Returns:
bool: Whether the graph was changed as a result of the pass.
Example:
Before dead code is eliminated, `a` from `a = x + 1` below has no users
and thus can be eliminated from the graph without having an effect.
.. code-block:: python
def forward(self, x):
a = x + 1
return x + self.attr_1
After dead code is eliminated, `a = x + 1` has been removed, and the rest
of `forward` remains.
.. code-block:: python
def forward(self, x):
return x + self.attr_1
.. warning::
Dead code elimination has some heuristics to avoid removing
side-effectful nodes (see Node.is_impure) but in general coverage
is very bad, so you should assume that this method is not sound
to call unless you know that your FX graph consists entirely
of functional operations.
"""
# Lint the graph first to make sure it's topologically sorted, otherwise
# DCE below will not behave as expected.
self.lint()
# Reverse iterate so that when we remove a node, any nodes used as an
# input to that node have an updated user count that no longer reflects
# the removed node.
changed = False
for node in reversed(self.nodes):
if not node.is_impure() and len(node.users) == 0:
self.erase_node(node)
changed = True
return changed
@compatibility(is_backward_compatible=False)
def set_codegen(self, codegen: CodeGen):
self._codegen = codegen
@compatibility(is_backward_compatible=False)
def on_generate_code(
self,
make_transformer: Callable[[Optional[TransformCodeFunc]], TransformCodeFunc]
):
"""Register a transformer function when python code is generated
Args:
make_transformer (Callable[[Optional[TransformCodeFunc]], TransformCodeFunc]):
a function that returns a code transformer to be registered.
This function is called by `on_generate_code` to obtain the
code transformer.
This function is also given as its input the currently
registered code transformer (or None if nothing is registered),
in case it is not desirable to overwrite it. This is useful to
chain code transformers together.
Returns:
a context manager that, when used in a `with` statement, automatically
restores the previously registered code transformer.
Example:
.. code-block:: python
gm: fx.GraphModule = ...
# This is a code transformer we want to register. This code
# transformer prepends a pdb import and trace statement at the very
# beginning of the generated torch.fx code to allow for manual
# debugging with the PDB library.
def insert_pdb(body):
return ["import pdb; pdb.set_trace()\\n", *body]
# Registers `insert_pdb`, and overwrites the current registered
# code transformer (given by `_` to the lambda):
gm.graph.on_generate_code(
lambda _: insert_pdb
)
# Or alternatively, registers a code transformer which first
# runs `body` through existing registered transformer, then
# through `insert_pdb`:
gm.graph.on_generate_code(
lambda current_trans: (
lambda body: insert_pdb(
current_trans(body) if current_trans
else body
)
)
)
gm.recompile()
gm(*inputs) # drops into pdb
This function can also be used as a context manager, with the benefit of
automatically restoring the previously registered code transformer:
.. code-block:: python
# ... continue from previous example
with gm.graph.on_generate_code(lambda _: insert_pdb):
# do more stuff with `gm`...
gm.recompile()
gm(*inputs) # drops into pdb
# now previous code transformer is restored (but `gm`'s code with pdb
# remains - that means you can run `gm` with pdb here too, until you
# run next `recompile()`).
"""
on_gen_code_old = self._codegen._body_transformer
self._codegen._body_transformer = make_transformer(on_gen_code_old)
@contextlib.contextmanager
def on_generate_code_context_manager():
try:
yield
finally:
self._codegen._body_transformer = on_gen_code_old
return on_generate_code_context_manager()
reflectable_magic_methods = {
'add': '{} + {}',
'sub': '{} - {}',
'mul': '{} * {}',
'floordiv': '{} // {}',
'truediv': '{} / {}',
'div': '{} / {}',
'mod': '{} % {}',
'pow': '{} ** {}',
'lshift': '{} << {}',
'rshift': '{} >> {}',
'and_': '{} & {}',
'or_': '{} | {}',
'xor': '{} ^ {}',
'getitem': '{}[{}]',
'matmul': '{} @ {}',
}
magic_methods = dict({
'eq': '{} == {}',
'ne': '{} != {}',
'lt': '{} < {}',
'gt': '{} > {}',
'le': '{} <= {}',
'ge': '{} >= {}',
'pos': '+{}',
'neg': '-{}',
'invert': '~{}'}, **reflectable_magic_methods)
inplace_methods = {
'iadd': '{} += {}',
'iand': '{} &= {}',
'ifloordiv': '{} //= {}',
'ilshift': '{} <<= {}',
'imod': '{} %= {}',
'imul': '{} *= {}',
'imatmul': '{} @= {}',
'ior': '{} |= {}',
'ipow': '{} **= {}',
'irshift': '{} >>= {}',
'isub': '{} -= {}',
'itruediv': '{} /= {}',
'ixor': '{} ^= {}',
'setitem': '{}[{}] = {}',
}
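# Illustrative sketch: a minimal end-to-end use of the Graph API above. The
# helper name is hypothetical and exists only for demonstration; it builds a
# Graph by hand, lints it, wraps it in a GraphModule, and runs the generated
# forward().
def _example_build_graph_by_hand():
    import torch
    import torch.fx
    g = torch.fx.Graph()
    x = g.placeholder('x')                         # function input
    relu = g.call_function(torch.relu, args=(x,))  # free-function callsite
    g.output(relu)                                 # return value
    g.lint()                                       # sanity-check the IR
    gm = torch.fx.GraphModule(torch.nn.Module(), g)
    print(gm.code)                                 # generated Python for forward()
    return gm(torch.randn(3))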
| pytorch-master | torch/fx/graph.py |
from torch.fx.experimental.unification import Var # type: ignore[attr-defined]
from ._compatibility import compatibility
@compatibility(is_backward_compatible=False)
class TensorType:
"""
TensorType defines a type for tensors, which consists of a list of dimensions.
Example:
class M(torch.nn.Module):
def forward(self, x:TensorType((1,2,3, Dyn)), y:TensorType((1,2,3, Dyn))):
return torch.add(x, y)
"""
def __init__(self, dim):
self.__origin__ = TensorType
self.__args__ = dim
def __repr__(self):
return f'TensorType[{self.__args__}]'
def __eq__(self, other):
if isinstance(other, self.__class__):
return list(self.__args__) == list(other.__args__)
else:
return False
@staticmethod
def __class_getitem__(*args):
return TensorType(args[0])
class _DynType:
"""
_DynType defines a type which stands for the absence of type information.
"""
def __init__(self):
self.__name__ = '_DynType'
def __eq__(self, other):
return isinstance(other, self.__class__)
def __str__(self):
return "Dyn"
def __repr__(self):
return "Dyn"
Dyn = _DynType()
@compatibility(is_backward_compatible=False)
def is_consistent(t1, t2):
"""
A binary relation denoted by ~ that determines if t1 is consistent with t2.
The relation is reflexive and symmetric, but not transitive.
Returns True if t1 and t2 are consistent and False otherwise.
Example:
Dyn ~ TensorType((1,2,3))
int ~ Dyn
int ~ int
TensorType((1,Dyn,3)) ~ TensorType((1,2,3))
"""
if t1 == t2:
return True
if t1 == Dyn or t2 == Dyn or isinstance(t1, Var) or isinstance(t2, Var):
return True
if isinstance(t1, TensorType) and isinstance(t2, TensorType):
return len(t1.__args__) == len(t2.__args__) and \
all([is_consistent(elem1, elem2) for elem1, elem2 in zip(t1.__args__, t2.__args__)])
else:
return False
@compatibility(is_backward_compatible=False)
def is_more_precise(t1, t2):
"""
A binary relation denoted by <= that determines if t1 is more precise than t2.
The relation is reflexive and transitive.
Returns True if t1 is more precise than t2 and False otherwise.
Example:
TensorType((1,2,3)) <= Dyn
int <= Dyn
int <= int
TensorType((1,2,3)) <= TensorType((1,Dyn,3))
"""
if t1 == t2:
return True
if isinstance(t2, _DynType):
return True
if isinstance(t1, TensorType) and isinstance(t2, TensorType):
return len(t1.__args__) == len(t2.__args__) and \
all([is_more_precise(elem1, elem2) for elem1, elem2 in zip(t1.__args__, t2.__args__)])
else:
return False
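# Illustrative sketch: exercising the consistency and precision relations
# defined above. The helper name is hypothetical; the expected results follow
# directly from the definitions.
def _example_type_relations():
    concrete = TensorType((1, 2, 3))
    partial = TensorType((1, Dyn, 3))
    assert is_consistent(Dyn, concrete)            # Dyn is consistent with anything
    assert is_consistent(partial, concrete)        # a Dyn dimension matches any dimension
    assert is_more_precise(concrete, Dyn)          # every type is more precise than Dyn
    assert is_more_precise(concrete, partial)      # concrete dims refine Dyn dims
    assert not is_more_precise(partial, concrete)  # the converse does not hold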
| pytorch-master | torch/fx/tensor_type.py |
r'''
FX is a toolkit for developers to use to transform ``nn.Module``
instances. FX consists of three main components: a **symbolic tracer,**
an **intermediate representation**, and **Python code generation**. A
demonstration of these components in action:
::
import torch
# Simple module for demonstration
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
return self.linear(x + self.param).clamp(min=0.0, max=1.0)
module = MyModule()
from torch.fx import symbolic_trace
# Symbolic tracing frontend - captures the semantics of the module
symbolic_traced : torch.fx.GraphModule = symbolic_trace(module)
# High-level intermediate representation (IR) - Graph representation
print(symbolic_traced.graph)
"""
graph():
%x : [#users=1] = placeholder[target=x]
%param : [#users=1] = get_attr[target=param]
%add : [#users=1] = call_function[target=operator.add](args = (%x, %param), kwargs = {})
%linear : [#users=1] = call_module[target=linear](args = (%add,), kwargs = {})
%clamp : [#users=1] = call_method[target=clamp](args = (%linear,), kwargs = {min: 0.0, max: 1.0})
return clamp
"""
# Code generation - valid Python code
print(symbolic_traced.code)
"""
def forward(self, x):
param = self.param
add = x + param; x = param = None
linear = self.linear(add); add = None
clamp = linear.clamp(min = 0.0, max = 1.0); linear = None
return clamp
"""
The **symbolic tracer** performs "symbolic execution" of the Python
code. It feeds fake values, called Proxies, through the code. Operations
on these Proxies are recorded. More information about symbolic tracing
can be found in the :func:`symbolic_trace` and :class:`Tracer`
documentation.
The **intermediate representation** is the container for the operations
that were recorded during symbolic tracing. It consists of a list of
Nodes that represent function inputs, callsites (to functions, methods,
or :class:`torch.nn.Module` instances), and return values. More information
about the IR can be found in the documentation for :class:`Graph`. The
IR is the format on which transformations are applied.
**Python code generation** is what makes FX a Python-to-Python (or
Module-to-Module) transformation toolkit. For each Graph IR, we can
create valid Python code matching the Graph's semantics. This
functionality is wrapped up in :class:`GraphModule`, which is a
:class:`torch.nn.Module` instance that holds a :class:`Graph` as well as a
``forward`` method generated from the Graph.
Taken together, this pipeline of components (symbolic tracing ->
intermediate representation -> transforms -> Python code generation)
constitutes the Python-to-Python transformation pipeline of FX. In
addition, these components can be used separately. For example,
symbolic tracing can be used in isolation to capture a form of
the code for analysis (and not transformation) purposes. Code
generation can be used for programmatically generating models, for
example from a config file. There are many uses for FX!
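As a small illustration of the transformation step (the helper name
``swap_add_for_mul`` below is hypothetical, not an FX API), a pass can
rewrite every ``operator.add`` callsite in the IR and regenerate the code:
::
    import operator
    import torch
    import torch.fx
    def swap_add_for_mul(m: torch.nn.Module) -> torch.fx.GraphModule:
        gm = torch.fx.symbolic_trace(m)
        for node in gm.graph.nodes:
            if node.op == 'call_function' and node.target is operator.add:
                node.target = operator.mul  # rewrite the callsite in place
        gm.graph.lint()
        gm.recompile()                      # regenerate forward() from the Graph
        return gm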
Several example transformations can be found at the
`examples <https://github.com/pytorch/examples/tree/master/fx>`__
repository.
'''
from .graph_module import GraphModule
from ._symbolic_trace import symbolic_trace, Tracer, wrap, PH, ProxyableClassMeta
from .graph import Graph, CodeGen
from .node import Node, map_arg
from .proxy import Proxy
from .interpreter import Interpreter as Interpreter, Transformer as Transformer
from .subgraph_rewriter import replace_pattern
| pytorch-master | torch/fx/__init__.py |
from typing import Any, Dict, Tuple, List
from ._compatibility import compatibility
from torch.utils._pytree import Context, _register_pytree_node
_help_mutation = """\
If you are attempting to modify the kwargs or args of a torch.fx.Node object,
instead create a new copy of it and assign the copy to the node:
new_args = ... # copy and mutate args
node.args = new_args
"""
def _no_mutation(self, *args, **kwargs):
raise NotImplementedError(f"'{type(self).__name__}' object does not support mutation. {_help_mutation}")
def _create_immutable_container(base, mutable_functions):
container = type('immutable_' + base.__name__, (base,), {})
for attr in mutable_functions:
setattr(container, attr, _no_mutation)
return container
immutable_list = _create_immutable_container(list,
['__delitem__', '__iadd__', '__imul__', '__setitem__', 'append',
'clear', 'extend', 'insert', 'pop', 'remove'])
immutable_list.__reduce__ = lambda self: (immutable_list, (tuple(iter(self)),))
compatibility(is_backward_compatible=True)(immutable_list)
immutable_dict = _create_immutable_container(dict, ['__delitem__', '__setitem__', 'clear', 'pop', 'popitem', 'update'])
immutable_dict.__reduce__ = lambda self: (immutable_dict, (iter(self.items()),))
compatibility(is_backward_compatible=True)(immutable_dict)
# Register immutable collections for PyTree operations
def _immutable_dict_flatten(d: Dict[Any, Any]) -> Tuple[List[Any], Context]:
return list(d.values()), list(d.keys())
def _immutable_dict_unflatten(values: List[Any], context: Context) -> Dict[Any, Any]:
return immutable_dict({key: value for key, value in zip(context, values)})
def _immutable_list_flatten(d: List[Any]) -> Tuple[List[Any], Context]:
return d, None
def _immutable_list_unflatten(values: List[Any], context: Context) -> List[Any]:
return immutable_list(values)
_register_pytree_node(immutable_dict, _immutable_dict_flatten, _immutable_dict_unflatten)
_register_pytree_node(immutable_list, _immutable_list_flatten, _immutable_list_unflatten)
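# Illustrative sketch: the immutable containers reject in-place mutation but
# still round-trip through the pytree registration above. The helper name is
# hypothetical and exists only for demonstration.
def _example_immutable_containers():
    from torch.utils._pytree import tree_flatten, tree_unflatten
    il = immutable_list([1, 2, 3])
    try:
        il.append(4)                 # every mutating method raises NotImplementedError
    except NotImplementedError:
        pass
    leaves, spec = tree_flatten(il)  # dispatches to _immutable_list_flatten
    rebuilt = tree_unflatten(leaves, spec)
    assert leaves == [1, 2, 3]
    assert isinstance(rebuilt, immutable_list)
    return rebuilt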
| pytorch-master | torch/fx/immutable_collections.py |
from torch.fx.proxy import Proxy
from ._compatibility import compatibility
@compatibility(is_backward_compatible=False)
def annotate(val, type):
# val could be either a regular value (not tracing)
# or fx.Proxy (tracing)
if isinstance(val, Proxy):
if val.node.type:
raise RuntimeError(f"Tried to annotate a value that already had a type on it!"
f" Existing type is {val.node.type} "
f"and new type is {type}. "
f"This could happen if you tried to annotate a function parameter "
f"value (in which case you should use the type slot "
f"on the function signature) or you called "
f"annotate on the same value twice")
else:
val.node.type = type
return val
else:
return val
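# Illustrative sketch: during symbolic tracing `annotate` records the given
# type on the Proxy's underlying Node; outside of tracing it returns the value
# unchanged. The module and helper names below are hypothetical.
def _example_annotate():
    import torch
    import torch.fx
    class _M(torch.nn.Module):
        def forward(self, x):
            y = annotate(x + 1, torch.Tensor)  # x + 1 is a Proxy while tracing
            return y * 2
    gm = torch.fx.symbolic_trace(_M())
    add_node = next(n for n in gm.graph.nodes if n.op == 'call_function')
    assert add_node.type is torch.Tensor  # recorded on the Node by annotate
    assert annotate(5, int) == 5          # concrete value: passed through untouched
    return gm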
| pytorch-master | torch/fx/annotate.py |