diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/nn/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f2341ef9542e6f1ce609216f0f87392f4b183fe5 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/nn/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/__pycache__/common_types.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/nn/__pycache__/common_types.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dae8e64403ad97736df49645f7d939419f4a2c1b Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/nn/__pycache__/common_types.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/__pycache__/functional.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/nn/__pycache__/functional.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..730af63a1ac762a52b950cfeef7ce6a1a9e2e0e6 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/nn/__pycache__/functional.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/attention/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/nn/attention/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a1f87cfd6ccfbd5c80f1104ce1039f4f23fa73f0 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/nn/attention/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/attention/__pycache__/_utils.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/nn/attention/__pycache__/_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..586278636b517b105fb19edecd166bdaefc1aca7 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/nn/attention/__pycache__/_utils.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/attention/__pycache__/bias.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/nn/attention/__pycache__/bias.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6570897ace4c0d8f7851d7b0feadeedbf7731fe4 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/nn/attention/__pycache__/bias.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/attention/_utils.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/attention/_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..6662eb58f361f1d650bb5f217d7d72571d6652a1 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/attention/_utils.py @@ -0,0 +1,57 @@ +"""Defines utilities for interacting with scaled_dot_product_attention""" +import math +from typing import List, Optional + +import torch + +__all__: List[str] = [] + + +def _input_requires_grad(*tensors: torch.Tensor) -> bool: + """Returns True if any of the tensors requires grad""" + return any(t.requires_grad for t in tensors) + + +def _postprocess_flash_output(inpt_tensor: torch.Tensor, og_size: int) -> torch.Tensor: + """Handles the unpad of the last dimension""" + if inpt_tensor.size(-1) != og_size: + return inpt_tensor[..., :og_size] + return 
inpt_tensor + + +def _calculate_scale(head_dim_size: int, scale: Optional[float]) -> float: + """ + For FlashAttention we pad the head dimension to be a multiple of 8 so we need to scale the output + by the original head size and not the padded. + """ + if scale is not None: + return scale + return 1.0 / math.sqrt(head_dim_size) + + +def _validate_sdpa_input( + query: torch.Tensor, + key: torch.Tensor, + value: torch.Tensor, + attn_mask: Optional[torch.Tensor] = None, + dropout_p=0.0, + is_causal=False, + scale=None, +): + if query.dtype != key.dtype or query.dtype != value.dtype: + raise ValueError( + f"Expected query, key, and value to have the same dtype, " + f"but got query.dtype: {query.dtype}, key.dtype: {key.dtype}, " + f"and value.dtype: {value.dtype} instead." + ) + if query.device != key.device or query.device != value.device: + raise ValueError( + f"Expected query, key, and value to have the same device type, " + f"but got query.device: {query.device}, key.device: {key.device}, " + f"and value.device: {value.device} instead." + ) + if query.dim() < 2 or key.dim() < 2 or value.dim() < 2: + raise ValueError( + f"Expected query, key, and value to all be at least 2 dimensional, but got query.dim: " + f"{query.dim()}, key.dim: {key.dim()} and value.dim: {value.dim()} instead." + ) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/attention/bias.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/attention/bias.py new file mode 100644 index 0000000000000000000000000000000000000000..d54ed8915789d4ac2cd9c328c95003e4c27e7e43 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/attention/bias.py @@ -0,0 +1,353 @@ +"""Defines bias subclasses that work with scaled_dot_product_attention""" +from enum import auto, IntEnum +from typing import Optional +from warnings import warn + +import torch +from torch.backends.cuda import ( + can_use_efficient_attention, + can_use_flash_attention, + SDPAParams, +) +from torch.nn.attention import _raise_kernel_warnings +from torch.nn.attention._utils import ( + _calculate_scale, + _input_requires_grad, + _postprocess_flash_output, + _validate_sdpa_input, +) +from torch.nn.functional import scaled_dot_product_attention + +__all__ = ["causal_upper_left", "causal_lower_right", "CausalVariant", "CausalBias"] + + +torch._dynamo.allow_in_graph(can_use_flash_attention) +torch._dynamo.allow_in_graph(can_use_efficient_attention) +torch._dynamo.allow_in_graph(SDPAParams) + + +class CausalVariant(IntEnum): + r""" + Enum for causal variants used in attention mechanisms. + + Defines two types of causal biases: + + `UPPER_LEFT`: Represents upper-left triangular bias for standard causal attention. + The equivalent pytorch code for constructing this bias is: + + .. code-block:: python + + torch.tril(torch.ones(size, dtype=torch.bool)) + + For instance, with `shape=(3,4)`, the materialized bias tensor will be: + + .. code-block:: text + + [[1, 0, 0, 0], + [1, 1, 0, 0], + [1, 1, 1, 0]] + + + `LOWER_RIGHT`: Represents lower-right triangular bias, the include values are aligned to the lower + right corner of the matrix. + + The equivalent pytorch code for constructing this bias is: + + .. code-block:: python + + diagonal_offset = size[1] - size[0] + torch.tril( + torch.ones(size, dtype=torch.bool), + diagonal=diagonal_offset, + ) + + For instance, with `shape=(3,4)`, the materialized bias tensor will be: + + .. 
code-block:: text + + [[1, 1, 0, 0], + [1, 1, 1, 0], + [1, 1, 1, 1]] + + Note that these variants are equivalent to each other when the sequence lengths of the query and key/value + tensors are equal since the triangular matrix is square. + + .. warning:: This enum is a prototype and subject to change. + """ + + UPPER_LEFT = auto() + LOWER_RIGHT = auto() + + +class CausalBias(torch.Tensor): + """ + A bias representing causal attention patterns. For an overview of the bias structure, see the :class:`CausalVariant` enum. + + This class is used for defining causal (triangular) attention biases. For construing the bias, there exist + two factory functions: :func:`causal_upper_left` and :func:`causal_lower_right`. + + Example: + + .. code-block:: python + + from torch.nn.attention.bias import causal_lower_right + + bsz, num_heads, seqlen_q, seqlen_kv, head_dim = 32, 8, 4, 12, 8 + + # Create a lower-right causal bias + attn_bias = causal_lower_right(seqlen_q, seqlen_kv) + + q = torch.randn(bsz, num_heads, seqlen_q, head_dim, device="cuda", dtype=torch.float16) + k = torch.randn(bsz, num_heads, seqlen_kv, head_dim, device="cuda", dtype=torch.float16) + v = torch.randn(bsz, num_heads, seqlen_kv, head_dim, device="cuda", dtype=torch.float16) + + out = F.scaled_dot_product_attention(q, k, v, attn_bias) + + .. warning:: This class is a prototype and subject to change. + """ + + def __init__(self, variant: CausalVariant, seq_len_q: int, seq_len_kv: int): + """ + Initializes the CausalBias instance with a specified variant and sequence lengths. + + Args: + variant (CausalVariant): The type of causal bias to use (either UPPER_LEFT or LOWER_RIGHT). + seq_len_q (int): The sequence length of the query tensor. + seq_len_kv (int): The sequence length of the key/value tensor. + + Raises a warning if the LOWER_RIGHT variant is used with seq_len_q > seq_len_kv, as it may produce NaNs. + """ + assert isinstance(variant, CausalVariant) + self.variant = variant + self.seq_len_q = seq_len_q + self.seq_len_kv = seq_len_kv + if seq_len_q > seq_len_kv and variant == CausalVariant.LOWER_RIGHT: + warn( + "Lower right causal bias will produce NaNs in the output when seq_len_q > seq_len_kv!" + ) + + def _upper_left(self, device: torch.device) -> torch.Tensor: + """Upper left causal bias""" + return torch.tril( + torch.ones(self.seq_len_q, self.seq_len_kv, device=device, dtype=torch.bool) + ) + + def _lower_right(self, device: torch.device) -> torch.Tensor: + """Lower right causal bias""" + diagonal_offset = self.seq_len_kv - self.seq_len_q + return torch.tril( + torch.ones( + self.seq_len_q, self.seq_len_kv, device=device, dtype=torch.bool + ), + diagonal=diagonal_offset, + ) + + def _materialize(self, device: Optional[torch.device] = None) -> torch.Tensor: + """ + Materializes the causal bias into a tensor form. + + Depending on the variant, this method generates either an upper-left or lower-right + triangular matrix to represent the causal bias. + + Args: + device (Optional[torch.device]): The device on which to create the tensor. Defaults to CPU. + + Returns: + torch.Tensor: The materialized bias tensor. 
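+
+        Example (an illustrative sketch; the ``(3, 4)`` shape below is arbitrary
+        and mirrors the LOWER_RIGHT case documented in :class:`CausalVariant`):
+
+        .. code-block:: python
+
+            bias = CausalBias(CausalVariant.LOWER_RIGHT, seq_len_q=3, seq_len_kv=4)
+            mask = bias._materialize()  # bool tensor of shape (3, 4):
+            # [[1, 1, 0, 0],
+            #  [1, 1, 1, 0],
+            #  [1, 1, 1, 1]]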
+ """ + if device is None: + device = torch.device("cpu") + if self.variant == CausalVariant.UPPER_LEFT: + return self._upper_left(device) + elif self.variant == CausalVariant.LOWER_RIGHT: + return self._lower_right(device) + + @staticmethod + def _dispatch( + query: torch.Tensor, + key: torch.Tensor, + value: torch.Tensor, + attn_mask: "CausalBias", + dropout_p: float = 0.0, + is_causal: bool = False, + scale: Optional[float] = None, + ) -> torch.Tensor: + r""" + Handles the logic for computing attention with the specified causal bias. + + Args: + query (Tensor): Query tensor; shape :math:`(N, ..., L, E)`. + key (Tensor): Key tensor; shape :math:`(N, ..., S, E)`. + value (Tensor): Value tensor; shape :math:`(N, ..., S, Ev)`. + attn_mask (CausalBias): The type of causal attention to apply. + A boolean mask where a value of True indicates that the element *should* take part in attention. + A float mask of the same type as query, key, value that is added to the attention score. + dropout_p (float): Dropout probability; if greater than 0.0, dropout is applied + is_causal (bool): If true, assumes upper left causal attention masking and errors if both attn_mask and is_causal + are set. + scale (optional float): Scaling factor applied prior to softmax. If None, the default value is set + to :math:`\frac{1}{\sqrt{E}}`. + + Returns: + output (Tensor): Attention output; shape :math:`(N, ..., L, Ev)`. + + Raises: + ValueError: If the causal bias variant is not a CausalVariant type. + + """ + if is_causal: + raise ValueError("CausalBias should not be used with causal=True") + + if ( + attn_mask.seq_len_q == attn_mask.seq_len_kv + or attn_mask.variant == CausalVariant.UPPER_LEFT + ): + return scaled_dot_product_attention( + query, + key, + value, + attn_mask=None, + dropout_p=dropout_p, + is_causal=True, + scale=scale, + ) + elif attn_mask.variant == CausalVariant.LOWER_RIGHT: + _validate_sdpa_input(query, key, value, None, dropout_p, is_causal, scale) + sdpa_params = SDPAParams(query, key, value, None, dropout_p, is_causal) + if can_use_flash_attention(sdpa_params): + needs_padding = query.size(-1) % 8 != 0 + og_head_size = query.size(-1) + og_scale = _calculate_scale(og_head_size, scale) + if needs_padding: + query = torch.nn.functional.pad(query, (0, 8 - query.size(-1) % 8)) + key = torch.nn.functional.pad(key, (0, 8 - key.size(-1) % 8)) + value = torch.nn.functional.pad(value, (0, 8 - value.size(-1) % 8)) + out = torch.ops.aten._scaled_dot_product_flash_attention( + query, + key, + value, + dropout_p, + is_causal=True, # TODO: Flash accepts causal = True and for this particular op it means lower right + return_debug_mask=False, + scale=og_scale, + )[0] + return _postprocess_flash_output(out, og_head_size) + if can_use_efficient_attention(sdpa_params): + compute_log_sumexp = False + if _input_requires_grad(query, key, value): + compute_log_sumexp = True + return torch.ops.aten._efficient_attention_forward( + query.transpose(1, 2), + key.transpose(1, 2), + value.transpose(1, 2), + bias=None, + cu_seqlens_q=None, + cu_seqlens_k=None, + max_seqlen_q=None, + max_seqlen_k=None, + dropout_p=dropout_p, + custom_mask_type=int(attn_mask.variant), + compute_log_sumexp=compute_log_sumexp, + scale=scale, + causal_diagonal=None, + seqlen_k=None, + )[0].transpose(1, 2) + else: + _raise_kernel_warnings(sdpa_params) + # We cant use efficient attention the only support for lower right is via materialization + return scaled_dot_product_attention( + query, + key, + value, + 
attn_mask=attn_mask._materialize(query.device), + dropout_p=dropout_p, + is_causal=False, + scale=scale, + ) + else: + raise ValueError( + f"CausalBias.variant must be a CausalVariant type, but found: {attn_mask.variant}" + ) + + @classmethod + def __torch_function__(cls, func, types, args=(), kwargs=None): + """Defines the behavior of torch.nn.functional.scaled_dot_product_attention when the attn_bias is an AttnBias""" + if kwargs is None: + kwargs = {} + if func != torch.nn.functional.scaled_dot_product_attention: + raise NotImplementedError( + "CausalBias only supports scaled_dot_product_attention" + ) + return cls._dispatch(*args, **kwargs) + + def __repr__(self): + return self._materialize().__repr__() + + +def causal_upper_left(*size) -> CausalBias: + """ + Creates an upper-left triangular causal bias. + + This function generates a upper-left triangular matrix to represent causal attention bias with a + diagonal offset set so that the inclusive values are aligned to the upper left corner of the matrix. + This equivalent to the `is_causal=True` argument in `scaled_dot_product_attention`. + + The equivalent pytorch code for constructing this bias is: + + .. code-block:: python + + torch.tril(torch.ones(size, dtype=torch.bool)) + + For instance, with `shape=(3,4)`, the materialized bias tensor will be: + + .. code-block:: text + + [[1, 0, 0, 0], + [1, 1, 0, 0], + [1, 1, 1, 0]] + + Args: + size: The size of the bias matrix. + + Returns: + CausalBias: The UPPER_LEFT triangular causal bias variant. + """ + assert len(size) == 2, "causal_upper_left only supports 2D tensors" + seq_len_q, seq_len_kv = size + return CausalBias(CausalVariant.UPPER_LEFT, seq_len_q, seq_len_kv) + + +def causal_lower_right(*size) -> CausalBias: + """ + Creates a lower-right triangular causal bias. + + This function generates a lower-right triangular matrix to represent causal attention bias with a + diagonal offset set so that the inclusive values are aligned to the lower right corner of the matrix. + + The equivalent pytorch code for constructing this bias is: + + .. code-block:: python + + diagonal_offset = size[1] - size[0] + torch.tril( + torch.ones(size, dtype=torch.bool), + diagonal=diagonal_offset, + ) + + For instance, with `shape=(3,4)`, the materialized bias tensor will be: + + .. code-block:: text + + [[1, 1, 0, 0], + [1, 1, 1, 0], + [1, 1, 1, 1]] + + Args: + size: The size of the bias matrix. + + Returns: + CausalBias: The LOWER_RIGHT triangular causal bias variant. + """ + assert len(size) == 2, "causal_lower_right only supports 2D tensors" + seq_len_q, seq_len_kv = size + return CausalBias(CausalVariant.LOWER_RIGHT, seq_len_q, seq_len_kv) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/parallel/_functions.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/parallel/_functions.py new file mode 100644 index 0000000000000000000000000000000000000000..d987ed2bc427462bf517665bda8e1889d5a58c90 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/parallel/_functions.py @@ -0,0 +1,126 @@ +import warnings + +import torch +from . 
import comm +from torch.autograd import Function +from torch._utils import _get_device_index +from typing import List, Optional + + +class Broadcast(Function): + + @staticmethod + def forward(ctx, target_gpus, *inputs): + assert all(i.device.type != 'cpu' for i in inputs), ( + 'Broadcast function not implemented for CPU tensors' + ) + target_gpus = [_get_device_index(x, True) for x in target_gpus] + ctx.target_gpus = target_gpus + if len(inputs) == 0: + return tuple() + ctx.num_inputs = len(inputs) + ctx.input_device = inputs[0].get_device() + outputs = comm.broadcast_coalesced(inputs, ctx.target_gpus) + non_differentiables = [] + for idx, input_requires_grad in enumerate(ctx.needs_input_grad[1:]): + if not input_requires_grad: + for output in outputs: + non_differentiables.append(output[idx]) + ctx.mark_non_differentiable(*non_differentiables) + return tuple([t for tensors in outputs for t in tensors]) + + @staticmethod + def backward(ctx, *grad_outputs): + return (None,) + ReduceAddCoalesced.apply(ctx.input_device, ctx.num_inputs, *grad_outputs) + + +class ReduceAddCoalesced(Function): + + @staticmethod + def forward(ctx, destination, num_inputs, *grads): + ctx.target_gpus = [grads[i].get_device() for i in range(0, len(grads), num_inputs)] + + grads_ = [grads[i:i + num_inputs] + for i in range(0, len(grads), num_inputs)] + return comm.reduce_add_coalesced(grads_, destination) + + @staticmethod + def backward(ctx, *grad_outputs): + return (None, None,) + Broadcast.apply(ctx.target_gpus, *grad_outputs) + + +class Gather(Function): + + @staticmethod + def forward(ctx, target_device, dim, *inputs): + assert all(i.device.type != 'cpu' for i in inputs), ( + 'Gather function not implemented for CPU tensors' + ) + if (target_device == 'cpu'): + ctx.target_device = 'cpu' + else: + target_device = _get_device_index(target_device, True) + ctx.target_device = target_device + ctx.dim = dim + ctx.input_gpus = tuple(i.get_device() for i in inputs) + if all(t.dim() == 0 for t in inputs) and dim == 0: + inputs = tuple(t.view(1) for t in inputs) + warnings.warn('Was asked to gather along dimension 0, but all ' + 'input tensors were scalars; will instead unsqueeze ' + 'and return a vector.') + ctx.unsqueezed_scalar = True + else: + ctx.unsqueezed_scalar = False + ctx.input_sizes = tuple(i.size(ctx.dim) for i in inputs) + return comm.gather(inputs, ctx.dim, ctx.target_device) + + @staticmethod + def backward(ctx, grad_output): + scattered_grads = Scatter.apply(ctx.input_gpus, ctx.input_sizes, ctx.dim, grad_output) + if ctx.unsqueezed_scalar: + scattered_grads = tuple(g[0] for g in scattered_grads) + return (None, None) + scattered_grads + + +class Scatter(Function): + + @staticmethod + def forward(ctx, target_gpus, chunk_sizes, dim, input): + target_gpus = [_get_device_index(x, True) for x in target_gpus] + ctx.dim = dim + ctx.input_device = input.get_device() if input.device.type != "cpu" else -1 + streams = None + if torch.cuda.is_available() and ctx.input_device == -1: + # Perform CPU to GPU copies in a background stream + streams = [_get_stream(torch.device("cuda", device)) for device in target_gpus] + outputs = comm.scatter(input, target_gpus, chunk_sizes, ctx.dim, streams) + # Synchronize with the copy stream + if streams is not None: + for i, output in enumerate(outputs): + with torch.cuda.device(target_gpus[i]): + main_stream = torch.cuda.current_stream() + main_stream.wait_stream(streams[i]) + output.record_stream(main_stream) + return outputs + + @staticmethod + def backward(ctx, *grad_output): + 
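+        # Gradients flow back by gathering the per-device grads onto the
+        # original input device; the three leading ``None``s correspond to the
+        # non-tensor forward args (target_gpus, chunk_sizes, dim).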
return None, None, None, Gather.apply(ctx.input_device, ctx.dim, *grad_output) + + +# background streams used for copying +_streams: Optional[List[Optional[torch.Stream]]] = None + +def _get_stream(device: torch.device): + """Get a background stream for copying between CPU and target device.""" + global _streams + if device.type == "cpu": + return None + device_mod = getattr(torch, device.type, None) + if device_mod is None: + return None + if _streams is None: + _streams = [None] * device_mod.device_count() + if _streams[device.index] is None: + _streams[device.index] = device_mod.Stream(device.index) + return _streams[device.index] diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/parallel/comm.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/parallel/comm.py new file mode 100644 index 0000000000000000000000000000000000000000..764775587d6859665fb13e2140c588ed5d91dc1c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/parallel/comm.py @@ -0,0 +1,236 @@ +import warnings +import torch +from torch.cuda import nccl +from torch._utils import _take_tensors, _flatten_dense_tensors, \ + _unflatten_dense_tensors, _reorder_tensors_as, _get_device_index, _handle_complex +from typing import List + +def broadcast(tensor, devices=None, *, out=None): + r"""Broadcasts a tensor to specified GPU devices. + + Args: + tensor (Tensor): tensor to broadcast. Can be on CPU or GPU. + devices (Iterable[torch.device, str or int], optional): an iterable of + GPU devices, among which to broadcast. + out (Sequence[Tensor], optional, keyword-only): the GPU tensors to + store output results. + + .. note:: + Exactly one of :attr:`devices` and :attr:`out` must be specified. + + Returns: + - If :attr:`devices` is specified, + a tuple containing copies of :attr:`tensor`, placed on + :attr:`devices`. + - If :attr:`out` is specified, + a tuple containing :attr:`out` tensors, each containing a copy of + :attr:`tensor`. + """ + tensor = _handle_complex(tensor) + if not ((devices is None) ^ (out is None)): + raise RuntimeError( + f"Exactly one of 'devices' and 'out' must be specified, but got devices={devices} and out={out}") + if devices is not None: + devices = [_get_device_index(d) for d in devices] + return torch._C._broadcast(tensor, devices) + else: + return torch._C._broadcast_out(tensor, out) + + +def broadcast_coalesced(tensors, devices, buffer_size=10485760): + """Broadcast a sequence of tensors to the specified GPUs. + + Small tensors are first coalesced into a buffer to reduce the number of synchronizations. + + Args: + tensors (sequence): tensors to broadcast. Must be on the same device, + either CPU or GPU. + devices (Iterable[torch.device, str or int]): an iterable of GPU + devices, among which to broadcast. + buffer_size (int): maximum size of the buffer used for coalescing + + Returns: + A tuple containing copies of :attr:`tensor`, placed on :attr:`devices`. + """ + devices = [_get_device_index(d) for d in devices] + tensors = [_handle_complex(t) for t in tensors] + return torch._C._broadcast_coalesced(tensors, devices, buffer_size) + + +def reduce_add(inputs, destination=None): + """Sum tensors from multiple GPUs. + + All inputs should have matching shapes, dtype, and layout. The output tensor + will be of the same shape, dtype, and layout. + + Args: + inputs (Iterable[Tensor]): an iterable of tensors to add. + destination (int, optional): a device on which the output will be + placed (default: current device). 
+ + Returns: + A tensor containing an elementwise sum of all inputs, placed on the + :attr:`destination` device. + """ + destination = _get_device_index(destination, optional=True) + input_size = inputs[0].size() + root_index = None # index of input tensor that already is on the correct device + for i, inp in enumerate(inputs): + assert inp.device.type != "cpu", "reduce_add expects all inputs to be on GPUs" + if inp.get_device() == destination: + root_index = i + if inp.size() != input_size: + got = 'x'.join(str(x) for x in inp.size()) + expected = 'x'.join(str(x) for x in input_size) + raise ValueError(f"input {i} has invalid size: got {got}, but expected {expected}") + if root_index is None: + raise RuntimeError("reduce_add expects destination to be on the same GPU with one of the tensors") + + if len(inputs) == 1: + return inputs[0] + + if nccl.is_available(inputs): + result = torch.empty_like(inputs[root_index]) + nccl.reduce(inputs, output=result, root=root_index) + else: + destination_device = torch.device(inputs[root_index].device.type, destination) + nonroot = [t for i, t in enumerate(inputs) if i != root_index] + # make a new tensor w/o clone + result = inputs[root_index] + nonroot[0].to(device=destination_device, non_blocking=True) + for other in nonroot[1:]: + result.add_(other.to(device=destination_device, non_blocking=True)) + return result + + +def reduce_add_coalesced(inputs, destination=None, buffer_size=10485760): + """Sum tensors from multiple GPUs. + + Small tensors are first coalesced into a buffer to reduce the number + of synchronizations. + + Args: + inputs (Iterable[Iterable[Tensor]]): iterable of iterables that + contain tensors from a single device. + destination (int, optional): a device on which the output will be + placed (default: current device). + buffer_size (int): maximum size of the buffer used for coalescing + + Returns: + A tuple of tensors containing an elementwise sum of each group of + inputs, placed on the ``destination`` device. + """ + # TODO: When `len(inputs) == 1` and all inputs are on `destination`, just + # return `inputs`. + dense_tensors: List[List] = [[] for _ in inputs] # shape (num_gpus, num_tensors) + output = [] + ref_order = [] + # process sparse ones first since they may have different sizes on different gpus + for tensor_at_gpus in zip(*inputs): + if all(t.is_sparse for t in tensor_at_gpus): + result = reduce_add(tensor_at_gpus, destination) # this will be sparse too + output.append(result) + ref_order.append(tensor_at_gpus[0]) + else: + for coll, t in zip(dense_tensors, tensor_at_gpus): + coll.append(t.to_dense() if t.is_sparse else t) + ref_order.append(dense_tensors[0][-1]) + itrs = [_take_tensors(tensors, buffer_size) for tensors in dense_tensors] + # now the dense ones, which have consistent sizes + for chunks in zip(*itrs): + flat_tensors = [_flatten_dense_tensors(chunk) for chunk in chunks] # (num_gpus,) + flat_result = reduce_add(flat_tensors, destination) + for t in _unflatten_dense_tensors(flat_result, chunks[0]): + # The unflattened tensors do not share storage, and we don't expose + # base flat tensor anyways, so give them different version counters. + # See NOTE [ Version Counter in comm.*_coalesced ] + output.append(t.data) + return tuple(_reorder_tensors_as(output, ref_order)) + + +def scatter(tensor, devices=None, chunk_sizes=None, dim=0, streams=None, *, out=None): + """Scatters tensor across multiple GPUs. + + Args: + tensor (Tensor): tensor to scatter. Can be on CPU or GPU. 
+ devices (Iterable[torch.device, str or int], optional): an iterable of + GPU devices, among which to scatter. + chunk_sizes (Iterable[int], optional): sizes of chunks to be placed on + each device. It should match :attr:`devices` in length and sums to + ``tensor.size(dim)``. If not specified, :attr:`tensor` will be divided + into equal chunks. + dim (int, optional): A dimension along which to chunk :attr:`tensor`. + Default: ``0``. + streams (Iterable[torch.cuda.Stream], optional): an iterable of Streams, among + which to execute the scatter. If not specified, the default stream will + be utilized. + out (Sequence[Tensor], optional, keyword-only): the GPU tensors to + store output results. Sizes of these tensors must match that of + :attr:`tensor`, except for :attr:`dim`, where the total size must + sum to ``tensor.size(dim)``. + + .. note:: + Exactly one of :attr:`devices` and :attr:`out` must be specified. When + :attr:`out` is specified, :attr:`chunk_sizes` must not be specified and + will be inferred from sizes of :attr:`out`. + + Returns: + - If :attr:`devices` is specified, + a tuple containing chunks of :attr:`tensor`, placed on + :attr:`devices`. + - If :attr:`out` is specified, + a tuple containing :attr:`out` tensors, each containing a chunk of + :attr:`tensor`. + """ + tensor = _handle_complex(tensor) + if out is None: + devices = [_get_device_index(d) for d in devices] + return tuple(torch._C._scatter(tensor, devices, chunk_sizes, dim, streams)) + else: + if devices is not None: + raise RuntimeError( + f"'devices' must not be specified when 'out' is specified, but got devices={devices}") + if chunk_sizes is not None: + raise RuntimeError( + f"'chunk_sizes' must not be specified when 'out' is specified, but got chunk_sizes={chunk_sizes}") + return tuple(torch._C._scatter_out(tensor, out, dim, streams)) + + +def gather(tensors, dim=0, destination=None, *, out=None): + r"""Gathers tensors from multiple GPU devices. + + Args: + tensors (Iterable[Tensor]): an iterable of tensors to gather. + Tensor sizes in all dimensions other than :attr:`dim` have to match. + dim (int, optional): a dimension along which the tensors will be + concatenated. Default: ``0``. + destination (torch.device, str, or int, optional): the output device. + Can be CPU or CUDA. Default: the current CUDA device. + out (Tensor, optional, keyword-only): the tensor to store gather result. + Its sizes must match those of :attr:`tensors`, except for :attr:`dim`, + where the size must equal ``sum(tensor.size(dim) for tensor in tensors)``. + Can be on CPU or CUDA. + + .. note:: + :attr:`destination` must not be specified when :attr:`out` is specified. + + Returns: + - If :attr:`destination` is specified, + a tensor located on :attr:`destination` device, that is a result of + concatenating :attr:`tensors` along :attr:`dim`. + - If :attr:`out` is specified, + the :attr:`out` tensor, now containing results of concatenating + :attr:`tensors` along :attr:`dim`. + """ + tensors = [_handle_complex(t) for t in tensors] + if out is None: + if destination == -1: + warnings.warn( + 'Using -1 to represent CPU tensor is deprecated. 
Please use a ' + 'device object or string instead, e.g., "cpu".') + destination = _get_device_index(destination, allow_cpu=True, optional=True) + return torch._C._gather(tensors, dim, destination) + else: + if destination is not None: + raise RuntimeError( + f"'destination' must not be specified when 'out' is specified, but got destination={destination}") + return torch._C._gather_out(tensors, out, dim) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/parallel/data_parallel.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/parallel/data_parallel.py new file mode 100644 index 0000000000000000000000000000000000000000..4471cee6f379fec8850e37163c11cde6838338fd --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/parallel/data_parallel.py @@ -0,0 +1,269 @@ +import operator +import torch +import warnings +from itertools import chain +from typing import Any, Dict, Generic, List, Optional, Sequence, Tuple, TypeVar, Union +from ..modules import Module +from .scatter_gather import scatter_kwargs, gather +from .replicate import replicate +from .parallel_apply import parallel_apply +from torch._utils import ( + _get_all_device_indices, + _get_available_device_type, + _get_device_index, + _get_devices_properties +) + +__all__ = ['DataParallel', 'data_parallel'] + +def _check_balance(device_ids: Sequence[Union[int, torch.device]]) -> None: + imbalance_warn = """ + There is an imbalance between your GPUs. You may want to exclude GPU {} which + has less than 75% of the memory or cores of GPU {}. You can do so by setting + the device_ids argument to DataParallel, or by setting the CUDA_VISIBLE_DEVICES + environment variable.""" + device_ids = [_get_device_index(x, True) for x in device_ids] + dev_props = _get_devices_properties(device_ids) + + def warn_imbalance(get_prop): + values = [get_prop(props) for props in dev_props] + min_pos, min_val = min(enumerate(values), key=operator.itemgetter(1)) + max_pos, max_val = max(enumerate(values), key=operator.itemgetter(1)) + if min_val / max_val < 0.75: + warnings.warn(imbalance_warn.format(device_ids[min_pos], device_ids[max_pos])) + return True + return False + + if warn_imbalance(lambda props: props.total_memory): + return + if warn_imbalance(lambda props: props.multi_processor_count): + return + + +T = TypeVar("T", bound=Module) + + +class DataParallel(Module, Generic[T]): + r"""Implements data parallelism at the module level. + + This container parallelizes the application of the given :attr:`module` by + splitting the input across the specified devices by chunking in the batch + dimension (other objects will be copied once per device). In the forward + pass, the module is replicated on each device, and each replica handles a + portion of the input. During the backwards pass, gradients from each replica + are summed into the original module. + + The batch size should be larger than the number of GPUs used. + + .. warning:: + It is recommended to use :class:`~torch.nn.parallel.DistributedDataParallel`, + instead of this class, to do multi-GPU training, even if there is only a single + node. See: :ref:`cuda-nn-ddp-instead` and :ref:`ddp`. + + Arbitrary positional and keyword inputs are allowed to be passed into + DataParallel but some types are specially handled. tensors will be + **scattered** on dim specified (default 0). tuple, list and dict types will + be shallow copied. The other types will be shared among different threads + and can be corrupted if written to in the model's forward pass. 
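+
+    For example, with two devices a batch tensor of shape ``(8, 16)`` is split
+    into two ``(4, 16)`` chunks, while a keyword-argument dict is shallow-copied
+    to every replica. A minimal sketch of this scattering step (assuming two
+    visible CUDA devices; ``scatter_kwargs`` is the helper this module uses
+    internally):
+
+    .. code-block:: python
+
+        import torch
+        from torch.nn.parallel.scatter_gather import scatter_kwargs
+
+        inputs, kwargs = scatter_kwargs((torch.randn(8, 16),), {"flag": True}, [0, 1], dim=0)
+        # inputs: ((chunk_on_cuda0,), (chunk_on_cuda1,)), each chunk of shape (4, 16)
+        # kwargs: ({'flag': True}, {'flag': True})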
+ + The parallelized :attr:`module` must have its parameters and buffers on + ``device_ids[0]`` before running this :class:`~torch.nn.DataParallel` + module. + + .. warning:: + In each forward, :attr:`module` is **replicated** on each device, so any + updates to the running module in ``forward`` will be lost. For example, + if :attr:`module` has a counter attribute that is incremented in each + ``forward``, it will always stay at the initial value because the update + is done on the replicas which are destroyed after ``forward``. However, + :class:`~torch.nn.DataParallel` guarantees that the replica on + ``device[0]`` will have its parameters and buffers sharing storage with + the base parallelized :attr:`module`. So **in-place** updates to the + parameters or buffers on ``device[0]`` will be recorded. E.g., + :class:`~torch.nn.BatchNorm2d` and :func:`~torch.nn.utils.spectral_norm` + rely on this behavior to update the buffers. + + .. warning:: + Forward and backward hooks defined on :attr:`module` and its submodules + will be invoked ``len(device_ids)`` times, each with inputs located on + a particular device. Particularly, the hooks are only guaranteed to be + executed in correct order with respect to operations on corresponding + devices. For example, it is not guaranteed that hooks set via + :meth:`~torch.nn.Module.register_forward_pre_hook` be executed before + `all` ``len(device_ids)`` :meth:`~torch.nn.Module.forward` calls, but + that each such hook be executed before the corresponding + :meth:`~torch.nn.Module.forward` call of that device. + + .. warning:: + When :attr:`module` returns a scalar (i.e., 0-dimensional tensor) in + :func:`forward`, this wrapper will return a vector of length equal to + number of devices used in data parallelism, containing the result from + each device. + + .. note:: + There is a subtlety in using the + ``pack sequence -> recurrent network -> unpack sequence`` pattern in a + :class:`~torch.nn.Module` wrapped in :class:`~torch.nn.DataParallel`. + See :ref:`pack-rnn-unpack-with-data-parallelism` section in FAQ for + details. 
+ + + Args: + module (Module): module to be parallelized + device_ids (list of int or torch.device): CUDA devices (default: all devices) + output_device (int or torch.device): device location of output (default: device_ids[0]) + + Attributes: + module (Module): the module to be parallelized + + Example:: + + >>> # xdoctest: +SKIP + >>> net = torch.nn.DataParallel(model, device_ids=[0, 1, 2]) + >>> output = net(input_var) # input_var can be on any device, including CPU + """ + + # TODO: update notes/cuda.rst when this class handles 8+ GPUs well + + def __init__( + self, + module: T, + device_ids: Optional[Sequence[Union[int, torch.device]]] = None, + output_device: Optional[Union[int, torch.device]] = None, + dim: int = 0, + ) -> None: + super().__init__() + torch._C._log_api_usage_once("torch.nn.parallel.DataParallel") + device_type = _get_available_device_type() + if device_type is None: + self.module = module + self.device_ids = [] + return + + if device_ids is None: + device_ids = _get_all_device_indices() + + if device_ids is None: + raise RuntimeError("no available devices were found") + + if output_device is None: + output_device = device_ids[0] + + self.dim = dim + self.module = module + self.device_ids = [_get_device_index(x, True) for x in device_ids] + self.output_device = _get_device_index(output_device, True) + self.src_device_obj = torch.device(device_type, self.device_ids[0]) + + if device_type == "cuda": + _check_balance(self.device_ids) + + if len(self.device_ids) == 1: + self.module.to(self.src_device_obj) + + def forward(self, *inputs: Any, **kwargs: Any) -> Any: + with torch.autograd.profiler.record_function("DataParallel.forward"): + if not self.device_ids: + return self.module(*inputs, **kwargs) + + for t in chain(self.module.parameters(), self.module.buffers()): + if t.device != self.src_device_obj: + raise RuntimeError("module must have its parameters and buffers " + f"on device {self.src_device_obj} (device_ids[0]) but found one of " + f"them on device: {t.device}") + + inputs, module_kwargs = self.scatter(inputs, kwargs, self.device_ids) + # for forward function without any inputs, empty list and dict will be created + # so the module can be executed on one device which is the first one in device_ids + if not inputs and not module_kwargs: + inputs = ((),) + module_kwargs = ({},) + + if len(self.device_ids) == 1: + return self.module(*inputs[0], **module_kwargs[0]) + replicas = self.replicate(self.module, self.device_ids[:len(inputs)]) + outputs = self.parallel_apply(replicas, inputs, module_kwargs) + return self.gather(outputs, self.output_device) + + def replicate(self, module: T, device_ids: Sequence[Union[int, torch.device]]) -> List[T]: + return replicate(module, device_ids, not torch.is_grad_enabled()) + + def scatter( + self, + inputs: Tuple[Any, ...], + kwargs: Optional[Dict[str, Any]], + device_ids: Sequence[Union[int, torch.device]], + ) -> Any: + return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim) + + def parallel_apply(self, replicas: Sequence[T], inputs: Sequence[Any], kwargs: Any) -> List[Any]: + return parallel_apply(replicas, inputs, kwargs, self.device_ids[:len(replicas)]) + + def gather(self, outputs: Any, output_device: Union[int, torch.device]) -> Any: + return gather(outputs, output_device, dim=self.dim) + + +def data_parallel( + module: Module, + inputs: Any, + device_ids: Optional[Sequence[Union[int, torch.device]]] = None, + output_device: Optional[Union[int, torch.device]] = None, + dim: int = 0, + module_kwargs: Optional[Any] = 
None, +) -> torch.Tensor: + r"""Evaluate module(input) in parallel across the GPUs given in device_ids. + + This is the functional version of the DataParallel module. + + Args: + module (Module): the module to evaluate in parallel + inputs (Tensor): inputs to the module + device_ids (list of int or torch.device): GPU ids on which to replicate module + output_device (list of int or torch.device): GPU location of the output Use -1 to indicate the CPU. + (default: device_ids[0]) + Returns: + a Tensor containing the result of module(input) located on + output_device + """ + if not isinstance(inputs, tuple): + inputs = (inputs,) if inputs is not None else () + + device_type = _get_available_device_type() + + if device_type is None: + raise RuntimeError("device type could not be determined") + + if device_ids is None: + device_ids = _get_all_device_indices() + + if device_ids is None: + raise RuntimeError("no available devices were found") + + if output_device is None: + output_device = device_ids[0] + + device_ids = [_get_device_index(x, True) for x in device_ids] + output_device = _get_device_index(output_device, True) + src_device_obj = torch.device(device_type, device_ids[0]) + + for t in chain(module.parameters(), module.buffers()): + if t.device != src_device_obj: + raise RuntimeError("module must have its parameters and buffers " + f"on device {src_device_obj} (device_ids[0]) but found one of " + f"them on device: {t.device}") + + inputs, module_kwargs = scatter_kwargs(inputs, module_kwargs, device_ids, dim) + # for module without any inputs, empty list and dict will be created + # so the module can be executed on one device which is the first one in device_ids + if not inputs and not module_kwargs: + inputs = ((),) + module_kwargs = ({},) + + assert module_kwargs is not None + + if len(device_ids) == 1: + return module(*inputs[0], **module_kwargs[0]) + used_device_ids = device_ids[:len(inputs)] + replicas = replicate(module, used_device_ids) + outputs = parallel_apply(replicas, inputs, module_kwargs, used_device_ids) + return gather(outputs, output_device, dim) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/parallel/distributed.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/parallel/distributed.py new file mode 100644 index 0000000000000000000000000000000000000000..e968a99cf85ddd09f4d6cf1bc2ad76e69f1b552b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/parallel/distributed.py @@ -0,0 +1,2350 @@ +import copy +import functools +import inspect +import itertools +import logging +import os +import sys +import warnings +import weakref +from collections import defaultdict, deque +from contextlib import contextmanager +from dataclasses import dataclass, fields, is_dataclass +from enum import auto, Enum +from typing import Any, Callable, List, Optional, Tuple, Type + +import torch +import torch.distributed as dist +from torch.autograd import Function, Variable +from torch.distributed.algorithms.join import Join, Joinable, JoinHook +from torch.utils._pytree import tree_flatten, tree_unflatten +from torch.utils.hooks import RemovableHandle + +RPC_AVAILABLE = False +if dist.is_available(): + from torch.distributed.distributed_c10d import ( + _get_default_group, + _rank_not_in_group, + ReduceOp, + ) + from torch.distributed.utils import ( + _alloc_storage, + _cast_forward_inputs, + _free_storage, + _sync_module_states, + _to_kwargs, + _verify_param_shape_across_processes, + ) +if torch.distributed.rpc.is_available(): + RPC_AVAILABLE = True + from 
torch.distributed.rpc import RRef + +from torch._utils import _get_device_index + +from ..modules import Module +from .scatter_gather import gather, scatter_kwargs # noqa: F401 + +__all__ = ["DistributedDataParallel"] + +logger = logging.getLogger(__name__) + + +@dataclass +class _MixedPrecision: + """ + This configures DDP-native mixed precision training. + + Attributes: + param_dtype (torch.dtype): This specifies the dtype for model + parameters, inputs (when ``cast_forward_inputs`` is set to + ``True``), and therefore the dtype for computation. + However, outside the forward and backward passes, parameters are in + full precision. Model checkpointing always happens in full + precision. + reduce_dtype (torch.dtype): This specifies the dtype for gradient + reduction, which is permitted to differ from ``param_dtype``. + buffer_dtype (torch.dtype): This specifies the dtype for buffers. + + .. note:: This API is experimental and subject to change. + + .. note:: Only floating point tensors are cast to their specified dtypes. + + .. note:: ``state_dict`` checkpoints parameters and buffers in full + precision. + + .. note:: Each low precision dtype must be specified explicitly. For + example, ``_MixedPrecision(reduce_dtype=torch.float16)`` only specifies + the reduction dtype to be low precision, and DDP will not cast + parameters or buffers. + + .. note:: If a ``reduce_dtype`` is not specified, then gradient reduction + happens in ``param_dtype`` if specified or the original parameter dtype + otherwise. For example, ``_MixedPrecision(param_dtype=torch.float16)`` + would result in communication occurring in fp16. + """ + + param_dtype: Optional[torch.dtype] = None + reduce_dtype: Optional[torch.dtype] = None + buffer_dtype: Optional[torch.dtype] = None + # TODO (rohan-varma): keep_low_precision_grads: bool = False + # TODO (rohan-varma): APIs to allow users to run batchnorm and layernorm + # in full precision. For DDP, this can be implemented by not performing the + # parameter cast for BN and LN units. + + +def _cast_buffers(mixed_precision_config, root_module): + """Casts buffers to the given ``buffer_dtype``.""" + for buf in root_module.buffers(): + if hasattr(buf, "_ddp_ignored") and buf._ddp_ignored: + continue + + buf.data = buf.to(dtype=mixed_precision_config.buffer_dtype) + + +def _setup_mixed_precision_params(mixed_precision_config, root_module): + """Create and free storage for the mixed precision parameters.""" + for param in root_module.parameters(): + # Do not setup mixed precision for DDP ignored parameters. + if hasattr(param, "_ddp_ignored") and param._ddp_ignored: + continue + + if not hasattr(param, "_mp_param"): + param._mp_param = torch.zeros_like( + param, + device=param.device, + dtype=mixed_precision_config.param_dtype, + requires_grad=param.requires_grad, + ) + _free_storage(param._mp_param) + # _fp_param will point to the full precision param so it can be switched + # back to at the end of forward / backward. + param._fp_param = param.data + + +def _tree_flatten_with_rref(output): + output_is_rref = RPC_AVAILABLE and isinstance(output, RRef) + if output_is_rref: + output_tensor_list, treespec = tree_flatten(output.local_value()) + else: + output_tensor_list, treespec = tree_flatten(output) + # Need to return flattened tensors, spec to re-pack them, as well + # as if the return type was actually an RRef to reconstruct. 
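+    # For example, tree_flatten((t1, {"k": t2})) returns ([t1, t2], spec), and
+    # tree_unflatten([t1, t2], spec) rebuilds the original (tuple, dict) layout.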
+ return output_tensor_list, treespec, output_is_rref + + +def _tree_unflatten_with_rref(output, treespec, output_is_rref): + output = tree_unflatten(output, treespec) + if output_is_rref: + output = RRef(output) + return output + + +def _find_tensors(obj): + r"""Recursively find all tensors contained in the specified object.""" + if RPC_AVAILABLE and isinstance(obj, RRef): + # If the current node is the owner of the RRef, unwrap it and try to + # find Tensors. + # TODO: Expand to remote RRefs. + if obj.is_owner(): + return _find_tensors(obj.local_value()) + if isinstance(obj, torch.Tensor): + return [obj] + if isinstance(obj, (list, tuple)): + return itertools.chain.from_iterable(map(_find_tensors, obj)) + if isinstance(obj, dict): + return itertools.chain.from_iterable(map(_find_tensors, obj.values())) + if is_dataclass(obj): + return itertools.chain.from_iterable( + map(_find_tensors, (getattr(obj, f.name) for f in fields(obj))) + ) + + return [] + + +def _dump_DDP_relevant_env_vars(): + relevant_env_vars = [ + "RANK", + "LOCAL_RANK", + "WORLD_SIZE", + "MASTER_PORT", + "MASTER_ADDR", + "CUDA_VISIBLE_DEVICES", + "GLOO_SOCKET_IFNAME", + "GLOO_DEVICE_TRANSPORT", + "NCCL_SOCKET_IFNAME", + "TORCH_NCCL_BLOCKING_WAIT", + "NCCL_DEBUG", + "NCCL_DEBUG_SUBSYS", + "NCCL_IB_DISABLE", + # More NCCL env vars: + "NCCL_P2P_DISABLE", + "NCCL_P2P_LEVEL", + "NCCL_SHM_DISABLE", + "NCCL_SOCKET_NTHREADS", + "NCCL_NSOCKS_PERTHREAD", + "NCCL_BUFFSIZE", + "NCCL_NTHREADS", + "NCCL_RINGS", + "NCCL_MAX_NCHANNELS", + "NCCL_MIN_NCHANNELS", + "NCCL_CHECKS_DISABLE", + "NCCL_CHECK_POINTERS", + "NCCL_LAUNCH_MODE", + "NCCL_IB_HCA", + "NCCL_IB_TIMEOUT", + "NCCL_IB_RETRY_CNT", + "NCCL_IB_GID_INDEX", + "NCCL_IB_SL", + "NCCL_IB_TC", + "NCCL_IB_AR_THRESHOLD", + "NCCL_IB_CUDA_SUPPORT", + "NCCL_NET_GDR_LEVEL", + "NCCL_NET_GDR_READ", + "NCCL_SINGLE_RING_THRESHOLD", + "NCCL_LL_THRESHOLD", + "NCCL_TREE_THRESHOLD", + "NCCL_ALGO", + "NCCL_PROTO", + "NCCL_IGNORE_CPU_AFFINITY", + "NCCL_DEBUG_FILE", + "NCCL_COLLNET_ENABLE", + "NCCL_TOPO_FILE", + "NCCL_TOPO_DUMP_FILE", + "TORCH_NCCL_ASYNC_ERROR_HANDLING", + ] + formatted_output = "" + for var in relevant_env_vars: + value = os.environ[var] if var in os.environ else "N/A" + formatted_output += f"env:{var}={value}\n" + print(formatted_output) + + +class _BufferCommHookLocation(Enum): + PRE_FORWARD = auto() + POST_FORWARD = auto() + + +@dataclass +class _BufferCommHook: + buffer_comm_hook: Callable + buffer_comm_hook_state: Any + buffer_comm_hook_location: _BufferCommHookLocation + + +# Add a DDPSink to run various functions when backwards starts, such as +# queueing call back of out-most backward/graph task, +# this helps call back is fired after all gradients' calculation +# is completed. +class _DDPSink(Function): + @staticmethod + def forward(ctx, ddp_weakref, *inputs): + # set_materialize_grads(False) will ensure that None gradients stay as + # None and are not filled with zeros. + ctx.set_materialize_grads(False) + ctx.ddp_weakref = ddp_weakref + ret = tuple( + inp.clone() if isinstance(inp, torch.Tensor) else inp for inp in inputs + ) + return ret + + @staticmethod + def backward(ctx, *grad_outputs): + # Enqueue delay allreduce for static graph training on the first + # iteration. 
+ ddp_weakref = ctx.ddp_weakref() + reducer = ddp_weakref.reducer + static_graph = ddp_weakref.static_graph + delay_ar_enqueued = ( + static_graph and ddp_weakref._static_graph_delay_allreduce_enqueued + ) + if static_graph and not delay_ar_enqueued: + Variable._execution_engine.queue_callback( # type: ignore[call-arg,misc] + reducer._delay_all_reduce + ) + ddp_weakref._static_graph_delay_allreduce_enqueued = True + + return (None, *grad_outputs) + + +class _DDPJoinHook(JoinHook): + def __init__(self, ddp, divide_by_initial_world_size): + """Set config variables for internal usage.""" + assert isinstance(ddp, DistributedDataParallel), ( + "DDP join hook requires passing in a DistributedDataParallel " + "instance as the state" + ) + assert ddp.logger is not None + ddp.logger._set_uneven_input_join() + self.ddp = ddp + self.ddp._divide_by_initial_world_size = divide_by_initial_world_size + super().__init__() + + def main_hook(self): + """Shadow the DDP collective communication operations in the forward and backward passes.""" + ddp = self.ddp + # Buckets are rebuilt only once during a training period + ddp.reducer._rebuild_buckets() + + # Schedule a broadcast if we are syncing module buffers in the + # forward pass + # TODO: make DDP uneven inputs context manager support buffer + # comm hook (https://github.com/pytorch/pytorch/issues/65436) + ddp._check_and_sync_module_buffers() + + # Check if need to sync in the backward pass + should_sync_backwards = ddp._check_global_requires_backward_grad_sync( + is_joined_rank=True + ) + # Forward parameter sync is disabled in the next iteration if we + # are skipping gradient sync this iteration, so set + # `require_forward_param_sync` accordingly + ddp.require_forward_param_sync = should_sync_backwards + if not should_sync_backwards: + return + + # Schedule one allreduce per gradient bucket to match the backward + # pass allreduce + ddp._match_all_reduce_for_bwd_pass() + + # Check if we need to allreduce locally unused parameters + if ddp.find_unused_parameters: + ddp._match_unused_params_allreduce() + + # Rebuilt parameters are pushed only once during a training period + ddp.reducer._push_all_rebuilt_params() + + def post_hook(self, is_last_joiner: bool): + """Sync the final model to ensure that the model is the same across all processes.""" + self.ddp._sync_final_model(is_last_joiner) + + +class DistributedDataParallel(Module, Joinable): + r"""Implement distributed data parallelism based on ``torch.distributed`` at module level. + + This container provides data parallelism by synchronizing gradients + across each model replica. The devices to synchronize across are + specified by the input ``process_group``, which is the entire world + by default. Note that ``DistributedDataParallel`` does not chunk or + otherwise shard the input across participating GPUs; the user is + responsible for defining how to do so, for example through the use + of a :class:`DistributedSampler`. + + See also: :ref:`distributed-basics` and :ref:`cuda-nn-ddp-instead`. + The same constraints on input as in :class:`torch.nn.DataParallel` apply. + + Creation of this class requires that ``torch.distributed`` to be already + initialized, by calling :func:`torch.distributed.init_process_group`. + + ``DistributedDataParallel`` is proven to be significantly faster than + :class:`torch.nn.DataParallel` for single-node multi-GPU data + parallel training. 
+ + To use ``DistributedDataParallel`` on a host with N GPUs, you should spawn + up ``N`` processes, ensuring that each process exclusively works on a single + GPU from 0 to N-1. This can be done by either setting + ``CUDA_VISIBLE_DEVICES`` for every process or by calling: + + >>> # xdoctest: +SKIP("undefined variables") + >>> torch.cuda.set_device(i) + + where i is from 0 to N-1. In each process, you should refer the following + to construct this module: + + >>> # xdoctest: +SKIP("undefined variables") + >>> torch.distributed.init_process_group( + >>> backend='nccl', world_size=N, init_method='...' + >>> ) + >>> model = DistributedDataParallel(model, device_ids=[i], output_device=i) + + In order to spawn up multiple processes per node, you can use either + ``torch.distributed.launch`` or ``torch.multiprocessing.spawn``. + + .. note:: + Please refer to `PyTorch Distributed Overview `__ + for a brief introduction to all features related to distributed training. + + .. note:: + ``DistributedDataParallel`` can be used in conjunction with + :class:`torch.distributed.optim.ZeroRedundancyOptimizer` to reduce + per-rank optimizer states memory footprint. Please refer to + `ZeroRedundancyOptimizer recipe `__ + for more details. + + .. note:: ``nccl`` backend is currently the fastest and highly recommended + backend when using GPUs. This applies to both single-node and + multi-node distributed training. + + .. note:: This module also supports mixed-precision distributed training. + This means that your model can have different types of parameters such + as mixed types of ``fp16`` and ``fp32``, the gradient reduction on these + mixed types of parameters will just work fine. + + .. note:: If you use ``torch.save`` on one process to checkpoint the module, + and ``torch.load`` on some other processes to recover it, make sure that + ``map_location`` is configured properly for every process. Without + ``map_location``, ``torch.load`` would recover the module to devices + where the module was saved from. + + .. note:: When a model is trained on ``M`` nodes with ``batch=N``, the + gradient will be ``M`` times smaller when compared to the same model + trained on a single node with ``batch=M*N`` if the loss is summed (NOT + averaged as usual) across instances in a batch (because the gradients + between different nodes are averaged). You should take this into + consideration when you want to obtain a mathematically equivalent + training process compared to the local training counterpart. But in most + cases, you can just treat a DistributedDataParallel wrapped model, a + DataParallel wrapped model and an ordinary model on a single GPU as the + same (E.g. using the same learning rate for equivalent batch size). + + .. note:: + Parameters are never broadcast between processes. The module performs + an all-reduce step on gradients and assumes that they will be modified + by the optimizer in all processes in the same way. Buffers + (e.g. BatchNorm stats) are broadcast from the module in process of rank + 0, to all other replicas in the system in every iteration. + + .. note:: + If you are using DistributedDataParallel in conjunction with the + :ref:`distributed-rpc-framework`, you should always use + :meth:`torch.distributed.autograd.backward` to compute gradients and + :class:`torch.distributed.optim.DistributedOptimizer` for optimizing + parameters. 
+ + Example:: + + >>> # xdoctest: +SKIP("undefined variables") + >>> import torch.distributed.autograd as dist_autograd + >>> from torch.nn.parallel import DistributedDataParallel as DDP + >>> import torch + >>> from torch import optim + >>> from torch.distributed.optim import DistributedOptimizer + >>> import torch.distributed.rpc as rpc + >>> from torch.distributed.rpc import RRef + >>> + >>> t1 = torch.rand((3, 3), requires_grad=True) + >>> t2 = torch.rand((3, 3), requires_grad=True) + >>> rref = rpc.remote("worker1", torch.add, args=(t1, t2)) + >>> ddp_model = DDP(my_model) + >>> + >>> # Setup optimizer + >>> optimizer_params = [rref] + >>> for param in ddp_model.parameters(): + >>> optimizer_params.append(RRef(param)) + >>> + >>> dist_optim = DistributedOptimizer( + >>> optim.SGD, + >>> optimizer_params, + >>> lr=0.05, + >>> ) + >>> + >>> with dist_autograd.context() as context_id: + >>> pred = ddp_model(rref.to_here()) + >>> loss = loss_func(pred, target) + >>> dist_autograd.backward(context_id, [loss]) + >>> dist_optim.step(context_id) + + .. note:: + DistributedDataParallel currently offers limited support for gradient + checkpointing with :meth:`torch.utils.checkpoint`. + If the checkpoint is done with use_reentrant=False (recommended), DDP + will work as expected without any limitations. + If, however, the checkpoint is done with use_reentrant=True (the default), + DDP will work as expected when there are no unused parameters in the model + and each layer is checkpointed at most once (make sure you are not passing + `find_unused_parameters=True` to DDP). We currently do not support the + case where a layer is checkpointed multiple times, or when there unused + parameters in the checkpointed model. + + .. note:: + To let a non-DDP model load a state dict from a DDP model, + :meth:`~torch.nn.modules.utils.consume_prefix_in_state_dict_if_present` + needs to be applied to strip the prefix "module." in the DDP state dict before loading. + + .. warning:: + Constructor, forward method, and differentiation of the output (or a + function of the output of this module) are distributed synchronization + points. Take that into account in case different processes might be + executing different code. + + .. warning:: + This module assumes all parameters are registered in the model by the + time it is created. No parameters should be added nor removed later. + Same applies to buffers. + + .. warning:: + This module assumes all parameters are registered in the model of each + distributed processes are in the same order. The module itself will + conduct gradient ``allreduce`` following the reverse order of the + registered parameters of the model. In other words, it is users' + responsibility to ensure that each distributed process has the exact + same model and thus the exact same parameter registration order. + + .. warning:: + This module allows parameters with non-rowmajor-contiguous strides. + For example, your model may contain some parameters whose + :class:`torch.memory_format` is ``torch.contiguous_format`` + and others whose format is ``torch.channels_last``. However, + corresponding parameters in different processes must have the + same strides. + + .. warning:: + This module doesn't work with :func:`torch.autograd.grad` (i.e. it will + only work if gradients are to be accumulated in ``.grad`` attributes of + parameters). + + .. 
+    .. warning::
+        If you plan on using this module with a ``nccl`` backend or a ``gloo``
+        backend (that uses Infiniband), together with a DataLoader that uses
+        multiple workers, please change the multiprocessing start method to
+        ``forkserver`` (Python 3 only) or ``spawn``. Unfortunately
+        Gloo (that uses Infiniband) and NCCL2 are not fork safe, and you will
+        likely experience deadlocks if you don't change this setting.
+
+    .. warning::
+        You should never try to change your model's parameters after wrapping
+        your model with ``DistributedDataParallel``, because the
+        ``DistributedDataParallel`` constructor registers additional gradient
+        reduction functions on all of the model's parameters at construction
+        time. If you change the model's parameters afterwards, the gradient
+        reduction functions will no longer match the correct set of parameters.
+
+    .. warning::
+        Using ``DistributedDataParallel`` in conjunction with the
+        :ref:`distributed-rpc-framework` is experimental and subject to change.
+
+    Args:
+        module (Module): module to be parallelized
+        device_ids (list of int or torch.device): CUDA devices.
+                   1) For single-device modules, ``device_ids`` can
+                   contain exactly one device id, which represents the only
+                   CUDA device where the input module corresponding to this process resides.
+                   Alternatively, ``device_ids`` can also be ``None``.
+                   2) For multi-device modules and CPU modules,
+                   ``device_ids`` must be ``None``.
+
+                   When ``device_ids`` is ``None`` for both cases,
+                   both the input data for the forward pass and the actual module
+                   must be placed on the correct device.
+                   (default: ``None``)
+        output_device (int or torch.device): Device location of output for
+                      single-device CUDA modules. For multi-device modules and
+                      CPU modules, it must be ``None``, and the module itself
+                      dictates the output location. (default: ``device_ids[0]``
+                      for single-device modules)
+        broadcast_buffers (bool): Flag that enables syncing (broadcasting)
+                          buffers of the module at the beginning of the ``forward``
+                          function. (default: ``True``)
+        process_group: The process group to be used for distributed data
+                       all-reduction. If ``None``, the default process group, which
+                       is created by :func:`torch.distributed.init_process_group`,
+                       will be used. (default: ``None``)
+        bucket_cap_mb: ``DistributedDataParallel`` will bucket parameters into
+                       multiple buckets so that gradient reduction of each
+                       bucket can potentially overlap with backward computation.
+                       :attr:`bucket_cap_mb` controls the bucket size in
+                       MegaBytes (MB). (default: 25)
+        find_unused_parameters (bool): Traverse the autograd graph from all
+                               tensors contained in the return value of the
+                               wrapped module's ``forward`` function. Parameters
+                               that don't receive gradients as part of this
+                               graph are preemptively marked as being ready to
+                               be reduced. In addition, parameters that may have
+                               been used in the wrapped module's ``forward``
+                               function but were not part of loss computation and
+                               thus would also not receive gradients are
+                               preemptively marked as ready to be reduced.
+                               (default: ``False``)
+        check_reduction: This argument is deprecated.
+        gradient_as_bucket_view (bool): When set to ``True``, gradients will be views
+                      pointing to different offsets of ``allreduce`` communication
+                      buckets. This can reduce peak memory usage, where the
+                      saved memory size will be equal to the total gradients size.
+                      Moreover, it avoids the overhead of copying between
+                      gradients and ``allreduce`` communication buckets. When
+                      gradients are views, ``detach_()`` cannot be called on the
+                      gradients. If you hit such errors, please fix them by
+                      referring to the :meth:`~torch.optim.Optimizer.zero_grad`
+                      function in ``torch/optim/optimizer.py`` as a solution.
+                      Note that gradients will be views after the first iteration,
+                      so the peak memory saving should be checked after the first
+                      iteration.
+        static_graph (bool): When set to ``True``, DDP knows the trained graph is
+                     static. A static graph means that 1) the set of used and
+                     unused parameters will not change during the whole training
+                     loop (in this case, it does not matter whether users set
+                     ``find_unused_parameters = True`` or not), and 2) how the
+                     graph is trained will not change during the whole training
+                     loop (meaning there is no control flow depending on iterations).
+                     When static_graph is set to ``True``, DDP supports cases that
+                     could not be supported in the past:
+                     1) Reentrant backwards.
+                     2) Activation checkpointing multiple times.
+                     3) Activation checkpointing when the model has unused parameters.
+                     4) Model parameters that are outside of the forward function.
+                     5) Potentially improved performance when there are unused
+                     parameters, as DDP will not search the graph in each iteration
+                     to detect unused parameters when static_graph is set to ``True``.
+                     To check whether you can set static_graph to ``True``, one way
+                     is to check the DDP logging data at the end of your previous
+                     model training; if ``ddp_logging_data.get("can_set_static_graph") == True``,
+                     you can most likely set ``static_graph = True`` as well.
+
+                     Example::
+
+                         >>> # xdoctest: +SKIP("undefined variables")
+                         >>> model_DDP = torch.nn.parallel.DistributedDataParallel(model)
+                         >>> # Training loop
+                         >>> ...
+                         >>> ddp_logging_data = model_DDP._get_ddp_logging_data()
+                         >>> static_graph = ddp_logging_data.get("can_set_static_graph")
+        delay_all_reduce_named_params (list of tuple of str and torch.nn.Parameter): a list
+                    of named parameters whose all reduce will be delayed when the gradient of
+                    the parameter specified in ``param_to_hook_all_reduce`` is ready. Other
+                    arguments of DDP do not apply to the named params specified in this argument,
+                    as these named params will be ignored by the DDP reducer.
+        param_to_hook_all_reduce (torch.nn.Parameter): a parameter to hook the delayed all reduce
+                    of parameters specified in ``delay_all_reduce_named_params``.
+
+
+    Attributes:
+        module (Module): the module to be parallelized.
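+
+    .. note::
+        A minimal, illustrative sketch of the delayed all-reduce arguments
+        described above (the module attributes ``model.embedding`` and
+        ``model.classifier`` below are hypothetical, not part of any API)::
+
+            >>> # xdoctest: +SKIP("undefined variables")
+            >>> ddp_model = torch.nn.parallel.DistributedDataParallel(
+            >>>     model,
+            >>>     delay_all_reduce_named_params=[
+            >>>         ("embedding.weight", model.embedding.weight),
+            >>>     ],
+            >>>     param_to_hook_all_reduce=model.classifier.weight,
+            >>> )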
+ + Example:: + + >>> # xdoctest: +SKIP("undefined variables") + >>> torch.distributed.init_process_group(backend='nccl', world_size=4, init_method='...') + >>> net = torch.nn.parallel.DistributedDataParallel(model) + """ + + # used to track whether the given thread is inside ddp forward for torchdynamo purposes + _active_ddp_module: Optional["DistributedDataParallel"] = None + + def __init__( + self, + module, + device_ids=None, + output_device=None, + dim=0, + broadcast_buffers=True, + process_group=None, + bucket_cap_mb=25, + find_unused_parameters=False, + check_reduction=False, + gradient_as_bucket_view=False, + static_graph=False, + delay_all_reduce_named_params=None, + param_to_hook_all_reduce=None, + mixed_precision: Optional[_MixedPrecision] = None, + device_mesh=None, + ): + super().__init__() + Joinable.__init__(self) + self.logger = None + if bool(delay_all_reduce_named_params is not None) != bool( + param_to_hook_all_reduce is not None + ): + self._log_and_throw( + ValueError, + "delay_all_reduce_named_params and param_to_hook_all_reduce " + "need to be set at the same time.", + ) + + self._delay_all_reduce_params = [] + if hasattr(module, "_ddp_params_and_buffers_to_ignore"): + self.parameters_to_ignore = set(module._ddp_params_and_buffers_to_ignore) + else: + self.parameters_to_ignore = set() + if delay_all_reduce_named_params is not None: + for name, param in delay_all_reduce_named_params: + self.parameters_to_ignore.add(name) + self._delay_all_reduce_params.append(param) + + self._module_parameters = [ + p + for n, p in module.named_parameters() + if n not in self.parameters_to_ignore + ] + if not any(p.requires_grad for p in self._module_parameters): + if len(self._delay_all_reduce_params): + logger.info("Delay the AllReduce of all parameters.") + else: + self._log_and_throw( + RuntimeError, + "DistributedDataParallel is not needed when a module " + "doesn't have any parameter that requires a gradient.", + ) + + if device_ids is not None and len(device_ids) > 1: + self._log_and_throw( + ValueError, + "device_ids can only be None or contain a single element.", + ) + + self.is_multi_device_module = ( + len({p.device for p in self._module_parameters}) > 1 + ) + distinct_device_types = { + p.device.type for p in self._module_parameters if p.device is not None + } + if len(distinct_device_types) != 1: + self._log_and_throw( + ValueError, + "DistributedDataParallel's input module must be on " + f"the same type of devices, but input module parameters locate in {distinct_device_types}.", + ) + + self.device_type = next(iter(distinct_device_types)) + + if ( + device_ids is None + or len(device_ids) == 0 # For backward compatibility. + or self.device_type == "cpu" + or self.is_multi_device_module + ): + if device_ids or output_device: + self._log_and_throw( + ValueError, + "DistributedDataParallel device_ids and output_device arguments " + "only work with single-device/multiple-device GPU modules or CPU modules, " + "but got device_ids {}, output_device {}, and module parameters {}.".format( + device_ids, + output_device, + {p.device for p in self._module_parameters}, + ), + ) + + self.device_ids = None + self.output_device = None + else: + self.device_ids = [_get_device_index(x, True) for x in device_ids] + + if output_device is None: + output_device = device_ids[0] + + self.output_device = _get_device_index(output_device, True) + + if process_group and device_mesh is not None: + raise RuntimeError( + "Cannot specify both process_group and device_mesh arguments." 
+ ) + elif process_group is None and device_mesh is None: + self.process_group = _get_default_group() + elif device_mesh is None: + self.process_group = process_group + else: + if device_mesh.ndim != 1: + raise RuntimeError( + f"Only 1D device mesh is supported, but got {device_mesh}." + ) + self.device_mesh = device_mesh + self.process_group = device_mesh.get_group(mesh_dim=0) + + self.static_graph = False + self.dim = dim + self.module = module + self.device = next(iter(self._module_parameters)).device + self.broadcast_buffers = broadcast_buffers + self.find_unused_parameters = find_unused_parameters + self.require_backward_grad_sync = True + self.require_forward_param_sync = True + self.gradient_as_bucket_view = gradient_as_bucket_view + self.mixed_precision = mixed_precision + if self.mixed_precision is not None: + logger.warning("Received mixed precision config %s", self.mixed_precision) + + if check_reduction: + # This argument is no longer used since the reducer + # will ensure reduction completes even if some parameters + # do not receive gradients. + warnings.warn( + "The `check_reduction` argument in `DistributedDataParallel` " + "module is deprecated. Please avoid using it." + ) + + # Check that a module does not have Uninitialized parameters + for param in self._module_parameters: + if isinstance(param, torch.nn.parameter.UninitializedParameter): + self._log_and_throw( + RuntimeError, + "Modules with uninitialized parameters can't be used with `DistributedDataParallel`. " + "Run a dummy forward pass to correctly initialize the modules", + ) + # used for intra-node param sync and inter-node sync as well + self.broadcast_bucket_size = int(250 * 1024 * 1024) + + # reduction bucket size + self.bucket_bytes_cap = int(bucket_cap_mb * 1024 * 1024) + # Whether to perform input tensor CPU to GPU copies on a side-stream + self.use_side_stream_for_tensor_copies = ( + os.environ.get("PYTORCH_DDP_USE_SIDE_STREAM", "1") == "1" + ) + + # Initialize gradient buffers and register all reduce hook + self._delay_grad_buffer = None + self._delay_grad_views: List[torch.Tensor] = [] + self._delay_all_reduce_all_params = False + if len(self._delay_all_reduce_params) != 0: + self._register_delay_all_reduce_hook( + bucket_cap_mb=bucket_cap_mb, + param_to_hook_all_reduce=param_to_hook_all_reduce, + device_ids=device_ids, + ) + if self._delay_all_reduce_all_params: + return + + # Build parameters for reducer. + parameters, expect_sparse_gradient = self._build_params_for_reducer() + # Verify model equivalence. + _verify_param_shape_across_processes(self.process_group, parameters) + # Sync params and buffers. Ensures all DDP models start off at the same value. + _sync_module_states( + module=self.module, + process_group=self.process_group, + broadcast_bucket_size=self.broadcast_bucket_size, + src=0, + params_and_buffers_to_ignore=self.parameters_to_ignore, + broadcast_buffers=self.broadcast_buffers, + ) + # In debug mode, build a mapping of parameter index -> parameter. + param_to_name_mapping = self._build_debug_param_to_name_mapping(parameters) + + # Builds reducer. + self._ddp_init_helper( + parameters, + expect_sparse_gradient, + param_to_name_mapping, + static_graph, + ) + self._comm_hooks: List[Tuple[Callable, object]] = [] + + if self.mixed_precision is not None: + _setup_mixed_precision_params(self.mixed_precision, self.module) + _cast_buffers(self.mixed_precision, self.module) + # Stream used for async low precision copies. 
+ self._mp_stream = torch.cuda.Stream() + self._submodule_to_event = defaultdict(deque) # type: ignore[var-annotated] + # Add forward pre-hook to root module to kick off copies to lower + # precision. + self.module.register_forward_pre_hook( + self._root_copy_hook, prepend=False, with_kwargs=True + ) + # Add forward pre hook to all submodules to wait for copy events + # before running computation. + for module in self.module.modules(): + module.register_forward_pre_hook( + self._module_wait_for_copy_hook, + prepend=False, + with_kwargs=True, + ) + # Set up callbacks in backward to upcast and use full precision + # params. TODO (rohan-varma): Make this compose with general + # comm hooks and apply_optimizer_in_backward. Importing inline to + # avoid circular import issue. + from torch.distributed.algorithms.ddp_comm_hooks.mixed_precision_hooks import ( + _AllreduceUpcastHookState, + _reducer_allreduce_and_upcast_hook, + ) + + upcast_hook_state = _AllreduceUpcastHookState( + ddp_weakref=weakref.ref(self), + upcast_stream=torch.cuda.Stream(), + ) + self.register_comm_hook( + upcast_hook_state, + _reducer_allreduce_and_upcast_hook, + ) + # Inform reducer of reduced precision param dtype for correctness + # of type checks between gradient and bucket. + self.reducer._set_mixed_precision_param_dtype( # type: ignore[attr-defined] + self.mixed_precision.param_dtype + ) + + self._has_rebuilt_buckets = False + + if static_graph: + self._set_static_graph() + + self._lazy_init_ran = False + + # Register the AccumulateGrad post hooks if optimize_ddp is + # True. The hooks will be deregistered if compiled_autograd is not + # enabled. + self._accum_grad_hooks: List[RemovableHandle] = [] + optimize_ddp = torch._dynamo.config._get_optimize_ddp_mode() + self._use_python_reducer = optimize_ddp in ( + "python_reducer", + "python_reducer_without_compiled_forward", + ) + self._force_to_disable_cpp_reducer = ( + optimize_ddp == "python_reducer_without_compiled_forward" + ) + if self._use_python_reducer: + self._register_accum_grad_hook() + + def _register_accum_grad_hook(self): + import torch.distributed._functional_collectives as fcol + + def compiled_accum_grad_hook( + param, + *, + param_index: int, + ): + if not self.require_backward_grad_sync: + return + + if param.grad is None: + return + + if self._comm_hooks: + for hook, state in self._comm_hooks: + hook(state, (param.grad, param)) + else: + gradient = param.grad / self.process_group.size() + gradient = fcol.all_reduce(gradient, "sum", self.process_group) + param.grad.copy_(gradient) + + for index, param in enumerate(self._module_parameters): + self._accum_grad_hooks.append( + param.register_post_accumulate_grad_hook( + functools.partial( + compiled_accum_grad_hook, + param_index=index, + ) + ) + ) + + def _delayed_all_reduce_hook(self, grad): + world_size = dist.get_world_size(self.process_group) + + self._delay_grad_buffer.div_(world_size) # type: ignore[union-attr] + _ = dist.all_reduce( + self._delay_grad_buffer, group=self.process_group, async_op=True + ) + return grad + + def _register_delay_all_reduce_hook( + self, + bucket_cap_mb, + param_to_hook_all_reduce, + device_ids, + ): + # 1. Create gradient buffer + device = torch.device("cpu") if device_ids is None else device_ids[0] + self._delay_grad_buffer = torch.zeros( + sum([p.numel() for p in self._delay_all_reduce_params]), + device=device, + ) + + # 2. 
Broadcast the parameters + detached_params = [p.detach() for p in self._delay_all_reduce_params] + dist._broadcast_coalesced(self.process_group, detached_params, bucket_cap_mb, 0) + + # 3. Hook all reduce to the specified parameter + param_to_hook_all_reduce.register_hook(self._delayed_all_reduce_hook) + + # 4. Build tensor views for gradients + offset = 0 + for param in self._delay_all_reduce_params: + grad_view = self._delay_grad_buffer[offset : (offset + param.numel())].view( + param.shape + ) + self._delay_grad_views.append(grad_view) + offset = offset + param.numel() + + # 5. Check whether the all reduce of all params requiring grad is delayed. + for module_name, module in self.module.named_modules(): + for param_name, param in module.named_parameters(recurse=False): + if param.requires_grad: + full_name = f"{module_name}.{param_name}" + if full_name not in self.parameters_to_ignore: + # There is at least a param whose all reduce will not be delayed. + # In this case, we should not set self._delay_all_reduce_all_params + # to True. + return + self._delay_all_reduce_all_params = True + + def _setup_in_backward_optimizers(self): + # Check if user has used apply_optim_in_backward to overlap optimizer + # step + DDP backward. Current constraints: + # 1. Only allreduce is supported at the moment, no custom communication. + # 2. For DDP-managed parameters that have their optimizer run in + # backward, their gradients are set to ``None``. If your use case + # requires DDP parameters grad not to be set to ``None`` after their + # in-backward optimizer runs, please ping + # https://github.com/pytorch/pytorch/issues/90052. + # NOTE: we use self._module_parameters instead of .parameters() since + # the former excludes ignored (non-DDP managed) parameters. + if any(hasattr(p, "_in_backward_optimizers") for p in self._module_parameters): + torch._C._log_api_usage_once("ddp.optimizer_in_backward") + # Remove hooks that apply_optim_in_backward had registered because + # DDP customizes how optimizer is overlapped with backward due to + # the allreduce. + param_to_handle_map = ( + dist.optim.apply_optimizer_in_backward.param_to_optim_hook_handle_map + ) + for p in self._module_parameters: + for handle in param_to_handle_map.get(p, []): + handle.remove() + + # Need a weakref to DDP instance to run all_reduce (from reducer) + # and get managed DDP parameters. + ddp_weakref = weakref.ref(self) + # Note: importing in function, otherwise this will cause a circular + # import. + from torch.distributed.algorithms.ddp_comm_hooks.optimizer_overlap_hooks import ( + _apply_optim_in_backward_hook, + ) + + self.register_comm_hook( + ddp_weakref, + _apply_optim_in_backward_hook( + gradient_is_bucket_view=self.gradient_as_bucket_view + ), + ) + + self.reducer._set_optimizer_in_backward() # type: ignore[attr-defined] + + def _fire_reducer_autograd_hook(self, idx, *unused): + """ + Fire the reducer's autograd hook to allreduce params in a Reducer bucket. + + Note that this is only used during mixed precision training as the + Reducer's hooks installed during construction time would not be called + as we're working in the low precision parameter setting. + """ + self.reducer._autograd_hook(idx) # type: ignore[attr-defined] + + def _root_copy_hook(self, *args: Any, **kwargs: Any) -> None: + """ + For DDP mixed precision, put low precision copies on separate stream and create events to wait for them. 
+ + When training with DDP mixed precision, this root pre-forward hook kicks + off low precision copies on a separate stream and creates respective + events to wait for them. + """ + # Clear out previous iteration submodule to event. This is because we + # may have populated some events for modules that didn't end up being + # used. + self._submodule_to_event = defaultdict(deque) # type: ignore[var-annotated] + with torch.cuda.stream(self._mp_stream): + for submodule in self.module.modules(): + for param in submodule.parameters(recurse=False): + # Do not cast DDP ignored parameters. + if hasattr(param, "_ddp_ignored") and param._ddp_ignored: + continue + _alloc_storage(param._mp_param, param.size()) + # copy() implicitly casts to low precision + with torch.no_grad(): + param._mp_param.copy_(param.data) + # TODO: when zero_grad(set_to_none=False) or in grad + # accumulation case, accumulated grads can be in fp32 + # which can cause errors when running DDP backwards due + # to mismatched incoming and accumulated gradient types. + # So we manually cast the accumulated grad down for now, + # in the future we may shift to FSDP style gradient + # accumulation management where the accumulated gradient + # is saved and .grad field is set to None, bypassing + # this issue. + if param.grad is not None: + param.grad.data = param.grad.to( + self.mixed_precision.param_dtype # type: ignore[union-attr] + ) + param.data = param._mp_param + copy_event = torch.cuda.Event() + copy_event.record() + self._submodule_to_event[submodule].append(copy_event) + + def _module_wait_for_copy_hook( + self, + module, + *args: Any, + **kwargs: Any, + ) -> None: + """Before carrying out computation, wait on the appropriate event to ensure low precision copies have finished.""" + try: + event = self._submodule_to_event[module].popleft() + except IndexError: + # copy event has already been waited on + return + + event.wait(stream=torch.cuda.current_stream()) + for p in module.parameters(recurse=False): + # Don't register hooks if param does not require grad + if not p.requires_grad or (hasattr(p, "_ddp_ignored") and p._ddp_ignored): + continue + # We need to register autograd hook here instead of DDP's ctor + # since we're working with the low precision param. Register them + # via obtaining the gradient accumulator. + tmp = p.expand_as(p) + grad_acc = tmp.grad_fn.next_functions[0][0] + + hook = grad_acc.register_hook( + functools.partial(self._fire_reducer_autograd_hook, p._idx) + ) + p._ddp_mp_hook_state = (grad_acc, hook) + + def _log_and_throw(self, err_type, err_msg): + if self.logger is not None: + self.logger.set_error_and_log(f"{str(err_type)}: {err_msg}") + raise err_type(err_msg) + + def _ddp_init_helper( + self, + parameters, + expect_sparse_gradient, + param_to_name_mapping, + static_graph, + ): + """ + DDP init helper function to manage parameters, grad hooks, logging, and SyncBatchNorm. + + Initialization helper function that does the following: + (1) bucketing the parameters for reductions + (2) resetting the bucketing states + (3) registering the grad hooks + (4) Logging construction-time DDP logging data + (5) passing a handle of DDP to SyncBatchNorm Layer + """ + # Notice, the parameters order is not in the order in which they are used, + # especially in models with control flow. + # + # Alongside parameters are not presented in the real execution order, + # if a certain model happens to also + # 1) have other collectives comm ops in its backward graph. 
+ # 2) have unused parameter in subset ranks of the whole world. + # bucketing could insert ALL-REDUCE comm op too early on the rank with unused parameter, + # matching up with other collectives comm ops on other ranks unexpectedly. + # + # In order to handle this corner case, when the parameters are not in the real execution order, + # we don't do bucketing, thus only one ALL-REDUCE is inserted after all the gradients + # of the whole graph are computed. + # + # Notice, here we only disable bucketing for the first iteration. + # After the first iteration, it's OK to rebuild buckets, + # because "bucket rebuild" bucketizes parameters based on its real execution order in backward graph. + + # Can remove this branching once #73732 is landed. + if static_graph is True or self.find_unused_parameters is False: + bucket_size_limits = [sys.maxsize] + else: + bucket_size_limits = [ + dist._DEFAULT_FIRST_BUCKET_BYTES, + self.bucket_bytes_cap, + ] + ( + bucket_indices, + per_bucket_size_limits, + ) = dist._compute_bucket_assignment_by_size( + parameters, + bucket_size_limits, + expect_sparse_gradient, + ) + + # Remember index for parameters if we are in mixed precision, as we + # need to pass in index to Reducer's autograd hook via python. + if self.mixed_precision is not None: + for i, p in enumerate(parameters): + p._idx = i + + # Note: reverse list of buckets because we want to approximate the + # order in which their gradients are produced, and assume they + # are used in the forward pass in the order they are defined. + self.reducer = dist.Reducer( + parameters, + list(reversed(bucket_indices)), + list(reversed(per_bucket_size_limits)), + self.process_group, + expect_sparse_gradient, + # The bucket size limit is specified in the constructor. + # Additionally, we allow for a single small bucket for parameters + # that are defined first, such that their gradients don't spill into + # a much larger bucket, adding unnecessary latency after gradient + # computation finishes. Experiments showed 1MB is a reasonable value. + self.bucket_bytes_cap, + self.find_unused_parameters, + self.gradient_as_bucket_view, + param_to_name_mapping, + # User can set dist._DEFAULT_FIRST_BUCKET_BYTES to tune DDP first + # bucket. + dist._DEFAULT_FIRST_BUCKET_BYTES, + ) + + self.logger = dist.Logger(self.reducer) + # Set as a weak reference to avoid reference cycle between + # logger and reducer. + self.reducer.set_logger(self.logger) + + has_sync_bn = False + for submodule in self.module.modules(): + if isinstance(submodule, torch.nn.SyncBatchNorm): + has_sync_bn = True + break + + # Set logging data that can be got during construction time. 
+ self.logger.set_construction_data_and_log( + self.module.__class__.__name__, + [] if self.device_ids is None else self.device_ids, + -1 if self.output_device is None else self.output_device, + self.broadcast_buffers, + has_sync_bn, + static_graph, + ) + + # passing a handle to torch.nn.SyncBatchNorm layer + self._passing_sync_batchnorm_handle(self.module) + + def __getstate__(self): + self._check_default_group() + attrs = copy.copy(self.__dict__) + del attrs["process_group"] + del attrs["reducer"] + del attrs["logger"] + return attrs + + def __setstate__(self, state): + # If serializable, then the process group should be the default one + self.process_group = _get_default_group() + super().__setstate__(state) + self.__dict__.setdefault("require_forward_param_sync", True) + self.__dict__.setdefault("require_backward_grad_sync", True) + parameters, expect_sparse_gradient = self._build_params_for_reducer() + # In debug mode, build a mapping of parameter index -> parameter. + param_to_name_mapping = self._build_debug_param_to_name_mapping(parameters) + # Builds reducer. + self._ddp_init_helper( + parameters, + expect_sparse_gradient, + param_to_name_mapping, + self.static_graph, + ) + if self.static_graph: + self.reducer._set_static_graph() + assert self.logger is not None + self.logger._set_static_graph() + + def _build_params_for_reducer(self): + # Build tuple of (module, parameter) for all parameters that require grads. + modules_and_parameters = [ + (module, parameter) + for module_name, module in self.module.named_modules() + for parameter in [ + param + # Note that we access module.named_parameters instead of + # parameters(module). parameters(module) is only needed in the + # single-process multi device case, where it accesses replicated + # parameters through _former_parameters. + for param_name, param in module.named_parameters(recurse=False) + if param.requires_grad + and f"{module_name}.{param_name}" not in self.parameters_to_ignore + ] + ] + + # Deduplicate any parameters that might be shared across child modules. + memo = set() + modules_and_parameters = [ + # "p not in memo" is the deduplication check. + # "not memo.add(p)" is always True, and it's only there to cause "add(p)" if needed. + (m, p) + for m, p in modules_and_parameters + if p not in memo and not memo.add(p) # type: ignore[func-returns-value] + ] + + # Build list of parameters. + parameters = [parameter for _, parameter in modules_and_parameters] + + # Checks if a module will produce a sparse gradient. + def produces_sparse_gradient(module): + if isinstance(module, (torch.nn.Embedding, torch.nn.EmbeddingBag)): + return module.sparse + return False + + # Build list of booleans indicating whether or not to expect sparse + # gradients for the corresponding parameters. + expect_sparse_gradient = [ + produces_sparse_gradient(module) for module, _ in modules_and_parameters + ] + + self._assign_modules_buffers() + + return parameters, expect_sparse_gradient + + def _assign_modules_buffers(self): + """ + Assign self.module.named_buffers to self.modules_buffers. + + Assigns module buffers to self.modules_buffers which are then used to + broadcast across ranks when broadcast_buffers=True. Note that this + must be called every time buffers need to be synced because buffers can + be reassigned by user module, + see https://github.com/pytorch/pytorch/issues/63916. + """ + # Collect buffers for modules, filtering out buffers that should be ignored. 
+ named_module_buffers = [ + (buffer, buffer_name) + for buffer_name, buffer in self.module.named_buffers() + if buffer_name not in self.parameters_to_ignore + ] + self.modules_buffers = [ + buffer for (buffer, buffer_name) in named_module_buffers + ] + # Dict[str, tensor] representing module buffers not ignored by DDP. + self.named_module_buffers = { + buffer_name: buffer for (buffer, buffer_name) in named_module_buffers + } + + def _build_debug_param_to_name_mapping(self, parameters): + param_to_param_index = {parameters[i]: i for i in range(len(parameters))} + param_set = set(parameters) + param_index_to_param_fqn = {} + for module_name, module in self.module.named_modules(): + for param_name, param in module.named_parameters(recurse=False): + fqn = f"{module_name}.{param_name}" + # Bypass ignored parameters since those are not reduced by DDP + # to begin with. + if fqn not in self.parameters_to_ignore and param.requires_grad: + if param not in param_set: + self._log_and_throw( + ValueError, + f"Param with name {fqn} found in module parameters, but not DDP parameters." + " This indicates a bug in DDP, please report an issue to PyTorch.", + ) + param_index = param_to_param_index[param] + param_index_to_param_fqn[param_index] = fqn + + # Ensure we covered all parameters + if len(param_set) != len(param_index_to_param_fqn): + self._log_and_throw( + ValueError, + ( + "Expected param to name mapping to cover all parameters, but" + f" got conflicting lengths: {len(param_set)} vs " + f"{len(param_index_to_param_fqn)}. This indicates a bug in DDP" + ", please report an issue to PyTorch." + ), + ) + + return param_index_to_param_fqn + + def _get_parameters(self, m, recurse=True): + """Return a generator of module parameters.""" + + def model_parameters(m): + ps = ( + m._former_parameters.values() + if hasattr(m, "_former_parameters") + else m.parameters(recurse=False) + ) + yield from ps + + for mod in m.modules() if recurse else [m]: + yield from model_parameters(mod) + + def _check_default_group(self): + pickle_not_supported = False + try: + if self.process_group != _get_default_group(): + pickle_not_supported = True + except RuntimeError: + pickle_not_supported = True + + if pickle_not_supported: + self._log_and_throw( + RuntimeError, + "DDP Pickling/Unpickling are only supported " + "when using DDP with the default process " + "group. That is, when you have called " + "init_process_group and have not passed " + "process_group argument to DDP constructor", + ) + + @contextmanager + def no_sync(self): + r""" + Context manager to disable gradient synchronizations across DDP processes. + + Within this context, gradients will be accumulated on module + variables, which will later be synchronized in the first + forward-backward pass exiting the context. + + Example:: + + >>> # xdoctest: +SKIP("undefined variables") + >>> ddp = torch.nn.parallel.DistributedDataParallel(model, pg) + >>> with ddp.no_sync(): + >>> for input in inputs: + >>> ddp(input).backward() # no synchronization, accumulate grads + >>> ddp(another_input).backward() # synchronize grads + + .. warning:: + The forward pass should be included inside the context manager, or + else gradients will still be synchronized. 
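+
+        A minimal sketch of gradient accumulation with ``no_sync`` (the names
+        ``micro_batches`` and the accumulation period ``k`` are illustrative
+        only)::
+
+            >>> # xdoctest: +SKIP("undefined variables")
+            >>> for step, inp in enumerate(micro_batches):
+            >>>     if (step + 1) % k != 0:
+            >>>         with ddp.no_sync():
+            >>>             ddp(inp).sum().backward()  # accumulate grads locally
+            >>>     else:
+            >>>         ddp(inp).sum().backward()  # grads are all-reduced here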
+ """ + old_require_backward_grad_sync = self.require_backward_grad_sync + self.require_backward_grad_sync = False + try: + yield + finally: + self.require_backward_grad_sync = old_require_backward_grad_sync + + @classmethod + def _get_active_ddp_module(cls): + """`TorchDynamo` requires DDP's status and module for cooperative optimization.""" + return cls._active_ddp_module + + # note, this ctxmgr function is marked 'skip' in torchdynamo, so dynamo only kicks in + # for the 'module_to_run' underneath + # see torch._dynamo/eval_frame.py TorchPatcher.patch for more details + @contextmanager + @torch._disable_dynamo(recursive=False) + def _inside_ddp_forward(self): + DistributedDataParallel._active_ddp_module = self + try: + yield + finally: + DistributedDataParallel._active_ddp_module = None + + def _run_ddp_forward(self, *inputs, **kwargs): + if self._use_python_reducer: + return self.module(*inputs, **kwargs) # type: ignore[index] + else: + with self._inside_ddp_forward(): + return self.module(*inputs, **kwargs) # type: ignore[index] + + def _clear_grad_buffer(self): + # Making param.grad points to the grad buffers before backward is based on the + # assumption that the grad accumulation is done in place in autograd engine, + # for some edge cases, if the grad accumulation in autograd engine is not in + # place, then the param.grad and grad buffers are detached. + if self._delay_grad_buffer is not None: + # We batch zero_grad for all params by resetting the whole grad + # buffer when the grad of all params is set to None. + all_param_grad_none = all( + param.grad is None for param in self._delay_all_reduce_params + ) + + for index, param in enumerate(self._delay_all_reduce_params): + if param.grad is None: + param.grad = self._delay_grad_views[index] + if not all_param_grad_none: + param.grad.zero_() + + if all_param_grad_none: + self._delay_grad_buffer.zero_() + + def _lazy_init(self): + # Initialization for DDP that occurs after construction, but lazily + # before the first forward pass. + self._setup_in_backward_optimizers() + self._lazy_init_ran = True + + def _should_disable_cpp_reducer(self) -> bool: + return self._use_python_reducer and ( + torch._utils.is_compiling() or self._force_to_disable_cpp_reducer + ) + + def _pre_forward(self, *inputs, **kwargs): + if self._should_disable_cpp_reducer(): + return inputs, kwargs + + # Disable the python reducer if compiled_autograd is not enabled. + if self._accum_grad_hooks: + for index, h in enumerate(self._accum_grad_hooks): + h.remove() + self._accum_grad_hooks.clear() + + if not self._lazy_init_ran and not torch._utils.is_compiling(): + self._lazy_init() + + if self._delay_all_reduce_all_params: + return inputs, kwargs + + if torch.is_grad_enabled() and self.require_backward_grad_sync: + assert self.logger is not None + self.logger.set_runtime_stats_and_log() + self.reducer.prepare_for_forward() + + # Notify the join context that this process has not joined, if + # needed + work = Join.notify_join_context(self) + if work: + self.reducer._set_forward_pass_work_handle( + work, self._divide_by_initial_world_size # type: ignore[arg-type] + ) + + # Calling _rebuild_buckets before forward computation, + # It may allocate new buckets before deallocating old buckets + # inside _rebuild_buckets. To save peak memory usage, + # call _rebuild_buckets before the peak memory usage increases + # during forward computation. + # This should be called only once during whole training period. 
+ if torch.is_grad_enabled() and self.reducer._rebuild_buckets(): + logger.info("Reducer buckets have been rebuilt in this iteration.") + self._has_rebuilt_buckets = True + + # sync params according to location (before/after forward) user + # specified as part of hook, if hook was specified. + if self._check_sync_bufs_pre_fwd(): + self._sync_buffers() + + if self._join_config.enable: + # Notify joined ranks whether they should sync in backwards pass or not. + self._check_global_requires_backward_grad_sync(is_joined_rank=False) + + if self.device_ids: + moved_inputs, moved_kwargs = _to_kwargs( + inputs, + kwargs, + torch.device(self.device_type, self.device_ids[0]), + self.use_side_stream_for_tensor_copies, + ) + args, kwargs = moved_inputs[0], moved_kwargs[0] + # Cast inputs to reduced precision if needed. + if self.mixed_precision is not None: + args, kwargs = _cast_forward_inputs( + self.mixed_precision.param_dtype, + *args, + **kwargs, + ) + return args, kwargs + else: + # Cast inputs to reduced precision if needed. + # TODO (rohan-varma) test this codepath. + if self.mixed_precision is not None: + inputs, kwargs = _cast_forward_inputs( + self.mixed_precision.param_dtype, + *inputs, + **kwargs, + ) + return inputs, kwargs + + def _post_forward(self, output): + if self._should_disable_cpp_reducer(): + return output + + if self._delay_all_reduce_all_params: + self._clear_grad_buffer() + return output + + # sync params according to location (before/after forward) user + # specified as part of hook, if hook was specified. + if self._check_sync_bufs_post_fwd(): + self._sync_buffers() + + if torch.is_grad_enabled() and self.require_backward_grad_sync: + self.require_forward_param_sync = True + # We'll return the output object verbatim since it is a freeform + # object. We need to find any tensors in this object, though, + # because we need to figure out which parameters were used during + # this forward pass, to ensure we short circuit reduction for any + # unused parameters. Only if `find_unused_parameters` is set. + if self.find_unused_parameters and not self.static_graph: + # Do not need to populate this for static graph. + self.reducer.prepare_for_backward(list(_find_tensors(output))) + else: + self.reducer.prepare_for_backward([]) + else: + self.require_forward_param_sync = False + + # TODO: DDPSink is currently enabled for unused parameter detection and + # static graph training for first iteration. + if (self.find_unused_parameters and not self.static_graph) or ( + self.static_graph and not self._static_graph_delay_allreduce_enqueued + ): + ( + output_tensor_list, + treespec, + output_is_rref, + ) = _tree_flatten_with_rref(output) + output_placeholders = [None for _ in range(len(output_tensor_list))] + # Do not touch tensors that have no grad_fn, which can cause issues + # such as https://github.com/pytorch/pytorch/issues/60733 + for i, output in enumerate(output_tensor_list): + if torch.is_tensor(output) and output.grad_fn is None: + output_placeholders[i] = output + + # When find_unused_parameters=True, makes tensors which require grad + # run through the DDPSink backward pass. When not all outputs are + # used in loss, this makes those corresponding tensors receive + # undefined gradient which the reducer then handles to ensure + # param.grad field is not touched and we don't error out. 
+ passthrough_tensor_list = _DDPSink.apply( + weakref.ref(self), + *output_tensor_list, + ) + for i in range(len(output_placeholders)): + if output_placeholders[i] is None: + output_placeholders[i] = passthrough_tensor_list[i] + + # Reconstruct output data structure. + output = _tree_unflatten_with_rref( + output_placeholders, treespec, output_is_rref + ) + + # At the end of the forward pass, reset the grad buffer and grad views + self._clear_grad_buffer() + return output + + def forward(self, *inputs, **kwargs): + with torch.autograd.profiler.record_function("DistributedDataParallel.forward"): + inputs, kwargs = self._pre_forward(*inputs, **kwargs) + output = ( + self.module.forward(*inputs, **kwargs) + if self._delay_all_reduce_all_params + else self._run_ddp_forward(*inputs, **kwargs) + ) + return self._post_forward(output) + + def scatter(self, inputs, kwargs, device_ids): + return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim) + + def to_kwargs(self, inputs, kwargs, device_id): + # Kept for BC + return _to_kwargs( + inputs, + kwargs, + torch.device(self.device_type, device_id), + self.use_side_stream_for_tensor_copies, + ) + + def gather(self, outputs, output_device): + return gather(outputs, output_device, dim=self.dim) + + def train(self, mode=True): + super().train(mode) + return self + + # When running in join mode, schedules an allreduce to notify joined ranks + # of whether backwards pass synchronization will run this iteration or not. + def _check_global_requires_backward_grad_sync(self, is_joined_rank): + if not is_joined_rank and self.require_backward_grad_sync: + requires_sync_tensor = torch.ones(1, device=self.device) + else: + requires_sync_tensor = torch.zeros(1, device=self.device) + + work = dist.all_reduce( + requires_sync_tensor, group=self.process_group, async_op=True + ) + + # (kwen2501) This if condition is a plain translation of previous + # behavior, i.e. in the `is_joined_rank=False` case, `work.wait()` + # is not called and it doesn't care about the result. I am guessing + # that it just wants to fire a matching all-reduce and does not want + # the main stream to wait. + if is_joined_rank: + work.wait() + should_sync_backwards = requires_sync_tensor.item() != 0 + return should_sync_backwards + else: + return None # Return value is not/should not be used. + + # When running in join mode, checks and performs sync of module buffers if + # the models have buffers that should be synchronized in the forward pass. + def _check_and_sync_module_buffers(self): + if self._check_sync_bufs_pre_fwd(): + authoritative_rank = self._find_common_rank(self._distributed_rank, False) + self._sync_module_buffers(authoritative_rank) + + # When running in join model, agrees upon a common rank and broadcast model + # parameters to all other ranks. + def _sync_final_model(self, is_last_joiner): + # Agree upon the process that will be the authoritative model copy. + # The current rank is a candidate for being the authoritative copy if + # is_last_joiner=True. We break ties via picking the larger rank. + self._authoritative_rank = self._find_common_rank( + self._distributed_rank, is_last_joiner + ) + _sync_module_states( + module=self.module, + process_group=self.process_group, + broadcast_bucket_size=self.broadcast_bucket_size, + src=self._authoritative_rank, + params_and_buffers_to_ignore=self.parameters_to_ignore, + broadcast_buffers=self.broadcast_buffers, + ) + + # Schedule comm ops to match those scheduled in the reducer's backward + # pass. 
+ def _match_all_reduce_for_bwd_pass(self): + comm_work = [] + # Schedule comm in the same order as Reducer schedules them, i.e. + # the order of the buckets. Retrieving the bucket order from the reducer + # ensures that we keep the same order in join mode, such as when bucket + # order is rebuilt dynamically. + + # Returns grad_buckets in order, but real tensors are substituted with + # zero tensors of the same shape. + grad_buckets = self.reducer._get_zeros_like_grad_buckets() + for grad_bucket in grad_buckets: + # Joined processes contribute zero gradient. In the case that + # divide_by_initial_world_size=True, we divide grads by the static + # world size, if not, the dividing factor is reduced by the number + # of joined processes. + work = self.reducer._run_comm_hook(grad_bucket) + comm_work.append(work) + for work in comm_work: + work.wait() + + # Allreduces the used parameter mapping across ranks. + def _match_unused_params_allreduce(self): + locally_used_param_map = self.reducer._get_local_used_map() + self.process_group.allreduce(locally_used_param_map) + + def join( + self, + divide_by_initial_world_size: bool = True, + enable: bool = True, + throw_on_early_termination: bool = False, + ): + r""" + Context manager for training with uneven inputs across processes in DDP. + + This context manager will keep track of already-joined DDP processes, + and "shadow" the forward and backward passes by inserting collective + communication operations to match with the ones created by non-joined + DDP processes. This will ensure each collective call has a corresponding + call by already-joined DDP processes, preventing hangs or errors that + would otherwise happen when training with uneven inputs across + processes. Alternatively, if the flag ``throw_on_early_termination`` is + specified to be ``True``, all trainers will throw an error once one rank + runs out of inputs, allowing these errors to be caught and handled + according to application logic. + + Once all DDP processes have joined, the context manager will broadcast + the model corresponding to the last joined process to all processes to + ensure the model is the same across all processes + (which is guaranteed by DDP). + + To use this to enable training with uneven inputs across processes, + simply wrap this context manager around your training loop. No further + modifications to the model or data loading is required. + + .. warning:: + If the model or training loop this context manager is wrapped around + has additional distributed collective operations, such as + ``SyncBatchNorm`` in the model's forward pass, then the flag + ``throw_on_early_termination`` must be enabled. This is because this + context manager is not aware of non-DDP collective communication. + This flag will cause all ranks to throw when any one rank + exhausts inputs, allowing these errors to be caught and recovered + from across all ranks. + + Args: + divide_by_initial_world_size (bool): If ``True``, will divide + gradients by the initial ``world_size`` DDP training was launched + with. If ``False``, will compute the effective world size + (number of ranks that have not depleted their inputs yet) and + divide gradients by that during allreduce. Set + ``divide_by_initial_world_size=True`` to ensure every input + sample including the uneven inputs have equal weight in terms of + how much they contribute to the global gradient. This is + achieved by always dividing the gradient by the initial + ``world_size`` even when we encounter uneven inputs. 
If you set + this to ``False``, we divide the gradient by the remaining + number of nodes. This ensures parity with training on a smaller + ``world_size`` although it also means the uneven inputs would + contribute more towards the global gradient. Typically, you + would want to set this to ``True`` for cases where the last few + inputs of your training job are uneven. In extreme cases, where + there is a large discrepancy in the number of inputs, setting + this to ``False`` might provide better results. + enable (bool): Whether to enable uneven input detection or not. Pass + in ``enable=False`` to disable in cases where you know that + inputs are even across participating processes. Default is + ``True``. + throw_on_early_termination (bool): Whether to throw an error + or continue training when at least one rank has exhausted + inputs. If ``True``, will throw upon the first rank reaching end + of data. If ``False``, will continue training with a smaller + effective world size until all ranks are joined. Note that if + this flag is specified, then the flag + ``divide_by_initial_world_size`` would be ignored. Default + is ``False``. + + + Example:: + + >>> # xdoctest: +SKIP("Distributed") + >>> import torch + >>> import torch.distributed as dist + >>> import os + >>> import torch.multiprocessing as mp + >>> import torch.nn as nn + >>> # On each spawned worker + >>> def worker(rank): + >>> dist.init_process_group("nccl", rank=rank, world_size=2) + >>> torch.cuda.set_device(rank) + >>> model = nn.Linear(1, 1, bias=False).to(rank) + >>> model = torch.nn.parallel.DistributedDataParallel( + >>> model, device_ids=[rank], output_device=rank + >>> ) + >>> # Rank 1 gets one more input than rank 0. + >>> inputs = [torch.tensor([1]).float() for _ in range(10 + rank)] + >>> with model.join(): + >>> for _ in range(5): + >>> for inp in inputs: + >>> loss = model(inp).sum() + >>> loss.backward() + >>> # Without the join() API, the below synchronization will hang + >>> # blocking for rank 1's allreduce to complete. + >>> torch.cuda.synchronize(device=rank) + """ + return Join( + [self], + enable, + throw_on_early_termination, + divide_by_initial_world_size=divide_by_initial_world_size, + ) + + def join_hook( + self, + **kwargs, + ): + r""" + DDP join hook enables training on uneven inputs by mirroring communications in forward and backward passes. + + Arguments: + kwargs (dict): a :class:`dict` containing any keyword arguments + to modify the behavior of the join hook at run time; all + :class:`Joinable` instances sharing the same join context + manager are forwarded the same value for ``kwargs``. + + The hook supports the following keyword arguments: + divide_by_initial_world_size (bool, optional): + If ``True``, then gradients are divided by the initial world + size that DDP was launched with. + If ``False``, then gradients are divided by the effective world + size (i.e. the number of non-joined processes), meaning that + the uneven inputs contribute more toward the global gradient. + Typically, this should be set to ``True`` if the degree of + unevenness is small but can be set to ``False`` in extreme + cases for possibly better results. + Default is ``True``. 
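+
+        A minimal sketch of forwarding this keyword argument through the generic
+        :class:`torch.distributed.algorithms.Join` context manager (``ddp`` and
+        ``inputs`` are assumed to already exist)::
+
+            >>> # xdoctest: +SKIP("undefined variables")
+            >>> from torch.distributed.algorithms import Join
+            >>> with Join([ddp], divide_by_initial_world_size=False):
+            >>>     for inp in inputs:
+            >>>         ddp(inp).sum().backward()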
+ """ + divide_by_initial_world_size = kwargs.get("divide_by_initial_world_size", True) + return _DDPJoinHook( + self, divide_by_initial_world_size=divide_by_initial_world_size + ) + + @property + def join_device(self): + return self.device + + @property + def join_process_group(self): + return self.process_group + + def _register_buffer_comm_hook( + self, + state, + hook: Callable, + comm_hook_location=_BufferCommHookLocation.POST_FORWARD, + ): + r""" + Allow custom registration of hooks that define how buffer are synchronized across ranks. + + The hook takes in an optional state and is passed in a Dict[str, Tensor] + corresponding to buffer names and the buffers, and can run arbitrary reductions + on buffers as opposed to DDP's default broadcast from rank 0. This is useful for + example if a counter needs to be summed or averaged across ranks every iteration. + + Args: + state (Any): Optional state that is passed to the hook. + hook (Callable): Callable with the following signature: + ``hook(state: object, bucket: dist.GradBucket) -> torch.futures.Future[torch.Tensor]`` + comm_hook_location (_BufferCommHookLocation): Enum value indicating + where to run the hook. + _BufferCommHookLocation.PRE_FORWARD means that the + hook will run _before_ the forward pass, and + _BufferCommHookLocation.POST_FORWARD means that the + hook will run _after_ the forward pass. + + NOTE: To maximize performance, users can return a + List[torch.futures.Future] from their hook, and DDP will + install and await these hooks appropriately at the end of + the backward pass. This will ensure all buffers are + synchronized by the end of the backward pass. If this + setting is used, it is recommended to pass + comm_hook_location=_BufferCommHookLocation.POST_FORWARD, + which will trigger the hook after the forward pass. + If _BufferCommHookLocation.PRE_FORWARD is used, users must + ensure appropriate synchronization when manipulating GPU + buffers in the forward pass. + """ + assert callable(hook) + self.buffer_hook = _BufferCommHook( + buffer_comm_hook=hook, + buffer_comm_hook_state=state, + buffer_comm_hook_location=comm_hook_location, + ) + + def register_comm_hook(self, state: object, hook: Callable): + r""" + Register communication hook for user-defined DDP aggregation of gradients across multiple workers. + + This hook would be very useful for researchers to try out new ideas. For + example, this hook can be used to implement several algorithms like GossipGrad + and gradient compression which involve different communication strategies for + parameter syncs while running Distributed DataParallel training. + + Args: + state (object): Passed to the hook to maintain any state information during the training process. + Examples include error feedback in gradient compression, + peers to communicate with next in GossipGrad, etc. + + It is locally stored by each worker + and shared by all the gradient tensors on the worker. + hook (Callable): Callable with the following signature: + ``hook(state: object, bucket: dist.GradBucket) -> torch.futures.Future[torch.Tensor]``: + + This function is called once the bucket is ready. The + hook can perform whatever processing is needed and return + a Future indicating completion of any async work (ex: allreduce). + If the hook doesn't perform any communication, it still + must return a completed Future. The Future should hold the + new value of grad bucket's tensors. 
Once a bucket is ready, + c10d reducer would call this hook and use the tensors returned + by the Future and copy grads to individual parameters. + Note that the future's return type must be a single tensor. + + We also provide an API called ``get_future`` to retrieve a + Future associated with the completion of ``c10d.ProcessGroup.Work``. + ``get_future`` is currently supported for NCCL and also supported for most + operations on GLOO and MPI, except for peer to peer operations (send/recv). + + .. warning :: + Grad bucket's tensors will not be predivided by world_size. User is responsible + to divide by the world_size in case of operations like allreduce. + + .. warning :: + DDP communication hook can only be registered once and should be registered + before calling backward. + + .. warning :: + The Future object that hook returns should contain a single tensor + that has the same shape with the tensors inside grad bucket. + + .. warning :: + ``get_future`` API supports NCCL, and partially GLOO and MPI backends (no support + for peer-to-peer operations like send/recv) and will return a ``torch.futures.Future``. + + Example:: + Below is an example of a noop hook that returns the same tensor. + + >>> # xdoctest: +SKIP('undefined name') + >>> def noop(state: object, bucket: dist.GradBucket) -> torch.futures.Future[torch.Tensor]: + >>> fut = torch.futures.Future() + >>> fut.set_result(bucket.buffer()) + >>> return fut + >>> ddp.register_comm_hook(state=None, hook=noop) + + Example:: + Below is an example of a Parallel SGD algorithm where gradients are encoded before + allreduce, and then decoded after allreduce. + + >>> # xdoctest: +SKIP('undefined name') + >>> def encode_and_decode(state: object, bucket: dist.GradBucket) -> torch.futures.Future[torch.Tensor]: + >>> encoded_tensor = encode(bucket.buffer()) # encode gradients + >>> fut = torch.distributed.all_reduce(encoded_tensor).get_future() + >>> # Define the then callback to decode. + >>> def decode(fut): + >>> decoded_tensor = decode(fut.value()[0]) # decode gradients + >>> return decoded_tensor + >>> return fut.then(decode) + >>> ddp.register_comm_hook(state=None, hook=encode_and_decode) + """ + self._check_comm_hook(hook) + if hook.__name__ in ["bf16_compress_hook", "fp16_compress_hook"]: + # If we pass None, then the hook will try to get the world size + # by calling `dist.group.WORLD.size()`, which causes compilation + # errors. So we pre-decode the process group and pass it to the + # hook. + if state is None: + state = dist.group.WORLD + assert self.logger is not None + self.logger._set_comm_hook_name(hook.__qualname__) + self._comm_hooks.append((hook, state)) + dist._register_comm_hook(self.reducer, state, hook) + + def _register_builtin_comm_hook(self, comm_hook_type): + r""" + Register a built-in communication hook that specifies how DDP aggregates gradients across multiple workers. + + The built-in hooks aim to provide efficient C++ implementations for certain hooks, + which might not be as efficient if implemented in Python using a Python communication hook. + + Args: + comm_hook_type (dist.BuiltinCommHookType): type of communication hook, such as ALLREDUCE, FP16_COMPRESS, etc. + + .. warning :: + DDP communication hook can only be registered once and should be registered + before calling backward. + + Example:: + Below is an example of a FP16 compression where gradients are + compressed into 16-bit floating-point numbers before allreduce, and + then decompressed after allreduce. 
+ + >>> # xdoctest: +SKIP('undefined name') + >>> ddp._register_builtin_comm_hook(dist.BuiltinCommHookType.FP16_COMPRESS) + + """ + assert self.logger is not None + self.logger._set_comm_hook_name(str(comm_hook_type)) + dist._register_builtin_comm_hook(self.reducer, comm_hook_type) + + def _register_fused_optim(self, optim: Type, *args, optim_params=None, **kwargs): + r""" + Register an optimizer in DDP to optimize parameter immediately after its gradient reduction. + + Registers an optimizer with DDP such that the optimization for a + parameter will run immediately when that parameter's gradient is + finished with reduction, instead of waiting for all parameters' + gradients to finish reduction. This can result in a training speedup + depending on your workload since the optimizer can run while gradient + reduction for other parameters are still ongoing. In addition, this has + the potential to reduce peak memory consumption during training, as it + only needs to load the per-parameter optimizer states of a single + parameter at a time, instead of loading all per-parameter optimizer + states at once. + + Args: + optim (Type): a ``torch.optim.Optimizer`` class to be registered + as a fused optimizer. + *args (Sequence[Any]): Arguments to forward to `optim`. + optim_params (Optional[Iterable[torch.Tensor]]): Set of parameters + to optimize, similar to `params` argument of traditional `torch.optim` + Optimizers. If this is omitted, all DDP model parameters will be + optimized. + **kwargs: (Dict[str, Any]): Keyword arguments to forward to `optim`. + + .. warning :: + _register_fused_optim should only be called once on a DDP instance, + and registering multiple fused optimizers for the same DDP model + is not currently supported. Please ping + https://github.com/pytorch/pytorch/issues/71595 if this is necessary + for your use case. + + .. warning :: + _register_fused_optim and register_comm_hook currently do not + compose together, meaning that custom DDP communication hooks are + not supported with overlapped optimizers. Please ping + https://github.com/pytorch/pytorch/issues/71595 if this is necessary + for your use case. + + .. warning :: + Gradient accumulation and DDP `no_sync` are currently not supported + with overlapped optimizer. Please ping + https://github.com/pytorch/pytorch/issues/71595 if this is necessary + for your use case. + + Example:: + + >>> # xdoctest: +SKIP("No rendezvous handler") + >>> torch.distributed.init_process_group(backend='nccl', world_size=4, init_method='...') + >>> net = torch.nn.parallel.DistributedDataParallel(model, pg) + >>> lr = 1e-2 + >>> betas = (0.9, 0.99) + >>> eps = 1e-6 + >>> net._register_fused_optim(torch.optim.Adam, lr, betas=betas, eps=eps) + >>> # Example with subset of parameters + >>> params_to_opt = [list(net.parameters())[0]] + >>> net._register_fused_optim( + ... torch.optim.Adam, lr, optim_params=params_to_opt, betas=betas, eps=eps + ... ) + """ + # Note: importing in function, otherwise this will cause a circular + # import as optimizer_overlap module needs to import DistributedDataParallel. + from torch.distributed.algorithms._optimizer_overlap import _as_overlapped_optim + + overlapped_optim = _as_overlapped_optim(optim, optim_params, *args, **kwargs) + try: + overlapped_optim.register_ddp(self) + except NotImplementedError as e: + raise RuntimeError( + f"{optim} does not support overlapped DDP. Please file an issue to PyTorch or the respective owner of {optim}." 
+ ) from e + + def _distributed_broadcast_coalesced( + self, tensors, buffer_size, authoritative_rank=0 + ): + dist._broadcast_coalesced( + self.process_group, tensors, buffer_size, authoritative_rank + ) + + def _check_sync_bufs_post_fwd(self): + return ( + self.will_sync_module_buffers() + and hasattr(self, "buffer_hook") + and self.buffer_hook.buffer_comm_hook_location + == _BufferCommHookLocation.POST_FORWARD + ) + + def _check_sync_bufs_pre_fwd(self): + return self.will_sync_module_buffers() and ( + not hasattr(self, "buffer_hook") + or self.buffer_hook.buffer_comm_hook_location + == _BufferCommHookLocation.PRE_FORWARD + ) + + def will_sync_module_buffers(self): + return ( + self.require_forward_param_sync + and self.broadcast_buffers + and len(self.modules_buffers) > 0 + ) + + def _find_common_rank(self, input_rank, rank_cond): + # -1 indicates that this rank is not under consideration to be the + # common_rank + rank_to_use = torch.tensor( + [input_rank if rank_cond else -1], + device=self.device, + ) + dist.all_reduce(rank_to_use, op=ReduceOp.MAX, group=self.process_group) + if rank_to_use.item() == -1: + self._log_and_throw( + ValueError, + "BUG! Expected rank_cond to be true for at least one process." + " This indicates a bug in PyTorch, please report an issue.", + ) + return rank_to_use.item() + + def _sync_buffers(self): + with torch.no_grad(): + # module buffer sync + # Synchronize buffers across processes. + # If we are running DDP with the join manager, we have to agree + # upon a rank to sync module buffers from, since rank 0 may + # already have been joined and have stale module buffers. + if self._join_config.enable: + authoritative_rank = self._find_common_rank( + self._distributed_rank, True + ) + else: + # The process with rank 0 is considered the authoritative copy. + authoritative_rank = 0 + # Update self.modules_buffers incase any buffers were + # reassigned. + self._assign_modules_buffers() + self._sync_module_buffers(authoritative_rank) + + def _sync_module_buffers(self, authoritative_rank): + if not hasattr(self, "buffer_hook"): + self._default_broadcast_coalesced(authoritative_rank=authoritative_rank) + else: + hook = self.buffer_hook.buffer_comm_hook + state = self.buffer_hook.buffer_comm_hook_state + futs = hook(state, self.named_module_buffers) + if futs is not None: + self.reducer._install_post_backward_futures(futs) + + def _default_broadcast_coalesced( + self, bufs=None, bucket_size=None, authoritative_rank=0 + ): + """ + Broadcasts buffers from rank 0 to rest of workers. + + If bufs, bucket_size are None, default values self.modules_buffers + and self.broadcast_bucket_size are used instead. 
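+ For illustration, a call that re-broadcasts the current module buffers from
+ rank 0 with the default bucket size might look like this (``ddp`` is an
+ assumed, already-constructed ``DistributedDataParallel`` instance):
+
+ >>> # xdoctest: +SKIP('undefined name')
+ >>> ddp._default_broadcast_coalesced(authoritative_rank=0)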
+ """ + if bufs is None: + bufs = self.modules_buffers + if bucket_size is None: + bucket_size = self.broadcast_bucket_size + + self._distributed_broadcast_coalesced(bufs, bucket_size, authoritative_rank) + + def _passing_sync_batchnorm_handle(self, module): + for layer in module.modules(): + if isinstance(layer, torch.nn.modules.SyncBatchNorm): + if self.device_type == "cpu": + self._log_and_throw( + ValueError, + "SyncBatchNorm layers only work with GPU modules", + ) + + def _check_comm_hook(self, hook): + if not callable(hook): + self._log_and_throw(TypeError, "Communication hook must be callable.") + + sig = inspect.signature(hook) + if ( + sig.parameters["bucket"].annotation != inspect._empty + and sig.parameters["bucket"].annotation != dist.GradBucket + ): + self._log_and_throw( + ValueError, + "Communication hook: bucket annotation should be dist.GradBucket.", + ) + + if ( + sig.return_annotation != inspect._empty + and sig.return_annotation != torch.futures.Future[torch.Tensor] + ): + self._log_and_throw( + ValueError, + "Communication hook: return annotation should be torch.futures.Future[torch.Tensor].", + ) + + if hook.__name__ in [ + "bf16_compress_hook", + "bf16_compress_wrapper_hook", + ] and ( + (torch.version.cuda is None and torch.version.hip is None) + or ( + torch.version.cuda is not None + and int(torch.version.cuda.split(".")[0]) < 11 + ) + or not dist.is_available() + or not dist.is_nccl_available() + or torch.cuda.nccl.version() < (2, 10) + ): + self._log_and_throw( + TypeError, + "BF16 all reduce communication hook required CUDA 11+ and NCCL 2.10+.", + ) + + @property + def _distributed_rank(self): + return dist.get_rank(self.process_group) + + @staticmethod + def _get_data_parallel_params(module, named_params=False): + """Return a generator of parameters managed by a given DDP unit.""" + for param in ( + module.parameters() if not named_params else module.named_parameters() + ): + if not hasattr(param, "_ddp_ignored"): + yield param + + @staticmethod + def _set_params_and_buffers_to_ignore_for_model( + module, params_and_buffers_to_ignore + ): + """ + Set parameters and buffers to be ignored by DDP. + + Expected format for parameters is the fully qualified name: {module_name}.{param_name}, and + similarly, {module_name}.{buffer_name} for buffers. For example: + params_to_ignore = [] + # NB: model here is vanilla PyTorch module, not yet wrapped with DDP. + for module_name, module in model.named_modules(): + for param_name, param in module.named_parameters(recurse=False): + if should_ignore(param): + # Create expected format + fqn = f"{module_name}.{param_name}" + params_to_ignore.append(fqn) + torch.nn.parallel.DistributedDataParallel._set_params_and_buffers_to_ignore_for_model( + model, + params_to_ignore + ) + """ + # This is a workaround to set parameters and buffers DDP should ignore + # during synchronization. It will be removed when the API is finalized + # as part of addressing https://github.com/pytorch/pytorch/issues/43690. + module._ddp_params_and_buffers_to_ignore = params_and_buffers_to_ignore + for name, param in module.named_parameters(): + if name in params_and_buffers_to_ignore: + param._ddp_ignored = True + for name, buffer in module.named_buffers(): + if name in params_and_buffers_to_ignore: + buffer._ddp_ignored = True + + def _get_ddp_logging_data(self): + r""" + Return a dictionary of logging data for debugging and analysis. + + This interface can be called after DistributedDataParallel() is + constructed. 
+ The returned dictionary can help with debugging and analysis. It includes
+ the DistributedDataParallel constructor input parameters, some internal states
+ of DistributedDataParallel, and performance metrics. Simply print the
+ dictionary to see what these metrics are.
+ This is a prototype interface and subject to change in the future.
+ """
+ assert self.logger is not None
+ ddp_logging_data = self.logger._get_ddp_logging_data()
+ return {**ddp_logging_data.strs_map, **ddp_logging_data.ints_map}
+
+ def _set_ddp_runtime_logging_sample_rate(self, sample_rate):
+ r"""
+ Set the sample rate for collecting runtime stats.
+
+ This interface allows users to set the sample rate of collecting runtime
+ stats. Runtime stats are always recorded for the first 10 iterations; after
+ that, they are recorded once every "sample_rate" training iterations.
+ By default (if this interface is not called), runtime stats are recorded for
+ the first 10 iterations and then once every
+ "kDDPRuntimeLoggingSampleRate=100" training iterations.
+ This is a prototype interface and subject to change in the future.
+ """
+ if sample_rate < 1:
+ self._log_and_throw(
+ ValueError,
+ "DDP runtime logging sample rate should be equal to or greater than 1",
+ )
+ self.reducer._set_ddp_runtime_logging_sample_rate(sample_rate)
+
+ def _set_static_graph(self):
+ """
+ Set static graph for DDP.
+
+ It is recommended to set static graph in the DDP constructor, which will
+ call this private API internally.
+ """
+ # If self.static_graph has been set, no need to set it again
+ if self.static_graph:
+ warnings.warn(
+ "You've set static_graph to be True, no need to set it again."
+ )
+ return
+ self.static_graph = True
+ self._static_graph_delay_allreduce_enqueued = False
+ self.reducer._set_static_graph()
+ assert self.logger is not None
+ self.logger._set_static_graph()
+ if self.find_unused_parameters:
+ warnings.warn(
+ "You passed find_unused_parameters=true to DistributedDataParallel. "
+ "`_set_static_graph` will detect unused parameters automatically, so "
+ "you do not need to set find_unused_parameters=true; just be sure that "
+ "these unused parameters do not change during the training loop while "
+ "`_set_static_graph` is in effect."
+ )
+
+ def _remove_autograd_hooks(self):
+ """Remove autograd hooks registered by the reducer on the model parameters."""
+ self.reducer._remove_autograd_hooks()
+
+ def _check_reducer_finalized(self):
+ """
+ Check if the reducer has processed all buckets and finalized the backward appropriately.
+
+ It is useful to call this method after calling .backward() in your training
+ loop in order to avoid hard-to-debug errors later on caused by the reducer
+ not finalizing backward.
+ """
+ self.reducer._check_reducer_finalized()
+
+ def _set_sparse_metadata(self, global_unique_ids):
+ self.reducer._set_sparse_metadata(global_unique_ids)
+
+ def _update_process_group(self, new_process_group):
+ """
+ Dynamically update the process group for DDP so that the DDP world size can
+ shrink or expand without reinitializing DDP.
+
+ NOTE: If you are using custom communication hooks via ``register_comm_hook``,
+ you need to update the process groups for those hooks separately.
+ """
+ # Force a rebuild of buckets for a new process group. This ensures all ranks
+ # are synchronized in terms of when they will rebuild buckets and also
+ # re-evaluates previous assumptions about buckets, given that the world size
+ # might have changed.
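+ # Illustrative caller-side usage only (hypothetical names), e.g. after
+ # shrinking the job to ranks 0 and 1:
+ #   new_pg = torch.distributed.new_group(ranks=[0, 1])
+ #   ddp._update_process_group(new_pg)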
+ self._has_rebuilt_buckets = False + self.reducer._reset_state() + + if not _rank_not_in_group(new_process_group): + self.process_group = new_process_group + self.reducer._update_process_group(new_process_group) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/parallel/parallel_apply.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/parallel/parallel_apply.py new file mode 100644 index 0000000000000000000000000000000000000000..6a90f897fa8ada1575524e50474df402b9c42a0d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/parallel/parallel_apply.py @@ -0,0 +1,110 @@ +import threading +import torch +from typing import Any, Dict, List, Optional, Sequence, Tuple, Union, cast +from ..modules import Module +from torch.cuda._utils import _get_device_index +from torch.cuda.amp import autocast +from torch._utils import ExceptionWrapper + +__all__ = ['get_a_var', 'parallel_apply'] + +def get_a_var(obj: Union[torch.Tensor, List[Any], Tuple[Any, ...], Dict[Any, Any]]) -> Optional[torch.Tensor]: + if isinstance(obj, torch.Tensor): + return obj + + if isinstance(obj, (list, tuple)): + for result in map(get_a_var, obj): + if isinstance(result, torch.Tensor): + return result + if isinstance(obj, dict): + for result in map(get_a_var, obj.items()): + if isinstance(result, torch.Tensor): + return result + return None + +def parallel_apply( + modules: Sequence[Module], + inputs: Sequence[Any], + kwargs_tup: Optional[Sequence[Dict[str, Any]]] = None, + devices: Optional[Sequence[Optional[Union[int, torch.device]]]] = None, +) -> List[Any]: + r"""Apply each `module` in :attr:`modules` in parallel on each of :attr:`devices`. + + Args: + modules (Module): modules to be parallelized + inputs (tensor): inputs to the modules + devices (list of int or torch.device): CUDA devices + + :attr:`modules`, :attr:`inputs`, :attr:`kwargs_tup` (if given), and + :attr:`devices` (if given) should all have same length. Moreover, each + element of :attr:`inputs` can either be a single object as the only argument + to a module, or a collection of positional arguments. 
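+ A minimal sketch (illustrative only; assumes two CUDA devices are available):
+
+ >>> # xdoctest: +SKIP('requires CUDA')
+ >>> modules = [torch.nn.Linear(4, 2).cuda(0), torch.nn.Linear(4, 2).cuda(1)]
+ >>> inputs = [torch.randn(8, 4, device='cuda:0'), torch.randn(8, 4, device='cuda:1')]
+ >>> outputs = parallel_apply(modules, inputs, devices=[0, 1])
+ >>> [o.shape for o in outputs]
+ [torch.Size([8, 2]), torch.Size([8, 2])]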
+ """ + assert len(modules) == len(inputs), f'The number of modules {len(modules)} is not equal to the number of inputs {len(inputs)}' + if kwargs_tup is not None: + assert len(modules) == len(kwargs_tup) + else: + kwargs_tup = (cast(Dict[str, Any], {}),) * len(modules) + if devices is not None: + assert len(modules) == len(devices) + else: + devices = [None] * len(modules) + devices = [_get_device_index(x, True) for x in devices] + streams = [torch.cuda.current_stream(x) for x in devices] + lock = threading.Lock() + results = {} + grad_enabled, autocast_enabled = torch.is_grad_enabled(), torch.is_autocast_enabled() + + def _worker( + i: int, + module: Module, + input: Any, + kwargs: Dict[str, Any], + device: Optional[Union[int, torch.device]] = None, + stream: Optional[torch.cuda.Stream] = None, + ) -> None: + torch.set_grad_enabled(grad_enabled) + if device is None: + t = get_a_var(input) + if t is None: + with lock: + results[i] = ExceptionWrapper( + where=f"in replica {i}, no device was provided and no tensor input was found; " + "device cannot be resolved") + return + device = t.get_device() + if stream is None: + stream = torch.cuda.current_stream(device) + try: + with torch.cuda.device(device), torch.cuda.stream(stream), autocast(enabled=autocast_enabled): + # this also avoids accidental slicing of `input` if it is a Tensor + if not isinstance(input, (list, tuple)): + input = (input,) + output = module(*input, **kwargs) + with lock: + results[i] = output + except Exception: + with lock: + results[i] = ExceptionWrapper( + where=f"in replica {i} on device {device}") + + if len(modules) > 1: + threads = [threading.Thread(target=_worker, + args=(i, module, input, kwargs, device, stream)) + for i, (module, input, kwargs, device, stream) in + enumerate(zip(modules, inputs, kwargs_tup, devices, streams))] + + for thread in threads: + thread.start() + for thread in threads: + thread.join() + else: + _worker(0, modules[0], inputs[0], kwargs_tup[0], devices[0], streams[0]) + + outputs = [] + for i in range(len(inputs)): + output = results[i] + if isinstance(output, ExceptionWrapper): + output.reraise() + outputs.append(output) + return outputs diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/parallel/replicate.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/parallel/replicate.py new file mode 100644 index 0000000000000000000000000000000000000000..016a6fbd0c40d510d4c123923e16d62514c71c45 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/parallel/replicate.py @@ -0,0 +1,186 @@ +import torch +from ..modules import Module +from . import comm +from typing import TYPE_CHECKING, Dict, Iterator, List, Optional, Sequence, Set, TypeVar, Union, cast +from torch._utils import _get_device_index + +from collections import OrderedDict + +if TYPE_CHECKING: + import torch.jit + import torch.jit._state + +__all__ = ['replicate'] + +def _is_script_module(module: Module) -> bool: + import torch.jit + return isinstance(module, torch.jit.ScriptModule) + + +def _is_script_method(module: Module) -> bool: + import torch.jit + return isinstance(module, torch._C.ScriptMethod) + + +def _init_script_module() -> "torch.jit.ScriptModule": + import torch.jit + return torch.jit.ScriptModule() + + +def _is_jit_enabled() -> "torch.jit._state.EnabledProxy": + import torch.jit._state + return torch.jit._state._enabled + + +# Check if we can safely replicate the module. +# there are two types of module: +# 1. python modules +# 2. 
ScriptModule +# +# currently a module cannot be replicated properly if the descendants of +# any ScriptModule contains python module (type 1 above) +def _replicatable_module(module: Module, memo: Optional[Set[Module]] = None) -> bool: + + # module.modules() contains module itself as the first element + def descendant_modules(module: Module) -> Iterator[Module]: + gen = module.modules() + next(gen) + return gen + + if not _is_jit_enabled(): + return True + if memo is None: + memo = set() + + # memoize visited modules + memo.add(module) + if _is_script_module(module): + memo.update(descendant_modules(module)) + return all(_is_script_module(descendant) for + descendant in descendant_modules(module)) + + for child in module.children(): + # since any unreplicatable module will cause the check to return + # False early, visited modules here can be safely ignored. + if child in memo: + continue + if not _replicatable_module(child, memo): + return False + + return True + +def _broadcast_coalesced_reshape( + tensors: Sequence[torch.Tensor], + devices: Sequence[Union[int, torch.device]], + detach: bool = False, +) -> List[List[torch.Tensor]]: + from ._functions import Broadcast + if detach: + return comm.broadcast_coalesced(tensors, devices) + else: + # Use the autograd function to broadcast if not detach + if len(tensors) > 0: + tensor_copies = Broadcast.apply(devices, *tensors) + return [tensor_copies[i:i + len(tensors)] + for i in range(0, len(tensor_copies), len(tensors))] + else: + return [] + + +T = TypeVar("T", bound=Module) + + +def replicate( + network: T, + devices: Sequence[Union[int, torch.device]], + detach: bool = False, +) -> List[T]: + if not _replicatable_module(network): + raise RuntimeError("Cannot replicate network where python modules are " + "childrens of ScriptModule") + + if not devices: + return [] + + devices = [_get_device_index(x, True) for x in devices] + num_replicas = len(devices) + + params = list(network.parameters()) + param_indices = {param: idx for idx, param in enumerate(params)} + param_copies = _broadcast_coalesced_reshape(params, devices, detach) + + buffers = list(network.buffers()) + buffers_rg: List[torch.Tensor] = [] + buffers_not_rg: List[torch.Tensor] = [] + for buf in buffers: + if buf.requires_grad and not detach: + buffers_rg.append(buf) + else: + buffers_not_rg.append(buf) + + buffer_indices_rg = {buf: idx for idx, buf in enumerate(buffers_rg)} + buffer_indices_not_rg = {buf: idx for idx, buf in enumerate(buffers_not_rg)} + + buffer_copies_rg = _broadcast_coalesced_reshape(buffers_rg, devices, detach=detach) + buffer_copies_not_rg = _broadcast_coalesced_reshape(buffers_not_rg, devices, detach=True) + + modules = list(network.modules()) + module_copies: List[List[Module]] = [[] for _ in devices] + module_indices: Dict[Module, int] = {} + + for i, module in enumerate(modules): + module_indices[module] = i + for j in range(num_replicas): + replica = module._replicate_for_data_parallel() + # This is a temporary fix for DDP. DDP needs to access the + # replicated model parameters. It used to do so through + # `mode.parameters()`. The fix added in #33907 for DP stops the + # `parameters()` API from exposing the replicated parameters. + # Hence, we add a `_former_parameters` dict here to support DDP. 
+ replica._former_parameters = OrderedDict() + + module_copies[j].append(replica) + + for i, module in enumerate(modules): + for key, child in module._modules.items(): + if child is None: + for j in range(num_replicas): + replica = module_copies[j][i] + replica._modules[key] = None + else: + module_idx = module_indices[child] + for j in range(num_replicas): + replica = module_copies[j][i] + setattr(replica, key, module_copies[j][module_idx]) + for key, param in module._parameters.items(): + if param is None: + for j in range(num_replicas): + replica = module_copies[j][i] + replica._parameters[key] = None + else: + param_idx = param_indices[param] + for j in range(num_replicas): + replica = module_copies[j][i] + param_copy = param_copies[j][param_idx] + # parameters in replicas are no longer leaves, + # so setattr them as non-parameter attributes + setattr(replica, key, param_copy) + # expose the parameter for DDP + replica._former_parameters[key] = param_copy + for key, buf in module._buffers.items(): # type: ignore[assignment] + if buf is None: + for j in range(num_replicas): + replica = module_copies[j][i] + replica._buffers[key] = None + else: + if buf.requires_grad and not detach: + buffer_copies = buffer_copies_rg + buffer_idx = buffer_indices_rg[buf] + else: + buffer_copies = buffer_copies_not_rg + buffer_idx = buffer_indices_not_rg[buf] + for j in range(num_replicas): + replica = module_copies[j][i] + setattr(replica, key, buffer_copies[j][buffer_idx]) + + return [cast(T, module_copies[j][0]) for j in range(num_replicas)] diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/parallel/scatter_gather.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/parallel/scatter_gather.py new file mode 100644 index 0000000000000000000000000000000000000000..8daa1117bfaf98246f83acf9e2b79666ccdf6ef8 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/parallel/scatter_gather.py @@ -0,0 +1,107 @@ +import torch +from typing import Any, Dict, List, Optional, Sequence, Tuple, TypeVar, Union, overload +from ._functions import Scatter, Gather +import warnings + +__all__ = ['scatter', 'scatter_kwargs', 'gather'] + +def is_namedtuple(obj: Any) -> bool: + # Check if type was created from collections.namedtuple or a typing.NamedTuple. + warnings.warn("is_namedtuple is deprecated, please use the python checks instead") + return _is_namedtuple(obj) + +def _is_namedtuple(obj: Any) -> bool: + # Check if type was created from collections.namedtuple or a typing.NamedTuple. + return ( + isinstance(obj, tuple) and hasattr(obj, "_asdict") and hasattr(obj, "_fields") + ) + + +T = TypeVar("T", dict, list, tuple) + +# For some reason, 'scatter' returns a tuple when given a single Tensor input but a list otherwise. +@overload +def scatter( + inputs: torch.Tensor, + target_gpus: Sequence[Union[int, torch.device]], + dim: int = ..., +) -> Tuple[torch.Tensor, ...]: + ... + +@overload +def scatter(inputs: T, target_gpus: Sequence[Union[int, torch.device]], dim: int = ...) -> List[T]: + ... + +def scatter(inputs, target_gpus, dim=0): + r"""Slice tensors into approximately equal chunks and distributes them across given GPUs. + + Duplicates references to objects that are not tensors. 
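+ A minimal sketch (illustrative only; assumes two CUDA devices are available):
+
+ >>> # xdoctest: +SKIP('requires CUDA')
+ >>> x = torch.arange(8).reshape(4, 2)
+ >>> chunks = scatter(x, target_gpus=[0, 1], dim=0)
+ >>> [c.shape for c in chunks]
+ [torch.Size([2, 2]), torch.Size([2, 2])]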
+ """ + def scatter_map(obj): + if isinstance(obj, torch.Tensor): + return Scatter.apply(target_gpus, None, dim, obj) + if _is_namedtuple(obj): + return [type(obj)(*args) for args in zip(*map(scatter_map, obj))] + if isinstance(obj, tuple) and len(obj) > 0: + return list(zip(*map(scatter_map, obj))) + if isinstance(obj, list) and len(obj) > 0: + return [list(i) for i in zip(*map(scatter_map, obj))] + if isinstance(obj, dict) and len(obj) > 0: + return [type(obj)(i) for i in zip(*map(scatter_map, obj.items()))] + return [obj for _ in target_gpus] + + # After scatter_map is called, a scatter_map cell will exist. This cell + # has a reference to the actual function scatter_map, which has references + # to a closure that has a reference to the scatter_map cell (because the + # fn is recursive). To avoid this reference cycle, we set the function to + # None, clearing the cell + try: + res = scatter_map(inputs) + finally: + scatter_map = None # type: ignore[assignment] + return res + + +def scatter_kwargs( + inputs: Tuple[Any, ...], + kwargs: Optional[Dict[str, Any]], + target_gpus: Sequence[Union[int, torch.device]], + dim: int = 0, +) -> Tuple[Tuple[Any, ...], Tuple[Dict[str, Any], ...]]: + r"""Scatter with support for kwargs dictionary.""" + scattered_inputs = scatter(inputs, target_gpus, dim) if inputs else [] + scattered_kwargs = scatter(kwargs, target_gpus, dim) if kwargs else [] + if len(scattered_inputs) < len(scattered_kwargs): + scattered_inputs.extend(() for _ in range(len(scattered_kwargs) - len(scattered_inputs))) + elif len(scattered_kwargs) < len(inputs): + scattered_kwargs.extend({} for _ in range(len(scattered_inputs) - len(scattered_kwargs))) + return tuple(scattered_inputs), tuple(scattered_kwargs) + + +def gather(outputs: Any, target_device: Union[int, torch.device], dim: int = 0) -> Any: + r"""Gather tensors from different GPUs on a specified device. + + Use 'cpu' for CPU to avoid a deprecation warning. + """ + def gather_map(outputs): + out = outputs[0] + if isinstance(out, torch.Tensor): + return Gather.apply(target_device, dim, *outputs) + if out is None: + return None + if isinstance(out, dict): + if not all(len(out) == len(d) for d in outputs): + raise ValueError('All dicts must have the same number of keys') + return type(out)((k, gather_map([d[k] for d in outputs])) + for k in out) + if _is_namedtuple(out): + return type(out)._make(map(gather_map, zip(*outputs))) + return type(out)(map(gather_map, zip(*outputs))) + + # Recursive function calls like this create reference cycles. + # Setting the function to None clears the refcycle. 
+ try: + res = gather_map(outputs) + finally: + gather_map = None # type: ignore[assignment] + return res diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/quantizable/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantizable/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3d79bdbfe83209f18b17cc8c7b245f322871d6c0 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantizable/__init__.py @@ -0,0 +1 @@ +from .modules import * # noqa: F403 diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/quantizable/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantizable/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ee384ae0d508277bbd0e8cf5b5da88af77ffe4ac Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantizable/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/quantizable/modules/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantizable/modules/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a1257b404b7346c6a96c4de3adb45c6e63564fac --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantizable/modules/__init__.py @@ -0,0 +1,9 @@ +from torch.ao.nn.quantizable.modules.activation import MultiheadAttention +from torch.ao.nn.quantizable.modules.rnn import LSTM +from torch.ao.nn.quantizable.modules.rnn import LSTMCell + +__all__ = [ + 'LSTM', + 'LSTMCell', + 'MultiheadAttention', +] diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/quantizable/modules/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantizable/modules/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b5a85e657b6a3bb965f57fb001561908037fbd88 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantizable/modules/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/quantizable/modules/__pycache__/activation.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantizable/modules/__pycache__/activation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e16198ca2208309b450aeec8dd2ea25bd90c0e69 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantizable/modules/__pycache__/activation.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/quantizable/modules/__pycache__/rnn.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantizable/modules/__pycache__/rnn.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..de3c6e523e8a8e9c35543f8ea51c4efeb3a7b54b Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantizable/modules/__pycache__/rnn.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/quantizable/modules/activation.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantizable/modules/activation.py new file mode 100644 index 0000000000000000000000000000000000000000..e4f7a5ca3b540edc9f9b1fc15899b63240b7ac79 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantizable/modules/activation.py @@ -0,0 +1,10 @@ +# flake8: noqa: F401 +r"""Quantizable Modules. 
+ +This file is in the process of migration to `torch/ao/nn/quantizable`, and +is kept here for compatibility while the migration process is ongoing. +If you are adding a new entry/functionality, please, add it to the +appropriate file under the `torch/ao/nn/quantizable/modules`, +while adding an import statement here. +""" +from torch.ao.nn.quantizable.modules.activation import MultiheadAttention diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/quantizable/modules/rnn.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantizable/modules/rnn.py new file mode 100644 index 0000000000000000000000000000000000000000..a767ae060f96d0d509dbd3411d33c87ba99bb4d9 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantizable/modules/rnn.py @@ -0,0 +1,11 @@ +# flake8: noqa: F401 +r"""Quantizable Modules. + +This file is in the process of migration to `torch/ao/nn/quantizable`, and +is kept here for compatibility while the migration process is ongoing. +If you are adding a new entry/functionality, please, add it to the +appropriate file under the `torch/ao/nn/quantizable/modules`, +while adding an import statement here. +""" +from torch.ao.nn.quantizable.modules.rnn import LSTM +from torch.ao.nn.quantizable.modules.rnn import LSTMCell diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..c53b961e9494353094150da627341a9e950e3f35 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/__init__.py @@ -0,0 +1,40 @@ +from . import dynamic # noqa: F403 +from . import functional # noqa: F403 +from . import modules # noqa: F403 +from .modules import * # noqa: F403 +from .modules import MaxPool2d + +__all__ = [ + 'BatchNorm2d', + 'BatchNorm3d', + 'Conv1d', + 'Conv2d', + 'Conv3d', + 'ConvTranspose1d', + 'ConvTranspose2d', + 'ConvTranspose3d', + 'DeQuantize', + 'Dropout', + 'ELU', + 'Embedding', + 'EmbeddingBag', + 'GroupNorm', + 'Hardswish', + 'InstanceNorm1d', + 'InstanceNorm2d', + 'InstanceNorm3d', + 'LayerNorm', + 'LeakyReLU', + 'Linear', + 'LSTM', + 'MultiheadAttention', + 'PReLU', + 'Quantize', + 'ReLU6', + 'Sigmoid', + 'Softmax', + # Wrapper modules + 'FloatFunctional', + 'FXFloatFunctional', + 'QFunctional', +] diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a302ebfebf0830dd2042ad152fe265670696dbf0 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/__pycache__/functional.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/__pycache__/functional.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e0d0543fd616e2a3622e9e569842736d23edd442 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/__pycache__/functional.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/_reference/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/_reference/__init__.py new file mode 100644 index 
0000000000000000000000000000000000000000..3d79bdbfe83209f18b17cc8c7b245f322871d6c0 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/_reference/__init__.py @@ -0,0 +1 @@ +from .modules import * # noqa: F403 diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/_reference/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/_reference/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f51d9bfb1f3a077b3341ebd9450797ed2edb558e Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/_reference/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a1ff60ed6a8b00777e3ca8ad5f8cf46f201016f3 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/__init__.py @@ -0,0 +1,31 @@ +# flake8: noqa: F401 +r"""Quantized Reference Modules. + +This module is in the process of migration to +`torch/ao/nn/quantized/reference`, and is kept here for +compatibility while the migration process is ongoing. +If you are adding a new entry/functionality, please, add it to the +appropriate file under the `torch/ao/nn/quantized/reference`, +while adding an import statement here. +""" + +from torch.ao.nn.quantized.reference.modules.linear import Linear +from torch.ao.nn.quantized.reference.modules.conv import Conv1d, Conv2d, Conv3d, ConvTranspose1d, ConvTranspose2d, ConvTranspose3d +from torch.ao.nn.quantized.reference.modules.rnn import RNNCell, LSTMCell, GRUCell, LSTM +from torch.ao.nn.quantized.reference.modules.sparse import Embedding, EmbeddingBag + +__all__ = [ + 'Linear', + 'Conv1d', + 'Conv2d', + 'Conv3d', + 'ConvTranspose1d', + 'ConvTranspose2d', + 'ConvTranspose3d', + 'RNNCell', + 'LSTMCell', + 'GRUCell', + 'LSTM', + 'Embedding', + 'EmbeddingBag', +] diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ea0f47ed4a28df5bcbc5b07d3edf44647c1a3afa Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/__pycache__/conv.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/__pycache__/conv.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..72e860912eced63bff34d695b8b666b7d6b93753 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/__pycache__/conv.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/__pycache__/linear.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/__pycache__/linear.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0ceb1dafcf730d3de05d67740ced305a9a43275f Binary files /dev/null and 
b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/__pycache__/linear.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/__pycache__/rnn.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/__pycache__/rnn.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4dcf16d17cae2e5d7dce5c9d753456e937ab97e9 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/__pycache__/rnn.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/__pycache__/sparse.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/__pycache__/sparse.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aece9cf91a1da42fbe99b6f87c78968a53b23ea0 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/__pycache__/sparse.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/__pycache__/utils.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1a3834fd701f3f6a3dddc3380f1230b59d6df410 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/__pycache__/utils.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/conv.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/conv.py new file mode 100644 index 0000000000000000000000000000000000000000..bbfeb2959f4b4c4030c5496fd3a4f666c9330569 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/conv.py @@ -0,0 +1,19 @@ +# flake8: noqa: F401 +r"""Quantized Reference Modules. + +This module is in the process of migration to +`torch/ao/nn/quantized/reference`, and is kept here for +compatibility while the migration process is ongoing. +If you are adding a new entry/functionality, please, add it to the +appropriate file under the `torch/ao/nn/quantized/reference`, +while adding an import statement here. +""" + +from torch.ao.nn.quantized.reference.modules.conv import _ConvNd +from torch.ao.nn.quantized.reference.modules.conv import Conv1d +from torch.ao.nn.quantized.reference.modules.conv import Conv2d +from torch.ao.nn.quantized.reference.modules.conv import Conv3d +from torch.ao.nn.quantized.reference.modules.conv import _ConvTransposeNd +from torch.ao.nn.quantized.reference.modules.conv import ConvTranspose1d +from torch.ao.nn.quantized.reference.modules.conv import ConvTranspose2d +from torch.ao.nn.quantized.reference.modules.conv import ConvTranspose3d diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/linear.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/linear.py new file mode 100644 index 0000000000000000000000000000000000000000..6be6d5a140bb58f76b0e6061eb4ccb37d385757f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/linear.py @@ -0,0 +1,12 @@ +# flake8: noqa: F401 +r"""Quantized Reference Modules. 
+ +This module is in the process of migration to +`torch/ao/nn/quantized/reference`, and is kept here for +compatibility while the migration process is ongoing. +If you are adding a new entry/functionality, please, add it to the +appropriate file under the `torch/ao/nn/quantized/reference`, +while adding an import statement here. +""" + +from torch.ao.nn.quantized.reference.modules.linear import Linear diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/rnn.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/rnn.py new file mode 100644 index 0000000000000000000000000000000000000000..2464eab87b52469a5ee9c0ef3e0a9ce13fb814bf --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/rnn.py @@ -0,0 +1,17 @@ +# flake8: noqa: F401 +r"""Quantized Reference Modules. + +This module is in the process of migration to +`torch/ao/nn/quantized/reference`, and is kept here for +compatibility while the migration process is ongoing. +If you are adding a new entry/functionality, please, add it to the +appropriate file under the `torch/ao/nn/quantized/reference`, +while adding an import statement here. +""" + +from torch.ao.nn.quantized.reference.modules.rnn import RNNCellBase +from torch.ao.nn.quantized.reference.modules.rnn import RNNCell +from torch.ao.nn.quantized.reference.modules.rnn import LSTMCell +from torch.ao.nn.quantized.reference.modules.rnn import GRUCell +from torch.ao.nn.quantized.reference.modules.rnn import RNNBase +from torch.ao.nn.quantized.reference.modules.rnn import LSTM diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/sparse.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/sparse.py new file mode 100644 index 0000000000000000000000000000000000000000..e01f4e9b14897e051e15ed0de65a2772ffd46299 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/sparse.py @@ -0,0 +1,13 @@ +# flake8: noqa: F401 +r"""Quantized Reference Modules. + +This module is in the process of migration to +`torch/ao/nn/quantized/reference`, and is kept here for +compatibility while the migration process is ongoing. +If you are adding a new entry/functionality, please, add it to the +appropriate file under the `torch/ao/nn/quantized/reference`, +while adding an import statement here. +""" + +from torch.ao.nn.quantized.reference.modules.sparse import Embedding +from torch.ao.nn.quantized.reference.modules.sparse import EmbeddingBag diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/utils.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..f79835124931eca4763677b2cdc6c1a748dd74c1 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/_reference/modules/utils.py @@ -0,0 +1,15 @@ +# flake8: noqa: F401 +r"""Quantized Reference Modules. + +This module is in the process of migration to +`torch/ao/nn/quantized/reference`, and is kept here for +compatibility while the migration process is ongoing. +If you are adding a new entry/functionality, please, add it to the +appropriate file under the `torch/ao/nn/quantized/reference`, +while adding an import statement here. 
+""" +from torch.ao.nn.quantized.reference.modules.utils import _quantize_weight +from torch.ao.nn.quantized.reference.modules.utils import _quantize_and_dequantize_weight +from torch.ao.nn.quantized.reference.modules.utils import _save_weight_qparams +from torch.ao.nn.quantized.reference.modules.utils import _get_weight_qparam_keys +from torch.ao.nn.quantized.reference.modules.utils import ReferenceQuantizedModule diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/dynamic/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/dynamic/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..1b08cd1bc7149c5506db3a952fff488eb06749f5 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/dynamic/__init__.py @@ -0,0 +1 @@ +from torch.ao.nn.quantized.dynamic import * # noqa: F403 diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/dynamic/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/dynamic/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..246f2b2990601394b0dbc5491aba8cf72f1c4dbd Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/dynamic/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/dynamic/modules/linear.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/dynamic/modules/linear.py new file mode 100644 index 0000000000000000000000000000000000000000..592384dbdb34425cc713f06511f286bee2235b73 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/dynamic/modules/linear.py @@ -0,0 +1,10 @@ +# flake8: noqa: F401 +r"""Quantized Dynamic Modules. + +This file is in the process of migration to `torch/ao/nn/quantized/dynamic`, +and is kept here for compatibility while the migration process is ongoing. +If you are adding a new entry/functionality, please, add it to the +appropriate file under the `torch/ao/nn/quantized/dynamic/modules`, +while adding an import statement here. +""" +from torch.ao.nn.quantized.dynamic.modules.linear import Linear diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/functional.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/functional.py new file mode 100644 index 0000000000000000000000000000000000000000..d763e171fdb432c8ba2059cc2332e7ac6424854a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/functional.py @@ -0,0 +1,10 @@ +r"""nn.quantized.functional. + +Quantized equivalents of the `nn.functional`. + +Note:: + This location is in the process of being deprecated. + Please, use the `torch.ao.nn.quantized.functional` instead. 
+""" + +from torch.ao.nn.quantized.functional import * # noqa: F401,F403 diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/modules/__pycache__/activation.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/modules/__pycache__/activation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..75f50b8128e2d6c1febe60df989190712ba1e636 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/modules/__pycache__/activation.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ddc61d26d24876a84ad1b78b8adee3aa73f9ed52 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/__init__.py @@ -0,0 +1,32 @@ +from . import rnn +from .clip_grad import clip_grad_norm, clip_grad_norm_, clip_grad_value_ +from .weight_norm import weight_norm, remove_weight_norm +from .convert_parameters import parameters_to_vector, vector_to_parameters +from .spectral_norm import spectral_norm, remove_spectral_norm +from .fusion import fuse_conv_bn_eval, fuse_conv_bn_weights, fuse_linear_bn_eval, fuse_linear_bn_weights +from .memory_format import convert_conv2d_weight_memory_format, convert_conv3d_weight_memory_format +from . import parametrizations +from .init import skip_init +from . import stateless + +__all__ = [ + "clip_grad_norm", + "clip_grad_norm_", + "clip_grad_value_", + "convert_conv2d_weight_memory_format", + "convert_conv3d_weight_memory_format", + "fuse_conv_bn_eval", + "fuse_conv_bn_weights", + "fuse_linear_bn_eval", + "fuse_linear_bn_weights", + "parameters_to_vector", + "parametrizations", + "remove_spectral_norm", + "remove_weight_norm", + "rnn", + "skip_init", + "spectral_norm", + "stateless", + "vector_to_parameters", + "weight_norm", +] diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/_deprecation_utils.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/_deprecation_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..1b2a9b6e29f2f2f0157f97e8210d13751e0bcb8c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/_deprecation_utils.py @@ -0,0 +1,45 @@ +from typing import List, Callable +import importlib +import warnings + + +_MESSAGE_TEMPLATE = r"Usage of '{old_location}' is deprecated; please use '{new_location}' instead." + +def lazy_deprecated_import(all: List[str], old_module: str, new_module: str) -> Callable: + r"""Import utility to lazily import deprecated packages / modules / functional. + + The old_module and new_module are also used in the deprecation warning defined + by the `_MESSAGE_TEMPLATE`. + + Args: + all: The list of the functions that are imported. Generally, the module's + __all__ list of the module. 
+ old_module: Old module location + new_module: New module location / Migrated location + + Returns: + Callable to assign to the `__getattr__` + + Usage: + + # In the `torch/nn/quantized/functional.py` + from torch.nn.utils._deprecation_utils import lazy_deprecated_import + _MIGRATED_TO = "torch.ao.nn.quantized.functional" + __getattr__ = lazy_deprecated_import( + all=__all__, + old_module=__name__, + new_module=_MIGRATED_TO) + """ + warning_message = _MESSAGE_TEMPLATE.format( + old_location=old_module, + new_location=new_module) + + def getattr_dunder(name): + if name in all: + # We are using the "RuntimeWarning" to make sure it is not + # ignored by default. + warnings.warn(warning_message, RuntimeWarning) + package = importlib.import_module(new_module) + return getattr(package, name) + raise AttributeError(f"Module {new_module!r} has no attribute {name!r}.") + return getattr_dunder diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..102474614238efec588ea4dc69d1d568d4fc60bb --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/__init__.py @@ -0,0 +1,9 @@ +from .conv_expanded_weights import ConvPerSampleGrad +from .embedding_expanded_weights import EmbeddingPerSampleGrad +from .group_norm_expanded_weights import GroupNormPerSampleGrad +from .instance_norm_expanded_weights import InstanceNormPerSampleGrad +from .layer_norm_expanded_weights import LayerNormPerSampleGrad +from .linear_expanded_weights import LinearPerSampleGrad +from .expanded_weights_impl import ExpandedWeight + +__all__ = ['ExpandedWeight'] diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/__pycache__/conv_expanded_weights.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/__pycache__/conv_expanded_weights.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..562eefac1178c3f9c245052e1dd3ebba88a73546 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/__pycache__/conv_expanded_weights.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/__pycache__/embedding_expanded_weights.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/__pycache__/embedding_expanded_weights.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ae180d2c6413d2c8b3fbddc659bceaaaf2c2b780 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/__pycache__/embedding_expanded_weights.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/__pycache__/expanded_weights_impl.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/__pycache__/expanded_weights_impl.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6ceeff81aa0cb08f0d6afb6b1755b5f18318ada7 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/__pycache__/expanded_weights_impl.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/__pycache__/linear_expanded_weights.cpython-310.pyc 
b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/__pycache__/linear_expanded_weights.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c047e2ce26a8f106c7fcc847158f224cce2b2df4 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/__pycache__/linear_expanded_weights.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/conv_expanded_weights.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/conv_expanded_weights.py new file mode 100644 index 0000000000000000000000000000000000000000..c10ccb90ae92f1f57513de5c0ab7a56c26996298 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/conv_expanded_weights.py @@ -0,0 +1,52 @@ +import torch +import torch.nn.functional as F + +from .conv_utils import conv_backward, conv_args_and_kwargs, conv_picker, conv_input_for_string_padding +from .expanded_weights_impl import ExpandedWeight, implements_per_sample_grads +from .expanded_weights_utils import forward_helper + +@implements_per_sample_grads(F.conv1d) +@implements_per_sample_grads(F.conv2d) +@implements_per_sample_grads(F.conv3d) +class ConvPerSampleGrad(torch.autograd.Function): + @staticmethod + def forward(ctx, kwarg_names, conv_fn, *expanded_args_and_kwargs): + expanded_args, expanded_kwargs = conv_args_and_kwargs(kwarg_names, expanded_args_and_kwargs) + orig_input = expanded_args[0] + was_same_padding = expanded_kwargs['padding'] == "same" + + if isinstance(expanded_kwargs['padding'], str): + # if padding is a string, we'll do the necessary padding (slowly) using F.pad + kernel_size = expanded_args[1].shape[2:] + padding, dilation = expanded_kwargs['padding'], expanded_kwargs['dilation'] + input = conv_input_for_string_padding(conv_fn, padding, expanded_args[0], dilation, kernel_size) + expanded_args = (input, expanded_args[1]) + # since we've already done the padding, don't need any more + expanded_kwargs['padding'] = 0 + + output = forward_helper(conv_fn, expanded_args, expanded_kwargs) + input, weight = expanded_args + batched_dim_size = conv_picker(conv_fn, 3, 4, 5) + if input.dim() != batched_dim_size: + raise RuntimeError(f"Expanded Weights only support convolution with batched input, got {conv_fn} with an" + f"unbatched input of dim {input.dim()}, expected input of dim {batched_dim_size}") + + ctx.conv_fn = conv_fn + + ctx.batch_size = orig_input.shape[0] + ctx.input_required_grad = orig_input.requires_grad + ctx.orig_input_shape = orig_input.shape + ctx.was_same_padding = was_same_padding + ctx.stride, ctx.padding = expanded_kwargs['stride'], expanded_kwargs['padding'] + ctx.dilation, ctx.groups = expanded_kwargs['dilation'], expanded_kwargs['groups'] + + if isinstance(weight, ExpandedWeight): + ctx.input = input + ctx.weight = weight + ctx.bias = expanded_kwargs['bias'] + + return output + + @staticmethod + def backward(ctx, grad_output): + return conv_backward(ctx.conv_fn, ctx, grad_output) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/conv_utils.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/conv_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..b675e3b892bdb848f2599d566e6079427684e8e4 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/conv_utils.py @@ -0,0 +1,240 @@ +import torch +import torch.nn.functional as F + 
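+# NumPy is only used for small shape bookkeeping below (np.prod over kernel sizes).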
+import numpy as np +from typing import List, Optional + +from .expanded_weights_utils import \ + set_grad_sample_if_exists, unpack_expanded_weight_or_tensor + +THRESHOLD = 32 + + +def conv_picker(func, conv1dOpt, conv2dOpt, conv3dOpt): + if func == F.conv1d: + return conv1dOpt + if func == F.conv2d: + return conv2dOpt + else: + assert func == F.conv3d + return conv3dOpt + + +def conv_args_and_kwargs(kwarg_names, expanded_args_and_kwargs): + args = expanded_args_and_kwargs[:len(expanded_args_and_kwargs) - len(kwarg_names)] + kwargs = expanded_args_and_kwargs[len(expanded_args_and_kwargs) - len(kwarg_names):] + kwargs = dict(zip(kwarg_names, kwargs)) + + return conv_normalizer(*args, **kwargs) + + +def conv_normalizer(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1): + return (input, weight), {'bias': bias, 'stride': stride, 'padding': padding, 'dilation': dilation, 'groups': groups} + + +def conv_input_for_string_padding(func, padding_style, input, dilation, kernel_size): + if padding_style == "valid": + return input + else: + padding = int_padding_for_string_padding(func, padding_style, dilation, kernel_size) + return F.pad(input, padding) + + +def int_padding_for_string_padding(func, padding_style, dilation, kernel_size): + def get_dilation(i): + return dilation[i] if isinstance(dilation, tuple) else dilation + + if padding_style == "same": + padding: List[int] = [] + # F.pad needs the padding in reverse order from what conv expects + for i in range(conv_picker(func, 0, 1, 2), -1, -1): + padding += conv_padding_for_same(get_dilation(i), kernel_size[i]) + return padding + elif padding_style == "valid": + return conv_picker(func, 2, 4, 6) * (0,) + else: + raise RuntimeError(f"got padding type of {padding_style}, only accept 'same' or 'valid'") + + +def conv_padding_for_same(dilation, kernel_size): + total_pad = dilation * (kernel_size - 1) + left_pad = total_pad // 2 + right_pad = total_pad - left_pad + return left_pad, right_pad + + +def conv_backward(func, ctx, grad_output): + + def weight_grad_sample(weight): + if (batch_size < THRESHOLD and groups == 1): + return conv_group_weight_grad_sample(ctx.input, grad_output, weight_shape, stride, padding, dilation, batch_size, func) + else: + return conv_unfold_weight_grad_sample(ctx.input, grad_output, weight_shape, kernel_size, + stride, padding, dilation, groups, func) + + def expand(param): + if isinstance(param, int): + return conv_picker(func, (param,), (param, param), (param, param, param)) + else: + return param + + def calc_total_padding(func, was_same, padding, dilation, kernel_size): + if was_same: + all_padding = int_padding_for_string_padding(func, "same", dilation, kernel_size) + # F.pad needs the padding in reverse order from what conv expects + total_padding = tuple(all_padding[i] + all_padding[i - 1] for i in range(len(all_padding) - 1, -1, -2)) + return total_padding + else: + return tuple(2 * pad for pad in padding) + + weight_shape = ctx.weight.shape + stride, padding, dilation, groups = expand(ctx.stride), expand(ctx.padding), expand(ctx.dilation), ctx.groups + + kernel_size = [] + for i in range(2, conv_picker(func, 3, 4, 5)): + kernel_size.append(weight_shape[i]) + + batch_size = ctx.batch_size + results: List[Optional[torch.Tensor]] = [] + results.append(None) # for kwarg names + results.append(None) # for op reference + + # "same" padding may give uneven padding on either side so we need to separate the "padding" attr and total padding + total_padding = calc_total_padding(func, ctx.was_same_padding, 
padding, dilation, kernel_size) + + if ctx.input_required_grad: + output_padding = [] + input_dims = conv_picker(func, 1, 2, 3) + for i in range(input_dims): + input_dim = ctx.orig_input_shape[2 + i] + output_padding.append((total_padding[i] + input_dim - (kernel_size[i] * dilation[i] - dilation[i] + 1)) % stride[i]) + weight_ = unpack_expanded_weight_or_tensor(ctx.weight) + transpose_func = conv_picker(func, F.conv_transpose1d, F.conv_transpose2d, F.conv_transpose3d) + out = transpose_func(grad_output, weight_, None, stride, padding, tuple(output_padding), groups, dilation) + + if ctx.was_same_padding: + for i in range(len(total_padding)): + out = torch.narrow(out, 2 + i, total_padding[i] // 2, ctx.orig_input_shape[2 + i]) + + results.append(out) + else: + results.append(None) + # weight and bias don't compute batched gradients; no other arguments are differentiable + results = results + [None] * 6 + + # set grad_sample field for weight and bias with per sample gradients + set_grad_sample_if_exists(ctx.weight, weight_grad_sample) + set_grad_sample_if_exists(ctx.bias, lambda _: grad_output.reshape(*grad_output.shape[:2], -1).sum(dim=2)) + return tuple(results) + + +def conv_unfold_weight_grad_sample(input, grad_output, weight_shape, kernel_size, stride, padding, dilation, groups, func): + n = input.shape[0] + in_channels = input.shape[1] + + unfold_func = conv_picker( + func, + lambda: F.unfold(input.unsqueeze(-2), + kernel_size=(1, kernel_size[0]), + dilation=(1, dilation[0]), + padding=(0, padding[0]), + stride=(1, stride[0])), + lambda: F.unfold(input, kernel_size, dilation=dilation, padding=padding, stride=stride), + lambda: unfold3d(input, kernel_size, padding, stride, dilation) + ) + + input = unfold_func() + grad_output = grad_output.reshape(n, -1, input.shape[-1]) + + # n=batch_sz; o=num_out_channels; p=(num_in_channels/groups)*kernel_sz + weight_grad_sample = torch.einsum("noq,npq->nop", grad_output, input) + # rearrange the above tensor and extract diagonals. + weight_grad_sample = weight_grad_sample.view( + n, + groups, + -1, + groups, + int(in_channels / groups), + np.prod(kernel_size), + ) + weight_grad_sample = torch.einsum("ngrg...->ngr...", weight_grad_sample).contiguous() + shape = [n] + list(weight_shape) + weight_grad_sample = weight_grad_sample.view(shape) + return weight_grad_sample + + +def conv_group_weight_grad_sample(input, grad_output, weight_shape, stride, padding, dilation, batch_size, func): + I = input.shape[1] + O = grad_output.shape[1] + + input_ = input.transpose(0, 1) + grad_output_ = grad_output.view(grad_output.shape[0] * grad_output.shape[1], 1, *grad_output.shape[2:]) + + weight_grad_sample = func(input_, grad_output_, None, stride=dilation, padding=padding, dilation=stride, groups=batch_size) + input_dims = conv_picker(func, 3, 4, 5) + for i in range(2, input_dims): + weight_grad_sample = weight_grad_sample.narrow(i, 0, weight_shape[i]) + weight_grad_sample = weight_grad_sample.view(I, batch_size, O, *weight_grad_sample.shape[2:]) + weight_grad_sample = weight_grad_sample.movedim(0, 2) + return weight_grad_sample + + +def unfold3d( + tensor, + kernel_size, + padding, + stride, + dilation, +): + r""" + Extract sliding local blocks from an batched input tensor. + + :class:`torch.nn.Unfold` only supports 4D inputs (batched image-like tensors). + This method implements the same action for 5D inputs + Args: + tensor: An input tensor of shape ``(B, C, D, H, W)``. 
+ kernel_size: the size of the sliding blocks + padding: implicit zero padding to be added on both sides of input + stride: the stride of the sliding blocks in the input spatial dimensions + dilation: the spacing between the kernel points. + Returns: + A tensor of shape ``(B, C * np.prod(kernel_size), L)``, where L - output spatial dimensions. + See :class:`torch.nn.Unfold` for more details + Example: + >>> # xdoctest: +SKIP + >>> B, C, D, H, W = 3, 4, 5, 6, 7 + >>> tensor = torch.arange(1, B * C * D * H * W + 1.).view(B, C, D, H, W) + >>> unfold3d(tensor, kernel_size=2, padding=0, stride=1).shape + torch.Size([3, 32, 120]) + """ + if len(tensor.shape) != 5: + raise ValueError( + f"Input tensor must be of the shape [B, C, D, H, W]. Got{tensor.shape}" + ) + + if dilation != (1, 1, 1): + raise NotImplementedError(f"dilation={dilation} not supported.") + + batch_size, channels, _, _, _ = tensor.shape + + # Input shape: (B, C, D, H, W) + tensor = F.pad( + tensor, (padding[2], padding[2], padding[1], padding[1], padding[0], padding[0]) + ) + # Output shape: (B, C, D+2*padding[2], H+2*padding[1], W+2*padding[0]) + + tensor = tensor.unfold(dimension=2, size=kernel_size[0], step=stride[0]) + tensor = tensor.unfold(dimension=3, size=kernel_size[1], step=stride[1]) + tensor = tensor.unfold(dimension=4, size=kernel_size[2], step=stride[2]) + # Output shape: (B, C, D_out, H_out, W_out, kernel_size[0], kernel_size[1], kernel_size[2]) + # For D_out, H_out, W_out definitions see :class:`torch.nn.Unfold` + + tensor = tensor.permute(0, 2, 3, 4, 1, 5, 6, 7) + # Output shape: (B, D_out, H_out, W_out, C, kernel_size[0], kernel_size[1], kernel_size[2]) + + tensor = tensor.reshape(batch_size, -1, channels * np.prod(kernel_size)).transpose( + 1, 2 + ) + # Output shape: (B, D_out * H_out * W_out, C * kernel_size[0] * kernel_size[1] * kernel_size[2] + + return tensor diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/embedding_expanded_weights.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/embedding_expanded_weights.py new file mode 100644 index 0000000000000000000000000000000000000000..c7956a3a1b1f666708eefbec69d031af2da18592 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/embedding_expanded_weights.py @@ -0,0 +1,54 @@ +import torch +import torch.nn.functional as F +from .expanded_weights_impl import implements_per_sample_grads +from .expanded_weights_utils import standard_kwargs, forward_helper, set_grad_sample_if_exists + +from typing import List, Optional + +@implements_per_sample_grads(F.embedding) +class EmbeddingPerSampleGrad(torch.autograd.Function): + @staticmethod + def forward(ctx, kwarg_names, _, *expanded_args_and_kwargs): + expanded_args, expanded_kwargs = standard_kwargs(kwarg_names, expanded_args_and_kwargs) + if len(expanded_args[0].shape) == 1: + raise RuntimeError(f"Expanded Weights needs an input with a batch size, got a 1D tensor, {expanded_args[0]}") + output = forward_helper(F.embedding, expanded_args, expanded_kwargs) + ctx.input, ctx.weight = expanded_args + ctx.padding_idx, ctx.scale_grad_by_freq = expanded_kwargs['padding_idx'], expanded_kwargs['scale_grad_by_freq'] + ctx.sparse = expanded_kwargs['sparse'] + return output + + @staticmethod + def backward(ctx, grad_output): + input, weight = ctx.input, ctx.weight + padding_idx, scale_grad_by_freq, sparse = ctx.padding_idx, ctx.scale_grad_by_freq, ctx.sparse + + def weight_per_sample_grad(weight): + batch_size = input.shape[0] 
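The per-sample weight gradient assembled in this backward (continued just below) uses a single batched ``scatter_add_`` instead of a Python loop over samples. A minimal sketch checking that formulation against one-sample-at-a-time autograd; the toy shapes here are illustrative only and not part of the patch:

import torch
import torch.nn.functional as F

torch.manual_seed(0)
batch_size, seq_len, num_embeddings, embedding_dim = 3, 4, 10, 5
weight = torch.randn(num_embeddings, embedding_dim)
input = torch.randint(0, num_embeddings, (batch_size, seq_len))
grad_output = torch.randn(batch_size, seq_len, embedding_dim)

# Batched formulation mirroring weight_per_sample_grad.
index = (input.unsqueeze(-1)
              .expand(*input.shape, embedding_dim)
              .reshape(batch_size, -1, embedding_dim))
grad_sample = torch.zeros(batch_size, num_embeddings, embedding_dim)
grad_sample.scatter_add_(1, index, grad_output.reshape(batch_size, -1, embedding_dim))

# Reference: ordinary autograd, one sample at a time.
for i in range(batch_size):
    w = weight.clone().requires_grad_(True)
    F.embedding(input[i], w).backward(grad_output[i])
    assert torch.allclose(grad_sample[i], w.grad)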
+ embedding_dim = weight.shape[1] + index = ( + input.unsqueeze(-1) + .expand(*input.shape, embedding_dim) + .reshape(batch_size, -1, embedding_dim) + ) + grad_sample = torch.zeros( + batch_size, *weight.shape, device=weight.device, dtype=grad_output.dtype + ) + return grad_sample.scatter_add_(1, index, grad_output.reshape(batch_size, -1, embedding_dim)) + + results: List[Optional[torch.Tensor]] = [] + results.append(None) # for kwarg names + results.append(None) # for op reference + + if input.requires_grad: + bw_fn = torch.ops.aten.embedding_backward + results.append(bw_fn(grad_output, input, weight.shape[0], padding_idx, scale_grad_by_freq, sparse)) + else: + results.append(None) + + # weight doesn't compute batched gradients; no other arguments are differentiable (2 not saved from forward) + results = results + [None] * 6 + + # set grad_sample field for weight with per sample gradients + set_grad_sample_if_exists(weight, weight_per_sample_grad) + return tuple(results) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/expanded_weights_impl.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/expanded_weights_impl.py new file mode 100644 index 0000000000000000000000000000000000000000..94e6041c6de5df13986ef329c8e13e0671326f54 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/expanded_weights_impl.py @@ -0,0 +1,153 @@ +from contextlib import contextmanager + +import torch +import functools +from torch._decomp import decomposition_table + +from typing import Callable, Dict + +from torch.utils._pytree import tree_map_only + +HANDLED_FUNCTIONS: Dict[Callable, torch.autograd.Function] = {} + +aten = torch._ops.ops.aten +# __torch_function__ runs before the pydispatcher so we need to manually use the same +# decompositions indexed by their torch equivalent +expanded_weights_rnn_decomps = { + # func: (input_decomp, data_decomp) + torch.rnn_relu: (decomposition_table[aten.rnn_relu.input], decomposition_table[aten.rnn_relu.data]), + torch.rnn_tanh: (decomposition_table[aten.rnn_tanh.input], decomposition_table[aten.rnn_tanh.data]), + torch.lstm: (decomposition_table[aten.lstm.input], decomposition_table[aten.lstm.data]), + torch.gru: (decomposition_table[aten.gru.input], decomposition_table[aten.gru.data]), +} + +# all of the RNN decomps run linear with the batch dimension second, even if batch_first was set +@contextmanager +def batch_second(args, kwargs): + def set_batch_second(ew): + ew.set_batch_first(False) + + def reset_batch_first(ew): + ew.set_batch_first(True) + + tree_map_only(ExpandedWeight, set_batch_second, args) + tree_map_only(ExpandedWeight, set_batch_second, kwargs) + try: + yield + finally: + tree_map_only(ExpandedWeight, reset_batch_first, args) + tree_map_only(ExpandedWeight, reset_batch_first, kwargs) + +# to support packed sequences, we need to allow for smaller batches. 
Expanded weights represents the largest batch +@contextmanager +def allow_smaller_batches(args, kwargs): + def allow(ew): + ew.set_allow_smaller_batches(True) + + def reset(ew): + ew.set_allow_smaller_batches(False) + + tree_map_only(ExpandedWeight, allow, args) + tree_map_only(ExpandedWeight, allow, kwargs) + try: + yield + finally: + tree_map_only(ExpandedWeight, reset, args) + tree_map_only(ExpandedWeight, reset, kwargs) + +@contextmanager +def setup_rnn(use_input_variant, args, kwargs): + with batch_second(args, kwargs) if use_input_variant else allow_smaller_batches(args, kwargs): + yield + + +def implements_per_sample_grads(torch_function): + @functools.wraps(torch_function) + def decorator(autograd_func): + HANDLED_FUNCTIONS[torch_function] = autograd_func + return autograd_func + return decorator + +# ExpandedWeight represents a weight (parameter) Tensor that has an expanded +# batch dimension. Operations on the ExpandedWeight Tensor act exactly like +# those without an expanded batch dimension but a call to .backward() populates +# the original (unexpanded) tensor with per-sample-gradients for in the grad_sample field +# +# ExpandedWeight has a fallback that always fails since we cannot know what the batch +# dimension of the input tensor is and therefore cannot know if this is a valid call +# +# This is a __torch_function__ object but it could have also been a Tensor Extension +# with a dispatch key. +# +# Needs to be a tensor subclass to allow reparamaterization +class ExpandedWeight(torch.Tensor): + def __init__(self, orig_weight, batch_size, loss_reduction): + self.batch_size = batch_size + self.batch_first = True + self.allow_smaller_batches = False + self.orig_weight = orig_weight + self.loss_reduction = loss_reduction + + handled_functions = HANDLED_FUNCTIONS + + def __new__(cls, orig_weight, batch_size, loss_reduction): + if not isinstance(orig_weight, torch.Tensor): + raise RuntimeError(f"Can only make Expanded Weights of Tensors, got {type(orig_weight).__name__}") + if not orig_weight.requires_grad: + raise RuntimeError("Can only build ExpandedWeights objects of tensors that require_grad") + ret = torch.Tensor._make_subclass(cls, orig_weight, True) + return ret + + @classmethod + def __torch_function__(cls, func, _, args=(), kwargs=None): + if kwargs is None: + kwargs = {} + if func in expanded_weights_rnn_decomps: + # in aten, choosing the input or data variants is done by parsing logic. This mimics some of that + decomp_opts = expanded_weights_rnn_decomps[func] + use_input_variant = isinstance(args[2], list) # data variant uses a list here + decomp = decomp_opts[0] if use_input_variant else decomp_opts[1] + + if decomp is not None: + with setup_rnn(use_input_variant, args, kwargs): + return decomp(*args, **kwargs) + if func == torch._cudnn_rnn_flatten_weight: + # since we aren't using the fused cuda kernels for RNNs, don't do this + return + if func in cls.handled_functions: + return cls.handled_functions[func].apply(tuple(kwargs.keys()), func, *(args + tuple(kwargs.values()))) + # We cannot use a fallback here because we do not know the batch dimension for any regular tensor inputs, + # i.e. 
torch.add(torch.Tensor, ExpandedWeight) + raise RuntimeError(f"Expanded Weights encountered but cannot handle function {func.__name__}") + + @property + def dtype(self): + return self.orig_weight.dtype + + @property + def data(self): + return self.orig_weight.data + + @property + def shape(self): + return self.orig_weight.shape + + @property + def device(self): + return self.orig_weight.device + + @property + def is_cuda(self): + return self.orig_weight.is_cuda + + def data_ptr(self): + return self.orig_weight.data_ptr() + + def get_device(self): + return self.orig_weight.get_device() + + def set_allow_smaller_batches(self, is_allow_smaller_batches): + self.allow_smaller_batches = is_allow_smaller_batches + + def set_batch_first(self, is_batch_first=True): + self.batch_first = is_batch_first diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/expanded_weights_utils.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/expanded_weights_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..249dbe59120434b856acb654bc6ba8bd65b926c0 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/expanded_weights_utils.py @@ -0,0 +1,145 @@ +from typing import Optional + +import torch +from .expanded_weights_impl import ExpandedWeight + +def is_batch_first(expanded_args_and_kwargs): + batch_first = None + for arg in expanded_args_and_kwargs: + if not isinstance(arg, ExpandedWeight): + continue + + if not batch_first: + batch_first = arg.batch_first + elif arg.batch_first != batch_first: + raise RuntimeError("Got conflicting batch_first arguments in the same layer") + return batch_first + +def standard_kwargs(kwarg_names, expanded_args): + r"""Separate args and kwargs from `__torch_function__`s that standardize kwargs. + + Most `__torch_function__`s standardize the kwargs that they give, so this will separate + the args and kwargs they pass. Functions that don't are linear and convND. + """ + kwarg_values = expanded_args[len(expanded_args) - len(kwarg_names):] + expanded_args_without_kwargs = expanded_args[:len(expanded_args) - len(kwarg_names)] + expanded_kwargs = dict(zip(kwarg_names, kwarg_values)) + return expanded_args_without_kwargs, expanded_kwargs + +def forward_helper(func, expanded_args, expanded_kwargs): + r"""Compute the forward pass for a function that has expanded weight(s) passed to it. + + It will run the forward pass where all ExpandedWeights are their original + weight. It runs checks on the given arguments and detaches the outputs. + + .. note:: First argument in :attr:`expanded_args` must be the input with the batch + dimension as the first element of the shape + + .. note:: :attr:`func` must return a Tensor or tuple of Tensors + + Args: + func: The function to be called + expanded_args: Arguments to be passed to :attr:`func`. Will include arguments + that need to be unpacked because they are ExpandedWeights + expanded_kwargs: Keyword arguments to be passed to :attr:`func`. + Similar to :attr:`expanded_args`. + """ + unexpanded_args, unexpanded_kwargs = _check_and_unexpand_args(func, expanded_args, expanded_kwargs) + return func(*unexpanded_args, **unexpanded_kwargs) + +def _check_and_unexpand_args(func, expanded_args, expanded_kwargs): + # input must be the first argument passed + input = expanded_args[0] + if isinstance(input, ExpandedWeight): + raise RuntimeError("Expanded Weights do not support inputs that are also ExpandedWeights. 
" + f"Input must be a Tensor, got {type(input).__name__} in function {func.__name__}") + if not isinstance(input, torch.Tensor): + raise RuntimeError("Expanded Weights requires a Tensor as the first input to get the batch dimension, " + f"got {type(input).__name__} in function {func.__name__}") + if len(input.shape) == 0: + raise RuntimeError(f"Expanded Weights requires a batch dimension but got an input of size 0 in function {func.__name__}") + if input.shape[0] == 0: + raise RuntimeError("0 is not a valid batch size for Expanded Weights but got input tensor of " + f"{input} in function {func.__name__}") + for arg in expanded_args + tuple(expanded_kwargs.values()): + if not isinstance(arg, ExpandedWeight): + continue + batch_size = input.shape[0] if arg.batch_first else input.shape[1] + if (arg.allow_smaller_batches and batch_size > arg.batch_size) or \ + (not arg.allow_smaller_batches and arg.batch_size != batch_size): + raise RuntimeError("Expected ExpandedWeights to have batch size matching input but got " + f"input batch size of {batch_size} with ExpandedWeight of batch size {arg.batch_size}") + + loss_reduction: Optional[str] = None + for arg in expanded_args + tuple(expanded_kwargs.values()): + if isinstance(arg, ExpandedWeight): + if loss_reduction is None: + loss_reduction = arg.loss_reduction + elif loss_reduction != arg.loss_reduction: + raise RuntimeError("Expected ExpandedWeights to all have the same loss_reduction argument but got one" + f"with {loss_reduction} and one with {arg.loss_reduction}") + + unexpanded_args = tuple(arg.orig_weight if isinstance(arg, ExpandedWeight) else arg for arg in expanded_args) + unexpanded_kwargs = {name: arg.orig_weight if isinstance(arg, ExpandedWeight) else arg + for (name, arg) in expanded_kwargs.items()} + return unexpanded_args, unexpanded_kwargs + +def maybe_scale_by_batch_size(grad_sample, expanded_weight): + if expanded_weight.loss_reduction == "mean": + return grad_sample * expanded_weight.batch_size + else: + return grad_sample + +def set_grad_sample_if_exists(maybe_expanded_weight, per_sample_grad_fn): + unpacked = unpack_expanded_weight_or_tensor(maybe_expanded_weight) + if isinstance(maybe_expanded_weight, ExpandedWeight): + grad_sample_contribution = maybe_scale_by_batch_size(per_sample_grad_fn(unpacked), maybe_expanded_weight) + + if maybe_expanded_weight.batch_size > grad_sample_contribution.shape[0]: + # this only passes the other checks if the arg allows smaller batch sizes + intermediate = torch.zeros(maybe_expanded_weight.batch_size, *grad_sample_contribution.shape[1:], + dtype=grad_sample_contribution.dtype, + device=grad_sample_contribution.device) + intermediate[:grad_sample_contribution.shape[0]] = grad_sample_contribution + grad_sample_contribution = intermediate + + if hasattr(unpacked, "grad_sample") and unpacked.grad_sample is not None: + unpacked.grad_sample = unpacked.grad_sample + grad_sample_contribution + else: + unpacked.grad_sample = grad_sample_contribution + +def unpack_expanded_weight_or_tensor(maybe_expanded_weight, func=lambda x: x): + if isinstance(maybe_expanded_weight, ExpandedWeight): + orig_weight = maybe_expanded_weight.orig_weight + return func(orig_weight) + elif isinstance(maybe_expanded_weight, torch.Tensor) and not maybe_expanded_weight.requires_grad: + return func(maybe_expanded_weight) + elif isinstance(maybe_expanded_weight, torch.Tensor): + raise RuntimeError("ExpandedWeights currently does not support a mixture of ExpandedWeight parameters " + "and normal Parameters. 
Please file and issue with pytorch/pytorch") + + + +def sum_over_all_but_batch_and_last_n( + tensor: torch.Tensor, n_dims: int +) -> torch.Tensor: + r""" + Calculate the sum over all dimensions, except the first (batch dimension), and excluding the last n_dims. + + This function will ignore the first dimension and it will + not aggregate over the last n_dims dimensions. + Args: + tensor: An input tensor of shape ``(B, ..., X[n_dims-1])``. + n_dims: Number of dimensions to keep. + Example: + >>> tensor = torch.ones(1, 2, 3, 4, 5) + >>> sum_over_all_but_batch_and_last_n(tensor, n_dims=2).shape + torch.Size([1, 4, 5]) + Returns: + A tensor of shape ``(B, ..., X[n_dims-1])`` + """ + if tensor.dim() == n_dims + 1: + return tensor + else: + dims = list(range(1, tensor.dim() - n_dims)) + return tensor.sum(dim=dims) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/group_norm_expanded_weights.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/group_norm_expanded_weights.py new file mode 100644 index 0000000000000000000000000000000000000000..fe29b1eafbe2c0be87a96f4e24d8c026b310b3d7 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/group_norm_expanded_weights.py @@ -0,0 +1,64 @@ +from functools import reduce +import operator +import torch +import torch.nn.functional as F +from .expanded_weights_impl import ExpandedWeight, implements_per_sample_grads +from .expanded_weights_utils import standard_kwargs, \ + forward_helper, set_grad_sample_if_exists, unpack_expanded_weight_or_tensor +from typing import List, Optional + +@implements_per_sample_grads(F.group_norm) +class GroupNormPerSampleGrad(torch.autograd.Function): + @staticmethod + def forward(ctx, kwarg_names, _, *expanded_args_and_kwargs): + expanded_args, expanded_kwargs = standard_kwargs(kwarg_names, expanded_args_and_kwargs) + input, num_groups = expanded_args + N = input.shape[0] + C = input.shape[1] + HxW = reduce(operator.mul, input.shape[2:], 1) + weight, bias, eps = expanded_kwargs['weight'], expanded_kwargs['bias'], expanded_kwargs['eps'] + output, mean, rstd = forward_helper(torch.native_group_norm, (input, weight, bias, N, C, HxW, num_groups, eps), {}) + ctx.input, ctx.num_groups = input, num_groups + ctx.weight, ctx.eps = weight, eps + ctx.mean, ctx.rstd = mean, rstd + if isinstance(bias, ExpandedWeight): + ctx.bias = bias + if input.requires_grad and isinstance(weight, ExpandedWeight): + ctx.weight = weight + return output + + @staticmethod + def backward(ctx, grad_output): + input, num_groups = ctx.input, ctx.num_groups + weight, bias, eps = ctx.weight, ctx.bias, ctx.eps + mean, rstd = ctx.mean, ctx.rstd + + results: List[Optional[torch.Tensor]] = [] + results.append(None) # for kwarg names + results.append(None) # for op reference + + if input.requires_grad: + weight_c = unpack_expanded_weight_or_tensor(weight, lambda t: t.contiguous()) + input_c = input.contiguous() + grad_output_c = grad_output.contiguous() if grad_output is not None else None + N = input.shape[0] + C = input.shape[1] + HxW = 1 + for s in input.shape[2:]: + HxW *= s + bw_fn = torch.ops.aten.native_group_norm_backward + results.append(bw_fn(grad_output_c, input_c, + mean, rstd, weight_c, N, C, HxW, num_groups, (True, False, False))[0]) + else: + results.append(None) + + # weight and bias don't compute batched gradients; no other arguments are differentiable + results = results + [None] * 4 + + # set grad_sample field for weight and bias with per sample 
gradients + if hasattr(ctx, "weight"): + set_grad_sample_if_exists(weight, + lambda _: torch.einsum("ni...->ni", F.group_norm(input, num_groups, eps=eps) * grad_output)) + if hasattr(ctx, "bias"): + set_grad_sample_if_exists(bias, lambda _: torch.einsum("ni...->ni", grad_output)) + return tuple(results) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/instance_norm_expanded_weights.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/instance_norm_expanded_weights.py new file mode 100644 index 0000000000000000000000000000000000000000..f3e68b940660263f8a9ad13fe109f82c6338de1c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/instance_norm_expanded_weights.py @@ -0,0 +1,60 @@ +from functools import partial +import torch +import torch.nn.functional as F +from .expanded_weights_impl import implements_per_sample_grads +from .expanded_weights_utils import \ + forward_helper, set_grad_sample_if_exists, standard_kwargs, unpack_expanded_weight_or_tensor +from typing import List, Optional + +@implements_per_sample_grads(F.instance_norm) +class InstanceNormPerSampleGrad(torch.autograd.Function): + @staticmethod + def forward(ctx, kwarg_names, _, *expanded_args_and_kwargs): + instance_norm = partial(torch.instance_norm, cudnn_enabled=True) + expanded_args, expanded_kwargs = standard_kwargs(kwarg_names, expanded_args_and_kwargs) + output = forward_helper(instance_norm, expanded_args, expanded_kwargs) + ctx.input = expanded_args[0] + ctx.running_mean, ctx.running_var = expanded_kwargs['running_mean'], expanded_kwargs['running_var'] + ctx.weight, ctx.bias, ctx.eps = expanded_kwargs['weight'], expanded_kwargs['bias'], expanded_kwargs['eps'] + return output + + + @staticmethod + def backward(ctx, grad_output): + input, running_mean, running_var = ctx.input, ctx.running_mean, ctx.running_var + weight, bias, eps = ctx.weight, ctx.bias, ctx.eps + + results: List[Optional[torch.Tensor]] = [] + results.append(None) # for kwarg names + results.append(None) # for op reference + if input.requires_grad: + b = input.shape[0] + c = input.shape[1] + new_shape = (1, b * c, *input.shape[2:]) + + weight_ = unpack_expanded_weight_or_tensor(weight, lambda orig_weight: orig_weight.repeat(b)) + running_mean_ = running_mean.repeat(b) if running_mean is not None else None + running_var_ = running_var.repeat(b) if running_var is not None else None + input_reshaped = input.contiguous().view(new_shape) + grad_output_reshaped = grad_output.contiguous().view(new_shape) + mean = torch.mean(input_reshaped, (0,) + tuple(range(2, input.dim())), False) + var = torch.var(input_reshaped, (0,) + tuple(range(2, input.dim())), keepdim=False, unbiased=False) + rstd = 1 / torch.sqrt(var + eps) + + # must use native batch norm since it supports all inputs. 
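Both the group-norm code above and the instance-norm code below compute the per-sample bias gradient as ``torch.einsum("ni...->ni", grad_output)``, i.e. a sum of ``grad_output`` over every dimension except batch and channel. A small sketch checking that against per-sample autograd; the shapes are illustrative only and not part of the patch:

import torch
import torch.nn.functional as F

torch.manual_seed(0)
n, c, h, w, groups = 2, 4, 3, 3, 2
x = torch.randn(n, c, h, w)
weight = torch.ones(c)
grad_output = torch.randn(n, c, h, w)

# Per-sample bias gradient: sum over everything but batch and channel.
bias_grad_sample = torch.einsum("ni...->ni", grad_output)

for i in range(n):
    bias = torch.zeros(c, requires_grad=True)
    F.group_norm(x[i:i + 1], groups, weight, bias).backward(grad_output[i:i + 1])
    assert torch.allclose(bias_grad_sample[i], bias.grad)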
This may have used cuda or openmi during the forward but + # it didn't save the metadata, so we don't know during the backward + res = torch.ops.aten.native_batch_norm_backward( + grad_output_reshaped, input_reshaped, weight_, running_mean_, running_var_, + mean, rstd, True, eps, (True, False, False)) + results.append(res[0].reshape(input.shape)) + else: + results.append(None) + + # weight and bias don't compute batched gradients; no other arguments are differentiable (2 are not saved from the forward) + results = results + [None] * 7 + + # set grad_sample field for weight and bias with per sample gradients + set_grad_sample_if_exists(weight, + lambda _: torch.einsum("ni...->ni", F.instance_norm(input, eps=eps) * grad_output)) + set_grad_sample_if_exists(bias, lambda _: torch.einsum("ni...->ni", grad_output)) + return tuple(results) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/layer_norm_expanded_weights.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/layer_norm_expanded_weights.py new file mode 100644 index 0000000000000000000000000000000000000000..f2ead2d4c08fb03aafec2469d86c672ebe9bb222 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/layer_norm_expanded_weights.py @@ -0,0 +1,59 @@ + +import torch +import torch.nn.functional as F +from .expanded_weights_impl import ExpandedWeight, implements_per_sample_grads +from .expanded_weights_utils import forward_helper, set_grad_sample_if_exists, \ + standard_kwargs, sum_over_all_but_batch_and_last_n, unpack_expanded_weight_or_tensor +from typing import List, Optional + +@implements_per_sample_grads(F.layer_norm) +class LayerNormPerSampleGrad(torch.autograd.Function): + @staticmethod + def forward(ctx, kwarg_names, _, *expanded_args_and_kwargs): + expanded_args, expanded_kwargs = standard_kwargs(kwarg_names, expanded_args_and_kwargs) + input = expanded_args[0] + normalized_shape = expanded_args[1] + if len(input.shape) <= len(normalized_shape): + raise RuntimeError("Expanded Weights: Layer norm should not normalize over batch dimension for per sample gradient" + f"computations but got that normalized shape, {normalized_shape}, matched input shape.") + output, mean, rstd = forward_helper(torch.native_layer_norm, expanded_args, expanded_kwargs) + ctx.args = expanded_args + + if input.requires_grad or isinstance(expanded_kwargs['weight'], ExpandedWeight): + ctx.weight = expanded_kwargs['weight'] + if input.requires_grad or isinstance(expanded_kwargs['bias'], ExpandedWeight): + ctx.bias = expanded_kwargs['bias'] + ctx.eps = expanded_kwargs['eps'] + ctx.mean, ctx.rstd = mean, rstd + return output + + + @staticmethod + def backward(ctx, grad_output): + + def weight_per_sample_grad(weight): + return sum_over_all_but_batch_and_last_n(F.layer_norm(input, normalized_shape, eps=ctx.eps) * grad_output, weight.dim()) + + input, normalized_shape = ctx.args + mean, rstd = ctx.mean, ctx.rstd + + results: List[Optional[torch.Tensor]] = [] + results.append(None) # for kwarg names + results.append(None) # for op reference + if input.requires_grad: + weight_ = unpack_expanded_weight_or_tensor(ctx.weight) + bias_ = unpack_expanded_weight_or_tensor(ctx.bias) + results.append(torch.ops.aten.native_layer_norm_backward( + grad_output, input, normalized_shape, mean, rstd, weight_, bias_, (True, False, False))[0]) + else: + results.append(None) + + # weight and bias don't compute batched gradients; no other arguments are differentiable + results = results + 
[None] * 4 + + # set grad_sample field for weight and bias with per sample gradients + if hasattr(ctx, "weight"): + set_grad_sample_if_exists(ctx.weight, weight_per_sample_grad) + if hasattr(ctx, "bias"): + set_grad_sample_if_exists(ctx.bias, lambda bias: sum_over_all_but_batch_and_last_n(grad_output, bias.dim())) + return tuple(results) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/linear_expanded_weights.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/linear_expanded_weights.py new file mode 100644 index 0000000000000000000000000000000000000000..c2cbae63f33651a0f44e287cb0fa6d5d4a25bc62 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/linear_expanded_weights.py @@ -0,0 +1,44 @@ +import torch +import torch.nn.functional as F +from .expanded_weights_impl import implements_per_sample_grads +from .expanded_weights_utils import \ + forward_helper, set_grad_sample_if_exists, unpack_expanded_weight_or_tensor, is_batch_first +from typing import List, Optional + +@implements_per_sample_grads(F.linear) +class LinearPerSampleGrad(torch.autograd.Function): + @staticmethod + def forward(ctx, _, __, *expanded_args_and_kwargs): + if len(expanded_args_and_kwargs[0].shape) <= 1: + raise RuntimeError("Input does not have a batch dimension. Expanded Weights expected input " + f"of at least rank 2, got of rank {len(expanded_args_and_kwargs[0].shape)}") + expanded_kwargs = {'bias': expanded_args_and_kwargs[2] if len(expanded_args_and_kwargs) == 3 else None} + expanded_args = expanded_args_and_kwargs[:2] + ctx.batch_first = is_batch_first(expanded_args_and_kwargs) + output = forward_helper(F.linear, expanded_args, expanded_kwargs) + ctx.args = expanded_args + ctx.kwargs = expanded_kwargs + return output + + @staticmethod + def backward(ctx, grad_output): + input, weight = ctx.args + bias = ctx.kwargs['bias'] + results: List[Optional[torch.Tensor]] = [] + results.append(None) # for kwarg_names + results.append(None) # for op reference + + if input.requires_grad: + results.append(grad_output.matmul(unpack_expanded_weight_or_tensor(weight))) + else: + results.append(None) + results.extend([None] * 2) # weight and bias don't compute batched gradients + + if not ctx.batch_first: + grad_output = grad_output.transpose(0, 1) + input = input.transpose(0, 1) + + # weight and bias get their grad_sample fields set directly if they exist + set_grad_sample_if_exists(weight, lambda _: torch.einsum("n...i,n...j->nij", grad_output, input)) + set_grad_sample_if_exists(bias, lambda _: torch.einsum("n...k->nk", grad_output)) + return tuple(results) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/_named_member_accessor.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/_named_member_accessor.py new file mode 100644 index 0000000000000000000000000000000000000000..3a82b2b426aa0a1bdbe64cdc177ba42219b78fdc --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/_named_member_accessor.py @@ -0,0 +1,374 @@ +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
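The per-sample gradients for ``F.linear`` in the file above reduce to one outer product (for the weight) and one sum (for the bias) per sample, which is exactly what the two einsum expressions compute. A minimal sketch checking them against per-sample autograd; the toy sizes are illustrative only and not part of the patch:

import torch
import torch.nn.functional as F

torch.manual_seed(0)
batch, in_features, out_features = 4, 3, 2
x = torch.randn(batch, in_features)
weight = torch.randn(out_features, in_features)
bias = torch.randn(out_features)
grad_output = torch.randn(batch, out_features)

weight_grad_sample = torch.einsum("n...i,n...j->nij", grad_output, x)
bias_grad_sample = torch.einsum("n...k->nk", grad_output)

for i in range(batch):
    w = weight.clone().requires_grad_(True)
    b = bias.clone().requires_grad_(True)
    F.linear(x[i], w, b).backward(grad_output[i])
    assert torch.allclose(weight_grad_sample[i], w.grad)
    assert torch.allclose(bias_grad_sample[i], b.grad)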
+ +from typing import Dict, Iterable, List, Tuple + +import torch + + +_MISSING: torch.Tensor = object() # type: ignore[assignment] + + +def set_tensor(module: "torch.nn.Module", name: str, tensor: torch.Tensor) -> None: + if not isinstance(module, torch.nn.Module): + raise TypeError(f"{module} is not an instance of torch.nn.Module") + if not isinstance(tensor, torch.Tensor) and tensor is not None: + raise TypeError(f"{tensor} is not an instance of torch.Tensor") + if "." in name: + raise KeyError('tensor name can\'t contain "."') + if name == "": + raise KeyError('tensor name can\'t be empty string ""') + if name in module._parameters: + module._parameters[name] = tensor # type: ignore[assignment] + elif name in module._buffers: + module._buffers[name] = tensor + else: + setattr(module, name, tensor) + + +def swap_tensor( + module: "torch.nn.Module", + name: str, + tensor: torch.Tensor, + allow_missing: bool = False, +) -> torch.Tensor: + if not isinstance(module, torch.nn.Module): + raise TypeError(f"{module} is not an instance of torch.nn.Module") + if ( + tensor is not _MISSING + and not isinstance(tensor, torch.Tensor) + and tensor is not None + ): + raise TypeError(f"{tensor} is not an instance of torch.Tensor") + if "." in name: + raise KeyError('tensor name can\'t contain "."') + if name == "": + raise KeyError('tensor name can\'t be empty string ""') + + orig_tensor: torch.Tensor + if name in module._parameters: + orig_tensor = module._parameters[name] # type: ignore[assignment] + if tensor is not _MISSING: + module._parameters[name] = tensor # type: ignore[assignment] + else: + del module._parameters[name] + elif name in module._buffers: + orig_tensor = module._buffers[name] # type: ignore[assignment] + if tensor is not _MISSING: + module._buffers[name] = tensor + else: + del module._buffers[name] + else: + try: + orig_tensor = getattr(module, name) + except AttributeError as ex: + if not allow_missing: + raise AttributeError( + f"{module._get_name()} has no attribute `{name}`" + ) from ex + orig_tensor = _MISSING + if ( + orig_tensor is not _MISSING + and not isinstance(orig_tensor, torch.Tensor) + and orig_tensor is not None + ): + raise TypeError( + f"attribute `{name}`: {orig_tensor} is not an instance of torch.Tensor" + ) + if tensor is not _MISSING: + setattr(module, name, tensor) + elif hasattr(module, name): + delattr(module, name) + return orig_tensor + + +def swap_submodule( + module: "torch.nn.Module", + name: str, + submodule: "torch.nn.Module", +) -> "torch.nn.Module": + if not isinstance(module, torch.nn.Module): + raise TypeError(f"{module} is not an instance of torch.nn.Module") + if not isinstance(submodule, torch.nn.Module): + raise TypeError(f"{submodule} is not an instance of torch.nn.Module") + if "." in name: + raise KeyError('submodule name can\'t contain "."') + if name == "": + raise KeyError('submodule name can\'t be empty string ""') + if name not in module._modules: + raise KeyError(f"submodule {name} does not exist") + + orig_submodule = module._modules[name] + if not isinstance(orig_submodule, torch.nn.Module): + raise TypeError(f"{name} attribute is not an instance of torch.nn.Module") + module._modules[name] = submodule + return orig_submodule + + +class NamedMemberAccessor: + """ + A class that provides a way to access the submodules and parameters/buffers of a module. + + It provides caching mechanism to speed up submodule lookups. + This is useful for functional programming to manipulate the module state. 
+ """ + + def __init__(self, module: "torch.nn.Module") -> None: + self.module = module + self.memo: Dict[str, torch.nn.Module] = {} + + # Nested attribute access + + def get_submodule(self, name: str) -> "torch.nn.Module": + """ + Return the submodule specified by the given path. + + For example, to get the submodule mod.layer1.conv1, + use accessor.get_submodule("layer1.conv1") + + Compare to mod.get_submodule("layer1.conv1"), this method will cache the + intermediate submodule access to speed up future lookups. + """ + if not name: + return self.module + + try: + return self.memo[name] + except KeyError: + prefix, dot, attr = name.rpartition(".") + if dot: + module = self.get_submodule(prefix) + else: + module = self.module + try: + submodule = getattr(module, attr) + except AttributeError as ex: + raise AttributeError( + f"{module._get_name()} has no attribute `{attr}`" + ) from ex + if not isinstance(submodule, torch.nn.Module): + raise TypeError( # noqa: TRY200 + f"submodule `{name}`: {submodule} is not an instance of torch.nn.Module" + ) + self.memo[name] = submodule + return submodule + + def swap_submodule(self, path: str, value: "torch.nn.Module") -> "torch.nn.Module": + """ + Swap the submodule specified by the given ``path`` to ``value``. + + For example, to swap the attribute mod.layer1.conv1 use + ``accessor.swap_submodule("layer1.conv1", conv2)``. + """ + prefix, _, attr = path.rpartition(".") + return swap_submodule(self.get_submodule(prefix), attr, value) + + def get_tensor(self, name: str) -> torch.Tensor: + """ + Get the tensor specified by the given path to value. + + For example, to get the attribute mod.layer1.conv1.weight, + use accessor.get_tensor('layer1.conv1.weight') + + Compare to mod.get_parameter("layer1.conv1.weight"), this method will + cache the intermediate submodule access to speed up future lookups. + """ + prefix, _, attr = name.rpartition(".") + submodule = self.get_submodule(prefix) + try: + tensor = getattr(submodule, attr) + except AttributeError as ex: + raise AttributeError( + f"{submodule._get_name()} has no attribute `{name}`" + ) from ex + if not isinstance(tensor, torch.Tensor) and tensor is not None: + raise TypeError(f"{tensor} is not an instance of torch.Tensor") + return tensor # type: ignore[return-value] + + def set_tensor(self, name: str, value: torch.Tensor) -> None: + """ + Set the attribute specified by the given path to value. + + For example, to set the attribute mod.layer1.conv1.weight, + use accessor.set_tensor("layer1.conv1.weight", value) + """ + prefix, _, attr = name.rpartition(".") + set_tensor(self.get_submodule(prefix), attr, value) + + def del_tensor(self, name: str) -> None: + """ + Delete the attribute specified by the given path. + + For example, to delete the attribute mod.layer1.conv1.weight, + use accessor.del_tensor("layer1.conv1.weight") + """ + prefix, _, attr = name.rpartition(".") + submodule = self.get_submodule(prefix) + try: + delattr(submodule, attr) + except AttributeError as ex: + raise AttributeError( + f"{submodule._get_name()} has no attribute `{name}`" + ) from ex + + def swap_tensor( + self, name: str, value: torch.Tensor, allow_missing: bool = False + ) -> torch.Tensor: + """ + Swap the attribute specified by the given path to value. 
+ + For example, to swap the attribute mod.layer1.conv1.weight, + use accessor.swap_tensor("layer1.conv1.weight", value) + """ + prefix, _, attr = name.rpartition(".") + return swap_tensor( + self.get_submodule(prefix), attr, value, allow_missing=allow_missing + ) + + # Batched operations + + def get_tensors(self, names: Iterable[str]) -> List[torch.Tensor]: + """ + Get the tensors specified by the given paths. + + For example, to get the attributes mod.layer1.conv1.weight and + mod.layer1.conv1.bias, use accessor.get_tensors(["layer1.conv1.weight", + "layer1.conv1.bias"]) + """ + return [self.get_tensor(name) for name in names] + + def set_tensors(self, names: Iterable[str], values: Iterable[torch.Tensor]) -> None: + """ + Set the attributes specified by the given paths to values. + + For example, to set the attributes mod.layer1.conv1.weight and + mod.layer1.conv1.bias, use accessor.set_tensors(["layer1.conv1.weight", + "layer1.conv1.bias"], [weight, bias]) + """ + if not isinstance(names, (list, tuple)): + names = list(names) + if not isinstance(values, (list, tuple)): + values = list(values) + assert len(names) == len(values), "names and values must have the same length" + + for name, value in zip(names, values): + self.set_tensor(name, value) + + def set_tensors_dict(self, named_tensors: Dict[str, torch.Tensor]) -> None: + """ + Set the attributes specified by the given paths to values. + + For example, to set the attributes mod.layer1.conv1.weight and + mod.layer1.conv1.bias, use accessor.set_tensors_dict({ + "layer1.conv1.weight": weight, + "layer1.conv1.bias": bias, + }) + """ + for name, value in named_tensors.items(): + self.set_tensor(name, value) + + def del_tensors(self, names: Iterable[str]) -> None: + """ + Delete the attributes specified by the given paths. + + For example, to delete the attributes mod.layer1.conv1.weight and + mod.layer1.conv1.bias, use accessor.del_tensors(["layer1.conv1.weight", + "layer1.conv1.bias"]) + """ + for name in names: + self.del_tensor(name) + + def swap_tensors( + self, + names: Iterable[str], + values: Iterable[torch.Tensor], + allow_missing: bool = False, + ) -> List[torch.Tensor]: + """ + Swap the attributes specified by the given paths to values. + + For example, to swap the attributes mod.layer1.conv1.weight and + mod.layer1.conv1.bias, use accessor.swap_tensors(["layer1.conv1.weight", + "layer1.conv1.bias"], [weight, bias]) + """ + if not isinstance(names, (list, tuple)): + names = list(names) + if not isinstance(values, (list, tuple)): + values = list(values) + assert len(names) == len(values), "names and values must have the same length" + + return [ + self.swap_tensor(name, value, allow_missing=allow_missing) + for name, value in zip(names, values) + ] + + def swap_tensors_dict( + self, named_tensors: Dict[str, torch.Tensor], allow_missing: bool = False + ) -> Tuple[Dict[str, torch.Tensor], List[str]]: + """ + Swap the attributes specified by the given paths to values. 
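Taken together, the accessor methods let callers read, replace, and restore module state by dotted path, which is what the functional-call machinery builds on. A small usage sketch; the three-layer model here is purely hypothetical and not part of the patch:

import torch
from torch.nn.utils._named_member_accessor import NamedMemberAccessor

model = torch.nn.Sequential(torch.nn.Linear(4, 4), torch.nn.ReLU(), torch.nn.Linear(4, 2))
accessor = NamedMemberAccessor(model)

new_weight = torch.zeros_like(accessor.get_tensor("0.weight"))
old_weight = accessor.swap_tensor("0.weight", new_weight)  # returns the original tensor
assert torch.equal(accessor.get_tensor("0.weight"), new_weight)
accessor.swap_tensor("0.weight", old_weight)  # restore the original parameter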
+ + For example, to swap the attributes mod.layer1.conv1.weight and + mod.layer1.conv1.bias, use accessor.swap_tensors_dict({ + "layer1.conv1.weight": weight, + "layer1.conv1.bias": bias, + }) + """ + orig_named_tensors = {} + missing_keys = [] + try: + for name, tensor in named_tensors.items(): + orig_tensor = self.swap_tensor(name, tensor, allow_missing=True) + if orig_tensor is _MISSING: + missing_keys.append(name) + orig_named_tensors[name] = orig_tensor + except Exception: + # Swap back if any exception occurs + for name, orig_tensor in orig_named_tensors.items(): + self.swap_tensor(name, orig_tensor, allow_missing=True) + raise + if missing_keys and not allow_missing: + # Swap back if any key is missing when allow_missing is False + for name, orig_tensor in orig_named_tensors.items(): + self.swap_tensor(name, orig_tensor, allow_missing=True) + raise RuntimeError(f"Missing key(s): {', '.join(map(repr, missing_keys))}.") + return orig_named_tensors, missing_keys + + def check_keys(self, keys: Iterable[str]) -> Tuple[List[str], List[str]]: + """Check that the given keys are valid.""" + keys = set(keys) + valid_keys = {name for name, _ in self.named_tensors(remove_duplicate=False)} + missing_keys = valid_keys - keys + unexpected_keys = keys - valid_keys + return sorted(missing_keys), sorted(unexpected_keys) + + # Shortcut methods + + def named_parameters( + self, + remove_duplicate: bool = True, + ) -> Iterable[Tuple[str, torch.Tensor]]: + """Iterate over all the parameters in the module.""" + yield from self.module.named_parameters(remove_duplicate=remove_duplicate) + + def named_buffers( + self, + remove_duplicate: bool = True, + ) -> Iterable[Tuple[str, torch.Tensor]]: + """Iterate over all the buffers in the module.""" + yield from self.module.named_buffers(remove_duplicate=remove_duplicate) + + def named_tensors( + self, + remove_duplicate: bool = True, + ) -> Iterable[Tuple[str, torch.Tensor]]: + """Iterate over all the tensors in the module.""" + yield from self.module.named_parameters(remove_duplicate=remove_duplicate) + yield from self.module.named_buffers(remove_duplicate=remove_duplicate) + + def named_modules( + self, + remove_duplicate: bool = True, + ) -> Iterable[Tuple[str, "torch.nn.Module"]]: + """Iterate over all the modules in the module.""" + yield from self.module.named_modules(remove_duplicate=remove_duplicate) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/_per_sample_grad.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/_per_sample_grad.py new file mode 100644 index 0000000000000000000000000000000000000000..0644ab5d2535e07360c77cebe838ab680c842362 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/_per_sample_grad.py @@ -0,0 +1,102 @@ +import functools + +import torch +from torch.nn.utils._expanded_weights.expanded_weights_impl import ExpandedWeight + +from torch.utils import _pytree as pytree + + +# dependency on `functional_call` means that this can't be exposed in utils +# without creating circular dependency +def call_for_per_sample_grads(module, *, batch_size=None, loss_reduction="sum", batch_first=True): + r""" + Return a forward function for a module, populating grad_sample with per sample gradients on backward invocation. + + Args: + module: The ``nn.Module`` to get per sample gradients with respect to. All trainable + parameters will compute per sample gradients, located in a ``grad_sample`` + field when ``backward`` is invoked + batch_size: The batch size of the input. 
If None is passed, all tensor arguments in args and kwargs must have + the same batch size, which is the size of the first dimension. Otherwise, it must be passed manually. + Default: None + loss_reduction: Indicates if the loss reduction (for aggregating the gradients) is a sum or a mean operation. If + "mean", per sample gradients will be scaled by the batch size to offset the crossbatch interaction from + running mean across a batch. Must be "mean" or "sum". Default: "sum" + batch_first: Indicates if the batch dimension is the first dimension. If True, the batch dimension is the first + dimension. If False, it's the second dimension. Default: True. + + Examples:: + >>> # xdoctest: +SKIP + >>> model = nn.Linear(4, 3) + >>> batched_input = torch.randn(5, 4) # batch size of 5 + >>> res = call_for_per_sample_grads(model)(batched_input).sum() + >>> res.backward() + >>> assert model.weight.shape == (3, 4) + >>> assert model.weight.grad_sample.shape == (5, 3, 4) + >>> assert model.weight.grad is None + >>> assert model.bias.shape == (3,) + >>> assert model.bias.grad_sample.shape == (5, 3) + >>> assert model.bias.grad is None + + An example using "mean" loss reduction. The grad_sample fields will be scaled by batch_size from what they would be + if we ran the same code with loss_reduction="sum". This is because the mean at the end will scale all + grad_outputs by 1 / batch_size from cross batch interaction. + >>> model = nn.Linear(4, 3) + >>> batched_input = torch.randn(5, 4) # batch size of 5 + >>> res = call_for_per_sample_grads(model, 5, loss_reduction="mean")(batched_input).mean() + >>> res.backward() + + Note:: + Does not work with any `nn.RNN`, including `nn.GRU` or `nn.LSTM`. Please use custom + rewrites that wrap an `nn.Linear` module. See Opacus for an example + """ + + def maybe_build_expanded_weight(og_tensor, batch_size): + if og_tensor.requires_grad: + return ExpandedWeight(og_tensor, batch_size, loss_reduction) + else: + return og_tensor + + def compute_batch_size(*args, **kwargs): + args_and_kwargs = pytree.arg_tree_leaves(*args, **kwargs) + batch_size = None + for arg in args_and_kwargs: + if not isinstance(arg, torch.Tensor): + continue + + arg_batch_size = arg.shape[0] if batch_first else arg.shape[1] + if batch_size is not None and batch_size != arg_batch_size: + raise RuntimeError("When computing batch size, found at least one input with batch size " + f"{batch_size} and one with batch size {arg_batch_size}. Please specify it " + "explicitly using the batch size kwarg in call_for_per_sample_grads") + batch_size = arg_batch_size + if batch_size is None: + raise RuntimeError("Unable to find a tensor in the passed args and kwargs. They may not be pytree-able " + "and so ExpandedWeights cannot compute the batch size from the inputs. 
Please specify " + "it explicitly") + return batch_size + + if loss_reduction not in ["sum", "mean"]: + raise RuntimeError(f"Expected loss_reduction argument to be sum or mean, got {loss_reduction}") + + if not isinstance(module, torch.nn.Module): + raise RuntimeError(f"Module passed must be nn.Module, got {type(module).__name__}") + if not (batch_size is None or isinstance(batch_size, int)): + raise RuntimeError(f"Batch size passed must be None or an integer, got {type(batch_size).__name__}") + if batch_size is not None and batch_size < 1: + raise RuntimeError(f"Batch size must be positive, got {batch_size}") + for weight in module.parameters(): + if hasattr(weight, "grad_sample") and weight.grad_sample is not None: # type: ignore[attr-defined] + raise RuntimeError("Current Expanded Weights accumulates the gradients, which will be incorrect for multiple " + f"calls without clearing gradients. Please clear out the grad_sample parameter of {weight} or " + "post an issue to pytorch/pytorch to prioritize correct behavior") + + @functools.wraps(module.forward) + def wrapper(*args, **kwargs): + wrapper_batch_size = batch_size + if wrapper_batch_size is None: + wrapper_batch_size = compute_batch_size(*args, **kwargs) + + params = {name: maybe_build_expanded_weight(value, wrapper_batch_size) for (name, value) in module.named_parameters()} + return torch.func.functional_call(module, params, args, kwargs) + return wrapper diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/clip_grad.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/clip_grad.py new file mode 100644 index 0000000000000000000000000000000000000000..6549a6f3e2c8db1c9f46ba5f6a28d641e8871f6f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/clip_grad.py @@ -0,0 +1,151 @@ +import warnings +import functools +from typing import Union, Iterable, List, Dict, Tuple, Optional, cast + +import torch +from torch import Tensor +from torch.utils._foreach_utils import _group_tensors_by_device_and_dtype, _has_foreach_support, _device_has_foreach_support + +_tensor_or_tensors = Union[torch.Tensor, Iterable[torch.Tensor]] + +__all__ = ['clip_grad_norm_', 'clip_grad_norm', 'clip_grad_value_'] + +def _no_grad(func): + """ + This wrapper is needed to avoid a circular import when using @torch.no_grad on the exposed functions + clip_grad_norm_ and clip_grad_value_ themselves. + """ + def _no_grad_wrapper(*args, **kwargs): + with torch.no_grad(): + return func(*args, **kwargs) + functools.update_wrapper(_no_grad_wrapper, func) + return _no_grad_wrapper + +@_no_grad +def clip_grad_norm_( + parameters: _tensor_or_tensors, max_norm: float, norm_type: float = 2.0, + error_if_nonfinite: bool = False, foreach: Optional[bool] = None) -> torch.Tensor: + r"""Clip the gradient norm of an iterable of parameters. + + The norm is computed over all gradients together, as if they were + concatenated into a single vector. Gradients are modified in-place. + + Args: + parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a + single Tensor that will have gradients normalized + max_norm (float): max norm of the gradients + norm_type (float): type of the used p-norm. Can be ``'inf'`` for + infinity norm. + error_if_nonfinite (bool): if True, an error is thrown if the total + norm of the gradients from :attr:`parameters` is ``nan``, + ``inf``, or ``-inf``. Default: False (will switch to True in the future) + foreach (bool): use the faster foreach-based implementation. 
+ If ``None``, use the foreach implementation for CUDA and CPU native tensors and silently + fall back to the slow implementation for other device types. + Default: ``None`` + + Returns: + Total norm of the parameter gradients (viewed as a single vector). + """ + if isinstance(parameters, torch.Tensor): + parameters = [parameters] + grads = [p.grad for p in parameters if p.grad is not None] + max_norm = float(max_norm) + norm_type = float(norm_type) + if len(grads) == 0: + return torch.tensor(0.) + first_device = grads[0].device + grouped_grads: Dict[Tuple[torch.device, torch.dtype], Tuple[List[List[Tensor]], List[int]]] \ + = _group_tensors_by_device_and_dtype([grads]) # type: ignore[assignment] + + norms: List[Tensor] = [] + for ((device, _), ([device_grads], _)) in grouped_grads.items(): # type: ignore[assignment] + if ( + (foreach is None and _has_foreach_support(device_grads, device)) + or (foreach and _device_has_foreach_support(device)) + ): + norms.extend(torch._foreach_norm(device_grads, norm_type)) + elif foreach: + raise RuntimeError(f'foreach=True was passed, but can\'t use the foreach API on {device.type} tensors') + else: + norms.extend([torch.linalg.vector_norm(g, norm_type) for g in device_grads]) + + total_norm = torch.linalg.vector_norm(torch.stack([norm.to(first_device) for norm in norms]), norm_type) + + if error_if_nonfinite and torch.logical_or(total_norm.isnan(), total_norm.isinf()): + raise RuntimeError( + f'The total norm of order {norm_type} for gradients from ' + '`parameters` is non-finite, so it cannot be clipped. To disable ' + 'this error and scale the gradients by the non-finite norm anyway, ' + 'set `error_if_nonfinite=False`') + clip_coef = max_norm / (total_norm + 1e-6) + # Note: multiplying by the clamped coef is redundant when the coef is clamped to 1, but doing so + # avoids a `if clip_coef < 1:` conditional which can require a CPU <=> device synchronization + # when the gradients do not reside in CPU memory. + clip_coef_clamped = torch.clamp(clip_coef, max=1.0) + for ((device, _), ([device_grads], _)) in grouped_grads.items(): # type: ignore[assignment] + if ( + (foreach is None and _has_foreach_support(device_grads, device)) + or (foreach and _device_has_foreach_support(device)) + ): + torch._foreach_mul_(device_grads, clip_coef_clamped.to(device)) + elif foreach: + raise RuntimeError(f'foreach=True was passed, but can\'t use the foreach API on {device.type} tensors') + else: + clip_coef_clamped_device = clip_coef_clamped.to(device) + for g in device_grads: + g.mul_(clip_coef_clamped_device) + + return total_norm + + +def clip_grad_norm( + parameters: _tensor_or_tensors, max_norm: float, norm_type: float = 2., + error_if_nonfinite: bool = False, foreach: Optional[bool] = None) -> torch.Tensor: + r"""Clip the gradient norm of an iterable of parameters. + + .. warning:: + This method is now deprecated in favor of + :func:`torch.nn.utils.clip_grad_norm_`. + """ + warnings.warn("torch.nn.utils.clip_grad_norm is now deprecated in favor " + "of torch.nn.utils.clip_grad_norm_.", stacklevel=2) + return clip_grad_norm_(parameters, max_norm, norm_type, error_if_nonfinite, foreach) + + +@_no_grad +def clip_grad_value_(parameters: _tensor_or_tensors, clip_value: float, foreach: Optional[bool] = None) -> None: + r"""Clip the gradients of an iterable of parameters at specified value. + + Gradients are modified in-place. 
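Both clipping utilities are normally called between ``backward()`` and the optimizer step. A minimal usage sketch; the model and hyperparameters are illustrative only and not part of the patch:

import torch

model = torch.nn.Linear(10, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

loss = model(torch.randn(8, 10)).sum()
loss.backward()

# Rescale all gradients so their combined 2-norm is at most 1.0.
total_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
# Or clamp every gradient element into [-0.5, 0.5].
torch.nn.utils.clip_grad_value_(model.parameters(), clip_value=0.5)
optimizer.step()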
+ + Args: + parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a + single Tensor that will have gradients normalized + clip_value (float): maximum allowed value of the gradients. + The gradients are clipped in the range + :math:`\left[\text{-clip\_value}, \text{clip\_value}\right]` + foreach (bool): use the faster foreach-based implementation + If ``None``, use the foreach implementation for CUDA and CPU native tensors and + silently fall back to the slow implementation for other device types. + Default: ``None`` + """ + if isinstance(parameters, torch.Tensor): + parameters = [parameters] + clip_value = float(clip_value) + + grads = [p.grad for p in parameters if p.grad is not None] + grouped_grads = _group_tensors_by_device_and_dtype([grads]) + + for ((device, _), ([grads], _)) in grouped_grads.items(): # type: ignore[assignment] + if ( + (foreach is None and _has_foreach_support(cast(List[Tensor], grads), device=device)) + or (foreach and _device_has_foreach_support(device)) + ): + torch._foreach_clamp_min_(cast(List[Tensor], grads), -clip_value) + torch._foreach_clamp_max_(cast(List[Tensor], grads), clip_value) + elif foreach: + raise RuntimeError(f'foreach=True was passed, but can\'t use the foreach API on {device.type} tensors') + else: + for grad in grads: + cast(Tensor, grad).clamp_(min=-clip_value, max=clip_value) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/convert_parameters.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/convert_parameters.py new file mode 100644 index 0000000000000000000000000000000000000000..e23352b6b6d9bb2f32df6fb26401e4b8d9281636 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/convert_parameters.py @@ -0,0 +1,83 @@ +import torch +from typing import Iterable, Optional + + +def parameters_to_vector(parameters: Iterable[torch.Tensor]) -> torch.Tensor: + r"""Flatten an iterable of parameters into a single vector. + + Args: + parameters (Iterable[Tensor]): an iterable of Tensors that are the + parameters of a model. + + Returns: + The parameters represented by a single vector + """ + # Flag for the device where the parameter is located + param_device = None + + vec = [] + for param in parameters: + # Ensure the parameters are located in the same device + param_device = _check_param_device(param, param_device) + + vec.append(param.view(-1)) + return torch.cat(vec) + + +def vector_to_parameters(vec: torch.Tensor, parameters: Iterable[torch.Tensor]) -> None: + r"""Copy slices of a vector into an iterable of parameters. + + Args: + vec (Tensor): a single vector representing the parameters of a model. + parameters (Iterable[Tensor]): an iterable of Tensors that are the + parameters of a model. 
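A short sketch of the round trip these two helpers provide, e.g. for code that wants to treat all parameters as one flat vector; the toy model is illustrative only and not part of the patch:

import torch
from torch.nn.utils import parameters_to_vector, vector_to_parameters

model = torch.nn.Linear(3, 2)
vec = parameters_to_vector(model.parameters())
assert vec.numel() == sum(p.numel() for p in model.parameters())  # 3 * 2 + 2 = 8

# Writing a (modified) vector back copies slices into each parameter in order.
vector_to_parameters(vec * 2, model.parameters())
assert torch.allclose(parameters_to_vector(model.parameters()), vec * 2)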
+ """ + # Ensure vec of type Tensor + if not isinstance(vec, torch.Tensor): + raise TypeError(f'expected torch.Tensor, but got: {torch.typename(vec)}') + # Flag for the device where the parameter is located + param_device = None + + # Pointer for slicing the vector for each parameter + pointer = 0 + for param in parameters: + # Ensure the parameters are located in the same device + param_device = _check_param_device(param, param_device) + + # The length of the parameter + num_param = param.numel() + # Slice the vector, reshape it, and replace the old data of the parameter + param.data = vec[pointer:pointer + num_param].view_as(param).data + + # Increment the pointer + pointer += num_param + + +def _check_param_device(param: torch.Tensor, old_param_device: Optional[int]) -> int: + r"""Check if the parameters are located on the same device. + + Currently, the conversion between model parameters and single vector form is not supported + for multiple allocations, e.g. parameters in different GPUs/PrivateUse1s, or mixture of CPU/GPU/PrivateUse1. + + Args: + param ([Tensor]): a Tensor of a parameter of a model + old_param_device (int): the device where the first parameter of a + model is allocated. + + Returns: + old_param_device (int): report device for the first time + """ + # Meet the first parameter + support_device_types = ["cuda", torch._C._get_privateuse1_backend_name()] + if old_param_device is None: + old_param_device = param.get_device() if param.device.type in support_device_types else -1 + else: + warn = False + if param.device.type in support_device_types: # Check if in same GPU/PrivateUse1 + warn = (param.get_device() != old_param_device) + else: # Check if in CPU + warn = (old_param_device != -1) + if warn: + raise TypeError('Found two parameters on different devices, ' + 'this is currently not supported.') + return old_param_device diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/fusion.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/fusion.py new file mode 100644 index 0000000000000000000000000000000000000000..9433d9c376df81787e91a4ca4dd3698107f32bc5 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/fusion.py @@ -0,0 +1,152 @@ +from __future__ import annotations + +import copy +from typing import Optional, Tuple, TypeVar + +import torch + +__all__ = ['fuse_conv_bn_eval', 'fuse_conv_bn_weights', 'fuse_linear_bn_eval', 'fuse_linear_bn_weights'] + +ConvT = TypeVar("ConvT", bound="torch.nn.modules.conv._ConvNd") +LinearT = TypeVar("LinearT", bound="torch.nn.Linear") + +def fuse_conv_bn_eval(conv: ConvT, bn: torch.nn.modules.batchnorm._BatchNorm, transpose: bool = False) -> ConvT: + r"""Fuse a convolutional module and a BatchNorm module into a single, new convolutional module. + + Args: + conv (torch.nn.modules.conv._ConvNd): A convolutional module. + bn (torch.nn.modules.batchnorm._BatchNorm): A BatchNorm module. + transpose (bool, optional): If True, transpose the convolutional weight. Defaults to False. + + Returns: + torch.nn.modules.conv._ConvNd: The fused convolutional module. + + .. note:: + Both ``conv`` and ``bn`` must be in eval mode, and ``bn`` must have its running buffers computed. + """ + assert not (conv.training or bn.training), "Fusion only for eval!" 
+ fused_conv = copy.deepcopy(conv) + + assert bn.running_mean is not None and bn.running_var is not None + fused_conv.weight, fused_conv.bias = fuse_conv_bn_weights( + fused_conv.weight, fused_conv.bias, + bn.running_mean, bn.running_var, bn.eps, bn.weight, bn.bias, transpose) + + return fused_conv + +def fuse_conv_bn_weights( + conv_w: torch.Tensor, + conv_b: Optional[torch.Tensor], + bn_rm: torch.Tensor, + bn_rv: torch.Tensor, + bn_eps: float, + bn_w: Optional[torch.Tensor], + bn_b: Optional[torch.Tensor], + transpose: bool = False +) -> Tuple[torch.nn.Parameter, torch.nn.Parameter]: + r"""Fuse convolutional module parameters and BatchNorm module parameters into new convolutional module parameters. + + Args: + conv_w (torch.Tensor): Convolutional weight. + conv_b (Optional[torch.Tensor]): Convolutional bias. + bn_rm (torch.Tensor): BatchNorm running mean. + bn_rv (torch.Tensor): BatchNorm running variance. + bn_eps (float): BatchNorm epsilon. + bn_w (Optional[torch.Tensor]): BatchNorm weight. + bn_b (Optional[torch.Tensor]): BatchNorm bias. + transpose (bool, optional): If True, transpose the conv weight. Defaults to False. + + Returns: + Tuple[torch.nn.Parameter, torch.nn.Parameter]: Fused convolutional weight and bias. + """ + conv_weight_dtype = conv_w.dtype + conv_bias_dtype = conv_b.dtype if conv_b is not None else conv_weight_dtype + if conv_b is None: + conv_b = torch.zeros_like(bn_rm) + if bn_w is None: + bn_w = torch.ones_like(bn_rm) + if bn_b is None: + bn_b = torch.zeros_like(bn_rm) + bn_var_rsqrt = torch.rsqrt(bn_rv + bn_eps) + + if transpose: + shape = [1, -1] + [1] * (len(conv_w.shape) - 2) + else: + shape = [-1, 1] + [1] * (len(conv_w.shape) - 2) + + fused_conv_w = (conv_w * (bn_w * bn_var_rsqrt).reshape(shape)).to(dtype=conv_weight_dtype) + fused_conv_b = ((conv_b - bn_rm) * bn_var_rsqrt * bn_w + bn_b).to(dtype=conv_bias_dtype) + + return ( + torch.nn.Parameter(fused_conv_w, conv_w.requires_grad), torch.nn.Parameter(fused_conv_b, conv_b.requires_grad) + ) + +def fuse_linear_bn_eval(linear: LinearT, bn: torch.nn.modules.batchnorm._BatchNorm) -> LinearT: + r"""Fuse a linear module and a BatchNorm module into a single, new linear module. + + Args: + linear (torch.nn.Linear): A Linear module. + bn (torch.nn.modules.batchnorm._BatchNorm): A BatchNorm module. + + Returns: + torch.nn.Linear: The fused linear module. + + .. note:: + Both ``linear`` and ``bn`` must be in eval mode, and ``bn`` must have its running buffers computed. + """ + assert not (linear.training or bn.training), "Fusion only for eval!" + fused_linear = copy.deepcopy(linear) + + """ + Linear-BN needs to be fused while preserving the shapes of linear weight/bias. + To preserve the shapes of linear weight/bias, the channel dim of bn needs to be broadcastable with the last dim of linear, + because bn operates over the channel dim, (N, C_in, H, W) while linear operates over the last dim, (*, H_in). + To be broadcastable, the number of features in bn and + the number of output features from linear must satisfy the following condition: + 1. they are equal, or + 2. 
the number of features in bn is 1 + Otherwise, skip the folding path + """ + assert ( + linear.out_features == bn.num_features or bn.num_features == 1 + ), "To fuse, linear.out_features == bn.num_features or bn.num_features == 1" + + assert bn.running_mean is not None and bn.running_var is not None + fused_linear.weight, fused_linear.bias = fuse_linear_bn_weights( + fused_linear.weight, fused_linear.bias, + bn.running_mean, bn.running_var, bn.eps, bn.weight, bn.bias) + + return fused_linear + +def fuse_linear_bn_weights( + linear_w: torch.Tensor, + linear_b: Optional[torch.Tensor], + bn_rm: torch.Tensor, + bn_rv: torch.Tensor, + bn_eps: float, + bn_w: torch.Tensor, + bn_b: torch.Tensor, +) -> Tuple[torch.nn.Parameter, torch.nn.Parameter]: + r"""Fuse linear module parameters and BatchNorm module parameters into new linear module parameters. + + Args: + linear_w (torch.Tensor): Linear weight. + linear_b (Optional[torch.Tensor]): Linear bias. + bn_rm (torch.Tensor): BatchNorm running mean. + bn_rv (torch.Tensor): BatchNorm running variance. + bn_eps (float): BatchNorm epsilon. + bn_w (torch.Tensor): BatchNorm weight. + bn_b (torch.Tensor): BatchNorm bias. + transpose (bool, optional): If True, transpose the conv weight. Defaults to False. + + Returns: + Tuple[torch.nn.Parameter, torch.nn.Parameter]: Fused linear weight and bias. + """ + if linear_b is None: + linear_b = torch.zeros_like(bn_rm) + bn_scale = bn_w * torch.rsqrt(bn_rv + bn_eps) + + fused_w = linear_w * bn_scale.unsqueeze(-1) + fused_b = (linear_b - bn_rm) * bn_scale + bn_b + + return torch.nn.Parameter(fused_w, linear_w.requires_grad), torch.nn.Parameter(fused_b, linear_b.requires_grad) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/init.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/init.py new file mode 100644 index 0000000000000000000000000000000000000000..416ad0db8ef7ef64301614184f611a52c1a01e31 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/init.py @@ -0,0 +1,53 @@ +import inspect +import torch + + +def skip_init(module_cls, *args, **kwargs): + r""" + Given a module class object and args / kwargs, instantiate the module without initializing parameters / buffers. + + This can be useful if initialization is slow or if custom initialization will + be performed, making the default initialization unnecessary. There are some caveats to this, due to + the way this function is implemented: + + 1. The module must accept a `device` arg in its constructor that is passed to any parameters + or buffers created during construction. + + 2. The module must not perform any computation on parameters in its constructor except + initialization (i.e. functions from :mod:`torch.nn.init`). + + If these conditions are satisfied, the module can be instantiated with parameter / buffer values + uninitialized, as if having been created using :func:`torch.empty`. 
+ + Args: + module_cls: Class object; should be a subclass of :class:`torch.nn.Module` + args: args to pass to the module's constructor + kwargs: kwargs to pass to the module's constructor + + Returns: + Instantiated module with uninitialized parameters / buffers + + Example:: + + >>> # xdoctest: +IGNORE_WANT("non-deterministic") + >>> import torch + >>> m = torch.nn.utils.skip_init(torch.nn.Linear, 5, 1) + >>> m.weight + Parameter containing: + tensor([[0.0000e+00, 1.5846e+29, 7.8307e+00, 2.5250e-29, 1.1210e-44]], + requires_grad=True) + >>> m2 = torch.nn.utils.skip_init(torch.nn.Linear, in_features=6, out_features=1) + >>> m2.weight + Parameter containing: + tensor([[-1.4677e+24, 4.5915e-41, 1.4013e-45, 0.0000e+00, -1.4677e+24, + 4.5915e-41]], requires_grad=True) + + """ + if not issubclass(module_cls, torch.nn.Module): + raise RuntimeError(f'Expected a Module; got {module_cls}') + if 'device' not in inspect.signature(module_cls).parameters: + raise RuntimeError('Module must support a \'device\' arg to skip initialization') + + final_device = kwargs.pop('device', 'cpu') + kwargs['device'] = 'meta' + return module_cls(*args, **kwargs).to_empty(device=final_device) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/memory_format.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/memory_format.py new file mode 100644 index 0000000000000000000000000000000000000000..c8fc22bea51cfc47006d1918d977afd2c4f3310b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/memory_format.py @@ -0,0 +1,143 @@ +import torch + + +def convert_conv2d_weight_memory_format(module, memory_format): + r"""Convert ``memory_format`` of ``nn.Conv2d.weight`` to ``memory_format``. + + The conversion recursively applies to nested ``nn.Module``, including ``module``. + Note that it only changes the memory_format, but not the semantics of each dimensions. + This function is used to facilitate the computation to adopt NHWC kernels, which + provides considerable speed up for fp16 data on CUDA devices with compute capability >= 7.0 + + .. note:: + Calling ``model.to(memory_format=torch.channels_last)`` is more aggressive + than the utility function ``convert_conv2d_weight_memory_format``. Any + layer with 4d weight will be affected by ``model.to``, which does not + necessarily benefit from conversion to specified ``memory_format``. + One place we are confident in is that NHWC(channels_last) conversion for + convolution in cuDNN, As it is beneficial to run convolution in NHWC, + even in cases where we have to apply permutation to input tensors. + + Hence our strategy here is to convert only the weight of convolution to + channels_last. This ensures that; + 1. Fast convolution kernels will be used, the benefit of which could + outweigh overhead of permutation (if input is not in the same format) + 2. No unnecessary permutations are applied on layers that do not benefit + from memory_format conversion. + + The optimal case is that, layers between convolution layers are channels + last compatible. Input tensor would be permuted to channels last when it + encounters the first convolution layer and stay in that memory format. + Hence following convolutions will not need to permute its input tensor. + + In case where a channels last incompatible layer is between convolution + layers, we need to permute the input tensor back to contiguous format + for that layer. 
The input tensor will go through the remaining layers in + contiguous format and be permuted to channels last when it encounters + another convolution layer. There's no point in propagating that + permutation to an earlier layer, as most layers are quite agnostic to + ``memory_format``. + + This claim might change when PyTorch supports fusion of permutation, as + there might have been a better spot to fuse the permutation other than + immediately before a convolution. + + Args: + module (nn.Module): ``nn.Conv2d`` & ``nn.ConvTranspose2d`` or container + ``nn.Module`` + memory_format: user specified ``memory_format``, + e.g. ``torch.channels_last`` or ``torch.contiguous_format`` + + Returns: + The original module with updated ``nn.Conv2d`` + + Example: + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA) + >>> # xdoctest: +REQUIRES(env:CUBLAS_WORKSPACE_CONFIG) + >>> input = torch.randint(1, 10, (2, 8, 4, 4), dtype=torch.float16, device="cuda") + >>> model = nn.Sequential( + >>> nn.Conv2d(8, 4, 3)).cuda().half() + >>> # This is identical to: + >>> # nn.utils.convert_conv2d_weight_memory_format(model, torch.channels_last) + >>> model = nn.utils.convert_conv2d_weight_memory_format(model, torch.channels_last) + >>> out = model(input) + """ + # TODO: expand this to `_ConvNd` when channels_last support is extended + # beyond only 4d tensors. + if isinstance(module, (torch.nn.Conv2d, torch.nn.ConvTranspose2d)): + weight_data = module.weight.detach().clone().contiguous(memory_format=memory_format) + module.weight.data = weight_data.resize_(weight_data.size(), memory_format=memory_format) + for child in module.children(): + convert_conv2d_weight_memory_format(child, memory_format) + return module + + +def convert_conv3d_weight_memory_format(module, memory_format): + r"""Convert ``memory_format`` of ``nn.Conv3d.weight`` to ``memory_format`` + The conversion recursively applies to nested ``nn.Module``, including ``module``. + Note that it only changes the memory_format, but not the semantics of each dimensions. + This function is used to facilitate the computation to adopt NHWC kernels, which + provides considerable speed up for fp16 data on CUDA devices with compute capability >= 7.0 + + .. note:: + Calling ``model.to(memory_format=torch.channels_last)`` is more aggressive + than the utility function ``convert_conv3d_weight_memory_format``. Any + layer with 4d weight will be affected by ``model.to``, which does not + necessarily benefit from conversion to specified ``memory_format``. + One place we are confident in is that NHWC(channels_last) conversion for + convolution in cuDNN, As it is beneficial to run convolution in NHWC, + even in cases where we have to apply permutation to input tensors. + + Hence our strategy here is to convert only the weight of convolution to + channels_last. This ensures that; + 1. Fast convolution kernels will be used, the benefit of which could + outweigh overhead of permutation (if input is not in the same format) + 2. No unnecessary permutations are applied on layers that do not benefit + from memory_format conversion. + + The optimal case is that, layers between convolution layers are channels + last compatible. Input tensor would be permuted to channels last when it + encounters the first convolution layer and stay in that memory format. + Hence following convolutions will not need to permute its input tensor. 
+ + In case where a channels last incompatible layer is between convolution + layers, we need to permute the input tensor back to contiguous format + for that layer. The input tensor will go through the remaining layers in + contiguous format and be permuted to channels last when it encounters + another convolution layer. There's no point in propagating that + permutation to an earlier layer, as most layers are quite agnostic to + ``memory_format``. + + This claim might change when PyTorch supports fusion of permutation, as + there might have been a better spot to fuse the permutation other than + immediately before a convolution. + + Args: + module (nn.Module): ``nn.Conv3d`` & ``nn.ConvTranspose3d`` or container + ``nn.Module`` + memory_format: user specified ``memory_format``, + e.g. ``torch.channels_last`` or ``torch.contiguous_format`` + + Returns: + The original module with updated ``nn.Conv3d`` + + Example: + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA) + >>> # xdoctest: +REQUIRES(env:CUBLAS_WORKSPACE_CONFIG) + >>> input = torch.randint(1, 10, (2, 8, 4, 4, 4), dtype=torch.float16, device="cuda") + >>> model = nn.Sequential( + >>> nn.Conv3d(8, 4, 3)).cuda().half() + >>> # This is identical to: + >>> # nn.utils.convert_conv3d_weight_memory_format(model, torch.channels_last) + >>> model = nn.utils.convert_conv3d_weight_memory_format(model, torch.channels_last) + >>> out = model(input) + """ + + # TODO: expand this to `_ConvNd` when channels_last support is extended + # beyond only 4d tensors. + if isinstance(module, (torch.nn.Conv3d, torch.nn.ConvTranspose3d)): + weight_data = module.weight.detach().clone().contiguous(memory_format=memory_format) + module.weight.data = weight_data.resize_(weight_data.size(), memory_format=memory_format) + for child in module.children(): + convert_conv3d_weight_memory_format(child, memory_format) + return module diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/parametrizations.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/parametrizations.py new file mode 100644 index 0000000000000000000000000000000000000000..f9b25bcac0cb7bbc67b8f99bfc24960b2e54b8f7 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/parametrizations.py @@ -0,0 +1,571 @@ +from enum import Enum, auto + +import torch +from torch import Tensor +from ..utils import parametrize +from ..modules import Module +from .. import functional as F + +from typing import Optional + +__all__ = ['orthogonal', 'spectral_norm', 'weight_norm'] + + +def _is_orthogonal(Q, eps=None): + n, k = Q.size(-2), Q.size(-1) + Id = torch.eye(k, dtype=Q.dtype, device=Q.device) + # A reasonable eps, but not too large + eps = 10. * n * torch.finfo(Q.dtype).eps + return torch.allclose(Q.mH @ Q, Id, atol=eps) + + +def _make_orthogonal(A): + """Assume that A is a tall matrix. + + Compute the Q factor s.t. A = QR (A may be complex) and diag(R) is real and non-negative. 
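+
+    Illustrative sanity check (the random input is a stand-in, not part of any API)::
+
+        >>> # xdoctest: +SKIP("illustrative sketch")
+        >>> A = torch.randn(5, 3)
+        >>> Q = _make_orthogonal(A)
+        >>> torch.allclose(Q.mH @ Q, torch.eye(3), atol=1e-5)
+        True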
+ """ + X, tau = torch.geqrf(A) + Q = torch.linalg.householder_product(X, tau) + # The diagonal of X is the diagonal of R (which is always real) so we normalise by its signs + Q *= X.diagonal(dim1=-2, dim2=-1).sgn().unsqueeze(-2) + return Q + + +class _OrthMaps(Enum): + matrix_exp = auto() + cayley = auto() + householder = auto() + + +class _Orthogonal(Module): + base: Tensor + + def __init__(self, + weight, + orthogonal_map: _OrthMaps, + *, + use_trivialization=True) -> None: + super().__init__() + + # Note [Householder complex] + # For complex tensors, it is not possible to compute the tensor `tau` necessary for + # linalg.householder_product from the reflectors. + # To see this, note that the reflectors have a shape like: + # 0 0 0 + # * 0 0 + # * * 0 + # which, for complex matrices, give n(n-1) (real) parameters. Now, you need n^2 parameters + # to parametrize the unitary matrices. Saving tau on its own does not work either, because + # not every combination of `(A, tau)` gives a unitary matrix, meaning that if we optimise + # them as independent tensors we would not maintain the constraint + # An equivalent reasoning holds for rectangular matrices + if weight.is_complex() and orthogonal_map == _OrthMaps.householder: + raise ValueError("The householder parametrization does not support complex tensors.") + + self.shape = weight.shape + self.orthogonal_map = orthogonal_map + if use_trivialization: + self.register_buffer("base", None) + + def forward(self, X: torch.Tensor) -> torch.Tensor: + n, k = X.size(-2), X.size(-1) + transposed = n < k + if transposed: + X = X.mT + n, k = k, n + # Here n > k and X is a tall matrix + if self.orthogonal_map == _OrthMaps.matrix_exp or self.orthogonal_map == _OrthMaps.cayley: + # We just need n x k - k(k-1)/2 parameters + X = X.tril() + if n != k: + # Embed into a square matrix + X = torch.cat([X, X.new_zeros(n, n - k).expand(*X.shape[:-2], -1, -1)], dim=-1) + A = X - X.mH + # A is skew-symmetric (or skew-hermitian) + if self.orthogonal_map == _OrthMaps.matrix_exp: + Q = torch.matrix_exp(A) + elif self.orthogonal_map == _OrthMaps.cayley: + # Computes the Cayley retraction (I+A/2)(I-A/2)^{-1} + Id = torch.eye(n, dtype=A.dtype, device=A.device) + Q = torch.linalg.solve(torch.add(Id, A, alpha=-0.5), torch.add(Id, A, alpha=0.5)) + # Q is now orthogonal (or unitary) of size (..., n, n) + if n != k: + Q = Q[..., :k] + # Q is now the size of the X (albeit perhaps transposed) + else: + # X is real here, as we do not support householder with complex numbers + A = X.tril(diagonal=-1) + tau = 2. / (1. + (A * A).sum(dim=-2)) + Q = torch.linalg.householder_product(A, tau) + # The diagonal of X is 1's and -1's + # We do not want to differentiate through this or update the diagonal of X hence the casting + Q = Q * X.diagonal(dim1=-2, dim2=-1).int().unsqueeze(-2) + + if hasattr(self, "base"): + Q = self.base @ Q + if transposed: + Q = Q.mT + return Q # type: ignore[possibly-undefined] + + @torch.autograd.no_grad() + def right_inverse(self, Q: torch.Tensor) -> torch.Tensor: + if Q.shape != self.shape: + raise ValueError(f"Expected a matrix or batch of matrices of shape {self.shape}. 
" + f"Got a tensor of shape {Q.shape}.") + + Q_init = Q + n, k = Q.size(-2), Q.size(-1) + transpose = n < k + if transpose: + Q = Q.mT + n, k = k, n + + # We always make sure to always copy Q in every path + if not hasattr(self, "base"): + # Note [right_inverse expm cayley] + # If we do not have use_trivialization=True, we just implement the inverse of the forward + # map for the Householder. To see why, think that for the Cayley map, + # we would need to find the matrix X \in R^{n x k} such that: + # Y = torch.cat([X.tril(), X.new_zeros(n, n - k).expand(*X.shape[:-2], -1, -1)], dim=-1) + # A = Y - Y.mH + # cayley(A)[:, :k] + # gives the original tensor. It is not clear how to do this. + # Perhaps via some algebraic manipulation involving the QR like that of + # Corollary 2.2 in Edelman, Arias and Smith? + if self.orthogonal_map == _OrthMaps.cayley or self.orthogonal_map == _OrthMaps.matrix_exp: + raise NotImplementedError("It is not possible to assign to the matrix exponential " + "or the Cayley parametrizations when use_trivialization=False.") + + # If parametrization == _OrthMaps.householder, make Q orthogonal via the QR decomposition. + # Here Q is always real because we do not support householder and complex matrices. + # See note [Householder complex] + A, tau = torch.geqrf(Q) + # We want to have a decomposition X = QR with diag(R) > 0, as otherwise we could + # decompose an orthogonal matrix Q as Q = (-Q)@(-Id), which is a valid QR decomposition + # The diagonal of Q is the diagonal of R from the qr decomposition + A.diagonal(dim1=-2, dim2=-1).sign_() + # Equality with zero is ok because LAPACK returns exactly zero when it does not want + # to use a particular reflection + A.diagonal(dim1=-2, dim2=-1)[tau == 0.] *= -1 + return A.mT if transpose else A + else: + if n == k: + # We check whether Q is orthogonal + if not _is_orthogonal(Q): + Q = _make_orthogonal(Q) + else: # Is orthogonal + Q = Q.clone() + else: + # Complete Q into a full n x n orthogonal matrix + N = torch.randn(*(Q.size()[:-2] + (n, n - k)), dtype=Q.dtype, device=Q.device) + Q = torch.cat([Q, N], dim=-1) + Q = _make_orthogonal(Q) + self.base = Q + + # It is necessary to return the -Id, as we use the diagonal for the + # Householder parametrization. Using -Id makes: + # householder(torch.zeros(m,n)) == torch.eye(m,n) + # Poor man's version of eye_like + neg_Id = torch.zeros_like(Q_init) + neg_Id.diagonal(dim1=-2, dim2=-1).fill_(-1.) + return neg_Id + + +def orthogonal(module: Module, + name: str = 'weight', + orthogonal_map: Optional[str] = None, + *, + use_trivialization: bool = True) -> Module: + r"""Apply an orthogonal or unitary parametrization to a matrix or a batch of matrices. + + Letting :math:`\mathbb{K}` be :math:`\mathbb{R}` or :math:`\mathbb{C}`, the parametrized + matrix :math:`Q \in \mathbb{K}^{m \times n}` is **orthogonal** as + + .. math:: + + \begin{align*} + Q^{\text{H}}Q &= \mathrm{I}_n \mathrlap{\qquad \text{if }m \geq n}\\ + QQ^{\text{H}} &= \mathrm{I}_m \mathrlap{\qquad \text{if }m < n} + \end{align*} + + where :math:`Q^{\text{H}}` is the conjugate transpose when :math:`Q` is complex + and the transpose when :math:`Q` is real-valued, and + :math:`\mathrm{I}_n` is the `n`-dimensional identity matrix. + In plain words, :math:`Q` will have orthonormal columns whenever :math:`m \geq n` + and orthonormal rows otherwise. + + If the tensor has more than two dimensions, we consider it as a batch of matrices of shape `(..., m, n)`. 
+ + The matrix :math:`Q` may be parametrized via three different ``orthogonal_map`` in terms of the original tensor: + + - ``"matrix_exp"``/``"cayley"``: + the :func:`~torch.matrix_exp` :math:`Q = \exp(A)` and the `Cayley map`_ + :math:`Q = (\mathrm{I}_n + A/2)(\mathrm{I}_n - A/2)^{-1}` are applied to a skew-symmetric + :math:`A` to give an orthogonal matrix. + - ``"householder"``: computes a product of Householder reflectors + (:func:`~torch.linalg.householder_product`). + + ``"matrix_exp"``/``"cayley"`` often make the parametrized weight converge faster than + ``"householder"``, but they are slower to compute for very thin or very wide matrices. + + If ``use_trivialization=True`` (default), the parametrization implements the "Dynamic Trivialization Framework", + where an extra matrix :math:`B \in \mathbb{K}^{n \times n}` is stored under + ``module.parametrizations.weight[0].base``. This helps the + convergence of the parametrized layer at the expense of some extra memory use. + See `Trivializations for Gradient-Based Optimization on Manifolds`_ . + + Initial value of :math:`Q`: + If the original tensor is not parametrized and ``use_trivialization=True`` (default), the initial value + of :math:`Q` is that of the original tensor if it is orthogonal (or unitary in the complex case) + and it is orthogonalized via the QR decomposition otherwise (see :func:`torch.linalg.qr`). + Same happens when it is not parametrized and ``orthogonal_map="householder"`` even when ``use_trivialization=False``. + Otherwise, the initial value is the result of the composition of all the registered + parametrizations applied to the original tensor. + + .. note:: + This function is implemented using the parametrization functionality + in :func:`~torch.nn.utils.parametrize.register_parametrization`. + + + .. _`Cayley map`: https://en.wikipedia.org/wiki/Cayley_transform#Matrix_map + .. _`Trivializations for Gradient-Based Optimization on Manifolds`: https://arxiv.org/abs/1909.09501 + + Args: + module (nn.Module): module on which to register the parametrization. + name (str, optional): name of the tensor to make orthogonal. Default: ``"weight"``. + orthogonal_map (str, optional): One of the following: ``"matrix_exp"``, ``"cayley"``, ``"householder"``. + Default: ``"matrix_exp"`` if the matrix is square or complex, ``"householder"`` otherwise. + use_trivialization (bool, optional): whether to use the dynamic trivialization framework. + Default: ``True``. + + Returns: + The original module with an orthogonal parametrization registered to the specified + weight + + Example:: + + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_LAPACK) + >>> orth_linear = orthogonal(nn.Linear(20, 40)) + >>> orth_linear + ParametrizedLinear( + in_features=20, out_features=40, bias=True + (parametrizations): ModuleDict( + (weight): ParametrizationList( + (0): _Orthogonal() + ) + ) + ) + >>> # xdoctest: +IGNORE_WANT + >>> Q = orth_linear.weight + >>> torch.dist(Q.T @ Q, torch.eye(20)) + tensor(4.9332e-07) + """ + weight = getattr(module, name, None) + if not isinstance(weight, Tensor): + raise ValueError( + f"Module '{module}' has no parameter or buffer with name '{name}'" + ) + + # We could implement this for 1-dim tensors as the maps on the sphere + # but I believe it'd bite more people than it'd help + if weight.ndim < 2: + raise ValueError("Expected a matrix or batch of matrices. 
" + f"Got a tensor of {weight.ndim} dimensions.") + + if orthogonal_map is None: + orthogonal_map = "matrix_exp" if weight.size(-2) == weight.size(-1) or weight.is_complex() else "householder" + + orth_enum = getattr(_OrthMaps, orthogonal_map, None) + if orth_enum is None: + raise ValueError('orthogonal_map has to be one of "matrix_exp", "cayley", "householder". ' + f'Got: {orthogonal_map}') + orth = _Orthogonal(weight, + orth_enum, + use_trivialization=use_trivialization) + parametrize.register_parametrization(module, name, orth, unsafe=True) + return module + + +class _WeightNorm(Module): + def __init__( + self, + dim: Optional[int] = 0, + ) -> None: + super().__init__() + if dim is None: + dim = -1 + self.dim = dim + + def forward(self, weight_g, weight_v): + return torch._weight_norm(weight_v, weight_g, self.dim) + + def right_inverse(self, weight): + weight_g = torch.norm_except_dim(weight, 2, self.dim) + weight_v = weight + + return weight_g, weight_v + + +def weight_norm(module: Module, name: str = 'weight', dim: int = 0): + r"""Apply weight normalization to a parameter in the given module. + + .. math:: + \mathbf{w} = g \dfrac{\mathbf{v}}{\|\mathbf{v}\|} + + Weight normalization is a reparameterization that decouples the magnitude + of a weight tensor from its direction. This replaces the parameter specified + by :attr:`name` with two parameters: one specifying the magnitude + and one specifying the direction. + + By default, with ``dim=0``, the norm is computed independently per output + channel/plane. To compute a norm over the entire weight tensor, use + ``dim=None``. + + See https://arxiv.org/abs/1602.07868 + + Args: + module (Module): containing module + name (str, optional): name of weight parameter + dim (int, optional): dimension over which to compute the norm + + Returns: + The original module with the weight norm hook + + Example:: + + >>> m = weight_norm(nn.Linear(20, 40), name='weight') + >>> m + ParametrizedLinear( + in_features=20, out_features=40, bias=True + (parametrizations): ModuleDict( + (weight): ParametrizationList( + (0): _WeightNorm() + ) + ) + ) + >>> m.parametrizations.weight.original0.size() + torch.Size([40, 1]) + >>> m.parametrizations.weight.original1.size() + torch.Size([40, 20]) + + """ + _weight_norm = _WeightNorm(dim) + parametrize.register_parametrization(module, name, _weight_norm, unsafe=True) + + def _weight_norm_compat_hook(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs): + g_key = f"{prefix}{name}_g" + v_key = f"{prefix}{name}_v" + if g_key in state_dict and v_key in state_dict: + original0 = state_dict.pop(g_key) + original1 = state_dict.pop(v_key) + state_dict[f"{prefix}parametrizations.{name}.original0"] = original0 + state_dict[f"{prefix}parametrizations.{name}.original1"] = original1 + module._register_load_state_dict_pre_hook(_weight_norm_compat_hook) + return module + + +class _SpectralNorm(Module): + def __init__( + self, + weight: torch.Tensor, + n_power_iterations: int = 1, + dim: int = 0, + eps: float = 1e-12 + ) -> None: + super().__init__() + ndim = weight.ndim + if dim >= ndim or dim < -ndim: + raise IndexError("Dimension out of range (expected to be in range of " + f"[-{ndim}, {ndim - 1}] but got {dim})") + + if n_power_iterations <= 0: + raise ValueError('Expected n_power_iterations to be positive, but ' + f'got n_power_iterations={n_power_iterations}') + self.dim = dim if dim >= 0 else dim + ndim + self.eps = eps + if ndim > 1: + # For ndim == 1 we do not need to approximate anything 
(see _SpectralNorm.forward) + self.n_power_iterations = n_power_iterations + weight_mat = self._reshape_weight_to_matrix(weight) + h, w = weight_mat.size() + + u = weight_mat.new_empty(h).normal_(0, 1) + v = weight_mat.new_empty(w).normal_(0, 1) + self.register_buffer('_u', F.normalize(u, dim=0, eps=self.eps)) + self.register_buffer('_v', F.normalize(v, dim=0, eps=self.eps)) + + # Start with u, v initialized to some reasonable values by performing a number + # of iterations of the power method + self._power_method(weight_mat, 15) + + def _reshape_weight_to_matrix(self, weight: torch.Tensor) -> torch.Tensor: + # Precondition + assert weight.ndim > 1 + + if self.dim != 0: + # permute dim to front + weight = weight.permute(self.dim, *(d for d in range(weight.dim()) if d != self.dim)) + + return weight.flatten(1) + + @torch.autograd.no_grad() + def _power_method(self, weight_mat: torch.Tensor, n_power_iterations: int) -> None: + # See original note at torch/nn/utils/spectral_norm.py + # NB: If `do_power_iteration` is set, the `u` and `v` vectors are + # updated in power iteration **in-place**. This is very important + # because in `DataParallel` forward, the vectors (being buffers) are + # broadcast from the parallelized module to each module replica, + # which is a new module object created on the fly. And each replica + # runs its own spectral norm power iteration. So simply assigning + # the updated vectors to the module this function runs on will cause + # the update to be lost forever. And the next time the parallelized + # module is replicated, the same randomly initialized vectors are + # broadcast and used! + # + # Therefore, to make the change propagate back, we rely on two + # important behaviors (also enforced via tests): + # 1. `DataParallel` doesn't clone storage if the broadcast tensor + # is already on correct device; and it makes sure that the + # parallelized module is already on `device[0]`. + # 2. If the out tensor in `out=` kwarg has correct shape, it will + # just fill in the values. + # Therefore, since the same power iteration is performed on all + # devices, simply updating the tensors in-place will make sure that + # the module replica on `device[0]` will update the _u vector on the + # parallelized module (by shared storage). + # + # However, after we update `u` and `v` in-place, we need to **clone** + # them before using them to normalize the weight. This is to support + # backproping through two forward passes, e.g., the common pattern in + # GAN training: loss = D(real) - D(fake). Otherwise, engine will + # complain that variables needed to do backward for the first forward + # (i.e., the `u` and `v` vectors) are changed in the second forward. + + # Precondition + assert weight_mat.ndim > 1 + + for _ in range(n_power_iterations): + # Spectral norm of weight equals to `u^T W v`, where `u` and `v` + # are the first left and right singular vectors. + # This power iteration produces approximations of `u` and `v`. 
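+            # Each iteration applies the updates
+            #   u <- normalize(W @ v)
+            #   v <- normalize(W^H @ u)
+            # forward() then uses sigma = <u, W v> as the spectral-norm estimate.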
+ self._u = F.normalize(torch.mv(weight_mat, self._v), # type: ignore[has-type] + dim=0, eps=self.eps, out=self._u) # type: ignore[has-type] + self._v = F.normalize(torch.mv(weight_mat.H, self._u), + dim=0, eps=self.eps, out=self._v) # type: ignore[has-type] + + def forward(self, weight: torch.Tensor) -> torch.Tensor: + if weight.ndim == 1: + # Faster and more exact path, no need to approximate anything + return F.normalize(weight, dim=0, eps=self.eps) + else: + weight_mat = self._reshape_weight_to_matrix(weight) + if self.training: + self._power_method(weight_mat, self.n_power_iterations) + # See above on why we need to clone + u = self._u.clone(memory_format=torch.contiguous_format) + v = self._v.clone(memory_format=torch.contiguous_format) + # The proper way of computing this should be through F.bilinear, but + # it seems to have some efficiency issues: + # https://github.com/pytorch/pytorch/issues/58093 + sigma = torch.vdot(u, torch.mv(weight_mat, v)) + return weight / sigma + + def right_inverse(self, value: torch.Tensor) -> torch.Tensor: + # we may want to assert here that the passed value already + # satisfies constraints + return value + + +def spectral_norm(module: Module, + name: str = 'weight', + n_power_iterations: int = 1, + eps: float = 1e-12, + dim: Optional[int] = None) -> Module: + r"""Apply spectral normalization to a parameter in the given module. + + .. math:: + \mathbf{W}_{SN} = \dfrac{\mathbf{W}}{\sigma(\mathbf{W})}, + \sigma(\mathbf{W}) = \max_{\mathbf{h}: \mathbf{h} \ne 0} \dfrac{\|\mathbf{W} \mathbf{h}\|_2}{\|\mathbf{h}\|_2} + + When applied on a vector, it simplifies to + + .. math:: + \mathbf{x}_{SN} = \dfrac{\mathbf{x}}{\|\mathbf{x}\|_2} + + Spectral normalization stabilizes the training of discriminators (critics) + in Generative Adversarial Networks (GANs) by reducing the Lipschitz constant + of the model. :math:`\sigma` is approximated performing one iteration of the + `power method`_ every time the weight is accessed. If the dimension of the + weight tensor is greater than 2, it is reshaped to 2D in power iteration + method to get spectral norm. + + + See `Spectral Normalization for Generative Adversarial Networks`_ . + + .. _`power method`: https://en.wikipedia.org/wiki/Power_iteration + .. _`Spectral Normalization for Generative Adversarial Networks`: https://arxiv.org/abs/1802.05957 + + .. note:: + This function is implemented using the parametrization functionality + in :func:`~torch.nn.utils.parametrize.register_parametrization`. It is a + reimplementation of :func:`torch.nn.utils.spectral_norm`. + + .. note:: + When this constraint is registered, the singular vectors associated to the largest + singular value are estimated rather than sampled at random. These are then updated + performing :attr:`n_power_iterations` of the `power method`_ whenever the tensor + is accessed with the module on `training` mode. + + .. note:: + If the `_SpectralNorm` module, i.e., `module.parametrization.weight[idx]`, + is in training mode on removal, it will perform another power iteration. + If you'd like to avoid this iteration, set the module to eval mode + before its removal. + + Args: + module (nn.Module): containing module + name (str, optional): name of weight parameter. Default: ``"weight"``. + n_power_iterations (int, optional): number of power iterations to + calculate spectral norm. Default: ``1``. + eps (float, optional): epsilon for numerical stability in + calculating norms. Default: ``1e-12``. 
+ dim (int, optional): dimension corresponding to number of outputs. + Default: ``0``, except for modules that are instances of + ConvTranspose{1,2,3}d, when it is ``1`` + + Returns: + The original module with a new parametrization registered to the specified + weight + + Example:: + + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_LAPACK) + >>> # xdoctest: +IGNORE_WANT("non-deterministic") + >>> snm = spectral_norm(nn.Linear(20, 40)) + >>> snm + ParametrizedLinear( + in_features=20, out_features=40, bias=True + (parametrizations): ModuleDict( + (weight): ParametrizationList( + (0): _SpectralNorm() + ) + ) + ) + >>> torch.linalg.matrix_norm(snm.weight, 2) + tensor(1.0081, grad_fn=) + """ + weight = getattr(module, name, None) + if not isinstance(weight, Tensor): + raise ValueError( + f"Module '{module}' has no parameter or buffer with name '{name}'" + ) + + if dim is None: + if isinstance(module, (torch.nn.ConvTranspose1d, + torch.nn.ConvTranspose2d, + torch.nn.ConvTranspose3d)): + dim = 1 + else: + dim = 0 + parametrize.register_parametrization(module, name, _SpectralNorm(weight, n_power_iterations, dim, eps)) + return module diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/parametrize.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/parametrize.py new file mode 100644 index 0000000000000000000000000000000000000000..e73aada232abf7e0754319428abe7b8f88289bd9 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/parametrize.py @@ -0,0 +1,758 @@ +import torch +from torch.nn.modules.container import ModuleList, ModuleDict, Module +from torch.nn.parameter import Parameter +from torch import Tensor + +import collections +import copyreg +from copy import deepcopy +from contextlib import contextmanager +from typing import Union, Optional, Dict, Tuple, Sequence + +__all__ = ['cached', 'ParametrizationList', 'register_parametrization', 'is_parametrized', 'remove_parametrizations', + 'type_before_parametrizations', 'transfer_parametrizations_and_params'] + +_cache_enabled = 0 +_cache: Dict[Tuple[int, str], Optional[Tensor]] = {} + + +@contextmanager +def cached(): + r"""Context manager that enables the caching system within parametrizations registered with :func:`register_parametrization`. + + The value of the parametrized objects is computed and cached the first time + they are required when this context manager is active. The cached values are + discarded when leaving the context manager. + + This is useful when using a parametrized parameter more than once in the forward pass. + An example of this is when parametrizing the recurrent kernel of an RNN or when + sharing weights. + + The simplest way to activate the cache is by wrapping the forward pass of the neural network + + .. code-block:: python + + import torch.nn.utils.parametrize as P + ... + with P.cached(): + output = model(inputs) + + in training and evaluation. One may also wrap the parts of the modules that use + several times the parametrized tensors. For example, the loop of an RNN with a + parametrized recurrent kernel: + + .. 
code-block:: python + + with P.cached(): + for x in xs: + out_rnn = self.rnn_cell(x, out_rnn) + """ + global _cache + global _cache_enabled + _cache_enabled += 1 + try: + yield + finally: + _cache_enabled -= 1 + if not _cache_enabled: + _cache = {} + + +def _register_parameter_or_buffer(module, name, X): + if isinstance(X, Parameter): + module.register_parameter(name, X) + else: + module.register_buffer(name, X) + + +class ParametrizationList(ModuleList): + r"""A sequential container that holds and manages the original parameters or buffers of a parametrized :class:`torch.nn.Module`. + + It is the type of ``module.parametrizations[tensor_name]`` when ``module[tensor_name]`` + has been parametrized with :func:`register_parametrization`. + + If the first registered parametrization has a ``right_inverse`` that returns one tensor or + does not have a ``right_inverse`` (in which case we assume that ``right_inverse`` is the identity), + it will hold the tensor under the name ``original``. + If it has a ``right_inverse`` that returns more than one tensor, these will be registered as + ``original0``, ``original1``, ... + + .. warning:: + This class is used internally by :func:`register_parametrization`. It is documented + here for completeness. It shall not be instantiated by the user. + + Args: + modules (sequence): sequence of modules representing the parametrizations + original (Parameter or Tensor): parameter or buffer that is parametrized + unsafe (bool): a boolean flag that denotes whether the parametrization + may change the dtype and shape of the tensor. Default: `False` + Warning: the parametrization is not checked for consistency upon registration. + Enable this flag at your own risk. + """ + + original: Tensor + unsafe: bool + + def __init__( + self, modules: Sequence[Module], original: Union[Tensor, Parameter], unsafe: bool = False + ) -> None: + # We require this because we need to treat differently the first parametrization + # This should never throw, unless this class is used from the outside + if len(modules) == 0: + raise ValueError("ParametrizationList requires one or more modules.") + + super().__init__(modules) + self.unsafe = unsafe + + # In plain words: + # module.weight must keep its dtype and shape. 
+ # Furthermore, if there is no right_inverse or the right_inverse returns a tensor, + # this should be of the same dtype as the original tensor + # + # We check that the following invariants hold: + # X = module.weight + # Y = param.right_inverse(X) + # assert isinstance(Y, Tensor) or + # (isinstance(Y, collections.abc.Sequence) and all(isinstance(t, Tensor) for t in Y)) + # Z = param(Y) if isinstance(Y, Tensor) else param(*Y) + # # Consistency checks + # assert X.dtype == Z.dtype and X.shape == Z.shape + # # If it has one input, this allows to be able to use set_ to be able to + # # move data to/from the original tensor without changing its id (which is what the + # # optimizer uses to track parameters) + # if isinstance(Y, Tensor) + # assert X.dtype == Y.dtype + # Below we use original = X, new = Y + + original_shape = original.shape + original_dtype = original.dtype + + # Compute new + with torch.no_grad(): + new = original + for module in reversed(self): # type: ignore[call-overload] + if hasattr(module, "right_inverse"): + try: + new = module.right_inverse(new) + except NotImplementedError: + pass + # else, or if it throws, we assume that right_inverse is the identity + + if not isinstance(new, Tensor) and not isinstance(new, collections.abc.Sequence): + raise ValueError("'right_inverse' must return a Tensor or a Sequence of tensors (list, tuple...). " + f"Got {type(new).__name__}") + + # Set the number of original tensors + self.is_tensor = isinstance(new, Tensor) + self.ntensors = 1 if self.is_tensor else len(new) + + # Register the tensor(s) + if self.is_tensor: + if original.dtype != new.dtype: + raise ValueError( + "When `right_inverse` outputs one tensor, it may not change the dtype.\n" + f"original.dtype: {original.dtype}\n" + f"right_inverse(original).dtype: {new.dtype}" + ) + # Set the original to original so that the user does not need to re-register the parameter + # manually in the optimiser + with torch.no_grad(): + original.set_(new) # type: ignore[call-overload] + _register_parameter_or_buffer(self, "original", original) + else: + for i, originali in enumerate(new): + if not isinstance(originali, Tensor): + raise ValueError("'right_inverse' must return a Tensor or a Sequence of tensors " + "(list, tuple...). " + f"Got element {i} of the sequence with type {type(originali).__name__}.") + + # If the original tensor was a Parameter that required grad, we expect the user to + # add the new parameters to the optimizer after registering the parametrization + # (this is documented) + if isinstance(original, Parameter): + originali = Parameter(originali) + originali.requires_grad_(original.requires_grad) + _register_parameter_or_buffer(self, f"original{i}", originali) + + if not self.unsafe: + # Consistency checks: + # Since f : A -> B, right_inverse : B -> A, Z and original should live in B + # Z = forward(right_inverse(original)) + Z = self() + if not isinstance(Z, Tensor): + raise ValueError( + f"A parametrization must return a tensor. Got {type(Z).__name__}." 
+ ) + if Z.dtype != original_dtype: + raise ValueError( + "Registering a parametrization may not change the dtype of the tensor, unless `unsafe` flag is enabled.\n" + f"unparametrized dtype: {original_dtype}\n" + f"parametrized dtype: {Z.dtype}" + ) + if Z.shape != original_shape: + raise ValueError( + "Registering a parametrization may not change the shape of the tensor, unless `unsafe` flag is enabled.\n" + f"unparametrized shape: {original_shape}\n" + f"parametrized shape: {Z.shape}" + ) + + def right_inverse(self, value: Tensor) -> None: + r"""Call the ``right_inverse`` methods of the parametrizations in the inverse registration order. + + Then, it stores the result in ``self.original`` if ``right_inverse`` outputs one tensor + or in ``self.original0``, ``self.original1``, ... if it outputs several. + + Args: + value (Tensor): Value to which initialize the module + """ + # All the exceptions in this function should almost never throw. + # They could throw if, for example, right_inverse function returns a different + # dtype when given a different input, which should most likely be caused by a + # bug in the user's code + + with torch.no_grad(): + # See https://github.com/pytorch/pytorch/issues/53103 + for module in reversed(self): # type: ignore[call-overload] + if hasattr(module, "right_inverse"): + value = module.right_inverse(value) + else: + raise RuntimeError(f"parametrization {type(module).__name__} does not implement " + "right_inverse.") + if self.is_tensor: + # These exceptions should only throw when a right_inverse function does not + # return the same dtype for every input, which should most likely be caused by a bug + if not isinstance(value, Tensor): + raise ValueError( + f"`right_inverse` should return a tensor. Got {type(value).__name__}" + ) + if value.dtype != self.original.dtype: + raise ValueError( + f"The tensor returned by `right_inverse` has dtype {value.dtype} " + f"while `original` has dtype {self.original.dtype}" + ) + # We know that the result is going to have the same dtype + self.original.set_(value) # type: ignore[call-overload] + else: + if not isinstance(value, collections.abc.Sequence): + raise ValueError( + "'right_inverse' must return a sequence of tensors. " + f"Got {type(value).__name__}." + ) + if len(value) != self.ntensors: + raise ValueError( + "'right_inverse' must return a sequence of tensors of length " + f"{self.ntensors}. Got a sequence of length {len(value)}." + ) + for i, tensor in enumerate(value): + original_i = getattr(self, f"original{i}") + if not isinstance(tensor, Tensor): + raise ValueError( + f"`right_inverse` must return a sequence of tensors. 
" + f"Got element {i} of type {type(tensor).__name__}" + ) + if original_i.dtype != tensor.dtype: + raise ValueError( + f"Tensor {i} returned by `right_inverse` has dtype {tensor.dtype} " + f"while `original{i}` has dtype {original_i.dtype}" + ) + original_i.set_(tensor) + + def forward(self) -> Tensor: + if torch.jit.is_scripting(): + raise RuntimeError('Parametrization is not working with scripting.') + # Unpack the originals for the first parametrization + if self.is_tensor: + x = self[0](self.original) + else: + originals = (getattr(self, f"original{i}") for i in range(self.ntensors)) + x = self[0](*originals) + # It's not possible to call self[1:] here, so we have to be a bit more cryptic + # Also we want to skip all non-integer keys + curr_idx = 1 + while hasattr(self, str(curr_idx)): + x = self[curr_idx](x) + curr_idx += 1 + return x + + +def _inject_new_class(module: Module) -> None: + r"""Set up a module to be parametrized. + + This works by substituting the class of the module by a class + that extends it to be able to inject a property + + Args: + module (nn.Module): module into which to inject the property + """ + cls = module.__class__ + + def default_deepcopy(self, memo): + # Just emulate a standard deepcopy procedure when __deepcopy__ doesn't exist in the current class. + obj = memo.get(id(self), None) + if obj is not None: + return obj + replica = self.__new__(self.__class__) + memo[id(self)] = replica + replica.__dict__ = deepcopy(self.__dict__, memo) + # Also save all slots if they exist. + slots_to_save = copyreg._slotnames(self.__class__) # type: ignore[attr-defined] + for slot in slots_to_save: + if hasattr(self, slot): + setattr(replica, slot, deepcopy(getattr(self, slot), memo)) + return replica + + def getstate(self): + raise RuntimeError( + "Serialization of parametrized modules is only " + "supported through state_dict(). See:\n" + "https://pytorch.org/tutorials/beginner/saving_loading_models.html" + "#saving-loading-a-general-checkpoint-for-inference-and-or-resuming-training" + ) + + dct = {"__getstate__": getstate} + # We don't allow serialization of parametrized modules but should still allow deepcopying. + # Default 'deepcopy' function invokes __deepcopy__ method instead of __getstate__ when it exists. + if not hasattr(cls, "__deepcopy__"): + dct["__deepcopy__"] = default_deepcopy # type: ignore[assignment] + + param_cls = type( + f"Parametrized{cls.__name__}", + (cls,), + dct, + ) + + module.__class__ = param_cls + + +def _inject_property(module: Module, tensor_name: str) -> None: + r"""Injects a property into module[tensor_name]. + + It assumes that the class in the module has already been modified from its + original one using _inject_new_class and that the tensor under :attr:`tensor_name` + has already been moved out + + Args: + module (nn.Module): module into which to inject the property + tensor_name (str): name of the name of the property to create + """ + # We check the precondition. 
+ # This should never fire if register_parametrization is correctly implemented + assert not hasattr(module, tensor_name) + + @torch.jit.unused + def get_cached_parametrization(parametrization) -> Tensor: + global _cache + key = (id(module), tensor_name) + tensor = _cache.get(key) + if tensor is None: + tensor = parametrization() + _cache[key] = tensor + return tensor + + def get_parametrized(self) -> Tensor: + if torch.jit.is_scripting(): + raise RuntimeError('Parametrization is not working with scripting.') + parametrization = self.parametrizations[tensor_name] + if _cache_enabled: + if torch.jit.is_scripting(): + # Scripting + raise RuntimeError('Caching is not implemented for scripting. ' + 'Either disable caching or avoid scripting.') + elif torch._C._get_tracing_state() is not None: + # Tracing + raise RuntimeError('Cannot trace a model while caching parametrizations.') + else: + return get_cached_parametrization(parametrization) + else: + # If caching is not active, this function just evaluates the parametrization + return parametrization() + + def set_original(self, value: Tensor) -> None: + if torch.jit.is_scripting(): + raise RuntimeError('Parametrization is not working with scripting.') + self.parametrizations[tensor_name].right_inverse(value) + + setattr(module.__class__, tensor_name, property(get_parametrized, set_original)) + +def register_parametrization( + module: Module, tensor_name: str, parametrization: Module, *, unsafe: bool = False, +) -> Module: + r"""Register a parametrization to a tensor in a module. + + Assume that ``tensor_name="weight"`` for simplicity. When accessing ``module.weight``, + the module will return the parametrized version ``parametrization(module.weight)``. + If the original tensor requires a gradient, the backward pass will differentiate + through :attr:`parametrization`, and the optimizer will update the tensor accordingly. + + The first time that a module registers a parametrization, this function will add an attribute + ``parametrizations`` to the module of type :class:`~ParametrizationList`. + + The list of parametrizations on the tensor ``weight`` will be accessible under + ``module.parametrizations.weight``. + + The original tensor will be accessible under + ``module.parametrizations.weight.original``. + + Parametrizations may be concatenated by registering several parametrizations + on the same attribute. + + The training mode of a registered parametrization is updated on registration + to match the training mode of the host module + + Parametrized parameters and buffers have an inbuilt caching system that can be activated + using the context manager :func:`cached`. + + A :attr:`parametrization` may optionally implement a method with signature + + .. code-block:: python + + def right_inverse(self, X: Tensor) -> Union[Tensor, Sequence[Tensor]] + + This method is called on the unparametrized tensor when the first parametrization + is registered to compute the initial value of the original tensor. + If this method is not implemented, the original tensor will be just the unparametrized tensor. + + If all the parametrizations registered on a tensor implement `right_inverse` it is possible + to initialize a parametrized tensor by assigning to it, as shown in the example below. + + It is possible for the first parametrization to depend on several inputs. + This may be implemented returning a tuple of tensors from ``right_inverse`` + (see the example implementation of a ``RankOne`` parametrization below). 
+ + In this case, the unconstrained tensors are also located under ``module.parametrizations.weight`` + with names ``original0``, ``original1``,... + + .. note:: + + If unsafe=False (default) both the forward and right_inverse methods will be called + once to perform a number of consistency checks. + If unsafe=True, then right_inverse will be called if the tensor is not parametrized, + and nothing will be called otherwise. + + .. note:: + + In most situations, ``right_inverse`` will be a function such that + ``forward(right_inverse(X)) == X`` (see + `right inverse `_). + Sometimes, when the parametrization is not surjective, it may be reasonable + to relax this. + + .. warning:: + + If a parametrization depends on several inputs, :func:`~register_parametrization` + will register a number of new parameters. If such parametrization is registered + after the optimizer is created, these new parameters will need to be added manually + to the optimizer. See :meth:`torch.Optimizer.add_param_group`. + + Args: + module (nn.Module): module on which to register the parametrization + tensor_name (str): name of the parameter or buffer on which to register + the parametrization + parametrization (nn.Module): the parametrization to register + Keyword args: + unsafe (bool): a boolean flag that denotes whether the parametrization + may change the dtype and shape of the tensor. Default: `False` + Warning: the parametrization is not checked for consistency upon registration. + Enable this flag at your own risk. + + Raises: + ValueError: if the module does not have a parameter or a buffer named :attr:`tensor_name` + + Examples: + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_LAPACK) + >>> import torch + >>> import torch.nn as nn + >>> import torch.nn.utils.parametrize as P + >>> + >>> class Symmetric(nn.Module): + >>> def forward(self, X): + >>> return X.triu() + X.triu(1).T # Return a symmetric matrix + >>> + >>> def right_inverse(self, A): + >>> return A.triu() + >>> + >>> m = nn.Linear(5, 5) + >>> P.register_parametrization(m, "weight", Symmetric()) + >>> print(torch.allclose(m.weight, m.weight.T)) # m.weight is now symmetric + True + >>> A = torch.rand(5, 5) + >>> A = A + A.T # A is now symmetric + >>> m.weight = A # Initialize the weight to be the symmetric matrix A + >>> print(torch.allclose(m.weight, A)) + True + + >>> class RankOne(nn.Module): + >>> def forward(self, x, y): + >>> # Form a rank 1 matrix multiplying two vectors + >>> return x.unsqueeze(-1) @ y.unsqueeze(-2) + >>> + >>> def right_inverse(self, Z): + >>> # Project Z onto the rank 1 matrices + >>> U, S, Vh = torch.linalg.svd(Z, full_matrices=False) + >>> # Return rescaled singular vectors + >>> s0_sqrt = S[0].sqrt().unsqueeze(-1) + >>> return U[..., :, 0] * s0_sqrt, Vh[..., 0, :] * s0_sqrt + >>> + >>> linear_rank_one = P.register_parametrization(nn.Linear(4, 4), "weight", RankOne()) + >>> print(torch.linalg.matrix_rank(linear_rank_one.weight).item()) + 1 + + """ + parametrization.train(module.training) + if is_parametrized(module, tensor_name): + # Correctness checks. + # If A is the space of tensors with shape and dtype equal to module.weight + # we check that parametrization.forward and parametrization.right_inverse are + # functions from A to A + if not unsafe: + Y = getattr(module, tensor_name) + X = parametrization(Y) + if not isinstance(X, Tensor): + raise ValueError( + f"A parametrization must return a tensor. Got {type(X).__name__}." 
+ ) + if X.dtype != Y.dtype: + raise ValueError( + "Registering a parametrization may not change the dtype of the tensor, unless the `unsafe` flag is enabled.\n" + f"module.{tensor_name}.dtype: {Y.dtype}\n" + f"parametrization(module.{tensor_name}).dtype: {X.dtype}" + ) + if X.shape != Y.shape: + raise ValueError( + "Registering a parametrization may not change the shape of the tensor, unless the `unsafe` flag is enabled.\n" + f"module.{tensor_name}.shape: {Y.shape}\n" + f"parametrization(module.{tensor_name}).shape: {X.shape}" + ) + if hasattr(parametrization, "right_inverse"): + try: + Z = parametrization.right_inverse(X) # type: ignore[operator] + except NotImplementedError: + pass + else: + if not isinstance(Z, Tensor): + raise ValueError( + f"parametrization.right_inverse must return a tensor. Got: {type(Z).__name__}" + ) + if Z.dtype != Y.dtype: + raise ValueError( + "The tensor returned by parametrization.right_inverse must have the same dtype " + f"as module.{tensor_name}, unless the `unsafe` flag is enabled.\n" + f"module.{tensor_name}.dtype: {Y.dtype}\n" + f"returned dtype: {Z.dtype}" + ) + if Z.shape != Y.shape: + raise ValueError( + "The tensor returned by parametrization.right_inverse must have the same shape " + f"as module.{tensor_name}, unless the `unsafe` flag is enabled.\n" + f"module.{tensor_name}.shape: {Y.shape}\n" + f"returned shape: {Z.shape}" + ) + # else right_inverse is assumed to be the identity + + # add the new parametrization to the parametrization list + assert isinstance(module.parametrizations, ModuleDict) # Make mypy happy + module.parametrizations[tensor_name].append(parametrization) + # If unsafe was True in previous parametrization, keep it enabled + module.parametrizations[tensor_name].unsafe |= unsafe # type: ignore[index, union-attr] + elif tensor_name in module._buffers or tensor_name in module._parameters: + # Set the parametrization mechanism + # Fetch the original buffer or parameter + original = getattr(module, tensor_name) + # We create this early to check for possible errors + parametrizations = ParametrizationList([parametrization], original, unsafe=unsafe) + # Delete the previous parameter or buffer + delattr(module, tensor_name) + # If this is the first parametrization registered on the module, + # we prepare the module to inject the property + if not is_parametrized(module): + # Change the class + _inject_new_class(module) + # Inject a ``ModuleDict`` into the instance under module.parametrizations + module.parametrizations = ModuleDict() + # Add a property into the class + _inject_property(module, tensor_name) + # Add a ParametrizationList + assert isinstance(module.parametrizations, ModuleDict) # Make mypy happy + module.parametrizations[tensor_name] = parametrizations + else: + raise ValueError( + f"Module '{module}' does not have a parameter, a buffer, or a " + f"parametrized element with name '{tensor_name}'" + ) + return module + + +def is_parametrized(module: Module, tensor_name: Optional[str] = None) -> bool: + r"""Determine if a module has a parametrization. 
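+
+    A quick sketch of the expected behaviour (assuming ``import torch.nn as nn`` and
+    ``import torch.nn.utils.parametrize as parametrize``):
+
+    .. code-block:: python
+
+        m = nn.Linear(2, 2)
+        assert not parametrize.is_parametrized(m)
+        parametrize.register_parametrization(m, "weight", nn.Identity())
+        assert parametrize.is_parametrized(m)
+        assert parametrize.is_parametrized(m, "weight")
+        assert not parametrize.is_parametrized(m, "bias")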
+ + Args: + module (nn.Module): module to query + tensor_name (str, optional): name of the parameter in the module + Default: ``None`` + Returns: + ``True`` if :attr:`module` has a parametrization for the parameter named :attr:`tensor_name`, + or if it has any parametrization when :attr:`tensor_name` is ``None``; + otherwise ``False`` + """ + parametrizations = getattr(module, "parametrizations", None) + if parametrizations is None or not isinstance(parametrizations, ModuleDict): + return False + if tensor_name is None: + # Check that there is at least one parametrized buffer or Parameter + return len(parametrizations) > 0 + else: + return tensor_name in parametrizations + +def remove_parametrizations( + module: Module, tensor_name: str, leave_parametrized: bool = True +) -> Module: + r"""Remove the parametrizations on a tensor in a module. + + - If ``leave_parametrized=True``, ``module[tensor_name]`` will be set to + its current output. In this case, the parametrization shall not change the ``dtype`` + of the tensor. + - If ``leave_parametrized=False``, ``module[tensor_name]`` will be set to + the unparametrised tensor in ``module.parametrizations[tensor_name].original``. + This is only possible when the parametrization depends on just one tensor. + + Args: + module (nn.Module): module from which remove the parametrization + tensor_name (str): name of the parametrization to be removed + leave_parametrized (bool, optional): leave the attribute :attr:`tensor_name` parametrized. + Default: ``True`` + + Returns: + Module: module + + Raises: + ValueError: if ``module[tensor_name]`` is not parametrized + ValueError: if ``leave_parametrized=False`` and the parametrization depends on several tensors + """ + if not is_parametrized(module, tensor_name): + raise ValueError(f"Module {module} does not have a parametrization on {tensor_name}") + + # Fetch the original tensor + assert isinstance(module.parametrizations, ModuleDict) # Make mypy happy + parametrizations = module.parametrizations[tensor_name] + if parametrizations.is_tensor: + original = parametrizations.original + if leave_parametrized: + with torch.no_grad(): + t = getattr(module, tensor_name) + # We know they have the same dtype because we have checked this when registering the + # parametrizations. As such, we can use set_ + # We do this so that the parameter does not to change the id() + # This way the user does not need to update the optimizer + with torch.no_grad(): + if type(original) is torch.Tensor: + original.set_(t) + else: + try: + original.set_(t) + except RuntimeError as e: + # TODO: Fix this for tensor subclasses that are parameters: + # RuntimeError: set_storage is not allowed on a Tensor created from .data or .detach(). + raise RuntimeError("Calling remove_parametrizations() with leave_parametrized=True " + "for a parameter that is an instance of a tensor subclass requires " + "set_() to be implemented correctly for the tensor subclass. 
Either " + "set leave_parametrized=False or provide a working implementation for " + "set_() in the tensor subclass.") from e + else: + if leave_parametrized: + # We cannot use no_grad because we need to know whether one or more + # original tensors required grad + t = getattr(module, tensor_name) + # We'll have to trust the user to add it to the optimizer + original = Parameter(t) if t.requires_grad else t + else: + raise ValueError("Cannot leave unparametrized (`leave_parametrized=False`) a tensor " + "that is parametrized in terms of a sequence of tensors.") + + # Delete the property that manages the parametrization + delattr(module.__class__, tensor_name) + # Delete the ParametrizationList + del module.parametrizations[tensor_name] + + # Restore the parameter / buffer into the main class + _register_parameter_or_buffer(module, tensor_name, original) + + # Roll back the parametrized class if no other buffer or parameter + # is currently parametrized in this class + if not is_parametrized(module): + delattr(module, "parametrizations") + # Restore class + orig_cls = module.__class__.__bases__[0] + module.__class__ = orig_cls + return module + +def type_before_parametrizations(module: Module) -> type: + r"""Return the module type before parametrizations were applied and if not, then it returns the module type. + + Args: + module (nn.Module): module to get type of + """ + if is_parametrized(module): + return module.__class__.__bases__[0] + else: + return type(module) + +def transfer_parametrizations_and_params( + from_module: Module, to_module: Module, tensor_name: Optional[str] = None +) -> Module: + r"""Transfer parametrizations and the parameters they parametrize from :attr:`from_module` to :attr:`to_module`. + + If :attr:`tensor_name` is specified, only transfers the specified parameter, otherwise + transfers all parametrized parameters. If those parameters do not exist in to_module, it will create them. + Does nothing if from_module is not parametrized. 
+ + Args: + from_module (nn.Module): module to transfer from + to_module (nn.Module): module to transfer to + tensor_name (str, optional): parameter to transfer + + Returns: + Module: to_module + """ + if is_parametrized(from_module): + assert isinstance(from_module.parametrizations, ModuleDict) # for mypy + + # get list of all params or the single param to transfer + parameters_to_transfer: Union[list, ModuleDict] = ( + from_module.parametrizations if tensor_name is None else [tensor_name] + ) + + assert hasattr(parameters_to_transfer, "__iter__") # for mypy + for parameter_name in parameters_to_transfer: + + # initialize the to-be-transferred param in to_module if it doesn't exist already + if not hasattr(to_module, parameter_name): + setattr( + to_module, + parameter_name, + Parameter(getattr(from_module, parameter_name)), + ) + + # apply the params's parametrizations to to_module + for param_func in from_module.parametrizations[parameter_name]: + register_parametrization(to_module, parameter_name, param_func) + assert isinstance(to_module.parametrizations, ModuleDict) # for mypy + + # make values match, original values can be stored in either original or + # original0, original1..., need to check both cases + if hasattr(from_module.parametrizations[parameter_name], "original"): + to_module.parametrizations[parameter_name].original = \ + from_module.parametrizations[parameter_name].original + else: + num = 0 + orig_num = "original" + str(num) + # loop through each original# until all values have been set + while hasattr(from_module.parametrizations[parameter_name], orig_num): + setattr( + to_module.parametrizations[parameter_name], + orig_num, + getattr(from_module.parametrizations[parameter_name], orig_num), + ) + num = num + 1 + orig_num = "original" + str(num) + + return to_module diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/prune.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/prune.py new file mode 100644 index 0000000000000000000000000000000000000000..0375106d69e02d872372d8ae61fb163950bba848 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/prune.py @@ -0,0 +1,1379 @@ +r"""Pruning methods.""" +import numbers +from abc import ABC, abstractmethod +from collections.abc import Iterable +from typing import Tuple + +import torch + + +class BasePruningMethod(ABC): + r"""Abstract base class for creation of new pruning techniques. + + Provides a skeleton for customization requiring the overriding of methods + such as :meth:`compute_mask` and :meth:`apply`. + """ + + _tensor_name: str + + def __call__(self, module, inputs): + r"""Multiply the mask into original tensor and store the result. + + Multiplies the mask (stored in ``module[name + '_mask']``) + into the original tensor (stored in ``module[name + '_orig']``) + and stores the result into ``module[name]`` by using :meth:`apply_mask`. + + Args: + module (nn.Module): module containing the tensor to prune + inputs: not used. + """ + setattr(module, self._tensor_name, self.apply_mask(module)) + + @abstractmethod + def compute_mask(self, t, default_mask): + r"""Compute and returns a mask for the input tensor ``t``. + + Starting from a base ``default_mask`` (which should be a mask of ones + if the tensor has not been pruned yet), generate a random mask to + apply on top of the ``default_mask`` according to the specific pruning + method recipe. + + Args: + t (torch.Tensor): tensor representing the importance scores of the + parameter to prune. 
+ default_mask (torch.Tensor): Base mask from previous pruning + iterations, that need to be respected after the new mask is + applied. Same dims as ``t``. + + Returns: + mask (torch.Tensor): mask to apply to ``t``, of same dims as ``t`` + """ + pass + + def apply_mask(self, module): + r"""Simply handles the multiplication between the parameter being pruned and the generated mask. + + Fetches the mask and the original tensor from the module + and returns the pruned version of the tensor. + + Args: + module (nn.Module): module containing the tensor to prune + + Returns: + pruned_tensor (torch.Tensor): pruned version of the input tensor + """ + # to carry out the multiplication, the mask needs to have been computed, + # so the pruning method must know what tensor it's operating on + assert self._tensor_name is not None, f"Module {module} has to be pruned" # this gets set in apply() + mask = getattr(module, self._tensor_name + "_mask") + orig = getattr(module, self._tensor_name + "_orig") + pruned_tensor = mask.to(dtype=orig.dtype) * orig + return pruned_tensor + + @classmethod + def apply(cls, module, name, *args, importance_scores=None, **kwargs): + r"""Add pruning on the fly and reparametrization of a tensor. + + Adds the forward pre-hook that enables pruning on the fly and + the reparametrization of a tensor in terms of the original tensor + and the pruning mask. + + Args: + module (nn.Module): module containing the tensor to prune + name (str): parameter name within ``module`` on which pruning + will act. + args: arguments passed on to a subclass of + :class:`BasePruningMethod` + importance_scores (torch.Tensor): tensor of importance scores (of + same shape as module parameter) used to compute mask for pruning. + The values in this tensor indicate the importance of the + corresponding elements in the parameter being pruned. + If unspecified or None, the parameter will be used in its place. + kwargs: keyword arguments passed on to a subclass of a + :class:`BasePruningMethod` + """ + + def _get_composite_method(cls, module, name, *args, **kwargs): + # Check if a pruning method has already been applied to + # `module[name]`. If so, store that in `old_method`. + old_method = None + found = 0 + # there should technically be only 1 hook with hook.name == name + # assert this using `found` + hooks_to_remove = [] + for k, hook in module._forward_pre_hooks.items(): + # if it exists, take existing thing, remove hook, then + # go through normal thing + if isinstance(hook, BasePruningMethod) and hook._tensor_name == name: + old_method = hook + hooks_to_remove.append(k) + found += 1 + assert ( + found <= 1 + ), f"Avoid adding multiple pruning hooks to the\ + same tensor {name} of module {module}. Use a PruningContainer." + + for k in hooks_to_remove: + del module._forward_pre_hooks[k] + + # Apply the new pruning method, either from scratch or on top of + # the previous one. 
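+            # (any matching hook found above has already been removed from
+            # module._forward_pre_hooks; the combined method is re-attached
+            # as a fresh forward pre-hook later in `apply`)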
+ method = cls(*args, **kwargs) # new pruning + # Have the pruning method remember what tensor it's been applied to + method._tensor_name = name + + # combine `methods` with `old_method`, if `old_method` exists + if old_method is not None: # meaning that there was a hook + # if the hook is already a pruning container, just add the + # new pruning method to the container + if isinstance(old_method, PruningContainer): + old_method.add_pruning_method(method) + method = old_method # rename old_method --> method + + # if the hook is simply a single pruning method, create a + # container, add the old pruning method and the new one + elif isinstance(old_method, BasePruningMethod): + container = PruningContainer(old_method) + # Have the pruning method remember the name of its tensor + # setattr(container, '_tensor_name', name) + container.add_pruning_method(method) + method = container # rename container --> method + return method + + method = _get_composite_method(cls, module, name, *args, **kwargs) + # at this point we have no forward_pre_hooks but we could have an + # active reparametrization of the tensor if another pruning method + # had been applied (in which case `method` would be a PruningContainer + # and not a simple pruning method). + + # Pruning is to be applied to the module's tensor named `name`, + # starting from the state it is found in prior to this iteration of + # pruning. The pruning mask is calculated based on importances scores. + + orig = getattr(module, name) + if importance_scores is not None: + assert ( + importance_scores.shape == orig.shape + ), f"importance_scores should have the same shape as parameter {name} of {module}" + else: + importance_scores = orig + + # If this is the first time pruning is applied, take care of moving + # the original tensor to a new parameter called name + '_orig' and + # and deleting the original parameter + if not isinstance(method, PruningContainer): + # copy `module[name]` to `module[name + '_orig']` + module.register_parameter(name + "_orig", orig) + # temporarily delete `module[name]` + del module._parameters[name] + default_mask = torch.ones_like(orig) # temp + # If this is not the first time pruning is applied, all of the above + # has been done before in a previous pruning iteration, so we're good + # to go + else: + default_mask = ( + getattr(module, name + "_mask") + .detach() + .clone(memory_format=torch.contiguous_format) + ) + + # Use try/except because if anything goes wrong with the mask + # computation etc., you'd want to roll back. + try: + # get the final mask, computed according to the specific method + mask = method.compute_mask(importance_scores, default_mask=default_mask) + # reparameterize by saving mask to `module[name + '_mask']`... + module.register_buffer(name + "_mask", mask) + # ... and the new pruned tensor to `module[name]` + setattr(module, name, method.apply_mask(module)) + # associate the pruning method to the module via a hook to + # compute the function before every forward() (compile by run) + module.register_forward_pre_hook(method) + + except Exception as e: + if not isinstance(method, PruningContainer): + orig = getattr(module, name + "_orig") + module.register_parameter(name, orig) + del module._parameters[name + "_orig"] + raise e + + return method + + def prune(self, t, default_mask=None, importance_scores=None): + r"""Compute and returns a pruned version of input tensor ``t``. + + According to the pruning rule specified in :meth:`compute_mask`. 
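+
+        A small sketch of calling this directly on a free tensor (assuming the
+        :class:`L1Unstructured` method defined below):
+
+        .. code-block:: python
+
+            t = torch.tensor([1.0, -2.0, 0.5, 3.0])
+            L1Unstructured(amount=2).prune(t)
+            # -> tensor([ 0., -2.,  0.,  3.])  (the two smallest-magnitude entries are zeroed)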
+ + Args: + t (torch.Tensor): tensor to prune (of same dimensions as + ``default_mask``). + importance_scores (torch.Tensor): tensor of importance scores (of + same shape as ``t``) used to compute mask for pruning ``t``. + The values in this tensor indicate the importance of the + corresponding elements in the ``t`` that is being pruned. + If unspecified or None, the tensor ``t`` will be used in its place. + default_mask (torch.Tensor, optional): mask from previous pruning + iteration, if any. To be considered when determining what + portion of the tensor that pruning should act on. If None, + default to a mask of ones. + + Returns: + pruned version of tensor ``t``. + """ + if importance_scores is not None: + assert ( + importance_scores.shape == t.shape + ), "importance_scores should have the same shape as tensor t" + else: + importance_scores = t + default_mask = default_mask if default_mask is not None else torch.ones_like(t) + return t * self.compute_mask(importance_scores, default_mask=default_mask) + + def remove(self, module): + r"""Remove the pruning reparameterization from a module. + + The pruned parameter named ``name`` remains permanently pruned, + and the parameter named ``name+'_orig'`` is removed from the parameter list. + Similarly, the buffer named ``name+'_mask'`` is removed from the buffers. + + Note: + Pruning itself is NOT undone or reversed! + """ + # before removing pruning from a tensor, it has to have been applied + assert ( + self._tensor_name is not None + ), f"Module {module} has to be pruned before pruning can be removed" # this gets set in apply() + + # to update module[name] to latest trained weights + weight = self.apply_mask(module) # masked weights + + # delete and reset + if hasattr(module, self._tensor_name): + delattr(module, self._tensor_name) + orig = module._parameters[self._tensor_name + "_orig"] + orig.data = weight.data + del module._parameters[self._tensor_name + "_orig"] + del module._buffers[self._tensor_name + "_mask"] + setattr(module, self._tensor_name, orig) + + +class PruningContainer(BasePruningMethod): + """Container holding a sequence of pruning methods for iterative pruning. + + Keeps track of the order in which pruning methods are applied and handles + combining successive pruning calls. + + Accepts as argument an instance of a BasePruningMethod or an iterable of + them. + """ + + def __init__(self, *args): + self._pruning_methods: Tuple[BasePruningMethod, ...] = tuple() + if not isinstance(args, Iterable): # only 1 item + self._tensor_name = args._tensor_name + self.add_pruning_method(args) + elif len(args) == 1: # only 1 item in a tuple + self._tensor_name = args[0]._tensor_name + self.add_pruning_method(args[0]) + else: # manual construction from list or other iterable (or no args) + for method in args: + self.add_pruning_method(method) + + def add_pruning_method(self, method): + r"""Add a child pruning ``method`` to the container. + + Args: + method (subclass of BasePruningMethod): child pruning method + to be added to the container. + """ + # check that we're adding a pruning method to the container + if not isinstance(method, BasePruningMethod) and method is not None: + raise TypeError( + f"{type(method)} is not a BasePruningMethod subclass" + ) + elif method is not None and self._tensor_name != method._tensor_name: + raise ValueError( + "Can only add pruning methods acting on " + f"the parameter named '{self._tensor_name}' to PruningContainer {self}." 
+ + f" Found '{method._tensor_name}'" + ) + # if all checks passed, add to _pruning_methods tuple + self._pruning_methods += (method,) # type: ignore[operator] + + def __len__(self): + return len(self._pruning_methods) + + def __iter__(self): + return iter(self._pruning_methods) + + def __getitem__(self, idx): + return self._pruning_methods[idx] + + def compute_mask(self, t, default_mask): + r"""Apply the latest ``method`` by computing the new partial masks and returning its combination with the ``default_mask``. + + The new partial mask should be computed on the entries or channels + that were not zeroed out by the ``default_mask``. + Which portions of the tensor ``t`` the new mask will be calculated from + depends on the ``PRUNING_TYPE`` (handled by the type handler): + + * for 'unstructured', the mask will be computed from the raveled + list of nonmasked entries; + + * for 'structured', the mask will be computed from the nonmasked + channels in the tensor; + + * for 'global', the mask will be computed across all entries. + + Args: + t (torch.Tensor): tensor representing the parameter to prune + (of same dimensions as ``default_mask``). + default_mask (torch.Tensor): mask from previous pruning iteration. + + Returns: + mask (torch.Tensor): new mask that combines the effects + of the ``default_mask`` and the new mask from the current + pruning ``method`` (of same dimensions as ``default_mask`` and + ``t``). + """ + + def _combine_masks(method, t, mask): + r"""Combine the masks from all pruning methods and returns a new mask. + + Args: + method (a BasePruningMethod subclass): pruning method + currently being applied. + t (torch.Tensor): tensor representing the parameter to prune + (of same dimensions as mask). + mask (torch.Tensor): mask from previous pruning iteration + + Returns: + new_mask (torch.Tensor): new mask that combines the effects + of the old mask and the new mask from the current + pruning method (of same dimensions as mask and t). + """ + new_mask = mask # start off from existing mask + new_mask = new_mask.to(dtype=t.dtype) + + # compute a slice of t onto which the new pruning method will operate + if method.PRUNING_TYPE == "unstructured": + # prune entries of t where the mask is 1 + slc = mask == 1 + + # for struct pruning, exclude channels that have already been + # entirely pruned + elif method.PRUNING_TYPE == "structured": + if not hasattr(method, "dim"): + raise AttributeError( + "Pruning methods of PRUNING_TYPE " + '"structured" need to have the attribute `dim` defined.' + ) + + # find the channels to keep by removing the ones that have been + # zeroed out already (i.e. where sum(entries) == 0) + n_dims = t.dim() # "is this a 2D tensor? 3D? ..." + dim = method.dim + # convert negative indexing + if dim < 0: + dim = n_dims + dim + # if dim is still negative after subtracting it from n_dims + if dim < 0: + raise IndexError( + f"Index is out of bounds for tensor with dimensions {n_dims}" + ) + # find channels along dim = dim that aren't already tots 0ed out + keep_channel = mask.sum(dim=[d for d in range(n_dims) if d != dim]) != 0 + # create slice to identify what to prune + slc = [slice(None)] * n_dims + slc[dim] = keep_channel + + elif method.PRUNING_TYPE == "global": + n_dims = len(t.shape) # "is this a 2D tensor? 3D? ..." 
+ slc = [slice(None)] * n_dims + + else: + raise ValueError( + f"Unrecognized PRUNING_TYPE {method.PRUNING_TYPE}" + ) + + # compute the new mask on the unpruned slice of the tensor t + partial_mask = method.compute_mask(t[slc], default_mask=mask[slc]) + new_mask[slc] = partial_mask.to(dtype=new_mask.dtype) + + return new_mask + + method = self._pruning_methods[-1] + mask = _combine_masks(method, t, default_mask) + return mask + + +class Identity(BasePruningMethod): + r"""Utility pruning method that does not prune any units but generates the pruning parametrization with a mask of ones.""" + + PRUNING_TYPE = "unstructured" + + def compute_mask(self, t, default_mask): + mask = default_mask + return mask + + @classmethod + def apply(cls, module, name): + r"""Add pruning on the fly and reparametrization of a tensor. + + Adds the forward pre-hook that enables pruning on the fly and + the reparametrization of a tensor in terms of the original tensor + and the pruning mask. + + Args: + module (nn.Module): module containing the tensor to prune + name (str): parameter name within ``module`` on which pruning + will act. + """ + return super().apply(module, name) + + +class RandomUnstructured(BasePruningMethod): + r"""Prune (currently unpruned) units in a tensor at random. + + Args: + name (str): parameter name within ``module`` on which pruning + will act. + amount (int or float): quantity of parameters to prune. + If ``float``, should be between 0.0 and 1.0 and represent the + fraction of parameters to prune. If ``int``, it represents the + absolute number of parameters to prune. + """ + + PRUNING_TYPE = "unstructured" + + def __init__(self, amount): + # Check range of validity of pruning amount + _validate_pruning_amount_init(amount) + self.amount = amount + + def compute_mask(self, t, default_mask): + # Check that the amount of units to prune is not > than the number of + # parameters in t + tensor_size = t.nelement() + # Compute number of units to prune: amount if int, + # else amount * tensor_size + nparams_toprune = _compute_nparams_toprune(self.amount, tensor_size) + # This should raise an error if the number of units to prune is larger + # than the number of units in the tensor + _validate_pruning_amount(nparams_toprune, tensor_size) + + mask = default_mask.clone(memory_format=torch.contiguous_format) + + if nparams_toprune != 0: # k=0 not supported by torch.kthvalue + prob = torch.rand_like(t) + topk = torch.topk(prob.view(-1), k=nparams_toprune) + mask.view(-1)[topk.indices] = 0 + + return mask + + @classmethod + def apply(cls, module, name, amount): + r"""Add pruning on the fly and reparametrization of a tensor. + + Adds the forward pre-hook that enables pruning on the fly and + the reparametrization of a tensor in terms of the original tensor + and the pruning mask. + + Args: + module (nn.Module): module containing the tensor to prune + name (str): parameter name within ``module`` on which pruning + will act. + amount (int or float): quantity of parameters to prune. + If ``float``, should be between 0.0 and 1.0 and represent the + fraction of parameters to prune. If ``int``, it represents the + absolute number of parameters to prune. + """ + return super().apply(module, name, amount=amount) + + +class L1Unstructured(BasePruningMethod): + r"""Prune (currently unpruned) units in a tensor by zeroing out the ones with the lowest L1-norm. + + Args: + amount (int or float): quantity of parameters to prune. 
+ If ``float``, should be between 0.0 and 1.0 and represent the + fraction of parameters to prune. If ``int``, it represents the + absolute number of parameters to prune. + """ + + PRUNING_TYPE = "unstructured" + + def __init__(self, amount): + # Check range of validity of pruning amount + _validate_pruning_amount_init(amount) + self.amount = amount + + def compute_mask(self, t, default_mask): + # Check that the amount of units to prune is not > than the number of + # parameters in t + tensor_size = t.nelement() + # Compute number of units to prune: amount if int, + # else amount * tensor_size + nparams_toprune = _compute_nparams_toprune(self.amount, tensor_size) + # This should raise an error if the number of units to prune is larger + # than the number of units in the tensor + _validate_pruning_amount(nparams_toprune, tensor_size) + + mask = default_mask.clone(memory_format=torch.contiguous_format) + + if nparams_toprune != 0: # k=0 not supported by torch.kthvalue + # largest=True --> top k; largest=False --> bottom k + # Prune the smallest k + topk = torch.topk(torch.abs(t).view(-1), k=nparams_toprune, largest=False) + # topk will have .indices and .values + mask.view(-1)[topk.indices] = 0 + + return mask + + @classmethod + def apply(cls, module, name, amount, importance_scores=None): + r"""Add pruning on the fly and reparametrization of a tensor. + + Adds the forward pre-hook that enables pruning on the fly and + the reparametrization of a tensor in terms of the original tensor + and the pruning mask. + + Args: + module (nn.Module): module containing the tensor to prune + name (str): parameter name within ``module`` on which pruning + will act. + amount (int or float): quantity of parameters to prune. + If ``float``, should be between 0.0 and 1.0 and represent the + fraction of parameters to prune. If ``int``, it represents the + absolute number of parameters to prune. + importance_scores (torch.Tensor): tensor of importance scores (of same + shape as module parameter) used to compute mask for pruning. + The values in this tensor indicate the importance of the corresponding + elements in the parameter being pruned. + If unspecified or None, the module parameter will be used in its place. + """ + return super().apply( + module, name, amount=amount, importance_scores=importance_scores + ) + + +class RandomStructured(BasePruningMethod): + r"""Prune entire (currently unpruned) channels in a tensor at random. + + Args: + amount (int or float): quantity of parameters to prune. + If ``float``, should be between 0.0 and 1.0 and represent the + fraction of parameters to prune. If ``int``, it represents the + absolute number of parameters to prune. + dim (int, optional): index of the dim along which we define + channels to prune. Default: -1. + """ + + PRUNING_TYPE = "structured" + + def __init__(self, amount, dim=-1): + # Check range of validity of amount + _validate_pruning_amount_init(amount) + self.amount = amount + self.dim = dim + + def compute_mask(self, t, default_mask): + r"""Compute and returns a mask for the input tensor ``t``. + + Starting from a base ``default_mask`` (which should be a mask of ones + if the tensor has not been pruned yet), generate a random mask to + apply on top of the ``default_mask`` by randomly zeroing out channels + along the specified dim of the tensor. + + Args: + t (torch.Tensor): tensor representing the parameter to prune + default_mask (torch.Tensor): Base mask from previous pruning + iterations, that need to be respected after the new mask is + applied. 
Same dims as ``t``. + + Returns: + mask (torch.Tensor): mask to apply to ``t``, of same dims as ``t`` + + Raises: + IndexError: if ``self.dim >= len(t.shape)`` + """ + # Check that tensor has structure (i.e. more than 1 dimension) such + # that the concept of "channels" makes sense + _validate_structured_pruning(t) + + # Check that self.dim is a valid dim to index t, else raise IndexError + _validate_pruning_dim(t, self.dim) + + # Check that the amount of channels to prune is not > than the number of + # channels in t along the dim to prune + tensor_size = t.shape[self.dim] + # Compute number of units to prune: amount if int, + # else amount * tensor_size + nparams_toprune = _compute_nparams_toprune(self.amount, tensor_size) + # This should raise an error if the number of units to prune is larger + # than the number of units in the tensor + _validate_pruning_amount(nparams_toprune, tensor_size) + + # Compute binary mask by initializing it to all 0s and then filling in + # 1s wherever topk.indices indicates, along self.dim. + # mask has the same shape as tensor t + def make_mask(t, dim, nchannels, nchannels_toprune): + # generate a random number in [0, 1] to associate to each channel + prob = torch.rand(nchannels) + # generate mask for each channel by 0ing out the channels that + # got assigned the k = nchannels_toprune lowest values in prob + threshold = torch.kthvalue(prob, k=nchannels_toprune).values + channel_mask = prob > threshold + + mask = torch.zeros_like(t) + slc = [slice(None)] * len(t.shape) + slc[dim] = channel_mask + mask[slc] = 1 + return mask + + if nparams_toprune == 0: # k=0 not supported by torch.kthvalue + mask = default_mask + else: + # apply the new structured mask on top of prior (potentially + # unstructured) mask + mask = make_mask(t, self.dim, tensor_size, nparams_toprune) + mask *= default_mask.to(dtype=mask.dtype) + return mask + + @classmethod + def apply(cls, module, name, amount, dim=-1): + r"""Add pruning on the fly and reparametrization of a tensor. + + Adds the forward pre-hook that enables pruning on the fly and + the reparametrization of a tensor in terms of the original tensor + and the pruning mask. + + Args: + module (nn.Module): module containing the tensor to prune + name (str): parameter name within ``module`` on which pruning + will act. + amount (int or float): quantity of parameters to prune. + If ``float``, should be between 0.0 and 1.0 and represent the + fraction of parameters to prune. If ``int``, it represents the + absolute number of parameters to prune. + dim (int, optional): index of the dim along which we define + channels to prune. Default: -1. + """ + return super().apply(module, name, amount=amount, dim=dim) + + +class LnStructured(BasePruningMethod): + r"""Prune entire (currently unpruned) channels in a tensor based on their L\ ``n``-norm. + + Args: + amount (int or float): quantity of channels to prune. + If ``float``, should be between 0.0 and 1.0 and represent the + fraction of parameters to prune. If ``int``, it represents the + absolute number of parameters to prune. + n (int, float, inf, -inf, 'fro', 'nuc'): See documentation of valid + entries for argument ``p`` in :func:`torch.norm`. + dim (int, optional): index of the dim along which we define + channels to prune. Default: -1. 
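+
+    A minimal sketch (assuming ``import torch.nn as nn``; the single output channel of a
+    linear weight with the smallest L1 norm gets zeroed):
+
+    .. code-block:: python
+
+        m = nn.Linear(3, 4)
+        LnStructured.apply(m, "weight", amount=1, n=1, dim=0)
+        # exactly one row of m.weight is now all zeros
+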
+ """ + + PRUNING_TYPE = "structured" + + def __init__(self, amount, n, dim=-1): + # Check range of validity of amount + _validate_pruning_amount_init(amount) + self.amount = amount + self.n = n + self.dim = dim + + def compute_mask(self, t, default_mask): + r"""Compute and returns a mask for the input tensor ``t``. + + Starting from a base ``default_mask`` (which should be a mask of ones + if the tensor has not been pruned yet), generate a mask to apply on + top of the ``default_mask`` by zeroing out the channels along the + specified dim with the lowest L\ ``n``-norm. + + Args: + t (torch.Tensor): tensor representing the parameter to prune + default_mask (torch.Tensor): Base mask from previous pruning + iterations, that need to be respected after the new mask is + applied. Same dims as ``t``. + + Returns: + mask (torch.Tensor): mask to apply to ``t``, of same dims as ``t`` + + Raises: + IndexError: if ``self.dim >= len(t.shape)`` + """ + # Check that tensor has structure (i.e. more than 1 dimension) such + # that the concept of "channels" makes sense + _validate_structured_pruning(t) + # Check that self.dim is a valid dim to index t, else raise IndexError + _validate_pruning_dim(t, self.dim) + + # Check that the amount of channels to prune is not > than the number of + # channels in t along the dim to prune + tensor_size = t.shape[self.dim] + # Compute number of units to prune: amount if int, + # else amount * tensor_size + nparams_toprune = _compute_nparams_toprune(self.amount, tensor_size) + nparams_tokeep = tensor_size - nparams_toprune + # This should raise an error if the number of units to prune is larger + # than the number of units in the tensor + _validate_pruning_amount(nparams_toprune, tensor_size) + + # Structured pruning prunes entire channels so we need to know the + # L_n norm along each channel to then find the topk based on this + # metric + norm = _compute_norm(t, self.n, self.dim) + # largest=True --> top k; largest=False --> bottom k + # Keep the largest k channels along dim=self.dim + topk = torch.topk(norm, k=nparams_tokeep, largest=True) + # topk will have .indices and .values + + # Compute binary mask by initializing it to all 0s and then filling in + # 1s wherever topk.indices indicates, along self.dim. + # mask has the same shape as tensor t + def make_mask(t, dim, indices): + # init mask to 0 + mask = torch.zeros_like(t) + # e.g.: slc = [None, None, None], if len(t.shape) = 3 + slc = [slice(None)] * len(t.shape) + # replace a None at position=dim with indices + # e.g.: slc = [None, None, [0, 2, 3]] if dim=2 & indices=[0,2,3] + slc[dim] = indices + # use slc to slice mask and replace all its entries with 1s + # e.g.: mask[:, :, [0, 2, 3]] = 1 + mask[slc] = 1 + return mask + + if nparams_toprune == 0: # k=0 not supported by torch.kthvalue + mask = default_mask + else: + mask = make_mask(t, self.dim, topk.indices) + mask *= default_mask.to(dtype=mask.dtype) + + return mask + + @classmethod + def apply(cls, module, name, amount, n, dim, importance_scores=None): + r"""Add pruning on the fly and reparametrization of a tensor. + + Adds the forward pre-hook that enables pruning on the fly and + the reparametrization of a tensor in terms of the original tensor + and the pruning mask. + + Args: + module (nn.Module): module containing the tensor to prune + name (str): parameter name within ``module`` on which pruning + will act. + amount (int or float): quantity of parameters to prune. 
+ If ``float``, should be between 0.0 and 1.0 and represent the + fraction of parameters to prune. If ``int``, it represents the + absolute number of parameters to prune. + n (int, float, inf, -inf, 'fro', 'nuc'): See documentation of valid + entries for argument ``p`` in :func:`torch.norm`. + dim (int): index of the dim along which we define channels to + prune. + importance_scores (torch.Tensor): tensor of importance scores (of same + shape as module parameter) used to compute mask for pruning. + The values in this tensor indicate the importance of the corresponding + elements in the parameter being pruned. + If unspecified or None, the module parameter will be used in its place. + """ + return super().apply( + module, + name, + amount=amount, + n=n, + dim=dim, + importance_scores=importance_scores, + ) + + +class CustomFromMask(BasePruningMethod): + + PRUNING_TYPE = "global" + + def __init__(self, mask): + self.mask = mask + + def compute_mask(self, t, default_mask): + assert default_mask.shape == self.mask.shape + mask = default_mask * self.mask.to(dtype=default_mask.dtype) + return mask + + @classmethod + def apply(cls, module, name, mask): + r"""Add pruning on the fly and reparametrization of a tensor. + + Adds the forward pre-hook that enables pruning on the fly and + the reparametrization of a tensor in terms of the original tensor + and the pruning mask. + + Args: + module (nn.Module): module containing the tensor to prune + name (str): parameter name within ``module`` on which pruning + will act. + """ + return super().apply(module, name, mask=mask) + + +def identity(module, name): + r"""Apply pruning reparametrization without pruning any units. + + Applies pruning reparametrization to the tensor corresponding to the + parameter called ``name`` in ``module`` without actually pruning any + units. Modifies module in place (and also return the modified module) + by: + + 1) adding a named buffer called ``name+'_mask'`` corresponding to the + binary mask applied to the parameter ``name`` by the pruning method. + 2) replacing the parameter ``name`` by its pruned version, while the + original (unpruned) parameter is stored in a new parameter named + ``name+'_orig'``. + + Note: + The mask is a tensor of ones. + + Args: + module (nn.Module): module containing the tensor to prune. + name (str): parameter name within ``module`` on which pruning + will act. + + Returns: + module (nn.Module): modified (i.e. pruned) version of the input module + + Examples: + >>> # xdoctest: +SKIP + >>> m = prune.identity(nn.Linear(2, 3), 'bias') + >>> print(m.bias_mask) + tensor([1., 1., 1.]) + """ + Identity.apply(module, name) + return module + + +def random_unstructured(module, name, amount): + r"""Prune tensor by removing random (currently unpruned) units. + + Prunes tensor corresponding to parameter called ``name`` in ``module`` + by removing the specified ``amount`` of (currently unpruned) units + selected at random. + Modifies module in place (and also return the modified module) by: + + 1) adding a named buffer called ``name+'_mask'`` corresponding to the + binary mask applied to the parameter ``name`` by the pruning method. + 2) replacing the parameter ``name`` by its pruned version, while the + original (unpruned) parameter is stored in a new parameter named + ``name+'_orig'``. + + Args: + module (nn.Module): module containing the tensor to prune + name (str): parameter name within ``module`` on which pruning + will act. + amount (int or float): quantity of parameters to prune. 
+ If ``float``, should be between 0.0 and 1.0 and represent the + fraction of parameters to prune. If ``int``, it represents the + absolute number of parameters to prune. + + Returns: + module (nn.Module): modified (i.e. pruned) version of the input module + + Examples: + >>> # xdoctest: +SKIP + >>> m = prune.random_unstructured(nn.Linear(2, 3), 'weight', amount=1) + >>> torch.sum(m.weight_mask == 0) + tensor(1) + + """ + RandomUnstructured.apply(module, name, amount) + return module + + +def l1_unstructured(module, name, amount, importance_scores=None): + r"""Prune tensor by removing units with the lowest L1-norm. + + Prunes tensor corresponding to parameter called ``name`` in ``module`` + by removing the specified `amount` of (currently unpruned) units with the + lowest L1-norm. + Modifies module in place (and also return the modified module) + by: + + 1) adding a named buffer called ``name+'_mask'`` corresponding to the + binary mask applied to the parameter ``name`` by the pruning method. + 2) replacing the parameter ``name`` by its pruned version, while the + original (unpruned) parameter is stored in a new parameter named + ``name+'_orig'``. + + Args: + module (nn.Module): module containing the tensor to prune + name (str): parameter name within ``module`` on which pruning + will act. + amount (int or float): quantity of parameters to prune. + If ``float``, should be between 0.0 and 1.0 and represent the + fraction of parameters to prune. If ``int``, it represents the + absolute number of parameters to prune. + importance_scores (torch.Tensor): tensor of importance scores (of same + shape as module parameter) used to compute mask for pruning. + The values in this tensor indicate the importance of the corresponding + elements in the parameter being pruned. + If unspecified or None, the module parameter will be used in its place. + + Returns: + module (nn.Module): modified (i.e. pruned) version of the input module + + Examples: + >>> # xdoctest: +SKIP + >>> m = prune.l1_unstructured(nn.Linear(2, 3), 'weight', amount=0.2) + >>> m.state_dict().keys() + odict_keys(['bias', 'weight_orig', 'weight_mask']) + """ + L1Unstructured.apply( + module, name, amount=amount, importance_scores=importance_scores + ) + return module + + +def random_structured(module, name, amount, dim): + r"""Prune tensor by removing random channels along the specified dimension. + + Prunes tensor corresponding to parameter called ``name`` in ``module`` + by removing the specified ``amount`` of (currently unpruned) channels + along the specified ``dim`` selected at random. + Modifies module in place (and also return the modified module) + by: + + 1) adding a named buffer called ``name+'_mask'`` corresponding to the + binary mask applied to the parameter ``name`` by the pruning method. + 2) replacing the parameter ``name`` by its pruned version, while the + original (unpruned) parameter is stored in a new parameter named + ``name+'_orig'``. + + Args: + module (nn.Module): module containing the tensor to prune + name (str): parameter name within ``module`` on which pruning + will act. + amount (int or float): quantity of parameters to prune. + If ``float``, should be between 0.0 and 1.0 and represent the + fraction of parameters to prune. If ``int``, it represents the + absolute number of parameters to prune. + dim (int): index of the dim along which we define channels to prune. + + Returns: + module (nn.Module): modified (i.e. 
pruned) version of the input module + + Examples: + >>> # xdoctest: +SKIP + >>> m = prune.random_structured( + ... nn.Linear(5, 3), 'weight', amount=3, dim=1 + ... ) + >>> columns_pruned = int(sum(torch.sum(m.weight, dim=0) == 0)) + >>> print(columns_pruned) + 3 + """ + RandomStructured.apply(module, name, amount, dim) + return module + + +def ln_structured(module, name, amount, n, dim, importance_scores=None): + r"""Prune tensor by removing channels with the lowest L\ ``n``-norm along the specified dimension. + + Prunes tensor corresponding to parameter called ``name`` in ``module`` + by removing the specified ``amount`` of (currently unpruned) channels + along the specified ``dim`` with the lowest L\ ``n``-norm. + Modifies module in place (and also return the modified module) + by: + + 1) adding a named buffer called ``name+'_mask'`` corresponding to the + binary mask applied to the parameter ``name`` by the pruning method. + 2) replacing the parameter ``name`` by its pruned version, while the + original (unpruned) parameter is stored in a new parameter named + ``name+'_orig'``. + + Args: + module (nn.Module): module containing the tensor to prune + name (str): parameter name within ``module`` on which pruning + will act. + amount (int or float): quantity of parameters to prune. + If ``float``, should be between 0.0 and 1.0 and represent the + fraction of parameters to prune. If ``int``, it represents the + absolute number of parameters to prune. + n (int, float, inf, -inf, 'fro', 'nuc'): See documentation of valid + entries for argument ``p`` in :func:`torch.norm`. + dim (int): index of the dim along which we define channels to prune. + importance_scores (torch.Tensor): tensor of importance scores (of same + shape as module parameter) used to compute mask for pruning. + The values in this tensor indicate the importance of the corresponding + elements in the parameter being pruned. + If unspecified or None, the module parameter will be used in its place. + + Returns: + module (nn.Module): modified (i.e. pruned) version of the input module + + Examples: + >>> from torch.nn.utils import prune + >>> m = prune.ln_structured( + ... nn.Conv2d(5, 3, 2), 'weight', amount=0.3, dim=1, n=float('-inf') + ... ) + """ + LnStructured.apply( + module, name, amount, n, dim, importance_scores=importance_scores + ) + return module + + +def global_unstructured(parameters, pruning_method, importance_scores=None, **kwargs): + r""" + Globally prunes tensors corresponding to all parameters in ``parameters`` by applying the specified ``pruning_method``. + + Modifies modules in place by: + + 1) adding a named buffer called ``name+'_mask'`` corresponding to the + binary mask applied to the parameter ``name`` by the pruning method. + 2) replacing the parameter ``name`` by its pruned version, while the + original (unpruned) parameter is stored in a new parameter named + ``name+'_orig'``. + + Args: + parameters (Iterable of (module, name) tuples): parameters of + the model to prune in a global fashion, i.e. by aggregating all + weights prior to deciding which ones to prune. module must be of + type :class:`nn.Module`, and name must be a string. + pruning_method (function): a valid pruning function from this module, + or a custom one implemented by the user that satisfies the + implementation guidelines and has ``PRUNING_TYPE='unstructured'``. + importance_scores (dict): a dictionary mapping (module, name) tuples to + the corresponding parameter's importance scores tensor. 
The tensor + should be the same shape as the parameter, and is used for computing + mask for pruning. + If unspecified or None, the parameter will be used in place of its + importance scores. + kwargs: other keyword arguments such as: + amount (int or float): quantity of parameters to prune across the + specified parameters. + If ``float``, should be between 0.0 and 1.0 and represent the + fraction of parameters to prune. If ``int``, it represents the + absolute number of parameters to prune. + + Raises: + TypeError: if ``PRUNING_TYPE != 'unstructured'`` + + Note: + Since global structured pruning doesn't make much sense unless the + norm is normalized by the size of the parameter, we now limit the + scope of global pruning to unstructured methods. + + Examples: + >>> from torch.nn.utils import prune + >>> from collections import OrderedDict + >>> net = nn.Sequential(OrderedDict([ + ... ('first', nn.Linear(10, 4)), + ... ('second', nn.Linear(4, 1)), + ... ])) + >>> parameters_to_prune = ( + ... (net.first, 'weight'), + ... (net.second, 'weight'), + ... ) + >>> prune.global_unstructured( + ... parameters_to_prune, + ... pruning_method=prune.L1Unstructured, + ... amount=10, + ... ) + >>> print(sum(torch.nn.utils.parameters_to_vector(net.buffers()) == 0)) + tensor(10) + + """ + # ensure parameters is a list or generator of tuples + if not isinstance(parameters, Iterable): + raise TypeError("global_unstructured(): parameters is not an Iterable") + + importance_scores = importance_scores if importance_scores is not None else {} + if not isinstance(importance_scores, dict): + raise TypeError("global_unstructured(): importance_scores must be of type dict") + + # flatten importance scores to consider them all at once in global pruning + relevant_importance_scores = torch.nn.utils.parameters_to_vector( + [ + importance_scores.get((module, name), getattr(module, name)) + for (module, name) in parameters + ] + ) + # similarly, flatten the masks (if they exist), or use a flattened vector + # of 1s of the same dimensions as t + default_mask = torch.nn.utils.parameters_to_vector( + [ + getattr(module, name + "_mask", torch.ones_like(getattr(module, name))) + for (module, name) in parameters + ] + ) + + # use the canonical pruning methods to compute the new mask, even if the + # parameter is now a flattened out version of `parameters` + container = PruningContainer() + container._tensor_name = "temp" # to make it match that of `method` + method = pruning_method(**kwargs) + method._tensor_name = "temp" # to make it match that of `container` + if method.PRUNING_TYPE != "unstructured": + raise TypeError( + 'Only "unstructured" PRUNING_TYPE supported for ' + f"the `pruning_method`. 
Found method {pruning_method} of type {method.PRUNING_TYPE}" + ) + + container.add_pruning_method(method) + + # use the `compute_mask` method from `PruningContainer` to combine the + # mask computed by the new method with the pre-existing mask + final_mask = container.compute_mask(relevant_importance_scores, default_mask) + + # Pointer for slicing the mask to match the shape of each parameter + pointer = 0 + for module, name in parameters: + + param = getattr(module, name) + # The length of the parameter + num_param = param.numel() + # Slice the mask, reshape it + param_mask = final_mask[pointer : pointer + num_param].view_as(param) + # Assign the correct pre-computed mask to each parameter and add it + # to the forward_pre_hooks like any other pruning method + custom_from_mask(module, name, mask=param_mask) + + # Increment the pointer to continue slicing the final_mask + pointer += num_param + + +def custom_from_mask(module, name, mask): + r"""Prune tensor corresponding to parameter called ``name`` in ``module`` by applying the pre-computed mask in ``mask``. + + Modifies module in place (and also return the modified module) by: + + 1) adding a named buffer called ``name+'_mask'`` corresponding to the + binary mask applied to the parameter ``name`` by the pruning method. + 2) replacing the parameter ``name`` by its pruned version, while the + original (unpruned) parameter is stored in a new parameter named + ``name+'_orig'``. + + Args: + module (nn.Module): module containing the tensor to prune + name (str): parameter name within ``module`` on which pruning + will act. + mask (Tensor): binary mask to be applied to the parameter. + + Returns: + module (nn.Module): modified (i.e. pruned) version of the input module + + Examples: + >>> from torch.nn.utils import prune + >>> m = prune.custom_from_mask( + ... nn.Linear(5, 3), name='bias', mask=torch.tensor([0, 1, 0]) + ... ) + >>> print(m.bias_mask) + tensor([0., 1., 0.]) + + """ + CustomFromMask.apply(module, name, mask) + return module + + +def remove(module, name): + r"""Remove the pruning reparameterization from a module and the pruning method from the forward hook. + + The pruned parameter named ``name`` remains permanently pruned, and the parameter + named ``name+'_orig'`` is removed from the parameter list. Similarly, + the buffer named ``name+'_mask'`` is removed from the buffers. + + Note: + Pruning itself is NOT undone or reversed! + + Args: + module (nn.Module): module containing the tensor to prune + name (str): parameter name within ``module`` on which pruning + will act. + + Examples: + >>> m = random_unstructured(nn.Linear(5, 7), name='weight', amount=0.2) + >>> m = remove(m, name='weight') + """ + for k, hook in module._forward_pre_hooks.items(): + if isinstance(hook, BasePruningMethod) and hook._tensor_name == name: + hook.remove(module) + del module._forward_pre_hooks[k] + return module + + raise ValueError( + f"Parameter '{name}' of module {module} has to be pruned before pruning can be removed" + ) + + +def is_pruned(module): + r"""Check if a module is pruned by looking for pruning pre-hooks. + + Check whether ``module`` is pruned by looking for + ``forward_pre_hooks`` in its modules that inherit from the + :class:`BasePruningMethod`. + + Args: + module (nn.Module): object that is either pruned or unpruned + + Returns: + binary answer to whether ``module`` is pruned. 
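+
+    The check also walks submodules, so pruning any child marks the parent as pruned
+    (a sketch, assuming ``import torch.nn as nn`` and ``from torch.nn.utils import prune``):
+
+    .. code-block:: python
+
+        seq = nn.Sequential(nn.Linear(2, 2))
+        prune.random_unstructured(seq[0], name="weight", amount=1)
+        assert prune.is_pruned(seq)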
+ + Examples: + >>> from torch.nn.utils import prune + >>> m = nn.Linear(5, 7) + >>> print(prune.is_pruned(m)) + False + >>> prune.random_unstructured(m, name='weight', amount=0.2) + >>> print(prune.is_pruned(m)) + True + """ + for _, submodule in module.named_modules(): + for hook in submodule._forward_pre_hooks.values(): + if isinstance(hook, BasePruningMethod): + return True + return False + + +def _validate_pruning_amount_init(amount): + r"""Validate helper to check the range of amount at init. + + Args: + amount (int or float): quantity of parameters to prune. + If float, should be between 0.0 and 1.0 and represent the + fraction of parameters to prune. If int, it represents the + absolute number of parameters to prune. + + Raises: + ValueError: if amount is a float not in [0, 1], or if it's a negative + integer. + TypeError: if amount is neither a float nor an integer. + + Note: + This does not take into account the number of parameters in the + tensor to be pruned, which is known only at prune. + """ + if not isinstance(amount, numbers.Real): + raise TypeError( + f"Invalid type for amount: {amount}. Must be int or float." + ) + + if (isinstance(amount, numbers.Integral) and amount < 0) or ( + not isinstance(amount, numbers.Integral) # so it's a float + and (float(amount) > 1.0 or float(amount) < 0.0) + ): + raise ValueError( + f"amount={amount} should either be a float in the range [0, 1] or a non-negative integer" + ) + + +def _validate_pruning_amount(amount, tensor_size): + r"""Validate that the pruning amount is meaningful wrt to the size of the data. + + Validation helper to check that the amount of parameters to prune + is meaningful wrt to the size of the data (`tensor_size`). + + Args: + amount (int or float): quantity of parameters to prune. + If float, should be between 0.0 and 1.0 and represent the + fraction of parameters to prune. If int, it represents the + absolute number of parameters to prune. + tensor_size (int): absolute number of parameters in the tensor + to prune. + """ + # TODO: consider removing this check and allowing users to specify + # a number of units to prune that is greater than the number of units + # left to prune. In this case, the tensor will just be fully pruned. + + if isinstance(amount, numbers.Integral) and amount > tensor_size: + raise ValueError( + f"amount={amount} should be smaller than the number of parameters to prune={tensor_size}" + ) + + +def _validate_structured_pruning(t): + r"""Validate that the tensor to be pruned is at least 2-Dimensional. + + Validation helper to check that the tensor to be pruned is multi- + dimensional, such that the concept of "channels" is well-defined. + + Args: + t (torch.Tensor): tensor representing the parameter to prune + + Raises: + ValueError: if the tensor `t` is not at least 2D. + """ + shape = t.shape + if len(shape) <= 1: + raise ValueError( + "Structured pruning can only be applied to " + "multidimensional tensors. Found tensor of shape " + f"{shape} with {len(shape)} dims" + ) + + +def _compute_nparams_toprune(amount, tensor_size): + r"""Convert the pruning amount from a percentage to absolute value. + + Since amount can be expressed either in absolute value or as a + percentage of the number of units/channels in a tensor, this utility + function converts the percentage to absolute value to standardize + the handling of pruning. + + Args: + amount (int or float): quantity of parameters to prune. + If float, should be between 0.0 and 1.0 and represent the + fraction of parameters to prune. 
If int, it represents the + absolute number of parameters to prune. + tensor_size (int): absolute number of parameters in the tensor + to prune. + + Returns: + int: the number of units to prune in the tensor + """ + # incorrect type already checked in _validate_pruning_amount_init + if isinstance(amount, numbers.Integral): + return amount + else: + return round(amount * tensor_size) + + +def _validate_pruning_dim(t, dim): + r"""Validate that the pruning dimension is within the bounds of the tensor dimension. + + Args: + t (torch.Tensor): tensor representing the parameter to prune + dim (int): index of the dim along which we define channels to prune + """ + if dim >= t.dim(): + raise IndexError(f"Invalid index {dim} for tensor of size {t.shape}") + + +def _compute_norm(t, n, dim): + r"""Compute the L_n-norm of a tensor along all dimensions except for the specified dimension. + + The L_n-norm will be computed across all entries in tensor `t` along all dimension + except for the one identified by dim. + Example: if `t` is of shape, say, 3x2x4 and dim=2 (the last dim), + then norm will have Size [4], and each entry will represent the + `L_n`-norm computed using the 3x2=6 entries for each of the 4 channels. + + Args: + t (torch.Tensor): tensor representing the parameter to prune + n (int, float, inf, -inf, 'fro', 'nuc'): See documentation of valid + entries for argument p in torch.norm + dim (int): dim identifying the channels to prune + + Returns: + norm (torch.Tensor): L_n norm computed across all dimensions except + for `dim`. By construction, `norm.shape = t.shape[-1]`. + """ + # dims = all axes, except for the one identified by `dim` + dims = list(range(t.dim())) + # convert negative indexing + if dim < 0: + dim = dims[dim] + dims.remove(dim) + + norm = torch.norm(t, p=n, dim=dims) + return norm diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/rnn.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/rnn.py new file mode 100644 index 0000000000000000000000000000000000000000..2a3ff1f1de9a90c2570e92cdcdcdd8b56730cad5 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/rnn.py @@ -0,0 +1,517 @@ +import warnings +from typing import Iterable, List, NamedTuple, Tuple, Union + +import torch +from torch import Tensor +from ... import _VF +from ..._jit_internal import Optional + + +__all__ = ['PackedSequence', 'invert_permutation', 'pack_padded_sequence', 'pad_packed_sequence', 'pad_sequence', + 'unpad_sequence', 'pack_sequence', 'unpack_sequence'] + + +class PackedSequence_(NamedTuple): + data: torch.Tensor + batch_sizes: torch.Tensor + sorted_indices: Optional[torch.Tensor] + unsorted_indices: Optional[torch.Tensor] + + +def bind(optional, fn): + if optional is None: + return None + return fn(optional) + + +class PackedSequence(PackedSequence_): + r"""Holds the data and list of :attr:`batch_sizes` of a packed sequence. + + All RNN modules accept packed sequences as inputs. + + Note: + Instances of this class should never be created manually. They are meant + to be instantiated by functions like :func:`pack_padded_sequence`. + + Batch sizes represent the number elements at each sequence step in + the batch, not the varying sequence lengths passed to + :func:`pack_padded_sequence`. For instance, given data ``abc`` and ``x`` + the :class:`PackedSequence` would contain data ``axbc`` with + ``batch_sizes=[2,1,1]``. 
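+
+        An illustrative doctest of that layout, using integer stand-ins for
+        ``abc`` and ``x`` (values chosen here only for concreteness):
+
+            >>> from torch.nn.utils.rnn import pack_sequence
+            >>> abc = torch.tensor([1, 2, 3])
+            >>> x = torch.tensor([4])
+            >>> pack_sequence([abc, x])
+            PackedSequence(data=tensor([1, 4, 2, 3]), batch_sizes=tensor([2, 1, 1]), sorted_indices=None, unsorted_indices=None)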
+ + Attributes: + data (Tensor): Tensor containing packed sequence + batch_sizes (Tensor): Tensor of integers holding + information about the batch size at each sequence step + sorted_indices (Tensor, optional): Tensor of integers holding how this + :class:`PackedSequence` is constructed from sequences. + unsorted_indices (Tensor, optional): Tensor of integers holding how this + to recover the original sequences with correct order. + + .. note:: + :attr:`data` can be on arbitrary device and of arbitrary dtype. + :attr:`sorted_indices` and :attr:`unsorted_indices` must be ``torch.int64`` + tensors on the same device as :attr:`data`. + + However, :attr:`batch_sizes` should always be a CPU ``torch.int64`` tensor. + + This invariant is maintained throughout :class:`PackedSequence` class, + and all functions that construct a :class:`PackedSequence` in PyTorch + (i.e., they only pass in tensors conforming to this constraint). + + """ + + def __new__(cls, data, batch_sizes=None, sorted_indices=None, unsorted_indices=None): + return super().__new__( + cls, + *_packed_sequence_init_args(data, batch_sizes, sorted_indices, + unsorted_indices)) + + # NOTE [ device and dtype of a PackedSequence ] + # + # See the note above in doc string (starting with ":attr:`data` can be on + # arbitrary device..."). + def pin_memory(self): + # Why not convert `batch_sizes`? + # See NOTE [ device and dtype of a PackedSequence ] + return type(self)(self.data.pin_memory(), self.batch_sizes, + bind(self.sorted_indices, lambda t: t.pin_memory()), + bind(self.unsorted_indices, lambda t: t.pin_memory())) + + def cuda(self, *args, **kwargs): + # Tests to see if 'cuda' should be added to kwargs + ex = torch.tensor((), dtype=self.data.dtype, device=self.data.device).to(*args, **kwargs) + if ex.is_cuda: + return self.to(*args, **kwargs) + return self.to(*args, device='cuda', **kwargs) + + def cpu(self, *args, **kwargs): + + ex = torch.tensor((), dtype=self.data.dtype, device=self.data.device).to(*args, **kwargs) + if ex.device.type == 'cpu': + return self.to(*args, **kwargs) + return self.to(*args, device='cpu', **kwargs) + + def double(self): + return self.to(dtype=torch.double) + + def float(self): + return self.to(dtype=torch.float) + + def half(self): + return self.to(dtype=torch.half) + + def long(self): + return self.to(dtype=torch.long) + + def int(self): + return self.to(dtype=torch.int) + + def short(self): + return self.to(dtype=torch.short) + + def char(self): + return self.to(dtype=torch.int8) + + def byte(self): + return self.to(dtype=torch.uint8) + + def to(self, *args, **kwargs): + r"""Perform dtype and/or device conversion on `self.data`. + + It has similar signature as :meth:`torch.Tensor.to`, except optional + arguments like `non_blocking` and `copy` should be passed as kwargs, + not args, or they will not apply to the index tensors. + + .. note:: + + If the ``self.data`` Tensor already has the correct :class:`torch.dtype` + and :class:`torch.device`, then ``self`` is returned. + Otherwise, returns a copy with the desired configuration. + """ + # Why not convert `batch_sizes`? 
+ # See NOTE [ device and dtype of a PackedSequence ] + data = self.data.to(*args, **kwargs) + if data is self.data: + return self + else: + # Does not forward device or dtype arg/kwargs, device is set from data.device + kwargs = dict(filter(lambda t: t[0] != 'device' and t[0] != 'dtype', kwargs.items())) + sorted_indices = bind(self.sorted_indices, lambda t: t.to(data.device, **kwargs)) + unsorted_indices = bind(self.unsorted_indices, lambda t: t.to(data.device, **kwargs)) + return type(self)(data, self.batch_sizes, sorted_indices, unsorted_indices) + + @property + def is_cuda(self): + r"""Return true if `self.data` stored on a gpu.""" + return self.data.is_cuda + + def is_pinned(self): + r"""Return true if `self.data` stored on in pinned memory.""" + return self.data.is_pinned() + + +# TorchScript doesn't support constructors on named tuples, so we use this helper +# method to construct PackedSequence +def _packed_sequence_init_args( + data: Tensor, + batch_sizes: Optional[Tensor] = None, + sorted_indices: Optional[Tensor] = None, + unsorted_indices: Optional[Tensor] = None, +) -> Tuple[Tensor, Tensor, Optional[Tensor], Optional[Tensor]]: + # NB: if unsorted_indices is provided, it should be the inverse permutation + # to sorted_indices. Don't assert it here because the PackedSequence ctor + # should only be used internally. + + if unsorted_indices is None: + unsorted_indices = invert_permutation(sorted_indices) + + # support being called as `PackedSequence(data, batch_sizes, sorted_indices)` + if batch_sizes is not None: + # TODO: Re-enable this check (.type isn't supported in TorchScript) + if batch_sizes.device.type != 'cpu': + raise ValueError( + "batch_sizes should always be on CPU. " + "Instances of PackedSequence should never be created manually. " + "They should be instantiated by functions like pack_sequence " + "and pack_padded_sequences in nn.utils.rnn. " + "https://pytorch.org/docs/stable/nn.html#torch.nn.utils.rnn.pack_sequence") + return data, batch_sizes, sorted_indices, unsorted_indices + + # support being called as `PackedSequence((data, batch_sizes), *, sorted_indices)` + else: + assert isinstance(data, (list, tuple)) and len(data) == 2 + return data[0], data[1], sorted_indices, unsorted_indices + + +def _packed_sequence_init( + data: Tensor, + batch_sizes: Optional[Tensor] = None, + sorted_indices: Optional[Tensor] = None, + unsorted_indices: Optional[Tensor] = None, +) -> PackedSequence: + data, batch_sizes, sorted_indices, unsorted_indices = _packed_sequence_init_args( + data, batch_sizes, sorted_indices, unsorted_indices) + return PackedSequence(data, batch_sizes, sorted_indices, unsorted_indices) + + +def invert_permutation(permutation: Optional[Tensor]) -> Optional[Tensor]: + if permutation is None: + return None + output = torch.empty_like(permutation, memory_format=torch.legacy_contiguous_format) + output.scatter_(0, permutation, + torch.arange(0, permutation.numel(), device=permutation.device)) + return output + + +def pack_padded_sequence( + input: Tensor, + lengths: Tensor, + batch_first: bool = False, + enforce_sorted: bool = True, +) -> PackedSequence: + r"""Packs a Tensor containing padded sequences of variable length. + + :attr:`input` can be of size ``T x B x *`` where `T` is the length of the + longest sequence (equal to ``lengths[0]``), ``B`` is the batch size, and + ``*`` is any number of dimensions (including 0). If ``batch_first`` is + ``True``, ``B x T x *`` :attr:`input` is expected. + + For unsorted sequences, use `enforce_sorted = False`. 
If :attr:`enforce_sorted` is + ``True``, the sequences should be sorted by length in a decreasing order, i.e. + ``input[:,0]`` should be the longest sequence, and ``input[:,B-1]`` the shortest + one. `enforce_sorted = True` is only necessary for ONNX export. + + Note: + This function accepts any input that has at least two dimensions. You + can apply it to pack the labels, and use the output of the RNN with + them to compute the loss directly. A Tensor can be retrieved from + a :class:`PackedSequence` object by accessing its ``.data`` attribute. + + Args: + input (Tensor): padded batch of variable length sequences. + lengths (Tensor or list(int)): list of sequence lengths of each batch + element (must be on the CPU if provided as a tensor). + batch_first (bool, optional): if ``True``, the input is expected in ``B x T x *`` + format. + enforce_sorted (bool, optional): if ``True``, the input is expected to + contain sequences sorted by length in a decreasing order. If + ``False``, the input will get sorted unconditionally. Default: ``True``. + + Returns: + a :class:`PackedSequence` object + """ + if not isinstance(lengths, torch.Tensor): + if torch._C._get_tracing_state(): + warnings.warn('pack_padded_sequence has been called with a Python list of ' + 'sequence lengths. The tracer cannot track the data flow of Python ' + 'values, and it will treat them as constants, likely rendering ' + 'the trace incorrect for any other combination of lengths.', + stacklevel=2) + lengths = torch.as_tensor(lengths, dtype=torch.int64, device='cpu') + else: + lengths = lengths.to(dtype=torch.int64) + + if enforce_sorted: + sorted_indices = None + else: + lengths, sorted_indices = torch.sort(lengths, descending=True) + sorted_indices = sorted_indices.to(input.device) + batch_dim = 0 if batch_first else 1 + input = input.index_select(batch_dim, sorted_indices) + + data, batch_sizes = \ + _VF._pack_padded_sequence(input, lengths, batch_first) + return _packed_sequence_init(data, batch_sizes, sorted_indices, None) + + +def pad_packed_sequence( + sequence: PackedSequence, + batch_first: bool = False, + padding_value: float = 0.0, + total_length: Optional[int] = None, +) -> Tuple[Tensor, Tensor]: + r"""Pad a packed batch of variable length sequences. + + It is an inverse operation to :func:`pack_padded_sequence`. + + The returned Tensor's data will be of size ``T x B x *``, where `T` is the length + of the longest sequence and `B` is the batch size. If ``batch_first`` is True, + the data will be transposed into ``B x T x *`` format. + + Example: + >>> from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence + >>> seq = torch.tensor([[1, 2, 0], [3, 0, 0], [4, 5, 6]]) + >>> lens = [2, 1, 3] + >>> packed = pack_padded_sequence(seq, lens, batch_first=True, enforce_sorted=False) + >>> packed + PackedSequence(data=tensor([4, 1, 3, 5, 2, 6]), batch_sizes=tensor([3, 2, 1]), + sorted_indices=tensor([2, 0, 1]), unsorted_indices=tensor([1, 2, 0])) + >>> seq_unpacked, lens_unpacked = pad_packed_sequence(packed, batch_first=True) + >>> seq_unpacked + tensor([[1, 2, 0], + [3, 0, 0], + [4, 5, 6]]) + >>> lens_unpacked + tensor([2, 1, 3]) + + .. note:: + :attr:`total_length` is useful to implement the + ``pack sequence -> recurrent network -> unpack sequence`` pattern in a + :class:`~torch.nn.Module` wrapped in :class:`~torch.nn.DataParallel`. + See :ref:`this FAQ section ` for + details. 
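+
+        For instance (an illustrative continuation of the example above),
+        ``pad_packed_sequence(packed, batch_first=True, total_length=5)`` would
+        return a padded tensor of shape ``(3, 5)`` instead of ``(3, 3)``.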
+ + Args: + sequence (PackedSequence): batch to pad + batch_first (bool, optional): if ``True``, the output will be in ``B x T x *`` + format. + padding_value (float, optional): values for padded elements. + total_length (int, optional): if not ``None``, the output will be padded to + have length :attr:`total_length`. This method will throw :class:`ValueError` + if :attr:`total_length` is less than the max sequence length in + :attr:`sequence`. + + Returns: + Tuple of Tensor containing the padded sequence, and a Tensor + containing the list of lengths of each sequence in the batch. + Batch elements will be re-ordered as they were ordered originally when + the batch was passed to ``pack_padded_sequence`` or ``pack_sequence``. + + + + + """ + max_seq_length = sequence.batch_sizes.size(0) + if total_length is not None: + if total_length < max_seq_length: + raise ValueError("Expected total_length to be at least the length " + "of the longest sequence in input, but got " + f"total_length={total_length} and max sequence length being {max_seq_length}" + ) + max_seq_length = total_length + padded_output, lengths = _VF._pad_packed_sequence( + sequence.data, sequence.batch_sizes, batch_first, padding_value, max_seq_length) + unsorted_indices = sequence.unsorted_indices + if unsorted_indices is not None: + batch_dim = 0 if batch_first else 1 + return padded_output.index_select(batch_dim, unsorted_indices), lengths[unsorted_indices.cpu()] + return padded_output, lengths + +# NOTE: .pyi stub allows Iterable[Tensor], but for JIT-compatibility we need to be more restrictive here. +def pad_sequence( + sequences: Union[Tensor, List[Tensor]], + batch_first: bool = False, + padding_value: float = 0.0, +) -> Tensor: + r"""Pad a list of variable length Tensors with ``padding_value``. + + ``pad_sequence`` stacks a list of Tensors along a new dimension, + and pads them to equal length. For example, if the input is a list of + sequences with size ``L x *`` and ``batch_first`` is False, the output is + of size ``T x B x *``. + + `B` is batch size. It is equal to the number of elements in ``sequences``. + `T` is length of the longest sequence. + `L` is length of the sequence. + `*` is any number of trailing dimensions, including none. + + Example: + >>> from torch.nn.utils.rnn import pad_sequence + >>> a = torch.ones(25, 300) + >>> b = torch.ones(22, 300) + >>> c = torch.ones(15, 300) + >>> pad_sequence([a, b, c]).size() + torch.Size([25, 3, 300]) + + Note: + This function returns a Tensor of size ``T x B x *`` or ``B x T x *`` + where `T` is the length of the longest sequence. This function assumes + trailing dimensions and type of all the Tensors in sequences are same. + + Args: + sequences (list[Tensor]): list of variable length sequences. + batch_first (bool, optional): output will be in ``B x T x *`` if True, or in + ``T x B x *`` otherwise. Default: False. + padding_value (float, optional): value for padded elements. Default: 0. + + Returns: + Tensor of size ``T x B x *`` if :attr:`batch_first` is ``False``. 
+ Tensor of size ``B x T x *`` otherwise + """ + if not (torch.jit.is_tracing() or torch.jit.is_scripting()): + # JIT doesn't support `Iterable` + if not isinstance(sequences, Iterable): + msg = ('pad_sequence: Expected iterable for input sequences, but got arg of type: ' + f'{type(sequences)}') + raise RuntimeError(msg) + + # In JIT context this leads to, + # RuntimeError: cannot statically infer the expected size of a list in this context + sequences = tuple(sequences) + else: + # For JIT, we only support Union[Tensor, Tuple[Tensor]] + if isinstance(sequences, torch.Tensor): + sequences = sequences.unbind(0) + + # assuming trailing dimensions and type of all the Tensors + # in sequences are same and fetching those from sequences[0] + return torch._C._nn.pad_sequence(sequences, batch_first, padding_value) + + +def unpad_sequence( + padded_sequences: Tensor, + lengths: Tensor, + batch_first: bool = False, +) -> List[Tensor]: + r"""Unpad padded Tensor into a list of variable length Tensors. + + ``unpad_sequence`` unstacks padded Tensor into a list of variable length Tensors. + + Example: + >>> from torch.nn.utils.rnn import pad_sequence, unpad_sequence + >>> a = torch.ones(25, 300) + >>> b = torch.ones(22, 300) + >>> c = torch.ones(15, 300) + >>> sequences = [a, b, c] + >>> padded_sequences = pad_sequence(sequences) + >>> lengths = torch.as_tensor([v.size(0) for v in sequences]) + >>> unpadded_sequences = unpad_sequence(padded_sequences, lengths) + >>> torch.allclose(sequences[0], unpadded_sequences[0]) + True + >>> torch.allclose(sequences[1], unpadded_sequences[1]) + True + >>> torch.allclose(sequences[2], unpadded_sequences[2]) + True + + Args: + padded_sequences (Tensor): padded sequences. + lengths (Tensor): length of original (unpadded) sequences. + batch_first (bool, optional): whether batch dimension first or not. Default: False. + + Returns: + a list of :class:`Tensor` objects + """ + unpadded_sequences = [] + + if not batch_first: + padded_sequences.transpose_(0, 1) + + max_length = padded_sequences.shape[1] + idx = torch.arange(max_length, device=lengths.device) + + for seq, length in zip(padded_sequences, lengths): + mask = idx < length + unpacked_seq = seq[mask] + unpadded_sequences.append(unpacked_seq) + + return unpadded_sequences + + +def pack_sequence(sequences: List[Tensor], enforce_sorted: bool = True) -> PackedSequence: + r"""Packs a list of variable length Tensors. + + Consecutive call of the next functions: ``pad_sequence``, ``pack_padded_sequence``. + + ``sequences`` should be a list of Tensors of size ``L x *``, where `L` is + the length of a sequence and `*` is any number of trailing dimensions, + including zero. + + For unsorted sequences, use `enforce_sorted = False`. If ``enforce_sorted`` + is ``True``, the sequences should be sorted in the order of decreasing length. + ``enforce_sorted = True`` is only necessary for ONNX export. + + + Example: + >>> from torch.nn.utils.rnn import pack_sequence + >>> a = torch.tensor([1, 2, 3]) + >>> b = torch.tensor([4, 5]) + >>> c = torch.tensor([6]) + >>> pack_sequence([a, b, c]) + PackedSequence(data=tensor([1, 4, 6, 2, 5, 3]), batch_sizes=tensor([3, 2, 1]), sorted_indices=None, unsorted_indices=None) + + + Args: + sequences (list[Tensor]): A list of sequences of decreasing length. + enforce_sorted (bool, optional): if ``True``, checks that the input + contains sequences sorted by length in a decreasing order. If + ``False``, this condition is not checked. Default: ``True``. 
+ + Returns: + a :class:`PackedSequence` object + """ + lengths = torch.as_tensor([v.size(0) for v in sequences]) + return pack_padded_sequence(pad_sequence(sequences), lengths, enforce_sorted=enforce_sorted) + + +def unpack_sequence(packed_sequences: PackedSequence) -> List[Tensor]: + r"""Unpack PackedSequence into a list of variable length Tensors. + + ``packed_sequences`` should be a PackedSequence object. + + + Example: + >>> from torch.nn.utils.rnn import pack_sequence, unpack_sequence + >>> a = torch.tensor([1, 2, 3]) + >>> b = torch.tensor([4, 5]) + >>> c = torch.tensor([6]) + >>> sequences = [a, b, c] + >>> print(sequences) + [tensor([1, 2, 3]), tensor([4, 5]), tensor([6])] + >>> packed_sequences = pack_sequence(sequences) + >>> print(packed_sequences) + PackedSequence(data=tensor([1, 4, 6, 2, 5, 3]), batch_sizes=tensor([3, 2, 1]), sorted_indices=None, unsorted_indices=None) + >>> unpacked_sequences = unpack_sequence(packed_sequences) + >>> print(unpacked_sequences) + [tensor([1, 2, 3]), tensor([4, 5]), tensor([6])] + + + Args: + packed_sequences (PackedSequence): A PackedSequence object. + + Returns: + a list of :class:`Tensor` objects + """ + padded_sequences, lengths = pad_packed_sequence(packed_sequences, batch_first=True) + unpacked_sequences = unpad_sequence(padded_sequences, lengths, batch_first=True) + return unpacked_sequences diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/spectral_norm.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/spectral_norm.py new file mode 100644 index 0000000000000000000000000000000000000000..bda54b9a1222203791556b0fc2193bab59f33644 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/spectral_norm.py @@ -0,0 +1,312 @@ +"""Spectral Normalization from https://arxiv.org/abs/1802.05957.""" +import torch +from torch.nn.functional import normalize +from typing import Any, Optional, TypeVar +from ..modules import Module + +__all__ = ['SpectralNorm', 'SpectralNormLoadStateDictPreHook', 'SpectralNormStateDictHook', + 'spectral_norm', 'remove_spectral_norm'] + +class SpectralNorm: + # Invariant before and after each forward call: + # u = normalize(W @ v) + # NB: At initialization, this invariant is not enforced + + _version: int = 1 + # At version 1: + # made `W` not a buffer, + # added `v` as a buffer, and + # made eval mode use `W = u @ W_orig @ v` rather than the stored `W`. + name: str + dim: int + n_power_iterations: int + eps: float + + def __init__(self, name: str = 'weight', n_power_iterations: int = 1, dim: int = 0, eps: float = 1e-12) -> None: + self.name = name + self.dim = dim + if n_power_iterations <= 0: + raise ValueError('Expected n_power_iterations to be positive, but ' + f'got n_power_iterations={n_power_iterations}') + self.n_power_iterations = n_power_iterations + self.eps = eps + + def reshape_weight_to_matrix(self, weight: torch.Tensor) -> torch.Tensor: + weight_mat = weight + if self.dim != 0: + # permute dim to front + weight_mat = weight_mat.permute(self.dim, + *[d for d in range(weight_mat.dim()) if d != self.dim]) + height = weight_mat.size(0) + return weight_mat.reshape(height, -1) + + def compute_weight(self, module: Module, do_power_iteration: bool) -> torch.Tensor: + # NB: If `do_power_iteration` is set, the `u` and `v` vectors are + # updated in power iteration **in-place**. 
This is very important + # because in `DataParallel` forward, the vectors (being buffers) are + # broadcast from the parallelized module to each module replica, + # which is a new module object created on the fly. And each replica + # runs its own spectral norm power iteration. So simply assigning + # the updated vectors to the module this function runs on will cause + # the update to be lost forever. And the next time the parallelized + # module is replicated, the same randomly initialized vectors are + # broadcast and used! + # + # Therefore, to make the change propagate back, we rely on two + # important behaviors (also enforced via tests): + # 1. `DataParallel` doesn't clone storage if the broadcast tensor + # is already on correct device; and it makes sure that the + # parallelized module is already on `device[0]`. + # 2. If the out tensor in `out=` kwarg has correct shape, it will + # just fill in the values. + # Therefore, since the same power iteration is performed on all + # devices, simply updating the tensors in-place will make sure that + # the module replica on `device[0]` will update the _u vector on the + # parallelized module (by shared storage). + # + # However, after we update `u` and `v` in-place, we need to **clone** + # them before using them to normalize the weight. This is to support + # backproping through two forward passes, e.g., the common pattern in + # GAN training: loss = D(real) - D(fake). Otherwise, engine will + # complain that variables needed to do backward for the first forward + # (i.e., the `u` and `v` vectors) are changed in the second forward. + weight = getattr(module, self.name + '_orig') + u = getattr(module, self.name + '_u') + v = getattr(module, self.name + '_v') + weight_mat = self.reshape_weight_to_matrix(weight) + + if do_power_iteration: + with torch.no_grad(): + for _ in range(self.n_power_iterations): + # Spectral norm of weight equals to `u^T W v`, where `u` and `v` + # are the first left and right singular vectors. + # This power iteration produces approximations of `u` and `v`. + v = normalize(torch.mv(weight_mat.t(), u), dim=0, eps=self.eps, out=v) + u = normalize(torch.mv(weight_mat, v), dim=0, eps=self.eps, out=u) + if self.n_power_iterations > 0: + # See above on why we need to clone + u = u.clone(memory_format=torch.contiguous_format) + v = v.clone(memory_format=torch.contiguous_format) + + sigma = torch.dot(u, torch.mv(weight_mat, v)) + weight = weight / sigma + return weight + + def remove(self, module: Module) -> None: + with torch.no_grad(): + weight = self.compute_weight(module, do_power_iteration=False) + delattr(module, self.name) + delattr(module, self.name + '_u') + delattr(module, self.name + '_v') + delattr(module, self.name + '_orig') + module.register_parameter(self.name, torch.nn.Parameter(weight.detach())) + + def __call__(self, module: Module, inputs: Any) -> None: + setattr(module, self.name, self.compute_weight(module, do_power_iteration=module.training)) + + def _solve_v_and_rescale(self, weight_mat, u, target_sigma): + # Tries to returns a vector `v` s.t. `u = normalize(W @ v)` + # (the invariant at top of this class) and `u @ W @ v = sigma`. + # This uses pinverse in case W^T W is not invertible. 
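+        # Concretely, the line below solves the least-squares problem
+        # `W @ x ~= u` via the normal equations, `x = pinv(W^T W) @ W^T @ u`,
+        # and the `mul_` afterwards rescales the result so that `u . (W @ v)`
+        # equals `target_sigma`, restoring the `u @ W @ v = sigma` invariant.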
+ v = torch.linalg.multi_dot([weight_mat.t().mm(weight_mat).pinverse(), weight_mat.t(), u.unsqueeze(1)]).squeeze(1) + return v.mul_(target_sigma / torch.dot(u, torch.mv(weight_mat, v))) + + @staticmethod + def apply(module: Module, name: str, n_power_iterations: int, dim: int, eps: float) -> 'SpectralNorm': + for hook in module._forward_pre_hooks.values(): + if isinstance(hook, SpectralNorm) and hook.name == name: + raise RuntimeError(f"Cannot register two spectral_norm hooks on the same parameter {name}") + + fn = SpectralNorm(name, n_power_iterations, dim, eps) + weight = module._parameters[name] + if weight is None: + raise ValueError(f'`SpectralNorm` cannot be applied as parameter `{name}` is None') + if isinstance(weight, torch.nn.parameter.UninitializedParameter): + raise ValueError( + 'The module passed to `SpectralNorm` can\'t have uninitialized parameters. ' + 'Make sure to run the dummy forward before applying spectral normalization') + + with torch.no_grad(): + weight_mat = fn.reshape_weight_to_matrix(weight) + + h, w = weight_mat.size() + # randomly initialize `u` and `v` + u = normalize(weight.new_empty(h).normal_(0, 1), dim=0, eps=fn.eps) + v = normalize(weight.new_empty(w).normal_(0, 1), dim=0, eps=fn.eps) + + delattr(module, fn.name) + module.register_parameter(fn.name + "_orig", weight) + # We still need to assign weight back as fn.name because all sorts of + # things may assume that it exists, e.g., when initializing weights. + # However, we can't directly assign as it could be an nn.Parameter and + # gets added as a parameter. Instead, we register weight.data as a plain + # attribute. + setattr(module, fn.name, weight.data) + module.register_buffer(fn.name + "_u", u) + module.register_buffer(fn.name + "_v", v) + + module.register_forward_pre_hook(fn) + module._register_state_dict_hook(SpectralNormStateDictHook(fn)) + module._register_load_state_dict_pre_hook(SpectralNormLoadStateDictPreHook(fn)) + return fn + + +# This is a top level class because Py2 pickle doesn't like inner class nor an +# instancemethod. +class SpectralNormLoadStateDictPreHook: + # See docstring of SpectralNorm._version on the changes to spectral_norm. + def __init__(self, fn) -> None: + self.fn = fn + + # For state_dict with version None, (assuming that it has gone through at + # least one training forward), we have + # + # u = normalize(W_orig @ v) + # W = W_orig / sigma, where sigma = u @ W_orig @ v + # + # To compute `v`, we solve `W_orig @ x = u`, and let + # v = x / (u @ W_orig @ x) * (W / W_orig). + def __call__(self, state_dict, prefix, local_metadata, strict, + missing_keys, unexpected_keys, error_msgs) -> None: + fn = self.fn + version = local_metadata.get('spectral_norm', {}).get(fn.name + '.version', None) + if version is None or version < 1: + weight_key = prefix + fn.name + if version is None and all(weight_key + s in state_dict for s in ('_orig', '_u', '_v')) and \ + weight_key not in state_dict: + # Detect if it is the updated state dict and just missing metadata. + # This could happen if the users are crafting a state dict themselves, + # so we just pretend that this is the newest. 
+ return + has_missing_keys = False + for suffix in ('_orig', '', '_u'): + key = weight_key + suffix + if key not in state_dict: + has_missing_keys = True + if strict: + missing_keys.append(key) + if has_missing_keys: + return + with torch.no_grad(): + weight_orig = state_dict[weight_key + '_orig'] + weight = state_dict.pop(weight_key) + sigma = (weight_orig / weight).mean() + weight_mat = fn.reshape_weight_to_matrix(weight_orig) + u = state_dict[weight_key + '_u'] + v = fn._solve_v_and_rescale(weight_mat, u, sigma) + state_dict[weight_key + '_v'] = v + + +# This is a top level class because Py2 pickle doesn't like inner class nor an +# instancemethod. +class SpectralNormStateDictHook: + # See docstring of SpectralNorm._version on the changes to spectral_norm. + def __init__(self, fn) -> None: + self.fn = fn + + def __call__(self, module, state_dict, prefix, local_metadata) -> None: + if 'spectral_norm' not in local_metadata: + local_metadata['spectral_norm'] = {} + key = self.fn.name + '.version' + if key in local_metadata['spectral_norm']: + raise RuntimeError(f"Unexpected key in metadata['spectral_norm']: {key}") + local_metadata['spectral_norm'][key] = self.fn._version + + +T_module = TypeVar('T_module', bound=Module) + +def spectral_norm(module: T_module, + name: str = 'weight', + n_power_iterations: int = 1, + eps: float = 1e-12, + dim: Optional[int] = None) -> T_module: + r"""Apply spectral normalization to a parameter in the given module. + + .. math:: + \mathbf{W}_{SN} = \dfrac{\mathbf{W}}{\sigma(\mathbf{W})}, + \sigma(\mathbf{W}) = \max_{\mathbf{h}: \mathbf{h} \ne 0} \dfrac{\|\mathbf{W} \mathbf{h}\|_2}{\|\mathbf{h}\|_2} + + Spectral normalization stabilizes the training of discriminators (critics) + in Generative Adversarial Networks (GANs) by rescaling the weight tensor + with spectral norm :math:`\sigma` of the weight matrix calculated using + power iteration method. If the dimension of the weight tensor is greater + than 2, it is reshaped to 2D in power iteration method to get spectral + norm. This is implemented via a hook that calculates spectral norm and + rescales weight before every :meth:`~Module.forward` call. + + See `Spectral Normalization for Generative Adversarial Networks`_ . + + .. _`Spectral Normalization for Generative Adversarial Networks`: https://arxiv.org/abs/1802.05957 + + Args: + module (nn.Module): containing module + name (str, optional): name of weight parameter + n_power_iterations (int, optional): number of power iterations to + calculate spectral norm + eps (float, optional): epsilon for numerical stability in + calculating norms + dim (int, optional): dimension corresponding to number of outputs, + the default is ``0``, except for modules that are instances of + ConvTranspose{1,2,3}d, when it is ``1`` + + Returns: + The original module with the spectral norm hook + + .. note:: + This function has been reimplemented as + :func:`torch.nn.utils.parametrizations.spectral_norm` using the new + parametrization functionality in + :func:`torch.nn.utils.parametrize.register_parametrization`. Please use + the newer version. This function will be deprecated in a future version + of PyTorch. 
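+
+        A minimal migration sketch (same module, new namespace)::
+
+            from torch.nn.utils import parametrizations
+            m = parametrizations.spectral_norm(nn.Linear(20, 40))
+            # weight is now recomputed through the registered parametrization
+            # (m.parametrizations.weight) on every access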
+ + Example:: + + >>> m = spectral_norm(nn.Linear(20, 40)) + >>> m + Linear(in_features=20, out_features=40, bias=True) + >>> m.weight_u.size() + torch.Size([40]) + + """ + if dim is None: + if isinstance(module, (torch.nn.ConvTranspose1d, + torch.nn.ConvTranspose2d, + torch.nn.ConvTranspose3d)): + dim = 1 + else: + dim = 0 + SpectralNorm.apply(module, name, n_power_iterations, dim, eps) + return module + + +def remove_spectral_norm(module: T_module, name: str = 'weight') -> T_module: + r"""Remove the spectral normalization reparameterization from a module. + + Args: + module (Module): containing module + name (str, optional): name of weight parameter + + Example: + >>> m = spectral_norm(nn.Linear(40, 10)) + >>> remove_spectral_norm(m) + """ + for k, hook in module._forward_pre_hooks.items(): + if isinstance(hook, SpectralNorm) and hook.name == name: + hook.remove(module) + del module._forward_pre_hooks[k] + break + else: + raise ValueError(f"spectral_norm of '{name}' not found in {module}") + + for k, hook in module._state_dict_hooks.items(): + if isinstance(hook, SpectralNormStateDictHook) and hook.fn.name == name: + del module._state_dict_hooks[k] + break + + for k, hook in module._load_state_dict_pre_hooks.items(): + if isinstance(hook, SpectralNormLoadStateDictPreHook) and hook.fn.name == name: + del module._load_state_dict_pre_hooks[k] + break + + return module diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/stateless.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/stateless.py new file mode 100644 index 0000000000000000000000000000000000000000..ae7ebcdf3df7f00cc9bde5b108b81c65eb0f884b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/stateless.py @@ -0,0 +1,263 @@ +import contextlib +import warnings +from collections import defaultdict +from typing import Any, Dict, Iterator, Optional, Set, Tuple, Union + +import torch +from torch import Tensor +from torch.nn.utils._named_member_accessor import NamedMemberAccessor + +__all__ = ["functional_call"] + + +def _untie_named_tensors_map( + module: "torch.nn.Module", + parameters_and_buffers: Dict[str, Tensor], +) -> Dict[str, Tensor]: + """ + Unties all tied tensors in the module to parameters_and_buffers. + + This function returns a new untied_parameters_and_buffers dictionary and leave the original + untied_parameters_and_buffers dictionary unchanged. It adds new (missing) keys for tied tensors + in the module to untied_parameters_and_buffers. The value of the new key is the user-given value + in the original parameters_and_buffers dictionary. + + If there are more than one user-given values for the same tied tensor, it will raise an error. + + For example, if the module has two tied weights self.foo and self.tied_foo and the user passes + {'foo': foo_value, ...}, this will return {'foo': foo_value, 'tied_foo': foo_value, ...}. If the + user passes {'foo': foo_value, 'tied_foo': tied_foo_value, ...}, it will raise an error. If the + user passes {'foo': foo_value, 'tied_foo': foo_value, ...}, it will not raise an error. + + Args: + module (torch.nn.Module): the module to determine which tensors are tied. + parameters_and_buffers (Dict[str, Tensor]): a map of {name: tensor} for reparamaterizing the module. + + Returns: + A new untied version of the parameters_and_buffers dictionary. + + Raises: + ValueError: if there are more than one user-given values for the same tied tensor. + """ + # A map of {name: tensor} for all tensors (including tied ones) in the module. 
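+    # `remove_duplicate=False` keeps every alias of a tied tensor, so the tie
+    # detection below can group all names that refer to the same tensor object.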
+ all_named_tensors: Dict[str, Tensor] = {} + all_named_tensors.update(module.named_parameters(remove_duplicate=False)) + all_named_tensors.update(module.named_buffers(remove_duplicate=False)) + + # A map of {tensor: set(all_tied_names)} for all tensor names in the module. + tensor_to_tied_names_map: Dict[Tensor, Set[str]] = defaultdict(set) + for name, tensor in all_named_tensors.items(): + tensor_to_tied_names_map[tensor].add(name) + + # A map of {tied_name: set(all_tied_names)} for all tensor names in the module. + # If a name is not tied, it will not be in this map. + tied_names_map: Dict[str, Set[str]] = {} + for tied_names in tensor_to_tied_names_map.values(): + if len(tied_names) > 1: + for tied_name in tied_names: + tied_names_map[tied_name] = tied_names + + # Make sure the user didn't pass multiple values for the same tied tensor. + given_names = set(parameters_and_buffers.keys()) + given_names_for_tied_tensors = given_names.intersection(tied_names_map.keys()) + for given_name in given_names_for_tied_tensors: + tied_names = tied_names_map[given_name] + if ( + # Detect if there are multiple keys present for the same tied tensor. + len(tied_names.intersection(given_names_for_tied_tensors)) > 1 + # Only raise an error if the user passed multiple values for the same tied tensor. + # If all given values are the same, don't raise. + and len({parameters_and_buffers[tied_name] for tied_name in tied_names}) + != 1 + ): + raise ValueError( + f"functional_call got multiple values for keys {sorted(tied_names)}, " + f"which are tied. Consider using tie_weights=False" + ) + + # Untie the given named tensor map + # Make a copy for not modifying the original dict + untied_parameters_and_buffers = parameters_and_buffers.copy() + for given_name in given_names_for_tied_tensors: + for tied_name in tied_names_map[given_name]: + untied_parameters_and_buffers[tied_name] = parameters_and_buffers[ + given_name + ] + return untied_parameters_and_buffers + + +@contextlib.contextmanager +def _reparametrize_module( + module: "torch.nn.Module", + parameters_and_buffers: Dict[str, Tensor], + *, + tie_weights: bool = False, + strict: bool = False, +) -> Iterator[None]: + if tie_weights: + untied_parameters_and_buffers = _untie_named_tensors_map( + module, parameters_and_buffers + ) + else: + untied_parameters_and_buffers = parameters_and_buffers + + accessor = NamedMemberAccessor(module) + if strict: + missing_keys, unexpected_keys = accessor.check_keys( + untied_parameters_and_buffers + ) + error_msgs = [] + if len(unexpected_keys) > 0: + error_msgs.append( + f"Unexpected key(s): {', '.join(map(repr, unexpected_keys))}." + ) + if len(missing_keys) > 0: + error_msgs.append(f"Missing key(s): {', '.join(map(repr, missing_keys))}.") + if len(error_msgs) > 0: + raise RuntimeError( + "Error(s) in reparametrizing for {}:\n\t{}".format( + module._get_name(), "\n\t".join(error_msgs) + ) + ) + + orig_parameters_and_buffers: Dict[str, Tensor] = {} + try: + orig_parameters_and_buffers, _ = accessor.swap_tensors_dict( + untied_parameters_and_buffers, allow_missing=True + ) + yield + finally: + new_parameters_and_buffers, _ = accessor.swap_tensors_dict( + orig_parameters_and_buffers, allow_missing=True + ) + # Sometimes the module is not completely stateless and has some in-place modifications on + # the _parameters and _buffers dictionaries. + # Write the changed parameters and buffers back to the original dict. 
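+        # Only keys the caller originally passed in are refreshed; tensors that
+        # were swapped out of the module but never requested are not added.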
+ parameters_and_buffers.update( + { + k: new_parameters_and_buffers[k] + for k in parameters_and_buffers + if k in new_parameters_and_buffers + } + ) + + +def functional_call( + module: "torch.nn.Module", + parameters_and_buffers: Dict[str, Tensor], + args: Union[Any, Tuple], + kwargs: Optional[Dict[str, Any]] = None, + *, + tie_weights: bool = True, + strict: bool = False, +): + r"""Perform a functional call on the module by replacing the module parameters and buffers with the provided ones. + + .. warning:: + + This API is deprecated as of PyTorch 2.0 and will be removed in a future + version of PyTorch. Please use :func:`torch.func.functional_call` instead, + which is a drop-in replacement for this API. + + .. note:: If the module has active parametrizations, passing a value in the + :attr:`parameters_and_buffers` argument with the name set to the regular parameter + name will completely disable the parametrization. + If you want to apply the parametrization function to the value passed + please set the key as ``{submodule_name}.parametrizations.{parameter_name}.original``. + + .. note:: If the module performs in-place operations on parameters/buffers, these will be reflected + in the `parameters_and_buffers` input. + + Example:: + + >>> a = {'foo': torch.zeros(())} + >>> # xdoctest: +SKIP + >>> mod = Foo() # does self.foo = self.foo + 1 + >>> print(mod.foo) # tensor(0.) + >>> functional_call(mod, a, torch.ones(())) + >>> print(mod.foo) # tensor(0.) + >>> print(a['foo']) # tensor(1.) + + .. note:: If the module has tied weights, whether or not functional_call respects the tying is determined by the + tie_weights flag. + + Example:: + + >>> a = {'foo': torch.zeros(())} + >>> # xdoctest: +SKIP + >>> mod = Foo() # has both self.foo and self.foo_tied which are tied. Returns x + self.foo + self.foo_tied + >>> print(mod.foo) # tensor(1.) + >>> mod(torch.zeros(())) # tensor(2.) + >>> functional_call(mod, a, torch.zeros(())) # tensor(0.) since it will change self.foo_tied too + >>> functional_call(mod, a, torch.zeros(()), tie_weights=False) # tensor(1.)--self.foo_tied is not updated + >>> new_a = {'foo': torch.zeros(()), 'foo_tied': torch.zeros(())} + >>> functional_call(mod, new_a, torch.zeros()) # tensor(0.) + + Args: + module (torch.nn.Module): the module to call + parameters_and_buffers (dict of str and Tensor): the parameters that will be used in + the module call. + args (Any or tuple): arguments to be passed to the module call. If not a tuple, considered a single argument. + kwargs (dict): keyword arguments to be passed to the module call + tie_weights (bool, optional): If True, then parameters and buffers tied in the original model will be treated as + tied in the reparamaterized version. Therefore, if True and different values are passed for the tied + parameters and buffers, it will error. If False, it will not respect the originally tied parameters and + buffers unless the values passed for both weights are the same. Default: True. + strict (bool, optional): If True, then the parameters and buffers passed in must match the parameters and + buffers in the original module. Therefore, if True and there are any missing or unexpected keys, it will + error. Default: False. + + Returns: + Any: the result of calling ``module``. + """ + warnings.warn( + "This API is deprecated as of PyTorch 2.0 and will be removed in a future " + "version of PyTorch. Please use torch.func.functional_call instead " + "which is a drop-in replacement for this API." 
+ ) + + return _functional_call( + module, + parameters_and_buffers, + args, + kwargs, + tie_weights=tie_weights, + strict=strict, + ) + + +def _functional_call( + module: "torch.nn.Module", + parameters_and_buffers: Dict[str, Tensor], + args: Union[Any, Tuple], + kwargs: Optional[Dict[str, Any]] = None, + *, + tie_weights: bool = True, + strict: bool = False, +): + # TODO allow kwargs such as unsafe and others for parametrization + if ( + torch.jit.is_tracing() + or torch.jit.is_scripting() + or isinstance( + module, + ( + torch.jit.RecursiveScriptModule, + torch.jit.ScriptModule, + torch.jit.ScriptFunction, + ), + ) + ): + raise RuntimeError("The stateless API can't be used with Jitted modules") + if isinstance(module, torch.nn.DataParallel): + raise RuntimeError( + "The stateless API can't be used with nn.DataParallel module" + ) + if kwargs is None: + kwargs = {} + if not isinstance(args, tuple): + args = (args,) + with _reparametrize_module( + module, parameters_and_buffers, tie_weights=tie_weights, strict=strict + ): + return module(*args, **kwargs) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/weight_norm.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/weight_norm.py new file mode 100644 index 0000000000000000000000000000000000000000..942a13a4eb83c4bac35f69f61bddf6ea6ca4645c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/utils/weight_norm.py @@ -0,0 +1,151 @@ +r"""Weight Normalization from https://arxiv.org/abs/1602.07868.""" +from torch.nn.parameter import Parameter, UninitializedParameter +from torch import _weight_norm, norm_except_dim +from typing import Any, TypeVar +import warnings +from ..modules import Module + +__all__ = ['WeightNorm', 'weight_norm', 'remove_weight_norm'] + +class WeightNorm: + name: str + dim: int + + def __init__(self, name: str, dim: int) -> None: + if dim is None: + dim = -1 + self.name = name + self.dim = dim + + # TODO Make return type more specific + def compute_weight(self, module: Module) -> Any: + g = getattr(module, self.name + '_g') + v = getattr(module, self.name + '_v') + return _weight_norm(v, g, self.dim) + + @staticmethod + def apply(module, name: str, dim: int) -> 'WeightNorm': + warnings.warn("torch.nn.utils.weight_norm is deprecated in favor of torch.nn.utils.parametrizations.weight_norm.") + + for hook in module._forward_pre_hooks.values(): + if isinstance(hook, WeightNorm) and hook.name == name: + raise RuntimeError(f"Cannot register two weight_norm hooks on the same parameter {name}") + + if dim is None: + dim = -1 + + fn = WeightNorm(name, dim) + + weight = getattr(module, name) + if isinstance(weight, UninitializedParameter): + raise ValueError( + 'The module passed to `WeightNorm` can\'t have uninitialized parameters. 
' + 'Make sure to run the dummy forward before applying weight normalization') + # remove w from parameter list + del module._parameters[name] + + # add g and v as new parameters and express w as g/||v|| * v + module.register_parameter(name + '_g', Parameter(norm_except_dim(weight, 2, dim).data)) + module.register_parameter(name + '_v', Parameter(weight.data)) + setattr(module, name, fn.compute_weight(module)) + + # recompute weight before every forward() + module.register_forward_pre_hook(fn) + + return fn + + def remove(self, module: Module) -> None: + weight = self.compute_weight(module) + delattr(module, self.name) + del module._parameters[self.name + '_g'] + del module._parameters[self.name + '_v'] + setattr(module, self.name, Parameter(weight.data)) + + def __call__(self, module: Module, inputs: Any) -> None: + setattr(module, self.name, self.compute_weight(module)) + + +T_module = TypeVar('T_module', bound=Module) + +def weight_norm(module: T_module, name: str = 'weight', dim: int = 0) -> T_module: + r"""Apply weight normalization to a parameter in the given module. + + .. math:: + \mathbf{w} = g \dfrac{\mathbf{v}}{\|\mathbf{v}\|} + + Weight normalization is a reparameterization that decouples the magnitude + of a weight tensor from its direction. This replaces the parameter specified + by :attr:`name` (e.g. ``'weight'``) with two parameters: one specifying the magnitude + (e.g. ``'weight_g'``) and one specifying the direction (e.g. ``'weight_v'``). + Weight normalization is implemented via a hook that recomputes the weight + tensor from the magnitude and direction before every :meth:`~Module.forward` + call. + + By default, with ``dim=0``, the norm is computed independently per output + channel/plane. To compute a norm over the entire weight tensor, use + ``dim=None``. + + See https://arxiv.org/abs/1602.07868 + + .. warning:: + + This function is deprecated. Use :func:`torch.nn.utils.parametrizations.weight_norm` + which uses the modern parametrization API. The new ``weight_norm`` is compatible + with ``state_dict`` generated from old ``weight_norm``. + + Migration guide: + + * The magnitude (``weight_g``) and direction (``weight_v``) are now expressed + as ``parametrizations.weight.original0`` and ``parametrizations.weight.original1`` + respectively. If this is bothering you, please comment on + https://github.com/pytorch/pytorch/issues/102999 + + * To remove the weight normalization reparametrization, use + :func:`torch.nn.utils.parametrize.remove_parametrizations`. + + * The weight is no longer recomputed once at module forward; instead, it will + be recomputed on every access. To restore the old behavior, use + :func:`torch.nn.utils.parametrize.cached` before invoking the module + in question. + + Args: + module (Module): containing module + name (str, optional): name of weight parameter + dim (int, optional): dimension over which to compute the norm + + Returns: + The original module with the weight norm hook + + Example:: + + >>> m = weight_norm(nn.Linear(20, 40), name='weight') + >>> m + Linear(in_features=20, out_features=40, bias=True) + >>> m.weight_g.size() + torch.Size([40, 1]) + >>> m.weight_v.size() + torch.Size([40, 20]) + + """ + WeightNorm.apply(module, name, dim) + return module + + +def remove_weight_norm(module: T_module, name: str = 'weight') -> T_module: + r"""Remove the weight normalization reparameterization from a module. 
+ + Args: + module (Module): containing module + name (str, optional): name of weight parameter + + Example: + >>> m = weight_norm(nn.Linear(20, 40)) + >>> remove_weight_norm(m) + """ + for k, hook in module._forward_pre_hooks.items(): + if isinstance(hook, WeightNorm) and hook.name == name: + hook.remove(module) + del module._forward_pre_hooks[k] + return module + + raise ValueError(f"weight_norm of '{name}' not found in {module}")