diff --git a/ckpts/universal/global_step120/zero/18.input_layernorm.weight/exp_avg.pt b/ckpts/universal/global_step120/zero/18.input_layernorm.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..d29a52e308dfbb4cadb8fd7e0549d166125d4e50 --- /dev/null +++ b/ckpts/universal/global_step120/zero/18.input_layernorm.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ad01436c399a30e54c45db8c6b8313628737f7fdc6939a76ab61d7c29054deaa +size 9372 diff --git a/ckpts/universal/global_step120/zero/18.input_layernorm.weight/exp_avg_sq.pt b/ckpts/universal/global_step120/zero/18.input_layernorm.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..f4bac6878fa2b9bc860d979fc3577e530bcada84 --- /dev/null +++ b/ckpts/universal/global_step120/zero/18.input_layernorm.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:80f8df3c11c25505cda7aff772aab3184b918cbe1b84b7146cdc6b37fc1d33a1 +size 9387 diff --git a/ckpts/universal/global_step120/zero/18.input_layernorm.weight/fp32.pt b/ckpts/universal/global_step120/zero/18.input_layernorm.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..5be9e5eebeeae78ddca952b559e8d08658cc6653 --- /dev/null +++ b/ckpts/universal/global_step120/zero/18.input_layernorm.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ec9763f703d3503eb2e5a786f5e0bd2379b616665c964c1b4646a759f1406bed +size 9293 diff --git a/venv/lib/python3.10/site-packages/torch/autograd/__init__.py b/venv/lib/python3.10/site-packages/torch/autograd/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e12aef1158a8edf567bb46b60b4267218daa0e8f --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/autograd/__init__.py @@ -0,0 +1,515 @@ +""" +``torch.autograd`` provides classes and functions implementing automatic +differentiation of arbitrary scalar valued functions. It requires minimal +changes to the existing code - you only need to declare :class:`Tensor` s +for which gradients should be computed with the ``requires_grad=True`` keyword. +As of now, we only support autograd for floating point :class:`Tensor` types ( +half, float, double and bfloat16) and complex :class:`Tensor` types (cfloat, cdouble). +""" +import warnings +from typing import Any, Callable, cast, List, Optional, Sequence, Tuple, Union + +import torch + +from torch.types import _size, _TensorOrTensors, _TensorOrTensorsOrGradEdge +from .. import _vmap_internals +from ..overrides import handle_torch_function, has_torch_function, is_tensor_like +from . 
import forward_ad, functional, graph +from .anomaly_mode import detect_anomaly, set_detect_anomaly +from .function import Function, NestedIOFunction +from .grad_mode import ( + _force_original_view_tracking, + _unsafe_preserve_version_counter, + enable_grad, + inference_mode, + no_grad, + set_grad_enabled, + set_multithreading_enabled, +) +from .gradcheck import gradcheck, gradgradcheck +from .graph import _engine_run_backward + +from .variable import Variable + +__all__ = ["Variable", "Function", "backward", "grad_mode"] + +_OptionalTensor = Optional[torch.Tensor] +_ShapeorNestedShape = Union[_size, Sequence[_size], torch.Tensor] + + +def _calculate_shape( + output: torch.Tensor, grad: torch.Tensor, is_grads_batched: bool +) -> Tuple[_ShapeorNestedShape, _ShapeorNestedShape]: + # is_same_size ensures that both tensors are either nested or non nested + # circular import + from torch.nested._internal.nested_tensor import NestedTensor + + if output.is_nested and not isinstance(output, NestedTensor): + if is_grads_batched: + raise RuntimeError("Batched grads are not supported with Nested Tensor.") + out_shape = output._nested_tensor_size() + grad_shape = grad._nested_tensor_size() + + return out_shape, grad_shape + + reg_out_shape = output.shape + reg_grad_shape = grad.shape if not is_grads_batched else grad.shape[1:] + return reg_out_shape, reg_grad_shape + + +def _make_grads( + outputs: Sequence[torch.Tensor], + grads: Sequence[_OptionalTensor], + is_grads_batched: bool, +) -> Tuple[_OptionalTensor, ...]: + new_grads: List[_OptionalTensor] = [] + for out, grad in zip(outputs, grads): + if isinstance(grad, torch.Tensor): + from torch.fx.experimental.symbolic_shapes import expect_true, sym_eq + + first_grad = grad if not is_grads_batched else grad[0] + # TODO: We can remove this conditional once we uniformly use + # singleton int to represent jagged dimension, so that size() call + # on nested tensor works + if out.is_nested or first_grad.is_nested: + shape_matches = torch.is_same_size(out, first_grad) + else: + # We need to do a regular size check, without going through + # the operator, to be able to handle unbacked symints + # (expect_true ensures we can deal with unbacked) + shape_matches = expect_true(sym_eq(out.size(), first_grad.size())) + if not shape_matches: + out_shape, grad_shape = _calculate_shape( + out, first_grad, is_grads_batched + ) + if is_grads_batched: + raise RuntimeError( + "If `is_grads_batched=True`, we interpret the first " + "dimension of each grad_output as the batch dimension. " + "The sizes of the remaining dimensions are expected to match " + "the shape of corresponding output, but a mismatch " + "was detected: grad_output[" + + str(grads.index(grad)) + + "] has a shape of " + + str(grad_shape) + + " and output[" + + str(outputs.index(out)) + + "] has a shape of " + + str(out_shape) + + ". " + "If you only want some tensors in `grad_output` to be considered " + "batched, consider using vmap." + ) + else: + raise RuntimeError( + "Mismatch in shape: grad_output[" + + str(grads.index(grad)) + + "] has a shape of " + + str(grad_shape) + + " and output[" + + str(outputs.index(out)) + + "] has a shape of " + + str(out_shape) + + "." + ) + if out.dtype.is_complex != grad.dtype.is_complex: + raise RuntimeError( + "For complex Tensors, both grad_output and output" + " are required to have the same dtype." 
+ " Mismatch in dtype: grad_output[" + + str(grads.index(grad)) + + "] has a dtype of " + + str(grad.dtype) + + " and output[" + + str(outputs.index(out)) + + "] has a dtype of " + + str(out.dtype) + + "." + ) + new_grads.append(grad) + elif grad is None: + if out.requires_grad: + if out.numel() != 1: + raise RuntimeError( + "grad can be implicitly created only for scalar outputs" + ) + if not out.dtype.is_floating_point: + msg = ( + "grad can be implicitly created only for real scalar outputs" + f" but got {out.dtype}" + ) + raise RuntimeError(msg) + new_grads.append( + torch.ones_like(out, memory_format=torch.preserve_format) + ) + else: + new_grads.append(None) + else: + raise TypeError( + "gradients can be either Tensors or None, but got " + + type(grad).__name__ + ) + return tuple(new_grads) + + +def _tensor_or_tensors_to_tuple( + tensors: Optional[_TensorOrTensors], length: int +) -> Tuple[_OptionalTensor, ...]: + if tensors is None: + return (None,) * length + if isinstance(tensors, torch.Tensor): + return (tensors,) + return tuple(tensors) + + +def backward( + tensors: _TensorOrTensors, + grad_tensors: Optional[_TensorOrTensors] = None, + retain_graph: Optional[bool] = None, + create_graph: bool = False, + grad_variables: Optional[_TensorOrTensors] = None, + inputs: Optional[_TensorOrTensorsOrGradEdge] = None, +) -> None: + r"""Computes the sum of gradients of given tensors with respect to graph + leaves. + + The graph is differentiated using the chain rule. If any of ``tensors`` + are non-scalar (i.e. their data has more than one element) and require + gradient, then the Jacobian-vector product would be computed, in this + case the function additionally requires specifying ``grad_tensors``. + It should be a sequence of matching length, that contains the "vector" + in the Jacobian-vector product, usually the gradient of the differentiated + function w.r.t. corresponding tensors (``None`` is an acceptable value for + all tensors that don't need gradient tensors). + + This function accumulates gradients in the leaves - you might need to zero + ``.grad`` attributes or set them to ``None`` before calling it. + See :ref:`Default gradient layouts` + for details on the memory layout of accumulated gradients. + + .. note:: + Using this method with ``create_graph=True`` will create a reference cycle + between the parameter and its gradient which can cause a memory leak. + We recommend using ``autograd.grad`` when creating the graph to avoid this. + If you have to use this function, make sure to reset the ``.grad`` fields of your + parameters to ``None`` after use to break the cycle and avoid the leak. + + .. note:: + + If you run any forward ops, create ``grad_tensors``, and/or call ``backward`` + in a user-specified CUDA stream context, see + :ref:`Stream semantics of backward passes`. + + .. note:: + + When ``inputs`` are provided and a given input is not a leaf, + the current implementation will call its grad_fn (even though it is not strictly needed to get this gradients). + It is an implementation detail on which the user should not rely. + See https://github.com/pytorch/pytorch/pull/60521#issuecomment-867061780 for more details. + + Args: + tensors (Sequence[Tensor] or Tensor): Tensors of which the derivative will be + computed. + grad_tensors (Sequence[Tensor or None] or Tensor, optional): The "vector" in + the Jacobian-vector product, usually gradients w.r.t. each element of + corresponding tensors. None values can be specified for scalar Tensors or + ones that don't require grad. 
If a None value would be acceptable for all
+ grad_tensors, then this argument is optional.
+ retain_graph (bool, optional): If ``False``, the graph used to compute the grad
+ will be freed. Note that in nearly all cases setting this option to ``True``
+ is not needed and often can be worked around in a much more efficient
+ way. Defaults to the value of ``create_graph``.
+ create_graph (bool, optional): If ``True``, graph of the derivative will
+ be constructed, allowing to compute higher order derivative products.
+ Defaults to ``False``.
+ inputs (Sequence[Tensor] or Tensor or Sequence[GradientEdge], optional): Inputs w.r.t. which the gradient
+ will be accumulated into ``.grad``. All other Tensors will be ignored. If
+ not provided, the gradient is accumulated into all the leaf Tensors that
+ were used to compute the :attr:`tensors`.
+ """
+ if torch._C._are_functorch_transforms_active():
+ raise RuntimeError(
+ "backward() called inside a functorch transform. This is not "
+ "supported, please use functorch.grad or functorch.vjp instead "
+ "or call backward() outside of functorch transforms."
+ )
+
+ if grad_variables is not None:
+ warnings.warn("'grad_variables' is deprecated. Use 'grad_tensors' instead.")
+ if grad_tensors is None:
+ grad_tensors = grad_variables
+ else:
+ raise RuntimeError(
+ "'grad_tensors' and 'grad_variables' (deprecated) "
+ "arguments both passed to backward(). Please only "
+ "use 'grad_tensors'."
+ )
+ if inputs is not None and len(inputs) == 0:
+ raise RuntimeError("'inputs' argument to backward() cannot be empty.")
+
+ tensors = (tensors,) if isinstance(tensors, torch.Tensor) else tuple(tensors)
+ inputs = (
+ (inputs,)
+ if isinstance(inputs, (torch.Tensor, graph.GradientEdge))
+ else tuple(inputs)
+ if inputs is not None
+ else tuple()
+ )
+
+ grad_tensors_ = _tensor_or_tensors_to_tuple(grad_tensors, len(tensors))
+ grad_tensors_ = _make_grads(tensors, grad_tensors_, is_grads_batched=False)
+ if retain_graph is None:
+ retain_graph = create_graph
+
+ # The reason we repeat the same comment below is that
+ # some Python versions print out the first line of multi-line function
+ # calls in the traceback and some print out the last line
+ _engine_run_backward(
+ tensors,
+ grad_tensors_,
+ retain_graph,
+ create_graph,
+ inputs,
+ allow_unreachable=True,
+ accumulate_grad=True,
+ )
+
+
+def grad(
+ outputs: _TensorOrTensors,
+ inputs: _TensorOrTensorsOrGradEdge,
+ grad_outputs: Optional[_TensorOrTensors] = None,
+ retain_graph: Optional[bool] = None,
+ create_graph: bool = False,
+ only_inputs: bool = True,
+ allow_unused: Optional[bool] = None,
+ is_grads_batched: bool = False,
+ materialize_grads: bool = False,
+) -> Tuple[torch.Tensor, ...]:
+ r"""Computes and returns the sum of gradients of outputs with respect to
+ the inputs.
+
+ ``grad_outputs`` should be a sequence of length matching ``outputs``
+ containing the "vector" in vector-Jacobian product, usually the pre-computed
+ gradients w.r.t. each of the outputs. If an output doesn't require_grad,
+ then the gradient can be ``None``.
+
+ .. note::
+
+ If you run any forward ops, create ``grad_outputs``, and/or call ``grad``
+ in a user-specified CUDA stream context, see
+ :ref:`Stream semantics of backward passes`.
+
+ .. note::
+
+ ``only_inputs`` argument is deprecated and is ignored now (defaults to ``True``).
+ To accumulate gradient for other parts of the graph, please use
+ ``torch.autograd.backward``.
+
+ Args:
+ outputs (sequence of Tensor): outputs of the differentiated function.
+ inputs (sequence of Tensor or GradientEdge): Inputs w.r.t. which the gradient will be + returned (and not accumulated into ``.grad``). + grad_outputs (sequence of Tensor): The "vector" in the vector-Jacobian product. + Usually gradients w.r.t. each output. None values can be specified for scalar + Tensors or ones that don't require grad. If a None value would be acceptable + for all grad_tensors, then this argument is optional. Default: None. + retain_graph (bool, optional): If ``False``, the graph used to compute the grad + will be freed. Note that in nearly all cases setting this option to ``True`` + is not needed and often can be worked around in a much more efficient + way. Defaults to the value of ``create_graph``. + create_graph (bool, optional): If ``True``, graph of the derivative will + be constructed, allowing to compute higher order derivative products. + Default: ``False``. + allow_unused (Optional[bool], optional): If ``False``, specifying inputs + that were not used when computing outputs (and therefore their grad is + always zero) is an error. Defaults to the value of ``materialize_grads``. + is_grads_batched (bool, optional): If ``True``, the first dimension of each + tensor in ``grad_outputs`` will be interpreted as the batch dimension. + Instead of computing a single vector-Jacobian product, we compute a + batch of vector-Jacobian products for each "vector" in the batch. + We use the vmap prototype feature as the backend to vectorize calls + to the autograd engine so that this computation can be performed in a + single call. This should lead to performance improvements when compared + to manually looping and performing backward multiple times. Note that + due to this feature being experimental, there may be performance + cliffs. Please use ``torch._C._debug_only_display_vmap_fallback_warnings(True)`` + to show any performance warnings and file an issue on github if warnings exist + for your use case. Defaults to ``False``. + materialize_grads (bool, optional): If ``True``, set the gradient for unused inputs + to zero instead of None. This is useful when computing higher-order derivatives. + If ``materialize_grads`` is ``True`` and ``allow_unused`` is ``False``, an error + will be raised. Defaults to ``False``. + + """ + if materialize_grads and allow_unused is False: + raise ValueError( + "Expected allow_unused to be True or not passed when materialize_grads=True, " + "but got: allow_unused=False." + ) + if allow_unused is None: + allow_unused = materialize_grads + t_outputs = cast( + Tuple[torch.Tensor, ...], + (outputs,) if is_tensor_like(outputs) else tuple(outputs), + ) + if is_tensor_like(inputs) or isinstance(inputs, graph.GradientEdge): + inputs = cast(_TensorOrTensorsOrGradEdge, (inputs,)) + else: + inputs = tuple(inputs) + t_inputs = tuple(i for i in inputs if is_tensor_like(i)) + overridable_args = t_outputs + t_inputs + if has_torch_function(overridable_args): + return handle_torch_function( + grad, + overridable_args, + t_outputs, + inputs, + grad_outputs=grad_outputs, + retain_graph=retain_graph, + create_graph=create_graph, + only_inputs=only_inputs, + allow_unused=allow_unused, + is_grads_batched=is_grads_batched, + materialize_grads=materialize_grads, + ) + + if not only_inputs: + warnings.warn( + "only_inputs argument is deprecated and is ignored now " + "(defaults to True). To accumulate gradient for other " + "parts of the graph, please use torch.autograd.backward." 
+ ) + + grad_outputs_ = _tensor_or_tensors_to_tuple(grad_outputs, len(t_outputs)) + grad_outputs_ = _make_grads( + t_outputs, grad_outputs_, is_grads_batched=is_grads_batched + ) + + if retain_graph is None: + retain_graph = create_graph + + # The reason we repeat the same comment several times below is because + # some Python versions print out the first line of multi-line function + # calls in the traceback and some print out the last line + if is_grads_batched: + + def vjp(gO): + return _engine_run_backward( + t_outputs, + gO, + retain_graph, + create_graph, + inputs, + allow_unused, + accumulate_grad=False, + ) + + result = _vmap_internals._vmap(vjp, 0, 0, allow_none_pass_through=True)( + grad_outputs_ + ) + else: + result = _engine_run_backward( + t_outputs, + grad_outputs_, + retain_graph, + create_graph, + inputs, + allow_unused, + accumulate_grad=False, + ) + if materialize_grads: + if any( + result[i] is None and not is_tensor_like(inputs[i]) + for i in range(len(inputs)) + ): + raise RuntimeError( + "materialize_grads cannot be used when the given input is a GradientEdge" + ) + result = tuple( + output + if output is not None + else torch.zeros_like(input, requires_grad=True) + for (output, input) in zip(result, inputs) + ) + return result + + +# This function applies in case of gradient checkpointing for memory +# optimization. Currently, gradient checkpointing is supported only if the +# execution engine is invoked through torch.autograd.backward() and its +# inputs argument is not passed. It is not supported for torch.autograd.grad(). +# This is because if inputs are specified, the gradient won't be calculated for +# anything else e.g. model parameters like weights, bias etc. +# +# This function returns whether the checkpointing is valid i.e. torch.autograd.backward +# or not i.e. torch.autograd.grad. The implementation works by maintaining a thread +# local variable in torch/csrc/autograd/engine.cpp which looks at the NodeTask +# in the stack and before a NodeTask is executed in evaluate_function, it +# checks for whether reentrant backwards is imperative or not. +# See https://github.com/pytorch/pytorch/pull/4594 for more discussion/context +def _is_checkpoint_valid(): + return Variable._execution_engine.is_checkpoint_valid() + + +def variable(*args, **kwargs): + raise RuntimeError( + "torch.autograd.variable(...) is deprecated, use torch.tensor(...) instead" + ) + + +# Monkey patching variable.Variable to fix FX codegen. FX generates a call by roughly doing +# f"{fn.__module__}.{fn.__name__}(...). This yields torch.autograd.variable.Variable(...) in the +# output of an FX graph. Unfortunately the module name torch.autograd.variable is shadowed by the +# deprecated function - variable(...). 
+variable.Variable = Variable # type: ignore[attr-defined] + +if not torch._C._autograd_init(): + raise RuntimeError("autograd initialization failed") + +# Import all native method/classes +from torch._C._autograd import ( + _add_metadata_json, + _disable_profiler, + _disable_profiler_legacy, + _enable_profiler, + _enable_profiler_legacy, + _enable_record_function, + _get_sequence_nr, + _kineto_step, + _KinetoEvent, + _pop_saved_tensors_default_hooks, + _prepare_profiler, + _profiler_enabled, + _ProfilerResult, + _push_saved_tensors_default_hooks, + _record_function_with_args_enter, + _record_function_with_args_exit, + _set_empty_test_observer, + _supported_activities, + DeviceType, + kineto_available, + ProfilerEvent, + SavedTensor, +) + +from torch._C._profiler import ProfilerActivity, ProfilerConfig, ProfilerState + +from . import profiler + + +def _register_py_tensor_class_for_device(device, cls): + if not isinstance(cls, type): + raise RuntimeError("cls isn't a typeinfo object") + torch._C._register_py_class_for_device(device, cls) + + +is_multithreading_enabled = torch._C._is_multithreading_enabled +torch._C._add_docstr( + is_multithreading_enabled, "Returns True if multithreading is currently enabled." +) + +is_view_replay_enabled = torch._C._is_view_replay_enabled +torch._C._add_docstr( + is_view_replay_enabled, "Returns True if view-replay is currently enabled." +) diff --git a/venv/lib/python3.10/site-packages/torch/autograd/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/autograd/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ba58831edf2b2dbca57c904f2e9e6e5766354f45 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/autograd/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/autograd/__pycache__/anomaly_mode.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/autograd/__pycache__/anomaly_mode.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9ff84d521a46cc34844e83a8c4c2468083b558f4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/autograd/__pycache__/anomaly_mode.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/autograd/__pycache__/forward_ad.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/autograd/__pycache__/forward_ad.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6656b167ef9624791defd8229c0bd5017d1d9857 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/autograd/__pycache__/forward_ad.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/autograd/__pycache__/function.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/autograd/__pycache__/function.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6329a7a8656dd05fb7fe942b19f052d1cfea0fbb Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/autograd/__pycache__/function.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/autograd/__pycache__/functional.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/autograd/__pycache__/functional.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..92e62c22e3332b77d83a4265e8d40831b512c9ec Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/autograd/__pycache__/functional.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/torch/autograd/__pycache__/grad_mode.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/autograd/__pycache__/grad_mode.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..529600701de5538760179004ff5e025d1185f5a4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/autograd/__pycache__/grad_mode.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/autograd/__pycache__/gradcheck.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/autograd/__pycache__/gradcheck.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7dc2f14dc7fd13e16020b204b92eb5797760a1ff Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/autograd/__pycache__/gradcheck.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/autograd/__pycache__/graph.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/autograd/__pycache__/graph.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1e7f547c9bd3f0223b194144c5345aee607c2a05 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/autograd/__pycache__/graph.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/autograd/__pycache__/profiler.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/autograd/__pycache__/profiler.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0b6a555cb25166acb8e3fe7697eec10c052ce3fa Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/autograd/__pycache__/profiler.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/autograd/__pycache__/profiler_legacy.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/autograd/__pycache__/profiler_legacy.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a468c68178ea26e2727c76c6123dd3e4acd2855c Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/autograd/__pycache__/profiler_legacy.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/autograd/__pycache__/profiler_util.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/autograd/__pycache__/profiler_util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..691915304093e77080607c15e6225662b163bec6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/autograd/__pycache__/profiler_util.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/autograd/__pycache__/variable.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/autograd/__pycache__/variable.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c1efbd18d2a40925eb6e1a35d97abacb0f224b3e Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/autograd/__pycache__/variable.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/autograd/_functions/__init__.py b/venv/lib/python3.10/site-packages/torch/autograd/_functions/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..4170fad3eeac788dcb36b6ae1ddbee1b44dc25a1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/autograd/_functions/__init__.py @@ -0,0 +1 @@ +from .tensor import * # noqa: F403 diff --git a/venv/lib/python3.10/site-packages/torch/autograd/_functions/__pycache__/__init__.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/torch/autograd/_functions/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b4986bcfae4103e6254c29ec82e442cbada1bc2d Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/autograd/_functions/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/autograd/_functions/__pycache__/tensor.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/autograd/_functions/__pycache__/tensor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cb3972a5d2653fd7933a82a5ba8a115286834b8c Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/autograd/_functions/__pycache__/tensor.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/autograd/_functions/__pycache__/utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/autograd/_functions/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9e13c7ad5b374e12c95a98259e1af64bbc41bbb9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/autograd/_functions/__pycache__/utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/autograd/_functions/tensor.py b/venv/lib/python3.10/site-packages/torch/autograd/_functions/tensor.py new file mode 100644 index 0000000000000000000000000000000000000000..f091d38777fcd578904b6bc1b9f22f2063877066 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/autograd/_functions/tensor.py @@ -0,0 +1,63 @@ +import operator +import warnings +from functools import reduce + +import torch +import torch._utils +from ..function import Function + + +class Type(Function): + @staticmethod + def forward(ctx, i, dest_type): + warnings.warn( + "torch.autograd._functions.Type is deprecated as of PyTorch 2.1, please use " + "torch.tensor.to(dtype=dtype) instead." + ) + ctx.input_type = type(i) + ctx.input_device = -1 if not i.is_cuda else i.get_device() + return i.type(dest_type) + + @staticmethod + def backward(ctx, grad_output): + if ctx.input_device == -1: + return grad_output.type(ctx.input_type), None + else: + with torch.cuda.device(ctx.input_device): + return grad_output.type(ctx.input_type), None + + +# TODO: deprecate this +class Resize(Function): + @staticmethod + def forward(ctx, tensor, sizes): + ctx.sizes = sizes + ctx.numel = reduce(operator.mul, sizes, 1) + if tensor.numel() != ctx.numel: + raise RuntimeError( + ( + "requested resize to {} ({} elements in total), " + "but the given tensor has a size of {} ({} elements). " + "autograd's resize can only change the shape of a given " + "tensor, while preserving the number of elements. 
" + ).format( + "x".join(map(str, sizes)), + ctx.numel, + "x".join(map(str, tensor.size())), + tensor.numel(), + ) + ) + ctx.input_sizes = tensor.size() + if tensor.is_quantized: + tensor.copy_(tensor) + return tensor.contiguous().view(*sizes) + if tensor.is_contiguous(): + result = tensor.new(tensor).contiguous().view(*sizes) + return result + else: + return tensor.contiguous().view(*sizes) + + @staticmethod + def backward(ctx, grad_output): + assert grad_output.numel() == ctx.numel + return grad_output.contiguous().view(ctx.input_sizes), None diff --git a/venv/lib/python3.10/site-packages/torch/autograd/_functions/utils.py b/venv/lib/python3.10/site-packages/torch/autograd/_functions/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..7111d893400f2f2aee77cf51fe2f6ac914ccd58e --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/autograd/_functions/utils.py @@ -0,0 +1,62 @@ +import operator +from functools import reduce + + +def maybe_view(tensor, size, check_same_size=True): + if check_same_size and tensor.size() == size: + return tensor + return tensor.contiguous().view(size) + + +def maybe_unexpand(tensor, old_size, check_same_size=True): + if check_same_size and tensor.size() == old_size: + return tensor + num_unsqueezed = tensor.dim() - len(old_size) + expanded_dims = [ + dim + for dim, (expanded, original) in enumerate( + zip(tensor.size()[num_unsqueezed:], old_size) + ) + if expanded != original + ] + + for _ in range(num_unsqueezed): + tensor = tensor.sum(0, keepdim=False) + for dim in expanded_dims: + tensor = tensor.sum(dim, keepdim=True) + return tensor + + +# Check whether the op enable broadcasting, and whether it is supported by ONNX. +# If dims1 and dims2 are different, then broadcast is True. +# We always assume the combination of dims1 and dims2 is broadcastable. +# The following types of broadcasting are supported in ONNX: +# 1) Only one element in dims2, such as dims2 = [1, 1] +# 2) dims2 is suffix of dims1, such as dims1 = [2, 3, 4], and dims2 = [3, 4] +# Details can be found here: https://github.com/onnx/onnx/blob/master/docs/Operators.md#Gemm +def check_onnx_broadcast(dims1, dims2): + broadcast = False + supported = True + len1 = len(dims1) + len2 = len(dims2) + numel1 = reduce(operator.mul, dims1) + numel2 = reduce(operator.mul, dims2) + if len1 < len2: + broadcast = True + if numel2 != 1: + supported = False + elif len1 > len2: + broadcast = True + if numel2 != 1 and dims1[len1 - len2 :] != dims2: + supported = False + else: + if dims1 != dims2: + broadcast = True + if numel2 != 1: + supported = False + + if not supported: + raise ValueError( + f"Numpy style broadcasting is not supported in ONNX. Input dims are: {dims1}, {dims2}" + ) + return broadcast diff --git a/venv/lib/python3.10/site-packages/torch/autograd/anomaly_mode.py b/venv/lib/python3.10/site-packages/torch/autograd/anomaly_mode.py new file mode 100644 index 0000000000000000000000000000000000000000..80a2526a81ded1c9776ca9c8cea1879465ff3066 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/autograd/anomaly_mode.py @@ -0,0 +1,119 @@ +import warnings + +import torch + +__all__ = ["detect_anomaly", "set_detect_anomaly"] + + +class detect_anomaly: + r"""Context-manager that enable anomaly detection for the autograd engine. + + This does two things: + + - Running the forward pass with detection enabled will allow the backward + pass to print the traceback of the forward operation that created the failing + backward function. 
+ - If ``check_nan`` is ``True``, any backward computation that generate "nan" + value will raise an error. Default ``True``. + + .. warning:: + This mode should be enabled only for debugging as the different tests + will slow down your program execution. + + Example: + + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_ANOMALY) + >>> import torch + >>> from torch import autograd + >>> class MyFunc(autograd.Function): + ... @staticmethod + ... def forward(ctx, inp): + ... return inp.clone() + ... @staticmethod + ... def backward(ctx, gO): + ... # Error during the backward pass + ... raise RuntimeError("Some error in backward") + ... return gO.clone() + >>> def run_fn(a): + ... out = MyFunc.apply(a) + ... return out.sum() + >>> inp = torch.rand(10, 10, requires_grad=True) + >>> out = run_fn(inp) + >>> out.backward() + Traceback (most recent call last): + File "", line 1, in + File "/your/pytorch/install/torch/_tensor.py", line 93, in backward + torch.autograd.backward(self, gradient, retain_graph, create_graph) + File "/your/pytorch/install/torch/autograd/__init__.py", line 90, in backward + allow_unreachable=True) # allow_unreachable flag + File "/your/pytorch/install/torch/autograd/function.py", line 76, in apply + return self._forward_cls.backward(self, *args) + File "", line 8, in backward + RuntimeError: Some error in backward + >>> with autograd.detect_anomaly(): + ... inp = torch.rand(10, 10, requires_grad=True) + ... out = run_fn(inp) + ... out.backward() + Traceback of forward call that caused the error: + File "tmp.py", line 53, in + out = run_fn(inp) + File "tmp.py", line 44, in run_fn + out = MyFunc.apply(a) + Traceback (most recent call last): + File "", line 4, in + File "/your/pytorch/install/torch/_tensor.py", line 93, in backward + torch.autograd.backward(self, gradient, retain_graph, create_graph) + File "/your/pytorch/install/torch/autograd/__init__.py", line 90, in backward + allow_unreachable=True) # allow_unreachable flag + File "/your/pytorch/install/torch/autograd/function.py", line 76, in apply + return self._forward_cls.backward(self, *args) + File "", line 8, in backward + RuntimeError: Some error in backward + + """ + + def __init__(self, check_nan=True) -> None: + self.prev = torch.is_anomaly_enabled() + self.check_nan = check_nan + self.prev_check_nan = torch.is_anomaly_check_nan_enabled() + warnings.warn( + "Anomaly Detection has been enabled. " + "This mode will increase the runtime " + "and should only be enabled for debugging.", + stacklevel=2, + ) + + def __enter__(self) -> None: + torch.set_anomaly_enabled(True, self.check_nan) + + def __exit__(self, *args: object) -> None: + torch.set_anomaly_enabled(self.prev, self.prev_check_nan) + + +class set_detect_anomaly: + r"""Context-manager that sets the anomaly detection for the autograd engine on or off. + + ``set_detect_anomaly`` will enable or disable the autograd anomaly detection + based on its argument :attr:`mode`. + It can be used as a context-manager or as a function. + + See ``detect_anomaly`` above for details of the anomaly detection behaviour. + + Args: + mode (bool): Flag whether to enable anomaly detection (``True``), + or disable (``False``). 
+ check_nan (bool): Flag whether to raise an error when the backward
+ pass generates "nan" values.
+
+ """
+
+ def __init__(self, mode: bool, check_nan: bool = True) -> None:
+ self.prev = torch.is_anomaly_enabled()
+ self.prev_check_nan = torch.is_anomaly_check_nan_enabled()
+ torch.set_anomaly_enabled(mode, check_nan)
+
+ def __enter__(self) -> None:
+ pass
+
+ def __exit__(self, *args: object) -> None:
+ torch.set_anomaly_enabled(self.prev, self.prev_check_nan)
diff --git a/venv/lib/python3.10/site-packages/torch/autograd/forward_ad.py b/venv/lib/python3.10/site-packages/torch/autograd/forward_ad.py
new file mode 100644
index 0000000000000000000000000000000000000000..a082f1c5837e0c1caa5d8529884be5ab7a66c776
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/autograd/forward_ad.py
@@ -0,0 +1,227 @@
+import os
+from collections import namedtuple
+
+from typing import Any
+
+import torch
+from .grad_mode import _DecoratorContextManager
+
+__all__ = [
+ "UnpackedDualTensor",
+ "enter_dual_level",
+ "exit_dual_level",
+ "make_dual",
+ "unpack_dual",
+ "dual_level",
+]
+
+# Global variable used to make the python API simpler to use
+_current_level = -1
+
+
+def enter_dual_level():
+ r"""Enter a new forward grad level.
+
+ This level can be used to make and unpack dual Tensors to compute
+ forward gradients.
+
+ This function also updates the current level that is used by default
+ by the other functions in this API.
+ """
+ global _current_level
+ new_level = torch._C._enter_dual_level()
+ if new_level != _current_level + 1:
+ raise RuntimeError(
+ "Entering a new forward AD level but the current level "
+ "is not valid. Make sure you did not modify it directly."
+ )
+ _current_level = new_level
+ return new_level
+
+
+def exit_dual_level(*, level=None):
+ r"""Exit a forward grad level.
+
+ This function deletes all the gradients associated with this
+ level. Only deleting the latest entered level is allowed.
+
+ This function also updates the current level that is used by default
+ by the other functions in this API.
+ """
+ global _current_level
+ if level is None:
+ level = _current_level
+ if level != _current_level:
+ raise RuntimeError(
+ "Trying to exit a forward AD level that was not the last one "
+ "that was created. This is not supported."
+ )
+ torch._C._exit_dual_level(level=level)
+ _current_level = level - 1
+
+
+def make_dual(tensor, tangent, *, level=None):
+ r"""Associate a tensor value with its tangent to create a "dual tensor" for forward AD gradient computation.
+
+ The result is a new tensor aliased to :attr:`tensor` with :attr:`tangent` embedded
+ as an attribute as-is if it has the same storage layout or copied otherwise.
+ The tangent attribute can be recovered with :func:`unpack_dual`.
+
+ This function is backward differentiable.
+
+ Given a function `f` whose Jacobian is `J`, it allows one to compute the Jacobian-vector product (`jvp`)
+ between `J` and a given vector `v` as follows.
+
+ Example::
+
+ >>> # xdoctest: +SKIP("Undefined variables")
+ >>> with dual_level():
+ ... inp = make_dual(x, v)
+ ... out = f(inp)
+ ... y, jvp = unpack_dual(out)
+
+ Please see the `forward-mode AD tutorial `__
+ for detailed steps on how to use this API.
+
+ """
+ # See NOTE: [forward-mode AD decompositions mechanism]
+ #
+ # Import from torch._decomp import decompositions_for_jvp to register
+ # decompositions for jvp to the jit registry
+ #
+ # FIXME: We specify that __debug__ must be True because
+ # if python is run with -OO or -O flags (i.e., __debug__ is False), we encounter the
+ # following error:
+ #
+ # Return value was annotated as having type Tuple[NoneType, NoneType] but is actually of
+ # type Tuple[Tensor, Tensor]:
+ # File ".../torch/_decomp/__init__.py", line 1585
+ # else:
+ # buffer = z
+ # return min - torch.log1p(z), buffer
+ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ <--- HERE
+ if os.environ.get("PYTORCH_JIT", "1") == "1" and __debug__:
+ from torch._decomp import decompositions_for_jvp # noqa: F401
+
+ if level is None:
+ level = _current_level
+
+ if level < 0:
+ raise RuntimeError(
+ "Trying to create a dual Tensor for forward AD but no level "
+ "exists, make sure to enter_dual_level() first."
+ )
+ if not (tensor.is_floating_point() or tensor.is_complex()):
+ raise ValueError(
+ f"Expected primal to be floating point or complex, but got: {tensor.dtype}"
+ )
+ if not (tangent.is_floating_point() or tangent.is_complex()):
+ raise ValueError(
+ f"Expected tangent to be floating point or complex, but got: {tangent.dtype}"
+ )
+
+ return torch._VF._make_dual(tensor, tangent, level=level)
+
+
+_UnpackedDualTensor = namedtuple("_UnpackedDualTensor", ["primal", "tangent"])
+
+
+class UnpackedDualTensor(_UnpackedDualTensor):
+ r"""Namedtuple returned by :func:`unpack_dual` containing the primal and tangent components of the dual tensor.
+
+ See :func:`unpack_dual` for more details.
+
+ """
+
+ pass
+
+
+def unpack_dual(tensor, *, level=None):
+ r"""Unpack a "dual tensor" to get both its Tensor value and its forward AD gradient.
+
+ The result is a namedtuple ``(primal, tangent)`` where ``primal`` is a view of
+ :attr:`tensor`'s primal and ``tangent`` is :attr:`tensor`'s tangent as-is.
+ Neither of these tensors can be a dual tensor of level :attr:`level`.
+
+ This function is backward differentiable.
+
+ Example::
+
+ >>> # xdoctest: +SKIP("Undefined variables")
+ >>> with dual_level():
+ ... inp = make_dual(x, x_t)
+ ... out = f(inp)
+ ... y, jvp = unpack_dual(out)
+ ... jvp = unpack_dual(out).tangent
+
+ Please see the `forward-mode AD tutorial `__
+ for detailed steps on how to use this API.
+ """
+ if level is None:
+ level = _current_level
+
+ if level < 0:
+ return UnpackedDualTensor(tensor, None)
+
+ primal, dual = torch._VF._unpack_dual(tensor, level=level)
+
+ return UnpackedDualTensor(primal, dual)
+
+
+class dual_level(_DecoratorContextManager):
+ r"""Context-manager for forward AD, where all forward AD computation must occur within the ``dual_level`` context.
+
+ .. Note::
+
+ The ``dual_level`` context appropriately enters and exits the dual level to
+ control the current forward AD level, which is used by default by the other
+ functions in this API.
+
+ We currently don't plan to support nested ``dual_level`` contexts, however, so
+ only a single forward AD level is supported. To compute higher-order
+ forward grads, one can use :func:`torch.func.jvp`.
+
+ Example::
+
+ >>> # xdoctest: +SKIP("Undefined variables")
+ >>> x = torch.tensor([1])
+ >>> x_t = torch.tensor([1])
+ >>> with dual_level():
+ ... inp = make_dual(x, x_t)
+ ... # Do computations with inp
+ ... out = your_fn(inp)
+ ...
_, grad = unpack_dual(out) + >>> grad is None + False + >>> # After exiting the level, the grad is deleted + >>> _, grad_after = unpack_dual(out) + >>> grad is None + True + + Please see the `forward-mode AD tutorial `__ + for detailed steps on how to use this API. + """ + + def __enter__(self): + return enter_dual_level() + + def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None: + exit_dual_level() + + +# Private helper functions +_is_fwd_grad_enabled = torch._C._is_fwd_grad_enabled + + +# Private helper function to enable or disable fwd grad. +# If you're a user and want to use this, please file an issue to discuss the use case. +class _set_fwd_grad_enabled(_DecoratorContextManager): + def __init__(self, mode: bool) -> None: + self.prev = _is_fwd_grad_enabled() + torch._C._set_fwd_grad_enabled(mode) + + def __enter__(self) -> None: + pass + + def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None: + torch._C._set_fwd_grad_enabled(self.prev) diff --git a/venv/lib/python3.10/site-packages/torch/autograd/function.py b/venv/lib/python3.10/site-packages/torch/autograd/function.py new file mode 100644 index 0000000000000000000000000000000000000000..dfe3cde05ab659a95875d6ca7f4e0fc15fb74dee --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/autograd/function.py @@ -0,0 +1,883 @@ +import functools +import inspect +import itertools +import warnings +from collections import OrderedDict +from typing import Any, List, Optional, Tuple + +import torch +import torch._C as _C +import torch._functorch as _functorch +import torch.utils.hooks as hooks +from torch._C import _functions +from torch._functorch.autograd_function import custom_function_call + +__all__ = [ + "FunctionCtx", + "BackwardCFunction", + "FunctionMeta", + "Function", + "once_differentiable", + "traceable", + "InplaceFunction", + "NestedIOFunction", +] + +# Unique id provider for each class inheriting from Function +# This is incremented in FunctionMeta during class definition +AUTOGRAD_FUNCTION_COUNTER = itertools.count() + + +# Formerly known as: _ContextMethodMixin +class FunctionCtx: + def save_for_backward(self, *tensors: torch.Tensor): + r"""Save given tensors for a future call to :func:`~Function.backward`. + + ``save_for_backward`` should be called at most once, only from inside the + :func:`forward` method, and only with tensors. + + All tensors intended to be used in the backward pass should be saved + with ``save_for_backward`` (as opposed to directly on ``ctx``) to prevent + incorrect gradients and memory leaks, and enable the application of saved + tensor hooks. See :class:`torch.autograd.graph.saved_tensors_hooks`. + + Note that if intermediary tensors, tensors that are neither inputs + nor outputs of :func:`forward`, are saved for backward, your custom Function + may not support double backward. + Custom Functions that do not support double backward should decorate their + :func:`backward` method with ``@once_differentiable`` so that performing + double backward raises an error. If you'd like to support double backward, + you can either recompute intermediaries based on the inputs during backward + or return the intermediaries as the outputs of the custom Function. See the + `double backward tutorial `_ + for more details. + + In :func:`backward`, saved tensors can be accessed through the :attr:`saved_tensors` + attribute. Before returning them to the user, a check is made to ensure + they weren't used in any in-place operation that modified their content. 
+ + Arguments can also be ``None``. This is a no-op. + + See :ref:`extending-autograd` for more details on how to use this method. + + Example:: + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_AUTOGRAD) + >>> class Func(Function): + >>> @staticmethod + >>> def forward(ctx, x: torch.Tensor, y: torch.Tensor, z: int): + >>> w = x * z + >>> out = x * y + y * z + w * y + >>> ctx.save_for_backward(x, y, w, out) + >>> ctx.z = z # z is not a tensor + >>> return out + >>> + >>> @staticmethod + >>> @once_differentiable + >>> def backward(ctx, grad_out): + >>> x, y, w, out = ctx.saved_tensors + >>> z = ctx.z + >>> gx = grad_out * (y + y * z) + >>> gy = grad_out * (x + z + w) + >>> gz = None + >>> return gx, gy, gz + >>> + >>> a = torch.tensor(1., requires_grad=True, dtype=torch.double) + >>> b = torch.tensor(2., requires_grad=True, dtype=torch.double) + >>> c = 4 + >>> d = Func.apply(a, b, c) + + """ + self.to_save = tensors + + def save_for_forward(self, *tensors: torch.Tensor): + r"""Save given tensors for a future call to :func:`~Function.jvp`. + + ``save_for_forward`` should be only called once, from inside the :func:`forward` + method, and only be called with tensors. + + In :func:`jvp`, saved objects can be accessed through the :attr:`saved_tensors` + attribute. + + Arguments can also be ``None``. This is a no-op. + + See :ref:`extending-autograd` for more details on how to use this method. + + Example:: + >>> # xdoctest: +SKIP + >>> class Func(torch.autograd.Function): + >>> @staticmethod + >>> def forward(ctx, x: torch.Tensor, y: torch.Tensor, z: int): + >>> ctx.save_for_backward(x, y) + >>> ctx.save_for_forward(x, y) + >>> ctx.z = z + >>> return x * y * z + >>> + >>> @staticmethod + >>> def jvp(ctx, x_t, y_t, _): + >>> x, y = ctx.saved_tensors + >>> z = ctx.z + >>> return z * (y * x_t + x * y_t) + >>> + >>> @staticmethod + >>> def vjp(ctx, grad_out): + >>> x, y = ctx.saved_tensors + >>> z = ctx.z + >>> return z * grad_out * y, z * grad_out * x, None + >>> + >>> a = torch.tensor(1., requires_grad=True, dtype=torch.double) + >>> t = torch.tensor(1., dtype=torch.double) + >>> b = torch.tensor(2., requires_grad=True, dtype=torch.double) + >>> c = 4 + >>> + >>> with fwAD.dual_level(): + >>> a_dual = fwAD.make_dual(a, t) + >>> d = Func.apply(a_dual, b, c) + + """ + for tensor in tensors: + assert isinstance(tensor, torch.Tensor) or tensor is None, ( + "save_for_forward expects all arguments to be tensors; you should " + "save non-tensors as attributes on ctx." + ) + + self.saved_for_forward = tensors + + def mark_dirty(self, *args: torch.Tensor): + r"""Mark given tensors as modified in an in-place operation. + + **This should be called at most once, only from inside the** + :func:`forward` **method, and all arguments should be inputs.** + + Every tensor that's been modified in-place in a call to :func:`forward` + should be given to this function, to ensure correctness of our checks. + It doesn't matter whether the function is called before or after + modification. + + Examples:: + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_AUTOGRAD) + >>> class Inplace(Function): + >>> @staticmethod + >>> def forward(ctx, x): + >>> x_npy = x.numpy() # x_npy shares storage with x + >>> x_npy += 1 + >>> ctx.mark_dirty(x) + >>> return x + >>> + >>> @staticmethod + >>> @once_differentiable + >>> def backward(ctx, grad_output): + >>> return grad_output + >>> + >>> a = torch.tensor(1., requires_grad=True, dtype=torch.double).clone() + >>> b = a * a + >>> Inplace.apply(a) # This would lead to wrong gradients! 
+ >>> # but the engine would not know unless we mark_dirty + >>> # xdoctest: +SKIP + >>> b.backward() # RuntimeError: one of the variables needed for gradient + >>> # computation has been modified by an inplace operation + + """ + self.dirty_tensors = args + + def mark_shared_storage(self, *pairs): + warnings.warn( + "mark_shared_storage is deprecated. " + "Tensors with shared storages are automatically tracked. Note " + "that calls to `set_()` are not tracked" + ) + + def mark_non_differentiable(self, *args: torch.Tensor): + r"""Mark outputs as non-differentiable. + + **This should be called at most once, only from inside the** + :func:`forward` **method, and all arguments should be tensor outputs.** + + This will mark outputs as not requiring gradients, increasing the + efficiency of backward computation. You still need to accept a gradient + for each output in :meth:`~Function.backward`, but it's always going to + be a zero tensor with the same shape as the shape of a corresponding + output. + + This is used e.g. for indices returned from a sort. See example:: + >>> class Func(Function): + >>> @staticmethod + >>> def forward(ctx, x): + >>> sorted, idx = x.sort() + >>> ctx.mark_non_differentiable(idx) + >>> ctx.save_for_backward(x, idx) + >>> return sorted, idx + >>> + >>> @staticmethod + >>> @once_differentiable + >>> def backward(ctx, g1, g2): # still need to accept g2 + >>> x, idx = ctx.saved_tensors + >>> grad_input = torch.zeros_like(x) + >>> grad_input.index_add_(0, idx, g1) + >>> return grad_input + + """ + self.non_differentiable = args + + def set_materialize_grads(self, value: bool): + r"""Set whether to materialize grad tensors. Default is ``True``. + + **This should be called only from inside the** :func:`forward` **method** + + If ``True``, undefined grad tensors will be expanded to tensors full of zeros + prior to calling the :func:`backward` and :func:`jvp` methods. + + Example:: + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_AUTOGRAD) + >>> class SimpleFunc(Function): + >>> @staticmethod + >>> def forward(ctx, x): + >>> return x.clone(), x.clone() + >>> + >>> @staticmethod + >>> @once_differentiable + >>> def backward(ctx, g1, g2): + >>> return g1 + g2 # No check for None necessary + >>> + >>> # We modify SimpleFunc to handle non-materialized grad outputs + >>> class Func(Function): + >>> @staticmethod + >>> def forward(ctx, x): + >>> ctx.set_materialize_grads(False) + >>> ctx.save_for_backward(x) + >>> return x.clone(), x.clone() + >>> + >>> @staticmethod + >>> @once_differentiable + >>> def backward(ctx, g1, g2): + >>> x, = ctx.saved_tensors + >>> grad_input = torch.zeros_like(x) + >>> if g1 is not None: # We must check for None now + >>> grad_input += g1 + >>> if g2 is not None: + >>> grad_input += g2 + >>> return grad_input + >>> + >>> a = torch.tensor(1., requires_grad=True) + >>> b, _ = Func.apply(a) # induces g2 to be undefined + + """ + self.materialize_grads = value + + +# DO NOT USE: This is only defined to be able to load old serialized models +_ContextMethodMixin = FunctionCtx + + +class _HookMixin: + @staticmethod + def _register_hook(backward_hooks, hook): + if backward_hooks is None: + backward_hooks = OrderedDict() + handle = hooks.RemovableHandle(backward_hooks) + backward_hooks[handle.id] = hook + return backward_hooks, handle + + +class BackwardCFunction(_C._FunctionBase, FunctionCtx, _HookMixin): + r""" + This class is used for internal autograd work. Do not use. 
+ """ + + def apply(self, *args): + r""" + Apply method used when executing this Node during the backward + """ + # _forward_cls is defined by derived class + # The user should define either backward or vjp but never both. + backward_fn = self._forward_cls.backward # type: ignore[attr-defined] + vjp_fn = self._forward_cls.vjp # type: ignore[attr-defined] + if backward_fn is not Function.backward and vjp_fn is not Function.vjp: + raise RuntimeError( + "Implementing both 'backward' and 'vjp' for a custom " + "Function is not allowed. You should only implement one " + "of them." + ) + user_fn = vjp_fn if vjp_fn is not Function.vjp else backward_fn + return user_fn(self, *args) + + def apply_jvp(self, *args): + r""" + Apply method used when executing forward mode AD during the forward + """ + # _forward_cls is defined by derived class + return self._forward_cls.jvp(self, *args) # type: ignore[attr-defined] + + def _compiled_autograd_key(self): + return self._forward_cls._compiled_autograd_key(self) # type: ignore[attr-defined] + + +def _warn_traceable_deprecated(): + warnings.warn( + "The is_traceable field on torch.autograd.Function is deprecated " + "and will be removed in PyTorch 2.4.", + stacklevel=3, + ) + + +class FunctionMeta(type): + """Function metaclass. + + This metaclass sets up the following properties: + _backward_cls: The Function class corresponding to the differentiated + version of this function (which is generated on the fly by this + metaclass). + """ + + def __init__(cls, name, bases, attrs): + backward_fn = type( + name + "Backward", (BackwardCFunction,), {"_forward_cls": cls} + ) + backward_fn._autograd_function_id = next(AUTOGRAD_FUNCTION_COUNTER) # type: ignore[attr-defined] + backward_fn._compiled_autograd_should_lift = attrs.get( # type: ignore[attr-defined] + "_compiled_autograd_should_lift", True + ) + cls._backward_cls = backward_fn + + if "is_traceable" in attrs and attrs["is_traceable"] is True: + _warn_traceable_deprecated() + + super().__init__(name, bases, attrs) + + def __getattribute__(cls, name): + if name == "is_traceable": + _warn_traceable_deprecated() + return super().__getattribute__(name) + + def __setattr__(cls, name, value): + if name == "is_traceable" and value is True: + warnings.warn( + "The is_traceable field on torch.autograd.Function is deprecated " + "and will be removed in PyTorch 2.4.", + stacklevel=2, + ) + return super().__setattr__(name, value) + + +class _SingleLevelFunction( + _C._FunctionBase, FunctionCtx, _HookMixin, metaclass=FunctionMeta +): + @staticmethod + def forward(ctx: Any, *args: Any, **kwargs: Any) -> Any: + r"""Define the forward of the custom autograd Function. + + This function is to be overridden by all subclasses. + There are two ways to define forward: + + Usage 1 (Combined forward and ctx):: + + @staticmethod + def forward(ctx: Any, *args: Any, **kwargs: Any) -> Any: + pass + + - It must accept a context ctx as the first argument, followed by any + number of arguments (tensors or other types). + - See :ref:`combining-forward-context` for more details + + Usage 2 (Separate forward and ctx):: + + @staticmethod + def forward(*args: Any, **kwargs: Any) -> Any: + pass + + @staticmethod + def setup_context(ctx: Any, inputs: Tuple[Any, ...], output: Any) -> None: + pass + + - The forward no longer accepts a ctx argument. + - Instead, you must also override the :meth:`torch.autograd.Function.setup_context` + staticmethod to handle setting up the ``ctx`` object. 
+ ``output`` is the output of the forward, ``inputs`` are a Tuple of inputs + to the forward. + - See :ref:`extending-autograd` for more details + + The context can be used to store arbitrary data that can be then + retrieved during the backward pass. Tensors should not be stored + directly on `ctx` (though this is not currently enforced for + backward compatibility). Instead, tensors should be saved either with + :func:`ctx.save_for_backward` if they are intended to be used in + ``backward`` (equivalently, ``vjp``) or :func:`ctx.save_for_forward` + if they are intended to be used for in ``jvp``. + """ + raise NotImplementedError( + "You must implement the forward function for custom autograd.Function." + ) + + @staticmethod + def setup_context(ctx: Any, inputs: Tuple[Any, ...], output: Any) -> Any: + r"""There are two ways to define the forward pass of an autograd.Function. + + Either: + + 1. Override forward with the signature ``forward(ctx, *args, **kwargs)``. + ``setup_context`` is not overridden. Setting up the ctx for backward + happens inside the ``forward``. + 2. Override forward with the signature ``forward(*args, **kwargs)`` and + override ``setup_context``. Setting up the ctx for backward happens + inside ``setup_context`` (as opposed to inside the ``forward``) + + See :meth:`torch.autograd.Function.forward` and :ref:`extending-autograd` for more details. + """ + raise NotImplementedError("setup_context is not implemented.") + + @staticmethod + def backward(ctx: Any, *grad_outputs: Any) -> Any: + r"""Define a formula for differentiating the operation with backward mode automatic differentiation. + + This function is to be overridden by all subclasses. + (Defining this function is equivalent to defining the ``vjp`` function.) + + It must accept a context :attr:`ctx` as the first argument, followed by + as many outputs as the :func:`forward` returned (None will be passed in + for non tensor outputs of the forward function), + and it should return as many tensors, as there were inputs to + :func:`forward`. Each argument is the gradient w.r.t the given output, + and each returned value should be the gradient w.r.t. the + corresponding input. If an input is not a Tensor or is a Tensor not + requiring grads, you can just pass None as a gradient for that input. + + The context can be used to retrieve tensors saved during the forward + pass. It also has an attribute :attr:`ctx.needs_input_grad` as a tuple + of booleans representing whether each input needs gradient. E.g., + :func:`backward` will have ``ctx.needs_input_grad[0] = True`` if the + first input to :func:`forward` needs gradient computed w.r.t. the + output. + """ + raise NotImplementedError( + "You must implement either the backward or vjp method for " + "your custom autograd.Function to use it with backward " + "mode AD." + ) + + # vjp and backward are alias of each other + vjp = backward + + @staticmethod + def jvp(ctx: Any, *grad_inputs: Any) -> Any: + r"""Define a formula for differentiating the operation with forward mode automatic differentiation. + + This function is to be overridden by all subclasses. + It must accept a context :attr:`ctx` as the first argument, followed by + as many inputs as the :func:`forward` got (None will be passed in + for non tensor inputs of the forward function), + and it should return as many tensors as there were outputs to + :func:`forward`. Each argument is the gradient w.r.t the given input, + and each returned value should be the gradient w.r.t. the + corresponding output. 
If an output is not a Tensor or the function is not
+        differentiable with respect to that output, you can just pass None as a
+        gradient for that output.
+
+        You can use the :attr:`ctx` object to pass any value from the forward to this
+        function.
+        """
+        raise NotImplementedError(
+            "You must implement the jvp function for custom "
+            "autograd.Function to use it with forward mode AD."
+        )
+
+
+class Function(_SingleLevelFunction):
+    r"""Base class to create custom `autograd.Function`.
+
+    To create a custom `autograd.Function`, subclass this class and implement
+    the :meth:`forward` and :meth:`backward` static methods. Then, to use your custom
+    op in the forward pass, call the class method ``apply``. Do not call
+    :meth:`forward` directly.
+
+    To ensure correctness and best performance, make sure you are calling the
+    correct methods on ``ctx`` and validating your backward function using
+    :func:`torch.autograd.gradcheck`.
+
+    See :ref:`extending-autograd` for more details on how to use this class.
+
+    Examples::
+
+        >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_AUTOGRAD)
+        >>> class Exp(Function):
+        >>>     @staticmethod
+        >>>     def forward(ctx, i):
+        >>>         result = i.exp()
+        >>>         ctx.save_for_backward(result)
+        >>>         return result
+        >>>
+        >>>     @staticmethod
+        >>>     def backward(ctx, grad_output):
+        >>>         result, = ctx.saved_tensors
+        >>>         return grad_output * result
+        >>>
+        >>> # Use it by calling the apply method:
+        >>> # xdoctest: +SKIP
+        >>> output = Exp.apply(input)
+    """
+
+    def __init__(self, *args, **kwargs):
+        cls = self.__class__
+        warnings.warn(
+            f"{cls} should not be instantiated. Methods on autograd functions "
+            "are all static, so you should invoke them on the class itself. "
+            "Instantiating an autograd function will raise an "
+            "error in a future version of PyTorch.",
+            DeprecationWarning,
+            stacklevel=2,
+        )
+
+    def __call__(self, *args, **kwargs):
+        raise RuntimeError(
+            "Legacy autograd function with non-static forward method is deprecated. "
+            "Please use new-style autograd function with static forward method. "
+            "(Example: https://pytorch.org/docs/stable/autograd.html#torch.autograd.Function)"
+        )
+
+    # for the tracer
+    is_traceable = False
+
+    """
+    Bool that specifies if PyTorch should attempt to autogenerate
+    :func:`torch.vmap` support for this autograd.Function. You may set this to
+    True only if this autograd.Function's forward, backward, and jvp (if they
+    exist) are written using PyTorch operations; otherwise, please override
+    :meth:`torch.autograd.Function.vmap` to add support for :func:`torch.vmap`.
+
+    Please see :ref:`func-autograd-function` for more details.
+    """
+    generate_vmap_rule = False
+
+    @staticmethod
+    def vmap(info, in_dims, *args):
+        r"""Define the behavior for this autograd.Function underneath :func:`torch.vmap`.
+
+        For a :func:`torch.autograd.Function` to support
+        :func:`torch.vmap`, you must either override this static method, or set
+        ``generate_vmap_rule`` to ``True`` (you may not do both).
+
+        If you choose to override this staticmethod: it must accept
+
+        - an ``info`` object as the first argument. ``info.batch_size``
+          specifies the size of the dimension being vmapped over,
+          while ``info.randomness`` is the randomness option passed to
+          :func:`torch.vmap`.
+        - an ``in_dims`` tuple as the second argument.
+          For each arg in ``args``, ``in_dims`` has a corresponding
+          ``Optional[int]``.
It is ``None`` if the arg is not a Tensor or if + the arg is not being vmapped over, otherwise, it is an integer + specifying what dimension of the Tensor is being vmapped over. + - ``*args``, which is the same as the args to :meth:`~Function.forward`. + + The return of the vmap staticmethod is a tuple of ``(output, out_dims)``. + Similar to ``in_dims``, ``out_dims`` should be of the same structure as + ``output`` and contain one ``out_dim`` per output that specifies if the + output has the vmapped dimension and what index it is in. + + Please see :ref:`func-autograd-function` for more details. + """ + raise NotImplementedError( + "To use autograd.Function with vmap, you must either override the " + "vmap staticmethod or set generate_vmap_rule=True." + ) + + @classmethod + def apply(cls, *args, **kwargs): + def bind_default_args(func, *args, **kwargs): + signature = inspect.signature(func) + bound_args = signature.bind(*args, **kwargs) + bound_args.apply_defaults() + + return bound_args.args + + is_setup_ctx_defined = cls.setup_context != _SingleLevelFunction.setup_context + if is_setup_ctx_defined: + args = bind_default_args(cls.forward, *args, **kwargs) + + if not torch._C._are_functorch_transforms_active(): + # See NOTE: [functorch vjp and autograd interaction] + args = _functorch.utils.unwrap_dead_wrappers(args) + return super().apply(*args, **kwargs) # type: ignore[misc] + + if not is_setup_ctx_defined: + raise RuntimeError( + "In order to use an autograd.Function with functorch transforms " + "(vmap, grad, jvp, jacrev, ...), it must override the setup_context " + "staticmethod. For more details, please see " + "https://pytorch.org/docs/master/notes/extending.func.html" + ) + + return custom_function_call(cls, *args, **kwargs) + + @staticmethod + def _compiled_autograd_key(ctx): + return (ctx._autograd_function_id,) + + +def once_differentiable(fn): + @functools.wraps(fn) + def wrapper(ctx, *args): + with torch.no_grad(): + outputs = fn(ctx, *args) + + if not torch.is_grad_enabled(): + return outputs + + # If any of the inputs have requires_grad=True, we force the outputs + # to have requires_grad=True but point to a grad_fn which throws an + # error message during (double) back-propagation. + # XXX: this is only an approximation of requires_grad - there's no way + # to figure out if fn didn't use ctx.saved_tensors and as a result + # some Tensors might require grad, even if no args do. + # Unfortunately, this leads to unexpected error messages ("no nodes + # require computing gradients"), but I don't have a better idea. + # These functions would raise an error in backward anyway. + requires_grad = any( + isinstance(arg, torch.Tensor) and arg.requires_grad for arg in args + ) + if not requires_grad: + return outputs + + if not isinstance(outputs, tuple): + outputs = (outputs,) + + err_fn = _functions.DelayedError( + b"trying to differentiate twice a function that was marked " + b"with @once_differentiable", + len(outputs), + ) + + # Create aliases of each output that has requires_grad=True. We need + # at least one of the inputs to err_fn to require grad so that the + # output will have a grad_fn. + def fake_requires_grad(var): + if var is not None: + var = var.detach() + var.requires_grad = True + return var + + return err_fn(*[fake_requires_grad(v) for v in outputs]) + + return wrapper + + +def traceable(fn_cls): + r"""Mark Function as traceable for the JIT. + + Traceable functions have additional restrictions - they can't pass any + data-dependent values to backward (e.g. 
Prod passes the output, which makes + it non-traceable), and their backward should be implemented entirely in terms + of operations on autograd Tensors in all cases. + + DON'T USE THIS DECORATOR. IT IS FOR INTERNAL USE ONLY AND SHOULD BE HANDLED WITH + CARE (or can give incorrect results otherwise). + """ + warnings.warn( + "torch.autograd.function.traceable is deprecated " + "and will be removed in PyTorch 2.4.", + stacklevel=2, + ) + fn_cls.is_traceable = True + return fn_cls + + +class InplaceFunction(Function): + r""" + This class is here only for backward compatibility reasons. + Use :class:`Function` instead of this for any new use case. + """ + + def __init__(self, inplace=False): + super().__init__() + self.inplace = inplace + + +def _nested_map(condition, fn, condition_msg=None): + def _map(obj): + if condition(obj): + return fn(obj) + elif obj is None: + return None + elif isinstance(obj, (list, tuple)): + mapped = (_map(x) for x in obj) + if hasattr(obj, "_fields"): + # obj is namedtuple + return type(obj)(*mapped) + return type(obj)(mapped) + elif isinstance(obj, dict): + return {x: _map(obj[x]) for x in obj} + else: + raise ValueError( + "Auto nesting doesn't know how to process " + "an input object of type " + + torch.typename(obj) + + ( + ". Accepted types: " + condition_msg + ", or lists/tuples of them" + if condition_msg + else "" + ) + ) + + return _map + + +def _jit_unwrap_structured(obj): + if hasattr(obj, "_jit_unwrap"): + return obj._jit_unwrap() + return obj + + +def _iter_filter(condition, allow_unknown=False, condition_msg=None, conversion=None): + def _iter(obj): + if conversion is not None: + obj = conversion(obj) + if condition(obj): + yield obj + elif obj is None: + return + elif isinstance(obj, (list, tuple)): + for o in obj: + yield from _iter(o) + elif isinstance(obj, dict): + # We only accept primitive key types, so we needn't inspect them + for o in obj.values(): + yield from _iter(o) + elif allow_unknown: + yield obj + else: + raise ValueError( + "Auto nesting doesn't know how to process " + "an input object of type " + + torch.typename(obj) + + ( + ". 
Accepted types: " + condition_msg + ", or lists/tuples of them" + if condition_msg + else "" + ) + ) + + return _iter + + +def _unflatten(input, proto): + # unflatten a list or tuple input into a nested list/tuple structure + # specified by proto + def unflatten_helper(input, proto): + res: List[Optional[torch.Tensor]] = [] + if hasattr(proto, "_jit_wrap"): + return proto._jit_wrap(input) + if not isinstance(proto, (list, tuple)): + return input[0], input[1:] + for e in proto: + if e is None: + res.append(e) + else: + res_e, input = unflatten_helper(input, e) + res.append(res_e) + return type(proto)(res), input + + return unflatten_helper(input, proto)[0] + + +_iter_jit_values = _iter_filter( + lambda o: o is None or isinstance(o, torch._C.Value), + condition_msg="jit's Values or None", +) +_iter_tensors = _iter_filter( + lambda x: isinstance(x, torch.Tensor), + condition_msg="Tensors", + conversion=_jit_unwrap_structured, +) +_iter_tensors_permissive = _iter_filter( + lambda x: isinstance(x, torch.Tensor), + allow_unknown=True, + condition_msg="Tensors (permissive)", +) +_iter_None_tensors = _iter_filter( + lambda o: o is None or isinstance(o, torch.Tensor), condition_msg="Tensors or None" +) +_map_tensor_data = _nested_map( + lambda x: isinstance(x, torch.Tensor), lambda o: o.data, condition_msg="Tensors" +) + + +class NestedIOFunction(Function): + r""" + This class is here only for backward compatibility reasons. + Use :class:`Function` instead of this for any new use case. + """ + # The 'type: ignore' statements are needed here because these functions are declared as '@staticmethod' in the + # superclass (Function) but are instance methods here, which mypy reports as incompatible. + + def _do_forward(self, *input): + self._nested_input = input + flat_input = tuple(_iter_tensors(input)) + flat_output = super()._do_forward(*flat_input) # type: ignore[misc] + nested_output = self._nested_output + nested_tensors = _unflatten(flat_output, self._nested_output) + return nested_tensors + + def _do_backward(self, gradients, retain_variables): + self.retain_variables = retain_variables + result = super()._do_backward(gradients, retain_variables) # type: ignore[misc] + if not retain_variables: + del self._nested_output + del self._to_save_nested + return result + + def backward(self, *gradients: Any) -> Any: # type: ignore[override] + r""" + Shared backward utility. + """ + nested_gradients = _unflatten(gradients, self._nested_output) + result = self.backward_extended(*nested_gradients) # type: ignore[func-returns-value] + return tuple(_iter_None_tensors(result)) + + __call__ = _do_forward + + def forward(self, *args: Any) -> Any: # type: ignore[override] + r""" + Shared forward utility. + """ + nested_tensors = _map_tensor_data(self._nested_input) + result = self.forward_extended(*nested_tensors) # type: ignore[func-returns-value] + del self._nested_input + self._nested_output = result + return tuple(_iter_tensors(result)) + + def save_for_backward(self, *args: Any) -> None: + r""" + See :meth:`Function.save_for_backward`. + """ + self.to_save = tuple(_iter_tensors(args)) + self._to_save_nested = args + + @property + def saved_tensors(self): + r""" + See :meth:`Function.saved_tensors`. + """ + flat_tensors = super().saved_tensors # type: ignore[misc] + return _unflatten(flat_tensors, self._to_save_nested) + + def mark_dirty(self, *args: Any, **kwargs: Any) -> None: + r""" + See :meth:`Function.mark_dirty`. 
+ """ + self.dirty_tensors = tuple(_iter_tensors((args, kwargs))) + + def mark_non_differentiable(self, *args: Any, **kwargs: Any) -> None: + r""" + See :meth:`Function.mark_non_differentiable`. + """ + self.non_differentiable = tuple(_iter_tensors((args, kwargs))) + + def forward_extended(self, *input: Any) -> None: + r""" + User defined forward. + """ + raise NotImplementedError + + def backward_extended(self, *grad_output: Any) -> None: + r""" + User defined backward. + """ + raise NotImplementedError diff --git a/venv/lib/python3.10/site-packages/torch/autograd/functional.py b/venv/lib/python3.10/site-packages/torch/autograd/functional.py new file mode 100644 index 0000000000000000000000000000000000000000..6701efbedac1c15b286e91be5440d2e45ffad945 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/autograd/functional.py @@ -0,0 +1,1182 @@ +from typing import List, Tuple + +import torch +from torch._vmap_internals import _vmap +from . import forward_ad as fwAD + +__all__ = ["vjp", "jvp", "jacobian", "hessian", "hvp", "vhp"] + +# Utility functions + + +def _as_tuple_nocheck(x): + if isinstance(x, tuple): + return x + elif isinstance(x, list): + return tuple(x) + else: + return (x,) + + +def _as_tuple(inp, arg_name=None, fn_name=None): + # Ensures that inp is a tuple of Tensors + # Returns whether or not the original inp was a tuple and the tupled version of the input + if arg_name is None and fn_name is None: + return _as_tuple_nocheck(inp) + + is_inp_tuple = True + if not isinstance(inp, tuple): + inp = (inp,) + is_inp_tuple = False + + for i, el in enumerate(inp): + if not isinstance(el, torch.Tensor): + if is_inp_tuple: + raise TypeError( + f"The {arg_name} given to {fn_name} must be either a Tensor or a tuple of Tensors but the" + f" value at index {i} has type {type(el)}." + ) + else: + raise TypeError( + f"The {arg_name} given to {fn_name} must be either a Tensor or a tuple of Tensors but the" + f" given {arg_name} has type {type(el)}." + ) + + return is_inp_tuple, inp + + +def _tuple_postprocess(res, to_unpack): + # Unpacks a potentially nested tuple of Tensors + # to_unpack should be a single boolean or a tuple of two booleans. + # It is used to: + # - invert _as_tuple when res should match the inp given to _as_tuple + # - optionally remove nesting of two tuples created by multiple calls to _as_tuple + if isinstance(to_unpack, tuple): + assert len(to_unpack) == 2 + if not to_unpack[1]: + res = tuple(el[0] for el in res) + if not to_unpack[0]: + res = res[0] + else: + if not to_unpack: + res = res[0] + return res + + +def _grad_preprocess(inputs, create_graph, need_graph): + # Preprocess the inputs to make sure they require gradient + # inputs is a tuple of Tensors to preprocess + # create_graph specifies if the user wants gradients to flow back to the Tensors in inputs + # need_graph specifies if we internally want gradients to flow back to the Tensors in res + # Note that we *always* create a new Tensor object to be able to see the difference between + # inputs given as arguments and the same Tensors automatically captured by the user function. 
+ # Check this issue for more details on how that can happen: https://github.com/pytorch/pytorch/issues/32576 + res = [] + for inp in inputs: + if create_graph and inp.requires_grad: + # Create at least a new Tensor object in a differentiable way + if not inp.is_sparse: + # Use .view_as() to get a shallow copy + res.append(inp.view_as(inp)) + else: + # We cannot use view for sparse Tensors so we clone + res.append(inp.clone()) + else: + res.append(inp.detach().requires_grad_(need_graph)) + return tuple(res) + + +def _grad_postprocess(inputs, create_graph): + # Postprocess the generated Tensors to avoid returning Tensors with history when the user did not + # request it. + if isinstance(inputs[0], torch.Tensor): + if not create_graph: + return tuple(inp.detach() for inp in inputs) + else: + return inputs + else: + return tuple(_grad_postprocess(inp, create_graph) for inp in inputs) + + +def _validate_v(v, other, is_other_tuple): + # This assumes that other is the correct shape, and v should match + # Both are assumed to be tuples of Tensors + if len(other) != len(v): + if is_other_tuple: + raise RuntimeError( + f"v is a tuple of invalid length: should be {len(other)} but got {len(v)}." + ) + else: + raise RuntimeError("The given v should contain a single Tensor.") + + for idx, (el_v, el_other) in enumerate(zip(v, other)): + if el_v.size() != el_other.size(): + prepend = "" + if is_other_tuple: + prepend = f"Entry {idx} in " + raise RuntimeError( + f"{prepend}v has invalid size: should be {el_other.size()} but got {el_v.size()}." + ) + + +def _check_requires_grad(inputs, input_type, strict): + # Used to make all the necessary checks to raise nice errors in strict mode. + if not strict: + return + + if input_type not in ["outputs", "grad_inputs", "jacobian", "hessian"]: + raise RuntimeError("Invalid input_type to _check_requires_grad") + for i, inp in enumerate(inputs): + if inp is None: + # This can only be reached for grad_inputs. + raise RuntimeError( + f"The output of the user-provided function is independent of input {i}." + " This is not allowed in strict mode." + ) + if not inp.requires_grad: + if input_type == "hessian": + raise RuntimeError( + f"The hessian of the user-provided function with respect to input {i}" + " is independent of the input. This is not allowed in strict mode." + " You should ensure that your function is thrice differentiable and that" + " the hessian depends on the inputs." + ) + elif input_type == "jacobian": + raise RuntimeError( + "While computing the hessian, found that the jacobian of the user-provided" + f" function with respect to input {i} is independent of the input. This is not" + " allowed in strict mode. You should ensure that your function is twice" + " differentiable and that the jacobian depends on the inputs (this would be" + " violated by a linear function for example)." + ) + elif input_type == "grad_inputs": + raise RuntimeError( + f"The gradient with respect to input {i} is independent of the inputs of the" + " user-provided function. This is not allowed in strict mode." + ) + else: + raise RuntimeError( + f"Output {i} of the user-provided function does not require gradients." + " The outputs must be computed in a differentiable manner from the input" + " when running in strict mode." + ) + + +def _autograd_grad( + outputs, + inputs, + grad_outputs=None, + create_graph=False, + retain_graph=None, + is_grads_batched=False, +): + # Version of autograd.grad that accepts `None` in outputs and do not compute gradients for them. 
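+    # Added illustrative comment: with outputs=(o1, None) and grad_outputs=(g1, None),
+    # the None pair is filtered out below and torch.autograd.grad is called only on
+    # (o1,) and (g1,); if no differentiable output survives the filter,
+    # (None,) * len(inputs) is returned without invoking the autograd engine.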
+ # This has the extra constraint that inputs has to be a tuple + assert isinstance(outputs, tuple) + if grad_outputs is None: + grad_outputs = (None,) * len(outputs) + assert isinstance(grad_outputs, tuple) + assert len(outputs) == len(grad_outputs) + + new_outputs: Tuple[torch.Tensor, ...] = tuple() + new_grad_outputs: Tuple[torch.Tensor, ...] = tuple() + for out, grad_out in zip(outputs, grad_outputs): + if out is not None and out.requires_grad: + new_outputs += (out,) + new_grad_outputs += (grad_out,) + + if len(new_outputs) == 0: + # No differentiable output, we don't need to call the autograd engine + return (None,) * len(inputs) + else: + return torch.autograd.grad( + new_outputs, + inputs, + new_grad_outputs, + allow_unused=True, + create_graph=create_graph, + retain_graph=retain_graph, + is_grads_batched=is_grads_batched, + ) + + +def _fill_in_zeros(grads, refs, strict, create_graph, stage): + # Used to detect None in the grads and depending on the flags, either replace them + # with Tensors full of 0s of the appropriate size based on the refs or raise an error. + # strict and create graph allow us to detect when it is appropriate to raise an error + # stage gives us information of which backward call we consider to give good error message + if stage not in ["back", "back_trick", "double_back", "double_back_trick"]: + raise RuntimeError(f"Invalid stage argument '{stage}' to _fill_in_zeros") + + res: Tuple[torch.Tensor, ...] = tuple() + for i, grads_i in enumerate(grads): + if grads_i is None: + if strict: + if stage == "back": + raise RuntimeError( + "The output of the user-provided function is independent of " + f"input {i}. This is not allowed in strict mode." + ) + elif stage == "back_trick": + raise RuntimeError( + f"The gradient with respect to the input is independent of entry {i}" + " in the grad_outputs when using the double backward trick to compute" + " forward mode gradients. This is not allowed in strict mode." + ) + elif stage == "double_back": + raise RuntimeError( + "The jacobian of the user-provided function is independent of " + f"input {i}. This is not allowed in strict mode." + ) + else: + raise RuntimeError( + "The hessian of the user-provided function is independent of " + f"entry {i} in the grad_jacobian. This is not allowed in strict " + "mode as it prevents from using the double backward trick to " + "replace forward mode AD." + ) + + grads_i = torch.zeros_like(refs[i]) + else: + if strict and create_graph and not grads_i.requires_grad: + if "double" not in stage: + raise RuntimeError( + "The jacobian of the user-provided function is independent of " + f"input {i}. This is not allowed in strict mode when create_graph=True." + ) + else: + raise RuntimeError( + "The hessian of the user-provided function is independent of " + f"input {i}. This is not allowed in strict mode when create_graph=True." + ) + + res += (grads_i,) + + return res + + +# Public API + + +def vjp(func, inputs, v=None, create_graph=False, strict=False): + r"""Compute the dot product between a vector ``v`` and the Jacobian of the given function at the point given by the inputs. + + Args: + func (function): a Python function that takes Tensor inputs and returns + a tuple of Tensors or a Tensor. + inputs (tuple of Tensors or Tensor): inputs to the function ``func``. + v (tuple of Tensors or Tensor): The vector for which the vector + Jacobian product is computed. Must be the same size as the output + of ``func``. 
This argument is optional when the output of ``func`` + contains a single element and (if it is not provided) will be set + as a Tensor containing a single ``1``. + create_graph (bool, optional): If ``True``, both the output and result + will be computed in a differentiable way. Note that when ``strict`` + is ``False``, the result can not require gradients or be + disconnected from the inputs. Defaults to ``False``. + strict (bool, optional): If ``True``, an error will be raised when we + detect that there exists an input such that all the outputs are + independent of it. If ``False``, we return a Tensor of zeros as the + vjp for said inputs, which is the expected mathematical value. + Defaults to ``False``. + + Returns: + output (tuple): tuple with: + func_output (tuple of Tensors or Tensor): output of ``func(inputs)`` + + vjp (tuple of Tensors or Tensor): result of the dot product with + the same shape as the inputs. + + Example: + + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_AUTOGRAD) + >>> def exp_reducer(x): + ... return x.exp().sum(dim=1) + >>> inputs = torch.rand(4, 4) + >>> v = torch.ones(4) + >>> # xdoctest: +IGNORE_WANT("non-deterministic") + >>> vjp(exp_reducer, inputs, v) + (tensor([5.7817, 7.2458, 5.7830, 6.7782]), + tensor([[1.4458, 1.3962, 1.3042, 1.6354], + [2.1288, 1.0652, 1.5483, 2.5035], + [2.2046, 1.1292, 1.1432, 1.3059], + [1.3225, 1.6652, 1.7753, 2.0152]])) + + >>> vjp(exp_reducer, inputs, v, create_graph=True) + (tensor([5.7817, 7.2458, 5.7830, 6.7782], grad_fn=), + tensor([[1.4458, 1.3962, 1.3042, 1.6354], + [2.1288, 1.0652, 1.5483, 2.5035], + [2.2046, 1.1292, 1.1432, 1.3059], + [1.3225, 1.6652, 1.7753, 2.0152]], grad_fn=)) + + >>> def adder(x, y): + ... return 2 * x + 3 * y + >>> inputs = (torch.rand(2), torch.rand(2)) + >>> v = torch.ones(2) + >>> vjp(adder, inputs, v) + (tensor([2.4225, 2.3340]), + (tensor([2., 2.]), tensor([3., 3.]))) + """ + with torch.enable_grad(): + is_inputs_tuple, inputs = _as_tuple(inputs, "inputs", "vjp") + inputs = _grad_preprocess(inputs, create_graph=create_graph, need_graph=True) + + outputs = func(*inputs) + is_outputs_tuple, outputs = _as_tuple( + outputs, "outputs of the user-provided function", "vjp" + ) + _check_requires_grad(outputs, "outputs", strict=strict) + + if v is not None: + _, v = _as_tuple(v, "v", "vjp") + v = _grad_preprocess(v, create_graph=create_graph, need_graph=False) + _validate_v(v, outputs, is_outputs_tuple) + else: + if len(outputs) != 1 or outputs[0].nelement() != 1: + raise RuntimeError( + "The vector v can only be None if the " + "user-provided function returns " + "a single Tensor with a single element." + ) + + enable_grad = True if create_graph else torch.is_grad_enabled() + with torch.set_grad_enabled(enable_grad): + grad_res = _autograd_grad(outputs, inputs, v, create_graph=create_graph) + vjp = _fill_in_zeros(grad_res, inputs, strict, create_graph, "back") + + # Cleanup objects and return them to the user + outputs = _grad_postprocess(outputs, create_graph) + vjp = _grad_postprocess(vjp, create_graph) + + return _tuple_postprocess(outputs, is_outputs_tuple), _tuple_postprocess( + vjp, is_inputs_tuple + ) + + +def jvp(func, inputs, v=None, create_graph=False, strict=False): + r"""Compute the dot product between the Jacobian of the given function at the point given by the inputs and a vector ``v``. + + Args: + func (function): a Python function that takes Tensor inputs and returns + a tuple of Tensors or a Tensor. + inputs (tuple of Tensors or Tensor): inputs to the function ``func``. 
+ v (tuple of Tensors or Tensor): The vector for which the Jacobian + vector product is computed. Must be the same size as the input of + ``func``. This argument is optional when the input to ``func`` + contains a single element and (if it is not provided) will be set + as a Tensor containing a single ``1``. + create_graph (bool, optional): If ``True``, both the output and result + will be computed in a differentiable way. Note that when ``strict`` + is ``False``, the result can not require gradients or be + disconnected from the inputs. Defaults to ``False``. + strict (bool, optional): If ``True``, an error will be raised when we + detect that there exists an input such that all the outputs are + independent of it. If ``False``, we return a Tensor of zeros as the + jvp for said inputs, which is the expected mathematical value. + Defaults to ``False``. + + Returns: + output (tuple): tuple with: + func_output (tuple of Tensors or Tensor): output of ``func(inputs)`` + + jvp (tuple of Tensors or Tensor): result of the dot product with + the same shape as the output. + + Note: + ``autograd.functional.jvp`` computes the jvp by using the backward of + the backward (sometimes called the double backwards trick). This is not + the most performant way of computing the jvp. Please consider using + :func:`torch.func.jvp` or the + :ref:`low-level forward-mode AD API ` instead. + + Example: + + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_AUTOGRAD) + >>> def exp_reducer(x): + ... return x.exp().sum(dim=1) + >>> inputs = torch.rand(4, 4) + >>> v = torch.ones(4, 4) + >>> # xdoctest: +IGNORE_WANT("non-deterministic") + >>> jvp(exp_reducer, inputs, v) + (tensor([6.3090, 4.6742, 7.9114, 8.2106]), + tensor([6.3090, 4.6742, 7.9114, 8.2106])) + + >>> jvp(exp_reducer, inputs, v, create_graph=True) + (tensor([6.3090, 4.6742, 7.9114, 8.2106], grad_fn=), + tensor([6.3090, 4.6742, 7.9114, 8.2106], grad_fn=)) + + >>> def adder(x, y): + ... return 2 * x + 3 * y + >>> inputs = (torch.rand(2), torch.rand(2)) + >>> v = (torch.ones(2), torch.ones(2)) + >>> jvp(adder, inputs, v) + (tensor([2.2399, 2.5005]), + tensor([5., 5.])) + + """ + with torch.enable_grad(): + is_inputs_tuple, inputs = _as_tuple(inputs, "inputs", "jvp") + inputs = _grad_preprocess(inputs, create_graph=create_graph, need_graph=True) + + if v is not None: + _, v = _as_tuple(v, "v", "jvp") + v = _grad_preprocess(v, create_graph=create_graph, need_graph=False) + _validate_v(v, inputs, is_inputs_tuple) + else: + if len(inputs) != 1 or inputs[0].nelement() != 1: + raise RuntimeError( + "The vector v can only be None if the input to " + "the user-provided function is a single Tensor " + "with a single element." + ) + + outputs = func(*inputs) + is_outputs_tuple, outputs = _as_tuple( + outputs, "outputs of the user-provided function", "jvp" + ) + _check_requires_grad(outputs, "outputs", strict=strict) + # The backward is linear so the value of grad_outputs is not important as + # it won't appear in the double backward graph. We only need to ensure that + # it does not contain inf or nan. 
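+        # Added sketch of the double-backward trick used below: with dummy
+        # grad_outputs u, the first backward computes g(u) = J^T u, which is linear
+        # in u. Differentiating g(u) with respect to u against the vector v then
+        # yields J v, the requested Jacobian-vector product, using only reverse-mode AD.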
+ grad_outputs = tuple( + torch.zeros_like(out, requires_grad=True) for out in outputs + ) + + grad_inputs = _autograd_grad(outputs, inputs, grad_outputs, create_graph=True) + _check_requires_grad(grad_inputs, "grad_inputs", strict=strict) + + if create_graph: + with torch.enable_grad(): + grad_res = _autograd_grad( + grad_inputs, grad_outputs, v, create_graph=create_graph + ) + jvp = _fill_in_zeros(grad_res, outputs, strict, create_graph, "back_trick") + else: + grad_res = _autograd_grad( + grad_inputs, grad_outputs, v, create_graph=create_graph + ) + jvp = _fill_in_zeros(grad_res, outputs, strict, create_graph, "back_trick") + + # Cleanup objects and return them to the user + outputs = _grad_postprocess(outputs, create_graph) + jvp = _grad_postprocess(jvp, create_graph) + + return _tuple_postprocess(outputs, is_outputs_tuple), _tuple_postprocess( + jvp, is_outputs_tuple + ) + + +def _construct_standard_basis_for( + tensors: Tuple[torch.Tensor, ...], tensor_numels: Tuple[int, ...] +) -> Tuple[torch.Tensor, ...]: + # This function: + # - constructs a N=sum(tensor_numels) standard basis. i.e. an NxN identity matrix. + # - Splits the identity matrix into chunks with each chunk size determined by `tensor_numels`. + # - Each chunk corresponds to one tensor. The chunk has the same dtype and + # device as the tensor + # + # For example, with tensor_numels = [1, 2, 1], this function returns: + # ( tensor([[1], tensor([[0, 0], tensor([[0], + # [0], [1, 0], [0], + # [0], [0, 1], [0], + # [0]]) , [0, 0]]) , [1]]) ) + # + # Precondition: tensor_numels == tuple(tensor.numel() for tensor in tensors) + # Precondition: tensors always has at least one element. + # + # See NOTE: [Computing jacobian with vmap and grad for multiple tensors] + # for context behind this function. All the pre-conditions are guarded for + # in torch.autograd.functional.jacobian. + assert len(tensors) == len(tensor_numels) + assert len(tensors) > 0 + total_numel = sum(tensor_numels) + chunks = tuple( + tensor.new_zeros(total_numel, tensor_numel) + for tensor, tensor_numel in zip(tensors, tensor_numels) + ) + diag_start_idx = 0 + for chunk, numel in zip(chunks, tensor_numels): + chunk.diagonal(diag_start_idx).fill_(1) + diag_start_idx -= numel + return chunks + + +def _jacfwd(func, inputs, strict=False, vectorize=False): + if strict: + raise RuntimeError( + "torch.autograd.functional.jacobian: `strict=True` " + 'and `strategy="forward-mode"` are not supported together (yet). ' + "Please either set `strict=False` or " + '`strategy="reverse-mode"`.' 
+ ) + is_inputs_tuple, inputs = _as_tuple(inputs, "inputs", "jacobian") + output_info = [] + + if vectorize: + # See NOTE: [Computing jacobian with vmap and grad for multiple outputs] + input_numels = tuple(input.numel() for input in inputs) + + # Step 1: Prepare tangents + tangents = _construct_standard_basis_for(inputs, input_numels) + + # Step 2: Compute vmap over computation with dual tensors + def jvp(tangents): + with fwAD.dual_level(): + dual_inputs = tuple( + fwAD.make_dual(input, tangent.view_as(input)) + for input, tangent in zip(inputs, tangents) + ) + _is_outputs_tuple, dual_outputs = _as_tuple( + func(*dual_inputs), "outputs" + ) + output_info.append(_is_outputs_tuple) + jv = [] + primal_outs = [] + for dual_out in dual_outputs: + primal, tangent = fwAD.unpack_dual(dual_out) + primal_outs.append(primal) + if tangent is not None: + jv.append(tangent) + else: + jv.append(torch.zeros_like(primal)) + output_info.append(primal_outs) + return tuple(jv) + + outputs_before_split = _vmap(jvp)(tangents) + is_outputs_tuple, outputs = output_info + # Step 3: for each of the output tangents, split along dim 0 + jacobian_input_output = [] + for jac_output_i, output_i in zip(outputs_before_split, outputs): + jacobian_output_i_output = [] + for jac, input_j in zip(jac_output_i.split(input_numels, dim=0), inputs): + # We need to transpose the Jacobian because in forward AD, the + # batch dimension represents that of the inputs + jacobian_input_i_output_j = jac.permute(*range(1, jac.ndim), 0).reshape( + (*output_i.shape, *input_j.shape) + ) # noqa: C409 + + jacobian_output_i_output.append(jacobian_input_i_output_j) + jacobian_input_output.append(jacobian_output_i_output) + + # Omit [Step 4] because everything is already transposed w/ forward AD + return _tuple_postprocess( + jacobian_input_output, (is_outputs_tuple, is_inputs_tuple) + ) + else: + raise NotImplementedError( + "Computing Jacobian using forward-AD or forward-over-reverse Hessian is" + "only implemented for `vectorize=True`." + ) + + +def jacobian( + func, + inputs, + create_graph=False, + strict=False, + vectorize=False, + strategy="reverse-mode", +): + r"""Compute the Jacobian of a given function. + + Args: + func (function): a Python function that takes Tensor inputs and returns + a tuple of Tensors or a Tensor. + inputs (tuple of Tensors or Tensor): inputs to the function ``func``. + create_graph (bool, optional): If ``True``, the Jacobian will be + computed in a differentiable manner. Note that when ``strict`` is + ``False``, the result can not require gradients or be disconnected + from the inputs. Defaults to ``False``. + strict (bool, optional): If ``True``, an error will be raised when we + detect that there exists an input such that all the outputs are + independent of it. If ``False``, we return a Tensor of zeros as the + jacobian for said inputs, which is the expected mathematical value. + Defaults to ``False``. + vectorize (bool, optional): This feature is experimental. + Please consider using :func:`torch.func.jacrev` or + :func:`torch.func.jacfwd` instead if you are looking for something + less experimental and more performant. + When computing the jacobian, usually we invoke + ``autograd.grad`` once per row of the jacobian. If this flag is + ``True``, we perform only a single ``autograd.grad`` call with + ``batched_grad=True`` which uses the vmap prototype feature. + Though this should lead to performance improvements in many cases, + because this feature is still experimental, there may be performance + cliffs. 
See :func:`torch.autograd.grad`'s ``batched_grad`` parameter for + more information. + strategy (str, optional): Set to ``"forward-mode"`` or ``"reverse-mode"`` to + determine whether the Jacobian will be computed with forward or reverse + mode AD. Currently, ``"forward-mode"`` requires ``vectorized=True``. + Defaults to ``"reverse-mode"``. If ``func`` has more outputs than + inputs, ``"forward-mode"`` tends to be more performant. Otherwise, + prefer to use ``"reverse-mode"``. + + Returns: + Jacobian (Tensor or nested tuple of Tensors): if there is a single + input and output, this will be a single Tensor containing the + Jacobian for the linearized inputs and output. If one of the two is + a tuple, then the Jacobian will be a tuple of Tensors. If both of + them are tuples, then the Jacobian will be a tuple of tuple of + Tensors where ``Jacobian[i][j]`` will contain the Jacobian of the + ``i``\th output and ``j``\th input and will have as size the + concatenation of the sizes of the corresponding output and the + corresponding input and will have same dtype and device as the + corresponding input. If strategy is ``forward-mode``, the dtype will be + that of the output; otherwise, the input. + + Example: + + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_AUTOGRAD) + >>> def exp_reducer(x): + ... return x.exp().sum(dim=1) + >>> inputs = torch.rand(2, 2) + >>> # xdoctest: +IGNORE_WANT("non-deterministic") + >>> jacobian(exp_reducer, inputs) + tensor([[[1.4917, 2.4352], + [0.0000, 0.0000]], + [[0.0000, 0.0000], + [2.4369, 2.3799]]]) + + >>> jacobian(exp_reducer, inputs, create_graph=True) + tensor([[[1.4917, 2.4352], + [0.0000, 0.0000]], + [[0.0000, 0.0000], + [2.4369, 2.3799]]], grad_fn=) + + >>> def exp_adder(x, y): + ... return 2 * x.exp() + 3 * y + >>> inputs = (torch.rand(2), torch.rand(2)) + >>> jacobian(exp_adder, inputs) + (tensor([[2.8052, 0.0000], + [0.0000, 3.3963]]), + tensor([[3., 0.], + [0., 3.]])) + """ + assert strategy in ("forward-mode", "reverse-mode"), ( + 'Expected strategy to be either "forward-mode" or "reverse-mode". Hint: If your ' + 'function has more outputs than inputs, "forward-mode" tends to be more performant. ' + 'Otherwise, prefer to use "reverse-mode".' + ) + if strategy == "forward-mode": + if create_graph: + raise NotImplementedError( + "torch.autograd.functional.jacobian: `create_graph=True` " + 'and `strategy="forward-mode"` are not supported together (yet). ' + "Please either set `create_graph=False` or " + '`strategy="reverse-mode"`.' + ) + return _jacfwd(func, inputs, strict, vectorize) + + with torch.enable_grad(): + is_inputs_tuple, inputs = _as_tuple(inputs, "inputs", "jacobian") + inputs = _grad_preprocess(inputs, create_graph=create_graph, need_graph=True) + + outputs = func(*inputs) + is_outputs_tuple, outputs = _as_tuple( + outputs, "outputs of the user-provided function", "jacobian" + ) + _check_requires_grad(outputs, "outputs", strict=strict) + + if vectorize: + if strict: + raise RuntimeError( + "torch.autograd.functional.jacobian: `strict=True` " + "and `vectorized=True` are not supported together. " + "Please either set `strict=False` or " + "`vectorize=False`." + ) + # NOTE: [Computing jacobian with vmap and grad for multiple outputs] + # + # Let's consider f(x) = (x**2, x.sum()) and let x = torch.randn(3). + # It turns out we can compute the jacobian of this function with a single + # call to autograd.grad by using vmap over the correct grad_outputs. 
+ # + # Firstly, one way to compute the jacobian is to stack x**2 and x.sum() + # into a 4D vector. E.g., use g(x) = torch.stack([x**2, x.sum()]) + # + # To get the first row of the jacobian, we call + # >>> autograd.grad(g(x), x, grad_outputs=torch.tensor([1, 0, 0, 0])) + # To get the 2nd row of the jacobian, we call + # >>> autograd.grad(g(x), x, grad_outputs=torch.tensor([0, 1, 0, 0])) + # and so on. + # + # Using vmap, we can vectorize all 4 of these computations into one by + # passing the standard basis for R^4 as the grad_output. + # vmap(partial(autograd.grad, g(x), x))(torch.eye(4)). + # + # Now, how do we compute the jacobian *without stacking the output*? + # We can just split the standard basis across the outputs. So to + # compute the jacobian of f(x), we'd use + # >>> autograd.grad(f(x), x, grad_outputs=_construct_standard_basis_for(...)) + # The grad_outputs looks like the following: + # ( torch.tensor([[1, 0, 0], + # [0, 1, 0], + # [0, 0, 1], + # [0, 0, 0]]), + # torch.tensor([[0], + # [0], + # [0], + # [1]]) ) + # + # But we're not done yet! + # >>> vmap(partial(autograd.grad(f(x), x, grad_outputs=...))) + # returns a Tensor of shape [4, 3]. We have to remember to split the + # jacobian of shape [4, 3] into two: + # - one of shape [3, 3] for the first output + # - one of shape [ 3] for the second output + + # Step 1: Construct grad_outputs by splitting the standard basis + output_numels = tuple(output.numel() for output in outputs) + grad_outputs = _construct_standard_basis_for(outputs, output_numels) + flat_outputs = tuple(output.reshape(-1) for output in outputs) + + # Step 2: Call vmap + autograd.grad + def vjp(grad_output): + vj = list( + _autograd_grad( + flat_outputs, + inputs, + grad_output, + create_graph=create_graph, + is_grads_batched=True, + ) + ) + for el_idx, vj_el in enumerate(vj): + if vj_el is not None: + continue + vj[el_idx] = torch.zeros_like(inputs[el_idx]).expand( + (sum(output_numels),) + inputs[el_idx].shape + ) + return tuple(vj) + + jacobians_of_flat_output = vjp(grad_outputs) + + # Step 3: The returned jacobian is one big tensor per input. In this step, + # we split each Tensor by output. + jacobian_input_output = [] + for jac_input_i, input_i in zip(jacobians_of_flat_output, inputs): + jacobian_input_i_output = [] + for jac, output_j in zip( + jac_input_i.split(output_numels, dim=0), outputs + ): + jacobian_input_i_output_j = jac.view(output_j.shape + input_i.shape) + jacobian_input_i_output.append(jacobian_input_i_output_j) + jacobian_input_output.append(jacobian_input_i_output) + + # Step 4: Right now, `jacobian` is a List[List[Tensor]]. + # The outer List corresponds to the number of inputs, + # the inner List corresponds to the number of outputs. + # We need to exchange the order of these and convert to tuples + # before returning. + jacobian_output_input = tuple(zip(*jacobian_input_output)) + + jacobian_output_input = _grad_postprocess( + jacobian_output_input, create_graph + ) + return _tuple_postprocess( + jacobian_output_input, (is_outputs_tuple, is_inputs_tuple) + ) + + jacobian: Tuple[torch.Tensor, ...] 
= tuple() + + for i, out in enumerate(outputs): + # mypy complains that expression and variable have different types due to the empty list + jac_i: Tuple[List[torch.Tensor]] = tuple([] for _ in range(len(inputs))) # type: ignore[assignment] + for j in range(out.nelement()): + vj = _autograd_grad( + (out.reshape(-1)[j],), + inputs, + retain_graph=True, + create_graph=create_graph, + ) + + for el_idx, (jac_i_el, vj_el, inp_el) in enumerate( + zip(jac_i, vj, inputs) + ): + if vj_el is not None: + if strict and create_graph and not vj_el.requires_grad: + msg = ( + "The jacobian of the user-provided function is " + f"independent of input {i}. This is not allowed in " + "strict mode when create_graph=True." + ) + raise RuntimeError(msg) + jac_i_el.append(vj_el) + else: + if strict: + msg = ( + f"Output {i} of the user-provided function is " + f"independent of input {el_idx}. This is not allowed in " + "strict mode." + ) + raise RuntimeError(msg) + jac_i_el.append(torch.zeros_like(inp_el)) + + jacobian += ( + tuple( + torch.stack(jac_i_el, dim=0).view( + out.size() + inputs[el_idx].size() # type: ignore[operator] + ) + for (el_idx, jac_i_el) in enumerate(jac_i) + ), + ) + + jacobian = _grad_postprocess(jacobian, create_graph) + + return _tuple_postprocess(jacobian, (is_outputs_tuple, is_inputs_tuple)) + + +def hessian( + func, + inputs, + create_graph=False, + strict=False, + vectorize=False, + outer_jacobian_strategy="reverse-mode", +): + r"""Compute the Hessian of a given scalar function. + + Args: + func (function): a Python function that takes Tensor inputs and returns + a Tensor with a single element. + inputs (tuple of Tensors or Tensor): inputs to the function ``func``. + create_graph (bool, optional): If ``True``, the Hessian will be computed in + a differentiable manner. Note that when ``strict`` is ``False``, the result can not + require gradients or be disconnected from the inputs. + Defaults to ``False``. + strict (bool, optional): If ``True``, an error will be raised when we detect that there exists an input + such that all the outputs are independent of it. If ``False``, we return a Tensor of zeros as the + hessian for said inputs, which is the expected mathematical value. + Defaults to ``False``. + vectorize (bool, optional): This feature is experimental. + Please consider using :func:`torch.func.hessian` + instead if you are looking for something less experimental and more performant. + When computing the hessian, usually we invoke + ``autograd.grad`` once per row of the hessian. If this flag is + ``True``, we use the vmap prototype feature as the backend to + vectorize calls to ``autograd.grad`` so we only invoke it once + instead of once per row. This should lead to performance + improvements in many use cases, however, due to this feature + being incomplete, there may be performance cliffs. Please + use `torch._C._debug_only_display_vmap_fallback_warnings(True)` + to show any performance warnings and file us issues if + warnings exist for your use case. Defaults to ``False``. + outer_jacobian_strategy (str, optional): The Hessian is computed by + computing the Jacobian of a Jacobian. The inner Jacobian is always + computed in reverse-mode AD. Setting strategy to ``"forward-mode"`` + or ``"reverse-mode"`` determines whether the outer Jacobian will be + computed with forward or reverse mode AD. Currently, computing the outer + Jacobian in ``"forward-mode"`` requires ``vectorized=True``. Defaults + to ``"reverse-mode"``. 
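+            For example (illustrative), forward-over-reverse would be requested as
+            ``hessian(f, inputs, vectorize=True, outer_jacobian_strategy="forward-mode")``
+            for a scalar-valued ``f``.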
+ + Returns: + Hessian (Tensor or a tuple of tuple of Tensors): if there is a single input, + this will be a single Tensor containing the Hessian for the input. + If it is a tuple, then the Hessian will be a tuple of tuples where + ``Hessian[i][j]`` will contain the Hessian of the ``i``\th input + and ``j``\th input with size the sum of the size of the ``i``\th input plus + the size of the ``j``\th input. ``Hessian[i][j]`` will have the same + dtype and device as the corresponding ``i``\th input. + + Example: + + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_AUTOGRAD) + >>> def pow_reducer(x): + ... return x.pow(3).sum() + >>> inputs = torch.rand(2, 2) + >>> # xdoctest: +IGNORE_WANT("non-deterministic") + >>> hessian(pow_reducer, inputs) + tensor([[[[5.2265, 0.0000], + [0.0000, 0.0000]], + [[0.0000, 4.8221], + [0.0000, 0.0000]]], + [[[0.0000, 0.0000], + [1.9456, 0.0000]], + [[0.0000, 0.0000], + [0.0000, 3.2550]]]]) + + >>> hessian(pow_reducer, inputs, create_graph=True) + tensor([[[[5.2265, 0.0000], + [0.0000, 0.0000]], + [[0.0000, 4.8221], + [0.0000, 0.0000]]], + [[[0.0000, 0.0000], + [1.9456, 0.0000]], + [[0.0000, 0.0000], + [0.0000, 3.2550]]]], grad_fn=) + + + >>> def pow_adder_reducer(x, y): + ... return (2 * x.pow(2) + 3 * y.pow(2)).sum() + >>> inputs = (torch.rand(2), torch.rand(2)) + >>> hessian(pow_adder_reducer, inputs) + ((tensor([[4., 0.], + [0., 4.]]), + tensor([[0., 0.], + [0., 0.]])), + (tensor([[0., 0.], + [0., 0.]]), + tensor([[6., 0.], + [0., 6.]]))) + """ + is_inputs_tuple, inputs = _as_tuple(inputs, "inputs", "hessian") + assert outer_jacobian_strategy in ( + "forward-mode", + "reverse-mode", + ), 'Expected strategy to be either "forward-mode" or "reverse-mode".' + + def ensure_single_output_function(*inp): + out = func(*inp) + is_out_tuple, t_out = _as_tuple( + out, "outputs of the user-provided function", "hessian" + ) + _check_requires_grad(t_out, "outputs", strict=strict) + + if is_out_tuple or not isinstance(out, torch.Tensor): + raise RuntimeError( + "The function given to hessian should return a single Tensor" + ) + + if out.nelement() != 1: + raise RuntimeError( + "The Tensor returned by the function given to hessian should contain a single element" + ) + + return out.squeeze() + + def jac_func(*inp): + if outer_jacobian_strategy == "forward-mode": + # _grad_preprocess requires create_graph=True and input to require_grad + # or else the input will be detached + inp = tuple(t.requires_grad_(True) for t in inp) + jac = jacobian(ensure_single_output_function, inp, create_graph=True) + _check_requires_grad(jac, "jacobian", strict=strict) + return jac + + res = jacobian( + jac_func, + inputs, + create_graph=create_graph, + strict=strict, + vectorize=vectorize, + strategy=outer_jacobian_strategy, + ) + return _tuple_postprocess(res, (is_inputs_tuple, is_inputs_tuple)) + + +def vhp(func, inputs, v=None, create_graph=False, strict=False): + r"""Compute the dot product between vector ``v`` and Hessian of a given scalar function at a specified point. + + Args: + func (function): a Python function that takes Tensor inputs and returns + a Tensor with a single element. + inputs (tuple of Tensors or Tensor): inputs to the function ``func``. + v (tuple of Tensors or Tensor): The vector for which the vector Hessian + product is computed. Must be the same size as the input of + ``func``. This argument is optional when ``func``'s input contains + a single element and (if it is not provided) will be set as a + Tensor containing a single ``1``. 
+ create_graph (bool, optional): If ``True``, both the output and result + will be computed in a differentiable way. Note that when ``strict`` + is ``False``, the result can not require gradients or be + disconnected from the inputs. + Defaults to ``False``. + strict (bool, optional): If ``True``, an error will be raised when we + detect that there exists an input such that all the outputs are + independent of it. If ``False``, we return a Tensor of zeros as the + vhp for said inputs, which is the expected mathematical value. + Defaults to ``False``. + + Returns: + output (tuple): tuple with: + func_output (tuple of Tensors or Tensor): output of ``func(inputs)`` + + vhp (tuple of Tensors or Tensor): result of the dot product with the + same shape as the inputs. + + Example: + + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_AUTOGRAD) + >>> def pow_reducer(x): + ... return x.pow(3).sum() + >>> inputs = torch.rand(2, 2) + >>> v = torch.ones(2, 2) + >>> # xdoctest: +IGNORE_WANT("non-deterministic") + >>> vhp(pow_reducer, inputs, v) + (tensor(0.5591), + tensor([[1.0689, 1.2431], + [3.0989, 4.4456]])) + >>> vhp(pow_reducer, inputs, v, create_graph=True) + (tensor(0.5591, grad_fn=), + tensor([[1.0689, 1.2431], + [3.0989, 4.4456]], grad_fn=)) + >>> def pow_adder_reducer(x, y): + ... return (2 * x.pow(2) + 3 * y.pow(2)).sum() + >>> inputs = (torch.rand(2), torch.rand(2)) + >>> v = (torch.zeros(2), torch.ones(2)) + >>> vhp(pow_adder_reducer, inputs, v) + (tensor(4.8053), + (tensor([0., 0.]), + tensor([6., 6.]))) + """ + with torch.enable_grad(): + is_inputs_tuple, inputs = _as_tuple(inputs, "inputs", "vhp") + inputs = _grad_preprocess(inputs, create_graph=create_graph, need_graph=True) + + if v is not None: + _, v = _as_tuple(v, "v", "vhp") + v = _grad_preprocess(v, create_graph=create_graph, need_graph=False) + _validate_v(v, inputs, is_inputs_tuple) + else: + if len(inputs) != 1 or inputs[0].nelement() != 1: + raise RuntimeError( + "The vector v can only be None if the input to the user-provided function " + "is a single Tensor with a single element." + ) + outputs = func(*inputs) + is_outputs_tuple, outputs = _as_tuple( + outputs, "outputs of the user-provided function", "vhp" + ) + _check_requires_grad(outputs, "outputs", strict=strict) + + if is_outputs_tuple or not isinstance(outputs[0], torch.Tensor): + raise RuntimeError( + "The function given to vhp should return a single Tensor" + ) + + if outputs[0].nelement() != 1: + raise RuntimeError( + "The Tensor returned by the function given to vhp should contain a single element" + ) + + jac = _autograd_grad(outputs, inputs, create_graph=True) + _check_requires_grad(jac, "jacobian", strict=strict) + + enable_grad = True if create_graph else torch.is_grad_enabled() + with torch.set_grad_enabled(enable_grad): + grad_res = _autograd_grad(jac, inputs, v, create_graph=create_graph) + vhp = _fill_in_zeros(grad_res, inputs, strict, create_graph, "double_back") + + outputs = _grad_postprocess(outputs, create_graph) + vhp = _grad_postprocess(vhp, create_graph) + + return _tuple_postprocess(outputs, is_outputs_tuple), _tuple_postprocess( + vhp, is_inputs_tuple + ) + + +def hvp(func, inputs, v=None, create_graph=False, strict=False): + r"""Compute the dot product between the scalar function's Hessian and a vector ``v`` at a specified point. + + Args: + func (function): a Python function that takes Tensor inputs and returns + a Tensor with a single element. + inputs (tuple of Tensors or Tensor): inputs to the function ``func``. 
+ v (tuple of Tensors or Tensor): The vector for which the Hessian vector + product is computed. Must be the same size as the input of + ``func``. This argument is optional when ``func``'s input contains + a single element and (if it is not provided) will be set as a + Tensor containing a single ``1``. + create_graph (bool, optional): If ``True``, both the output and result will be + computed in a differentiable way. Note that when ``strict`` is + ``False``, the result can not require gradients or be disconnected + from the inputs. Defaults to ``False``. + strict (bool, optional): If ``True``, an error will be raised when we + detect that there exists an input such that all the outputs are + independent of it. If ``False``, we return a Tensor of zeros as the + hvp for said inputs, which is the expected mathematical value. + Defaults to ``False``. + Returns: + output (tuple): tuple with: + func_output (tuple of Tensors or Tensor): output of ``func(inputs)`` + + hvp (tuple of Tensors or Tensor): result of the dot product with + the same shape as the inputs. + + Example: + + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_AUTOGRAD) + >>> def pow_reducer(x): + ... return x.pow(3).sum() + >>> inputs = torch.rand(2, 2) + >>> v = torch.ones(2, 2) + >>> # xdoctest: +IGNORE_WANT("non-deterministic") + >>> hvp(pow_reducer, inputs, v) + (tensor(0.1448), + tensor([[2.0239, 1.6456], + [2.4988, 1.4310]])) + + >>> hvp(pow_reducer, inputs, v, create_graph=True) + (tensor(0.1448, grad_fn=), + tensor([[2.0239, 1.6456], + [2.4988, 1.4310]], grad_fn=)) + + + >>> def pow_adder_reducer(x, y): + ... return (2 * x.pow(2) + 3 * y.pow(2)).sum() + >>> inputs = (torch.rand(2), torch.rand(2)) + >>> v = (torch.zeros(2), torch.ones(2)) + >>> hvp(pow_adder_reducer, inputs, v) + (tensor(2.3030), + (tensor([0., 0.]), + tensor([6., 6.]))) + + Note: + + This function is significantly slower than `vhp` due to backward mode AD constraints. + If your functions is twice continuously differentiable, then hvp = vhp.t(). So if you + know that your function satisfies this condition, you should use vhp instead that is + much faster with the current implementation. + + """ + with torch.enable_grad(): + is_inputs_tuple, inputs = _as_tuple(inputs, "inputs", "hvp") + inputs = _grad_preprocess(inputs, create_graph=create_graph, need_graph=True) + + if v is not None: + _, v = _as_tuple(v, "v", "hvp") + v = _grad_preprocess(v, create_graph=create_graph, need_graph=False) + _validate_v(v, inputs, is_inputs_tuple) + else: + if len(inputs) != 1 or inputs[0].nelement() != 1: + raise RuntimeError( + "The vector v can only be None if the input to the user-provided function " + "is a single Tensor with a single element." 
+ ) + outputs = func(*inputs) + is_outputs_tuple, outputs = _as_tuple( + outputs, "outputs of the user-provided function", "hvp" + ) + _check_requires_grad(outputs, "outputs", strict=strict) + + if is_outputs_tuple or not isinstance(outputs[0], torch.Tensor): + raise RuntimeError( + "The function given to hvp should return a single Tensor" + ) + + if outputs[0].nelement() != 1: + raise RuntimeError( + "The Tensor returned by the function given to hvp should contain a single element" + ) + + jac = _autograd_grad(outputs, inputs, create_graph=True) + _check_requires_grad(jac, "jacobian", strict=strict) + + grad_jac = tuple(torch.zeros_like(inp, requires_grad=True) for inp in inputs) + + double_back = _autograd_grad(jac, inputs, grad_jac, create_graph=True) + _check_requires_grad(jac, "hessian", strict=strict) + + enable_grad = True if create_graph else torch.is_grad_enabled() + with torch.set_grad_enabled(enable_grad): + grad_res = _autograd_grad(double_back, grad_jac, v, create_graph=create_graph) + hvp = _fill_in_zeros( + grad_res, inputs, strict, create_graph, "double_back_trick" + ) + + outputs = _grad_postprocess(outputs, create_graph) + hvp = _grad_postprocess(hvp, create_graph) + + return _tuple_postprocess(outputs, is_outputs_tuple), _tuple_postprocess( + hvp, is_inputs_tuple + ) diff --git a/venv/lib/python3.10/site-packages/torch/autograd/grad_mode.py b/venv/lib/python3.10/site-packages/torch/autograd/grad_mode.py new file mode 100644 index 0000000000000000000000000000000000000000..be173c9b9de0ed8cdb70818ad4d7a2fad16d3c10 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/autograd/grad_mode.py @@ -0,0 +1,396 @@ +from typing import Any + +import torch + +from torch.utils._contextlib import ( + _DecoratorContextManager, + _NoParamDecoratorContextManager, + F, +) + +__all__ = [ + "no_grad", + "enable_grad", + "set_grad_enabled", + "inference_mode", + "set_multithreading_enabled", +] + + +class no_grad(_NoParamDecoratorContextManager): + r"""Context-manager that disables gradient calculation. + + Disabling gradient calculation is useful for inference, when you are sure + that you will not call :meth:`Tensor.backward()`. It will reduce memory + consumption for computations that would otherwise have `requires_grad=True`. + + In this mode, the result of every computation will have + `requires_grad=False`, even when the inputs have `requires_grad=True`. + There is an exception! All factory functions, or functions that create + a new Tensor and take a requires_grad kwarg, will NOT be affected by + this mode. + + This context manager is thread local; it will not affect computation + in other threads. + + Also functions as a decorator. + + .. note:: + No-grad is one of several mechanisms that can enable or + disable gradients locally see :ref:`locally-disable-grad-doc` for + more information on how they compare. + + .. note:: + This API does not apply to :ref:`forward-mode AD `. + If you want to disable forward AD for a computation, you can unpack + your dual tensors. + + Example:: + >>> # xdoctest: +SKIP + >>> x = torch.tensor([1.], requires_grad=True) + >>> with torch.no_grad(): + ... y = x * 2 + >>> y.requires_grad + False + >>> @torch.no_grad() + ... def doubler(x): + ... return x * 2 + >>> z = doubler(x) + >>> z.requires_grad + False + >>> @torch.no_grad + ... def tripler(x): + ... return x * 3 + >>> z = tripler(x) + >>> z.requires_grad + False + >>> # factory function exception + >>> with torch.no_grad(): + ... 
a = torch.nn.Parameter(torch.rand(10)) + >>> a.requires_grad + True + """ + + def __init__(self) -> None: + if not torch._jit_internal.is_scripting(): + super().__init__() + self.prev = False + + def __enter__(self) -> None: + self.prev = torch.is_grad_enabled() + torch.set_grad_enabled(False) + + def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None: + torch.set_grad_enabled(self.prev) + + +class enable_grad(_NoParamDecoratorContextManager): + r"""Context-manager that enables gradient calculation. + + Enables gradient calculation, if it has been disabled via :class:`~no_grad` + or :class:`~set_grad_enabled`. + + This context manager is thread local; it will not affect computation + in other threads. + + Also functions as a decorator. + + .. note:: + enable_grad is one of several mechanisms that can enable or + disable gradients locally see :ref:`locally-disable-grad-doc` for + more information on how they compare. + + .. note:: + This API does not apply to :ref:`forward-mode AD `. + + Example:: + >>> # xdoctest: +SKIP + >>> x = torch.tensor([1.], requires_grad=True) + >>> with torch.no_grad(): + ... with torch.enable_grad(): + ... y = x * 2 + >>> y.requires_grad + True + >>> y.backward() + >>> x.grad + tensor([2.]) + >>> @torch.enable_grad() + ... def doubler(x): + ... return x * 2 + >>> with torch.no_grad(): + ... z = doubler(x) + >>> z.requires_grad + True + >>> @torch.enable_grad + ... def tripler(x): + ... return x * 3 + >>> with torch.no_grad(): + ... z = tripler(x) + >>> z.requires_grad + True + + """ + + def __enter__(self) -> None: + self.prev = torch.is_grad_enabled() + torch._C._set_grad_enabled(True) + + def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None: + torch._C._set_grad_enabled(self.prev) + + +class set_grad_enabled(_DecoratorContextManager): + r"""Context-manager that sets gradient calculation on or off. + + ``set_grad_enabled`` will enable or disable grads based on its argument :attr:`mode`. + It can be used as a context-manager or as a function. + + This context manager is thread local; it will not affect computation + in other threads. + + Args: + mode (bool): Flag whether to enable grad (``True``), or disable + (``False``). This can be used to conditionally enable + gradients. + + .. note:: + set_grad_enabled is one of several mechanisms that can enable or + disable gradients locally see :ref:`locally-disable-grad-doc` for + more information on how they compare. + + .. note:: + This API does not apply to :ref:`forward-mode AD `. + + Example:: + >>> # xdoctest: +SKIP + >>> x = torch.tensor([1.], requires_grad=True) + >>> is_train = False + >>> with torch.set_grad_enabled(is_train): + ... 
y = x * 2 + >>> y.requires_grad + False + >>> _ = torch.set_grad_enabled(True) + >>> y = x * 2 + >>> y.requires_grad + True + >>> _ = torch.set_grad_enabled(False) + >>> y = x * 2 + >>> y.requires_grad + False + + """ + + def __init__(self, mode: bool) -> None: + self.prev = torch.is_grad_enabled() + self.mode = mode + torch._C._set_grad_enabled(mode) + + def __call__(self, orig_func: F) -> F: + torch._C._set_grad_enabled(self.prev) + return super().__call__(orig_func) + + def __enter__(self) -> None: + torch._C._set_grad_enabled(self.mode) + + def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None: + torch._C._set_grad_enabled(self.prev) + + def clone(self) -> "set_grad_enabled": + r""" + Create a copy of this class + """ + return self.__class__(self.mode) + + +class inference_mode(_DecoratorContextManager): + r"""Context-manager that enables or disables inference mode. + + InferenceMode is a new context manager analogous to :class:`~no_grad` + to be used when you are certain your operations will have no interactions + with autograd (e.g., model training). Code run under this mode gets better + performance by disabling view tracking and version counter bumps. Note that + unlike some other mechanisms that locally enable or disable grad, + entering inference_mode also disables to :ref:`forward-mode AD `. + + This context manager is thread local; it will not affect computation + in other threads. + + Also functions as a decorator. + + .. note:: + Inference mode is one of several mechanisms that can enable or + disable gradients locally see :ref:`locally-disable-grad-doc` for + more information on how they compare. + + Args: + mode (bool or function): Either a boolean flag whether to enable or + disable inference mode or a Python function to decorate with + inference mode enabled + + Example:: + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_AUTOGRAD) + >>> import torch + >>> x = torch.ones(1, 2, 3, requires_grad=True) + >>> with torch.inference_mode(): + ... y = x * x + >>> y.requires_grad + False + >>> # xdoctest: +SKIP("want string isnt quite right") + >>> y._version + Traceback (most recent call last): + File "", line 1, in + RuntimeError: Inference tensors do not track version counter. + >>> @torch.inference_mode() + ... def func(x): + ... return x * x + >>> out = func(x) + >>> out.requires_grad + False + >>> @torch.inference_mode + ... def doubler(x): + ... return x * 2 + >>> out = doubler(x) + >>> out.requires_grad + False + + """ + + def __init__(self, mode: bool = True) -> None: + if not torch._jit_internal.is_scripting(): + super().__init__() + self.mode = mode + + def __new__(cls, mode=True): + if isinstance(mode, bool): + return super().__new__(cls) + return cls()(mode) + + def __enter__(self) -> None: + self._inference_mode_context = torch._C._InferenceMode(self.mode) + self._inference_mode_context.__enter__() + + def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None: + self._inference_mode_context.__exit__(exc_type, exc_value, traceback) + + def clone(self) -> "inference_mode": + r""" + Create a copy of this class + """ + return self.__class__(self.mode) + + +def _enter_inference_mode(mode): + mode_context = torch._C._InferenceMode(mode) + mode_context.__enter__() + return mode_context + + +def _exit_inference_mode(mode): + mode.__exit__(None, None, None) + + +class set_multithreading_enabled(_DecoratorContextManager): + r"""Context-manager that sets multithreaded backwards on or off. 
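# Editor's note (illustrative, not part of the patch): one consequence of the
# set_grad_enabled implementation above is that the flag is flipped already in
# __init__, so it also works as a plain function call, while __enter__/__exit__
# restore the previous state when it is used as a context manager.
import torch

torch.set_grad_enabled(True)               # plain call, takes effect immediately
x = torch.ones(1, requires_grad=True)
with torch.set_grad_enabled(False):        # context-manager form
    assert not (x * 2).requires_grad
assert (x * 2).requires_grad               # previous (enabled) state restored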
+ + ``set_multithreading_enabled`` will enable or disable multithreaded backwards based on its argument :attr:`mode`. + It can be used as a context-manager or as a function. + + This context manager is thread local; it will not affect computation + in other threads. + + Args: + mode (bool): Flag whether to enable multithreaded backwards (``True``), or disable + (``False``). + + .. note:: + This API does not apply to :ref:`forward-mode AD `. + + """ + + def __init__(self, mode: bool) -> None: + self.prev = torch._C._is_multithreading_enabled() + torch._C._set_multithreading_enabled(mode) + self.mode = mode + + def __enter__(self) -> None: + pass + + def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None: + torch._C._set_multithreading_enabled(self.prev) + + def clone(self) -> "set_multithreading_enabled": + r""" + Create a copy of this class + """ + return self.__class__(self.mode) + + +class _force_original_view_tracking(_DecoratorContextManager): + r"""Context-manager that sets whether or not to always enable view-replay in autograd. + + ``set_view_replay_enabled`` will enable or disable view-replay based on its argument :attr:`mode`. + It can be used as a context-manager or as a function. + + This context manager is thread local; it will not affect computation + in other threads. + + When a tensor view is mutated, the autograd engine needs to decide whether or not + to regenerate the "updated view" by either replaying the chain of views from the updated base, + or with a single call to as_strided. + + If set_view_replay_enabled is set to True, then autograd will always use view replay. + Otherwise, it will fall back to its existing logic. + + Args: + mode (bool): Flag whether to enable view-replay (``True``), or disable + (``False``). + + """ + + def __init__(self, mode: bool) -> None: + self.prev = torch._C._is_view_replay_enabled() + torch._C._set_view_replay_enabled(mode) + self.mode = mode + + def __enter__(self) -> None: + pass + + def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None: + torch._C._set_view_replay_enabled(self.prev) + + def clone(self): + return self.__class__(self.mode) + + +class _unsafe_preserve_version_counter(_DecoratorContextManager): + r"""DO NOT USE THIS UNLESS YOU KNOW EXACTLY WHAT YOU'RE DOING. + + This context manager can lead to arbitrary silent-correctness issues in any other part of your code + (even the ones not touched directly by the context manager)! + + Ordinarily, autograd will track mutations to tensors by incrementing it's `._version` attribute. + This is generally important for correctness, as for example, mutating a tensor that autograd has saved + for the backwards pass can result in incorrect gradients, and autograd uses the version counter to detect + and error out in this situation. + + However, there are rare instances where it might be useful to hide mutations from autograd. For example: + if a tensor is very large, and you'd like to free its memory by storing it elsewhere, and re-populate + the tensor right before it is needed by autograd. + + Args: + tensor (torch.Tensor): the tensor in question, that you would like to preserve the version counter of. + + .. note:: + This API does not apply to :ref:`forward-mode AD `. 
+ + """ + + def __init__(self, tensor: torch.Tensor) -> None: + self.tensor = tensor + self.prev_version = tensor._version + + def __enter__(self) -> None: + pass + + def __exit__(self, *args) -> None: + torch._C._autograd._unsafe_set_version_counter(self.tensor, self.prev_version) diff --git a/venv/lib/python3.10/site-packages/torch/autograd/gradcheck.py b/venv/lib/python3.10/site-packages/torch/autograd/gradcheck.py new file mode 100644 index 0000000000000000000000000000000000000000..f2e6aa22fe9462ed727fbeaaf6f950cefd7ea700 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/autograd/gradcheck.py @@ -0,0 +1,2266 @@ +import collections +import functools +import warnings +from itertools import product +from typing import Callable, Dict, Iterable, List, Optional, Tuple, Union + +import torch +import torch.testing +from torch._vmap_internals import _vmap, vmap +from torch.overrides import is_tensor_like +from torch.types import _TensorOrTensors + +# Note: `get_*_jacobian` functions are added here even though we didn't intend to make them public +# since they have been exposed from before we added `__all__` and we already maintain BC for them +# We should eventually deprecate them and remove them from `__all__` +__all__ = [ + "gradcheck", + "gradgradcheck", + "GradcheckError", + "get_numerical_jacobian", + "get_analytical_jacobian", + "get_numerical_jacobian_wrt_specific_input", +] + + +class GradcheckError(RuntimeError): + r"""Error raised by :func:`gradcheck` and :func:`gradgradcheck`.""" + + pass + + +def _is_sparse_compressed_tensor(obj: torch.Tensor): + return obj.layout in { + torch.sparse_csr, + torch.sparse_csc, + torch.sparse_bsr, + torch.sparse_bsc, + } + + +def _is_sparse_any_tensor(obj: torch.Tensor): + return _is_sparse_compressed_tensor(obj) or obj.layout is torch.sparse_coo + + +def _is_float_or_complex_tensor(obj): + return is_tensor_like(obj) and (obj.is_floating_point() or obj.is_complex()) + + +def _allocate_jacobians_with_inputs( + input_tensors: Tuple, numel_output +) -> Tuple[torch.Tensor, ...]: + # Makes zero-filled tensors from inputs. If `numel_output` is not None, for + # each tensor in `input_tensors`, returns a new zero-filled tensor with height + # of `t.numel` and width of `numel_output`. Otherwise, for each tensor, returns + # a 1-d tensor with size `(t.numel,)`. Each new tensor will be strided and have + # the same dtype and device as those of the corresponding input. + out: List[torch.Tensor] = [] + for t in input_tensors: + if _is_float_or_complex_tensor(t) and t.requires_grad: + out.append(t.new_zeros((t.numel(), numel_output), layout=torch.strided)) + return tuple(out) + + +def _allocate_jacobians_with_outputs( + output_tensors: Tuple, numel_input, dtype=None, device=None +) -> Tuple[torch.Tensor, ...]: + # Makes zero-filled tensors from outputs. If `dim` is not None, for each tensor + # in `output_tensors`, returns a new zero-filled tensor with height of `dim` and + # width of `t.numel`. Otherwise, for each tensor, returns a 1-d tensor with size + # (t.numel,). 
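# Editor's note (hypothetical sketch, not part of the patch): the
# offload/repopulate pattern that _unsafe_preserve_version_counter (defined
# above in grad_mode.py) is intended for. The `stash` variable is illustrative,
# and this is only safe if the original values are restored before autograd
# reads the tensor again.
import torch

t = torch.randn(4)
before = t._version
stash = t.clone()                          # move the contents elsewhere
with torch.autograd._unsafe_preserve_version_counter(t):
    t.zero_()                              # mutation autograd should not count
    t.copy_(stash)                         # repopulate before autograd needs it
assert t._version == before                # version counter was restored on exit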
+ out: List[torch.Tensor] = [] + options = {"dtype": dtype, "device": device, "layout": torch.strided} + for t in output_tensors: + if _is_float_or_complex_tensor(t): + out.append(t.new_zeros((numel_input, t.numel()), **options)) + return tuple(out) + + +def _iter_tensors( + x: Union[torch.Tensor, Iterable[torch.Tensor]], only_requiring_grad: bool = False +) -> Iterable[torch.Tensor]: + if is_tensor_like(x): + # mypy doesn't narrow type of `x` to torch.Tensor + if x.requires_grad or not only_requiring_grad: # type: ignore[union-attr] + yield x # type: ignore[misc] + elif isinstance(x, collections.abc.Iterable) and not isinstance(x, str): + for elem in x: + yield from _iter_tensors(elem, only_requiring_grad) + + +def _densify(x): + # return a copy of sparse x with all unspecified elements + # "replaced" with zero-valued elements + if isinstance(x, (list, tuple)): + return type(x)(map(_densify, x)) + elif not is_tensor_like(x) or x.layout in {torch.strided, torch._mkldnn}: # type: ignore[attr-defined] # no attr _mkldnn + return x + elif x.layout is torch.sparse_coo: + device = x.device + indices_dtype = x._indices().dtype + tmp = torch.ones(x.shape[: x.sparse_dim()], dtype=torch.int8, device=device) + indices = tmp.nonzero().t().to(dtype=indices_dtype) + values = torch.zeros( + (tmp.numel(), *x.shape[x.sparse_dim() :]), dtype=x.dtype, device=device + ) + x_coalesced = x.detach().coalesce() + if x_coalesced.numel() > 0: + stride = tmp.stride() + flat_indices = ( + x_coalesced.indices() + .mul( + torch.tensor(stride, dtype=indices_dtype, device=device).unsqueeze( + 1 + ) + ) + .sum(0) + ) + values[flat_indices] = x_coalesced.values() + return ( + torch.sparse_coo_tensor(indices, values, x.shape) + ._coalesced_(True) + .requires_grad_(x.requires_grad) + ) + elif _is_sparse_compressed_tensor(x): + blocksize = ( + x.values().shape[1:3] + if x.layout in {torch.sparse_bsr, torch.sparse_bsc} + else None + ) + compressed_indices = ( + x.crow_indices() + if x.layout in {torch.sparse_csr, torch.sparse_bsr} + else x.ccol_indices() + ) + # We'll use intermediate sparse COO for simplicity + r = _densify(x.detach().to_sparse(layout=torch.sparse_coo)).to_sparse( + layout=x.layout, blocksize=blocksize + ) + # Check that all elements are specified also after `to_sparse` op: + dense_numel = r.values().numel() // max(1, r.values().shape[0]) + batch_numel = compressed_indices.numel() // compressed_indices.shape[-1] + sparse_numel = r.numel() // max(1, dense_numel * batch_numel) + if sparse_numel != r._nnz(): + raise AssertionError( + f"{x.layout} densify failed: expected nnz={sparse_numel} but got {r._nnz()}" + ) + return r.requires_grad_(x.requires_grad) + elif _is_sparse_any_tensor(x): + raise NotImplementedError(x.layout) + return x + + +def _iter_tensor(x_tensor): + # (Only used for slow gradcheck) Returns a generator that yields the following + # elements at each iteration: + # 1) a tensor: the same tensor is returned across all iterations. The tensor + # is not the same as the original x_tensor as given as input - it is + # prepared so that it can be modified in-place. Depending on whether the + # input tensor is strided, sparse, or dense, the returned tensor may or may + # not share storage with x_tensor. 
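# Editor's note (illustrative, not part of the patch): how _iter_tensors above
# walks a nested container; the tensors here are arbitrary examples.
import torch

a = torch.randn(2, requires_grad=True)
b = torch.randn(2)
nested = [a, (b, [a])]
# list(_iter_tensors(nested))                            -> [a, b, a]
# list(_iter_tensors(nested, only_requiring_grad=True))  -> [a, a]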
+ # 2) a tuple of indices that can be used with advanced indexing (yielded in + # dictionary order) + # 3) flattened index that will be used to index into the Jacobian tensor + # + # For a tensor t with size (2, 2), _iter_tensor yields: + # `x, (0, 0), 0`, `x, (0, 1), 1`, `x, (1, 0), 2`, `x, (1, 1), 3` + # + # where x is the t.data of the original tensor. Perturbing the entry of x + # at index (1, 1) yields the 3rd column of the overall Jacobian matrix. + if _is_sparse_any_tensor(x_tensor): + + def get_stride(size): + dim = len(size) + tmp = 1 + stride = [0] * dim + for i in reversed(range(dim)): + stride[i] = tmp + tmp *= size[i] + return stride + + x_nnz = x_tensor._nnz() + x_size = list(x_tensor.size()) + if x_tensor.layout is torch.sparse_coo: + x_indices = x_tensor._indices().t() + x_values = x_tensor._values() + elif x_tensor.layout is torch.sparse_csr: + x_indices = torch._convert_indices_from_csr_to_coo( + x_tensor.crow_indices(), x_tensor.col_indices() + ).t() + x_values = x_tensor.values() + elif x_tensor.layout is torch.sparse_csc: + x_indices = torch._convert_indices_from_csr_to_coo( + x_tensor.ccol_indices(), x_tensor.row_indices(), transpose=True + ).t() + x_values = x_tensor.values() + elif x_tensor.layout is torch.sparse_bsr: + x_block_values = x_tensor.values() + x_blocksize = x_block_values.size()[1:3] + x_indices = ( + torch._convert_indices_from_csr_to_coo( + x_tensor.crow_indices(), x_tensor.col_indices() + ) + .repeat_interleave(x_blocksize[0] * x_blocksize[1], 1) + .mul_(torch.tensor(x_blocksize, device=x_tensor.device).reshape(2, 1)) + .add_( + torch.stack( + torch.where(torch.ones(x_blocksize, device=x_tensor.device)) + ).repeat(1, x_nnz) + ) + .t() + ) + x_values = x_block_values.flatten(0, 2) + x_nnz = x_values.size(0) + elif x_tensor.layout is torch.sparse_bsc: + x_block_values = x_tensor.values() + x_blocksize = x_block_values.size()[1:3] + x_indices = ( + torch._convert_indices_from_csr_to_coo( + x_tensor.ccol_indices(), x_tensor.row_indices(), transpose=True + ) + .repeat_interleave(x_blocksize[0] * x_blocksize[1], 1) + .mul_(torch.tensor(x_blocksize, device=x_tensor.device).reshape(2, 1)) + .add_( + torch.stack( + torch.where(torch.ones(x_blocksize, device=x_tensor.device)) + ).repeat(1, x_nnz) + ) + .t() + ) + x_values = x_block_values.flatten(0, 2) + x_nnz = x_values.size(0) + else: + raise NotImplementedError(f"_iter_tensor for {x_tensor.layout} input") + x_stride = get_stride(x_size) + # Use .data here to get around the version check + x_values = x_values.data + for i in range(x_nnz): + x_value = x_values[i] + for x_idx in product(*[range(m) for m in x_values.size()[1:]]): + indices = x_indices[i].tolist() + list(x_idx) + d_idx = sum(indices[k] * x_stride[k] for k in range(len(x_size))) + yield x_value, x_idx, d_idx + elif x_tensor.layout == torch._mkldnn: # type: ignore[attr-defined] + for d_idx, x_idx in enumerate(product(*[range(m) for m in x_tensor.size()])): + # this is really inefficient, but without indexing implemented, there's + # not really a better way than converting back and forth + x_tensor_dense = x_tensor.to_dense() + yield x_tensor_dense, x_idx, d_idx + else: + # Use .data here to get around the version check + x_tensor = x_tensor.data + for d_idx, x_idx in enumerate(product(*[range(m) for m in x_tensor.size()])): + yield x_tensor, x_idx, d_idx + + +def _get_numerical_jacobian( + fn, inputs, outputs=None, target=None, eps=1e-3, is_forward_ad=False +) -> List[Tuple[torch.Tensor, ...]]: + """Compute the numerical Jacobian of `fn(inputs)` 
with respect to `target`. + + If not specified, targets are the input. Returns M * N Jacobians where N is the + number of tensors in target that require grad and M is the number of non-integral + outputs. + + Args: + fn: the function to compute the jacobian for + inputs: inputs to `fn` + outputs: provide precomputed outputs to avoid one extra invocation of fn + target: the Tensors wrt whom Jacobians are calculated (default=`inputs`) + eps: the magnitude of the perturbation during finite differencing + (default=`1e-3`) + is_forward_ad: if this numerical jacobian is computed to be checked wrt + forward AD gradients (this is used for error checking only) + + Returns: + A list of M N-tuples of tensors + + Note that `target` may not even be part of `input` to `fn`, so please be + **very careful** in this to not clone `target`. + """ + jacobians: List[Tuple[torch.Tensor, ...]] = [] + if outputs is None: + outputs = _as_tuple(fn(*_as_tuple(inputs))) + if not is_forward_ad and any(o.is_complex() for o in outputs): + raise ValueError( + "Expected output to be non-complex. get_numerical_jacobian no " + "longer supports functions that return complex outputs." + ) + if target is None: + target = inputs + inp_indices = [ + i for i, a in enumerate(target) if is_tensor_like(a) and a.requires_grad + ] + for i, (inp, inp_idx) in enumerate(zip(_iter_tensors(target, True), inp_indices)): + jacobians += [ + get_numerical_jacobian_wrt_specific_input( + fn, + inp_idx, + inputs, + outputs, + eps, + input=inp, + is_forward_ad=is_forward_ad, + ) + ] + return jacobians + + +def get_numerical_jacobian(fn, inputs, target=None, eps=1e-3, grad_out=1.0): + """Compute the numerical Jacobian for a given fn and its inputs. + + This is a Deprecated API. + + Args: + fn: the function to compute the Jacobian for (must take inputs as a tuple) + input: input to `fn` + target: the Tensors wrt whom Jacobians are calculated (default=`input`) + eps: the magnitude of the perturbation during finite differencing + (default=`1e-3`) + + Returns: + A list of Jacobians of `fn` (restricted to its first output) with respect to + each input or target, if provided. + + Note that `target` may not even be part of `input` to `fn`, so please be + **very careful** in this to not clone `target`. + """ + warnings.warn( + "get_numerical_jacobian was part of PyTorch's private API and not " + "meant to be exposed. We are deprecating it and it will be removed " + "in a future version of PyTorch. If you have a specific use for " + "this or feature request for this to be a stable API, please file " + "us an issue at https://github.com/pytorch/pytorch/issues/new" + ) + if ( + grad_out != 1.0 + ): # grad_out param is only kept for backward compatibility reasons + raise ValueError( + "Expected grad_out to be 1.0. get_numerical_jacobian no longer " + "supports values of grad_out != 1.0." + ) + + def fn_pack_inps(*inps): + return fn(inps) + + jacobians = _get_numerical_jacobian(fn_pack_inps, inputs, None, target, eps) + + return tuple(jacobian_for_each_output[0] for jacobian_for_each_output in jacobians) + + +def _compute_numerical_gradient(fn, entry, v, norm_v, nbhd_checks_fn): + # Computes numerical directional derivative as finite difference + # of function `fn` at input `entry`, perturbed by vector `v`. + if _is_sparse_compressed_tensor(entry): + # sparse compressed tensors don't implement sub/add/copy_ + # yet. However, in non-masked semantics context entry and v + # have the same sparse indices ... 
+ assert entry.layout == v.layout, (entry.layout, v.layout) + assert entry._nnz() == v._nnz(), (entry._nnz(), v._nnz(), entry.shape) + # ... the finite differencing can be performed on values only: + entry = entry.values() + v = v.values() + # we'll detach to avoid backward computations that sparse + # tensors have limited support for. + entry = entry.detach() + + orig = entry.clone() + entry.copy_(orig - v) + outa = fn() + entry.copy_(orig + v) + outb = fn() + entry.copy_(orig) + + def compute(a, b): + nbhd_checks_fn(a, b) + ret = (b - a) / (2 * norm_v) # use central difference approx + return ret.detach().reshape(-1) + + return tuple(compute(a, b) for (a, b) in zip(outa, outb)) + + +def _compute_numerical_jvps_wrt_specific_input( + jvp_fn, delta, input_is_complex, is_forward_ad=False +) -> List[torch.Tensor]: + # Computing the jacobian only works for real delta + # For details on the algorithm used here, refer: + # Section 3.5.3 https://arxiv.org/pdf/1701.00392.pdf + # s = fn(z) where z = x for real valued input + # and z = x + yj for complex valued input + jvps: List[torch.Tensor] = [] + ds_dx_tup = jvp_fn(delta[0] if isinstance(delta, tuple) else delta) + + if input_is_complex: # C -> R + ds_dy_tup = ( + jvp_fn(delta[1] * 1j) if isinstance(delta, tuple) else jvp_fn(delta * 1j) + ) + for ds_dx, ds_dy in zip(ds_dx_tup, ds_dy_tup): + assert not ds_dx.is_complex() + # conjugate wirtinger derivative + conj_w_d = ds_dx + ds_dy * 1j + jvps.append(conj_w_d) + else: + for ds_dx in ds_dx_tup: # R -> R or (R -> C for the forward AD case) + assert is_forward_ad or not ds_dx.is_complex() + jvps.append(ds_dx) + return jvps + + +def _combine_jacobian_cols( + jacobians_cols: Dict[int, List[torch.Tensor]], outputs, input, numel +) -> Tuple[torch.Tensor, ...]: + # jacobian_cols maps column_idx -> output_idx -> single column of jacobian Tensor + # we return a list that maps output_idx -> full jacobian Tensor + jacobians = _allocate_jacobians_with_outputs( + outputs, numel, dtype=input.dtype if input.dtype.is_complex else None + ) + for i, jacobian in enumerate(jacobians): + for k, v in jacobians_cols.items(): + jacobian[k] = v[i] + return jacobians + + +def _prepare_input( + input: torch.Tensor, maybe_perturbed_input: Optional[torch.Tensor], fast_mode=False +) -> torch.Tensor: + # Prepares the inputs to be passed into the function while including the new + # modified input. + if input.layout == torch._mkldnn: # type: ignore[attr-defined] # no attr _mkldnn + # Convert back to mkldnn + if maybe_perturbed_input is not None: + return maybe_perturbed_input.to_mkldnn() + else: + return input + elif _is_sparse_any_tensor(input): + if fast_mode and maybe_perturbed_input is not None: + # entry is already a "cloned" version of the original tensor + # thus changes to entry are not reflected in the input + return maybe_perturbed_input + else: + return input + else: + # We cannot use entry (input.data) if we want gradgrad to work because + # fn (in the gradgrad case) needs to compute grad wrt input + return input + + +def _check_outputs_same_dtype_and_shape(output1, output2, eps, idx=None) -> None: + # Check that the returned outputs don't have different dtype or shape when you + # perturb the input + on_index = "on index {idx} " if idx is not None else "" + assert output1.shape == output2.shape, ( + f"Expected `func` to return outputs with the same shape" + f" when inputs are perturbed {on_index}by {eps}, but got:" + f" shapes {output1.shape} and {output2.shape}." 
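# Editor's note (minimal sketch, not part of the patch; assumes plain dense
# float tensors): the central-difference directional derivative that
# _compute_numerical_gradient above evaluates,
# (f(x + eps*u) - f(x - eps*u)) / (2*eps). The helper name is made up.
import torch

def numerical_jvp(fn, x, u, eps=1e-3):
    return (fn(x + eps * u) - fn(x - eps * u)) / (2 * eps)

x = torch.randn(5, dtype=torch.double)
u = torch.randn(5, dtype=torch.double)
approx = numerical_jvp(lambda t: (t ** 2).sum(), x, u)
exact = (2 * x).dot(u)                     # analytic J u for f(x) = sum(x^2)
assert torch.allclose(approx, exact, atol=1e-8)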
+ ) + assert output1.dtype == output2.dtype, ( + f"Expected `func` to return outputs with the same dtype" + f" when inputs are perturbed {on_index}by {eps}, but got:" + f" dtypes {output1.dtype} and {output2.dtype}." + ) + + +def get_numerical_jacobian_wrt_specific_input( + fn, input_idx, inputs, outputs, eps, input=None, is_forward_ad=False +) -> Tuple[torch.Tensor, ...]: + # Computes the numerical jacobians wrt to a single input. Returns N jacobian + # tensors, where N is the number of outputs. We use a dictionary for + # jacobian_cols because indices aren't necessarily consecutive for sparse inputs + # When we perturb only a single element of the input tensor at a time, the jvp + # is equivalent to a single col of the Jacobian matrix of fn. + jacobian_cols: Dict[int, List[torch.Tensor]] = {} + input = inputs[input_idx] if input is None else input + assert input.requires_grad + for x, idx, d_idx in _iter_tensor(input): + wrapped_fn = _with_prepare_inputs(fn, inputs, input_idx, x) + input_to_perturb = x[idx] + nbhd_checks_fn = functools.partial( + _check_outputs_same_dtype_and_shape, idx=idx, eps=eps + ) + jvp_fn = _get_numerical_jvp_fn( + wrapped_fn, input_to_perturb, eps, nbhd_checks_fn + ) + jacobian_cols[d_idx] = _compute_numerical_jvps_wrt_specific_input( + jvp_fn, eps, x.is_complex(), is_forward_ad + ) + return _combine_jacobian_cols(jacobian_cols, outputs, input, input.numel()) + + +def _get_analytical_jacobian_forward_ad( + fn, inputs, outputs, *, check_grad_dtypes=False, all_u=None +) -> Tuple[Tuple[torch.Tensor, ...], ...]: + """Compute the analytical Jacobian using forward mode AD of `fn(inputs)` using forward mode AD with respect to `target`. + + Return N * M Jacobians where N is the number of tensors in target that require grad and + M is the number of non-integral outputs. + Contrary to other functions here, this function requires "inputs" to actually be used by the function. + The computed value is expected to be wrong if the function captures the inputs by side effect instead of + using the passed ones (many torch.nn tests do this). + + Args: + fn: the function to compute the jacobian for + inputs: inputs to `fn` + outputs: provide precomputed outputs to avoid one extra invocation of fn + check_grad_dtypes: if True, will check that the gradient dtype are valid + all_u (optional): if provided, the Jacobian will be right multiplied with this vector + + Returns: + A tuple of M N-tuples of tensors + """ + # To avoid early import issues + fwAD = torch.autograd.forward_ad + + tensor_inputs = tuple(i for i in inputs if is_tensor_like(i) and i.requires_grad) + + if any(i.is_complex() for i in tensor_inputs): + raise ValueError( + "Expected inputs to be non-complex for _get_analytical_jacobian_forward_ad." + ) + + if all_u: + jacobians = tuple( + _allocate_jacobians_with_outputs(outputs, 1) for i in tensor_inputs + ) + else: + jacobians = tuple( + _allocate_jacobians_with_outputs(outputs, i.numel()) for i in tensor_inputs + ) + + with fwAD.dual_level(): + fw_grads = [] + dual_inputs = [] + for i, inp in enumerate(inputs): + if is_tensor_like(inp) and inp.requires_grad: + if inp.layout == torch._mkldnn: # type: ignore[attr-defined] + raise ValueError( + "MKLDNN inputs are not support for forward AD gradcheck." 
+ ) + + inp = fwAD.make_dual(inp.detach(), torch.zeros_like(inp)) + # If inp is a differentiable view, the dual might not be the tangent given to + # make_dual, so read it explicitly from the dual tensor + fw_grads.append(fwAD.unpack_dual(inp)[1]) + dual_inputs.append(inp) + + if all_u: + # Do the full reduction in one pass + # To be consistent with numerical evaluation, we actually compute one reduction per input + for i, (fw_grad, u) in enumerate(zip(fw_grads, all_u)): + fw_grad.copy_(u.view_as(fw_grad)) + raw_outputs = _as_tuple(fn(*dual_inputs)) + dual_outputs = filter(_is_float_or_complex_tensor, raw_outputs) + for index_o, d_o in enumerate(dual_outputs): + val, res = fwAD.unpack_dual(d_o) + if ( + check_grad_dtypes + and res is not None + and val.is_complex() != res.is_complex() + ): + raise GradcheckError("Forward AD gradient has dtype mismatch.") + + # Remove extra dimension of size 1 corresponding to the reduced input + jacobians[i][index_o].squeeze_(0) + if res is None: + jacobians[i][index_o].zero_() + else: + jacobians[i][index_o].copy_(res.reshape(-1)) + fw_grad.zero_() + else: + # Reconstruct the full Jacobian column by column + for i, fw_grad in enumerate(fw_grads): + for lin_idx, grad_idx in enumerate( + product(*[range(m) for m in fw_grad.size()]) + ): + fw_grad[grad_idx] = 1.0 + raw_outputs = _as_tuple(fn(*dual_inputs)) + dual_outputs = filter(_is_float_or_complex_tensor, raw_outputs) + for index_o, d_o in enumerate(dual_outputs): + val, res = fwAD.unpack_dual(d_o) + if ( + check_grad_dtypes + and res is not None + and val.is_complex() != res.is_complex() + ): + raise GradcheckError( + "Forward AD gradient has dtype mismatch." + ) + + if res is None: + jacobians[i][index_o][lin_idx].zero_() + else: + jacobians[i][index_o][lin_idx].copy_(res.reshape(-1)) + fw_grad[grad_idx] = 0.0 + + return jacobians + + +def _get_input_to_perturb(input): + # Prepare the input so that it can be modified in-place and do certain + # operations that require the tensor to have strides. 
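# Editor's note (minimal sketch, not part of the patch): the column-by-column
# forward-mode Jacobian construction used above, written with the public
# torch.autograd.forward_ad API; the example function (elementwise exp) is
# chosen so the expected Jacobian is easy to check.
import torch
import torch.autograd.forward_ad as fwAD

x = torch.randn(3, dtype=torch.double)
cols = []
for j in range(x.numel()):
    tangent = torch.zeros_like(x)
    tangent[j] = 1.0                        # standard basis vector e_j
    with fwAD.dual_level():
        dual = fwAD.make_dual(x, tangent)
        _, jvp = fwAD.unpack_dual(dual.exp())
        cols.append(jvp.clone())            # J @ e_j is the j-th column
jac = torch.stack(cols, dim=1)
assert torch.allclose(jac, torch.diag(x.exp()))   # elementwise exp => diagonal J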
If fast_mode=False, + # _iter_tensor would handle the below cases: + if input.layout == torch._mkldnn: # type: ignore[attr-defined] # no attr _mkldnn + # Convert to dense so we can perform operations that require strided tensors + input_to_perturb = input.to_dense() + elif _is_sparse_any_tensor(input): + # Clone because input may require grad, and copy_ calls resize_, + # which is not allowed for .data + input_to_perturb = input.clone() + else: + input_to_perturb = input.data + return input_to_perturb + + +def _with_prepare_inputs(fn, inputs, input_idx, input_to_perturb, fast_mode=False): + # Wraps `fn` so that its inputs are already supplied + def wrapped_fn(): + inp = tuple( + _prepare_input(a, input_to_perturb if i == input_idx else None, fast_mode) + if is_tensor_like(a) + else a + for i, a in enumerate(_as_tuple(inputs)) + ) + return tuple(a.clone() for a in _as_tuple(fn(*inp))) + + return wrapped_fn + + +def _get_numerical_jvp_fn(wrapped_fn, input_to_perturb, eps, nbhd_checks_fn): + # Wraps jvp_fn so that certain arguments are already supplied + def jvp_fn(delta): + return _compute_numerical_gradient( + wrapped_fn, input_to_perturb, delta, eps, nbhd_checks_fn + ) + + return jvp_fn + + +def _reshape_tensor_or_tuple(u, shape): + # We don't need to reshape when input corresponding to u is sparse + if isinstance(u, tuple): + if not _is_sparse_any_tensor(u[0]): + return (u[0].reshape(shape), u[1].reshape(shape)) + else: + if not _is_sparse_any_tensor(u): + return u.reshape(shape) + return u + + +def _mul_tensor_or_tuple(u, k): + if isinstance(u, tuple): + return (k * u[0], k * u[1]) + else: + return k * u + + +def _get_numerical_jvp_wrt_specific_input( + fn, input_idx, inputs, u, eps, is_forward_ad=False +) -> List[torch.Tensor]: + input = inputs[input_idx] + input_to_perturb = _get_input_to_perturb(input) + wrapped_fn = _with_prepare_inputs(fn, inputs, input_idx, input_to_perturb, True) + nbhd_checks_fn = functools.partial(_check_outputs_same_dtype_and_shape, eps=eps) + jvp_fn = _get_numerical_jvp_fn(wrapped_fn, input_to_perturb, eps, nbhd_checks_fn) + u = _reshape_tensor_or_tuple(u, input_to_perturb.shape) + u = _mul_tensor_or_tuple(u, eps) + return _compute_numerical_jvps_wrt_specific_input( + jvp_fn, u, input.is_complex(), is_forward_ad + ) + + +def _get_numerical_vJu( + fn, inputs, inp_indices, func_out, all_u, all_v, eps, is_forward_ad +): + # Note that all_v can also be None, in that case, this function only computes Ju. + reduced_jacobians: List[List[torch.Tensor]] = [] + for i, (inp_idx, u) in enumerate(zip(inp_indices, all_u)): + all_Ju = _get_numerical_jvp_wrt_specific_input( + fn, inp_idx, inputs, u, eps, is_forward_ad + ) + # Filter out the Ju for non floating point outputs + filtered_Ju = [] + func_out = _as_tuple(func_out) + assert len(all_Ju) == len(func_out) + for Ju, output in zip(all_Ju, func_out): + if _is_float_or_complex_tensor(output): + filtered_Ju.append(Ju) + else: + # TODO: handle the other Ju + pass + if all_v is not None: + jacobian_scalars: List[torch.Tensor] = [] + for v, Ju in zip(all_v, filtered_Ju): + jacobian_scalars.append(_dot_with_type_promotion(v, Ju)) + reduced_jacobians.append(jacobian_scalars) + else: + reduced_jacobians.append(filtered_Ju) + return reduced_jacobians + + +def _check_jacobians_equal(j1, j2, atol): + # Check whether the max difference between two Jacobian tensors are within some + # tolerance `atol`. 
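# Editor's note (small sketch, not part of the patch): the reduced scalar
# v^T J u that fast-mode gradcheck compares (see _get_numerical_vJu above),
# computed once analytically via a vjp and once by central differences.
import torch

fn = torch.tanh
x = torch.randn(4, dtype=torch.double, requires_grad=True)
u = torch.randn(4, dtype=torch.double)
v = torch.randn(4, dtype=torch.double)

(vJ,) = torch.autograd.grad(fn(x), x, v)   # v^T J via one backward pass
analytical = vJ.dot(u)

eps = 1e-6
xd = x.detach()
numerical = (v.dot(fn(xd + eps * u)) - v.dot(fn(xd - eps * u))) / (2 * eps)
assert torch.allclose(analytical, numerical, atol=1e-7)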
+ for j1_x, j2_x in zip(j1, j2): + if j1_x.numel() != 0 and (j1_x - j2_x).abs().max() > atol: + return False + return True + + +def _stack_and_check_tensors( + list_of_list_of_tensors, inputs, numel_outputs +) -> Tuple[Tuple[torch.Tensor, ...], bool, bool]: + # For the ith tensor in the inner list checks whether it has the same size and + # dtype as the ith differentiable input. + out_jacobians = _allocate_jacobians_with_inputs(inputs, numel_outputs) + diff_input_list = list(_iter_tensors(inputs, True)) + correct_grad_sizes = True + correct_grad_types = True + for i, tensor_list in enumerate(list_of_list_of_tensors): + inp = diff_input_list[i] + out_jacobian = out_jacobians[i] + for j, tensor in enumerate(tensor_list): + if tensor is not None and tensor.size() != inp.size(): + correct_grad_sizes = False + elif tensor is not None and tensor.dtype != inp.dtype: + correct_grad_types = False + if tensor is None: + out_jacobian[:, j].zero_() + else: + dense = ( + tensor.to_dense() if not tensor.layout == torch.strided else tensor + ) + assert out_jacobian[:, j].numel() == dense.numel() + out_jacobian[:, j] = dense.reshape(-1) + return out_jacobians, correct_grad_sizes, correct_grad_types + + +FAILED_NONDET_MSG = """\n +NOTE: If your op relies on non-deterministic operations i.e., it is listed here: +https://pytorch.org/docs/stable/generated/torch.use_deterministic_algorithms.html +this failure might be expected. + +If you are adding a new operator, please file an issue and then use one of the +workarounds. The workaround depends on how your test invokes gradcheck/gradgradcheck. +If the test +- manually invokes gradcheck/gradgradcheck, then call gradcheck/gradgradcheck + with `nondet_tol=` as a keyword argument. +- is OpInfo-based (e.g., in test_ops_gradients.py), then modify the OpInfo for the test + to have `gradcheck_nondet_tol=`. +- is a Module test (e.g., in common_nn.py), then modify the corresponding + module_test entry to have `gradcheck_nondet_tol=` +""" + + +def _check_analytical_jacobian_attributes( + inputs, output, nondet_tol, check_grad_dtypes, fast_mode=False, v=None +) -> Tuple[torch.Tensor, ...]: + # This is used by both fast and slow mode: + # - For slow mode, vjps[i][j] is the jth row of the Jacobian wrt the ith + # input. 
+ # - For fast mode, vjps[i][0] is a linear combination of the rows + # of the Jacobian wrt the ith input + diff_input_list = list(_iter_tensors(inputs, True)) + + def vjp_fn(grad_output): + return torch.autograd.grad( + output, diff_input_list, grad_output, retain_graph=True, allow_unused=True + ) + + # Compute everything twice to check for nondeterminism (which we call reentrancy) + if fast_mode: + vjps1 = _get_analytical_vjps_wrt_specific_output(vjp_fn, output.clone(), v) + vjps2 = _get_analytical_vjps_wrt_specific_output(vjp_fn, output.clone(), v) + else: + vjps1 = _compute_analytical_jacobian_rows(vjp_fn, output.clone()) + vjps2 = _compute_analytical_jacobian_rows(vjp_fn, output.clone()) + + output_numel = output.numel() if not fast_mode else 1 + jacobians1, types_ok, sizes_ok = _stack_and_check_tensors( + vjps1, inputs, output_numel + ) + jacobians2, _, _ = _stack_and_check_tensors(vjps2, inputs, output_numel) + reentrant = _check_jacobians_equal(jacobians1, jacobians2, nondet_tol) + + if not types_ok and check_grad_dtypes: + raise GradcheckError("Gradient has dtype mismatch") + if not sizes_ok: + raise GradcheckError("Analytical gradient has incorrect size") + if not reentrant: + raise GradcheckError( + "Backward is not reentrant, i.e., running backward with " + "same input and grad_output multiple times gives different values, " + "although analytical gradient matches numerical gradient." + f"The tolerance for nondeterminism was {nondet_tol}." + FAILED_NONDET_MSG + ) + return jacobians1 + + +def _get_analytical_vJu_backward_mode( + inputs, outputs, nondet_tol, check_grad_dtypes, all_v, all_u +): + reduced_jacobians: List[List[torch.Tensor]] = [] + for output, v in zip(outputs, all_v): + all_vJ = _check_analytical_jacobian_attributes( + inputs, output, nondet_tol, check_grad_dtypes, fast_mode=True, v=v + ) + jacobian_scalars: List[torch.Tensor] = [] + for vJ, u in zip(all_vJ, all_u): + # Why do we need squeeze here? vJ is a 2-d tensor so that we can reuse + # the error checking logic from slow mode + vJ = vJ.T.squeeze(0) + if vJ.is_complex(): # C -> R + tv = torch.view_as_real(vJ.resolve_conj()) + tr = tv.select(-1, 0) + ti = tv.select(-1, 1) + jacobian_scalars.append(tr.dot(u[0]) + 1j * ti.dot(u[1])) + else: # R -> R + jacobian_scalars.append(vJ.dot(u)) + reduced_jacobians.append(jacobian_scalars) + return reduced_jacobians + + +def get_analytical_jacobian(inputs, output, nondet_tol=0.0, grad_out=1.0): + # Replicates the behavior of the old get_analytical_jacobian before the refactor + # This shares much of its code with _check_analytical_jacobian_attributes + warnings.warn( + "get_analytical_jacobian was part of PyTorch's private API and not " + "meant to be exposed. We are deprecating it and it will be removed " + "in a future version of PyTorch. If you have a specific use for " + "this or feature request for this to be a stable API, please file " + "us an issue at https://github.com/pytorch/pytorch/issues/new" + ) + if ( + grad_out != 1.0 + ): # grad_out param is only kept for backward compatibility reasons + raise ValueError( + "Expected grad_out to be 1.0. get_analytical_jacobian no longer " + "supports values of grad_out != 1.0." + ) + if output.is_complex(): + raise ValueError( + "Expected output to be non-complex. get_analytical_jacobian no " + "longer supports functions that return complex outputs." 
+ ) + diff_input_list = list(_iter_tensors(inputs, True)) + + def vjp_fn(grad_output): + return torch.autograd.grad( + output, diff_input_list, grad_output, retain_graph=True, allow_unused=True + ) + + # Compute everything twice to check for nondeterminism (which we call reentrancy) + vjps1 = _compute_analytical_jacobian_rows(vjp_fn, output.clone()) + vjps2 = _compute_analytical_jacobian_rows(vjp_fn, output.clone()) + + output_numel = output.numel() + jacobians1, types_ok, sizes_ok = _stack_and_check_tensors( + vjps1, inputs, output_numel + ) + jacobians2, _, _ = _stack_and_check_tensors(vjps2, inputs, output_numel) + reentrant = _check_jacobians_equal(jacobians1, jacobians2, nondet_tol) + + return jacobians1, reentrant, sizes_ok, types_ok + + +def _get_analytical_jacobian(inputs, outputs, input_idx, output_idx): + # Computes the analytical Jacobian in slow mode for a single input-output pair. + # Forgoes performing checks on dtype, shape, and reentrancy. + jacobians = _check_analytical_jacobian_attributes( + inputs, outputs[output_idx], nondet_tol=float("inf"), check_grad_dtypes=False + ) + return jacobians[input_idx] + + +def _compute_analytical_jacobian_rows( + vjp_fn, sample_output +) -> List[List[Optional[torch.Tensor]]]: + # Computes Jacobian row-by-row by projecting `vjp_fn` = v^T J on standard basis + # vectors: vjp_fn(e) = e^T J is a corresponding row of the Jacobian. + # NB: this function does not assume vjp_fn(v) to return tensors with the same + # number of elements for different v. This is checked when we later combine the + # rows into a single tensor. + grad_out_base = torch.zeros_like( + sample_output, memory_format=torch.legacy_contiguous_format + ) + flat_grad_out = grad_out_base.view(-1) + # jacobians_rows[i][j] is the Jacobian jth row for the ith input + jacobians_rows: List[List[Optional[torch.Tensor]]] = [] + for j in range(flat_grad_out.numel()): + flat_grad_out.zero_() + flat_grad_out[j] = 1.0 # projection for jth row of Jacobian + grad_inputs = vjp_fn(grad_out_base) + for i, d_x in enumerate(grad_inputs): + if j == 0: + jacobians_rows.append([]) + jacobians_rows[i] += [ + d_x.clone() if isinstance(d_x, torch.Tensor) else None + ] + return jacobians_rows + + +def _get_analytical_vjps_wrt_specific_output( + vjp_fn, sample_output, v +) -> List[List[Optional[torch.Tensor]]]: + vjps: List[List[Optional[torch.Tensor]]] = [] + grad_inputs = vjp_fn(v.reshape(sample_output.shape)) + for vjp in grad_inputs: + vjps.append([vjp.clone() if isinstance(vjp, torch.Tensor) else None]) + return vjps + + +def _check_inputs(tupled_inputs) -> bool: + # Make sure that gradients are saved for at least one input + any_input_requiring_grad = False + for idx, inp in enumerate(tupled_inputs): + if is_tensor_like(inp) and inp.requires_grad: + if not (inp.dtype == torch.float64 or inp.dtype == torch.complex128): + warnings.warn( + f"Input #{idx} requires gradient and " + "is not a double precision floating point or complex. " + "This check will likely fail if all the inputs are " + "not of double precision floating point or complex. " + ) + if inp.is_sparse: + content = inp._values() + elif _is_sparse_compressed_tensor(inp): + content = inp.values() + else: + content = inp + # TODO: To cover more problematic cases, replace stride = 0 check with + # "any overlap in memory" once we have a proper function to check it. 
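# Editor's note (minimal sketch, not part of the patch): the row-by-row vjp
# construction performed by _compute_analytical_jacobian_rows above; projecting
# onto the standard basis vector e_j yields e_j^T J, the j-th Jacobian row.
import torch

x = torch.randn(3, dtype=torch.double, requires_grad=True)
y = x.exp()
rows = []
for j in range(y.numel()):
    e = torch.zeros_like(y)
    e.view(-1)[j] = 1.0
    (row,) = torch.autograd.grad(y, x, e, retain_graph=True)
    rows.append(row)
jac = torch.stack(rows)
assert torch.allclose(jac, torch.diag(x.detach().exp()))   # elementwise exp => diagonal J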
+ if content.layout is not torch._mkldnn: # type: ignore[attr-defined] + if not all( + st > 0 or sz <= 1 + for st, sz in zip(content.stride(), content.size()) + ): + raise RuntimeError( + f"The {idx}th input has a dimension with stride 0. gradcheck only " + "supports inputs that are non-overlapping to be able to " + "compute the numerical gradients correctly. You should call " + ".contiguous on the input before passing it to gradcheck." + ) + any_input_requiring_grad = True + + if not any_input_requiring_grad: + raise ValueError( + "gradcheck expects at least one input tensor to require gradient, " + "but none of the them have requires_grad=True." + ) + return True + + +def _check_outputs(outputs) -> None: + if any(_is_sparse_any_tensor(t) for t in outputs if isinstance(t, torch.Tensor)): + # it is easier to call to_dense() on the sparse output than + # to modify analytical jacobian + raise ValueError( + "Sparse output is not supported at gradcheck yet. " + "Please call to_dense(masked_grad=...) on the output of fn for gradcheck." + ) + if any(t.layout == torch._mkldnn for t in outputs if isinstance(t, torch.Tensor)): # type: ignore[attr-defined] + raise ValueError( + "MKLDNN output is not supported at gradcheck yet. " + "Please call to_dense(masked_grad=...) on the output of fn for gradcheck." + ) + + +def _check_no_differentiable_outputs( + func, inputs, func_out, eps, *, is_forward_ad +) -> bool: + # When there are no differentiable outputs, numerical gradient for a function is + # expected to be zero. + jacobians_all_inputs_outputs = _get_numerical_jacobian( + func, inputs, func_out, eps=eps, is_forward_ad=is_forward_ad + ) + for jacobians_all_outputs_and_fixed_input in jacobians_all_inputs_outputs: + for jacobian in jacobians_all_outputs_and_fixed_input: + if torch.ne(jacobian, 0).sum() > 0: + raise GradcheckError( + "Numerical gradient for function expected to be zero" + ) + return True + + +def _check_no_differentiable_outputs_fast( + func, func_out, all_inputs, inputs_indices, all_u, eps, nondet_tol +): + for inp_idx, u in zip(inputs_indices, all_u): + jvps = _get_numerical_jvp_wrt_specific_input(func, inp_idx, all_inputs, u, eps) + for jvp in jvps: + if jvp.numel() == 0: + continue + if (jvp - torch.zeros_like(jvp)).abs().max() > nondet_tol: + raise GradcheckError( + "Numerical gradient for function expected to be zero" + ) + return True + + +FAILED_BATCHED_GRAD_MSG = """ +gradcheck or gradgradcheck failed while testing batched gradient computation. +This could have been invoked in a number of ways (via a test that calls +gradcheck/gradgradcheck directly or via an autogenerated test). + +If you are adding a new operator, please file an issue and then use one of the +workarounds. The workaround depends on how your test invokes gradcheck/gradgradcheck. +If the test +- manually invokes gradcheck/gradgradcheck, then call gradcheck/gradgradcheck + with `check_batched_grad=False` as a keyword argument. +- is OpInfo-based (e.g., in test_ops_gradients.py), then modify the OpInfo for the test + to have `check_batched_grad=False` and/or `check_batched_gradgrad=False`. + +If you're modifying an existing operator that supports batched grad computation, +or wish to make a new operator work with batched grad computation, please read +the following. + +To compute batched grads (e.g., jacobians, hessians), we vmap over the backward +computation. The most common failure case is if there is a 'vmap-incompatible +operation' in the backward pass. 
Please see +NOTE: [How to write vmap-compatible backward formulas] +in the codebase for an explanation of how to fix this. +""".strip() + +FAILED_BATCHED_GRAD_MSG_FWD_AD = """ +gradcheck failed while testing batched gradient computation with forward-mode AD. +This test is enabled automatically when both `check_batched_grad=True` +and `check_forward_ad=True`, but can be disabled in the following ways +dependong on how the test was invoked (via a test that calls gradcheck +directly or via an autogenerated test). + +If you are adding a new operator, please file an issue and then use one of the +workarounds. The workaround depends on how your test invokes gradcheck/gradgradcheck. +If the test +- manually invokes gradcheck/gradgradcheck, then call gradcheck/gradgradcheck + with `check_batched_forward_grad=False` as a keyword argument. +- is OpInfo-based (e.g., in test_ops_gradients.py), then modify the OpInfo for the test + to have `check_batched_forward_grad=False` +""" + + +def _get_failed_batched_grad_test_msg( + output_idx, input_idx, res, exp, is_forward_ad=False +): + return f""" +For output {output_idx} and input {input_idx}: + +{FAILED_BATCHED_GRAD_MSG_FWD_AD if is_forward_ad else FAILED_BATCHED_GRAD_MSG} + +Got: +{res} + +Expected: +{exp} +""".strip() + + +def _test_batched_grad_forward_ad(func, inputs) -> bool: + fwAD = torch.autograd.forward_ad # To avoid early import issues (do we need this?) + assert isinstance(inputs, tuple) + + for input_idx, current_input in enumerate(inputs): + if not (is_tensor_like(current_input) and current_input.requires_grad): + continue + + def jvp(tangent: torch.Tensor): + with fwAD.dual_level(): + dual = fwAD.make_dual(current_input.detach(), tangent) + inputs_with_dual = tuple( + dual + if idx == input_idx + else (inp.detach() if is_tensor_like(inp) else inp) + for idx, inp in enumerate(inputs) + ) + dual_outputs = _as_tuple(func(*inputs_with_dual)) + ret = [] + for dual_output in dual_outputs: + if dual_output is None: + continue + primal_out, tangent_out = fwAD.unpack_dual(dual_output) + if tangent_out is not None: + ret.append(tangent_out) + else: + ret.append( + torch.zeros( + [], dtype=primal_out.dtype, device=primal_out.device + ).expand(primal_out.shape) + ) + return tuple(ret) + + if not _is_float_or_complex_tensor(current_input): + continue + + tangents = [torch.randn_like(current_input) for _ in range(2)] + expected = [jvp(t) for t in tangents] + expected = [torch.stack(shards) for shards in zip(*expected)] + + try: + result = _vmap(jvp)(torch.stack(tangents)) + except RuntimeError as ex: + # Rethrow to provide a better error message + raise GradcheckError( + f"While computing batched gradients, got: {ex}\n\n{FAILED_BATCHED_GRAD_MSG_FWD_AD}" + ) from ex + + for input_idx, (res, exp) in enumerate(zip(result, expected)): + if torch.allclose(res, exp): + continue + raise GradcheckError( + _get_failed_batched_grad_test_msg( + input_idx, input_idx, res, exp, is_forward_ad=True + ) + ) + return True + + +def _test_batched_grad(input, output, output_idx) -> bool: + # NB: _test_batched_grad compares two autograd.grad invocations with a single + # vmap(autograd.grad) invocation. 
It's not exactly a "gradcheck" in the + # sense that we're not comparing an analytical jacobian with a numeric one, + # but it is morally similar (we could have computed a full analytic jac + # via vmap, but that is potentially slow) + diff_input_list = list(_iter_tensors(input, True)) + grad = functools.partial( + torch.autograd.grad, + output, + diff_input_list, + retain_graph=True, + allow_unused=True, + ) + + def vjp(v): + results = grad(v) + results = tuple( + grad + if grad is not None + else torch.zeros([], dtype=inp.dtype, device=inp.device).expand(inp.shape) + for grad, inp in zip(results, diff_input_list) + ) + return results + + grad_outputs = [torch.randn_like(output) for _ in range(2)] + + expected = [vjp(gO) for gO in grad_outputs] + expected = [torch.stack(shards) for shards in zip(*expected)] + + # Squash warnings since these are expected to happen in most cases + # NB: this doesn't work for CUDA tests: https://github.com/pytorch/pytorch/issues/50209 + with warnings.catch_warnings(): + warnings.filterwarnings("ignore", message="There is a performance drop") + warnings.filterwarnings("ignore", message="Please use torch.vmap") + try: + result = vmap(vjp)(torch.stack(grad_outputs)) + except RuntimeError as ex: + # It's OK that we're not raising the error at the correct callsite. + # That's because the callsite is always going to inside the Python + # autograd.grad instead of the C++ traceback of what line in the + # backward formula + raise GradcheckError( + f"While computing batched gradients, got: {ex}\n\n{FAILED_BATCHED_GRAD_MSG}" + ) from ex + + for input_idx, (res, exp) in enumerate(zip(result, expected)): + if torch.allclose(res, exp): + continue + raise GradcheckError( + _get_failed_batched_grad_test_msg(output_idx, input_idx, res, exp) + ) + return True + + +def _test_backward_mul_by_grad_output(outputs, inputs, masked) -> bool: + # Tests that backward is multiplied by grad_output + diff_input_list: List[torch.Tensor] = list(_iter_tensors(inputs, True)) + if not diff_input_list: + raise GradcheckError("no Tensors requiring grad found in input") + grads_input = torch.autograd.grad( + outputs, + diff_input_list, + [ + torch.zeros_like(o, memory_format=torch.legacy_contiguous_format) + for o in outputs + ], + allow_unused=True, + ) + for gi, di in zip(grads_input, diff_input_list): + if gi is None: + continue + if isinstance(gi, torch.Tensor) and gi.layout != torch.strided: + if gi.layout != di.layout: + raise GradcheckError( + "grad is incorrect layout (" + + str(gi.layout) + + " is not " + + str(di.layout) + + ")" + ) + if _is_sparse_any_tensor(gi): + sparse_kind = str(gi.layout).replace("torch.", "").replace("_coo", "") + if gi.sparse_dim() != di.sparse_dim(): + raise GradcheckError( + f"grad is {sparse_kind} tensor, but has incorrect sparse_dim" + f" {gi.sparse_dim()}, expected {di.sparse_dim()}" + ) + if gi.dense_dim() != di.dense_dim(): + raise GradcheckError( + f"grad is {sparse_kind} tensor, but has incorrect dense_dim" + f" {gi.dense_dim()}, expected {di.dense_dim()}" + ) + gi = gi.to_dense() + di = di.to_dense() + if masked: + if not torch.allclose(gi, torch.zeros_like(gi)): + raise GradcheckError("backward not multiplied by grad_output") + elif not gi.eq(0).all(): + raise GradcheckError("backward not multiplied by grad_output") + if gi.dtype != di.dtype: + raise GradcheckError("grad is incorrect type") + if gi.device != di.device: + raise GradcheckError("grad is incorrect device") + if gi.size() != di.size(): + raise GradcheckError("grad is incorrect size") + 
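# Editor's note (sketch, not part of the patch): the consistency property that
# _test_batched_grad above verifies, written with the public torch.func API
# rather than the private _vmap helpers: a vmapped vjp over stacked
# grad_outputs should match per-sample vjp calls.
import torch
from torch.func import vjp, vmap

x = torch.randn(4, dtype=torch.double)
y, vjp_fn = vjp(torch.sin, x)
grad_outputs = torch.stack([torch.randn_like(y) for _ in range(2)])

batched = vmap(vjp_fn)(grad_outputs)[0]                     # one batched call
looped = torch.stack([vjp_fn(g)[0] for g in grad_outputs])  # reference loop
assert torch.allclose(batched, looped)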
return True + + +def _test_undefined_forward_mode(func, outputs, inputs): + fwAD = torch.autograd.forward_ad + + inp_tensors_idx, inp_tensors = _get_inp_tensors(inputs) + all_v, all_u, all_u_dense = _make_vectors(inp_tensors, outputs, use_forward_ad=True) + + tensor_inputs = tuple(i for i in inputs if is_tensor_like(i) and i.requires_grad) + + with fwAD.dual_level(): + fw_grads = [] + dual_inputs = [] + tensor_indices = set() + for i, inp in enumerate(inputs): + if is_tensor_like(inp) and inp.requires_grad: + if inp.layout == torch._mkldnn: # type: ignore[attr-defined] + raise ValueError( + "MKLDNN inputs are not support for forward AD gradcheck." + ) + + inp = fwAD.make_dual(inp.detach(), torch.zeros_like(inp)) + # If inp is a differentiable view, the dual might not be the tangent given to + # make_dual, so read it explicitly from the dual tensor + fw_grads.append(fwAD.unpack_dual(inp)[1]) + tensor_indices.add(i) + dual_inputs.append(inp) + + for i, (fw_grad, u) in enumerate(zip(fw_grads, all_u)): + fw_grad.copy_(u.view_as(fw_grad)) + + for idx, inp in enumerate(inputs): + if idx not in tensor_indices: + continue + dual_inp_obj = dual_inputs[idx] + + # case 1 (Materialized Zero Tensor Tangent) + dual_inputs[idx] = fwAD.make_dual(inp.detach(), torch.zeros_like(inp)) + raw_outputs = _as_tuple(func(*dual_inputs)) + dual_outputs1 = filter(_is_float_or_complex_tensor, raw_outputs) + + # case 2 (Efficient Zero Tensor Tangent since we don't make a dual object and pass a regular tensor) + dual_inputs[idx] = inp.detach() + raw_outputs = _as_tuple(func(*dual_inputs)) + dual_outputs2 = filter(_is_float_or_complex_tensor, raw_outputs) + + # reset + dual_inputs[idx] = dual_inp_obj + + for index_o, (d_o1, d_o2) in enumerate(zip(dual_outputs1, dual_outputs2)): + val1, res1 = fwAD.unpack_dual(d_o1) + val2, res2 = fwAD.unpack_dual(d_o2) + + if not (res1 is None or res2 is None): + if not torch.allclose(res1, res2): + raise GradcheckError( + "Mismatch in tangent values for output with index: ", + index_o, + " when input: ", + inp, + " has an undefined tangent value. ", + " Got: ", + res1, + " but expected: ", + res2, + ) + return True + + +def _test_undefined_backward_mode(func, outputs, inputs) -> bool: + diff_input_list: List[torch.Tensor] = list(_iter_tensors(inputs, True)) + if not diff_input_list: + raise GradcheckError("no Tensors requiring grad found in input") + + def warn_bc_breaking(): + warnings.warn( + "Backwards compatibility: New undefined gradient support checking " + "feature is enabled by default, but it may break existing callers " + "of this function. If this is true for you, you can call this " + 'function with "check_undefined_grad=False" to disable the feature' + ) + + def check_undefined_grad_support(output_to_check): + grads_output = [ + torch.zeros_like(o, memory_format=torch.legacy_contiguous_format) + for o in output_to_check + ] + try: + grads_input = torch.autograd.grad( + output_to_check, diff_input_list, grads_output, allow_unused=True + ) + except RuntimeError as e: + warn_bc_breaking() + raise GradcheckError( + "Expected backward function to handle undefined output grads. " + 'Please look at "Notes about undefined output gradients" in ' + '"tools/autograd/derivatives.yaml"' + ) from e + + for gi, i in zip(grads_input, diff_input_list): + if (gi is not None) and (not gi.eq(0).all()): + warn_bc_breaking() + raise GradcheckError( + "Expected all input grads to be undefined or zero when all output grads are undefined " + 'or zero. 
Please look at "Notes about undefined output gradients" in ' + '"tools/autograd/derivatives.yaml"' + ) + return True + + # All backward functions must work properly if all output grads are undefined + outputs_to_check = [ + [ + torch._C._functions.UndefinedGrad()(o) + for o in _differentiable_outputs(func(*inputs)) + # This check filters out Tensor-likes that aren't instances of Tensor. + if isinstance(o, torch.Tensor) + ] + ] + + # If there are multiple output grads, we should be able to undef one at a time without error + if len(outputs_to_check[0]) > 1: + for undef_grad_idx in range(len(outputs)): + output_to_check = _differentiable_outputs(func(*inputs)) + outputs_to_check.append( + [ + torch._C._functions.UndefinedGrad()(o) + if idx == undef_grad_idx + else o + for idx, o in enumerate(output_to_check) + ] + ) + + return all(check_undefined_grad_support(output) for output in outputs_to_check) + + +def _as_tuple(x): + if isinstance(x, tuple): + return x + elif isinstance(x, list): + return tuple(x) + else: + return (x,) + + +def _differentiable_outputs(x): + return tuple(o for o in _as_tuple(x) if o.requires_grad) + + +def _get_notallclose_msg( + analytical, + numerical, + output_idx, + input_idx, + complex_indices, + test_imag=False, + is_forward_ad=False, +) -> str: + out_is_complex = ( + (not is_forward_ad) and complex_indices and output_idx in complex_indices + ) + inp_is_complex = is_forward_ad and complex_indices and input_idx in complex_indices + part = "imaginary" if test_imag else "real" + element = "inputs" if is_forward_ad else "outputs" + prefix = ( + "" + if not (out_is_complex or inp_is_complex) + else f"While considering the {part} part of complex {element} only, " + ) + mode = "computed with forward mode " if is_forward_ad else "" + return ( + prefix + "Jacobian %smismatch for output %d with respect to input %d,\n" + "numerical:%s\nanalytical:%s\n" + % (mode, output_idx, input_idx, numerical, analytical) + ) + + +def _transpose(matrix_of_tensors): + # returns list of tuples + return list(zip(*matrix_of_tensors)) + + +def _real_and_imag_output(fn): + # returns new functions real(fn), and imag(fn) where real(fn) and imag(fn) behave the same as + # the original fn, except torch.real or torch.imag are applied to the complex outputs + def apply_to_c_outs(fn, fn_to_apply): + def wrapped_fn(*inputs): + outs = _as_tuple(fn(*inputs)) + return tuple(fn_to_apply(o) if o.is_complex() else o for o in outs) + + return wrapped_fn + + return apply_to_c_outs(fn, torch.real), apply_to_c_outs(fn, torch.imag) + + +def _real_and_imag_input(fn, complex_inp_indices, tupled_inputs): + # returns new functions that take real inputs instead of complex inputs as + # (x, y) -> fn(x + y * 1j). And it computes: inp -> fn(inp + y * 1j) and inp -> fn(x + inp * 1j). + # In each case, the other part is considered constant. + # We do not use 0 for the constant here to make sure we always call the user function with a valid input. 
+ def apply_to_c_inps(fn, fn_to_apply): + def wrapped_fn(*inputs): + new_inputs = list(inputs) + for should_be_complex in complex_inp_indices: + new_inputs[should_be_complex] = fn_to_apply( + new_inputs[should_be_complex], tupled_inputs[should_be_complex] + ) + return _as_tuple(fn(*new_inputs)) + + return wrapped_fn + + real_fn = apply_to_c_inps(fn, lambda inp, orig: inp + orig.imag * 1j) + imag_fn = apply_to_c_inps(fn, lambda inp, orig: orig.real + inp * 1j) + return real_fn, imag_fn + + +def _gradcheck_real_imag( + gradcheck_fn, + func, + func_out, + tupled_inputs, + outputs, + eps, + rtol, + atol, + check_grad_dtypes, + check_forward_ad, + check_backward_ad, + nondet_tol, + check_undefined_grad, +): + complex_out_indices = [i for i, o in enumerate(outputs) if o.is_complex()] + has_any_complex_output = any(o.is_complex() for o in _as_tuple(func_out)) + if check_backward_ad: + if has_any_complex_output: + real_fn, imag_fn = _real_and_imag_output(func) + + imag_func_out = imag_fn(*tupled_inputs) + imag_outputs = _differentiable_outputs(imag_func_out) + gradcheck_fn( + imag_fn, + imag_func_out, + tupled_inputs, + imag_outputs, + eps, + rtol, + atol, + check_grad_dtypes, + nondet_tol, + complex_indices=complex_out_indices, + test_imag=True, + ) + + real_func_out = real_fn(*tupled_inputs) + real_outputs = _differentiable_outputs(real_func_out) + gradcheck_fn( + real_fn, + real_func_out, + tupled_inputs, + real_outputs, + eps, + rtol, + atol, + check_grad_dtypes, + nondet_tol, + complex_indices=complex_out_indices, + ) + else: + gradcheck_fn( + func, + func_out, + tupled_inputs, + outputs, + eps, + rtol, + atol, + check_grad_dtypes, + nondet_tol, + ) + + if check_forward_ad: + complex_inp_indices = [ + i + for i, inp in enumerate(tupled_inputs) + if is_tensor_like(inp) and inp.is_complex() + ] + if complex_inp_indices: + real_fn, imag_fn = _real_and_imag_input( + func, complex_inp_indices, tupled_inputs + ) + + imag_inputs = [ + inp.imag if is_tensor_like(inp) and inp.is_complex() else inp + for inp in tupled_inputs + ] + imag_func_out = imag_fn(*imag_inputs) + diff_imag_func_out = _differentiable_outputs(imag_func_out) + gradcheck_fn( + imag_fn, + imag_func_out, + imag_inputs, + diff_imag_func_out, + eps, + rtol, + atol, + check_grad_dtypes, + nondet_tol, + complex_indices=complex_inp_indices, + test_imag=True, + use_forward_ad=True, + ) + + real_inputs = [ + inp.real if is_tensor_like(inp) and inp.is_complex() else inp + for inp in tupled_inputs + ] + real_func_out = real_fn(*real_inputs) + diff_real_func_out = _differentiable_outputs(real_func_out) + gradcheck_fn( + real_fn, + real_func_out, + real_inputs, + diff_real_func_out, + eps, + rtol, + atol, + check_grad_dtypes, + nondet_tol, + complex_indices=complex_inp_indices, + use_forward_ad=True, + ) + if check_undefined_grad: + _test_undefined_forward_mode(imag_fn, imag_func_out, imag_inputs) + _test_undefined_forward_mode(real_fn, real_func_out, real_inputs) + else: + gradcheck_fn( + func, + func_out, + tupled_inputs, + outputs, + eps, + rtol, + atol, + check_grad_dtypes, + nondet_tol, + use_forward_ad=True, + ) + if check_undefined_grad: + _test_undefined_forward_mode(func, outputs, tupled_inputs) + + +def _slow_gradcheck( + func, + func_out, + tupled_inputs, + outputs, + eps, + rtol, + atol, + check_grad_dtypes, + nondet_tol, + *, + use_forward_ad=False, + complex_indices=None, + test_imag=False, + masked=False, +): + func_out = _as_tuple(func_out) + if not outputs: + return _check_no_differentiable_outputs( + func, tupled_inputs, 
func_out, eps=eps, is_forward_ad=use_forward_ad + ) + tupled_inputs_numerical = tupled_inputs if masked else _densify(tupled_inputs) + + numerical = _transpose( + _get_numerical_jacobian( + func, + tupled_inputs_numerical, + func_out, + eps=eps, + is_forward_ad=use_forward_ad, + ) + ) + # Note: [numerical vs analytical output length] + # The numerical path returns jacobian quantity for all outputs, even if requires_grad of that + # output is False. This behavior is necessary for _check_no_differentiable_outputs to work. + numerical = [nj for o, nj in zip(func_out, numerical) if o.requires_grad] + if use_forward_ad: + analytical_forward = _get_analytical_jacobian_forward_ad( + func, tupled_inputs, func_out, check_grad_dtypes=check_grad_dtypes + ) + + for i, n_per_out in enumerate(numerical): + for j, n in enumerate(n_per_out): + a = analytical_forward[j][i] + if not _allclose_with_type_promotion(a, n.to(a.device), rtol, atol): + raise GradcheckError( + _get_notallclose_msg( + a, n, i, j, complex_indices, test_imag, is_forward_ad=True + ) + ) + else: + for i, o in enumerate(outputs): + analytical = _check_analytical_jacobian_attributes( + tupled_inputs, o, nondet_tol, check_grad_dtypes + ) + + for j, (a, n) in enumerate(zip(analytical, numerical[i])): + if not _allclose_with_type_promotion(a, n.to(a.device), rtol, atol): + raise GradcheckError( + _get_notallclose_msg(a, n, i, j, complex_indices, test_imag) + ) + + return True + + +def _dot_with_type_promotion(u, v): + assert u.dim() == 1 and v.dim() == 1 + return (u * v).sum() + + +def _allclose_with_type_promotion(a, b, rtol, atol): + promoted_type = torch.promote_types(a.dtype, b.dtype) + a = a.to(dtype=promoted_type) + b = b.to(dtype=promoted_type) + return torch.allclose(a, b, rtol, atol) + + +def _to_real_dtype(dtype): + if dtype == torch.complex128: + return torch.float64 + elif dtype == torch.complex64: + return torch.float32 + else: + return dtype + + +def _vec_from_tensor(x, generator, downcast_complex=False): + # Create a random vector with the same number of elements as x and the same + # dtype/device. If x is complex and downcast_complex is False, we create a + # complex tensor with only real component. + if x.layout == torch.sparse_coo: + # For sparse, create a random sparse vec with random values in the same + # indices. Make sure size is set so that it isn't inferred to be smaller. 
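# Illustrative sketch of the promotion step used by _allclose_with_type_promotion
# above: both tensors are cast to a common dtype before torch.allclose, so a real
# numerical jacobian can be compared against a complex analytical one. The example
# values are arbitrary.
import torch

a = torch.tensor([1.0], dtype=torch.float64)
b = torch.tensor([1.0 + 0.0j], dtype=torch.complex128)

common = torch.promote_types(a.dtype, b.dtype)  # torch.complex128
print(torch.allclose(a.to(common), b.to(common), rtol=1e-3, atol=1e-5))  # True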
+ x_values = x._values() + dtype = _to_real_dtype(x.dtype) if downcast_complex else x.dtype + values = ( + torch.rand(x_values.numel(), generator=generator) + .to(dtype=dtype, device=x.device) + .view(x_values.shape) + ) + values /= values.norm() + vec = torch.sparse_coo_tensor(x._indices(), values, x.size(), device=x.device) + elif _is_sparse_compressed_tensor(x): + if x.layout in {torch.sparse_csr, torch.sparse_bsr}: + compressed_indices, plain_indices = x.crow_indices(), x.col_indices() + else: + compressed_indices, plain_indices = x.ccol_indices(), x.row_indices() + x_values = x.values() + dtype = _to_real_dtype(x.dtype) if downcast_complex else x.dtype + values = ( + torch.rand(x_values.numel(), generator=generator) + .to(dtype=dtype, device=x.device) + .view(x_values.shape) + ) + values /= values.norm() + vec = torch.sparse_compressed_tensor( + compressed_indices, + plain_indices, + values, + x.size(), + layout=x.layout, + device=x.device, + ) + else: + dtype = _to_real_dtype(x.dtype) if downcast_complex else x.dtype + vec = torch.rand(x.numel(), generator=generator).to( + dtype=dtype, device=x.device + ) + vec /= vec.norm() + return vec + + +def _get_inp_tensors(tupled_inputs): + inp_idx_tup = [ + (i, t) + for i, t in enumerate(tupled_inputs) + if is_tensor_like(t) and t.requires_grad + ] + return [tup[0] for tup in inp_idx_tup], [tup[1] for tup in inp_idx_tup] + + +def _adjusted_atol(atol, u, v): + # In slow gradcheck, we compare A and B element-wise, i.e., for some a, b we + # allow: |a - b| < atol + rtol * b. But since we now compare q1 = v^T A u and + # q2 = v^T B u, we must allow |q1 - q2| < v^T E u + rtol * v^T B u, where E is + # the correctly sized matrix in which each entry is atol. + # + # We see that atol needs to be scaled by v^T M u (where M is an all-ones M x N + # matrix): v^T M u = \sum_{i} \sum_{j} u_i * v_j = (\sum_{i} u_i)(\sum_{i} v_i) + # TODO: properly handle case when u is tuple instead of only taking first element + u = u[0] if isinstance(u, tuple) else u + sum_u = u.sum() + sum_v = 1.0 if v is None else v.sum() + return atol * float(sum_u) * float(sum_v) + + +FAST_FAIL_SLOW_OK_MSG = """ +Fast gradcheck failed but element-wise differences are small. This means that the +test might've passed in slow_mode! + +If you are adding a new operator, please file an issue and then use one of the +workarounds. The workaround depends on how your test invokes gradcheck/gradgradcheck: + +If the test +- manually invokes gradcheck/gradgradcheck, then call gradcheck/gradgradcheck + with `fast_mode=False` as a keyword argument. 
+- is OpInfo-based (e.g., in test_ops_gradients.py), then modify the OpInfo for the test + to have `gradcheck_fast_mode=False` +- is a Module test (e.g., in common_nn.py), then modify the corresponding + module_test entry to have `gradcheck_fast_mode=False` +""".strip() + + +def _run_slow_mode_and_get_error( + func, tupled_inputs, outputs, input_idx, output_idx, rtol, atol, eps, is_forward_ad +): + # Compute jacobians in slow mode for better error message + slow_numerical = _get_numerical_jacobian( + func, tupled_inputs, outputs, eps=eps, is_forward_ad=is_forward_ad + )[input_idx][output_idx] + if is_forward_ad: + + def new_fn(inp): + new_inputs = list(tupled_inputs) + new_inputs[input_idx] = inp + return _as_tuple(func(*new_inputs))[output_idx] + + slow_analytical = _get_analytical_jacobian_forward_ad( + new_fn, (tupled_inputs[input_idx],), (outputs[output_idx],) + )[0][0] + else: + slow_analytical = _get_analytical_jacobian( + tupled_inputs, outputs, input_idx, output_idx + ) + + # Assume jacobians are non-empty and have the same shape + slow_max_diff = (slow_numerical - slow_analytical).abs().max() + + slow_allclose = torch.allclose(slow_analytical, slow_numerical, rtol, atol) + msg = ( + "\nThe above quantities relating the numerical and analytical jacobians are computed \n" + "in fast mode. See: https://github.com/pytorch/pytorch/issues/53876 for more background \n" + "about fast mode. Below, we recompute numerical and analytical jacobians in slow mode:\n\n" + f"Numerical:\n {slow_numerical}\n" + f"Analytical:\n{slow_analytical}\n\n" + f"The max per-element difference (slow mode) is: {slow_max_diff}.\n" + ) + if slow_allclose: + # Slow gradcheck would've passed! + msg += FAST_FAIL_SLOW_OK_MSG + return msg + + +def _to_flat_dense_if_sparse(tensor): + if _is_sparse_any_tensor(tensor): + return tensor.to_dense().reshape(-1) + else: + return tensor + + +def _make_vectors(inp_tensors, outputs, *, use_forward_ad): + # Use our own generator to avoid messing with the user's RNG state + g_cpu = torch.Generator() + + def _vec_from_tensor_cpu(*args): + # Default allocate all tensors on CPU, so they are on the same device as the generator + # even if the user specified a default device + with torch.device("cpu"): + return _vec_from_tensor(*args) + + all_u = [] + all_u_dense = [] + for inp in inp_tensors: + ur = _vec_from_tensor_cpu(inp, g_cpu, True) + ur_dense = _to_flat_dense_if_sparse(ur) + if inp.is_complex(): + ui = _vec_from_tensor_cpu(inp, g_cpu, True) + all_u.append((ur, ui)) + ui_dense = _to_flat_dense_if_sparse(ui) + all_u_dense.append((ur_dense, ui_dense)) + else: + all_u.append(ur) + all_u_dense.append(ur_dense) + all_v = ( + None + if use_forward_ad + else [_vec_from_tensor_cpu(out, g_cpu) for out in outputs] + ) + return all_v, all_u, all_u_dense + + +def _check_analytical_numerical_equal( + all_analytical, + all_numerical, + complex_indices, + tupled_inputs, + outputs, + func, + all_v, + all_u, + rtol, + atol, + eps, + test_imag, + *, + is_forward_ad=False, +): + for i, all_numerical_for_input_i in enumerate(all_numerical): + for j, n in enumerate(all_numerical_for_input_i): + # Forward AD generates the transpose of what this function expects + if is_forward_ad: + a = all_analytical[i][j] + else: + a = all_analytical[j][i] + n = n.to(device=a.device) + updated_atol = _adjusted_atol(atol, all_u[i], all_v[j] if all_v else None) + if not _allclose_with_type_promotion(a, n.to(a.device), rtol, updated_atol): + jacobians_str = _run_slow_mode_and_get_error( + func, tupled_inputs, outputs, 
i, j, rtol, atol, eps, is_forward_ad + ) + raise GradcheckError( + _get_notallclose_msg( + a, n, j, i, complex_indices, test_imag, is_forward_ad + ) + + jacobians_str + ) + + +def _fast_gradcheck( + func, + func_out, + inputs, + outputs, + eps, + rtol, + atol, + check_grad_dtypes, + nondet_tol, + *, + use_forward_ad=False, + complex_indices=None, + test_imag=False, + masked=False, +): + # See https://github.com/pytorch/pytorch/issues/53876 for details + inp_tensors_idx, inp_tensors = _get_inp_tensors(inputs) + # Backward mode computes v^T * J (VJP) + # Since we computed J * u (JVP) through finite difference method, we perform an equality check + # between VJP * u, v * JVP + # ---- + # Forward mode computes J * u (JVP) + # Since we already compute JVP through finite difference method, + # we don't need v for correctness check here as asserted below + all_v, all_u, all_u_dense = _make_vectors( + inp_tensors, outputs, use_forward_ad=use_forward_ad + ) + + inputs_numerical, all_u_numerical, all_v_numerical = ( + (inputs, all_u, all_v) if masked else _densify((inputs, all_u, all_v)) + ) + + numerical_vJu = _get_numerical_vJu( + func, + inputs_numerical, + inp_tensors_idx, + func_out, + all_u_numerical, + all_v_numerical, + eps, + is_forward_ad=use_forward_ad, + ) + # TODO: replicate https://github.com/pytorch/pytorch/pull/77743 for fast gradcheck as well + if use_forward_ad: + assert all_v is None + analytical_vJu = _get_analytical_jacobian_forward_ad( + func, + inputs, + _as_tuple(func_out), + all_u=all_u, + check_grad_dtypes=check_grad_dtypes, + ) + else: + if not outputs: + _check_no_differentiable_outputs_fast( + func, func_out, inputs, inp_tensors_idx, all_u, eps, nondet_tol + ) + + analytical_vJu = _get_analytical_vJu_backward_mode( + inputs, outputs, nondet_tol, check_grad_dtypes, all_v, all_u_dense + ) + + _check_analytical_numerical_equal( + analytical_vJu, + numerical_vJu, + complex_indices, + inputs, + outputs, + func, + all_v, + all_u, + rtol, + atol, + eps, + test_imag, + is_forward_ad=use_forward_ad, + ) + + return True + + +# Note [VarArg of Tensors] +# ~~~~~~~~~~~~~~~~~~~~~~~~ +# 'func' accepts a vararg of tensors, which isn't expressable in the type system at the moment. +# If https://mypy.readthedocs.io/en/latest/additional_features.html?highlight=callable#extended-callable-types is accepted, +# the '...' first argument of Callable can be replaced with VarArg(Tensor). +# For now, we permit any input. +def gradcheck( + func: Callable[..., Union[_TensorOrTensors]], # See Note [VarArg of Tensors] + inputs: _TensorOrTensors, + *, + eps: float = 1e-6, + atol: float = 1e-5, + rtol: float = 1e-3, + raise_exception: bool = True, + nondet_tol: float = 0.0, + check_undefined_grad: bool = True, + check_grad_dtypes: bool = False, + check_batched_grad: bool = False, + check_batched_forward_grad: bool = False, + check_forward_ad: bool = False, + check_backward_ad: bool = True, + fast_mode: bool = False, + masked: Optional[bool] = None, +) -> bool: # noqa: D400,D205 + r"""Check gradients computed via small finite differences against analytical + gradients wrt tensors in :attr:`inputs` that are of floating point or complex type + and with ``requires_grad=True``. + + The check between numerical and analytical gradients uses :func:`~torch.allclose`. + + For most of the complex functions we consider for optimization purposes, no notion of + Jacobian exists. Instead, gradcheck verifies if the numerical and analytical values of + the Wirtinger and Conjugate Wirtinger derivatives are consistent. 
Because the gradient + computation is done under the assumption that the overall function has a real-valued + output, we treat functions with complex output in a special way. For these functions, + gradcheck is applied to two real-valued functions corresponding to taking the real + components of the complex outputs for the first, and taking the imaginary components + of the complex outputs for the second. For more details, check out + :ref:`complex_autograd-doc`. + + .. note:: + The default values are designed for :attr:`input` of double precision. + This check will likely fail if :attr:`input` is of less precision, e.g., + ``FloatTensor``. + + .. note:: + Gradcheck may fail when evaluated on non-differentiable points + because the numerically computed gradients via finite differencing may differ + those computed analytically (not necessarily because either is incorrect). + For more context, see :ref:`non-differentiable-func-grad`. + + .. warning:: + If any checked tensor in :attr:`input` has overlapping memory, i.e., + different indices pointing to the same memory address (e.g., from + :func:`torch.expand`), this check will likely fail because the numerical + gradients computed by point perturbation at such indices will change + values at all other indices that share the same memory address. + + Args: + func (function): a Python function that takes Tensor inputs and returns + a Tensor or a tuple of Tensors + inputs (tuple of Tensor or Tensor): inputs to the function + eps (float, optional): perturbation for finite differences + atol (float, optional): absolute tolerance + rtol (float, optional): relative tolerance + raise_exception (bool, optional): indicating whether to raise an exception if + the check fails. The exception gives more information about the + exact nature of the failure. This is helpful when debugging gradchecks. + nondet_tol (float, optional): tolerance for non-determinism. When running + identical inputs through the differentiation, the results must either match + exactly (default, 0.0) or be within this tolerance. + check_undefined_grad (bool, optional): if ``True``, check if undefined output grads + are supported and treated as zeros, for ``Tensor`` outputs. + check_batched_grad (bool, optional): if ``True``, check if we can compute + batched gradients using prototype vmap support. Defaults to False. + check_batched_forward_grad (bool, optional): if ``True``, checks if we can compute + batched forward gradients using forward ad and prototype vmap support. Defaults to ``False``. + check_forward_ad (bool, optional): if ``True``, check that the gradients computed with forward + mode AD match the numerical ones. Defaults to ``False``. + check_backward_ad (bool, optional): if ``False``, do not perform any checks that rely on + backward mode AD to be implemented. Defaults to ``True``. + fast_mode (bool, optional): Fast mode for gradcheck and gradgradcheck is currently only + implemented for R to R functions. If none of the inputs and outputs are complex + a faster implementation of gradcheck that no longer computes the entire jacobian + is run; otherwise, we fall back to the slow implementation. + masked (bool, optional): if ``True``, the gradients of unspecified elements of + sparse tensors are ignored. Defaults to ``False``. 
+ Returns: + ``True`` if all differences satisfy allclose condition + + """ + assert ( + check_forward_ad or check_backward_ad + ), "Expected at least one of check_forward_ad or check_backward_ad to be True" + assert not ( + check_batched_grad and not check_backward_ad + ), "Setting check_batched_grad=True requires check_backward_ad to be True" + assert not ( + check_batched_forward_grad and not check_forward_ad + ), "Setting check_batched_forward_grad=True requires check_forward_ad to be True" + args = locals().copy() + args.pop("raise_exception") + if not raise_exception: + try: + return _gradcheck_helper(**args) + except GradcheckError as e: + return False + else: + return _gradcheck_helper(**args) + + +def _gradcheck_helper( + func, + inputs, + eps, + atol, + rtol, + nondet_tol, + check_undefined_grad, + check_grad_dtypes, + check_batched_grad, + check_batched_forward_grad, + check_forward_ad, + check_backward_ad, + fast_mode, + masked, +): + tupled_inputs = _as_tuple(inputs) + _check_inputs(tupled_inputs) + + func_out = func(*tupled_inputs) + outputs = _differentiable_outputs(func_out) + _check_outputs(outputs) + + gradcheck_fn = functools.partial( + _fast_gradcheck if fast_mode else _slow_gradcheck, masked=masked + ) + _gradcheck_real_imag( + gradcheck_fn, + func, + func_out, + tupled_inputs, + outputs, + eps, + rtol, + atol, + check_grad_dtypes, + check_forward_ad=check_forward_ad, + check_backward_ad=check_backward_ad, + nondet_tol=nondet_tol, + check_undefined_grad=check_undefined_grad, + ) + + if check_batched_forward_grad: + _test_batched_grad_forward_ad(func, tupled_inputs) + + # Short circuit because remaining tests rely on backward AD to be implemented + if not check_backward_ad: + return True + + for i, o in enumerate(outputs): + if check_batched_grad: + _test_batched_grad(tupled_inputs, o, i) + + _test_backward_mul_by_grad_output(outputs, tupled_inputs, masked) + + if check_undefined_grad and check_backward_ad: + _test_undefined_backward_mode(func, outputs, tupled_inputs) + return True + + +def gradgradcheck( + func: Callable[..., _TensorOrTensors], # See Note [VarArg of Tensors] + inputs: _TensorOrTensors, + grad_outputs: Optional[_TensorOrTensors] = None, + *, + eps: float = 1e-6, + atol: float = 1e-5, + rtol: float = 1e-3, + gen_non_contig_grad_outputs: bool = False, + raise_exception: bool = True, + nondet_tol: float = 0.0, + check_undefined_grad: bool = True, + check_grad_dtypes: bool = False, + check_batched_grad: bool = False, + check_fwd_over_rev: bool = False, + check_rev_over_rev: bool = True, + fast_mode: bool = False, + masked: bool = False, +) -> bool: # noqa: D400,D205 + r"""Check gradients of gradients computed via small finite differences + against analytical gradients wrt tensors in :attr:`inputs` and + :attr:`grad_outputs` that are of floating point or complex type and with + ``requires_grad=True``. + + This function checks that backpropagating through the gradients computed + to the given :attr:`grad_outputs` are correct. + + The check between numerical and analytical gradients uses :func:`~torch.allclose`. + + .. note:: + The default values are designed for :attr:`input` and + :attr:`grad_outputs` of double precision. This check will likely fail if + they are of less precision, e.g., ``FloatTensor``. + + .. 
warning:: + If any checked tensor in :attr:`input` and :attr:`grad_outputs` has + overlapping memory, i.e., different indices pointing to the same memory + address (e.g., from :func:`torch.expand`), this check will likely fail + because the numerical gradients computed by point perturbation at such + indices will change values at all other indices that share the same + memory address. + + Args: + func (function): a Python function that takes Tensor inputs and returns + a Tensor or a tuple of Tensors + inputs (tuple of Tensor or Tensor): inputs to the function + grad_outputs (tuple of Tensor or Tensor, optional): The gradients with + respect to the function's outputs. + eps (float, optional): perturbation for finite differences + atol (float, optional): absolute tolerance + rtol (float, optional): relative tolerance + gen_non_contig_grad_outputs (bool, optional): if :attr:`grad_outputs` is + ``None`` and :attr:`gen_non_contig_grad_outputs` is ``True``, the + randomly generated gradient outputs are made to be noncontiguous + raise_exception (bool, optional): indicating whether to raise an exception if + the check fails. The exception gives more information about the + exact nature of the failure. This is helpful when debugging gradchecks. + nondet_tol (float, optional): tolerance for non-determinism. When running + identical inputs through the differentiation, the results must either match + exactly (default, 0.0) or be within this tolerance. Note that a small amount + of nondeterminism in the gradient will lead to larger inaccuracies in + the second derivative. + check_undefined_grad (bool, optional): if True, check if undefined output grads + are supported and treated as zeros + check_batched_grad (bool, optional): if True, check if we can compute + batched gradients using prototype vmap support. Defaults to False. + fast_mode (bool, optional): if True, run a faster implementation of gradgradcheck that + no longer computes the entire jacobian. + masked (bool, optional): if True, the gradients of unspecified elements of + sparse tensors are ignored (default, False). + Returns: + True if all differences satisfy allclose condition + """ + assert ( + check_fwd_over_rev or check_rev_over_rev + ), "Expected at least one of check_fwd_over_rev or check_rev_over_rev to be True" + assert not ( + check_undefined_grad and not check_rev_over_rev + ), "Setting check_undefined_grad=True requires check_rev_over_rev to be True" + assert not ( + check_batched_grad and not check_rev_over_rev + ), "Setting check_batched_grad=True requires check_rev_over_rev to be True" + # TODO: do we want to test this too? 
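# For reference, a typical top-level invocation of gradcheck/gradgradcheck on a
# toy function, using double precision as the docstrings above recommend. The
# function and shapes are illustrative only, not taken from this module's tests.
import torch
from torch.autograd import gradcheck, gradgradcheck

x = torch.randn(4, 3, dtype=torch.double, requires_grad=True)
w = torch.randn(3, 5, dtype=torch.double, requires_grad=True)

def f(x, w):
    return torch.sigmoid(x @ w)

assert gradcheck(f, (x, w), eps=1e-6, atol=1e-4)       # first-order check
assert gradgradcheck(f, (x, w), eps=1e-6, atol=1e-4)   # second-order check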
+ # assert not (check_batched_forward_grad and not check_fwd_over_rev), ( + # "Setting check_batched_forward_grad=True requires check_fwd_over_rev to be True") + tupled_inputs = _as_tuple(inputs) + + if grad_outputs is None: + # If grad_outputs is not specified, create random Tensors of the same shape, type, and device as the outputs + + outputs = _differentiable_outputs(func(*tupled_inputs)) + tupled_grad_outputs = tuple( + torch.testing.make_tensor( + x.shape, + dtype=x.dtype + if x.is_floating_point() or x.is_complex() + else torch.double, + device=x.device, + low=-1, + high=1, + requires_grad=True, + noncontiguous=gen_non_contig_grad_outputs, + ) + for x in outputs + ) + else: + tupled_grad_outputs = _as_tuple(grad_outputs) + + num_outputs = len(tupled_grad_outputs) + + # NB: We need to save the requires_grad information about the inputs here because gradcheck detaches inputs + # before running forward mode AD + diff_input_args_indices = { + i for i, x in enumerate(tupled_inputs) if is_tensor_like(x) and x.requires_grad + } + diff_grad_output_indices = { + i for i, x in enumerate(tupled_grad_outputs) if x.requires_grad + } + + def new_func(*args): + # Restore the requires_grad information + input_args = tuple( + x.requires_grad_() if i in diff_input_args_indices else x + for i, x in enumerate(args[:-num_outputs]) + ) + outputs = _differentiable_outputs(func(*input_args)) + grad_outputs = tuple( + x.requires_grad_() if i in diff_grad_output_indices else x + for i, x in enumerate(args[-num_outputs:]) + ) + diff_input_args = tuple( + x for i, x in enumerate(input_args) if i in diff_input_args_indices + ) + grad_inputs = torch.autograd.grad( + outputs, diff_input_args, grad_outputs, create_graph=True, allow_unused=True + ) + grad_inputs = tuple(g for g in grad_inputs if g is not None) + return grad_inputs + + return gradcheck( + new_func, + tupled_inputs + tupled_grad_outputs, + eps=eps, + atol=atol, + rtol=rtol, + raise_exception=raise_exception, + nondet_tol=nondet_tol, + check_undefined_grad=check_undefined_grad, + check_grad_dtypes=check_grad_dtypes, + check_batched_grad=check_batched_grad, + fast_mode=fast_mode, + check_forward_ad=check_fwd_over_rev, + check_backward_ad=check_rev_over_rev, + masked=masked, + ) diff --git a/venv/lib/python3.10/site-packages/torch/autograd/graph.py b/venv/lib/python3.10/site-packages/torch/autograd/graph.py new file mode 100644 index 0000000000000000000000000000000000000000..42922026e7daad9cff6c95cfce5f5a6f690e28de --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/autograd/graph.py @@ -0,0 +1,749 @@ +import abc +import collections +import contextlib +import functools +import logging +import threading +import weakref +from collections import defaultdict, namedtuple +from typing import ( + Any, + Callable, + cast, + Deque, + Dict, + List, + Optional, + Sequence, + Set, + Tuple, + Union, +) + +import torch +from torch.autograd.variable import Variable +from torch.utils._python_dispatch import TorchDispatchMode +from torch.utils.hooks import RemovableHandle + +log = logging.getLogger(__name__) + + +__all__ = [ + "saved_tensors_hooks", + "save_on_cpu", + "disable_saved_tensors_hooks", + "register_multi_grad_hook", + "allow_mutation_on_saved_tensors", + "Node", + "GradientEdge", + "get_gradient_edge", + "increment_version", +] + + +class Node(abc.ABC): + @abc.abstractmethod + def name(self) -> str: + r"""Return the name. 
+ + Example:: + + >>> import torch + >>> a = torch.tensor([0., 0., 0.], requires_grad=True) + >>> b = a.clone() + >>> assert isinstance(b.grad_fn, torch.autograd.graph.Node) + >>> print(b.grad_fn.name()) + CloneBackward0 + """ + ... + + @property + @abc.abstractmethod + def next_functions(self) -> Tuple[Tuple[Optional["Node"], int], ...]: + ... + + @abc.abstractmethod + def metadata(self) -> dict: + r"""Return the metadata.""" + ... + + @abc.abstractmethod + def _register_hook_dict(self, tensor: torch.Tensor) -> None: + ... + + @abc.abstractmethod + def register_hook(self, fn: Callable[..., Any]) -> RemovableHandle: + r"""Register a backward hook. + + The hook will be called every time a gradient with respect to the + Node is computed. The hook should have the following signature:: + + hook(grad_inputs: Tuple[Tensor], grad_outputs: Tuple[Tensor]) -> Tuple[Tensor] or None + + + The hook should not modify its argument, but it can optionally return + a new gradient which will be used in place of :attr:`grad_inputs`. + + This function returns a handle with a method ``handle.remove()`` + that removes the hook from the module. + + .. note:: + See :ref:`backward-hooks-execution` for more information on how when this hook + is executed, and how its execution is ordered relative to other hooks. + + Example:: + + >>> import torch + >>> a = torch.tensor([0., 0., 0.], requires_grad=True) + >>> b = a.clone() + >>> assert isinstance(b.grad_fn, torch.autograd.graph.Node) + >>> handle = b.grad_fn.register_hook(lambda gI, gO: (gO[0] * 2,)) + >>> b.sum().backward(retain_graph=True) + >>> print(a.grad) + tensor([2., 2., 2.]) + >>> handle.remove() # Removes the hook + >>> a.grad = None + >>> b.sum().backward(retain_graph=True) + >>> print(a.grad) + tensor([1., 1., 1.]) + """ + ... + + @abc.abstractmethod + def register_prehook(self, fn: Callable[..., Any]) -> RemovableHandle: + r"""Register a backward pre-hook. + + The hook will be called every time a gradient with respect to the + Node is computed. The hook should have the following signature:: + + hook(grad_outputs: Tuple[Tensor]) -> Tuple[Tensor] or None + + The hook should not modify its argument, but it can optionally return + a new gradient which will be used in place of :attr:`grad_outputs`. + + This function returns a handle with a method ``handle.remove()`` + that removes the hook from the module. + + .. note:: + See :ref:`backward-hooks-execution` for more information on how when this hook + is executed, and how its execution is ordered relative to other hooks. + + Example:: + + >>> a = torch.tensor([0., 0., 0.], requires_grad=True) + >>> b = a.clone() + >>> assert isinstance(b.grad_fn, torch.autograd.graph.Node) + >>> handle = b.grad_fn.register_prehook(lambda gI: (gI[0] * 2,)) + >>> b.sum().backward(retain_graph=True) + >>> print(a.grad) + tensor([2., 2., 2.]) + >>> handle.remove() + >>> a.grad = None + >>> b.sum().backward(retain_graph=True) + >>> print(a.grad) + tensor([1., 1., 1.]) + """ + ... 
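# Illustrative sketch: the `next_functions` property declared above exposes the
# edges of the autograd graph, which permits a simple traversal of grad_fn nodes.
# The tensors below are arbitrary example values.
import torch

a = torch.tensor([1.0, 2.0], requires_grad=True)
out = (a * 3).sum()

node = out.grad_fn                        # a torch.autograd.graph.Node
print(node.name())                        # e.g. "SumBackward0"
print([type(fn).__name__ for fn, _ in node.next_functions])  # e.g. ['MulBackward0']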
+ + @classmethod + def __subclasshook__(cls, C): + if cls is Node: + if ( + C is not None and C is getattr(torch._C._functions, C.__name__, None) + ) or issubclass(C, torch.autograd.function.BackwardCFunction): + return True + return NotImplemented + + +def _get_grad_fn_or_grad_acc(t): + if t.requires_grad and t.grad_fn is None: + return t.view_as(t).grad_fn.next_functions[0][0] + else: + return t.grad_fn + + +GradientEdge = namedtuple("GradientEdge", ("node output_nr")) +GradientEdge.__doc__ = """\ +Object representing a given gradient edge within the autograd graph. +To get the gradient edge where a given Tensor gradient will be computed, +you can do ``edge = autograd.graph.get_gradient_edge(tensor)``. +""" + + +def get_gradient_edge(tensor): + """Get the gradient edge for computing the gradient of the given Tensor. + + In particular, it is equivalent to call + ``g = autograd.grad(loss, input)`` and ``g = autograd.grad(loss, get_gradient_edge(input))``. + """ + if not tensor.requires_grad: + raise RuntimeError( + "It is not possible to get the gradient edge for a Tensor that does not require gradients" + ) + grad_fn = _get_grad_fn_or_grad_acc(tensor) + + # Note that output_nr default to 0 which is the right value + # for the AccumulateGrad node. + return GradientEdge(grad_fn, tensor.output_nr) + + +def increment_version(tensor): + """Update autograd metadata tracking whether the given Tensor was modified in place. + + This is to enable more accurate error checking within the autograd engine. + It is already done automatically by PyTorch functions and within custom Function + when mark_dirty() is called appropriately so you only need to call this explicitly + if you are doing inplace operation on the Tensor data in a way that Pytorch doesn't + know about. For example a custom kernel that reads the Tensor data_ptr and modifies + the memory inplace based on this pointer. + + Note that incrementing the version counter multiple times for a single inplace operation + is not problematic. + """ + torch._C._increment_version(tensor) + + +class saved_tensors_hooks: + """Context-manager that sets a pair of pack / unpack hooks for saved tensors. + + Use this context-manager to define how intermediary results of an operation + should be packed before saving, and unpacked on retrieval. + + In that context, the ``pack_hook`` function will be called everytime an + operation saves a tensor for backward (this includes intermediary results + saved using + :func:`~torch.autograd.function._ContextMethodMixin.save_for_backward` but + also those recorded by a PyTorch-defined operation). The output of + ``pack_hook`` is then stored in the computation graph instead of the + original tensor. + + The ``unpack_hook`` is called when the saved tensor needs to be accessed, + namely when executing :func:`torch.Tensor.backward()` or + :func:`torch.autograd.grad()`. It takes as argument the *packed* object + returned by ``pack_hook`` and should return a tensor which has the same + content as the original tensor (passed as input to the corresponding + ``pack_hook``). + + The hooks should have the following signatures: + + pack_hook(tensor: Tensor) -> Any + + unpack_hook(Any) -> Tensor + + where the return value of ``pack_hook`` is a valid input to ``unpack_hook``. + + In general, you want ``unpack_hook(pack_hook(t))`` to be equal to ``t`` in terms + of value, size, dtype and device. + + Example:: + + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_AUTOGRAD) + >>> def pack_hook(x): + ... print("Packing", x) + ... 
return x + >>> + >>> def unpack_hook(x): + ... print("Unpacking", x) + ... return x + >>> + >>> a = torch.ones(5, requires_grad=True) + >>> b = torch.ones(5, requires_grad=True) * 2 + >>> with torch.autograd.graph.saved_tensors_hooks(pack_hook, unpack_hook): + ... y = a * b + Packing tensor([1., 1., 1., 1., 1.], requires_grad=True) + Packing tensor([2., 2., 2., 2., 2.], grad_fn=) + >>> y.sum().backward() + Unpacking tensor([1., 1., 1., 1., 1.], requires_grad=True) + Unpacking tensor([2., 2., 2., 2., 2.], grad_fn=) + + .. warning :: + Performing an inplace operation on the input to either hooks may lead + to undefined behavior. + + .. warning :: + Only one pair of hooks is allowed at a time. When recursively nesting this + context-manager, only the inner-most pair of hooks will be applied. + """ + + def __init__( + self, + pack_hook: Callable[[torch.Tensor], Any], + unpack_hook: Callable[[Any], torch.Tensor], + ): + self.pack_hook = pack_hook + self.unpack_hook = unpack_hook + + def __enter__(self): + torch._C._autograd._push_saved_tensors_default_hooks( + self.pack_hook, self.unpack_hook + ) + + def __exit__(self, *args: object): + torch._C._autograd._pop_saved_tensors_default_hooks() + + +class save_on_cpu(saved_tensors_hooks): + """Context manager under which tensors saved by the forward pass will be stored on cpu, then retrieved for backward. + + When performing operations within this context manager, intermediary + results saved in the graph during the forward pass will be moved to CPU, + then copied back to the original device when needed for the backward pass. + If the graph was already on CPU, no tensor copy is performed. + + Use this context-manager to trade compute for GPU memory usage (e.g. + when your model doesn't fit in GPU memory during training). + + Args: + pin_memory (bool): If ``True`` tensors will be saved to CPU pinned memory + during packing and copied to GPU asynchronously during unpacking. + Defaults to ``False``. + Also see :ref:`cuda-memory-pinning`. + + + Example:: + + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA) + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_AUTOGRAD) + >>> a = torch.randn(5, requires_grad=True, device="cuda") + >>> b = torch.randn(5, requires_grad=True, device="cuda") + >>> c = torch.randn(5, requires_grad=True, device="cuda") + >>> + >>> def f(a, b, c): + ... prod_1 = a * b # a and b are saved on GPU + ... with torch.autograd.graph.save_on_cpu(): + ... prod_2 = prod_1 * c # prod_1 and c are saved on CPU + ... y = prod_2 * a # prod_2 and a are saved on GPU + ... 
return y + >>> + >>> y = f(a, b, c) + >>> del a, b, c # for illustration only + >>> # the content of a, b, and prod_2 are still alive on GPU + >>> # the content of prod_1 and c only live on CPU + >>> y.sum().backward() # all CPU tensors are moved back to GPU, for backward + >>> # all intermediary tensors are released (deleted) after the call to backward + + """ + + def __init__(self, pin_memory=False, device_type="cuda"): + device_module = getattr(torch, device_type, torch.cuda) + + def pack_to_cpu(tensor): + if not pin_memory: + return (tensor.device, tensor.cpu()) + packed = torch.empty( + tensor.size(), + dtype=tensor.dtype, + layout=tensor.layout, + pin_memory=(device_module.is_available() and not tensor.is_sparse), + ) + packed.copy_(tensor) + return (tensor.device, packed) + + def unpack_from_cpu(packed): + device, tensor = packed + return tensor.to(device, non_blocking=pin_memory) + + super().__init__(pack_to_cpu, unpack_from_cpu) + + +@contextlib.contextmanager +def disable_saved_tensors_hooks(error_message): + """Context-manager that disables the saved tensors default hooks feature. + + Useful for if you are creating a feature that does not work with saved + tensors default hooks. + + Args: + error_message (str): When saved tensors default hooks are used when they + have been are disabled, a RuntimeError with this + error message gets raised. + + Example:: + + >>> # xdoctest: +SKIP(failing) + >>> message = "saved tensors default hooks are disabled" + >>> with torch.autograd.graph.disable_saved_tensors_hooks(message): + ... # Raises RuntimeError: saved tensors default hooks are disabled + ... with torch.autograd.graph.save_on_cpu(): + ... pass + + """ + try: + maybe_prev_message = ( + torch._C._autograd._saved_tensors_hooks_get_disabled_error_message() + ) + torch._C._autograd._saved_tensors_hooks_disable(error_message) + yield + finally: + # See NOTE: [disabled_error_message invariant] + if maybe_prev_message is None: + torch._C._autograd._saved_tensors_hooks_enable() + else: + torch._C._autograd._saved_tensors_hooks_disable(maybe_prev_message) + + +def register_multi_grad_hook( + tensors: Sequence[torch.Tensor], + fn: Union[ + Callable[[Sequence[Optional[torch.Tensor]]], None], + Callable[[torch.Tensor], None], + ], + *, + mode: str = "all", +): + r"""Register a multi-grad backward hook. + + There are two supported modes: ``"all"`` and ``"any"``. + + Under the ``"all"`` mode, the hook will be called after gradients with respect to every tensor in + :attr:`tensors` have been computed. If a tensor is in :attr:`tensors` but + is not part of the graph, or if a tensor is not needed to compute the gradients + for any ``inputs`` specified for the current ``.backward()`` or ``.grad()`` call, + this tensor will be ignored and the hook will not wait for its gradient to be + computed. + + After every non-ignored tensor's gradient has been computed, :attr:`fn` will be + called with those gradients. ``None`` will be passed for tensors that did not + have their gradients computed. + + Under the ``"any"`` mode, the hook will be called after the first gradient + with respect to a tensor in :attr:`tensors` has been computed. The hook + will be called with that gradient as its argument. + + The hook should not modify its arguments. + + This function returns a handle with a method ``handle.remove()`` that removes the hook. + + .. note:: + See :ref:`backward-hooks-execution` for more information on how when this hook + is executed, and how its execution is ordered relative to other hooks. 
+ + Example:: + + >>> import torch + >>> + >>> a = torch.rand(2, 3, requires_grad=True) + >>> b = torch.rand(2, 3, requires_grad=True) + >>> c = a * b + >>> d = a * b + >>> + >>> def fn(grads): + ... print([g is not None for g in grads]) + ... + >>> torch.autograd.graph.register_multi_grad_hook((a, b, c, d), fn) + >>> + >>> c.sum().backward(retain_graph=True) + [True, True, True, False] + >>> c.sum().backward(inputs=(a,), retain_graph=True) + [True, False, True, False] + >>> + """ + supported_modes = ("all", "any") + if mode not in supported_modes: + raise ValueError(f"Expects mode to be one of {supported_modes} but got {mode}") + + class Handle(RemovableHandle): + handles: Tuple[RemovableHandle, ...] + + def __init__(self, handles: Tuple[RemovableHandle, ...]): + self.handles = handles + + def remove(self): + for handle in self.handles: + handle.remove() + + def __getstate__(self): + return self.handles + + def __setstate__(self, state): + self.handles = state + + if mode == "all": + count: Dict[int, int] = dict() + nb_calls = None + buffer: Dict[int, List[Optional[torch.Tensor]]] = dict() + + grad_fns = list(map(_get_grad_fn_or_grad_acc, tensors)) + len_tensors = len(tensors) + + def get_inner_hook(idx): + def inner_hook(grad: torch.Tensor): + nonlocal count, nb_calls, buffer, fn + id = torch._C._current_graph_task_id() + assert ( + id != -1 + ), "expected this hook to be called inside a backward call" + count[id] = count.get(id, 0) + buffer[id] = buffer.get(id, [None] * len_tensors) + + if count[id] == 0: + # On the first call, compute the actual nb_calls and buffer + nb_calls = sum(torch._C._will_engine_execute_node(g) for g in grad_fns) # type: ignore[attr-defined] + + buffer[id][idx] = grad + count[id] += 1 + + if count[id] == nb_calls: + fn = cast(Callable[[Sequence[Optional[torch.Tensor]]], None], fn) + fn(buffer[id]) + del count[id] + del buffer[id] + + return inner_hook + + handles: Tuple[RemovableHandle] = tuple( + t.register_hook(get_inner_hook(i)) for i, t in enumerate(tensors) + ) + elif mode == "any": + fn = cast(Callable[[torch.Tensor], None], fn) + lock = threading.Lock() + ran_hook: Dict[int, bool] = defaultdict(bool) + + @functools.wraps(fn) + def wrapped_fn(grad: torch.Tensor): + nonlocal ran_hook + id = torch._C._current_graph_task_id() + assert id != -1, "expected this hook to be called inside a backward call" + with lock: + prev, ran_hook[id] = ran_hook[id], True + if prev: + return + fn(grad) + + handles = tuple( + tensor.register_hook(wrapped_fn) + for tensor in tensors + if tensor.requires_grad + ) + + return Handle(handles) # type: ignore[possibly-undefined] + + +# NOTE [Allow mutation on tensors saved for backward] +# +# 1. Tensor gets saved for backward +# - remember the python object id and the version of the tensor +# - remember aliasing information (data_ptr of base + version) +# - save the original so we control its lifetime +# 2. Any time a tensor gets in-placed +# - for each tensor aliased to it: +# - check using its object id and version to see if it has been saved +# - if it has been saved, clone it +# - delete the reference to the original +# 3. 
during backward +# - if the clone exists, the tensor must've been modified in-place +_allow_mutation_on_saved_tensors_enabled = False + + +def _get_tid(t) -> Tuple[int, int, int]: + return (id(t), t.data_ptr(), t._version) + + +def _get_sid(t) -> Tuple[int, int]: + return (t.data_ptr(), t._version) + + +class _Handle: + pass + + +class _swap_with_cloned(saved_tensors_hooks): + def __init__(self, ctx): + def pack_hook(t): + tid = _get_tid(t) + sid = _get_sid(t) + # Tensors saved for backward have an entry in _tid_to_weakhandle + handle: Optional[_Handle] = None + + # Save aliasing information + ctx.sid_to_tid[sid].add(tid) + + # NB: The same tensor (of the same version) can be saved multiple times + if tid not in ctx.tid_to_weakhandle: + handle = _Handle() + ctx.tid_to_weakhandle[tid] = handle + ctx.original[handle] = t + else: + # Store an additional strong reference to the handle + handle = ctx.tid_to_weakhandle[tid] + return handle + + def unpack_hook(tup): + handle = tup + error_msg = ( + "Trying to backward outside of the 'allow_mutation_on_saved_tensors' context" + "in which the graph was originally recorded." + ) + assert _allow_mutation_on_saved_tensors_enabled, error_msg + if handle in ctx.cloned: + res = ctx.cloned[handle] + else: + assert handle in ctx.original, error_msg + res = ctx.original[handle] + return res + + super().__init__(pack_hook, unpack_hook) + + +class _CloneArgBeforeMutateMode(TorchDispatchMode): + def __init__(self, ctx): + self.ctx = ctx + + def __torch_dispatch__(self, func, types, args=(), kwargs=None): + kwargs = kwargs or {} + + for idx, arg in enumerate(func._schema.arguments): + if arg.alias_info is not None and arg.alias_info.is_write: + t = kwargs["out"] if arg.is_out else args[idx] + tid = _get_tid(t) + sid = _get_sid(t) + ctx = self.ctx + if sid in ctx.sid_to_tid: + for tid in ctx.sid_to_tid[sid]: + if tid not in ctx.tid_to_weakhandle: + # We know that if tid is in sid_to_tid, then it must also be in + # tid_to_weakhandle. However, it is possible for the tensor to be + # saved at one point, but cleared by backward before it is modified + # in-place. Consider the following example: + # + # >>> a = torch.randn(2, 3, requires_grad=True).clone() + # >>> out = (a**2).sum() + # >>> out.backward() + # >>> a.sin_() + continue + handle = ctx.tid_to_weakhandle[tid] + if handle in ctx.cloned: + # The same exact tensor has been cloned already + continue + ctx.cloned[handle] = ctx.original[handle].clone() + del ctx.original[handle] + + rs = func(*args, **kwargs) + return rs + + +class _AllowMutationOnSavedContext: + def __init__(self): + self.cloned: weakref.WeakKeyDictionary = weakref.WeakKeyDictionary() + self.original: weakref.WeakKeyDictionary = weakref.WeakKeyDictionary() + self.tid_to_weakhandle: weakref.WeakValueDictionary = ( + weakref.WeakValueDictionary() + ) + self.sid_to_tid: Dict[Tuple[int, int], Set[Tuple[int, int, int]]] = defaultdict( + set + ) + + def clear(self): + self.cloned.clear() + self.original.clear() + self.tid_to_weakhandle.clear() + self.sid_to_tid.clear() + + +@contextlib.contextmanager +def allow_mutation_on_saved_tensors(): + """Context manager under which mutating tensors saved for backward is allowed. + + Under this context manager, tensors saved for backward are cloned on mutation, + so the original version can still be used during backward. Normally, mutating a tensor + saved for backward will result in an error raised when it's used during backward. 
+ + To ensure the correct behavior, both the forward and backward should be run under + the same context manager. + + returns: + An _AllowMutationOnSavedContext object storing the state managed by this + context manager. This object can be useful for debugging purposes. The state + managed by the context manager is automatically cleared upon exiting. + + Example:: + + >>> import torch + >>> with torch.autograd.graph.allow_mutation_on_saved_tensors(): + ... # forward + ... a = torch.ones(2, 3, requires_grad=True) + ... b = a.clone() + ... out = (b**2).sum() + ... b.sin_() + ... # backward + ... out.sum().backward() + ... + tensor([[0.8415, 0.8415, 0.8415], + [0.8415, 0.8415, 0.8415]], grad_fn=) + """ + global _allow_mutation_on_saved_tensors_enabled + + ctx = _AllowMutationOnSavedContext() + + with _swap_with_cloned(ctx), _CloneArgBeforeMutateMode(ctx): + try: + if _allow_mutation_on_saved_tensors_enabled: + raise RuntimeError( + "allow_mutation_on_saved_tensors contexts cannot be nested" + ) + _allow_mutation_on_saved_tensors_enabled = True + yield ctx + finally: + ctx.clear() + _allow_mutation_on_saved_tensors_enabled = False + + +def _register_logging_hooks_on_whole_graph(t_outputs: List[torch.Tensor]): + grad_fns = list(map(_get_grad_fn_or_grad_acc, t_outputs)) + + def iter_graph(roots): + if not roots: + return + seen = set() + q: Deque = collections.deque() + for node in roots: + if node is not None: + seen.add(node) + q.append(node) + + while q: + node = q.popleft() + for fn, _idx in node.next_functions: + if fn in seen or fn is None: + continue + seen.add(fn) + q.append(fn) + + yield node + + def fmt(t): + # Avoid circular import + from torch.testing._internal.common_utils import dtype_abbrs + + if t is None: + return "None" + return f"{dtype_abbrs[t.dtype]}[{', '.join(map(str, t.shape))}]" + + def prehook(grad_outputs): + node = torch._C._current_autograd_node() + grad_outputs_str = f"[{','.join(fmt(t) for t in grad_outputs)}]" + log_str = f"Executing: {node} with grad_outputs: {grad_outputs_str}" + log.debug(log_str) + + handles = [] + for node in iter_graph(grad_fns): + handles.append(node.register_prehook(prehook)) + + def unregister_hooks(): + for handle in handles: + handle.remove() + + return unregister_hooks + + +def _engine_run_backward(t_outputs, *args, **kwargs): + attach_logging_hooks = log.getEffectiveLevel() <= logging.DEBUG + if attach_logging_hooks: + unregister_hooks = _register_logging_hooks_on_whole_graph(t_outputs) + try: + return Variable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass + t_outputs, *args, **kwargs + ) # Calls into the C++ engine to run the backward pass + finally: + if attach_logging_hooks: + unregister_hooks() # type: ignore[possibly-undefined] diff --git a/venv/lib/python3.10/site-packages/torch/autograd/profiler.py b/venv/lib/python3.10/site-packages/torch/autograd/profiler.py new file mode 100644 index 0000000000000000000000000000000000000000..e2e3d36e33b43ecf6dc3b7b75d947d73d554766f --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/autograd/profiler.py @@ -0,0 +1,1042 @@ +from collections import defaultdict +from typing import Any, Dict, List, Optional +from warnings import warn + +import torch + +import torch.cuda +from torch._C import _get_privateuse1_backend_name +from torch._C._profiler import _ExperimentalConfig + +from torch.autograd import ( + _disable_profiler, + _enable_profiler, + _kineto_step, + _prepare_profiler, + _ProfilerResult, + _supported_activities, + DeviceType, + 
kineto_available, + ProfilerActivity, + ProfilerConfig, + ProfilerState, +) +from torch.autograd.profiler_util import ( + _filter_name, + _filter_stack_entry, + _rewrite_name, + EventList, + FunctionEvent, + MEMORY_EVENT_NAME, + MemRecordsAcc, + OUT_OF_MEMORY_EVENT_NAME, +) +from torch.futures import Future + +__all__ = [ + "profile", + "record_function", + "emit_itt", + "emit_nvtx", + "load_nvprof", + "EnforceUnique", + "parse_nvprof_trace", + "KinetoStepTracker", + "EventList", + "FunctionEvent", + "MemRecordsAcc", +] + +try: + # Available in Python >= 3.2 + from contextlib import ContextDecorator as _ContextDecorator +except ImportError: + import functools + + class _ContextDecorator: # type: ignore[no-redef] + def __enter__(self): + raise NotImplementedError + + def __exit__(self, exc_type, exc_val, exc_tb): + raise NotImplementedError + + def __call__(self, func): + @functools.wraps(func) + def wrapped(*args, **kwargs): + with self: + return func(*args, **kwargs) + + return wrapped + + +# global python state - whether profiler is currently enabled +# useful for fast python checks to reduce latency +_is_profiler_enabled: bool = False + + +def _set_is_profiler_enabled(enable: bool): + global _is_profiler_enabled + _is_profiler_enabled = enable + + +def _run_on_profiler_start(): + _set_is_profiler_enabled(True) + + +def _run_on_profiler_stop(): + _set_is_profiler_enabled(False) + + +class profile: + """Context manager that manages autograd profiler state and holds a summary of results. + + Under the hood it just records events of functions being executed in C++ and + exposes those events to Python. You can wrap any code into it and it will + only report runtime of PyTorch functions. + Note: profiler is thread local and is automatically propagated into the async tasks + + Args: + enabled (bool, optional): Setting this to False makes this context manager a no-op. + + use_cuda (bool, optional): Enables timing of CUDA events as well using the cudaEvent API. + Adds approximately 4us of overhead to each tensor operation. + + record_shapes (bool, optional): If shapes recording is set, information + about input dimensions will be collected. This allows one to see which + dimensions have been used under the hood and further group by them + using prof.key_averages(group_by_input_shape=True). Please note that + shape recording might skew your profiling data. It is recommended to + use separate runs with and without shape recording to validate the timing. + Most likely the skew will be negligible for bottom most events (in a case + of nested function calls). But for higher level functions the total + self cpu time might be artificially increased because of the shape + collection. + + with_flops (bool, optional): If with_flops is set, the profiler will estimate + the FLOPs (floating point operations) value using the operator's input shape. + This allows one to estimate the hardware performance. Currently, + this option only works for the matrix multiplication and 2D convolution operators. + + profile_memory (bool, optional): track tensor memory allocation/deallocation. + + with_stack (bool, optional): record source information (file and line number) for the ops. + + with_modules (bool): record module hierarchy (including function names) + corresponding to the callstack of the op. e.g. 
If module A's forward call's + module B's forward which contains an aten::add op, + then aten::add's module hierarchy is A.B + Note that this support exist, at the moment, only for TorchScript models + and not eager mode models. + + use_kineto (bool, optional): experimental, enable profiling with Kineto profiler. + + use_cpu (bool, optional): profile CPU events; setting to ``False`` requires + ``use_kineto=True`` and can be used to lower the overhead for GPU-only profiling. + + experimental_config (_ExperimentalConfig) : A set of experimental options + used by profiler libraries like Kineto. Note, backward compatibility is not guaranteed. + + + .. warning: + Enabling memory profiling or source attribution incurs additional profiler + overhead + + .. warning: + This context managers should not be called recursively, i.e. no nested + instances are allowed + + .. warning: + Due to some CUDA multiprocessing limitations (multiprocessing-cuda-note_), + one cannot use the profiler with ``use_cuda = True`` to benchmark + DataLoaders with ``num_workers > 0``. If you wish to benchmark data loading, + please use ``use_cuda = False`` or ``num_workers = 0``. + + Example: + >>> # xdoctest: +SKIP + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_AUTOGRAD_PROFILER) + >>> x = torch.randn((1, 1), requires_grad=True) + >>> with torch.autograd.profiler.profile() as prof: + >>> for _ in range(100): # any normal python code, really! + >>> y = x ** 2 + >>> y.backward() + >>> # NOTE: some columns were removed for brevity + >>> print(prof.key_averages().table(sort_by="self_cpu_time_total")) + ----------------------------------- --------------- --------------- --------------- + Name Self CPU total CPU time avg Number of Calls + ----------------------------------- --------------- --------------- --------------- + mul 32.048ms 32.048ms 200 + pow 27.041ms 27.041ms 200 + PowBackward0 9.727ms 55.483ms 100 + torch::autograd::AccumulateGrad 9.148ms 9.148ms 100 + torch::autograd::GraphRoot 691.816us 691.816us 100 + ----------------------------------- --------------- --------------- --------------- + + """ + + def __init__( + self, + enabled=True, + *, + use_cuda=False, + use_device=None, + record_shapes=False, + with_flops=False, + profile_memory=False, + with_stack=False, + with_modules=False, + use_kineto=False, + use_cpu=True, + use_mtia=False, + experimental_config=None, + ): + self.enabled: bool = enabled + if not self.enabled: + return + self.use_cuda = use_cuda + self.use_device: Optional[str] = ( + use_device if use_device != "privateuseone" else None + ) + self.function_events: Optional[EventList] = None + self.entered = False + self.record_shapes = record_shapes + self.with_flops = with_flops + self.record_shapes |= self.with_flops + self.profile_memory = profile_memory + self.with_stack = with_stack + self.with_modules = with_modules + self.use_cpu = use_cpu + self.use_mtia = use_mtia + if experimental_config is None: + experimental_config = _ExperimentalConfig() + self.experimental_config = experimental_config + self.kineto_results: Optional[_ProfilerResult] = None + + if not self.use_cpu: + assert ( + use_kineto + ), "Device-only events supported only with Kineto (use_kineto=True)" + + if self.use_device == "cuda": + self.use_device = None + self.use_cuda = True + + if self.use_device and self.use_device != _get_privateuse1_backend_name(): + warn(f"{self.use_device} doesn't support profile.") + self.use_device = None + + if self.use_cuda and not torch.cuda.is_available(): + warn("CUDA is not available, disabling 
CUDA profiling") + self.use_cuda = False + + self.kineto_activities = set() + if self.use_cpu: + self.kineto_activities.add(ProfilerActivity.CPU) + if self.use_mtia: + self.kineto_activities.add(ProfilerActivity.MTIA) + + self.profiler_kind = ProfilerState.KINETO + if self.use_cuda: + if not use_kineto or ProfilerActivity.CUDA not in _supported_activities(): + assert self.use_cpu, "Legacy CUDA profiling requires use_cpu=True" + self.profiler_kind = ProfilerState.KINETO_GPU_FALLBACK + else: + self.kineto_activities.add(ProfilerActivity.CUDA) + + if self.use_device: + if ( + not use_kineto + or ProfilerActivity.PrivateUse1 not in _supported_activities() + ): + assert ( + self.use_cpu + ), "Legacy custombackend profiling requires use_cpu=True" + self.profiler_kind = ProfilerState.KINETO_PRIVATEUSE1_FALLBACK + else: + self.kineto_activities.add(ProfilerActivity.PrivateUse1) + self.profiler_kind = ProfilerState.KINETO_PRIVATEUSE1 + + assert ( + len(self.kineto_activities) > 0 + ), "No activities specified for the profiler" + + def config(self): + return ProfilerConfig( + self.profiler_kind, + self.record_shapes, + self.profile_memory, + self.with_stack, + self.with_flops, + self.with_modules, + self.experimental_config, + ) + + def __enter__(self): + if not self.enabled: + return + if self.entered: + raise RuntimeError("Profiler context manager is not reentrant") + self._prepare_trace() + self._start_trace() + return self + + def _prepare_trace(self): + self.entered = True + _prepare_profiler(self.config(), self.kineto_activities) + + def _start_trace(self): + self.entered = True + _run_on_profiler_start() + _enable_profiler(self.config(), self.kineto_activities) + + def __exit__(self, exc_type, exc_val, exc_tb): + if not self.enabled: + return + if self.use_cuda: + torch.cuda.synchronize() + self.kineto_results = _disable_profiler() + _run_on_profiler_stop() + parsed_results = self._parse_kineto_results(self.kineto_results) + self.function_events = EventList( + parsed_results, + use_cuda=self.use_cuda, + use_device=self.use_device, + profile_memory=self.profile_memory, + with_flops=self.with_flops, + ) + self.function_events._build_tree() + return False + + def __repr__(self): + if self.function_events is None: + return "" + return repr(self.function_events) + + def __str__(self): + if self.function_events is None: + return "" + return str(self.function_events) + + def _check_finish(self): + if self.function_events is None: + raise RuntimeError("Profiler didn't finish running") + + def table( + self, + sort_by=None, + row_limit=100, + max_src_column_width=75, + max_name_column_width=55, + max_shapes_column_width=80, + header=None, + top_level_events_only=False, + ): + self._check_finish() + assert self.function_events is not None + return self.function_events.table( + sort_by=sort_by, + row_limit=row_limit, + max_src_column_width=max_src_column_width, + max_name_column_width=max_name_column_width, + max_shapes_column_width=max_shapes_column_width, + header=header, + top_level_events_only=top_level_events_only, + ) + + table.__doc__ = EventList.table.__doc__ + + def export_chrome_trace(self, path): + self._check_finish() + if kineto_available(): + self.kineto_results.save(path) # type: ignore[union-attr] + else: + return self.function_events.export_chrome_trace(path) # type: ignore[union-attr] + + export_chrome_trace.__doc__ = EventList.export_chrome_trace.__doc__ + + def export_stacks(self, path: str, metric: str = "self_cpu_time_total"): + self._check_finish() + assert self.function_events is 
not None, "Expected profiling results" + assert self.with_stack, "export_stacks() requires with_stack=True" + return self.function_events.export_stacks(path, metric) + + def key_averages(self, group_by_input_shape=False, group_by_stack_n=0): + self._check_finish() + assert self.function_events is not None, "Expected profiling results" + return self.function_events.key_averages(group_by_input_shape, group_by_stack_n) + + key_averages.__doc__ = EventList.key_averages.__doc__ + + def total_average(self): + self._check_finish() + assert self.function_events is not None, "Expected profiling results" + return self.function_events.total_average() + + total_average.__doc__ = EventList.total_average.__doc__ + + @property + def self_cpu_time_total(self): + """Returns total time spent on CPU. + + The total time is a sum of all self times across all the events. + """ + self._check_finish() + assert self.function_events is not None + return self.function_events.self_cpu_time_total + + def _parse_kineto_results(self, result: _ProfilerResult): + # result.events() has most of the events - PyTorch op-level and device-level events + + trace_start_us = result.trace_start_us() + mem_records = [ + [evt, False] for evt in result.events() if evt.name() == MEMORY_EVENT_NAME + ] + oom_records = [ + evt for evt in result.events() if evt.name() == OUT_OF_MEMORY_EVENT_NAME + ] + mem_records_acc = MemRecordsAcc(mem_records) + + def _cpu_memory_usage(mem_record): + return ( + mem_record.nbytes() + if mem_record.device_type() + in [DeviceType.CPU, DeviceType.MKLDNN, DeviceType.IDEEP] + else 0 + ) + + def _cuda_memory_usage(mem_record): + return ( + mem_record.nbytes() + if mem_record.device_type() in [DeviceType.CUDA, DeviceType.HIP] + else 0 + ) + + def _privateuse1_memory_usage(mem_record): + return ( + mem_record.nbytes() + if mem_record.device_type() in [DeviceType.PrivateUse1] + else 0 + ) + + # Create and return FunctionEvent list + function_events = [] + device_corr_map: Dict[int, List[FunctionEvent]] = {} + max_evt_id = 0 + for kineto_event in result.events(): + if _filter_name(kineto_event.name()): + continue + rel_start_us = kineto_event.start_us() - trace_start_us + rel_end_us = rel_start_us + kineto_event.duration_us() + abs_end_us = kineto_event.start_us() + kineto_event.duration_us() + + cpu_memory_usage = 0 + cuda_memory_usage = 0 + privateuse1_memory_usage = 0 + if kineto_event.device_type() == DeviceType.CPU: + # find the corresponding memory allocation events + for mem_record in mem_records_acc.in_interval( + kineto_event.start_us(), abs_end_us + ): + cpu_memory_usage += _cpu_memory_usage(mem_record[0]) + cuda_memory_usage += _cuda_memory_usage(mem_record[0]) + privateuse1_memory_usage += _privateuse1_memory_usage(mem_record[0]) + mem_record[1] = True + + is_async = kineto_event.is_async() or ( + kineto_event.start_thread_id() != kineto_event.end_thread_id() + ) + + fe = FunctionEvent( + id=kineto_event.correlation_id(), + name=_rewrite_name(name=kineto_event.name(), with_wildcard=True), + trace_name=_rewrite_name(name=kineto_event.name(), with_wildcard=False), + thread=kineto_event.start_thread_id(), + start_us=rel_start_us, + end_us=rel_end_us, + fwd_thread=kineto_event.fwd_thread_id(), + input_shapes=kineto_event.shapes(), + concrete_inputs=kineto_event.concrete_inputs(), + stack=[ + entry + for entry in kineto_event.stack() + if _filter_stack_entry(entry) + ], + scope=kineto_event.scope(), + use_device=self.use_device, + cpu_memory_usage=cpu_memory_usage, + cuda_memory_usage=cuda_memory_usage, + 
privateuse1_memory_usage=privateuse1_memory_usage, + is_async=is_async, + sequence_nr=kineto_event.sequence_nr(), + device_type=kineto_event.device_type(), + device_index=kineto_event.device_index(), + flops=kineto_event.flops(), + ) + max_evt_id = max(max_evt_id, fe.id) + if fe.device_type == DeviceType.CPU and not fe.is_async: + if self.use_device: + privateuse1_time = kineto_event.privateuse1_elapsed_us() + if privateuse1_time > 0: + fe.append_kernel(fe.name, fe.device_index, privateuse1_time) + fe.is_legacy = True + else: + # Check if we have CUDA time as a fallback + cuda_time = kineto_event.cuda_elapsed_us() + if cuda_time > 0: + fe.append_kernel(fe.name, fe.device_index, cuda_time) + fe.is_legacy = True + function_events.append(fe) + corr_id = kineto_event.linked_correlation_id() + if corr_id > 0: + if corr_id not in device_corr_map: + device_corr_map[corr_id] = [] + device_corr_map[corr_id].append(fe) + + # associate CUDA kernels and CUDA runtime (CPU) with CPU events + for fe in function_events: + if ( + fe.device_type == DeviceType.CPU + and not fe.is_async + and fe.id in device_corr_map + ): + for f_evt in device_corr_map[fe.id]: + if f_evt.device_type == DeviceType.CUDA: + fe.append_kernel( + f_evt.name, + f_evt.device_index, + f_evt.time_range.end - f_evt.time_range.start, + ) + elif f_evt.device_type == DeviceType.CPU: + # make sure that 'thread' of a CPU Kineto (e.g. CUDA Runtime) event is associated + # with the 'thread' of the corresponding linked PyTorch event to properly track + # parents and children + f_evt.thread = fe.thread + + def createFunctionEventForMemoryEvents(evt): + rel_start_us = evt.start_us() - trace_start_us + fe = FunctionEvent( + id=max_evt_id, + name=evt.name(), + trace_name=None, # not outputting in the trace + thread=evt.start_thread_id(), + start_us=rel_start_us, + end_us=rel_start_us, # no duration + fwd_thread=evt.start_thread_id(), + input_shapes=[], + stack=[], + scope=0, # RecordScope::FUNCTION + use_device=self.use_device, + cpu_memory_usage=_cpu_memory_usage(evt), + cuda_memory_usage=_cuda_memory_usage(evt), + privateuse1_memory_usage=_privateuse1_memory_usage(evt), + is_async=False, + sequence_nr=-1, + device_type=DeviceType.CPU, + device_index=0, + ) + return fe + + # output top-level memory events + for mem_record in mem_records: + if not mem_record[1]: + max_evt_id += 1 + fe = createFunctionEventForMemoryEvents(mem_record[0]) + function_events.append(fe) + + for oom_record in oom_records: + max_evt_id += 1 + fe = createFunctionEventForMemoryEvents(oom_record) + function_events.append(fe) + + function_events.sort( + key=lambda evt: [evt.time_range.start, -evt.time_range.end] + ) + return function_events + + +class record_function(_ContextDecorator): + """Context manager/function decorator that adds a label to a code block/function when running autograd profiler. + + It is useful when tracing the code profile. + + Args: + name (str): Label assigned to the block of code. + node_id (int): ID of node, for distributed profiling. Unset in + non-distributed cases. + + Example: + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_AUTOGRAD_PROFILER) + >>> x = torch.randn((1, 1), requires_grad=True) + >>> with torch.autograd.profiler.profile() as prof: + ... y = x ** 2 + ... with torch.autograd.profiler.record_function("label-z"): # label the block + ... z = y ** 3 + ... y.backward() + ... 
+ >>> # xdoctest: +IGNORE_WANT + >>> # NOTE: some columns were removed for brevity + >>> print(prof.key_averages().table(sort_by="self_cpu_time_total")) + ----------------------------------- --------------- --------------- --------------- + Name Self CPU total % CPU time avg Number of Calls + ----------------------------------- --------------- --------------- --------------- + pow 60.77% 47.470us 3 + mul 21.73% 25.465us 2 + PowBackward0 12.03% 121.891us 1 + torch::autograd::AccumulateGrad 2.70% 6.324us 1 + label-z 2.13% 12.421us 1 + torch::autograd::GraphRoot 0.64% 1.503us 1 + ----------------------------------- --------------- --------------- --------------- + Self CPU time total: 234.344us + CUDA time total: 0.000us + + """ + + def __init__(self, name: str, args: Optional[str] = None): + self.name: str = name + self.args: Optional[str] = args + # Whether or not we should run record function's end callbacks when exiting. + self.run_callbacks_on_exit: bool = True + # TODO: TorchScript ignores standard type annotation here + # self.record: Optional["torch.classes.profiler._RecordFunction"] = None + self.record = torch.jit.annotate( + Optional["torch.classes.profiler._RecordFunction"], None + ) + + def __enter__(self): + self.record = torch.ops.profiler._record_function_enter_new( + self.name, self.args + ) + return self + + def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any): + if not self.run_callbacks_on_exit: + return + + # Local variable is needed by TorchScript to refine Optional[T] to T + record = self.record + assert record is not None + + # TODO: Too slow with __torch_function__ handling enabled + # See https://github.com/pytorch/pytorch/issues/76410 + if not torch.jit.is_scripting(): + with torch._C.DisableTorchFunctionSubclass(): + torch.ops.profiler._record_function_exit._RecordFunction(record) + else: + torch.ops.profiler._record_function_exit(record) + + def _call_end_callbacks_on_future(self, fut: Future[Any]) -> Future[Any]: + """Use for profiling async calls that return a future. + + Calling this function will extend recording beyond this scope, until the future is + satisfied. It is useful for profiling the end to end time of asynchronous calls. + This function should only be called once to attach the callback onto the future, and + will throw if called multiple times. + + Args: + fut: (torch._C.Future): future for which to schedule + callback for. + + Returns: + A future that completes with the value of the passed in future when + the profiling callbacks have ran. + + """ + # Throw if we have already attached a callback onto the future. + if not self.run_callbacks_on_exit: + raise RuntimeError("_call_end_callbacks_on_future can only be called once.") + + # We are scheduling to run this RecordFunction's end callbacks when the + # passed in future completes, so don't run end callbacks on exit. 
+ self.run_callbacks_on_exit = False + + # Local variable is needed by TorchScript to refine Optional[T] to T + record = self.record + assert record is not None + + # TODO: Too slow with __torch_function__ handling enabled + # See https://github.com/pytorch/pytorch/issues/76410 + if not torch.jit.is_scripting(): + with torch._C.DisableTorchFunctionSubclass(): + profiled_future = ( + torch.ops.profiler._call_end_callbacks_on_jit_fut._RecordFunction( + record, fut + ) + ) + else: + profiled_future = torch.ops.profiler._call_end_callbacks_on_jit_fut( + record, fut + ) + return profiled_future + + +class emit_itt: + """Context manager that makes every autograd operation emit an ITT range. + + It is useful when running the program under Intel(R) VTune Profiler:: + + vtune <--vtune-flags> + + The Instrumentation and Tracing Technology (ITT) API enables your application to generate and + control the collection of trace data during its execution across different Intel tools. + This context manager is to annotate Intel(R) VTune Profiling trace. With help of this context manager, + you will be able to see labled ranges in Intel(R) VTune Profiler GUI. + + .. warning: + This context manager should not be called recursively, i.e. at most one + instance should be enabled at any given time. + + Args: + enabled (bool, optional): Setting ``enabled=False`` makes this context manager a no-op. + Default: ``True``. + record_shapes (bool, optional): If ``record_shapes=True``, the itt range wrapping + each autograd op will append information about the sizes of Tensor arguments received + by that op, in the following format: + ``[[arg0.size(0), arg0.size(1), ...], [arg1.size(0), arg1.size(1), ...], ...]`` + Non-tensor arguments will be represented by ``[]``. + Arguments will be listed in the order they are received by the backend op. + Please note that this order may not match the order in which those arguments were passed + on the Python side. Also note that shape recording may increase the overhead of itt range creation. + Default: ``False`` + + Example: + >>> # xdoctest: +SKIP("Undefined variables") + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_AUTOGRAD_PROFILER) + >>> with torch.autograd.profiler.emit_itt(): + ... model(x) + + """ + + def __init__(self, enabled=True, record_shapes=False): + self.enabled = enabled + self.entered = False + self.record_shapes = record_shapes + + def __enter__(self): + if not self.enabled: + return + if self.entered: + raise RuntimeError("ITT annotation context manager is not reentrant") + self.entered = True + _run_on_profiler_start() + _enable_profiler( + ProfilerConfig( + ProfilerState.ITT, + self.record_shapes, + False, + False, + False, + False, + _ExperimentalConfig(), + ), + set(), + ) + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + if not self.enabled: + return + _disable_profiler() + _run_on_profiler_stop() + return False + + +class emit_nvtx: + """Context manager that makes every autograd operation emit an NVTX range. + + It is useful when running the program under nvprof:: + + nvprof --profile-from-start off -o trace_name.prof -- + + Unfortunately, there's no way to force nvprof to flush the data it collected + to disk, so for CUDA profiling one has to use this context manager to annotate + nvprof traces and wait for the process to exit before inspecting them. + Then, either NVIDIA Visual Profiler (nvvp) can be used to visualize the timeline, or + :func:`torch.autograd.profiler.load_nvprof` can load the results for inspection + e.g. in Python REPL. 
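        For instance, once the profiled process has exited, the trace written by the
        ``nvprof`` invocation above could be loaded back for inspection (a minimal
        sketch; ``trace_name.prof`` simply matches the ``-o`` flag shown earlier)::

            >>> # xdoctest: +SKIP("requires a previously recorded nvprof trace")
            >>> events = torch.autograd.profiler.load_nvprof("trace_name.prof")
            >>> print(events.table(sort_by="self_cpu_time_total"))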
+ + .. warning: + This context manager should not be called recursively, i.e. at most one + instance should be enabled at any given time. + + Args: + enabled (bool, optional): Setting ``enabled=False`` makes this context manager a no-op. + Default: ``True``. + record_shapes (bool, optional): If ``record_shapes=True``, the nvtx range wrapping + each autograd op will append information about the sizes of Tensor arguments received + by that op, in the following format: + ``[[arg0.size(0), arg0.size(1), ...], [arg1.size(0), arg1.size(1), ...], ...]`` + Non-tensor arguments will be represented by ``[]``. + Arguments will be listed in the order they are received by the backend op. + Please note that this order may not match the order in which those arguments were passed + on the Python side. Also note that shape recording may increase the overhead of nvtx range creation. + Default: ``False`` + + Example: + >>> # xdoctest: +SKIP("undefined variables") + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_AUTOGRAD_PROFILER) + >>> with torch.cuda.profiler.profile(): + ... model(x) # Warmup CUDA memory allocator and profiler + ... with torch.autograd.profiler.emit_nvtx(): + ... model(x) + + **Forward-backward correlation** + + When viewing a profile created using :class:`emit_nvtx` in the Nvidia Visual Profiler, + correlating each backward-pass op with the corresponding forward-pass op can be difficult. + To ease this task, :class:`emit_nvtx` appends sequence number information to the ranges it + generates. + + During the forward pass, each function range is decorated with ``seq=``. ``seq`` is a running + counter, incremented each time a new backward Function object is created and stashed for backward. + Thus, the ``seq=`` annotation associated with each forward function range tells you that + if a backward Function object is created by this forward function, + the backward object will receive sequence number N. + During the backward pass, the top-level range wrapping each C++ backward Function's + ``apply()`` call is decorated with ``stashed seq=``. ``M`` is the sequence number that + the backward object was created with. By comparing ``stashed seq`` numbers in backward with ``seq`` + numbers in forward, you can track down which forward op created each backward Function. + + Any functions executed during the backward pass are also decorated with ``seq=``. During + default backward (with ``create_graph=False``) this information is irrelevant, and in fact, + ``N`` may simply be 0 for all such functions. Only the top-level ranges associated with + backward Function objects' ``apply()`` methods are useful, as a way to correlate these Function + objects with the earlier forward pass. + + **Double-backward** + + If, on the other hand, a backward pass with ``create_graph=True`` is underway (in other words, + if you are setting up for a double-backward), each function's execution during backward + is given a nonzero, useful ``seq=``. Those functions may themselves create Function objects + to be executed later during double-backward, just as the original functions in the forward pass did. 
+ The relationship between backward and double-backward is conceptually the same as the relationship + between forward and backward: The functions still emit current-sequence-number-tagged ranges, + the Function objects they create still stash those sequence numbers, and during the eventual + double-backward, the Function objects' ``apply()`` ranges are still tagged with ``stashed seq`` + numbers, which can be compared to `seq` numbers from the backward pass. + + .. warning: + The sequence number is thread-local, and some forward functions don't create an associated + backward Function object (instead delegating that to sub-functions further down the call chain). + For these reasons, the correspondence of stashed sequence numbers in + backward Function ``apply()`` ranges with `seq` numbers in forward-pass ranges is + not guaranteed to be 1 to 1. The sequence numbers alone may not be enough to fully + disambiguate which forward function created which + backward Function object. You may need to make a judgment based on analytic knowledge of what + the expected correspondence should be. + """ + + def __init__(self, enabled=True, record_shapes=False): + self.enabled = enabled + self.entered = False + self.record_shapes = record_shapes + + def __enter__(self): + if not self.enabled: + return + if self.entered: + raise RuntimeError("NVTX annotation context manager is not reentrant") + self.entered = True + torch.cuda.synchronize() + _run_on_profiler_start() + _enable_profiler( + ProfilerConfig( + ProfilerState.NVTX, + self.record_shapes, + False, + False, + False, + False, + _ExperimentalConfig(), + ), + set(), + ) + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + if not self.enabled: + return + torch.cuda.synchronize() + _disable_profiler() + _run_on_profiler_stop() + return False + + +def load_nvprof(path): + """Open an nvprof trace file and parses autograd annotations. + + Args: + path (str): path to nvprof trace + """ + return EventList(parse_nvprof_trace(path)) + + +class EnforceUnique: + """Raises an error if a key is seen more than once.""" + + def __init__(self): + self.seen = set() + + def see(self, *key): + r""" + Observe a key and raise an error if it is seen multiple times. + """ + if key in self.seen: + raise RuntimeError("duplicate key: " + str(key)) + self.seen.add(key) + + +def parse_nvprof_trace(path): + import sqlite3 + + conn = sqlite3.connect(path) + conn.row_factory = sqlite3.Row + + # Parse strings table + strings = {} + for r in conn.execute("SELECT _id_ as id, value FROM StringTable"): + strings[r["id"]] = torch._C._demangle(r["value"]) + + # First, find all functions and create FunctionEvents for them + marker_query = """ + SELECT + start.id AS marker_id, start.name, start.timestamp AS start_time, end.timestamp AS end_time + FROM + CUPTI_ACTIVITY_KIND_MARKER AS start INNER JOIN CUPTI_ACTIVITY_KIND_MARKER AS end + ON start.id = end.id + WHERE + start.name != 0 AND end.name = 0 + """ + functions = [] + functions_map = {} + unique = EnforceUnique() + for row in conn.execute(marker_query): + unique.see(row["marker_id"]) + evt = FunctionEvent( + id=row["marker_id"], + node_id=0, # missing a node_id when calling FunctionEvent. 
This is just to ensure + # that pytorch doesn't crash when creating a FunctionEvent() object + name=strings[row["name"]], + start_us=row["start_time"], + end_us=row["end_time"], + thread=0, + ) # TODO: find in sqlite database + functions.append(evt) + functions_map[evt.id] = evt + + # Now, correlate all kernels with FunctionEvents + kernel_query = """ + SELECT + start.id AS marker_id, start.name, start.timestamp, end.timestamp, + runtime._id_ AS runtime_id, runtime.cbid, runtime.start AS runtime_start, runtime.end AS runtime_end, + kernel.start AS kernel_start, kernel.end AS kernel_end, kernel.name AS kernel_name + FROM + CUPTI_ACTIVITY_KIND_MARKER AS start + INNER JOIN CUPTI_ACTIVITY_KIND_MARKER AS end + ON start.id = end.id + INNER JOIN CUPTI_ACTIVITY_KIND_RUNTIME as runtime + ON (start.timestamp < runtime.start AND runtime.end < end.timestamp) + INNER JOIN CUPTI_ACTIVITY_KIND_CONCURRENT_KERNEL AS kernel + ON kernel.correlationId = runtime.correlationId + """ + unique = EnforceUnique() + for row in conn.execute(kernel_query): + unique.see(row["marker_id"], row["runtime_id"]) + # 211 is cudaKernelLaunch for cuda >= 9.2 + assert row["cbid"] == 211 + evt = functions_map[row["marker_id"]] + evt.append_kernel( + row["kernel_name"], 0, row["kernel_end"] - row["kernel_start"] + ) + + functions.sort(key=lambda evt: evt.time_range.start) + return functions + + +class KinetoStepTracker: + """Provides an abstraction for incrementing the step count globally. + + Previously, we only had one place to mark that a step() has occurred + in the program via pytorch profiler step(). We will now add step hooks + in the Optimizer class https://github.com/pytorch/pytorch/issues/88446 + + - This could mean programs that already call profiler.step() every + iteration can end up double incrementing step count. + - If a model uses multiple optimizers we can also have double or more + counting of the step. + + We fix this by adding a layer of abstraction before calling step() + to the kineto library. The idea is to maintain steps per requester in a dict: + + .. code-block:: + + { + "ProfilerStep": 100, # triggered by profiler step() call + "Optimizer1Step": 100, # Optimizer 1 or 2 are just examples, could be SGD, Adam etc + "Optimizer2Step": 100, + } + + To figure out the global step count just take the max of dict values (100). + + If one of the count increments the max will go up. + + .. code-block:: + + { + "ProfilerStep": 100, + "Optimizer1Step": 101, # Optimizer1 got incremented first say + "Optimizer2Step": 100, + } + + Then global step count is 101 + We only call the kineto step() function when global count increments. + + NOTE: Please do not use the KinetoStepTracker in modules beside the Optimizer + for now. The result could be incorrect increments of the step count. + """ + + _current_step = 0 + _step_dict: Dict[str, int] = defaultdict(int) + + @classmethod + def init_step_count(cls, requester: str): + r""" + Initialize for a given requester. + """ + cls._step_dict[requester] = cls._current_step + + @classmethod + def erase_step_count(cls, requester: str) -> bool: + r""" + Remove a given requester. + """ + return cls._step_dict.pop(requester, None) is not None + + @classmethod + def increment_step(cls, requester: str) -> int: + """Increments the step count for the requester. 
+ + Additionally if the max over all step counts has incremented then + trigger the _kineto_step() returns global step count + """ + if requester not in cls._step_dict: + cls.init_step_count(requester) + cls._step_dict[requester] += 1 + + new_step = max(cls._step_dict.values()) + if new_step > cls._current_step: + delta = new_step - cls._current_step + if delta > 1: + warn( + "Profiler step count has increased more than 1 - " + f"current_step = {cls._current_step} step dict = {cls._step_dict}" + ) + for _ in range(0, delta): + _kineto_step() + cls._current_step = new_step + return cls._current_step + + @classmethod + def current_step(cls) -> int: + r""" + Get the latest step for any requester + """ + return cls._current_step diff --git a/venv/lib/python3.10/site-packages/torch/autograd/profiler_legacy.py b/venv/lib/python3.10/site-packages/torch/autograd/profiler_legacy.py new file mode 100644 index 0000000000000000000000000000000000000000..32700ffb1cf317abca6fb236ff831781bae9471a --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/autograd/profiler_legacy.py @@ -0,0 +1,303 @@ +import itertools +from warnings import warn + +import torch +import torch.cuda + +from torch.autograd import ( + _disable_profiler_legacy, + _enable_profiler_legacy, + DeviceType, + ProfilerConfig, + ProfilerState, +) +from torch.autograd.profiler_util import ( + _filter_name, + _filter_stack_entry, + _rewrite_name, + EventList, + FunctionEvent, + MEMORY_EVENT_NAME, +) + +__all__ = ["profile"] + + +class profile: + """DEPRECATED: use torch.profiler instead.""" + + def __init__( + self, + enabled=True, + *, + use_cuda=False, + record_shapes=False, + with_flops=False, + profile_memory=False, + with_stack=False, + with_modules=False, + ): + self.enabled: bool = enabled + if not self.enabled: + return + self.use_cuda = use_cuda + self.function_events = None + self.entered = False + self.record_shapes = record_shapes + self.with_flops = with_flops + self.record_shapes |= self.with_flops + self.profile_memory = profile_memory + self.with_stack = with_stack + self.with_modules = with_modules + + if self.use_cuda and not torch.cuda.is_available(): + warn("CUDA is not available, disabling CUDA profiling") + self.use_cuda = False + + if self.use_cuda: + self.profiler_kind = ProfilerState.CUDA + else: + self.profiler_kind = ProfilerState.CPU + + def config(self): + return ProfilerConfig( + self.profiler_kind, + self.record_shapes, + self.profile_memory, + self.with_stack, + self.with_flops, + self.with_modules, + # avoid exposing _ExperimentalConfig this in legacy public API + torch._C._profiler._ExperimentalConfig(), + ) + + def __enter__(self): + if not self.enabled: + return + if self.entered: + raise RuntimeError("Profiler context manager is not reentrant") + self.entered = True + self._start_trace() + return self + + def _start_trace(self): + _enable_profiler_legacy(self.config()) + + def __exit__(self, exc_type, exc_val, exc_tb): + if not self.enabled: + return + if self.use_cuda: + torch.cuda.synchronize() + + records = _disable_profiler_legacy() + parsed_results = _parse_legacy_records(records) + self.function_events = EventList( + parsed_results, + use_cuda=self.use_cuda, + profile_memory=self.profile_memory, + with_flops=self.with_flops, + ) + self.function_events._build_tree() + return False + + def __repr__(self): + if self.function_events is None: + return "" + return repr(self.function_events) + + def __str__(self): + if self.function_events is None: + return "" + return str(self.function_events) + + def 
_check_finish(self): + if self.function_events is None: + raise RuntimeError("Profiler didn't finish running") + + def table( + self, + sort_by=None, + row_limit=100, + max_src_column_width=75, + max_name_column_width=55, + max_shapes_column_width=80, + header=None, + top_level_events_only=False, + ): + self._check_finish() + assert self.function_events is not None + return self.function_events.table( + sort_by=sort_by, + row_limit=row_limit, + max_src_column_width=max_src_column_width, + max_name_column_width=max_name_column_width, + max_shapes_column_width=max_shapes_column_width, + header=header, + top_level_events_only=top_level_events_only, + ) + + table.__doc__ = EventList.table.__doc__ + + def export_chrome_trace(self, path): + self._check_finish() + assert self.function_events is not None + return self.function_events.export_chrome_trace(path) + + export_chrome_trace.__doc__ = EventList.export_chrome_trace.__doc__ + + def export_stacks(self, path: str, metric: str = "self_cpu_time_total"): + self._check_finish() + assert self.function_events is not None, "Expected profiling results" + assert self.with_stack, "export_stacks() requires with_stack=True" + return self.function_events.export_stacks(path, metric) + + def key_averages(self, group_by_input_shape=False, group_by_stack_n=0): + self._check_finish() + assert self.function_events is not None, "Expected profiling results" + return self.function_events.key_averages(group_by_input_shape, group_by_stack_n) + + key_averages.__doc__ = EventList.key_averages.__doc__ + + def total_average(self): + self._check_finish() + assert self.function_events is not None, "Expected profiling results" + return self.function_events.total_average() + + total_average.__doc__ = EventList.total_average.__doc__ + + @property + def self_cpu_time_total(self): + """Return CPU time as the sum of self times across all events.""" + self._check_finish() + assert self.function_events is not None + return self.function_events.self_cpu_time_total + + +def _parse_legacy_records(thread_records): + def _get_record_key(record): + """Return a tuple for correlating start and end records in `_parse_legacy_records`.""" + return (record.handle(), record.node_id()) + + next_id = 0 + start_record = None + functions = [] + record_stack = [] + + # '__start_profile' is not guaranteed to be first, so we must find it here + for record in itertools.chain.from_iterable(thread_records): + name = record.name() + if start_record is None and name == "__start_profile": + start_record = record + + assert start_record is not None and not start_record.is_remote() + + for thread_record_list in thread_records: + # accumulated memory allocations per handle + cpu_memory_allocs = {} + cuda_memory_allocs = {} + # ranges per handle + range_starts = {} + + filtered_handles = set() + prev_record = None + for record in thread_record_list: + record_key = _get_record_key(record) + if _filter_name(record.name()) or record_key in filtered_handles: + filtered_handles.add(record_key) + continue + + if record.kind() == "push": + # workaround to reduce double logging from operator + # wrappers and redispatch + if prev_record is not None: + duplicate = ( + prev_record.name() == record.name() + and prev_record.kind() == record.kind() + and prev_record.node_id() == record.node_id() + ) + if duplicate: + filtered_handles.add(record_key) + continue + + range_starts[record_key] = record + cpu_memory_allocs[record_key] = 0 + cuda_memory_allocs[record_key] = 0 + elif record.kind() == "pop": + assert ( + record_key 
in range_starts + ), f"""Expected record with key {record_key} to exist in range_starts. + This means that the pop event did not have a corresponding push.""" + + start = range_starts[record_key] + + cpu_memory_usage = cpu_memory_allocs[record_key] + cuda_memory_usage = cuda_memory_allocs[record_key] + is_async = start.is_async() or (start.thread_id() != record.thread_id()) + is_remote_event = record.is_remote() + start_flops = start.flops() + + fe = FunctionEvent( + id=record.handle(), + node_id=record.node_id(), + name=_rewrite_name(name=start.name(), with_wildcard=True), + trace_name=_rewrite_name(name=start.name(), with_wildcard=False), + thread=start.thread_id(), + start_us=start_record.cpu_elapsed_us(start), + end_us=start_record.cpu_elapsed_us(record), + fwd_thread=start.fwd_thread_id(), + input_shapes=start.shapes(), + stack=[ + entry for entry in start.stack() if _filter_stack_entry(entry) + ], + scope=start.scope(), + cpu_memory_usage=cpu_memory_usage, + cuda_memory_usage=cuda_memory_usage, + is_async=is_async, + is_remote=is_remote_event, + sequence_nr=start.sequence_nr(), + device_type=DeviceType.CPU, + is_legacy=True, + flops=start_flops, + ) + # note: async events have only cpu total time + if not is_async and start.has_cuda(): + duration = start.cuda_elapsed_us(record) + if duration > 0: + fe.append_kernel(start.name(), start.device(), duration) + functions.append(fe) + del range_starts[record_key] + del cpu_memory_allocs[record_key] + del cuda_memory_allocs[record_key] + elif record.kind() == "memory_alloc": + num_open_handles_cpu = len(cpu_memory_allocs) + num_open_handles_cuda = len(cuda_memory_allocs) + assert num_open_handles_cpu == num_open_handles_cuda + for handle in cpu_memory_allocs.keys(): + cpu_memory_allocs[handle] += record.cpu_memory_usage() + for handle in cuda_memory_allocs.keys(): + cuda_memory_allocs[handle] += record.cuda_memory_usage() + if num_open_handles_cpu == 0: + # output event as a top-level memory event + fe = FunctionEvent( + id=0, + name=MEMORY_EVENT_NAME, + trace_name=None, + thread=0, + start_us=0, + end_us=0, + stack=[], + cpu_memory_usage=record.cpu_memory_usage(), + cuda_memory_usage=record.cuda_memory_usage(), + is_legacy=True, + ) + functions.append(fe) + prev_record = record + + # Sort functions by start time then by end time ascending. + # This ensures that--in the case of nested events which + # have the same start time (which may happen due to the + # granularity of the given clock tick)--we always show + # the outermost nested call first. 
This adds stability + # in how FunctionEvents appear + functions.sort(key=lambda evt: [evt.time_range.start, -evt.time_range.end]) + return functions diff --git a/venv/lib/python3.10/site-packages/torch/autograd/profiler_util.py b/venv/lib/python3.10/site-packages/torch/autograd/profiler_util.py new file mode 100644 index 0000000000000000000000000000000000000000..0d7388b1316ce99b36462cfb7e29bd9da1c3ad65 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/autograd/profiler_util.py @@ -0,0 +1,1178 @@ +import bisect +import itertools +import math + +from collections import defaultdict, namedtuple +from operator import attrgetter + +from typing import Any, Dict, List, Optional, Tuple + +import torch +from torch.autograd import DeviceType + +__all__ = [ + "EventList", + "FormattedTimesMixin", + "Interval", + "Kernel", + "FunctionEvent", + "FunctionEventAvg", + "StringTable", + "MemRecordsAcc", +] + + +class EventList(list): + """A list of Events (for pretty printing).""" + + def __init__(self, *args, **kwargs): + use_cuda = kwargs.pop("use_cuda", True) + use_device = kwargs.pop("use_device", None) + profile_memory = kwargs.pop("profile_memory", False) + with_flops = kwargs.pop("with_flops", False) + super().__init__(*args, **kwargs) + self._use_cuda = use_cuda + self._use_device = use_device + self._profile_memory = profile_memory + self._tree_built = False + self._with_flops = with_flops + + def _build_tree(self): + self._populate_cpu_children() + self._remove_dup_nodes() + self._set_backward_stacktraces() + self._tree_built = True + + def __str__(self): + return self.table() + + def _remove_dup_nodes(self): + while True: + to_delete = set() + for idx in range(len(self)): + if ( + self[idx].cpu_parent is not None + and self[idx].cpu_parent.name == self[idx].name + and len(self[idx].cpu_parent.cpu_children) == 1 + ): + self[idx].cpu_parent.cpu_children = self[idx].cpu_children + self[idx].cpu_parent.kernels = self[idx].kernels # lift kernels up + for ch in self[idx].cpu_children: + ch.cpu_parent = self[idx].cpu_parent + to_delete.add(idx) + if len(to_delete) == 0: + break + new_evts = [ev for ind, ev in enumerate(self) if ind not in to_delete] + self.clear() + self.extend(new_evts) + + def _populate_cpu_children(self): + """Populate child events into each underlying FunctionEvent object. + + One event is a child of another if [s1, e1) is inside [s2, e2). Where + s1 and e1 would be start and end of the child event's interval. And + s2 and e2 start and end of the parent event's interval + + Example: In event list [[0, 10], [1, 3], [3, 4]] would have make [0, 10] + be a parent of two other intervals. + + If for any reason two intervals intersect only partially, this function + will not record a parent child relationship between then. + """ + # Some events can be async (i.e. start and end on different threads), + # since it's generally undefined how to attribute children ranges to + # async ranges, we do not use them when calculating nested ranges and stats + sync_events = [ + evt + for evt in self + if not evt.is_async and evt.device_type == DeviceType.CPU + ] + events = sorted( + sync_events, + key=attrgetter("thread"), + ) + # Group by both thread and node_id, so that events that happen to have + # the same thread_id but are from different nodes aren't incorrectly + # grouped together. + threads = itertools.groupby( + events, key=lambda event: (event.thread, event.node_id) + ) + + # For each thread we keep a stack of current nested parents. 
+ # We maintain the invariant that each interval is a subset of all other + # intervals lower in the stack. + # + # First we sort the intervals by their start time. Then we iterate over them. + # Every time we see a new interval we remove several parents from + # the top until we restore the invariant. Then parent child relationship + # if recorded if the stack is not empty. + # Finally we add new interval to the list + # + # Algorithm has O(N * log(N)) complexity where N is number of + # intervals + for thread_id, thread_events in threads: + thread_events_ = sorted( + thread_events, + key=lambda event: [event.time_range.start, -event.time_range.end], + ) + current_events: List[FunctionEvent] = [] + cur_end = 0 + for event in thread_events_: + while len(current_events) > 0: + parent = current_events[-1] + if ( + event.time_range.start >= parent.time_range.end + or event.time_range.end > parent.time_range.end + ): + # this can't be a parent + current_events.pop() + else: + parent.append_cpu_child(event) + assert ( + event.cpu_parent is None + ), f"There is already a CPU parent event for {event.key}" + event.set_cpu_parent(parent) + break + + current_events.append(event) + + def _set_backward_stacktraces(self): + def bw_parent(evt): + if evt is None: + return None + elif evt.scope == 1: # BACKWARD_FUNCTION + return evt + else: + return bw_parent(evt.cpu_parent) + + fwd_stacks = {} + for evt in self: + if bw_parent(evt) is None and evt.stack is not None: + t = (evt.sequence_nr, evt.thread) + if t not in fwd_stacks: + fwd_stacks[t] = evt.stack + + for evt in self: + p = bw_parent(evt) + if p is not None: + assert p.fwd_thread is not None + t = (p.sequence_nr, p.fwd_thread) + if t in fwd_stacks: + evt.stack = fwd_stacks[t] + else: + evt.stack = [] + + @property + def self_cpu_time_total(self): + return sum([event.self_cpu_time_total for event in self]) + + def table( + self, + sort_by=None, + row_limit=100, + max_src_column_width=75, + max_name_column_width=55, + max_shapes_column_width=80, + header=None, + top_level_events_only=False, + ): + """Print an EventList as a nicely formatted table. + + Args: + sort_by (str, optional): Attribute used to sort entries. By default + they are printed in the same order as they were registered. + Valid keys include: ``cpu_time``, ``cuda_time``, ``cpu_time_total``, + ``cuda_time_total``, ``cpu_memory_usage``, ``cuda_memory_usage``, + ``self_cpu_memory_usage``, ``self_cuda_memory_usage``, ``count``. + top_level_events_only(bool, optional): Boolean flag to determine the + selection of events to display. If true, the profiler will only + display events at top level like top-level invocation of python + `lstm`, python `add` or other functions, nested events like low-level + cpu/cuda ops events are omitted for profiler result readability. + + Returns: + A string containing the table. + """ + return _build_table( + self, + sort_by=sort_by, + row_limit=row_limit, + max_src_column_width=max_src_column_width, + max_name_column_width=max_name_column_width, + max_shapes_column_width=max_shapes_column_width, + header=header, + profile_memory=self._profile_memory, + with_flops=self._with_flops, + top_level_events_only=top_level_events_only, + ) + + def export_chrome_trace(self, path): + """Export an EventList as a Chrome tracing tools file. + + The checkpoint can be later loaded and inspected under ``chrome://tracing`` URL. + + Args: + path (str): Path where the trace will be written. 
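        Example (a minimal sketch; ``prof`` is assumed to be a finished
        ``torch.autograd.profiler.profile`` run and ``trace.json`` an arbitrary
        output path)::

            >>> # xdoctest: +SKIP
            >>> prof.export_chrome_trace("trace.json")
            >>> # open chrome://tracing in a Chromium-based browser and load trace.json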
+ """ + import os + + device_name = "cuda" if not self._use_device else self._use_device + with open(path, "w") as f: + chrome_events = [] + next_id = 0 + # Use file IO over using json.dump since JSON dumping is very slow and + # this technique is proven to give a 4x speedup. + f.write("[") + for evt in self: + if evt.trace_name is None: + continue + f.write( + '{{"name": "{}", ' + '"ph": "X", ' + '"ts": {}, ' + '"dur": {}, ' + '"tid": {}, ' + '"pid": "CPU functions", ' + '"args": {{}}}}, '.format( + evt.trace_name, + evt.time_range.start, + evt.time_range.elapsed_us(), + evt.thread + if not evt.is_remote + else f'" node_id:{evt.node_id}, thread_id:{evt.thread} "', + ) + ) + for k in evt.kernels: + # 's' and 'f' draw Flow arrows from + # the CPU launch to the GPU kernel + f.write( + f'{{"name": "{evt.trace_name}", ' + '"ph": "s", ' + f'"ts": {evt.time_range.start}, ' + f'"tid": {evt.thread}, ' + '"pid": "CPU functions", ' + f'"id": {next_id}, ' + f'"cat": "cpu_to_{device_name}", ' + '"args": {}}, ' + ) + # Note: use torch.profiler to get device kernel trace + next_id += 1 + if len(self) > 0: + # remove trailing whitespace and comma + f.seek(f.tell() - 2, os.SEEK_SET) + f.truncate() + f.write("]") + + def supported_export_stacks_metrics(self): + return [ + "self_cpu_time_total", + "self_cuda_time_total", + "self_privateuse1_time_total", + ] + + def export_stacks(self, path: str, metric: str): + if metric not in self.supported_export_stacks_metrics(): + raise ValueError( + "metric should be one of: " + + str(self.supported_export_stacks_metrics()) + ) + translate_table = str.maketrans(" ;\t\n", "____") + with open(path, "w") as f: + for evt in self: + if evt.stack and len(evt.stack) > 0: + metric_value = getattr(evt, metric) + if int(metric_value) > 0: + stack_str = "" + for entry in reversed(evt.stack): + stack_str += entry.translate(translate_table) + stack_str += ";" + stack_str = stack_str[:-1] + " " + str(int(metric_value)) + f.write(stack_str + "\n") + + def key_averages(self, group_by_input_shapes=False, group_by_stack_n=0): + """Averages all function events over their keys. + + Args: + group_by_input_shapes: group entries by + (event name, input shapes) rather than just event name. + This is useful to see which input shapes contribute to the runtime + the most and may help with size-specific optimizations or + choosing the best candidates for quantization (aka fitting a roof line) + + group_by_stack_n: group by top n stack trace entries + + Returns: + An EventList containing FunctionEventAvg objects. + """ + assert self._tree_built + stats: Dict[Tuple[str, ...], FunctionEventAvg] = defaultdict(FunctionEventAvg) + + def get_key(event, group_by_input_shapes, group_by_stack_n) -> Tuple[str, ...]: + key = [ + str(event.key), + str(event.node_id), + str(event.device_type), + str(event.is_legacy), + ] + if group_by_input_shapes: + key.append(str(event.input_shapes)) + if group_by_stack_n > 0: + key += event.stack[:group_by_stack_n] + return tuple(key) + + for evt in self: + stats[get_key(evt, group_by_input_shapes, group_by_stack_n)].add(evt) + + avg_list = EventList( + stats.values(), + use_cuda=self._use_cuda, + use_device=self._use_device, + profile_memory=self._profile_memory, + with_flops=self._with_flops, + ) + for evt in avg_list: + evt.stack = evt.stack[:group_by_stack_n] + if not group_by_input_shapes: + evt.input_shapes = "" + return avg_list + + def total_average(self): + """Averages all events. + + Returns: + A FunctionEventAvg object. 
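        Example (illustrative; ``events`` is assumed to be any populated ``EventList``)::

            >>> # xdoctest: +SKIP
            >>> avg = events.total_average()
            >>> print(avg.key, avg.cpu_time_total)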
+ """ + total_stat = FunctionEventAvg() + for evt in self: + total_stat += evt + total_stat.key = None + total_stat.key = "Total" + return total_stat + + +def _format_time(time_us): + """Define how to format time in FunctionEvent.""" + US_IN_SECOND = 1000.0 * 1000.0 + US_IN_MS = 1000.0 + if time_us >= US_IN_SECOND: + return f"{time_us / US_IN_SECOND:.3f}s" + if time_us >= US_IN_MS: + return f"{time_us / US_IN_MS:.3f}ms" + return f"{time_us:.3f}us" + + +def _format_time_share(time_us, total_time_us): + """Define how to format time in FunctionEvent.""" + if total_time_us == 0: + assert time_us == 0, f"Expected time_us == 0 but got {time_us}" + return "NaN" + return f"{time_us * 100.0 / total_time_us:.2f}%" + + +def _format_memory(nbytes): + """Return a formatted memory size string.""" + KB = 1024 + MB = 1024 * KB + GB = 1024 * MB + if abs(nbytes) >= GB: + return f"{nbytes * 1.0 / GB:.2f} Gb" + elif abs(nbytes) >= MB: + return f"{nbytes * 1.0 / MB:.2f} Mb" + elif abs(nbytes) >= KB: + return f"{nbytes * 1.0 / KB:.2f} Kb" + else: + return str(nbytes) + " b" + + +def _attr_formatter(name): + return property(lambda self: _format_time(getattr(self, name))) + + +class FormattedTimesMixin: + """Helpers for FunctionEvent and FunctionEventAvg. + + The subclass should define `*_time_total` and `count` attributes. + """ + + cpu_time_str = _attr_formatter("cpu_time") + cuda_time_str = _attr_formatter("cuda_time") + privateuse1_time_str = _attr_formatter("privateuse1_time") + cpu_time_total_str = _attr_formatter("cpu_time_total") + cuda_time_total_str = _attr_formatter("cuda_time_total") + privateuse1_time_total_str = _attr_formatter("privateuse1_time_total") + self_cpu_time_total_str = _attr_formatter("self_cpu_time_total") + self_cuda_time_total_str = _attr_formatter("self_cuda_time_total") + self_privateuse1_time_total_str = _attr_formatter("self_privateuse1_time_total") + + @property + def cpu_time(self): + return 0.0 if self.count == 0 else 1.0 * self.cpu_time_total / self.count # type: ignore[attr-defined] + + @property + def cuda_time(self): + return 0.0 if self.count == 0 else 1.0 * self.cuda_time_total / self.count # type: ignore[attr-defined] + + @property + def privateuse1_time(self): + return 0.0 if self.count == 0 else 1.0 * self.privateuse1_time_total / self.count # type: ignore[attr-defined] + + +class Interval: + def __init__(self, start, end): + self.start = start + self.end = end + + def elapsed_us(self): + r""" + Returns the length of the interval + """ + return self.end - self.start + + +Kernel = namedtuple("Kernel", ["name", "device", "duration"]) + + +class FunctionEvent(FormattedTimesMixin): + """Profiling information about a single function.""" + + def __init__( + self, + id, + name, + thread, + start_us, + end_us, + fwd_thread=None, + input_shapes=None, + stack=None, + scope=0, + use_device=None, + cpu_memory_usage=0, + cuda_memory_usage=0, + privateuse1_memory_usage=0, + is_async=False, + is_remote=False, + sequence_nr=-1, + node_id=-1, + device_type=DeviceType.CPU, + device_index=0, + is_legacy=False, + flops=None, + trace_name=None, + concrete_inputs=None, + ): + self.id: int = id + self.node_id: int = node_id + self.name: str = name + self.trace_name: str = trace_name + self.time_range: Interval = Interval(start_us, end_us) + self.thread: int = thread + self.fwd_thread: Optional[int] = fwd_thread + self.kernels: List[Kernel] = [] + self.count: int = 1 + self.cpu_children: List[FunctionEvent] = [] + self.cpu_parent: Optional[FunctionEvent] = None + self.input_shapes: Tuple[int, 
...] = input_shapes + self.concrete_inputs: List[Any] = concrete_inputs + self.stack: List = stack + self.scope: int = scope + self.use_device: Optional[str] = use_device + self.cpu_memory_usage: int = cpu_memory_usage + self.cuda_memory_usage: int = cuda_memory_usage + self.privateuse1_memory_usage: int = privateuse1_memory_usage + self.is_async: bool = is_async + self.is_remote: bool = is_remote + self.sequence_nr: int = sequence_nr + self.device_type: DeviceType = device_type + self.device_index: int = device_index + self.is_legacy: bool = is_legacy + self.flops: Optional[int] = flops + + def append_kernel(self, name, device, duration): + assert self.device_type == DeviceType.CPU + self.kernels.append(Kernel(name, device, duration)) + + def append_cpu_child(self, child): + """Append a CPU child of type FunctionEvent. + + One is supposed to append only direct children to the event to have + correct self cpu time being reported. + """ + assert self.device_type == DeviceType.CPU + assert isinstance(child, FunctionEvent) + assert child.device_type == DeviceType.CPU + self.cpu_children.append(child) + + def set_cpu_parent(self, parent): + """Set the immediate CPU parent of type FunctionEvent. + + One profiling FunctionEvent should have only one CPU parent such that + the child's range interval is completely inside the parent's. We use + this connection to determine the event is from top-level op or not. + """ + assert self.device_type == DeviceType.CPU + assert isinstance(parent, FunctionEvent) + assert parent.device_type == DeviceType.CPU + self.cpu_parent = parent + + # Note: async events don't have children, are not used when computing 'self' + # metrics of other events, have only total cpu time + @property + def self_cpu_memory_usage(self): + if self.is_async or self.device_type != DeviceType.CPU: + return 0 + return self.cpu_memory_usage - sum( + [child.cpu_memory_usage for child in self.cpu_children] + ) + + @property + def self_cuda_memory_usage(self): + if self.is_async or self.device_type != DeviceType.CPU: + return 0 + return self.cuda_memory_usage - sum( + [child.cuda_memory_usage for child in self.cpu_children] + ) + + @property + def self_privateuse1_memory_usage(self): + if self.is_async or self.device_type != DeviceType.CPU: + return 0 + return self.privateuse1_memory_usage - sum( + [child.privateuse1_memory_usage for child in self.cpu_children] + ) + + @property + def self_cpu_time_total(self): + if self.is_async or self.device_type != DeviceType.CPU: + return 0 + return self.cpu_time_total - sum( + [child.cpu_time_total for child in self.cpu_children] + ) + + @property + def cuda_time_total(self): + if self.is_async or self.use_device: + return 0 + if self.device_type == DeviceType.CPU: + if not self.is_legacy: + # account for the kernels in the children ops + return sum(kinfo.duration for kinfo in self.kernels) + sum( + ch.cuda_time_total for ch in self.cpu_children + ) + else: + # each legacy cpu events has a single (fake) kernel + return sum(kinfo.duration for kinfo in self.kernels) + else: + assert self.device_type == DeviceType.CUDA + return self.time_range.elapsed_us() + + @property + def self_cuda_time_total(self): + if self.is_async or self.use_device: + return 0 + if self.device_type == DeviceType.CPU: + return self.cuda_time_total - sum( + [child.cuda_time_total for child in self.cpu_children] + ) + else: + assert self.device_type == DeviceType.CUDA + return self.cuda_time_total + + @property + def cpu_time_total(self): + if self.device_type == DeviceType.CPU: + 
return self.time_range.elapsed_us() + else: + return 0 + + @property + def self_privateuse1_time_total(self): + if self.is_async or not self.use_device: + return 0 + if self.device_type == DeviceType.CPU: + return self.privateuse1_time_total - sum( + [child.privateuse1_time_total for child in self.cpu_children] + ) + else: + assert self.device_type == DeviceType.CUDA + return self.privateuse1_time_total + + @property + def privateuse1_time_total(self): + if self.is_async or not self.use_device: + return 0 + if self.device_type == DeviceType.CPU: + if not self.is_legacy: + # account for the kernels in the children ops + return sum(kinfo.duration for kinfo in self.kernels) + sum( + ch.privateuse1_time_total for ch in self.cpu_children + ) + else: + # each legacy cpu events has a single (fake) kernel + return sum(kinfo.duration for kinfo in self.kernels) + else: + assert self.device_type == DeviceType.PrivateUse1 + return self.time_range.elapsed_us() + + @property + def key(self): + return self.name + + def __repr__(self): + device_name = "cuda" if not self.use_device else self.use_device + device_time = ( + self.cuda_time_str if not self.use_device else self.privateuse1_time_str + ) + device_memory_usage = ( + self.cuda_memory_usage + if not self.use_device + else self.privateuse1_memory_usage + ) + return ( + "".format( + self.id, + self.name, + self.device_type, + self.node_id, + self.cpu_time_str, + self.time_range.start, + self.time_range.end, + str([child.id for child in self.cpu_children]), + device_name, + device_time, + self.name, + self.thread, + str(self.input_shapes), + self.cpu_memory_usage, + device_name, + device_memory_usage, + self.is_async, + self.is_remote, + self.sequence_nr, + self.is_legacy, + ) + ) + + +class FunctionEventAvg(FormattedTimesMixin): + """Used to average stats over multiple FunctionEvent objects.""" + + def __init__(self): + self.key: Optional[str] = None + self.count: int = 0 + self.node_id: int = 0 + self.is_async: bool = False + self.is_remote: bool = False + self.use_device: Optional[str] = None + self.cpu_time_total: int = 0 + self.cuda_time_total: int = 0 + self.privateuse1_time_total: int = 0 + self.self_cpu_time_total: int = 0 + self.self_cuda_time_total: int = 0 + self.self_privateuse1_time_total: int = 0 + self.input_shapes: Optional[List[List[int]]] = None + self.stack: Optional[List] = None + self.scope: Optional[int] = None + self.cpu_memory_usage: int = 0 + self.cuda_memory_usage: int = 0 + self.privateuse1_memory_usage: int = 0 + self.self_cpu_memory_usage: int = 0 + self.self_cuda_memory_usage: int = 0 + self.self_privateuse1_memory_usage: int = 0 + self.cpu_children: Optional[List[FunctionEvent]] = None + self.cpu_parent: Optional[FunctionEvent] = None + self.device_type: DeviceType = DeviceType.CPU + self.is_legacy: bool = False + self.flops: int = 0 + + def add(self, other): + if self.key is None: + # First function being recorded as part of FunctionEventAvg, propagate + # fields. 
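+            # Subsequent calls skip this block and only accumulate the totals and
+            # `count` below; per-call averages such as `cpu_time` are then derived
+            # by FormattedTimesMixin as `cpu_time_total / count`.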
+ self.key = other.key + self.node_id = other.node_id + self.is_async = other.is_async + self.is_remote = other.is_remote + self.cpu_parent = other.cpu_parent + self.cpu_children = other.cpu_children + + self.input_shapes = other.input_shapes + self.stack = other.stack + self.scope = other.scope + self.device_type = other.device_type + self.is_legacy = other.is_legacy + self.use_device = other.use_device + + assert isinstance(other, (FunctionEvent, FunctionEventAvg)) + assert other.key == self.key + self.cpu_time_total += other.cpu_time_total + self.cuda_time_total += other.cuda_time_total + self.privateuse1_time_total += other.privateuse1_time_total + self.self_cpu_time_total += other.self_cpu_time_total + self.self_cuda_time_total += other.self_cuda_time_total + self.self_privateuse1_time_total += other.self_privateuse1_time_total + self.cpu_memory_usage += other.cpu_memory_usage + self.cuda_memory_usage += other.cuda_memory_usage + self.privateuse1_memory_usage += other.privateuse1_memory_usage + self.self_cpu_memory_usage += other.self_cpu_memory_usage + self.self_cuda_memory_usage += other.self_cuda_memory_usage + self.self_privateuse1_memory_usage += other.self_privateuse1_memory_usage + self.count += other.count + if self.flops is None: + self.flops = other.flops + elif other.flops is not None: + self.flops += other.flops + return self + + def __iadd__(self, other): + return self.add(other) + + def __repr__(self): + device_name = "cuda" if not self.use_device else self.use_device + self_device_time = ( + self.self_cuda_time_total_str + if not self.use_device + else self.self_privateuse1_time_total_str + ) + device_time = ( + self.cuda_time_str if not self.use_device else self.privateuse1_time_str + ) + device_memory = ( + self.cuda_memory_usage + if not self.use_device + else self.privateuse1_memory_usage + ) + return ( + "".format( + self.key, + self.self_cpu_time_total_str, + self.cpu_time_str, + device_name, + self_device_time, + device_name, + device_time, + str(self.input_shapes), + self.cpu_memory_usage, + device_name, + device_memory, + ) + ) + + +class StringTable(defaultdict): + def __missing__(self, key): + # manage cases like 't' (demangled to 'unsigned short') separately, + # for now simply check the length to avoid unexpected results for + # the short sequences + self[key] = torch._C._demangle(key) if len(key) > 1 else key + return self[key] + + +class MemRecordsAcc: + """Acceleration structure for accessing mem_records in interval.""" + + def __init__(self, mem_records): + self._mem_records = mem_records + self._start_uses: List[int] = [] + self._indices: List[int] = [] + if len(mem_records) > 0: + tmp = sorted([(r[0].start_us(), i) for i, r in enumerate(mem_records)]) + self._start_uses, self._indices = zip(*tmp) # type: ignore[assignment] + + def in_interval(self, start_us, end_us): + r""" + Return all records in the given interval + """ + start_idx = bisect.bisect_left(self._start_uses, start_us) + end_idx = bisect.bisect_right(self._start_uses, end_us) + for i in range(start_idx, end_idx): + yield self._mem_records[self._indices[i]] + + +def _filter_stack_entry(entry): + filtered_entries = [ + ("autograd/__init__", "_make_grads"), + ("autograd/__init__", "backward"), + ("torch/tensor", "backward"), + ("_internal/common_utils", "prof_callable"), + ("_internal/common_utils", "prof_func_call"), + ("_internal/common_utils", "prof_meth_call"), + ] + return all(not (f[0] in entry and f[1] in entry) for f in filtered_entries) + + +MEMORY_EVENT_NAME = "[memory]" 
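+# Like MEMORY_EVENT_NAME above, the out-of-memory marker below is a
+# profiler-generated marker name; `_filter_name` below filters both out as
+# utility events.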
+OUT_OF_MEMORY_EVENT_NAME = "[OutOfMemory]" + + +def _filter_name(name): + # ignoring the following utility ops + filtered_out_names = [ + MEMORY_EVENT_NAME, # used only for the top-level memory events + OUT_OF_MEMORY_EVENT_NAME, + "profiler::_record_function_enter", + "profiler::_record_function_enter_new", + "profiler::_record_function_exit", + "aten::is_leaf", + "aten::output_nr", + "aten::_version", + ] + return name in filtered_out_names + + +# Demangles and optionally rewrites the provided event name, +# with_wildcard - whether to replace certain numbered event names +# with a wildcard name to aggregate them together in the profiler table +# output +def _rewrite_name(name, with_wildcard=False): + string_table = StringTable() + name = string_table[name] + if with_wildcard: + if name.startswith("ProfilerStep#"): + name = "ProfilerStep*" + return name + + +def _build_table( + events, + sort_by=None, + header=None, + row_limit=100, + max_src_column_width=75, + max_name_column_width=55, + max_shapes_column_width=80, + with_flops=False, + profile_memory=False, + top_level_events_only=False, +): + """Print a summary of events (which can be a list of FunctionEvent or FunctionEventAvg).""" + if len(events) == 0: + return "" + + has_cuda_time = any(event.self_cuda_time_total > 0 for event in events) + has_cuda_mem = any(event.self_cuda_memory_usage > 0 for event in events) + has_privateuse1_time = any( + event.self_privateuse1_time_total > 0 for event in events + ) + has_privateuse1_mem = any( + event.self_privateuse1_memory_usage > 0 for event in events + ) + use_device = events[0].use_device + if not use_device and (has_privateuse1_mem or has_privateuse1_time): + raise RuntimeError( + "use_device is None, but there is private device performance data." 
+ ) + + has_input_shapes = any( + (event.input_shapes is not None and len(event.input_shapes) > 0) + for event in events + ) + + if sort_by is not None: + events = EventList( + sorted(events, key=lambda evt: getattr(evt, sort_by), reverse=True), + use_cuda=has_cuda_time, + use_device=use_device, + profile_memory=profile_memory, + with_flops=with_flops, + ) + + name_column_width = max([len(evt.key) for evt in events]) + 4 + if max_name_column_width is not None: + name_column_width = min(name_column_width, max_name_column_width) + + shapes_column_width = max([len(str(evt.input_shapes)) for evt in events]) + 4 + if max_shapes_column_width is not None: + shapes_column_width = min(shapes_column_width, max_shapes_column_width) + + DEFAULT_COLUMN_WIDTH = 12 + flops_column_width = DEFAULT_COLUMN_WIDTH + + src_column_width = None + stacks = [] + for evt in events: + if evt.stack is not None and len(evt.stack) > 0: + stacks.append(evt.stack) + has_stack = len(stacks) > 0 + if has_stack: + src_column_width = ( + max([max([len(entry) for entry in stack]) for stack in stacks]) + 4 + ) + if max_src_column_width is not None: + src_column_width = min(src_column_width, max_src_column_width) + + headers = [ + "Name", + "Self CPU %", + "Self CPU", + "CPU total %", + "CPU total", + "CPU time avg", + ] + if has_cuda_time: + headers.extend( + [ + "Self CUDA", + "Self CUDA %", + "CUDA total", + "CUDA time avg", + ] + ) + if has_privateuse1_time: + privateuse1 = use_device.upper() + headers.extend( + [ + f"Self {privateuse1}", + f"Self {privateuse1} %", + f"{privateuse1} total", + f"{privateuse1} time avg", + ] + ) + if profile_memory: + headers.extend( + [ + "CPU Mem", + "Self CPU Mem", + ] + ) + if has_cuda_mem: + headers.extend( + [ + "CUDA Mem", + "Self CUDA Mem", + ] + ) + if has_privateuse1_mem: + privateuse1 = use_device.upper() + headers.extend( + [ + f"{privateuse1} Mem", + f"Self {privateuse1} Mem", + ] + ) + headers.append("# of Calls") + # Only append Node ID if any event has a valid (>= 0) Node ID + append_node_id = any(evt.node_id != -1 for evt in events) + if append_node_id: + headers.append("Node ID") + + # Have to use a list because nonlocal is Py3 only... 
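+    # `add_column` (defined below) mutates the single element of these lists in
+    # place (string concatenation) rather than rebinding local variables, building
+    # up the row format string, the header separator and the total line length one
+    # column at a time.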
+ SPACING_SIZE = 2 + row_format_lst = [""] + header_sep_lst = [""] + line_length_lst = [-SPACING_SIZE] + MAX_STACK_ENTRY = 5 + + def add_column(padding, text_dir=">"): + row_format_lst[0] += ( + "{: " + text_dir + str(padding) + "}" + (" " * SPACING_SIZE) + ) + header_sep_lst[0] += "-" * padding + (" " * SPACING_SIZE) + line_length_lst[0] += padding + SPACING_SIZE + + def auto_scale_flops(flops): + flop_headers = [ + "FLOPs", + "KFLOPs", + "MFLOPs", + "GFLOPs", + "TFLOPs", + "PFLOPs", + ] + assert flops > 0 + log_flops = max(0, min(math.log10(flops) / 3, float(len(flop_headers) - 1))) + assert log_flops >= 0 and log_flops < len(flop_headers) + return (pow(10, (math.floor(log_flops) * -3.0)), flop_headers[int(log_flops)]) + + add_column(name_column_width) + for _ in headers[1:]: + add_column(DEFAULT_COLUMN_WIDTH) + + if has_input_shapes: + headers.append("Input Shapes") + add_column(shapes_column_width) + + if has_stack: + headers.append("Source Location") + add_column(src_column_width, text_dir="<") + + if with_flops: + # Auto-scaling of flops header + raw_flops = [] + for evt in events: + if evt.flops > 0: + raw_flops.append(evt.flops) + if len(raw_flops) != 0: + (flops_scale, flops_header) = auto_scale_flops(min(raw_flops)) + headers.append(f"Total {flops_header}") + add_column(flops_column_width) + else: + with_flops = False # can't find any valid flops + + row_format = row_format_lst[0] + header_sep = header_sep_lst[0] + line_length = line_length_lst[0] + add_column = None # type: ignore[assignment] + + # Have to use a list because nonlocal is Py3 only... + result = [] + + def append(s): + result.append(s) + result.append("\n") # Yes, newline after the end as well + + sum_self_cpu_time_total = sum([event.self_cpu_time_total for event in events]) + sum_self_cuda_time_total = 0 + sum_self_privateuse1_time_total = 0 + for evt in events: + if evt.device_type == DeviceType.CPU: + # in legacy profiler, kernel info is stored in cpu events + if evt.is_legacy: + if not use_device: + sum_self_cuda_time_total += evt.self_cuda_time_total + else: + sum_self_privateuse1_time_total += evt.self_privateuse1_time_total + elif evt.device_type == DeviceType.CUDA: + # in kineto profiler, there're events with the correct device type (e.g. CUDA) + sum_self_cuda_time_total += evt.self_cuda_time_total + elif evt.device_type == DeviceType.PrivateUse1: + sum_self_privateuse1_time_total += evt.self_privateuse1_time_total + + # Actual printing + if header is not None: + append("=" * line_length) + append(header) + if top_level_events_only: + append("=" * line_length) + append("This report only display top-level ops statistics") + append(header_sep) + append(row_format.format(*headers)) + + append(header_sep) + + def trim_path(path, src_column_width): + if len(path) > src_column_width: + offset = len(path) - src_column_width + path = path[offset:] + if len(path) > 3: + path = "..." + path[3:] + return path + + event_limit = 0 + for evt in events: + if event_limit == row_limit: + break + if top_level_events_only and evt.cpu_parent is not None: + continue + else: + event_limit += 1 + name = evt.key + if max_name_column_width is not None and len(name) >= max_name_column_width - 3: + name = name[: (max_name_column_width - 3)] + "..." + row_values = [ + name, + # Self CPU total %, 0 for async events. + _format_time_share(evt.self_cpu_time_total, sum_self_cpu_time_total), + evt.self_cpu_time_total_str, # Self CPU total + # CPU total %, 0 for async events. 
+ _format_time_share(evt.cpu_time_total, sum_self_cpu_time_total) + if not evt.is_async + else 0, + evt.cpu_time_total_str, # CPU total + evt.cpu_time_str, # CPU time avg + ] + if has_cuda_time: + row_values.extend( + [ + evt.self_cuda_time_total_str, + # CUDA time total % + _format_time_share( + evt.self_cuda_time_total, sum_self_cuda_time_total + ), + evt.cuda_time_total_str, + evt.cuda_time_str, # Cuda time avg + ] + ) + if has_privateuse1_time: + row_values.extend( + [ + evt.self_privateuse1_time_total_str, + # PrivateUse1 time total % + _format_time_share( + evt.self_privateuse1_time_total, sum_self_privateuse1_time_total + ), + evt.privateuse1_time_total_str, + evt.privateuse1_time_str, # PrivateUse1 time avg + ] + ) + if profile_memory: + row_values.extend( + [ + # CPU Mem Total + _format_memory(evt.cpu_memory_usage), + # Self CPU Mem Total + _format_memory(evt.self_cpu_memory_usage), + ] + ) + if has_cuda_mem: + row_values.extend( + [ + # CUDA Mem Total + _format_memory(evt.cuda_memory_usage), + # Self CUDA Mem Total + _format_memory(evt.self_cuda_memory_usage), + ] + ) + if has_privateuse1_mem: + row_values.extend( + [ + # PrivateUse1 Mem Total + _format_memory(evt.privateuse1_memory_usage), + # Self PrivateUse1 Mem Total + _format_memory(evt.self_privateuse1_memory_usage), + ] + ) + row_values.append( + evt.count, # Number of calls + ) + + if append_node_id: + row_values.append(evt.node_id) + if has_input_shapes: + row_values.append(str(evt.input_shapes)[:shapes_column_width]) + if with_flops: + if evt.flops <= 0: + row_values.append("--") + else: + row_values.append(f"{evt.flops * flops_scale:8.3f}") # type: ignore[possibly-undefined] + if has_stack: + src_field = "" + if len(evt.stack) > 0: + src_field = trim_path(evt.stack[0], src_column_width) + row_values.append(src_field) + append(row_format.format(*row_values)) + + if has_stack: + empty_headers = [""] * (len(headers) - 1) + for entry in evt.stack[1:MAX_STACK_ENTRY]: + append( + row_format.format( + *(empty_headers + [trim_path(entry, src_column_width)]) + ) + ) + empty_headers.append("") + append(row_format.format(*empty_headers)) + + append(header_sep) + append(f"Self CPU time total: {_format_time(sum_self_cpu_time_total)}") + if has_cuda_time: + append(f"Self CUDA time total: {_format_time(sum_self_cuda_time_total)}") + if has_privateuse1_time: + append( + f"Self {use_device.upper()} time total: {_format_time(sum_self_privateuse1_time_total)}" + ) + return "".join(result) diff --git a/venv/lib/python3.10/site-packages/torch/autograd/variable.py b/venv/lib/python3.10/site-packages/torch/autograd/variable.py new file mode 100644 index 0000000000000000000000000000000000000000..ed841d4da7d42e53b7143a1bd7fc3a3af979f630 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/autograd/variable.py @@ -0,0 +1,14 @@ +import torch +from torch._C import _ImperativeEngine as ImperativeEngine + + +__all__ = ["VariableMeta", "Variable"] + + +class VariableMeta(type): + def __instancecheck__(cls, other): + return isinstance(other, torch.Tensor) + + +class Variable(torch._C._LegacyVariableBase, metaclass=VariableMeta): # type: ignore[misc] + _execution_engine = ImperativeEngine() diff --git a/venv/lib/python3.10/site-packages/torch/distributions/__init__.py b/venv/lib/python3.10/site-packages/torch/distributions/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..46be6489de3ac009cbcca3d9fa0cf76f45879d2f --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributions/__init__.py @@ -0,0 +1,171 @@ 
+r""" +The ``distributions`` package contains parameterizable probability distributions +and sampling functions. This allows the construction of stochastic computation +graphs and stochastic gradient estimators for optimization. This package +generally follows the design of the `TensorFlow Distributions`_ package. + +.. _`TensorFlow Distributions`: + https://arxiv.org/abs/1711.10604 + +It is not possible to directly backpropagate through random samples. However, +there are two main methods for creating surrogate functions that can be +backpropagated through. These are the score function estimator/likelihood ratio +estimator/REINFORCE and the pathwise derivative estimator. REINFORCE is commonly +seen as the basis for policy gradient methods in reinforcement learning, and the +pathwise derivative estimator is commonly seen in the reparameterization trick +in variational autoencoders. Whilst the score function only requires the value +of samples :math:`f(x)`, the pathwise derivative requires the derivative +:math:`f'(x)`. The next sections discuss these two in a reinforcement learning +example. For more details see +`Gradient Estimation Using Stochastic Computation Graphs`_ . + +.. _`Gradient Estimation Using Stochastic Computation Graphs`: + https://arxiv.org/abs/1506.05254 + +Score function +^^^^^^^^^^^^^^ + +When the probability density function is differentiable with respect to its +parameters, we only need :meth:`~torch.distributions.Distribution.sample` and +:meth:`~torch.distributions.Distribution.log_prob` to implement REINFORCE: + +.. math:: + + \Delta\theta = \alpha r \frac{\partial\log p(a|\pi^\theta(s))}{\partial\theta} + +where :math:`\theta` are the parameters, :math:`\alpha` is the learning rate, +:math:`r` is the reward and :math:`p(a|\pi^\theta(s))` is the probability of +taking action :math:`a` in state :math:`s` given policy :math:`\pi^\theta`. + +In practice we would sample an action from the output of a network, apply this +action in an environment, and then use ``log_prob`` to construct an equivalent +loss function. Note that we use a negative because optimizers use gradient +descent, whilst the rule above assumes gradient ascent. With a categorical +policy, the code for implementing REINFORCE would be as follows:: + + probs = policy_network(state) + # Note that this is equivalent to what used to be called multinomial + m = Categorical(probs) + action = m.sample() + next_state, reward = env.step(action) + loss = -m.log_prob(action) * reward + loss.backward() + +Pathwise derivative +^^^^^^^^^^^^^^^^^^^ + +The other way to implement these stochastic/policy gradients would be to use the +reparameterization trick from the +:meth:`~torch.distributions.Distribution.rsample` method, where the +parameterized random variable can be constructed via a parameterized +deterministic function of a parameter-free random variable. The reparameterized +sample therefore becomes differentiable. 
The code for implementing the pathwise +derivative would be as follows:: + + params = policy_network(state) + m = Normal(*params) + # Any distribution with .has_rsample == True could work based on the application + action = m.rsample() + next_state, reward = env.step(action) # Assuming that reward is differentiable + loss = -reward + loss.backward() +""" + +from .bernoulli import Bernoulli +from .beta import Beta +from .binomial import Binomial +from .categorical import Categorical +from .cauchy import Cauchy +from .chi2 import Chi2 +from .constraint_registry import biject_to, transform_to +from .continuous_bernoulli import ContinuousBernoulli +from .dirichlet import Dirichlet +from .distribution import Distribution +from .exp_family import ExponentialFamily +from .exponential import Exponential +from .fishersnedecor import FisherSnedecor +from .gamma import Gamma +from .geometric import Geometric +from .gumbel import Gumbel +from .half_cauchy import HalfCauchy +from .half_normal import HalfNormal +from .independent import Independent +from .inverse_gamma import InverseGamma +from .kl import _add_kl_info, kl_divergence, register_kl +from .kumaraswamy import Kumaraswamy +from .laplace import Laplace +from .lkj_cholesky import LKJCholesky +from .log_normal import LogNormal +from .logistic_normal import LogisticNormal +from .lowrank_multivariate_normal import LowRankMultivariateNormal +from .mixture_same_family import MixtureSameFamily +from .multinomial import Multinomial +from .multivariate_normal import MultivariateNormal +from .negative_binomial import NegativeBinomial +from .normal import Normal +from .one_hot_categorical import OneHotCategorical, OneHotCategoricalStraightThrough +from .pareto import Pareto +from .poisson import Poisson +from .relaxed_bernoulli import RelaxedBernoulli +from .relaxed_categorical import RelaxedOneHotCategorical +from .studentT import StudentT +from .transformed_distribution import TransformedDistribution +from .transforms import * # noqa: F403 +from . 
import transforms +from .uniform import Uniform +from .von_mises import VonMises +from .weibull import Weibull +from .wishart import Wishart + +_add_kl_info() +del _add_kl_info + +__all__ = [ + "Bernoulli", + "Beta", + "Binomial", + "Categorical", + "Cauchy", + "Chi2", + "ContinuousBernoulli", + "Dirichlet", + "Distribution", + "Exponential", + "ExponentialFamily", + "FisherSnedecor", + "Gamma", + "Geometric", + "Gumbel", + "HalfCauchy", + "HalfNormal", + "Independent", + "InverseGamma", + "Kumaraswamy", + "LKJCholesky", + "Laplace", + "LogNormal", + "LogisticNormal", + "LowRankMultivariateNormal", + "MixtureSameFamily", + "Multinomial", + "MultivariateNormal", + "NegativeBinomial", + "Normal", + "OneHotCategorical", + "OneHotCategoricalStraightThrough", + "Pareto", + "RelaxedBernoulli", + "RelaxedOneHotCategorical", + "StudentT", + "Poisson", + "Uniform", + "VonMises", + "Weibull", + "Wishart", + "TransformedDistribution", + "biject_to", + "kl_divergence", + "register_kl", + "transform_to", +] +__all__.extend(transforms.__all__) diff --git a/venv/lib/python3.10/site-packages/torch/distributions/bernoulli.py b/venv/lib/python3.10/site-packages/torch/distributions/bernoulli.py new file mode 100644 index 0000000000000000000000000000000000000000..75c2882dbc15c017630dacde183a96c4d0f51225 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributions/bernoulli.py @@ -0,0 +1,130 @@ +from numbers import Number + +import torch +from torch import nan +from torch.distributions import constraints +from torch.distributions.exp_family import ExponentialFamily +from torch.distributions.utils import ( + broadcast_all, + lazy_property, + logits_to_probs, + probs_to_logits, +) +from torch.nn.functional import binary_cross_entropy_with_logits + +__all__ = ["Bernoulli"] + + +class Bernoulli(ExponentialFamily): + r""" + Creates a Bernoulli distribution parameterized by :attr:`probs` + or :attr:`logits` (but not both). + + Samples are binary (0 or 1). They take the value `1` with probability `p` + and `0` with probability `1 - p`. + + Example:: + + >>> # xdoctest: +IGNORE_WANT("non-deterministic") + >>> m = Bernoulli(torch.tensor([0.3])) + >>> m.sample() # 30% chance 1; 70% chance 0 + tensor([ 0.]) + + Args: + probs (Number, Tensor): the probability of sampling `1` + logits (Number, Tensor): the log-odds of sampling `1` + """ + arg_constraints = {"probs": constraints.unit_interval, "logits": constraints.real} + support = constraints.boolean + has_enumerate_support = True + _mean_carrier_measure = 0 + + def __init__(self, probs=None, logits=None, validate_args=None): + if (probs is None) == (logits is None): + raise ValueError( + "Either `probs` or `logits` must be specified, but not both." 
+ ) + if probs is not None: + is_scalar = isinstance(probs, Number) + (self.probs,) = broadcast_all(probs) + else: + is_scalar = isinstance(logits, Number) + (self.logits,) = broadcast_all(logits) + self._param = self.probs if probs is not None else self.logits + if is_scalar: + batch_shape = torch.Size() + else: + batch_shape = self._param.size() + super().__init__(batch_shape, validate_args=validate_args) + + def expand(self, batch_shape, _instance=None): + new = self._get_checked_instance(Bernoulli, _instance) + batch_shape = torch.Size(batch_shape) + if "probs" in self.__dict__: + new.probs = self.probs.expand(batch_shape) + new._param = new.probs + if "logits" in self.__dict__: + new.logits = self.logits.expand(batch_shape) + new._param = new.logits + super(Bernoulli, new).__init__(batch_shape, validate_args=False) + new._validate_args = self._validate_args + return new + + def _new(self, *args, **kwargs): + return self._param.new(*args, **kwargs) + + @property + def mean(self): + return self.probs + + @property + def mode(self): + mode = (self.probs >= 0.5).to(self.probs) + mode[self.probs == 0.5] = nan + return mode + + @property + def variance(self): + return self.probs * (1 - self.probs) + + @lazy_property + def logits(self): + return probs_to_logits(self.probs, is_binary=True) + + @lazy_property + def probs(self): + return logits_to_probs(self.logits, is_binary=True) + + @property + def param_shape(self): + return self._param.size() + + def sample(self, sample_shape=torch.Size()): + shape = self._extended_shape(sample_shape) + with torch.no_grad(): + return torch.bernoulli(self.probs.expand(shape)) + + def log_prob(self, value): + if self._validate_args: + self._validate_sample(value) + logits, value = broadcast_all(self.logits, value) + return -binary_cross_entropy_with_logits(logits, value, reduction="none") + + def entropy(self): + return binary_cross_entropy_with_logits( + self.logits, self.probs, reduction="none" + ) + + def enumerate_support(self, expand=True): + values = torch.arange(2, dtype=self._param.dtype, device=self._param.device) + values = values.view((-1,) + (1,) * len(self._batch_shape)) + if expand: + values = values.expand((-1,) + self._batch_shape) + return values + + @property + def _natural_params(self): + return (torch.logit(self.probs),) + + def _log_normalizer(self, x): + return torch.log1p(torch.exp(x)) diff --git a/venv/lib/python3.10/site-packages/torch/distributions/binomial.py b/venv/lib/python3.10/site-packages/torch/distributions/binomial.py new file mode 100644 index 0000000000000000000000000000000000000000..9243da7b6bf4ccb503626ef02c1644c84961a716 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributions/binomial.py @@ -0,0 +1,165 @@ +import torch +from torch.distributions import constraints +from torch.distributions.distribution import Distribution +from torch.distributions.utils import ( + broadcast_all, + lazy_property, + logits_to_probs, + probs_to_logits, +) + +__all__ = ["Binomial"] + + +def _clamp_by_zero(x): + # works like clamp(x, min=0) but has grad at 0 is 0.5 + return (x.clamp(min=0) + x - x.clamp(max=0)) / 2 + + +class Binomial(Distribution): + r""" + Creates a Binomial distribution parameterized by :attr:`total_count` and + either :attr:`probs` or :attr:`logits` (but not both). :attr:`total_count` must be + broadcastable with :attr:`probs`/:attr:`logits`. 
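+    Samples count the number of successes in :attr:`total_count` independent
+    Bernoulli trials and therefore take values in ``{0, 1, ..., total_count}``.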
+ + Example:: + + >>> # xdoctest: +IGNORE_WANT("non-deterministic") + >>> m = Binomial(100, torch.tensor([0 , .2, .8, 1])) + >>> x = m.sample() + tensor([ 0., 22., 71., 100.]) + + >>> m = Binomial(torch.tensor([[5.], [10.]]), torch.tensor([0.5, 0.8])) + >>> x = m.sample() + tensor([[ 4., 5.], + [ 7., 6.]]) + + Args: + total_count (int or Tensor): number of Bernoulli trials + probs (Tensor): Event probabilities + logits (Tensor): Event log-odds + """ + arg_constraints = { + "total_count": constraints.nonnegative_integer, + "probs": constraints.unit_interval, + "logits": constraints.real, + } + has_enumerate_support = True + + def __init__(self, total_count=1, probs=None, logits=None, validate_args=None): + if (probs is None) == (logits is None): + raise ValueError( + "Either `probs` or `logits` must be specified, but not both." + ) + if probs is not None: + ( + self.total_count, + self.probs, + ) = broadcast_all(total_count, probs) + self.total_count = self.total_count.type_as(self.probs) + else: + ( + self.total_count, + self.logits, + ) = broadcast_all(total_count, logits) + self.total_count = self.total_count.type_as(self.logits) + + self._param = self.probs if probs is not None else self.logits + batch_shape = self._param.size() + super().__init__(batch_shape, validate_args=validate_args) + + def expand(self, batch_shape, _instance=None): + new = self._get_checked_instance(Binomial, _instance) + batch_shape = torch.Size(batch_shape) + new.total_count = self.total_count.expand(batch_shape) + if "probs" in self.__dict__: + new.probs = self.probs.expand(batch_shape) + new._param = new.probs + if "logits" in self.__dict__: + new.logits = self.logits.expand(batch_shape) + new._param = new.logits + super(Binomial, new).__init__(batch_shape, validate_args=False) + new._validate_args = self._validate_args + return new + + def _new(self, *args, **kwargs): + return self._param.new(*args, **kwargs) + + @constraints.dependent_property(is_discrete=True, event_dim=0) + def support(self): + return constraints.integer_interval(0, self.total_count) + + @property + def mean(self): + return self.total_count * self.probs + + @property + def mode(self): + return ((self.total_count + 1) * self.probs).floor().clamp(max=self.total_count) + + @property + def variance(self): + return self.total_count * self.probs * (1 - self.probs) + + @lazy_property + def logits(self): + return probs_to_logits(self.probs, is_binary=True) + + @lazy_property + def probs(self): + return logits_to_probs(self.logits, is_binary=True) + + @property + def param_shape(self): + return self._param.size() + + def sample(self, sample_shape=torch.Size()): + shape = self._extended_shape(sample_shape) + with torch.no_grad(): + return torch.binomial( + self.total_count.expand(shape), self.probs.expand(shape) + ) + + def log_prob(self, value): + if self._validate_args: + self._validate_sample(value) + log_factorial_n = torch.lgamma(self.total_count + 1) + log_factorial_k = torch.lgamma(value + 1) + log_factorial_nmk = torch.lgamma(self.total_count - value + 1) + # k * log(p) + (n - k) * log(1 - p) = k * (log(p) - log(1 - p)) + n * log(1 - p) + # (case logit < 0) = k * logit - n * log1p(e^logit) + # (case logit > 0) = k * logit - n * (log(p) - log(1 - p)) + n * log(p) + # = k * logit - n * logit - n * log1p(e^-logit) + # (merge two cases) = k * logit - n * max(logit, 0) - n * log1p(e^-|logit|) + normalize_term = ( + self.total_count * _clamp_by_zero(self.logits) + + self.total_count * torch.log1p(torch.exp(-torch.abs(self.logits))) + - 
log_factorial_n + ) + return ( + value * self.logits - log_factorial_k - log_factorial_nmk - normalize_term + ) + + def entropy(self): + total_count = int(self.total_count.max()) + if not self.total_count.min() == total_count: + raise NotImplementedError( + "Inhomogeneous total count not supported by `entropy`." + ) + + log_prob = self.log_prob(self.enumerate_support(False)) + return -(torch.exp(log_prob) * log_prob).sum(0) + + def enumerate_support(self, expand=True): + total_count = int(self.total_count.max()) + if not self.total_count.min() == total_count: + raise NotImplementedError( + "Inhomogeneous total count not supported by `enumerate_support`." + ) + values = torch.arange( + 1 + total_count, dtype=self._param.dtype, device=self._param.device + ) + values = values.view((-1,) + (1,) * len(self._batch_shape)) + if expand: + values = values.expand((-1,) + self._batch_shape) + return values diff --git a/venv/lib/python3.10/site-packages/torch/distributions/continuous_bernoulli.py b/venv/lib/python3.10/site-packages/torch/distributions/continuous_bernoulli.py new file mode 100644 index 0000000000000000000000000000000000000000..3e7f1a53a47fb4159bde1f51fb970a64ef49c911 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributions/continuous_bernoulli.py @@ -0,0 +1,235 @@ +import math +from numbers import Number + +import torch +from torch.distributions import constraints +from torch.distributions.exp_family import ExponentialFamily +from torch.distributions.utils import ( + broadcast_all, + clamp_probs, + lazy_property, + logits_to_probs, + probs_to_logits, +) +from torch.nn.functional import binary_cross_entropy_with_logits + +__all__ = ["ContinuousBernoulli"] + + +class ContinuousBernoulli(ExponentialFamily): + r""" + Creates a continuous Bernoulli distribution parameterized by :attr:`probs` + or :attr:`logits` (but not both). + + The distribution is supported in [0, 1] and parameterized by 'probs' (in + (0,1)) or 'logits' (real-valued). Note that, unlike the Bernoulli, 'probs' + does not correspond to a probability and 'logits' does not correspond to + log-odds, but the same names are used due to the similarity with the + Bernoulli. See [1] for more details. + + Example:: + + >>> # xdoctest: +IGNORE_WANT("non-deterministic") + >>> m = ContinuousBernoulli(torch.tensor([0.3])) + >>> m.sample() + tensor([ 0.2538]) + + Args: + probs (Number, Tensor): (0,1) valued parameters + logits (Number, Tensor): real valued parameters whose sigmoid matches 'probs' + + [1] The continuous Bernoulli: fixing a pervasive error in variational + autoencoders, Loaiza-Ganem G and Cunningham JP, NeurIPS 2019. + https://arxiv.org/abs/1907.06845 + """ + arg_constraints = {"probs": constraints.unit_interval, "logits": constraints.real} + support = constraints.unit_interval + _mean_carrier_measure = 0 + has_rsample = True + + def __init__( + self, probs=None, logits=None, lims=(0.499, 0.501), validate_args=None + ): + if (probs is None) == (logits is None): + raise ValueError( + "Either `probs` or `logits` must be specified, but not both." 
+ ) + if probs is not None: + is_scalar = isinstance(probs, Number) + (self.probs,) = broadcast_all(probs) + # validate 'probs' here if necessary as it is later clamped for numerical stability + # close to 0 and 1, later on; otherwise the clamped 'probs' would always pass + if validate_args is not None: + if not self.arg_constraints["probs"].check(self.probs).all(): + raise ValueError("The parameter probs has invalid values") + self.probs = clamp_probs(self.probs) + else: + is_scalar = isinstance(logits, Number) + (self.logits,) = broadcast_all(logits) + self._param = self.probs if probs is not None else self.logits + if is_scalar: + batch_shape = torch.Size() + else: + batch_shape = self._param.size() + self._lims = lims + super().__init__(batch_shape, validate_args=validate_args) + + def expand(self, batch_shape, _instance=None): + new = self._get_checked_instance(ContinuousBernoulli, _instance) + new._lims = self._lims + batch_shape = torch.Size(batch_shape) + if "probs" in self.__dict__: + new.probs = self.probs.expand(batch_shape) + new._param = new.probs + if "logits" in self.__dict__: + new.logits = self.logits.expand(batch_shape) + new._param = new.logits + super(ContinuousBernoulli, new).__init__(batch_shape, validate_args=False) + new._validate_args = self._validate_args + return new + + def _new(self, *args, **kwargs): + return self._param.new(*args, **kwargs) + + def _outside_unstable_region(self): + return torch.max( + torch.le(self.probs, self._lims[0]), torch.gt(self.probs, self._lims[1]) + ) + + def _cut_probs(self): + return torch.where( + self._outside_unstable_region(), + self.probs, + self._lims[0] * torch.ones_like(self.probs), + ) + + def _cont_bern_log_norm(self): + """computes the log normalizing constant as a function of the 'probs' parameter""" + cut_probs = self._cut_probs() + cut_probs_below_half = torch.where( + torch.le(cut_probs, 0.5), cut_probs, torch.zeros_like(cut_probs) + ) + cut_probs_above_half = torch.where( + torch.ge(cut_probs, 0.5), cut_probs, torch.ones_like(cut_probs) + ) + log_norm = torch.log( + torch.abs(torch.log1p(-cut_probs) - torch.log(cut_probs)) + ) - torch.where( + torch.le(cut_probs, 0.5), + torch.log1p(-2.0 * cut_probs_below_half), + torch.log(2.0 * cut_probs_above_half - 1.0), + ) + x = torch.pow(self.probs - 0.5, 2) + taylor = math.log(2.0) + (4.0 / 3.0 + 104.0 / 45.0 * x) * x + return torch.where(self._outside_unstable_region(), log_norm, taylor) + + @property + def mean(self): + cut_probs = self._cut_probs() + mus = cut_probs / (2.0 * cut_probs - 1.0) + 1.0 / ( + torch.log1p(-cut_probs) - torch.log(cut_probs) + ) + x = self.probs - 0.5 + taylor = 0.5 + (1.0 / 3.0 + 16.0 / 45.0 * torch.pow(x, 2)) * x + return torch.where(self._outside_unstable_region(), mus, taylor) + + @property + def stddev(self): + return torch.sqrt(self.variance) + + @property + def variance(self): + cut_probs = self._cut_probs() + vars = cut_probs * (cut_probs - 1.0) / torch.pow( + 1.0 - 2.0 * cut_probs, 2 + ) + 1.0 / torch.pow(torch.log1p(-cut_probs) - torch.log(cut_probs), 2) + x = torch.pow(self.probs - 0.5, 2) + taylor = 1.0 / 12.0 - (1.0 / 15.0 - 128.0 / 945.0 * x) * x + return torch.where(self._outside_unstable_region(), vars, taylor) + + @lazy_property + def logits(self): + return probs_to_logits(self.probs, is_binary=True) + + @lazy_property + def probs(self): + return clamp_probs(logits_to_probs(self.logits, is_binary=True)) + + @property + def param_shape(self): + return self._param.size() + + def sample(self, sample_shape=torch.Size()): + shape = 
self._extended_shape(sample_shape) + u = torch.rand(shape, dtype=self.probs.dtype, device=self.probs.device) + with torch.no_grad(): + return self.icdf(u) + + def rsample(self, sample_shape=torch.Size()): + shape = self._extended_shape(sample_shape) + u = torch.rand(shape, dtype=self.probs.dtype, device=self.probs.device) + return self.icdf(u) + + def log_prob(self, value): + if self._validate_args: + self._validate_sample(value) + logits, value = broadcast_all(self.logits, value) + return ( + -binary_cross_entropy_with_logits(logits, value, reduction="none") + + self._cont_bern_log_norm() + ) + + def cdf(self, value): + if self._validate_args: + self._validate_sample(value) + cut_probs = self._cut_probs() + cdfs = ( + torch.pow(cut_probs, value) * torch.pow(1.0 - cut_probs, 1.0 - value) + + cut_probs + - 1.0 + ) / (2.0 * cut_probs - 1.0) + unbounded_cdfs = torch.where(self._outside_unstable_region(), cdfs, value) + return torch.where( + torch.le(value, 0.0), + torch.zeros_like(value), + torch.where(torch.ge(value, 1.0), torch.ones_like(value), unbounded_cdfs), + ) + + def icdf(self, value): + cut_probs = self._cut_probs() + return torch.where( + self._outside_unstable_region(), + ( + torch.log1p(-cut_probs + value * (2.0 * cut_probs - 1.0)) + - torch.log1p(-cut_probs) + ) + / (torch.log(cut_probs) - torch.log1p(-cut_probs)), + value, + ) + + def entropy(self): + log_probs0 = torch.log1p(-self.probs) + log_probs1 = torch.log(self.probs) + return ( + self.mean * (log_probs0 - log_probs1) + - self._cont_bern_log_norm() + - log_probs0 + ) + + @property + def _natural_params(self): + return (self.logits,) + + def _log_normalizer(self, x): + """computes the log normalizing constant as a function of the natural parameter""" + out_unst_reg = torch.max( + torch.le(x, self._lims[0] - 0.5), torch.gt(x, self._lims[1] - 0.5) + ) + cut_nat_params = torch.where( + out_unst_reg, x, (self._lims[0] - 0.5) * torch.ones_like(x) + ) + log_norm = torch.log(torch.abs(torch.exp(cut_nat_params) - 1.0)) - torch.log( + torch.abs(cut_nat_params) + ) + taylor = 0.5 * x + torch.pow(x, 2) / 24.0 - torch.pow(x, 4) / 2880.0 + return torch.where(out_unst_reg, log_norm, taylor) diff --git a/venv/lib/python3.10/site-packages/torch/distributions/dirichlet.py b/venv/lib/python3.10/site-packages/torch/distributions/dirichlet.py new file mode 100644 index 0000000000000000000000000000000000000000..b7175aa616282525c1e7107b3f5336ad0ad652c2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributions/dirichlet.py @@ -0,0 +1,123 @@ +import torch +from torch.autograd import Function +from torch.autograd.function import once_differentiable +from torch.distributions import constraints +from torch.distributions.exp_family import ExponentialFamily + +__all__ = ["Dirichlet"] + + +# This helper is exposed for testing. 
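+# `Dirichlet.rsample` routes sampling through the `_Dirichlet` autograd Function
+# below so that reparameterized samples carry gradients back to `concentration`;
+# this helper implements the corresponding backward pass on top of
+# `torch._dirichlet_grad`.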
+def _Dirichlet_backward(x, concentration, grad_output): + total = concentration.sum(-1, True).expand_as(concentration) + grad = torch._dirichlet_grad(x, concentration, total) + return grad * (grad_output - (x * grad_output).sum(-1, True)) + + +class _Dirichlet(Function): + @staticmethod + def forward(ctx, concentration): + x = torch._sample_dirichlet(concentration) + ctx.save_for_backward(x, concentration) + return x + + @staticmethod + @once_differentiable + def backward(ctx, grad_output): + x, concentration = ctx.saved_tensors + return _Dirichlet_backward(x, concentration, grad_output) + + +class Dirichlet(ExponentialFamily): + r""" + Creates a Dirichlet distribution parameterized by concentration :attr:`concentration`. + + Example:: + + >>> # xdoctest: +IGNORE_WANT("non-deterministic") + >>> m = Dirichlet(torch.tensor([0.5, 0.5])) + >>> m.sample() # Dirichlet distributed with concentration [0.5, 0.5] + tensor([ 0.1046, 0.8954]) + + Args: + concentration (Tensor): concentration parameter of the distribution + (often referred to as alpha) + """ + arg_constraints = { + "concentration": constraints.independent(constraints.positive, 1) + } + support = constraints.simplex + has_rsample = True + + def __init__(self, concentration, validate_args=None): + if concentration.dim() < 1: + raise ValueError( + "`concentration` parameter must be at least one-dimensional." + ) + self.concentration = concentration + batch_shape, event_shape = concentration.shape[:-1], concentration.shape[-1:] + super().__init__(batch_shape, event_shape, validate_args=validate_args) + + def expand(self, batch_shape, _instance=None): + new = self._get_checked_instance(Dirichlet, _instance) + batch_shape = torch.Size(batch_shape) + new.concentration = self.concentration.expand(batch_shape + self.event_shape) + super(Dirichlet, new).__init__( + batch_shape, self.event_shape, validate_args=False + ) + new._validate_args = self._validate_args + return new + + def rsample(self, sample_shape=()): + shape = self._extended_shape(sample_shape) + concentration = self.concentration.expand(shape) + return _Dirichlet.apply(concentration) + + def log_prob(self, value): + if self._validate_args: + self._validate_sample(value) + return ( + torch.xlogy(self.concentration - 1.0, value).sum(-1) + + torch.lgamma(self.concentration.sum(-1)) + - torch.lgamma(self.concentration).sum(-1) + ) + + @property + def mean(self): + return self.concentration / self.concentration.sum(-1, True) + + @property + def mode(self): + concentrationm1 = (self.concentration - 1).clamp(min=0.0) + mode = concentrationm1 / concentrationm1.sum(-1, True) + mask = (self.concentration < 1).all(axis=-1) + mode[mask] = torch.nn.functional.one_hot( + mode[mask].argmax(axis=-1), concentrationm1.shape[-1] + ).to(mode) + return mode + + @property + def variance(self): + con0 = self.concentration.sum(-1, True) + return ( + self.concentration + * (con0 - self.concentration) + / (con0.pow(2) * (con0 + 1)) + ) + + def entropy(self): + k = self.concentration.size(-1) + a0 = self.concentration.sum(-1) + return ( + torch.lgamma(self.concentration).sum(-1) + - torch.lgamma(a0) + - (k - a0) * torch.digamma(a0) + - ((self.concentration - 1.0) * torch.digamma(self.concentration)).sum(-1) + ) + + @property + def _natural_params(self): + return (self.concentration,) + + def _log_normalizer(self, x): + return x.lgamma().sum(-1) - torch.lgamma(x.sum(-1)) diff --git a/venv/lib/python3.10/site-packages/torch/distributions/exp_family.py 
b/venv/lib/python3.10/site-packages/torch/distributions/exp_family.py new file mode 100644 index 0000000000000000000000000000000000000000..e60f6489d5bfa68fd3e818245ab9a13dd7beb70c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributions/exp_family.py @@ -0,0 +1,62 @@ +import torch +from torch.distributions.distribution import Distribution + +__all__ = ["ExponentialFamily"] + + +class ExponentialFamily(Distribution): + r""" + ExponentialFamily is the abstract base class for probability distributions belonging to an + exponential family, whose probability mass/density function has the form is defined below + + .. math:: + + p_{F}(x; \theta) = \exp(\langle t(x), \theta\rangle - F(\theta) + k(x)) + + where :math:`\theta` denotes the natural parameters, :math:`t(x)` denotes the sufficient statistic, + :math:`F(\theta)` is the log normalizer function for a given family and :math:`k(x)` is the carrier + measure. + + Note: + This class is an intermediary between the `Distribution` class and distributions which belong + to an exponential family mainly to check the correctness of the `.entropy()` and analytic KL + divergence methods. We use this class to compute the entropy and KL divergence using the AD + framework and Bregman divergences (courtesy of: Frank Nielsen and Richard Nock, Entropies and + Cross-entropies of Exponential Families). + """ + + @property + def _natural_params(self): + """ + Abstract method for natural parameters. Returns a tuple of Tensors based + on the distribution + """ + raise NotImplementedError + + def _log_normalizer(self, *natural_params): + """ + Abstract method for log normalizer function. Returns a log normalizer based on + the distribution and input + """ + raise NotImplementedError + + @property + def _mean_carrier_measure(self): + """ + Abstract method for expected carrier measure, which is required for computing + entropy. + """ + raise NotImplementedError + + def entropy(self): + """ + Method to compute the entropy using Bregman divergence of the log normalizer. + """ + result = -self._mean_carrier_measure + nparams = [p.detach().requires_grad_() for p in self._natural_params] + lg_normal = self._log_normalizer(*nparams) + gradients = torch.autograd.grad(lg_normal.sum(), nparams, create_graph=True) + result += lg_normal + for np, g in zip(nparams, gradients): + result -= (np * g).reshape(self._batch_shape + (-1,)).sum(-1) + return result diff --git a/venv/lib/python3.10/site-packages/torch/distributions/fishersnedecor.py b/venv/lib/python3.10/site-packages/torch/distributions/fishersnedecor.py new file mode 100644 index 0000000000000000000000000000000000000000..788f74b58556a72ab16eb810d8158d8874ddc095 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributions/fishersnedecor.py @@ -0,0 +1,98 @@ +from numbers import Number + +import torch +from torch import nan +from torch.distributions import constraints +from torch.distributions.distribution import Distribution +from torch.distributions.gamma import Gamma +from torch.distributions.utils import broadcast_all + +__all__ = ["FisherSnedecor"] + + +class FisherSnedecor(Distribution): + r""" + Creates a Fisher-Snedecor distribution parameterized by :attr:`df1` and :attr:`df2`. 
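+    It is the distribution of the ratio of two independent chi-squared variables,
+    each scaled by its degrees of freedom:
+    :math:`(X_1 / \mathrm{df1}) / (X_2 / \mathrm{df2})` where
+    :math:`X_i \sim \chi^2_{\mathrm{df}i}`.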
+ + Example:: + + >>> # xdoctest: +IGNORE_WANT("non-deterministic") + >>> m = FisherSnedecor(torch.tensor([1.0]), torch.tensor([2.0])) + >>> m.sample() # Fisher-Snedecor-distributed with df1=1 and df2=2 + tensor([ 0.2453]) + + Args: + df1 (float or Tensor): degrees of freedom parameter 1 + df2 (float or Tensor): degrees of freedom parameter 2 + """ + arg_constraints = {"df1": constraints.positive, "df2": constraints.positive} + support = constraints.positive + has_rsample = True + + def __init__(self, df1, df2, validate_args=None): + self.df1, self.df2 = broadcast_all(df1, df2) + self._gamma1 = Gamma(self.df1 * 0.5, self.df1) + self._gamma2 = Gamma(self.df2 * 0.5, self.df2) + + if isinstance(df1, Number) and isinstance(df2, Number): + batch_shape = torch.Size() + else: + batch_shape = self.df1.size() + super().__init__(batch_shape, validate_args=validate_args) + + def expand(self, batch_shape, _instance=None): + new = self._get_checked_instance(FisherSnedecor, _instance) + batch_shape = torch.Size(batch_shape) + new.df1 = self.df1.expand(batch_shape) + new.df2 = self.df2.expand(batch_shape) + new._gamma1 = self._gamma1.expand(batch_shape) + new._gamma2 = self._gamma2.expand(batch_shape) + super(FisherSnedecor, new).__init__(batch_shape, validate_args=False) + new._validate_args = self._validate_args + return new + + @property + def mean(self): + df2 = self.df2.clone(memory_format=torch.contiguous_format) + df2[df2 <= 2] = nan + return df2 / (df2 - 2) + + @property + def mode(self): + mode = (self.df1 - 2) / self.df1 * self.df2 / (self.df2 + 2) + mode[self.df1 <= 2] = nan + return mode + + @property + def variance(self): + df2 = self.df2.clone(memory_format=torch.contiguous_format) + df2[df2 <= 4] = nan + return ( + 2 + * df2.pow(2) + * (self.df1 + df2 - 2) + / (self.df1 * (df2 - 2).pow(2) * (df2 - 4)) + ) + + def rsample(self, sample_shape=torch.Size(())): + shape = self._extended_shape(sample_shape) + # X1 ~ Gamma(df1 / 2, 1 / df1), X2 ~ Gamma(df2 / 2, 1 / df2) + # Y = df2 * df1 * X1 / (df1 * df2 * X2) = X1 / X2 ~ F(df1, df2) + X1 = self._gamma1.rsample(sample_shape).view(shape) + X2 = self._gamma2.rsample(sample_shape).view(shape) + tiny = torch.finfo(X2.dtype).tiny + X2.clamp_(min=tiny) + Y = X1 / X2 + Y.clamp_(min=tiny) + return Y + + def log_prob(self, value): + if self._validate_args: + self._validate_sample(value) + ct1 = self.df1 * 0.5 + ct2 = self.df2 * 0.5 + ct3 = self.df1 / self.df2 + t1 = (ct1 + ct2).lgamma() - ct1.lgamma() - ct2.lgamma() + t2 = ct1 * ct3.log() + (ct1 - 1) * torch.log(value) + t3 = (ct1 + ct2) * torch.log1p(ct3 * value) + return t1 + t2 - t3 diff --git a/venv/lib/python3.10/site-packages/torch/distributions/geometric.py b/venv/lib/python3.10/site-packages/torch/distributions/geometric.py new file mode 100644 index 0000000000000000000000000000000000000000..0bf2f3dbacc67f3a4c8be53d2045523f9f9ec113 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributions/geometric.py @@ -0,0 +1,128 @@ +from numbers import Number + +import torch +from torch.distributions import constraints +from torch.distributions.distribution import Distribution +from torch.distributions.utils import ( + broadcast_all, + lazy_property, + logits_to_probs, + probs_to_logits, +) +from torch.nn.functional import binary_cross_entropy_with_logits + +__all__ = ["Geometric"] + + +class Geometric(Distribution): + r""" + Creates a Geometric distribution parameterized by :attr:`probs`, + where :attr:`probs` is the probability of success of Bernoulli trials. + + .. 
math:: + + P(X=k) = (1-p)^{k} p, k = 0, 1, ... + + .. note:: + :func:`torch.distributions.geometric.Geometric` :math:`(k+1)`-th trial is the first success + hence draws samples in :math:`\{0, 1, \ldots\}`, whereas + :func:`torch.Tensor.geometric_` `k`-th trial is the first success hence draws samples in :math:`\{1, 2, \ldots\}`. + + Example:: + + >>> # xdoctest: +IGNORE_WANT("non-deterministic") + >>> m = Geometric(torch.tensor([0.3])) + >>> m.sample() # underlying Bernoulli has 30% chance 1; 70% chance 0 + tensor([ 2.]) + + Args: + probs (Number, Tensor): the probability of sampling `1`. Must be in range (0, 1] + logits (Number, Tensor): the log-odds of sampling `1`. + """ + arg_constraints = {"probs": constraints.unit_interval, "logits": constraints.real} + support = constraints.nonnegative_integer + + def __init__(self, probs=None, logits=None, validate_args=None): + if (probs is None) == (logits is None): + raise ValueError( + "Either `probs` or `logits` must be specified, but not both." + ) + if probs is not None: + (self.probs,) = broadcast_all(probs) + else: + (self.logits,) = broadcast_all(logits) + probs_or_logits = probs if probs is not None else logits + if isinstance(probs_or_logits, Number): + batch_shape = torch.Size() + else: + batch_shape = probs_or_logits.size() + super().__init__(batch_shape, validate_args=validate_args) + if self._validate_args and probs is not None: + # Add an extra check beyond unit_interval + value = self.probs + valid = value > 0 + if not valid.all(): + invalid_value = value.data[~valid] + raise ValueError( + "Expected parameter probs " + f"({type(value).__name__} of shape {tuple(value.shape)}) " + f"of distribution {repr(self)} " + f"to be positive but found invalid values:\n{invalid_value}" + ) + + def expand(self, batch_shape, _instance=None): + new = self._get_checked_instance(Geometric, _instance) + batch_shape = torch.Size(batch_shape) + if "probs" in self.__dict__: + new.probs = self.probs.expand(batch_shape) + if "logits" in self.__dict__: + new.logits = self.logits.expand(batch_shape) + super(Geometric, new).__init__(batch_shape, validate_args=False) + new._validate_args = self._validate_args + return new + + @property + def mean(self): + return 1.0 / self.probs - 1.0 + + @property + def mode(self): + return torch.zeros_like(self.probs) + + @property + def variance(self): + return (1.0 / self.probs - 1.0) / self.probs + + @lazy_property + def logits(self): + return probs_to_logits(self.probs, is_binary=True) + + @lazy_property + def probs(self): + return logits_to_probs(self.logits, is_binary=True) + + def sample(self, sample_shape=torch.Size()): + shape = self._extended_shape(sample_shape) + tiny = torch.finfo(self.probs.dtype).tiny + with torch.no_grad(): + if torch._C._get_tracing_state(): + # [JIT WORKAROUND] lack of support for .uniform_() + u = torch.rand(shape, dtype=self.probs.dtype, device=self.probs.device) + u = u.clamp(min=tiny) + else: + u = self.probs.new(shape).uniform_(tiny, 1) + return (u.log() / (-self.probs).log1p()).floor() + + def log_prob(self, value): + if self._validate_args: + self._validate_sample(value) + value, probs = broadcast_all(value, self.probs) + probs = probs.clone(memory_format=torch.contiguous_format) + probs[(probs == 1) & (value == 0)] = 0 + return value * (-probs).log1p() + self.probs.log() + + def entropy(self): + return ( + binary_cross_entropy_with_logits(self.logits, self.probs, reduction="none") + / self.probs + ) diff --git a/venv/lib/python3.10/site-packages/torch/distributions/gumbel.py 
b/venv/lib/python3.10/site-packages/torch/distributions/gumbel.py new file mode 100644 index 0000000000000000000000000000000000000000..e0ed5d8f86906b297a64979e39c559215e23bb59 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributions/gumbel.py @@ -0,0 +1,81 @@ +import math +from numbers import Number + +import torch +from torch.distributions import constraints +from torch.distributions.transformed_distribution import TransformedDistribution +from torch.distributions.transforms import AffineTransform, ExpTransform +from torch.distributions.uniform import Uniform +from torch.distributions.utils import broadcast_all, euler_constant + +__all__ = ["Gumbel"] + + +class Gumbel(TransformedDistribution): + r""" + Samples from a Gumbel Distribution. + + Examples:: + + >>> # xdoctest: +IGNORE_WANT("non-deterministic") + >>> m = Gumbel(torch.tensor([1.0]), torch.tensor([2.0])) + >>> m.sample() # sample from Gumbel distribution with loc=1, scale=2 + tensor([ 1.0124]) + + Args: + loc (float or Tensor): Location parameter of the distribution + scale (float or Tensor): Scale parameter of the distribution + """ + arg_constraints = {"loc": constraints.real, "scale": constraints.positive} + support = constraints.real + + def __init__(self, loc, scale, validate_args=None): + self.loc, self.scale = broadcast_all(loc, scale) + finfo = torch.finfo(self.loc.dtype) + if isinstance(loc, Number) and isinstance(scale, Number): + base_dist = Uniform(finfo.tiny, 1 - finfo.eps, validate_args=validate_args) + else: + base_dist = Uniform( + torch.full_like(self.loc, finfo.tiny), + torch.full_like(self.loc, 1 - finfo.eps), + validate_args=validate_args, + ) + transforms = [ + ExpTransform().inv, + AffineTransform(loc=0, scale=-torch.ones_like(self.scale)), + ExpTransform().inv, + AffineTransform(loc=loc, scale=-self.scale), + ] + super().__init__(base_dist, transforms, validate_args=validate_args) + + def expand(self, batch_shape, _instance=None): + new = self._get_checked_instance(Gumbel, _instance) + new.loc = self.loc.expand(batch_shape) + new.scale = self.scale.expand(batch_shape) + return super().expand(batch_shape, _instance=new) + + # Explicitly defining the log probability function for Gumbel due to precision issues + def log_prob(self, value): + if self._validate_args: + self._validate_sample(value) + y = (self.loc - value) / self.scale + return (y - y.exp()) - self.scale.log() + + @property + def mean(self): + return self.loc + self.scale * euler_constant + + @property + def mode(self): + return self.loc + + @property + def stddev(self): + return (math.pi / math.sqrt(6)) * self.scale + + @property + def variance(self): + return self.stddev.pow(2) + + def entropy(self): + return self.scale.log() + (1 + euler_constant) diff --git a/venv/lib/python3.10/site-packages/torch/distributions/half_cauchy.py b/venv/lib/python3.10/site-packages/torch/distributions/half_cauchy.py new file mode 100644 index 0000000000000000000000000000000000000000..ef0edc6f0fe88054afe86b0e2ba13a5dc01ba092 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributions/half_cauchy.py @@ -0,0 +1,82 @@ +import math + +import torch +from torch import inf +from torch.distributions import constraints +from torch.distributions.cauchy import Cauchy +from torch.distributions.transformed_distribution import TransformedDistribution +from torch.distributions.transforms import AbsTransform + +__all__ = ["HalfCauchy"] + + +class HalfCauchy(TransformedDistribution): + r""" + Creates a half-Cauchy distribution parameterized by 
`scale` where:: + + X ~ Cauchy(0, scale) + Y = |X| ~ HalfCauchy(scale) + + Example:: + + >>> # xdoctest: +IGNORE_WANT("non-deterministic") + >>> m = HalfCauchy(torch.tensor([1.0])) + >>> m.sample() # half-cauchy distributed with scale=1 + tensor([ 2.3214]) + + Args: + scale (float or Tensor): scale of the full Cauchy distribution + """ + arg_constraints = {"scale": constraints.positive} + support = constraints.nonnegative + has_rsample = True + + def __init__(self, scale, validate_args=None): + base_dist = Cauchy(0, scale, validate_args=False) + super().__init__(base_dist, AbsTransform(), validate_args=validate_args) + + def expand(self, batch_shape, _instance=None): + new = self._get_checked_instance(HalfCauchy, _instance) + return super().expand(batch_shape, _instance=new) + + @property + def scale(self): + return self.base_dist.scale + + @property + def mean(self): + return torch.full( + self._extended_shape(), + math.inf, + dtype=self.scale.dtype, + device=self.scale.device, + ) + + @property + def mode(self): + return torch.zeros_like(self.scale) + + @property + def variance(self): + return self.base_dist.variance + + def log_prob(self, value): + if self._validate_args: + self._validate_sample(value) + value = torch.as_tensor( + value, dtype=self.base_dist.scale.dtype, device=self.base_dist.scale.device + ) + log_prob = self.base_dist.log_prob(value) + math.log(2) + log_prob = torch.where(value >= 0, log_prob, -inf) + return log_prob + + def cdf(self, value): + if self._validate_args: + self._validate_sample(value) + return 2 * self.base_dist.cdf(value) - 1 + + def icdf(self, prob): + return self.base_dist.icdf((prob + 1) / 2) + + def entropy(self): + return self.base_dist.entropy() - math.log(2) diff --git a/venv/lib/python3.10/site-packages/torch/distributions/half_normal.py b/venv/lib/python3.10/site-packages/torch/distributions/half_normal.py new file mode 100644 index 0000000000000000000000000000000000000000..6526170b24ee7574a56c6c32909b508e3dd5326c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributions/half_normal.py @@ -0,0 +1,74 @@ +import math + +import torch +from torch import inf +from torch.distributions import constraints +from torch.distributions.normal import Normal +from torch.distributions.transformed_distribution import TransformedDistribution +from torch.distributions.transforms import AbsTransform + +__all__ = ["HalfNormal"] + + +class HalfNormal(TransformedDistribution): + r""" + Creates a half-normal distribution parameterized by `scale` where:: + + X ~ Normal(0, scale) + Y = |X| ~ HalfNormal(scale) + + Example:: + + >>> # xdoctest: +IGNORE_WANT("non-deterministic") + >>> m = HalfNormal(torch.tensor([1.0])) + >>> m.sample() # half-normal distributed with scale=1 + tensor([ 0.1046]) + + Args: + scale (float or Tensor): scale of the full Normal distribution + """ + arg_constraints = {"scale": constraints.positive} + support = constraints.nonnegative + has_rsample = True + + def __init__(self, scale, validate_args=None): + base_dist = Normal(0, scale, validate_args=False) + super().__init__(base_dist, AbsTransform(), validate_args=validate_args) + + def expand(self, batch_shape, _instance=None): + new = self._get_checked_instance(HalfNormal, _instance) + return super().expand(batch_shape, _instance=new) + + @property + def scale(self): + return self.base_dist.scale + + @property + def mean(self): + return self.scale * math.sqrt(2 / math.pi) + + @property + def mode(self): + return torch.zeros_like(self.scale) + + @property + def variance(self): + 
return self.scale.pow(2) * (1 - 2 / math.pi) + + def log_prob(self, value): + if self._validate_args: + self._validate_sample(value) + log_prob = self.base_dist.log_prob(value) + math.log(2) + log_prob = torch.where(value >= 0, log_prob, -inf) + return log_prob + + def cdf(self, value): + if self._validate_args: + self._validate_sample(value) + return 2 * self.base_dist.cdf(value) - 1 + + def icdf(self, prob): + return self.base_dist.icdf((prob + 1) / 2) + + def entropy(self): + return self.base_dist.entropy() - math.log(2) diff --git a/venv/lib/python3.10/site-packages/torch/distributions/kl.py b/venv/lib/python3.10/site-packages/torch/distributions/kl.py new file mode 100644 index 0000000000000000000000000000000000000000..45e89c77d0a201480ff14e022d81bb100dd02947 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributions/kl.py @@ -0,0 +1,971 @@ +import math +import warnings +from functools import total_ordering +from typing import Callable, Dict, Tuple, Type + +import torch +from torch import inf + +from .bernoulli import Bernoulli +from .beta import Beta +from .binomial import Binomial +from .categorical import Categorical +from .cauchy import Cauchy +from .continuous_bernoulli import ContinuousBernoulli +from .dirichlet import Dirichlet +from .distribution import Distribution +from .exp_family import ExponentialFamily +from .exponential import Exponential +from .gamma import Gamma +from .geometric import Geometric +from .gumbel import Gumbel +from .half_normal import HalfNormal +from .independent import Independent +from .laplace import Laplace +from .lowrank_multivariate_normal import ( + _batch_lowrank_logdet, + _batch_lowrank_mahalanobis, + LowRankMultivariateNormal, +) +from .multivariate_normal import _batch_mahalanobis, MultivariateNormal +from .normal import Normal +from .one_hot_categorical import OneHotCategorical +from .pareto import Pareto +from .poisson import Poisson +from .transformed_distribution import TransformedDistribution +from .uniform import Uniform +from .utils import _sum_rightmost, euler_constant as _euler_gamma + +_KL_REGISTRY: Dict[ + Tuple[Type, Type], Callable +] = {} # Source of truth mapping a few general (type, type) pairs to functions. +_KL_MEMOIZE: Dict[ + Tuple[Type, Type], Callable +] = {} # Memoized version mapping many specific (type, type) pairs to functions. + +__all__ = ["register_kl", "kl_divergence"] + + +def register_kl(type_p, type_q): + """ + Decorator to register a pairwise function with :meth:`kl_divergence`. + Usage:: + + @register_kl(Normal, Normal) + def kl_normal_normal(p, q): + # insert implementation here + + Lookup returns the most specific (type,type) match ordered by subclass. If + the match is ambiguous, a `RuntimeWarning` is raised. For example to + resolve the ambiguous situation:: + + @register_kl(BaseP, DerivedQ) + def kl_version1(p, q): ... + @register_kl(DerivedP, BaseQ) + def kl_version2(p, q): ... + + you should register a third most-specific implementation, e.g.:: + + register_kl(DerivedP, DerivedQ)(kl_version1) # Break the tie. + + Args: + type_p (type): A subclass of :class:`~torch.distributions.Distribution`. + type_q (type): A subclass of :class:`~torch.distributions.Distribution`. 
+ """ + if not isinstance(type_p, type) and issubclass(type_p, Distribution): + raise TypeError( + f"Expected type_p to be a Distribution subclass but got {type_p}" + ) + if not isinstance(type_q, type) and issubclass(type_q, Distribution): + raise TypeError( + f"Expected type_q to be a Distribution subclass but got {type_q}" + ) + + def decorator(fun): + _KL_REGISTRY[type_p, type_q] = fun + _KL_MEMOIZE.clear() # reset since lookup order may have changed + return fun + + return decorator + + +@total_ordering +class _Match: + __slots__ = ["types"] + + def __init__(self, *types): + self.types = types + + def __eq__(self, other): + return self.types == other.types + + def __le__(self, other): + for x, y in zip(self.types, other.types): + if not issubclass(x, y): + return False + if x is not y: + break + return True + + +def _dispatch_kl(type_p, type_q): + """ + Find the most specific approximate match, assuming single inheritance. + """ + matches = [ + (super_p, super_q) + for super_p, super_q in _KL_REGISTRY + if issubclass(type_p, super_p) and issubclass(type_q, super_q) + ] + if not matches: + return NotImplemented + # Check that the left- and right- lexicographic orders agree. + # mypy isn't smart enough to know that _Match implements __lt__ + # see: https://github.com/python/typing/issues/760#issuecomment-710670503 + left_p, left_q = min(_Match(*m) for m in matches).types # type: ignore[type-var] + right_q, right_p = min(_Match(*reversed(m)) for m in matches).types # type: ignore[type-var] + left_fun = _KL_REGISTRY[left_p, left_q] + right_fun = _KL_REGISTRY[right_p, right_q] + if left_fun is not right_fun: + warnings.warn( + "Ambiguous kl_divergence({}, {}). Please register_kl({}, {})".format( + type_p.__name__, type_q.__name__, left_p.__name__, right_q.__name__ + ), + RuntimeWarning, + ) + return left_fun + + +def _infinite_like(tensor): + """ + Helper function for obtaining infinite KL Divergence throughout + """ + return torch.full_like(tensor, inf) + + +def _x_log_x(tensor): + """ + Utility function for calculating x log x + """ + return tensor * tensor.log() + + +def _batch_trace_XXT(bmat): + """ + Utility function for calculating the trace of XX^{T} with X having arbitrary trailing batch dimensions + """ + n = bmat.size(-1) + m = bmat.size(-2) + flat_trace = bmat.reshape(-1, m * n).pow(2).sum(-1) + return flat_trace.reshape(bmat.shape[:-2]) + + +def kl_divergence(p: Distribution, q: Distribution) -> torch.Tensor: + r""" + Compute Kullback-Leibler divergence :math:`KL(p \| q)` between two distributions. + + .. math:: + + KL(p \| q) = \int p(x) \log\frac {p(x)} {q(x)} \,dx + + Args: + p (Distribution): A :class:`~torch.distributions.Distribution` object. + q (Distribution): A :class:`~torch.distributions.Distribution` object. + + Returns: + Tensor: A batch of KL divergences of shape `batch_shape`. + + Raises: + NotImplementedError: If the distribution types have not been registered via + :meth:`register_kl`. 
+ """ + try: + fun = _KL_MEMOIZE[type(p), type(q)] + except KeyError: + fun = _dispatch_kl(type(p), type(q)) + _KL_MEMOIZE[type(p), type(q)] = fun + if fun is NotImplemented: + raise NotImplementedError( + f"No KL(p || q) is implemented for p type {p.__class__.__name__} and q type {q.__class__.__name__}" + ) + return fun(p, q) + + +################################################################################ +# KL Divergence Implementations +################################################################################ + +# Same distributions + + +@register_kl(Bernoulli, Bernoulli) +def _kl_bernoulli_bernoulli(p, q): + t1 = p.probs * ( + torch.nn.functional.softplus(-q.logits) + - torch.nn.functional.softplus(-p.logits) + ) + t1[q.probs == 0] = inf + t1[p.probs == 0] = 0 + t2 = (1 - p.probs) * ( + torch.nn.functional.softplus(q.logits) - torch.nn.functional.softplus(p.logits) + ) + t2[q.probs == 1] = inf + t2[p.probs == 1] = 0 + return t1 + t2 + + +@register_kl(Beta, Beta) +def _kl_beta_beta(p, q): + sum_params_p = p.concentration1 + p.concentration0 + sum_params_q = q.concentration1 + q.concentration0 + t1 = q.concentration1.lgamma() + q.concentration0.lgamma() + (sum_params_p).lgamma() + t2 = p.concentration1.lgamma() + p.concentration0.lgamma() + (sum_params_q).lgamma() + t3 = (p.concentration1 - q.concentration1) * torch.digamma(p.concentration1) + t4 = (p.concentration0 - q.concentration0) * torch.digamma(p.concentration0) + t5 = (sum_params_q - sum_params_p) * torch.digamma(sum_params_p) + return t1 - t2 + t3 + t4 + t5 + + +@register_kl(Binomial, Binomial) +def _kl_binomial_binomial(p, q): + # from https://math.stackexchange.com/questions/2214993/ + # kullback-leibler-divergence-for-binomial-distributions-p-and-q + if (p.total_count < q.total_count).any(): + raise NotImplementedError( + "KL between Binomials where q.total_count > p.total_count is not implemented" + ) + kl = p.total_count * ( + p.probs * (p.logits - q.logits) + (-p.probs).log1p() - (-q.probs).log1p() + ) + inf_idxs = p.total_count > q.total_count + kl[inf_idxs] = _infinite_like(kl[inf_idxs]) + return kl + + +@register_kl(Categorical, Categorical) +def _kl_categorical_categorical(p, q): + t = p.probs * (p.logits - q.logits) + t[(q.probs == 0).expand_as(t)] = inf + t[(p.probs == 0).expand_as(t)] = 0 + return t.sum(-1) + + +@register_kl(ContinuousBernoulli, ContinuousBernoulli) +def _kl_continuous_bernoulli_continuous_bernoulli(p, q): + t1 = p.mean * (p.logits - q.logits) + t2 = p._cont_bern_log_norm() + torch.log1p(-p.probs) + t3 = -q._cont_bern_log_norm() - torch.log1p(-q.probs) + return t1 + t2 + t3 + + +@register_kl(Dirichlet, Dirichlet) +def _kl_dirichlet_dirichlet(p, q): + # From http://bariskurt.com/kullback-leibler-divergence-between-two-dirichlet-and-beta-distributions/ + sum_p_concentration = p.concentration.sum(-1) + sum_q_concentration = q.concentration.sum(-1) + t1 = sum_p_concentration.lgamma() - sum_q_concentration.lgamma() + t2 = (p.concentration.lgamma() - q.concentration.lgamma()).sum(-1) + t3 = p.concentration - q.concentration + t4 = p.concentration.digamma() - sum_p_concentration.digamma().unsqueeze(-1) + return t1 - t2 + (t3 * t4).sum(-1) + + +@register_kl(Exponential, Exponential) +def _kl_exponential_exponential(p, q): + rate_ratio = q.rate / p.rate + t1 = -rate_ratio.log() + return t1 + rate_ratio - 1 + + +@register_kl(ExponentialFamily, ExponentialFamily) +def _kl_expfamily_expfamily(p, q): + if not type(p) == type(q): + raise NotImplementedError( + "The cross KL-divergence between 
different exponential families cannot \ + be computed using Bregman divergences" + ) + p_nparams = [np.detach().requires_grad_() for np in p._natural_params] + q_nparams = q._natural_params + lg_normal = p._log_normalizer(*p_nparams) + gradients = torch.autograd.grad(lg_normal.sum(), p_nparams, create_graph=True) + result = q._log_normalizer(*q_nparams) - lg_normal + for pnp, qnp, g in zip(p_nparams, q_nparams, gradients): + term = (qnp - pnp) * g + result -= _sum_rightmost(term, len(q.event_shape)) + return result + + +@register_kl(Gamma, Gamma) +def _kl_gamma_gamma(p, q): + t1 = q.concentration * (p.rate / q.rate).log() + t2 = torch.lgamma(q.concentration) - torch.lgamma(p.concentration) + t3 = (p.concentration - q.concentration) * torch.digamma(p.concentration) + t4 = (q.rate - p.rate) * (p.concentration / p.rate) + return t1 + t2 + t3 + t4 + + +@register_kl(Gumbel, Gumbel) +def _kl_gumbel_gumbel(p, q): + ct1 = p.scale / q.scale + ct2 = q.loc / q.scale + ct3 = p.loc / q.scale + t1 = -ct1.log() - ct2 + ct3 + t2 = ct1 * _euler_gamma + t3 = torch.exp(ct2 + (1 + ct1).lgamma() - ct3) + return t1 + t2 + t3 - (1 + _euler_gamma) + + +@register_kl(Geometric, Geometric) +def _kl_geometric_geometric(p, q): + return -p.entropy() - torch.log1p(-q.probs) / p.probs - q.logits + + +@register_kl(HalfNormal, HalfNormal) +def _kl_halfnormal_halfnormal(p, q): + return _kl_normal_normal(p.base_dist, q.base_dist) + + +@register_kl(Laplace, Laplace) +def _kl_laplace_laplace(p, q): + # From http://www.mast.queensu.ca/~communications/Papers/gil-msc11.pdf + scale_ratio = p.scale / q.scale + loc_abs_diff = (p.loc - q.loc).abs() + t1 = -scale_ratio.log() + t2 = loc_abs_diff / q.scale + t3 = scale_ratio * torch.exp(-loc_abs_diff / p.scale) + return t1 + t2 + t3 - 1 + + +@register_kl(LowRankMultivariateNormal, LowRankMultivariateNormal) +def _kl_lowrankmultivariatenormal_lowrankmultivariatenormal(p, q): + if p.event_shape != q.event_shape: + raise ValueError( + "KL-divergence between two Low Rank Multivariate Normals with\ + different event shapes cannot be computed" + ) + + term1 = _batch_lowrank_logdet( + q._unbroadcasted_cov_factor, q._unbroadcasted_cov_diag, q._capacitance_tril + ) - _batch_lowrank_logdet( + p._unbroadcasted_cov_factor, p._unbroadcasted_cov_diag, p._capacitance_tril + ) + term3 = _batch_lowrank_mahalanobis( + q._unbroadcasted_cov_factor, + q._unbroadcasted_cov_diag, + q.loc - p.loc, + q._capacitance_tril, + ) + # Expands term2 according to + # inv(qcov) @ pcov = [inv(qD) - inv(qD) @ qW @ inv(qC) @ qW.T @ inv(qD)] @ (pW @ pW.T + pD) + # = [inv(qD) - A.T @ A] @ (pD + pW @ pW.T) + qWt_qDinv = q._unbroadcasted_cov_factor.mT / q._unbroadcasted_cov_diag.unsqueeze(-2) + A = torch.linalg.solve_triangular(q._capacitance_tril, qWt_qDinv, upper=False) + term21 = (p._unbroadcasted_cov_diag / q._unbroadcasted_cov_diag).sum(-1) + term22 = _batch_trace_XXT( + p._unbroadcasted_cov_factor * q._unbroadcasted_cov_diag.rsqrt().unsqueeze(-1) + ) + term23 = _batch_trace_XXT(A * p._unbroadcasted_cov_diag.sqrt().unsqueeze(-2)) + term24 = _batch_trace_XXT(A.matmul(p._unbroadcasted_cov_factor)) + term2 = term21 + term22 - term23 - term24 + return 0.5 * (term1 + term2 + term3 - p.event_shape[0]) + + +@register_kl(MultivariateNormal, LowRankMultivariateNormal) +def _kl_multivariatenormal_lowrankmultivariatenormal(p, q): + if p.event_shape != q.event_shape: + raise ValueError( + "KL-divergence between two (Low Rank) Multivariate Normals with\ + different event shapes cannot be computed" + ) + + term1 = 
_batch_lowrank_logdet( + q._unbroadcasted_cov_factor, q._unbroadcasted_cov_diag, q._capacitance_tril + ) - 2 * p._unbroadcasted_scale_tril.diagonal(dim1=-2, dim2=-1).log().sum(-1) + term3 = _batch_lowrank_mahalanobis( + q._unbroadcasted_cov_factor, + q._unbroadcasted_cov_diag, + q.loc - p.loc, + q._capacitance_tril, + ) + # Expands term2 according to + # inv(qcov) @ pcov = [inv(qD) - inv(qD) @ qW @ inv(qC) @ qW.T @ inv(qD)] @ p_tril @ p_tril.T + # = [inv(qD) - A.T @ A] @ p_tril @ p_tril.T + qWt_qDinv = q._unbroadcasted_cov_factor.mT / q._unbroadcasted_cov_diag.unsqueeze(-2) + A = torch.linalg.solve_triangular(q._capacitance_tril, qWt_qDinv, upper=False) + term21 = _batch_trace_XXT( + p._unbroadcasted_scale_tril * q._unbroadcasted_cov_diag.rsqrt().unsqueeze(-1) + ) + term22 = _batch_trace_XXT(A.matmul(p._unbroadcasted_scale_tril)) + term2 = term21 - term22 + return 0.5 * (term1 + term2 + term3 - p.event_shape[0]) + + +@register_kl(LowRankMultivariateNormal, MultivariateNormal) +def _kl_lowrankmultivariatenormal_multivariatenormal(p, q): + if p.event_shape != q.event_shape: + raise ValueError( + "KL-divergence between two (Low Rank) Multivariate Normals with\ + different event shapes cannot be computed" + ) + + term1 = 2 * q._unbroadcasted_scale_tril.diagonal(dim1=-2, dim2=-1).log().sum( + -1 + ) - _batch_lowrank_logdet( + p._unbroadcasted_cov_factor, p._unbroadcasted_cov_diag, p._capacitance_tril + ) + term3 = _batch_mahalanobis(q._unbroadcasted_scale_tril, (q.loc - p.loc)) + # Expands term2 according to + # inv(qcov) @ pcov = inv(q_tril @ q_tril.T) @ (pW @ pW.T + pD) + combined_batch_shape = torch._C._infer_size( + q._unbroadcasted_scale_tril.shape[:-2], p._unbroadcasted_cov_factor.shape[:-2] + ) + n = p.event_shape[0] + q_scale_tril = q._unbroadcasted_scale_tril.expand(combined_batch_shape + (n, n)) + p_cov_factor = p._unbroadcasted_cov_factor.expand( + combined_batch_shape + (n, p.cov_factor.size(-1)) + ) + p_cov_diag = torch.diag_embed(p._unbroadcasted_cov_diag.sqrt()).expand( + combined_batch_shape + (n, n) + ) + term21 = _batch_trace_XXT( + torch.linalg.solve_triangular(q_scale_tril, p_cov_factor, upper=False) + ) + term22 = _batch_trace_XXT( + torch.linalg.solve_triangular(q_scale_tril, p_cov_diag, upper=False) + ) + term2 = term21 + term22 + return 0.5 * (term1 + term2 + term3 - p.event_shape[0]) + + +@register_kl(MultivariateNormal, MultivariateNormal) +def _kl_multivariatenormal_multivariatenormal(p, q): + # From https://en.wikipedia.org/wiki/Multivariate_normal_distribution#Kullback%E2%80%93Leibler_divergence + if p.event_shape != q.event_shape: + raise ValueError( + "KL-divergence between two Multivariate Normals with\ + different event shapes cannot be computed" + ) + + half_term1 = q._unbroadcasted_scale_tril.diagonal(dim1=-2, dim2=-1).log().sum( + -1 + ) - p._unbroadcasted_scale_tril.diagonal(dim1=-2, dim2=-1).log().sum(-1) + combined_batch_shape = torch._C._infer_size( + q._unbroadcasted_scale_tril.shape[:-2], p._unbroadcasted_scale_tril.shape[:-2] + ) + n = p.event_shape[0] + q_scale_tril = q._unbroadcasted_scale_tril.expand(combined_batch_shape + (n, n)) + p_scale_tril = p._unbroadcasted_scale_tril.expand(combined_batch_shape + (n, n)) + term2 = _batch_trace_XXT( + torch.linalg.solve_triangular(q_scale_tril, p_scale_tril, upper=False) + ) + term3 = _batch_mahalanobis(q._unbroadcasted_scale_tril, (q.loc - p.loc)) + return half_term1 + 0.5 * (term2 + term3 - n) + + +@register_kl(Normal, Normal) +def _kl_normal_normal(p, q): + var_ratio = (p.scale / q.scale).pow(2) + t1 = 
((p.loc - q.loc) / q.scale).pow(2) + return 0.5 * (var_ratio + t1 - 1 - var_ratio.log()) + + +@register_kl(OneHotCategorical, OneHotCategorical) +def _kl_onehotcategorical_onehotcategorical(p, q): + return _kl_categorical_categorical(p._categorical, q._categorical) + + +@register_kl(Pareto, Pareto) +def _kl_pareto_pareto(p, q): + # From http://www.mast.queensu.ca/~communications/Papers/gil-msc11.pdf + scale_ratio = p.scale / q.scale + alpha_ratio = q.alpha / p.alpha + t1 = q.alpha * scale_ratio.log() + t2 = -alpha_ratio.log() + result = t1 + t2 + alpha_ratio - 1 + result[p.support.lower_bound < q.support.lower_bound] = inf + return result + + +@register_kl(Poisson, Poisson) +def _kl_poisson_poisson(p, q): + return p.rate * (p.rate.log() - q.rate.log()) - (p.rate - q.rate) + + +@register_kl(TransformedDistribution, TransformedDistribution) +def _kl_transformed_transformed(p, q): + if p.transforms != q.transforms: + raise NotImplementedError + if p.event_shape != q.event_shape: + raise NotImplementedError + return kl_divergence(p.base_dist, q.base_dist) + + +@register_kl(Uniform, Uniform) +def _kl_uniform_uniform(p, q): + result = ((q.high - q.low) / (p.high - p.low)).log() + result[(q.low > p.low) | (q.high < p.high)] = inf + return result + + +# Different distributions +@register_kl(Bernoulli, Poisson) +def _kl_bernoulli_poisson(p, q): + return -p.entropy() - (p.probs * q.rate.log() - q.rate) + + +@register_kl(Beta, ContinuousBernoulli) +def _kl_beta_continuous_bernoulli(p, q): + return ( + -p.entropy() + - p.mean * q.logits + - torch.log1p(-q.probs) + - q._cont_bern_log_norm() + ) + + +@register_kl(Beta, Pareto) +def _kl_beta_infinity(p, q): + return _infinite_like(p.concentration1) + + +@register_kl(Beta, Exponential) +def _kl_beta_exponential(p, q): + return ( + -p.entropy() + - q.rate.log() + + q.rate * (p.concentration1 / (p.concentration1 + p.concentration0)) + ) + + +@register_kl(Beta, Gamma) +def _kl_beta_gamma(p, q): + t1 = -p.entropy() + t2 = q.concentration.lgamma() - q.concentration * q.rate.log() + t3 = (q.concentration - 1) * ( + p.concentration1.digamma() - (p.concentration1 + p.concentration0).digamma() + ) + t4 = q.rate * p.concentration1 / (p.concentration1 + p.concentration0) + return t1 + t2 - t3 + t4 + + +# TODO: Add Beta-Laplace KL Divergence + + +@register_kl(Beta, Normal) +def _kl_beta_normal(p, q): + E_beta = p.concentration1 / (p.concentration1 + p.concentration0) + var_normal = q.scale.pow(2) + t1 = -p.entropy() + t2 = 0.5 * (var_normal * 2 * math.pi).log() + t3 = ( + E_beta * (1 - E_beta) / (p.concentration1 + p.concentration0 + 1) + + E_beta.pow(2) + ) * 0.5 + t4 = q.loc * E_beta + t5 = q.loc.pow(2) * 0.5 + return t1 + t2 + (t3 - t4 + t5) / var_normal + + +@register_kl(Beta, Uniform) +def _kl_beta_uniform(p, q): + result = -p.entropy() + (q.high - q.low).log() + result[(q.low > p.support.lower_bound) | (q.high < p.support.upper_bound)] = inf + return result + + +# Note that the KL between a ContinuousBernoulli and Beta has no closed form + + +@register_kl(ContinuousBernoulli, Pareto) +def _kl_continuous_bernoulli_infinity(p, q): + return _infinite_like(p.probs) + + +@register_kl(ContinuousBernoulli, Exponential) +def _kl_continuous_bernoulli_exponential(p, q): + return -p.entropy() - torch.log(q.rate) + q.rate * p.mean + + +# Note that the KL between a ContinuousBernoulli and Gamma has no closed form +# TODO: Add ContinuousBernoulli-Laplace KL Divergence + + +@register_kl(ContinuousBernoulli, Normal) +def _kl_continuous_bernoulli_normal(p, q): + t1 = 
-p.entropy() + t2 = 0.5 * (math.log(2.0 * math.pi) + torch.square(q.loc / q.scale)) + torch.log( + q.scale + ) + t3 = (p.variance + torch.square(p.mean) - 2.0 * q.loc * p.mean) / ( + 2.0 * torch.square(q.scale) + ) + return t1 + t2 + t3 + + +@register_kl(ContinuousBernoulli, Uniform) +def _kl_continuous_bernoulli_uniform(p, q): + result = -p.entropy() + (q.high - q.low).log() + return torch.where( + torch.max( + torch.ge(q.low, p.support.lower_bound), + torch.le(q.high, p.support.upper_bound), + ), + torch.ones_like(result) * inf, + result, + ) + + +@register_kl(Exponential, Beta) +@register_kl(Exponential, ContinuousBernoulli) +@register_kl(Exponential, Pareto) +@register_kl(Exponential, Uniform) +def _kl_exponential_infinity(p, q): + return _infinite_like(p.rate) + + +@register_kl(Exponential, Gamma) +def _kl_exponential_gamma(p, q): + ratio = q.rate / p.rate + t1 = -q.concentration * torch.log(ratio) + return ( + t1 + + ratio + + q.concentration.lgamma() + + q.concentration * _euler_gamma + - (1 + _euler_gamma) + ) + + +@register_kl(Exponential, Gumbel) +def _kl_exponential_gumbel(p, q): + scale_rate_prod = p.rate * q.scale + loc_scale_ratio = q.loc / q.scale + t1 = scale_rate_prod.log() - 1 + t2 = torch.exp(loc_scale_ratio) * scale_rate_prod / (scale_rate_prod + 1) + t3 = scale_rate_prod.reciprocal() + return t1 - loc_scale_ratio + t2 + t3 + + +# TODO: Add Exponential-Laplace KL Divergence + + +@register_kl(Exponential, Normal) +def _kl_exponential_normal(p, q): + var_normal = q.scale.pow(2) + rate_sqr = p.rate.pow(2) + t1 = 0.5 * torch.log(rate_sqr * var_normal * 2 * math.pi) + t2 = rate_sqr.reciprocal() + t3 = q.loc / p.rate + t4 = q.loc.pow(2) * 0.5 + return t1 - 1 + (t2 - t3 + t4) / var_normal + + +@register_kl(Gamma, Beta) +@register_kl(Gamma, ContinuousBernoulli) +@register_kl(Gamma, Pareto) +@register_kl(Gamma, Uniform) +def _kl_gamma_infinity(p, q): + return _infinite_like(p.concentration) + + +@register_kl(Gamma, Exponential) +def _kl_gamma_exponential(p, q): + return -p.entropy() - q.rate.log() + q.rate * p.concentration / p.rate + + +@register_kl(Gamma, Gumbel) +def _kl_gamma_gumbel(p, q): + beta_scale_prod = p.rate * q.scale + loc_scale_ratio = q.loc / q.scale + t1 = ( + (p.concentration - 1) * p.concentration.digamma() + - p.concentration.lgamma() + - p.concentration + ) + t2 = beta_scale_prod.log() + p.concentration / beta_scale_prod + t3 = ( + torch.exp(loc_scale_ratio) + * (1 + beta_scale_prod.reciprocal()).pow(-p.concentration) + - loc_scale_ratio + ) + return t1 + t2 + t3 + + +# TODO: Add Gamma-Laplace KL Divergence + + +@register_kl(Gamma, Normal) +def _kl_gamma_normal(p, q): + var_normal = q.scale.pow(2) + beta_sqr = p.rate.pow(2) + t1 = ( + 0.5 * torch.log(beta_sqr * var_normal * 2 * math.pi) + - p.concentration + - p.concentration.lgamma() + ) + t2 = 0.5 * (p.concentration.pow(2) + p.concentration) / beta_sqr + t3 = q.loc * p.concentration / p.rate + t4 = 0.5 * q.loc.pow(2) + return ( + t1 + + (p.concentration - 1) * p.concentration.digamma() + + (t2 - t3 + t4) / var_normal + ) + + +@register_kl(Gumbel, Beta) +@register_kl(Gumbel, ContinuousBernoulli) +@register_kl(Gumbel, Exponential) +@register_kl(Gumbel, Gamma) +@register_kl(Gumbel, Pareto) +@register_kl(Gumbel, Uniform) +def _kl_gumbel_infinity(p, q): + return _infinite_like(p.loc) + + +# TODO: Add Gumbel-Laplace KL Divergence + + +@register_kl(Gumbel, Normal) +def _kl_gumbel_normal(p, q): + param_ratio = p.scale / q.scale + t1 = (param_ratio / math.sqrt(2 * math.pi)).log() + t2 = (math.pi * param_ratio * 
0.5).pow(2) / 3 + t3 = ((p.loc + p.scale * _euler_gamma - q.loc) / q.scale).pow(2) * 0.5 + return -t1 + t2 + t3 - (_euler_gamma + 1) + + +@register_kl(Laplace, Beta) +@register_kl(Laplace, ContinuousBernoulli) +@register_kl(Laplace, Exponential) +@register_kl(Laplace, Gamma) +@register_kl(Laplace, Pareto) +@register_kl(Laplace, Uniform) +def _kl_laplace_infinity(p, q): + return _infinite_like(p.loc) + + +@register_kl(Laplace, Normal) +def _kl_laplace_normal(p, q): + var_normal = q.scale.pow(2) + scale_sqr_var_ratio = p.scale.pow(2) / var_normal + t1 = 0.5 * torch.log(2 * scale_sqr_var_ratio / math.pi) + t2 = 0.5 * p.loc.pow(2) + t3 = p.loc * q.loc + t4 = 0.5 * q.loc.pow(2) + return -t1 + scale_sqr_var_ratio + (t2 - t3 + t4) / var_normal - 1 + + +@register_kl(Normal, Beta) +@register_kl(Normal, ContinuousBernoulli) +@register_kl(Normal, Exponential) +@register_kl(Normal, Gamma) +@register_kl(Normal, Pareto) +@register_kl(Normal, Uniform) +def _kl_normal_infinity(p, q): + return _infinite_like(p.loc) + + +@register_kl(Normal, Gumbel) +def _kl_normal_gumbel(p, q): + mean_scale_ratio = p.loc / q.scale + var_scale_sqr_ratio = (p.scale / q.scale).pow(2) + loc_scale_ratio = q.loc / q.scale + t1 = var_scale_sqr_ratio.log() * 0.5 + t2 = mean_scale_ratio - loc_scale_ratio + t3 = torch.exp(-mean_scale_ratio + 0.5 * var_scale_sqr_ratio + loc_scale_ratio) + return -t1 + t2 + t3 - (0.5 * (1 + math.log(2 * math.pi))) + + +@register_kl(Normal, Laplace) +def _kl_normal_laplace(p, q): + loc_diff = p.loc - q.loc + scale_ratio = p.scale / q.scale + loc_diff_scale_ratio = loc_diff / p.scale + t1 = torch.log(scale_ratio) + t2 = ( + math.sqrt(2 / math.pi) * p.scale * torch.exp(-0.5 * loc_diff_scale_ratio.pow(2)) + ) + t3 = loc_diff * torch.erf(math.sqrt(0.5) * loc_diff_scale_ratio) + return -t1 + (t2 + t3) / q.scale - (0.5 * (1 + math.log(0.5 * math.pi))) + + +@register_kl(Pareto, Beta) +@register_kl(Pareto, ContinuousBernoulli) +@register_kl(Pareto, Uniform) +def _kl_pareto_infinity(p, q): + return _infinite_like(p.scale) + + +@register_kl(Pareto, Exponential) +def _kl_pareto_exponential(p, q): + scale_rate_prod = p.scale * q.rate + t1 = (p.alpha / scale_rate_prod).log() + t2 = p.alpha.reciprocal() + t3 = p.alpha * scale_rate_prod / (p.alpha - 1) + result = t1 - t2 + t3 - 1 + result[p.alpha <= 1] = inf + return result + + +@register_kl(Pareto, Gamma) +def _kl_pareto_gamma(p, q): + common_term = p.scale.log() + p.alpha.reciprocal() + t1 = p.alpha.log() - common_term + t2 = q.concentration.lgamma() - q.concentration * q.rate.log() + t3 = (1 - q.concentration) * common_term + t4 = q.rate * p.alpha * p.scale / (p.alpha - 1) + result = t1 + t2 + t3 + t4 - 1 + result[p.alpha <= 1] = inf + return result + + +# TODO: Add Pareto-Laplace KL Divergence + + +@register_kl(Pareto, Normal) +def _kl_pareto_normal(p, q): + var_normal = 2 * q.scale.pow(2) + common_term = p.scale / (p.alpha - 1) + t1 = (math.sqrt(2 * math.pi) * q.scale * p.alpha / p.scale).log() + t2 = p.alpha.reciprocal() + t3 = p.alpha * common_term.pow(2) / (p.alpha - 2) + t4 = (p.alpha * common_term - q.loc).pow(2) + result = t1 - t2 + (t3 + t4) / var_normal - 1 + result[p.alpha <= 2] = inf + return result + + +@register_kl(Poisson, Bernoulli) +@register_kl(Poisson, Binomial) +def _kl_poisson_infinity(p, q): + return _infinite_like(p.rate) + + +@register_kl(Uniform, Beta) +def _kl_uniform_beta(p, q): + common_term = p.high - p.low + t1 = torch.log(common_term) + t2 = ( + (q.concentration1 - 1) + * (_x_log_x(p.high) - _x_log_x(p.low) - common_term) + / 
common_term + ) + t3 = ( + (q.concentration0 - 1) + * (_x_log_x(1 - p.high) - _x_log_x(1 - p.low) + common_term) + / common_term + ) + t4 = ( + q.concentration1.lgamma() + + q.concentration0.lgamma() + - (q.concentration1 + q.concentration0).lgamma() + ) + result = t3 + t4 - t1 - t2 + result[(p.high > q.support.upper_bound) | (p.low < q.support.lower_bound)] = inf + return result + + +@register_kl(Uniform, ContinuousBernoulli) +def _kl_uniform_continuous_bernoulli(p, q): + result = ( + -p.entropy() + - p.mean * q.logits + - torch.log1p(-q.probs) + - q._cont_bern_log_norm() + ) + return torch.where( + torch.max( + torch.ge(p.high, q.support.upper_bound), + torch.le(p.low, q.support.lower_bound), + ), + torch.ones_like(result) * inf, + result, + ) + + +@register_kl(Uniform, Exponential) +def _kl_uniform_exponetial(p, q): + result = q.rate * (p.high + p.low) / 2 - ((p.high - p.low) * q.rate).log() + result[p.low < q.support.lower_bound] = inf + return result + + +@register_kl(Uniform, Gamma) +def _kl_uniform_gamma(p, q): + common_term = p.high - p.low + t1 = common_term.log() + t2 = q.concentration.lgamma() - q.concentration * q.rate.log() + t3 = ( + (1 - q.concentration) + * (_x_log_x(p.high) - _x_log_x(p.low) - common_term) + / common_term + ) + t4 = q.rate * (p.high + p.low) / 2 + result = -t1 + t2 + t3 + t4 + result[p.low < q.support.lower_bound] = inf + return result + + +@register_kl(Uniform, Gumbel) +def _kl_uniform_gumbel(p, q): + common_term = q.scale / (p.high - p.low) + high_loc_diff = (p.high - q.loc) / q.scale + low_loc_diff = (p.low - q.loc) / q.scale + t1 = common_term.log() + 0.5 * (high_loc_diff + low_loc_diff) + t2 = common_term * (torch.exp(-high_loc_diff) - torch.exp(-low_loc_diff)) + return t1 - t2 + + +# TODO: Uniform-Laplace KL Divergence + + +@register_kl(Uniform, Normal) +def _kl_uniform_normal(p, q): + common_term = p.high - p.low + t1 = (math.sqrt(math.pi * 2) * q.scale / common_term).log() + t2 = (common_term).pow(2) / 12 + t3 = ((p.high + p.low - 2 * q.loc) / 2).pow(2) + return t1 + 0.5 * (t2 + t3) / q.scale.pow(2) + + +@register_kl(Uniform, Pareto) +def _kl_uniform_pareto(p, q): + support_uniform = p.high - p.low + t1 = (q.alpha * q.scale.pow(q.alpha) * (support_uniform)).log() + t2 = (_x_log_x(p.high) - _x_log_x(p.low) - support_uniform) / support_uniform + result = t2 * (q.alpha + 1) - t1 + result[p.low < q.support.lower_bound] = inf + return result + + +@register_kl(Independent, Independent) +def _kl_independent_independent(p, q): + if p.reinterpreted_batch_ndims != q.reinterpreted_batch_ndims: + raise NotImplementedError + result = kl_divergence(p.base_dist, q.base_dist) + return _sum_rightmost(result, p.reinterpreted_batch_ndims) + + +@register_kl(Cauchy, Cauchy) +def _kl_cauchy_cauchy(p, q): + # From https://arxiv.org/abs/1905.10965 + t1 = ((p.scale + q.scale).pow(2) + (p.loc - q.loc).pow(2)).log() + t2 = (4 * p.scale * q.scale).log() + return t1 - t2 + + +def _add_kl_info(): + """Appends a list of implemented KL functions to the doc for kl_divergence.""" + rows = [ + "KL divergence is currently implemented for the following distribution pairs:" + ] + for p, q in sorted( + _KL_REGISTRY, key=lambda p_q: (p_q[0].__name__, p_q[1].__name__) + ): + rows.append( + f"* :class:`~torch.distributions.{p.__name__}` and :class:`~torch.distributions.{q.__name__}`" + ) + kl_info = "\n\t".join(rows) + if kl_divergence.__doc__: + kl_divergence.__doc__ += kl_info # type: ignore[operator] diff --git a/venv/lib/python3.10/site-packages/torch/distributions/kumaraswamy.py 
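A short sketch of how the KL registry above is typically consumed (editorial example, not part of the patch; parameter values are arbitrary):

import torch
from torch.distributions import HalfNormal, Normal, kl_divergence

# Dispatch is on the (type(p), type(q)) pair, falling back to the most specific
# registered superclass match: Normal/Normal uses the closed form directly, and
# HalfNormal/HalfNormal delegates to it through the underlying base distributions.
p = Normal(torch.zeros(3), torch.ones(3))
q = Normal(torch.ones(3), 2.0 * torch.ones(3))
print(kl_divergence(p, q))                        # shape (3,), elementwise KL

hp, hq = HalfNormal(torch.tensor(1.0)), HalfNormal(torch.tensor(2.0))
print(kl_divergence(hp, hq))                      # scalar tensor
# Pairs with no registered (or inherited) implementation raise NotImplementedError.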
b/venv/lib/python3.10/site-packages/torch/distributions/kumaraswamy.py new file mode 100644 index 0000000000000000000000000000000000000000..9de3c422dc4c5a8ffa2a90dc61fd1439adcc60a6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributions/kumaraswamy.py @@ -0,0 +1,97 @@ +import torch +from torch import nan +from torch.distributions import constraints +from torch.distributions.transformed_distribution import TransformedDistribution +from torch.distributions.transforms import AffineTransform, PowerTransform +from torch.distributions.uniform import Uniform +from torch.distributions.utils import broadcast_all, euler_constant + +__all__ = ["Kumaraswamy"] + + +def _moments(a, b, n): + """ + Computes nth moment of Kumaraswamy using using torch.lgamma + """ + arg1 = 1 + n / a + log_value = torch.lgamma(arg1) + torch.lgamma(b) - torch.lgamma(arg1 + b) + return b * torch.exp(log_value) + + +class Kumaraswamy(TransformedDistribution): + r""" + Samples from a Kumaraswamy distribution. + + Example:: + + >>> # xdoctest: +IGNORE_WANT("non-deterministic") + >>> m = Kumaraswamy(torch.tensor([1.0]), torch.tensor([1.0])) + >>> m.sample() # sample from a Kumaraswamy distribution with concentration alpha=1 and beta=1 + tensor([ 0.1729]) + + Args: + concentration1 (float or Tensor): 1st concentration parameter of the distribution + (often referred to as alpha) + concentration0 (float or Tensor): 2nd concentration parameter of the distribution + (often referred to as beta) + """ + arg_constraints = { + "concentration1": constraints.positive, + "concentration0": constraints.positive, + } + support = constraints.unit_interval + has_rsample = True + + def __init__(self, concentration1, concentration0, validate_args=None): + self.concentration1, self.concentration0 = broadcast_all( + concentration1, concentration0 + ) + finfo = torch.finfo(self.concentration0.dtype) + base_dist = Uniform( + torch.full_like(self.concentration0, 0), + torch.full_like(self.concentration0, 1), + validate_args=validate_args, + ) + transforms = [ + PowerTransform(exponent=self.concentration0.reciprocal()), + AffineTransform(loc=1.0, scale=-1.0), + PowerTransform(exponent=self.concentration1.reciprocal()), + ] + super().__init__(base_dist, transforms, validate_args=validate_args) + + def expand(self, batch_shape, _instance=None): + new = self._get_checked_instance(Kumaraswamy, _instance) + new.concentration1 = self.concentration1.expand(batch_shape) + new.concentration0 = self.concentration0.expand(batch_shape) + return super().expand(batch_shape, _instance=new) + + @property + def mean(self): + return _moments(self.concentration1, self.concentration0, 1) + + @property + def mode(self): + # Evaluate in log-space for numerical stability. 
+ log_mode = ( + self.concentration0.reciprocal() * (-self.concentration0).log1p() + - (-self.concentration0 * self.concentration1).log1p() + ) + log_mode[(self.concentration0 < 1) | (self.concentration1 < 1)] = nan + return log_mode.exp() + + @property + def variance(self): + return _moments(self.concentration1, self.concentration0, 2) - torch.pow( + self.mean, 2 + ) + + def entropy(self): + t1 = 1 - self.concentration1.reciprocal() + t0 = 1 - self.concentration0.reciprocal() + H0 = torch.digamma(self.concentration0 + 1) + euler_constant + return ( + t0 + + t1 * H0 + - torch.log(self.concentration1) + - torch.log(self.concentration0) + ) diff --git a/venv/lib/python3.10/site-packages/torch/distributions/logistic_normal.py b/venv/lib/python3.10/site-packages/torch/distributions/logistic_normal.py new file mode 100644 index 0000000000000000000000000000000000000000..a9ef4dd265642ae4cdf49ed046f22a5a4a20119f --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributions/logistic_normal.py @@ -0,0 +1,54 @@ +from torch.distributions import constraints +from torch.distributions.normal import Normal +from torch.distributions.transformed_distribution import TransformedDistribution +from torch.distributions.transforms import StickBreakingTransform + +__all__ = ["LogisticNormal"] + + +class LogisticNormal(TransformedDistribution): + r""" + Creates a logistic-normal distribution parameterized by :attr:`loc` and :attr:`scale` + that define the base `Normal` distribution transformed with the + `StickBreakingTransform` such that:: + + X ~ LogisticNormal(loc, scale) + Y = log(X / (1 - X.cumsum(-1)))[..., :-1] ~ Normal(loc, scale) + + Args: + loc (float or Tensor): mean of the base distribution + scale (float or Tensor): standard deviation of the base distribution + + Example:: + + >>> # logistic-normal distributed with mean=(0, 0, 0) and stddev=(1, 1, 1) + >>> # of the base Normal distribution + >>> # xdoctest: +IGNORE_WANT("non-deterministic") + >>> m = LogisticNormal(torch.tensor([0.0] * 3), torch.tensor([1.0] * 3)) + >>> m.sample() + tensor([ 0.7653, 0.0341, 0.0579, 0.1427]) + + """ + arg_constraints = {"loc": constraints.real, "scale": constraints.positive} + support = constraints.simplex + has_rsample = True + + def __init__(self, loc, scale, validate_args=None): + base_dist = Normal(loc, scale, validate_args=validate_args) + if not base_dist.batch_shape: + base_dist = base_dist.expand([1]) + super().__init__( + base_dist, StickBreakingTransform(), validate_args=validate_args + ) + + def expand(self, batch_shape, _instance=None): + new = self._get_checked_instance(LogisticNormal, _instance) + return super().expand(batch_shape, _instance=new) + + @property + def loc(self): + return self.base_dist.base_dist.loc + + @property + def scale(self): + return self.base_dist.base_dist.scale diff --git a/venv/lib/python3.10/site-packages/torch/distributions/mixture_same_family.py b/venv/lib/python3.10/site-packages/torch/distributions/mixture_same_family.py new file mode 100644 index 0000000000000000000000000000000000000000..8db242e33253ac743d0643bae1474620266bffe9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributions/mixture_same_family.py @@ -0,0 +1,214 @@ +from typing import Dict + +import torch +from torch.distributions import Categorical, constraints +from torch.distributions.distribution import Distribution + +__all__ = ["MixtureSameFamily"] + + +class MixtureSameFamily(Distribution): + r""" + The `MixtureSameFamily` distribution implements a (batch of) mixture + 
distribution where all component are from different parameterizations of + the same distribution type. It is parameterized by a `Categorical` + "selecting distribution" (over `k` component) and a component + distribution, i.e., a `Distribution` with a rightmost batch shape + (equal to `[k]`) which indexes each (batch of) component. + + Examples:: + + >>> # xdoctest: +SKIP("undefined vars") + >>> # Construct Gaussian Mixture Model in 1D consisting of 5 equally + >>> # weighted normal distributions + >>> mix = D.Categorical(torch.ones(5,)) + >>> comp = D.Normal(torch.randn(5,), torch.rand(5,)) + >>> gmm = MixtureSameFamily(mix, comp) + + >>> # Construct Gaussian Mixture Model in 2D consisting of 5 equally + >>> # weighted bivariate normal distributions + >>> mix = D.Categorical(torch.ones(5,)) + >>> comp = D.Independent(D.Normal( + ... torch.randn(5,2), torch.rand(5,2)), 1) + >>> gmm = MixtureSameFamily(mix, comp) + + >>> # Construct a batch of 3 Gaussian Mixture Models in 2D each + >>> # consisting of 5 random weighted bivariate normal distributions + >>> mix = D.Categorical(torch.rand(3,5)) + >>> comp = D.Independent(D.Normal( + ... torch.randn(3,5,2), torch.rand(3,5,2)), 1) + >>> gmm = MixtureSameFamily(mix, comp) + + Args: + mixture_distribution: `torch.distributions.Categorical`-like + instance. Manages the probability of selecting component. + The number of categories must match the rightmost batch + dimension of the `component_distribution`. Must have either + scalar `batch_shape` or `batch_shape` matching + `component_distribution.batch_shape[:-1]` + component_distribution: `torch.distributions.Distribution`-like + instance. Right-most batch dimension indexes component. + """ + arg_constraints: Dict[str, constraints.Constraint] = {} + has_rsample = False + + def __init__( + self, mixture_distribution, component_distribution, validate_args=None + ): + self._mixture_distribution = mixture_distribution + self._component_distribution = component_distribution + + if not isinstance(self._mixture_distribution, Categorical): + raise ValueError( + " The Mixture distribution needs to be an " + " instance of torch.distributions.Categorical" + ) + + if not isinstance(self._component_distribution, Distribution): + raise ValueError( + "The Component distribution need to be an " + "instance of torch.distributions.Distribution" + ) + + # Check that batch size matches + mdbs = self._mixture_distribution.batch_shape + cdbs = self._component_distribution.batch_shape[:-1] + for size1, size2 in zip(reversed(mdbs), reversed(cdbs)): + if size1 != 1 and size2 != 1 and size1 != size2: + raise ValueError( + f"`mixture_distribution.batch_shape` ({mdbs}) is not " + "compatible with `component_distribution." 
+ f"batch_shape`({cdbs})" + ) + + # Check that the number of mixture component matches + km = self._mixture_distribution.logits.shape[-1] + kc = self._component_distribution.batch_shape[-1] + if km is not None and kc is not None and km != kc: + raise ValueError( + f"`mixture_distribution component` ({km}) does not" + " equal `component_distribution.batch_shape[-1]`" + f" ({kc})" + ) + self._num_component = km + + event_shape = self._component_distribution.event_shape + self._event_ndims = len(event_shape) + super().__init__( + batch_shape=cdbs, event_shape=event_shape, validate_args=validate_args + ) + + def expand(self, batch_shape, _instance=None): + batch_shape = torch.Size(batch_shape) + batch_shape_comp = batch_shape + (self._num_component,) + new = self._get_checked_instance(MixtureSameFamily, _instance) + new._component_distribution = self._component_distribution.expand( + batch_shape_comp + ) + new._mixture_distribution = self._mixture_distribution.expand(batch_shape) + new._num_component = self._num_component + new._event_ndims = self._event_ndims + event_shape = new._component_distribution.event_shape + super(MixtureSameFamily, new).__init__( + batch_shape=batch_shape, event_shape=event_shape, validate_args=False + ) + new._validate_args = self._validate_args + return new + + @constraints.dependent_property + def support(self): + # FIXME this may have the wrong shape when support contains batched + # parameters + return self._component_distribution.support + + @property + def mixture_distribution(self): + return self._mixture_distribution + + @property + def component_distribution(self): + return self._component_distribution + + @property + def mean(self): + probs = self._pad_mixture_dimensions(self.mixture_distribution.probs) + return torch.sum( + probs * self.component_distribution.mean, dim=-1 - self._event_ndims + ) # [B, E] + + @property + def variance(self): + # Law of total variance: Var(Y) = E[Var(Y|X)] + Var(E[Y|X]) + probs = self._pad_mixture_dimensions(self.mixture_distribution.probs) + mean_cond_var = torch.sum( + probs * self.component_distribution.variance, dim=-1 - self._event_ndims + ) + var_cond_mean = torch.sum( + probs * (self.component_distribution.mean - self._pad(self.mean)).pow(2.0), + dim=-1 - self._event_ndims, + ) + return mean_cond_var + var_cond_mean + + def cdf(self, x): + x = self._pad(x) + cdf_x = self.component_distribution.cdf(x) + mix_prob = self.mixture_distribution.probs + + return torch.sum(cdf_x * mix_prob, dim=-1) + + def log_prob(self, x): + if self._validate_args: + self._validate_sample(x) + x = self._pad(x) + log_prob_x = self.component_distribution.log_prob(x) # [S, B, k] + log_mix_prob = torch.log_softmax( + self.mixture_distribution.logits, dim=-1 + ) # [B, k] + return torch.logsumexp(log_prob_x + log_mix_prob, dim=-1) # [S, B] + + def sample(self, sample_shape=torch.Size()): + with torch.no_grad(): + sample_len = len(sample_shape) + batch_len = len(self.batch_shape) + gather_dim = sample_len + batch_len + es = self.event_shape + + # mixture samples [n, B] + mix_sample = self.mixture_distribution.sample(sample_shape) + mix_shape = mix_sample.shape + + # component samples [n, B, k, E] + comp_samples = self.component_distribution.sample(sample_shape) + + # Gather along the k dimension + mix_sample_r = mix_sample.reshape( + mix_shape + torch.Size([1] * (len(es) + 1)) + ) + mix_sample_r = mix_sample_r.repeat( + torch.Size([1] * len(mix_shape)) + torch.Size([1]) + es + ) + + samples = torch.gather(comp_samples, gather_dim, mix_sample_r) + 
return samples.squeeze(gather_dim) + + def _pad(self, x): + return x.unsqueeze(-1 - self._event_ndims) + + def _pad_mixture_dimensions(self, x): + dist_batch_ndims = len(self.batch_shape) + cat_batch_ndims = len(self.mixture_distribution.batch_shape) + pad_ndims = 0 if cat_batch_ndims == 1 else dist_batch_ndims - cat_batch_ndims + xs = x.shape + x = x.reshape( + xs[:-1] + + torch.Size(pad_ndims * [1]) + + xs[-1:] + + torch.Size(self._event_ndims * [1]) + ) + return x + + def __repr__(self): + args_string = ( + f"\n {self.mixture_distribution},\n {self.component_distribution}" + ) + return "MixtureSameFamily" + "(" + args_string + ")" diff --git a/venv/lib/python3.10/site-packages/torch/distributions/multivariate_normal.py b/venv/lib/python3.10/site-packages/torch/distributions/multivariate_normal.py new file mode 100644 index 0000000000000000000000000000000000000000..2784eeb214d5c59e1e3aa3ac21c7059f032f16de --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributions/multivariate_normal.py @@ -0,0 +1,262 @@ +import math + +import torch +from torch.distributions import constraints +from torch.distributions.distribution import Distribution +from torch.distributions.utils import _standard_normal, lazy_property + +__all__ = ["MultivariateNormal"] + + +def _batch_mv(bmat, bvec): + r""" + Performs a batched matrix-vector product, with compatible but different batch shapes. + + This function takes as input `bmat`, containing :math:`n \times n` matrices, and + `bvec`, containing length :math:`n` vectors. + + Both `bmat` and `bvec` may have any number of leading dimensions, which correspond + to a batch shape. They are not necessarily assumed to have the same batch shape, + just ones which can be broadcasted. + """ + return torch.matmul(bmat, bvec.unsqueeze(-1)).squeeze(-1) + + +def _batch_mahalanobis(bL, bx): + r""" + Computes the squared Mahalanobis distance :math:`\mathbf{x}^\top\mathbf{M}^{-1}\mathbf{x}` + for a factored :math:`\mathbf{M} = \mathbf{L}\mathbf{L}^\top`. + + Accepts batches for both bL and bx. They are not necessarily assumed to have the same batch + shape, but `bL` one should be able to broadcasted to `bx` one. + """ + n = bx.size(-1) + bx_batch_shape = bx.shape[:-1] + + # Assume that bL.shape = (i, 1, n, n), bx.shape = (..., i, j, n), + # we are going to make bx have shape (..., 1, j, i, 1, n) to apply batched tri.solve + bx_batch_dims = len(bx_batch_shape) + bL_batch_dims = bL.dim() - 2 + outer_batch_dims = bx_batch_dims - bL_batch_dims + old_batch_dims = outer_batch_dims + bL_batch_dims + new_batch_dims = outer_batch_dims + 2 * bL_batch_dims + # Reshape bx with the shape (..., 1, i, j, 1, n) + bx_new_shape = bx.shape[:outer_batch_dims] + for sL, sx in zip(bL.shape[:-2], bx.shape[outer_batch_dims:-1]): + bx_new_shape += (sx // sL, sL) + bx_new_shape += (n,) + bx = bx.reshape(bx_new_shape) + # Permute bx to make it have shape (..., 1, j, i, 1, n) + permute_dims = ( + list(range(outer_batch_dims)) + + list(range(outer_batch_dims, new_batch_dims, 2)) + + list(range(outer_batch_dims + 1, new_batch_dims, 2)) + + [new_batch_dims] + ) + bx = bx.permute(permute_dims) + + flat_L = bL.reshape(-1, n, n) # shape = b x n x n + flat_x = bx.reshape(-1, flat_L.size(0), n) # shape = c x b x n + flat_x_swap = flat_x.permute(1, 2, 0) # shape = b x n x c + M_swap = ( + torch.linalg.solve_triangular(flat_L, flat_x_swap, upper=False).pow(2).sum(-2) + ) # shape = b x c + M = M_swap.t() # shape = c x b + + # Now we revert the above reshape and permute operators. 
+ permuted_M = M.reshape(bx.shape[:-1]) # shape = (..., 1, j, i, 1) + permute_inv_dims = list(range(outer_batch_dims)) + for i in range(bL_batch_dims): + permute_inv_dims += [outer_batch_dims + i, old_batch_dims + i] + reshaped_M = permuted_M.permute(permute_inv_dims) # shape = (..., 1, i, j, 1) + return reshaped_M.reshape(bx_batch_shape) + + +def _precision_to_scale_tril(P): + # Ref: https://nbviewer.jupyter.org/gist/fehiepsi/5ef8e09e61604f10607380467eb82006#Precision-to-scale_tril + Lf = torch.linalg.cholesky(torch.flip(P, (-2, -1))) + L_inv = torch.transpose(torch.flip(Lf, (-2, -1)), -2, -1) + Id = torch.eye(P.shape[-1], dtype=P.dtype, device=P.device) + L = torch.linalg.solve_triangular(L_inv, Id, upper=False) + return L + + +class MultivariateNormal(Distribution): + r""" + Creates a multivariate normal (also called Gaussian) distribution + parameterized by a mean vector and a covariance matrix. + + The multivariate normal distribution can be parameterized either + in terms of a positive definite covariance matrix :math:`\mathbf{\Sigma}` + or a positive definite precision matrix :math:`\mathbf{\Sigma}^{-1}` + or a lower-triangular matrix :math:`\mathbf{L}` with positive-valued + diagonal entries, such that + :math:`\mathbf{\Sigma} = \mathbf{L}\mathbf{L}^\top`. This triangular matrix + can be obtained via e.g. Cholesky decomposition of the covariance. + + Example: + + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_LAPACK) + >>> # xdoctest: +IGNORE_WANT("non-deterministic") + >>> m = MultivariateNormal(torch.zeros(2), torch.eye(2)) + >>> m.sample() # normally distributed with mean=`[0,0]` and covariance_matrix=`I` + tensor([-0.2102, -0.5429]) + + Args: + loc (Tensor): mean of the distribution + covariance_matrix (Tensor): positive-definite covariance matrix + precision_matrix (Tensor): positive-definite precision matrix + scale_tril (Tensor): lower-triangular factor of covariance, with positive-valued diagonal + + Note: + Only one of :attr:`covariance_matrix` or :attr:`precision_matrix` or + :attr:`scale_tril` can be specified. + + Using :attr:`scale_tril` will be more efficient: all computations internally + are based on :attr:`scale_tril`. If :attr:`covariance_matrix` or + :attr:`precision_matrix` is passed instead, it is only used to compute + the corresponding lower triangular matrices using a Cholesky decomposition. + """ + arg_constraints = { + "loc": constraints.real_vector, + "covariance_matrix": constraints.positive_definite, + "precision_matrix": constraints.positive_definite, + "scale_tril": constraints.lower_cholesky, + } + support = constraints.real_vector + has_rsample = True + + def __init__( + self, + loc, + covariance_matrix=None, + precision_matrix=None, + scale_tril=None, + validate_args=None, + ): + if loc.dim() < 1: + raise ValueError("loc must be at least one-dimensional.") + if (covariance_matrix is not None) + (scale_tril is not None) + ( + precision_matrix is not None + ) != 1: + raise ValueError( + "Exactly one of covariance_matrix or precision_matrix or scale_tril may be specified." 
+ ) + + if scale_tril is not None: + if scale_tril.dim() < 2: + raise ValueError( + "scale_tril matrix must be at least two-dimensional, " + "with optional leading batch dimensions" + ) + batch_shape = torch.broadcast_shapes(scale_tril.shape[:-2], loc.shape[:-1]) + self.scale_tril = scale_tril.expand(batch_shape + (-1, -1)) + elif covariance_matrix is not None: + if covariance_matrix.dim() < 2: + raise ValueError( + "covariance_matrix must be at least two-dimensional, " + "with optional leading batch dimensions" + ) + batch_shape = torch.broadcast_shapes( + covariance_matrix.shape[:-2], loc.shape[:-1] + ) + self.covariance_matrix = covariance_matrix.expand(batch_shape + (-1, -1)) + else: + if precision_matrix.dim() < 2: + raise ValueError( + "precision_matrix must be at least two-dimensional, " + "with optional leading batch dimensions" + ) + batch_shape = torch.broadcast_shapes( + precision_matrix.shape[:-2], loc.shape[:-1] + ) + self.precision_matrix = precision_matrix.expand(batch_shape + (-1, -1)) + self.loc = loc.expand(batch_shape + (-1,)) + + event_shape = self.loc.shape[-1:] + super().__init__(batch_shape, event_shape, validate_args=validate_args) + + if scale_tril is not None: + self._unbroadcasted_scale_tril = scale_tril + elif covariance_matrix is not None: + self._unbroadcasted_scale_tril = torch.linalg.cholesky(covariance_matrix) + else: # precision_matrix is not None + self._unbroadcasted_scale_tril = _precision_to_scale_tril(precision_matrix) + + def expand(self, batch_shape, _instance=None): + new = self._get_checked_instance(MultivariateNormal, _instance) + batch_shape = torch.Size(batch_shape) + loc_shape = batch_shape + self.event_shape + cov_shape = batch_shape + self.event_shape + self.event_shape + new.loc = self.loc.expand(loc_shape) + new._unbroadcasted_scale_tril = self._unbroadcasted_scale_tril + if "covariance_matrix" in self.__dict__: + new.covariance_matrix = self.covariance_matrix.expand(cov_shape) + if "scale_tril" in self.__dict__: + new.scale_tril = self.scale_tril.expand(cov_shape) + if "precision_matrix" in self.__dict__: + new.precision_matrix = self.precision_matrix.expand(cov_shape) + super(MultivariateNormal, new).__init__( + batch_shape, self.event_shape, validate_args=False + ) + new._validate_args = self._validate_args + return new + + @lazy_property + def scale_tril(self): + return self._unbroadcasted_scale_tril.expand( + self._batch_shape + self._event_shape + self._event_shape + ) + + @lazy_property + def covariance_matrix(self): + return torch.matmul( + self._unbroadcasted_scale_tril, self._unbroadcasted_scale_tril.mT + ).expand(self._batch_shape + self._event_shape + self._event_shape) + + @lazy_property + def precision_matrix(self): + return torch.cholesky_inverse(self._unbroadcasted_scale_tril).expand( + self._batch_shape + self._event_shape + self._event_shape + ) + + @property + def mean(self): + return self.loc + + @property + def mode(self): + return self.loc + + @property + def variance(self): + return ( + self._unbroadcasted_scale_tril.pow(2) + .sum(-1) + .expand(self._batch_shape + self._event_shape) + ) + + def rsample(self, sample_shape=torch.Size()): + shape = self._extended_shape(sample_shape) + eps = _standard_normal(shape, dtype=self.loc.dtype, device=self.loc.device) + return self.loc + _batch_mv(self._unbroadcasted_scale_tril, eps) + + def log_prob(self, value): + if self._validate_args: + self._validate_sample(value) + diff = value - self.loc + M = _batch_mahalanobis(self._unbroadcasted_scale_tril, diff) + half_log_det = ( + 
self._unbroadcasted_scale_tril.diagonal(dim1=-2, dim2=-1).log().sum(-1) + ) + return -0.5 * (self._event_shape[0] * math.log(2 * math.pi) + M) - half_log_det + + def entropy(self): + half_log_det = ( + self._unbroadcasted_scale_tril.diagonal(dim1=-2, dim2=-1).log().sum(-1) + ) + H = 0.5 * self._event_shape[0] * (1.0 + math.log(2 * math.pi)) + half_log_det + if len(self._batch_shape) == 0: + return H + else: + return H.expand(self._batch_shape) diff --git a/venv/lib/python3.10/site-packages/torch/distributions/negative_binomial.py b/venv/lib/python3.10/site-packages/torch/distributions/negative_binomial.py new file mode 100644 index 0000000000000000000000000000000000000000..59edee589f9ae03f87eb38672745f5cb8fd0bcb5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributions/negative_binomial.py @@ -0,0 +1,133 @@ +import torch +import torch.nn.functional as F +from torch.distributions import constraints +from torch.distributions.distribution import Distribution +from torch.distributions.utils import ( + broadcast_all, + lazy_property, + logits_to_probs, + probs_to_logits, +) + +__all__ = ["NegativeBinomial"] + + +class NegativeBinomial(Distribution): + r""" + Creates a Negative Binomial distribution, i.e. distribution + of the number of successful independent and identical Bernoulli trials + before :attr:`total_count` failures are achieved. The probability + of success of each Bernoulli trial is :attr:`probs`. + + Args: + total_count (float or Tensor): non-negative number of negative Bernoulli + trials to stop, although the distribution is still valid for real + valued count + probs (Tensor): Event probabilities of success in the half open interval [0, 1) + logits (Tensor): Event log-odds for probabilities of success + """ + arg_constraints = { + "total_count": constraints.greater_than_eq(0), + "probs": constraints.half_open_interval(0.0, 1.0), + "logits": constraints.real, + } + support = constraints.nonnegative_integer + + def __init__(self, total_count, probs=None, logits=None, validate_args=None): + if (probs is None) == (logits is None): + raise ValueError( + "Either `probs` or `logits` must be specified, but not both." 
+ ) + if probs is not None: + ( + self.total_count, + self.probs, + ) = broadcast_all(total_count, probs) + self.total_count = self.total_count.type_as(self.probs) + else: + ( + self.total_count, + self.logits, + ) = broadcast_all(total_count, logits) + self.total_count = self.total_count.type_as(self.logits) + + self._param = self.probs if probs is not None else self.logits + batch_shape = self._param.size() + super().__init__(batch_shape, validate_args=validate_args) + + def expand(self, batch_shape, _instance=None): + new = self._get_checked_instance(NegativeBinomial, _instance) + batch_shape = torch.Size(batch_shape) + new.total_count = self.total_count.expand(batch_shape) + if "probs" in self.__dict__: + new.probs = self.probs.expand(batch_shape) + new._param = new.probs + if "logits" in self.__dict__: + new.logits = self.logits.expand(batch_shape) + new._param = new.logits + super(NegativeBinomial, new).__init__(batch_shape, validate_args=False) + new._validate_args = self._validate_args + return new + + def _new(self, *args, **kwargs): + return self._param.new(*args, **kwargs) + + @property + def mean(self): + return self.total_count * torch.exp(self.logits) + + @property + def mode(self): + return ((self.total_count - 1) * self.logits.exp()).floor().clamp(min=0.0) + + @property + def variance(self): + return self.mean / torch.sigmoid(-self.logits) + + @lazy_property + def logits(self): + return probs_to_logits(self.probs, is_binary=True) + + @lazy_property + def probs(self): + return logits_to_probs(self.logits, is_binary=True) + + @property + def param_shape(self): + return self._param.size() + + @lazy_property + def _gamma(self): + # Note we avoid validating because self.total_count can be zero. + return torch.distributions.Gamma( + concentration=self.total_count, + rate=torch.exp(-self.logits), + validate_args=False, + ) + + def sample(self, sample_shape=torch.Size()): + with torch.no_grad(): + rate = self._gamma.sample(sample_shape=sample_shape) + return torch.poisson(rate) + + def log_prob(self, value): + if self._validate_args: + self._validate_sample(value) + + log_unnormalized_prob = self.total_count * F.logsigmoid( + -self.logits + ) + value * F.logsigmoid(self.logits) + + log_normalization = ( + -torch.lgamma(self.total_count + value) + + torch.lgamma(1.0 + value) + + torch.lgamma(self.total_count) + ) + # The case self.total_count == 0 and value == 0 has probability 1 but + # lgamma(0) is infinite. Handle this case separately using a function + # that does not modify tensors in place to allow Jit compilation. + log_normalization = log_normalization.masked_fill( + self.total_count + value == 0.0, 0.0 + ) + + return log_unnormalized_prob - log_normalization diff --git a/venv/lib/python3.10/site-packages/torch/distributions/normal.py b/venv/lib/python3.10/site-packages/torch/distributions/normal.py new file mode 100644 index 0000000000000000000000000000000000000000..3364474ba68f401059eb545dddac186352051787 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributions/normal.py @@ -0,0 +1,109 @@ +import math +from numbers import Number, Real + +import torch +from torch.distributions import constraints +from torch.distributions.exp_family import ExponentialFamily +from torch.distributions.utils import _standard_normal, broadcast_all + +__all__ = ["Normal"] + + +class Normal(ExponentialFamily): + r""" + Creates a normal (also called Gaussian) distribution parameterized by + :attr:`loc` and :attr:`scale`. 
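# Illustrative usage sketch for the NegativeBinomial class added in the diff
# above; parameter values here are arbitrary. Sampling goes through the
# Gamma-Poisson mixture defined in `_gamma`/`sample`, and `log_prob` handles
# the total_count == 0, value == 0 corner case via `masked_fill`.
import torch
from torch.distributions import NegativeBinomial

nb = NegativeBinomial(total_count=torch.tensor(10.0), probs=torch.tensor(0.25))
counts = nb.sample((5,))        # non-negative integer-valued draws
log_p = nb.log_prob(counts)     # log-probabilities of those draws
print(counts, log_p, nb.mean, nb.variance)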
+ + Example:: + + >>> # xdoctest: +IGNORE_WANT("non-deterministic") + >>> m = Normal(torch.tensor([0.0]), torch.tensor([1.0])) + >>> m.sample() # normally distributed with loc=0 and scale=1 + tensor([ 0.1046]) + + Args: + loc (float or Tensor): mean of the distribution (often referred to as mu) + scale (float or Tensor): standard deviation of the distribution + (often referred to as sigma) + """ + arg_constraints = {"loc": constraints.real, "scale": constraints.positive} + support = constraints.real + has_rsample = True + _mean_carrier_measure = 0 + + @property + def mean(self): + return self.loc + + @property + def mode(self): + return self.loc + + @property + def stddev(self): + return self.scale + + @property + def variance(self): + return self.stddev.pow(2) + + def __init__(self, loc, scale, validate_args=None): + self.loc, self.scale = broadcast_all(loc, scale) + if isinstance(loc, Number) and isinstance(scale, Number): + batch_shape = torch.Size() + else: + batch_shape = self.loc.size() + super().__init__(batch_shape, validate_args=validate_args) + + def expand(self, batch_shape, _instance=None): + new = self._get_checked_instance(Normal, _instance) + batch_shape = torch.Size(batch_shape) + new.loc = self.loc.expand(batch_shape) + new.scale = self.scale.expand(batch_shape) + super(Normal, new).__init__(batch_shape, validate_args=False) + new._validate_args = self._validate_args + return new + + def sample(self, sample_shape=torch.Size()): + shape = self._extended_shape(sample_shape) + with torch.no_grad(): + return torch.normal(self.loc.expand(shape), self.scale.expand(shape)) + + def rsample(self, sample_shape=torch.Size()): + shape = self._extended_shape(sample_shape) + eps = _standard_normal(shape, dtype=self.loc.dtype, device=self.loc.device) + return self.loc + eps * self.scale + + def log_prob(self, value): + if self._validate_args: + self._validate_sample(value) + # compute the variance + var = self.scale**2 + log_scale = ( + math.log(self.scale) if isinstance(self.scale, Real) else self.scale.log() + ) + return ( + -((value - self.loc) ** 2) / (2 * var) + - log_scale + - math.log(math.sqrt(2 * math.pi)) + ) + + def cdf(self, value): + if self._validate_args: + self._validate_sample(value) + return 0.5 * ( + 1 + torch.erf((value - self.loc) * self.scale.reciprocal() / math.sqrt(2)) + ) + + def icdf(self, value): + return self.loc + self.scale * torch.erfinv(2 * value - 1) * math.sqrt(2) + + def entropy(self): + return 0.5 + 0.5 * math.log(2 * math.pi) + torch.log(self.scale) + + @property + def _natural_params(self): + return (self.loc / self.scale.pow(2), -0.5 * self.scale.pow(2).reciprocal()) + + def _log_normalizer(self, x, y): + return -0.25 * x.pow(2) / y + 0.5 * torch.log(-math.pi / y) diff --git a/venv/lib/python3.10/site-packages/torch/distributions/one_hot_categorical.py b/venv/lib/python3.10/site-packages/torch/distributions/one_hot_categorical.py new file mode 100644 index 0000000000000000000000000000000000000000..37e62e874f5ebef13667b3a2d2bd7f66fc88f398 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributions/one_hot_categorical.py @@ -0,0 +1,129 @@ +import torch +from torch.distributions import constraints +from torch.distributions.categorical import Categorical +from torch.distributions.distribution import Distribution + +__all__ = ["OneHotCategorical", "OneHotCategoricalStraightThrough"] + + +class OneHotCategorical(Distribution): + r""" + Creates a one-hot categorical distribution parameterized by :attr:`probs` or + :attr:`logits`. 
+ + Samples are one-hot coded vectors of size ``probs.size(-1)``. + + .. note:: The `probs` argument must be non-negative, finite and have a non-zero sum, + and it will be normalized to sum to 1 along the last dimension. :attr:`probs` + will return this normalized value. + The `logits` argument will be interpreted as unnormalized log probabilities + and can therefore be any real number. It will likewise be normalized so that + the resulting probabilities sum to 1 along the last dimension. :attr:`logits` + will return this normalized value. + + See also: :func:`torch.distributions.Categorical` for specifications of + :attr:`probs` and :attr:`logits`. + + Example:: + + >>> # xdoctest: +IGNORE_WANT("non-deterministic") + >>> m = OneHotCategorical(torch.tensor([ 0.25, 0.25, 0.25, 0.25 ])) + >>> m.sample() # equal probability of 0, 1, 2, 3 + tensor([ 0., 0., 0., 1.]) + + Args: + probs (Tensor): event probabilities + logits (Tensor): event log probabilities (unnormalized) + """ + arg_constraints = {"probs": constraints.simplex, "logits": constraints.real_vector} + support = constraints.one_hot + has_enumerate_support = True + + def __init__(self, probs=None, logits=None, validate_args=None): + self._categorical = Categorical(probs, logits) + batch_shape = self._categorical.batch_shape + event_shape = self._categorical.param_shape[-1:] + super().__init__(batch_shape, event_shape, validate_args=validate_args) + + def expand(self, batch_shape, _instance=None): + new = self._get_checked_instance(OneHotCategorical, _instance) + batch_shape = torch.Size(batch_shape) + new._categorical = self._categorical.expand(batch_shape) + super(OneHotCategorical, new).__init__( + batch_shape, self.event_shape, validate_args=False + ) + new._validate_args = self._validate_args + return new + + def _new(self, *args, **kwargs): + return self._categorical._new(*args, **kwargs) + + @property + def _param(self): + return self._categorical._param + + @property + def probs(self): + return self._categorical.probs + + @property + def logits(self): + return self._categorical.logits + + @property + def mean(self): + return self._categorical.probs + + @property + def mode(self): + probs = self._categorical.probs + mode = probs.argmax(axis=-1) + return torch.nn.functional.one_hot(mode, num_classes=probs.shape[-1]).to(probs) + + @property + def variance(self): + return self._categorical.probs * (1 - self._categorical.probs) + + @property + def param_shape(self): + return self._categorical.param_shape + + def sample(self, sample_shape=torch.Size()): + sample_shape = torch.Size(sample_shape) + probs = self._categorical.probs + num_events = self._categorical._num_events + indices = self._categorical.sample(sample_shape) + return torch.nn.functional.one_hot(indices, num_events).to(probs) + + def log_prob(self, value): + if self._validate_args: + self._validate_sample(value) + indices = value.max(-1)[1] + return self._categorical.log_prob(indices) + + def entropy(self): + return self._categorical.entropy() + + def enumerate_support(self, expand=True): + n = self.event_shape[0] + values = torch.eye(n, dtype=self._param.dtype, device=self._param.device) + values = values.view((n,) + (1,) * len(self.batch_shape) + (n,)) + if expand: + values = values.expand((n,) + self.batch_shape + (n,)) + return values + + +class OneHotCategoricalStraightThrough(OneHotCategorical): + r""" + Creates a reparameterizable :class:`OneHotCategorical` distribution based on the straight- + through gradient estimator from [1]. 
+ + [1] Estimating or Propagating Gradients Through Stochastic Neurons for Conditional Computation + (Bengio et al, 2013) + """ + has_rsample = True + + def rsample(self, sample_shape=torch.Size()): + samples = self.sample(sample_shape) + probs = self._categorical.probs # cached via @lazy_property + return samples + (probs - probs.detach()) diff --git a/venv/lib/python3.10/site-packages/torch/distributions/relaxed_categorical.py b/venv/lib/python3.10/site-packages/torch/distributions/relaxed_categorical.py new file mode 100644 index 0000000000000000000000000000000000000000..245ab87aa2a75291d4d74d4845720b0bfa8fe935 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributions/relaxed_categorical.py @@ -0,0 +1,139 @@ +import torch +from torch.distributions import constraints +from torch.distributions.categorical import Categorical +from torch.distributions.distribution import Distribution +from torch.distributions.transformed_distribution import TransformedDistribution +from torch.distributions.transforms import ExpTransform +from torch.distributions.utils import broadcast_all, clamp_probs + +__all__ = ["ExpRelaxedCategorical", "RelaxedOneHotCategorical"] + + +class ExpRelaxedCategorical(Distribution): + r""" + Creates a ExpRelaxedCategorical parameterized by + :attr:`temperature`, and either :attr:`probs` or :attr:`logits` (but not both). + Returns the log of a point in the simplex. Based on the interface to + :class:`OneHotCategorical`. + + Implementation based on [1]. + + See also: :func:`torch.distributions.OneHotCategorical` + + Args: + temperature (Tensor): relaxation temperature + probs (Tensor): event probabilities + logits (Tensor): unnormalized log probability for each event + + [1] The Concrete Distribution: A Continuous Relaxation of Discrete Random Variables + (Maddison et al, 2017) + + [2] Categorical Reparametrization with Gumbel-Softmax + (Jang et al, 2017) + """ + arg_constraints = {"probs": constraints.simplex, "logits": constraints.real_vector} + support = ( + constraints.real_vector + ) # The true support is actually a submanifold of this. 
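# Minimal sketch of the straight-through estimator implemented by
# OneHotCategoricalStraightThrough above: the forward value is a hard one-hot
# sample, while gradients reach the (lazily computed) probabilities through the
# `samples + (probs - probs.detach())` trick. Logits below are arbitrary.
import torch
from torch.distributions import OneHotCategoricalStraightThrough

logits = torch.tensor([0.5, 1.0, -0.5], requires_grad=True)
dist = OneHotCategoricalStraightThrough(logits=logits)
sample = dist.rsample()                      # hard one-hot vector
loss = (sample * torch.arange(3.0)).sum()    # any scalar function of the sample
loss.backward()                              # gradient flows back to `logits`
print(sample, logits.grad)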
+ has_rsample = True + + def __init__(self, temperature, probs=None, logits=None, validate_args=None): + self._categorical = Categorical(probs, logits) + self.temperature = temperature + batch_shape = self._categorical.batch_shape + event_shape = self._categorical.param_shape[-1:] + super().__init__(batch_shape, event_shape, validate_args=validate_args) + + def expand(self, batch_shape, _instance=None): + new = self._get_checked_instance(ExpRelaxedCategorical, _instance) + batch_shape = torch.Size(batch_shape) + new.temperature = self.temperature + new._categorical = self._categorical.expand(batch_shape) + super(ExpRelaxedCategorical, new).__init__( + batch_shape, self.event_shape, validate_args=False + ) + new._validate_args = self._validate_args + return new + + def _new(self, *args, **kwargs): + return self._categorical._new(*args, **kwargs) + + @property + def param_shape(self): + return self._categorical.param_shape + + @property + def logits(self): + return self._categorical.logits + + @property + def probs(self): + return self._categorical.probs + + def rsample(self, sample_shape=torch.Size()): + shape = self._extended_shape(sample_shape) + uniforms = clamp_probs( + torch.rand(shape, dtype=self.logits.dtype, device=self.logits.device) + ) + gumbels = -((-(uniforms.log())).log()) + scores = (self.logits + gumbels) / self.temperature + return scores - scores.logsumexp(dim=-1, keepdim=True) + + def log_prob(self, value): + K = self._categorical._num_events + if self._validate_args: + self._validate_sample(value) + logits, value = broadcast_all(self.logits, value) + log_scale = torch.full_like( + self.temperature, float(K) + ).lgamma() - self.temperature.log().mul(-(K - 1)) + score = logits - value.mul(self.temperature) + score = (score - score.logsumexp(dim=-1, keepdim=True)).sum(-1) + return score + log_scale + + +class RelaxedOneHotCategorical(TransformedDistribution): + r""" + Creates a RelaxedOneHotCategorical distribution parametrized by + :attr:`temperature`, and either :attr:`probs` or :attr:`logits`. + This is a relaxed version of the :class:`OneHotCategorical` distribution, so + its samples are on simplex, and are reparametrizable. + + Example:: + + >>> # xdoctest: +IGNORE_WANT("non-deterministic") + >>> m = RelaxedOneHotCategorical(torch.tensor([2.2]), + ... 
torch.tensor([0.1, 0.2, 0.3, 0.4])) + >>> m.sample() + tensor([ 0.1294, 0.2324, 0.3859, 0.2523]) + + Args: + temperature (Tensor): relaxation temperature + probs (Tensor): event probabilities + logits (Tensor): unnormalized log probability for each event + """ + arg_constraints = {"probs": constraints.simplex, "logits": constraints.real_vector} + support = constraints.simplex + has_rsample = True + + def __init__(self, temperature, probs=None, logits=None, validate_args=None): + base_dist = ExpRelaxedCategorical( + temperature, probs, logits, validate_args=validate_args + ) + super().__init__(base_dist, ExpTransform(), validate_args=validate_args) + + def expand(self, batch_shape, _instance=None): + new = self._get_checked_instance(RelaxedOneHotCategorical, _instance) + return super().expand(batch_shape, _instance=new) + + @property + def temperature(self): + return self.base_dist.temperature + + @property + def logits(self): + return self.base_dist.logits + + @property + def probs(self): + return self.base_dist.probs diff --git a/venv/lib/python3.10/site-packages/torch/distributions/transformed_distribution.py b/venv/lib/python3.10/site-packages/torch/distributions/transformed_distribution.py new file mode 100644 index 0000000000000000000000000000000000000000..060909f38ad06580550d3b5114bbdadd742cb4f9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributions/transformed_distribution.py @@ -0,0 +1,215 @@ +from typing import Dict + +import torch +from torch.distributions import constraints +from torch.distributions.distribution import Distribution +from torch.distributions.independent import Independent +from torch.distributions.transforms import ComposeTransform, Transform +from torch.distributions.utils import _sum_rightmost + +__all__ = ["TransformedDistribution"] + + +class TransformedDistribution(Distribution): + r""" + Extension of the Distribution class, which applies a sequence of Transforms + to a base distribution. Let f be the composition of transforms applied:: + + X ~ BaseDistribution + Y = f(X) ~ TransformedDistribution(BaseDistribution, f) + log p(Y) = log p(X) + log |det (dX/dY)| + + Note that the ``.event_shape`` of a :class:`TransformedDistribution` is the + maximum shape of its base distribution and its transforms, since transforms + can introduce correlations among events. 
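# RelaxedOneHotCategorical, defined just above, is itself assembled from the
# TransformedDistribution machinery described here: an ExpRelaxedCategorical
# base pushed through ExpTransform. A small illustrative sketch (temperature
# and probabilities are arbitrary) of a reparameterized sample with gradients:
import torch
from torch.distributions import RelaxedOneHotCategorical

temperature = torch.tensor(0.7, requires_grad=True)
dist = RelaxedOneHotCategorical(temperature, probs=torch.tensor([0.1, 0.2, 0.3, 0.4]))
y = dist.rsample()                       # point on the simplex
loss = (y * torch.arange(4.0)).sum()     # any scalar function of the sample
loss.backward()                          # gradient flows back to `temperature`
print(y, temperature.grad)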
+ + An example for the usage of :class:`TransformedDistribution` would be:: + + # Building a Logistic Distribution + # X ~ Uniform(0, 1) + # f = a + b * logit(X) + # Y ~ f(X) ~ Logistic(a, b) + base_distribution = Uniform(0, 1) + transforms = [SigmoidTransform().inv, AffineTransform(loc=a, scale=b)] + logistic = TransformedDistribution(base_distribution, transforms) + + For more examples, please look at the implementations of + :class:`~torch.distributions.gumbel.Gumbel`, + :class:`~torch.distributions.half_cauchy.HalfCauchy`, + :class:`~torch.distributions.half_normal.HalfNormal`, + :class:`~torch.distributions.log_normal.LogNormal`, + :class:`~torch.distributions.pareto.Pareto`, + :class:`~torch.distributions.weibull.Weibull`, + :class:`~torch.distributions.relaxed_bernoulli.RelaxedBernoulli` and + :class:`~torch.distributions.relaxed_categorical.RelaxedOneHotCategorical` + """ + arg_constraints: Dict[str, constraints.Constraint] = {} + + def __init__(self, base_distribution, transforms, validate_args=None): + if isinstance(transforms, Transform): + self.transforms = [ + transforms, + ] + elif isinstance(transforms, list): + if not all(isinstance(t, Transform) for t in transforms): + raise ValueError( + "transforms must be a Transform or a list of Transforms" + ) + self.transforms = transforms + else: + raise ValueError( + f"transforms must be a Transform or list, but was {transforms}" + ) + + # Reshape base_distribution according to transforms. + base_shape = base_distribution.batch_shape + base_distribution.event_shape + base_event_dim = len(base_distribution.event_shape) + transform = ComposeTransform(self.transforms) + if len(base_shape) < transform.domain.event_dim: + raise ValueError( + "base_distribution needs to have shape with size at least {}, but got {}.".format( + transform.domain.event_dim, base_shape + ) + ) + forward_shape = transform.forward_shape(base_shape) + expanded_base_shape = transform.inverse_shape(forward_shape) + if base_shape != expanded_base_shape: + base_batch_shape = expanded_base_shape[ + : len(expanded_base_shape) - base_event_dim + ] + base_distribution = base_distribution.expand(base_batch_shape) + reinterpreted_batch_ndims = transform.domain.event_dim - base_event_dim + if reinterpreted_batch_ndims > 0: + base_distribution = Independent( + base_distribution, reinterpreted_batch_ndims + ) + self.base_dist = base_distribution + + # Compute shapes. 
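# Worked instance of the shape bookkeeping computed below (illustrative):
# a base Independent(Normal) with batch_shape (3,) and event_shape (2,)
# pushed through ExpTransform (event_dim 0) keeps batch_shape (3,) and
# event_shape (2,), since event_dim = max(0, 1 + 0) = 1.
import torch
from torch.distributions import Independent, Normal, TransformedDistribution
from torch.distributions.transforms import ExpTransform

base = Independent(Normal(torch.zeros(3, 2), torch.ones(3, 2)), 1)
d = TransformedDistribution(base, ExpTransform())
print(d.batch_shape, d.event_shape)   # torch.Size([3]) torch.Size([2])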
+ transform_change_in_event_dim = ( + transform.codomain.event_dim - transform.domain.event_dim + ) + event_dim = max( + transform.codomain.event_dim, # the transform is coupled + base_event_dim + transform_change_in_event_dim, # the base dist is coupled + ) + assert len(forward_shape) >= event_dim + cut = len(forward_shape) - event_dim + batch_shape = forward_shape[:cut] + event_shape = forward_shape[cut:] + super().__init__(batch_shape, event_shape, validate_args=validate_args) + + def expand(self, batch_shape, _instance=None): + new = self._get_checked_instance(TransformedDistribution, _instance) + batch_shape = torch.Size(batch_shape) + shape = batch_shape + self.event_shape + for t in reversed(self.transforms): + shape = t.inverse_shape(shape) + base_batch_shape = shape[: len(shape) - len(self.base_dist.event_shape)] + new.base_dist = self.base_dist.expand(base_batch_shape) + new.transforms = self.transforms + super(TransformedDistribution, new).__init__( + batch_shape, self.event_shape, validate_args=False + ) + new._validate_args = self._validate_args + return new + + @constraints.dependent_property(is_discrete=False) + def support(self): + if not self.transforms: + return self.base_dist.support + support = self.transforms[-1].codomain + if len(self.event_shape) > support.event_dim: + support = constraints.independent( + support, len(self.event_shape) - support.event_dim + ) + return support + + @property + def has_rsample(self): + return self.base_dist.has_rsample + + def sample(self, sample_shape=torch.Size()): + """ + Generates a sample_shape shaped sample or sample_shape shaped batch of + samples if the distribution parameters are batched. Samples first from + base distribution and applies `transform()` for every transform in the + list. + """ + with torch.no_grad(): + x = self.base_dist.sample(sample_shape) + for transform in self.transforms: + x = transform(x) + return x + + def rsample(self, sample_shape=torch.Size()): + """ + Generates a sample_shape shaped reparameterized sample or sample_shape + shaped batch of reparameterized samples if the distribution parameters + are batched. Samples first from base distribution and applies + `transform()` for every transform in the list. + """ + x = self.base_dist.rsample(sample_shape) + for transform in self.transforms: + x = transform(x) + return x + + def log_prob(self, value): + """ + Scores the sample by inverting the transform(s) and computing the score + using the score of the base distribution and the log abs det jacobian. + """ + if self._validate_args: + self._validate_sample(value) + event_dim = len(self.event_shape) + log_prob = 0.0 + y = value + for transform in reversed(self.transforms): + x = transform.inv(y) + event_dim += transform.domain.event_dim - transform.codomain.event_dim + log_prob = log_prob - _sum_rightmost( + transform.log_abs_det_jacobian(x, y), + event_dim - transform.domain.event_dim, + ) + y = x + + log_prob = log_prob + _sum_rightmost( + self.base_dist.log_prob(y), event_dim - len(self.base_dist.event_shape) + ) + return log_prob + + def _monotonize_cdf(self, value): + """ + This conditionally flips ``value -> 1-value`` to ensure :meth:`cdf` is + monotone increasing. + """ + sign = 1 + for transform in self.transforms: + sign = sign * transform.sign + if isinstance(sign, int) and sign == 1: + return value + return sign * (value - 0.5) + 0.5 + + def cdf(self, value): + """ + Computes the cumulative distribution function by inverting the + transform(s) and computing the score of the base distribution. 
+ """ + for transform in self.transforms[::-1]: + value = transform.inv(value) + if self._validate_args: + self.base_dist._validate_sample(value) + value = self.base_dist.cdf(value) + value = self._monotonize_cdf(value) + return value + + def icdf(self, value): + """ + Computes the inverse cumulative distribution function using + transform(s) and computing the score of the base distribution. + """ + value = self._monotonize_cdf(value) + value = self.base_dist.icdf(value) + for transform in self.transforms: + value = transform(value) + return value diff --git a/venv/lib/python3.10/site-packages/torch/distributions/wishart.py b/venv/lib/python3.10/site-packages/torch/distributions/wishart.py new file mode 100644 index 0000000000000000000000000000000000000000..733efbbeb95f0772e30cc30681b7b2c420160b8c --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/distributions/wishart.py @@ -0,0 +1,335 @@ +import math +import warnings +from numbers import Number +from typing import Optional, Union + +import torch +from torch import nan +from torch.distributions import constraints +from torch.distributions.exp_family import ExponentialFamily +from torch.distributions.multivariate_normal import _precision_to_scale_tril +from torch.distributions.utils import lazy_property + + +__all__ = ["Wishart"] + +_log_2 = math.log(2) + + +def _mvdigamma(x: torch.Tensor, p: int) -> torch.Tensor: + assert x.gt((p - 1) / 2).all(), "Wrong domain for multivariate digamma function." + return torch.digamma( + x.unsqueeze(-1) + - torch.arange(p, dtype=x.dtype, device=x.device).div(2).expand(x.shape + (-1,)) + ).sum(-1) + + +def _clamp_above_eps(x: torch.Tensor) -> torch.Tensor: + # We assume positive input for this function + return x.clamp(min=torch.finfo(x.dtype).eps) + + +class Wishart(ExponentialFamily): + r""" + Creates a Wishart distribution parameterized by a symmetric positive definite matrix :math:`\Sigma`, + or its Cholesky decomposition :math:`\mathbf{\Sigma} = \mathbf{L}\mathbf{L}^\top` + + Example: + >>> # xdoctest: +SKIP("FIXME: scale_tril must be at least two-dimensional") + >>> m = Wishart(torch.Tensor([2]), covariance_matrix=torch.eye(2)) + >>> m.sample() # Wishart distributed with mean=`df * I` and + >>> # variance(x_ij)=`df` for i != j and variance(x_ij)=`2 * df` for i == j + + Args: + df (float or Tensor): real-valued parameter larger than the (dimension of Square matrix) - 1 + covariance_matrix (Tensor): positive-definite covariance matrix + precision_matrix (Tensor): positive-definite precision matrix + scale_tril (Tensor): lower-triangular factor of covariance, with positive-valued diagonal + Note: + Only one of :attr:`covariance_matrix` or :attr:`precision_matrix` or + :attr:`scale_tril` can be specified. + Using :attr:`scale_tril` will be more efficient: all computations internally + are based on :attr:`scale_tril`. If :attr:`covariance_matrix` or + :attr:`precision_matrix` is passed instead, it is only used to compute + the corresponding lower triangular matrices using a Cholesky decomposition. + 'torch.distributions.LKJCholesky' is a restricted Wishart distribution.[1] + + **References** + + [1] Wang, Z., Wu, Y. and Chu, H., 2018. `On equivalence of the LKJ distribution and the restricted Wishart distribution`. + [2] Sawyer, S., 2007. `Wishart Distributions and Inverse-Wishart Sampling`. + [3] Anderson, T. W., 2003. `An Introduction to Multivariate Statistical Analysis (3rd ed.)`. + [4] Odell, P. L. & Feiveson, A. H., 1966. `A Numerical Procedure to Generate a SampleCovariance Matrix`. 
JASA, 61(313):199-203. + [5] Ku, Y.-C. & Bloomfield, P., 2010. `Generating Random Wishart Matrices with Fractional Degrees of Freedom in OX`. + """ + arg_constraints = { + "covariance_matrix": constraints.positive_definite, + "precision_matrix": constraints.positive_definite, + "scale_tril": constraints.lower_cholesky, + "df": constraints.greater_than(0), + } + support = constraints.positive_definite + has_rsample = True + _mean_carrier_measure = 0 + + def __init__( + self, + df: Union[torch.Tensor, Number], + covariance_matrix: Optional[torch.Tensor] = None, + precision_matrix: Optional[torch.Tensor] = None, + scale_tril: Optional[torch.Tensor] = None, + validate_args=None, + ): + assert (covariance_matrix is not None) + (scale_tril is not None) + ( + precision_matrix is not None + ) == 1, "Exactly one of covariance_matrix or precision_matrix or scale_tril may be specified." + + param = next( + p + for p in (covariance_matrix, precision_matrix, scale_tril) + if p is not None + ) + + if param.dim() < 2: + raise ValueError( + "scale_tril must be at least two-dimensional, with optional leading batch dimensions" + ) + + if isinstance(df, Number): + batch_shape = torch.Size(param.shape[:-2]) + self.df = torch.tensor(df, dtype=param.dtype, device=param.device) + else: + batch_shape = torch.broadcast_shapes(param.shape[:-2], df.shape) + self.df = df.expand(batch_shape) + event_shape = param.shape[-2:] + + if self.df.le(event_shape[-1] - 1).any(): + raise ValueError( + f"Value of df={df} expected to be greater than ndim - 1 = {event_shape[-1]-1}." + ) + + if scale_tril is not None: + self.scale_tril = param.expand(batch_shape + (-1, -1)) + elif covariance_matrix is not None: + self.covariance_matrix = param.expand(batch_shape + (-1, -1)) + elif precision_matrix is not None: + self.precision_matrix = param.expand(batch_shape + (-1, -1)) + + self.arg_constraints["df"] = constraints.greater_than(event_shape[-1] - 1) + if self.df.lt(event_shape[-1]).any(): + warnings.warn( + "Low df values detected. Singular samples are highly likely to occur for ndim - 1 < df < ndim." 
+ ) + + super().__init__(batch_shape, event_shape, validate_args=validate_args) + self._batch_dims = [-(x + 1) for x in range(len(self._batch_shape))] + + if scale_tril is not None: + self._unbroadcasted_scale_tril = scale_tril + elif covariance_matrix is not None: + self._unbroadcasted_scale_tril = torch.linalg.cholesky(covariance_matrix) + else: # precision_matrix is not None + self._unbroadcasted_scale_tril = _precision_to_scale_tril(precision_matrix) + + # Chi2 distribution is needed for Bartlett decomposition sampling + self._dist_chi2 = torch.distributions.chi2.Chi2( + df=( + self.df.unsqueeze(-1) + - torch.arange( + self._event_shape[-1], + dtype=self._unbroadcasted_scale_tril.dtype, + device=self._unbroadcasted_scale_tril.device, + ).expand(batch_shape + (-1,)) + ) + ) + + def expand(self, batch_shape, _instance=None): + new = self._get_checked_instance(Wishart, _instance) + batch_shape = torch.Size(batch_shape) + cov_shape = batch_shape + self.event_shape + new._unbroadcasted_scale_tril = self._unbroadcasted_scale_tril.expand(cov_shape) + new.df = self.df.expand(batch_shape) + + new._batch_dims = [-(x + 1) for x in range(len(batch_shape))] + + if "covariance_matrix" in self.__dict__: + new.covariance_matrix = self.covariance_matrix.expand(cov_shape) + if "scale_tril" in self.__dict__: + new.scale_tril = self.scale_tril.expand(cov_shape) + if "precision_matrix" in self.__dict__: + new.precision_matrix = self.precision_matrix.expand(cov_shape) + + # Chi2 distribution is needed for Bartlett decomposition sampling + new._dist_chi2 = torch.distributions.chi2.Chi2( + df=( + new.df.unsqueeze(-1) + - torch.arange( + self.event_shape[-1], + dtype=new._unbroadcasted_scale_tril.dtype, + device=new._unbroadcasted_scale_tril.device, + ).expand(batch_shape + (-1,)) + ) + ) + + super(Wishart, new).__init__(batch_shape, self.event_shape, validate_args=False) + new._validate_args = self._validate_args + return new + + @lazy_property + def scale_tril(self): + return self._unbroadcasted_scale_tril.expand( + self._batch_shape + self._event_shape + ) + + @lazy_property + def covariance_matrix(self): + return ( + self._unbroadcasted_scale_tril + @ self._unbroadcasted_scale_tril.transpose(-2, -1) + ).expand(self._batch_shape + self._event_shape) + + @lazy_property + def precision_matrix(self): + identity = torch.eye( + self._event_shape[-1], + device=self._unbroadcasted_scale_tril.device, + dtype=self._unbroadcasted_scale_tril.dtype, + ) + return torch.cholesky_solve(identity, self._unbroadcasted_scale_tril).expand( + self._batch_shape + self._event_shape + ) + + @property + def mean(self): + return self.df.view(self._batch_shape + (1, 1)) * self.covariance_matrix + + @property + def mode(self): + factor = self.df - self.covariance_matrix.shape[-1] - 1 + factor[factor <= 0] = nan + return factor.view(self._batch_shape + (1, 1)) * self.covariance_matrix + + @property + def variance(self): + V = self.covariance_matrix # has shape (batch_shape x event_shape) + diag_V = V.diagonal(dim1=-2, dim2=-1) + return self.df.view(self._batch_shape + (1, 1)) * ( + V.pow(2) + torch.einsum("...i,...j->...ij", diag_V, diag_V) + ) + + def _bartlett_sampling(self, sample_shape=torch.Size()): + p = self._event_shape[-1] # has singleton shape + + # Implemented Sampling using Bartlett decomposition + noise = _clamp_above_eps( + self._dist_chi2.rsample(sample_shape).sqrt() + ).diag_embed(dim1=-2, dim2=-1) + + i, j = torch.tril_indices(p, p, offset=-1) + noise[..., i, j] = torch.randn( + torch.Size(sample_shape) + 
self._batch_shape + (int(p * (p - 1) / 2),), + dtype=noise.dtype, + device=noise.device, + ) + chol = self._unbroadcasted_scale_tril @ noise + return chol @ chol.transpose(-2, -1) + + def rsample(self, sample_shape=torch.Size(), max_try_correction=None): + r""" + .. warning:: + In some cases, sampling algorithm based on Bartlett decomposition may return singular matrix samples. + Several tries to correct singular samples are performed by default, but it may end up returning + singular matrix samples. Singular samples may return `-inf` values in `.log_prob()`. + In those cases, the user should validate the samples and either fix the value of `df` + or adjust `max_try_correction` value for argument in `.rsample` accordingly. + """ + + if max_try_correction is None: + max_try_correction = 3 if torch._C._get_tracing_state() else 10 + + sample_shape = torch.Size(sample_shape) + sample = self._bartlett_sampling(sample_shape) + + # Below part is to improve numerical stability temporally and should be removed in the future + is_singular = self.support.check(sample) + if self._batch_shape: + is_singular = is_singular.amax(self._batch_dims) + + if torch._C._get_tracing_state(): + # Less optimized version for JIT + for _ in range(max_try_correction): + sample_new = self._bartlett_sampling(sample_shape) + sample = torch.where(is_singular, sample_new, sample) + + is_singular = ~self.support.check(sample) + if self._batch_shape: + is_singular = is_singular.amax(self._batch_dims) + + else: + # More optimized version with data-dependent control flow. + if is_singular.any(): + warnings.warn("Singular sample detected.") + + for _ in range(max_try_correction): + sample_new = self._bartlett_sampling(is_singular[is_singular].shape) + sample[is_singular] = sample_new + + is_singular_new = ~self.support.check(sample_new) + if self._batch_shape: + is_singular_new = is_singular_new.amax(self._batch_dims) + is_singular[is_singular.clone()] = is_singular_new + + if not is_singular.any(): + break + + return sample + + def log_prob(self, value): + if self._validate_args: + self._validate_sample(value) + nu = self.df # has shape (batch_shape) + p = self._event_shape[-1] # has singleton shape + return ( + -nu + * ( + p * _log_2 / 2 + + self._unbroadcasted_scale_tril.diagonal(dim1=-2, dim2=-1) + .log() + .sum(-1) + ) + - torch.mvlgamma(nu / 2, p=p) + + (nu - p - 1) / 2 * torch.linalg.slogdet(value).logabsdet + - torch.cholesky_solve(value, self._unbroadcasted_scale_tril) + .diagonal(dim1=-2, dim2=-1) + .sum(dim=-1) + / 2 + ) + + def entropy(self): + nu = self.df # has shape (batch_shape) + p = self._event_shape[-1] # has singleton shape + V = self.covariance_matrix # has shape (batch_shape x event_shape) + return ( + (p + 1) + * ( + p * _log_2 / 2 + + self._unbroadcasted_scale_tril.diagonal(dim1=-2, dim2=-1) + .log() + .sum(-1) + ) + + torch.mvlgamma(nu / 2, p=p) + - (nu - p - 1) / 2 * _mvdigamma(nu / 2, p=p) + + nu * p / 2 + ) + + @property + def _natural_params(self): + nu = self.df # has shape (batch_shape) + p = self._event_shape[-1] # has singleton shape + return -self.precision_matrix / 2, (nu - p - 1) / 2 + + def _log_normalizer(self, x, y): + p = self._event_shape[-1] + return (y + (p + 1) / 2) * ( + -torch.linalg.slogdet(-2 * x).logabsdet + _log_2 * p + ) + torch.mvlgamma(y + (p + 1) / 2, p=p) diff --git a/venv/lib/python3.10/site-packages/torch/masked/__init__.py b/venv/lib/python3.10/site-packages/torch/masked/__init__.py new file mode 100644 index 
0000000000000000000000000000000000000000..e0193416ed2f572b476e3fabfa8668c7c4b651dd --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/masked/__init__.py @@ -0,0 +1,37 @@ +from .maskedtensor.core import is_masked_tensor, MaskedTensor +from .maskedtensor.creation import as_masked_tensor, masked_tensor +from ._ops import ( + _canonical_dim, + _generate_docstring, + _reduction_identity, + _where, + _input_mask, + _output_mask, + _combine_input_and_mask, + sum, + prod, + cumsum, + cumprod, + amax, + amin, + argmax, + argmin, + mean, + median, + logsumexp, + logaddexp, + norm, + var, + std, + softmax, + log_softmax, + softmin, + normalize, +) + +__all__ = [ + "as_masked_tensor", + "is_masked_tensor", + "masked_tensor", + "MaskedTensor", +] diff --git a/venv/lib/python3.10/site-packages/torch/masked/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/masked/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0ef82b667919602ee19142267b53d546a685951b Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/masked/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/masked/__pycache__/_docs.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/masked/__pycache__/_docs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9df79664b96efd414d7a733936339037f36fbcac Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/masked/__pycache__/_docs.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/masked/__pycache__/_ops.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/masked/__pycache__/_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fd7a0f8d3873c988d6366e155a6fdfdc817c5930 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/masked/__pycache__/_ops.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/masked/_docs.py b/venv/lib/python3.10/site-packages/torch/masked/_docs.py new file mode 100644 index 0000000000000000000000000000000000000000..bf96b49e3e8271cd93b5e84f74f5f333e56fda98 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/masked/_docs.py @@ -0,0 +1,1177 @@ +# This file is generated, do not modify it! +# +# To update this file, run the update masked docs script as follows: +# +# python tools/update_masked_docs.py +# +# The script must be called from an environment where the development +# version of torch package can be imported and is functional. +# + +amax_docstring = """amax(input, dim, *, keepdim=False, dtype=None, mask=None) -> Tensor + +Returns maximum of all the elements in the :attr:`input` +tensor along the given dimension(s) :attr:`dim` while the :attr:`input` +elements are masked out according to the boolean tensor +:attr:`mask`. + +The identity value of maximum operation, which is used to start the +reduction, depends on input dtype. For instance, for float32, uint8, +and int32 dtypes, the identity values are ``-inf``, ``0``, and ``-2147483648``, respectively. + +If :attr:`keepdim` is ``True``, the output tensor is of the same size +as :attr:`input` except in the dimension(s) :attr:`dim` where it is of +size 1. Otherwise, :attr:`dim` is squeezed (see +:func:`torch.squeeze`), resulting in the output tensor having 1 (or +``len(dim)``) fewer dimension(s). 
+ +The boolean tensor :attr:`mask` defines the "validity" of +:attr:`input` tensor elements: if :attr:`mask` element is True +then the corresponding element in :attr:`input` tensor will be +included in maximum computation, otherwise the element is +ignored. + +When all elements of :attr:`input` along the given dimension +:attr:`dim` are ignored (fully masked-out), the corresponding element +of the output tensor will have undefined value: it may or may not +correspond to the identity value of maximum operation; the +choice may correspond to the value that leads to the most efficient +storage of :attr:`output` tensor. + +The mask of the output tensor can be computed as +``torch.any(torch.broadcast_to(mask, input.shape), dim, keepdim=keepdim, +dtype=torch.bool)``. + +The shapes of the :attr:`mask` tensor and the :attr:`input` tensor +don't need to match, but they must be :ref:`broadcastable +` and the dimensionality of the :attr:`mask` +tensor must not be greater than of the :attr:`input` tensor. + +Args: + input (Tensor): the input tensor + dim (int or tuple of ints, optional): the dimension or dimensions to reduce. + Default: None that is equivalent to ``tuple(range(input.ndim))``. + +Keyword args: + keepdim (bool, optional): whether the output tensor has + :attr:`dim` retained or not. Default: False. + dtype (:class:`torch.dtype`, optional): the desired data type + of returned tensor. If specified, the input tensor is + casted to :attr:`dtype` before the operation is + performed. Default: None. + mask (:class:`torch.Tensor`, optional): the boolean tensor + containing the binary mask of validity of input tensor + elements. + Default: None that is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``. + +Example:: + + >>> input = tensor([[-3, -2, -1], [ 0, 1, 2]]) + >>> input + tensor([[-3, -2, -1], + [ 0, 1, 2]]) + >>> mask = tensor([[ True, False, True], [False, False, False]]) + >>> mask + tensor([[ True, False, True], + [False, False, False]]) + >>> torch.masked._ops.amax(input, 1, mask=mask) + tensor([ -1, -9223372036854775808]) +""" + +amin_docstring = """amin(input, dim, *, keepdim=False, dtype=None, mask=None) -> Tensor + +Returns minimum of all the elements in the :attr:`input` +tensor along the given dimension(s) :attr:`dim` while the :attr:`input` +elements are masked out according to the boolean tensor +:attr:`mask`. + +The identity value of minimum operation, which is used to start the +reduction, depends on input dtype. For instance, for float32, uint8, +and int32 dtypes, the identity values are ``inf``, ``255``, and ``2147483647``, respectively. + +If :attr:`keepdim` is ``True``, the output tensor is of the same size +as :attr:`input` except in the dimension(s) :attr:`dim` where it is of +size 1. Otherwise, :attr:`dim` is squeezed (see +:func:`torch.squeeze`), resulting in the output tensor having 1 (or +``len(dim)``) fewer dimension(s). + +The boolean tensor :attr:`mask` defines the "validity" of +:attr:`input` tensor elements: if :attr:`mask` element is True +then the corresponding element in :attr:`input` tensor will be +included in minimum computation, otherwise the element is +ignored. + +When all elements of :attr:`input` along the given dimension +:attr:`dim` are ignored (fully masked-out), the corresponding element +of the output tensor will have undefined value: it may or may not +correspond to the identity value of minimum operation; the +choice may correspond to the value that leads to the most efficient +storage of :attr:`output` tensor. 
+ +The mask of the output tensor can be computed as +``torch.any(torch.broadcast_to(mask, input.shape), dim, keepdim=keepdim, +dtype=torch.bool)``. + +The shapes of the :attr:`mask` tensor and the :attr:`input` tensor +don't need to match, but they must be :ref:`broadcastable +` and the dimensionality of the :attr:`mask` +tensor must not be greater than of the :attr:`input` tensor. + +Args: + input (Tensor): the input tensor + dim (int or tuple of ints, optional): the dimension or dimensions to reduce. + Default: None that is equivalent to ``tuple(range(input.ndim))``. + +Keyword args: + keepdim (bool, optional): whether the output tensor has + :attr:`dim` retained or not. Default: False. + dtype (:class:`torch.dtype`, optional): the desired data type + of returned tensor. If specified, the input tensor is + casted to :attr:`dtype` before the operation is + performed. Default: None. + mask (:class:`torch.Tensor`, optional): the boolean tensor + containing the binary mask of validity of input tensor + elements. + Default: None that is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``. + +Example:: + + >>> input = tensor([[-3, -2, -1], [ 0, 1, 2]]) + >>> input + tensor([[-3, -2, -1], + [ 0, 1, 2]]) + >>> mask = tensor([[ True, False, True], [False, False, False]]) + >>> mask + tensor([[ True, False, True], + [False, False, False]]) + >>> torch.masked._ops.amin(input, 1, mask=mask) + tensor([ -3, 9223372036854775807]) +""" + +argmax_docstring = """argmax(input, dim, *, keepdim=False, dtype=None, mask=None) -> Tensor +Returns argmax of all the elements in the :attr:`input` +tensor along the given dimension(s) :attr:`dim` while the :attr:`input` +elements are masked out according to the boolean tensor +:attr:`mask`. +The identity value of argmax operation, which is used to start the +reduction, depends on input dtype. For instance, for float32, uint8, +and int32 dtypes, the identity values are ``-inf``, ``0``, and ``-2147483648``, respectively. +If :attr:`keepdim` is ``True``, the output tensor is of the same size +as :attr:`input` except in the dimension(s) :attr:`dim` where it is of +size 1. Otherwise, :attr:`dim` is squeezed (see +:func:`torch.squeeze`), resulting in the output tensor having 1 (or +``len(dim)``) fewer dimension(s). + +The boolean tensor :attr:`mask` defines the "validity" of +:attr:`input` tensor elements: if :attr:`mask` element is True +then the corresponding element in :attr:`input` tensor will be +included in argmax computation, otherwise the element is +ignored. + +When all elements of :attr:`input` along the given dimension +:attr:`dim` are ignored (fully masked-out), the corresponding element +of the output tensor will have undefined value: it may or may not +correspond to the identity value of argmax operation; the +choice may correspond to the value that leads to the most efficient +storage of :attr:`output` tensor. + +The mask of the output tensor can be computed as +``torch.any(torch.broadcast_to(mask, input.shape), dim, keepdim=keepdim, +dtype=torch.bool)``. + +The shapes of the :attr:`mask` tensor and the :attr:`input` tensor +don't need to match, but they must be :ref:`broadcastable +` and the dimensionality of the :attr:`mask` +tensor must not be greater than of the :attr:`input` tensor. + +Args: + input (Tensor): the input tensor + dim (int): the dimension along which argmax is computed. + +Keyword args: + keepdim (bool, optional): whether the output tensor has + :attr:`dim` retained or not. Default: False. 
+ dtype (:class:`torch.dtype`, optional): the desired data type + of returned tensor. If specified, the input tensor is + casted to :attr:`dtype` before the operation is + performed. Default: None. + mask (:class:`torch.Tensor`, optional): the boolean tensor + containing the binary mask of validity of input tensor + elements. + Default: None that is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``. +Example:: + + >>> input = tensor([[-3, -2, -1], [ 0, 1, 2]]) + >>> input + tensor([[-3, -2, -1], + [ 0, 1, 2]]) + >>> mask = tensor([[ True, False, True], [False, False, False]]) + >>> mask + tensor([[ True, False, True], + [False, False, False]]) + >>> torch.masked._ops.argmax(input, 1, mask=mask) + tensor([2, 0]) +""" + +argmin_docstring = """argmin(input, dim, *, keepdim=False, dtype=None, mask=None) -> Tensor +Returns argmin of all the elements in the :attr:`input` +tensor along the given dimension(s) :attr:`dim` while the :attr:`input` +elements are masked out according to the boolean tensor +:attr:`mask`. +The identity value of argmin operation, which is used to start the +reduction, depends on input dtype. For instance, for float32, uint8, +and int32 dtypes, the identity values are ``inf``, ``255``, and ``2147483647``, respectively. +If :attr:`keepdim` is ``True``, the output tensor is of the same size +as :attr:`input` except in the dimension(s) :attr:`dim` where it is of +size 1. Otherwise, :attr:`dim` is squeezed (see +:func:`torch.squeeze`), resulting in the output tensor having 1 (or +``len(dim)``) fewer dimension(s). + +The boolean tensor :attr:`mask` defines the "validity" of +:attr:`input` tensor elements: if :attr:`mask` element is True +then the corresponding element in :attr:`input` tensor will be +included in argmin computation, otherwise the element is +ignored. + +When all elements of :attr:`input` along the given dimension +:attr:`dim` are ignored (fully masked-out), the corresponding element +of the output tensor will have undefined value: it may or may not +correspond to the identity value of argmin operation; the +choice may correspond to the value that leads to the most efficient +storage of :attr:`output` tensor. + +The mask of the output tensor can be computed as +``torch.any(torch.broadcast_to(mask, input.shape), dim, keepdim=keepdim, +dtype=torch.bool)``. + +The shapes of the :attr:`mask` tensor and the :attr:`input` tensor +don't need to match, but they must be :ref:`broadcastable +` and the dimensionality of the :attr:`mask` +tensor must not be greater than of the :attr:`input` tensor. + +Args: + input (Tensor): the input tensor + dim (int): the dimension along which argmin is computed. + +Keyword args: + keepdim (bool, optional): whether the output tensor has + :attr:`dim` retained or not. Default: False. + dtype (:class:`torch.dtype`, optional): the desired data type + of returned tensor. If specified, the input tensor is + casted to :attr:`dtype` before the operation is + performed. Default: None. + mask (:class:`torch.Tensor`, optional): the boolean tensor + containing the binary mask of validity of input tensor + elements. + Default: None that is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``. 
+Example:: + + >>> input = tensor([[-3, -2, -1], [ 0, 1, 2]]) + >>> input + tensor([[-3, -2, -1], + [ 0, 1, 2]]) + >>> mask = tensor([[ True, False, True], [False, False, False]]) + >>> mask + tensor([[ True, False, True], + [False, False, False]]) + >>> torch.masked._ops.argmin(input, 1, mask=mask) + tensor([0, 0]) +""" + +cumprod_docstring = """cumprod(input, dim, *, dtype=None, mask=None) -> Tensor + +Returns cumulative_prod of all the slices in the :attr:`input` tensor +along :attr:`dim` while the :attr:`input` elements are masked out +according to the boolean tensor :attr:`mask`. + +Let ``x`` be a sequence of unmasked elements of one-dimensional slice +of the :attr:`input` tensor. Cumsum of i-th element in ``x`` is +defined as ``prod(x[:i])``. + +The boolean tensor :attr:`mask` defines the "validity" of +:attr:`input` tensor elements: if :attr:`mask` element is True then +the corresponding element in :attr:`input` tensor will be included in +cumulative_prod computation, otherwise the element is ignored. + +The values of masked-out elements of the output tensor have undefined +value: it may or may not be set to zero or nan; the choice may correspond to +the value that leads to the most efficient storage of :attr:`output` +tensor. + +The mask of the cumulative_prod output tensor can be computed as +``torch.broadcast_to(mask, input.shape)``. + +The shapes of the :attr:`mask` tensor and the :attr:`input` tensor +don't need to match, but they must be :ref:`broadcastable +` and the dimensionality of the :attr:`mask` +tensor must not be greater than of the :attr:`input` tensor. + +Args: + input (Tensor): the input tensor + dim (int): the dimension along which cumulative_prod is computed. + +Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type + of returned tensor. If specified, the input tensor is + casted to :attr:`dtype` before the operation is + performed. Default: None. + mask (:class:`torch.Tensor`, optional): the boolean tensor + containing the binary mask of validity of input tensor + elements. + Default: None that is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``. + +Example:: + + >>> input = tensor([[-3., -2., -1.], [ 0., 1., 2.]]) + >>> input + tensor([[-3., -2., -1.], + [ 0., 1., 2.]]) + >>> mask = tensor([[ True, False, True], [False, False, False]]) + >>> mask + tensor([[ True, False, True], + [False, False, False]]) + >>> torch.masked._ops.cumprod(input, 1, mask=mask) + tensor([[-3., -3., 3.], + [ 1., 1., 1.]]) +""" + +cumsum_docstring = """cumsum(input, dim, *, dtype=None, mask=None) -> Tensor + +Returns cumulative_sum of all the slices in the :attr:`input` tensor +along :attr:`dim` while the :attr:`input` elements are masked out +according to the boolean tensor :attr:`mask`. + +Let ``x`` be a sequence of unmasked elements of one-dimensional slice +of the :attr:`input` tensor. Cumsum of i-th element in ``x`` is +defined as ``sum(x[:i])``. + +The boolean tensor :attr:`mask` defines the "validity" of +:attr:`input` tensor elements: if :attr:`mask` element is True then +the corresponding element in :attr:`input` tensor will be included in +cumulative_sum computation, otherwise the element is ignored. + +The values of masked-out elements of the output tensor have undefined +value: it may or may not be set to zero or nan; the choice may correspond to +the value that leads to the most efficient storage of :attr:`output` +tensor. + +The mask of the cumulative_sum output tensor can be computed as +``torch.broadcast_to(mask, input.shape)``. 
+ +The shapes of the :attr:`mask` tensor and the :attr:`input` tensor +don't need to match, but they must be :ref:`broadcastable +` and the dimensionality of the :attr:`mask` +tensor must not be greater than of the :attr:`input` tensor. + +Args: + input (Tensor): the input tensor + dim (int): the dimension along which cumulative_sum is computed. + +Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type + of returned tensor. If specified, the input tensor is + casted to :attr:`dtype` before the operation is + performed. Default: None. + mask (:class:`torch.Tensor`, optional): the boolean tensor + containing the binary mask of validity of input tensor + elements. + Default: None that is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``. + +Example:: + + >>> input = tensor([[-3., -2., -1.], [ 0., 1., 2.]]) + >>> input + tensor([[-3., -2., -1.], + [ 0., 1., 2.]]) + >>> mask = tensor([[ True, False, True], [False, False, False]]) + >>> mask + tensor([[ True, False, True], + [False, False, False]]) + >>> torch.masked._ops.cumsum(input, 1, mask=mask) + tensor([[-3., -3., -4.], + [ 0., 0., 0.]]) +""" + +log_softmax_docstring = """log_softmax(input, dim, *, dtype=None, mask=None) -> Tensor + +Returns log_softmax of all the slices in the :attr:`input` tensor +along :attr:`dim` while the :attr:`input` elements are masked out +according to the boolean tensor :attr:`mask`. + +Let ``x`` be a sequence of unmasked elements of one-dimensional slice +of the :attr:`input` tensor. LogSoftmax of i-th element in ``x`` is +defined as ``log(exp(x[i])/sum(exp(x)))``. + +The boolean tensor :attr:`mask` defines the "validity" of +:attr:`input` tensor elements: if :attr:`mask` element is True then +the corresponding element in :attr:`input` tensor will be included in +log_softmax computation, otherwise the element is ignored. + +The values of masked-out elements of the output tensor have undefined +value: it may or may not be set to zero or nan; the choice may correspond to +the value that leads to the most efficient storage of :attr:`output` +tensor. + +The mask of the log_softmax output tensor can be computed as +``torch.broadcast_to(mask, input.shape)``. + +The shapes of the :attr:`mask` tensor and the :attr:`input` tensor +don't need to match, but they must be :ref:`broadcastable +` and the dimensionality of the :attr:`mask` +tensor must not be greater than of the :attr:`input` tensor. + +Args: + input (Tensor): the input tensor + dim (int): the dimension along which log_softmax is computed. + +Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type + of returned tensor. If specified, the input tensor is + casted to :attr:`dtype` before the operation is + performed. Default: None. + mask (:class:`torch.Tensor`, optional): the boolean tensor + containing the binary mask of validity of input tensor + elements. + Default: None that is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``. 
+ +Example:: + + >>> input = tensor([[-3., -2., -1.], [ 0., 1., 2.]]) + >>> input + tensor([[-3., -2., -1.], + [ 0., 1., 2.]]) + >>> mask = tensor([[ True, False, True], [False, False, False]]) + >>> mask + tensor([[ True, False, True], + [False, False, False]]) + >>> torch.masked._ops.log_softmax(input, 1, mask=mask) + tensor([[-2.1269, -inf, -0.1269], + [ nan, nan, nan]]) +""" + +logsumexp_docstring = """logsumexp(input, dim, *, keepdim=False, dtype=None, mask=None) -> Tensor + +Returns logsumexp of all the elements in the :attr:`input` +tensor along the given dimension(s) :attr:`dim` while the :attr:`input` +elements are masked out according to the boolean tensor +:attr:`mask`. + +The identity value of logsumexp operation, which is used to start the reduction, is ``-2147483648``. + +If :attr:`keepdim` is ``True``, the output tensor is of the same size +as :attr:`input` except in the dimension(s) :attr:`dim` where it is of +size 1. Otherwise, :attr:`dim` is squeezed (see +:func:`torch.squeeze`), resulting in the output tensor having 1 (or +``len(dim)``) fewer dimension(s). + +The boolean tensor :attr:`mask` defines the "validity" of +:attr:`input` tensor elements: if :attr:`mask` element is True +then the corresponding element in :attr:`input` tensor will be +included in logsumexp computation, otherwise the element is +ignored. + +When all elements of :attr:`input` along the given dimension +:attr:`dim` are ignored (fully masked-out), the corresponding element +of the output tensor will have undefined value: it may or may not +correspond to the identity value of logsumexp operation; the +choice may correspond to the value that leads to the most efficient +storage of :attr:`output` tensor. + +The mask of the output tensor can be computed as +``torch.any(torch.broadcast_to(mask, input.shape), dim, keepdim=keepdim, +dtype=torch.bool)``. + +The shapes of the :attr:`mask` tensor and the :attr:`input` tensor +don't need to match, but they must be :ref:`broadcastable +` and the dimensionality of the :attr:`mask` +tensor must not be greater than of the :attr:`input` tensor. + +Args: + input (Tensor): the input tensor + dim (int or tuple of ints, optional): the dimension or dimensions to reduce. + Default: None that is equivalent to ``tuple(range(input.ndim))``. + +Keyword args: + keepdim (bool, optional): whether the output tensor has + :attr:`dim` retained or not. Default: False. + dtype (:class:`torch.dtype`, optional): the desired data type + of returned tensor. If specified, the input tensor is + casted to :attr:`dtype` before the operation is + performed. Default: None. + mask (:class:`torch.Tensor`, optional): the boolean tensor + containing the binary mask of validity of input tensor + elements. + Default: None that is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``. + +Example:: + + >>> input = tensor([[-3, -2, -1], [ 0, 1, 2]]) + >>> input + tensor([[-3, -2, -1], + [ 0, 1, 2]]) + >>> mask = tensor([[ True, False, True], [False, False, False]]) + >>> mask + tensor([[ True, False, True], + [False, False, False]]) + >>> torch.masked._ops.logsumexp(input, 1, mask=mask) + tensor([ 0, -9223372036854775808]) +""" + +mean_docstring = """mean(input, dim, *, keepdim=False, dtype=None, mask=None) -> Tensor + +Returns mean of all the elements in the :attr:`input` +tensor along the given dimension(s) :attr:`dim` while the :attr:`input` +elements are masked out according to the boolean tensor +:attr:`mask`. 
+ +By definition, the identity value of a mean operation is the mean +value of the tensor. If all elements of the input tensor along given +dimension(s) :attr:`dim` are masked-out, the identity value of the +mean is undefined. Due to this ambiguity, the elements of output +tensor with strided layout, that correspond to fully masked-out +elements, have ``nan`` values. + +If :attr:`keepdim` is ``True``, the output tensor is of the same size +as :attr:`input` except in the dimension(s) :attr:`dim` where it is of +size 1. Otherwise, :attr:`dim` is squeezed (see +:func:`torch.squeeze`), resulting in the output tensor having 1 (or +``len(dim)``) fewer dimension(s). + +The boolean tensor :attr:`mask` defines the "validity" of +:attr:`input` tensor elements: if :attr:`mask` element is True +then the corresponding element in :attr:`input` tensor will be +included in mean computation, otherwise the element is +ignored. + +When all elements of :attr:`input` along the given dimension +:attr:`dim` are ignored (fully masked-out), the corresponding element +of the output tensor will have undefined value: it may or may not +correspond to the identity value of mean operation; the +choice may correspond to the value that leads to the most efficient +storage of :attr:`output` tensor. + +The mask of the output tensor can be computed as +``torch.any(torch.broadcast_to(mask, input.shape), dim, keepdim=keepdim, +dtype=torch.bool)``. + +The shapes of the :attr:`mask` tensor and the :attr:`input` tensor +don't need to match, but they must be :ref:`broadcastable +` and the dimensionality of the :attr:`mask` +tensor must not be greater than of the :attr:`input` tensor. + +Args: + input (Tensor): the input tensor + dim (int or tuple of ints, optional): the dimension or dimensions to reduce. + Default: None that is equivalent to ``tuple(range(input.ndim))``. + +Keyword args: + keepdim (bool, optional): whether the output tensor has + :attr:`dim` retained or not. Default: False. + dtype (:class:`torch.dtype`, optional): the desired data type + of returned tensor. If specified, the input tensor is + casted to :attr:`dtype` before the operation is + performed. Default: None. + mask (:class:`torch.Tensor`, optional): the boolean tensor + containing the binary mask of validity of input tensor + elements. + Default: None that is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``. + +Example:: + + >>> input = tensor([[-3, -2, -1], [ 0, 1, 2]]) + >>> input + tensor([[-3, -2, -1], + [ 0, 1, 2]]) + >>> mask = tensor([[ True, False, True], [False, False, False]]) + >>> mask + tensor([[ True, False, True], + [False, False, False]]) + >>> torch.masked._ops.mean(input, 1, mask=mask) + tensor([-2., nan]) +""" + +median_docstring = """median(input, dim, *, keepdim=False, dtype=None, mask=None) -> Tensor +Returns median of all the elements in the :attr:`input` +tensor along the given dimension(s) :attr:`dim` while the :attr:`input` +elements are masked out according to the boolean tensor +:attr:`mask`. +By definition, the identity value of a median operation is the median +value of the tensor. If all elements of the input tensor along given +dimension(s) :attr:`dim` are masked-out, the identity value of the +median is undefined. Due to this ambiguity, the elements of output +tensor with strided layout, that correspond to fully masked-out +elements, have ``nan`` values. +If :attr:`keepdim` is ``True``, the output tensor is of the same size +as :attr:`input` except in the dimension(s) :attr:`dim` where it is of +size 1. 
Otherwise, :attr:`dim` is squeezed (see +:func:`torch.squeeze`), resulting in the output tensor having 1 (or +``len(dim)``) fewer dimension(s). + +The boolean tensor :attr:`mask` defines the "validity" of +:attr:`input` tensor elements: if :attr:`mask` element is True +then the corresponding element in :attr:`input` tensor will be +included in median computation, otherwise the element is +ignored. + +When all elements of :attr:`input` along the given dimension +:attr:`dim` are ignored (fully masked-out), the corresponding element +of the output tensor will have undefined value: it may or may not +correspond to the identity value of median operation; the +choice may correspond to the value that leads to the most efficient +storage of :attr:`output` tensor. + +The mask of the output tensor can be computed as +``torch.any(torch.broadcast_to(mask, input.shape), dim, keepdim=keepdim, +dtype=torch.bool)``. + +The shapes of the :attr:`mask` tensor and the :attr:`input` tensor +don't need to match, but they must be :ref:`broadcastable +` and the dimensionality of the :attr:`mask` +tensor must not be greater than of the :attr:`input` tensor. + +Args: + input (Tensor): the input tensor + dim (int): the dimension along which median is computed. + +Keyword args: + keepdim (bool, optional): whether the output tensor has + :attr:`dim` retained or not. Default: False. + dtype (:class:`torch.dtype`, optional): the desired data type + of returned tensor. If specified, the input tensor is + casted to :attr:`dtype` before the operation is + performed. Default: None. + mask (:class:`torch.Tensor`, optional): the boolean tensor + containing the binary mask of validity of input tensor + elements. + Default: None that is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``. +Example:: + + >>> input = tensor([[-3., -2., -1.], [ 0., 1., 2.]]) + >>> input + tensor([[-3., -2., -1.], + [ 0., 1., 2.]]) + >>> mask = tensor([[ True, False, True], [False, False, False]]) + >>> mask + tensor([[ True, False, True], + [False, False, False]]) + >>> torch.masked._ops.median(input, 1, mask=mask) + tensor([-3., nan]) +""" + +norm_docstring = """norm(input, ord, dim, *, keepdim=False, dtype=None, mask=None) -> Tensor + +Returns norm of all the elements in the :attr:`input` +tensor along the given dimension(s) :attr:`dim` while the :attr:`input` +elements are masked out according to the boolean tensor +:attr:`mask`. + +The identity value of norm operation, which is used to start the +reduction, is ``0.0``, except for ``ord=-inf`` it is +``inf``. + +If :attr:`keepdim` is ``True``, the output tensor is of the same size +as :attr:`input` except in the dimension(s) :attr:`dim` where it is of +size 1. Otherwise, :attr:`dim` is squeezed (see +:func:`torch.squeeze`), resulting in the output tensor having 1 (or +``len(dim)``) fewer dimension(s). + +The boolean tensor :attr:`mask` defines the "validity" of +:attr:`input` tensor elements: if :attr:`mask` element is True +then the corresponding element in :attr:`input` tensor will be +included in norm computation, otherwise the element is +ignored. + +When all elements of :attr:`input` along the given dimension +:attr:`dim` are ignored (fully masked-out), the corresponding element +of the output tensor will have undefined value: it may or may not +correspond to the identity value of norm operation; the +choice may correspond to the value that leads to the most efficient +storage of :attr:`output` tensor. 
+ +The mask of the output tensor can be computed as +``torch.any(torch.broadcast_to(mask, input.shape), dim, keepdim=keepdim, +dtype=torch.bool)``. + +The shapes of the :attr:`mask` tensor and the :attr:`input` tensor +don't need to match, but they must be :ref:`broadcastable +` and the dimensionality of the :attr:`mask` +tensor must not be greater than of the :attr:`input` tensor. + +Args: + input (Tensor): the input tensor + ord (int, float, optional): the order of vector norm. Default: 2. + See :func:`torch.linalg.vector_norm` for a list of supported norms. + dim (int or tuple of ints, optional): the dimension or dimensions to reduce. + Default: None that is equivalent to ``tuple(range(input.ndim))``. + +Keyword args: + keepdim (bool, optional): whether the output tensor has + :attr:`dim` retained or not. Default: False. + dtype (:class:`torch.dtype`, optional): the desired data type + of returned tensor. If specified, the input tensor is + casted to :attr:`dtype` before the operation is + performed. Default: None. + mask (:class:`torch.Tensor`, optional): the boolean tensor + containing the binary mask of validity of input tensor + elements. + Default: None that is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``. + +Example:: + + >>> input = tensor([[-3., -2., -1.], [ 0., 1., 2.]]) + >>> input + tensor([[-3., -2., -1.], + [ 0., 1., 2.]]) + >>> mask = tensor([[ True, False, True], [False, False, False]]) + >>> mask + tensor([[ True, False, True], + [False, False, False]]) + >>> torch.masked._ops.norm(input, 2.0, 1, mask=mask) + tensor([3.1623, 0.0000]) +""" + +normalize_docstring = """normalize(input, ord, dim, *, eps=1e-12, dtype=None, mask=None) -> Tensor + +Returns normalize of all the slices in the :attr:`input` tensor +along :attr:`dim` while the :attr:`input` elements are masked out +according to the boolean tensor :attr:`mask`. + +Let ``x`` be a sequence of unmasked elements of one-dimensional slice +of the :attr:`input` tensor. Normalize of i-th element in ``x`` is +defined as ``x[i]/max(norm(x, p), eps)``. + +The boolean tensor :attr:`mask` defines the "validity" of +:attr:`input` tensor elements: if :attr:`mask` element is True then +the corresponding element in :attr:`input` tensor will be included in +normalize computation, otherwise the element is ignored. + +The values of masked-out elements of the output tensor have undefined +value: it may or may not be set to zero or nan; the choice may correspond to +the value that leads to the most efficient storage of :attr:`output` +tensor. + +The mask of the normalize output tensor can be computed as +``torch.broadcast_to(mask, input.shape)``. + +The shapes of the :attr:`mask` tensor and the :attr:`input` tensor +don't need to match, but they must be :ref:`broadcastable +` and the dimensionality of the :attr:`mask` +tensor must not be greater than of the :attr:`input` tensor. + +Args: + input (Tensor): the input tensor + ord (int, float): the order of vector norm. Default: 2. + See :func:`torch.linalg.vector_norm` for a list of supported norms. + dim (int): the dimension along which normalize is computed. + +Keyword args: + eps (float, optional): small value to avoid division by zero. Default: 1e-12. + dtype (:class:`torch.dtype`, optional): the desired data type + of returned tensor. If specified, the input tensor is + casted to :attr:`dtype` before the operation is + performed. Default: None. 
+ mask (:class:`torch.Tensor`, optional): the boolean tensor + containing the binary mask of validity of input tensor + elements. + Default: None that is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``. + +Example:: + + >>> input = tensor([[-3., -2., -1.], [ 0., 1., 2.]]) + >>> input + tensor([[-3., -2., -1.], + [ 0., 1., 2.]]) + >>> mask = tensor([[ True, False, True], [False, False, False]]) + >>> mask + tensor([[ True, False, True], + [False, False, False]]) + >>> torch.masked._ops.normalize(input, 2.0, 1, mask=mask) + tensor([[-0.9487, 0.0000, -0.3162], + [ 0.0000, 0.0000, 0.0000]]) +""" + +prod_docstring = """prod(input, dim, *, keepdim=False, dtype=None, mask=None) -> Tensor + +Returns product of all the elements in the :attr:`input` +tensor along the given dimension(s) :attr:`dim` while the :attr:`input` +elements are masked out according to the boolean tensor +:attr:`mask`. + +The identity value of product operation, which is used to start the reduction, is ``1``. + +If :attr:`keepdim` is ``True``, the output tensor is of the same size +as :attr:`input` except in the dimension(s) :attr:`dim` where it is of +size 1. Otherwise, :attr:`dim` is squeezed (see +:func:`torch.squeeze`), resulting in the output tensor having 1 (or +``len(dim)``) fewer dimension(s). + +The boolean tensor :attr:`mask` defines the "validity" of +:attr:`input` tensor elements: if :attr:`mask` element is True +then the corresponding element in :attr:`input` tensor will be +included in product computation, otherwise the element is +ignored. + +When all elements of :attr:`input` along the given dimension +:attr:`dim` are ignored (fully masked-out), the corresponding element +of the output tensor will have undefined value: it may or may not +correspond to the identity value of product operation; the +choice may correspond to the value that leads to the most efficient +storage of :attr:`output` tensor. + +The mask of the output tensor can be computed as +``torch.any(torch.broadcast_to(mask, input.shape), dim, keepdim=keepdim, +dtype=torch.bool)``. + +The shapes of the :attr:`mask` tensor and the :attr:`input` tensor +don't need to match, but they must be :ref:`broadcastable +` and the dimensionality of the :attr:`mask` +tensor must not be greater than of the :attr:`input` tensor. + +Args: + input (Tensor): the input tensor + dim (int or tuple of ints, optional): the dimension or dimensions to reduce. + Default: None that is equivalent to ``tuple(range(input.ndim))``. + +Keyword args: + keepdim (bool, optional): whether the output tensor has + :attr:`dim` retained or not. Default: False. + dtype (:class:`torch.dtype`, optional): the desired data type + of returned tensor. If specified, the input tensor is + casted to :attr:`dtype` before the operation is + performed. Default: None. + mask (:class:`torch.Tensor`, optional): the boolean tensor + containing the binary mask of validity of input tensor + elements. + Default: None that is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``. 
+ +Example:: + + >>> input = tensor([[-3, -2, -1], [ 0, 1, 2]]) + >>> input + tensor([[-3, -2, -1], + [ 0, 1, 2]]) + >>> mask = tensor([[ True, False, True], [False, False, False]]) + >>> mask + tensor([[ True, False, True], + [False, False, False]]) + >>> torch.masked._ops.prod(input, 1, mask=mask) + tensor([3, 1]) +""" + +softmax_docstring = """softmax(input, dim, *, dtype=None, mask=None) -> Tensor + +Returns softmax of all the slices in the :attr:`input` tensor +along :attr:`dim` while the :attr:`input` elements are masked out +according to the boolean tensor :attr:`mask`. + +Let ``x`` be a sequence of unmasked elements of one-dimensional slice +of the :attr:`input` tensor. Softmax of i-th element in ``x`` is +defined as ``exp(x[i])/sum(exp(x))``. + +The boolean tensor :attr:`mask` defines the "validity" of +:attr:`input` tensor elements: if :attr:`mask` element is True then +the corresponding element in :attr:`input` tensor will be included in +softmax computation, otherwise the element is ignored. + +The values of masked-out elements of the output tensor have undefined +value: it may or may not be set to zero or nan; the choice may correspond to +the value that leads to the most efficient storage of :attr:`output` +tensor. + +The mask of the softmax output tensor can be computed as +``torch.broadcast_to(mask, input.shape)``. + +The shapes of the :attr:`mask` tensor and the :attr:`input` tensor +don't need to match, but they must be :ref:`broadcastable +` and the dimensionality of the :attr:`mask` +tensor must not be greater than of the :attr:`input` tensor. + +Args: + input (Tensor): the input tensor + dim (int): the dimension along which softmax is computed. + +Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type + of returned tensor. If specified, the input tensor is + casted to :attr:`dtype` before the operation is + performed. Default: None. + mask (:class:`torch.Tensor`, optional): the boolean tensor + containing the binary mask of validity of input tensor + elements. + Default: None that is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``. + +Example:: + + >>> input = tensor([[-3., -2., -1.], [ 0., 1., 2.]]) + >>> input + tensor([[-3., -2., -1.], + [ 0., 1., 2.]]) + >>> mask = tensor([[ True, False, True], [False, False, False]]) + >>> mask + tensor([[ True, False, True], + [False, False, False]]) + >>> torch.masked._ops.softmax(input, 1, mask=mask) + tensor([[0.1192, 0.0000, 0.8808], + [ nan, nan, nan]]) +""" + +softmin_docstring = """softmin(input, dim, *, dtype=None, mask=None) -> Tensor + +Returns softmin of all the slices in the :attr:`input` tensor +along :attr:`dim` while the :attr:`input` elements are masked out +according to the boolean tensor :attr:`mask`. + +Let ``x`` be a sequence of unmasked elements of one-dimensional slice +of the :attr:`input` tensor. Softmin of i-th element in ``x`` is +defined as ``exp(-x[i])/sum(exp(-x))``. + +The boolean tensor :attr:`mask` defines the "validity" of +:attr:`input` tensor elements: if :attr:`mask` element is True then +the corresponding element in :attr:`input` tensor will be included in +softmin computation, otherwise the element is ignored. + +The values of masked-out elements of the output tensor have undefined +value: it may or may not be set to zero or nan; the choice may correspond to +the value that leads to the most efficient storage of :attr:`output` +tensor. + +The mask of the softmin output tensor can be computed as +``torch.broadcast_to(mask, input.shape)``. 
+ +The shapes of the :attr:`mask` tensor and the :attr:`input` tensor +don't need to match, but they must be :ref:`broadcastable +` and the dimensionality of the :attr:`mask` +tensor must not be greater than of the :attr:`input` tensor. + +Args: + input (Tensor): the input tensor + dim (int): the dimension along which softmin is computed. + +Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type + of returned tensor. If specified, the input tensor is + casted to :attr:`dtype` before the operation is + performed. Default: None. + mask (:class:`torch.Tensor`, optional): the boolean tensor + containing the binary mask of validity of input tensor + elements. + Default: None that is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``. + +Example:: + + >>> input = tensor([[-3., -2., -1.], [ 0., 1., 2.]]) + >>> input + tensor([[-3., -2., -1.], + [ 0., 1., 2.]]) + >>> mask = tensor([[ True, False, True], [False, False, False]]) + >>> mask + tensor([[ True, False, True], + [False, False, False]]) + >>> torch.masked._ops.softmin(input, 1, mask=mask) + tensor([[0.8808, 0.0000, 0.1192], + [ nan, nan, nan]]) +""" + +std_docstring = """std(input, dim, unbiased, *, keepdim=False, dtype=None, mask=None) -> Tensor +Returns standard_deviation of all the elements in the :attr:`input` +tensor along the given dimension(s) :attr:`dim` while the :attr:`input` +elements are masked out according to the boolean tensor +:attr:`mask`. +The identity value of sample standard deviation operation is undefined. The +elements of output tensor with strided layout, that correspond to +fully masked-out elements, have ``nan`` values. +If :attr:`keepdim` is ``True``, the output tensor is of the same size +as :attr:`input` except in the dimension(s) :attr:`dim` where it is of +size 1. Otherwise, :attr:`dim` is squeezed (see +:func:`torch.squeeze`), resulting in the output tensor having 1 (or +``len(dim)``) fewer dimension(s). + +The boolean tensor :attr:`mask` defines the "validity" of +:attr:`input` tensor elements: if :attr:`mask` element is True +then the corresponding element in :attr:`input` tensor will be +included in standard_deviation computation, otherwise the element is +ignored. + +When all elements of :attr:`input` along the given dimension +:attr:`dim` are ignored (fully masked-out), the corresponding element +of the output tensor will have undefined value: it may or may not +correspond to the identity value of standard_deviation operation; the +choice may correspond to the value that leads to the most efficient +storage of :attr:`output` tensor. + +The mask of the output tensor can be computed as +``torch.any(torch.broadcast_to(mask, input.shape), dim, keepdim=keepdim, +dtype=torch.bool)``. + +The shapes of the :attr:`mask` tensor and the :attr:`input` tensor +don't need to match, but they must be :ref:`broadcastable +` and the dimensionality of the :attr:`mask` +tensor must not be greater than of the :attr:`input` tensor. + +Args: + input (Tensor): the input tensor + dim (int or tuple of ints, optional): the dimension or dimensions to reduce. + Default: None that is equivalent to ``tuple(range(input.ndim))``. + unbiased (bool): when True, use Bessel’s correction, otherwise, compute + the uncorrected sample variance. + +Keyword args: + keepdim (bool, optional): whether the output tensor has + :attr:`dim` retained or not. Default: False. + dtype (:class:`torch.dtype`, optional): the desired data type + of returned tensor. 
If specified, the input tensor is + casted to :attr:`dtype` before the operation is + performed. Default: None. + mask (:class:`torch.Tensor`, optional): the boolean tensor + containing the binary mask of validity of input tensor + elements. + Default: None that is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``. +Example:: + + >>> input = tensor([[-3, -2, -1], [ 0, 1, 2]]) + >>> input + tensor([[-3, -2, -1], + [ 0, 1, 2]]) + >>> mask = tensor([[ True, False, True], [False, False, False]]) + >>> mask + tensor([[ True, False, True], + [False, False, False]]) + >>> torch.masked._ops.std(input, 1, False, mask=mask) + tensor([1., nan]) +""" + +sum_docstring = """sum(input, dim, *, keepdim=False, dtype=None, mask=None) -> Tensor + +Returns sum of all the elements in the :attr:`input` +tensor along the given dimension(s) :attr:`dim` while the :attr:`input` +elements are masked out according to the boolean tensor +:attr:`mask`. + +The identity value of sum operation, which is used to start the reduction, is ``0``. + +If :attr:`keepdim` is ``True``, the output tensor is of the same size +as :attr:`input` except in the dimension(s) :attr:`dim` where it is of +size 1. Otherwise, :attr:`dim` is squeezed (see +:func:`torch.squeeze`), resulting in the output tensor having 1 (or +``len(dim)``) fewer dimension(s). + +The boolean tensor :attr:`mask` defines the "validity" of +:attr:`input` tensor elements: if :attr:`mask` element is True +then the corresponding element in :attr:`input` tensor will be +included in sum computation, otherwise the element is +ignored. + +When all elements of :attr:`input` along the given dimension +:attr:`dim` are ignored (fully masked-out), the corresponding element +of the output tensor will have undefined value: it may or may not +correspond to the identity value of sum operation; the +choice may correspond to the value that leads to the most efficient +storage of :attr:`output` tensor. + +The mask of the output tensor can be computed as +``torch.any(torch.broadcast_to(mask, input.shape), dim, keepdim=keepdim, +dtype=torch.bool)``. + +The shapes of the :attr:`mask` tensor and the :attr:`input` tensor +don't need to match, but they must be :ref:`broadcastable +` and the dimensionality of the :attr:`mask` +tensor must not be greater than of the :attr:`input` tensor. + +Args: + input (Tensor): the input tensor + dim (int or tuple of ints, optional): the dimension or dimensions to reduce. + Default: None that is equivalent to ``tuple(range(input.ndim))``. + +Keyword args: + keepdim (bool, optional): whether the output tensor has + :attr:`dim` retained or not. Default: False. + dtype (:class:`torch.dtype`, optional): the desired data type + of returned tensor. If specified, the input tensor is + casted to :attr:`dtype` before the operation is + performed. Default: None. + mask (:class:`torch.Tensor`, optional): the boolean tensor + containing the binary mask of validity of input tensor + elements. + Default: None that is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``. 
+ +Example:: + + >>> input = tensor([[-3, -2, -1], [ 0, 1, 2]]) + >>> input + tensor([[-3, -2, -1], + [ 0, 1, 2]]) + >>> mask = tensor([[ True, False, True], [False, False, False]]) + >>> mask + tensor([[ True, False, True], + [False, False, False]]) + >>> torch.masked._ops.sum(input, 1, mask=mask) + tensor([-4, 0]) +""" + +var_docstring = """var(input, dim, unbiased, *, keepdim=False, dtype=None, mask=None) -> Tensor +Returns variance of all the elements in the :attr:`input` +tensor along the given dimension(s) :attr:`dim` while the :attr:`input` +elements are masked out according to the boolean tensor +:attr:`mask`. +The identity value of sample variance operation is undefined. The +elements of output tensor with strided layout, that correspond to +fully masked-out elements, have ``nan`` values. +If :attr:`keepdim` is ``True``, the output tensor is of the same size +as :attr:`input` except in the dimension(s) :attr:`dim` where it is of +size 1. Otherwise, :attr:`dim` is squeezed (see +:func:`torch.squeeze`), resulting in the output tensor having 1 (or +``len(dim)``) fewer dimension(s). + +The boolean tensor :attr:`mask` defines the "validity" of +:attr:`input` tensor elements: if :attr:`mask` element is True +then the corresponding element in :attr:`input` tensor will be +included in variance computation, otherwise the element is +ignored. + +When all elements of :attr:`input` along the given dimension +:attr:`dim` are ignored (fully masked-out), the corresponding element +of the output tensor will have undefined value: it may or may not +correspond to the identity value of variance operation; the +choice may correspond to the value that leads to the most efficient +storage of :attr:`output` tensor. + +The mask of the output tensor can be computed as +``torch.any(torch.broadcast_to(mask, input.shape), dim, keepdim=keepdim, +dtype=torch.bool)``. + +The shapes of the :attr:`mask` tensor and the :attr:`input` tensor +don't need to match, but they must be :ref:`broadcastable +` and the dimensionality of the :attr:`mask` +tensor must not be greater than of the :attr:`input` tensor. + +Args: + input (Tensor): the input tensor + dim (int or tuple of ints, optional): the dimension or dimensions to reduce. + Default: None that is equivalent to ``tuple(range(input.ndim))``. + unbiased (bool): when True, use Bessel’s correction, otherwise, compute + the uncorrected sample variance. + +Keyword args: + keepdim (bool, optional): whether the output tensor has + :attr:`dim` retained or not. Default: False. + dtype (:class:`torch.dtype`, optional): the desired data type + of returned tensor. If specified, the input tensor is + casted to :attr:`dtype` before the operation is + performed. Default: None. + mask (:class:`torch.Tensor`, optional): the boolean tensor + containing the binary mask of validity of input tensor + elements. + Default: None that is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``. 
+Example:: + + >>> input = tensor([[-3, -2, -1], [ 0, 1, 2]]) + >>> input + tensor([[-3, -2, -1], + [ 0, 1, 2]]) + >>> mask = tensor([[ True, False, True], [False, False, False]]) + >>> mask + tensor([[ True, False, True], + [False, False, False]]) + >>> torch.masked._ops.var(input, 1, False, mask=mask) + tensor([1., nan]) +""" diff --git a/venv/lib/python3.10/site-packages/torch/masked/_ops.py b/venv/lib/python3.10/site-packages/torch/masked/_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..2a2ff3fd6f857507df133598b057a8e33a65fad0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/masked/_ops.py @@ -0,0 +1,1796 @@ + +import warnings + +# A workaround to support both TorchScript and MyPy: +from typing import Any, List, Optional, Tuple, TYPE_CHECKING, Union + +import torch +from torch import Tensor +from torch.masked import as_masked_tensor, is_masked_tensor, MaskedTensor +from . import _docs +from torch._prims_common import corresponding_real_dtype +from torch import sym_float + +if TYPE_CHECKING: + from torch.types import _dtype as DType + + DimOrDims = Optional[Union[int, Tuple[int], List[int]]] +else: + # The JIT doesn't understand Union, nor torch.dtype here + DType = int + DimOrDims = Optional[Tuple[int]] + + +__all__: List[str] = [] + +# All masked reduction/normalization operations have the same +# signatures. Here we introduce docstring templates that are applied +# to docstrings of reduction/normalization functions via +# _apply_docstring_templates decorator. + + +def _apply_docstring_templates(func): + """Decorator that applies docstring templates to function docstring + and returns the function instance. + """ + + doc_string = getattr(_docs, f"{func.__name__}_docstring", None) + if doc_string is None: + warnings.warn( + f"No documentation string available for {func.__name__}." + " PyTorch team should run `python tools/update_masked_docs.py`" + " to generate the missing docstrings." + ) + else: + func.__doc__ = doc_string + + # Expose function as public symbol + __all__.append(func.__name__) + + return func + + +def _generate_docstring(func): + """A utility function called from tools/update_masked_docs.py + script to update the module torch.masked._docs.py + """ + docstring_templates = dict( + reduction_signature="""\ +{function_name}(input, {operation_args}, *, {operation_kwargs}) -> Tensor""", + reduction_descr="""\ +Returns {operation name} of all the elements in the :attr:`input` +tensor along the given dimension(s) :attr:`dim` while the :attr:`input` +elements are masked out according to the boolean tensor +:attr:`mask`.""", + reduction_args="""\ +If :attr:`keepdim` is ``True``, the output tensor is of the same size +as :attr:`input` except in the dimension(s) :attr:`dim` where it is of +size 1. Otherwise, :attr:`dim` is squeezed (see +:func:`torch.squeeze`), resulting in the output tensor having 1 (or +``len(dim)``) fewer dimension(s). + +The boolean tensor :attr:`mask` defines the "validity" of +:attr:`input` tensor elements: if :attr:`mask` element is True +then the corresponding element in :attr:`input` tensor will be +included in {operation name} computation, otherwise the element is +ignored. 
+ +When all elements of :attr:`input` along the given dimension +:attr:`dim` are ignored (fully masked-out), the corresponding element +of the output tensor will have undefined value: it may or may not +correspond to the identity value of {operation name} operation; the +choice may correspond to the value that leads to the most efficient +storage of :attr:`output` tensor. + +The mask of the output tensor can be computed as +``torch.any(torch.broadcast_to(mask, input.shape), dim, keepdim=keepdim, +dtype=torch.bool)``. + +The shapes of the :attr:`mask` tensor and the :attr:`input` tensor +don't need to match, but they must be :ref:`broadcastable +` and the dimensionality of the :attr:`mask` +tensor must not be greater than of the :attr:`input` tensor. + +Args: + input (Tensor): the input tensor + {args_declarations} + +Keyword args: + {kwargs_declarations}""", + reduction_example="""\ +Example:: + + >>> input = {example_input} + >>> input + {indent_example_input} + >>> mask = {example_mask} + >>> mask + {indent_example_mask} + >>> {full_function_name}(input, {example_args}, mask=mask) + {indent_example_output} +""", + reduction_identity="""\ +The identity value of {operation name} operation, which is used to start the reduction, is ``{identity_int32}``.""", + reduction_identity_dtype="""\ +The identity value of {operation name} operation, which is used to start the +reduction, depends on input dtype. For instance, for float32, uint8, +and int32 dtypes, the identity values are ``{identity_float32}``, ``{identity_uint8}``, and ``{identity_int32}``, respectively.""", + normalization_signature="""\ +{function_name}(input, {operation_args}, *, {operation_kwargs}) -> Tensor""", + normalization_descr="""\ +Returns {operation name} of all the slices in the :attr:`input` tensor +along :attr:`dim` while the :attr:`input` elements are masked out +according to the boolean tensor :attr:`mask`. + +{definition}""", + normalization_args="""\ +The boolean tensor :attr:`mask` defines the "validity" of +:attr:`input` tensor elements: if :attr:`mask` element is True then +the corresponding element in :attr:`input` tensor will be included in +{operation name} computation, otherwise the element is ignored. + +The values of masked-out elements of the output tensor have undefined +value: it may or may not be set to zero or nan; the choice may correspond to +the value that leads to the most efficient storage of :attr:`output` +tensor. + +The mask of the {operation name} output tensor can be computed as +``torch.broadcast_to(mask, input.shape)``. + +The shapes of the :attr:`mask` tensor and the :attr:`input` tensor +don't need to match, but they must be :ref:`broadcastable +` and the dimensionality of the :attr:`mask` +tensor must not be greater than of the :attr:`input` tensor. + +Args: + input (Tensor): the input tensor + {args_declarations} + +Keyword args: + {kwargs_declarations}""", + normalization_example="""\ +Example:: + + >>> input = {example_input} + >>> input + {indent_example_input} + >>> mask = {example_mask} + >>> mask + {indent_example_mask} + >>> {full_function_name}(input, {example_args}, mask=mask) + {indent_example_output} +""", + ) + + args_and_kwargs = dict( + # argument name sufficies separated by double underscore will + # be removed in the final documentation string. 
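+        # For example, "dim__as_int" below is rendered as plain "dim" in the
+        # generated signature; the "__as_int" suffix only selects the matching
+        # entry in argument_declarations.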
+ sum=(("dim",), ("keepdim=False", "dtype=None", "mask=None")), + prod=(("dim",), ("keepdim=False", "dtype=None", "mask=None")), + cumsum=(("dim__as_int",), ("dtype=None", "mask=None")), + cumprod=(("dim__as_int",), ("dtype=None", "mask=None")), + amin=(("dim",), ("keepdim=False", "dtype=None", "mask=None")), + amax=(("dim",), ("keepdim=False", "dtype=None", "mask=None")), + argmin=(("dim__as_int",), ("keepdim=False", "dtype=None", "mask=None")), + argmax=(("dim__as_int",), ("keepdim=False", "dtype=None", "mask=None")), + mean=(("dim",), ("keepdim=False", "dtype=None", "mask=None")), + median=(("dim__as_int",), ("keepdim=False", "dtype=None", "mask=None")), + norm=( + ( + "ord", + "dim", + ), + ("keepdim=False", "dtype=None", "mask=None"), + ), + var=(("dim", "unbiased"), ("keepdim=False", "dtype=None", "mask=None")), + std=(("dim", "unbiased"), ("keepdim=False", "dtype=None", "mask=None")), + logsumexp=(("dim",), ("keepdim=False", "dtype=None", "mask=None")), + softmax=(("dim__as_int",), ("dtype=None", "mask=None")), + log_softmax=(("dim__as_int",), ("dtype=None", "mask=None")), + softmin=(("dim__as_int",), ("dtype=None", "mask=None")), + normalize=( + ( + "ord__required", + "dim__as_int", + ), + ("eps=1e-12", "dtype=None", "mask=None"), + ), + ) + + argument_declarations = dict( + dim="""\ +dim (int or tuple of ints, optional): the dimension or dimensions to reduce. + Default: None that is equivalent to ``tuple(range(input.ndim))``.""", + dim__as_int="""\ +dim (int): the dimension along which {operation name} is computed.""", + ord="""\ +ord (int, float, optional): the order of vector norm. Default: 2. + See :func:`torch.linalg.vector_norm` for a list of supported norms.""", + ord__required="""\ +ord (int, float): the order of vector norm. Default: 2. + See :func:`torch.linalg.vector_norm` for a list of supported norms.""", + unbiased="""\ +unbiased (bool): when True, use Bessel’s correction, otherwise, compute + the uncorrected sample variance.""", + eps="""\ +eps (float, optional): small value to avoid division by zero. Default: {default}.""", + keepdim="""\ +keepdim (bool, optional): whether the output tensor has + :attr:`dim` retained or not. Default: {default}.""", + dtype="""\ +dtype (:class:`torch.dtype`, optional): the desired data type + of returned tensor. If specified, the input tensor is + casted to :attr:`dtype` before the operation is + performed. Default: {default}.""", + mask="""\ +mask (:class:`torch.Tensor`, optional): the boolean tensor + containing the binary mask of validity of input tensor + elements. + Default: None that is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``.""", + ) + + definitions = dict( + softmax="""\ +Let ``x`` be a sequence of unmasked elements of one-dimensional slice +of the :attr:`input` tensor. Softmax of i-th element in ``x`` is +defined as ``exp(x[i])/sum(exp(x))``.""", + log_softmax="""\ +Let ``x`` be a sequence of unmasked elements of one-dimensional slice +of the :attr:`input` tensor. LogSoftmax of i-th element in ``x`` is +defined as ``log(exp(x[i])/sum(exp(x)))``.""", + softmin="""\ +Let ``x`` be a sequence of unmasked elements of one-dimensional slice +of the :attr:`input` tensor. Softmin of i-th element in ``x`` is +defined as ``exp(-x[i])/sum(exp(-x))``.""", + normalize="""\ +Let ``x`` be a sequence of unmasked elements of one-dimensional slice +of the :attr:`input` tensor. 
Normalize of i-th element in ``x`` is +defined as ``x[i]/max(norm(x, p), eps)``.""", + cumsum="""\ +Let ``x`` be a sequence of unmasked elements of one-dimensional slice +of the :attr:`input` tensor. Cumsum of i-th element in ``x`` is +defined as ``sum(x[:i])``.""", + cumprod="""\ +Let ``x`` be a sequence of unmasked elements of one-dimensional slice +of the :attr:`input` tensor. Cumsum of i-th element in ``x`` is +defined as ``prod(x[:i])``.""", + ) + + reduction_names = dict( + sum="sum", + prod="product", + amax="maximum", + amin="minimum", + argmax="argmax", + argmin="argmin", + mean="mean", + median="median", + norm="norm", + var="variance", + std="standard_deviation", + logsumexp="logsumexp", + ) + + normalization_names = dict( + softmax="softmax", + log_softmax="log_softmax", + softmin="softmin", + normalize="normalize", + cumsum="cumulative_sum", + cumprod="cumulative_prod", + ) + + operation_names = {} + operation_names.update(reduction_names) + operation_names.update(normalization_names) + + # Default example data: + example_dim = 1 + example_input = torch.tensor([[-3, -2, -1], [0, 1, 2]]) + example_mask = torch.tensor([[True, False, True], [False, False, False]]) + example_args: Tuple[Any, ...] + if func.__name__ in {"norm", "normalize"}: + example_args = (2.0, example_dim) + example_input = example_input.to(dtype=torch.float32) + elif func.__name__ in {"var", "std"}: + example_args = (example_dim, False) + elif func.__name__ == "median": + example_args = (example_dim,) + example_input = example_input.to(dtype=torch.float32) + else: + example_args = (example_dim,) + + operation_args: Tuple[str, ...] + operation_kwargs: Tuple[str, ...] + operation_args, operation_kwargs = args_and_kwargs[func.__name__] + arg_declarations = [ + "\n ".join( + argument_declarations.get(a, f'{a.split("__", 1)[0]}: TBD.').splitlines() + ) + for a in operation_args + ] + kwarg_declarations = [ + "\n ".join( + argument_declarations.get( + a.split("=", 1)[0], f'{a.split("__", 1)[0]}: TBD.' + ) + .format(default=a.split("=", 1)[1]) + .splitlines() + ) + for a in operation_kwargs + ] + + if func.__name__ in reduction_names: + op_kind = "reduction" + doc_sections = ["signature", "descr", "identity", "args", "example"] + elif func.__name__ in normalization_names: + op_kind = "normalization" + doc_sections = ["signature", "descr", "args", "example"] + example_input = example_input.to(dtype=torch.float32) + else: + assert 0 # add function name to operation names dictionaries + example_output = func(example_input, *example_args, mask=example_mask) + + template_data = { + "function_name": func.__name__, + "full_function_name": func.__module__ + "." 
+ func.__name__, + "operation name": operation_names[func.__name__], + "operation_args": ", ".join(a.split("__", 1)[0] for a in operation_args), + "operation_kwargs": ", ".join(a.split("__", 1)[0] for a in operation_kwargs), + # one-line representation of a tensor: + "example_input": " ".join(str(example_input).split()), + "example_args": ", ".join(map(str, example_args)), + "example_mask": " ".join(str(example_mask).split()), + # multi-line representation of a tensor with indent + "indent_example_input": ("\n ").join(str(example_input).splitlines()), + "indent_example_mask": ("\n ").join(str(example_mask).splitlines()), + "indent_example_output": ("\n ").join(str(example_output).splitlines()), + } + + if func.__name__ in reduction_names: + template_data.update( + identity_uint8=_reduction_identity( + func.__name__, torch.tensor(0, dtype=torch.uint8) + ), + identity_int32=_reduction_identity( + func.__name__, torch.tensor(0, dtype=torch.int32) + ), + identity_float32=_reduction_identity( + func.__name__, torch.tensor(0, dtype=torch.float32) + ), + ) + if func.__name__ == "norm": + template_data.update( + identity_ord_ninf=_reduction_identity( + func.__name__, torch.tensor(0, dtype=torch.float32), float("-inf") + ) + ) + elif func.__name__ in normalization_names: + template_data.update(definition=definitions[func.__name__]) + else: + assert 0 # add function name to operation names dictionaries + template_data.update( + args_declarations=("\n ".join(arg_declarations)).format_map(template_data) + ) + template_data.update( + kwargs_declarations=("\n ".join(kwarg_declarations)).format_map( + template_data + ) + ) + + # Apply function name info to docstring templates: + templates = { + k: v.format_map(template_data) + for k, v in docstring_templates.items() + if k.startswith(op_kind) + } + templates.update( + (k, v.format_map(template_data) if isinstance(v, str) else v) + for k, v in template_data.items() + ) + + # Apply docstring templates to function doctring: + if func.__doc__ is None: + doc_template = "\n\n".join([f"{{{op_kind}_{sec}}}" for sec in doc_sections]) + else: + doc_template = func.__doc__ + return doc_template.format_map(templates) + + +def _reduction_identity(op_name: str, input: Tensor, *args): + """Return identity value as scalar tensor of a reduction operation on + given input, or None, if the identity value cannot be uniquely + defined for the given input. + + The identity value of the operation is defined as the initial + value to reduction operation that has a property ``op(op_identity, + value) == value`` for any value in the domain of the operation. + Or put it another way, including or excluding the identity value in + a list of operands will not change the reduction result. + + See https://github.com/pytorch/rfcs/pull/27 for more information. 
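+
+    For example, under the rules below, ``_reduction_identity("sum",
+    torch.tensor([1, 2]))`` is ``tensor(0)`` and ``_reduction_identity("amax",
+    torch.tensor([1.0]))`` is ``tensor(-inf)``.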
+ + """ + dtype: DType = input.dtype + device = input.device + op_name = op_name.rsplit(".", 1)[-1] # lstrip module name when present + if op_name in {"sum", "cumsum"}: + return torch.tensor(0, dtype=dtype, device=device) + elif op_name in {"prod", "cumprod"}: + return torch.tensor(1, dtype=dtype, device=device) + elif op_name in {"amax", "argmax", "logsumexp"}: + if torch.is_floating_point(input): + return torch.tensor(-torch.inf, dtype=dtype, device=device) + elif torch.is_signed(input) or dtype == torch.uint8: + return torch.tensor(torch.iinfo(dtype).min, dtype=dtype, device=device) + elif op_name in {"amin", "argmin"}: + if torch.is_floating_point(input): + return torch.tensor(torch.inf, dtype=dtype, device=device) + elif torch.is_signed(input) or dtype == torch.uint8: + return torch.tensor(torch.iinfo(dtype).max, dtype=dtype, device=device) + elif op_name == "mean": + # Strictly speaking, the identity value of the mean operation + # is the mean of the input. Since the mean value depends on + # the dim argument and it may be a non-scalar tensor, we + # consider the identity value of the mean operation ambiguous. + # Moreover, the mean value of empty input is undefined. + return None + elif op_name == "norm": + ord = args[0] if args else 2 + if ord == float("-inf"): + assert torch.is_floating_point(input), input.dtype + return torch.tensor(torch.inf, dtype=dtype, device=device) + return torch.tensor(0, dtype=dtype, device=device) + elif op_name == "median": + # We use NaN for now because the implementation is currently using torch.nanmedian + # and NaN is the identity for that function since it gets ignored + dtype = input.dtype if torch.is_floating_point(input) else torch.float + return torch.tensor(torch.nan, dtype=dtype, device=device) + elif op_name in {"var", "std"}: + return None + raise NotImplementedError(f"identity of {op_name} on {dtype} input") + + +def _canonical_dim(dim: DimOrDims, ndim: int) -> Tuple[int, ...]: + """Return dim argument as a tuple of sorted dim values.""" + dims: List[int] = [] + if dim == (): + # Currently, `dim=()` in reductions operations means "reduce + # over all dimensions" while in future, it will read "no + # reduce". See https://github.com/pytorch/pytorch/issues/29137 + # When gh-29137 is resolved, this if-block must be deleted. + dim = None + if dim is None: + return tuple(range(ndim)) + ndim = max(ndim, 1) + dim_ = (dim,) if isinstance(dim, (int, torch.SymInt)) else dim + for d in dim_: + if d in dims: + raise RuntimeError(f"dim={d} appears multiple times in the list of dims") + if d >= ndim or d < -ndim: + raise IndexError( + f"Dimension out of range (expected to be in range of [{-ndim}, {ndim-1}], but got {d})" + ) + dims.append(d % ndim) + return tuple(sorted(dims)) + + +def _sparse_coo_flatten_indices(indices: Tensor, shape: tuple): + # Flatted N-D indices to 1-D indices + flat_indices = indices.new_zeros(indices.size(1)) + for d, sz in enumerate(shape): + flat_indices.mul_(sz) + flat_indices.add_(indices[d]) + return flat_indices + + +def _any(input: Tensor, dim: tuple, keepdim: bool): + # Support torch.any with tuple dim argument. + # Workaround of https://github.com/pytorch/pytorch/issues/56586 + r = input + for d in reversed(dim): + r = r.any(dim=d, keepdim=keepdim) + return r + + +def _sparse_coo_where(mask: Tensor, input: Tensor, fill_value: Tensor) -> Tensor: + """Sparse variant of torch.where. Supports sparse COO and hybrid sparse COO tensors. 
+ + _sparse_coo_where implements the following invariant: + + _sparse_coo_where(mask, input, fill_value).to_dense(fill_value) == + torch.where(mask.to_dense(), input.to_dense(), torch.full(input.shape, fill_value)) + + where `a == b` means `assertEqual(a, b)`, mask is boolean sparse + tensor, and `to_dense(fill_value)` is like `to_dense()` except + that the unspecified elements are mapped to `fill_value` rather + than to `0`. + + Returns a sparse COO tensor with the following features: + + - all specified elements correspond to masked-in elements that + have the values of the input tensor. If there exists a masked-in + element (as specified by mask) that is not specified in the + input, in the result tensor, the corresponding element has value + 0. In the dense part of the sparse tensor, the masked-out + elements are replaced with fill_value. + + - all unspecified elements correspond to masked-out elements. + """ + + assert input.layout == torch.sparse_coo + assert mask.layout == input.layout + assert mask.shape == input.shape + assert mask.dense_dim() == input.dense_dim() # TODO: eliminate this restriction + + input = input.coalesce() + + # For set operations on sparse tensor indices, we'll convert + # multi-dimensional indices to 1-D indices for efficiency. + input_flat_indices = _sparse_coo_flatten_indices( + input.indices(), input.shape[: input.sparse_dim()] + ) + mask_flat_indices = _sparse_coo_flatten_indices( + mask.indices(), mask.shape[: mask.sparse_dim()] + ) + + # the set of mask flat indices that define masked-in elements: + if mask.dense_dim() > 0: + mask_values = _any( + mask.values(), tuple(range(1, input.sparse_dim() + 1)), False + ) + else: + mask_values = mask.values() + maskin_flat_indices = mask_flat_indices[mask_values.nonzero()[:, 0]] + + def intersection(i1, i2): + union, counts = torch.cat([i1, i2]).unique(return_counts=True) + return union, torch.where(counts.gt(1)) + + def minus(i1, i2): + union, counts = torch.cat([i1, i2]).unique(return_counts=True) + return intersection(union[torch.where(counts.eq(1))], i1) + + def _apply(a): + obj, w = a + return obj[w] + + # the set of input flat indices of specified and masked-in elements: + maskin_input_flat_indices = _apply( + intersection(maskin_flat_indices, input_flat_indices) + ) + _, w = intersection(input_flat_indices, maskin_input_flat_indices) + + # the indices and values of masked-in elements + where_input_indices = input.indices()[(slice(None),) + w] + where_input_values = input.values()[w] + + if mask.dense_dim() > 0: + # apply mask to the dense part of the input values: + _, w1 = intersection(mask_flat_indices, maskin_input_flat_indices) + where_mask_values = mask.values()[w1] + where_input_values = torch.where( + where_mask_values, where_input_values, fill_value + ) + + # the set of flat indices of unspecified input and masked-in elements: + maskin_zero_flat_indices = _apply( + minus(maskin_flat_indices, maskin_input_flat_indices) + ) + + # the indices of masked-in zero elements + _, w = intersection(mask_flat_indices, maskin_zero_flat_indices) + where_zero_indices = mask.indices()[(slice(None),) + w] + + # construct result + n = where_zero_indices.size(1) + if n == 0: + # the input is coalesced, hence input_flat_indices are ordered + # and the result is guaranteed to be coalesced: + result = torch.sparse_coo_tensor( + where_input_indices, where_input_values, input.shape + ) + return result._coalesced_(True) + + where_indices = torch.cat([where_input_indices, where_zero_indices], dim=1) + where_values = 
torch.cat( + [ + where_input_values, + where_input_values.new_zeros((n,) + where_input_values.shape[1:]), + ] + ) + result = torch.sparse_coo_tensor(where_indices, where_values, input.shape) + + # appending zero elements leads to uncoalesced sparse tensor + return result.coalesce() + + +def _sparse_coo_scatter_reduction_helper( + op, + mask_input: Tensor, + dims: Tuple[int, ...], + keepdim: bool, + dtype: Optional[DType] = None, +) -> Tensor: + reduce = op.__name__ + valid_reductions = ["sum", "prod", "amax", "amin"] + if reduce not in valid_reductions: + raise ValueError( + f"op must be one of {' '.join(valid_reductions)}, but got {reduce} instead" + ) + + output_dtype = dtype + values, indices = mask_input._values(), mask_input._indices() + input_dims = mask_input.dim() + num_sparse_dims = mask_input.sparse_dim() + reduced_sparse_dims = [] + retained_sparse_dims = [] + reduced_dense_dims = [] + + # promote dtype if specified + if values.dtype != output_dtype: + values = values.to(output_dtype) + + if keepdim: + output_shape = tuple( + 1 if i in dims else si for (i, si) in enumerate(mask_input.shape) + ) + else: + output_shape = tuple( + si for (i, si) in enumerate(mask_input.shape) if i not in dims + ) + + for d in dims: + if d >= input_dims: + continue + + if d < num_sparse_dims: + reduced_sparse_dims.append(d) + else: + reduced_dense_dims.append(d + 1 - num_sparse_dims) + + # Reduce dense dimensions + if len(reduced_dense_dims) > 0: + if reduce == "sum": + new_values = values + new_values = op(new_values, dim=reduced_dense_dims, keepdim=bool(keepdim)) + else: + # FIXME: Implement reductions for dense dimensions for ops with non-zero reduction identities + return NotImplemented + else: + new_values = values.clone() + + # Reduce sparse dimensions + if len(reduced_sparse_dims) == num_sparse_dims: + if reduce in {"amax", "amin"} and new_values.size(0) == 0: + # IndexError: amax(): Expected reduction dim 0 to have non-zero size. 
+ # sum()/prod() return the reduction identity when dim has size 0 but amax()/amin() do not + # See https://github.com/pytorch/pytorch/issues/61901 + new_values = _reduction_identity(reduce, new_values) + else: + new_values = op(new_values, dim=0) + if keepdim: + for _ in range(num_sparse_dims): + new_values = new_values.unsqueeze(0) + return new_values.to(dtype=output_dtype).to_sparse() + else: + new_indices = indices.clone() + if keepdim: + # zero out reduced sparse dimensions if keepdim = True + # ensures that the call to torch.unique folds duplicated indices together while preserving the dimension + new_indices[reduced_sparse_dims, :] = 0 + else: + # remove reduced sparse dimensions if keepdim = False + if len(reduced_sparse_dims) > 0: + retained_sparse_dims = [ + i + for i in range(num_sparse_dims) + if i not in set(reduced_sparse_dims) + ] + new_indices = new_indices.index_select( + 0, torch.tensor(retained_sparse_dims).to(mask_input.device) + ) + + # Use scatter_reduce to reduce items in the new_values tensor that correspond to the same indices in new_indices + if new_indices.numel() > 0: + # lexsort indices and get index tensor for scatter reduction + new_indices, inverse_indices = torch.unique( + new_indices, return_inverse=True, dim=1 + ) + out_shape = list(new_values.shape) + out_shape[0] = new_indices.shape[1] + for _ in range(new_values.ndim - 1): + inverse_indices = inverse_indices.unsqueeze(-1) + scatter_indices = inverse_indices.expand(new_values.shape) + # FIXME: temporary workaround for issue with bfloat16/float16 remove when acctype is implemented for scatter_reduce + if output_dtype in {torch.bfloat16, torch.float16}: + new_values = new_values.to(torch.float) + out = new_values.new_empty(out_shape) + new_values = out.scatter_reduce_( + 0, scatter_indices, new_values, reduce=reduce, include_self=False + ) + new_values = new_values.to(dtype=output_dtype) + else: + out = new_values.new_empty(out_shape) + new_values = out.scatter_reduce_( + 0, scatter_indices, new_values, reduce=reduce, include_self=False + ) + + return torch.sparse_coo_tensor( + new_indices, + new_values, + output_shape, + dtype=output_dtype, + device=mask_input.device, + ) + + +def _sparse_csr_segment_reduction_helper( + op, + mask_input: Tensor, + dims: Tuple[int, ...], + keepdim: bool, + dtype: Optional[DType] = None, +) -> Tensor: + # Currently, while sparse CSR is always 2D with no dense dimensions keepdim must be True + # FIXME: when dense dimensions are implemented for CSR tensors + assert ( + keepdim + ), "reduction operations on CSR tensors with keepdim=False is unsupported" + reduce = op.__name__ + valid_reductions = ["sum", "prod", "mean", "amax", "amin"] + if reduce not in valid_reductions: + raise ValueError( + f"op must be one of {' '.join(valid_reductions)}, but got {reduce} instead" + ) + device = mask_input.device + output_dtype = dtype + values, crow_indices, col_indices = ( + mask_input.values(), + mask_input.crow_indices(), + mask_input.col_indices(), + ) + + # promote dtype if specified + if values.dtype != output_dtype: + values = values.to(output_dtype) + + if len(dims) == 0: + return mask_input + if len(dims) == 1: + if dims[0] == 0: + new_col_indices, scatter_indices = torch.unique( + col_indices, return_inverse=True + ) + new_nnz = new_col_indices.shape[0] + new_crow_indices = torch.tensor([0, new_nnz]) + new_values = values.new_empty(new_col_indices.shape) + new_values.scatter_reduce_( + 0, scatter_indices, values, reduce, include_self=False + ) + new_shape = [1, 
mask_input.size(1)] + else: + assert ( + dims[0] == 1 + ), "Sparse CSR tensors are 2D and only support reduction along dim 0 or 1." + # all intervals new_crow_indices[i] - new_crow_indices[i-1] are 1 + # except for where crow_indices[i] == crow_indices[i-1] where the interval remains as 0 + new_crow_indices = torch.cat( + ( + crow_indices.new_zeros(1), + torch.cumsum(torch.diff(crow_indices) != 0, 0), + ), + 0, + ) + new_nnz = new_crow_indices[-1] + new_col_indices = col_indices.new_zeros(new_nnz) + new_values = torch._segment_reduce(values, reduce, offsets=crow_indices) # type: ignore[attr-defined] + new_shape = [mask_input.size(0), 1] + else: + assert len(dims) == 2 + nnz = min(1, values.numel()) + if nnz == 1: + op_kwargs = {"keepdim": True, "dtype": output_dtype} + # amax and amin do not support dtype kwarg + if reduce in ["amax", "amin"]: + del op_kwargs["dtype"] + new_values = op(values, 0, **op_kwargs) + else: + new_values = torch.empty(0, dtype=output_dtype) + new_col_indices = col_indices.new_zeros(nnz) + new_crow_indices = torch.tensor([0, nnz]) + new_shape = [1, nnz] + + return torch.sparse_csr_tensor( + new_crow_indices, + new_col_indices, + new_values, + new_shape, + dtype=output_dtype, + device=device, + ) + + +def _sparse_csr_where(mask: Tensor, input: Tensor, fill_value: Tensor) -> Tensor: + """Sparse variant of torch.where. Supports sparse CSR tensors.""" + # TODO: implement sparse CSR specific where operator for efficiency + return _sparse_coo_where( + mask.to_sparse_coo(), input.to_sparse_coo(), fill_value + ).to_sparse_csr() + + +def _where(mask: Tensor, input: Tensor, fill_value: Tensor) -> Tensor: + """torch.where with sparse inputs support. + + _where implements the following invariant: + + _where(mask, input, fill_value).to_dense(fill_value) == + torch.where(mask.to_dense(), input.to_dense(), torch.full(input.shape, fill_value)) + + where `a == b` means `assertEqual(a, b)`, mask is boolean sparse + tensor, and `to_dense(fill_value)` is like `to_dense()` except + that the unspecified elements are mapped to `fill_value` rather + than to `0`. + + Returns a sparse tensor with the following features: + + - all specified elements correspond to masked-in elements that + have the values of the input tensor. If there exists a masked-in + element (as specified by mask) that is not specified in the + input, in the result tensor, the corresponding element has value + 0. In the dense part of the sparse tensor, the masked-out + elements are replaced with fill_value. + + - all unspecified elements correspond to masked-out elements. + """ + if mask.layout == torch.strided: + return torch.where(mask, input, fill_value) + elif mask.layout == torch.sparse_coo: + return _sparse_coo_where(mask, input, fill_value) + elif mask.layout == torch.sparse_csr: + return _sparse_csr_where(mask, input, fill_value) + else: + raise ValueError( + f"_where expects strided or sparse COO or sparse CSR tensor but got {mask.layout}" + ) + + +def _input_mask(input: Union[Tensor, MaskedTensor], *args, **kwargs) -> Tensor: + """Return canonical input mask. + + A canonical input mask is defined as a boolean mask tensor that + shape and layout matches with the shape and the layout of the + input. + + The canonical input mask is computed from the :attr:`mask` tensor + content to meet the following criteria: + + 1. The shape of the canonical input mask is the same as the shape + of :attr:`input` tensor. 
If the mask tensor has a smaller shape + than the shape of the :attr:`input`, broadcasting rules will be + applied. Downcasting of mask is not supported. + + 2. The layout of the canonical input mask is the same as the + layout of the :attr:`input` tensor. If the mask has different + layout, it will be converted to the expected layout. In the + case of sparse COO layout, the canonical input mask will be + coalesced. + + 3. The dtype of the canonical input mask is torch.bool. If the + mask dtype is not bool then it will be converted to bool dtype + using `.to(dtype=bool)` method call. + + 4. The elements of the canonical input mask have boolean values + copied from the content of the :attr:`mask` tensor (after + possible broadcasting and dtype conversion transforms). In + general, the sparsity pattern of the sparse canonical input + mask need not to be the same as the sparsity pattern of the + sparse :attr:`input` tensor. + + """ + if input.layout not in {torch.strided, torch.sparse_coo, torch.sparse_csr}: + raise ValueError( + f"_input_mask expects strided or sparse COO or sparse CSR tensor but got {input.layout}" + ) + + mask = kwargs.get("mask") + + # default mask + if mask is None: + raise ValueError("_input_mask requires explicit mask") + + # mask shape must match with input shape + if mask.shape != input.shape: + if mask.ndim > input.ndim: + raise IndexError( + "_input_mask expected broadcastable mask (got mask dimensionality higher than of the input)" + ) + if mask.layout == torch.strided: + mask = torch.broadcast_to(mask.clone(), input.shape).to(dtype=torch.bool) + elif mask.layout == torch.sparse_coo: + mask = torch._sparse_broadcast_to(mask, input.shape) + else: + assert mask.layout == torch.sparse_csr + # Broadcasting of CSR tensors is not implemented. Working + # around by using COO layout. 
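+            # Convert CSR -> COO, broadcast in COO, then convert back to CSR below.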
+ mask = torch._sparse_broadcast_to( + mask.to_sparse(), input.shape + ).to_sparse_csr() + + # mask layout must match with input layout + if mask.layout != input.layout: + if input.layout == torch.strided: + mask = mask.to_dense() + elif input.layout == torch.sparse_coo: + if mask.layout == torch.strided: + mask = mask.to_sparse(input.sparse_dim()) + else: + mask = mask.to_sparse() + else: + assert input.layout == torch.sparse_csr + mask = mask.to_sparse_csr() + + # sparse mask must be coalesced + if mask.layout == torch.sparse_coo: + mask = mask.coalesce() + + # mask is a boolean tensor + mask = mask.to(dtype=torch.bool) + + return mask + + +def _output_mask(op, input: Tensor, *args, **kwargs) -> Tensor: + """Return output mask of masked operation applied to given arguments.""" + if callable(op): + is_reduction = op.__name__ in { + "sum", + "prod", + "amax", + "amin", + "argmax", + "argmin", + "mean", + "median", + "norm", + "var", + "std", + "logsumexp", + } + is_normalization = op.__name__ in { + "softmax", + "log_softmax", + "softmin", + "normalize", + "cumsum", + "cumprod", + } + if is_reduction: + if op.__name__ == "norm": + if args: + args = args[1:] # lstrip ord argument + dim = args[0] if args else kwargs.get("dim") + outmask = _input_mask(input, *args, **kwargs) + keepdim = kwargs.get("keepdim", False) + dim_ = _canonical_dim(dim, input.ndim) + return _any(outmask, dim_, bool(keepdim)) + elif is_normalization: + return _input_mask(input, *args, **kwargs) + else: + raise ValueError( + f"_output_mask expected masked operation (got callable {op.__module__}.{op.__name__})" + ) + else: + raise ValueError( + f"_output_mask expected masked operation (got {type(op).__name__} object)" + ) + + +def _combine_input_and_mask( + op, input: Union[MaskedTensor, Tensor], mask, *args +) -> Tensor: + def helper(input, mask): + if mask is None: + return input + canonical_mask = _input_mask(input, mask=mask) + if callable(op): + fill_value = _reduction_identity(op.__name__, input, *args) + return _where(canonical_mask, input, fill_value) + else: + raise ValueError( + f"_combine_input_and_mask expected masked operation (got {type(op).__name__} object)" + ) + + class Combine(torch.autograd.Function): + @staticmethod + def forward(ctx, input, mask): + """Return input with masked-out elements eliminated for the given operations.""" + ctx.save_for_backward(mask) + + if mask is not None: + ctx.mark_non_differentiable(mask) + + return helper(input, mask) + + @staticmethod + def backward(ctx, grad_output): + (mask,) = ctx.saved_tensors + grad_data = ( + grad_output.get_data() if is_masked_tensor(grad_output) else grad_output + ) + result = as_masked_tensor(grad_data, mask) + return result, None + + return ( + Combine.apply(input.get_data(), input.get_mask()) # type: ignore[union-attr] + if is_masked_tensor(input) + else helper(input, mask) + ) + + +@_apply_docstring_templates +def sum( + input: Union[Tensor, MaskedTensor], + dim: DimOrDims = None, + *, + keepdim: Optional[bool] = False, + dtype: Optional[DType] = None, + mask: Optional[Tensor] = None, +) -> Tensor: + # __doc__ is generated by _apply_docstring_templates decorator + if dtype is None: + # promote integer types to int64 when output dtype is not specified + if input.layout == torch.sparse_csr: + if input.dtype in { + torch.uint8, + torch.bool, + torch.int8, + torch.int16, + torch.int32, + }: + # csr.to(dtype=torch.int64) is not implemented, so + # using coo.to on input to ensure the promoted dtype + input = 
input.to_sparse_coo().to(dtype=torch.int64).to_sparse_csr() + else: + dtype = input.dtype + else: + dtype = input.dtype + if input.dtype in { + torch.uint8, + torch.bool, + torch.int8, + torch.int16, + torch.int32, + }: + dtype = torch.int64 + dim_ = _canonical_dim(dim, input.ndim) + mask_input = _combine_input_and_mask(sum, input, mask) + if mask_input.layout == torch.strided: + return torch.sum(mask_input, dim_, bool(keepdim), dtype=dtype) + elif mask_input.layout == torch.sparse_coo: + return _sparse_coo_scatter_reduction_helper( + torch.sum, mask_input, dim_, bool(keepdim), dtype + ) + elif mask_input.layout == torch.sparse_csr: + return torch._sparse_csr_sum( + mask_input, dim=list(dim_), keepdim=bool(keepdim), dtype=dtype + ) + else: + raise ValueError( + f"masked sum expects strided, sparse_coo or sparse_csr tensor (got {mask_input.layout} tensor)" + ) + + +@_apply_docstring_templates +def prod( + input: Union[Tensor, MaskedTensor], + dim: DimOrDims = None, + *, + keepdim: Optional[bool] = False, + dtype: Optional[DType] = None, + mask: Optional[Tensor] = None, +) -> Tensor: + # __doc__ is generated by _apply_docstring_templates decorator + if dtype is None: + # promote integer types to int64 when output dtype is not specified + if input.layout == torch.sparse_csr: + if input.dtype in { + torch.uint8, + torch.bool, + torch.int8, + torch.int16, + torch.int32, + }: + # csr.to(dtype=torch.int64) is not implemented, so + # using coo.to on input to ensure the promoted dtype + input = input.to_sparse_coo().to(dtype=torch.int64).to_sparse_csr() + else: + dtype = input.dtype + else: + dtype = input.dtype + if input.dtype in { + torch.uint8, + torch.bool, + torch.int8, + torch.int16, + torch.int32, + }: + dtype = torch.int64 + dim_ = _canonical_dim(dim, input.ndim) + mask_input = _combine_input_and_mask(prod, input, mask) + if mask_input.layout == torch.strided: + # Workaround https://github.com/pytorch/pytorch/issues/56586 + result = mask_input + result = result.to(dtype=dtype) + for d in reversed(dim_): + result = result.prod(dim=d, keepdim=bool(keepdim)) + return result + elif mask_input.layout == torch.sparse_coo: + if mask is None: + # See comment in the sparse_csr branch, the same issue arises for sparse_coo tensors + raise ValueError( + "masked prod expects explicit mask for sparse_coo tensor input" + ) + return _sparse_coo_scatter_reduction_helper( + torch.prod, mask_input, dim_, bool(keepdim), dtype + ) + elif mask_input.layout == torch.sparse_csr: + if mask is None: + # mask is None corresponds to all-True mask. The + # unspecified elements in the CSR tensor correspond to + # zero values. Hence, the prod reduction result is + # automatically zero unless all elements are specified. + # A semi-optimal way to take this into account is to use: + # + # masked_prod(csr, ..., mask=None) == torch._sparse_csr_prod(csr, ...) * all(csr.nonzero(), ...) + # + # but that requires implementing `all` and `nonzero` + # support for sparse csr tensors. 
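+            # Until such support exists, an explicit mask is required so the result stays well defined.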
+ raise ValueError( + "masked prod expects explicit mask for sparse_csr tensor input" + ) + return torch._sparse_csr_prod( + mask_input, dim=list(dim_), keepdim=bool(keepdim), dtype=dtype + ) + else: + raise ValueError( + f"masked prod expects strided, sparse_coo or sparse_csr tensor (got {mask_input.layout} tensor)" + ) + + +@_apply_docstring_templates +def cumsum( + input: Tensor, + dim: int, + *, + dtype: Optional[DType] = None, + mask: Optional[Tensor] = None, +) -> Tensor: + if dtype is None: + dtype = input.dtype + dim_ = _canonical_dim(dim, input.ndim)[0] + mask_input = _combine_input_and_mask(sum, input, mask) + if mask_input.layout == torch.strided: + return torch.cumsum(mask_input, dim_, dtype=dtype).to(dtype=dtype) + else: + raise ValueError( + f"masked cumsum expects strided tensor (got {mask_input.layout} tensor)" + ) + + +@_apply_docstring_templates +def cumprod( + input: Tensor, + dim: int, + *, + dtype: Optional[DType] = None, + mask: Optional[Tensor] = None, +) -> Tensor: + if dtype is None: + dtype = input.dtype + dim_ = _canonical_dim(dim, input.ndim)[0] + mask_input = _combine_input_and_mask(prod, input, mask) + if mask_input.layout == torch.strided: + return torch.cumprod(mask_input, dim_, dtype=dtype).to(dtype=dtype) + else: + raise ValueError( + f"masked cumprod expects strided tensor (got {mask_input.layout} tensor)" + ) + + +@_apply_docstring_templates +def amax( + input: Union[Tensor, MaskedTensor], + dim: DimOrDims = None, + *, + keepdim: Optional[bool] = False, + dtype: Optional[DType] = None, + mask: Optional[Tensor] = None, +) -> Tensor: + """\ +{reduction_signature} + +{reduction_descr} + +{reduction_identity_dtype} + +{reduction_args} + +{reduction_example}""" + if dtype is None: + dtype = input.dtype + + mask_input = _combine_input_and_mask(amax, input, mask) + dim_ = _canonical_dim(dim, mask_input.ndim) + if mask_input.layout == torch.strided: + return torch.amax(mask_input, dim_, bool(keepdim)).to(dtype=dtype) + elif mask_input.layout == torch.sparse_coo: + if mask is None: + # See comment in the sparse_csr branch of prod, a similar issue arises here + # where unspecified elements along a dimension may need to be reduced with the result + raise ValueError( + "masked amax expects explicit mask for sparse_coo tensor input" + ) + return _sparse_coo_scatter_reduction_helper( + torch.amax, mask_input, dim_, bool(keepdim), dtype + ) + elif mask_input.layout == torch.sparse_csr: + if mask is None: + raise ValueError( + "masked amax expects explicit mask for sparse_csr tensor input" + ) + return _sparse_csr_segment_reduction_helper( + torch.amax, mask_input, dim_, bool(keepdim), dtype + ) + else: + raise ValueError( + f"masked amax expects strided, sparse_coo or sparse_csr tensor (got {mask_input.layout} tensor)" + ) + + +@_apply_docstring_templates +def amin( + input: Union[Tensor, MaskedTensor], + dim: DimOrDims = None, + *, + keepdim: Optional[bool] = False, + dtype: Optional[DType] = None, + mask: Optional[Tensor] = None, +) -> Tensor: + """\ +{reduction_signature} + +{reduction_descr} + +{reduction_identity_dtype} + +{reduction_args} + +{reduction_example}""" + if dtype is None: + dtype = input.dtype + + mask_input = _combine_input_and_mask(amin, input, mask) + dim_ = _canonical_dim(dim, mask_input.ndim) + if mask_input.layout == torch.strided: + return torch.amin(mask_input, dim_, bool(keepdim)).to(dtype=dtype) + elif mask_input.layout == torch.sparse_coo: + if mask is None: + # See comment in the sparse_csr branch of prod, a similar issue arises here + # 
where unspecified elements along a dimension may need to be reduced with the result + raise ValueError( + "masked amax expects explicit mask for sparse_coo tensor input" + ) + return _sparse_coo_scatter_reduction_helper( + torch.amin, mask_input, dim_, bool(keepdim), dtype + ) + elif mask_input.layout == torch.sparse_csr: + if mask is None: + raise ValueError( + "masked amin expects explicit mask for sparse_csr tensor input" + ) + return _sparse_csr_segment_reduction_helper( + torch.amin, mask_input, dim_, bool(keepdim), dtype + ) + else: + raise ValueError( + f"masked amin expects strided, sparse_coo or sparse_csr tensor (got {mask_input.layout} tensor)" + ) + + +@_apply_docstring_templates +def argmax( + input: Union[Tensor, MaskedTensor], + dim: Optional[int] = None, + *, + keepdim: Optional[bool] = False, + dtype: Optional[DType] = None, + mask: Optional[Tensor] = None, +) -> Tensor: + """\ +{reduction_signature} +{reduction_descr} +{reduction_identity_dtype} +{reduction_args} +{reduction_example}""" + if dtype is None: + dtype = input.dtype + mask_input = _combine_input_and_mask(argmax, input, mask) + if mask_input.layout == torch.strided: + return torch.argmax(mask_input, dim, bool(keepdim)).to(dtype=dtype) + else: + raise ValueError( + f"masked argmax expects strided tensor (got {mask_input.layout} tensor)" + ) + + +@_apply_docstring_templates +def argmin( + input: Union[Tensor, MaskedTensor], + dim: Optional[int] = None, + *, + keepdim: Optional[bool] = False, + dtype: Optional[DType] = None, + mask: Optional[Tensor] = None, +) -> Tensor: + """\ +{reduction_signature} +{reduction_descr} +{reduction_identity_dtype} +{reduction_args} +{reduction_example}""" + if dtype is None: + dtype = input.dtype + mask_input = _combine_input_and_mask(argmin, input, mask) + if mask_input.layout == torch.strided: + return torch.argmin(mask_input, dim, bool(keepdim)).to(dtype=dtype) + else: + raise ValueError( + f"masked argmin expects strided tensor (got {mask_input.layout} tensor)" + ) + + +@_apply_docstring_templates +def mean( + input: Union[Tensor, MaskedTensor], + dim: DimOrDims = None, + *, + keepdim: Optional[bool] = False, + dtype: Optional[DType] = None, + mask: Optional[Tensor] = None, +) -> Tensor: + """\ +{reduction_signature} + +{reduction_descr} + +By definition, the identity value of a mean operation is the mean +value of the tensor. If all elements of the input tensor along given +dimension(s) :attr:`dim` are masked-out, the identity value of the +mean is undefined. Due to this ambiguity, the elements of output +tensor with strided layout, that correspond to fully masked-out +elements, have ``nan`` values. 
+ +{reduction_args} + +{reduction_example}""" + if dtype is None: + dtype = input.dtype + if input.layout == torch.strided: + if mask is None: + # TODO: compute count analytically + count = sum( + torch.ones(input.shape, dtype=torch.int64, device=input.device), + dim, + keepdim=keepdim, + ) + total = sum(input, dim, keepdim=keepdim, dtype=dtype) + else: + inmask = _input_mask(input, mask=mask) + count = sum( + inmask.new_ones(input.shape, dtype=torch.int64), + dim, + keepdim=keepdim, + mask=inmask, + ) + total = sum(input, dim, keepdim=keepdim, dtype=dtype, mask=inmask) + return total / count + elif input.layout == torch.sparse_csr: + mask_input = _combine_input_and_mask(mean, input, mask) + dim_ = _canonical_dim(dim, mask_input.ndim) + if mask is None: + raise ValueError( + "masked mean expects explicit mask for sparse_csr tensor input" + ) + return _sparse_csr_segment_reduction_helper( + torch.mean, mask_input, dim_, bool(keepdim), dtype + ) + else: + raise ValueError( + f"masked mean expects strided or sparse_csr tensor (got {input.layout} tensor)" + ) + + +@_apply_docstring_templates +def median( + input: Union[Tensor, MaskedTensor], + dim: int = -1, + *, + keepdim: bool = False, + dtype: Optional[DType] = None, + mask: Optional[Tensor] = None, +) -> Tensor: + + """\ +{reduction_signature} +{reduction_descr} +By definition, the identity value of a median operation is the median +value of the tensor. If all elements of the input tensor along given +dimension(s) :attr:`dim` are masked-out, the identity value of the +median is undefined. Due to this ambiguity, the elements of output +tensor with strided layout, that correspond to fully masked-out +elements, have ``nan`` values. +{reduction_args} +{reduction_example}""" + if dtype is None: + dtype = input.dtype + dim_ = _canonical_dim(dim, input.ndim)[0] + is_float = torch.is_floating_point(input) + if not is_float: + input = input.to(dtype=torch.float) + mask_input = _combine_input_and_mask(median, input, mask) + if mask_input.layout == torch.strided: + output = torch.nanmedian(mask_input, dim_, keepdim).values + if is_float: + return output + elif not is_float and not torch.isnan(output).any(): + return output.to(dtype=dtype) + else: + raise ValueError( + "masked median expects no fully masked out rows if dtype is not floating point" + ) + else: + raise ValueError( + f"masked median expects strided tensor (got {mask_input.layout} tensor)" + ) + + +@_apply_docstring_templates +def logsumexp( + input: Tensor, + dim: DimOrDims = None, + *, + keepdim: bool = False, + dtype: Optional[DType] = None, + mask: Optional[Tensor] = None, +) -> Tensor: + if dtype is None: + dtype = input.dtype + dim_ = _canonical_dim(dim, input.ndim) + mask_input = _combine_input_and_mask(logsumexp, input, mask) + if mask_input.layout == torch.strided: + return torch.logsumexp(mask_input, dim_, keepdim=keepdim).to(dtype=dtype) + else: + raise ValueError( + f"masked logsumexp expects strided tensor (got {mask_input.layout} tensor)" + ) + + +# Cannot use _apply_docstring_templates as it is only set up for reductions and normalizations +def logaddexp( + input: Union[Tensor, MaskedTensor], + other: Union[Tensor, MaskedTensor], + *, + dtype: Optional[DType] = None, + input_mask: Optional[Tensor] = None, + other_mask: Optional[Tensor] = None, +) -> Tensor: + """logaddexp(input, other, *, dtype=None, input_mask=None, other_mask=None) -> Tensor + +Returns logaddexp of all the elements in the :attr:`input` and the :attr:`other` +tensor. 
The :attr:`input` elements are masked out according to the boolean tensor +:attr:`input_mask` and the attr:`other` elements are masked out according to the boolean tensor +:attr:`other_mask`. + +The shapes of a mask tensor and the tensor to be masked +don't need to match, but they must be :ref:`broadcastable +` and the dimensionality of the mask +tensor must not be greater than of the tensor to be masked. + +Args: + input (Tensor): the input tensor + other (Tensor): the second input tensor + +Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type + of returned tensor. If specified, the output tensor is + casted to :attr:`dtype` after the operation is + performed. Default: None. + input_mask (:class:`torch.Tensor`, optional): the boolean tensor + containing the binary mask of validity of :attr:`input` tensor elements. + Default: None that is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``. + other_mask (:class:`torch.Tensor`, optional): the boolean tensor + containing the binary mask of validity of :attr:`other` tensor elements. + Default: None that is equivalent to ``torch.ones(other.shape, dtype=torch.bool)``. + +Example:: + + >>> input = torch.tensor([-100.0, -200, -300]) + >>> input + tensor([-100., -200., -300.]) + >>> other = torch.tensor([-1.0, -2, -3]) + >>> other + tensor([-1., -2., -3.]) + >>> mask = torch.tensor([True, False, True]) + >>> mask + tensor([ True, False, True]) + >>> torch.masked._ops.logaddexp(input, other, input_mask=mask, other_mask=mask) + tensor([-1., -inf, -3.]) +""" + if dtype is None: + dtype = input.dtype + if input.layout == torch.strided and other.layout == torch.strided: + mask_input = _combine_input_and_mask(logsumexp, input, input_mask) + mask_other = _combine_input_and_mask(logsumexp, other, other_mask) + return torch.logaddexp(mask_input, mask_other).to(dtype=dtype) + else: + raise ValueError( + f"masked logaddexp expects strided tensors (got {input.layout} tensor for input, {other.layout} for other)" + ) + + +@_apply_docstring_templates +def norm( + input: Union[Tensor, MaskedTensor], + ord: Optional[float] = 2.0, + dim: DimOrDims = None, + *, + keepdim: Optional[bool] = False, + dtype: Optional[DType] = None, + mask: Optional[Tensor] = None, +) -> Tensor: + """\ +{reduction_signature} + +{reduction_descr} + +The identity value of norm operation, which is used to start the +reduction, is ``{identity_float32}``, except for ``ord=-inf`` it is +``{identity_ord_ninf}``. 
+ +{reduction_args} + +{reduction_example}""" + if dtype is None: + dtype = input.dtype + mask_input = _combine_input_and_mask(norm, input, mask, ord) + if mask_input.layout == torch.strided: + dim_ = _canonical_dim(dim, input.ndim) + return torch.linalg.vector_norm( + mask_input, ord, dim_, bool(keepdim), dtype=dtype + ) + else: + raise ValueError( + f"masked norm expects strided tensor (got {mask_input.layout} tensor)" + ) + + +def _std_var( + input: Union[Tensor, MaskedTensor], + dim: DimOrDims, + unbiased: Optional[bool], + *, + correction_opt: Optional[Union[int, float]], + keepdim: Optional[bool], + dtype: Optional[DType], + mask: Optional[Tensor], + take_sqrt: Optional[bool], +) -> Tensor: + assert (unbiased is None or correction_opt is None), "Only one of unbiased and correction may be given" + correction = 1.0 + if unbiased is not None: + correction = 1.0 if unbiased else 0.0 + if correction_opt is not None: + correction = sym_float(correction_opt) + + if dtype is None: + dtype = input.dtype + if not (dtype.is_floating_point or dtype.is_complex): + dtype = torch.float32 + compute_dtype = dtype + if not (compute_dtype.is_floating_point or compute_dtype.is_complex): + compute_dtype = torch.float32 + if input.layout == torch.strided: + if mask is None: + # TODO: compute count analytically + count = sum( + torch.ones(input.shape, dtype=torch.int64, device=input.device), + dim, + keepdim=True, + ) + sample_total = sum(input, dim, keepdim=True, dtype=dtype) + else: + inmask = _input_mask(input, mask=mask) + count = sum( + inmask.new_ones(input.shape, dtype=torch.int64), + dim, + keepdim=True, + mask=inmask, + ) + sample_total = sum(input, dim, keepdim=True, dtype=dtype, mask=inmask) + # TODO: replace torch.subtract/divide/square/maximum with + # masked subtract/divide/square/maximum when these will be + # available. + sample_mean = torch.divide(sample_total, count) + x = torch.subtract(input, sample_mean) + if mask is None: + total = sum(x * x.conj(), dim, keepdim=keepdim, dtype=compute_dtype) + else: + total = sum( + x * x.conj(), dim, keepdim=keepdim, dtype=compute_dtype, mask=inmask # type: ignore[possibly-undefined] + ) + if not keepdim: + count = count.reshape(total.shape) + if correction != 0: + real_dtype = (corresponding_real_dtype(compute_dtype) + if compute_dtype.is_complex else compute_dtype) + count = count.to(real_dtype) + count = torch.subtract(count, correction) + count = torch.maximum(count, count.new_zeros([])) + output = torch.divide(total, count).to(dtype=dtype) + if take_sqrt: + output = torch.sqrt(output) + return output + else: + raise ValueError( + f"masked std/var expects strided tensor (got {input.layout} tensor)" + ) + + +@_apply_docstring_templates +def var( + input: Union[Tensor, MaskedTensor], + dim: DimOrDims = None, + unbiased: Optional[bool] = None, + *, + correction: Optional[Union[int, float]] = None, + keepdim: Optional[bool] = False, + dtype: Optional[DType] = None, + mask: Optional[Tensor] = None, +) -> Tensor: + """\ +{reduction_signature} +{reduction_descr} +The identity value of sample variance operation is undefined. The +elements of output tensor with strided layout, that correspond to +fully masked-out elements, have ``nan`` values. 
+{reduction_args} +{reduction_example}""" + return _std_var( + input=input, + dim=dim, + unbiased=unbiased, + correction_opt=correction, + keepdim=keepdim, + dtype=dtype, + mask=mask, + take_sqrt=False, + ) + + +@_apply_docstring_templates +def std( + input: Union[Tensor, MaskedTensor], + dim: DimOrDims = None, + unbiased: Optional[bool] = None, + *, + correction: Optional[int] = None, + keepdim: Optional[bool] = False, + dtype: Optional[DType] = None, + mask: Optional[Tensor] = None, +) -> Tensor: + """\ +{reduction_signature} +{reduction_descr} +The identity value of sample standard deviation operation is undefined. The +elements of output tensor with strided layout, that correspond to +fully masked-out elements, have ``nan`` values. +{reduction_args} +{reduction_example}""" + return _std_var( + input=input, + dim=dim, + unbiased=unbiased, + correction_opt=correction, + keepdim=keepdim, + dtype=dtype, + mask=mask, + take_sqrt=True, + ) + + +@_apply_docstring_templates +def softmax( + input: Union[Tensor, MaskedTensor], + dim: int, + *, + dtype: Optional[DType] = None, + mask: Optional[Tensor] = None, +) -> Tensor: + if dtype is None: + dtype = input.dtype + dim_ = _canonical_dim(dim, input.ndim)[0] + mask_input = _combine_input_and_mask(amax, input, mask) + if mask_input.layout == torch.strided: + return torch.nn.functional.softmax(mask_input, dim_, dtype=dtype) + else: + raise ValueError( + f"masked softmax expects strided tensor (got {mask_input.layout} tensor)" + ) + + +@_apply_docstring_templates +def log_softmax( + input: Union[Tensor, MaskedTensor], + dim: int, + *, + dtype: Optional[DType] = None, + mask: Optional[Tensor] = None, +) -> Tensor: + if dtype is None: + dtype = input.dtype + dim_ = _canonical_dim(dim, input.ndim)[0] + mask_input = _combine_input_and_mask(amax, input, mask) + if mask_input.layout == torch.strided: + return torch.nn.functional.log_softmax(mask_input, dim_, dtype=dtype) + else: + raise ValueError( + f"masked log_softmax expects strided tensor (got {mask_input.layout} tensor)" + ) + + +@_apply_docstring_templates +def softmin( + input: Union[Tensor, MaskedTensor], + dim: int, + *, + dtype: Optional[DType] = None, + mask: Optional[Tensor] = None, +) -> Tensor: + if dtype is None: + dtype = input.dtype + dim_ = _canonical_dim(dim, input.ndim)[0] + mask_input = _combine_input_and_mask(amin, input, mask) + if mask_input.layout == torch.strided: + return torch.nn.functional.softmin(mask_input, dim_, dtype=dtype) + else: + raise ValueError( + f"masked softmin expects strided tensor (got {mask_input.layout} tensor)" + ) + + +@_apply_docstring_templates +def normalize( + input: Union[Tensor, MaskedTensor], + ord: float, + dim: int, + *, + eps: float = 1e-12, + dtype: Optional[DType] = None, + mask: Optional[Tensor] = None, +) -> Tensor: + if dtype is None: + dtype = input.dtype + dim_ = _canonical_dim(dim, input.ndim)[0] + # TODO: eliminate mask_input as unnecessary when using masked divide. + mask_input = _combine_input_and_mask(sum, input, mask) + if mask_input.layout == torch.strided: + nrm_ = norm(input, ord, dim, keepdim=True, dtype=dtype, mask=mask) + # TODO: replace torch.maximum with masked maximum when available. + denom = torch.maximum(nrm_, nrm_.new_full([], eps)) + # TODO: replace torch.divide with masked divide when available. 
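+        # denom is the masked norm clamped below by eps, so the division below is safe even for fully masked-out slices.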
+ return torch.divide(mask_input, denom) + else: + raise ValueError( + f"masked normalize expects strided tensor (got {mask_input.layout} tensor)" + ) diff --git a/venv/lib/python3.10/site-packages/torch/masked/maskedtensor/__init__.py b/venv/lib/python3.10/site-packages/torch/masked/maskedtensor/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e38e03c87086cf50d031dd5591f64f65399d6ac1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/masked/maskedtensor/__init__.py @@ -0,0 +1,8 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates +# flake8: noqa + +from .binary import _apply_native_binary, _is_native_binary +from .core import is_masked_tensor, MaskedTensor +from .passthrough import _apply_pass_through_fn, _is_pass_through_fn +from .reductions import _apply_reduction, _is_reduction +from .unary import _apply_native_unary, _is_native_unary diff --git a/venv/lib/python3.10/site-packages/torch/masked/maskedtensor/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/masked/maskedtensor/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..acddbfd02f8e801834479c26ab978071a61a5009 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/masked/maskedtensor/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/masked/maskedtensor/__pycache__/_ops_refs.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/masked/maskedtensor/__pycache__/_ops_refs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e659320e9180b734352b5b9e97ecc1a93848e161 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/masked/maskedtensor/__pycache__/_ops_refs.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/masked/maskedtensor/__pycache__/binary.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/masked/maskedtensor/__pycache__/binary.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ed5564b283e1f70ee1ba5f3badc43c8e6786876f Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/masked/maskedtensor/__pycache__/binary.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/masked/maskedtensor/__pycache__/core.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/masked/maskedtensor/__pycache__/core.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..364c79d776936811ac257d9888976f3608b9245c Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/masked/maskedtensor/__pycache__/core.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/masked/maskedtensor/__pycache__/creation.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/masked/maskedtensor/__pycache__/creation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..02c1e13ceac4a81086634919fdf20bcceca1dd74 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/masked/maskedtensor/__pycache__/creation.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/masked/maskedtensor/__pycache__/passthrough.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/masked/maskedtensor/__pycache__/passthrough.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e9b80821c21c8cc32da197c728e63630bdcb703b Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/torch/masked/maskedtensor/__pycache__/passthrough.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/masked/maskedtensor/__pycache__/reductions.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/masked/maskedtensor/__pycache__/reductions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2cd7bcf21307f06a0e937ab77683a53b6cbeee56 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/masked/maskedtensor/__pycache__/reductions.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/masked/maskedtensor/__pycache__/unary.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/masked/maskedtensor/__pycache__/unary.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..61af60afba6209a49ab76dbd6e8bad0530176c56 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/masked/maskedtensor/__pycache__/unary.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/torch/masked/maskedtensor/_ops_refs.py b/venv/lib/python3.10/site-packages/torch/masked/maskedtensor/_ops_refs.py new file mode 100644 index 0000000000000000000000000000000000000000..81a890af5d65fdeac98635aa16aed03184bcd290 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/masked/maskedtensor/_ops_refs.py @@ -0,0 +1,477 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates + +from functools import partial +from typing import Callable, Any, Dict, TYPE_CHECKING +import torch + +if TYPE_CHECKING: + import torch._ops + +from .binary import ( + _apply_native_binary, + NATIVE_BINARY_FNS, + NATIVE_INPLACE_BINARY_FNS, +) +from .core import is_masked_tensor, MaskedTensor, _get_data, _masks_match, _maybe_get_mask +from .passthrough import ( + _apply_pass_through_fn, + PASSTHROUGH_FNS +) +from .reductions import ( + _apply_reduction, + NATIVE_REDUCE_FNS, + TORCH_REDUCE_FNS, + TENSOR_REDUCE_FNS, +) +from .unary import ( + _apply_native_unary, + NATIVE_UNARY_FNS, + NATIVE_INPLACE_UNARY_FNS, +) + + +__all__ = [] # type: ignore[var-annotated] + + +def _check_args_kwargs_length(args, kwargs, error_prefix, len_args=None, len_kwargs=None): + if len_args is not None and len_args != len(args): + raise ValueError(f"{error_prefix}: len(args) must be {len_args} but got {len(args)}") + if len_kwargs is not None and len_kwargs != len(kwargs): + raise ValueError(f"{error_prefix}: len(kwargs) must be {len_kwargs} but got {len(kwargs)}") + + +class _MaskedContiguous(torch.autograd.Function): + @staticmethod + def forward(ctx, input): + if not is_masked_tensor(input): + raise ValueError("MaskedContiguous forward: input must be a MaskedTensor.") + + if input.is_contiguous(): + return input + + data = input.get_data() + mask = input.get_mask() + + return MaskedTensor(data.contiguous(), mask.contiguous()) + + @staticmethod + def backward(ctx, grad_output): + return grad_output + + +class _MaskedToDense(torch.autograd.Function): + @staticmethod + def forward(ctx, input): + if not is_masked_tensor(input): + raise ValueError("MaskedToDense forward: input must be a MaskedTensor.") + + if input.layout == torch.strided: + return input + + ctx.layout = input.layout + data = input.get_data() + mask = input.get_mask() + + return MaskedTensor(data.to_dense(), mask.to_dense()) + + @staticmethod + def backward(ctx, grad_output): + layout = ctx.layout + + if layout == torch.sparse_coo: + return grad_output.to_sparse_coo() + elif layout == torch.sparse_csr: + return 
grad_output.to_sparse_csr() + elif layout == torch.strided: + return grad_output.to_dense() + raise ValueError("to_dense: Unsupported input layout: ", layout) + + +class _MaskedToSparse(torch.autograd.Function): + @staticmethod + def forward(ctx, input): + if not is_masked_tensor(input): + raise ValueError("MaskedToSparse forward: input must be a MaskedTensor.") + + # Following the convention from sparse tensors that to_sparse always means that we convert to sparse_coo + if input.layout == torch.sparse_coo: + return input + + data = input.get_data() + mask = input.get_mask() + sparse_mask = mask.to_sparse_coo().coalesce() + sparse_data = data.sparse_mask(sparse_mask) + + return MaskedTensor(sparse_data, sparse_mask) + + @staticmethod + def backward(ctx, grad_output): + return grad_output.to_dense() + + +class _MaskedToSparseCsr(torch.autograd.Function): + @staticmethod + def forward(ctx, input): + if not is_masked_tensor(input): + raise ValueError("MaskedToSparseCsr forward: input must be a MaskedTensor.") + + if input._masked_data.ndim != 2: + raise ValueError(f"Only 2D tensors can be converted to the SparseCsr layout but got shape: {input._masked_data.size()}") + + if input.layout == torch.sparse_csr: + return input + + data = input.get_data() + mask = input.get_mask() + sparse_mask = mask.to_sparse_csr() + sparse_data = data.sparse_mask(sparse_mask) + + return MaskedTensor(sparse_data, sparse_mask) + + @staticmethod + def backward(ctx, grad_output): + return grad_output.to_dense() + + +class _MaskedWhere(torch.autograd.Function): + @staticmethod + def forward(ctx, cond, self, other): + ctx.mark_non_differentiable(cond) + ctx.save_for_backward(cond) + return torch.ops.aten.where(cond, self, other) + + @staticmethod + def backward(ctx, grad_output): + (cond,) = ctx.saved_tensors + + def masked_out_like(mt): + return MaskedTensor(mt.get_data(), torch.zeros_like(mt.get_mask()).bool()) + + return ( + None, + torch.ops.aten.where(cond, grad_output, masked_out_like(grad_output)), + torch.ops.aten.where(cond, masked_out_like(grad_output), grad_output), + ) + + +_MASKEDTENSOR_FUNCTION_TABLE = {} + +_function_fn_apply_map = { + (tuple(NATIVE_REDUCE_FNS), tuple(TORCH_REDUCE_FNS), tuple(TENSOR_REDUCE_FNS)): _apply_reduction, +} + +for fn_map_list, apply_fn in _function_fn_apply_map.items(): + for fn_map in fn_map_list: + for fn in fn_map: + _MASKEDTENSOR_FUNCTION_TABLE[fn] = partial(apply_fn, fn) + + +def register_function_func(ops): + """ + Used for registering a new __torch_function__ function to MaskedTensor + Called via _MASKEDTENSOR_FUNCTION_TABLE[func](*args, **kwargs) + + The code to register a new function looks like: + + @register_function_func(list_of_ops) + def foo(func, *args, **kwargs): + + """ + def wrapper(func): + for op in ops: + _MASKEDTENSOR_FUNCTION_TABLE[op] = partial(func, op) + return wrapper + + +@register_function_func(NATIVE_REDUCE_FNS + TORCH_REDUCE_FNS + TENSOR_REDUCE_FNS) +def _general_function_reductions(func, *args, **kwargs): + return _apply_reduction(func, *args, **kwargs) + + +@register_function_func([torch.Tensor.where, torch.where]) +def _function_where(func, *args, **kwargs): + _check_args_kwargs_length(args, kwargs, "__torch_function__, torch.where", len_args=3, len_kwargs=0) + return _MaskedWhere.apply(*args) + + +@register_function_func([torch.Tensor.contiguous]) +def _function_contiguous(func, *args, **kwargs): + return _MaskedContiguous.apply(args[0]) + + +@register_function_func([torch.Tensor.to_dense]) +def _function_to_dense(func, *args, **kwargs): + 
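+    # Delegate to the autograd-aware _MaskedToDense Function so the conversion remains differentiable.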
return _MaskedToDense.apply(args[0]) + + +@register_function_func([torch.Tensor.to_sparse]) +def _function_to_sparse(func, *args, **kwargs): + return _MaskedToSparse.apply(args[0]) + + +@register_function_func([torch.Tensor.to_sparse_csr]) +def _function_to_sparse_csr(func, *args, **kwargs): + return _MaskedToSparseCsr.apply(args[0]) + + +_MASKEDTENSOR_DISPATCH_TABLE: Dict["torch._ops.OpOverload", Callable[..., Any]] = {} + +def register_dispatch_func(aten_ops): + """ + Used for registering a new __torch_dispatch__ function to MaskedTensor + Called via _MASKEDTENSOR_DISPATCH_TABLE[func](*args, **kwargs) + + The code to register a new function looks like: + + @register_dispatch_func(list_of_ops) + def foo(func, *args, **kwargs): + + """ + def wrapper(func): + for aten_op in aten_ops: + _MASKEDTENSOR_DISPATCH_TABLE[aten_op] = partial(func, aten_op) + return wrapper + + +@register_dispatch_func(NATIVE_REDUCE_FNS + TORCH_REDUCE_FNS + TENSOR_REDUCE_FNS) +def _general_reduction(func, *args, **kwargs): + return _apply_reduction(func, *args, **kwargs) + + +@register_dispatch_func(PASSTHROUGH_FNS) +def _general_passthrough(func, *args, **kwargs): + return _apply_pass_through_fn(func, *args, **kwargs) + + +@register_dispatch_func(NATIVE_UNARY_FNS + NATIVE_INPLACE_UNARY_FNS) +def _general_unary(func, *args, **kwargs): + return _apply_native_unary(func, *args, **kwargs) + + +@register_dispatch_func(NATIVE_BINARY_FNS + NATIVE_INPLACE_BINARY_FNS) +def _general_binary(func, *args, **kwargs): + return _apply_native_binary(func, *args, **kwargs) + + +@register_dispatch_func([torch.ops.aten.stride]) +def stride(func, *args, **kwargs): + return None + + +@register_dispatch_func([torch.ops.aten.sym_stride]) +def sym_stride(func, *args, **kwargs): + return None + + +@register_dispatch_func([torch.ops.prim.layout]) +def layout(func, *args, **kwargs): + return _get_data(args[0]).layout + + +@register_dispatch_func([torch.ops.aten.is_contiguous]) +def is_contiguous(func, *args, **kwargs): + data = _get_data(args[0]) + if data.is_sparse: + raise ValueError( + "MaskedTensors with sparse data do not have is_contiguous" + ) + return func(data, *args[1:], **kwargs) + + +@register_dispatch_func([torch.ops.aten.is_strides_like_format]) +def is_strides_like_format(func, *args, **kwargs): + data = _get_data(args[0]) + if data.is_sparse: + raise ValueError( + "MaskedTensors with sparse data do not have is_strides_like_format" + ) + return func(data, *args[1:], **kwargs) + + +@register_dispatch_func([torch.ops.aten.is_non_overlapping_and_dense]) +def is_non_overlapping_and_dense(func, *args, **kwargs): + data = _get_data(args[0]) + if data.is_sparse: + raise ValueError( + "MaskedTensors with sparse data do not have is_non_overlapping_and_dense" + ) + return func(data, *args[1:], **kwargs) + + +@register_dispatch_func([torch.ops.aten.contiguous]) +def contiguous(func, *args, **kwargs): + if _get_data(args[0]).is_sparse: + raise ValueError( + "MaskedTensors with sparse data do not have contiguous" + ) + return _MaskedContiguous.apply(args[0]) + + +@register_dispatch_func([torch.ops.aten.new_empty_strided]) +def new_empty_strided(func, *args, **kwargs): + _check_args_kwargs_length(args, kwargs, f"__torch_dispatch__, {func}", len_args=3) + data = _get_data(args[0]) + mask = _maybe_get_mask(args[0]) + if tuple(args[1]) != tuple(data.size()): + raise ValueError(f"__torch_dispatch__, {func}: args[1] expected to be the same as data.size()") + if tuple(args[2]) != tuple(data.stride()): + raise ValueError(f"__torch_dispatch__, 
{func}: args[2] expected to be the same as data.stride()") + return MaskedTensor(func(data, args[1], args[2], **kwargs), mask) + + +@register_dispatch_func([torch.ops.aten._local_scalar_dense]) +def _local_scalar_dense(func, *args, **kwargs): + if not _maybe_get_mask(args[0]): + raise ValueError(f"__torch_dispatch__, {func}: expected a mask tensor") + return torch.ops.aten._local_scalar_dense(_get_data(args[0])) + + +@register_dispatch_func([torch.ops.aten.detach, torch.ops.aten.clone]) +def _apply_fn_on_data(func, *args, **kwargs): + return MaskedTensor(func(_get_data(args[0])), _maybe_get_mask(args[0])) + + +@register_dispatch_func([torch.ops.aten._to_copy]) +def _to_copy(func, *args, **kwargs): + new_data = func(_get_data(args[0]), *args[1:], **kwargs) + return MaskedTensor(new_data, _maybe_get_mask(args[0])) + + +@register_dispatch_func([torch.ops.aten._softmax]) +def _softmax(func, *args, **kwargs): + _check_args_kwargs_length(args, kwargs, f"__torch_dispatch__, {func}", len_args=3, len_kwargs=0) + data = _get_data(args[0]) + mask = _maybe_get_mask(args[0]) + result_data = torch.ops.aten._masked_softmax(data, ~mask, args[1], 2) + return MaskedTensor(result_data, mask) + + +@register_dispatch_func([torch.ops.aten.ones_like]) +def ones_like(func, *args, **kwargs): + _check_args_kwargs_length(args, kwargs, f"__torch_dispatch__, {func}", len_args=1) + result_data = func(_get_data(args[0]), **kwargs) + return MaskedTensor(result_data, _maybe_get_mask(args[0])) + + +@register_dispatch_func([torch.ops.aten._softmax_backward_data]) +def _softmax_backward_data(func, *args, **kwargs): + _check_args_kwargs_length(args, kwargs, f"__torch_dispatch__, {func}", len_args=4) + grad, output, dim, input_dtype = args + if is_masked_tensor(grad) and is_masked_tensor(output): + if not _masks_match(grad, output): + raise ValueError("__torch_dispatch__, {func}: expected the masks of grad and output to match") + grad_data = _get_data(grad) + new_grad_data = torch.ops.aten._masked_softmax_backward( + grad_data, + _get_data(output), + ~_maybe_get_mask(grad), + dim % grad_data.ndim, + ) + res = MaskedTensor(new_grad_data, _maybe_get_mask(grad)) + return res + else: + raise ValueError(f"__torch_dispatch__, {func}: grad and output must both be MaskedTensors") + + +@register_dispatch_func([torch.ops.aten.copy_]) +def copy_(func, *args, **kwargs): + _check_args_kwargs_length(args, kwargs, f"__torch_dispatch__, {func}", len_args=2) + if not _masks_match(_maybe_get_mask(args[0]), _maybe_get_mask(args[1])): + raise ValueError("args[0] mask and args[1] mask must match but do not") + func(_get_data(args[0]), _get_data(args[1])) + return args[0] + + +@register_dispatch_func([torch.ops.aten.where]) +def where(func, *args, **kwargs): + _check_args_kwargs_length(args, kwargs, f"__torch_dispatch__, {func}", len_args=3, len_kwargs=0) + if not torch.is_tensor(args[0]): + raise ValueError("__torch_dispatch__, {func}: expected args[0] to be a tensor") + mx = args[1] + my = args[2] + if not is_masked_tensor(mx): + mx = MaskedTensor(mx, torch.ones_like(mx, dtype=torch.bool)) + if not is_masked_tensor(my): + my = MaskedTensor(my, torch.ones_like(my, dtype=torch.bool)) + new_data = func(args[0], mx.get_data(), my.get_data()) + new_mask = func(args[0], mx.get_mask(), my.get_mask()) + return MaskedTensor(new_data, new_mask) + + +@register_dispatch_func([torch.ops.aten._to_sparse]) +def _to_sparse(func, *args, **kwargs): + _check_args_kwargs_length(args, kwargs, f"__torch_dispatch__, {func}", len_args=1, len_kwargs=0) + if not 
torch.is_tensor(args[0]): + raise TypeError("__torch_dispatch__, {func}: expected args[0] to be a tensor") + mt = args[0] + if not is_masked_tensor(mt): + mt = MaskedTensor(mt, torch.ones_like(mt, dtype=torch.bool)) + if mt.is_sparse_coo(): + return mt + new_mask = func(_maybe_get_mask(args[0])).coalesce() + new_data = _get_data(args[0]).sparse_mask(new_mask) + return MaskedTensor(new_data, new_mask) + + +@register_dispatch_func([torch.ops.aten._to_sparse_csr]) +def _to_sparse_csr(func, *args, **kwargs): + _check_args_kwargs_length(args, kwargs, f"__torch_dispatch__, {func}", len_args=1, len_kwargs=0) + if not torch.is_tensor(args[0]): + raise ValueError("__torch_dispatch__, {func}: expected args[0] to be a tensor") + mt = args[0] + if not is_masked_tensor(mt): + mt = MaskedTensor(mt, torch.ones_like(mt).bool()) + if mt.is_sparse_csr(): + return mt + new_mask = func(_maybe_get_mask(args[0])) + new_data = _get_data(args[0]).sparse_mask(new_mask) + return MaskedTensor(new_data, new_mask) + + +@register_dispatch_func([torch.ops.aten._to_dense]) +def _to_dense(func, *args, **kwargs): + _check_args_kwargs_length(args, kwargs, f"__torch_dispatch__, {func}", len_args=1, len_kwargs=0) + if not torch.is_tensor(args[0]): + raise ValueError("__torch_dispatch__, {func}: expected args[0] to be a tensor") + mt = args[0] + if not is_masked_tensor(mt): + mt = MaskedTensor(mt, torch.ones_like(mt).bool()) + new_data = func(_get_data(args[0])) + new_mask = func(_maybe_get_mask(args[0])) + return MaskedTensor(new_data, new_mask) + + +@register_dispatch_func([torch.ops.aten._indices]) +def _indices(func, *args, **kwargs): + # Assumes data is sparse + _check_args_kwargs_length(args, kwargs, f"__torch_dispatch__, {func}", len_args=1, len_kwargs=0) + data = _get_data(args[0]).indices() + return MaskedTensor(data, torch.ones_like(data).bool()) + + +@register_dispatch_func([torch.ops.aten._values]) +def _values(func, *args, **kwargs): + _check_args_kwargs_length(args, kwargs, f"__torch_dispatch__, {func}", len_args=1, len_kwargs=0) + data = _get_data(args[0]).values() + return MaskedTensor(data, torch.ones_like(data).bool()) + + +@register_dispatch_func([torch.ops.aten._sparse_coo_tensor_with_dims_and_tensors]) +def _sparse_coo_tensor_with_dims_and_tensors(func, *args, **kwargs): + new_args = list(args) + if is_masked_tensor(args[-1]): + new_args[-1] = args[-1].get_data() + if is_masked_tensor(args[-2]): + new_args[-2] = args[-2].get_data() + + new_data = func(*new_args, **kwargs) + new_args[-1] = torch.ones_like(new_args[-1]) + new_mask = func(*new_args, **kwargs).bool() + + return MaskedTensor(new_data, new_mask) + + +@register_dispatch_func([torch.ops.aten.is_same_size]) +def is_same_size(func, *args, **kwargs): + _check_args_kwargs_length(args, kwargs, f"__torch_dispatch__, {func}", len_args=2) + return _get_data(args[0]).is_same_size(_get_data(args[1])) diff --git a/venv/lib/python3.10/site-packages/torch/masked/maskedtensor/binary.py b/venv/lib/python3.10/site-packages/torch/masked/maskedtensor/binary.py new file mode 100644 index 0000000000000000000000000000000000000000..087ea95916e54ee925b50a6466693a735a8717d9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/masked/maskedtensor/binary.py @@ -0,0 +1,192 @@ +# Copyright (c) Meta Platforms, Inc. 
and affiliates + +import torch + +from .core import _map_mt_args_kwargs, _masks_match, _tensors_match, _wrap_result, is_masked_tensor + +__all__ = [] # type: ignore[var-annotated] + +BINARY_NAMES = [ + "add", + "atan2", + "arctan2", + "bitwise_and", + "bitwise_or", + "bitwise_xor", + "bitwise_left_shift", + "bitwise_right_shift", + "div", + "divide", + "floor_divide", + "fmod", + "logaddexp", + "logaddexp2", + "mul", + "multiply", + "nextafter", + "remainder", + "sub", + "subtract", + "true_divide", + "eq", + "ne", + "le", + "ge", + "greater", + "greater_equal", + "gt", + "less_equal", + "lt", + "less", + "maximum", + "minimum", + "fmax", + "fmin", + "not_equal", +] + +INPLACE_BINARY_NAMES = [ + n + "_" + for n in ( + list( + set(BINARY_NAMES) + - { + "logaddexp", + "logaddexp2", + "equal", + "fmin", + "minimum", + "maximum", + "fmax", + } + ) + ) +] + + +def _get_at_least_one_mask(a, b): + if not is_masked_tensor(a) and not is_masked_tensor(b): + raise TypeError("At least one of `a` and `b` must be a MaskedTensor") + if not _masks_match(a, b): + raise ValueError("a and b must have matching masks") + if is_masked_tensor(a): + return a.get_mask() + return b.get_mask() + + +def _binary_helper(fn, args, kwargs, inplace): + if len(kwargs) != 0: + raise ValueError("len(kwargs) must equal 0") + for a in args[2:]: + if torch.is_tensor(a): + raise TypeError("MaskedTensor binary ops do not support Tensor arguments aside from the lhs and rhs") + + if not _masks_match(*args[:2]): + raise ValueError( + "Input masks must match. If you need support for this, please open an issue on Github." + ) + + data_args, data_kwargs = _map_mt_args_kwargs( + args, kwargs, lambda x: x.get_data() + ) + mask_args, mask_kwargs = _map_mt_args_kwargs( + args, kwargs, lambda x: x.get_mask() + ) + + args0_layout = data_args[0].layout + same_layout = ( + (torch.is_tensor(data_args[1]) or is_masked_tensor(data_args[1])) and + (args0_layout == data_args[1].layout) + ) + + if args0_layout == torch.sparse_coo: + if same_layout: + if not _tensors_match(data_args[0].indices(), data_args[1].indices()): + raise ValueError( + "sparse_coo indices must match. If you need support for this, please open an issue on Github." + ) + if data_args[0].size() != data_args[1].size(): + raise ValueError("input1 and input2 must have the same size for binary functions.") + + data_args[1] = data_args[1].values() + + i = data_args[0].indices() + size = data_args[0].size() + data_args[0] = data_args[0].values() + v = fn(*data_args) + result_data = torch.sparse_coo_tensor(i, v, size) + + elif args0_layout == torch.sparse_csr: + if same_layout: + if not ( + _tensors_match(data_args[0].crow_indices(), data_args[1].crow_indices()) + and _tensors_match( + data_args[0].col_indices(), data_args[1].col_indices() + ) + ): + raise ValueError( + "sparse_csr indices must match. If you need support for this, please open an issue on Github." 
+ ) + + data_args[1] = data_args[1].values() + + crow = data_args[0].crow_indices() + col = data_args[0].col_indices() + data_args[0] = data_args[0].values() + v = fn(*data_args) + result_data = torch.sparse_csr_tensor(crow, col, v) + + else: + result_data = fn(*data_args) + + if inplace: + args[0]._set_data_mask(result_data, mask_args[0]) + return args[0] + else: + result_mask = _get_at_least_one_mask(*args[:2]) + # sparse tensors don't have strides so we can only expand if the layout is strided + if args0_layout == torch.strided: + result_mask = result_mask.expand_as(result_data) + return _wrap_result(result_data, result_mask) + + +def _torch_binary(fn_name): + fn = getattr(torch.ops.aten, fn_name) + + def binary_fn(*args, **kwargs): + return _binary_helper(fn, args, kwargs, inplace=False) + + return binary_fn + + +def _torch_inplace_binary(fn_name): + fn = getattr(torch.ops.aten, fn_name) + + def binary_fn(*args, **kwargs): + return _binary_helper(fn, args, kwargs, inplace=True) + + return binary_fn + + +NATIVE_BINARY_MAP = { + getattr(torch.ops.aten, name): _torch_binary(name) for name in BINARY_NAMES +} +NATIVE_INPLACE_BINARY_MAP = { + getattr(torch.ops.aten, name): _torch_inplace_binary(name) + for name in INPLACE_BINARY_NAMES +} + +NATIVE_BINARY_FNS = list(NATIVE_BINARY_MAP.keys()) +NATIVE_INPLACE_BINARY_FNS = list(NATIVE_INPLACE_BINARY_MAP.keys()) + + +def _is_native_binary(fn): + return fn in NATIVE_BINARY_FNS or fn in NATIVE_INPLACE_BINARY_FNS + + +def _apply_native_binary(fn, *args, **kwargs): + if fn in NATIVE_BINARY_FNS: + return NATIVE_BINARY_MAP[fn](*args, **kwargs) + if fn in NATIVE_INPLACE_BINARY_FNS: + return NATIVE_INPLACE_BINARY_MAP[fn](*args, **kwargs) + return NotImplemented diff --git a/venv/lib/python3.10/site-packages/torch/masked/maskedtensor/core.py b/venv/lib/python3.10/site-packages/torch/masked/maskedtensor/core.py new file mode 100644 index 0000000000000000000000000000000000000000..d2002048edd995e0d3bcd28f8a2349548a2ba80e --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/masked/maskedtensor/core.py @@ -0,0 +1,336 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates + +import warnings + +import torch +from torch.overrides import get_default_nowrap_functions + + +__all__ = [ + "MaskedTensor", + "is_masked_tensor", +] + + +def is_masked_tensor(a): + r""" Returns True if the input is a MaskedTensor, else False + + Args: + a: any input + + Examples: + + >>> # xdoctest: +SKIP + >>> from torch.masked import MaskedTensor + >>> data = torch.arange(6).reshape(2,3) + >>> mask = torch.tensor([[True, False, False], [True, True, False]]) + >>> mt = MaskedTensor(data, mask) + >>> is_masked_tensor(mt) + True + """ + return isinstance(a, MaskedTensor) + + +def _tensors_match(a, b, exact=True, rtol=1e-05, atol=1e-08): + if is_masked_tensor(a) or is_masked_tensor(b): + raise ValueError("Neither `a` nor `b` can be a MaskedTensor.") + if a.layout != b.layout: + raise ValueError(f"`a` and `b` must have the same layout. 
Got {a.layout} and {b.layout}") + + if a.dtype != b.dtype: + b = b.type(a.dtype) + if a.layout == b.layout == torch.sparse_coo: + return _tensors_match(a.values(), b.values(), exact) and _tensors_match( + a.indices(), b.indices(), exact + ) + elif a.layout == b.layout == torch.sparse_csr: + return ( + _tensors_match(a.crow_indices(), b.crow_indices(), exact) + and _tensors_match(a.col_indices(), b.col_indices(), exact) + and _tensors_match(a.values(), b.values(), exact) + ) + if exact: + return (a.dim() == b.dim()) and torch.eq(a, b).all().item() + return (a.dim() == b.dim()) and torch.allclose(a, b, rtol=rtol, atol=atol) + + +def _masks_match(a, b): + if is_masked_tensor(a) and is_masked_tensor(b): + mask_a = a.get_mask() + mask_b = b.get_mask() + return _tensors_match(mask_a, mask_b, exact=True) + return True + + +def _map_mt_args_kwargs(args, kwargs, map_fn): + def _helper(a, map_fn): + if is_masked_tensor(a): + return map_fn(a) + elif torch.is_tensor(a): + return a + elif isinstance(a, list): + a_impl, _ = _map_mt_args_kwargs(a, {}, map_fn) + return a_impl + elif isinstance(a, tuple): + a_impl, _ = _map_mt_args_kwargs(a, {}, map_fn) + return tuple(a_impl) + else: + return a + + if kwargs is None: + kwargs = {} + impl_args = [] + for a in args: + impl_args.append(_helper(a, map_fn)) + impl_kwargs = {} + for k in kwargs.keys(): + impl_kwargs[k] = _helper(a, map_fn) + return impl_args, impl_kwargs + + +def _wrap_result(result_data, result_mask): + if isinstance(result_data, list): + return [_wrap_result(r, m) for (r, m) in zip(result_data, result_mask)] + if isinstance(result_data, tuple): + return tuple(_wrap_result(r, m) for (r, m) in zip(result_data, result_mask)) + if torch.is_tensor(result_data): + return MaskedTensor(result_data, result_mask) + # Expect result_data and result_mask to be Tensors only + return NotImplemented + + +def _masked_tensor_str(data, mask, formatter): + if data.layout in {torch.sparse_coo, torch.sparse_csr}: + data = data.to_dense() + mask = mask.to_dense() + if data.dim() == 1: + formatted_elements = [ + formatter.format(d.item()) if isinstance(d.item(), float) else str(d.item()) + for d in data + ] + max_len = max( + 8 if x[1] else len(x[0]) for x in zip(formatted_elements, ~mask) + ) + return ( + "[" + + ", ".join( + [ + "--".rjust(max_len) if m else e + for (e, m) in zip(formatted_elements, ~mask) + ] + ) + + "]" + ) + sub_strings = [_masked_tensor_str(d, m, formatter) for (d, m) in zip(data, mask)] + sub_strings = ["\n".join([" " + si for si in s.split("\n")]) for s in sub_strings] + return "[\n" + ",\n".join(sub_strings) + "\n]" + + +def _get_data(a): + if is_masked_tensor(a): + return a._masked_data + return a + + +def _maybe_get_mask(a): + if is_masked_tensor(a): + return a.get_mask() + return None + + +class MaskedTensor(torch.Tensor): + @staticmethod + def __new__(cls, data, mask, requires_grad=False): + if is_masked_tensor(data) or not torch.is_tensor(data): + raise TypeError("data must be a Tensor") + if is_masked_tensor(mask) or not torch.is_tensor(mask): + raise TypeError("mask must be a Tensor") + # Use a Tensor that of the give size for the wrapper. + kwargs = {} + kwargs["device"] = data.device + kwargs["dtype"] = data.dtype + kwargs["layout"] = data.layout + kwargs["requires_grad"] = requires_grad + kwargs["dispatch_sizes_strides_policy"] = "strides" + kwargs["dispatch_layout"] = True + warnings.warn(("The PyTorch API of MaskedTensors is in prototype stage " + "and will change in the near future. 
Please open a Github issue " + "for features requests and see our documentation on the torch.masked " + "module for further information about the project."), UserWarning) + if data.requires_grad: + warnings.warn("It is not recommended to create a MaskedTensor with a tensor that requires_grad. " + "To avoid this, you can use data.clone().detach()", UserWarning) + return torch.Tensor._make_wrapper_subclass(cls, data.size(), **kwargs) # type: ignore[attr-defined] + + def _preprocess_data(self, data, mask): + from .._ops import _sparse_coo_where, _sparse_csr_where + + if data.layout != mask.layout: + raise TypeError("data and mask must have the same layout.") + if data.layout == torch.sparse_coo: + data = data.coalesce() + mask = mask.coalesce() + if data._nnz() != mask._nnz(): + data = _sparse_coo_where(mask, data, torch.tensor(0)) + elif data.layout == torch.sparse_csr: + if data._nnz() != mask._nnz(): + data = _sparse_csr_where(mask, data, torch.tensor(0)) + + # Have to pick awkward names to not conflict with existing fields such as data + self._masked_data = data.clone() + self._masked_mask = mask.clone() + + def _validate_members(self): + data = self._masked_data + mask = self.get_mask() + if type(data) != type(mask): + raise TypeError(f"data and mask must have the same type. Got {type(data)} and {type(mask)}") + if data.layout not in {torch.strided, torch.sparse_coo, torch.sparse_csr}: + raise TypeError(f"data layout of {data.layout} is not supported.") + if data.layout == torch.sparse_coo: + if not _tensors_match(data.indices(), mask.indices(), exact=True): + raise ValueError("data and mask are both sparse COO tensors but do not have the same indices.") + elif data.layout == torch.sparse_csr: + if not _tensors_match( + data.crow_indices(), mask.crow_indices(), exact=True + ) or not _tensors_match(data.col_indices(), mask.col_indices(), exact=True): + raise ValueError("data and mask are both sparse CSR tensors but do not share either crow or col indices.") + if mask.dtype != torch.bool: + raise TypeError("mask must have dtype bool.") + if not ( + data.dtype == torch.float16 + or data.dtype == torch.float32 + or data.dtype == torch.float64 + or data.dtype == torch.bool + or data.dtype == torch.int8 + or data.dtype == torch.int16 + or data.dtype == torch.int32 + or data.dtype == torch.int64 + ): + raise TypeError(f"{data.dtype} is not supported in MaskedTensor.") + if data.dim() != mask.dim(): + raise ValueError("data.dim() must equal mask.dim()") + if data.size() != mask.size(): + raise ValueError("data.size() must equal mask.size()") + + def __init__(self, data, mask, requires_grad=False): + self._preprocess_data(data, mask) + self._validate_members() + + @staticmethod + def _from_values(data, mask): + """ Differentiable constructor for MaskedTensor """ + class Constructor(torch.autograd.Function): + @staticmethod + def forward(ctx, data, mask): + return MaskedTensor(data, mask) + + @staticmethod + def backward(ctx, grad_output): + return grad_output, None + + result = Constructor.apply(data, mask) + return result + + def _set_data_mask(self, data, mask): + self._masked_data = data + self._masked_mask = mask + self._validate_members() + + def __repr__(self): + formatter = "{0:8.4f}" + if self.dim() == 0: + scalar_data = self.get_data().item() + data_formatted = ( + formatter.format(scalar_data) + if isinstance(scalar_data, float) + else str(scalar_data) + ) + if not self.get_mask().item(): + data_formatted = "--" + return ( + "MaskedTensor(" + + data_formatted + + ", " + + 
str(self.get_mask().item()) + + ")" + ) + s = _masked_tensor_str(self.get_data(), self.get_mask(), formatter) + s = "\n".join(" " + si for si in s.split("\n")) + return "MaskedTensor(\n" + s + "\n)" + + # Seems like this needs to be defined before torch_dispatch to work + @classmethod + def __torch_function__(cls, func, types, args=(), kwargs=None): + kwargs = kwargs or {} + + from ._ops_refs import _MASKEDTENSOR_FUNCTION_TABLE + if func in _MASKEDTENSOR_FUNCTION_TABLE: + return _MASKEDTENSOR_FUNCTION_TABLE[func](*args, **kwargs) + + if not all(issubclass(cls, t) for t in types): + return NotImplemented + with torch._C.DisableTorchFunctionSubclass(): + ret = func(*args, **kwargs) + if func in get_default_nowrap_functions(): + return ret + else: + return torch._tensor._convert(ret, cls) + + @classmethod + def unary(cls, fn, data, mask): + return MaskedTensor(fn(data), mask) + + @classmethod + def __torch_dispatch__(cls, func, types, args, kwargs): + func = func.overloadpacket + + from ._ops_refs import _MASKEDTENSOR_DISPATCH_TABLE + if func in _MASKEDTENSOR_DISPATCH_TABLE: + return _MASKEDTENSOR_DISPATCH_TABLE[func](*args, **kwargs) + + msg = ( + f"{func.__name__} is not implemented in __torch_dispatch__ for MaskedTensor.\n" + "If you would like this operator to be supported, please file an issue for a feature request at " + "https://github.com/pytorch/maskedtensor/issues with a minimal reproducible code snippet.\n" + "In the case that the semantics for the operator are not trivial, it would be appreciated " + "to also include a proposal for the semantics." + ) + warnings.warn(msg) + return NotImplemented + + def __lt__(self, other): + if is_masked_tensor(other): + return MaskedTensor(self.get_data() < _get_data(other), self.get_mask()) + return MaskedTensor(self.get_data() < other, self.get_mask()) + + def to_tensor(self, value): + return self.get_data().masked_fill(~self.get_mask(), value) + + def get_data(self): + class GetData(torch.autograd.Function): + @staticmethod + def forward(ctx, self): + return self._masked_data + + @staticmethod + def backward(ctx, grad_output): + if is_masked_tensor(grad_output): + return grad_output + return MaskedTensor(grad_output, self.get_mask()) + + return GetData.apply(self) + + def get_mask(self): + return self._masked_mask + + def is_sparse_coo(self): + return self.layout == torch.sparse_coo + + def is_sparse_csr(self): + return self.layout == torch.sparse_csr + + # Update later to support more sparse layouts + @property + def is_sparse(self): + return self.is_sparse_coo() or self.is_sparse_csr() diff --git a/venv/lib/python3.10/site-packages/torch/masked/maskedtensor/creation.py b/venv/lib/python3.10/site-packages/torch/masked/maskedtensor/creation.py new file mode 100644 index 0000000000000000000000000000000000000000..861984a21e1c436ef738c71b96fb1b4534f61583 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/masked/maskedtensor/creation.py @@ -0,0 +1,21 @@ +# Copyright (c) Meta Platforms, Inc. 
and affiliates
+
+from .core import MaskedTensor
+
+__all__ = [
+    "as_masked_tensor",
+    "masked_tensor",
+]
+
+
+"""
+These two factory functions are intended to mirror
+    torch.tensor - guaranteed to be a leaf node
+    torch.as_tensor - differentiable constructor that preserves the autograd history
+"""
+
+def masked_tensor(data, mask, requires_grad=False):
+    return MaskedTensor(data, mask, requires_grad)
+
+def as_masked_tensor(data, mask):
+    return MaskedTensor._from_values(data, mask)
diff --git a/venv/lib/python3.10/site-packages/torch/masked/maskedtensor/passthrough.py b/venv/lib/python3.10/site-packages/torch/masked/maskedtensor/passthrough.py
new file mode 100644
index 0000000000000000000000000000000000000000..91c9e5f81830e953b2d7c6ebc58f05e4c7fe1ecf
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/masked/maskedtensor/passthrough.py
@@ -0,0 +1,43 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates
+"""
+These are functions that should simply be applied to both the mask and the data.
+Take select or stack as an example: these ops can be applied to both the mask
+and the data of a MaskedTensor, and the result is then wrapped into a new
+MaskedTensor.
+"""
+
+import torch
+
+from .core import _map_mt_args_kwargs, _wrap_result
+
+__all__ = []  # type: ignore[var-annotated]
+
+
+PASSTHROUGH_FNS = [
+    torch.ops.aten.select,
+    torch.ops.aten.transpose,
+    torch.ops.aten.split,
+    torch.ops.aten.t,
+    torch.ops.aten.slice,
+    torch.ops.aten.slice_backward,
+    torch.ops.aten.select_backward,
+    torch.ops.aten.index,
+    torch.ops.aten.expand,
+    torch.ops.aten.view,
+    torch.ops.aten._unsafe_view,
+    torch.ops.aten._reshape_alias,
+    torch.ops.aten.cat,
+    torch.ops.aten.unsqueeze,
+]
+
+
+def _is_pass_through_fn(fn):
+    return fn in PASSTHROUGH_FNS
+
+
+def _apply_pass_through_fn(fn, *args, **kwargs):
+    data_args, data_kwargs = _map_mt_args_kwargs(args, kwargs, lambda x: x.get_data())
+    result_data = fn(*data_args, **data_kwargs)
+    mask_args, mask_kwargs = _map_mt_args_kwargs(args, kwargs, lambda x: x.get_mask())
+    result_mask = fn(*mask_args, **mask_kwargs)
+    return _wrap_result(result_data, result_mask)
diff --git a/venv/lib/python3.10/site-packages/torch/masked/maskedtensor/reductions.py b/venv/lib/python3.10/site-packages/torch/masked/maskedtensor/reductions.py
new file mode 100644
index 0000000000000000000000000000000000000000..737f4b240beb91bca5b8b5fe46cc45dd4dce9c63
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/masked/maskedtensor/reductions.py
@@ -0,0 +1,173 @@
+# Copyright (c) Meta Platforms, Inc. 
and affiliates + +import warnings + +import torch + +from .core import is_masked_tensor +from .creation import as_masked_tensor, masked_tensor + +__all__ = [] # type: ignore[var-annotated] + + +def _masked_all_all(data, mask=None): + if mask is None: + return data.all() + return data.masked_fill(~mask, True).all() + + +def _masked_all_dim(data, dim, keepdim=False, mask=None): + if mask is None: + return torch.all(data, dim=dim, keepdim=keepdim) + return torch.all(data.masked_fill(~mask, True), dim=dim, keepdim=keepdim) + + +def _masked_all(*args, **kwargs): + if len(args) == 1 and len(kwargs) == 1: + return _masked_all_all(args[0], mask=kwargs["mask"]) + return _masked_all_dim(*args, **kwargs) + + +def _multidim_any(mask, dim, keepdim): + if isinstance(dim, int): + return _multidim_any(mask, [dim], keepdim) + for d in sorted(dim, reverse=True): + mask = torch.any(mask, dim=d, keepdim=keepdim) + return mask + + +def _get_masked_fn(fn): + if fn == "all": + return _masked_all + return getattr(torch.masked, fn) + + +def _torch_reduce_all(fn): + def reduce_all(self): + masked_fn = _get_masked_fn(fn) + data = self.get_data() + mask = self.get_mask().values() if self.is_sparse else self.get_mask() + # When reduction is "all", then torch.argmin/torch.argmax needs to return the index of the + # element corresponding to the min/max, but this operation isn't supported correctly for sparse layouts. + # Therefore, this implementation calculates it using the strides. + if fn == "all": + result_data = masked_fn(data, mask=mask) + + elif fn in {"argmin", "argmax"} and self.is_sparse_coo(): + sparse_idx = masked_fn(data.values(), mask=mask).to(dtype=torch.int) + indices = ( + data.to_sparse_coo().indices() + if not self.is_sparse_coo() + else data.indices() + ) + idx = indices.unbind(1)[sparse_idx] + stride = data.size().numel() / torch.tensor( + data.size(), device=data.device + ).cumprod(0) + result_data = torch.sum(idx * stride) + + # we simply pass in the values for sparse COO/CSR tensors + elif self.is_sparse: + result_data = masked_fn(masked_tensor(data.values(), mask)) + + else: + result_data = masked_fn(self, mask=mask) + + return as_masked_tensor(result_data, torch.any(mask)) + + return reduce_all + + +def _torch_reduce_dim(fn): + def reduce_dim(self, dim, keepdim=False, dtype=None): + if self.is_sparse: + msg = ( + f"The sparse version of {fn} is not implemented in reductions.\n" + "If you would like this operator to be supported, please file an issue for a feature request at " + "https://github.com/pytorch/maskedtensor/issues with a minimal reproducible code snippet.\n" + "In the case that the semantics for the operator are not trivial, it would be appreciated " + "to also include a proposal for the semantics." 
+ ) + warnings.warn(msg) + return NotImplemented + if not is_masked_tensor(self): + raise TypeError("Input to reduce_dim must be a MaskedTensor") + + masked_fn = _get_masked_fn(fn) + data = self.get_data() + mask = self.get_mask() + if fn == "all": + result_data = masked_fn(data, dim=dim, keepdim=keepdim, mask=mask) + else: + result_data = masked_fn( + self, dim=dim, keepdim=keepdim, dtype=dtype, mask=self.get_mask() + ) + return as_masked_tensor(result_data, _multidim_any(mask, dim, keepdim)) + + return reduce_dim + + +def _torch_reduce(fn): + def reduce_fn(*args, **kwargs): + if len(args) == 1 and len(kwargs) == 0: + return _torch_reduce_all(fn)(args[0]) + return _torch_reduce_dim(fn)(*args, **kwargs) + + return reduce_fn + + +def _reduce_dim_args(input, dim, keepdim=False, dtype=None): + return input, dim, keepdim, dtype + + +def _torch_grad_reduce(fn): + def grad_reduce(*args, **kwargs): + if len(args) == 1 and len(kwargs) == 0: + return _torch_reduce_all(fn)(args[0]) + # TODO: autograd.Function doesn't support kwarg + input, dim, keepdim, dtype = _reduce_dim_args(*args, **kwargs) + return _torch_reduce_dim(fn)(input, dim, keepdim, dtype) + + return grad_reduce + + +REDUCE_NAMES = [ + "sum", + "mean", + "amin", + "amax", + "argmin", + "argmax", + "prod", + "all", + "norm", + "var", + "std", +] + +NATIVE_REDUCE_MAP = { + getattr(torch.ops.aten, name): _torch_reduce(name) for name in REDUCE_NAMES +} +TORCH_REDUCE_MAP = { + getattr(torch, name): _torch_grad_reduce(name) for name in REDUCE_NAMES +} +TENSOR_REDUCE_MAP = { + getattr(torch.Tensor, name): _torch_grad_reduce(name) for name in REDUCE_NAMES +} + +NATIVE_REDUCE_FNS = list(NATIVE_REDUCE_MAP.keys()) +TORCH_REDUCE_FNS = list(TORCH_REDUCE_MAP.keys()) +TENSOR_REDUCE_FNS = list(TENSOR_REDUCE_MAP.keys()) + +def _is_reduction(fn): + return fn in NATIVE_REDUCE_MAP or fn in TORCH_REDUCE_MAP or fn in TENSOR_REDUCE_MAP + + +def _apply_reduction(fn, *args, **kwargs): + if fn in NATIVE_REDUCE_MAP: + return NATIVE_REDUCE_MAP[fn](*args, **kwargs) + if fn in TORCH_REDUCE_MAP: + return TORCH_REDUCE_MAP[fn](*args, **kwargs) + if fn in TENSOR_REDUCE_MAP: + return TENSOR_REDUCE_MAP[fn](*args, **kwargs) + return NotImplemented diff --git a/venv/lib/python3.10/site-packages/torch/masked/maskedtensor/unary.py b/venv/lib/python3.10/site-packages/torch/masked/maskedtensor/unary.py new file mode 100644 index 0000000000000000000000000000000000000000..b3d5c136bfd4149810d25c36fd18b34d7a0a67c5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/masked/maskedtensor/unary.py @@ -0,0 +1,188 @@ +# Copyright (c) Meta Platforms, Inc. 
and affiliates + +import torch + +from .core import _map_mt_args_kwargs, _wrap_result + +__all__ = [] # type: ignore[var-annotated] + + +UNARY_NAMES = [ + "abs", + "absolute", + "acos", + "arccos", + "acosh", + "arccosh", + "angle", + "asin", + "arcsin", + "asinh", + "arcsinh", + "atan", + "arctan", + "atanh", + "arctanh", + "bitwise_not", + "ceil", + "clamp", + "clip", + "conj_physical", + "cos", + "cosh", + "deg2rad", + "digamma", + "erf", + "erfc", + "erfinv", + "exp", + "exp2", + "expm1", + "fix", + "floor", + "frac", + "lgamma", + "log", + "log10", + "log1p", + "log2", + "logit", + "i0", + "isnan", + "nan_to_num", + "neg", + "negative", + "positive", + "pow", + "rad2deg", + "reciprocal", + "round", + "rsqrt", + "sigmoid", + "sign", + "sgn", + "signbit", + "sin", + "sinc", + "sinh", + "sqrt", + "square", + "tan", + "tanh", + "trunc", +] + +INPLACE_UNARY_NAMES = [ + n + "_" + for n in (list(set(UNARY_NAMES) - {"angle", "positive", "signbit", "isnan"})) +] + +# Explicitly tracking functions we know are currently not supported +# This might be due to missing code gen or because of complex semantics +UNARY_NAMES_UNSUPPORTED = [ + "atan2", + "arctan2", + "bitwise_left_shift", + "bitwise_right_shift", + "copysign", + "float_power", + "fmod", + "frexp", + "gradient", + "imag", + "ldexp", + "lerp", + "logical_not", + "hypot", + "igamma", + "igammac", + "mvlgamma", + "nextafter", + "polygamma", + "real", + "remainder", + "true_divide", + "xlogy", +] + + +def _unary_helper(fn, args, kwargs, inplace): + if len(kwargs) != 0: + raise ValueError("MaskedTensor unary ops require that len(kwargs) == 0. " + "If you need support for this, please open an issue on Github.") + for a in args[1:]: + if torch.is_tensor(a): + raise TypeError("MaskedTensor unary ops do not support additional Tensor arguments") + + mask_args, mask_kwargs = _map_mt_args_kwargs( + args, kwargs, lambda x: x._masked_mask + ) + data_args, data_kwargs = _map_mt_args_kwargs( + args, kwargs, lambda x: x._masked_data + ) + + if args[0].layout == torch.sparse_coo: + data_args[0] = data_args[0].coalesce() + s = data_args[0].size() + i = data_args[0].indices() + data_args[0] = data_args[0].coalesce().values() + v = fn(*data_args) + result_data = torch.sparse_coo_tensor(i, v, size=s) + + elif args[0].layout == torch.sparse_csr: + crow = data_args[0].crow_indices() + col = data_args[0].col_indices() + data_args[0] = data_args[0].values() + v = fn(*data_args) + result_data = torch.sparse_csr_tensor(crow, col, v) + + else: + result_data = fn(*data_args) + + if inplace: + args[0]._set_data_mask(result_data, mask_args[0]) + return args[0] + else: + return _wrap_result(result_data, mask_args[0]) + + +def _torch_unary(fn_name): + fn = getattr(torch.ops.aten, fn_name) + + def unary_fn(*args, **kwargs): + return _unary_helper(fn, args, kwargs, inplace=False) + + return unary_fn + + +def _torch_inplace_unary(fn_name): + fn = getattr(torch.ops.aten, fn_name) + + def unary_fn(*args, **kwargs): + return _unary_helper(fn, args, kwargs, inplace=True) + + return unary_fn + + +NATIVE_UNARY_MAP = { + getattr(torch.ops.aten, name): _torch_unary(name) for name in UNARY_NAMES +} +NATIVE_INPLACE_UNARY_MAP = { + getattr(torch.ops.aten, name): _torch_inplace_unary(name) + for name in INPLACE_UNARY_NAMES +} + +NATIVE_UNARY_FNS = list(NATIVE_UNARY_MAP.keys()) +NATIVE_INPLACE_UNARY_FNS = list(NATIVE_INPLACE_UNARY_MAP.keys()) + + +def _is_native_unary(fn): + return fn in NATIVE_UNARY_FNS or fn in NATIVE_INPLACE_UNARY_FNS + + +def _apply_native_unary(fn, *args, 
**kwargs): + if fn in NATIVE_UNARY_FNS: + return NATIVE_UNARY_MAP[fn](*args, **kwargs) + if fn in NATIVE_INPLACE_UNARY_FNS: + return NATIVE_INPLACE_UNARY_MAP[fn](*args, **kwargs) + return NotImplemented diff --git a/venv/lib/python3.10/site-packages/torch/package/__pycache__/glob_group.cpython-310.pyc b/venv/lib/python3.10/site-packages/torch/package/__pycache__/glob_group.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..085d6ddd5d6bf3fecbea932aefc45e48bd61efd3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/torch/package/__pycache__/glob_group.cpython-310.pyc differ
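
Editor's note (not part of the diff above): the vendored files define a prototype MaskedTensor API, with factory functions in creation.py and op dispatch split across unary.py, binary.py, reductions.py, and passthrough.py. The sketch below is a minimal, assumed usage example based only on that code; the tensor values and the `torch.masked` import path are illustrative assumptions, not output of this changeset, and behavior is subject to the prototype warning emitted by MaskedTensor.__new__.

# Minimal usage sketch (assumptions per the vendored prototype files above).
import torch
from torch.masked import masked_tensor

data = torch.arange(6, dtype=torch.float32).reshape(2, 3)
mask = torch.tensor([[True, False, True], [False, True, True]])

mt = masked_tensor(data, mask)      # strided MaskedTensor; __repr__ renders masked slots as "--"
neg = torch.neg(mt)                 # unary op, routed through _apply_native_unary
summed = torch.sum(mt, dim=1)       # reduction, routed through _apply_reduction
added = mt + masked_tensor(torch.ones(2, 3), mask)  # binary op; input masks must match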