diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/bytecode_transformation.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/bytecode_transformation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..80ee281e328e8c97363ed3ad2e393ad9627a36d3 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/bytecode_transformation.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/code_context.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/code_context.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..68bc5700050007e5814b0a11190ce23c52b99058 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/code_context.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/convert_frame.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/convert_frame.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..30f0a01751e4b44c4940c77660a5581c0b0f31ab Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/convert_frame.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/debug_utils.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/debug_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c16b9a1a35bd02e26dde91f9a28c4d4b4d51b139 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/debug_utils.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/decorators.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/decorators.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a067d156e4b03f73cbe94669bfd7ff9bd697eb19 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/decorators.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/exc.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/exc.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..776b254ef6603e77c00442e9bcd6f98809f09b34 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/exc.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/logging.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/logging.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..68b3ad6308fcc69c2c22b244cf8f26d5743102ea Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/logging.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/mutation_guard.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/mutation_guard.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b775d3d59e9b7546d0a20b79ea464a097d58067c Binary files /dev/null and 
b/env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/mutation_guard.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/replay_record.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/replay_record.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d9a2154d66bbcf04b26dac9aa69ddcc571012d9b Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/replay_record.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/side_effects.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/side_effects.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4ed4141e7494f16620b90aa322bf047f9d4de557 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/side_effects.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/symbolic_convert.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/symbolic_convert.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..24f1d3466064080f363e7536ffa5a75a203e6c5c Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/symbolic_convert.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/test_case.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/test_case.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a7c21c8cb0bc93c620a9f6c55062338b93b4ec4a Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/test_case.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/test_minifier_common.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/test_minifier_common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fe2191a5e97227048ecc3ac6e854e8da9d6bb56b Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/_dynamo/__pycache__/test_minifier_common.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_prims_common/__init__.py b/env-llmeval/lib/python3.10/site-packages/torch/_prims_common/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..340c9f1eae493ea95d939b30601108bd1a76e83d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/_prims_common/__init__.py @@ -0,0 +1,1944 @@ +from __future__ import annotations + +import operator +import warnings +import weakref + +from contextlib import nullcontext +from enum import Enum +from functools import cmp_to_key, reduce +from typing import ( + Any, + Callable, + cast, + List, + Optional, + overload, + Sequence, + Tuple, + Type, + TYPE_CHECKING, + Union, +) + + +if TYPE_CHECKING: + # Import the following modules during type checking to enable code intelligence features, + # such as auto-completion in tools like pylance, even when these modules are not explicitly + # imported in user code. 
+ + import sympy + +import torch +from torch import sym_float, sym_int, sym_max + + +ShapeType = Union[torch.Size, List[int], Tuple[int, ...]] +StrideType = Union[List[int], Tuple[int, ...]] +DimsType = Union[int, List[int], Tuple[int, ...]] +DimsSequenceType = Union[List[int], Tuple[int, ...]] +# TODO: Type[torch.SymInt], Type[torch.SymFloat] +NumberTypeType = Union[Type[bool], Type[int], Type[float], Type[complex]] +# TODO: This needs a lot more type annotations +# NumberType = Union[bool, int, float, complex, torch.SymInt, torch.SymFloat] +NumberType = Union[bool, int, float, complex] +RealNumberType = Union[bool, int, float] + +Number = (bool, int, float, complex, torch.SymInt, torch.SymFloat) +# I don't call it Integral because numbers.Integral includes bool, but IntLike +# does not +Dim = int +IntLike = (int, torch.SymInt) +FloatLike = (float, torch.SymFloat) +IntWithoutSymInt = int +FloatWithoutSymFloat = float +DeviceLikeType = Union[str, torch.device, int] +Tensor = torch.Tensor + + +torch_function_passthrough = { + torch.device, + torch.sym_not, + torch.sym_float, + torch.sym_int, + torch.sym_max, + torch.sym_min, + torch.sym_sqrt, + torch.sym_ite, + torch.Tensor.dim, + torch.Tensor.ndim.__get__, # type: ignore[attr-defined] + torch.Tensor.numel, + torch.Tensor.size, + torch.Tensor.storage_offset, + torch.Tensor.stride, + torch.Tensor.dtype.__get__, # type: ignore[attr-defined] + torch.Tensor.is_sparse.__get__, # type: ignore[attr-defined] + torch.Tensor.shape.__get__, # type: ignore[attr-defined] + torch.Tensor.device.__get__, # type: ignore[attr-defined] + torch.Tensor.requires_grad.__get__, # type: ignore[attr-defined] + torch.Tensor.layout.__get__, # type: ignore[attr-defined] + torch.Tensor.is_contiguous, + # For TorchRefsMode only + torch.Tensor.__format__, + torch.Tensor.__repr__, + torch.Tensor.requires_grad.__get__, # type: ignore[attr-defined] +} + + +TensorLikeType = torch.Tensor +TensorLike = torch.Tensor +TensorSequenceType = Union[List[TensorLikeType], Tuple[TensorLikeType, ...]] +TensorOrNumberLikeType = Union[TensorLikeType, NumberType] + +CustomOutParamAnnotation = "__custom_out_param__" + + +def same_shape(a: ShapeType, b: ShapeType, *, allow_rhs_unbacked=False) -> bool: + if len(a) != len(b): + return False + + for x, y in zip(a, b): + if allow_rhs_unbacked: + # TODO: We should check that the symbols are consistent + # with each other + if isinstance(y, torch.SymInt): + continue + if x != y: + return False + + return True + + +def _maybe_get_pytype(t): + if t is torch.SymFloat: + return float + elif t is torch.SymInt: + return int + elif t is torch.SymBool: + return bool + else: + return t + + +# TODO: look at using torch.testing.assert_close instead with an option +# to just compare metadata +def compare_tensor_meta( + a: TensorLikeType, + b: TensorLikeType, + check_strides=False, + *, + allow_rhs_unbacked=False, +): + """ + Checks that two tensor likes have the same shape, + dtype and device. + + In the future this will validate additional metadata, like + strides. + """ + assert isinstance(a, TensorLike) + assert isinstance(b, TensorLike) + + if not same_shape(a.shape, b.shape, allow_rhs_unbacked=allow_rhs_unbacked): + msg = f"Shapes {a.shape} and {b.shape} are not equal!" + raise AssertionError(msg) + + if a.dtype != b.dtype: + msg = f"Dtypes {a.dtype} and {b.dtype} are not equal!" 
+ raise AssertionError(msg) + + if a.device != b.device: + # Handles special cuda:0 vs cuda case + # TODO: we should review why this happens and see about fixing it + if (str(a.device) == "cuda:0" or str(a.device) == "cuda") and ( + str(b.device) == "cuda:0" or str(b.device) == "cuda" + ): + pass + else: + msg = f"Devices {a.device} and {b.device} are not equal!" + raise AssertionError(msg) + + # Stride checking is currently disabled, see https://github.com/pytorch/pytorch/issues/78050 + if check_strides: + same_strides, idx = check_significant_strides(a, b) + if not same_strides: + msg = f"Stride mismatch! Strides are {a.stride()} and {b.stride()} (mismatched at {idx})!" + raise RuntimeError(msg) + + if a.storage_offset() != b.storage_offset(): + msg = f"Storage offset mismatch! Storage offsets are {a.storage_offset()} and {b.storage_offset()}!" + raise RuntimeError(msg) + + if a.is_conj() != b.is_conj(): + raise RuntimeError( + f"Conj mismatch! is_conj is set to {a.is_conj()} and {b.is_conj()}" + ) + + if a.is_neg() != b.is_neg(): + raise RuntimeError( + f"Neg mismatch! is_neg is set to {a.is_neg()} and {b.is_neg()}" + ) + + +def _check_strides_helper( + a: TensorLikeType, b: TensorLikeType, *, only_cuda=True, significant_only=True +) -> Tuple[bool, Optional[int]]: + # NOTE: only on CUDA because CPU elementwise strides are incorrect in PyTorch + # See https://github.com/pytorch/pytorch/issues/77553 + # Only compares strides that are "meaningful" -- strides for dimensions with length > 1 + # and for tensors with more than one element + if ( + not only_cuda or a.device.type == "cuda" or b.device.type == "cuda" + ) and a.numel() > 0: + for idx in range(a.ndim): + check = not significant_only or a.shape[idx] > 1 + if a.stride()[idx] != b.stride()[idx] and check: + return False, idx + + return True, None + + +def check_significant_strides( + a: TensorLikeType, b: TensorLikeType, *, only_cuda=True +) -> Tuple[bool, Optional[int]]: + return _check_strides_helper(a, b, only_cuda=only_cuda, significant_only=True) + + +def check_all_strides( + a: TensorLikeType, b: TensorLikeType, *, only_cuda=True +) -> Tuple[bool, Optional[int]]: + return _check_strides_helper(a, b, only_cuda=only_cuda, significant_only=False) + + +# This function is equivalent to compute_contiguous() from TensorImpl.cpp +def is_contiguous(a: TensorLikeType) -> bool: + """ + Tests whether a tensor is contiguous or not. + + Tensors are contiguous when they have no elements, + one element, or when they have "nested" strides. 
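+    For example, a tensor with shape (2, 3) and strides (3, 1) is contiguous, while one with strides (1, 2) is not.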
+ """ + if a.numel() < 2: + return True + + expected_stride = 1 + for x, y in reversed(tuple(zip(a.shape, a.stride()))): + # Skips checking strides when a dimension has length 1 + if x == 1: + continue + + if y != expected_stride: + return False + expected_stride = expected_stride * x + + return True + + +# This function is equivalent to compute_channels_last_contiguous_2d() in TensorImpl.cpp +def is_channels_last_contiguous_2d(a: Tensor) -> bool: + # NHWC or not channels last 2D contiguous + if a.ndim != 4: + return False + + expected_stride = 1 + for idx in (1, 3, 2, 0): + length = a.shape[idx] + if length == 1: + continue + + stride = a.stride()[idx] + if stride != expected_stride: + return False + + expected_stride *= length + + return True + + +def is_channels_last_contiguous_3d(a: Tensor) -> bool: + # NDHWC or not channels last 3D contiguous + if a.ndim != 5: + return False + + expected_stride = 1 + for idx in (1, 4, 3, 2, 0): + length = a.shape[idx] + if length == 1: + continue + + stride = a.stride()[idx] + if stride != expected_stride: + return False + + expected_stride *= length + + return True + + +_memory_formats = { + torch.contiguous_format, + torch.preserve_format, + torch.channels_last, + torch.channels_last_3d, +} + + +def validate_memory_format(memory_format: torch.memory_format): + torch._check( + memory_format in _memory_formats, + lambda: f"Received unknown memory format {memory_format}!", + ) + + +def is_contiguous_for_memory_format( # type: ignore[return] + a: Tensor, *, memory_format: torch.memory_format +) -> bool: + validate_memory_format(memory_format) + + if memory_format == torch.contiguous_format: + return is_contiguous(a) + if memory_format == torch.channels_last: + return is_channels_last_contiguous_2d(a) + if memory_format == torch.channels_last_3d: + return is_channels_last_contiguous_3d(a) + + torch._check( + False, + lambda: f"is_contiguous received unsupported memory format {memory_format}", + ) + + +# NOTE: that tensors with no elements and channels last is ??? +def is_channels_last_contiguous(a: Tensor) -> bool: + """ + True when a tensor is channels-last contiguous. + + This requires that: + + - the tensor is conceptually either 4 (NHWC) or 5 (NDHWC) dimensions + - if we name the tensor's dimensions NCHW or NCDHW, then the strides are such that the + stride of the 'C' dimension (Cs) is 1 and the strides corresponding to + each dimension (Xs) can be ordered Cs <= Ws <= Hs <= (Ds) <= Ns and are + "nested" -- so Ws = Cs * Cl, where Cl is the length of the 'C' dimension, + for example. + """ + return is_channels_last_contiguous_2d(a) or is_channels_last_contiguous_3d(a) + + +def is_non_overlapping_and_dense(a: Tensor) -> bool: + """ + True when a tensor is non-overlapping and dense. + + A tensor is non-overlapping and dense when there exists a permutation of + its dimensions that is contiguous. + """ + + if a.is_sparse: + return False + + # Short-circuits if the tensor is already contiguous or channels-last contiguous + if is_contiguous(a) or is_channels_last_contiguous(a): + return True + + # The following is equivalent to compute_non_overlapping_and_dense in TensorImpl.cpp + + # Short-circuits for tensors of rank one, which are + # non-overlapping and "dense" if their stride is one + if a.ndim == 1: + return a.stride()[0] == 1 + + # Checks that there exists a permutation of the strides s.t. 
the tensor would be contiguous + # Sorts (length, stride) pairs by stride + lengths_and_strides = sorted(zip(a.shape, a.stride()), key=operator.itemgetter(1)) + + expected_stride = 1 + for length, stride in lengths_and_strides: + if length == 1: + continue + + if stride != expected_stride: + return False + + expected_stride *= length + + return True + + +# NOTE: Based on the implementation in TensorIterator.cpp, but note that +# the note [Computing output strides] is incorrect, because it +# says that strides will be preserved even if they are not +# "non overlapping and dense", but this is incorrect. The +# output of elementwise operations are always given +# non overlapping and dense strides. +# This is also INCORRECT because it does not model TensorIterator's +# short-circuit, which can cause different strides. +def compute_elementwise_output_logical_to_physical_perm( + *tensors, _skip_checks=False +) -> List[int]: + if not _skip_checks and len(tensors) == 0: + msg = "Can't compute elementwise output strides for zero tensors!" + raise ValueError(msg) + + if not _skip_checks: + check_same_shape(*tensors, allow_cpu_scalar_tensors=True) + + # Filters the tensors to actual tensors + if not _skip_checks: + tensors = tuple( + a + for a in tensors + if isinstance(a, TensorLike) and not is_cpu_scalar_tensor(a) + ) + + # Short-circuits for CPU scalar case + if len(tensors) == 0: + return [] + + # Short-circuits for shapes with zero or one dimensions + # TODO: are these necessary? + ndim = tensors[0].ndim + if ndim == 0: + return [] + if ndim == 1: + return [0] + + # Short-circuits if contiguous, following the fake fast path. + # This reduces the number of guards we end up making + # TODO: do channels last too + is_contiguous = True + for t in tensors: + is_contiguous = is_contiguous and t.is_contiguous( + memory_format=torch.contiguous_format + ) + + if is_contiguous: + return list(range(ndim)) + + shape = tensors[0].shape + + def should_swap(idx_a, idx_b): + for tensor in tensors: + stride_a = tensor.stride()[idx_a] + stride_b = tensor.stride()[idx_b] + + if stride_a == 0 or stride_b == 0: + continue + + if stride_a < stride_b: + return -1 + + if stride_a > stride_b: + return 1 + + # stride_a == stride_b + if shape[idx_a] > shape[idx_b]: + return 1 + + # Note: this case is hit if all strides are zero, + # or all strides are equal and all dimensions have the same length + return 0 + + # The "sort" order for the permutation is back-to-front, but + # the natural order for permutations is front-to-back. Do the + # sorting back-to-front and then reverse it on output. + # + # also, note this returns the logical to physical shape permutation + perm = list(reversed(range(ndim))) + + # insertion sort with support for ambiguous comparisons + for i in range(1, ndim): + dim1 = i + for dim0 in reversed(range(i)): + comparison = should_swap(perm[dim0], perm[dim1]) + if comparison > 0: + perm[dim0], perm[dim1] = perm[dim1], perm[dim0] + dim1 = dim0 + elif comparison < 0: + break + + return list(reversed(perm)) + + +def compute_elementwise_output_strides(*tensors) -> Tuple[int, ...]: + """ + Computes the output strides for elementwise operations. + """ + if len(tensors) == 0: + msg = "Can't compute elementwise output strides for zero tensors!" 
+ raise ValueError(msg) + + check_same_shape(*tensors, allow_cpu_scalar_tensors=True) + + # Filters the tensors to actual tensors + tensors = tuple( + a for a in tensors if isinstance(a, TensorLike) and not is_cpu_scalar_tensor(a) + ) + + # Short-circuits for CPU scalar case + if len(tensors) == 0: + return () + + ndim = tensors[0].ndim + shape = tensors[0].shape + + if ndim == 0: + return () + if ndim == 1: + return (1,) + + logical_to_physical_perm = compute_elementwise_output_logical_to_physical_perm( + *tensors, _skip_checks=True + ) + permuted_shape = apply_perm(shape, logical_to_physical_perm) # to physical + + new_strides = make_contiguous_strides_for(permuted_shape) + permuted_strides = apply_perm( + new_strides, invert_perm(logical_to_physical_perm) + ) # to logical + + return tuple(permuted_strides) + + +# Identity permutation is [0, 1, 2] +def apply_perm(inp, perm): + ndim = len(inp) + permuted_inp = [-1] * ndim + for idx, x in enumerate(perm): + permuted_inp[idx] = inp[x] + return permuted_inp + + +def invert_perm(perm): + ndim = len(perm) + new_perm = [-1] * ndim + for idx, x in enumerate(perm): + new_perm[x] = idx + return new_perm + + +# +# Common helper functions +# + + +def validate_dim_length(length: int): + """ + Validates that an object represents a valid + dimension length. + """ + + if isinstance(length, (int, torch.SymInt)): + torch._check_is_size(length) + else: + # sometimes called with sympy expression by inductor + assert length >= 0 + + +def validate_shape(shape: ShapeType): + """ + Validates that a sequence represents a valid shape. + """ + + assert isinstance(shape, Sequence), type(shape) + for l in shape: + validate_dim_length(l) + + +def validate_strides(strides: StrideType): + """ + Verifies the object specifies valid strides. + """ + + assert isinstance(strides, Sequence) + for stride in strides: + assert stride >= 0 + + +def validate_idx(rank: int, idx: int): + """ + Validates that idx is a valid index for the given shape. + Assumes the index is already canonicalized. + """ + + assert isinstance(idx, Dim) + assert isinstance(rank, Dim) + + assert idx >= 0 and idx < rank or idx == 0 + + +def validate_dimension_indices(rank: int, indices: DimsSequenceType): + for idx in indices: + validate_idx(rank, idx) + + +def validate_exclusive_idx(rank: int, ex_idx: int): + """ + Validates that ex_idx is a valid exclusive index + for the given shape. + """ + + assert isinstance(ex_idx, Dim) + assert isinstance(rank, Dim) + assert ex_idx > 0 and ex_idx <= rank + + +# "Wraps" a dim (up to one time) for the given rank, allowing dims to be +# specified using negative indices. If `wrap_scalar` is true then scalar +# tensors of rank 0 will allow dimensions in the range [-1, 0]. Otherwise, +# idx should be in the range [-rank, rank-1]. 
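+# For example, canonicalize_dim(4, -1) returns 3 and canonicalize_dim(4, 2) returns 2.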
+def canonicalize_dim(rank: int, idx: int, wrap_scalar: bool = True) -> int: + if rank < 0: + msg = f"Rank cannot be negative but got {rank}" + raise IndexError(msg) + + if rank == 0: + if not wrap_scalar: + msg = f"Dimension specified as {idx} but tensor has no dimensions" + raise IndexError(msg) + rank = 1 + + if idx >= 0 and idx < rank: + return idx + + if idx < 0: + _idx = idx + rank + else: + _idx = idx + + if _idx < 0 or _idx >= rank: + # Same error message as in aten/src/ATen/WrapDimUtils.h:49 + msg = f"Dimension out of range (expected to be in range of [{-rank}, {rank - 1}], but got {idx})" + raise IndexError(msg) + + return _idx + + +# Takes a dimension or sequence of dimensions and "wraps" them, +# mapping negative offsets to positive ones +@overload +def canonicalize_dims( + rank: int, indices: Sequence[int], wrap_scalar: bool = True +) -> Tuple[int, ...]: + pass + + +@overload +def canonicalize_dims(rank: int, indices: int, wrap_scalar: bool = True) -> int: + pass + + +def canonicalize_dims(rank, indices, wrap_scalar=True): + if isinstance(indices, Dim): + return canonicalize_dim(rank, indices, wrap_scalar) + + return tuple(canonicalize_dim(rank, x, wrap_scalar) for x in indices) + + +def is_valid_permutation(rank: int, perm: DimsSequenceType) -> bool: + """ + Validates that perm is a permutation of length rank. + """ + + if not isinstance(perm, Sequence): + return False + + if not (tuple(sorted(perm)) == tuple(range(0, rank))): + return False + + return True + + +def is_same_shape(a: Sequence, b: Sequence) -> bool: + """ + Compares two shapes a and b, returning True if they are the same + (their ranks and corresponding lengths match) and False otherwise. + """ + + return tuple(a) == tuple(b) + + +def is_cpu_scalar_tensor(a: Any) -> bool: + return isinstance(a, TensorLike) and a.ndim == 0 and a.device.type == "cpu" + + +def check_same_device(*args, allow_cpu_scalar_tensors): + """ + Checks that all Tensors in args have the same device. + + Raises a RuntimeError when: + - args contains an object whose type is not Tensor or Number + - two Tensor objects in args have different devices, unless one is a CPU scalar tensor and allow_cpu_scalar_tensors is True + """ + # Short-circuits if all (one or fewer) arguments are trivially on the same device + if len(args) <= 1: + return + + # Note: cannot initialize device to the first arg's device (it may not have one) + device = None + for arg in args: + if isinstance(arg, Number): + continue + elif isinstance(arg, TensorLike): + if allow_cpu_scalar_tensors and is_cpu_scalar_tensor(arg): + continue + + if device is None: + device = arg.device + + if device != arg.device: + msg = ( + "Tensor on device " + + str(arg.device) + + " is not on the expected device " + + str(device) + + "!" + ) + raise RuntimeError(msg) + else: + msg = ( + "Unexpected type when checking for same device, " + str(type(arg)) + "!" + ) + raise RuntimeError(msg) + + +def canonicalize_device(device: DeviceLikeType) -> torch.device: + if isinstance(device, torch.device): + return device + + assert isinstance(device, str) + return torch.device(device) + + +# Asserts if any of the following are true: +# - a non-scalar or non-Tensor is given +# - the shape of any tensors is distinct +def check_same_shape(*args, allow_cpu_scalar_tensors: bool): + """ + Checks that all Tensors in args have the same shape. 
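+    CPU scalar tensors are ignored when allow_cpu_scalar_tensors is True.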
+
+    Raises a RuntimeError when:
+      - args contains an object whose type is not Tensor or Number
+      - two Tensor objects in args have different shapes
+    """
+    shape = None
+
+    for arg in args:
+        if isinstance(arg, Number):
+            continue
+        elif isinstance(arg, TensorLike):
+            if allow_cpu_scalar_tensors and is_cpu_scalar_tensor(arg):
+                continue
+
+            if shape is None:
+                shape = arg.shape
+
+            if not is_same_shape(shape, arg.shape):
+                msg = f"Shape {arg.shape} is not the expected shape {shape}!"
+                raise RuntimeError(msg)
+        else:
+            msg = (
+                "Unexpected type when checking for same shape, " + str(type(arg)) + "!"
+            )
+            raise RuntimeError(msg)
+
+
+# Acquires a common shape, if it exists, from one or more tensor arguments,
+# filtering number arguments
+def extract_shape(*args, allow_cpu_scalar_tensors: bool) -> Optional[ShapeType]:
+    shape = None
+    scalar_shape = None
+
+    for arg in args:
+        if isinstance(arg, Number):
+            continue
+        elif isinstance(arg, TensorLike):
+            if allow_cpu_scalar_tensors and is_cpu_scalar_tensor(arg):
+                scalar_shape = arg.shape
+                continue
+
+            if shape is None:
+                shape = arg.shape
+
+            if not is_same_shape(shape, arg.shape):
+                return None
+        else:
+            return None
+
+    return shape if shape is not None else scalar_shape
+
+
+# Extracts dimensions that might be passed either as a list/tuple or as varargs.
+# A typical case is Tensor.permute.
+def extract_dims_from_varargs(
+    dims: Union[DimsSequenceType, Tuple[DimsSequenceType, ...]]
+) -> DimsSequenceType:
+    if dims and isinstance(dims[0], Sequence):
+        assert len(dims) == 1
+        dims = cast(Tuple[DimsSequenceType], dims)
+        return dims[0]
+    else:
+        return cast(DimsSequenceType, dims)
+
+
+def extract_shape_from_varargs(
+    shape: Union[ShapeType, Tuple[ShapeType]],
+    validate=True,
+) -> Tuple[int, ...]:
+    """
+    Returns a shape from varargs.
+
+    In PyTorch, operations that accept shapes often accept them as varargs, like
+    foo(*shape). However, a user can pass the shape as separate integers, like this:
+
+      foo(1, 2, 3)
+
+    or as a single sequence of integers, like this:
+
+      foo((1, 2, 3))
+
+    In the first case shape will be a tuple of integers, and in the second case it's a tuple
+    containing a tuple of integers. This validates those inputs and canonicalizes them
+    to a tuple of integers.
+    """
+
+    # Handles tuple unwrapping
+    if len(shape) == 1 and isinstance(shape[0], Sequence):
+        shape = shape[0]
+
+    if validate:
+        validate_shape(shape)  # type: ignore[arg-type]
+    return shape  # type: ignore[return-value]
+
+
+def infer_size_shapes(a: ShapeType, b: ShapeType) -> Tuple[int, ...]:
+    ndim = max(len(a), len(b))
+    expandedSizes = [0] * ndim
+
+    for i in range(ndim - 1, -1, -1):
+        offset = ndim - 1 - i
+        dimA = len(a) - 1 - offset
+        dimB = len(b) - 1 - offset
+        sizeA = a[dimA] if dimA >= 0 else 1
+        sizeB = b[dimB] if dimB >= 0 else 1
+
+        torch._check(
+            (sizeA == sizeB) or (sizeA == 1) or (sizeB == 1),
+            lambda: (
+                f"The size of tensor a ({sizeA}) must match the size of "
+                f"tensor b ({sizeB}) at non-singleton dimension {i}"
+            ),
+        )
+
+        # 1s map to the other size (even 0)
+        expandedSizes[i] = sizeB if sizeA == 1 else sizeA
+
+    return tuple(expandedSizes)
+
+
+def infer_size(shape: ShapeType, numel: int) -> Tuple[int, ...]:
+    """
+    Infers the size of a dim with size -1, if it exists.
+    Also checks that the new shape is compatible with the number of elements.
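+    For example, infer_size((2, -1, 4), 24) returns (2, 3, 4).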
+ """ + dim = None + newsize = 1 + for i, d in enumerate(shape): + if d == -1: + torch._check(dim is None, lambda: "only one dimension can be inferred") + dim = i + elif d >= 0: + newsize *= d + else: + torch._check(False, lambda: f"invalid shape dimension {d}") + if dim is None: + torch._check( + numel == newsize, + lambda: f"shape '{list(shape)}' is invalid for input of size {numel}", + ) + else: + from torch.fx.experimental.symbolic_shapes import definitely_true + + torch._check( + newsize != 0, + lambda: ( + f"cannot reshape tensor of 0 elements into shape {list(shape)} because the " + f"unspecified dimension size -1 can be any value and is ambiguous" + if definitely_true(numel == 0) + else f"shape '{list(shape)}' is invalid for input of size {numel}" + ), + ) + torch._check( + numel % newsize == 0, + lambda: f"shape '{list(shape)}' is invalid for input of size {numel}", + ) + # Convert to list to produce a compatible error message with core + # PyTorch, which prints sequences in square brackets. + shape = list(shape) + shape[dim] = numel // newsize + # NB: This is pretty important when you have unbacked SymInts. + # Suppose you have (i0, 12) resizing into (2, -1, 12). The old + # range for i0 is typically [2, inf], which means if you divide + # by two the new range should be [1, inf]. But this is bad news + # if you have an unbacked SymInt: we need to reapply the unsound + # assumption that the size is >= 2. + torch._check_is_size(shape[dim]) + return tuple(shape) + + +_integer_dtypes = (torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64) +_low_precision_dtypes = (torch.float16, torch.bfloat16, torch.complex32) +_complex_dtypes = (torch.complex32, torch.complex64, torch.complex128) + + +def is_boolean_dtype(dtype: torch.dtype) -> bool: + assert isinstance(dtype, torch.dtype) + return dtype is torch.bool + + +def is_integer_dtype(dtype: torch.dtype) -> bool: + assert isinstance(dtype, torch.dtype) + return dtype in _integer_dtypes + + +def is_low_precision_dtype(dtype: torch.dtype) -> bool: + assert isinstance(dtype, torch.dtype) + return dtype in _low_precision_dtypes + + +def is_float_dtype(dtype: torch.dtype) -> bool: + assert isinstance(dtype, torch.dtype) + return dtype.is_floating_point + + +def is_complex_dtype(dtype: torch.dtype) -> bool: + assert isinstance(dtype, torch.dtype) + return dtype in _complex_dtypes + + +def is_grad_dtype(dtype: torch.dtype) -> bool: + """ + Checks if the dtype can require a gradient. + """ + return dtype.is_floating_point or is_complex_dtype(dtype) + + +_complex_to_real_dtype_map = { + torch.complex128: torch.float64, + torch.complex64: torch.float32, + torch.complex32: torch.float16, +} + +_real_to_complex_dtype_map = { + torch.float16: torch.complex32, + torch.bfloat16: torch.complex64, + torch.float32: torch.complex64, + torch.float64: torch.complex128, +} + + +def corresponding_real_dtype(dtype: torch.dtype) -> torch.dtype: + return _complex_to_real_dtype_map[dtype] + + +def corresponding_complex_dtype(dtype: torch.dtype) -> torch.dtype: + return _real_to_complex_dtype_map[dtype] + + +def dtype_to_type(dtype: torch.dtype) -> type: + """ + Computes the corresponding Python type (AKA "type kind") for the + given dtype. 
+ """ + assert isinstance(dtype, torch.dtype) + + if dtype is torch.bool: + return bool + if dtype in _integer_dtypes: + return int + if dtype.is_floating_point: + return float + if dtype in _complex_dtypes: + return complex + + raise ValueError("Invalid dtype!") + + +def dtype_to_type_ctor(dtype: torch.dtype) -> Callable[[NumberType], NumberType]: + """ + Computes the corresponding Python type constructor for the + given dtype. + """ + assert isinstance(dtype, torch.dtype) + + if dtype is torch.bool: + return lambda x: bool(x) + if dtype in _integer_dtypes: + return sym_int + if dtype.is_floating_point: + return sym_float + if dtype in _complex_dtypes: + # TODO: type error here is real, replace with sym_complex + return lambda x: complex(x) # type: ignore[arg-type] + + raise ValueError("Invalid dtype!") + + +def type_to_dtype(typ: type) -> torch.dtype: + """ + Computes the corresponding dtype for a Number type. + """ + + assert isinstance(typ, type) + + if typ is bool: + return torch.bool + if typ in [int, torch.SymInt]: + return torch.long + if typ in [float, torch.SymFloat]: + return torch.get_default_dtype() + # TODO: sym_complex_float? + if typ is complex: + return corresponding_complex_dtype(torch.get_default_dtype()) + + raise ValueError("Invalid type!") + + +def get_dtype(x: Union[torch.Tensor, NumberType]): + if isinstance(x, torch.Tensor): + return x.dtype + else: + return type_to_dtype(type(x)) + + +_ordered_types = (bool, int, float, complex) + + +def check_fp_or_complex( + dtype: torch.dtype, fn_name: str, allow_low_precision_dtypes: bool = True +): + """ + Checks whether the input is floating point or complex. + If allow_low_precision_dtypes is True, it allows having float16, bfloat16, and complex32 + """ + torch._check( + is_float_dtype(dtype) or is_complex_dtype(dtype), + lambda: f"{fn_name}: Expected a floating point or complex tensor as input. Got {dtype}", + ) + torch._check( + allow_low_precision_dtypes or not is_low_precision_dtype(dtype), + lambda: f"{fn_name}: Half precision dtypes not supported. Got {dtype}", + ) + + +def check_is_matrix(A: TensorLikeType, f_name: str, arg_name: str = "A"): + torch._check( + len(A.shape) >= 2, + lambda: f"{f_name}: The input tensor {arg_name} must have at least 2 dimensions.", + ) + + +def get_higher_type(a: type, b: type) -> type: + """ + Returns the higher of the two given Number types. + + The types are ordered bool -> int -> float -> complex. + """ + a, b = _maybe_get_pytype(a), _maybe_get_pytype(b) + # Type checking + if a not in _ordered_types or b not in _ordered_types: + raise RuntimeError(f"Expected builtin numeric types, found {a}, {b}") + + if a is b: + return a + + for typ in _ordered_types: + if a is typ: + return b + if b is typ: + return a + + raise ValueError("Unknown Python scalar type!") + + +# Returns the higher of two torch datatypes a and b or, if the two +# are not ordered relative to each other, the next +# higher datatype +def get_higher_dtype( + a: Optional[Union[torch.dtype, TensorLikeType, NumberType]], + b: Optional[Union[torch.dtype, TensorLikeType, NumberType]], +) -> Optional[torch.dtype]: + """ + Computes the "lowest" datatype that is weakly + "higher" than both a and b. 
+ """ + + # Type checking + assert a is None or isinstance(a, (torch.dtype, TensorLike, Number)) + assert b is None or isinstance(b, (torch.dtype, TensorLike, Number)) + + def _extract_dtype( + x: Optional[Union[torch.dtype, TensorLikeType, NumberType]] + ) -> Optional[torch.dtype]: + if x is None: + return None + if isinstance(x, torch.dtype): + return x + if isinstance(x, TensorLike): + return x.dtype + if isinstance(x, Number): + return type_to_dtype(type(x)) + + raise RuntimeError("Unexpected type given to _extract_dtype!") + + a, b = _extract_dtype(a), _extract_dtype(b) + + if a is b: + return a + + if a is None: + return b + + if b is None: + return a + + ordered_datatypes = ( + (torch.bool,), + (torch.uint8, torch.int8), + (torch.int16,), + (torch.int32,), + (torch.int64,), + (torch.float16, torch.bfloat16), + (torch.float32,), + (torch.float64,), + (torch.complex32,), + (torch.complex64,), + (torch.complex128,), + ) + + for idx, dtypes in enumerate(ordered_datatypes): + if a in dtypes and b in dtypes: + return ordered_datatypes[idx + 1][0] + if a in dtypes: + return b + if b in dtypes: + return a + + raise RuntimeError("Unexpected termination!") + + +def check_pin_memory(pin_memory: bool): + torch._check_not_implemented( + not pin_memory, lambda: "PrimTorch does not support pinned memory" + ) + + +def check_layout(layout: torch.layout): + torch._check_not_implemented( + layout == torch.strided, lambda: f"PrimTorch doesn't support layout={layout}" + ) + + +# TODO: maybe unify with can_cast_to? +def is_weakly_lesser_type(a: type, b: type) -> bool: + """ + Compares two types, a and b, returning True if a is weakly "less" than b. + + The comparison is determined by the following type ordering: bool, int, float, complex. + """ + + a, b = _maybe_get_pytype(a), _maybe_get_pytype(b) + + if a not in _ordered_types or b not in _ordered_types: + raise RuntimeError(f"Expected builtin numeric types, found {a}, {b}") + + for typ in _ordered_types: + if a == typ: + return True + if b == typ: + return False + + raise RuntimeError("Unexpected termination!") + + +def can_safe_cast_to(*, cast_to: torch.dtype, cast_from: torch.dtype) -> bool: + for fn in (is_complex_dtype, is_float_dtype, is_integer_dtype, is_boolean_dtype): + if fn(cast_to): + return True + if fn(cast_from): + return False + + raise ValueError(f"Received unknown dtypes {cast_to}, {cast_from}!") + + +def check_same_dtype(*args): + """ + Checks that all Tensors in args have the same device and that all Numbers have the + same corresponding Python type. + + Raises a RuntimeError when: + - args contains an object whose type is not Tensor or Number + - two Tensors objects in args have different dtypes + - two Number objects in args have different types + - there are Tensors and Numbers in args, and one of those Tensors corresponding + Python types is different from the type of one of those Numbers + """ + full_dtype = None + scalar_type = None + + for arg in args: + if isinstance(arg, Number): + # Scalar type checking is disabled (and may be removed in the future) + continue + # if scalar_type is None: + # scalar_type = type(arg) + + # if scalar_type is not type(arg): + # msg = ( + # "Scalar of type " + # + str(type(arg)) + # + " is not the expected type of " + # + str(scalar_type) + # + "!" 
+ # ) + # raise RuntimeError(msg) + elif isinstance(arg, TensorLike): + if full_dtype is None: + full_dtype = arg.dtype + if scalar_type is None: + scalar_type = dtype_to_type(arg.dtype) + + if full_dtype is not arg.dtype: + msg = ( + "Tensor with dtype " + + str(arg.dtype) + + " is not the expected dtype of " + + str(full_dtype) + + "!" + ) + raise RuntimeError(msg) + + arg_type = dtype_to_type(arg.dtype) + if arg_type is not scalar_type: + msg = ( + "Tensor with corresponding Python type " + + str(arg_type) + + " is not the expected type of " + + str(scalar_type) + + "!" + ) + raise RuntimeError(msg) + else: + msg = ( + "Unexpected type when checking for same dtype, " + str(type(arg)) + "!" + ) + raise RuntimeError(msg) + + +# Maps datatypes to their computation types for elementwise operations +_computation_dtype_map = { + torch.bfloat16: torch.float32, + torch.float16: torch.float32, + torch.complex32: torch.complex64, +} + + +def get_computation_dtype(dtype: torch.dtype) -> torch.dtype: + return _computation_dtype_map.get(dtype, dtype) + + +_cpu_acc_type_map = { + torch.bfloat16: torch.float64, + torch.float16: torch.float64, + torch.float32: torch.float64, + torch.complex32: torch.complex128, + torch.complex64: torch.complex128, +} + + +def get_acc_type(dtype: torch.dtype, device: torch.device) -> torch.dtype: + # Equivalent to at::toAccumulateType, prefer computation_dtype where possible + if device.type == "cpu": + return _cpu_acc_type_map.get(dtype, dtype) + else: + return get_computation_dtype(dtype) + + +class ELEMENTWISE_TYPE_PROMOTION_KIND(Enum): + DEFAULT = (0,) + NO_OPMATH = (1,) + INT_TO_FLOAT = (2,) + ALWAYS_BOOL = (3,) + COMPLEX_TO_FLOAT = (4,) + BOOL_TO_LONG = (5,) + + +class REDUCTION_OUTPUT_TYPE_KIND(Enum): + SAME = (0,) + COMPLEX_TO_FLOAT = (1,) # for complex types outputs corresponding real type + KEEP_PROMOTED_TYPE = (2,) # keep output in opmath type, needed for mean + ALWAYS_BOOL = (3,) + + +# Describes the return type of the primitive: +# +# - NEW, a new tensor is created +# - VIEW, a view of an input tensor is returned +# - INPLACE, one or more input tensors is modified +# +# these descriptors are mututally exclusive and exhaustive. +class RETURN_TYPE(Enum): + NEW = (0,) + VIEW = (1,) + INPLACE = (2,) + + +# TODO: when NumberType contains the sym types, can simplify this +def number_type(x: Union[NumberType, torch.SymInt, torch.SymFloat]) -> Type: + if isinstance(x, torch.SymInt): + return int + elif isinstance(x, torch.SymFloat): + return float + else: + return type(x) + + +def symbol_type(x: sympy.Symbol) -> Type: + if x.is_integer: # type: ignore[attr-defined] + return int + else: + # NB: Not strictly correct, but we don't support SymPy complex or bool. + return float + + +# TODO: document type promotion kinds +def elementwise_dtypes( + *_args, + type_promotion_kind: ELEMENTWISE_TYPE_PROMOTION_KIND, +) -> Tuple[torch.dtype, torch.dtype]: + """ + Computes the computation and result dtypes for elementwise type promotion + on the given arguments and with the given elementwise type promotion kind. + + Note that not all inputs to an elementwise operation necessarily participate in type promotion. + For example, the "alpha" parameter of torch.add does not participate in type promotion, + although it may be cast to the Python type corresponding to the computation dtype that + the type promotion algorithm determines. 
+ + Default elementwise type promotion, which all other type promotion kinds tweak (see below), + first decides which of four ordered types to use: + + bool -> integer -> floating point -> complex + + The selected type is the "lowest" type in the above list such that all number arguments + have a weakly "lower" type and all tensor arguments have a weakly lower corresponding + type for their dtype. + + Once the type is determined, the particular result dtype is found. The dtypes are + partially ordered as follows: + + bool -> uint8, int8 -> int16 -> int32 -> int64 -> + float16, bfloat16 -> float32 -> float64 -> complex32 -> complex64 -> complex128 + + The result dtype is selected by: + - if no tensor's dtype has the same corresponding type as the one selected, + then the result dtype is the (default) dtype corresponding to the selected type + (for example, 1.5 + an integer tensor has a result dtype of the default floating point dtype) + - if the result type is complex then the dtype is: + - the default complex dtype if there are no floating point or complex tensors + - if there are floating point or complex tensors with one or more dimensions, then + the complex dtype corresponding to the highest corresponding complex dtype among those tensors + (for example, double + cfloat -> cdouble) + - if there are only floating point or complex tensors with zero dimensions, then + the complex dtype corresponding to the highest corresponding complex dtype among those tensors + - if the first two cases do not apply, the result dtype is the highest dtype among + all tensors with one or more dimensions of the output type, and if there are no such + tensors then it's the highest dtype among all tensors with zero dimensions of the output type + (for example, long + half -> half, even if the half tensor has zero dimensions) + + The "corresponding complex dtypes" are: + float16 -> complex32 + bfloat16 -> complex64 + float32 -> complex64 + float64 -> complex128 + complex32 -> complex32 + complex64 -> complex64 + complex128 -> complex128 + + The DEFAULT type promotion kind computes per above, and then uses the result dtype to pick a computation + dtype by mapping low precision floating point and complex dtypes as follows: + + float16 -> float32 + bfloat16 -> float32 + complex32 -> complex64 + + This is referred to as "op math", and the NO_OPMATH type promotion kind disables this mapping, making the + computation dtype the same as the result dtype when it's selected. NO_OPMATH is appropriate for kernels + which perform no mathematical operations on their tensors (see below for examples). + + The INT_TO_FLOAT type promotion kind maps boolean and integer result dtypes to the default floating point dtype, + and computation dtypes to the appropriate op math dtype. + + The COMPLEX_TO_FLOAT type promotion kind maps complex result dtypes to the corresponding float dtype, following this + mapping: + + complex32 -> float16 + complex64 -> float32 + complex128 -> float64 + + Note that COMPLEX_TO_FLOAT derives the computation dtype as the DEFAULT setting does. + + The BOOL_TO_LONG type promotion kind maps boolean computation and result dtypes to long. + + The ALWAYS_BOOL type promotion kind always sets the result dtype to bool. 
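+    For example, with ALWAYS_BOOL an equality comparison of two float16 tensors computes in float32 (op math) and produces a bool result dtype.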
+ + Example operators for each type promotion option: + DEFAULT : add + NO_OPMATH : where, nextafter, cat + INT_TO_FLOAT : sin + COMPLEX_TO_FLOAT : abs + BOOL_TO_LONG : pow + ALWAYS_BOOL : eq + + """ + + args = tuple(x for x in _args if x is not None) + + highest_type: type = bool + + # Import sympy locally, as importing it eagerly at a module level is too slow + # See https://dev-discuss.pytorch.org/t/delving-into-what-happens-when-you-import-torch/1589 + import sympy + + for x in args: + if not isinstance(x, (Number, TensorLike, sympy.Symbol)): + msg = f"Unexpected type {str(type(x))} when computing elementwise type promotion!" + raise ValueError(msg) + + if isinstance(x, Number): + highest_type = get_higher_type(highest_type, number_type(x)) + elif isinstance(x, sympy.Symbol): + highest_type = get_higher_type(highest_type, symbol_type(x)) + else: + # x is a TensorLike + highest_type = get_higher_type(highest_type, dtype_to_type(x.dtype)) + + result_dtype = None + + def _find_highest_dtype_filtered( + args, filter, *, float_as_complex=False + ) -> Optional[torch.dtype]: + zero_dim_tensor_dtype = None + one_plus_dim_tensor_dtype = None + for x in args: + if isinstance(x, TensorLike) and filter(x.dtype): + _dtype = x.dtype + if float_as_complex and is_float_dtype(_dtype): + _dtype = corresponding_complex_dtype(_dtype) + if x.ndim == 0: + zero_dim_tensor_dtype = get_higher_dtype( + zero_dim_tensor_dtype, _dtype + ) + else: + # x.ndim > 0 + one_plus_dim_tensor_dtype = get_higher_dtype( + one_plus_dim_tensor_dtype, _dtype + ) + + # Prefers dtype of tensors with one or more dimensions + if one_plus_dim_tensor_dtype is not None: + return one_plus_dim_tensor_dtype + + return zero_dim_tensor_dtype + + if highest_type is float: + result_dtype = _find_highest_dtype_filtered(args, is_float_dtype) + result_dtype = ( + torch.get_default_dtype() if result_dtype is None else result_dtype + ) + elif highest_type is complex: + result_dtype = _find_highest_dtype_filtered( + args, + lambda x: is_float_dtype(x) or is_complex_dtype(x), + float_as_complex=True, + ) + if result_dtype is None: + result_dtype = corresponding_complex_dtype(torch.get_default_dtype()) + elif highest_type is int: + result_dtype = _find_highest_dtype_filtered(args, is_integer_dtype) + result_dtype = torch.long if result_dtype is None else result_dtype + else: + # highest_type is bool + result_dtype = torch.bool + + if type_promotion_kind is ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT: + return get_computation_dtype(result_dtype), result_dtype + elif type_promotion_kind is ELEMENTWISE_TYPE_PROMOTION_KIND.NO_OPMATH: + return result_dtype, result_dtype + elif type_promotion_kind is ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT: + if is_integer_dtype(result_dtype) or is_boolean_dtype(result_dtype): + result_dtype = torch.get_default_dtype() + return get_computation_dtype(result_dtype), result_dtype + elif type_promotion_kind is ELEMENTWISE_TYPE_PROMOTION_KIND.COMPLEX_TO_FLOAT: + # NOTE: computation can still occur in a complex dtype + computation_dtype = get_computation_dtype(result_dtype) + if is_complex_dtype(result_dtype): + result_dtype = corresponding_real_dtype(result_dtype) + return computation_dtype, result_dtype + elif type_promotion_kind is ELEMENTWISE_TYPE_PROMOTION_KIND.BOOL_TO_LONG: + if is_boolean_dtype(result_dtype): + return torch.long, torch.long + return get_computation_dtype(result_dtype), result_dtype + elif type_promotion_kind is ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL: + return get_computation_dtype(result_dtype), 
torch.bool
+    else:
+        raise ValueError(f"Unknown type promotion kind {str(type_promotion_kind)}")
+
+
+def reduction_dtypes(
+    arg,
+    output_dtype_kind: REDUCTION_OUTPUT_TYPE_KIND,
+    dtype: Optional[torch.dtype] = None,
+) -> Tuple[torch.dtype, Optional[torch.dtype]]:
+    # even though some reductions, like amin or amax, don't strictly require type promotion,
+    # all the math ops (including comparisons) are still defined only for a computation type,
+    # so promotion will still happen. We are doing it explicitly here
+    inp_dtype = dtype if dtype is not None else arg.dtype
+    computation_dtype = get_computation_dtype(inp_dtype)
+    if (
+        output_dtype_kind == REDUCTION_OUTPUT_TYPE_KIND.SAME
+        or output_dtype_kind == REDUCTION_OUTPUT_TYPE_KIND.COMPLEX_TO_FLOAT
+    ):
+        result_dtype = dtype if dtype else arg.dtype
+        if (
+            output_dtype_kind == REDUCTION_OUTPUT_TYPE_KIND.COMPLEX_TO_FLOAT
+            and is_complex_dtype(result_dtype)
+        ):
+            result_dtype = corresponding_real_dtype(result_dtype)
+    elif output_dtype_kind == REDUCTION_OUTPUT_TYPE_KIND.KEEP_PROMOTED_TYPE:
+        result_dtype = None
+    else:  # ALWAYS_BOOL
+        result_dtype = torch.bool
+    return computation_dtype, result_dtype
+
+
+# This function's logic is borrowed from the following functions defined in C++:
+# batched_matrix_contiguous_strides and contiguous_strides
+def make_contiguous_strides_for(
+    shape: ShapeType, row_major: bool = True
+) -> Tuple[int, ...]:
+    """
+    Returns the strides of a contiguous tensor if row_major=True.
+    If row_major=False, it returns the strides of a contiguous batch of Fortran-contiguous matrices.
+    This is often used when calling external libraries like BLAS/LAPACK/cuSolver...
+    """
+    # contiguous_strides from c10/util/strides.h
+    validate_shape(shape)
+    if not shape:
+        return ()
+
+    # TODO: Move this somewhere central?
+    def _is_singleton(s):
+        # check for SingletonSymNode
+        if not isinstance(s, torch.SymInt):
+            return False
+        if s.node.singleton_int() is not None:
+            return True
+
+        # check for SymInt wrapping a SingletonSymNode (fake-ifying causes this)
+        return (
+            s.node.is_symbolic()
+            and s.node.hint is not None
+            and isinstance(s.node.hint, torch.SymInt)
+            and s.node.hint.node.singleton_int() is not None
+        )
+
+    multiplier = 1
+    strides = []
+    for l in reversed(shape):
+        strides.append(multiplier)
+        multiplier *= l if _is_singleton(l) else sym_max(l, 1)
+
+    result = tuple(reversed(strides))
+
+    # batched_matrix_contiguous_strides from aten/src/ATen/native/LinearAlgebraUtils.h
+    if row_major:
+        return result
+    else:
+        if len(shape) < 2:
+            return result
+        return result[:-2] + (1, max(shape[-2], 1))
+
+
+def make_channels_last_1d_strides_for(shape: ShapeType) -> Tuple[int, ...]:
+    torch._check(
+        len(shape) == 3,
+        lambda: "Only tensors of rank 3 can use the channels_last_1d memory format",
+    )
+
+    multiplier = 1
+    strides = [0] * 3
+    for idx in (1, -1, 0):
+        # NOTE: intentional divergence from make_contiguous_strides_for
+        # This is consistent with eager
+        strides[idx] = multiplier
+        multiplier *= shape[idx]
+
+    return tuple(strides)
+
+
+def make_channels_last_2d_strides_for(shape: ShapeType) -> Tuple[int, ...]:
+    # TODO: maybe inform the user of channels_last_3d if rank of the tensor is 5?
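+    # e.g. for shape (N, C, H, W) this produces strides (H*W*C, 1, W*C, C)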
+ torch._check( + len(shape) == 4, + lambda: "Only tensors of rank 4 can use the channels_last memory format", + ) + + multiplier = 1 + strides = [0] * 4 + for idx in (1, -1, -2, 0): + # NOTE: intentionally divergence from make_contiguous_strides_for + # This is consistent with eager + strides[idx] = multiplier + multiplier *= shape[idx] + + return tuple(strides) + + +def make_channels_last_3d_strides_for(shape: ShapeType) -> Tuple[int, ...]: + torch._check( + len(shape) == 5, + lambda: "Only tensors of rank 5 can use the channels_last_3d memory format", + ) + + multiplier = 1 + strides = [0] * 5 + for idx in (1, -1, -2, -3, 0): + # NOTE: intentionally divergence from make_contiguous_strides_for + # This is consistent with eager + strides[idx] = multiplier + multiplier *= shape[idx] + + return tuple(strides) + + +def make_channels_last_strides_for(shape: ShapeType) -> Tuple[int, ...]: + ndim = len(shape) if isinstance(shape, Sequence) else 1 + if ndim == 3: + return make_channels_last_1d_strides_for(shape) + elif ndim == 4: + return make_channels_last_2d_strides_for(shape) + elif ndim == 5: + return make_channels_last_3d_strides_for(shape) + else: + raise RuntimeError( + f"no channels last format strides exist in {ndim} dimensions" + ) + + +def compute_reduction_output_shape( + shape: ShapeType, dimensions: Sequence +) -> Tuple[int, ...]: + for idx in dimensions: + validate_idx(len(shape), idx) + + new_shape = [] + for idx in range(len(shape)): + if idx in dimensions: + continue + + new_shape.append(shape[idx]) + + return tuple(new_shape) + + +def validate_no_repeating_dims(dims: Sequence): + if len(dims) != len(set(dims)): + raise RuntimeError("duplicate value in the list of dims") + + +def reduction_dims(shape: ShapeType, dims: Optional[Sequence]) -> Tuple[int, ...]: + if dims is None: + return tuple(range(len(shape))) + dims = tuple(canonicalize_dim(len(shape), idx) for idx in dims) + validate_no_repeating_dims(dims) + return dims + + +def set_correction( + unbiased: Optional[bool] = None, + correction: Optional[NumberType] = None, +) -> float: + if correction is not None and unbiased is not None: + raise RuntimeError("cannot specify both correction and unbiased arguments") + elif correction is None and unbiased is None: + correction = 1.0 + elif correction is None and unbiased is not None: + correction = 0.0 if unbiased is False else 1.0 + # NB: we don't actually support symint here, but it's harmless to accept + if not isinstance(correction, (IntLike, FloatLike)): + raise ValueError("correction argument should be integer or float") + if correction < 0: + raise ValueError("correction argument should be non-negative") + return sym_float(correction) + + +def compute_required_storage_length( + shape: ShapeType, strides: StrideType, storage_offset: int +) -> int: + """Computes the minimum storage size to hold the given tensor geometry. 
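+    For a non-empty shape this is 1 + storage_offset + sum((size - 1) * stride) over all dimensions; it is 0 when the shape contains a zero-sized dimension.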
+ + Example + ======= + + This is the size of a newly allocated tensor's storage, in units of elements + + >>> t = torch.empty((10, 20)) + >>> compute_required_storage_length(t.shape, t.stride(), t.storage_offset()) + 200 + + >>> # xdoctest: +SKIP(failing) + >>> t2 = torch.empty_strided((1, 2, 3), (5, 7, 11)) + >>> size = compute_required_storage_length(t2.shape, t2.stride(), t2.storage_offset()) + >>> size == t.storage().size() + True + + A valid tensor may have a larger storage size, but never smaller + + >>> slice = torch.empty(100)[20:40] + >>> slice.storage().size() + 100 + + >>> compute_required_storage_length(slice.shape, slice.stride(), slice.storage_offset()) + 40 + + """ + # Short-circuits if the shape has no elements + if reduce(operator.mul, shape, 1) == 0: + return 0 + + max_offset = sum((x - 1) * y for x, y in zip(shape, strides)) + # +1 to account for the first element which offsets are taken from + return 1 + storage_offset + max_offset + + +def check_in_bounds_for_storage( + a: torch.TypedStorage, shape: ShapeType, strides: StrideType, storage_offset: int +): + """ + Determines if the given shape, strides, and offset are valid for the given storage. + """ + + required_length = compute_required_storage_length(shape, strides, storage_offset) + if a.size() < required_length: + msg = ( + "Can't view a storage of size {} with an offset of {}, shape of {}, and strides of {}, " + "which requires a storage of size {}".format( + a.size(), storage_offset, str(shape), str(strides), required_length + ) + ) + raise ValueError(msg) + + +# NOTE: This function should ideally be removed, but some Meta internal models +# packaged with `torch.package` are using it, so it will have to be removed +# at some point in the future when those models no longer use this function. +def check( + b: bool, s: Callable[[], str], exc_type: Type[Exception] = RuntimeError +) -> None: + """ + Helper function for raising an error_type (default: RuntimeError) if a boolean condition fails. + Error message is a callable producing a string (to avoid wasting time + string formatting in non-error case, and also to make it easier for torchdynamo + to trace.) + + .. note:: This function is planned for removal in the future. Please use + `torch._check*` functions instead. + """ + warnings.warn( + DeprecationWarning( + "'torch._prims_common.check' will be removed in the future. 
Please use " + "'torch._check*' functions instead" + ) + ) + torch._check_with(exc_type, b, s) + + +# This combines is_channels_last_strides_2d and is_channels_last_strides_3d in +# c10/core/MemoryFormat.h into one function +def are_strides_like_channels_last( + shape: Sequence[int], strides: Sequence[int] +) -> bool: + ndim = len(shape) + + if ndim == 4: + # Check for channels_last_2d + dim_order = [1, 3, 2, 0] + elif ndim == 5: + # Check for channels_last_3d + dim_order = [1, 4, 3, 2, 0] + else: + return False + + if strides[1] == 0: + return False + + min = 0 + for d in dim_order: + if shape[d] == 0: + return False + if strides[d] < min: + return False + if d == 0 and min == strides[1]: + return False + min = strides[d] + if strides[d] > 1: + min *= shape[d] + return True + + +def suggest_memory_format(x: TensorLikeType) -> torch.memory_format: + if x.layout != torch.strided: + return torch.contiguous_format + + if are_strides_like_channels_last(x.shape, x.stride()): + return torch.channels_last if x.ndim == 4 else torch.channels_last_3d + + return torch.contiguous_format + + +def prod(xs: Sequence[NumberType]) -> NumberType: + """Product of elements in input sequence. Returns 1 for empty sequence""" + return reduce(operator.mul, xs, 1) + + +def is_expandable_to(shape: ShapeType, desired: ShapeType) -> bool: + """Checks if a shape can be expanded to another shape. + This is equivalent to checking if the two shapes are broadcastable. + """ + # This is a Python implementation of + # aten/src/ATen/ExpandUtils.h:is_expandable_to + if len(shape) > len(desired): + return False + for i in range(len(shape)): + if shape[-i - 1] != desired[-i - 1] and shape[-i - 1] != 1: + return False + return True + + +def mask_tensor(mask: TensorLikeType, t: TensorLikeType): + """ + Similar to torch.where(mask, t, 0) but if t is boolean, + result is also boolean and not promoted to int. + """ + # torch.where(mask, t, False) is equivalent + # but feels hacky and might break in the future + if t.dtype is torch.bool: + return mask.logical_and(t) + else: + return torch.where(mask, t, 0) + + +def get_aten_op(fn: Callable, name: str): + """ + Given the __module__ of reference and its name, it returns + (our best guess of) the ATen name of the associated operation + + Note: In ATen, the __name__ of a function within a module often + starts by the module name. E.g. linalg_eigh, or special_zeta + """ + module = fn.__module__ + prefix = "torch._refs" + assert module.startswith(prefix) + module = module[len(prefix) :] + # We want to go from .special / .nn.functional + # to special and special_ / nn_functional_ + if module: + module = module[1:] + module = module.replace(".", "_") + module = module + "_" + return getattr(torch._ops.ops.aten, f"{module}{name}") + + +def dtype_or_default(dtype: Optional[torch.dtype]) -> torch.dtype: + return dtype if dtype is not None else torch.get_default_dtype() + + +def device_or_default(device: Optional[DeviceLikeType]) -> DeviceLikeType: + return device if device is not None else torch.device("cpu") + + +def layout_or_default(layout: Optional[torch.layout]) -> torch.layout: + return layout if layout is not None else torch.strided + + +def clone_preserve_strides(x): + needed_size = compute_required_storage_length( + x.size(), x.stride(), x.storage_offset() + ) + # Our eager implementations for *_scatter ops are all primitives w.r.t autograd, + # so these as_strided() calls are not seen by autograd. + # We need to mimic this behavior in our ref/prim implementations. 
+ # TODO: a better way to handle this would be with a new op, "_unsafe_as_strided" + # We should revisit this when we add a compositional as_strided op, + # and also as part of https://github.com/pytorch/pytorch/issues/90507 + try: + old = torch._C._dispatch_tls_is_dispatch_key_excluded( + torch._C.DispatchKey.ADInplaceOrView + ) + torch._C._dispatch_tls_set_dispatch_key_excluded( + torch._C.DispatchKey.ADInplaceOrView, True + ) + buffer = torch.as_strided(x, (needed_size,), (1,), 0).clone() + return torch.as_strided(buffer, x.size(), x.stride(), x.storage_offset()) + finally: + torch._C._dispatch_tls_set_dispatch_key_excluded( + torch._C.DispatchKey.ADInplaceOrView, old + ) + + +def alert_not_deterministic(caller: str): + if torch.are_deterministic_algorithms_enabled(): + if torch.is_deterministic_algorithms_warn_only_enabled(): + warnings.warn( + f"{caller} does not have a deterministic implementation, but you set " + f"'torch.use_deterministic_algorithms(True, warn_only=True)'. " + f"You can file an issue at https://github.com/pytorch/pytorch/issues " + f"to help us prioritize adding deterministic support for this operation." + ) + else: + torch._check( + False, + lambda: ( + f"{caller} does not have a deterministic implementation, but you set " + f"'torch.use_deterministic_algorithms(True)'. You can turn off " + f"determinism just for this operation, or you can use the " + f"'warn_only=True' option, if that's acceptable for your application. " + f"You can also file an issue at https://github.com/pytorch/pytorch/issues " + f"to help us prioritize adding deterministic support for this operation." + ), + ) + + +class CUDARngStateHelper: + @staticmethod + def get_torch_state_as_tuple(fake_mode=nullcontext()): + if not torch.cuda.is_available(): + raise RuntimeError("CUDA not available") + + with fake_mode: + seed = torch.tensor(torch.cuda.initial_seed()) + offset = torch.tensor(torch.cuda._get_rng_state_offset()) + return seed, offset + + @staticmethod + def set_torch_state_tensor(seed, offset): + # Rng state is [64-bit seed, 64-bit offset] + seed_portion = seed.reshape([1]).view(torch.uint8) + offset_portion = offset.reshape([1]).view(torch.uint8) + new_state = torch.cat([seed_portion, offset_portion]) + torch.cuda.set_rng_state(new_state) + + @staticmethod + def set_new_offset(relative_offset): + torch.cuda._set_rng_state_offset(relative_offset.item()) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_prims_common/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/_prims_common/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..871b85901477afc9a8d4b0924f371fe695a5ceae Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/_prims_common/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_prims_common/__pycache__/wrappers.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/_prims_common/__pycache__/wrappers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..279ff954b52db34f08a8eef14eabd76a21e91b0f Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/_prims_common/__pycache__/wrappers.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/_prims_common/wrappers.py b/env-llmeval/lib/python3.10/site-packages/torch/_prims_common/wrappers.py new file mode 100644 index 
0000000000000000000000000000000000000000..331d036fb934098a04e11453c36f0bb31f654168 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/_prims_common/wrappers.py @@ -0,0 +1,399 @@ +import inspect +import warnings +from functools import wraps +from itertools import chain + +from typing import Callable, NamedTuple, Optional, overload, Sequence, Tuple + +import torch +import torch._prims_common as utils +from torch._prims_common import ( + CustomOutParamAnnotation, + ELEMENTWISE_TYPE_PROMOTION_KIND, + Number, + NumberType, + ShapeType, + TensorLike, + TensorLikeType, +) +from torch.utils import _pytree as pytree +from torch.utils._pytree import tree_flatten, tree_unflatten + + +@overload +def _maybe_convert_to_dtype(a: TensorLikeType, dtype: torch.dtype) -> TensorLikeType: + pass + + +@overload +def _maybe_convert_to_dtype(a: NumberType, dtype: torch.dtype) -> NumberType: + pass + + +@overload +def _maybe_convert_to_dtype(a: Sequence, dtype: torch.dtype) -> Sequence: + pass + + +@overload +def _maybe_convert_to_dtype(a: None, dtype: torch.dtype) -> None: + pass + + +# TODO: implement ref.cast with an option to enforce safe casting +def _maybe_convert_to_dtype(a, dtype): + if isinstance(a, TensorLike): + if a.dtype != dtype: + return a.to(dtype) + return a + if isinstance(a, Number): + return utils.dtype_to_type_ctor(dtype)(a) # type: ignore[arg-type] + if isinstance(a, Sequence): + return tuple(_maybe_convert_to_dtype(x, dtype) for x in a) + # Passthrough None because some functions wrapped with type promotion + # wrapper might have optional args + if a is None: + return None + + raise ValueError(f"Received type {type(a)} that is neither a tensor nor a number!") + + +def _maybe_convert_to_type(a: NumberType, typ: type) -> NumberType: + if not isinstance(a, Number): + msg = f"Found unknown type {type(a)} when trying to convert scalars!" + raise ValueError(msg) + if not utils.is_weakly_lesser_type(type(a), typ): + msg = f"Scalar {a} of type {type(a)} cannot be safely cast to type {typ}!" + raise ValueError(msg) + + return typ(a) + + +def _annotation_has_type(*, typ, annotation): + if hasattr(annotation, "__args__"): + for a in annotation.__args__: + if _annotation_has_type(typ=typ, annotation=a): + return True + return False + + return typ is annotation + + +class elementwise_type_promotion_wrapper: + """ + Adds elementwise type promotion to a Python reference implementation. + + Takes two kwargs, type_promoting_args and type_promotion_kind. + + type_promoting_args must be a string Sequence specifying the argument names of all + arguments that participate in type promotion (and should be type promoted). If the + arg specifies a Sequence-type then every element of the Sequence will participate in + type promotion. + + type_promotion_kind must be one of the kinds specified by ELEMENTWISE_TYPE_PROMOTION_KIND. + See its documentation for details. + + The return_dtype will be coerced to the wrapped function's dtype arg if it is available and + not None. + + Other type promotion behavior, like validating the Python type of scalar arguments, must + be handled separately.
+ """ + + def __init__( + self, + *, + type_promotion_kind: ELEMENTWISE_TYPE_PROMOTION_KIND, + type_promoting_args: Optional[Sequence[str]] = None, + ): + self.type_promoting_arg_names = type_promoting_args + self.type_promotion_kind = type_promotion_kind + + def __call__(self, fn: Callable) -> Callable: + sig = inspect.signature(fn) + + @wraps(fn) + def _fn(*args, **kwargs): + bound = sig.bind(*args, **kwargs) + type_promoting_args = tuple( + bound.arguments[x] + for x in self.type_promoting_arg_names # type: ignore[union-attr] + if x in bound.arguments.keys() + ) + + flattened_type_promoting_args = pytree.arg_tree_leaves(*type_promoting_args) + compute_dtype, result_dtype = utils.elementwise_dtypes( + *flattened_type_promoting_args, + type_promotion_kind=self.type_promotion_kind, + ) + + promoted_args = { + x: _maybe_convert_to_dtype(bound.arguments[x], compute_dtype) + for x in self.type_promoting_arg_names # type: ignore[union-attr] + if x in bound.arguments.keys() + } + bound.arguments.update(promoted_args) + + result = fn(**bound.arguments) + + # Override the return_dtype if a dtype arg is present and not None + if "dtype" in bound.arguments: + maybe_dtype = bound.arguments["dtype"] + if maybe_dtype: # dtype cannot be None + result_dtype = maybe_dtype + + if isinstance(result, TensorLike): + return _maybe_convert_to_dtype(result, result_dtype) + if isinstance(result, Sequence): + return tuple(_maybe_convert_to_dtype(x, result_dtype) for x in result) + raise AssertionError(f"Unhandled result type: {type(result)}") + + _fn.__signature__ = sig # type: ignore[attr-defined] + return _fn + + +# Returns True if resize is necessary +def _resize_output_check(out: TensorLikeType, shape: ShapeType): + # If the shapes are correct there's nothing to do + if utils.same_shape(out.shape, shape): + return False + if out.numel() != 0: + msg = ( + f"An output with one or more elements was resized since it had shape {str(out.shape)} " + "which does not match the required output shape {str(shape)}. " + "This behavior is deprecated, and in a future PyTorch release outputs will not " + "be resized unless they have zero elements. " + "You can explicitly reuse an out tensor t by resizing it, inplace, to zero elements with t.resize_(0)." 
+ ) + warnings.warn(msg) + return True + + +# TODO: handle tuples of tensors +def _maybe_resize_out(out: TensorLikeType, shape: ShapeType): + if _resize_output_check(out, shape): + return out.resize_(shape) + else: + return out + + +def _safe_copy_out( + *, copy_from: TensorLikeType, copy_to: TensorLikeType, exact_dtype: bool = False +): + # Checks same device + if copy_from.device != copy_to.device: + msg = "Attempting to copy from device {} to device {}, but cross-device copies are not allowed!".format( + copy_from.device, copy_to.device + ) + raise RuntimeError(msg) + + # Checks safe cast + if exact_dtype: + torch._check( + copy_from.dtype == copy_to.dtype, + lambda: f"Expected out tensor to have dtype {copy_from.dtype} " + f"but got {copy_to.dtype} instead", + ) + else: + torch._check( + utils.can_safe_cast_to(cast_from=copy_from.dtype, cast_to=copy_to.dtype), + lambda: f"Attempting to cast from {copy_from.dtype} to out tensor with dtype {copy_to.dtype}, " + "but this can't be cast because it is not safe!", + ) + + return copy_to.copy_(copy_from) + + +def out_wrapper(*out_names: str, exact_dtype: bool = False): + # The wrapped function needs to convert the output parameters to ensure + # compatibility between the Python API (which always uses "out" as the + # parameter name and may be a tuple) and the ATen API (which may have + # multiple output parameters and use different parameter names such as + # "grad_input", "indices" or "values".) + + default_out_names = ("out",) + if len(out_names) == 0: + # Use the default out name + out_names = default_out_names + + is_tensor = len(out_names) == 1 + + def _out_wrapper(fn: Callable) -> Callable: + """ + Adds the out parameter to a Python reference. + """ + out_type = ( + TensorLikeType + if is_tensor + else Tuple[tuple(TensorLikeType for _ in range(len(out_names)))] + ) + return_type = ( + TensorLikeType + if is_tensor + else NamedTuple( + f"return_types_{fn.__name__}", [(o, TensorLikeType) for o in out_names] + ) + ) + + sig = inspect.signature(fn) + factory_kwargs = ("device", "dtype") + is_factory_fn = all(p in sig.parameters for p in factory_kwargs) + + @wraps(fn) + def _fn(*args, out=None, **kwargs): + if is_factory_fn and out is not None: + for k in factory_kwargs: + out_attr = getattr(out, k) + if k not in kwargs: + kwargs[k] = out_attr + + result = fn(*args, **kwargs) + assert ( + isinstance(result, TensorLike) + and is_tensor + or isinstance(result, Tuple) # type: ignore[arg-type] + and len(result) == len(out_names) + ) + if out is not None: + # Naively you might expect this assert to be true, but + # it's not: + # + # assert type(out) == type(result) + # + # The reason is that functions under this wrapper can + # get registered to the Meta dispatch key, and that + # means they can be executed in a context where tensor + # subclasses are disabled (with no_dispatch), which is a + # handy way for an is-a tensor subclass (e.g., + # FakeTensor) to have the normal meta backend create a + # meta tensor, to be wrapped once it gets returned. + # In this situation, you will get a FakeTensor as + # the output tensor, but not the result--which will + # be a normal meta tensor, but this is perfectly + # harmless.
+ if is_tensor: + assert isinstance(out, TensorLike) + # These two operations are done in-place + _maybe_resize_out(out, result.shape) + _safe_copy_out(copy_from=result, copy_to=out, exact_dtype=exact_dtype) # type: ignore[arg-type] + else: + assert isinstance(out, Tuple) # type: ignore[arg-type] + torch._check_type( + len(out) == len(result), + lambda: f"expected tuple of {len(result)} elements but got {len(out)}", + ) + for r, o in zip(result, out): + # These two operations are done in-place + _maybe_resize_out(o, r.shape) + _safe_copy_out(copy_from=r, copy_to=o, exact_dtype=exact_dtype) # type: ignore[arg-type] + else: + out = result + # mypy does not see through the definition of out_type given that it's in a different scope + return out if is_tensor else return_type(*out) # type: ignore[operator] + + out_param = inspect.Parameter( + "out", + kind=inspect.Parameter.KEYWORD_ONLY, + default=None, + annotation=out_type, + ) + # Mark that the function now returns a tuple + assert isinstance(sig.return_annotation, str) or sig.return_annotation in ( + sig.empty, + out_type, + ) + params = chain(sig.parameters.values(), (out_param,)) + _fn.__signature__ = inspect.Signature( # type: ignore[attr-defined] + parameters=params, return_annotation=return_type # type: ignore[arg-type] + ) + + _fn.__annotations__ = fn.__annotations__ + _fn.__annotations__["out"] = out_type + _fn.__annotations__["return"] = return_type + + # In the special case of having a single tensor out parameter with a + # name other than out, add a special annotation to name the parameter + if is_tensor and out_names != default_out_names: + _fn.__annotations__[CustomOutParamAnnotation] = out_names[0] + + # Add an indicator attribute that can be used in special cases + # where having a function wrapped by `out_wrapper` is not desirable e.g. + # jit + _fn._torch_decompositions_out_wrapper = f"This function is wrapped by {out_wrapper.__module__}.out_wrapper" # type: ignore[attr-defined] + + return _fn + + return _out_wrapper + + +def _maybe_remove_out_wrapper(fn: Callable): + return inspect.unwrap( + fn, + stop=lambda f: not hasattr(f, "_torch_decompositions_out_wrapper"), + ) + + +def backwards_not_supported(prim): + def redispatch_prim(args, kwargs): + with torch._C._AutoDispatchBelowAutograd(): + old = torch._C._dispatch_tls_is_dispatch_key_excluded( + torch._C.DispatchKey.ADInplaceOrView + ) + return prim(*args, **kwargs) + + class BackwardsNotSupported(torch.autograd.Function): + @staticmethod + def forward(ctx, args_spec, *flat_args): + args, kwargs = tree_unflatten(flat_args, args_spec) # type: ignore[arg-type] + return redispatch_prim(args, kwargs) + + @staticmethod + def backward(ctx, *args): + raise RuntimeError("backwards not supported on prim") + + @wraps(prim) + def _autograd_impl(*args, **kwargs): + flat_args, args_spec = tree_flatten((args, kwargs)) + if torch.is_grad_enabled() and any( + a.requires_grad for a in flat_args if isinstance(a, torch.Tensor) + ): + # TODO: There is a subtle bug here: prims like copy_to + # return their input argument after mutating it; and custom + # autograd function will incorrectly turn the result into + # a view which will fail test_python_ref_executor tests. + # At the moment, we sidestep this by observing that the + # unit tests don't ever try to run the executor with + # autograd, so we don't exercise the buggy case, but if + # you ever want to feed autograd through this, be aware + # of it! 
We need a way of properly implementing autograd + # for mutating operations in Python to do this. + return BackwardsNotSupported.apply(args_spec, *flat_args) + else: + return redispatch_prim(args, kwargs) + + return _autograd_impl + + +# TODO: when tracing this will add torch tensors and not TensorMeta objects +# to the trace -- we should fix this by adding a tracing context and NumberMeta classes +# TODO: this wrapper is currently untested +def elementwise_unary_scalar_wrapper(fn: Callable) -> Callable: + """ + Allows unary operators that accept tensors to work with Python numbers. + """ + sig = inspect.signature(fn) + + @wraps(fn) + def _fn(*args, **kwargs): + if len(args) > 0 and isinstance(args[0], Number): + dtype = utils.type_to_dtype(type(args[0])) + args_ = list(args) + args_[0] = torch.tensor(args[0], dtype=dtype) + result = fn(*args_, **kwargs) + assert isinstance(result, torch.Tensor) + return result.item() + + return fn(*args, **kwargs) + + _fn.__signature__ = sig # type: ignore[attr-defined] + return _fn diff --git a/env-llmeval/lib/python3.10/site-packages/torch/cuda/__init__.py b/env-llmeval/lib/python3.10/site-packages/torch/cuda/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..99f5497529870cd47a8c993c0b5175d9e04af577 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/cuda/__init__.py @@ -0,0 +1,1421 @@ +r""" +This package adds support for CUDA tensor types. + +It implements the same function as CPU tensors, but they utilize +GPUs for computation. + +It is lazily initialized, so you can always import it, and use +:func:`is_available()` to determine if your system supports CUDA. + +:ref:`cuda-semantics` has more details about working with CUDA. +""" + + +import contextlib +import importlib +import os +import sys +import threading +import traceback +import warnings +from functools import lru_cache +from typing import Any, cast, List, Optional, Tuple, Union + +import torch +import torch._C +from torch.types import Device +from .. import device as _device +from .._utils import classproperty +from ._utils import _dummy_type, _get_device_index +from .graphs import ( + CUDAGraph, + graph, + graph_pool_handle, + is_current_stream_capturing, + make_graphed_callables, +) +from .streams import Event, ExternalStream, Stream + +try: + from torch._C import _cudart # type: ignore[attr-defined] +except ImportError: + _cudart = None + +_initialized = False +_tls = threading.local() +_initialization_lock = threading.Lock() +_queued_calls = [] # don't invoke these until initialization occurs +_is_in_bad_fork = getattr(torch._C, "_cuda_isInBadFork", lambda: False) +_device_t = Union[_device, str, int, None] + +_HAS_PYNVML = False +_PYNVML_ERR = None +try: + import pynvml # type: ignore[import] + + _HAS_PYNVML = True +except ImportError as err: + _PYNVML_ERR = err # sometimes a lib is installed but the import fails for some other reason, so we log the error for later + + +class _LazySeedTracker: + # Since seeding is memory-less, only track the latest seed. + # Note: `manual_seed_all` followed by `manual_seed` overwrites + # the seed on current device. We track the order of **latest** + # calls between these two API. 
+ def __init__(self): + self.manual_seed_all_cb = None + self.manual_seed_cb = None + self.call_order = [] + + def queue_seed_all(self, cb, traceback): + self.manual_seed_all_cb = (cb, traceback) + # update seed_all to be latest + self.call_order = [self.manual_seed_cb, self.manual_seed_all_cb] + + def queue_seed(self, cb, traceback): + self.manual_seed_cb = (cb, traceback) + # update seed to be latest + self.call_order = [self.manual_seed_all_cb, self.manual_seed_cb] + + def get_calls(self) -> List: + return self.call_order + + +_lazy_seed_tracker = _LazySeedTracker() + +# Define dummy _CudaDeviceProperties type if PyTorch was compiled without CUDA +if hasattr(torch._C, "_CudaDeviceProperties"): + _CudaDeviceProperties = torch._C._CudaDeviceProperties +else: + _CudaDeviceProperties = _dummy_type("_CudaDeviceProperties") # type: ignore[assignment, misc] + +if hasattr(torch._C, "_cuda_exchangeDevice"): + _exchange_device = torch._C._cuda_exchangeDevice +else: + + def _exchange_device(device: int) -> int: + if device < 0: + return -1 + raise RuntimeError("PyTorch was compiled without CUDA support") + + +if hasattr(torch._C, "_cuda_maybeExchangeDevice"): + _maybe_exchange_device = torch._C._cuda_maybeExchangeDevice +else: + + def _maybe_exchange_device(device: int) -> int: + if device < 0: + return -1 + raise RuntimeError("PyTorch was compiled without CUDA support") + + +# Global variables dynamically populated by native code +has_magma: bool = False +has_half: bool = False +default_generators: Tuple[torch._C.Generator] = () # type: ignore[assignment] + + +def _is_compiled() -> bool: + r"""Return true if compile with CUDA support.""" + return hasattr(torch._C, "_cuda_getDeviceCount") + + +def _nvml_based_avail() -> bool: + return os.getenv("PYTORCH_NVML_BASED_CUDA_CHECK") == "1" + + +def is_available() -> bool: + r"""Return a bool indicating if CUDA is currently available.""" + if not _is_compiled(): + return False + if _nvml_based_avail(): + # The user has set an env variable to request this availability check that attempts to avoid fork poisoning by + # using NVML at the cost of a weaker CUDA availability assessment. Note that if NVML discovery/initialization + # fails, this assessment falls back to the default CUDA Runtime API assessment (`cudaGetDeviceCount`) + return device_count() > 0 + else: + # The default availability inspection never throws and returns 0 if the driver is missing or can't + # be initialized. This uses the CUDA Runtime API `cudaGetDeviceCount` which in turn initializes the CUDA Driver + # API via `cuInit` + return torch._C._cuda_getDeviceCount() > 0 + + +def is_bf16_supported(): + r"""Return a bool indicating if the current CUDA/ROCm device supports dtype bfloat16.""" + # Check for ROCm, if true return true, no ROCM_VERSION check required, + # since it is supported on AMD GPU archs. + if torch.version.hip: + return True + + cu_vers = torch.version.cuda + if cu_vers is not None: + cuda_maj_decide = int(cu_vers.split(".")[0]) >= 11 + else: + cuda_maj_decide = False + return ( + torch.cuda.get_device_properties(torch.cuda.current_device()).major >= 8 + and cuda_maj_decide + ) + + +def _sleep(cycles): + torch._C._cuda_sleep(cycles) + + +def _check_capability(): + incorrect_binary_warn = """ + Found GPU%d %s which requires CUDA_VERSION >= %d to + work properly, but your PyTorch was compiled + with CUDA_VERSION %d. 
Please install the correct PyTorch binary + using instructions from https://pytorch.org + """ + + old_gpu_warn = """ + Found GPU%d %s which is of cuda capability %d.%d. + PyTorch no longer supports this GPU because it is too old. + The minimum cuda capability supported by this library is %d.%d. + """ + + if torch.version.cuda is not None: # on ROCm we don't want this check + CUDA_VERSION = torch._C._cuda_getCompiledVersion() + for d in range(device_count()): + capability = get_device_capability(d) + major = capability[0] + minor = capability[1] + name = get_device_name(d) + current_arch = major * 10 + minor + min_arch = min( + (int(arch.split("_")[1]) for arch in torch.cuda.get_arch_list()), + default=35, + ) + if current_arch < min_arch: + warnings.warn( + old_gpu_warn + % (d, name, major, minor, min_arch // 10, min_arch % 10) + ) + + +def _check_cubins(): + incompatible_device_warn = """ +{} with CUDA capability sm_{} is not compatible with the current PyTorch installation. +The current PyTorch install supports CUDA capabilities {}. +If you want to use the {} GPU with PyTorch, please check the instructions at https://pytorch.org/get-started/locally/ +""" + if torch.version.cuda is None: # on ROCm we don't want this check + return + arch_list = get_arch_list() + if len(arch_list) == 0: + return + supported_sm = [int(arch.split("_")[1]) for arch in arch_list if "sm_" in arch] + for idx in range(device_count()): + cap_major, cap_minor = get_device_capability(idx) + # NVIDIA GPU compute architectures are backward compatible within major version + supported = any(sm // 10 == cap_major for sm in supported_sm) + if not supported: + device_name = get_device_name(idx) + capability = cap_major * 10 + cap_minor + warnings.warn( + incompatible_device_warn.format( + device_name, capability, " ".join(arch_list), device_name + ) + ) + + +def is_initialized(): + r"""Return whether PyTorch's CUDA state has been initialized.""" + return _initialized and not _is_in_bad_fork() + + +def _lazy_call(callable, **kwargs): + if is_initialized(): + callable() + else: + # TODO(torch_deploy): this accesses linecache, which attempts to read the + # file system to get traceback info. Patch linecache or do something + # else here if this ends up being important. + global _lazy_seed_tracker + if kwargs.get("seed_all", False): + _lazy_seed_tracker.queue_seed_all(callable, traceback.format_stack()) + elif kwargs.get("seed", False): + _lazy_seed_tracker.queue_seed(callable, traceback.format_stack()) + else: + # Don't store the actual traceback to avoid memory cycle + _queued_calls.append((callable, traceback.format_stack())) + + +_lazy_call(_check_capability) +_lazy_call(_check_cubins) + + +class DeferredCudaCallError(Exception): + pass + + +OutOfMemoryError = torch._C._OutOfMemoryError + + +def init(): + r"""Initialize PyTorch's CUDA state. + + You may need to call this explicitly if you are interacting with + PyTorch via its C API, as Python bindings for CUDA functionality + will not be available until this initialization takes place. + Ordinary users should not need this, as all of PyTorch's CUDA methods + automatically initialize CUDA state on-demand. + + Does nothing if the CUDA state is already initialized. + """ + _lazy_init() + + +def _lazy_init(): + global _initialized, _queued_calls + if is_initialized() or hasattr(_tls, "is_initializing"): + return + with _initialization_lock: + # We be double-checked locking, boys! This is OK because + # the above test was GIL protected anyway. 
The inner test + # is for when a thread blocked on some other thread which was + # doing the initialization; when they get the lock, they will + # find there is nothing left to do. + if is_initialized(): + return + # It is important to prevent other threads from entering _lazy_init + # immediately, while we are still guaranteed to have the GIL, because some + # of the C calls we make below will release the GIL + if _is_in_bad_fork(): + raise RuntimeError( + "Cannot re-initialize CUDA in forked subprocess. To use CUDA with " + "multiprocessing, you must use the 'spawn' start method" + ) + if not hasattr(torch._C, "_cuda_getDeviceCount"): + raise AssertionError("Torch not compiled with CUDA enabled") + if _cudart is None: + raise AssertionError( + "libcudart functions unavailable. It looks like you have a broken build?" + ) + # This function throws if there's a driver initialization error, no GPUs + # are found or any other error occurs + if "CUDA_MODULE_LOADING" not in os.environ: + os.environ["CUDA_MODULE_LOADING"] = "LAZY" + torch._C._cuda_init() + # Some of the queued calls may reentrantly call _lazy_init(); + # we need to just return without initializing in that case. + # However, we must not let any *other* threads in! + _tls.is_initializing = True + + for calls in _lazy_seed_tracker.get_calls(): + if calls: + _queued_calls.append(calls) + + try: + for queued_call, orig_traceback in _queued_calls: + try: + queued_call() + except Exception as e: + msg = ( + f"CUDA call failed lazily at initialization with error: {str(e)}\n\n" + f"CUDA call was originally invoked at:\n\n{''.join(orig_traceback)}" + ) + raise DeferredCudaCallError(msg) from e + finally: + delattr(_tls, "is_initializing") + _initialized = True + + +def cudart(): + _lazy_init() + return _cudart + + +class cudaStatus: + SUCCESS: int = 0 + ERROR_NOT_READY: int = 34 + + +class CudaError(RuntimeError): + def __init__(self, code: int) -> None: + msg = _cudart.cudaGetErrorString(_cudart.cudaError(code)) + super().__init__(f"{msg} ({code})") + + +def check_error(res: int) -> None: + if res != _cudart.cudaError.success: + raise CudaError(res) + + +class _DeviceGuard: + def __init__(self, index: int): + self.idx = index + self.prev_idx = -1 + + def __enter__(self): + self.prev_idx = torch.cuda._exchange_device(self.idx) + + def __exit__(self, type: Any, value: Any, traceback: Any): + self.idx = torch.cuda._maybe_exchange_device(self.prev_idx) + return False + + +class device: + r"""Context-manager that changes the selected device. + + Args: + device (torch.device or int): device index to select. It's a no-op if + this argument is a negative integer or ``None``. + """ + + def __init__(self, device: Any): + self.idx = _get_device_index(device, optional=True) + self.prev_idx = -1 + + def __enter__(self): + self.prev_idx = torch.cuda._exchange_device(self.idx) + + def __exit__(self, type: Any, value: Any, traceback: Any): + self.idx = torch.cuda._maybe_exchange_device(self.prev_idx) + return False + + +class device_of(device): + r"""Context-manager that changes the current device to that of given object. + + You can use both tensors and storages as arguments. If a given object is + not allocated on a GPU, this is a no-op. + + Args: + obj (Tensor or Storage): object allocated on the selected device. + """ + + def __init__(self, obj): + idx = obj.get_device() if obj.is_cuda else -1 + super().__init__(idx) + + +def set_device(device: _device_t) -> None: + r"""Set the current device. 
+ + Usage of this function is discouraged in favor of :any:`device`. In most + cases it's better to use ``CUDA_VISIBLE_DEVICES`` environmental variable. + + Args: + device (torch.device or int): selected device. This function is a no-op + if this argument is negative. + """ + device = _get_device_index(device) + if device >= 0: + torch._C._cuda_setDevice(device) + + +def get_device_name(device: Optional[_device_t] = None) -> str: + r"""Get the name of a device. + + Args: + device (torch.device or int, optional): device for which to return the + name. This function is a no-op if this argument is a negative + integer. It uses the current device, given by :func:`~torch.cuda.current_device`, + if :attr:`device` is ``None`` (default). + + Returns: + str: the name of the device + """ + return get_device_properties(device).name + + +def get_device_capability(device: Optional[_device_t] = None) -> Tuple[int, int]: + r"""Get the cuda capability of a device. + + Args: + device (torch.device or int, optional): device for which to return the + device capability. This function is a no-op if this argument is + a negative integer. It uses the current device, given by + :func:`~torch.cuda.current_device`, if :attr:`device` is ``None`` + (default). + + Returns: + tuple(int, int): the major and minor cuda capability of the device + """ + prop = get_device_properties(device) + return prop.major, prop.minor + + +def get_device_properties(device: _device_t) -> _CudaDeviceProperties: + r"""Get the properties of a device. + + Args: + device (torch.device or int or str): device for which to return the + properties of the device. + + Returns: + _CudaDeviceProperties: the properties of the device + """ + _lazy_init() # will define _get_device_properties + device = _get_device_index(device, optional=True) + if device < 0 or device >= device_count(): + raise AssertionError("Invalid device id") + return _get_device_properties(device) # type: ignore[name-defined] + + +def can_device_access_peer(device: _device_t, peer_device: _device_t) -> bool: + r"""Check if peer access between two devices is possible.""" + _lazy_init() + device = _get_device_index(device, optional=True) + peer_device = _get_device_index(peer_device) + if device < 0 or device >= device_count(): + raise AssertionError("Invalid device id") + if peer_device < 0 or peer_device >= device_count(): + raise AssertionError("Invalid peer device id") + return torch._C._cuda_canDeviceAccessPeer(device, peer_device) + + +class StreamContext: + r"""Context-manager that selects a given stream. + + All CUDA kernels queued within its context will be enqueued on a selected + stream. + + Args: + Stream (Stream): selected stream. This manager is a no-op if it's + ``None``. + .. note:: Streams are per-device. 
+ """ + cur_stream: Optional["torch.cuda.Stream"] + + def __init__(self, stream: Optional["torch.cuda.Stream"]): + self.stream = stream + self.idx = _get_device_index(None, True) + if not torch.jit.is_scripting(): + if self.idx is None: + self.idx = -1 + + self.src_prev_stream = ( + None if not torch.jit.is_scripting() else torch.cuda.default_stream(None) + ) + self.dst_prev_stream = ( + None if not torch.jit.is_scripting() else torch.cuda.default_stream(None) + ) + + def __enter__(self): + # Local cur_stream variable for type refinement + cur_stream = self.stream + # Return if stream is None or CUDA device not available + if cur_stream is None or self.idx == -1: + return + self.src_prev_stream = torch.cuda.current_stream(None) + + # If the stream is not on the current device, then + # set the current stream on the device + if self.src_prev_stream.device != cur_stream.device: + with device(cur_stream.device): + self.dst_prev_stream = torch.cuda.current_stream(cur_stream.device) + torch.cuda.set_stream(cur_stream) + + def __exit__(self, type: Any, value: Any, traceback: Any): + # Local cur_stream variable for type refinement + cur_stream = self.stream + # If stream is None or no CUDA device available, return + if cur_stream is None or self.idx == -1: + return + + # Reset the stream on the original device + # and destination device + if self.src_prev_stream.device != cur_stream.device: # type: ignore[union-attr] + torch.cuda.set_stream(self.dst_prev_stream) # type: ignore[arg-type] + torch.cuda.set_stream(self.src_prev_stream) # type: ignore[arg-type] + + +def stream(stream: Optional["torch.cuda.Stream"]) -> StreamContext: + r"""Wrap around the Context-manager StreamContext that selects a given stream. + + Arguments: + stream (Stream): selected stream. This manager is a no-op if it's + ``None``. + ..Note:: In eager mode stream is of type Stream class while in JIT it is + an object of the custom class ``torch.classes.cuda.Stream``. + """ + return StreamContext(stream) + + +def _set_stream_by_id(stream_id, device_index, device_type): + r"""set stream specified by the stream id, device index and + device type + + Args: stream_id (int): stream id in stream pool + device_index (int): device index in topo + device_type (int): enum device type + """ + torch._C._cuda_setStream( + stream_id=stream_id, + device_index=device_index, + device_type=device_type, + ) + + +def set_stream(stream: Stream): + r"""Set the current stream.This is a wrapper API to set the stream. + Usage of this function is discouraged in favor of the ``stream`` + context manager. + + Args: + stream (Stream): selected stream. This function is a no-op + if this argument is ``None``. 
+ """ + if stream is None: + return + _set_stream_by_id( + stream_id=stream.stream_id, + device_index=stream.device_index, + device_type=stream.device_type, + ) + + +def _parse_visible_devices() -> Union[List[int], List[str]]: + r"""Parse CUDA_VISIBLE_DEVICES environment variable.""" + var = os.getenv("CUDA_VISIBLE_DEVICES") + if var is None: + return list(range(64)) + + def _strtoul(s: str) -> int: + """Return -1 or positive integer sequence string starts with.""" + if not s: + return -1 + for idx, c in enumerate(s): + if not (c.isdigit() or (idx == 0 and c in "+-")): + break + if idx + 1 == len(s): + idx += 1 + return int(s[:idx]) if idx > 0 else -1 + + def parse_list_with_prefix(lst: str, prefix: str) -> List[str]: + rcs: List[str] = [] + for elem in lst.split(","): + # Repeated id results in empty set + if elem in rcs: + return cast(List[str], []) + # Anything other but prefix is ignored + if not elem.startswith(prefix): + break + rcs.append(elem) + return rcs + + if var.startswith("GPU-"): + return parse_list_with_prefix(var, "GPU-") + if var.startswith("MIG-"): + return parse_list_with_prefix(var, "MIG-") + # CUDA_VISIBLE_DEVICES uses something like strtoul + # which makes `1gpu2,2ampere` is equivalent to `1,2` + rc: List[int] = [] + for elem in var.split(","): + x = _strtoul(elem.strip()) + # Repeated ordinal results in empty set + if x in rc: + return cast(List[int], []) + # Negative value aborts the sequence + if x < 0: + break + rc.append(x) + return rc + + +def _raw_device_count_nvml() -> int: + r"""Return number of devices as reported by NVML or negative value if NVML discovery/initialization failed.""" + from ctypes import byref, c_int, CDLL + + nvml_h = CDLL("libnvidia-ml.so.1") + rc = nvml_h.nvmlInit() + if rc != 0: + warnings.warn("Can't initialize NVML") + return -1 + dev_count = c_int(-1) + rc = nvml_h.nvmlDeviceGetCount_v2(byref(dev_count)) + if rc != 0: + warnings.warn("Can't get nvml device count") + return -1 + del nvml_h + return dev_count.value + + +def _raw_device_uuid_nvml() -> Optional[List[str]]: + r"""Return list of device UUID as reported by NVML or None if NVM discovery/initialization failed.""" + from ctypes import byref, c_int, c_void_p, CDLL, create_string_buffer + + nvml_h = CDLL("libnvidia-ml.so.1") + rc = nvml_h.nvmlInit() + if rc != 0: + warnings.warn("Can't initialize NVML") + return None + dev_count = c_int(-1) + rc = nvml_h.nvmlDeviceGetCount_v2(byref(dev_count)) + if rc != 0: + warnings.warn("Can't get nvml device count") + return None + uuids: List[str] = [] + for idx in range(dev_count.value): + dev_id = c_void_p() + rc = nvml_h.nvmlDeviceGetHandleByIndex_v2(idx, byref(dev_id)) + if rc != 0: + warnings.warn("Can't get device handle") + return None + buf_len = 96 + buf = create_string_buffer(buf_len) + rc = nvml_h.nvmlDeviceGetUUID(dev_id, buf, buf_len) + if rc != 0: + warnings.warn("Can't get device UUID") + return None + uuids.append(buf.raw.decode("ascii").strip("\0")) + del nvml_h + return uuids + + +def _transform_uuid_to_ordinals(candidates: List[str], uuids: List[str]) -> List[int]: + r"""Given the set of partial uuids and list of known uuids builds a set of ordinals excluding ambiguous partials IDs.""" + + def uuid_to_orinal(candidate: str, uuids: List[str]) -> int: + best_match = -1 + for idx, uuid in enumerate(uuids): + if not uuid.startswith(candidate): + continue + # Ambiguous candidate + if best_match != -1: + return -1 + best_match = idx + return best_match + + rc: List[int] = [] + for candidate in candidates: + idx = 
uuid_to_orinal(candidate, uuids) + # First invalid ordinal stops parsing + if idx < 0: + break + # Duplicates result in empty set + if idx in rc: + return cast(List[int], []) + rc.append(idx) + return rc + + +def _device_count_nvml() -> int: + r"""Return number of devices as reported by NVML taking CUDA_VISIBLE_DEVICES into account. + + Negative value is returned if NVML discovery or initialization has failed. + """ + visible_devices = _parse_visible_devices() + if not visible_devices: + return 0 + try: + if type(visible_devices[0]) is str: + # Skip MIG parsing + if visible_devices[0].startswith("MIG-"): + return -1 + uuids = _raw_device_uuid_nvml() + if uuids is None: + return -1 + visible_devices = _transform_uuid_to_ordinals( + cast(List[str], visible_devices), uuids + ) + else: + raw_cnt = _raw_device_count_nvml() + if raw_cnt <= 0: + return raw_cnt + # Trim the list up to a maximum available device + for idx, val in enumerate(visible_devices): + if cast(int, val) >= raw_cnt: + return idx + except OSError: + return -1 + except AttributeError: + return -1 + return len(visible_devices) + + +def _get_nvml_device_index(device: Optional[Union[int, Device]]) -> int: + r"""Return the NVML index of the device, taking CUDA_VISIBLE_DEVICES into account.""" + idx = _get_device_index(device, optional=True) + visible_devices = _parse_visible_devices() + if type(visible_devices[0]) is str: + uuids = _raw_device_uuid_nvml() + if uuids is None: + raise RuntimeError("Can't get device UUIDs") + visible_devices = _transform_uuid_to_ordinals( + cast(List[str], visible_devices), uuids + ) + idx_map = dict(enumerate(cast(List[int], visible_devices))) + if idx not in idx_map: + raise RuntimeError( + f"device {idx} is not visible (CUDA_VISIBLE_DEVICES={visible_devices})" + ) + return idx_map[idx] + + +@lru_cache(maxsize=1) +def device_count() -> int: + r"""Return the number of GPUs available.""" + if not _is_compiled(): + return 0 + # bypass _device_count_nvml() if rocm (not supported) + nvml_count = -1 if torch.version.hip else _device_count_nvml() + return torch._C._cuda_getDeviceCount() if nvml_count < 0 else nvml_count + + +def get_arch_list() -> List[str]: + r"""Return list CUDA architectures this library was compiled for.""" + if not is_available(): + return [] + arch_flags = torch._C._cuda_getArchFlags() + if arch_flags is None: + return [] + return arch_flags.split() + + +def get_gencode_flags() -> str: + r"""Return NVCC gencode flags this library was compiled with.""" + arch_list = get_arch_list() + if len(arch_list) == 0: + return "" + arch_list_ = [arch.split("_") for arch in arch_list] + return " ".join( + [ + f"-gencode compute=compute_{arch},code={kind}_{arch}" + for (kind, arch) in arch_list_ + ] + ) + + +def current_device() -> int: + r"""Return the index of a currently selected device.""" + _lazy_init() + return torch._C._cuda_getDevice() + + +def synchronize(device: _device_t = None) -> None: + r"""Wait for all kernels in all streams on a CUDA device to complete. + + Args: + device (torch.device or int, optional): device for which to synchronize. + It uses the current device, given by :func:`~torch.cuda.current_device`, + if :attr:`device` is ``None`` (default). + """ + _lazy_init() + with torch.cuda.device(device): + return torch._C._cuda_synchronize() + + +def ipc_collect(): + r"""Force collects GPU memory after it has been released by CUDA IPC. + + .. note:: + Checks if any sent CUDA tensors could be cleaned from the memory. 
Force + closes shared memory file used for reference counting if there are no + active counters. Useful when the producer process stopped actively sending + tensors and wants to release unused memory. + """ + _lazy_init() + return torch._C._cuda_ipc_collect() + + +def current_stream(device: Optional[_device_t] = None) -> Stream: + r"""Return the currently selected :class:`Stream` for a given device. + + Args: + device (torch.device or int, optional): selected device. Returns + the currently selected :class:`Stream` for the current device, given + by :func:`~torch.cuda.current_device`, if :attr:`device` is ``None`` + (default). + """ + _lazy_init() + streamdata = torch._C._cuda_getCurrentStream( + _get_device_index(device, optional=True) + ) + return Stream( + stream_id=streamdata[0], device_index=streamdata[1], device_type=streamdata[2] + ) + + +def default_stream(device: Optional[_device_t] = None) -> Stream: + r"""Return the default :class:`Stream` for a given device. + + Args: + device (torch.device or int, optional): selected device. Returns + the default :class:`Stream` for the current device, given by + :func:`~torch.cuda.current_device`, if :attr:`device` is ``None`` + (default). + """ + _lazy_init() + streamdata = torch._C._cuda_getDefaultStream( + _get_device_index(device, optional=True) + ) + return Stream( + stream_id=streamdata[0], device_index=streamdata[1], device_type=streamdata[2] + ) + + +def current_blas_handle(): + r"""Return the cublasHandle_t pointer to the current cuBLAS handle.""" + _lazy_init() + return torch._C._cuda_getCurrentBlasHandle() + + +def set_sync_debug_mode(debug_mode: Union[int, str]) -> None: + r"""Set the debug mode for cuda synchronizing operations. + + Args: + debug_mode (str or int): if "default" or 0, don't error or warn on synchronizing operations, + if "warn" or 1, warn on synchronizing operations, if "error" or 2, error out synchronizing operations. + + Warning: + This is an experimental feature, and not all synchronizing operations will trigger warning or error. In + particular, operations in torch.distributed and torch.sparse namespaces are not covered yet. + """ + _lazy_init() + if isinstance(debug_mode, str): + if debug_mode == "default": + debug_mode = 0 + elif debug_mode == "warn": + debug_mode = 1 + elif debug_mode == "error": + debug_mode = 2 + else: + raise RuntimeError( + "invalid value of debug_mode, expected one of `default`, `warn`, `error`" + ) + + torch._C._cuda_set_sync_debug_mode(debug_mode) + + +def get_sync_debug_mode() -> int: + r"""Return current value of debug mode for cuda synchronizing operations.""" + _lazy_init() + return torch._C._cuda_get_sync_debug_mode() + + +def _get_pynvml_handler(device: Optional[Union[Device, int]] = None): + if not _HAS_PYNVML: + raise ModuleNotFoundError( + "pynvml does not seem to be installed or it can't be imported." + ) from _PYNVML_ERR + from pynvml import NVMLError_DriverNotLoaded + + try: + pynvml.nvmlInit() + except NVMLError_DriverNotLoaded as e: + raise RuntimeError("cuda driver can't be loaded, is cuda enabled?") from e + + device = _get_nvml_device_index(device) + handle = pynvml.nvmlDeviceGetHandleByIndex(device) + return handle + + +def memory_usage(device: Optional[Union[Device, int]] = None) -> int: + r"""Return the percent of time over the past sample period during which global (device) + memory was being read or written as given by `nvidia-smi`. + + Args: + device (torch.device or int, optional): selected device.
Returns + statistic for the current device, given by :func:`~torch.cuda.current_device`, + if :attr:`device` is ``None`` (default). + + Warning: Each sample period may be between 1 second and 1/6 second, + depending on the product being queried. + """ + handle = _get_pynvml_handler() + + device = _get_nvml_device_index(device) + handle = pynvml.nvmlDeviceGetHandleByIndex(device) + return pynvml.nvmlDeviceGetUtilizationRates(handle).memory + + +def utilization(device: Optional[Union[Device, int]] = None) -> int: + r"""Return the percent of time over the past sample period during which one or + more kernels was executing on the GPU as given by `nvidia-smi`. + + Args: + device (torch.device or int, optional): selected device. Returns + statistic for the current device, given by :func:`~torch.cuda.current_device`, + if :attr:`device` is ``None`` (default). + + Warning: Each sample period may be between 1 second and 1/6 second, + depending on the product being queried. + """ + handle = _get_pynvml_handler(device) + device = _get_nvml_device_index(device) + handle = pynvml.nvmlDeviceGetHandleByIndex(device) + return pynvml.nvmlDeviceGetUtilizationRates(handle).gpu + + +def temperature(device: Optional[Union[Device, int]] = None) -> int: + r"""Return the average temperature of the GPU sensor in Degrees C (Centigrades). + + The average temperature is computed based on past sample period as given by `nvidia-smi`. + + Args: + device (torch.device or int, optional): selected device. Returns + statistic for the current device, given by :func:`~torch.cuda.current_device`, + if :attr:`device` is ``None`` (default). + + Warning: Each sample period may be between 1 second and 1/6 second, + depending on the product being queried. + """ + handle = _get_pynvml_handler(device) + # 0 refers to the temperature sensor for the GPU die. + return pynvml.nvmlDeviceGetTemperature(handle, 0) + + +def power_draw(device: Optional[Union[Device, int]] = None) -> int: + r"""Return the average power draw of the GPU sensor in mW (MilliWatts) + over the past sample period as given by `nvidia-smi` for Fermi or newer fully supported devices. + + Args: + device (torch.device or int, optional): selected device. Returns + statistic for the current device, given by :func:`~torch.cuda.current_device`, + if :attr:`device` is ``None`` (default). + + Warning: Each sample period may be between 1 second and 1/6 second, + depending on the product being queried. + """ + handle = _get_pynvml_handler(device) + return pynvml.nvmlDeviceGetPowerUsage(handle) + + +def clock_rate(device: Optional[Union[Device, int]] = None) -> int: + r"""Return the clock speed of the GPU SM in Hz Hertz over the past sample period as given by `nvidia-smi`. + + Args: + device (torch.device or int, optional): selected device. Returns + statistic for the current device, given by :func:`~torch.cuda.current_device`, + if :attr:`device` is ``None`` (default). + + Warning: Each sample period may be between 1 second and 1/6 second, + depending on the product being queried. + """ + handle = _get_pynvml_handler(device) + return pynvml.nvmlDeviceGetClockInfo(handle, 1) + + +def _get_device(device: Union[int, str, torch.device]) -> torch.device: + r"""Return the torch.device type object from the passed in device. + + Args: + device (torch.device or int): selected device. 
+ """ + if isinstance(device, str): + device = torch.device(device) + elif isinstance(device, int): + device = torch.device("cuda", device) + return device + + +def _get_generator(device: torch.device) -> torch._C.Generator: + r"""Return the CUDA Generator object for the given device. + + Args: + device (torch.device): selected device. + """ + idx = device.index + if idx is None: + idx = current_device() + return torch.cuda.default_generators[idx] + + +def _set_rng_state_offset( + offset: int, device: Union[int, str, torch.device] = "cuda" +) -> None: + r"""Set the random number generator state offset of the specified GPU. + + Args: + offset (int): The desired offset + device (torch.device or int, optional): The device to set the RNG state. + Default: ``'cuda'`` (i.e., ``torch.device('cuda')``, the current CUDA device). + """ + final_device = _get_device(device) + + def cb(): + default_generator = _get_generator(final_device) + default_generator.set_offset(offset) + + _lazy_call(cb) + + +def _get_rng_state_offset(device: Union[int, str, torch.device] = "cuda") -> int: + r"""Return the random number generator state offset of the specified GPU. + + Args: + device (torch.device or int, optional): The device to return the RNG state offset of. + Default: ``'cuda'`` (i.e., ``torch.device('cuda')``, the current CUDA device). + + .. warning:: + This function eagerly initializes CUDA. + """ + _lazy_init() + final_device = _get_device(device) + default_generator = _get_generator(final_device) + return default_generator.get_offset() + + +from .memory import * # noqa: F403 + + +from .random import * # noqa: F403 + +################################################################################ +# Define Storage and Tensor classes +################################################################################ + + +@staticmethod # type: ignore[misc] +def _lazy_new(cls, *args, **kwargs): + _lazy_init() + # We may need to call lazy init again if we are a forked child + # del _CudaBase.__new__ + return super(_CudaBase, cls).__new__(cls, *args, **kwargs) + + +class _CudaBase: + is_cuda = True + is_sparse = False + + def type(self, *args, **kwargs): + # We could use a Protocol here to tell mypy that self has `get_device` method + # but it is only available in the typing module on Python >= 3.8 + # or on typing_extensions module on Python >= 3.6 + with device(self.get_device()): # type: ignore[attr-defined] + return super().type(*args, **kwargs) # type: ignore[misc] + + __new__ = _lazy_new + + +from torch.storage import _LegacyStorage, _warn_typed_storage_removal + + +class _CudaLegacyStorage(_LegacyStorage): + @classmethod + def from_buffer(cls, *args, **kwargs): + _warn_typed_storage_removal() + raise RuntimeError("from_buffer: Not available for CUDA storage") + + @classmethod + def _new_with_weak_ptr(cls, *args, **kwargs): + raise RuntimeError("_new_with_weak_ptr: Not available for CUDA storage") + + @classmethod + def _new_shared_filename(cls, manager, obj, size, *, device=None, dtype=None): + raise RuntimeError("_new_shared_filename: Not available for CUDA storage") + + +class ByteStorage(_CudaLegacyStorage): + @classproperty + def dtype(self): + _warn_typed_storage_removal() + return self._dtype + + @classproperty + def _dtype(self): + return torch.uint8 + + +class DoubleStorage(_CudaLegacyStorage): + @classproperty + def dtype(self): + _warn_typed_storage_removal() + return self._dtype + + @classproperty + def _dtype(self): + return torch.double + + +class FloatStorage(_CudaLegacyStorage): + 
@classproperty + def dtype(self): + _warn_typed_storage_removal() + return self._dtype + + @classproperty + def _dtype(self): + return torch.float + + +class HalfStorage(_CudaLegacyStorage): + @classproperty + def dtype(self): + _warn_typed_storage_removal() + return self._dtype + + @classproperty + def _dtype(self): + return torch.half + + +class LongStorage(_CudaLegacyStorage): + @classproperty + def dtype(self): + _warn_typed_storage_removal() + return self._dtype + + @classproperty + def _dtype(self): + return torch.long + + +class IntStorage(_CudaLegacyStorage): + @classproperty + def dtype(self): + _warn_typed_storage_removal() + return self._dtype + + @classproperty + def _dtype(self): + return torch.int + + +class ShortStorage(_CudaLegacyStorage): + @classproperty + def dtype(self): + _warn_typed_storage_removal() + return self._dtype + + @classproperty + def _dtype(self): + return torch.short + + +class CharStorage(_CudaLegacyStorage): + @classproperty + def dtype(self): + _warn_typed_storage_removal() + return self._dtype + + @classproperty + def _dtype(self): + return torch.int8 + + +class BoolStorage(_CudaLegacyStorage): + @classproperty + def dtype(self): + _warn_typed_storage_removal() + return self._dtype + + @classproperty + def _dtype(self): + return torch.bool + + +class BFloat16Storage(_CudaLegacyStorage): + @classproperty + def dtype(self): + _warn_typed_storage_removal() + return self._dtype + + @classproperty + def _dtype(self): + return torch.bfloat16 + + +class ComplexDoubleStorage(_CudaLegacyStorage): + @classproperty + def dtype(self): + _warn_typed_storage_removal() + return self._dtype + + @classproperty + def _dtype(self): + return torch.cdouble + + +class ComplexFloatStorage(_CudaLegacyStorage): + @classproperty + def dtype(self): + _warn_typed_storage_removal() + return self._dtype + + @classproperty + def _dtype(self): + return torch.cfloat + + +del _LegacyStorage +del _CudaLegacyStorage + +torch._storage_classes.add(DoubleStorage) +torch._storage_classes.add(FloatStorage) +torch._storage_classes.add(LongStorage) +torch._storage_classes.add(IntStorage) +torch._storage_classes.add(ShortStorage) +torch._storage_classes.add(CharStorage) +torch._storage_classes.add(ByteStorage) +torch._storage_classes.add(HalfStorage) +torch._storage_classes.add(BoolStorage) +torch._storage_classes.add(BFloat16Storage) +torch._storage_classes.add(ComplexDoubleStorage) +torch._storage_classes.add(ComplexFloatStorage) + + +class _WrappedTritonKernel: + """Just a simple wrapper to store some metadata for testing purposes.""" + + def __init__(self, kernel): + self.kernel = kernel + self.kernel_invoked = False + + def __call__(self, *args, **kwargs): + res = self.kernel(*args, **kwargs) + self.kernel_invoked = True + return res + + +def _register_triton_kernels(): + if torch._running_with_deploy(): + return + + @_WrappedTritonKernel + def kernel_impl(*args, **kwargs): + from torch.sparse._triton_ops import bsr_dense_mm + + return bsr_dense_mm(*args, skip_checks=True, **kwargs) + + @_WrappedTritonKernel + def addmm_kernel_impl(*args, **kwargs): + from torch.sparse._triton_ops import bsr_dense_addmm + + return bsr_dense_addmm(*args, skip_checks=True, **kwargs) + + has_triton = importlib.util.find_spec("triton") is not None + if has_triton: + torch._TritonLibrary.registerOp( + "_triton_bsr_dense_mm_out", + "_triton_bsr_dense_mm_out(Tensor bsr, Tensor dense, *, Tensor(a!) 
out) -> Tensor(a!)", + kernel_impl, + "SparseCsrCUDA", + ) + + torch._TritonLibrary.registerOp( + "_triton_bsr_dense_addmm_out", + ( + "_triton_bsr_dense_addmm_out(Tensor input, Tensor bsr, Tensor dense," + " *, Scalar beta, Scalar alpha, Tensor(a!) out) -> Tensor(a!)" + ), + addmm_kernel_impl, + "SparseCsrCUDA", + ) + + +_lazy_call(_register_triton_kernels) + + +from . import amp, jiterator, nvtx, profiler, sparse + +__all__ = [ + # Typed storage and tensors + "BFloat16Storage", + "BFloat16Tensor", + "BoolStorage", + "BoolTensor", + "ByteStorage", + "ByteTensor", + "CharStorage", + "CharTensor", + "ComplexDoubleStorage", + "ComplexFloatStorage", + "DoubleStorage", + "DoubleTensor", + "FloatStorage", + "FloatTensor", + "HalfStorage", + "HalfTensor", + "IntStorage", + "IntTensor", + "LongStorage", + "LongTensor", + "ShortStorage", + "ShortTensor", + "CUDAGraph", + "CudaError", + "DeferredCudaCallError", + "Event", + "ExternalStream", + "OutOfMemoryError", + "Stream", + "StreamContext", + "amp", + "caching_allocator_alloc", + "caching_allocator_delete", + "can_device_access_peer", + "check_error", + "cudaStatus", + "cudart", + "current_blas_handle", + "current_device", + "current_stream", + "default_generators", + "default_stream", + "device", + "device_count", + "device_of", + "empty_cache", + "get_allocator_backend", + "CUDAPluggableAllocator", + "change_current_allocator", + "get_arch_list", + "get_device_capability", + "get_device_name", + "get_device_properties", + "get_gencode_flags", + "get_rng_state", + "get_rng_state_all", + "get_sync_debug_mode", + "graph", + "graph_pool_handle", + "graphs", + "has_half", + "has_magma", + "init", + "initial_seed", + "ipc_collect", + "is_available", + "is_bf16_supported", + "is_current_stream_capturing", + "is_initialized", + "jiterator", + "list_gpu_processes", + "make_graphed_callables", + "manual_seed", + "manual_seed_all", + "max_memory_allocated", + "max_memory_cached", + "max_memory_reserved", + "mem_get_info", + "memory", + "memory_allocated", + "memory_cached", + "memory_reserved", + "memory_snapshot", + "memory_stats", + "memory_stats_as_nested_dict", + "memory_summary", + "memory_usage", + "temperature", + "power_draw", + "clock_rate", + "nccl", + "nvtx", + "profiler", + "random", + "reset_accumulated_memory_stats", + "reset_max_memory_allocated", + "reset_max_memory_cached", + "reset_peak_memory_stats", + "seed", + "seed_all", + "set_device", + "set_per_process_memory_fraction", + "set_rng_state", + "set_rng_state_all", + "set_stream", + "set_sync_debug_mode", + "sparse", + "stream", + "streams", + "synchronize", + "utilization", +] diff --git a/env-llmeval/lib/python3.10/site-packages/torch/cuda/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/cuda/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8ba3faed234d798512c60793cf7797dd89ecba17 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/cuda/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/cuda/__pycache__/_memory_viz.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/cuda/__pycache__/_memory_viz.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f1762e70c78d1a7fc5407de40d8d3a51adbe7c97 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/cuda/__pycache__/_memory_viz.cpython-310.pyc differ diff --git 
a/env-llmeval/lib/python3.10/site-packages/torch/cuda/__pycache__/_sanitizer.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/cuda/__pycache__/_sanitizer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d881346eaa14de45e52504d7648e0db1884fd918 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/cuda/__pycache__/_sanitizer.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/cuda/__pycache__/_utils.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/cuda/__pycache__/_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3c93a05e18af2473542e9e37158fe9a5091af2ba Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/cuda/__pycache__/_utils.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/cuda/__pycache__/error.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/cuda/__pycache__/error.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a20d3d2edcc243789ca9e4cb0a9aede4c372f4c5 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/cuda/__pycache__/error.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/cuda/__pycache__/graphs.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/cuda/__pycache__/graphs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d4ae05049098c24811c8df08848d367b604f320a Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/cuda/__pycache__/graphs.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/cuda/__pycache__/jiterator.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/cuda/__pycache__/jiterator.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0f9a48ace073a699c9c68194a18ed542e9dfe738 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/cuda/__pycache__/jiterator.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/cuda/__pycache__/memory.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/cuda/__pycache__/memory.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..493721472904cb493763e8bdad23ecc4fb2f7d3f Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/cuda/__pycache__/memory.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/cuda/__pycache__/nccl.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/cuda/__pycache__/nccl.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d848e40b83aa52c7a9808fd93c777c2e143f7bf3 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/cuda/__pycache__/nccl.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/cuda/__pycache__/nvtx.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/cuda/__pycache__/nvtx.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..98a8f9d5fe08f262764120e8141166833d660d3b Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/cuda/__pycache__/nvtx.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/cuda/__pycache__/profiler.cpython-310.pyc 
b/env-llmeval/lib/python3.10/site-packages/torch/cuda/__pycache__/profiler.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b3eb4012845b38c8b7d6bd51571a3ca248a1e328 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/cuda/__pycache__/profiler.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/cuda/__pycache__/random.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/cuda/__pycache__/random.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1143df587265b560d6ce237ceeaad65ae77f9acb Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/cuda/__pycache__/random.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/cuda/__pycache__/sparse.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/cuda/__pycache__/sparse.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3cd8fd31c35fe69831c42b212c2ab27a693c07a1 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/cuda/__pycache__/sparse.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/cuda/__pycache__/streams.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/cuda/__pycache__/streams.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..206e11a6e64406f969d5927bc6c762e6ab97319d Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/cuda/__pycache__/streams.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/cuda/_memory_viz.py b/env-llmeval/lib/python3.10/site-packages/torch/cuda/_memory_viz.py new file mode 100644 index 0000000000000000000000000000000000000000..a862acd73184733dca0c811204456adc21394200 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/cuda/_memory_viz.py @@ -0,0 +1,626 @@ +import pickle +import sys +import os +import io +import subprocess +import json +from functools import lru_cache +from typing import Any +from itertools import groupby +import base64 +import warnings + +cache = lru_cache(None) + +__all__ = ["format_flamegraph", "segments", "memory", "compare"] + +def _frame_fmt(f, full_filename=False): + i = f['line'] + fname = f['filename'] + if not full_filename: + fname = fname.split('/')[-1] + func = f['name'] + return f'{fname}:{i}:{func}' + +@cache +def _frame_filter(name, filename): + omit_functions = [ + "unwind::unwind", + "CapturedTraceback::gather", + "gather_with_cpp", + "_start", + "__libc_start_main", + "PyEval_", + "PyObject_", + "PyFunction_", + ] + omit_filenames = [ + "core/boxing", + "/Register", + "/Redispatch", + "pythonrun.c", + "Modules/main.c", + "Objects/call.c", + "Objects/methodobject.c", + "pycore_ceval.h", + "ceval.c", + "cpython/abstract.h", + ] + for of in omit_functions: + if of in name: + return False + for of in omit_filenames: + if of in filename: + return False + return True + +def _frames_fmt(frames, full_filename=False, reverse=False): + if reverse: + frames = reversed(frames) + return [_frame_fmt(f, full_filename) for f in frames if _frame_filter(f['name'], f['filename'])] + +def _block_extra_legacy(b): + if 'history' in b: + frames = b['history'][0].get('frames', []) + real_size = b['history'][0]['real_size'] + else: + real_size = b.get('requested_size', b['size']) + frames = [] + return frames, real_size + +def _block_extra(b): + if 'frames' not in b: + # old snapshot format made it more 
complicated to get frames/allocated size + return _block_extra_legacy(b) + return b['frames'], b['requested_size'] + +def format_flamegraph(flamegraph_lines, flamegraph_script=None): + if flamegraph_script is None: + flamegraph_script = f'/tmp/{os.getuid()}_flamegraph.pl' + if not os.path.exists(flamegraph_script): + import urllib.request + print(f"Downloading flamegraph.pl to: {flamegraph_script}") + urllib.request.urlretrieve( + 'https://raw.githubusercontent.com/brendangregg/FlameGraph/master/flamegraph.pl', flamegraph_script) + subprocess.check_call(['chmod', '+x', flamegraph_script]) + args = [flamegraph_script, '--countname', 'bytes'] + p = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, encoding='utf-8') + assert p.stdin is not None + assert p.stdout is not None + p.stdin.write(flamegraph_lines) + p.stdin.close() + result = p.stdout.read() + p.stdout.close() + p.wait() + assert p.wait() == 0 + return result + +def _write_blocks(f, prefix, blocks): + def frames_fragment(frames): + if not frames: + return "" + return ';'.join(_frames_fmt(frames, reverse=True)) + for b in blocks: + if 'history' not in b: + frames, accounted_for_size = _block_extra(b) + f.write(f'{prefix};{b["state"]};{frames_fragment(frames)} {accounted_for_size}\n') + else: + accounted_for_size = 0 + for h in b['history']: + sz = h['real_size'] + accounted_for_size += sz + if 'frames' in h: + frames = h['frames'] + f.write(f'{prefix};{b["state"]};{frames_fragment(frames)} {sz}\n') + else: + f.write(f'{prefix};{b["state"]}; {sz}\n') + gaps = b['size'] - accounted_for_size + if gaps: + f.write(f'{prefix};{b["state"]}; {gaps}\n') + +def segments(snapshot, format_flamegraph=format_flamegraph): + f = io.StringIO() + for seg in snapshot['segments']: + prefix = f'stream_{seg["stream"]};seg_{seg["address"]}' + _write_blocks(f, prefix, seg['blocks']) + return format_flamegraph(f.getvalue()) + +def memory(snapshot, format_flamegraph=format_flamegraph): + f = io.StringIO() + for seg in snapshot['segments']: + prefix = f'stream_{seg["stream"]}' + _write_blocks(f, prefix, seg['blocks']) + return format_flamegraph(f.getvalue()) + +def compare(before, after, format_flamegraph=format_flamegraph): + def _seg_key(seg): + return (seg['address'], seg['total_size']) + + def _seg_info(seg): + return f'stream_{seg["stream"]};seg_{seg["address"]}' + + f = io.StringIO() + + before_segs = {_seg_key(seg) for seg in before} + after_segs = {_seg_key(seg) for seg in after} + + print(f'only_before = {[a for a,_ in (before_segs - after_segs)]}') + print(f'only_after = {[a for a,_ in (after_segs - before_segs)]}') + + for seg in before: + if _seg_key(seg) not in after_segs: + _write_blocks(f, f'only_before;{_seg_info(seg)}', seg['blocks']) + + for seg in after: + if _seg_key(seg) not in before_segs: + _write_blocks(f, f'only_after;{_seg_info(seg)}', seg['blocks']) + + return format_flamegraph(f.getvalue()) + +def _format_size(num): + # https://stackoverflow.com/questions/1094841/get-human-readable-version-of-file-size + for unit in ["", "Ki", "Mi", "Gi", "Ti", "Pi", "Ei", "Zi"]: + if abs(num) < 1024.0: + return f"{num:3.1f}{unit}B" + num /= 1024.0 + return f"{num:.1f}YiB" + +class Bytes: + def __init__(self, value): + self.value = value + + def __add__(self, rhs): + return Bytes(self.value + rhs) + + def __repr__(self): + return _format_size(self.value) + +def calc_active(seg): + return sum(b['size'] for b in seg['blocks'] if b['state'] == 'active_allocated') + +def _report_free(free_external, free_internal): + total = 
free_external + free_internal + suffix = '' + if total != 0: + pct = (free_internal / total) * 100 + suffix = f' ({pct:.1f}% internal)' + return f'{Bytes(total)}{suffix}' + +PAGE_SIZE = 1024 * 1024 * 20 +legend = f"""\ + +Legend: + [a ] - a segment in the allocator + ^-- a page {Bytes(PAGE_SIZE)} of memory in the segment + a-z: pages filled with a single block's content + ' ': page is completely free + *: page if completely full with multiple blocks + 0-9: page is partially full with tensors of multiple blocks (9 == 90% full) + (X% internal) - of the free memory, X% is free because we rounded the size of the allocation. +""" + +def segsum(data): + r"""Visually reports how the allocator has filled its segments. + + This printout can help debug fragmentation issues since free fragments + will appear as gaps in this printout. The amount of free space is reported + for each segment. + We distinguish between internal free memory which occurs because the + allocator rounds the allocation size, and external free memory, which are + the gaps between allocations in a segment. + Args: + data: snapshot dictionary created from _snapshot() + """ + segments = [] + out = io.StringIO() + out.write(f"Summary of segments >= {Bytes(PAGE_SIZE)} in size\n") + total_reserved = 0 + total_allocated = 0 + free_external = 0 + free_internal = 0 + for seg in sorted(data['segments'], key=lambda x: (x['total_size'], calc_active(x))): + total_reserved += seg['total_size'] + + seg_free_external = 0 + seg_free_internal = 0 + seg_allocated = 0 + all_ranges = [] + boffset = 0 + for b in seg['blocks']: + active = b['state'] == 'active_allocated' + if active: + _, allocated_size = _block_extra(b) + all_ranges.append((boffset, allocated_size, True)) + seg_allocated += allocated_size + seg_free_internal += b['size'] - allocated_size + else: + seg_free_external += b['size'] + + boffset += b['size'] + + total_allocated += seg_allocated + free_external += seg_free_external + free_internal += seg_free_internal + + nseg = (seg['total_size'] - 1) // PAGE_SIZE + 1 + occupied = [' ' for _ in range(nseg)] + frac = [0.0 for _ in range(nseg)] + active_size = 0 + for i, (start_, size, active) in enumerate(all_ranges): + active_size += size + finish_ = (start_ + size) + start = start_ // PAGE_SIZE + finish = (finish_ - 1) // PAGE_SIZE + 1 + m = chr(ord('a' if active else 'A') + (i % 26)) + for j in range(start, finish): + s = max(start_, j * PAGE_SIZE) + e = min(finish_, (j + 1) * PAGE_SIZE) + frac[j] += (e - s) / PAGE_SIZE + if occupied[j] != ' ': + occupied[j] = '0123456789*'[int(frac[j] * 10)] + else: + occupied[j] = m + stream = '' if seg['stream'] == 0 else f', stream_{seg["stream"]}' + body = ''.join(occupied) + assert seg_free_external + seg_free_internal + seg_allocated == seg['total_size'] + stream = f' stream_{seg["stream"]}' if seg['stream'] != 0 else '' + if seg['total_size'] >= PAGE_SIZE: + out.write(f'[{body}] {Bytes(seg["total_size"])} allocated, ' + f'{_report_free(seg_free_external, seg_free_internal)} free{stream}\n') + out.write(f'segments: {len(data["segments"])}\n') + out.write(f'total_reserved: {Bytes(total_reserved)}\n') + out.write(f'total_allocated: {Bytes(total_allocated)}\n') + internal_external = f' ({Bytes(free_internal)} internal + {Bytes(free_external)} external)' if free_internal else '' + out.write(f'total_free: {_report_free(free_external, free_internal)}\n') + out.write(legend) + assert free_internal + free_external + total_allocated == total_reserved + return out.getvalue() + +def trace(data): + out = 
io.StringIO() + + def format(entries): + segment_intervals : list = [] + segment_addr_to_name = {} + allocation_addr_to_name = {} + + free_names : list = [] + next_name = 0 + + def _name(): + nonlocal next_name + if free_names: + return free_names.pop() + r, m = next_name // 26, next_name % 26 + next_name += 1 + return f'{chr(ord("a") + m)}{"" if r == 0 else r}' + + def find_segment(addr): + for name, saddr, size in segment_intervals: + if addr >= saddr and addr < saddr + size: + return name, saddr + for i, seg in enumerate(data['segments']): + saddr = seg['address'] + size = seg['allocated_size'] + if addr >= saddr and addr < saddr + size: + return f'seg_{i}', saddr + return None, None + count = 0 + out.write(f'{len(entries)} entries\n') + + + total_reserved = 0 + for seg in data['segments']: + total_reserved += seg['total_size'] + + for count, e in enumerate(entries): + if e['action'] == 'alloc': + addr, size = e['addr'], e['size'] + n = _name() + seg_name, seg_addr = find_segment(addr) + if seg_name is None: + seg_name = "MEM" + offset = addr + else: + offset = addr - seg_addr + out.write(f'{n} = {seg_name}[{offset}:{Bytes(size)}]\n') + allocation_addr_to_name[addr] = (n, size, count) + count += size + elif e['action'] == 'free_requested': + addr, size = e['addr'], e['size'] + name, _, _ = allocation_addr_to_name.get(addr, (addr, None, None)) + out.write(f'del {name} # {Bytes(size)}\n') + elif e['action'] == 'free_completed': + addr, size = e['addr'], e['size'] + count -= size + name, _, _ = allocation_addr_to_name.get(addr, (addr, None, None)) + out.write(f'# free completed for {name} {Bytes(size)}\n') + if name in allocation_addr_to_name: + free_names.append(name) + del allocation_addr_to_name[name] + elif e['action'] == 'segment_alloc': + addr, size = e['addr'], e['size'] + name = _name() + out.write(f'{name} = cudaMalloc({addr}, {Bytes(size)})\n') + segment_intervals.append((name, addr, size)) + segment_addr_to_name[addr] = name + elif e['action'] == 'segment_free': + addr, size = e['addr'], e['size'] + name = segment_addr_to_name.get(addr, addr) + out.write(f'cudaFree({name}) # {Bytes(size)}\n') + if name in segment_addr_to_name: + free_names.append(name) + del segment_addr_to_name[name] + elif e['action'] == 'oom': + size = e['size'] + free = e['device_free'] + out.write(f'raise OutOfMemoryError() # {Bytes(size)} requested, {Bytes(free)} free in CUDA\n') + else: + out.write(f'{e}\n') + out.write(f"TOTAL MEM: {Bytes(count)}") + for i, d in enumerate(data['device_traces']): + if d: + out.write(f'Device {i} ----------------\n') + format(d) + return out.getvalue() + + +_memory_viz_template = r""" + + + + + + + +""" + +def _format_viz(data, viz_kind, device): + if device is not None: + warnings.warn('device argument is deprecated, plots now contain all device') + buffer = pickle.dumps(data) + buffer += b'\x00' * (3 - len(buffer) % 3) + # Encode the buffer with base64 + encoded_buffer = base64.b64encode(buffer).decode('utf-8') + + json_format = json.dumps([{"name": 'snapshot.pickle', "base64": encoded_buffer}]) + return _memory_viz_template.replace('$VIZ_KIND', repr(viz_kind)) \ + .replace('$SNAPSHOT', json_format) + +def trace_plot(data, device=None, plot_segments=False): + """Generate a visualization over time of the memory usage recorded by the trace as an html file. + + Args: + data: Memory snapshot as generated from torch.cuda.memory._snapshot() + device (torch.device, optional): Generate the trace for this device, needed if multiple devices have allocations. 
+ plot_segments (bool, optional): Plots memory returned from cudaMalloc, rather than individual allocations. + Defaults to False. + + Returns: + str: HTML of visualization + """ + return _format_viz(data, 'Active Memory Timeline' if not plot_segments else 'Active Cached Memory Timeline', device) + + +def _profile_to_snapshot(profile): + import torch + from torch.profiler._memory_profiler import Action, TensorKey + from torch._C._profiler import _EventType + memory_profile = profile._memory_profile() + + allocation_stacks = {} + for event in memory_profile._op_tree.sorted_nodes: + if event.tag == _EventType.Allocation: + parent = event.parent + python_parents = [] + while parent: + if parent.tag in (_EventType.PyCall, _EventType.PyCCall): + python_parents.append(parent) + parent = parent.parent + key = TensorKey.from_allocation(event.extra_fields) + + # Corner case: If allocation doesn't have an ID (can't prove it was used as a Tensor) + # key will be None. I should add some way to identify these, I just haven't yet. + if key and event.extra_fields.alloc_size > 0: + allocation_stacks[key] = python_parents + + + device_count = torch.cuda.device_count() + snapshot = { + 'device_traces': [[] for _ in range(device_count + 1)], + 'segments': [{'device': device, + 'address': None, + 'total_size': 0, + 'stream': 0, + 'blocks': []} for device in range(device_count + 1)] + } + + def to_device(device): + if device.type == 'cuda': + return device.index + else: + return device_count + + def allocate(size, tensor_key, version, during_trace=True): + device = to_device(tensor_key.device) + addr = tensor_key.storage.ptr + + seg = snapshot['segments'][device] # type: ignore[index] + if seg['address'] is None or seg['address'] > addr: + seg['address'] = addr + seg['total_size'] = max(seg['total_size'], addr + size) # record max addr for now, we will make it the size later + category = memory_profile._categories.get(tensor_key, version) + category = category.name.lower() if category is not None else "unknown" + stack = allocation_stacks.get(tensor_key, ()) + stack = [{'filename': 'none', 'line': 0, 'name': p.name} for p in stack] + r = {'action': 'alloc', 'addr': addr, 'size': size, 'stream': 0, 'frames': stack, 'category': category} + if during_trace: + snapshot['device_traces'][device].append(r) # type: ignore[index] + return r + + def free(alloc, device): + for e in ('free_requested', 'free_completed'): + snapshot['device_traces'][device].append({'action': e, # type: ignore[index] + 'addr': alloc['addr'], + 'size': alloc['size'], + 'stream': 0, + 'frames': alloc['frames']}) + + kv_to_elem = {} + + + + # create the device trace + for time, action, (tensor_key, version), size in memory_profile.timeline: + if not isinstance(tensor_key, TensorKey): + continue + if action == Action.CREATE: + kv_to_elem[(tensor_key, version)] = allocate(size, tensor_key, version) + elif action == Action.DESTROY: + free(kv_to_elem.pop((tensor_key, version)), to_device(tensor_key.device)) + elif action == Action.INCREMENT_VERSION: + free(kv_to_elem.pop((tensor_key, version)), to_device(tensor_key.device)) + kv_to_elem[(tensor_key, version + 1)] = allocate(size, tensor_key, version + 1) + elif action == Action.PREEXISTING: + kv_to_elem[(tensor_key, version)] = allocate(size, tensor_key, version, during_trace=False) + + + # create the final snapshot state + blocks_at_end = [(to_device(tensor_key.device), event['addr'], event['size'], event['frames']) + for (tensor_key, version), event in kv_to_elem.items()] + for device, blocks in 
groupby(sorted(blocks_at_end), key=lambda x: x[0]): + seg = snapshot['segments'][device] # type: ignore[index] + last_addr = seg['address'] + for _, addr, size, frames in blocks: + if last_addr < addr: + seg['blocks'].append({'size': addr - last_addr, 'state': 'inactive'}) + seg['blocks'].append({'size': size, 'state': 'active_allocated', 'requested_size': size, 'frames': frames}) + last_addr = addr + size + if last_addr < seg['total_size']: + seg['blocks'].append({'size': seg['total_size'] - last_addr, 'state': 'inactive'}) + + snapshot['segments'] = [seg for seg in snapshot['segments'] if seg['blocks']] # type: ignore[attr-defined] + for seg in snapshot['segments']: # type: ignore[attr-defined, name-defined, no-redef] + seg['total_size'] -= seg['address'] + if not seg['blocks']: + seg['blocks'].append({'size': seg['total_size'], 'state': 'inactive'}) + + return snapshot + +def profile_plot(profile, device=None): + """Generate a visualization over time of the memory usage recorded by kineto memory profiling as an html file. + + Args: + profile: profile as generated by `torch.profiler.profile(profile_memory=True)` + device (torch.device, optional): Generate the trace for this device, needed if multiple devices have allocations. + + Returns: + str: HTML of visualization + """ + snapshot = _profile_to_snapshot(profile) + return _format_viz(snapshot, 'Active Memory Timeline', device) + + +def segment_plot(data: Any, device=None): + return _format_viz(data, 'Allocator State History', device) + +if __name__ == "__main__": + import os.path + thedir = os.path.realpath(os.path.dirname(__file__)) + if thedir in sys.path: + # otherwise we find cuda/random.py as random... + sys.path.remove(thedir) + import argparse + + fn_name = 'torch.cuda.memory._snapshot()' + pickled = f'pickled memory statistics from {fn_name}' + parser = argparse.ArgumentParser(description=f'Visualize memory dumps produced by {fn_name}') + + subparsers = parser.add_subparsers(dest='action') + + def _output(p): + p.add_argument('-o', '--output', default='output.svg', help='flamegraph svg (default: output.svg)') + + description = 'Prints overall allocation statistics and a visualization of how the allocators segments are currently filled.' + stats_a = subparsers.add_parser('stats', description=description) + stats_a.add_argument('input', help=pickled) + + description = 'Prints buffer of the most recent allocation events embedded in the snapshot in a Pythonic style.' + trace_a = subparsers.add_parser('trace', description=description) + trace_a.add_argument('input', help=pickled) + + description = 'Generate a flamegraph that visualizes what memory is stored in each allocator segment (aka block)' + segments_a = subparsers.add_parser('segments', description=description) + segments_a.add_argument('input', help=pickled) + _output(segments_a) + + description = "Generate a flamegraph the program locations contributing to CUDA memory usage." + memory_a = subparsers.add_parser('memory', description=description) + memory_a.add_argument('input', help=pickled) + _output(memory_a) + + description = 'Generate a flamegraph that shows segments (aka blocks) that have been added ' \ + 'or removed between two different memorys snapshots.' 
+ compare_a = subparsers.add_parser('compare', description=description) + compare_a.add_argument('before', help=pickled) + compare_a.add_argument('after', help=pickled) + _output(compare_a) + + plots = ( + ("trace_plot", "Generate a visualization over time of the memory usage recorded by the trace as an html file."), + ("segment_plot", "Visualize how allocations are packed into allocator segments at each point in a trace as an html file.") + ) + for cmd, description in plots: + trace_plot_a = subparsers.add_parser(cmd, description=description) + trace_plot_a.add_argument('input', help=pickled) + help = 'visualize trace from this device (default: chooses the only device with trace info or errors)' + trace_plot_a.add_argument('-d', '--device', type=int, default=None, help=help) + help = 'path to save the visualization(default: output.html)' + trace_plot_a.add_argument('-o', '--output', default='output.html', help=help) + if cmd == "trace_plot": + help = 'visualize change to segments rather than individual allocations' + trace_plot_a.add_argument('-s', '--segments', action='store_true', help=help) + + + args = parser.parse_args() + + def _read(name): + if name == '-': + f = sys.stdin.buffer + else: + f = open(name, 'rb') + data = pickle.load(f) + if isinstance(data, list): # segments only... + data = {'segments': data, 'traces': []} + return data + + def _write(name, data): + with open(name, 'w') as f: + f.write(data) + + if args.action == 'segments': + data = _read(args.input) + _write(args.output, segments(data)) + elif args.action == 'memory': + data = _read(args.input) + _write(args.output, memory(data)) + elif args.action == 'stats': + data = _read(args.input) + print(segsum(data)) + elif args.action == 'trace': + data = _read(args.input) + print(trace(data)) + elif args.action == 'compare': + before = _read(args.before) + after = _read(args.after) + _write(args.output, compare(before, after)) + elif args.action == 'trace_plot': + data = _read(args.input) + _write(args.output, trace_plot(data, device=args.device, plot_segments=args.segments)) + elif args.action == 'segment_plot': + data = _read(args.input) + _write(args.output, segment_plot(data, device=args.device)) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/cuda/_sanitizer.py b/env-llmeval/lib/python3.10/site-packages/torch/cuda/_sanitizer.py new file mode 100644 index 0000000000000000000000000000000000000000..c0b0297366db73b31bd15a5ba7b30d86164b3f31 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/cuda/_sanitizer.py @@ -0,0 +1,622 @@ +r""" +This module introduces CUDA Sanitizer, a tool for detecting synchronization errors between kernels ran on different streams. + +It stores information on accesses to tensors to determine if they are synchronized +or not. When enabled in a python program and a possible data race is detected, a +detailed warning will be printed and the program will exit. + +It can be enabled either by importing this module and calling +:func:`enable_cuda_sanitizer()` or by exporting the ``TORCH_CUDA_SANITIZER`` +environment variable. 
+""" + +import enum +import functools +import inspect +import io +import logging +import sys +import textwrap +import traceback +from dataclasses import dataclass, field +from typing import Any, Dict, Iterator, List, Optional, Set, Tuple, TypeVar + +import torch +import torch.utils._cuda_trace as cuda_trace +from torch.utils import _pytree as pytree +from torch.utils._python_dispatch import TorchDispatchMode + + +DEFAULT_STREAM_ID = 0 + +TK = TypeVar("TK") +TVa = TypeVar("TVa") +TVb = TypeVar("TVb") + +DataPtr = int +StreamId = int +EventId = int +SeqNum = int + +logger = logging.getLogger(__name__) + + +class AccessType(enum.Enum): + READ = enum.auto() + WRITE = enum.auto() + + def __str__(self): + return "reading from" if self is AccessType.READ else "writing to" + + +@dataclass +class Access: + r"""Stores information about a single access to a tensor by a kernel. + + Args: + type: either AccessType.READ or AccessType.Write. + seq_num: the sequential number of the kernel performing the access. + stream: the stream id of the stream executing the kernel. + operator: the schema of the launched kernel, which lists the + arguments and return type. + aliases: the arguments in the schema this access corresponds to. + is_output: Whether the tensor was an output of the kernel. + stack_trace: the stack summary object captured during access. + """ + + type: AccessType + seq_num: SeqNum + stream: StreamId + operator: str + aliases: List[str] + is_output: bool + stack_trace: traceback.StackSummary + + +class SynchronizationError(Exception): + """Base class for errors detected by CUDA Sanitizer.""" + + pass + + +class UnsynchronizedAccessError(SynchronizationError): + """Stores information about two unsynchronized accesses to one data pointer.""" + + def __init__( + self, + data_ptr: DataPtr, + allocation_stack_trace: Optional[traceback.StackSummary], + current_access: Access, + previous_access: Access, + ): + self.data_ptr = data_ptr + self.allocation_stack_trace = allocation_stack_trace + self.current_access = current_access + self.previous_access = previous_access + + def __str__(self): + def format_access(access: Access): + message.write(f"{access.operator}\n{access.type}") + if access.aliases: + message.write(" argument(s) " + ", ".join(access.aliases)) + if access.is_output: + message.write(", and to") + if access.is_output: + message.write(" the output") + message.write( + f"\nWith stack trace:\n{''.join(access.stack_trace.format())}\n" + ) + + with io.StringIO() as message: + message.write( + textwrap.dedent( + f"""\ + ============================ + CSAN detected a possible data race on tensor with data pointer {self.data_ptr} + Access by stream {self.current_access.stream} during kernel: + """ + ) + ) + format_access(self.current_access) + + message.write( + f"Previous access by stream {self.previous_access.stream} during kernel:\n" + ) + format_access(self.previous_access) + + if self.allocation_stack_trace: + message.write( + "Tensor was allocated with stack trace:\n" + f"{''.join(self.allocation_stack_trace.format())}" + ) + else: + message.write("Trace for tensor allocation not found.") + return message.getvalue() + + +class CUDASanitizerErrors(Exception): + """Wrapper class for errors reported by CUDA Sanitizer.""" + + def __init__(self, errors: List[SynchronizationError]): + self.errors = errors + + def __str__(self): + return f"detected {len(self.errors)} errors" + + +@dataclass +class TensorInfo: + r"""Stores information about a single tensor and recent accesses to it. 
+ + Args: + allocation_stack_trace: the stack summary object captured during tensor + allocation. Can be ``None`` if the allocation wasn't caught by CSAN. + reads: list of read accesses to the tensor that were performed since + the last write. + write: the last write access to the tensor. + """ + + allocation_stack_trace: Optional[traceback.StackSummary] + reads: List[Access] = field(default_factory=list) + write: Optional[Access] = None + + +class _TensorsAccessed: + def __init__(self): + self.accesses: Dict[DataPtr, TensorInfo] = {} + + def ensure_tensor_exists(self, data_ptr: DataPtr) -> None: + if data_ptr not in self.accesses: + logger.info( + "Found tensor with pointer: %s, but no matching tensor " + "allocation in the trace. Backfilling the trace now. " + "Perhaps the sanitizer was enabled after some torch operations?", + data_ptr, + ) + self.create_tensor(data_ptr, None) + + def ensure_tensor_does_not_exist(self, data_ptr: DataPtr) -> None: + if data_ptr in self.accesses: + logger.info( + "Found duplicate tensor allocation in the trace for tensor with " + "pointer: %s. Assuming the trace for tensor deallocation " + "wasn't caught and backfilling it now. " + "Perhaps the sanitizer was enabled after some torch operations?", + data_ptr, + ) + self.delete_tensor(data_ptr) + + def create_tensor( + self, data_ptr: DataPtr, stack_trace: Optional[traceback.StackSummary] + ) -> None: + self.accesses[data_ptr] = TensorInfo(stack_trace) + + def delete_tensor(self, data_ptr: DataPtr) -> None: + del self.accesses[data_ptr] + + def were_there_reads_since_last_write(self, data_ptr: DataPtr) -> bool: + return True if self.accesses[data_ptr].reads else False + + def get_allocation_stack_trace( + self, data_ptr: DataPtr + ) -> Optional[traceback.StackSummary]: + return self.accesses[data_ptr].allocation_stack_trace + + def get_write(self, data_ptr: DataPtr) -> Optional[Access]: + return self.accesses[data_ptr].write + + def get_reads(self, data_ptr: DataPtr) -> List[Access]: + return self.accesses[data_ptr].reads + + def add_read(self, data_ptr: DataPtr, access: Access) -> None: + self.accesses[data_ptr].reads.append(access) + + def set_write(self, data_ptr: DataPtr, access: Access) -> None: + self.accesses[data_ptr].write = access + self.accesses[data_ptr].reads = [] + + +class StreamSynchronizations: + def __init__(self): + self.current_sync_states: Dict[StreamId, Dict[StreamId, SeqNum]] = {} + self.recorded_sync_states: Dict[EventId, Dict[StreamId, SeqNum]] = {} + self.host_sync_state: Dict[StreamId, SeqNum] = {} + self.create_stream(DEFAULT_STREAM_ID) + + def _ensure_stream_exists(self, stream: StreamId) -> None: + if stream not in self.current_sync_states: + logger.info( + "Found Stream with id: %s, but no matching stream " + "creation in the trace. Backfilling the trace now. " + "Perhaps the sanitizer was enabled after some torch operations?", + stream, + ) + self.create_stream(stream) + + def _ensure_event_exists(self, event: EventId) -> None: + if event not in self.recorded_sync_states: + logger.info( + "Found Event with id: %s, but no matching event " + "creation in the trace. Backfilling the trace now. " + "Perhaps the sanitizer was enabled after some torch operations?", + event, + ) + self.create_event(event) + + def _ensure_event_does_not_exist(self, event: EventId) -> None: + if event in self.recorded_sync_states: + logger.info( + "Found duplicate event creation in the trace for event with " + "id: %s. Assuming the trace for event deletion wasn't caught " + "and backfilling it now. 
" + "Perhaps the sanitizer was enabled after some torch operations?", + event, + ) + self.delete_event(event) + + def create_stream(self, stream: StreamId) -> None: + if stream in self.current_sync_states: + logger.info( + "Found duplicate Stream creation in the trace for Stream with " + "id: %s. PyTorch Streams are only created once, so this " + "trace entry is ignored.", + stream, + ) + else: + self.host_sync_state[stream] = 0 + self.current_sync_states[stream] = self.host_sync_state.copy() + + def create_event(self, event: EventId) -> None: + self._ensure_event_does_not_exist(event) + self.recorded_sync_states[event] = {} + + def delete_event(self, event: EventId) -> None: + self._ensure_event_exists(event) + del self.recorded_sync_states[event] + + def update_seq_num(self, stream: StreamId, seq_num: SeqNum) -> None: + self._ensure_stream_exists(stream) + self.current_sync_states[stream][stream] = seq_num + + def record_state(self, event: EventId, stream: StreamId) -> None: + self._ensure_event_exists(event) + self._ensure_stream_exists(stream) + self.recorded_sync_states[event] = self.current_sync_states[stream].copy() + + def _state_wait_for_other( + self, state: Dict[StreamId, SeqNum], other: Dict[StreamId, SeqNum] + ) -> None: + for stream, seq_num in other.items(): + state[stream] = max(state.get(stream, -1), seq_num) + + def stream_wait_for_event(self, stream: StreamId, event: EventId) -> None: + self._ensure_stream_exists(stream) + self._ensure_event_exists(event) + self._state_wait_for_other( + self.current_sync_states[stream], self.recorded_sync_states[event] + ) + + def all_streams_wait_for_event(self, event: EventId) -> None: + self._ensure_event_exists(event) + for stream in self.current_sync_states.keys(): + self.stream_wait_for_event(stream, event) + + self._state_wait_for_other( + self.host_sync_state, self.recorded_sync_states[event] + ) + + def all_streams_wait_for_stream(self, stream: StreamId) -> None: + self._ensure_stream_exists(stream) + for state in self.current_sync_states.values(): + self._state_wait_for_other(state, self.current_sync_states[stream]) + + self._state_wait_for_other( + self.host_sync_state, self.current_sync_states[stream] + ) + + def sync_all_streams(self) -> None: + for stream, state in self.current_sync_states.items(): + self.host_sync_state[stream] = state[stream] + + for state in self.current_sync_states.values(): + self._state_wait_for_other(state, self.host_sync_state) + + def is_ordered_after( + self, current_stream: StreamId, seq_num: SeqNum, other_stream: StreamId + ) -> bool: + self._ensure_stream_exists(current_stream) + self._ensure_stream_exists(other_stream) + return seq_num <= self.current_sync_states[current_stream].get(other_stream, -1) + + +class EventHandler: + """Analyzes CSAN trace for synchronization errors. + + Stores information on each stream's synchronizations with other streams as well + as tensor accesses to determine whether a given kernel launch might cause a + data race. 
+ """ + + def __init__(self): + self.tensors_accessed = _TensorsAccessed() + self.syncs = StreamSynchronizations() + self.seq_num: SeqNum = 0 + + def _handle_kernel_launch( + self, + stream: StreamId, + read_only: Set[DataPtr], + read_write: Set[DataPtr], + outputs: Set[DataPtr], + operator: str, + tensor_aliases: Dict[int, List[str]], + ) -> List[SynchronizationError]: + def check_conflict( + data_ptr: DataPtr, current_access: Access, previous_access: Optional[Access] + ) -> None: + if previous_access is None: + return + if not self.syncs.is_ordered_after( + current_access.stream, previous_access.seq_num, previous_access.stream + ): + error_list.append( + UnsynchronizedAccessError( + data_ptr, + self.tensors_accessed.get_allocation_stack_trace(data_ptr), + current_access, + previous_access, + ) + ) + + error_list: List[SynchronizationError] = [] + self.seq_num += 1 + self.syncs.update_seq_num(stream, self.seq_num) + stack_trace = traceback.StackSummary.extract( + traceback.walk_stack(inspect.currentframe()), lookup_lines=False + ) + # The stack trace generated in this way is in the inverse order, so it must be + # reversed. + stack_trace.reverse() + + for data_ptr in read_only: + self.tensors_accessed.ensure_tensor_exists(data_ptr) + current_access = Access( + AccessType.READ, + self.seq_num, + stream, + operator, + tensor_aliases[data_ptr], + data_ptr in outputs, + stack_trace, + ) + check_conflict( + data_ptr, current_access, self.tensors_accessed.get_write(data_ptr) + ) + self.tensors_accessed.add_read(data_ptr, current_access) + + for data_ptr in read_write: + self.tensors_accessed.ensure_tensor_exists(data_ptr) + current_access = Access( + AccessType.WRITE, + self.seq_num, + stream, + operator, + tensor_aliases[data_ptr], + data_ptr in outputs, + stack_trace, + ) + if self.tensors_accessed.were_there_reads_since_last_write(data_ptr): + for previous_access in self.tensors_accessed.get_reads(data_ptr): + check_conflict(data_ptr, current_access, previous_access) + else: + check_conflict( + data_ptr, current_access, self.tensors_accessed.get_write(data_ptr) + ) + self.tensors_accessed.set_write(data_ptr, current_access) + + return error_list + + def _handle_event_creation(self, event: EventId) -> None: + self.syncs.create_event(event) + + def _handle_event_deletion(self, event: EventId) -> None: + self.syncs.delete_event(event) + + def _handle_event_record(self, event: EventId, stream: StreamId) -> None: + self.syncs.record_state(event, stream) + + def _handle_event_wait(self, event: EventId, stream: StreamId) -> None: + self.syncs.stream_wait_for_event(stream, event) + + def _handle_memory_allocation(self, data_ptr: DataPtr) -> None: + self.tensors_accessed.ensure_tensor_does_not_exist(data_ptr) + stack_trace = traceback.StackSummary.extract( + traceback.walk_stack(inspect.currentframe()), lookup_lines=False + ) + # The stack trace generated in this way is in the inverse order, so it must be + # reversed. 
+ stack_trace.reverse() + self.tensors_accessed.create_tensor( + data_ptr, + stack_trace, + ) + + def _handle_memory_deallocation(self, data_ptr: DataPtr) -> None: + self.tensors_accessed.ensure_tensor_exists(data_ptr) + self.tensors_accessed.delete_tensor(data_ptr) + + def _handle_stream_creation(self, stream: StreamId) -> None: + self.syncs.create_stream(stream) + + def _handle_device_synchronization(self) -> None: + self.syncs.sync_all_streams() + + def _handle_stream_synchronization(self, stream: StreamId) -> None: + self.syncs.all_streams_wait_for_stream(stream) + + def _handle_event_synchronization(self, event: EventId) -> None: + self.syncs.all_streams_wait_for_event(event) + + +def zip_by_key(a: Dict[TK, TVa], b: Dict[TK, TVb]) -> Iterator[Tuple[TK, TVa, TVb]]: + for arg, value in a.items(): + if arg in b: + yield arg, value, b[arg] + + +def zip_arguments( + schema: torch.FunctionSchema, args: Tuple[Any, ...], kwargs: Dict[str, Any] +) -> Iterator[Tuple[torch.Argument, Any]]: + schema_args = schema.arguments[: len(args)] + schema_kwargs = {arg.name: arg for arg in schema.arguments[len(args) :]} + + yield from zip(schema_args, args) + + for _, argument, value in zip_by_key(schema_kwargs, kwargs): + yield (argument, value) + + +class ArgumentHandler: + def __init__(self): + self.dataptrs_read: Set[DataPtr] = set() + self.dataptrs_written: Set[DataPtr] = set() + self.tensor_aliases: Dict[DataPtr, List[str]] = dict() + self.outputs: Set[DataPtr] = set() + + def _handle_argument( + self, + value: Any, + is_write: bool, + name: Optional[str] = None, + is_output: bool = False, + ) -> None: + if isinstance(value, torch.Tensor) and value.is_cuda: + data_ptr = value.data_ptr() + if is_write: + self.dataptrs_written.add(data_ptr) + else: + self.dataptrs_read.add(data_ptr) + + self.tensor_aliases.setdefault(data_ptr, []) + if name is not None: + self.tensor_aliases[data_ptr].append(name) + if is_output: + self.outputs.add(data_ptr) + + def parse_inputs( + self, + schema: torch.FunctionSchema, + args: Tuple[Any, ...], + kwargs: Dict[str, Any], + ) -> None: + for argument, value in zip_arguments(schema, args, kwargs): + is_write = argument.alias_info is not None and argument.alias_info.is_write + pytree.tree_map_( + functools.partial( + self._handle_argument, is_write=is_write, name=argument.name + ), + value, + ) + + def parse_outputs(self, outputs: Any) -> None: + pytree.tree_map_( + functools.partial(self._handle_argument, is_write=True, is_output=True), + outputs, + ) + + +class CUDASanitizerDispatchMode(TorchDispatchMode): + def __init__(self): + self.event_handler = EventHandler() + torch._C._activate_cuda_trace() + cuda_trace.register_callback_for_cuda_event_creation( + self.event_handler._handle_event_creation + ) + cuda_trace.register_callback_for_cuda_event_deletion( + self.event_handler._handle_event_deletion + ) + cuda_trace.register_callback_for_cuda_event_record( + self.event_handler._handle_event_record + ) + cuda_trace.register_callback_for_cuda_event_wait( + self.event_handler._handle_event_wait + ) + cuda_trace.register_callback_for_cuda_memory_allocation( + self.event_handler._handle_memory_allocation + ) + cuda_trace.register_callback_for_cuda_memory_deallocation( + self.event_handler._handle_memory_deallocation + ) + cuda_trace.register_callback_for_cuda_stream_creation( + self.event_handler._handle_stream_creation + ) + cuda_trace.register_callback_for_cuda_device_synchronization( + self.event_handler._handle_device_synchronization + ) + 
cuda_trace.register_callback_for_cuda_stream_synchronization( + self.event_handler._handle_stream_synchronization + ) + cuda_trace.register_callback_for_cuda_event_synchronization( + self.event_handler._handle_event_synchronization + ) + + def __torch_dispatch__(self, func, types, args=(), kwargs=None): + if kwargs is None: + kwargs = {} + + argument_handler = ArgumentHandler() + argument_handler.parse_inputs(func._schema, args, kwargs) + + outputs = func(*args, **kwargs) + + argument_handler.parse_outputs(outputs) + errors = self.event_handler._handle_kernel_launch( + torch.cuda.current_stream().cuda_stream, + argument_handler.dataptrs_read - argument_handler.dataptrs_written, + argument_handler.dataptrs_written, + argument_handler.outputs, + func._schema, + argument_handler.tensor_aliases, + ) + if errors: + for error in errors: + print(error, file=sys.stderr) + raise CUDASanitizerErrors(errors) + + return outputs + + +class CUDASanitizer: + """Manages the lifetime of a CUDASanitizer dispatch mode object. + + The CUDASanitizer class wraps the entering/exiting functions of the dispatch mode + context manager in the enable function/destructor, respectively. This is to + explicitly set the lifetime of the dispatch mode object to that of the application. + This approach was deemed more elegant than using the atexit module. + """ + + def __init__(self): + self.dispatch = CUDASanitizerDispatchMode() + self.enabled = False + + def enable(self): + self.dispatch.__enter__() + self.enabled = True + + def __del__(self): + if self.enabled: + self.dispatch.__exit__(None, None, None) + + +def enable_cuda_sanitizer(): + """Enable CUDA Sanitizer. + + The sanitizer will begin to analyze low-level CUDA calls invoked by torch functions + for synchronization errors. All data races found will be printed to the standard + error output along with stack traces of suspected causes. For best results, the + sanitizer should be enabled at the very beginning of the program. + """ + cuda_sanitizer.enable() + + +cuda_sanitizer = CUDASanitizer() diff --git a/env-llmeval/lib/python3.10/site-packages/torch/cuda/_utils.py b/env-llmeval/lib/python3.10/site-packages/torch/cuda/_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..1794ca9ddd1fd67c18c391312613ede4638c91bb --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/cuda/_utils.py @@ -0,0 +1,54 @@ +from typing import Any + +import torch + +# The _get_device_index has been moved to torch.utils._get_device_index +from torch._utils import _get_device_index as _torch_get_device_index + + +def _get_device_index( + device: Any, optional: bool = False, allow_cpu: bool = False +) -> int: + r"""Get the device index from :attr:`device`, which can be a torch.device object, a Python integer, or ``None``. + + If :attr:`device` is a torch.device object, returns the device index if it + is a CUDA device. Note that for a CUDA device without a specified index, + i.e., ``torch.device('cuda')``, this will return the current default CUDA + device if :attr:`optional` is ``True``. If :attr:`allow_cpu` is ``True``, + CPU devices will be accepted and ``-1`` will be returned in this case. + + If :attr:`device` is a Python integer, it is returned as is. + + If :attr:`device` is ``None``, this will return the current default CUDA + device if :attr:`optional` is ``True``. 
+ """ + if isinstance(device, int): + return device + if isinstance(device, str): + device = torch.device(device) + if isinstance(device, torch.device): + if allow_cpu: + if device.type not in ["cuda", "cpu"]: + raise ValueError(f"Expected a cuda or cpu device, but got: {device}") + elif device.type != "cuda": + raise ValueError(f"Expected a cuda device, but got: {device}") + if not torch.jit.is_scripting(): + if isinstance(device, torch.cuda.device): + return device.idx + return _torch_get_device_index(device, optional, allow_cpu) + + +def _dummy_type(name: str) -> type: + def get_err_fn(is_init: bool): + def err_fn(obj, *args, **kwargs): + if is_init: + class_name = obj.__class__.__name__ + else: + class_name = obj.__name__ + raise RuntimeError(f"Tried to instantiate dummy base class {class_name}") + + return err_fn + + return type( + name, (object,), {"__init__": get_err_fn(True), "__new__": get_err_fn(False)} + ) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/cuda/amp/__init__.py b/env-llmeval/lib/python3.10/site-packages/torch/cuda/amp/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..867637eed07097d1e795e502c4eb460abaf1ae24 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/cuda/amp/__init__.py @@ -0,0 +1,9 @@ +from .autocast_mode import autocast, custom_bwd, custom_fwd +from .grad_scaler import GradScaler + +__all__ = [ + "autocast", + "custom_bwd", + "custom_fwd", + "GradScaler", +] diff --git a/env-llmeval/lib/python3.10/site-packages/torch/cuda/amp/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/cuda/amp/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..699ec8c997cf9765cebf09e92bf89efc1f8f2132 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/cuda/amp/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/cuda/amp/__pycache__/autocast_mode.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/cuda/amp/__pycache__/autocast_mode.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a54e0e8abdb19d13e26330366acea4e5a3142751 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/cuda/amp/__pycache__/autocast_mode.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/cuda/amp/__pycache__/common.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/cuda/amp/__pycache__/common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bb620efc23c3c1d628cd4a4951090275bb9d9ff8 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/cuda/amp/__pycache__/common.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/cuda/amp/__pycache__/grad_scaler.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/torch/cuda/amp/__pycache__/grad_scaler.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6bea28db51f785a00ef07dca6379211886cc7552 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/torch/cuda/amp/__pycache__/grad_scaler.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/torch/cuda/amp/autocast_mode.py b/env-llmeval/lib/python3.10/site-packages/torch/cuda/amp/autocast_mode.py new file mode 100644 index 0000000000000000000000000000000000000000..88ff04d86648806a21b180ae79e6a58bf5b22685 --- /dev/null +++ 
b/env-llmeval/lib/python3.10/site-packages/torch/cuda/amp/autocast_mode.py @@ -0,0 +1,144 @@ +import collections +import functools + +import torch + +try: + import numpy as np + + HAS_NUMPY = True +except ModuleNotFoundError: + np = None # type: ignore[assignment] +from typing import Any + +__all__ = ["autocast", "custom_fwd", "custom_bwd"] + + +class autocast(torch.amp.autocast_mode.autocast): + r"""See :class:`torch.autocast`. + + ``torch.cuda.amp.autocast(args...)`` is equivalent to ``torch.autocast("cuda", args...)`` + """ + + def __init__( + self, + enabled: bool = True, + dtype: torch.dtype = torch.float16, + cache_enabled: bool = True, + ): + if torch._jit_internal.is_scripting(): + self._enabled = enabled + self.device = "cuda" + self.fast_dtype = dtype + return + super().__init__( + "cuda", enabled=enabled, dtype=dtype, cache_enabled=cache_enabled + ) + + def __enter__(self): + if torch._jit_internal.is_scripting(): + return self + return super().__enter__() + + # TODO: discuss a unified TorchScript-friendly API for autocast + def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any): # type: ignore[override] + if torch._jit_internal.is_scripting(): + return + return super().__exit__(exc_type, exc_val, exc_tb) + + def __call__(self, func): + if torch._jit_internal.is_scripting(): + return func + return super().__call__(func) + + +# Casts Tensors and containers of Tensors. Special-cases passthroughs for strings and np.ndarrays, which +# may be falsely detected as "Iterables." +def _cast(value, dtype): + if isinstance(value, torch.Tensor): + is_eligible = ( + value.is_floating_point() + and value.is_cuda + and (value.dtype is not torch.float64) + ) + return value.to(dtype) if is_eligible else value + elif isinstance(value, (str, bytes)): + return value + elif HAS_NUMPY and isinstance(value, np.ndarray): + return value + elif isinstance(value, collections.abc.Mapping): + return {_cast(k, dtype): _cast(v, dtype) for k, v in value.items()} + elif isinstance(value, collections.abc.Iterable): + iterable = (_cast(v, dtype) for v in value) + if isinstance(value, (list, tuple)): + return type(value)(iterable) + else: + return iterable + else: + return value + + +# custom_fwd is a decorator that may or may not be used with arguments, following +# https://github.com/dabeaz/python-cookbook/tree/master/src/9/defining_a_decorator_that_takes_an_optional_argument. +# this works: +# @custom_fwd +# def forward(...): +# this also works: +# @custom_fwd(cast_inputs=torch.float) +# def forward(...): +def custom_fwd(fwd=None, *, cast_inputs=None): + """ + Create a helper decorator for ``forward`` methods of custom autograd functions. + + Autograd functions are subclasses of :class:`torch.autograd.Function`. + See the :ref:`example page` for more detail. + + Args: + cast_inputs (:class:`torch.dtype` or None, optional, default=None): If not ``None``, + when ``forward`` runs in an autocast-enabled region, casts incoming + floating-point CUDA Tensors to the target dtype (non-floating-point Tensors are not affected), + then executes ``forward`` with autocast disabled. + If ``None``, ``forward``'s internal ops execute with the current autocast state. + + .. note:: + If the decorated ``forward`` is called outside an autocast-enabled region, + :func:`custom_fwd` is a no-op and ``cast_inputs`` has no effect. 
+ """ + if fwd is None: + return functools.partial(custom_fwd, cast_inputs=cast_inputs) + + @functools.wraps(fwd) + def decorate_fwd(*args, **kwargs): + args[0]._dtype = torch.get_autocast_gpu_dtype() + if cast_inputs is None: + args[0]._fwd_used_autocast = torch.is_autocast_enabled() + return fwd(*args, **kwargs) + else: + autocast_context = torch.is_autocast_enabled() + args[0]._fwd_used_autocast = False + if autocast_context: + with autocast(enabled=False): + return fwd(*_cast(args, cast_inputs), **_cast(kwargs, cast_inputs)) + else: + return fwd(*args, **kwargs) + + return decorate_fwd + + +# Autograd ensures incoming gradients are the same type as forward outputs. Allowing a separate +# cast_inputs argument on custom_bwd is unnecessary and could cause errors if it doesn't match +# cast_inputs supplied to custom_fwd. +def custom_bwd(bwd): + """Create a helper decorator for backward methods of custom autograd functions. + + Autograd functions are subclasses of :class:`torch.autograd.Function`. + Ensures that ``backward`` executes with the same autocast state as ``forward``. + See the :ref:`example page` for more detail. + """ + + @functools.wraps(bwd) + def decorate_bwd(*args, **kwargs): + with autocast(enabled=args[0]._fwd_used_autocast, dtype=args[0]._dtype): + return bwd(*args, **kwargs) + + return decorate_bwd diff --git a/env-llmeval/lib/python3.10/site-packages/torch/cuda/amp/common.py b/env-llmeval/lib/python3.10/site-packages/torch/cuda/amp/common.py new file mode 100644 index 0000000000000000000000000000000000000000..c4e8c1cc99b00d63672e12f2908a82c899076306 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/cuda/amp/common.py @@ -0,0 +1,9 @@ +from importlib.util import find_spec + +import torch + +__all__ = ["amp_definitely_not_available"] + + +def amp_definitely_not_available(): + return not (torch.cuda.is_available() or find_spec("torch_xla")) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/cuda/amp/grad_scaler.py b/env-llmeval/lib/python3.10/site-packages/torch/cuda/amp/grad_scaler.py new file mode 100644 index 0000000000000000000000000000000000000000..704ffa8d85100ff4f0287166b1d18caeeb8e2dce --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/cuda/amp/grad_scaler.py @@ -0,0 +1,679 @@ +from __future__ import annotations + +import inspect +import warnings +from collections import abc, defaultdict +from enum import Enum +from typing import Any, cast, Dict, Iterable, List, Optional, overload, Tuple, Union + +import torch +from .common import amp_definitely_not_available + + +__all__ = ["OptState", "GradScaler"] + + +class _MultiDeviceReplicator: + """Lazily serves copies of a tensor to requested devices. + + Copies are cached per-device. + """ + + def __init__(self, master_tensor: torch.Tensor) -> None: + assert master_tensor.is_cuda or master_tensor.device.type == "xla" + self.master = master_tensor + self._per_device_tensors: Dict[torch.device, torch.Tensor] = {} + + def get(self, device: torch.device) -> torch.Tensor: + retval = self._per_device_tensors.get(device, None) + if retval is None: + retval = self.master.to(device=device, non_blocking=True, copy=True) + self._per_device_tensors[device] = retval + return retval + + +# Defines default_factory for GradScaler's _per_optimizer_states defaultdict, +# as well as associated "enum" values. Prefers defining these at top level because +# - Lambdas can't be pickled, so we don't want to supply a lambda as the factory. 
+# - Defining READY, UNSCALED, STEPPED and _refresh_per_optimizer_state within GradScaler +# causes a circular reference, which we'd rather avoid. +class OptState(Enum): + READY = 0 + UNSCALED = 1 + STEPPED = 2 + + +def _refresh_per_optimizer_state() -> Dict[str, Any]: + return {"stage": OptState.READY, "found_inf_per_device": {}} + + +class GradScaler: + """An instance ``scaler`` of :class:`GradScaler`. + + Helps perform the steps of gradient scaling + conveniently. + + * ``scaler.scale(loss)`` multiplies a given loss by ``scaler``'s current scale factor. + * ``scaler.step(optimizer)`` safely unscales gradients and calls ``optimizer.step()``. + * ``scaler.update()`` updates ``scaler``'s scale factor. + + Example:: + + # Creates a GradScaler once at the beginning of training. + scaler = GradScaler() + + for epoch in epochs: + for input, target in data: + optimizer.zero_grad() + output = model(input) + loss = loss_fn(output, target) + + # Scales loss. Calls backward() on scaled loss to create scaled gradients. + scaler.scale(loss).backward() + + # scaler.step() first unscales gradients of the optimizer's params. + # If gradients don't contain infs/NaNs, optimizer.step() is then called, + # otherwise, optimizer.step() is skipped. + scaler.step(optimizer) + + # Updates the scale for next iteration. + scaler.update() + + See the :ref:`Automatic Mixed Precision examples` for usage + (along with autocasting) in more complex cases like gradient clipping, gradient accumulation, gradient penalty, + and multiple losses/optimizers. + + ``scaler`` dynamically estimates the scale factor each iteration. To minimize gradient underflow, + a large scale factor should be used. However, ``float16`` values can "overflow" (become inf or NaN) if + the scale factor is too large. Therefore, the optimal scale factor is the largest factor that can be used + without incurring inf or NaN gradient values. + ``scaler`` approximates the optimal scale factor over time by checking the gradients for infs and NaNs during every + ``scaler.step(optimizer)`` (or optional separate ``scaler.unscale_(optimizer)``, see :meth:`unscale_`). + + * If infs/NaNs are found, ``scaler.step(optimizer)`` skips the underlying ``optimizer.step()`` (so the params + themselves remain uncorrupted) and ``update()`` multiplies the scale by ``backoff_factor``. + + * If no infs/NaNs are found, ``scaler.step(optimizer)`` runs the underlying ``optimizer.step()`` as usual. + If ``growth_interval`` unskipped iterations occur consecutively, ``update()`` multiplies the scale by + ``growth_factor``. + + The scale factor often causes infs/NaNs to appear in gradients for the first few iterations as its + value calibrates. ``scaler.step`` will skip the underlying ``optimizer.step()`` for these + iterations. After that, step skipping should occur rarely (once every few hundred or thousand iterations). + + Args: + init_scale (float, optional, default=2.**16): Initial scale factor. + growth_factor (float, optional, default=2.0): Factor by which the scale is multiplied during + :meth:`update` if no inf/NaN gradients occur for ``growth_interval`` consecutive iterations. + backoff_factor (float, optional, default=0.5): Factor by which the scale is multiplied during + :meth:`update` if inf/NaN gradients occur in an iteration. + growth_interval (int, optional, default=2000): Number of consecutive iterations without inf/NaN gradients + that must occur for the scale to be multiplied by ``growth_factor``. 
+ enabled (bool, optional): If ``False``, disables gradient scaling. :meth:`step` simply + invokes the underlying ``optimizer.step()``, and other methods become no-ops. + Default: ``True`` + """ + + def __init__( + self, + init_scale: float = 2.0**16, + growth_factor: float = 2.0, + backoff_factor: float = 0.5, + growth_interval: int = 2000, + enabled: bool = True, + ) -> None: + if enabled and amp_definitely_not_available(): + warnings.warn( + "torch.cuda.amp.GradScaler is enabled, but CUDA is not available. Disabling." + ) + self._enabled = False + else: + self._enabled = enabled + + if self._enabled: + assert growth_factor > 1.0, "The growth factor must be > 1.0." + assert backoff_factor < 1.0, "The backoff factor must be < 1.0." + + self._init_scale = init_scale + # self._scale will be lazily initialized during the first call to scale() + self._scale: Optional[torch.Tensor] = None + self._growth_factor = growth_factor + self._backoff_factor = backoff_factor + self._growth_interval = growth_interval + self._init_growth_tracker = 0 + # self._growth_tracker will be lazily initialized during the first call to scale() + self._growth_tracker: Optional[torch.Tensor] = None + self._per_optimizer_states: Dict[int, Dict[str, Any]] = defaultdict( + _refresh_per_optimizer_state + ) + + def _check_scale_growth_tracker( + self, funcname: str + ) -> Tuple[torch.Tensor, torch.Tensor]: + fix = "This may indicate your script did not use scaler.scale(loss or outputs) earlier in the iteration." + assert self._scale is not None, ( + f"Attempted {funcname} but _scale is None. " + fix + ) + assert self._growth_tracker is not None, ( + f"Attempted {funcname} but _growth_tracker is None. " + fix + ) + return (self._scale, self._growth_tracker) + + def _lazy_init_scale_growth_tracker(self, dev: torch.device) -> None: + assert self._growth_tracker is None, "_growth_tracker initialized before _scale" + self._scale = torch.full((), self._init_scale, dtype=torch.float32, device=dev) + self._growth_tracker = torch.full( + (), self._init_growth_tracker, dtype=torch.int32, device=dev + ) + + @overload + def scale(self, outputs: torch.Tensor) -> torch.Tensor: + ... + + @overload + def scale(self, outputs: List[torch.Tensor]) -> List[torch.Tensor]: + ... + + @overload + def scale(self, outputs: Tuple[torch.Tensor, ...]) -> Tuple[torch.Tensor, ...]: + ... + + @overload + def scale(self, outputs: Iterable[torch.Tensor]) -> Iterable[torch.Tensor]: + ... + + def scale( + self, + outputs: Union[torch.Tensor, Iterable[torch.Tensor]], + ) -> Union[torch.Tensor, Iterable[torch.Tensor]]: + """ + Multiplies ('scales') a tensor or list of tensors by the scale factor. + + Returns scaled outputs. If this instance of :class:`GradScaler` is not enabled, outputs are returned + unmodified. + + Args: + outputs (Tensor or iterable of Tensors): Outputs to scale. + """ + if not self._enabled: + return outputs + + # Short-circuit for the common case. + if isinstance(outputs, torch.Tensor): + assert outputs.is_cuda or outputs.device.type == "xla" + if self._scale is None: + self._lazy_init_scale_growth_tracker(outputs.device) + assert self._scale is not None + return outputs * self._scale.to(device=outputs.device, non_blocking=True) + + # Invoke the more complex machinery only if we're treating multiple outputs. 
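# --- Illustrative usage sketch (not part of the patch above) ---
# The iterable branch below lets callers scale several losses in one call,
# e.g. with hypothetical loss tensors loss_a and loss_b:
#     scaler = torch.cuda.amp.GradScaler()
#     scaled_a, scaled_b = scaler.scale([loss_a, loss_b])
#     (scaled_a + scaled_b).backward()
# Each tensor is multiplied by the same scale; _MultiDeviceReplicator caches
# one copy of the scale per device so multi-GPU outputs trigger at most one
# host-to-device copy per device.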
+ stash: List[ + _MultiDeviceReplicator + ] = [] # holds a reference that can be overwritten by apply_scale + + def apply_scale(val: Union[torch.Tensor, Iterable[torch.Tensor]]): + if isinstance(val, torch.Tensor): + assert val.is_cuda or val.device.type == "xla" + if len(stash) == 0: + if self._scale is None: + self._lazy_init_scale_growth_tracker(val.device) + assert self._scale is not None + stash.append(_MultiDeviceReplicator(self._scale)) + return val * stash[0].get(val.device) + if isinstance(val, abc.Iterable): + iterable = map(apply_scale, val) + if isinstance(val, (list, tuple)): + return type(val)(iterable) + return iterable + raise ValueError("outputs must be a Tensor or an iterable of Tensors") + + return apply_scale(outputs) + + def _unscale_grads_( + self, + optimizer: torch.optim.Optimizer, + inv_scale: torch.Tensor, + found_inf: torch.Tensor, + allow_fp16: bool, + ) -> Dict[torch.device, torch.Tensor]: + per_device_inv_scale = _MultiDeviceReplicator(inv_scale) + per_device_found_inf = _MultiDeviceReplicator(found_inf) + + # To set up _amp_foreach_non_finite_check_and_unscale_, split grads by device and dtype. + # There could be hundreds of grads, so we'd like to iterate through them just once. + # However, we don't know their devices or dtypes in advance. + + # https://stackoverflow.com/questions/5029934/defaultdict-of-defaultdict + # Google says mypy struggles with defaultdicts type annotations. + per_device_and_dtype_grads: Dict[ + torch.device, Dict[torch.dtype, List[torch.Tensor]] + ] = defaultdict(lambda: defaultdict(list)) + with torch.no_grad(): + for group in optimizer.param_groups: + for param in group["params"]: + assert isinstance(param, torch.Tensor) + if param.grad is None: + continue + if (not allow_fp16) and param.grad.dtype == torch.float16: + raise ValueError("Attempting to unscale FP16 gradients.") + if param.grad.is_sparse: + # is_coalesced() == False means the sparse grad has values with duplicate indices. + # coalesce() deduplicates indices and adds all values that have the same index. + # For scaled fp16 values, there's a good chance coalescing will cause overflow, + # so we should check the coalesced _values(). + if param.grad.dtype is torch.float16: + param.grad = param.grad.coalesce() + to_unscale = param.grad._values() + else: + to_unscale = param.grad + + # TODO: is there a way to split by device and dtype without appending in the inner loop? + per_device_and_dtype_grads[to_unscale.device][ + to_unscale.dtype + ].append(to_unscale) + + for device, per_dtype_grads in per_device_and_dtype_grads.items(): + for grads in per_dtype_grads.values(): + torch._amp_foreach_non_finite_check_and_unscale_( + grads, + per_device_found_inf.get(device), + per_device_inv_scale.get(device), + ) + + return per_device_found_inf._per_device_tensors + + def unscale_(self, optimizer: torch.optim.Optimizer) -> None: + """ + Divides ("unscales") the optimizer's gradient tensors by the scale factor. + + :meth:`unscale_` is optional, serving cases where you need to + :ref:`modify or inspect gradients` + between the backward pass(es) and :meth:`step`. + If :meth:`unscale_` is not called explicitly, gradients will be unscaled automatically during :meth:`step`. + + Simple example, using :meth:`unscale_` to enable clipping of unscaled gradients:: + + ... 
+ scaler.scale(loss).backward() + scaler.unscale_(optimizer) + torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm) + scaler.step(optimizer) + scaler.update() + + Args: + optimizer (torch.optim.Optimizer): Optimizer that owns the gradients to be unscaled. + + .. note:: + :meth:`unscale_` does not incur a CPU-GPU sync. + + .. warning:: + :meth:`unscale_` should only be called once per optimizer per :meth:`step` call, + and only after all gradients for that optimizer's assigned parameters have been accumulated. + Calling :meth:`unscale_` twice for a given optimizer between each :meth:`step` triggers a RuntimeError. + + .. warning:: + :meth:`unscale_` may unscale sparse gradients out of place, replacing the ``.grad`` attribute. + """ + if not self._enabled: + return + + self._check_scale_growth_tracker("unscale_") + + optimizer_state = self._per_optimizer_states[id(optimizer)] + + if optimizer_state["stage"] is OptState.UNSCALED: + raise RuntimeError( + "unscale_() has already been called on this optimizer since the last update()." + ) + elif optimizer_state["stage"] is OptState.STEPPED: + raise RuntimeError("unscale_() is being called after step().") + + # FP32 division can be imprecise for certain compile options, so we carry out the reciprocal in FP64. + assert self._scale is not None + inv_scale = self._scale.double().reciprocal().float() + found_inf = torch.full((), 0.0, dtype=torch.float32, device=self._scale.device) + + optimizer_state["found_inf_per_device"] = self._unscale_grads_( + optimizer, inv_scale, found_inf, False + ) + optimizer_state["stage"] = OptState.UNSCALED + + def _maybe_opt_step( + self, + optimizer: torch.optim.Optimizer, + optimizer_state: Dict[str, Any], + *args: Any, + **kwargs: Any, + ) -> Optional[float]: + retval: Optional[float] = None + if not sum(v.item() for v in optimizer_state["found_inf_per_device"].values()): + retval = optimizer.step(*args, **kwargs) + return retval + + def step( + self, optimizer: torch.optim.Optimizer, *args: Any, **kwargs: Any + ) -> Optional[float]: + """Invoke ``unscale_(optimizer)`` followed by parameter update, if gradients are not infs/NaN. + + :meth:`step` carries out the following two operations: + + 1. Internally invokes ``unscale_(optimizer)`` (unless :meth:`unscale_` was explicitly called for ``optimizer`` + earlier in the iteration). As part of the :meth:`unscale_`, gradients are checked for infs/NaNs. + 2. If no inf/NaN gradients are found, invokes ``optimizer.step()`` using the unscaled + gradients. Otherwise, ``optimizer.step()`` is skipped to avoid corrupting the params. + + ``*args`` and ``**kwargs`` are forwarded to ``optimizer.step()``. + + Returns the return value of ``optimizer.step(*args, **kwargs)``. + + Args: + optimizer (torch.optim.Optimizer): Optimizer that applies the gradients. + args: Any arguments. + kwargs: Any keyword arguments. + + .. warning:: + Closure use is not currently supported. + """ + if not self._enabled: + return optimizer.step(*args, **kwargs) + + if "closure" in kwargs: + raise RuntimeError( + "Closure use is not currently supported if GradScaler is enabled." + ) + + self._check_scale_growth_tracker("step") + + optimizer_state = self._per_optimizer_states[id(optimizer)] + + if optimizer_state["stage"] is OptState.STEPPED: + raise RuntimeError( + "step() has already been called since the last update()." 
+ ) + + retval: Optional[float] = None + + if getattr(optimizer, "_step_supports_amp_scaling", False): + # This optimizer has customized scale-handling logic, so we can call optimizer.step() directly. + # The contract with custom optimizers is that their step() should accept an additional, + # optional grad_scaler kwarg. We append self to the kwargs so the custom optimizer has full information: + # it can query its own state, invoke unscale_ on itself, etc + # The contract above is being deprecated to avoid introducing `grad_scaler: GradScaler` argument + # to `Optimizer.step`. The new behavior is going to add two Tensor attributes of `grad_scale` + # and `found_inf` to the passed optimizer so that the optimizer can utilize those + # to skip the parameter updates or unscale gradients before updating parameters in + # the fused kernel, e.g. `FusedAdamMathFunctor`. + # In this behavior, `GradScaler._check_inf_per_device` is called if `OptState.READY`, + # while the method is expected to be called by users side, i.e. their optimizers. + kwargs_ = kwargs + has_grad_scaler_kwarg = ( + "grad_scaler" in inspect.signature(optimizer.step).parameters + ) + if has_grad_scaler_kwarg: + warnings.warn( + "GradScaler is going to stop passing itself as a keyword argument to the passed " + "optimizer. In the near future GradScaler registers `grad_scale: Tensor` and " + "`found_inf: Tensor` to the passed optimizer and let the optimizer use them directly.", + FutureWarning, + ) + kwargs_.update({"grad_scaler": self}) + else: + if optimizer_state["stage"] is OptState.READY: + self._check_inf_per_device(optimizer) + scaler = self._get_scale_async() + assert scaler is not None + found_inf = cast( + torch.Tensor, + sum( + [ + t.to(scaler.device, non_blocking=True) + for t in optimizer_state["found_inf_per_device"].values() + ] + ), + ) + optimizer.grad_scale = ( # type: ignore[attr-defined] + None if optimizer_state["stage"] == OptState.UNSCALED else scaler + ) + optimizer.found_inf = found_inf # type: ignore[attr-defined] + retval = optimizer.step(*args, **kwargs_) + optimizer_state["stage"] = OptState.STEPPED + if not has_grad_scaler_kwarg: + del optimizer.grad_scale # type: ignore[attr-defined] + del optimizer.found_inf # type: ignore[attr-defined] + return retval + + if optimizer_state["stage"] is OptState.READY: + self.unscale_(optimizer) + + assert ( + len(optimizer_state["found_inf_per_device"]) > 0 + ), "No inf checks were recorded for this optimizer." + + retval = self._maybe_opt_step(optimizer, optimizer_state, *args, **kwargs) + + optimizer_state["stage"] = OptState.STEPPED + + return retval + + def update(self, new_scale: Optional[Union[float, torch.Tensor]] = None) -> None: + """Update the scale factor. + + If any optimizer steps were skipped the scale is multiplied by ``backoff_factor`` + to reduce it. If ``growth_interval`` unskipped iterations occurred consecutively, + the scale is multiplied by ``growth_factor`` to increase it. + + Passing ``new_scale`` sets the new scale value manually. (``new_scale`` is not + used directly, it's used to fill GradScaler's internal scale tensor. So if + ``new_scale`` was a tensor, later in-place changes to that tensor will not further + affect the scale GradScaler uses internally.) + + Args: + new_scale (float or :class:`torch.cuda.FloatTensor`, optional, default=None): New scale factor. + + .. warning:: + :meth:`update` should only be called at the end of the iteration, after ``scaler.step(optimizer)`` has + been invoked for all optimizers used this iteration. 
+ + .. warning:: + For performance reasons, we do not check the scale factor value to avoid synchronizations, + so the scale factor is not guaranteed to be above 1. If the scale falls below 1 and/or + you are seeing NaNs in your gradients or loss, something is likely wrong. For example, + bf16-pretrained models are often incompatible with AMP/fp16 due to differing dynamic ranges. + """ + if not self._enabled: + return + + _scale, _growth_tracker = self._check_scale_growth_tracker("update") + + if new_scale is not None: + assert self._scale is not None + # Accept a new user-defined scale. + if isinstance(new_scale, float): + self._scale.fill_(new_scale) + else: + reason = "new_scale should be a float or a 1-element torch.cuda.FloatTensor with requires_grad=False." + assert isinstance(new_scale, torch.cuda.FloatTensor), reason # type: ignore[attr-defined] + assert new_scale.numel() == 1, reason + assert new_scale.requires_grad is False, reason + self._scale.copy_(new_scale) + else: + # Consume shared inf/nan data collected from optimizers to update the scale. + # If all found_inf tensors are on the same device as self._scale, this operation is asynchronous. + found_infs = [ + found_inf.to(device=_scale.device, non_blocking=True) + for state in self._per_optimizer_states.values() + for found_inf in state["found_inf_per_device"].values() + ] + + assert len(found_infs) > 0, "No inf checks were recorded prior to update." + + found_inf_combined = found_infs[0] + if len(found_infs) > 1: + for i in range(1, len(found_infs)): + found_inf_combined += found_infs[i] + + torch._amp_update_scale_( + _scale, + _growth_tracker, + found_inf_combined, + self._growth_factor, + self._backoff_factor, + self._growth_interval, + ) + + # To prepare for next iteration, clear the data collected from optimizers this iteration. + self._per_optimizer_states = defaultdict(_refresh_per_optimizer_state) + + def _get_scale_async(self) -> Optional[torch.Tensor]: + return self._scale + + def get_scale(self) -> float: + """Return a Python float containing the current scale, or 1.0 if scaling is disabled. + + .. warning:: + :meth:`get_scale` incurs a CPU-GPU sync. + """ + if self._enabled: + return ( + self._init_scale + if (scale := self._get_scale_async()) is None + else cast(float, scale.item()) + ) + return 1.0 + + def get_growth_factor(self) -> float: + r"""Return a Python float containing the scale growth factor.""" + return self._growth_factor + + def set_growth_factor(self, new_factor: float) -> None: + r"""Set a new scale growth factor. + + Args: + new_scale (float): Value to use as the new scale growth factor. + """ + self._growth_factor = new_factor + + def get_backoff_factor(self) -> float: + r"""Return a Python float containing the scale backoff factor.""" + return self._backoff_factor + + def set_backoff_factor(self, new_factor: float) -> None: + r"""Set a new scale backoff factor. + + Args: + new_scale (float): Value to use as the new scale backoff factor. + """ + self._backoff_factor = new_factor + + def get_growth_interval(self) -> int: + r"""Return a Python int containing the growth interval.""" + return self._growth_interval + + def set_growth_interval(self, new_interval: int) -> None: + r"""Set a new growth interval. + + Args: + new_interval (int): Value to use as the new growth interval. 
+ """ + self._growth_interval = new_interval + + def _get_growth_tracker(self) -> int: + if self._enabled: + return ( + self._init_growth_tracker + if self._growth_tracker is None + else cast(int, self._growth_tracker.item()) + ) + return 0 + + def is_enabled(self) -> bool: + r"""Return a bool indicating whether this instance is enabled.""" + return self._enabled + + def state_dict(self) -> Dict[str, Any]: + r"""Return the state of the scaler as a :class:`dict`. + + It contains five entries: + + * ``"scale"`` - a Python float containing the current scale + * ``"growth_factor"`` - a Python float containing the current growth factor + * ``"backoff_factor"`` - a Python float containing the current backoff factor + * ``"growth_interval"`` - a Python int containing the current growth interval + * ``"_growth_tracker"`` - a Python int containing the number of recent consecutive unskipped steps. + + If this instance is not enabled, returns an empty dict. + + .. note:: + If you wish to checkpoint the scaler's state after a particular iteration, :meth:`state_dict` + should be called after :meth:`update`. + """ + if self._enabled: + return { + "scale": self.get_scale(), + "growth_factor": self._growth_factor, + "backoff_factor": self._backoff_factor, + "growth_interval": self._growth_interval, + "_growth_tracker": self._get_growth_tracker(), + } + return {} + + def load_state_dict(self, state_dict: Dict[str, Any]) -> None: + r"""Load the scaler state. + + If this instance is disabled, :meth:`load_state_dict` is a no-op. + + Args: + state_dict(dict): scaler state. Should be an object returned from a call to :meth:`state_dict`. + """ + if not self._enabled: + return + + if len(state_dict) == 0: + raise RuntimeError( + "The source state dict is empty, possibly because it was saved " + "from a disabled instance of GradScaler." + ) + + self._init_scale = cast(float, state_dict["scale"]) + if self._scale is not None: + self._scale.fill_(state_dict["scale"]) + self._growth_factor = cast(float, state_dict["growth_factor"]) + self._backoff_factor = cast(float, state_dict["backoff_factor"]) + self._growth_interval = cast(int, state_dict["growth_interval"]) + self._init_growth_tracker = cast(int, state_dict["_growth_tracker"]) + if self._growth_tracker is not None: + self._growth_tracker.fill_(state_dict["_growth_tracker"]) + + def __getstate__(self) -> Dict[str, Any]: + state = self.__dict__.copy() + if self._enabled: + assert len(self._per_optimizer_states) == 0, ( + "A GradScaler instance may only be pickled at the beginning " + "of an iteration, or at the end after scaler.update()." + ) + # Pickling _scale and _growth_tracker Tensors directly triggers + # "warnings.warn("pickle support for Storage will be removed in 1.5..." + # so instead, we set the unpickled instance up to reinitialize them lazily. 
+ state["_init_scale"] = self.get_scale() + state["_init_growth_tracker"] = self._get_growth_tracker() + state["_scale"] = None + state["_growth_tracker"] = None + return state + + def __setstate__(self, state: Dict[str, Any]) -> None: + self.__dict__.update(state) + + def _check_inf_per_device(self, optimizer: torch.optim.Optimizer) -> Dict[str, Any]: + _scale, _ = self._check_scale_growth_tracker("_check_inf_per_device") + + dummy_inv_scale = torch.full((), 1.0, dtype=torch.float32, device=_scale.device) + found_inf = torch.full((), 0.0, dtype=torch.float32, device=_scale.device) + + self._per_optimizer_states[id(optimizer)][ + "found_inf_per_device" + ] = self._unscale_grads_(optimizer, dummy_inv_scale, found_inf, True) + + return self._per_optimizer_states[id(optimizer)]["found_inf_per_device"] + + def _found_inf_per_device(self, optimizer: torch.optim.Optimizer) -> Dict[str, Any]: + return self._per_optimizer_states[id(optimizer)]["found_inf_per_device"] diff --git a/env-llmeval/lib/python3.10/site-packages/torch/cuda/comm.py b/env-llmeval/lib/python3.10/site-packages/torch/cuda/comm.py new file mode 100644 index 0000000000000000000000000000000000000000..2ea23c2072d86a61db643fcfbfb799e97267e5e9 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/cuda/comm.py @@ -0,0 +1,18 @@ +# The functions here have been moved to torch.nn.parallel.comm +from torch.nn.parallel.comm import ( + broadcast, + broadcast_coalesced, + gather, + reduce_add, + reduce_add_coalesced, + scatter, +) + +__all__ = [ + "broadcast", + "broadcast_coalesced", + "reduce_add", + "reduce_add_coalesced", + "scatter", + "gather", +] diff --git a/env-llmeval/lib/python3.10/site-packages/torch/cuda/graphs.py b/env-llmeval/lib/python3.10/site-packages/torch/cuda/graphs.py new file mode 100644 index 0000000000000000000000000000000000000000..372e6c8e09bed3ad582eb8ff771c7b7b1fa24d8d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/cuda/graphs.py @@ -0,0 +1,476 @@ +import gc + +import torch +from torch.utils import _pytree +from ._utils import _dummy_type + +if not hasattr(torch._C, "_CudaStreamBase"): + # Define dummy base classes + torch._C.__dict__["_CUDAGraph"] = _dummy_type("_CUDAGraph") + torch._C.__dict__["_graph_pool_handle"] = _dummy_type("_graph_pool_handle") + torch._C.__dict__["_cuda_isCurrentStreamCapturing"] = _dummy_type( + "_cuda_isCurrentStreamCapturing" + ) + +from torch._C import ( # noqa: F401 + _cuda_isCurrentStreamCapturing, + _CUDAGraph, + _graph_pool_handle, +) + + +def is_current_stream_capturing(): + r"""Return True if CUDA graph capture is underway on the current CUDA stream, False otherwise. + + If a CUDA context does not exist on the current device, returns False without initializing the context. + """ + return _cuda_isCurrentStreamCapturing() + + +# Python shim helps Sphinx process docstrings more reliably. +def graph_pool_handle(): + r"""Return an opaque token representing the id of a graph memory pool. + + See :ref:`Graph memory management`. + + .. warning:: + This API is in beta and may change in future releases. + """ + return _graph_pool_handle() + + +# Python shim helps Sphinx process docstrings more reliably. +class CUDAGraph(torch._C._CUDAGraph): + r"""Wrapper around a CUDA graph. + + .. warning:: + This API is in beta and may change in future releases. + """ + + def __new__(cls): + return super().__new__(cls) + + def capture_begin(self, pool=None, capture_error_mode="global"): + r"""Begin capturing CUDA work on the current stream. 
+ + Typically, you shouldn't call ``capture_begin`` yourself. + Use :class:`~torch.cuda.graph` or :func:`~torch.cuda.make_graphed_callables`, + which call ``capture_begin`` internally. + + Arguments: + pool (optional): Token (returned by :func:`~torch.cuda.graph_pool_handle` or + :meth:`other_Graph_instance.pool()`) that hints this graph may share memory + with the indicated pool. See :ref:`Graph memory management`. + capture_error_mode (str, optional): specifies the cudaStreamCaptureMode for the graph capture stream. + Can be "global", "thread_local" or "relaxed". During cuda graph capture, some actions, such as cudaMalloc, + may be unsafe. "global" will error on actions in other threads, "thread_local" will only error for + actions in the current thread, and "relaxed" will not error on these actions. Do NOT change this setting + unless you're familiar with `cudaStreamCaptureMode `_ + """ # noqa: B950 + super().capture_begin(pool=pool, capture_error_mode=capture_error_mode) + + def capture_end(self): + r"""End CUDA graph capture on the current stream. + + After ``capture_end``, ``replay`` may be called on this instance. + + Typically, you shouldn't call ``capture_end`` yourself. + Use :class:`~torch.cuda.graph` or :func:`~torch.cuda.make_graphed_callables`, + which call ``capture_end`` internally. + """ + super().capture_end() + + def replay(self): + r"""Replay the CUDA work captured by this graph.""" + super().replay() + + def reset(self): + r"""Delete the graph currently held by this instance.""" + super().reset() + + def pool(self): + r"""Return an opaque token representing the id of this graph's memory pool. + + This id can optionally be passed to another graph's ``capture_begin``, + which hints the other graph may share the same memory pool. + """ + return super().pool() + + def enable_debug_mode(self): + r"""Enable debugging mode for CUDAGraph.debug_dump.""" + return super().enable_debug_mode() + + def debug_dump(self, debug_path): + r""" + Arguments: + debug_path (required): Path to dump the graph to. + + Calls a debugging function to dump the graph if the debugging is + enabled via CUDAGraph.enable_debug_mode() + """ + return super().debug_dump(debug_path) + + +class graph: + r"""Context-manager that captures CUDA work into a :class:`torch.cuda.CUDAGraph` object for later replay. + + See :ref:`CUDA Graphs ` for a general introduction, + detailed use, and constraints. + + Arguments: + cuda_graph (torch.cuda.CUDAGraph): Graph object used for capture. + pool (optional): Opaque token (returned by a call to :func:`~torch.cuda.graph_pool_handle()` or + :meth:`other_Graph_instance.pool()`) hinting this graph's capture + may share memory from the specified pool. See :ref:`Graph memory management`. + stream (torch.cuda.Stream, optional): If supplied, will be set as the current stream in the context. + If not supplied, ``graph`` sets its own internal side stream as the current stream in the context. + capture_error_mode (str, optional): specifies the cudaStreamCaptureMode for the graph capture stream. + Can be "global", "thread_local" or "relaxed". During cuda graph capture, some actions, such as cudaMalloc, + may be unsafe. "global" will error on actions in other threads, "thread_local" will only error for + actions in the current thread, and "relaxed" will not error on actions. Do NOT change this setting + unless you're familiar with `cudaStreamCaptureMode `_ + + .. 
note:: + For effective memory sharing, if you pass a ``pool`` used by a previous capture and the previous capture + used an explicit ``stream`` argument, you should pass the same ``stream`` argument to this capture. + + .. warning:: + This API is in beta and may change in future releases. + + .. _cudaStreamCaptureMode: + https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__STREAM.html#group__CUDART__STREAM_1g9d0535d93a214cbf126835257b16ba85 + """ # noqa: B950 + + default_capture_stream = None + + def __init__( + self, + cuda_graph, + pool=None, + stream=None, + capture_error_mode: str = "global", + ): + # Lazy-init of default_capture_stream helps avoid circular-import errors. + # Not thread safe, but graphs already have the general (explicitly documented) + # restriction that only one capture may be underway at a time in the process. + if self.__class__.default_capture_stream is None: + self.__class__.default_capture_stream = torch.cuda.Stream() + + self.pool = () if pool is None else (pool,) + self.capture_stream = ( + stream if stream is not None else self.__class__.default_capture_stream + ) + assert self.capture_stream is not None + self.stream_ctx = torch.cuda.stream(self.capture_stream) + self.cuda_graph = cuda_graph + self.capture_error_mode = capture_error_mode + + def __enter__(self): + # Free as much memory as we can for the graph + torch.cuda.synchronize() + gc.collect() + torch.cuda.empty_cache() + + # Stackoverflow seems comfortable with this pattern + # https://stackoverflow.com/questions/26635684/calling-enter-and-exit-manually#39172487 + self.stream_ctx.__enter__() + + self.cuda_graph.capture_begin( + *self.pool, capture_error_mode=self.capture_error_mode + ) + + def __exit__(self, exc_type, exc_value, traceback): + self.cuda_graph.capture_end() + self.stream_ctx.__exit__(exc_type, exc_value, traceback) + # returning None should propagate exceptions from either capture_end or stream_ctx.__exit__() + + +def make_graphed_callables( + callables, sample_args, num_warmup_iters=3, allow_unused_input=False +): + r"""Accept callables (functions or :class:`nn.Module`\ s) and returns graphed versions. + + Each graphed callable's forward pass runs its source callable's + forward CUDA work as a CUDA graph inside a single autograd node. + + The graphed callable's forward pass also appends + a backward node to the autograd graph. During backward, this node runs the + callable's backward work as a CUDA graph. + + Therefore, each graphed callable should be a drop-in replacement for its source callable + in an autograd-enabled training loop. + + See :ref:`Partial-network capture` for detailed use and constraints. + + If you pass a tuple of several callables, their captures will use the same memory pool. + See :ref:`Graph memory management` for when this is appropriate. + + Arguments: + callables (torch.nn.Module or Python function, or tuple of these): Callable or callables to graph. + See :ref:`Graph memory management` for when passing a tuple of callables + is appropriate. If you pass a tuple of callables, their order in the tuple must be the same order + they'll run in the live workload. + sample_args (tuple of Tensors, or tuple of tuples of Tensors): Samples args for each callable. + If a single callable was passed, ``sample_args`` must be a single tuple of argument Tensors. + If a tuple of callables was passed, ``sample_args`` must be tuple of tuples of argument Tensors. + num_warmup_iters (int): The number of warmup iterations. 
Currently, ``DataDistributedParallel`` needs + 11 iterations for warm up. Default: ``3``. + allow_unused_input (bool): If False, specifying inputs that were not used when computing outputs + (and therefore their grad is always zero) is an error. Defaults to False. + + .. note:: + The ``requires_grad`` state of each Tensor in ``sample_args`` must match the state + that's expected for the corresponding real input in the training loop. + + .. warning:: + This API is in beta and may change in future releases. + + .. warning:: + ``sample_args`` for each callable must contain only Tensors. Other types are not allowed. + + .. warning:: + Returned callables do not support higher order differentiation (e.g., double backward). + + .. warning:: + In any :class:`~torch.nn.Module` passed to :func:`~make_graphed_callables`, only parameters + may be trainable. Buffers must have ``requires_grad=False``. + + .. warning:: + After you pass a :class:`torch.nn.Module` through :func:`~make_graphed_callables`, + you may not add or remove any of that Module's parameters or buffers. + + .. warning:: + :class:`torch.nn.Module`\s passed to :func:`~torch.cuda.make_graphed_callables` must not have module hooks + registered on them at the time they are passed. However, registering hooks on modules *after* passing them + through :func:`~torch.cuda.make_graphed_callables` is allowed. + + .. warning:: + When running a graphed callable, you must pass its arguments in the same order and format + they appeared in that callable's ``sample_args``. + + .. warning:: + The automatic mixed precision is supported in :func:`~torch.cuda.make_graphed_callables` only with disabled + caching. The context manager `torch.cuda.amp.autocast()` must have `cache_enabled=False`. + """ + if torch.is_autocast_enabled() and torch.is_autocast_cache_enabled(): + raise RuntimeError( + "make_graphed_callables does not support the autocast caching. Please set `cache_enabled=False`." + ) + + just_one_callable = False + + if not isinstance(callables, tuple): + just_one_callable = True + callables = (callables,) + sample_args = (sample_args,) + + flatten_sample_args = [] + + for c, args in zip(callables, sample_args): + if isinstance(c, torch.nn.Module): + assert ( + len(c._backward_hooks) == 0 + and len(c._forward_hooks) == 0 + and len(c._forward_pre_hooks) == 0 + ), ( + "Modules must not have hooks registered at the time they are passed. However, registering hooks " + + "on modules after passing them through make_graphed_callables is allowed." + ) + assert all(b.requires_grad is False for b in c.buffers()), ( + "In any :class:`~torch.nn.Module` passed to " + + ":func:`~make_graphed_callables`, only parameters may be trainable. All buffers must have " + + "``requires_grad=False``." + ) + flatten_arg = _pytree.arg_tree_leaves(*args) + flatten_sample_args.append(tuple(flatten_arg)) + assert all(isinstance(arg, torch.Tensor) for arg in flatten_arg), ( + "In the beta API, sample_args " + + "for each callable must contain only Tensors. Other types are not allowed." + ) + + # If a callable is an nn.Module, its graph's full input surface is the args the user explicitly + # passes to forward (ie, its sample_args) AND the module's parameter attributes. 
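# --- Illustrative usage sketch (not part of the patch above) ---
# Typical use: graph a module once with representative sample inputs, then
# call the returned object exactly like the original module inside the
# training loop. The module, shapes, and requires_grad choice below are
# assumptions for illustration only.
#     model = torch.nn.Linear(512, 512).cuda()
#     sample = (torch.randn(32, 512, device="cuda", requires_grad=True),)
#     graphed_model = torch.cuda.make_graphed_callables(model, sample)
#     out = graphed_model(torch.randn(32, 512, device="cuda", requires_grad=True))
#     out.sum().backward()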
+ per_callable_len_user_args = [len(args) for args in flatten_sample_args] + per_callable_module_params = [ + tuple(c.parameters()) if isinstance(c, torch.nn.Module) else () + for c in callables + ] + per_callable_static_input_surfaces = [ + flatten_sample_args[i] + per_callable_module_params[i] + for i in range(len(callables)) + ] + + fwd_graphs = [torch.cuda.CUDAGraph() for _ in range(len(callables))] + bwd_graphs = [torch.cuda.CUDAGraph() for _ in range(len(callables))] + + mempool = graph_pool_handle() + + # Warmup + # Hopefully prevents cudnn benchmarking and other lazy-initialization cuda work + # from ending up in any captures. + torch.cuda.synchronize() + with torch.cuda.stream(torch.cuda.Stream()): + for func, args, static_input_surface in zip( + callables, sample_args, per_callable_static_input_surfaces + ): + for _ in range(num_warmup_iters): + outputs = _pytree.tree_leaves(func(*args)) + grad_inputs = torch.autograd.grad( + outputs=tuple(o for o in outputs if o.requires_grad), + inputs=tuple(i for i in static_input_surface if i.requires_grad), + grad_outputs=tuple( + torch.empty_like(o) for o in outputs if o.requires_grad + ), + only_inputs=True, + allow_unused=allow_unused_input, + ) + del outputs, grad_inputs + torch.cuda.synchronize() + + # All captures here share a mempool. To avoid replays corrupting each other's memory, + # the safest approach is to capture all passes in the same order they'll run: + # fwd 1, fwd 2, ... fwd N, then bwd N, bwd N-1, ... bwd 1. + + # Capture forward graphs + per_callable_static_outputs = [] + per_callable_output_unflatten_spec = [] + for func, args, fwd_graph in zip(callables, sample_args, fwd_graphs): + with torch.cuda.graph(fwd_graph, pool=mempool): + outputs = func(*args) + + flatten_outputs, spec = _pytree.tree_flatten(outputs) + per_callable_static_outputs.append(tuple(flatten_outputs)) + per_callable_output_unflatten_spec.append(spec) + + # Capture backward graphs in reverse order + per_callable_static_grad_outputs = [] + per_callable_static_grad_inputs = [] + for static_input_surface, static_outputs, bwd_graph, module_params in zip( + reversed(per_callable_static_input_surfaces), + reversed(per_callable_static_outputs), + reversed(bwd_graphs), + reversed(per_callable_module_params), + ): + # For now, assumes all static_outputs require grad + # assert all(o.requires_grad for o in static_outputs), "Outputs of graphed callables must require grad." + static_grad_outputs = tuple( + torch.empty_like(o) if o.requires_grad else None for o in static_outputs + ) + + with torch.cuda.graph(bwd_graph, pool=mempool): + grad_inputs = torch.autograd.grad( + outputs=tuple(o for o in static_outputs if o.requires_grad), + inputs=tuple(i for i in static_input_surface if i.requires_grad), + grad_outputs=tuple(o for o in static_grad_outputs if o is not None), + only_inputs=True, + allow_unused=allow_unused_input, + ) + + # Constructs a tuple suitable for returning from Graphed.backward: + # Pads out the actually-needed grads with Nones in gradient slots for inputs that don't require grad. + # I couldn't think of a slick one-liner for this pattern. 
+ static_grad_inputs = [] + grad_idx = 0 + for arg in static_input_surface: + if arg.requires_grad: + static_grad_inputs.append(grad_inputs[grad_idx]) + grad_idx += 1 + else: + static_grad_inputs.append(None) # type: ignore[arg-type] + static_grad_inputs = tuple(static_grad_inputs) # type: ignore[assignment] + + per_callable_static_grad_outputs.append(static_grad_outputs) + per_callable_static_grad_inputs.append(static_grad_inputs) + + # Reverses the most recent two lists + per_callable_static_grad_outputs = list(reversed(per_callable_static_grad_outputs)) + per_callable_static_grad_inputs = list(reversed(per_callable_static_grad_inputs)) + # Now for every per_callable list, per_callable_*[i] holds the stuff for the ith callable. + + def make_graphed_autograd_function( + fwd_graph, + bwd_graph, + module_params, + len_user_args, + output_unflatten_spec, + static_input_surface, + static_outputs, + static_grad_outputs, + static_grad_inputs, + ): + class Graphed(torch.autograd.Function): + @staticmethod + def forward(ctx, *inputs): + # At this stage, only the user args may (potentially) be new tensors. + for i in range(len_user_args): + if static_input_surface[i].data_ptr() != inputs[i].data_ptr(): + static_input_surface[i].copy_(inputs[i]) + fwd_graph.replay() + assert isinstance(static_outputs, tuple) + return tuple(o.detach() for o in static_outputs) + + @staticmethod + @torch.autograd.function.once_differentiable + def backward(ctx, *grads): + assert len(grads) == len(static_grad_outputs) + for g, grad in zip(static_grad_outputs, grads): + if g is not None: + # don't copy if autograd gods have been kind and the + # incoming grad is already in the right place + if g.data_ptr() != grad.data_ptr(): + g.copy_(grad) + bwd_graph.replay() + + # Input args that didn't require grad expect a None gradient. + assert isinstance(static_grad_inputs, tuple) + return tuple( + b.detach() if b is not None else b for b in static_grad_inputs + ) + + def functionalized(*user_args): + # Runs the autograd function with inputs == all inputs to the graph that might require grad + # (explicit user args + module parameters) + # Assumes module params didn't change since capture. 
+ flatten_user_args = _pytree.arg_tree_leaves(*user_args) + out = Graphed.apply(*(tuple(flatten_user_args) + module_params)) + return _pytree.tree_unflatten(out, output_unflatten_spec) + + return functionalized + + # Put together the final graphed callables + ret = [] + for i, func in enumerate(callables): + graphed = make_graphed_autograd_function( + fwd_graphs[i], + bwd_graphs[i], + per_callable_module_params[i], + per_callable_len_user_args[i], + per_callable_output_unflatten_spec[i], + per_callable_static_input_surfaces[i], + per_callable_static_outputs[i], + per_callable_static_grad_outputs[i], + per_callable_static_grad_inputs[i], + ) + + if isinstance(func, torch.nn.Module): + + def make_graphed_forward(func, graph_training_state, graphed, orig_fwd): + def new_fwd(*user_args): + # If the module's training-or-eval state matches what we graphed, + # run the graph, otherwise run the original forward method + if func.training == graph_training_state: + return graphed(*user_args) + else: + return orig_fwd(*user_args) + + return new_fwd + + func.forward = make_graphed_forward(func, func.training, graphed, func.forward) # type: ignore[assignment] + ret.append(func) + else: + ret.append(graphed) + + if just_one_callable: + return ret[0] + + return tuple(ret) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/cuda/jiterator.py b/env-llmeval/lib/python3.10/site-packages/torch/cuda/jiterator.py new file mode 100644 index 0000000000000000000000000000000000000000..25d25482419e635612855ed402fd02ef58709417 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/cuda/jiterator.py @@ -0,0 +1,185 @@ +import re +from typing import Callable, List + +import torch +from torch import Tensor + +__all__: List[str] = [] + + +class _CodeParser: + def __init__(self, code_string: str): + optional_ws = r"\s*" + required_ws = r"\s+" + template_params = r"(?P\<.+\>)" + return_type = r"(?P\w+)" + function_name = r"(?P\w+)" + function_params = r"(?P\(.+\))" + function_body = r"(?P\{.+\})" + + pattern = ( + optional_ws + + "template" + + optional_ws + + template_params + + optional_ws + + return_type + + required_ws + + function_name + + optional_ws + + function_params + + optional_ws + + function_body + + optional_ws + ) + + result = re.match( + pattern, code_string, re.DOTALL + ) # DOTALL for matching multiline + + if result is None: + raise Exception( + f"Couldn't parse code, please check correctness:\n {code_string}" + ) + + self.template_params = result["template_params"] + self.return_type = result["return_type"] + self.function_name = result["function_name"] + self.function_params = result["function_params"] + self.function_body = result["function_body"] + + +class _JittedFunction: + def __init__( + self, code_string: str, return_by_ref: bool, num_outputs: int, **kwargs + ): + self.code_string = code_string + + assert ( + return_by_ref or num_outputs == 1 + ), "Return by value only works for single output. " + self.return_by_ref = return_by_ref + self.num_outputs = num_outputs + + parsed_code = _CodeParser(code_string) + self.kernel_name = parsed_code.function_name + + self.kwargs_dict = kwargs + self.is_cuda_available = torch.cuda.is_available() + + def __call__(self, *tensors: Tensor, **kwargs): + # Jiterator follow torch.cuda's lazy initialization behavior + # Defer checking cuda's availability at the function invocation time + assert ( + self.is_cuda_available + ), "Jiterator is only supported on CUDA and ROCm GPUs, none are available." 
+ + assert len(tensors) <= 8, "jiterator only supports up to 8 tensor inputs." + + expanded_kwargs = self.kwargs_dict.copy() + for key, value in kwargs.items(): + if key in self.kwargs_dict: + expanded_kwargs[key] = value + else: + raise KeyError(f"{key} is not declared in function definition") + + return torch._C._cuda_jiterator_compile_and_launch_kernel( + self.code_string, + self.kernel_name, + self.return_by_ref, + self.num_outputs, + tensors, + expanded_kwargs, + ) + + +def _create_jit_fn(code_string: str, **kwargs) -> Callable: + """ + Create a jiterator-generated cuda kernel for an elementwise op. + + The code string has to be a valid CUDA function that describes the computation for a single element. The code + string has to follow the c++ template pattern, as shown in the example below. This function will be inlined + into elementwise kernel template, and compiled on the fly. Compiled kernel will be cached in memory, as well as + local temp dir. + + Jiterator-generated kernels accepts noncontiguous tensors, and supports broadcasting and type promotion. + + Args: + code_string (str): CUDA code string to be compiled by jiterator. The entry functor must return by value. + kwargs (Dict, optional): Keyword arguments for generated function + + Example:: + + code_string = "template T my_kernel(T x, T y, T alpha) { return -x + alpha * y; }" + jitted_fn = create_jit_fn(code_string, alpha=1.0) + a = torch.rand(3, device='cuda') + b = torch.rand(3, device='cuda') + # invoke jitted function like a regular python function + result = jitted_fn(a, b, alpha=3.14) + + code_string also allows multiple function definitions, and the last function will be treated as the entry function. + + Example:: + + code_string = "template T util_fn(T x, T y) { return ::sin(x) + ::cos(y); }" + code_string += "template T my_kernel(T x, T y, T val) { return ::min(val, util_fn(x, y)); }" + jitted_fn = create_jit_fn(code_string, val=0.0) + a = torch.rand(3, device='cuda') + b = torch.rand(3, device='cuda') + # invoke jitted function like a regular python function + result = jitted_fn(a, b) # using default val=0.0 + + Jiterator can be used together with python registration to override an operator's cuda kernel. + Following example is overriding gelu's cuda kernel with relu. + + Example:: + + code_string = "template T my_gelu(T a) { return a > 0 ? a : 0; }" + my_gelu = create_jit_fn(code_string) + my_lib = torch.library.Library("aten", "IMPL") + my_lib.impl('aten::gelu', my_gelu, "CUDA") + # torch.nn.GELU and torch.nn.function.gelu are now overridden + a = torch.rand(3, device='cuda') + torch.allclose(torch.nn.functional.gelu(a), torch.nn.functional.relu(a)) + + .. warning:: + This API is in beta and may change in future releases. + + .. warning:: + This API only supports up to 8 inputs and 1 output + + .. warning:: + All input tensors must live in CUDA device + """ + return _JittedFunction(code_string, return_by_ref=False, num_outputs=1, **kwargs) + + +def _create_multi_output_jit_fn( + code_string: str, num_outputs: int, **kwargs +) -> Callable: + """ + Create a jiterator-generated cuda kernel for an elementwise op that supports returning one or more outputs. + + Args: + code_string (str): CUDA code string to be compiled by jiterator. The entry functor must return value by reference. 
+ num_outputs(int): number of outputs return by the kernel + kwargs (Dict, optional): Keyword arguments for generated function + + Example:: + + code_string = "template void my_kernel(T x, T y, T alpha, T& out) { out = -x + alpha * y; }" + jitted_fn = create_jit_fn(code_string, alpha=1.0) + a = torch.rand(3, device='cuda') + b = torch.rand(3, device='cuda') + # invoke jitted function like a regular python function + result = jitted_fn(a, b, alpha=3.14) + + .. warning:: + This API is in beta and may change in future releases. + + .. warning:: + This API only supports up to 8 inputs and 8 outputs + """ + return _JittedFunction( + code_string, return_by_ref=True, num_outputs=num_outputs, **kwargs + ) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/cuda/memory.py b/env-llmeval/lib/python3.10/site-packages/torch/cuda/memory.py new file mode 100644 index 0000000000000000000000000000000000000000..55022ae829a62b782de92d10cade5f0964ed1845 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/cuda/memory.py @@ -0,0 +1,914 @@ +r"""This package adds support for device memory management implemented in CUDA.""" + +import collections +import contextlib +import ctypes +import pickle +import sys +import warnings +from inspect import signature + +from typing import Any, Dict, Optional, Tuple, Union + +import torch +from torch import _C + +from torch.types import Device +from . import _get_device_index, _get_nvml_device_index, _lazy_init, is_initialized + +from ._memory_viz import memory as _memory, segments as _segments +from ._utils import _dummy_type + +__all__ = [ + "caching_allocator_alloc", + "caching_allocator_delete", + "set_per_process_memory_fraction", + "empty_cache", + "memory_stats", + "memory_stats_as_nested_dict", + "reset_accumulated_memory_stats", + "reset_peak_memory_stats", + "reset_max_memory_allocated", + "reset_max_memory_cached", + "memory_allocated", + "max_memory_allocated", + "memory_reserved", + "max_memory_reserved", + "memory_cached", + "max_memory_cached", + "memory_snapshot", + "memory_summary", + "list_gpu_processes", + "mem_get_info", + "get_allocator_backend", + "CUDAPluggableAllocator", + "change_current_allocator", +] + + +if not hasattr(torch._C, "_cuda_CUDAAllocator"): + # Define dummy base classes + torch._C.__dict__["_cuda_CUDAAllocator"] = _dummy_type("_cuda_CUDAAllocator") + + +def _host_allocator(): + _lazy_init() + return torch._C._cuda_cudaHostAllocator() + + +@contextlib.contextmanager +def _free_mutex(): + torch._C._cuda_lock_mutex() + try: + yield + finally: + torch._C._cuda_unlock_mutex() + + +def caching_allocator_alloc(size, device: Union[Device, int] = None, stream=None): + r"""Perform a memory allocation using the CUDA memory allocator. + + Memory is allocated for a given device and a stream, this + function is intended to be used for interoperability with other + frameworks. Allocated memory is released through + :func:`~torch.cuda.caching_allocator_delete`. + + Args: + size (int): number of bytes to be allocated. + device (torch.device or int, optional): selected device. If it is + ``None`` the default CUDA device is used. + stream (torch.cuda.Stream or int, optional): selected stream. If is ``None`` then + the default stream for the selected device is used. + + .. note:: + See :ref:`cuda-memory-management` for more details about GPU memory + management. 
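# --- Illustrative usage sketch (not part of the patch above) ---
# A hypothetical raw-allocation round trip for interop with another framework;
# the returned pointer must be released with caching_allocator_delete:
#     ptr = torch.cuda.caching_allocator_alloc(1024, device=0)
#     try:
#         ...  # hand `ptr` to external code expecting a raw CUDA device pointer
#     finally:
#         torch.cuda.caching_allocator_delete(ptr)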
+ """ + if device is None: + device = torch.cuda.current_device() + device = _get_device_index(device) + if stream is None: + stream = torch.cuda.current_stream(device) + if isinstance(stream, torch.cuda.streams.Stream): + stream = stream.cuda_stream + if not isinstance(stream, int): + raise TypeError( + "Invalid type for stream argument, must be " + "`torch.cuda.Stream` or `int` representing a pointer " + "to a existing stream" + ) + with torch.cuda.device(device): + return torch._C._cuda_cudaCachingAllocator_raw_alloc(size, stream) + + +def caching_allocator_delete(mem_ptr): + r"""Delete memory allocated using the CUDA memory allocator. + + Memory allocated with :func:`~torch.cuda.caching_allocator_alloc`. + is freed here. The associated device and stream are tracked inside + the allocator. + + Args: + mem_ptr (int): memory address to be freed by the allocator. + + .. note:: + See :ref:`cuda-memory-management` for more details about GPU memory + management. + """ + torch._C._cuda_cudaCachingAllocator_raw_delete(mem_ptr) + + +def set_per_process_memory_fraction( + fraction, device: Union[Device, int] = None +) -> None: + r"""Set memory fraction for a process. + + The fraction is used to limit an caching allocator to allocated memory on a CUDA device. + The allowed value equals the total visible memory multiplied fraction. + If trying to allocate more than the allowed value in a process, will raise an out of + memory error in allocator. + + Args: + fraction(float): Range: 0~1. Allowed memory equals total_memory * fraction. + device (torch.device or int, optional): selected device. If it is + ``None`` the default CUDA device is used. + .. note:: + In general, the total available free memory is less than the total capacity. + """ + _lazy_init() + if device is None: + device = torch.cuda.current_device() + device = _get_device_index(device) + if not isinstance(fraction, float): + raise TypeError("Invalid type for fraction argument, must be `float`") + if fraction < 0 or fraction > 1: + raise ValueError(f"Invalid fraction value: {fraction}. Allowed range: 0~1") + + torch._C._cuda_setMemoryFraction(fraction, device) + + +def empty_cache() -> None: + r"""Release all unoccupied cached memory currently held by the caching + allocator so that those can be used in other GPU application and visible in + `nvidia-smi`. + + .. note:: + :func:`~torch.cuda.empty_cache` doesn't increase the amount of GPU + memory available for PyTorch. However, it may help reduce fragmentation + of GPU memory in certain cases. See :ref:`cuda-memory-management` for + more details about GPU memory management. + """ + if is_initialized(): + torch._C._cuda_emptyCache() + + +def memory_stats(device: Union[Device, int] = None) -> Dict[str, Any]: + r"""Return a dictionary of CUDA memory allocator statistics for a given device. + + The return value of this function is a dictionary of statistics, each of + which is a non-negative integer. + + Core statistics: + + - ``"allocated.{all,large_pool,small_pool}.{current,peak,allocated,freed}"``: + number of allocation requests received by the memory allocator. + - ``"allocated_bytes.{all,large_pool,small_pool}.{current,peak,allocated,freed}"``: + amount of allocated memory. + - ``"segment.{all,large_pool,small_pool}.{current,peak,allocated,freed}"``: + number of reserved segments from ``cudaMalloc()``. + - ``"reserved_bytes.{all,large_pool,small_pool}.{current,peak,allocated,freed}"``: + amount of reserved memory. 
+ - ``"active.{all,large_pool,small_pool}.{current,peak,allocated,freed}"``: + number of active memory blocks. + - ``"active_bytes.{all,large_pool,small_pool}.{current,peak,allocated,freed}"``: + amount of active memory. + - ``"inactive_split.{all,large_pool,small_pool}.{current,peak,allocated,freed}"``: + number of inactive, non-releasable memory blocks. + - ``"inactive_split_bytes.{all,large_pool,small_pool}.{current,peak,allocated,freed}"``: + amount of inactive, non-releasable memory. + + For these core statistics, values are broken down as follows. + + Pool type: + + - ``all``: combined statistics across all memory pools. + - ``large_pool``: statistics for the large allocation pool + (as of October 2019, for size >= 1MB allocations). + - ``small_pool``: statistics for the small allocation pool + (as of October 2019, for size < 1MB allocations). + + Metric type: + + - ``current``: current value of this metric. + - ``peak``: maximum value of this metric. + - ``allocated``: historical total increase in this metric. + - ``freed``: historical total decrease in this metric. + + In addition to the core statistics, we also provide some simple event + counters: + + - ``"num_alloc_retries"``: number of failed ``cudaMalloc`` calls that + result in a cache flush and retry. + - ``"num_ooms"``: number of out-of-memory errors thrown. + + The caching allocator can be configured via ENV to not split blocks larger than a + defined size (see Memory Management section of the Cuda Semantics documentation). + This helps avoid memory fragmentation but may have a performance + penalty. Additional outputs to assist with tuning and evaluating impact: + + - ``"max_split_size"``: blocks above this size will not be split. + - ``"oversize_allocations.{current,peak,allocated,freed}"``: + number of over-size allocation requests received by the memory allocator. + - ``"oversize_segments.{current,peak,allocated,freed}"``: + number of over-size reserved segments from ``cudaMalloc()``. + + The caching allocator can be configured via ENV to round memory allocations in order + to reduce fragmentation. Sometimes the overhead from rounding can be higher than + the fragmentation it helps reduce. The following stat can be used to check if + rounding adds too much overhead: + + - ``"requested_bytes.{all,large_pool,small_pool}.{current,peak,allocated,freed}"``: + memory requested by client code, compare this with allocated_bytes to check if + allocation rounding adds too much overhead. + + Args: + device (torch.device or int, optional): selected device. Returns + statistics for the current device, given by :func:`~torch.cuda.current_device`, + if :attr:`device` is ``None`` (default). + + .. note:: + See :ref:`cuda-memory-management` for more details about GPU memory + management. + + .. note:: + With :ref:`backend:cudaMallocAsync`, some stats are not + meaningful, and are always reported as zero. + """ + result = [] + + def _recurse_add_to_result(prefix, obj): + if isinstance(obj, dict): + if len(prefix) > 0: + prefix += "." 
+ for k, v in obj.items(): + _recurse_add_to_result(prefix + k, v) + else: + result.append((prefix, obj)) + + stats = memory_stats_as_nested_dict(device=device) + _recurse_add_to_result("", stats) + result.sort() + + return collections.OrderedDict(result) + + +def memory_stats_as_nested_dict(device: Union[Device, int] = None) -> Dict[str, Any]: + r"""Return the result of :func:`~torch.cuda.memory_stats` as a nested dictionary.""" + if not is_initialized(): + return {} + device = _get_device_index(device, optional=True) + return torch._C._cuda_memoryStats(device) + + +def reset_accumulated_memory_stats(device: Union[Device, int] = None) -> None: + r"""Reset the "accumulated" (historical) stats tracked by the CUDA memory allocator. + + See :func:`~torch.cuda.memory_stats` for details. Accumulated stats correspond to + the `"allocated"` and `"freed"` keys in each individual stat dict, as well as + `"num_alloc_retries"` and `"num_ooms"`. + + Args: + device (torch.device or int, optional): selected device. Returns + statistic for the current device, given by :func:`~torch.cuda.current_device`, + if :attr:`device` is ``None`` (default). + + .. note:: + See :ref:`cuda-memory-management` for more details about GPU memory + management. + """ + device = _get_device_index(device, optional=True) + return torch._C._cuda_resetAccumulatedMemoryStats(device) + + +def reset_peak_memory_stats(device: Union[Device, int] = None) -> None: + r"""Reset the "peak" stats tracked by the CUDA memory allocator. + + See :func:`~torch.cuda.memory_stats` for details. Peak stats correspond to the + `"peak"` key in each individual stat dict. + + Args: + device (torch.device or int, optional): selected device. Returns + statistic for the current device, given by :func:`~torch.cuda.current_device`, + if :attr:`device` is ``None`` (default). + + .. note:: + See :ref:`cuda-memory-management` for more details about GPU memory + management. + """ + device = _get_device_index(device, optional=True) + return torch._C._cuda_resetPeakMemoryStats(device) + + +def reset_max_memory_allocated(device: Union[Device, int] = None) -> None: + r"""Reset the starting point in tracking maximum GPU memory occupied by tensors for a given device. + + See :func:`~torch.cuda.max_memory_allocated` for details. + + Args: + device (torch.device or int, optional): selected device. Returns + statistic for the current device, given by :func:`~torch.cuda.current_device`, + if :attr:`device` is ``None`` (default). + + .. warning:: + This function now calls :func:`~torch.cuda.reset_peak_memory_stats`, which resets + /all/ peak memory stats. + + .. note:: + See :ref:`cuda-memory-management` for more details about GPU memory + management. + """ + warnings.warn( + "torch.cuda.reset_max_memory_allocated now calls torch.cuda.reset_peak_memory_stats, " + "which resets /all/ peak memory stats.", + FutureWarning, + ) + return reset_peak_memory_stats(device=device) + + +def reset_max_memory_cached(device: Union[Device, int] = None) -> None: + r"""Reset the starting point in tracking maximum GPU memory managed by the caching allocator for a given device. + + See :func:`~torch.cuda.max_memory_cached` for details. + + Args: + device (torch.device or int, optional): selected device. Returns + statistic for the current device, given by :func:`~torch.cuda.current_device`, + if :attr:`device` is ``None`` (default). + + .. warning:: + This function now calls :func:`~torch.cuda.reset_peak_memory_stats`, which resets + /all/ peak memory stats. + + .. 
note:: + See :ref:`cuda-memory-management` for more details about GPU memory + management. + """ + warnings.warn( + "torch.cuda.reset_max_memory_cached now calls torch.cuda.reset_peak_memory_stats, " + "which resets /all/ peak memory stats.", + FutureWarning, + ) + return reset_peak_memory_stats(device=device) + + +def memory_allocated(device: Union[Device, int] = None) -> int: + r"""Return the current GPU memory occupied by tensors in bytes for a given device. + + Args: + device (torch.device or int, optional): selected device. Returns + statistic for the current device, given by :func:`~torch.cuda.current_device`, + if :attr:`device` is ``None`` (default). + + .. note:: + This is likely less than the amount shown in `nvidia-smi` since some + unused memory can be held by the caching allocator and some context + needs to be created on GPU. See :ref:`cuda-memory-management` for more + details about GPU memory management. + """ + return memory_stats(device=device).get("allocated_bytes.all.current", 0) + + +def max_memory_allocated(device: Union[Device, int] = None) -> int: + r"""Return the maximum GPU memory occupied by tensors in bytes for a given device. + + By default, this returns the peak allocated memory since the beginning of + this program. :func:`~torch.cuda.reset_peak_memory_stats` can be used to + reset the starting point in tracking this metric. For example, these two + functions can measure the peak allocated memory usage of each iteration in a + training loop. + + Args: + device (torch.device or int, optional): selected device. Returns + statistic for the current device, given by :func:`~torch.cuda.current_device`, + if :attr:`device` is ``None`` (default). + + .. note:: + See :ref:`cuda-memory-management` for more details about GPU memory + management. + """ + return memory_stats(device=device).get("allocated_bytes.all.peak", 0) + + +def memory_reserved(device: Union[Device, int] = None) -> int: + r"""Return the current GPU memory managed by the caching allocator in bytes for a given device. + + Args: + device (torch.device or int, optional): selected device. Returns + statistic for the current device, given by :func:`~torch.cuda.current_device`, + if :attr:`device` is ``None`` (default). + + .. note:: + See :ref:`cuda-memory-management` for more details about GPU memory + management. + """ + return memory_stats(device=device).get("reserved_bytes.all.current", 0) + + +def max_memory_reserved(device: Union[Device, int] = None) -> int: + r"""Return the maximum GPU memory managed by the caching allocator in bytes for a given device. + + By default, this returns the peak cached memory since the beginning of this + program. :func:`~torch.cuda.reset_peak_memory_stats` can be used to reset + the starting point in tracking this metric. For example, these two functions + can measure the peak cached memory amount of each iteration in a training + loop. + + Args: + device (torch.device or int, optional): selected device. Returns + statistic for the current device, given by :func:`~torch.cuda.current_device`, + if :attr:`device` is ``None`` (default). + + .. note:: + See :ref:`cuda-memory-management` for more details about GPU memory + management. 
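+
+     A per-iteration tracking sketch (``loader`` and ``train_step`` are
+     hypothetical placeholders; assumes CUDA is available)::
+
+         for batch in loader:
+             torch.cuda.reset_peak_memory_stats()
+             train_step(batch)
+             peak = torch.cuda.max_memory_reserved()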
+ """ + return memory_stats(device=device).get("reserved_bytes.all.peak", 0) + + +def memory_cached(device: Union[Device, int] = None) -> int: + r"""Deprecated; see :func:`~torch.cuda.memory_reserved`.""" + warnings.warn( + "torch.cuda.memory_cached has been renamed to torch.cuda.memory_reserved", + FutureWarning, + ) + return memory_reserved(device=device) + + +def max_memory_cached(device: Union[Device, int] = None) -> int: + r"""Deprecated; see :func:`~torch.cuda.max_memory_reserved`.""" + warnings.warn( + "torch.cuda.max_memory_cached has been renamed to torch.cuda.max_memory_reserved", + FutureWarning, + ) + return max_memory_reserved(device=device) + + +def memory_snapshot(): + r"""Return a snapshot of the CUDA memory allocator state across all devices. + + Interpreting the output of this function requires familiarity with the + memory allocator internals. + + .. note:: + See :ref:`cuda-memory-management` for more details about GPU memory + management. + """ + return torch._C._cuda_memorySnapshot()["segments"] + + +def memory_summary(device: Union[Device, int] = None, abbreviated: bool = False) -> str: + r"""Return a human-readable printout of the current memory allocator statistics for a given device. + + This can be useful to display periodically during training, or when + handling out-of-memory exceptions. + + Args: + device (torch.device or int, optional): selected device. Returns + printout for the current device, given by :func:`~torch.cuda.current_device`, + if :attr:`device` is ``None`` (default). + abbreviated (bool, optional): whether to return an abbreviated summary + (default: False). + + .. note:: + See :ref:`cuda-memory-management` for more details about GPU memory + management. + """ + device = _get_device_index(device, optional=True) + stats = memory_stats(device=device) + + def _format_size(sz, pref_sz): + prefixes = ["B ", "KiB", "MiB", "GiB", "TiB", "PiB"] + prefix = prefixes[0] + for new_prefix in prefixes[1:]: + if pref_sz < 768 * 1024: + break + prefix = new_prefix + sz //= 1024 + pref_sz /= 1024 + return f"{sz:6d} {prefix}" + + def _format_count(cnt, pref_cnt): + prefixes = [" ", "K", "M"] + prefix = prefixes[0] + for new_prefix in prefixes[1:]: + if pref_cnt < 750 * 1000: + break + prefix = new_prefix + cnt //= 1000 + pref_cnt /= 1000 + return f"{cnt:7d} {prefix} " + + metrics_to_display = [ + ("allocated_bytes", "Allocated memory", _format_size), + ("active_bytes", "Active memory", _format_size), + ("requested_bytes", "Requested memory", _format_size), + ("reserved_bytes", "GPU reserved memory", _format_size), + ("inactive_split_bytes", "Non-releasable memory", _format_size), + ("allocation", "Allocations", _format_count), + ("active", "Active allocs", _format_count), + ("segment", "GPU reserved segments", _format_count), + ("inactive_split", "Non-releasable allocs", _format_count), + ] + + lines = [] + lines.append("=" * 75) + lines.append(" {_:16} PyTorch CUDA memory summary, device ID {device:<17d} ") + lines.append("-" * 75) + lines.append( + " {_:9} CUDA OOMs: {num_ooms:<12d} | {_:6} cudaMalloc retries: {num_alloc_retries:<8d} " + ) + lines.append("=" * 75) + lines.append( + " Metric | Cur Usage | Peak Usage | Tot Alloc | Tot Freed " + ) + + for metric_key, metric_name, formatter in metrics_to_display: + lines.append("-" * 75) + submetrics = [("all", metric_name)] + if not abbreviated: + submetrics.append(("large_pool", " from large pool")) + submetrics.append(("small_pool", " from small pool")) + + current_prefval, peak_prefval, allocated_prefval, 
freed_prefval = ( + None, + None, + None, + None, + ) + + for submetric_key, submetric_name in submetrics: + prefix = metric_key + "." + submetric_key + "." + + current = stats[prefix + "current"] + peak = stats[prefix + "peak"] + allocated = stats[prefix + "allocated"] + freed = stats[prefix + "freed"] + + if current_prefval is None: + current_prefval = current + peak_prefval = peak + allocated_prefval = allocated + freed_prefval = freed + + lines.append( + " {:<21} | {} | {} | {} | {} ".format( + submetric_name, + formatter(current, current_prefval), + formatter(peak, peak_prefval), + formatter(allocated, allocated_prefval), + formatter(freed, freed_prefval), + ), + ) + + metrics_to_display = [ + ("oversize_allocations", "Oversize allocations", _format_count), + ("oversize_segments", "Oversize GPU segments", _format_count), + ] + + for metric_key, metric_name, formatter in metrics_to_display: + lines.append("-" * 75) + + prefix = metric_key + "." + + current = stats[prefix + "current"] + peak = stats[prefix + "peak"] + allocated = stats[prefix + "allocated"] + freed = stats[prefix + "freed"] + + lines.append( + " {:<21} | {} | {} | {} | {} ".format( + metric_name, + formatter(current, current), + formatter(peak, peak), + formatter(allocated, allocated), + formatter(freed, freed), + ), + ) + + lines.append("=" * 75) + + fmt_dict = {"_": "", "device": device} + for k, v in stats.items(): + fmt_dict[k.replace(".", "-")] = v + return "|" + "|\n|".join(lines).format(**fmt_dict) + "|\n" + + +def list_gpu_processes(device: Union[Device, int] = None) -> str: + r"""Return a human-readable printout of the running processes and their GPU memory use for a given device. + + This can be useful to display periodically during training, or when + handling out-of-memory exceptions. + + Args: + device (torch.device or int, optional): selected device. Returns + printout for the current device, given by :func:`~torch.cuda.current_device`, + if :attr:`device` is ``None`` (default). + """ + try: + import pynvml # type: ignore[import] + except ModuleNotFoundError: + return "pynvml module not found, please install pynvml" + from pynvml import NVMLError_DriverNotLoaded + + try: + pynvml.nvmlInit() + except NVMLError_DriverNotLoaded: + return "cuda driver can't be loaded, is cuda enabled?" + device = _get_nvml_device_index(device) + handle = pynvml.nvmlDeviceGetHandleByIndex(device) + procs = pynvml.nvmlDeviceGetComputeRunningProcesses(handle) + lines = [] + lines.append(f"GPU:{device}") + if len(procs) == 0: + lines.append("no processes are running") + for p in procs: + mem = p.usedGpuMemory / (1024 * 1024) + lines.append(f"process {p.pid:>10d} uses {mem:>12.3f} MB GPU memory") + return "\n".join(lines) + + +def mem_get_info(device: Union[Device, int] = None) -> Tuple[int, int]: + r"""Return the global free and total GPU memory for a given device using cudaMemGetInfo. + + Args: + device (torch.device or int, optional): selected device. Returns + statistic for the current device, given by :func:`~torch.cuda.current_device`, + if :attr:`device` is ``None`` (default). + + .. note:: + See :ref:`cuda-memory-management` for more + details about GPU memory management. 
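+
+     A small sketch (the 10% threshold below is arbitrary and purely
+     illustrative)::
+
+         free, total = torch.cuda.mem_get_info()
+         if free / total < 0.1:
+             torch.cuda.empty_cache()  # try to release cached, unused blocks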
+ """ + if device is None: + device = torch.cuda.current_device() + device = _get_device_index(device) + return torch.cuda.cudart().cudaMemGetInfo(device) + + +def _record_memory_history_legacy( + enabled: bool, + record_context=True, + trace_alloc_max_entries=1, + trace_alloc_record_context=False, + device: Union[Device, int] = None, + record_context_cpp=False, +): + _C._cuda_record_memory_history_legacy( + enabled, + record_context, + trace_alloc_max_entries, + trace_alloc_record_context, + record_context_cpp, + ) + + +def _record_memory_history(enabled="all", *args, **kwargs): + """Enable recording of stack traces associated with memory + allocations, so you can tell what allocated any piece of memory in + :func:`torch.cuda.memory._snapshot()`. + + In addition too keeping stack traces with each current allocation and free, + this will also enable recording of a history of all alloc/free events. + + Use :func:`torch.cuda.memory._snapshot()` to retrieve this information, + and the tools in `_memory_viz.py` to visualize snapshots. + + The Python trace collection is fast (2us per trace), so you may consider + enabling this on production jobs if you anticipate ever having to debug + memory issues. + + C++ trace collection is also fast (~50ns/frame), which for many typical programs + works out to ~2us per trace, but can vary depending on stack depth. + + Args: + enabled (Literal[None, "state", "all"], optional): + `None`, disable recording memory history. + `"state"`, keep information for currenly allocated memory. + `"all"`, additionally keep a history of all alloc/free calls. + Defaults to "all". + context (Literal[None, "state", "alloc", "all"], optional): + `None`, Do not record any tracebacks. + `"state"`, Record tracebacks for currently allocated memory. + `"alloc"`, additionally keep tracebacks for alloc calls. + `"all"`, additionally keep tracebacks for free calls. + Defaults to "all". + stacks (Literal["python", "all"], optional): + `"python"`, include Python, TorchScript, and inductor frames in tracebacks + `"all"`, additionally include C++ frames + Defaults to "all". + max_entries (int, optional): Keep a maximum of `max_entries` + alloc/free events in the recorded history recorded. + """ + if isinstance(enabled, bool): + return _record_memory_history_legacy(enabled, *args, **kwargs) + else: + return _record_memory_history_impl(enabled, *args, **kwargs) + + +def _record_memory_history_impl( + enabled: Optional[str] = "all", + context: Optional[str] = "all", + stacks: str = "all", + max_entries: int = sys.maxsize, + device: Union[Device, int] = None, +): + _C._cuda_record_memory_history(enabled, context, stacks, max_entries) + + +_record_memory_history.__signature__ = signature(_record_memory_history_impl) # type: ignore[attr-defined] + + +def _snapshot(device: Union[Device, int] = None): + """Save a snapshot of CUDA memory state at the time it was called. + + The state is represented as a dictionary with the following structure. + + .. code-block:: python + + class Snapshot(TypedDict): + segments : List[Segment] + device_traces: List[List[TraceEntry]] + + class Segment(TypedDict): + # Segments are memory returned from a cudaMalloc call. + # The size of reserved memory is the sum of all Segments. + # Segments are cached and reused for future allocations. + # If the reuse is smaller than the segment, the segment + # is split into more then one Block. + # empty_cache() frees Segments that are entirely inactive. 
+ address: int + total_size: int # cudaMalloc'd size of segment + stream: int + segment_type: Literal['small', 'large'] # 'large' (>1MB) + allocated_size: int # size of memory in use + active_size: int # size of memory in use or in active_awaiting_free state + blocks : List[Block] + + class Block(TypedDict): + # A piece of memory returned from the allocator, or + # current cached but inactive. + size: int + requested_size: int # size requested during malloc, may be smaller than + # size due to rounding + address: int + state: Literal['active_allocated', # used by a tensor + 'active_awaiting_free', # waiting for another stream to finish using + # this, then it will become free + 'inactive',] # free for reuse + frames: List[Frame] # stack trace from where the allocation occurred + + class Frame(TypedDict): + filename: str + line: int + name: str + + class TraceEntry(TypedDict): + # When `torch.cuda.memory._record_memory_history()` is enabled, + # the snapshot will contain TraceEntry objects that record each + # action the allocator took. + action: Literal[ + 'alloc' # memory allocated + 'free_requested', # the allocated received a call to free memory + 'free_completed', # the memory that was requested to be freed is now + # able to be used in future allocation calls + 'segment_alloc', # the caching allocator ask cudaMalloc for more memory + # and added it as a segment in its cache + 'segment_free', # the caching allocator called cudaFree to return memory + # to cuda possibly trying free up memory to + # allocate more segments or because empty_caches was called + 'oom', # the allocator threw an OOM exception. 'size' is + # the requested number of bytes that did not succeed + 'snapshot' # the allocator generated a memory snapshot + # useful to coorelate a previously taken + # snapshot with this trace + ] + addr: int # not present for OOM + frames: List[Frame] + size: int + stream: int + device_free: int # only present for OOM, the amount of + # memory cuda still reports to be free + + Returns: + The Snapshot dictionary object + """ + return _C._cuda_memorySnapshot() + + +def _dump_snapshot(filename="dump_snapshot.pickle"): + """ + Save a pickled version of the `torch.memory._snapshot()` dictionary to a file. + + This file can be opened by the interactive snapshot viewer at pytorch.org/memory_viz + + Args: + filename (str, optional): Name of the file to create. Defaults to "dump_snapshot.pickle". + """ + s = _snapshot() + with open(filename, "wb") as f: + pickle.dump(s, f) + + +def _save_segment_usage(filename="output.svg", snapshot=None): + if snapshot is None: + snapshot = _snapshot() + with open(filename, "w") as f: + f.write(_segments(snapshot)) + + +def _save_memory_usage(filename="output.svg", snapshot=None): + if snapshot is None: + snapshot = _snapshot() + with open(filename, "w") as f: + f.write(_memory(snapshot)) + + +def _set_allocator_settings(env: str): + return torch._C._cuda_cudaCachingAllocator_set_allocator_settings(env) + + +def get_allocator_backend() -> str: + r"""Return a string describing the active allocator backend as set by + ``PYTORCH_CUDA_ALLOC_CONF``. Currently available backends are + ``native`` (PyTorch's native caching allocator) and `cudaMallocAsync`` + (CUDA's built-in asynchronous allocator). + + .. note:: + See :ref:`cuda-memory-management` for details on choosing the allocator backend. 
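+
+     Example (the value shown is illustrative; the result depends on how
+     ``PYTORCH_CUDA_ALLOC_CONF`` is configured)::
+
+         >>> torch.cuda.get_allocator_backend()
+         'native'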
+ """ + return torch._C._cuda_getAllocatorBackend() + + +class _CUDAAllocator: + r"""Wrapper over internal CUDA memory allocators.""" + + def __init__(self, allocator: torch._C._cuda_CUDAAllocator): + self._allocator = allocator + + def allocator(self): + return self._allocator + + +class CUDAPluggableAllocator(_CUDAAllocator): + r"""CUDA memory allocator loaded from a so file.""" + + def __init__(self, path_to_so_file: str, alloc_fn_name: str, free_fn_name: str): + r"""Memory allocators are compiled in .so files and loaded dynamically using ctypes. + + To change the active allocator use the :func:`torch.memory.cuda.change_current_allocator` function. + + Args: + path_to_so_file(str): Path in the filesystem to the `.so` file containing + the allocator functions + alloc_fn_name(str): Name of the function to perform the memory allocation + in the so file. The signature must be: + void* alloc_fn_name(ssize_t size, int device, cudaStream_t stream); + free_fn_name(str): Name of the function to perform the memory release + in the so file. The signature must be: + void free_fn_name(void* ptr, size_t size, cudaStream_t stream); + + .. warning:: + This is currently supported only in unix OSs + + .. note:: + See :ref:`cuda-memory-management` for details on creating and using a custom allocator + """ + allocator = ctypes.CDLL(path_to_so_file) + alloc_fn = ctypes.cast(getattr(allocator, alloc_fn_name), ctypes.c_void_p).value + free_fn = ctypes.cast(getattr(allocator, free_fn_name), ctypes.c_void_p).value + assert alloc_fn is not None + assert free_fn is not None + self._allocator = torch._C._cuda_customAllocator(alloc_fn, free_fn) + + +def change_current_allocator(allocator: _CUDAAllocator) -> None: + r"""Change the currently used memory allocator to be the one provided. + + If the current allocator has already been used/initialized, this function will error. + + + Args: + allocator (torch.cuda.memory._CUDAAllocator): allocator to be set as the active one. + .. note:: + See :ref:`cuda-memory-management` for details on creating and using a custom allocator + """ + torch._C._cuda_changeCurrentAllocator(allocator.allocator()) + + +def _get_current_allocator() -> _CUDAAllocator: + r"""Return the allocator being currently used. + + .. 
note:: + See :ref:`cuda-memory-management` for details on creating and using a custom allocator + """ + return _CUDAAllocator(torch._C._cuda_getAllocator()) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/cuda/nccl.py b/env-llmeval/lib/python3.10/site-packages/torch/cuda/nccl.py new file mode 100644 index 0000000000000000000000000000000000000000..05751ab5f87b7042426454e83541d3bebe1861fc --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/cuda/nccl.py @@ -0,0 +1,137 @@ +import collections +import warnings +from typing import Optional, Sequence, Union + +import torch.cuda + + +__all__ = ["all_reduce", "reduce", "broadcast", "all_gather", "reduce_scatter"] + +SUM = 0 # ncclRedOp_t + + +def is_available(tensors): + if not hasattr(torch._C, "_nccl_all_reduce"): + warnings.warn("PyTorch is not compiled with NCCL support") + return False + + devices = set() + for tensor in tensors: + if tensor.is_sparse: + return False + if not tensor.is_contiguous(): + return False + if not tensor.is_cuda: + return False + device = tensor.get_device() + if device in devices: + return False + devices.add(device) + + return True + + +def version(): + ver = torch._C._nccl_version() + major = ver >> 32 + minor = (ver >> 16) & 65535 + patch = ver & 65535 + suffix = torch._C._nccl_version_suffix().decode("utf-8") + if suffix == "": + return (major, minor, patch) + else: + return (major, minor, patch, suffix) + + +def unique_id(): + return torch._C._nccl_unique_id() + + +def init_rank(num_ranks, uid, rank): + return torch._C._nccl_init_rank(num_ranks, uid, rank) + + +def _check_sequence_type(inputs: Union[torch.Tensor, Sequence[torch.Tensor]]) -> None: + if not isinstance(inputs, collections.abc.Container) or isinstance( + inputs, torch.Tensor + ): + raise TypeError("Inputs should be a collection of tensors") + + +def all_reduce(inputs, outputs=None, op=SUM, streams=None, comms=None): + _check_sequence_type(inputs) + if outputs is None: + outputs = inputs + _check_sequence_type(outputs) + torch._C._nccl_all_reduce(inputs, outputs, op, streams, comms) + + +# `output` used to be `outputs`, taking in a list of tensors. So we have two +# arguments for BC reasons. +def reduce( + inputs: Sequence[torch.Tensor], + output: Optional[Union[torch.Tensor, Sequence[torch.Tensor]]] = None, + root: int = 0, + op: int = SUM, + streams: Optional[Sequence[torch.cuda.Stream]] = None, + comms=None, + *, + outputs: Optional[Sequence[torch.Tensor]] = None, +) -> None: + _check_sequence_type(inputs) + _output: torch.Tensor + if outputs is not None: + if output is not None: + raise ValueError( + "'output' and 'outputs' can not be both specified. 'outputs' is deprecated in " + "favor of 'output', taking in a single output tensor. The signature of reduce is: " + "reduce(inputs, output=None, root=0, op=SUM, streams=None, comms=None)." + ) + else: + warnings.warn( + "nccl.reduce with an output tensor list is deprecated. " + "Please specify a single output tensor with argument 'output' instead instead." + ) + _output = outputs[root] + elif not isinstance(output, torch.Tensor) and isinstance( + output, collections.abc.Sequence + ): + # User called old API with positional arguments of list of output tensors. + warnings.warn( + "nccl.reduce with an output tensor list is deprecated. " + "Please specify a single output tensor." 
+ ) + _output = output[root] + else: + _output = inputs[root] if output is None else output + torch._C._nccl_reduce(inputs, _output, root, op, streams, comms) + + +def broadcast( + inputs: Sequence[torch.Tensor], root: int = 0, streams=None, comms=None +) -> None: + _check_sequence_type(inputs) + torch._C._nccl_broadcast(inputs, root, streams, comms) + + +def all_gather( + inputs: Sequence[torch.Tensor], + outputs: Sequence[torch.Tensor], + streams=None, + comms=None, +) -> None: + _check_sequence_type(inputs) + _check_sequence_type(outputs) + torch._C._nccl_all_gather(inputs, outputs, streams, comms) + + +def reduce_scatter( + inputs: Sequence[torch.Tensor], + outputs: Sequence[torch.Tensor], + op: int = SUM, + streams=None, + comms=None, +) -> None: + _check_sequence_type(inputs) + _check_sequence_type(outputs) + torch._C._nccl_reduce_scatter(inputs, outputs, op, streams, comms) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/cuda/nvtx.py b/env-llmeval/lib/python3.10/site-packages/torch/cuda/nvtx.py new file mode 100644 index 0000000000000000000000000000000000000000..4b902c0c6d4d76c6d584ed4d0ad1cc71a3f9cc6d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/cuda/nvtx.py @@ -0,0 +1,91 @@ +r"""This package adds support for NVIDIA Tools Extension (NVTX) used in profiling.""" + +from contextlib import contextmanager + +try: + from torch._C import _nvtx +except ImportError: + + class _NVTXStub: + @staticmethod + def _fail(*args, **kwargs): + raise RuntimeError( + "NVTX functions not installed. Are you sure you have a CUDA build?" + ) + + rangePushA = _fail + rangePop = _fail + markA = _fail + + _nvtx = _NVTXStub() # type: ignore[assignment] + +__all__ = ["range_push", "range_pop", "range_start", "range_end", "mark", "range"] + + +def range_push(msg): + """ + Push a range onto a stack of nested range span. Returns zero-based depth of the range that is started. + + Args: + msg (str): ASCII message to associate with range + """ + return _nvtx.rangePushA(msg) + + +def range_pop(): + """Pop a range off of a stack of nested range spans. Returns the zero-based depth of the range that is ended.""" + return _nvtx.rangePop() + + +def range_start(msg) -> int: + """ + Mark the start of a range with string message. It returns an unique handle + for this range to pass to the corresponding call to rangeEnd(). + + A key difference between this and range_push/range_pop is that the + range_start/range_end version supports range across threads (start on one + thread and end on another thread). + + Returns: A range handle (uint64_t) that can be passed to range_end(). + + Args: + msg (str): ASCII message to associate with the range. + """ + return _nvtx.rangeStartA(msg) + + +def range_end(range_id) -> None: + """ + Mark the end of a range for a given range_id. + + Args: + range_id (int): an unique handle for the start range. + """ + _nvtx.rangeEnd(range_id) + + +def mark(msg): + """ + Describe an instantaneous event that occurred at some point. + + Args: + msg (str): ASCII message to associate with the event. + """ + return _nvtx.markA(msg) + + +@contextmanager +def range(msg, *args, **kwargs): + """ + Context manager / decorator that pushes an NVTX range at the beginning + of its scope, and pops it at the end. If extra arguments are given, + they are passed as arguments to msg.format(). 
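+
+     A minimal sketch (``model``, ``inputs`` and ``step`` are hypothetical
+     placeholders; the range only annotates profiler timelines)::
+
+         with torch.cuda.nvtx.range("forward step {}", step):
+             output = model(inputs)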
+ + Args: + msg (str): message to associate with the range + """ + range_push(msg.format(*args, **kwargs)) + try: + yield + finally: + range_pop() diff --git a/env-llmeval/lib/python3.10/site-packages/torch/cuda/profiler.py b/env-llmeval/lib/python3.10/site-packages/torch/cuda/profiler.py new file mode 100644 index 0000000000000000000000000000000000000000..51c8aa46f714b6a9fd30857c9edb575614d52420 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/cuda/profiler.py @@ -0,0 +1,61 @@ +import contextlib +import tempfile + +import torch +from . import check_error, cudart + +__all__ = ["init", "start", "stop", "profile"] + +DEFAULT_FLAGS = [ + "gpustarttimestamp", + "gpuendtimestamp", + "gridsize3d", + "threadblocksize", + "streamid", + "enableonstart 0", + "conckerneltrace", +] + + +def init(output_file, flags=None, output_mode="key_value"): + rt = cudart() + if not hasattr(rt, "cudaOutputMode"): + raise AssertionError("HIP does not support profiler initialization!") + if ( + hasattr(torch.version, "cuda") + and torch.version.cuda is not None + and int(torch.version.cuda.split(".")[0]) >= 12 + ): + # Check https://github.com/pytorch/pytorch/pull/91118 + # cudaProfilerInitialize is no longer needed after CUDA 12 + raise AssertionError("CUDA12+ does not need profiler initialization!") + flags = DEFAULT_FLAGS if flags is None else flags + if output_mode == "key_value": + output_mode_enum = rt.cudaOutputMode.KeyValuePair + elif output_mode == "csv": + output_mode_enum = rt.cudaOutputMode.CSV + else: + raise RuntimeError( + "supported CUDA profiler output modes are: key_value and csv" + ) + with tempfile.NamedTemporaryFile(delete=True) as f: + f.write(b"\n".join(f.encode("ascii") for f in flags)) + f.flush() + check_error(rt.cudaProfilerInitialize(f.name, output_file, output_mode_enum)) + + +def start(): + check_error(cudart().cudaProfilerStart()) + + +def stop(): + check_error(cudart().cudaProfilerStop()) + + +@contextlib.contextmanager +def profile(): + try: + start() + yield + finally: + stop() diff --git a/env-llmeval/lib/python3.10/site-packages/torch/cuda/random.py b/env-llmeval/lib/python3.10/site-packages/torch/cuda/random.py new file mode 100644 index 0000000000000000000000000000000000000000..1cf33114d17bd1867dfc5e5bb9179670291878a2 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/cuda/random.py @@ -0,0 +1,179 @@ +from typing import Iterable, List, Union + +import torch +from .. import Tensor +from . import _lazy_call, _lazy_init, current_device, device_count + +__all__ = [ + "get_rng_state", + "get_rng_state_all", + "set_rng_state", + "set_rng_state_all", + "manual_seed", + "manual_seed_all", + "seed", + "seed_all", + "initial_seed", +] + + +def get_rng_state(device: Union[int, str, torch.device] = "cuda") -> Tensor: + r"""Return the random number generator state of the specified GPU as a ByteTensor. + + Args: + device (torch.device or int, optional): The device to return the RNG state of. + Default: ``'cuda'`` (i.e., ``torch.device('cuda')``, the current CUDA device). + + .. warning:: + This function eagerly initializes CUDA. 
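+
+     A save/restore sketch (assumes CUDA is available)::
+
+         state = torch.cuda.get_rng_state()
+         torch.rand(3, device="cuda")     # advances the generator
+         torch.cuda.set_rng_state(state)  # replays the same random stream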
+ """ + _lazy_init() + if isinstance(device, str): + device = torch.device(device) + elif isinstance(device, int): + device = torch.device("cuda", device) + idx = device.index + if idx is None: + idx = current_device() + default_generator = torch.cuda.default_generators[idx] + return default_generator.get_state() + + +def get_rng_state_all() -> List[Tensor]: + r"""Return a list of ByteTensor representing the random number states of all devices.""" + results = [] + for i in range(device_count()): + results.append(get_rng_state(i)) + return results + + +def set_rng_state( + new_state: Tensor, device: Union[int, str, torch.device] = "cuda" +) -> None: + r"""Set the random number generator state of the specified GPU. + + Args: + new_state (torch.ByteTensor): The desired state + device (torch.device or int, optional): The device to set the RNG state. + Default: ``'cuda'`` (i.e., ``torch.device('cuda')``, the current CUDA device). + """ + with torch._C._DisableFuncTorch(): + new_state_copy = new_state.clone(memory_format=torch.contiguous_format) + if isinstance(device, str): + device = torch.device(device) + elif isinstance(device, int): + device = torch.device("cuda", device) + + def cb(): + idx = device.index + if idx is None: + idx = current_device() + default_generator = torch.cuda.default_generators[idx] + default_generator.set_state(new_state_copy) + + _lazy_call(cb) + + +def set_rng_state_all(new_states: Iterable[Tensor]) -> None: + r"""Set the random number generator state of all devices. + + Args: + new_states (Iterable of torch.ByteTensor): The desired state for each device. + """ + for i, state in enumerate(new_states): + set_rng_state(state, i) + + +def manual_seed(seed: int) -> None: + r"""Set the seed for generating random numbers for the current GPU. + + It's safe to call this function if CUDA is not available; in that + case, it is silently ignored. + + Args: + seed (int): The desired seed. + + .. warning:: + If you are working with a multi-GPU model, this function is insufficient + to get determinism. To seed all GPUs, use :func:`manual_seed_all`. + """ + seed = int(seed) + + def cb(): + idx = current_device() + default_generator = torch.cuda.default_generators[idx] + default_generator.manual_seed(seed) + + _lazy_call(cb, seed=True) + + +def manual_seed_all(seed: int) -> None: + r"""Set the seed for generating random numbers on all GPUs. + + It's safe to call this function if CUDA is not available; in that + case, it is silently ignored. + + Args: + seed (int): The desired seed. + """ + seed = int(seed) + + def cb(): + for i in range(device_count()): + default_generator = torch.cuda.default_generators[i] + default_generator.manual_seed(seed) + + _lazy_call(cb, seed_all=True) + + +def seed() -> None: + r"""Set the seed for generating random numbers to a random number for the current GPU. + + It's safe to call this function if CUDA is not available; in that + case, it is silently ignored. + + .. warning:: + If you are working with a multi-GPU model, this function will only initialize + the seed on one GPU. To initialize all GPUs, use :func:`seed_all`. + """ + + def cb(): + idx = current_device() + default_generator = torch.cuda.default_generators[idx] + default_generator.seed() + + _lazy_call(cb) + + +def seed_all() -> None: + r"""Set the seed for generating random numbers to a random number on all GPUs. + + It's safe to call this function if CUDA is not available; in that + case, it is silently ignored. 
+ """ + + def cb(): + random_seed = 0 + seeded = False + for i in range(device_count()): + default_generator = torch.cuda.default_generators[i] + if not seeded: + default_generator.seed() + random_seed = default_generator.initial_seed() + seeded = True + else: + default_generator.manual_seed(random_seed) + + _lazy_call(cb) + + +def initial_seed() -> int: + r"""Return the current random seed of the current GPU. + + .. warning:: + This function eagerly initializes CUDA. + """ + _lazy_init() + idx = current_device() + default_generator = torch.cuda.default_generators[idx] + return default_generator.initial_seed() diff --git a/env-llmeval/lib/python3.10/site-packages/torch/cuda/streams.py b/env-llmeval/lib/python3.10/site-packages/torch/cuda/streams.py new file mode 100644 index 0000000000000000000000000000000000000000..3d417958373ed21c22155571fc32e3a22616e821 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/cuda/streams.py @@ -0,0 +1,241 @@ +import ctypes + +import torch +from torch._streambase import _EventBase, _StreamBase +from ._utils import _dummy_type + + +if not hasattr(torch._C, "_CudaStreamBase"): + # Define dummy base classes + torch._C.__dict__["_CudaStreamBase"] = _dummy_type("_CudaStreamBase") + torch._C.__dict__["_CudaEventBase"] = _dummy_type("_CudaEventBase") + + +class Stream(torch._C._CudaStreamBase, _StreamBase): + r"""Wrapper around a CUDA stream. + + A CUDA stream is a linear sequence of execution that belongs to a specific + device, independent from other streams. See :ref:`cuda-semantics` for + details. + + Args: + device(torch.device or int, optional): a device on which to allocate + the stream. If :attr:`device` is ``None`` (default) or a negative + integer, this will use the current device. + priority(int, optional): priority of the stream, should be 0 or + negative, where negative numbers indicate higher priority. By default, + streams have priority 0. + + """ + + def __new__(cls, device=None, priority=0, **kwargs): + # setting device manager is expensive, so we avoid it unless necessary + if device is None or ("stream_id" in kwargs and "device_index" in kwargs): + return super().__new__(cls, priority=priority, **kwargs) + else: + with torch.cuda.device(device): + return super().__new__(cls, priority=priority, **kwargs) + + def wait_event(self, event): + r"""Make all future work submitted to the stream wait for an event. + + Args: + event (torch.cuda.Event): an event to wait for. + + .. note:: This is a wrapper around ``cudaStreamWaitEvent()``: see + `CUDA Stream documentation`_ for more info. + + This function returns without waiting for :attr:`event`: only future + operations are affected. + + .. _CUDA Stream documentation: + https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__STREAM.html + """ + event.wait(self) + + def wait_stream(self, stream): + r"""Synchronize with another stream. + + All future work submitted to this stream will wait until all kernels + submitted to a given stream at the time of call complete. + + Args: + stream (Stream): a stream to synchronize. + + .. note:: This function returns without waiting for currently enqueued + kernels in :attr:`stream`: only future operations are affected. + """ + self.wait_event(stream.record_event()) + + def record_event(self, event=None): + r"""Record an event. + + Args: + event (torch.cuda.Event, optional): event to record. If not given, a new one + will be allocated. + + Returns: + Recorded event. 
+ """ + if event is None: + event = Event() + event.record(self) + return event + + def query(self): + r"""Check if all the work submitted has been completed. + + Returns: + A boolean indicating if all kernels in this stream are completed. + """ + return super().query() + + def synchronize(self): + r"""Wait for all the kernels in this stream to complete. + + .. note:: This is a wrapper around ``cudaStreamSynchronize()``: see + `CUDA Stream documentation`_ for more info. + """ + super().synchronize() + + @property + def _as_parameter_(self): + return ctypes.c_void_p(self.cuda_stream) + + def __eq__(self, o): + if isinstance(o, Stream): + return super().__eq__(o) + return False + + def __hash__(self): + return hash((self.cuda_stream, self.device)) + + def __repr__(self): + return f"" + + +class ExternalStream(Stream): + r"""Wrapper around an externally allocated CUDA stream. + + This class is used to wrap streams allocated in other libraries in order + to facilitate data exchange and multi-library interactions. + + .. note:: This class doesn't manage the stream life-cycle, it is the user + responsibility to keep the referenced stream alive while this class is + being used. + + Args: + stream_ptr(int): Integer representation of the `cudaStream_t` value. + allocated externally. + device(torch.device or int, optional): the device where the stream + was originally allocated. if device is specified incorrectly, + subsequent launches using this stream may fail. + """ + + def __new__(cls, stream_ptr, device=None, **kwargs): + with torch.cuda.device(device): + return super().__new__(cls, stream_ptr=stream_ptr, **kwargs) + + +class Event(torch._C._CudaEventBase, _EventBase): + r"""Wrapper around a CUDA event. + + CUDA events are synchronization markers that can be used to monitor the + device's progress, to accurately measure timing, and to synchronize CUDA + streams. + + The underlying CUDA events are lazily initialized when the event is first + recorded or exported to another process. After creation, only streams on the + same device may record the event. However, streams on any device can wait on + the event. + + Args: + enable_timing (bool, optional): indicates if the event should measure time + (default: ``False``) + blocking (bool, optional): if ``True``, :meth:`wait` will be blocking (default: ``False``) + interprocess (bool): if ``True``, the event can be shared between processes + (default: ``False``) + + .. _CUDA Event Documentation: + https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__EVENT.html + """ + + def __new__(cls, enable_timing=False, blocking=False, interprocess=False): + return super().__new__( + cls, + enable_timing=enable_timing, + blocking=blocking, + interprocess=interprocess, + ) + + @classmethod + def from_ipc_handle(cls, device, handle): + r"""Reconstruct an event from an IPC handle on the given device.""" + return super().from_ipc_handle(device, handle) + + def record(self, stream=None): + r"""Record the event in a given stream. + + Uses ``torch.cuda.current_stream()`` if no stream is specified. The + stream's device must match the event's device. + """ + if stream is None: + stream = torch.cuda.current_stream() + super().record(stream) + + def wait(self, stream=None): + r"""Make all future work submitted to the given stream wait for this event. + + Use ``torch.cuda.current_stream()`` if no stream is specified. + + .. note:: This is a wrapper around ``cudaStreamWaitEvent()``: see + `CUDA Event documentation`_ for more info. 
+ """ + if stream is None: + stream = torch.cuda.current_stream() + super().wait(stream) + + def query(self): + r"""Check if all work currently captured by event has completed. + + Returns: + A boolean indicating if all work currently captured by event has + completed. + """ + return super().query() + + def elapsed_time(self, end_event): + r"""Return the time elapsed. + + Time reported in milliseconds after the event was recorded and + before the end_event was recorded. + """ + return super().elapsed_time(end_event) + + def synchronize(self): + r"""Wait for the event to complete. + + Waits until the completion of all work currently captured in this event. + This prevents the CPU thread from proceeding until the event completes. + + .. note:: This is a wrapper around ``cudaEventSynchronize()``: see + `CUDA Event documentation`_ for more info. + """ + super().synchronize() + + def ipc_handle(self): + r"""Return an IPC handle of this event. + + If not recorded yet, the event will use the current device. + """ + return super().ipc_handle() + + @property + def _as_parameter_(self): + return ctypes.c_void_p(self.cuda_event) + + def __repr__(self): + if self.cuda_event: + return f"" + else: + return "" diff --git a/env-llmeval/lib/python3.10/site-packages/torch/distributions/__init__.py b/env-llmeval/lib/python3.10/site-packages/torch/distributions/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..46be6489de3ac009cbcca3d9fa0cf76f45879d2f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/distributions/__init__.py @@ -0,0 +1,171 @@ +r""" +The ``distributions`` package contains parameterizable probability distributions +and sampling functions. This allows the construction of stochastic computation +graphs and stochastic gradient estimators for optimization. This package +generally follows the design of the `TensorFlow Distributions`_ package. + +.. _`TensorFlow Distributions`: + https://arxiv.org/abs/1711.10604 + +It is not possible to directly backpropagate through random samples. However, +there are two main methods for creating surrogate functions that can be +backpropagated through. These are the score function estimator/likelihood ratio +estimator/REINFORCE and the pathwise derivative estimator. REINFORCE is commonly +seen as the basis for policy gradient methods in reinforcement learning, and the +pathwise derivative estimator is commonly seen in the reparameterization trick +in variational autoencoders. Whilst the score function only requires the value +of samples :math:`f(x)`, the pathwise derivative requires the derivative +:math:`f'(x)`. The next sections discuss these two in a reinforcement learning +example. For more details see +`Gradient Estimation Using Stochastic Computation Graphs`_ . + +.. _`Gradient Estimation Using Stochastic Computation Graphs`: + https://arxiv.org/abs/1506.05254 + +Score function +^^^^^^^^^^^^^^ + +When the probability density function is differentiable with respect to its +parameters, we only need :meth:`~torch.distributions.Distribution.sample` and +:meth:`~torch.distributions.Distribution.log_prob` to implement REINFORCE: + +.. math:: + + \Delta\theta = \alpha r \frac{\partial\log p(a|\pi^\theta(s))}{\partial\theta} + +where :math:`\theta` are the parameters, :math:`\alpha` is the learning rate, +:math:`r` is the reward and :math:`p(a|\pi^\theta(s))` is the probability of +taking action :math:`a` in state :math:`s` given policy :math:`\pi^\theta`. 
+ +In practice we would sample an action from the output of a network, apply this +action in an environment, and then use ``log_prob`` to construct an equivalent +loss function. Note that we use a negative because optimizers use gradient +descent, whilst the rule above assumes gradient ascent. With a categorical +policy, the code for implementing REINFORCE would be as follows:: + + probs = policy_network(state) + # Note that this is equivalent to what used to be called multinomial + m = Categorical(probs) + action = m.sample() + next_state, reward = env.step(action) + loss = -m.log_prob(action) * reward + loss.backward() + +Pathwise derivative +^^^^^^^^^^^^^^^^^^^ + +The other way to implement these stochastic/policy gradients would be to use the +reparameterization trick from the +:meth:`~torch.distributions.Distribution.rsample` method, where the +parameterized random variable can be constructed via a parameterized +deterministic function of a parameter-free random variable. The reparameterized +sample therefore becomes differentiable. The code for implementing the pathwise +derivative would be as follows:: + + params = policy_network(state) + m = Normal(*params) + # Any distribution with .has_rsample == True could work based on the application + action = m.rsample() + next_state, reward = env.step(action) # Assuming that reward is differentiable + loss = -reward + loss.backward() +""" + +from .bernoulli import Bernoulli +from .beta import Beta +from .binomial import Binomial +from .categorical import Categorical +from .cauchy import Cauchy +from .chi2 import Chi2 +from .constraint_registry import biject_to, transform_to +from .continuous_bernoulli import ContinuousBernoulli +from .dirichlet import Dirichlet +from .distribution import Distribution +from .exp_family import ExponentialFamily +from .exponential import Exponential +from .fishersnedecor import FisherSnedecor +from .gamma import Gamma +from .geometric import Geometric +from .gumbel import Gumbel +from .half_cauchy import HalfCauchy +from .half_normal import HalfNormal +from .independent import Independent +from .inverse_gamma import InverseGamma +from .kl import _add_kl_info, kl_divergence, register_kl +from .kumaraswamy import Kumaraswamy +from .laplace import Laplace +from .lkj_cholesky import LKJCholesky +from .log_normal import LogNormal +from .logistic_normal import LogisticNormal +from .lowrank_multivariate_normal import LowRankMultivariateNormal +from .mixture_same_family import MixtureSameFamily +from .multinomial import Multinomial +from .multivariate_normal import MultivariateNormal +from .negative_binomial import NegativeBinomial +from .normal import Normal +from .one_hot_categorical import OneHotCategorical, OneHotCategoricalStraightThrough +from .pareto import Pareto +from .poisson import Poisson +from .relaxed_bernoulli import RelaxedBernoulli +from .relaxed_categorical import RelaxedOneHotCategorical +from .studentT import StudentT +from .transformed_distribution import TransformedDistribution +from .transforms import * # noqa: F403 +from . 
import transforms +from .uniform import Uniform +from .von_mises import VonMises +from .weibull import Weibull +from .wishart import Wishart + +_add_kl_info() +del _add_kl_info + +__all__ = [ + "Bernoulli", + "Beta", + "Binomial", + "Categorical", + "Cauchy", + "Chi2", + "ContinuousBernoulli", + "Dirichlet", + "Distribution", + "Exponential", + "ExponentialFamily", + "FisherSnedecor", + "Gamma", + "Geometric", + "Gumbel", + "HalfCauchy", + "HalfNormal", + "Independent", + "InverseGamma", + "Kumaraswamy", + "LKJCholesky", + "Laplace", + "LogNormal", + "LogisticNormal", + "LowRankMultivariateNormal", + "MixtureSameFamily", + "Multinomial", + "MultivariateNormal", + "NegativeBinomial", + "Normal", + "OneHotCategorical", + "OneHotCategoricalStraightThrough", + "Pareto", + "RelaxedBernoulli", + "RelaxedOneHotCategorical", + "StudentT", + "Poisson", + "Uniform", + "VonMises", + "Weibull", + "Wishart", + "TransformedDistribution", + "biject_to", + "kl_divergence", + "register_kl", + "transform_to", +] +__all__.extend(transforms.__all__) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/distributions/bernoulli.py b/env-llmeval/lib/python3.10/site-packages/torch/distributions/bernoulli.py new file mode 100644 index 0000000000000000000000000000000000000000..75c2882dbc15c017630dacde183a96c4d0f51225 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/distributions/bernoulli.py @@ -0,0 +1,130 @@ +from numbers import Number + +import torch +from torch import nan +from torch.distributions import constraints +from torch.distributions.exp_family import ExponentialFamily +from torch.distributions.utils import ( + broadcast_all, + lazy_property, + logits_to_probs, + probs_to_logits, +) +from torch.nn.functional import binary_cross_entropy_with_logits + +__all__ = ["Bernoulli"] + + +class Bernoulli(ExponentialFamily): + r""" + Creates a Bernoulli distribution parameterized by :attr:`probs` + or :attr:`logits` (but not both). + + Samples are binary (0 or 1). They take the value `1` with probability `p` + and `0` with probability `1 - p`. + + Example:: + + >>> # xdoctest: +IGNORE_WANT("non-deterministic") + >>> m = Bernoulli(torch.tensor([0.3])) + >>> m.sample() # 30% chance 1; 70% chance 0 + tensor([ 0.]) + + Args: + probs (Number, Tensor): the probability of sampling `1` + logits (Number, Tensor): the log-odds of sampling `1` + """ + arg_constraints = {"probs": constraints.unit_interval, "logits": constraints.real} + support = constraints.boolean + has_enumerate_support = True + _mean_carrier_measure = 0 + + def __init__(self, probs=None, logits=None, validate_args=None): + if (probs is None) == (logits is None): + raise ValueError( + "Either `probs` or `logits` must be specified, but not both." 
+ ) + if probs is not None: + is_scalar = isinstance(probs, Number) + (self.probs,) = broadcast_all(probs) + else: + is_scalar = isinstance(logits, Number) + (self.logits,) = broadcast_all(logits) + self._param = self.probs if probs is not None else self.logits + if is_scalar: + batch_shape = torch.Size() + else: + batch_shape = self._param.size() + super().__init__(batch_shape, validate_args=validate_args) + + def expand(self, batch_shape, _instance=None): + new = self._get_checked_instance(Bernoulli, _instance) + batch_shape = torch.Size(batch_shape) + if "probs" in self.__dict__: + new.probs = self.probs.expand(batch_shape) + new._param = new.probs + if "logits" in self.__dict__: + new.logits = self.logits.expand(batch_shape) + new._param = new.logits + super(Bernoulli, new).__init__(batch_shape, validate_args=False) + new._validate_args = self._validate_args + return new + + def _new(self, *args, **kwargs): + return self._param.new(*args, **kwargs) + + @property + def mean(self): + return self.probs + + @property + def mode(self): + mode = (self.probs >= 0.5).to(self.probs) + mode[self.probs == 0.5] = nan + return mode + + @property + def variance(self): + return self.probs * (1 - self.probs) + + @lazy_property + def logits(self): + return probs_to_logits(self.probs, is_binary=True) + + @lazy_property + def probs(self): + return logits_to_probs(self.logits, is_binary=True) + + @property + def param_shape(self): + return self._param.size() + + def sample(self, sample_shape=torch.Size()): + shape = self._extended_shape(sample_shape) + with torch.no_grad(): + return torch.bernoulli(self.probs.expand(shape)) + + def log_prob(self, value): + if self._validate_args: + self._validate_sample(value) + logits, value = broadcast_all(self.logits, value) + return -binary_cross_entropy_with_logits(logits, value, reduction="none") + + def entropy(self): + return binary_cross_entropy_with_logits( + self.logits, self.probs, reduction="none" + ) + + def enumerate_support(self, expand=True): + values = torch.arange(2, dtype=self._param.dtype, device=self._param.device) + values = values.view((-1,) + (1,) * len(self._batch_shape)) + if expand: + values = values.expand((-1,) + self._batch_shape) + return values + + @property + def _natural_params(self): + return (torch.logit(self.probs),) + + def _log_normalizer(self, x): + return torch.log1p(torch.exp(x)) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/distributions/binomial.py b/env-llmeval/lib/python3.10/site-packages/torch/distributions/binomial.py new file mode 100644 index 0000000000000000000000000000000000000000..9243da7b6bf4ccb503626ef02c1644c84961a716 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/distributions/binomial.py @@ -0,0 +1,165 @@ +import torch +from torch.distributions import constraints +from torch.distributions.distribution import Distribution +from torch.distributions.utils import ( + broadcast_all, + lazy_property, + logits_to_probs, + probs_to_logits, +) + +__all__ = ["Binomial"] + + +def _clamp_by_zero(x): + # works like clamp(x, min=0) but has grad at 0 is 0.5 + return (x.clamp(min=0) + x - x.clamp(max=0)) / 2 + + +class Binomial(Distribution): + r""" + Creates a Binomial distribution parameterized by :attr:`total_count` and + either :attr:`probs` or :attr:`logits` (but not both). :attr:`total_count` must be + broadcastable with :attr:`probs`/:attr:`logits`. 
+ + Example:: + + >>> # xdoctest: +IGNORE_WANT("non-deterministic") + >>> m = Binomial(100, torch.tensor([0 , .2, .8, 1])) + >>> x = m.sample() + tensor([ 0., 22., 71., 100.]) + + >>> m = Binomial(torch.tensor([[5.], [10.]]), torch.tensor([0.5, 0.8])) + >>> x = m.sample() + tensor([[ 4., 5.], + [ 7., 6.]]) + + Args: + total_count (int or Tensor): number of Bernoulli trials + probs (Tensor): Event probabilities + logits (Tensor): Event log-odds + """ + arg_constraints = { + "total_count": constraints.nonnegative_integer, + "probs": constraints.unit_interval, + "logits": constraints.real, + } + has_enumerate_support = True + + def __init__(self, total_count=1, probs=None, logits=None, validate_args=None): + if (probs is None) == (logits is None): + raise ValueError( + "Either `probs` or `logits` must be specified, but not both." + ) + if probs is not None: + ( + self.total_count, + self.probs, + ) = broadcast_all(total_count, probs) + self.total_count = self.total_count.type_as(self.probs) + else: + ( + self.total_count, + self.logits, + ) = broadcast_all(total_count, logits) + self.total_count = self.total_count.type_as(self.logits) + + self._param = self.probs if probs is not None else self.logits + batch_shape = self._param.size() + super().__init__(batch_shape, validate_args=validate_args) + + def expand(self, batch_shape, _instance=None): + new = self._get_checked_instance(Binomial, _instance) + batch_shape = torch.Size(batch_shape) + new.total_count = self.total_count.expand(batch_shape) + if "probs" in self.__dict__: + new.probs = self.probs.expand(batch_shape) + new._param = new.probs + if "logits" in self.__dict__: + new.logits = self.logits.expand(batch_shape) + new._param = new.logits + super(Binomial, new).__init__(batch_shape, validate_args=False) + new._validate_args = self._validate_args + return new + + def _new(self, *args, **kwargs): + return self._param.new(*args, **kwargs) + + @constraints.dependent_property(is_discrete=True, event_dim=0) + def support(self): + return constraints.integer_interval(0, self.total_count) + + @property + def mean(self): + return self.total_count * self.probs + + @property + def mode(self): + return ((self.total_count + 1) * self.probs).floor().clamp(max=self.total_count) + + @property + def variance(self): + return self.total_count * self.probs * (1 - self.probs) + + @lazy_property + def logits(self): + return probs_to_logits(self.probs, is_binary=True) + + @lazy_property + def probs(self): + return logits_to_probs(self.logits, is_binary=True) + + @property + def param_shape(self): + return self._param.size() + + def sample(self, sample_shape=torch.Size()): + shape = self._extended_shape(sample_shape) + with torch.no_grad(): + return torch.binomial( + self.total_count.expand(shape), self.probs.expand(shape) + ) + + def log_prob(self, value): + if self._validate_args: + self._validate_sample(value) + log_factorial_n = torch.lgamma(self.total_count + 1) + log_factorial_k = torch.lgamma(value + 1) + log_factorial_nmk = torch.lgamma(self.total_count - value + 1) + # k * log(p) + (n - k) * log(1 - p) = k * (log(p) - log(1 - p)) + n * log(1 - p) + # (case logit < 0) = k * logit - n * log1p(e^logit) + # (case logit > 0) = k * logit - n * (log(p) - log(1 - p)) + n * log(p) + # = k * logit - n * logit - n * log1p(e^-logit) + # (merge two cases) = k * logit - n * max(logit, 0) - n * log1p(e^-|logit|) + normalize_term = ( + self.total_count * _clamp_by_zero(self.logits) + + self.total_count * torch.log1p(torch.exp(-torch.abs(self.logits))) + - 
log_factorial_n + ) + return ( + value * self.logits - log_factorial_k - log_factorial_nmk - normalize_term + ) + + def entropy(self): + total_count = int(self.total_count.max()) + if not self.total_count.min() == total_count: + raise NotImplementedError( + "Inhomogeneous total count not supported by `entropy`." + ) + + log_prob = self.log_prob(self.enumerate_support(False)) + return -(torch.exp(log_prob) * log_prob).sum(0) + + def enumerate_support(self, expand=True): + total_count = int(self.total_count.max()) + if not self.total_count.min() == total_count: + raise NotImplementedError( + "Inhomogeneous total count not supported by `enumerate_support`." + ) + values = torch.arange( + 1 + total_count, dtype=self._param.dtype, device=self._param.device + ) + values = values.view((-1,) + (1,) * len(self._batch_shape)) + if expand: + values = values.expand((-1,) + self._batch_shape) + return values diff --git a/env-llmeval/lib/python3.10/site-packages/torch/distributions/cauchy.py b/env-llmeval/lib/python3.10/site-packages/torch/distributions/cauchy.py new file mode 100644 index 0000000000000000000000000000000000000000..1a95dfe0d762d8cec467aca366f7ba13f1cb82a3 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/distributions/cauchy.py @@ -0,0 +1,90 @@ +import math +from numbers import Number + +import torch +from torch import inf, nan +from torch.distributions import constraints +from torch.distributions.distribution import Distribution +from torch.distributions.utils import broadcast_all + +__all__ = ["Cauchy"] + + +class Cauchy(Distribution): + r""" + Samples from a Cauchy (Lorentz) distribution. The distribution of the ratio of + independent normally distributed random variables with means `0` follows a + Cauchy distribution. + + Example:: + + >>> # xdoctest: +IGNORE_WANT("non-deterministic") + >>> m = Cauchy(torch.tensor([0.0]), torch.tensor([1.0])) + >>> m.sample() # sample from a Cauchy distribution with loc=0 and scale=1 + tensor([ 2.3214]) + + Args: + loc (float or Tensor): mode or median of the distribution. + scale (float or Tensor): half width at half maximum. 
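A minimal sketch of the heavy-tailed behaviour (illustrative only, assuming torch is available): the mean and variance properties defined further down report nan and inf rather than raising, so calling code should check for those values explicitly.

    >>> import torch
    >>> from torch.distributions import Cauchy
    >>> c = Cauchy(torch.tensor(0.0), torch.tensor(1.0))
    >>> c.mean, c.variance    # undefined moments surface as nan / inf
    (tensor(nan), tensor(inf))
    >>> c.cdf(torch.tensor(0.0))    # the median sits at loc
    tensor(0.5000)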
+ """ + arg_constraints = {"loc": constraints.real, "scale": constraints.positive} + support = constraints.real + has_rsample = True + + def __init__(self, loc, scale, validate_args=None): + self.loc, self.scale = broadcast_all(loc, scale) + if isinstance(loc, Number) and isinstance(scale, Number): + batch_shape = torch.Size() + else: + batch_shape = self.loc.size() + super().__init__(batch_shape, validate_args=validate_args) + + def expand(self, batch_shape, _instance=None): + new = self._get_checked_instance(Cauchy, _instance) + batch_shape = torch.Size(batch_shape) + new.loc = self.loc.expand(batch_shape) + new.scale = self.scale.expand(batch_shape) + super(Cauchy, new).__init__(batch_shape, validate_args=False) + new._validate_args = self._validate_args + return new + + @property + def mean(self): + return torch.full( + self._extended_shape(), nan, dtype=self.loc.dtype, device=self.loc.device + ) + + @property + def mode(self): + return self.loc + + @property + def variance(self): + return torch.full( + self._extended_shape(), inf, dtype=self.loc.dtype, device=self.loc.device + ) + + def rsample(self, sample_shape=torch.Size()): + shape = self._extended_shape(sample_shape) + eps = self.loc.new(shape).cauchy_() + return self.loc + eps * self.scale + + def log_prob(self, value): + if self._validate_args: + self._validate_sample(value) + return ( + -math.log(math.pi) + - self.scale.log() + - (((value - self.loc) / self.scale) ** 2).log1p() + ) + + def cdf(self, value): + if self._validate_args: + self._validate_sample(value) + return torch.atan((value - self.loc) / self.scale) / math.pi + 0.5 + + def icdf(self, value): + return torch.tan(math.pi * (value - 0.5)) * self.scale + self.loc + + def entropy(self): + return math.log(4 * math.pi) + self.scale.log() diff --git a/env-llmeval/lib/python3.10/site-packages/torch/distributions/chi2.py b/env-llmeval/lib/python3.10/site-packages/torch/distributions/chi2.py new file mode 100644 index 0000000000000000000000000000000000000000..16d0d6d60fbeb93544d21127c57f4bebcfb2bd74 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/distributions/chi2.py @@ -0,0 +1,33 @@ +from torch.distributions import constraints +from torch.distributions.gamma import Gamma + +__all__ = ["Chi2"] + + +class Chi2(Gamma): + r""" + Creates a Chi-squared distribution parameterized by shape parameter :attr:`df`. 
+ This is exactly equivalent to ``Gamma(alpha=0.5*df, beta=0.5)`` + + Example:: + + >>> # xdoctest: +IGNORE_WANT("non-deterministic") + >>> m = Chi2(torch.tensor([1.0])) + >>> m.sample() # Chi2 distributed with shape df=1 + tensor([ 0.1046]) + + Args: + df (float or Tensor): shape parameter of the distribution + """ + arg_constraints = {"df": constraints.positive} + + def __init__(self, df, validate_args=None): + super().__init__(0.5 * df, 0.5, validate_args=validate_args) + + def expand(self, batch_shape, _instance=None): + new = self._get_checked_instance(Chi2, _instance) + return super().expand(batch_shape, new) + + @property + def df(self): + return self.concentration * 2 diff --git a/env-llmeval/lib/python3.10/site-packages/torch/distributions/continuous_bernoulli.py b/env-llmeval/lib/python3.10/site-packages/torch/distributions/continuous_bernoulli.py new file mode 100644 index 0000000000000000000000000000000000000000..3e7f1a53a47fb4159bde1f51fb970a64ef49c911 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/distributions/continuous_bernoulli.py @@ -0,0 +1,235 @@ +import math +from numbers import Number + +import torch +from torch.distributions import constraints +from torch.distributions.exp_family import ExponentialFamily +from torch.distributions.utils import ( + broadcast_all, + clamp_probs, + lazy_property, + logits_to_probs, + probs_to_logits, +) +from torch.nn.functional import binary_cross_entropy_with_logits + +__all__ = ["ContinuousBernoulli"] + + +class ContinuousBernoulli(ExponentialFamily): + r""" + Creates a continuous Bernoulli distribution parameterized by :attr:`probs` + or :attr:`logits` (but not both). + + The distribution is supported in [0, 1] and parameterized by 'probs' (in + (0,1)) or 'logits' (real-valued). Note that, unlike the Bernoulli, 'probs' + does not correspond to a probability and 'logits' does not correspond to + log-odds, but the same names are used due to the similarity with the + Bernoulli. See [1] for more details. + + Example:: + + >>> # xdoctest: +IGNORE_WANT("non-deterministic") + >>> m = ContinuousBernoulli(torch.tensor([0.3])) + >>> m.sample() + tensor([ 0.2538]) + + Args: + probs (Number, Tensor): (0,1) valued parameters + logits (Number, Tensor): real valued parameters whose sigmoid matches 'probs' + + [1] The continuous Bernoulli: fixing a pervasive error in variational + autoencoders, Loaiza-Ganem G and Cunningham JP, NeurIPS 2019. + https://arxiv.org/abs/1907.06845 + """ + arg_constraints = {"probs": constraints.unit_interval, "logits": constraints.real} + support = constraints.unit_interval + _mean_carrier_measure = 0 + has_rsample = True + + def __init__( + self, probs=None, logits=None, lims=(0.499, 0.501), validate_args=None + ): + if (probs is None) == (logits is None): + raise ValueError( + "Either `probs` or `logits` must be specified, but not both." 
+ ) + if probs is not None: + is_scalar = isinstance(probs, Number) + (self.probs,) = broadcast_all(probs) + # validate 'probs' here if necessary as it is later clamped for numerical stability + # close to 0 and 1, later on; otherwise the clamped 'probs' would always pass + if validate_args is not None: + if not self.arg_constraints["probs"].check(self.probs).all(): + raise ValueError("The parameter probs has invalid values") + self.probs = clamp_probs(self.probs) + else: + is_scalar = isinstance(logits, Number) + (self.logits,) = broadcast_all(logits) + self._param = self.probs if probs is not None else self.logits + if is_scalar: + batch_shape = torch.Size() + else: + batch_shape = self._param.size() + self._lims = lims + super().__init__(batch_shape, validate_args=validate_args) + + def expand(self, batch_shape, _instance=None): + new = self._get_checked_instance(ContinuousBernoulli, _instance) + new._lims = self._lims + batch_shape = torch.Size(batch_shape) + if "probs" in self.__dict__: + new.probs = self.probs.expand(batch_shape) + new._param = new.probs + if "logits" in self.__dict__: + new.logits = self.logits.expand(batch_shape) + new._param = new.logits + super(ContinuousBernoulli, new).__init__(batch_shape, validate_args=False) + new._validate_args = self._validate_args + return new + + def _new(self, *args, **kwargs): + return self._param.new(*args, **kwargs) + + def _outside_unstable_region(self): + return torch.max( + torch.le(self.probs, self._lims[0]), torch.gt(self.probs, self._lims[1]) + ) + + def _cut_probs(self): + return torch.where( + self._outside_unstable_region(), + self.probs, + self._lims[0] * torch.ones_like(self.probs), + ) + + def _cont_bern_log_norm(self): + """computes the log normalizing constant as a function of the 'probs' parameter""" + cut_probs = self._cut_probs() + cut_probs_below_half = torch.where( + torch.le(cut_probs, 0.5), cut_probs, torch.zeros_like(cut_probs) + ) + cut_probs_above_half = torch.where( + torch.ge(cut_probs, 0.5), cut_probs, torch.ones_like(cut_probs) + ) + log_norm = torch.log( + torch.abs(torch.log1p(-cut_probs) - torch.log(cut_probs)) + ) - torch.where( + torch.le(cut_probs, 0.5), + torch.log1p(-2.0 * cut_probs_below_half), + torch.log(2.0 * cut_probs_above_half - 1.0), + ) + x = torch.pow(self.probs - 0.5, 2) + taylor = math.log(2.0) + (4.0 / 3.0 + 104.0 / 45.0 * x) * x + return torch.where(self._outside_unstable_region(), log_norm, taylor) + + @property + def mean(self): + cut_probs = self._cut_probs() + mus = cut_probs / (2.0 * cut_probs - 1.0) + 1.0 / ( + torch.log1p(-cut_probs) - torch.log(cut_probs) + ) + x = self.probs - 0.5 + taylor = 0.5 + (1.0 / 3.0 + 16.0 / 45.0 * torch.pow(x, 2)) * x + return torch.where(self._outside_unstable_region(), mus, taylor) + + @property + def stddev(self): + return torch.sqrt(self.variance) + + @property + def variance(self): + cut_probs = self._cut_probs() + vars = cut_probs * (cut_probs - 1.0) / torch.pow( + 1.0 - 2.0 * cut_probs, 2 + ) + 1.0 / torch.pow(torch.log1p(-cut_probs) - torch.log(cut_probs), 2) + x = torch.pow(self.probs - 0.5, 2) + taylor = 1.0 / 12.0 - (1.0 / 15.0 - 128.0 / 945.0 * x) * x + return torch.where(self._outside_unstable_region(), vars, taylor) + + @lazy_property + def logits(self): + return probs_to_logits(self.probs, is_binary=True) + + @lazy_property + def probs(self): + return clamp_probs(logits_to_probs(self.logits, is_binary=True)) + + @property + def param_shape(self): + return self._param.size() + + def sample(self, sample_shape=torch.Size()): + shape = 
self._extended_shape(sample_shape) + u = torch.rand(shape, dtype=self.probs.dtype, device=self.probs.device) + with torch.no_grad(): + return self.icdf(u) + + def rsample(self, sample_shape=torch.Size()): + shape = self._extended_shape(sample_shape) + u = torch.rand(shape, dtype=self.probs.dtype, device=self.probs.device) + return self.icdf(u) + + def log_prob(self, value): + if self._validate_args: + self._validate_sample(value) + logits, value = broadcast_all(self.logits, value) + return ( + -binary_cross_entropy_with_logits(logits, value, reduction="none") + + self._cont_bern_log_norm() + ) + + def cdf(self, value): + if self._validate_args: + self._validate_sample(value) + cut_probs = self._cut_probs() + cdfs = ( + torch.pow(cut_probs, value) * torch.pow(1.0 - cut_probs, 1.0 - value) + + cut_probs + - 1.0 + ) / (2.0 * cut_probs - 1.0) + unbounded_cdfs = torch.where(self._outside_unstable_region(), cdfs, value) + return torch.where( + torch.le(value, 0.0), + torch.zeros_like(value), + torch.where(torch.ge(value, 1.0), torch.ones_like(value), unbounded_cdfs), + ) + + def icdf(self, value): + cut_probs = self._cut_probs() + return torch.where( + self._outside_unstable_region(), + ( + torch.log1p(-cut_probs + value * (2.0 * cut_probs - 1.0)) + - torch.log1p(-cut_probs) + ) + / (torch.log(cut_probs) - torch.log1p(-cut_probs)), + value, + ) + + def entropy(self): + log_probs0 = torch.log1p(-self.probs) + log_probs1 = torch.log(self.probs) + return ( + self.mean * (log_probs0 - log_probs1) + - self._cont_bern_log_norm() + - log_probs0 + ) + + @property + def _natural_params(self): + return (self.logits,) + + def _log_normalizer(self, x): + """computes the log normalizing constant as a function of the natural parameter""" + out_unst_reg = torch.max( + torch.le(x, self._lims[0] - 0.5), torch.gt(x, self._lims[1] - 0.5) + ) + cut_nat_params = torch.where( + out_unst_reg, x, (self._lims[0] - 0.5) * torch.ones_like(x) + ) + log_norm = torch.log(torch.abs(torch.exp(cut_nat_params) - 1.0)) - torch.log( + torch.abs(cut_nat_params) + ) + taylor = 0.5 * x + torch.pow(x, 2) / 24.0 - torch.pow(x, 4) / 2880.0 + return torch.where(out_unst_reg, log_norm, taylor) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/distributions/dirichlet.py b/env-llmeval/lib/python3.10/site-packages/torch/distributions/dirichlet.py new file mode 100644 index 0000000000000000000000000000000000000000..b7175aa616282525c1e7107b3f5336ad0ad652c2 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/distributions/dirichlet.py @@ -0,0 +1,123 @@ +import torch +from torch.autograd import Function +from torch.autograd.function import once_differentiable +from torch.distributions import constraints +from torch.distributions.exp_family import ExponentialFamily + +__all__ = ["Dirichlet"] + + +# This helper is exposed for testing. 
+def _Dirichlet_backward(x, concentration, grad_output): + total = concentration.sum(-1, True).expand_as(concentration) + grad = torch._dirichlet_grad(x, concentration, total) + return grad * (grad_output - (x * grad_output).sum(-1, True)) + + +class _Dirichlet(Function): + @staticmethod + def forward(ctx, concentration): + x = torch._sample_dirichlet(concentration) + ctx.save_for_backward(x, concentration) + return x + + @staticmethod + @once_differentiable + def backward(ctx, grad_output): + x, concentration = ctx.saved_tensors + return _Dirichlet_backward(x, concentration, grad_output) + + +class Dirichlet(ExponentialFamily): + r""" + Creates a Dirichlet distribution parameterized by concentration :attr:`concentration`. + + Example:: + + >>> # xdoctest: +IGNORE_WANT("non-deterministic") + >>> m = Dirichlet(torch.tensor([0.5, 0.5])) + >>> m.sample() # Dirichlet distributed with concentration [0.5, 0.5] + tensor([ 0.1046, 0.8954]) + + Args: + concentration (Tensor): concentration parameter of the distribution + (often referred to as alpha) + """ + arg_constraints = { + "concentration": constraints.independent(constraints.positive, 1) + } + support = constraints.simplex + has_rsample = True + + def __init__(self, concentration, validate_args=None): + if concentration.dim() < 1: + raise ValueError( + "`concentration` parameter must be at least one-dimensional." + ) + self.concentration = concentration + batch_shape, event_shape = concentration.shape[:-1], concentration.shape[-1:] + super().__init__(batch_shape, event_shape, validate_args=validate_args) + + def expand(self, batch_shape, _instance=None): + new = self._get_checked_instance(Dirichlet, _instance) + batch_shape = torch.Size(batch_shape) + new.concentration = self.concentration.expand(batch_shape + self.event_shape) + super(Dirichlet, new).__init__( + batch_shape, self.event_shape, validate_args=False + ) + new._validate_args = self._validate_args + return new + + def rsample(self, sample_shape=()): + shape = self._extended_shape(sample_shape) + concentration = self.concentration.expand(shape) + return _Dirichlet.apply(concentration) + + def log_prob(self, value): + if self._validate_args: + self._validate_sample(value) + return ( + torch.xlogy(self.concentration - 1.0, value).sum(-1) + + torch.lgamma(self.concentration.sum(-1)) + - torch.lgamma(self.concentration).sum(-1) + ) + + @property + def mean(self): + return self.concentration / self.concentration.sum(-1, True) + + @property + def mode(self): + concentrationm1 = (self.concentration - 1).clamp(min=0.0) + mode = concentrationm1 / concentrationm1.sum(-1, True) + mask = (self.concentration < 1).all(axis=-1) + mode[mask] = torch.nn.functional.one_hot( + mode[mask].argmax(axis=-1), concentrationm1.shape[-1] + ).to(mode) + return mode + + @property + def variance(self): + con0 = self.concentration.sum(-1, True) + return ( + self.concentration + * (con0 - self.concentration) + / (con0.pow(2) * (con0 + 1)) + ) + + def entropy(self): + k = self.concentration.size(-1) + a0 = self.concentration.sum(-1) + return ( + torch.lgamma(self.concentration).sum(-1) + - torch.lgamma(a0) + - (k - a0) * torch.digamma(a0) + - ((self.concentration - 1.0) * torch.digamma(self.concentration)).sum(-1) + ) + + @property + def _natural_params(self): + return (self.concentration,) + + def _log_normalizer(self, x): + return x.lgamma().sum(-1) - torch.lgamma(x.sum(-1)) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/distributions/distribution.py 
b/env-llmeval/lib/python3.10/site-packages/torch/distributions/distribution.py new file mode 100644 index 0000000000000000000000000000000000000000..2752d710e8fb31f5d5867685cf5917573c9128cf --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/distributions/distribution.py @@ -0,0 +1,336 @@ +import warnings +from typing import Any, Dict, Optional, Tuple + +import torch +from torch.distributions import constraints +from torch.distributions.utils import lazy_property +from torch.types import _size + +__all__ = ["Distribution"] + + +class Distribution: + r""" + Distribution is the abstract base class for probability distributions. + """ + + has_rsample = False + has_enumerate_support = False + _validate_args = __debug__ + + @staticmethod + def set_default_validate_args(value: bool) -> None: + """ + Sets whether validation is enabled or disabled. + + The default behavior mimics Python's ``assert`` statement: validation + is on by default, but is disabled if Python is run in optimized mode + (via ``python -O``). Validation may be expensive, so you may want to + disable it once a model is working. + + Args: + value (bool): Whether to enable validation. + """ + if value not in [True, False]: + raise ValueError + Distribution._validate_args = value + + def __init__( + self, + batch_shape: torch.Size = torch.Size(), + event_shape: torch.Size = torch.Size(), + validate_args: Optional[bool] = None, + ): + self._batch_shape = batch_shape + self._event_shape = event_shape + if validate_args is not None: + self._validate_args = validate_args + if self._validate_args: + try: + arg_constraints = self.arg_constraints + except NotImplementedError: + arg_constraints = {} + warnings.warn( + f"{self.__class__} does not define `arg_constraints`. " + + "Please set `arg_constraints = {}` or initialize the distribution " + + "with `validate_args=False` to turn off validation." + ) + for param, constraint in arg_constraints.items(): + if constraints.is_dependent(constraint): + continue # skip constraints that cannot be checked + if param not in self.__dict__ and isinstance( + getattr(type(self), param), lazy_property + ): + continue # skip checking lazily-constructed args + value = getattr(self, param) + valid = constraint.check(value) + if not valid.all(): + raise ValueError( + f"Expected parameter {param} " + f"({type(value).__name__} of shape {tuple(value.shape)}) " + f"of distribution {repr(self)} " + f"to satisfy the constraint {repr(constraint)}, " + f"but found invalid values:\n{value}" + ) + super().__init__() + + def expand(self, batch_shape: torch.Size, _instance=None): + """ + Returns a new distribution instance (or populates an existing instance + provided by a derived class) with batch dimensions expanded to + `batch_shape`. This method calls :class:`~torch.Tensor.expand` on + the distribution's parameters. As such, this does not allocate new + memory for the expanded distribution instance. Additionally, + this does not repeat any args checking or parameter broadcasting in + `__init__.py`, when an instance is first created. + + Args: + batch_shape (torch.Size): the desired expanded size. + _instance: new instance provided by subclasses that + need to override `.expand`. + + Returns: + New distribution instance with batch dimensions expanded to + `batch_size`. + """ + raise NotImplementedError + + @property + def batch_shape(self) -> torch.Size: + """ + Returns the shape over which parameters are batched. 
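For orientation, a small sketch (not part of the file, assuming a standard torch install) of how batch_shape and event_shape combine with a sample_shape, as described here and in _extended_shape further down:

    >>> import torch
    >>> from torch.distributions import Normal
    >>> n = Normal(torch.zeros(3), torch.ones(3))
    >>> n.batch_shape, n.event_shape
    (torch.Size([3]), torch.Size([]))
    >>> n.sample((2,)).shape    # sample_shape + batch_shape + event_shape
    torch.Size([2, 3])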
+ """ + return self._batch_shape + + @property + def event_shape(self) -> torch.Size: + """ + Returns the shape of a single sample (without batching). + """ + return self._event_shape + + @property + def arg_constraints(self) -> Dict[str, constraints.Constraint]: + """ + Returns a dictionary from argument names to + :class:`~torch.distributions.constraints.Constraint` objects that + should be satisfied by each argument of this distribution. Args that + are not tensors need not appear in this dict. + """ + raise NotImplementedError + + @property + def support(self) -> Optional[Any]: + """ + Returns a :class:`~torch.distributions.constraints.Constraint` object + representing this distribution's support. + """ + raise NotImplementedError + + @property + def mean(self) -> torch.Tensor: + """ + Returns the mean of the distribution. + """ + raise NotImplementedError + + @property + def mode(self) -> torch.Tensor: + """ + Returns the mode of the distribution. + """ + raise NotImplementedError(f"{self.__class__} does not implement mode") + + @property + def variance(self) -> torch.Tensor: + """ + Returns the variance of the distribution. + """ + raise NotImplementedError + + @property + def stddev(self) -> torch.Tensor: + """ + Returns the standard deviation of the distribution. + """ + return self.variance.sqrt() + + def sample(self, sample_shape: torch.Size = torch.Size()) -> torch.Tensor: + """ + Generates a sample_shape shaped sample or sample_shape shaped batch of + samples if the distribution parameters are batched. + """ + with torch.no_grad(): + return self.rsample(sample_shape) + + def rsample(self, sample_shape: torch.Size = torch.Size()) -> torch.Tensor: + """ + Generates a sample_shape shaped reparameterized sample or sample_shape + shaped batch of reparameterized samples if the distribution parameters + are batched. + """ + raise NotImplementedError + + def sample_n(self, n: int) -> torch.Tensor: + """ + Generates n samples or n batches of samples if the distribution + parameters are batched. + """ + warnings.warn( + "sample_n will be deprecated. Use .sample((n,)) instead", UserWarning + ) + return self.sample(torch.Size((n,))) + + def log_prob(self, value: torch.Tensor) -> torch.Tensor: + """ + Returns the log of the probability density/mass function evaluated at + `value`. + + Args: + value (Tensor): + """ + raise NotImplementedError + + def cdf(self, value: torch.Tensor) -> torch.Tensor: + """ + Returns the cumulative density/mass function evaluated at + `value`. + + Args: + value (Tensor): + """ + raise NotImplementedError + + def icdf(self, value: torch.Tensor) -> torch.Tensor: + """ + Returns the inverse cumulative density/mass function evaluated at + `value`. + + Args: + value (Tensor): + """ + raise NotImplementedError + + def enumerate_support(self, expand: bool = True) -> torch.Tensor: + """ + Returns tensor containing all values supported by a discrete + distribution. The result will enumerate over dimension 0, so the shape + of the result will be `(cardinality,) + batch_shape + event_shape` + (where `event_shape = ()` for univariate distributions). + + Note that this enumerates over all batched tensors in lock-step + `[[0, 0], [1, 1], ...]`. With `expand=False`, enumeration happens + along dim 0, but with the remaining batch dimensions being + singleton dimensions, `[[0], [1], ..`. + + To iterate over the full Cartesian product use + `itertools.product(m.enumerate_support())`. 
+ + Args: + expand (bool): whether to expand the support over the + batch dims to match the distribution's `batch_shape`. + + Returns: + Tensor iterating over dimension 0. + """ + raise NotImplementedError + + def entropy(self) -> torch.Tensor: + """ + Returns entropy of distribution, batched over batch_shape. + + Returns: + Tensor of shape batch_shape. + """ + raise NotImplementedError + + def perplexity(self) -> torch.Tensor: + """ + Returns perplexity of distribution, batched over batch_shape. + + Returns: + Tensor of shape batch_shape. + """ + return torch.exp(self.entropy()) + + def _extended_shape(self, sample_shape: _size = torch.Size()) -> Tuple[int, ...]: + """ + Returns the size of the sample returned by the distribution, given + a `sample_shape`. Note, that the batch and event shapes of a distribution + instance are fixed at the time of construction. If this is empty, the + returned shape is upcast to (1,). + + Args: + sample_shape (torch.Size): the size of the sample to be drawn. + """ + if not isinstance(sample_shape, torch.Size): + sample_shape = torch.Size(sample_shape) + return torch.Size(sample_shape + self._batch_shape + self._event_shape) + + def _validate_sample(self, value: torch.Tensor) -> None: + """ + Argument validation for distribution methods such as `log_prob`, + `cdf` and `icdf`. The rightmost dimensions of a value to be + scored via these methods must agree with the distribution's batch + and event shapes. + + Args: + value (Tensor): the tensor whose log probability is to be + computed by the `log_prob` method. + Raises + ValueError: when the rightmost dimensions of `value` do not match the + distribution's batch and event shapes. + """ + if not isinstance(value, torch.Tensor): + raise ValueError("The value argument to log_prob must be a Tensor") + + event_dim_start = len(value.size()) - len(self._event_shape) + if value.size()[event_dim_start:] != self._event_shape: + raise ValueError( + f"The right-most size of value must match event_shape: {value.size()} vs {self._event_shape}." + ) + + actual_shape = value.size() + expected_shape = self._batch_shape + self._event_shape + for i, j in zip(reversed(actual_shape), reversed(expected_shape)): + if i != 1 and j != 1 and i != j: + raise ValueError( + f"Value is not broadcastable with batch_shape+event_shape: {actual_shape} vs {expected_shape}." + ) + try: + support = self.support + except NotImplementedError: + warnings.warn( + f"{self.__class__} does not define `support` to enable " + + "sample validation. Please initialize the distribution with " + + "`validate_args=False` to turn off validation." + ) + return + assert support is not None + valid = support.check(value) + if not valid.all(): + raise ValueError( + "Expected value argument " + f"({type(value).__name__} of shape {tuple(value.shape)}) " + f"to be within the support ({repr(support)}) " + f"of the distribution {repr(self)}, " + f"but found invalid values:\n{value}" + ) + + def _get_checked_instance(self, cls, _instance=None): + if _instance is None and type(self).__init__ != cls.__init__: + raise NotImplementedError( + f"Subclass {self.__class__.__name__} of {cls.__name__} that defines a custom __init__ method " + "must also define a custom .expand() method." 
+ ) + return self.__new__(type(self)) if _instance is None else _instance + + def __repr__(self) -> str: + param_names = [k for k, _ in self.arg_constraints.items() if k in self.__dict__] + args_string = ", ".join( + [ + f"{p}: {self.__dict__[p] if self.__dict__[p].numel() == 1 else self.__dict__[p].size()}" + for p in param_names + ] + ) + return self.__class__.__name__ + "(" + args_string + ")" diff --git a/env-llmeval/lib/python3.10/site-packages/torch/distributions/fishersnedecor.py b/env-llmeval/lib/python3.10/site-packages/torch/distributions/fishersnedecor.py new file mode 100644 index 0000000000000000000000000000000000000000..788f74b58556a72ab16eb810d8158d8874ddc095 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/distributions/fishersnedecor.py @@ -0,0 +1,98 @@ +from numbers import Number + +import torch +from torch import nan +from torch.distributions import constraints +from torch.distributions.distribution import Distribution +from torch.distributions.gamma import Gamma +from torch.distributions.utils import broadcast_all + +__all__ = ["FisherSnedecor"] + + +class FisherSnedecor(Distribution): + r""" + Creates a Fisher-Snedecor distribution parameterized by :attr:`df1` and :attr:`df2`. + + Example:: + + >>> # xdoctest: +IGNORE_WANT("non-deterministic") + >>> m = FisherSnedecor(torch.tensor([1.0]), torch.tensor([2.0])) + >>> m.sample() # Fisher-Snedecor-distributed with df1=1 and df2=2 + tensor([ 0.2453]) + + Args: + df1 (float or Tensor): degrees of freedom parameter 1 + df2 (float or Tensor): degrees of freedom parameter 2 + """ + arg_constraints = {"df1": constraints.positive, "df2": constraints.positive} + support = constraints.positive + has_rsample = True + + def __init__(self, df1, df2, validate_args=None): + self.df1, self.df2 = broadcast_all(df1, df2) + self._gamma1 = Gamma(self.df1 * 0.5, self.df1) + self._gamma2 = Gamma(self.df2 * 0.5, self.df2) + + if isinstance(df1, Number) and isinstance(df2, Number): + batch_shape = torch.Size() + else: + batch_shape = self.df1.size() + super().__init__(batch_shape, validate_args=validate_args) + + def expand(self, batch_shape, _instance=None): + new = self._get_checked_instance(FisherSnedecor, _instance) + batch_shape = torch.Size(batch_shape) + new.df1 = self.df1.expand(batch_shape) + new.df2 = self.df2.expand(batch_shape) + new._gamma1 = self._gamma1.expand(batch_shape) + new._gamma2 = self._gamma2.expand(batch_shape) + super(FisherSnedecor, new).__init__(batch_shape, validate_args=False) + new._validate_args = self._validate_args + return new + + @property + def mean(self): + df2 = self.df2.clone(memory_format=torch.contiguous_format) + df2[df2 <= 2] = nan + return df2 / (df2 - 2) + + @property + def mode(self): + mode = (self.df1 - 2) / self.df1 * self.df2 / (self.df2 + 2) + mode[self.df1 <= 2] = nan + return mode + + @property + def variance(self): + df2 = self.df2.clone(memory_format=torch.contiguous_format) + df2[df2 <= 4] = nan + return ( + 2 + * df2.pow(2) + * (self.df1 + df2 - 2) + / (self.df1 * (df2 - 2).pow(2) * (df2 - 4)) + ) + + def rsample(self, sample_shape=torch.Size(())): + shape = self._extended_shape(sample_shape) + # X1 ~ Gamma(df1 / 2, 1 / df1), X2 ~ Gamma(df2 / 2, 1 / df2) + # Y = df2 * df1 * X1 / (df1 * df2 * X2) = X1 / X2 ~ F(df1, df2) + X1 = self._gamma1.rsample(sample_shape).view(shape) + X2 = self._gamma2.rsample(sample_shape).view(shape) + tiny = torch.finfo(X2.dtype).tiny + X2.clamp_(min=tiny) + Y = X1 / X2 + Y.clamp_(min=tiny) + return Y + + def log_prob(self, value): + if 
self._validate_args: + self._validate_sample(value) + ct1 = self.df1 * 0.5 + ct2 = self.df2 * 0.5 + ct3 = self.df1 / self.df2 + t1 = (ct1 + ct2).lgamma() - ct1.lgamma() - ct2.lgamma() + t2 = ct1 * ct3.log() + (ct1 - 1) * torch.log(value) + t3 = (ct1 + ct2) * torch.log1p(ct3 * value) + return t1 + t2 - t3 diff --git a/env-llmeval/lib/python3.10/site-packages/torch/distributions/gamma.py b/env-llmeval/lib/python3.10/site-packages/torch/distributions/gamma.py new file mode 100644 index 0000000000000000000000000000000000000000..c189fb24e070329c541dd783a4beeee604ee147e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/distributions/gamma.py @@ -0,0 +1,108 @@ +from numbers import Number + +import torch +from torch.distributions import constraints +from torch.distributions.exp_family import ExponentialFamily +from torch.distributions.utils import broadcast_all + +__all__ = ["Gamma"] + + +def _standard_gamma(concentration): + return torch._standard_gamma(concentration) + + +class Gamma(ExponentialFamily): + r""" + Creates a Gamma distribution parameterized by shape :attr:`concentration` and :attr:`rate`. + + Example:: + + >>> # xdoctest: +IGNORE_WANT("non-deterministic") + >>> m = Gamma(torch.tensor([1.0]), torch.tensor([1.0])) + >>> m.sample() # Gamma distributed with concentration=1 and rate=1 + tensor([ 0.1046]) + + Args: + concentration (float or Tensor): shape parameter of the distribution + (often referred to as alpha) + rate (float or Tensor): rate = 1 / scale of the distribution + (often referred to as beta) + """ + arg_constraints = { + "concentration": constraints.positive, + "rate": constraints.positive, + } + support = constraints.nonnegative + has_rsample = True + _mean_carrier_measure = 0 + + @property + def mean(self): + return self.concentration / self.rate + + @property + def mode(self): + return ((self.concentration - 1) / self.rate).clamp(min=0) + + @property + def variance(self): + return self.concentration / self.rate.pow(2) + + def __init__(self, concentration, rate, validate_args=None): + self.concentration, self.rate = broadcast_all(concentration, rate) + if isinstance(concentration, Number) and isinstance(rate, Number): + batch_shape = torch.Size() + else: + batch_shape = self.concentration.size() + super().__init__(batch_shape, validate_args=validate_args) + + def expand(self, batch_shape, _instance=None): + new = self._get_checked_instance(Gamma, _instance) + batch_shape = torch.Size(batch_shape) + new.concentration = self.concentration.expand(batch_shape) + new.rate = self.rate.expand(batch_shape) + super(Gamma, new).__init__(batch_shape, validate_args=False) + new._validate_args = self._validate_args + return new + + def rsample(self, sample_shape=torch.Size()): + shape = self._extended_shape(sample_shape) + value = _standard_gamma(self.concentration.expand(shape)) / self.rate.expand( + shape + ) + value.detach().clamp_( + min=torch.finfo(value.dtype).tiny + ) # do not record in autograd graph + return value + + def log_prob(self, value): + value = torch.as_tensor(value, dtype=self.rate.dtype, device=self.rate.device) + if self._validate_args: + self._validate_sample(value) + return ( + torch.xlogy(self.concentration, self.rate) + + torch.xlogy(self.concentration - 1, value) + - self.rate * value + - torch.lgamma(self.concentration) + ) + + def entropy(self): + return ( + self.concentration + - torch.log(self.rate) + + torch.lgamma(self.concentration) + + (1.0 - self.concentration) * torch.digamma(self.concentration) + ) + + @property + def 
_natural_params(self): + return (self.concentration - 1, -self.rate) + + def _log_normalizer(self, x, y): + return torch.lgamma(x + 1) + (x + 1) * torch.log(-y.reciprocal()) + + def cdf(self, value): + if self._validate_args: + self._validate_sample(value) + return torch.special.gammainc(self.concentration, self.rate * value) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/distributions/gumbel.py b/env-llmeval/lib/python3.10/site-packages/torch/distributions/gumbel.py new file mode 100644 index 0000000000000000000000000000000000000000..e0ed5d8f86906b297a64979e39c559215e23bb59 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/distributions/gumbel.py @@ -0,0 +1,81 @@ +import math +from numbers import Number + +import torch +from torch.distributions import constraints +from torch.distributions.transformed_distribution import TransformedDistribution +from torch.distributions.transforms import AffineTransform, ExpTransform +from torch.distributions.uniform import Uniform +from torch.distributions.utils import broadcast_all, euler_constant + +__all__ = ["Gumbel"] + + +class Gumbel(TransformedDistribution): + r""" + Samples from a Gumbel Distribution. + + Examples:: + + >>> # xdoctest: +IGNORE_WANT("non-deterministic") + >>> m = Gumbel(torch.tensor([1.0]), torch.tensor([2.0])) + >>> m.sample() # sample from Gumbel distribution with loc=1, scale=2 + tensor([ 1.0124]) + + Args: + loc (float or Tensor): Location parameter of the distribution + scale (float or Tensor): Scale parameter of the distribution + """ + arg_constraints = {"loc": constraints.real, "scale": constraints.positive} + support = constraints.real + + def __init__(self, loc, scale, validate_args=None): + self.loc, self.scale = broadcast_all(loc, scale) + finfo = torch.finfo(self.loc.dtype) + if isinstance(loc, Number) and isinstance(scale, Number): + base_dist = Uniform(finfo.tiny, 1 - finfo.eps, validate_args=validate_args) + else: + base_dist = Uniform( + torch.full_like(self.loc, finfo.tiny), + torch.full_like(self.loc, 1 - finfo.eps), + validate_args=validate_args, + ) + transforms = [ + ExpTransform().inv, + AffineTransform(loc=0, scale=-torch.ones_like(self.scale)), + ExpTransform().inv, + AffineTransform(loc=loc, scale=-self.scale), + ] + super().__init__(base_dist, transforms, validate_args=validate_args) + + def expand(self, batch_shape, _instance=None): + new = self._get_checked_instance(Gumbel, _instance) + new.loc = self.loc.expand(batch_shape) + new.scale = self.scale.expand(batch_shape) + return super().expand(batch_shape, _instance=new) + + # Explicitly defining the log probability function for Gumbel due to precision issues + def log_prob(self, value): + if self._validate_args: + self._validate_sample(value) + y = (self.loc - value) / self.scale + return (y - y.exp()) - self.scale.log() + + @property + def mean(self): + return self.loc + self.scale * euler_constant + + @property + def mode(self): + return self.loc + + @property + def stddev(self): + return (math.pi / math.sqrt(6)) * self.scale + + @property + def variance(self): + return self.stddev.pow(2) + + def entropy(self): + return self.scale.log() + (1 + euler_constant) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/distributions/independent.py b/env-llmeval/lib/python3.10/site-packages/torch/distributions/independent.py new file mode 100644 index 0000000000000000000000000000000000000000..35b705fd0f29c7e95ba11c5d84a69a68822dfabb --- /dev/null +++ 
b/env-llmeval/lib/python3.10/site-packages/torch/distributions/independent.py @@ -0,0 +1,125 @@ +from typing import Dict + +import torch +from torch.distributions import constraints +from torch.distributions.distribution import Distribution +from torch.distributions.utils import _sum_rightmost + +__all__ = ["Independent"] + + +class Independent(Distribution): + r""" + Reinterprets some of the batch dims of a distribution as event dims. + + This is mainly useful for changing the shape of the result of + :meth:`log_prob`. For example to create a diagonal Normal distribution with + the same shape as a Multivariate Normal distribution (so they are + interchangeable), you can:: + + >>> from torch.distributions.multivariate_normal import MultivariateNormal + >>> from torch.distributions.normal import Normal + >>> loc = torch.zeros(3) + >>> scale = torch.ones(3) + >>> mvn = MultivariateNormal(loc, scale_tril=torch.diag(scale)) + >>> [mvn.batch_shape, mvn.event_shape] + [torch.Size([]), torch.Size([3])] + >>> normal = Normal(loc, scale) + >>> [normal.batch_shape, normal.event_shape] + [torch.Size([3]), torch.Size([])] + >>> diagn = Independent(normal, 1) + >>> [diagn.batch_shape, diagn.event_shape] + [torch.Size([]), torch.Size([3])] + + Args: + base_distribution (torch.distributions.distribution.Distribution): a + base distribution + reinterpreted_batch_ndims (int): the number of batch dims to + reinterpret as event dims + """ + arg_constraints: Dict[str, constraints.Constraint] = {} + + def __init__( + self, base_distribution, reinterpreted_batch_ndims, validate_args=None + ): + if reinterpreted_batch_ndims > len(base_distribution.batch_shape): + raise ValueError( + "Expected reinterpreted_batch_ndims <= len(base_distribution.batch_shape), " + f"actual {reinterpreted_batch_ndims} vs {len(base_distribution.batch_shape)}" + ) + shape = base_distribution.batch_shape + base_distribution.event_shape + event_dim = reinterpreted_batch_ndims + len(base_distribution.event_shape) + batch_shape = shape[: len(shape) - event_dim] + event_shape = shape[len(shape) - event_dim :] + self.base_dist = base_distribution + self.reinterpreted_batch_ndims = reinterpreted_batch_ndims + super().__init__(batch_shape, event_shape, validate_args=validate_args) + + def expand(self, batch_shape, _instance=None): + new = self._get_checked_instance(Independent, _instance) + batch_shape = torch.Size(batch_shape) + new.base_dist = self.base_dist.expand( + batch_shape + self.event_shape[: self.reinterpreted_batch_ndims] + ) + new.reinterpreted_batch_ndims = self.reinterpreted_batch_ndims + super(Independent, new).__init__( + batch_shape, self.event_shape, validate_args=False + ) + new._validate_args = self._validate_args + return new + + @property + def has_rsample(self): + return self.base_dist.has_rsample + + @property + def has_enumerate_support(self): + if self.reinterpreted_batch_ndims > 0: + return False + return self.base_dist.has_enumerate_support + + @constraints.dependent_property + def support(self): + result = self.base_dist.support + if self.reinterpreted_batch_ndims: + result = constraints.independent(result, self.reinterpreted_batch_ndims) + return result + + @property + def mean(self): + return self.base_dist.mean + + @property + def mode(self): + return self.base_dist.mode + + @property + def variance(self): + return self.base_dist.variance + + def sample(self, sample_shape=torch.Size()): + return self.base_dist.sample(sample_shape) + + def rsample(self, sample_shape=torch.Size()): + return 
self.base_dist.rsample(sample_shape) + + def log_prob(self, value): + log_prob = self.base_dist.log_prob(value) + return _sum_rightmost(log_prob, self.reinterpreted_batch_ndims) + + def entropy(self): + entropy = self.base_dist.entropy() + return _sum_rightmost(entropy, self.reinterpreted_batch_ndims) + + def enumerate_support(self, expand=True): + if self.reinterpreted_batch_ndims > 0: + raise NotImplementedError( + "Enumeration over cartesian product is not implemented" + ) + return self.base_dist.enumerate_support(expand=expand) + + def __repr__(self): + return ( + self.__class__.__name__ + + f"({self.base_dist}, {self.reinterpreted_batch_ndims})" + ) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/distributions/kl.py b/env-llmeval/lib/python3.10/site-packages/torch/distributions/kl.py new file mode 100644 index 0000000000000000000000000000000000000000..2b9db6ef2558d9713c34929f09db35c8af8ccafd --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/distributions/kl.py @@ -0,0 +1,971 @@ +import math +import warnings +from functools import total_ordering +from typing import Callable, Dict, Tuple, Type + +import torch +from torch import inf + +from .bernoulli import Bernoulli +from .beta import Beta +from .binomial import Binomial +from .categorical import Categorical +from .cauchy import Cauchy +from .continuous_bernoulli import ContinuousBernoulli +from .dirichlet import Dirichlet +from .distribution import Distribution +from .exp_family import ExponentialFamily +from .exponential import Exponential +from .gamma import Gamma +from .geometric import Geometric +from .gumbel import Gumbel +from .half_normal import HalfNormal +from .independent import Independent +from .laplace import Laplace +from .lowrank_multivariate_normal import ( + _batch_lowrank_logdet, + _batch_lowrank_mahalanobis, + LowRankMultivariateNormal, +) +from .multivariate_normal import _batch_mahalanobis, MultivariateNormal +from .normal import Normal +from .one_hot_categorical import OneHotCategorical +from .pareto import Pareto +from .poisson import Poisson +from .transformed_distribution import TransformedDistribution +from .uniform import Uniform +from .utils import _sum_rightmost, euler_constant as _euler_gamma + +_KL_REGISTRY = ( + {} +) # Source of truth mapping a few general (type, type) pairs to functions. +_KL_MEMOIZE: Dict[ + Tuple[Type, Type], Callable +] = {} # Memoized version mapping many specific (type, type) pairs to functions. + +__all__ = ["register_kl", "kl_divergence"] + + +def register_kl(type_p, type_q): + """ + Decorator to register a pairwise function with :meth:`kl_divergence`. + Usage:: + + @register_kl(Normal, Normal) + def kl_normal_normal(p, q): + # insert implementation here + + Lookup returns the most specific (type,type) match ordered by subclass. If + the match is ambiguous, a `RuntimeWarning` is raised. For example to + resolve the ambiguous situation:: + + @register_kl(BaseP, DerivedQ) + def kl_version1(p, q): ... + @register_kl(DerivedP, BaseQ) + def kl_version2(p, q): ... + + you should register a third most-specific implementation, e.g.:: + + register_kl(DerivedP, DerivedQ)(kl_version1) # Break the tie. + + Args: + type_p (type): A subclass of :class:`~torch.distributions.Distribution`. + type_q (type): A subclass of :class:`~torch.distributions.Distribution`. 
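A hedged sketch of the registration pattern for user code (the subclass name MyNormal is hypothetical, not something defined in this diff); note that for plain subclasses the subclass-aware lookup described above would already fall back to the (Normal, Normal) rule, so an explicit registration like this is only needed to override or disambiguate it:

    >>> import torch
    >>> from torch.distributions import Normal, kl_divergence, register_kl
    >>> class MyNormal(Normal):
    ...     pass
    >>> @register_kl(MyNormal, MyNormal)
    ... def _kl_mynormal_mynormal(p, q):
    ...     # delegate to the closed-form Normal/Normal rule
    ...     return kl_divergence(Normal(p.loc, p.scale), Normal(q.loc, q.scale))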
+ """ + if not isinstance(type_p, type) and issubclass(type_p, Distribution): + raise TypeError( + f"Expected type_p to be a Distribution subclass but got {type_p}" + ) + if not isinstance(type_q, type) and issubclass(type_q, Distribution): + raise TypeError( + f"Expected type_q to be a Distribution subclass but got {type_q}" + ) + + def decorator(fun): + _KL_REGISTRY[type_p, type_q] = fun + _KL_MEMOIZE.clear() # reset since lookup order may have changed + return fun + + return decorator + + +@total_ordering +class _Match: + __slots__ = ["types"] + + def __init__(self, *types): + self.types = types + + def __eq__(self, other): + return self.types == other.types + + def __le__(self, other): + for x, y in zip(self.types, other.types): + if not issubclass(x, y): + return False + if x is not y: + break + return True + + +def _dispatch_kl(type_p, type_q): + """ + Find the most specific approximate match, assuming single inheritance. + """ + matches = [ + (super_p, super_q) + for super_p, super_q in _KL_REGISTRY + if issubclass(type_p, super_p) and issubclass(type_q, super_q) + ] + if not matches: + return NotImplemented + # Check that the left- and right- lexicographic orders agree. + # mypy isn't smart enough to know that _Match implements __lt__ + # see: https://github.com/python/typing/issues/760#issuecomment-710670503 + left_p, left_q = min(_Match(*m) for m in matches).types # type: ignore[type-var] + right_q, right_p = min(_Match(*reversed(m)) for m in matches).types # type: ignore[type-var] + left_fun = _KL_REGISTRY[left_p, left_q] + right_fun = _KL_REGISTRY[right_p, right_q] + if left_fun is not right_fun: + warnings.warn( + "Ambiguous kl_divergence({}, {}). Please register_kl({}, {})".format( + type_p.__name__, type_q.__name__, left_p.__name__, right_q.__name__ + ), + RuntimeWarning, + ) + return left_fun + + +def _infinite_like(tensor): + """ + Helper function for obtaining infinite KL Divergence throughout + """ + return torch.full_like(tensor, inf) + + +def _x_log_x(tensor): + """ + Utility function for calculating x log x + """ + return tensor * tensor.log() + + +def _batch_trace_XXT(bmat): + """ + Utility function for calculating the trace of XX^{T} with X having arbitrary trailing batch dimensions + """ + n = bmat.size(-1) + m = bmat.size(-2) + flat_trace = bmat.reshape(-1, m * n).pow(2).sum(-1) + return flat_trace.reshape(bmat.shape[:-2]) + + +def kl_divergence(p: Distribution, q: Distribution) -> torch.Tensor: + r""" + Compute Kullback-Leibler divergence :math:`KL(p \| q)` between two distributions. + + .. math:: + + KL(p \| q) = \int p(x) \log\frac {p(x)} {q(x)} \,dx + + Args: + p (Distribution): A :class:`~torch.distributions.Distribution` object. + q (Distribution): A :class:`~torch.distributions.Distribution` object. + + Returns: + Tensor: A batch of KL divergences of shape `batch_shape`. + + Raises: + NotImplementedError: If the distribution types have not been registered via + :meth:`register_kl`. 
+ """ + try: + fun = _KL_MEMOIZE[type(p), type(q)] + except KeyError: + fun = _dispatch_kl(type(p), type(q)) + _KL_MEMOIZE[type(p), type(q)] = fun + if fun is NotImplemented: + raise NotImplementedError( + f"No KL(p || q) is implemented for p type {p.__class__.__name__} and q type {q.__class__.__name__}" + ) + return fun(p, q) + + +################################################################################ +# KL Divergence Implementations +################################################################################ + +# Same distributions + + +@register_kl(Bernoulli, Bernoulli) +def _kl_bernoulli_bernoulli(p, q): + t1 = p.probs * ( + torch.nn.functional.softplus(-q.logits) + - torch.nn.functional.softplus(-p.logits) + ) + t1[q.probs == 0] = inf + t1[p.probs == 0] = 0 + t2 = (1 - p.probs) * ( + torch.nn.functional.softplus(q.logits) - torch.nn.functional.softplus(p.logits) + ) + t2[q.probs == 1] = inf + t2[p.probs == 1] = 0 + return t1 + t2 + + +@register_kl(Beta, Beta) +def _kl_beta_beta(p, q): + sum_params_p = p.concentration1 + p.concentration0 + sum_params_q = q.concentration1 + q.concentration0 + t1 = q.concentration1.lgamma() + q.concentration0.lgamma() + (sum_params_p).lgamma() + t2 = p.concentration1.lgamma() + p.concentration0.lgamma() + (sum_params_q).lgamma() + t3 = (p.concentration1 - q.concentration1) * torch.digamma(p.concentration1) + t4 = (p.concentration0 - q.concentration0) * torch.digamma(p.concentration0) + t5 = (sum_params_q - sum_params_p) * torch.digamma(sum_params_p) + return t1 - t2 + t3 + t4 + t5 + + +@register_kl(Binomial, Binomial) +def _kl_binomial_binomial(p, q): + # from https://math.stackexchange.com/questions/2214993/ + # kullback-leibler-divergence-for-binomial-distributions-p-and-q + if (p.total_count < q.total_count).any(): + raise NotImplementedError( + "KL between Binomials where q.total_count > p.total_count is not implemented" + ) + kl = p.total_count * ( + p.probs * (p.logits - q.logits) + (-p.probs).log1p() - (-q.probs).log1p() + ) + inf_idxs = p.total_count > q.total_count + kl[inf_idxs] = _infinite_like(kl[inf_idxs]) + return kl + + +@register_kl(Categorical, Categorical) +def _kl_categorical_categorical(p, q): + t = p.probs * (p.logits - q.logits) + t[(q.probs == 0).expand_as(t)] = inf + t[(p.probs == 0).expand_as(t)] = 0 + return t.sum(-1) + + +@register_kl(ContinuousBernoulli, ContinuousBernoulli) +def _kl_continuous_bernoulli_continuous_bernoulli(p, q): + t1 = p.mean * (p.logits - q.logits) + t2 = p._cont_bern_log_norm() + torch.log1p(-p.probs) + t3 = -q._cont_bern_log_norm() - torch.log1p(-q.probs) + return t1 + t2 + t3 + + +@register_kl(Dirichlet, Dirichlet) +def _kl_dirichlet_dirichlet(p, q): + # From http://bariskurt.com/kullback-leibler-divergence-between-two-dirichlet-and-beta-distributions/ + sum_p_concentration = p.concentration.sum(-1) + sum_q_concentration = q.concentration.sum(-1) + t1 = sum_p_concentration.lgamma() - sum_q_concentration.lgamma() + t2 = (p.concentration.lgamma() - q.concentration.lgamma()).sum(-1) + t3 = p.concentration - q.concentration + t4 = p.concentration.digamma() - sum_p_concentration.digamma().unsqueeze(-1) + return t1 - t2 + (t3 * t4).sum(-1) + + +@register_kl(Exponential, Exponential) +def _kl_exponential_exponential(p, q): + rate_ratio = q.rate / p.rate + t1 = -rate_ratio.log() + return t1 + rate_ratio - 1 + + +@register_kl(ExponentialFamily, ExponentialFamily) +def _kl_expfamily_expfamily(p, q): + if not type(p) == type(q): + raise NotImplementedError( + "The cross KL-divergence between 
different exponential families cannot \ + be computed using Bregman divergences" + ) + p_nparams = [np.detach().requires_grad_() for np in p._natural_params] + q_nparams = q._natural_params + lg_normal = p._log_normalizer(*p_nparams) + gradients = torch.autograd.grad(lg_normal.sum(), p_nparams, create_graph=True) + result = q._log_normalizer(*q_nparams) - lg_normal + for pnp, qnp, g in zip(p_nparams, q_nparams, gradients): + term = (qnp - pnp) * g + result -= _sum_rightmost(term, len(q.event_shape)) + return result + + +@register_kl(Gamma, Gamma) +def _kl_gamma_gamma(p, q): + t1 = q.concentration * (p.rate / q.rate).log() + t2 = torch.lgamma(q.concentration) - torch.lgamma(p.concentration) + t3 = (p.concentration - q.concentration) * torch.digamma(p.concentration) + t4 = (q.rate - p.rate) * (p.concentration / p.rate) + return t1 + t2 + t3 + t4 + + +@register_kl(Gumbel, Gumbel) +def _kl_gumbel_gumbel(p, q): + ct1 = p.scale / q.scale + ct2 = q.loc / q.scale + ct3 = p.loc / q.scale + t1 = -ct1.log() - ct2 + ct3 + t2 = ct1 * _euler_gamma + t3 = torch.exp(ct2 + (1 + ct1).lgamma() - ct3) + return t1 + t2 + t3 - (1 + _euler_gamma) + + +@register_kl(Geometric, Geometric) +def _kl_geometric_geometric(p, q): + return -p.entropy() - torch.log1p(-q.probs) / p.probs - q.logits + + +@register_kl(HalfNormal, HalfNormal) +def _kl_halfnormal_halfnormal(p, q): + return _kl_normal_normal(p.base_dist, q.base_dist) + + +@register_kl(Laplace, Laplace) +def _kl_laplace_laplace(p, q): + # From http://www.mast.queensu.ca/~communications/Papers/gil-msc11.pdf + scale_ratio = p.scale / q.scale + loc_abs_diff = (p.loc - q.loc).abs() + t1 = -scale_ratio.log() + t2 = loc_abs_diff / q.scale + t3 = scale_ratio * torch.exp(-loc_abs_diff / p.scale) + return t1 + t2 + t3 - 1 + + +@register_kl(LowRankMultivariateNormal, LowRankMultivariateNormal) +def _kl_lowrankmultivariatenormal_lowrankmultivariatenormal(p, q): + if p.event_shape != q.event_shape: + raise ValueError( + "KL-divergence between two Low Rank Multivariate Normals with\ + different event shapes cannot be computed" + ) + + term1 = _batch_lowrank_logdet( + q._unbroadcasted_cov_factor, q._unbroadcasted_cov_diag, q._capacitance_tril + ) - _batch_lowrank_logdet( + p._unbroadcasted_cov_factor, p._unbroadcasted_cov_diag, p._capacitance_tril + ) + term3 = _batch_lowrank_mahalanobis( + q._unbroadcasted_cov_factor, + q._unbroadcasted_cov_diag, + q.loc - p.loc, + q._capacitance_tril, + ) + # Expands term2 according to + # inv(qcov) @ pcov = [inv(qD) - inv(qD) @ qW @ inv(qC) @ qW.T @ inv(qD)] @ (pW @ pW.T + pD) + # = [inv(qD) - A.T @ A] @ (pD + pW @ pW.T) + qWt_qDinv = q._unbroadcasted_cov_factor.mT / q._unbroadcasted_cov_diag.unsqueeze(-2) + A = torch.linalg.solve_triangular(q._capacitance_tril, qWt_qDinv, upper=False) + term21 = (p._unbroadcasted_cov_diag / q._unbroadcasted_cov_diag).sum(-1) + term22 = _batch_trace_XXT( + p._unbroadcasted_cov_factor * q._unbroadcasted_cov_diag.rsqrt().unsqueeze(-1) + ) + term23 = _batch_trace_XXT(A * p._unbroadcasted_cov_diag.sqrt().unsqueeze(-2)) + term24 = _batch_trace_XXT(A.matmul(p._unbroadcasted_cov_factor)) + term2 = term21 + term22 - term23 - term24 + return 0.5 * (term1 + term2 + term3 - p.event_shape[0]) + + +@register_kl(MultivariateNormal, LowRankMultivariateNormal) +def _kl_multivariatenormal_lowrankmultivariatenormal(p, q): + if p.event_shape != q.event_shape: + raise ValueError( + "KL-divergence between two (Low Rank) Multivariate Normals with\ + different event shapes cannot be computed" + ) + + term1 = 
_batch_lowrank_logdet( + q._unbroadcasted_cov_factor, q._unbroadcasted_cov_diag, q._capacitance_tril + ) - 2 * p._unbroadcasted_scale_tril.diagonal(dim1=-2, dim2=-1).log().sum(-1) + term3 = _batch_lowrank_mahalanobis( + q._unbroadcasted_cov_factor, + q._unbroadcasted_cov_diag, + q.loc - p.loc, + q._capacitance_tril, + ) + # Expands term2 according to + # inv(qcov) @ pcov = [inv(qD) - inv(qD) @ qW @ inv(qC) @ qW.T @ inv(qD)] @ p_tril @ p_tril.T + # = [inv(qD) - A.T @ A] @ p_tril @ p_tril.T + qWt_qDinv = q._unbroadcasted_cov_factor.mT / q._unbroadcasted_cov_diag.unsqueeze(-2) + A = torch.linalg.solve_triangular(q._capacitance_tril, qWt_qDinv, upper=False) + term21 = _batch_trace_XXT( + p._unbroadcasted_scale_tril * q._unbroadcasted_cov_diag.rsqrt().unsqueeze(-1) + ) + term22 = _batch_trace_XXT(A.matmul(p._unbroadcasted_scale_tril)) + term2 = term21 - term22 + return 0.5 * (term1 + term2 + term3 - p.event_shape[0]) + + +@register_kl(LowRankMultivariateNormal, MultivariateNormal) +def _kl_lowrankmultivariatenormal_multivariatenormal(p, q): + if p.event_shape != q.event_shape: + raise ValueError( + "KL-divergence between two (Low Rank) Multivariate Normals with\ + different event shapes cannot be computed" + ) + + term1 = 2 * q._unbroadcasted_scale_tril.diagonal(dim1=-2, dim2=-1).log().sum( + -1 + ) - _batch_lowrank_logdet( + p._unbroadcasted_cov_factor, p._unbroadcasted_cov_diag, p._capacitance_tril + ) + term3 = _batch_mahalanobis(q._unbroadcasted_scale_tril, (q.loc - p.loc)) + # Expands term2 according to + # inv(qcov) @ pcov = inv(q_tril @ q_tril.T) @ (pW @ pW.T + pD) + combined_batch_shape = torch._C._infer_size( + q._unbroadcasted_scale_tril.shape[:-2], p._unbroadcasted_cov_factor.shape[:-2] + ) + n = p.event_shape[0] + q_scale_tril = q._unbroadcasted_scale_tril.expand(combined_batch_shape + (n, n)) + p_cov_factor = p._unbroadcasted_cov_factor.expand( + combined_batch_shape + (n, p.cov_factor.size(-1)) + ) + p_cov_diag = torch.diag_embed(p._unbroadcasted_cov_diag.sqrt()).expand( + combined_batch_shape + (n, n) + ) + term21 = _batch_trace_XXT( + torch.linalg.solve_triangular(q_scale_tril, p_cov_factor, upper=False) + ) + term22 = _batch_trace_XXT( + torch.linalg.solve_triangular(q_scale_tril, p_cov_diag, upper=False) + ) + term2 = term21 + term22 + return 0.5 * (term1 + term2 + term3 - p.event_shape[0]) + + +@register_kl(MultivariateNormal, MultivariateNormal) +def _kl_multivariatenormal_multivariatenormal(p, q): + # From https://en.wikipedia.org/wiki/Multivariate_normal_distribution#Kullback%E2%80%93Leibler_divergence + if p.event_shape != q.event_shape: + raise ValueError( + "KL-divergence between two Multivariate Normals with\ + different event shapes cannot be computed" + ) + + half_term1 = q._unbroadcasted_scale_tril.diagonal(dim1=-2, dim2=-1).log().sum( + -1 + ) - p._unbroadcasted_scale_tril.diagonal(dim1=-2, dim2=-1).log().sum(-1) + combined_batch_shape = torch._C._infer_size( + q._unbroadcasted_scale_tril.shape[:-2], p._unbroadcasted_scale_tril.shape[:-2] + ) + n = p.event_shape[0] + q_scale_tril = q._unbroadcasted_scale_tril.expand(combined_batch_shape + (n, n)) + p_scale_tril = p._unbroadcasted_scale_tril.expand(combined_batch_shape + (n, n)) + term2 = _batch_trace_XXT( + torch.linalg.solve_triangular(q_scale_tril, p_scale_tril, upper=False) + ) + term3 = _batch_mahalanobis(q._unbroadcasted_scale_tril, (q.loc - p.loc)) + return half_term1 + 0.5 * (term2 + term3 - n) + + +@register_kl(Normal, Normal) +def _kl_normal_normal(p, q): + var_ratio = (p.scale / q.scale).pow(2) + t1 = 
((p.loc - q.loc) / q.scale).pow(2) + return 0.5 * (var_ratio + t1 - 1 - var_ratio.log()) + + +@register_kl(OneHotCategorical, OneHotCategorical) +def _kl_onehotcategorical_onehotcategorical(p, q): + return _kl_categorical_categorical(p._categorical, q._categorical) + + +@register_kl(Pareto, Pareto) +def _kl_pareto_pareto(p, q): + # From http://www.mast.queensu.ca/~communications/Papers/gil-msc11.pdf + scale_ratio = p.scale / q.scale + alpha_ratio = q.alpha / p.alpha + t1 = q.alpha * scale_ratio.log() + t2 = -alpha_ratio.log() + result = t1 + t2 + alpha_ratio - 1 + result[p.support.lower_bound < q.support.lower_bound] = inf + return result + + +@register_kl(Poisson, Poisson) +def _kl_poisson_poisson(p, q): + return p.rate * (p.rate.log() - q.rate.log()) - (p.rate - q.rate) + + +@register_kl(TransformedDistribution, TransformedDistribution) +def _kl_transformed_transformed(p, q): + if p.transforms != q.transforms: + raise NotImplementedError + if p.event_shape != q.event_shape: + raise NotImplementedError + return kl_divergence(p.base_dist, q.base_dist) + + +@register_kl(Uniform, Uniform) +def _kl_uniform_uniform(p, q): + result = ((q.high - q.low) / (p.high - p.low)).log() + result[(q.low > p.low) | (q.high < p.high)] = inf + return result + + +# Different distributions +@register_kl(Bernoulli, Poisson) +def _kl_bernoulli_poisson(p, q): + return -p.entropy() - (p.probs * q.rate.log() - q.rate) + + +@register_kl(Beta, ContinuousBernoulli) +def _kl_beta_continuous_bernoulli(p, q): + return ( + -p.entropy() + - p.mean * q.logits + - torch.log1p(-q.probs) + - q._cont_bern_log_norm() + ) + + +@register_kl(Beta, Pareto) +def _kl_beta_infinity(p, q): + return _infinite_like(p.concentration1) + + +@register_kl(Beta, Exponential) +def _kl_beta_exponential(p, q): + return ( + -p.entropy() + - q.rate.log() + + q.rate * (p.concentration1 / (p.concentration1 + p.concentration0)) + ) + + +@register_kl(Beta, Gamma) +def _kl_beta_gamma(p, q): + t1 = -p.entropy() + t2 = q.concentration.lgamma() - q.concentration * q.rate.log() + t3 = (q.concentration - 1) * ( + p.concentration1.digamma() - (p.concentration1 + p.concentration0).digamma() + ) + t4 = q.rate * p.concentration1 / (p.concentration1 + p.concentration0) + return t1 + t2 - t3 + t4 + + +# TODO: Add Beta-Laplace KL Divergence + + +@register_kl(Beta, Normal) +def _kl_beta_normal(p, q): + E_beta = p.concentration1 / (p.concentration1 + p.concentration0) + var_normal = q.scale.pow(2) + t1 = -p.entropy() + t2 = 0.5 * (var_normal * 2 * math.pi).log() + t3 = ( + E_beta * (1 - E_beta) / (p.concentration1 + p.concentration0 + 1) + + E_beta.pow(2) + ) * 0.5 + t4 = q.loc * E_beta + t5 = q.loc.pow(2) * 0.5 + return t1 + t2 + (t3 - t4 + t5) / var_normal + + +@register_kl(Beta, Uniform) +def _kl_beta_uniform(p, q): + result = -p.entropy() + (q.high - q.low).log() + result[(q.low > p.support.lower_bound) | (q.high < p.support.upper_bound)] = inf + return result + + +# Note that the KL between a ContinuousBernoulli and Beta has no closed form + + +@register_kl(ContinuousBernoulli, Pareto) +def _kl_continuous_bernoulli_infinity(p, q): + return _infinite_like(p.probs) + + +@register_kl(ContinuousBernoulli, Exponential) +def _kl_continuous_bernoulli_exponential(p, q): + return -p.entropy() - torch.log(q.rate) + q.rate * p.mean + + +# Note that the KL between a ContinuousBernoulli and Gamma has no closed form +# TODO: Add ContinuousBernoulli-Laplace KL Divergence + + +@register_kl(ContinuousBernoulli, Normal) +def _kl_continuous_bernoulli_normal(p, q): + t1 = 
-p.entropy() + t2 = 0.5 * (math.log(2.0 * math.pi) + torch.square(q.loc / q.scale)) + torch.log( + q.scale + ) + t3 = (p.variance + torch.square(p.mean) - 2.0 * q.loc * p.mean) / ( + 2.0 * torch.square(q.scale) + ) + return t1 + t2 + t3 + + +@register_kl(ContinuousBernoulli, Uniform) +def _kl_continuous_bernoulli_uniform(p, q): + result = -p.entropy() + (q.high - q.low).log() + return torch.where( + torch.max( + torch.ge(q.low, p.support.lower_bound), + torch.le(q.high, p.support.upper_bound), + ), + torch.ones_like(result) * inf, + result, + ) + + +@register_kl(Exponential, Beta) +@register_kl(Exponential, ContinuousBernoulli) +@register_kl(Exponential, Pareto) +@register_kl(Exponential, Uniform) +def _kl_exponential_infinity(p, q): + return _infinite_like(p.rate) + + +@register_kl(Exponential, Gamma) +def _kl_exponential_gamma(p, q): + ratio = q.rate / p.rate + t1 = -q.concentration * torch.log(ratio) + return ( + t1 + + ratio + + q.concentration.lgamma() + + q.concentration * _euler_gamma + - (1 + _euler_gamma) + ) + + +@register_kl(Exponential, Gumbel) +def _kl_exponential_gumbel(p, q): + scale_rate_prod = p.rate * q.scale + loc_scale_ratio = q.loc / q.scale + t1 = scale_rate_prod.log() - 1 + t2 = torch.exp(loc_scale_ratio) * scale_rate_prod / (scale_rate_prod + 1) + t3 = scale_rate_prod.reciprocal() + return t1 - loc_scale_ratio + t2 + t3 + + +# TODO: Add Exponential-Laplace KL Divergence + + +@register_kl(Exponential, Normal) +def _kl_exponential_normal(p, q): + var_normal = q.scale.pow(2) + rate_sqr = p.rate.pow(2) + t1 = 0.5 * torch.log(rate_sqr * var_normal * 2 * math.pi) + t2 = rate_sqr.reciprocal() + t3 = q.loc / p.rate + t4 = q.loc.pow(2) * 0.5 + return t1 - 1 + (t2 - t3 + t4) / var_normal + + +@register_kl(Gamma, Beta) +@register_kl(Gamma, ContinuousBernoulli) +@register_kl(Gamma, Pareto) +@register_kl(Gamma, Uniform) +def _kl_gamma_infinity(p, q): + return _infinite_like(p.concentration) + + +@register_kl(Gamma, Exponential) +def _kl_gamma_exponential(p, q): + return -p.entropy() - q.rate.log() + q.rate * p.concentration / p.rate + + +@register_kl(Gamma, Gumbel) +def _kl_gamma_gumbel(p, q): + beta_scale_prod = p.rate * q.scale + loc_scale_ratio = q.loc / q.scale + t1 = ( + (p.concentration - 1) * p.concentration.digamma() + - p.concentration.lgamma() + - p.concentration + ) + t2 = beta_scale_prod.log() + p.concentration / beta_scale_prod + t3 = ( + torch.exp(loc_scale_ratio) + * (1 + beta_scale_prod.reciprocal()).pow(-p.concentration) + - loc_scale_ratio + ) + return t1 + t2 + t3 + + +# TODO: Add Gamma-Laplace KL Divergence + + +@register_kl(Gamma, Normal) +def _kl_gamma_normal(p, q): + var_normal = q.scale.pow(2) + beta_sqr = p.rate.pow(2) + t1 = ( + 0.5 * torch.log(beta_sqr * var_normal * 2 * math.pi) + - p.concentration + - p.concentration.lgamma() + ) + t2 = 0.5 * (p.concentration.pow(2) + p.concentration) / beta_sqr + t3 = q.loc * p.concentration / p.rate + t4 = 0.5 * q.loc.pow(2) + return ( + t1 + + (p.concentration - 1) * p.concentration.digamma() + + (t2 - t3 + t4) / var_normal + ) + + +@register_kl(Gumbel, Beta) +@register_kl(Gumbel, ContinuousBernoulli) +@register_kl(Gumbel, Exponential) +@register_kl(Gumbel, Gamma) +@register_kl(Gumbel, Pareto) +@register_kl(Gumbel, Uniform) +def _kl_gumbel_infinity(p, q): + return _infinite_like(p.loc) + + +# TODO: Add Gumbel-Laplace KL Divergence + + +@register_kl(Gumbel, Normal) +def _kl_gumbel_normal(p, q): + param_ratio = p.scale / q.scale + t1 = (param_ratio / math.sqrt(2 * math.pi)).log() + t2 = (math.pi * param_ratio * 
0.5).pow(2) / 3 + t3 = ((p.loc + p.scale * _euler_gamma - q.loc) / q.scale).pow(2) * 0.5 + return -t1 + t2 + t3 - (_euler_gamma + 1) + + +@register_kl(Laplace, Beta) +@register_kl(Laplace, ContinuousBernoulli) +@register_kl(Laplace, Exponential) +@register_kl(Laplace, Gamma) +@register_kl(Laplace, Pareto) +@register_kl(Laplace, Uniform) +def _kl_laplace_infinity(p, q): + return _infinite_like(p.loc) + + +@register_kl(Laplace, Normal) +def _kl_laplace_normal(p, q): + var_normal = q.scale.pow(2) + scale_sqr_var_ratio = p.scale.pow(2) / var_normal + t1 = 0.5 * torch.log(2 * scale_sqr_var_ratio / math.pi) + t2 = 0.5 * p.loc.pow(2) + t3 = p.loc * q.loc + t4 = 0.5 * q.loc.pow(2) + return -t1 + scale_sqr_var_ratio + (t2 - t3 + t4) / var_normal - 1 + + +@register_kl(Normal, Beta) +@register_kl(Normal, ContinuousBernoulli) +@register_kl(Normal, Exponential) +@register_kl(Normal, Gamma) +@register_kl(Normal, Pareto) +@register_kl(Normal, Uniform) +def _kl_normal_infinity(p, q): + return _infinite_like(p.loc) + + +@register_kl(Normal, Gumbel) +def _kl_normal_gumbel(p, q): + mean_scale_ratio = p.loc / q.scale + var_scale_sqr_ratio = (p.scale / q.scale).pow(2) + loc_scale_ratio = q.loc / q.scale + t1 = var_scale_sqr_ratio.log() * 0.5 + t2 = mean_scale_ratio - loc_scale_ratio + t3 = torch.exp(-mean_scale_ratio + 0.5 * var_scale_sqr_ratio + loc_scale_ratio) + return -t1 + t2 + t3 - (0.5 * (1 + math.log(2 * math.pi))) + + +@register_kl(Normal, Laplace) +def _kl_normal_laplace(p, q): + loc_diff = p.loc - q.loc + scale_ratio = p.scale / q.scale + loc_diff_scale_ratio = loc_diff / p.scale + t1 = torch.log(scale_ratio) + t2 = ( + math.sqrt(2 / math.pi) * p.scale * torch.exp(-0.5 * loc_diff_scale_ratio.pow(2)) + ) + t3 = loc_diff * torch.erf(math.sqrt(0.5) * loc_diff_scale_ratio) + return -t1 + (t2 + t3) / q.scale - (0.5 * (1 + math.log(0.5 * math.pi))) + + +@register_kl(Pareto, Beta) +@register_kl(Pareto, ContinuousBernoulli) +@register_kl(Pareto, Uniform) +def _kl_pareto_infinity(p, q): + return _infinite_like(p.scale) + + +@register_kl(Pareto, Exponential) +def _kl_pareto_exponential(p, q): + scale_rate_prod = p.scale * q.rate + t1 = (p.alpha / scale_rate_prod).log() + t2 = p.alpha.reciprocal() + t3 = p.alpha * scale_rate_prod / (p.alpha - 1) + result = t1 - t2 + t3 - 1 + result[p.alpha <= 1] = inf + return result + + +@register_kl(Pareto, Gamma) +def _kl_pareto_gamma(p, q): + common_term = p.scale.log() + p.alpha.reciprocal() + t1 = p.alpha.log() - common_term + t2 = q.concentration.lgamma() - q.concentration * q.rate.log() + t3 = (1 - q.concentration) * common_term + t4 = q.rate * p.alpha * p.scale / (p.alpha - 1) + result = t1 + t2 + t3 + t4 - 1 + result[p.alpha <= 1] = inf + return result + + +# TODO: Add Pareto-Laplace KL Divergence + + +@register_kl(Pareto, Normal) +def _kl_pareto_normal(p, q): + var_normal = 2 * q.scale.pow(2) + common_term = p.scale / (p.alpha - 1) + t1 = (math.sqrt(2 * math.pi) * q.scale * p.alpha / p.scale).log() + t2 = p.alpha.reciprocal() + t3 = p.alpha * common_term.pow(2) / (p.alpha - 2) + t4 = (p.alpha * common_term - q.loc).pow(2) + result = t1 - t2 + (t3 + t4) / var_normal - 1 + result[p.alpha <= 2] = inf + return result + + +@register_kl(Poisson, Bernoulli) +@register_kl(Poisson, Binomial) +def _kl_poisson_infinity(p, q): + return _infinite_like(p.rate) + + +@register_kl(Uniform, Beta) +def _kl_uniform_beta(p, q): + common_term = p.high - p.low + t1 = torch.log(common_term) + t2 = ( + (q.concentration1 - 1) + * (_x_log_x(p.high) - _x_log_x(p.low) - common_term) + / 
common_term + ) + t3 = ( + (q.concentration0 - 1) + * (_x_log_x(1 - p.high) - _x_log_x(1 - p.low) + common_term) + / common_term + ) + t4 = ( + q.concentration1.lgamma() + + q.concentration0.lgamma() + - (q.concentration1 + q.concentration0).lgamma() + ) + result = t3 + t4 - t1 - t2 + result[(p.high > q.support.upper_bound) | (p.low < q.support.lower_bound)] = inf + return result + + +@register_kl(Uniform, ContinuousBernoulli) +def _kl_uniform_continuous_bernoulli(p, q): + result = ( + -p.entropy() + - p.mean * q.logits + - torch.log1p(-q.probs) + - q._cont_bern_log_norm() + ) + return torch.where( + torch.max( + torch.ge(p.high, q.support.upper_bound), + torch.le(p.low, q.support.lower_bound), + ), + torch.ones_like(result) * inf, + result, + ) + + +@register_kl(Uniform, Exponential) +def _kl_uniform_exponetial(p, q): + result = q.rate * (p.high + p.low) / 2 - ((p.high - p.low) * q.rate).log() + result[p.low < q.support.lower_bound] = inf + return result + + +@register_kl(Uniform, Gamma) +def _kl_uniform_gamma(p, q): + common_term = p.high - p.low + t1 = common_term.log() + t2 = q.concentration.lgamma() - q.concentration * q.rate.log() + t3 = ( + (1 - q.concentration) + * (_x_log_x(p.high) - _x_log_x(p.low) - common_term) + / common_term + ) + t4 = q.rate * (p.high + p.low) / 2 + result = -t1 + t2 + t3 + t4 + result[p.low < q.support.lower_bound] = inf + return result + + +@register_kl(Uniform, Gumbel) +def _kl_uniform_gumbel(p, q): + common_term = q.scale / (p.high - p.low) + high_loc_diff = (p.high - q.loc) / q.scale + low_loc_diff = (p.low - q.loc) / q.scale + t1 = common_term.log() + 0.5 * (high_loc_diff + low_loc_diff) + t2 = common_term * (torch.exp(-high_loc_diff) - torch.exp(-low_loc_diff)) + return t1 - t2 + + +# TODO: Uniform-Laplace KL Divergence + + +@register_kl(Uniform, Normal) +def _kl_uniform_normal(p, q): + common_term = p.high - p.low + t1 = (math.sqrt(math.pi * 2) * q.scale / common_term).log() + t2 = (common_term).pow(2) / 12 + t3 = ((p.high + p.low - 2 * q.loc) / 2).pow(2) + return t1 + 0.5 * (t2 + t3) / q.scale.pow(2) + + +@register_kl(Uniform, Pareto) +def _kl_uniform_pareto(p, q): + support_uniform = p.high - p.low + t1 = (q.alpha * q.scale.pow(q.alpha) * (support_uniform)).log() + t2 = (_x_log_x(p.high) - _x_log_x(p.low) - support_uniform) / support_uniform + result = t2 * (q.alpha + 1) - t1 + result[p.low < q.support.lower_bound] = inf + return result + + +@register_kl(Independent, Independent) +def _kl_independent_independent(p, q): + if p.reinterpreted_batch_ndims != q.reinterpreted_batch_ndims: + raise NotImplementedError + result = kl_divergence(p.base_dist, q.base_dist) + return _sum_rightmost(result, p.reinterpreted_batch_ndims) + + +@register_kl(Cauchy, Cauchy) +def _kl_cauchy_cauchy(p, q): + # From https://arxiv.org/abs/1905.10965 + t1 = ((p.scale + q.scale).pow(2) + (p.loc - q.loc).pow(2)).log() + t2 = (4 * p.scale * q.scale).log() + return t1 - t2 + + +def _add_kl_info(): + """Appends a list of implemented KL functions to the doc for kl_divergence.""" + rows = [ + "KL divergence is currently implemented for the following distribution pairs:" + ] + for p, q in sorted( + _KL_REGISTRY, key=lambda p_q: (p_q[0].__name__, p_q[1].__name__) + ): + rows.append( + f"* :class:`~torch.distributions.{p.__name__}` and :class:`~torch.distributions.{q.__name__}`" + ) + kl_info = "\n\t".join(rows) + if kl_divergence.__doc__: + kl_divergence.__doc__ += kl_info # type: ignore[operator] diff --git 
a/env-llmeval/lib/python3.10/site-packages/torch/distributions/kumaraswamy.py b/env-llmeval/lib/python3.10/site-packages/torch/distributions/kumaraswamy.py new file mode 100644 index 0000000000000000000000000000000000000000..9de3c422dc4c5a8ffa2a90dc61fd1439adcc60a6 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/distributions/kumaraswamy.py @@ -0,0 +1,97 @@ +import torch +from torch import nan +from torch.distributions import constraints +from torch.distributions.transformed_distribution import TransformedDistribution +from torch.distributions.transforms import AffineTransform, PowerTransform +from torch.distributions.uniform import Uniform +from torch.distributions.utils import broadcast_all, euler_constant + +__all__ = ["Kumaraswamy"] + + +def _moments(a, b, n): + """ + Computes nth moment of Kumaraswamy using using torch.lgamma + """ + arg1 = 1 + n / a + log_value = torch.lgamma(arg1) + torch.lgamma(b) - torch.lgamma(arg1 + b) + return b * torch.exp(log_value) + + +class Kumaraswamy(TransformedDistribution): + r""" + Samples from a Kumaraswamy distribution. + + Example:: + + >>> # xdoctest: +IGNORE_WANT("non-deterministic") + >>> m = Kumaraswamy(torch.tensor([1.0]), torch.tensor([1.0])) + >>> m.sample() # sample from a Kumaraswamy distribution with concentration alpha=1 and beta=1 + tensor([ 0.1729]) + + Args: + concentration1 (float or Tensor): 1st concentration parameter of the distribution + (often referred to as alpha) + concentration0 (float or Tensor): 2nd concentration parameter of the distribution + (often referred to as beta) + """ + arg_constraints = { + "concentration1": constraints.positive, + "concentration0": constraints.positive, + } + support = constraints.unit_interval + has_rsample = True + + def __init__(self, concentration1, concentration0, validate_args=None): + self.concentration1, self.concentration0 = broadcast_all( + concentration1, concentration0 + ) + finfo = torch.finfo(self.concentration0.dtype) + base_dist = Uniform( + torch.full_like(self.concentration0, 0), + torch.full_like(self.concentration0, 1), + validate_args=validate_args, + ) + transforms = [ + PowerTransform(exponent=self.concentration0.reciprocal()), + AffineTransform(loc=1.0, scale=-1.0), + PowerTransform(exponent=self.concentration1.reciprocal()), + ] + super().__init__(base_dist, transforms, validate_args=validate_args) + + def expand(self, batch_shape, _instance=None): + new = self._get_checked_instance(Kumaraswamy, _instance) + new.concentration1 = self.concentration1.expand(batch_shape) + new.concentration0 = self.concentration0.expand(batch_shape) + return super().expand(batch_shape, _instance=new) + + @property + def mean(self): + return _moments(self.concentration1, self.concentration0, 1) + + @property + def mode(self): + # Evaluate in log-space for numerical stability. 
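+ # Rows where either concentration is below 1 are overwritten with NaN below, since the + # distribution has no interior mode in that regime.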
+ log_mode = ( + self.concentration0.reciprocal() * (-self.concentration0).log1p() + - (-self.concentration0 * self.concentration1).log1p() + ) + log_mode[(self.concentration0 < 1) | (self.concentration1 < 1)] = nan + return log_mode.exp() + + @property + def variance(self): + return _moments(self.concentration1, self.concentration0, 2) - torch.pow( + self.mean, 2 + ) + + def entropy(self): + t1 = 1 - self.concentration1.reciprocal() + t0 = 1 - self.concentration0.reciprocal() + H0 = torch.digamma(self.concentration0 + 1) + euler_constant + return ( + t0 + + t1 * H0 + - torch.log(self.concentration1) + - torch.log(self.concentration0) + ) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/distributions/laplace.py b/env-llmeval/lib/python3.10/site-packages/torch/distributions/laplace.py new file mode 100644 index 0000000000000000000000000000000000000000..7b830cc76f9b43149105d4c6d75560560117a18f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/distributions/laplace.py @@ -0,0 +1,94 @@ +from numbers import Number + +import torch +from torch.distributions import constraints +from torch.distributions.distribution import Distribution +from torch.distributions.utils import broadcast_all + +__all__ = ["Laplace"] + + +class Laplace(Distribution): + r""" + Creates a Laplace distribution parameterized by :attr:`loc` and :attr:`scale`. + + Example:: + + >>> # xdoctest: +IGNORE_WANT("non-deterministic") + >>> m = Laplace(torch.tensor([0.0]), torch.tensor([1.0])) + >>> m.sample() # Laplace distributed with loc=0, scale=1 + tensor([ 0.1046]) + + Args: + loc (float or Tensor): mean of the distribution + scale (float or Tensor): scale of the distribution + """ + arg_constraints = {"loc": constraints.real, "scale": constraints.positive} + support = constraints.real + has_rsample = True + + @property + def mean(self): + return self.loc + + @property + def mode(self): + return self.loc + + @property + def variance(self): + return 2 * self.scale.pow(2) + + @property + def stddev(self): + return (2**0.5) * self.scale + + def __init__(self, loc, scale, validate_args=None): + self.loc, self.scale = broadcast_all(loc, scale) + if isinstance(loc, Number) and isinstance(scale, Number): + batch_shape = torch.Size() + else: + batch_shape = self.loc.size() + super().__init__(batch_shape, validate_args=validate_args) + + def expand(self, batch_shape, _instance=None): + new = self._get_checked_instance(Laplace, _instance) + batch_shape = torch.Size(batch_shape) + new.loc = self.loc.expand(batch_shape) + new.scale = self.scale.expand(batch_shape) + super(Laplace, new).__init__(batch_shape, validate_args=False) + new._validate_args = self._validate_args + return new + + def rsample(self, sample_shape=torch.Size()): + shape = self._extended_shape(sample_shape) + finfo = torch.finfo(self.loc.dtype) + if torch._C._get_tracing_state(): + # [JIT WORKAROUND] lack of support for .uniform_() + u = torch.rand(shape, dtype=self.loc.dtype, device=self.loc.device) * 2 - 1 + return self.loc - self.scale * u.sign() * torch.log1p( + -u.abs().clamp(min=finfo.tiny) + ) + u = self.loc.new(shape).uniform_(finfo.eps - 1, 1) + # TODO: If we ever implement tensor.nextafter, below is what we want ideally. 
+ # u = self.loc.new(shape).uniform_(self.loc.nextafter(-.5, 0), .5) + return self.loc - self.scale * u.sign() * torch.log1p(-u.abs()) + + def log_prob(self, value): + if self._validate_args: + self._validate_sample(value) + return -torch.log(2 * self.scale) - torch.abs(value - self.loc) / self.scale + + def cdf(self, value): + if self._validate_args: + self._validate_sample(value) + return 0.5 - 0.5 * (value - self.loc).sign() * torch.expm1( + -(value - self.loc).abs() / self.scale + ) + + def icdf(self, value): + term = value - 0.5 + return self.loc - self.scale * (term).sign() * torch.log1p(-2 * term.abs()) + + def entropy(self): + return 1 + torch.log(2 * self.scale) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/distributions/lkj_cholesky.py b/env-llmeval/lib/python3.10/site-packages/torch/distributions/lkj_cholesky.py new file mode 100644 index 0000000000000000000000000000000000000000..c1cb46f02fc24826ce8db3f079abd133652e8213 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/distributions/lkj_cholesky.py @@ -0,0 +1,142 @@ +""" +This closely follows the implementation in NumPyro (https://github.com/pyro-ppl/numpyro). + +Original copyright notice: + +# Copyright: Contributors to the Pyro project. +# SPDX-License-Identifier: Apache-2.0 +""" + +import math + +import torch +from torch.distributions import Beta, constraints +from torch.distributions.distribution import Distribution +from torch.distributions.utils import broadcast_all + +__all__ = ["LKJCholesky"] + + +class LKJCholesky(Distribution): + r""" + LKJ distribution for lower Cholesky factor of correlation matrices. + The distribution is controlled by ``concentration`` parameter :math:`\eta` + to make the probability of the correlation matrix :math:`M` generated from + a Cholesky factor proportional to :math:`\det(M)^{\eta - 1}`. Because of that, + when ``concentration == 1``, we have a uniform distribution over Cholesky + factors of correlation matrices:: + + L ~ LKJCholesky(dim, concentration) + X = L @ L' ~ LKJCorr(dim, concentration) + + Note that this distribution samples the + Cholesky factor of correlation matrices and not the correlation matrices + themselves and thereby differs slightly from the derivations in [1] for + the `LKJCorr` distribution. For sampling, this uses the Onion method from + [1] Section 3. + + Example:: + + >>> # xdoctest: +IGNORE_WANT("non-deterministic") + >>> l = LKJCholesky(3, 0.5) + >>> l.sample() # l @ l.T is a sample of a correlation 3x3 matrix + tensor([[ 1.0000, 0.0000, 0.0000], + [ 0.3516, 0.9361, 0.0000], + [-0.1899, 0.4748, 0.8593]]) + + Args: + dimension (dim): dimension of the matrices + concentration (float or Tensor): concentration/shape parameter of the + distribution (often referred to as eta) + + **References** + + [1] `Generating random correlation matrices based on vines and extended onion method` (2009), + Daniel Lewandowski, Dorota Kurowicka, Harry Joe. + Journal of Multivariate Analysis. 100. 10.1016/j.jmva.2009.04.008 + """ + arg_constraints = {"concentration": constraints.positive} + support = constraints.corr_cholesky + + def __init__(self, dim, concentration=1.0, validate_args=None): + if dim < 2: + raise ValueError( + f"Expected dim to be an integer greater than or equal to 2. Found dim={dim}." + ) + self.dim = dim + (self.concentration,) = broadcast_all(concentration) + batch_shape = self.concentration.size() + event_shape = torch.Size((dim, dim)) + # This is used to draw vectorized samples from the beta distribution in Sec. 3.2 of [1]. 
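+ # The offset vector below yields one (concentration1, concentration0) pair per row of the + # Cholesky factor, so all rows can be drawn from a single batched Beta distribution.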
+ marginal_conc = self.concentration + 0.5 * (self.dim - 2) + offset = torch.arange( + self.dim - 1, + dtype=self.concentration.dtype, + device=self.concentration.device, + ) + offset = torch.cat([offset.new_zeros((1,)), offset]) + beta_conc1 = offset + 0.5 + beta_conc0 = marginal_conc.unsqueeze(-1) - 0.5 * offset + self._beta = Beta(beta_conc1, beta_conc0) + super().__init__(batch_shape, event_shape, validate_args) + + def expand(self, batch_shape, _instance=None): + new = self._get_checked_instance(LKJCholesky, _instance) + batch_shape = torch.Size(batch_shape) + new.dim = self.dim + new.concentration = self.concentration.expand(batch_shape) + new._beta = self._beta.expand(batch_shape + (self.dim,)) + super(LKJCholesky, new).__init__( + batch_shape, self.event_shape, validate_args=False + ) + new._validate_args = self._validate_args + return new + + def sample(self, sample_shape=torch.Size()): + # This uses the Onion method, but there are a few differences from [1] Sec. 3.2: + # - This vectorizes the for loop and also works for heterogeneous eta. + # - Same algorithm generalizes to n=1. + # - The procedure is simplified since we are sampling the cholesky factor of + # the correlation matrix instead of the correlation matrix itself. As such, + # we only need to generate `w`. + y = self._beta.sample(sample_shape).unsqueeze(-1) + u_normal = torch.randn( + self._extended_shape(sample_shape), dtype=y.dtype, device=y.device + ).tril(-1) + u_hypersphere = u_normal / u_normal.norm(dim=-1, keepdim=True) + # Replace NaNs in first row + u_hypersphere[..., 0, :].fill_(0.0) + w = torch.sqrt(y) * u_hypersphere + # Fill diagonal elements; clamp for numerical stability + eps = torch.finfo(w.dtype).tiny + diag_elems = torch.clamp(1 - torch.sum(w**2, dim=-1), min=eps).sqrt() + w += torch.diag_embed(diag_elems) + return w + + def log_prob(self, value): + # See: https://mc-stan.org/docs/2_25/functions-reference/cholesky-lkj-correlation-distribution.html + # The probability of a correlation matrix is proportional to + # determinant ** (concentration - 1) = prod(L_ii ^ 2(concentration - 1)) + # Additionally, the Jacobian of the transformation from Cholesky factor to + # correlation matrix is: + # prod(L_ii ^ (D - i)) + # So the probability of a Cholesky factor is propotional to + # prod(L_ii ^ (2 * concentration - 2 + D - i)) = prod(L_ii ^ order_i) + # with order_i = 2 * concentration - 2 + D - i + if self._validate_args: + self._validate_sample(value) + diag_elems = value.diagonal(dim1=-1, dim2=-2)[..., 1:] + order = torch.arange(2, self.dim + 1, device=self.concentration.device) + order = 2 * (self.concentration - 1).unsqueeze(-1) + self.dim - order + unnormalized_log_pdf = torch.sum(order * diag_elems.log(), dim=-1) + # Compute normalization constant (page 1999 of [1]) + dm1 = self.dim - 1 + alpha = self.concentration + 0.5 * dm1 + denominator = torch.lgamma(alpha) * dm1 + numerator = torch.mvlgamma(alpha - 0.5, dm1) + # pi_constant in [1] is D * (D - 1) / 4 * log(pi) + # pi_constant in multigammaln is (D - 1) * (D - 2) / 4 * log(pi) + # hence, we need to add a pi_constant = (D - 1) * log(pi) / 2 + pi_constant = 0.5 * dm1 * math.log(math.pi) + normalize_term = pi_constant + numerator - denominator + return unnormalized_log_pdf - normalize_term diff --git a/env-llmeval/lib/python3.10/site-packages/torch/distributions/logistic_normal.py b/env-llmeval/lib/python3.10/site-packages/torch/distributions/logistic_normal.py new file mode 100644 index 
0000000000000000000000000000000000000000..a9ef4dd265642ae4cdf49ed046f22a5a4a20119f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/distributions/logistic_normal.py @@ -0,0 +1,54 @@ +from torch.distributions import constraints +from torch.distributions.normal import Normal +from torch.distributions.transformed_distribution import TransformedDistribution +from torch.distributions.transforms import StickBreakingTransform + +__all__ = ["LogisticNormal"] + + +class LogisticNormal(TransformedDistribution): + r""" + Creates a logistic-normal distribution parameterized by :attr:`loc` and :attr:`scale` + that define the base `Normal` distribution transformed with the + `StickBreakingTransform` such that:: + + X ~ LogisticNormal(loc, scale) + Y = log(X / (1 - X.cumsum(-1)))[..., :-1] ~ Normal(loc, scale) + + Args: + loc (float or Tensor): mean of the base distribution + scale (float or Tensor): standard deviation of the base distribution + + Example:: + + >>> # logistic-normal distributed with mean=(0, 0, 0) and stddev=(1, 1, 1) + >>> # of the base Normal distribution + >>> # xdoctest: +IGNORE_WANT("non-deterministic") + >>> m = LogisticNormal(torch.tensor([0.0] * 3), torch.tensor([1.0] * 3)) + >>> m.sample() + tensor([ 0.7653, 0.0341, 0.0579, 0.1427]) + + """ + arg_constraints = {"loc": constraints.real, "scale": constraints.positive} + support = constraints.simplex + has_rsample = True + + def __init__(self, loc, scale, validate_args=None): + base_dist = Normal(loc, scale, validate_args=validate_args) + if not base_dist.batch_shape: + base_dist = base_dist.expand([1]) + super().__init__( + base_dist, StickBreakingTransform(), validate_args=validate_args + ) + + def expand(self, batch_shape, _instance=None): + new = self._get_checked_instance(LogisticNormal, _instance) + return super().expand(batch_shape, _instance=new) + + @property + def loc(self): + return self.base_dist.base_dist.loc + + @property + def scale(self): + return self.base_dist.base_dist.scale diff --git a/env-llmeval/lib/python3.10/site-packages/torch/distributions/negative_binomial.py b/env-llmeval/lib/python3.10/site-packages/torch/distributions/negative_binomial.py new file mode 100644 index 0000000000000000000000000000000000000000..59edee589f9ae03f87eb38672745f5cb8fd0bcb5 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/distributions/negative_binomial.py @@ -0,0 +1,133 @@ +import torch +import torch.nn.functional as F +from torch.distributions import constraints +from torch.distributions.distribution import Distribution +from torch.distributions.utils import ( + broadcast_all, + lazy_property, + logits_to_probs, + probs_to_logits, +) + +__all__ = ["NegativeBinomial"] + + +class NegativeBinomial(Distribution): + r""" + Creates a Negative Binomial distribution, i.e. distribution + of the number of successful independent and identical Bernoulli trials + before :attr:`total_count` failures are achieved. The probability + of success of each Bernoulli trial is :attr:`probs`. 
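+ + Example:: + + >>> # xdoctest: +SKIP("illustrative usage") + >>> m = NegativeBinomial(torch.tensor([10.0]), probs=torch.tensor([0.5])) + >>> m.sample() # count of successes observed before 10 failures occur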
+ + Args: + total_count (float or Tensor): non-negative number of negative Bernoulli + trials to stop, although the distribution is still valid for real + valued count + probs (Tensor): Event probabilities of success in the half open interval [0, 1) + logits (Tensor): Event log-odds for probabilities of success + """ + arg_constraints = { + "total_count": constraints.greater_than_eq(0), + "probs": constraints.half_open_interval(0.0, 1.0), + "logits": constraints.real, + } + support = constraints.nonnegative_integer + + def __init__(self, total_count, probs=None, logits=None, validate_args=None): + if (probs is None) == (logits is None): + raise ValueError( + "Either `probs` or `logits` must be specified, but not both." + ) + if probs is not None: + ( + self.total_count, + self.probs, + ) = broadcast_all(total_count, probs) + self.total_count = self.total_count.type_as(self.probs) + else: + ( + self.total_count, + self.logits, + ) = broadcast_all(total_count, logits) + self.total_count = self.total_count.type_as(self.logits) + + self._param = self.probs if probs is not None else self.logits + batch_shape = self._param.size() + super().__init__(batch_shape, validate_args=validate_args) + + def expand(self, batch_shape, _instance=None): + new = self._get_checked_instance(NegativeBinomial, _instance) + batch_shape = torch.Size(batch_shape) + new.total_count = self.total_count.expand(batch_shape) + if "probs" in self.__dict__: + new.probs = self.probs.expand(batch_shape) + new._param = new.probs + if "logits" in self.__dict__: + new.logits = self.logits.expand(batch_shape) + new._param = new.logits + super(NegativeBinomial, new).__init__(batch_shape, validate_args=False) + new._validate_args = self._validate_args + return new + + def _new(self, *args, **kwargs): + return self._param.new(*args, **kwargs) + + @property + def mean(self): + return self.total_count * torch.exp(self.logits) + + @property + def mode(self): + return ((self.total_count - 1) * self.logits.exp()).floor().clamp(min=0.0) + + @property + def variance(self): + return self.mean / torch.sigmoid(-self.logits) + + @lazy_property + def logits(self): + return probs_to_logits(self.probs, is_binary=True) + + @lazy_property + def probs(self): + return logits_to_probs(self.logits, is_binary=True) + + @property + def param_shape(self): + return self._param.size() + + @lazy_property + def _gamma(self): + # Note we avoid validating because self.total_count can be zero. + return torch.distributions.Gamma( + concentration=self.total_count, + rate=torch.exp(-self.logits), + validate_args=False, + ) + + def sample(self, sample_shape=torch.Size()): + with torch.no_grad(): + rate = self._gamma.sample(sample_shape=sample_shape) + return torch.poisson(rate) + + def log_prob(self, value): + if self._validate_args: + self._validate_sample(value) + + log_unnormalized_prob = self.total_count * F.logsigmoid( + -self.logits + ) + value * F.logsigmoid(self.logits) + + log_normalization = ( + -torch.lgamma(self.total_count + value) + + torch.lgamma(1.0 + value) + + torch.lgamma(self.total_count) + ) + # The case self.total_count == 0 and value == 0 has probability 1 but + # lgamma(0) is infinite. Handle this case separately using a function + # that does not modify tensors in place to allow Jit compilation. 
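+ # Up to that special case, the value returned below equals lgamma(total_count + value) + # - lgamma(1 + value) - lgamma(total_count) + total_count * log(1 - probs) + value * log(probs).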
+ log_normalization = log_normalization.masked_fill( + self.total_count + value == 0.0, 0.0 + ) + + return log_unnormalized_prob - log_normalization diff --git a/env-llmeval/lib/python3.10/site-packages/torch/distributions/poisson.py b/env-llmeval/lib/python3.10/site-packages/torch/distributions/poisson.py new file mode 100644 index 0000000000000000000000000000000000000000..81c0898a577be5489a54c5e887b41a561f42037b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/distributions/poisson.py @@ -0,0 +1,77 @@ +from numbers import Number + +import torch +from torch.distributions import constraints +from torch.distributions.exp_family import ExponentialFamily +from torch.distributions.utils import broadcast_all + +__all__ = ["Poisson"] + + +class Poisson(ExponentialFamily): + r""" + Creates a Poisson distribution parameterized by :attr:`rate`, the rate parameter. + + Samples are nonnegative integers, with a pmf given by + + .. math:: + \mathrm{rate}^k \frac{e^{-\mathrm{rate}}}{k!} + + Example:: + + >>> # xdoctest: +SKIP("poisson_cpu not implemented for 'Long'") + >>> m = Poisson(torch.tensor([4])) + >>> m.sample() + tensor([ 3.]) + + Args: + rate (Number, Tensor): the rate parameter + """ + arg_constraints = {"rate": constraints.nonnegative} + support = constraints.nonnegative_integer + + @property + def mean(self): + return self.rate + + @property + def mode(self): + return self.rate.floor() + + @property + def variance(self): + return self.rate + + def __init__(self, rate, validate_args=None): + (self.rate,) = broadcast_all(rate) + if isinstance(rate, Number): + batch_shape = torch.Size() + else: + batch_shape = self.rate.size() + super().__init__(batch_shape, validate_args=validate_args) + + def expand(self, batch_shape, _instance=None): + new = self._get_checked_instance(Poisson, _instance) + batch_shape = torch.Size(batch_shape) + new.rate = self.rate.expand(batch_shape) + super(Poisson, new).__init__(batch_shape, validate_args=False) + new._validate_args = self._validate_args + return new + + def sample(self, sample_shape=torch.Size()): + shape = self._extended_shape(sample_shape) + with torch.no_grad(): + return torch.poisson(self.rate.expand(shape)) + + def log_prob(self, value): + if self._validate_args: + self._validate_sample(value) + rate, value = broadcast_all(self.rate, value) + return value.xlogy(rate) - rate - (value + 1).lgamma() + + @property + def _natural_params(self): + return (torch.log(self.rate),) + + def _log_normalizer(self, x): + return torch.exp(x) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/distributions/studentT.py b/env-llmeval/lib/python3.10/site-packages/torch/distributions/studentT.py new file mode 100644 index 0000000000000000000000000000000000000000..553144e2643b4d738c589394fddd7158d22cfc8c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/distributions/studentT.py @@ -0,0 +1,116 @@ +import math + +import torch +from torch import inf, nan +from torch.distributions import Chi2, constraints +from torch.distributions.distribution import Distribution +from torch.distributions.utils import _standard_normal, broadcast_all + +__all__ = ["StudentT"] + + +class StudentT(Distribution): + r""" + Creates a Student's t-distribution parameterized by degree of + freedom :attr:`df`, mean :attr:`loc` and scale :attr:`scale`. 
+ + Example:: + + >>> # xdoctest: +IGNORE_WANT("non-deterministic") + >>> m = StudentT(torch.tensor([2.0])) + >>> m.sample() # Student's t-distributed with degrees of freedom=2 + tensor([ 0.1046]) + + Args: + df (float or Tensor): degrees of freedom + loc (float or Tensor): mean of the distribution + scale (float or Tensor): scale of the distribution + """ + arg_constraints = { + "df": constraints.positive, + "loc": constraints.real, + "scale": constraints.positive, + } + support = constraints.real + has_rsample = True + + @property + def mean(self): + m = self.loc.clone(memory_format=torch.contiguous_format) + m[self.df <= 1] = nan + return m + + @property + def mode(self): + return self.loc + + @property + def variance(self): + m = self.df.clone(memory_format=torch.contiguous_format) + m[self.df > 2] = ( + self.scale[self.df > 2].pow(2) + * self.df[self.df > 2] + / (self.df[self.df > 2] - 2) + ) + m[(self.df <= 2) & (self.df > 1)] = inf + m[self.df <= 1] = nan + return m + + def __init__(self, df, loc=0.0, scale=1.0, validate_args=None): + self.df, self.loc, self.scale = broadcast_all(df, loc, scale) + self._chi2 = Chi2(self.df) + batch_shape = self.df.size() + super().__init__(batch_shape, validate_args=validate_args) + + def expand(self, batch_shape, _instance=None): + new = self._get_checked_instance(StudentT, _instance) + batch_shape = torch.Size(batch_shape) + new.df = self.df.expand(batch_shape) + new.loc = self.loc.expand(batch_shape) + new.scale = self.scale.expand(batch_shape) + new._chi2 = self._chi2.expand(batch_shape) + super(StudentT, new).__init__(batch_shape, validate_args=False) + new._validate_args = self._validate_args + return new + + def rsample(self, sample_shape=torch.Size()): + # NOTE: This does not agree with scipy implementation as much as other distributions. + # (see https://github.com/fritzo/notebooks/blob/master/debug-student-t.ipynb). Using DoubleTensor + # parameters seems to help. 
+ + # X ~ Normal(0, 1) + # Z ~ Chi2(df) + # Y = X / sqrt(Z / df) ~ StudentT(df) + shape = self._extended_shape(sample_shape) + X = _standard_normal(shape, dtype=self.df.dtype, device=self.df.device) + Z = self._chi2.rsample(sample_shape) + Y = X * torch.rsqrt(Z / self.df) + return self.loc + self.scale * Y + + def log_prob(self, value): + if self._validate_args: + self._validate_sample(value) + y = (value - self.loc) / self.scale + Z = ( + self.scale.log() + + 0.5 * self.df.log() + + 0.5 * math.log(math.pi) + + torch.lgamma(0.5 * self.df) + - torch.lgamma(0.5 * (self.df + 1.0)) + ) + return -0.5 * (self.df + 1.0) * torch.log1p(y**2.0 / self.df) - Z + + def entropy(self): + lbeta = ( + torch.lgamma(0.5 * self.df) + + math.lgamma(0.5) + - torch.lgamma(0.5 * (self.df + 1)) + ) + return ( + self.scale.log() + + 0.5 + * (self.df + 1) + * (torch.digamma(0.5 * (self.df + 1)) - torch.digamma(0.5 * self.df)) + + 0.5 * self.df.log() + + lbeta + ) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/distributions/transformed_distribution.py b/env-llmeval/lib/python3.10/site-packages/torch/distributions/transformed_distribution.py new file mode 100644 index 0000000000000000000000000000000000000000..060909f38ad06580550d3b5114bbdadd742cb4f9 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/distributions/transformed_distribution.py @@ -0,0 +1,215 @@ +from typing import Dict + +import torch +from torch.distributions import constraints +from torch.distributions.distribution import Distribution +from torch.distributions.independent import Independent +from torch.distributions.transforms import ComposeTransform, Transform +from torch.distributions.utils import _sum_rightmost + +__all__ = ["TransformedDistribution"] + + +class TransformedDistribution(Distribution): + r""" + Extension of the Distribution class, which applies a sequence of Transforms + to a base distribution. Let f be the composition of transforms applied:: + + X ~ BaseDistribution + Y = f(X) ~ TransformedDistribution(BaseDistribution, f) + log p(Y) = log p(X) + log |det (dX/dY)| + + Note that the ``.event_shape`` of a :class:`TransformedDistribution` is the + maximum shape of its base distribution and its transforms, since transforms + can introduce correlations among events. 
+ + An example for the usage of :class:`TransformedDistribution` would be:: + + # Building a Logistic Distribution + # X ~ Uniform(0, 1) + # f = a + b * logit(X) + # Y ~ f(X) ~ Logistic(a, b) + base_distribution = Uniform(0, 1) + transforms = [SigmoidTransform().inv, AffineTransform(loc=a, scale=b)] + logistic = TransformedDistribution(base_distribution, transforms) + + For more examples, please look at the implementations of + :class:`~torch.distributions.gumbel.Gumbel`, + :class:`~torch.distributions.half_cauchy.HalfCauchy`, + :class:`~torch.distributions.half_normal.HalfNormal`, + :class:`~torch.distributions.log_normal.LogNormal`, + :class:`~torch.distributions.pareto.Pareto`, + :class:`~torch.distributions.weibull.Weibull`, + :class:`~torch.distributions.relaxed_bernoulli.RelaxedBernoulli` and + :class:`~torch.distributions.relaxed_categorical.RelaxedOneHotCategorical` + """ + arg_constraints: Dict[str, constraints.Constraint] = {} + + def __init__(self, base_distribution, transforms, validate_args=None): + if isinstance(transforms, Transform): + self.transforms = [ + transforms, + ] + elif isinstance(transforms, list): + if not all(isinstance(t, Transform) for t in transforms): + raise ValueError( + "transforms must be a Transform or a list of Transforms" + ) + self.transforms = transforms + else: + raise ValueError( + f"transforms must be a Transform or list, but was {transforms}" + ) + + # Reshape base_distribution according to transforms. + base_shape = base_distribution.batch_shape + base_distribution.event_shape + base_event_dim = len(base_distribution.event_shape) + transform = ComposeTransform(self.transforms) + if len(base_shape) < transform.domain.event_dim: + raise ValueError( + "base_distribution needs to have shape with size at least {}, but got {}.".format( + transform.domain.event_dim, base_shape + ) + ) + forward_shape = transform.forward_shape(base_shape) + expanded_base_shape = transform.inverse_shape(forward_shape) + if base_shape != expanded_base_shape: + base_batch_shape = expanded_base_shape[ + : len(expanded_base_shape) - base_event_dim + ] + base_distribution = base_distribution.expand(base_batch_shape) + reinterpreted_batch_ndims = transform.domain.event_dim - base_event_dim + if reinterpreted_batch_ndims > 0: + base_distribution = Independent( + base_distribution, reinterpreted_batch_ndims + ) + self.base_dist = base_distribution + + # Compute shapes. 
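+ # The final event_dim is the larger of what the composed transform's codomain requires and + # what the base distribution contributes after the transform's change in event dims; the + # leading entries of forward_shape then form the batch shape.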
+ transform_change_in_event_dim = ( + transform.codomain.event_dim - transform.domain.event_dim + ) + event_dim = max( + transform.codomain.event_dim, # the transform is coupled + base_event_dim + transform_change_in_event_dim, # the base dist is coupled + ) + assert len(forward_shape) >= event_dim + cut = len(forward_shape) - event_dim + batch_shape = forward_shape[:cut] + event_shape = forward_shape[cut:] + super().__init__(batch_shape, event_shape, validate_args=validate_args) + + def expand(self, batch_shape, _instance=None): + new = self._get_checked_instance(TransformedDistribution, _instance) + batch_shape = torch.Size(batch_shape) + shape = batch_shape + self.event_shape + for t in reversed(self.transforms): + shape = t.inverse_shape(shape) + base_batch_shape = shape[: len(shape) - len(self.base_dist.event_shape)] + new.base_dist = self.base_dist.expand(base_batch_shape) + new.transforms = self.transforms + super(TransformedDistribution, new).__init__( + batch_shape, self.event_shape, validate_args=False + ) + new._validate_args = self._validate_args + return new + + @constraints.dependent_property(is_discrete=False) + def support(self): + if not self.transforms: + return self.base_dist.support + support = self.transforms[-1].codomain + if len(self.event_shape) > support.event_dim: + support = constraints.independent( + support, len(self.event_shape) - support.event_dim + ) + return support + + @property + def has_rsample(self): + return self.base_dist.has_rsample + + def sample(self, sample_shape=torch.Size()): + """ + Generates a sample_shape shaped sample or sample_shape shaped batch of + samples if the distribution parameters are batched. Samples first from + base distribution and applies `transform()` for every transform in the + list. + """ + with torch.no_grad(): + x = self.base_dist.sample(sample_shape) + for transform in self.transforms: + x = transform(x) + return x + + def rsample(self, sample_shape=torch.Size()): + """ + Generates a sample_shape shaped reparameterized sample or sample_shape + shaped batch of reparameterized samples if the distribution parameters + are batched. Samples first from base distribution and applies + `transform()` for every transform in the list. + """ + x = self.base_dist.rsample(sample_shape) + for transform in self.transforms: + x = transform(x) + return x + + def log_prob(self, value): + """ + Scores the sample by inverting the transform(s) and computing the score + using the score of the base distribution and the log abs det jacobian. + """ + if self._validate_args: + self._validate_sample(value) + event_dim = len(self.event_shape) + log_prob = 0.0 + y = value + for transform in reversed(self.transforms): + x = transform.inv(y) + event_dim += transform.domain.event_dim - transform.codomain.event_dim + log_prob = log_prob - _sum_rightmost( + transform.log_abs_det_jacobian(x, y), + event_dim - transform.domain.event_dim, + ) + y = x + + log_prob = log_prob + _sum_rightmost( + self.base_dist.log_prob(y), event_dim - len(self.base_dist.event_shape) + ) + return log_prob + + def _monotonize_cdf(self, value): + """ + This conditionally flips ``value -> 1-value`` to ensure :meth:`cdf` is + monotone increasing. + """ + sign = 1 + for transform in self.transforms: + sign = sign * transform.sign + if isinstance(sign, int) and sign == 1: + return value + return sign * (value - 0.5) + 0.5 + + def cdf(self, value): + """ + Computes the cumulative distribution function by inverting the + transform(s) and computing the score of the base distribution. 
+ """ + for transform in self.transforms[::-1]: + value = transform.inv(value) + if self._validate_args: + self.base_dist._validate_sample(value) + value = self.base_dist.cdf(value) + value = self._monotonize_cdf(value) + return value + + def icdf(self, value): + """ + Computes the inverse cumulative distribution function using + transform(s) and computing the score of the base distribution. + """ + value = self._monotonize_cdf(value) + value = self.base_dist.icdf(value) + for transform in self.transforms: + value = transform(value) + return value diff --git a/env-llmeval/lib/python3.10/site-packages/torch/distributions/transforms.py b/env-llmeval/lib/python3.10/site-packages/torch/distributions/transforms.py new file mode 100644 index 0000000000000000000000000000000000000000..f2907caa60180a93b9ebf62354479db4292e2c95 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/distributions/transforms.py @@ -0,0 +1,1245 @@ +import functools +import math +import numbers +import operator +import weakref +from typing import List + +import torch +import torch.nn.functional as F +from torch.distributions import constraints +from torch.distributions.utils import ( + _sum_rightmost, + broadcast_all, + lazy_property, + tril_matrix_to_vec, + vec_to_tril_matrix, +) +from torch.nn.functional import pad, softplus + +__all__ = [ + "AbsTransform", + "AffineTransform", + "CatTransform", + "ComposeTransform", + "CorrCholeskyTransform", + "CumulativeDistributionTransform", + "ExpTransform", + "IndependentTransform", + "LowerCholeskyTransform", + "PositiveDefiniteTransform", + "PowerTransform", + "ReshapeTransform", + "SigmoidTransform", + "SoftplusTransform", + "TanhTransform", + "SoftmaxTransform", + "StackTransform", + "StickBreakingTransform", + "Transform", + "identity_transform", +] + + +class Transform: + """ + Abstract class for invertable transformations with computable log + det jacobians. They are primarily used in + :class:`torch.distributions.TransformedDistribution`. + + Caching is useful for transforms whose inverses are either expensive or + numerically unstable. Note that care must be taken with memoized values + since the autograd graph may be reversed. For example while the following + works with or without caching:: + + y = t(x) + t.log_abs_det_jacobian(x, y).backward() # x will receive gradients. + + However the following will error when caching due to dependency reversal:: + + y = t(x) + z = t.inv(y) + grad(z.sum(), [y]) # error because z is x + + Derived classes should implement one or both of :meth:`_call` or + :meth:`_inverse`. Derived classes that set `bijective=True` should also + implement :meth:`log_abs_det_jacobian`. + + Args: + cache_size (int): Size of cache. If zero, no caching is done. If one, + the latest single value is cached. Only 0 and 1 are supported. + + Attributes: + domain (:class:`~torch.distributions.constraints.Constraint`): + The constraint representing valid inputs to this transform. + codomain (:class:`~torch.distributions.constraints.Constraint`): + The constraint representing valid outputs to this transform + which are inputs to the inverse transform. + bijective (bool): Whether this transform is bijective. A transform + ``t`` is bijective iff ``t.inv(t(x)) == x`` and + ``t(t.inv(y)) == y`` for every ``x`` in the domain and ``y`` in + the codomain. Transforms that are not bijective should at least + maintain the weaker pseudoinverse properties + ``t(t.inv(t(x)) == t(x)`` and ``t.inv(t(t.inv(y))) == t.inv(y)``. 
+ sign (int or Tensor): For bijective univariate transforms, this + should be +1 or -1 depending on whether transform is monotone + increasing or decreasing. + """ + + bijective = False + domain: constraints.Constraint + codomain: constraints.Constraint + + def __init__(self, cache_size=0): + self._cache_size = cache_size + self._inv = None + if cache_size == 0: + pass # default behavior + elif cache_size == 1: + self._cached_x_y = None, None + else: + raise ValueError("cache_size must be 0 or 1") + super().__init__() + + def __getstate__(self): + state = self.__dict__.copy() + state["_inv"] = None + return state + + @property + def event_dim(self): + if self.domain.event_dim == self.codomain.event_dim: + return self.domain.event_dim + raise ValueError("Please use either .domain.event_dim or .codomain.event_dim") + + @property + def inv(self): + """ + Returns the inverse :class:`Transform` of this transform. + This should satisfy ``t.inv.inv is t``. + """ + inv = None + if self._inv is not None: + inv = self._inv() + if inv is None: + inv = _InverseTransform(self) + self._inv = weakref.ref(inv) + return inv + + @property + def sign(self): + """ + Returns the sign of the determinant of the Jacobian, if applicable. + In general this only makes sense for bijective transforms. + """ + raise NotImplementedError + + def with_cache(self, cache_size=1): + if self._cache_size == cache_size: + return self + if type(self).__init__ is Transform.__init__: + return type(self)(cache_size=cache_size) + raise NotImplementedError(f"{type(self)}.with_cache is not implemented") + + def __eq__(self, other): + return self is other + + def __ne__(self, other): + # Necessary for Python2 + return not self.__eq__(other) + + def __call__(self, x): + """ + Computes the transform `x => y`. + """ + if self._cache_size == 0: + return self._call(x) + x_old, y_old = self._cached_x_y + if x is x_old: + return y_old + y = self._call(x) + self._cached_x_y = x, y + return y + + def _inv_call(self, y): + """ + Inverts the transform `y => x`. + """ + if self._cache_size == 0: + return self._inverse(y) + x_old, y_old = self._cached_x_y + if y is y_old: + return x_old + x = self._inverse(y) + self._cached_x_y = x, y + return x + + def _call(self, x): + """ + Abstract method to compute forward transformation. + """ + raise NotImplementedError + + def _inverse(self, y): + """ + Abstract method to compute inverse transformation. + """ + raise NotImplementedError + + def log_abs_det_jacobian(self, x, y): + """ + Computes the log det jacobian `log |dy/dx|` given input and output. + """ + raise NotImplementedError + + def __repr__(self): + return self.__class__.__name__ + "()" + + def forward_shape(self, shape): + """ + Infers the shape of the forward computation, given the input shape. + Defaults to preserving shape. + """ + return shape + + def inverse_shape(self, shape): + """ + Infers the shapes of the inverse computation, given the output shape. + Defaults to preserving shape. + """ + return shape + + +class _InverseTransform(Transform): + """ + Inverts a single :class:`Transform`. + This class is private; please instead use the ``Transform.inv`` property. 
+ """ + + def __init__(self, transform: Transform): + super().__init__(cache_size=transform._cache_size) + self._inv: Transform = transform + + @constraints.dependent_property(is_discrete=False) + def domain(self): + assert self._inv is not None + return self._inv.codomain + + @constraints.dependent_property(is_discrete=False) + def codomain(self): + assert self._inv is not None + return self._inv.domain + + @property + def bijective(self): + assert self._inv is not None + return self._inv.bijective + + @property + def sign(self): + assert self._inv is not None + return self._inv.sign + + @property + def inv(self): + return self._inv + + def with_cache(self, cache_size=1): + assert self._inv is not None + return self.inv.with_cache(cache_size).inv + + def __eq__(self, other): + if not isinstance(other, _InverseTransform): + return False + assert self._inv is not None + return self._inv == other._inv + + def __repr__(self): + return f"{self.__class__.__name__}({repr(self._inv)})" + + def __call__(self, x): + assert self._inv is not None + return self._inv._inv_call(x) + + def log_abs_det_jacobian(self, x, y): + assert self._inv is not None + return -self._inv.log_abs_det_jacobian(y, x) + + def forward_shape(self, shape): + return self._inv.inverse_shape(shape) + + def inverse_shape(self, shape): + return self._inv.forward_shape(shape) + + +class ComposeTransform(Transform): + """ + Composes multiple transforms in a chain. + The transforms being composed are responsible for caching. + + Args: + parts (list of :class:`Transform`): A list of transforms to compose. + cache_size (int): Size of cache. If zero, no caching is done. If one, + the latest single value is cached. Only 0 and 1 are supported. + """ + + def __init__(self, parts: List[Transform], cache_size=0): + if cache_size: + parts = [part.with_cache(cache_size) for part in parts] + super().__init__(cache_size=cache_size) + self.parts = parts + + def __eq__(self, other): + if not isinstance(other, ComposeTransform): + return False + return self.parts == other.parts + + @constraints.dependent_property(is_discrete=False) + def domain(self): + if not self.parts: + return constraints.real + domain = self.parts[0].domain + # Adjust event_dim to be maximum among all parts. + event_dim = self.parts[-1].codomain.event_dim + for part in reversed(self.parts): + event_dim += part.domain.event_dim - part.codomain.event_dim + event_dim = max(event_dim, part.domain.event_dim) + assert event_dim >= domain.event_dim + if event_dim > domain.event_dim: + domain = constraints.independent(domain, event_dim - domain.event_dim) + return domain + + @constraints.dependent_property(is_discrete=False) + def codomain(self): + if not self.parts: + return constraints.real + codomain = self.parts[-1].codomain + # Adjust event_dim to be maximum among all parts. 
+ event_dim = self.parts[0].domain.event_dim + for part in self.parts: + event_dim += part.codomain.event_dim - part.domain.event_dim + event_dim = max(event_dim, part.codomain.event_dim) + assert event_dim >= codomain.event_dim + if event_dim > codomain.event_dim: + codomain = constraints.independent(codomain, event_dim - codomain.event_dim) + return codomain + + @lazy_property + def bijective(self): + return all(p.bijective for p in self.parts) + + @lazy_property + def sign(self): + sign = 1 + for p in self.parts: + sign = sign * p.sign + return sign + + @property + def inv(self): + inv = None + if self._inv is not None: + inv = self._inv() + if inv is None: + inv = ComposeTransform([p.inv for p in reversed(self.parts)]) + self._inv = weakref.ref(inv) + inv._inv = weakref.ref(self) + return inv + + def with_cache(self, cache_size=1): + if self._cache_size == cache_size: + return self + return ComposeTransform(self.parts, cache_size=cache_size) + + def __call__(self, x): + for part in self.parts: + x = part(x) + return x + + def log_abs_det_jacobian(self, x, y): + if not self.parts: + return torch.zeros_like(x) + + # Compute intermediates. This will be free if parts[:-1] are all cached. + xs = [x] + for part in self.parts[:-1]: + xs.append(part(xs[-1])) + xs.append(y) + + terms = [] + event_dim = self.domain.event_dim + for part, x, y in zip(self.parts, xs[:-1], xs[1:]): + terms.append( + _sum_rightmost( + part.log_abs_det_jacobian(x, y), event_dim - part.domain.event_dim + ) + ) + event_dim += part.codomain.event_dim - part.domain.event_dim + return functools.reduce(operator.add, terms) + + def forward_shape(self, shape): + for part in self.parts: + shape = part.forward_shape(shape) + return shape + + def inverse_shape(self, shape): + for part in reversed(self.parts): + shape = part.inverse_shape(shape) + return shape + + def __repr__(self): + fmt_string = self.__class__.__name__ + "(\n " + fmt_string += ",\n ".join([p.__repr__() for p in self.parts]) + fmt_string += "\n)" + return fmt_string + + +identity_transform = ComposeTransform([]) + + +class IndependentTransform(Transform): + """ + Wrapper around another transform to treat + ``reinterpreted_batch_ndims``-many extra of the right most dimensions as + dependent. This has no effect on the forward or backward transforms, but + does sum out ``reinterpreted_batch_ndims``-many of the rightmost dimensions + in :meth:`log_abs_det_jacobian`. + + Args: + base_transform (:class:`Transform`): A base transform. + reinterpreted_batch_ndims (int): The number of extra rightmost + dimensions to treat as dependent. 
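A small usage sketch (illustrative only, relying on the public classes above): chaining parts with `ComposeTransform`, then wrapping the result in `IndependentTransform` so the log-det is summed over the rightmost dimension. The shapes here are arbitrary examples.

import torch
from torch.distributions.transforms import (
    AffineTransform,
    ComposeTransform,
    ExpTransform,
    IndependentTransform,
)

# y = exp(2x + 1): the affine part is applied first, then the exponential.
t = ComposeTransform([AffineTransform(loc=1.0, scale=2.0), ExpTransform()])
x = torch.randn(4, 3)
y = t(x)
print(t.log_abs_det_jacobian(x, y).shape)   # torch.Size([4, 3]), elementwise terms

# Treat the last dimension as part of the event: values are unchanged, but the
# log-det is summed over that dimension.
ti = IndependentTransform(t, reinterpreted_batch_ndims=1)
print(ti.log_abs_det_jacobian(x, y).shape)  # torch.Size([4])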
+ """ + + def __init__(self, base_transform, reinterpreted_batch_ndims, cache_size=0): + super().__init__(cache_size=cache_size) + self.base_transform = base_transform.with_cache(cache_size) + self.reinterpreted_batch_ndims = reinterpreted_batch_ndims + + def with_cache(self, cache_size=1): + if self._cache_size == cache_size: + return self + return IndependentTransform( + self.base_transform, self.reinterpreted_batch_ndims, cache_size=cache_size + ) + + @constraints.dependent_property(is_discrete=False) + def domain(self): + return constraints.independent( + self.base_transform.domain, self.reinterpreted_batch_ndims + ) + + @constraints.dependent_property(is_discrete=False) + def codomain(self): + return constraints.independent( + self.base_transform.codomain, self.reinterpreted_batch_ndims + ) + + @property + def bijective(self): + return self.base_transform.bijective + + @property + def sign(self): + return self.base_transform.sign + + def _call(self, x): + if x.dim() < self.domain.event_dim: + raise ValueError("Too few dimensions on input") + return self.base_transform(x) + + def _inverse(self, y): + if y.dim() < self.codomain.event_dim: + raise ValueError("Too few dimensions on input") + return self.base_transform.inv(y) + + def log_abs_det_jacobian(self, x, y): + result = self.base_transform.log_abs_det_jacobian(x, y) + result = _sum_rightmost(result, self.reinterpreted_batch_ndims) + return result + + def __repr__(self): + return f"{self.__class__.__name__}({repr(self.base_transform)}, {self.reinterpreted_batch_ndims})" + + def forward_shape(self, shape): + return self.base_transform.forward_shape(shape) + + def inverse_shape(self, shape): + return self.base_transform.inverse_shape(shape) + + +class ReshapeTransform(Transform): + """ + Unit Jacobian transform to reshape the rightmost part of a tensor. + + Note that ``in_shape`` and ``out_shape`` must have the same number of + elements, just as for :meth:`torch.Tensor.reshape`. + + Arguments: + in_shape (torch.Size): The input event shape. + out_shape (torch.Size): The output event shape. 
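A brief illustrative sketch of `ReshapeTransform` as documented above; the shapes are arbitrary examples, with the leading dimension acting as a batch dimension.

import torch
from torch.distributions.transforms import ReshapeTransform

t = ReshapeTransform(in_shape=(6,), out_shape=(2, 3))
x = torch.randn(5, 6)                   # leading dim is a batch dim
y = t(x)
print(y.shape)                          # torch.Size([5, 2, 3])
print(t.inv(y).shape)                   # torch.Size([5, 6])
print(t.log_abs_det_jacobian(x, y))     # zeros of shape (5,): unit Jacobian
print(t.forward_shape((5, 6)))          # (5, 2, 3)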
+ """ + + bijective = True + + def __init__(self, in_shape, out_shape, cache_size=0): + self.in_shape = torch.Size(in_shape) + self.out_shape = torch.Size(out_shape) + if self.in_shape.numel() != self.out_shape.numel(): + raise ValueError("in_shape, out_shape have different numbers of elements") + super().__init__(cache_size=cache_size) + + @constraints.dependent_property + def domain(self): + return constraints.independent(constraints.real, len(self.in_shape)) + + @constraints.dependent_property + def codomain(self): + return constraints.independent(constraints.real, len(self.out_shape)) + + def with_cache(self, cache_size=1): + if self._cache_size == cache_size: + return self + return ReshapeTransform(self.in_shape, self.out_shape, cache_size=cache_size) + + def _call(self, x): + batch_shape = x.shape[: x.dim() - len(self.in_shape)] + return x.reshape(batch_shape + self.out_shape) + + def _inverse(self, y): + batch_shape = y.shape[: y.dim() - len(self.out_shape)] + return y.reshape(batch_shape + self.in_shape) + + def log_abs_det_jacobian(self, x, y): + batch_shape = x.shape[: x.dim() - len(self.in_shape)] + return x.new_zeros(batch_shape) + + def forward_shape(self, shape): + if len(shape) < len(self.in_shape): + raise ValueError("Too few dimensions on input") + cut = len(shape) - len(self.in_shape) + if shape[cut:] != self.in_shape: + raise ValueError( + f"Shape mismatch: expected {shape[cut:]} but got {self.in_shape}" + ) + return shape[:cut] + self.out_shape + + def inverse_shape(self, shape): + if len(shape) < len(self.out_shape): + raise ValueError("Too few dimensions on input") + cut = len(shape) - len(self.out_shape) + if shape[cut:] != self.out_shape: + raise ValueError( + f"Shape mismatch: expected {shape[cut:]} but got {self.out_shape}" + ) + return shape[:cut] + self.in_shape + + +class ExpTransform(Transform): + r""" + Transform via the mapping :math:`y = \exp(x)`. + """ + domain = constraints.real + codomain = constraints.positive + bijective = True + sign = +1 + + def __eq__(self, other): + return isinstance(other, ExpTransform) + + def _call(self, x): + return x.exp() + + def _inverse(self, y): + return y.log() + + def log_abs_det_jacobian(self, x, y): + return x + + +class PowerTransform(Transform): + r""" + Transform via the mapping :math:`y = x^{\text{exponent}}`. 
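Illustrative only: the two positivity-related transforms just described, showing that `ExpTransform`'s log-det is simply the input and that `PowerTransform` accepts a tensor exponent. The particular values are arbitrary.

import torch
from torch.distributions.transforms import ExpTransform, PowerTransform

x = torch.randn(4)
exp_t = ExpTransform()
y = exp_t(x)
# For y = exp(x), log |dy/dx| is just x.
assert torch.allclose(exp_t.log_abs_det_jacobian(x, y), x)

pow_t = PowerTransform(exponent=torch.tensor(2.0))
z = torch.rand(4) + 0.1                 # PowerTransform acts on positive inputs
assert torch.allclose(pow_t.inv(pow_t(z)), z)
print(pow_t.sign)                       # tensor(1.): increasing for a positive exponent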
+ """ + domain = constraints.positive + codomain = constraints.positive + bijective = True + + def __init__(self, exponent, cache_size=0): + super().__init__(cache_size=cache_size) + (self.exponent,) = broadcast_all(exponent) + + def with_cache(self, cache_size=1): + if self._cache_size == cache_size: + return self + return PowerTransform(self.exponent, cache_size=cache_size) + + @lazy_property + def sign(self): + return self.exponent.sign() + + def __eq__(self, other): + if not isinstance(other, PowerTransform): + return False + return self.exponent.eq(other.exponent).all().item() + + def _call(self, x): + return x.pow(self.exponent) + + def _inverse(self, y): + return y.pow(1 / self.exponent) + + def log_abs_det_jacobian(self, x, y): + return (self.exponent * y / x).abs().log() + + def forward_shape(self, shape): + return torch.broadcast_shapes(shape, getattr(self.exponent, "shape", ())) + + def inverse_shape(self, shape): + return torch.broadcast_shapes(shape, getattr(self.exponent, "shape", ())) + + +def _clipped_sigmoid(x): + finfo = torch.finfo(x.dtype) + return torch.clamp(torch.sigmoid(x), min=finfo.tiny, max=1.0 - finfo.eps) + + +class SigmoidTransform(Transform): + r""" + Transform via the mapping :math:`y = \frac{1}{1 + \exp(-x)}` and :math:`x = \text{logit}(y)`. + """ + domain = constraints.real + codomain = constraints.unit_interval + bijective = True + sign = +1 + + def __eq__(self, other): + return isinstance(other, SigmoidTransform) + + def _call(self, x): + return _clipped_sigmoid(x) + + def _inverse(self, y): + finfo = torch.finfo(y.dtype) + y = y.clamp(min=finfo.tiny, max=1.0 - finfo.eps) + return y.log() - (-y).log1p() + + def log_abs_det_jacobian(self, x, y): + return -F.softplus(-x) - F.softplus(x) + + +class SoftplusTransform(Transform): + r""" + Transform via the mapping :math:`\text{Softplus}(x) = \log(1 + \exp(x))`. + The implementation reverts to the linear function when :math:`x > 20`. + """ + domain = constraints.real + codomain = constraints.positive + bijective = True + sign = +1 + + def __eq__(self, other): + return isinstance(other, SoftplusTransform) + + def _call(self, x): + return softplus(x) + + def _inverse(self, y): + return (-y).expm1().neg().log() + y + + def log_abs_det_jacobian(self, x, y): + return -softplus(-x) + + +class TanhTransform(Transform): + r""" + Transform via the mapping :math:`y = \tanh(x)`. + + It is equivalent to + ``` + ComposeTransform([AffineTransform(0., 2.), SigmoidTransform(), AffineTransform(-1., 2.)]) + ``` + However this might not be numerically stable, thus it is recommended to use `TanhTransform` + instead. + + Note that one should use `cache_size=1` when it comes to `NaN/Inf` values. + + """ + domain = constraints.real + codomain = constraints.interval(-1.0, 1.0) + bijective = True + sign = +1 + + def __eq__(self, other): + return isinstance(other, TanhTransform) + + def _call(self, x): + return x.tanh() + + def _inverse(self, y): + # We do not clamp to the boundary here as it may degrade the performance of certain algorithms. + # one should use `cache_size=1` instead + return torch.atanh(y) + + def log_abs_det_jacobian(self, x, y): + # We use a formula that is more numerically stable, see details in the following link + # https://github.com/tensorflow/probability/blob/master/tensorflow_probability/python/bijectors/tanh.py#L69-L80 + return 2.0 * (math.log(2.0) - x - softplus(-2.0 * x)) + + +class AbsTransform(Transform): + r""" + Transform via the mapping :math:`y = |x|`. 
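An illustrative sketch of the `cache_size=1` advice in the `TanhTransform` docstring above: with caching, inverting the just-computed output returns the original input object instead of recomputing `atanh`, which saturates to `inf` once `tanh` rounds to 1 in float32.

import torch
from torch.distributions.transforms import TanhTransform

t = TanhTransform(cache_size=1)
x = torch.tensor([0.0, 5.0, 15.0])
y = t(x)                                # tanh(15.) is exactly 1.0 in float32
assert t.inv(y) is x                    # cached: no atanh round trip, no inf
print(t.log_abs_det_jacobian(x, y))     # numerically stable softplus-based form

t_nocache = TanhTransform()
print(t_nocache.inv(y))                 # last entry is inf because atanh(1.0) = inf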
+ """ + domain = constraints.real + codomain = constraints.positive + + def __eq__(self, other): + return isinstance(other, AbsTransform) + + def _call(self, x): + return x.abs() + + def _inverse(self, y): + return y + + +class AffineTransform(Transform): + r""" + Transform via the pointwise affine mapping :math:`y = \text{loc} + \text{scale} \times x`. + + Args: + loc (Tensor or float): Location parameter. + scale (Tensor or float): Scale parameter. + event_dim (int): Optional size of `event_shape`. This should be zero + for univariate random variables, 1 for distributions over vectors, + 2 for distributions over matrices, etc. + """ + bijective = True + + def __init__(self, loc, scale, event_dim=0, cache_size=0): + super().__init__(cache_size=cache_size) + self.loc = loc + self.scale = scale + self._event_dim = event_dim + + @property + def event_dim(self): + return self._event_dim + + @constraints.dependent_property(is_discrete=False) + def domain(self): + if self.event_dim == 0: + return constraints.real + return constraints.independent(constraints.real, self.event_dim) + + @constraints.dependent_property(is_discrete=False) + def codomain(self): + if self.event_dim == 0: + return constraints.real + return constraints.independent(constraints.real, self.event_dim) + + def with_cache(self, cache_size=1): + if self._cache_size == cache_size: + return self + return AffineTransform( + self.loc, self.scale, self.event_dim, cache_size=cache_size + ) + + def __eq__(self, other): + if not isinstance(other, AffineTransform): + return False + + if isinstance(self.loc, numbers.Number) and isinstance( + other.loc, numbers.Number + ): + if self.loc != other.loc: + return False + else: + if not (self.loc == other.loc).all().item(): + return False + + if isinstance(self.scale, numbers.Number) and isinstance( + other.scale, numbers.Number + ): + if self.scale != other.scale: + return False + else: + if not (self.scale == other.scale).all().item(): + return False + + return True + + @property + def sign(self): + if isinstance(self.scale, numbers.Real): + return 1 if float(self.scale) > 0 else -1 if float(self.scale) < 0 else 0 + return self.scale.sign() + + def _call(self, x): + return self.loc + self.scale * x + + def _inverse(self, y): + return (y - self.loc) / self.scale + + def log_abs_det_jacobian(self, x, y): + shape = x.shape + scale = self.scale + if isinstance(scale, numbers.Real): + result = torch.full_like(x, math.log(abs(scale))) + else: + result = torch.abs(scale).log() + if self.event_dim: + result_size = result.size()[: -self.event_dim] + (-1,) + result = result.view(result_size).sum(-1) + shape = shape[: -self.event_dim] + return result.expand(shape) + + def forward_shape(self, shape): + return torch.broadcast_shapes( + shape, getattr(self.loc, "shape", ()), getattr(self.scale, "shape", ()) + ) + + def inverse_shape(self, shape): + return torch.broadcast_shapes( + shape, getattr(self.loc, "shape", ()), getattr(self.scale, "shape", ()) + ) + + +class CorrCholeskyTransform(Transform): + r""" + Transforms an uncontrained real vector :math:`x` with length :math:`D*(D-1)/2` into the + Cholesky factor of a D-dimension correlation matrix. This Cholesky factor is a lower + triangular matrix with positive diagonals and unit Euclidean norm for each row. + The transform is processed as follows: + + 1. First we convert x into a lower triangular matrix in row order. + 2. 
For each row :math:`X_i` of the lower triangular part, we apply a *signed* version of + class :class:`StickBreakingTransform` to transform :math:`X_i` into a + unit Euclidean length vector using the following steps: + - Scales into the interval :math:`(-1, 1)` domain: :math:`r_i = \tanh(X_i)`. + - Transforms into an unsigned domain: :math:`z_i = r_i^2`. + - Applies :math:`s_i = StickBreakingTransform(z_i)`. + - Transforms back into signed domain: :math:`y_i = sign(r_i) * \sqrt{s_i}`. + """ + domain = constraints.real_vector + codomain = constraints.corr_cholesky + bijective = True + + def _call(self, x): + x = torch.tanh(x) + eps = torch.finfo(x.dtype).eps + x = x.clamp(min=-1 + eps, max=1 - eps) + r = vec_to_tril_matrix(x, diag=-1) + # apply stick-breaking on the squared values + # Note that y = sign(r) * sqrt(z * z1m_cumprod) + # = (sign(r) * sqrt(z)) * sqrt(z1m_cumprod) = r * sqrt(z1m_cumprod) + z = r**2 + z1m_cumprod_sqrt = (1 - z).sqrt().cumprod(-1) + # Diagonal elements must be 1. + r = r + torch.eye(r.shape[-1], dtype=r.dtype, device=r.device) + y = r * pad(z1m_cumprod_sqrt[..., :-1], [1, 0], value=1) + return y + + def _inverse(self, y): + # inverse stick-breaking + # See: https://mc-stan.org/docs/2_18/reference-manual/cholesky-factors-of-correlation-matrices-1.html + y_cumsum = 1 - torch.cumsum(y * y, dim=-1) + y_cumsum_shifted = pad(y_cumsum[..., :-1], [1, 0], value=1) + y_vec = tril_matrix_to_vec(y, diag=-1) + y_cumsum_vec = tril_matrix_to_vec(y_cumsum_shifted, diag=-1) + t = y_vec / (y_cumsum_vec).sqrt() + # inverse of tanh + x = (t.log1p() - t.neg().log1p()) / 2 + return x + + def log_abs_det_jacobian(self, x, y, intermediates=None): + # Because domain and codomain are two spaces with different dimensions, determinant of + # Jacobian is not well-defined. We return `log_abs_det_jacobian` of `x` and the + # flattened lower triangular part of `y`. + + # See: https://mc-stan.org/docs/2_18/reference-manual/cholesky-factors-of-correlation-matrices-1.html + y1m_cumsum = 1 - (y * y).cumsum(dim=-1) + # by taking diagonal=-2, we don't need to shift z_cumprod to the right + # also works for 2 x 2 matrix + y1m_cumsum_tril = tril_matrix_to_vec(y1m_cumsum, diag=-2) + stick_breaking_logdet = 0.5 * (y1m_cumsum_tril).log().sum(-1) + tanh_logdet = -2 * (x + softplus(-2 * x) - math.log(2.0)).sum(dim=-1) + return stick_breaking_logdet + tanh_logdet + + def forward_shape(self, shape): + # Reshape from (..., N) to (..., D, D). + if len(shape) < 1: + raise ValueError("Too few dimensions on input") + N = shape[-1] + D = round((0.25 + 2 * N) ** 0.5 + 0.5) + if D * (D - 1) // 2 != N: + raise ValueError("Input is not a flattend lower-diagonal number") + return shape[:-1] + (D, D) + + def inverse_shape(self, shape): + # Reshape from (..., D, D) to (..., N). + if len(shape) < 2: + raise ValueError("Too few dimensions on input") + if shape[-2] != shape[-1]: + raise ValueError("Input is not square") + D = shape[-1] + N = D * (D - 1) // 2 + return shape[:-2] + (N,) + + +class SoftmaxTransform(Transform): + r""" + Transform from unconstrained space to the simplex via :math:`y = \exp(x)` then + normalizing. + + This is not bijective and cannot be used for HMC. However this acts mostly + coordinate-wise (except for the final normalization), and thus is + appropriate for coordinate-wise optimization algorithms. 
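Illustrative only, using the two transforms described above; the sizes are arbitrary. A length-3 input corresponds to D = 3 since 3 = D(D-1)/2.

import torch
from torch.distributions.transforms import CorrCholeskyTransform, SoftmaxTransform

t = CorrCholeskyTransform()
x = torch.randn(3)                      # D*(D-1)/2 entries for D = 3
L = t(x)
print(L.shape)                          # torch.Size([3, 3]), lower triangular
print((L @ L.T).diagonal())             # approximately [1., 1., 1.]: unit-norm rows

s = SoftmaxTransform()
p = s(torch.randn(5))
print(p.sum())                          # approximately 1: mapped onto the simplex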
+ """ + domain = constraints.real_vector + codomain = constraints.simplex + + def __eq__(self, other): + return isinstance(other, SoftmaxTransform) + + def _call(self, x): + logprobs = x + probs = (logprobs - logprobs.max(-1, True)[0]).exp() + return probs / probs.sum(-1, True) + + def _inverse(self, y): + probs = y + return probs.log() + + def forward_shape(self, shape): + if len(shape) < 1: + raise ValueError("Too few dimensions on input") + return shape + + def inverse_shape(self, shape): + if len(shape) < 1: + raise ValueError("Too few dimensions on input") + return shape + + +class StickBreakingTransform(Transform): + """ + Transform from unconstrained space to the simplex of one additional + dimension via a stick-breaking process. + + This transform arises as an iterated sigmoid transform in a stick-breaking + construction of the `Dirichlet` distribution: the first logit is + transformed via sigmoid to the first probability and the probability of + everything else, and then the process recurses. + + This is bijective and appropriate for use in HMC; however it mixes + coordinates together and is less appropriate for optimization. + """ + + domain = constraints.real_vector + codomain = constraints.simplex + bijective = True + + def __eq__(self, other): + return isinstance(other, StickBreakingTransform) + + def _call(self, x): + offset = x.shape[-1] + 1 - x.new_ones(x.shape[-1]).cumsum(-1) + z = _clipped_sigmoid(x - offset.log()) + z_cumprod = (1 - z).cumprod(-1) + y = pad(z, [0, 1], value=1) * pad(z_cumprod, [1, 0], value=1) + return y + + def _inverse(self, y): + y_crop = y[..., :-1] + offset = y.shape[-1] - y.new_ones(y_crop.shape[-1]).cumsum(-1) + sf = 1 - y_crop.cumsum(-1) + # we clamp to make sure that sf is positive which sometimes does not + # happen when y[-1] ~ 0 or y[:-1].sum() ~ 1 + sf = torch.clamp(sf, min=torch.finfo(y.dtype).tiny) + x = y_crop.log() - sf.log() + offset.log() + return x + + def log_abs_det_jacobian(self, x, y): + offset = x.shape[-1] + 1 - x.new_ones(x.shape[-1]).cumsum(-1) + x = x - offset.log() + # use the identity 1 - sigmoid(x) = exp(-x) * sigmoid(x) + detJ = (-x + F.logsigmoid(x) + y[..., :-1].log()).sum(-1) + return detJ + + def forward_shape(self, shape): + if len(shape) < 1: + raise ValueError("Too few dimensions on input") + return shape[:-1] + (shape[-1] + 1,) + + def inverse_shape(self, shape): + if len(shape) < 1: + raise ValueError("Too few dimensions on input") + return shape[:-1] + (shape[-1] - 1,) + + +class LowerCholeskyTransform(Transform): + """ + Transform from unconstrained matrices to lower-triangular matrices with + nonnegative diagonal entries. + + This is useful for parameterizing positive definite matrices in terms of + their Cholesky factorization. + """ + + domain = constraints.independent(constraints.real, 2) + codomain = constraints.lower_cholesky + + def __eq__(self, other): + return isinstance(other, LowerCholeskyTransform) + + def _call(self, x): + return x.tril(-1) + x.diagonal(dim1=-2, dim2=-1).exp().diag_embed() + + def _inverse(self, y): + return y.tril(-1) + y.diagonal(dim1=-2, dim2=-1).log().diag_embed() + + +class PositiveDefiniteTransform(Transform): + """ + Transform from unconstrained matrices to positive-definite matrices. 
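A short illustrative sketch of the remaining constraint transforms described above (stick-breaking onto the simplex, and the two Cholesky-based matrix transforms); the shapes are arbitrary examples.

import torch
from torch.distributions.transforms import (
    LowerCholeskyTransform,
    PositiveDefiniteTransform,
    StickBreakingTransform,
)

sb = StickBreakingTransform()
x = torch.randn(4)
p = sb(x)                                   # one extra dimension, sums to 1
print(p.shape, p.sum())
assert torch.allclose(sb.inv(p), x, atol=1e-4)

m = torch.randn(3, 3)
chol = LowerCholeskyTransform()(m)          # lower triangle kept, diagonal exponentiated
print((chol.diagonal() > 0).all())          # tensor(True)

pd = PositiveDefiniteTransform()(m)         # chol @ chol.mT
print(torch.linalg.eigvalsh(pd).min() > 0)  # tensor(True)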
+ """ + + domain = constraints.independent(constraints.real, 2) + codomain = constraints.positive_definite # type: ignore[assignment] + + def __eq__(self, other): + return isinstance(other, PositiveDefiniteTransform) + + def _call(self, x): + x = LowerCholeskyTransform()(x) + return x @ x.mT + + def _inverse(self, y): + y = torch.linalg.cholesky(y) + return LowerCholeskyTransform().inv(y) + + +class CatTransform(Transform): + """ + Transform functor that applies a sequence of transforms `tseq` + component-wise to each submatrix at `dim`, of length `lengths[dim]`, + in a way compatible with :func:`torch.cat`. + + Example:: + + x0 = torch.cat([torch.range(1, 10), torch.range(1, 10)], dim=0) + x = torch.cat([x0, x0], dim=0) + t0 = CatTransform([ExpTransform(), identity_transform], dim=0, lengths=[10, 10]) + t = CatTransform([t0, t0], dim=0, lengths=[20, 20]) + y = t(x) + """ + + transforms: List[Transform] + + def __init__(self, tseq, dim=0, lengths=None, cache_size=0): + assert all(isinstance(t, Transform) for t in tseq) + if cache_size: + tseq = [t.with_cache(cache_size) for t in tseq] + super().__init__(cache_size=cache_size) + self.transforms = list(tseq) + if lengths is None: + lengths = [1] * len(self.transforms) + self.lengths = list(lengths) + assert len(self.lengths) == len(self.transforms) + self.dim = dim + + @lazy_property + def event_dim(self): + return max(t.event_dim for t in self.transforms) + + @lazy_property + def length(self): + return sum(self.lengths) + + def with_cache(self, cache_size=1): + if self._cache_size == cache_size: + return self + return CatTransform(self.transforms, self.dim, self.lengths, cache_size) + + def _call(self, x): + assert -x.dim() <= self.dim < x.dim() + assert x.size(self.dim) == self.length + yslices = [] + start = 0 + for trans, length in zip(self.transforms, self.lengths): + xslice = x.narrow(self.dim, start, length) + yslices.append(trans(xslice)) + start = start + length # avoid += for jit compat + return torch.cat(yslices, dim=self.dim) + + def _inverse(self, y): + assert -y.dim() <= self.dim < y.dim() + assert y.size(self.dim) == self.length + xslices = [] + start = 0 + for trans, length in zip(self.transforms, self.lengths): + yslice = y.narrow(self.dim, start, length) + xslices.append(trans.inv(yslice)) + start = start + length # avoid += for jit compat + return torch.cat(xslices, dim=self.dim) + + def log_abs_det_jacobian(self, x, y): + assert -x.dim() <= self.dim < x.dim() + assert x.size(self.dim) == self.length + assert -y.dim() <= self.dim < y.dim() + assert y.size(self.dim) == self.length + logdetjacs = [] + start = 0 + for trans, length in zip(self.transforms, self.lengths): + xslice = x.narrow(self.dim, start, length) + yslice = y.narrow(self.dim, start, length) + logdetjac = trans.log_abs_det_jacobian(xslice, yslice) + if trans.event_dim < self.event_dim: + logdetjac = _sum_rightmost(logdetjac, self.event_dim - trans.event_dim) + logdetjacs.append(logdetjac) + start = start + length # avoid += for jit compat + # Decide whether to concatenate or sum. 
+ dim = self.dim + if dim >= 0: + dim = dim - x.dim() + dim = dim + self.event_dim + if dim < 0: + return torch.cat(logdetjacs, dim=dim) + else: + return sum(logdetjacs) + + @property + def bijective(self): + return all(t.bijective for t in self.transforms) + + @constraints.dependent_property + def domain(self): + return constraints.cat( + [t.domain for t in self.transforms], self.dim, self.lengths + ) + + @constraints.dependent_property + def codomain(self): + return constraints.cat( + [t.codomain for t in self.transforms], self.dim, self.lengths + ) + + +class StackTransform(Transform): + """ + Transform functor that applies a sequence of transforms `tseq` + component-wise to each submatrix at `dim` + in a way compatible with :func:`torch.stack`. + + Example:: + + x = torch.stack([torch.range(1, 10), torch.range(1, 10)], dim=1) + t = StackTransform([ExpTransform(), identity_transform], dim=1) + y = t(x) + """ + + transforms: List[Transform] + + def __init__(self, tseq, dim=0, cache_size=0): + assert all(isinstance(t, Transform) for t in tseq) + if cache_size: + tseq = [t.with_cache(cache_size) for t in tseq] + super().__init__(cache_size=cache_size) + self.transforms = list(tseq) + self.dim = dim + + def with_cache(self, cache_size=1): + if self._cache_size == cache_size: + return self + return StackTransform(self.transforms, self.dim, cache_size) + + def _slice(self, z): + return [z.select(self.dim, i) for i in range(z.size(self.dim))] + + def _call(self, x): + assert -x.dim() <= self.dim < x.dim() + assert x.size(self.dim) == len(self.transforms) + yslices = [] + for xslice, trans in zip(self._slice(x), self.transforms): + yslices.append(trans(xslice)) + return torch.stack(yslices, dim=self.dim) + + def _inverse(self, y): + assert -y.dim() <= self.dim < y.dim() + assert y.size(self.dim) == len(self.transforms) + xslices = [] + for yslice, trans in zip(self._slice(y), self.transforms): + xslices.append(trans.inv(yslice)) + return torch.stack(xslices, dim=self.dim) + + def log_abs_det_jacobian(self, x, y): + assert -x.dim() <= self.dim < x.dim() + assert x.size(self.dim) == len(self.transforms) + assert -y.dim() <= self.dim < y.dim() + assert y.size(self.dim) == len(self.transforms) + logdetjacs = [] + yslices = self._slice(y) + xslices = self._slice(x) + for xslice, yslice, trans in zip(xslices, yslices, self.transforms): + logdetjacs.append(trans.log_abs_det_jacobian(xslice, yslice)) + return torch.stack(logdetjacs, dim=self.dim) + + @property + def bijective(self): + return all(t.bijective for t in self.transforms) + + @constraints.dependent_property + def domain(self): + return constraints.stack([t.domain for t in self.transforms], self.dim) + + @constraints.dependent_property + def codomain(self): + return constraints.stack([t.codomain for t in self.transforms], self.dim) + + +class CumulativeDistributionTransform(Transform): + """ + Transform via the cumulative distribution function of a probability distribution. + + Args: + distribution (Distribution): Distribution whose cumulative distribution function to use for + the transformation. + + Example:: + + # Construct a Gaussian copula from a multivariate normal. 
+ base_dist = MultivariateNormal( + loc=torch.zeros(2), + scale_tril=LKJCholesky(2).sample(), + ) + transform = CumulativeDistributionTransform(Normal(0, 1)) + copula = TransformedDistribution(base_dist, [transform]) + """ + + bijective = True + codomain = constraints.unit_interval + sign = +1 + + def __init__(self, distribution, cache_size=0): + super().__init__(cache_size=cache_size) + self.distribution = distribution + + @property + def domain(self): + return self.distribution.support + + def _call(self, x): + return self.distribution.cdf(x) + + def _inverse(self, y): + return self.distribution.icdf(y) + + def log_abs_det_jacobian(self, x, y): + return self.distribution.log_prob(x) + + def with_cache(self, cache_size=1): + if self._cache_size == cache_size: + return self + return CumulativeDistributionTransform(self.distribution, cache_size=cache_size) diff --git a/env-llmeval/lib/python3.10/site-packages/torch/distributions/von_mises.py b/env-llmeval/lib/python3.10/site-packages/torch/distributions/von_mises.py new file mode 100644 index 0000000000000000000000000000000000000000..17f52fad25b3de6aa3455e4740269ab050dd4f08 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/torch/distributions/von_mises.py @@ -0,0 +1,209 @@ +import math + +import torch +import torch.jit +from torch.distributions import constraints +from torch.distributions.distribution import Distribution +from torch.distributions.utils import broadcast_all, lazy_property + +__all__ = ["VonMises"] + + +def _eval_poly(y, coef): + coef = list(coef) + result = coef.pop() + while coef: + result = coef.pop() + y * result + return result + + +_I0_COEF_SMALL = [ + 1.0, + 3.5156229, + 3.0899424, + 1.2067492, + 0.2659732, + 0.360768e-1, + 0.45813e-2, +] +_I0_COEF_LARGE = [ + 0.39894228, + 0.1328592e-1, + 0.225319e-2, + -0.157565e-2, + 0.916281e-2, + -0.2057706e-1, + 0.2635537e-1, + -0.1647633e-1, + 0.392377e-2, +] +_I1_COEF_SMALL = [ + 0.5, + 0.87890594, + 0.51498869, + 0.15084934, + 0.2658733e-1, + 0.301532e-2, + 0.32411e-3, +] +_I1_COEF_LARGE = [ + 0.39894228, + -0.3988024e-1, + -0.362018e-2, + 0.163801e-2, + -0.1031555e-1, + 0.2282967e-1, + -0.2895312e-1, + 0.1787654e-1, + -0.420059e-2, +] + +_COEF_SMALL = [_I0_COEF_SMALL, _I1_COEF_SMALL] +_COEF_LARGE = [_I0_COEF_LARGE, _I1_COEF_LARGE] + + +def _log_modified_bessel_fn(x, order=0): + """ + Returns ``log(I_order(x))`` for ``x > 0``, + where `order` is either 0 or 1. + """ + assert order == 0 or order == 1 + + # compute small solution + y = x / 3.75 + y = y * y + small = _eval_poly(y, _COEF_SMALL[order]) + if order == 1: + small = x.abs() * small + small = small.log() + + # compute large solution + y = 3.75 / x + large = x - 0.5 * x.log() + _eval_poly(y, _COEF_LARGE[order]).log() + + result = torch.where(x < 3.75, small, large) + return result + + +@torch.jit.script_if_tracing +def _rejection_sample(loc, concentration, proposal_r, x): + done = torch.zeros(x.shape, dtype=torch.bool, device=loc.device) + while not done.all(): + u = torch.rand((3,) + x.shape, dtype=loc.dtype, device=loc.device) + u1, u2, u3 = u.unbind() + z = torch.cos(math.pi * u1) + f = (1 + proposal_r * z) / (proposal_r + z) + c = concentration * (proposal_r - f) + accept = ((c * (2 - c) - u2) > 0) | ((c / u2).log() + 1 - c >= 0) + if accept.any(): + x = torch.where(accept, (u3 - 0.5).sign() * f.acos(), x) + done = done | accept + return (x + math.pi + loc) % (2 * math.pi) - math.pi + + +class VonMises(Distribution): + """ + A circular von Mises distribution. + + This implementation uses polar coordinates. 
The ``loc`` and ``value`` args + can be any real number (to facilitate unconstrained optimization), but are + interpreted as angles modulo 2 pi. + + Example:: + >>> # xdoctest: +IGNORE_WANT("non-deterministic") + >>> m = VonMises(torch.tensor([1.0]), torch.tensor([1.0])) + >>> m.sample() # von Mises distributed with loc=1 and concentration=1 + tensor([1.9777]) + + :param torch.Tensor loc: an angle in radians. + :param torch.Tensor concentration: concentration parameter + """ + + arg_constraints = {"loc": constraints.real, "concentration": constraints.positive} + support = constraints.real + has_rsample = False + + def __init__(self, loc, concentration, validate_args=None): + self.loc, self.concentration = broadcast_all(loc, concentration) + batch_shape = self.loc.shape + event_shape = torch.Size() + super().__init__(batch_shape, event_shape, validate_args) + + def log_prob(self, value): + if self._validate_args: + self._validate_sample(value) + log_prob = self.concentration * torch.cos(value - self.loc) + log_prob = ( + log_prob + - math.log(2 * math.pi) + - _log_modified_bessel_fn(self.concentration, order=0) + ) + return log_prob + + @lazy_property + def _loc(self): + return self.loc.to(torch.double) + + @lazy_property + def _concentration(self): + return self.concentration.to(torch.double) + + @lazy_property + def _proposal_r(self): + kappa = self._concentration + tau = 1 + (1 + 4 * kappa**2).sqrt() + rho = (tau - (2 * tau).sqrt()) / (2 * kappa) + _proposal_r = (1 + rho**2) / (2 * rho) + # second order Taylor expansion around 0 for small kappa + _proposal_r_taylor = 1 / kappa + kappa + return torch.where(kappa < 1e-5, _proposal_r_taylor, _proposal_r) + + @torch.no_grad() + def sample(self, sample_shape=torch.Size()): + """ + The sampling algorithm for the von Mises distribution is based on the + following paper: D.J. Best and N.I. Fisher, "Efficient simulation of the + von Mises distribution." Applied Statistics (1979): 152-157. + + Sampling is always done in double precision internally to avoid a hang + in _rejection_sample() for small values of the concentration, which + starts to happen for single precision around 1e-4 (see issue #88443). + """ + shape = self._extended_shape(sample_shape) + x = torch.empty(shape, dtype=self._loc.dtype, device=self.loc.device) + return _rejection_sample( + self._loc, self._concentration, self._proposal_r, x + ).to(self.loc.dtype) + + def expand(self, batch_shape): + try: + return super().expand(batch_shape) + except NotImplementedError: + validate_args = self.__dict__.get("_validate_args") + loc = self.loc.expand(batch_shape) + concentration = self.concentration.expand(batch_shape) + return type(self)(loc, concentration, validate_args=validate_args) + + @property + def mean(self): + """ + The provided mean is the circular one. + """ + return self.loc + + @property + def mode(self): + return self.loc + + @lazy_property + def variance(self): + """ + The provided variance is the circular one. + """ + return ( + 1 + - ( + _log_modified_bessel_fn(self.concentration, order=1) + - _log_modified_bessel_fn(self.concentration, order=0) + ).exp() + )
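To close, an illustrative sketch (not exhaustive) combining the last two pieces added above: the probability integral transform built from `CumulativeDistributionTransform`, and basic use of `VonMises`. The sample sizes and parameter values are arbitrary.

import torch
from torch.distributions import Normal, TransformedDistribution, VonMises
from torch.distributions.transforms import CumulativeDistributionTransform

# Probability integral transform: pushing a Normal through its own CDF yields
# values in (0, 1), i.e. an approximately uniform distribution.
pit = TransformedDistribution(
    Normal(0.0, 1.0), [CumulativeDistributionTransform(Normal(0.0, 1.0))]
)
print(pit.sample((5,)))                 # five values in (0, 1)

# VonMises: a circular distribution; samples are angles wrapped into [-pi, pi).
vm = VonMises(loc=torch.tensor([0.0]), concentration=torch.tensor([4.0]))
theta = vm.sample((1000,))
print(theta.abs().max() <= torch.pi)    # tensor(True)
print(vm.mean, vm.variance)             # circular mean and circular variance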