diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_C/_VariableFunctions.pyi b/llmeval-env/lib/python3.10/site-packages/torch/_C/_VariableFunctions.pyi new file mode 100644 index 0000000000000000000000000000000000000000..8f6e61bf678ae19145799f44bb6beceac9bf24c9 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_C/_VariableFunctions.pyi @@ -0,0 +1,25648 @@ +# @generated from torch/_C/_VariableFunctions.pyi.in +# mypy: disable-error-code="type-arg" + +import builtins +from typing import ( + Any, + Callable, + ContextManager, + Iterator, + List, + Literal, + NamedTuple, + Optional, + overload, + Sequence, + Tuple, + TypeVar, + Union, +) + +import torch +from torch import contiguous_format, Generator, inf, memory_format, strided, SymInt, Tensor +from torch.types import ( + _bool, + _complex, + _device, + _dtype, + _float, + _int, + _layout, + _qscheme, + _size, + Device, + Number, +) + +from torch._prims_common import DeviceLikeType + +@overload +def __and__(input: Tensor, other: Tensor) -> Tensor: ... +@overload +def __and__(input: Tensor, other: Union[Number, _complex]) -> Tensor: ... +@overload +def __lshift__(input: Tensor, other: Tensor) -> Tensor: ... +@overload +def __lshift__(input: Tensor, other: Union[Number, _complex]) -> Tensor: ... +@overload +def __or__(input: Tensor, other: Tensor) -> Tensor: ... +@overload +def __or__(input: Tensor, other: Union[Number, _complex]) -> Tensor: ... +@overload +def __rshift__(input: Tensor, other: Tensor) -> Tensor: ... +@overload +def __rshift__(input: Tensor, other: Union[Number, _complex]) -> Tensor: ... +@overload +def __xor__(input: Tensor, other: Tensor) -> Tensor: ... +@overload +def __xor__(input: Tensor, other: Union[Number, _complex]) -> Tensor: ... +def _adaptive_avg_pool2d(input: Tensor, output_size: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]]) -> Tensor: ... +def _adaptive_avg_pool3d(input: Tensor, output_size: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]]) -> Tensor: ... +def _add_batch_dim(input: Tensor, batch_dim: _int, level: _int) -> Tensor: ... +@overload +def _add_relu(input: Tensor, other: Tensor, *, alpha: Union[Number, _complex] = 1, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def _add_relu(input: Tensor, other: Union[Number, _complex], alpha: Union[Number, _complex] = 1) -> Tensor: ... +@overload +def _add_relu_(input: Tensor, other: Tensor, *, alpha: Union[Number, _complex] = 1) -> Tensor: ... +@overload +def _add_relu_(input: Tensor, other: Union[Number, _complex], alpha: Union[Number, _complex] = 1) -> Tensor: ... +def _addmm_activation(input: Tensor, mat1: Tensor, mat2: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1, use_gelu: _bool = False, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def _aminmax(input: Tensor) -> Tuple[Tensor, Tensor]: ... +@overload +def _aminmax(input: Tensor, dim: _int, keepdim: _bool = False) -> Tuple[Tensor, Tensor]: ... +def _amp_foreach_non_finite_check_and_unscale_(self: Union[Tuple[Tensor, ...], List[Tensor]], found_inf: Tensor, inv_scale: Tensor) -> None: ... +def _amp_update_scale_(input: Tensor, growth_tracker: Tensor, found_inf: Tensor, scale_growth_factor: _float, scale_backoff_factor: _float, growth_interval: _int) -> Tensor: ... +@overload +def _assert_async(input: Tensor) -> None: + r""" + _assert_async(tensor) -> void + + Asynchronously assert that the contents of tensor are nonzero. 
For CPU tensors, + this is equivalent to ``assert tensor`` or ``assert tensor.is_nonzero()``; for + CUDA tensors, we DO NOT synchronize and you may only find out the assertion + failed at a later CUDA kernel launch. Asynchronous assertion can be helpful for + testing invariants in CUDA tensors without giving up performance. This function + is NOT intended to be used for regular error checking, as it will trash your CUDA + context if the assert fails (forcing you to restart your PyTorch process.) + + Args: + tensor (Tensor): a one element tensor to test to see if it is nonzero. Zero + elements (including False for boolean tensors) cause an assertion failure + to be raised. + """ + ... +@overload +def _assert_async(input: Tensor, assert_msg: str) -> None: + r""" + _assert_async(tensor) -> void + + Asynchronously assert that the contents of tensor are nonzero. For CPU tensors, + this is equivalent to ``assert tensor`` or ``assert tensor.is_nonzero()``; for + CUDA tensors, we DO NOT synchronize and you may only find out the assertion + failed at a later CUDA kernel launch. Asynchronous assertion can be helpful for + testing invariants in CUDA tensors without giving up performance. This function + is NOT intended to be used for regular error checking, as it will trash your CUDA + context if the assert fails (forcing you to restart your PyTorch process.) + + Args: + tensor (Tensor): a one element tensor to test to see if it is nonzero. Zero + elements (including False for boolean tensors) cause an assertion failure + to be raised. + """ + ... +def _assert_scalar(self: Union[Number, _complex], assert_msg: str) -> None: ... +def _assert_tensor_metadata(a: Tensor, size: Optional[Sequence[Union[_int, SymInt]]] = None, stride: Optional[Sequence[Union[_int, SymInt]]] = None, dtype: Optional[_dtype] = None) -> None: ... +def _batch_norm_impl_index(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], running_mean: Optional[Tensor], running_var: Optional[Tensor], training: _bool, momentum: _float, eps: _float, cudnn_enabled: _bool) -> Tuple[Tensor, Tensor, Tensor, Tensor, _int]: ... +def _cast_Byte(input: Tensor, non_blocking: _bool = False) -> Tensor: ... +def _cast_Char(input: Tensor, non_blocking: _bool = False) -> Tensor: ... +def _cast_Double(input: Tensor, non_blocking: _bool = False) -> Tensor: ... +def _cast_Float(input: Tensor, non_blocking: _bool = False) -> Tensor: ... +def _cast_Half(input: Tensor, non_blocking: _bool = False) -> Tensor: ... +def _cast_Int(input: Tensor, non_blocking: _bool = False) -> Tensor: ... +def _cast_Long(input: Tensor, non_blocking: _bool = False) -> Tensor: ... +def _cast_Short(input: Tensor, non_blocking: _bool = False) -> Tensor: ... +def _choose_qparams_per_tensor(input: Tensor, reduce_range: _bool = False) -> Tuple[_float, _int]: ... +def _chunk_cat(tensors: Union[Tuple[Tensor, ...], List[Tensor]], dim: _int, num_chunks: _int, *, out: Optional[Tensor] = None) -> Tensor: ... +def _coalesce(input: Tensor) -> Tensor: ... +def _compute_linear_combination(input: Tensor, coefficients: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def _conj(input: Tensor) -> Tensor: ... +def _conj_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def _conj_physical(input: Tensor) -> Tensor: ... +def _convert_indices_from_coo_to_csr(input: Tensor, size: _int, *, out_int32: _bool = False, out: Optional[Tensor] = None) -> Tensor: ... 
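+# Usage sketch for torch._assert_async (documented above). The CUDA case assumes
+# a CUDA device is available; on CPU the check is performed eagerly:
+#
+#     >>> torch._assert_async(torch.ones(1))                       # passes eagerly on CPU
+#     >>> torch._assert_async(torch.ones(1, device="cuda"))        # enqueued, no host sync
+#     >>> torch._assert_async(torch.zeros(1), "expected nonzero")  # fails eagerly on CPU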
+def _convert_indices_from_csr_to_coo(crow_indices: Tensor, col_indices: Tensor, *, out_int32: _bool = False, transpose: _bool = False, out: Optional[Tensor] = None) -> Tensor: ... +def _convert_weight_to_int4pack(input: Tensor, innerKTiles: _int) -> Tensor: ... +@overload +def _convolution(input: Tensor, weight: Tensor, bias: Optional[Tensor], stride: Sequence[Union[_int, SymInt]], padding: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], transposed: _bool, output_padding: _size, groups: Union[_int, SymInt], benchmark: _bool, deterministic: _bool, cudnn_enabled: _bool) -> Tensor: ... +@overload +def _convolution(input: Tensor, weight: Tensor, bias: Optional[Tensor], stride: Sequence[Union[_int, SymInt]], padding: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], transposed: _bool, output_padding: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt], benchmark: _bool, deterministic: _bool, cudnn_enabled: _bool, allow_tf32: _bool) -> Tensor: ... +def _convolution_mode(input: Tensor, weight: Tensor, bias: Optional[Tensor], stride: Sequence[Union[_int, SymInt]], padding: str, dilation: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt]) -> Tensor: ... +def _copy_from(input: Tensor, dst: Tensor, non_blocking: _bool = False) -> Tensor: ... +def _copy_from_and_resize(input: Tensor, dst: Tensor) -> Tensor: ... +def _cslt_compress(input: Tensor) -> Tensor: ... +def _cslt_sparse_mm(compressed_A: Tensor, dense_B: Tensor, bias: Optional[Tensor] = None, alpha: Optional[Tensor] = None, out_dtype: Optional[_dtype] = None, transpose_result: _bool = False, alg_id: _int = 0) -> Tensor: ... +def _cslt_sparse_mm_search(compressed_A: Tensor, dense_B: Tensor, bias: Optional[Tensor] = None, alpha: Optional[Tensor] = None, out_dtype: Optional[_dtype] = None, transpose_result: _bool = False) -> _int: ... +@overload +def _ctc_loss(log_probs: Tensor, targets: Tensor, input_lengths: _size, target_lengths: _size, blank: _int = 0, zero_infinity: _bool = False) -> Tuple[Tensor, Tensor]: ... +@overload +def _ctc_loss(log_probs: Tensor, targets: Tensor, input_lengths: Tensor, target_lengths: Tensor, blank: _int = 0, zero_infinity: _bool = False) -> Tuple[Tensor, Tensor]: ... +@overload +def _cudnn_ctc_loss(log_probs: Tensor, targets: Tensor, input_lengths: _size, target_lengths: _size, blank: _int, deterministic: _bool, zero_infinity: _bool) -> Tuple[Tensor, Tensor]: ... +@overload +def _cudnn_ctc_loss(log_probs: Tensor, targets: Tensor, input_lengths: Tensor, target_lengths: Tensor, blank: _int, deterministic: _bool, zero_infinity: _bool) -> Tuple[Tensor, Tensor]: ... +def _cudnn_init_dropout_state(dropout: _float, train: _bool, dropout_seed: _int, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +def _cudnn_rnn(input: Tensor, weight: Union[Tuple[Tensor, ...], List[Tensor]], weight_stride0: _int, weight_buf: Optional[Tensor], hx: Tensor, cx: Optional[Tensor], mode: _int, hidden_size: Union[_int, SymInt], proj_size: Union[_int, SymInt], num_layers: _int, batch_first: _bool, dropout: _float, train: _bool, bidirectional: _bool, batch_sizes: Sequence[Union[_int, SymInt]], dropout_state: Optional[Tensor]) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]: ... 
+def _cudnn_rnn_flatten_weight(weight_arr: Union[Tuple[Tensor, ...], List[Tensor]], weight_stride0: _int, input_size: Union[_int, SymInt], mode: _int, hidden_size: Union[_int, SymInt], proj_size: Union[_int, SymInt], num_layers: _int, batch_first: _bool, bidirectional: _bool) -> Tensor: ... +def _cufft_clear_plan_cache(device_index: _int) -> None: ... +def _cufft_get_plan_cache_max_size(device_index: _int) -> _int: ... +def _cufft_get_plan_cache_size(device_index: _int) -> _int: ... +def _cufft_set_plan_cache_max_size(device_index: _int, max_size: _int) -> None: ... +def _cummax_helper(input: Tensor, values: Tensor, indices: Tensor, dim: _int) -> None: ... +def _cummin_helper(input: Tensor, values: Tensor, indices: Tensor, dim: _int) -> None: ... +def _debug_has_internal_overlap(input: Tensor) -> _int: ... +def _dim_arange(like: Tensor, dim: _int) -> Tensor: ... +def _dirichlet_grad(x: Tensor, alpha: Tensor, total: Tensor) -> Tensor: ... +def _disable_functionalization(): ... +@overload +def _efficientzerotensor(size: Sequence[Union[_int, SymInt]], *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +@overload +def _efficientzerotensor(*size: _int, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +def _embedding_bag(weight: Tensor, indices: Tensor, offsets: Tensor, scale_grad_by_freq: _bool = False, mode: _int = 0, sparse: _bool = False, per_sample_weights: Optional[Tensor] = None, include_last_offset: _bool = False, padding_idx: _int = -1) -> Tuple[Tensor, Tensor, Tensor, Tensor]: ... +def _embedding_bag_forward_only(weight: Tensor, indices: Tensor, offsets: Tensor, scale_grad_by_freq: _bool = False, mode: _int = 0, sparse: _bool = False, per_sample_weights: Optional[Tensor] = None, include_last_offset: _bool = False, padding_idx: _int = -1) -> Tuple[Tensor, Tensor, Tensor, Tensor]: ... +@overload +def _empty_affine_quantized(size: Sequence[Union[_int, SymInt]], *, scale: _float = 1, zero_point: _int = 0, memory_format: Optional[memory_format] = contiguous_format, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +@overload +def _empty_affine_quantized(*size: _int, scale: _float = 1, zero_point: _int = 0, memory_format: Optional[memory_format] = contiguous_format, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +@overload +def _empty_per_channel_affine_quantized(size: Sequence[Union[_int, SymInt]], *, scales: Tensor, zero_points: Tensor, axis: _int, memory_format: Optional[memory_format] = contiguous_format, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... 
+@overload +def _empty_per_channel_affine_quantized(*size: _int, scales: Tensor, zero_points: Tensor, axis: _int, memory_format: Optional[memory_format] = contiguous_format, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +def _enable_functionalization(*, reapply_views: _bool = False): ... +def _euclidean_dist(x1: Tensor, x2: Tensor) -> Tensor: ... +def _fake_quantize_learnable_per_channel_affine(input: Tensor, scale: Tensor, zero_point: Tensor, axis: _int, quant_min: _int, quant_max: _int, grad_factor: _float = 1.0) -> Tensor: ... +def _fake_quantize_learnable_per_tensor_affine(input: Tensor, scale: Tensor, zero_point: Tensor, quant_min: _int, quant_max: _int, grad_factor: _float = 1.0) -> Tensor: ... +def _fake_quantize_per_tensor_affine_cachemask_tensor_qparams(input: Tensor, scale: Tensor, zero_point: Tensor, fake_quant_enabled: Tensor, quant_min: _int, quant_max: _int) -> torch.return_types._fake_quantize_per_tensor_affine_cachemask_tensor_qparams: ... +def _fft_c2c(input: Tensor, dim: Sequence[Union[_int, SymInt]], normalization: _int, forward: _bool, *, out: Optional[Tensor] = None) -> Tensor: ... +def _fft_c2r(input: Tensor, dim: _size, normalization: _int, last_dim_size: Union[_int, SymInt], *, out: Optional[Tensor] = None) -> Tensor: ... +def _fft_r2c(input: Tensor, dim: _size, normalization: _int, onesided: _bool, *, out: Optional[Tensor] = None) -> Tensor: ... +def _fill_mem_eff_dropout_mask_(input: Tensor, dropout_p: _float, seed: _int, offset: _int) -> Tensor: ... +def _foobar(input: Tensor, arg1: _bool = True, arg2: _bool = True, *, arg3: _bool = True) -> Tensor: ... +def _foreach_abs(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: + r""" + _foreach_abs(self: List[Tensor]) -> List[Tensor] + + Apply :func:`torch.abs` to each Tensor of the input list. + """ + ... +def _foreach_abs_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: + r""" + _foreach_abs_(self: List[Tensor]) -> None + + Apply :func:`torch.abs` to each Tensor of the input list. + """ + ... +def _foreach_acos(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: + r""" + _foreach_acos(self: List[Tensor]) -> List[Tensor] + + Apply :func:`torch.acos` to each Tensor of the input list. + """ + ... +def _foreach_acos_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: + r""" + _foreach_acos_(self: List[Tensor]) -> None + + Apply :func:`torch.acos` to each Tensor of the input list. + """ + ... +@overload +def _foreach_add(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> Tuple[Tensor, ...]: ... +@overload +def _foreach_add(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]], *, alpha: Union[Number, _complex] = 1) -> Tuple[Tensor, ...]: ... +@overload +def _foreach_add(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Tensor, *, alpha: Union[Number, _complex] = 1) -> Tuple[Tensor, ...]: ... +@overload +def _foreach_add(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> Tuple[Tensor, ...]: ... +@overload +def _foreach_add_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> None: ... 
+@overload +def _foreach_add_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]], *, alpha: Union[Number, _complex] = 1) -> None: ... +@overload +def _foreach_add_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Tensor, *, alpha: Union[Number, _complex] = 1) -> None: ... +@overload +def _foreach_add_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> None: ... +@overload +def _foreach_addcdiv(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> Tuple[Tensor, ...]: ... +@overload +def _foreach_addcdiv(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Tensor) -> Tuple[Tensor, ...]: ... +@overload +def _foreach_addcdiv(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], value: Union[Number, _complex] = 1) -> Tuple[Tensor, ...]: ... +@overload +def _foreach_addcdiv_(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> None: ... +@overload +def _foreach_addcdiv_(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Tensor) -> None: ... +@overload +def _foreach_addcdiv_(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], value: Union[Number, _complex] = 1) -> None: ... +@overload +def _foreach_addcmul(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> Tuple[Tensor, ...]: ... +@overload +def _foreach_addcmul(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Tensor) -> Tuple[Tensor, ...]: ... +@overload +def _foreach_addcmul(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], value: Union[Number, _complex] = 1) -> Tuple[Tensor, ...]: ... +@overload +def _foreach_addcmul_(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> None: ... +@overload +def _foreach_addcmul_(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Tensor) -> None: ... +@overload +def _foreach_addcmul_(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], value: Union[Number, _complex] = 1) -> None: ... +def _foreach_asin(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: + r""" + _foreach_asin(self: List[Tensor]) -> List[Tensor] + + Apply :func:`torch.asin` to each Tensor of the input list. + """ + ... 
+def _foreach_asin_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: + r""" + _foreach_asin_(self: List[Tensor]) -> None + + Apply :func:`torch.asin` to each Tensor of the input list. + """ + ... +def _foreach_atan(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: + r""" + _foreach_atan(self: List[Tensor]) -> List[Tensor] + + Apply :func:`torch.atan` to each Tensor of the input list. + """ + ... +def _foreach_atan_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: + r""" + _foreach_atan_(self: List[Tensor]) -> None + + Apply :func:`torch.atan` to each Tensor of the input list. + """ + ... +def _foreach_ceil(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: + r""" + _foreach_ceil(self: List[Tensor]) -> List[Tensor] + + Apply :func:`torch.ceil` to each Tensor of the input list. + """ + ... +def _foreach_ceil_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: + r""" + _foreach_ceil_(self: List[Tensor]) -> None + + Apply :func:`torch.ceil` to each Tensor of the input list. + """ + ... +@overload +def _foreach_clamp_max(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> Tuple[Tensor, ...]: ... +@overload +def _foreach_clamp_max(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> Tuple[Tensor, ...]: ... +@overload +def _foreach_clamp_max(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: ... +@overload +def _foreach_clamp_max_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> None: ... +@overload +def _foreach_clamp_max_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> None: ... +@overload +def _foreach_clamp_max_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ... +@overload +def _foreach_clamp_min(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> Tuple[Tensor, ...]: ... +@overload +def _foreach_clamp_min(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> Tuple[Tensor, ...]: ... +@overload +def _foreach_clamp_min(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: ... +@overload +def _foreach_clamp_min_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> None: ... +@overload +def _foreach_clamp_min_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> None: ... +@overload +def _foreach_clamp_min_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ... +def _foreach_copy_(self: Union[Tuple[Tensor, ...], List[Tensor]], src: Union[Tuple[Tensor, ...], List[Tensor]], non_blocking: _bool = False) -> None: ... +def _foreach_cos(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: + r""" + _foreach_cos(self: List[Tensor]) -> List[Tensor] + + Apply :func:`torch.cos` to each Tensor of the input list. + """ + ... +def _foreach_cos_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: + r""" + _foreach_cos_(self: List[Tensor]) -> None + + Apply :func:`torch.cos` to each Tensor of the input list. + """ + ... 
+def _foreach_cosh(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: + r""" + _foreach_cosh(self: List[Tensor]) -> List[Tensor] + + Apply :func:`torch.cosh` to each Tensor of the input list. + """ + ... +def _foreach_cosh_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: + r""" + _foreach_cosh_(self: List[Tensor]) -> None + + Apply :func:`torch.cosh` to each Tensor of the input list. + """ + ... +@overload +def _foreach_div(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> Tuple[Tensor, ...]: ... +@overload +def _foreach_div(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Tensor) -> Tuple[Tensor, ...]: ... +@overload +def _foreach_div(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> Tuple[Tensor, ...]: ... +@overload +def _foreach_div(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: ... +@overload +def _foreach_div_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> None: ... +@overload +def _foreach_div_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Tensor) -> None: ... +@overload +def _foreach_div_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> None: ... +@overload +def _foreach_div_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ... +def _foreach_erf(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: + r""" + _foreach_erf(self: List[Tensor]) -> List[Tensor] + + Apply :func:`torch.erf` to each Tensor of the input list. + """ + ... +def _foreach_erf_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: + r""" + _foreach_erf_(self: List[Tensor]) -> None + + Apply :func:`torch.erf` to each Tensor of the input list. + """ + ... +def _foreach_erfc(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: + r""" + _foreach_erfc(self: List[Tensor]) -> List[Tensor] + + Apply :func:`torch.erfc` to each Tensor of the input list. + """ + ... +def _foreach_erfc_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: + r""" + _foreach_erfc_(self: List[Tensor]) -> None + + Apply :func:`torch.erfc` to each Tensor of the input list. + """ + ... +def _foreach_exp(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: + r""" + _foreach_exp(self: List[Tensor]) -> List[Tensor] + + Apply :func:`torch.exp` to each Tensor of the input list. + """ + ... +def _foreach_exp_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: + r""" + _foreach_exp_(self: List[Tensor]) -> None + + Apply :func:`torch.exp` to each Tensor of the input list. + """ + ... +def _foreach_expm1(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: + r""" + _foreach_expm1(self: List[Tensor]) -> List[Tensor] + + Apply :func:`torch.expm1` to each Tensor of the input list. + """ + ... +def _foreach_expm1_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: + r""" + _foreach_expm1_(self: List[Tensor]) -> None + + Apply :func:`torch.expm1` to each Tensor of the input list. + """ + ... +def _foreach_floor(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: + r""" + _foreach_floor(self: List[Tensor]) -> List[Tensor] + + Apply :func:`torch.floor` to each Tensor of the input list. + """ + ... 
+def _foreach_floor_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: + r""" + _foreach_floor_(self: List[Tensor]) -> None + + Apply :func:`torch.floor` to each Tensor of the input list. + """ + ... +def _foreach_frac(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: + r""" + _foreach_frac(self: List[Tensor]) -> List[Tensor] + + Apply :func:`torch.frac` to each Tensor of the input list. + """ + ... +def _foreach_frac_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: + r""" + _foreach_frac_(self: List[Tensor]) -> None + + Apply :func:`torch.frac` to each Tensor of the input list. + """ + ... +@overload +def _foreach_lerp(self: Union[Tuple[Tensor, ...], List[Tensor]], tensors1: Union[Tuple[Tensor, ...], List[Tensor]], weight: Union[Number, _complex]) -> Tuple[Tensor, ...]: ... +@overload +def _foreach_lerp(self: Union[Tuple[Tensor, ...], List[Tensor]], tensors1: Union[Tuple[Tensor, ...], List[Tensor]], weights: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: ... +@overload +def _foreach_lerp_(self: Union[Tuple[Tensor, ...], List[Tensor]], tensors1: Union[Tuple[Tensor, ...], List[Tensor]], weight: Union[Number, _complex]) -> None: ... +@overload +def _foreach_lerp_(self: Union[Tuple[Tensor, ...], List[Tensor]], tensors1: Union[Tuple[Tensor, ...], List[Tensor]], weights: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ... +def _foreach_lgamma(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: + r""" + _foreach_lgamma(self: List[Tensor]) -> List[Tensor] + + Apply :func:`torch.lgamma` to each Tensor of the input list. + """ + ... +def _foreach_lgamma_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: + r""" + _foreach_lgamma_(self: List[Tensor]) -> None + + Apply :func:`torch.lgamma` to each Tensor of the input list. + """ + ... +def _foreach_log(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: + r""" + _foreach_log(self: List[Tensor]) -> List[Tensor] + + Apply :func:`torch.log` to each Tensor of the input list. + """ + ... +def _foreach_log10(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: + r""" + _foreach_log10(self: List[Tensor]) -> List[Tensor] + + Apply :func:`torch.log10` to each Tensor of the input list. + """ + ... +def _foreach_log10_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: + r""" + _foreach_log10_(self: List[Tensor]) -> None + + Apply :func:`torch.log10` to each Tensor of the input list. + """ + ... +def _foreach_log1p(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: + r""" + _foreach_log1p(self: List[Tensor]) -> List[Tensor] + + Apply :func:`torch.log1p` to each Tensor of the input list. + """ + ... +def _foreach_log1p_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: + r""" + _foreach_log1p_(self: List[Tensor]) -> None + + Apply :func:`torch.log1p` to each Tensor of the input list. + """ + ... +def _foreach_log2(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: + r""" + _foreach_log2(self: List[Tensor]) -> List[Tensor] + + Apply :func:`torch.log2` to each Tensor of the input list. + """ + ... +def _foreach_log2_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: + r""" + _foreach_log2_(self: List[Tensor]) -> None + + Apply :func:`torch.log2` to each Tensor of the input list. + """ + ... +def _foreach_log_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: + r""" + _foreach_log_(self: List[Tensor]) -> None + + Apply :func:`torch.log` to each Tensor of the input list. + """ + ... 
+@overload +def _foreach_maximum(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> Tuple[Tensor, ...]: ... +@overload +def _foreach_maximum(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> Tuple[Tensor, ...]: ... +@overload +def _foreach_maximum(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: ... +@overload +def _foreach_maximum_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> None: ... +@overload +def _foreach_maximum_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> None: ... +@overload +def _foreach_maximum_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ... +@overload +def _foreach_minimum(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> Tuple[Tensor, ...]: ... +@overload +def _foreach_minimum(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> Tuple[Tensor, ...]: ... +@overload +def _foreach_minimum(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: ... +@overload +def _foreach_minimum_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> None: ... +@overload +def _foreach_minimum_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> None: ... +@overload +def _foreach_minimum_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ... +@overload +def _foreach_mul(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> Tuple[Tensor, ...]: ... +@overload +def _foreach_mul(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Tensor) -> Tuple[Tensor, ...]: ... +@overload +def _foreach_mul(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> Tuple[Tensor, ...]: ... +@overload +def _foreach_mul(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: ... +@overload +def _foreach_mul_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> None: ... +@overload +def _foreach_mul_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Tensor) -> None: ... +@overload +def _foreach_mul_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> None: ... +@overload +def _foreach_mul_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ... +def _foreach_neg(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: + r""" + _foreach_neg(self: List[Tensor]) -> List[Tensor] + + Apply :func:`torch.neg` to each Tensor of the input list. + """ + ... +def _foreach_neg_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: + r""" + _foreach_neg_(self: List[Tensor]) -> None + + Apply :func:`torch.neg` to each Tensor of the input list. + """ + ... +def _foreach_norm(self: Union[Tuple[Tensor, ...], List[Tensor]], ord: Union[Number, _complex] = 2) -> Tuple[Tensor, ...]: ... +@overload +def _foreach_pow(self: Union[Tuple[Tensor, ...], List[Tensor]], exponent: Sequence[Union[Number, _complex]]) -> Tuple[Tensor, ...]: ... 
+@overload +def _foreach_pow(self: Union[Tuple[Tensor, ...], List[Tensor]], exponent: Union[Number, _complex]) -> Tuple[Tensor, ...]: ... +@overload +def _foreach_pow(self: Union[Tuple[Tensor, ...], List[Tensor]], exponent: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: ... +@overload +def _foreach_pow(self: Union[Number, _complex], exponent: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: ... +@overload +def _foreach_pow_(self: Union[Tuple[Tensor, ...], List[Tensor]], exponent: Sequence[Union[Number, _complex]]) -> None: ... +@overload +def _foreach_pow_(self: Union[Tuple[Tensor, ...], List[Tensor]], exponent: Union[Number, _complex]) -> None: ... +@overload +def _foreach_pow_(self: Union[Tuple[Tensor, ...], List[Tensor]], exponent: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ... +def _foreach_reciprocal(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: + r""" + _foreach_reciprocal(self: List[Tensor]) -> List[Tensor] + + Apply :func:`torch.reciprocal` to each Tensor of the input list. + """ + ... +def _foreach_reciprocal_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: + r""" + _foreach_reciprocal_(self: List[Tensor]) -> None + + Apply :func:`torch.reciprocal` to each Tensor of the input list. + """ + ... +def _foreach_round(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: + r""" + _foreach_round(self: List[Tensor]) -> List[Tensor] + + Apply :func:`torch.round` to each Tensor of the input list. + """ + ... +def _foreach_round_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: + r""" + _foreach_round_(self: List[Tensor]) -> None + + Apply :func:`torch.round` to each Tensor of the input list. + """ + ... +def _foreach_sigmoid(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: + r""" + _foreach_sigmoid(self: List[Tensor]) -> List[Tensor] + + Apply :func:`torch.sigmoid` to each Tensor of the input list. + """ + ... +def _foreach_sigmoid_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: + r""" + _foreach_sigmoid_(self: List[Tensor]) -> None + + Apply :func:`torch.sigmoid` to each Tensor of the input list. + """ + ... +def _foreach_sign(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: ... +def _foreach_sign_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ... +def _foreach_sin(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: + r""" + _foreach_sin(self: List[Tensor]) -> List[Tensor] + + Apply :func:`torch.sin` to each Tensor of the input list. + """ + ... +def _foreach_sin_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: + r""" + _foreach_sin_(self: List[Tensor]) -> None + + Apply :func:`torch.sin` to each Tensor of the input list. + """ + ... +def _foreach_sinh(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: + r""" + _foreach_sinh(self: List[Tensor]) -> List[Tensor] + + Apply :func:`torch.sinh` to each Tensor of the input list. + """ + ... +def _foreach_sinh_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: + r""" + _foreach_sinh_(self: List[Tensor]) -> None + + Apply :func:`torch.sinh` to each Tensor of the input list. + """ + ... +def _foreach_sqrt(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: + r""" + _foreach_sqrt(self: List[Tensor]) -> List[Tensor] + + Apply :func:`torch.sqrt` to each Tensor of the input list. + """ + ... 
+def _foreach_sqrt_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: + r""" + _foreach_sqrt_(self: List[Tensor]) -> None + + Apply :func:`torch.sqrt` to each Tensor of the input list. + """ + ... +@overload +def _foreach_sub(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> Tuple[Tensor, ...]: ... +@overload +def _foreach_sub(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]], *, alpha: Union[Number, _complex] = 1) -> Tuple[Tensor, ...]: ... +@overload +def _foreach_sub(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> Tuple[Tensor, ...]: ... +@overload +def _foreach_sub_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> None: ... +@overload +def _foreach_sub_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]], *, alpha: Union[Number, _complex] = 1) -> None: ... +@overload +def _foreach_sub_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> None: ... +def _foreach_tan(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: + r""" + _foreach_tan(self: List[Tensor]) -> List[Tensor] + + Apply :func:`torch.tan` to each Tensor of the input list. + """ + ... +def _foreach_tan_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: + r""" + _foreach_tan_(self: List[Tensor]) -> None + + Apply :func:`torch.tan` to each Tensor of the input list. + """ + ... +def _foreach_tanh(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: + r""" + _foreach_tanh(self: List[Tensor]) -> List[Tensor] + + Apply :func:`torch.tanh` to each Tensor of the input list. + """ + ... +def _foreach_tanh_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: + r""" + _foreach_tanh_(self: List[Tensor]) -> None + + Apply :func:`torch.tanh` to each Tensor of the input list. + """ + ... +def _foreach_trunc(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: + r""" + _foreach_trunc(self: List[Tensor]) -> List[Tensor] + + Apply :func:`torch.trunc` to each Tensor of the input list. + """ + ... +def _foreach_trunc_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: + r""" + _foreach_trunc_(self: List[Tensor]) -> None + + Apply :func:`torch.trunc` to each Tensor of the input list. + """ + ... +def _foreach_zero_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: + r""" + _foreach_zero_(self: List[Tensor]) -> None + + Apply :func:`torch.zero` to each Tensor of the input list. + """ + ... +def _from_functional_tensor(t: Tensor) -> Tensor: ... +def _functional_assert_async(input: Tensor, assert_msg: str, dep_token: Tensor) -> Tensor: ... +def _functional_assert_scalar(self: Union[Number, _complex], assert_msg: str, dep_token: Tensor) -> Tensor: ... +def _functional_sym_constrain_range(size: Union[Number, _complex], min: Optional[_int], max: Optional[_int], dep_token: Tensor) -> Tensor: ... +def _functional_sym_constrain_range_for_size(size: Union[Number, _complex], min: Optional[_int], max: Optional[_int], dep_token: Tensor) -> Tensor: ... +def _functionalize_are_all_mutations_hidden_from_autograd(t: Tensor) -> _bool: ... +def _functionalize_are_all_mutations_under_no_grad_or_inference_mode(t: Tensor) -> _bool: ... +def _functionalize_commit_update(t: Tensor) -> None: ... +def _functionalize_mark_mutation_hidden_from_autograd(t: Tensor) -> None: ... +def _functionalize_replace(self_: Tensor, other: Tensor) -> None: ... 
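+# Usage sketch for the _foreach_* functions above: each call maps the underlying
+# op over a list of tensors and returns one result per input; the trailing-underscore
+# variants mutate the inputs in place and return None:
+#
+#     >>> xs = [torch.tensor([-1.0, 2.0]), torch.tensor([-3.0])]
+#     >>> ys = torch._foreach_abs(xs)   # ys[0] == tensor([1., 2.]), ys[1] == tensor([3.])
+#     >>> torch._foreach_abs_(xs)       # xs now holds the absolute values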
+def _functionalize_sync(t: Tensor) -> None: ... +@overload +def _fused_adam_(self: Union[Tuple[Tensor, ...], List[Tensor]], grads: Union[Tuple[Tensor, ...], List[Tensor]], exp_avgs: Union[Tuple[Tensor, ...], List[Tensor]], exp_avg_sqs: Union[Tuple[Tensor, ...], List[Tensor]], max_exp_avg_sqs: Union[Tuple[Tensor, ...], List[Tensor]], state_steps: Union[Tuple[Tensor, ...], List[Tensor]], *, lr: Tensor, beta1: _float, beta2: _float, weight_decay: _float, eps: _float, amsgrad: _bool, maximize: _bool, grad_scale: Optional[Tensor] = None, found_inf: Optional[Tensor] = None) -> None: ... +@overload +def _fused_adam_(self: Union[Tuple[Tensor, ...], List[Tensor]], grads: Union[Tuple[Tensor, ...], List[Tensor]], exp_avgs: Union[Tuple[Tensor, ...], List[Tensor]], exp_avg_sqs: Union[Tuple[Tensor, ...], List[Tensor]], max_exp_avg_sqs: Union[Tuple[Tensor, ...], List[Tensor]], state_steps: Union[Tuple[Tensor, ...], List[Tensor]], *, lr: _float, beta1: _float, beta2: _float, weight_decay: _float, eps: _float, amsgrad: _bool, maximize: _bool, grad_scale: Optional[Tensor] = None, found_inf: Optional[Tensor] = None) -> None: ... +@overload +def _fused_adamw_(self: Union[Tuple[Tensor, ...], List[Tensor]], grads: Union[Tuple[Tensor, ...], List[Tensor]], exp_avgs: Union[Tuple[Tensor, ...], List[Tensor]], exp_avg_sqs: Union[Tuple[Tensor, ...], List[Tensor]], max_exp_avg_sqs: Union[Tuple[Tensor, ...], List[Tensor]], state_steps: Union[Tuple[Tensor, ...], List[Tensor]], *, lr: Tensor, beta1: _float, beta2: _float, weight_decay: _float, eps: _float, amsgrad: _bool, maximize: _bool, grad_scale: Optional[Tensor] = None, found_inf: Optional[Tensor] = None) -> None: ... +@overload +def _fused_adamw_(self: Union[Tuple[Tensor, ...], List[Tensor]], grads: Union[Tuple[Tensor, ...], List[Tensor]], exp_avgs: Union[Tuple[Tensor, ...], List[Tensor]], exp_avg_sqs: Union[Tuple[Tensor, ...], List[Tensor]], max_exp_avg_sqs: Union[Tuple[Tensor, ...], List[Tensor]], state_steps: Union[Tuple[Tensor, ...], List[Tensor]], *, lr: _float, beta1: _float, beta2: _float, weight_decay: _float, eps: _float, amsgrad: _bool, maximize: _bool, grad_scale: Optional[Tensor] = None, found_inf: Optional[Tensor] = None) -> None: ... +def _fused_dropout(input: Tensor, p: _float, generator: Optional[Generator] = None) -> Tuple[Tensor, Tensor]: ... +def _fused_moving_avg_obs_fq_helper(input: Tensor, observer_on: Tensor, fake_quant_on: Tensor, running_min: Tensor, running_max: Tensor, scale: Tensor, zero_point: Tensor, averaging_const: _float, quant_min: _int, quant_max: _int, ch_axis: _int, per_row_fake_quant: _bool = False, symmetric_quant: _bool = False) -> torch.return_types._fused_moving_avg_obs_fq_helper: ... +def _fused_sdp_choice(query: Tensor, key: Tensor, value: Tensor, attn_mask: Optional[Tensor] = None, dropout_p: _float = 0.0, is_causal: _bool = False, *, scale: Optional[_float] = None) -> _int: ... +@overload +def _fused_sgd_(self: Union[Tuple[Tensor, ...], List[Tensor]], grads: Union[Tuple[Tensor, ...], List[Tensor]], momentum_buffer_list: Union[Tuple[Tensor, ...], List[Tensor]], *, weight_decay: _float, momentum: _float, lr: Tensor, dampening: _float, nesterov: _bool, maximize: _bool, is_first_step: _bool, grad_scale: Optional[Tensor] = None, found_inf: Optional[Tensor] = None) -> None: ... 
+@overload +def _fused_sgd_(self: Union[Tuple[Tensor, ...], List[Tensor]], grads: Union[Tuple[Tensor, ...], List[Tensor]], momentum_buffer_list: Union[Tuple[Tensor, ...], List[Tensor]], *, weight_decay: _float, momentum: _float, lr: _float, dampening: _float, nesterov: _bool, maximize: _bool, is_first_step: _bool, grad_scale: Optional[Tensor] = None, found_inf: Optional[Tensor] = None) -> None: ... +def _fw_primal_copy(input: Tensor, level: _int, *, out: Optional[Tensor] = None) -> Tensor: ... +def _grid_sampler_2d_cpu_fallback(input: Tensor, grid: Tensor, interpolation_mode: _int, padding_mode: _int, align_corners: _bool) -> Tensor: ... +def _has_compatible_shallow_copy_type(input: Tensor, from_: Tensor) -> _bool: ... +def _histogramdd_bin_edges(input: Tensor, bins: _size, *, range: Optional[Sequence[_float]] = None, weight: Optional[Tensor] = None, density: _bool = False) -> Tuple[Tensor, ...]: ... +def _histogramdd_from_bin_cts(input: Tensor, bins: _size, *, range: Optional[Sequence[_float]] = None, weight: Optional[Tensor] = None, density: _bool = False) -> Tensor: ... +def _histogramdd_from_bin_tensors(input: Tensor, bins: Union[Tuple[Tensor, ...], List[Tensor]], *, weight: Optional[Tensor] = None, density: _bool = False) -> Tensor: ... +def _index_put_impl_(input: Tensor, indices: Optional[Union[Tuple[Tensor, ...], List[Tensor]]], values: Tensor, accumulate: _bool = False, unsafe: _bool = False) -> Tensor: ... +def _indices_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def _int_mm(input: Tensor, mat2: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def _is_all_true(input: Tensor) -> Tensor: ... +def _is_any_true(input: Tensor) -> Tensor: ... +def _is_functional_tensor(t: Tensor) -> _bool: ... +def _is_zerotensor(input: Tensor) -> _bool: ... +def _lazy_clone(input: Tensor) -> Tensor: ... +def _linalg_check_errors(info: Tensor, api_name: str, *, is_matrix: _bool) -> None: ... +def _linalg_det(A: Tensor, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types._linalg_det: ... +def _linalg_eigh(A: Tensor, UPLO: str = "L", compute_v: _bool = True, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types._linalg_eigh: ... +def _linalg_slogdet(A: Tensor, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types._linalg_slogdet: ... +def _linalg_solve_ex(A: Tensor, B: Tensor, *, left: _bool = True, check_errors: _bool = False, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types._linalg_solve_ex: ... +def _linalg_svd(A: Tensor, full_matrices: _bool = False, compute_uv: _bool = True, *, driver: Optional[str] = None, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types._linalg_svd: ... +def _log_softmax(input: Tensor, dim: _int, half_to_float: _bool, *, out: Optional[Tensor] = None) -> Tensor: ... +def _log_softmax_backward_data(grad_output: Tensor, output: Tensor, dim: _int, input_dtype: _dtype, *, out: Optional[Tensor] = None) -> Tensor: ... +def _logcumsumexp(input: Tensor, dim: _int, *, out: Optional[Tensor] = None) -> Tensor: ... +def _lstm_mps(input: Tensor, hx: Union[Tuple[Tensor, ...], List[Tensor]], params: Union[Tuple[Tensor, ...], List[Tensor]], has_biases: _bool, num_layers: _int, dropout: _float, train: _bool, bidirectional: _bool, batch_first: _bool) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor, Tensor]: ... 
+def _lu_with_info(input: Tensor, pivot: _bool = True, check_errors: _bool = True) -> torch.return_types._lu_with_info: ... +def _make_dep_token(*, memory_format: Optional[memory_format] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +def _make_dual(primal: Tensor, tangent: Tensor, level: _int) -> Tensor: ... +def _make_dual_copy(primal: Tensor, tangent: Tensor, level: _int, *, out: Optional[Tensor] = None) -> Tensor: ... +def _make_per_channel_quantized_tensor(input: Tensor, scale: Tensor, zero_point: Tensor, axis: _int) -> Tensor: ... +def _make_per_tensor_quantized_tensor(input: Tensor, scale: _float, zero_point: _int) -> Tensor: ... +def _masked_scale(input: Tensor, mask: Tensor, scale: _float) -> Tensor: ... +def _masked_softmax(input: Tensor, mask: Tensor, dim: Optional[_int] = None, mask_type: Optional[_int] = None) -> Tensor: ... +def _mixed_dtypes_linear(input: Tensor, weight: Tensor, scale: Tensor, *, bias: Optional[Tensor] = None, activation: Optional[str] = None) -> Tensor: ... +def _mkldnn_reshape(input: Tensor, shape: _size) -> Tensor: ... +def _mkldnn_transpose(input: Tensor, dim0: _int, dim1: _int) -> Tensor: ... +def _mkldnn_transpose_(input: Tensor, dim0: _int, dim1: _int) -> Tensor: ... +def _mps_convolution(input: Tensor, weight: Tensor, bias: Optional[Tensor], padding: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt]) -> Tensor: ... +def _mps_convolution_transpose(input: Tensor, weight: Tensor, padding: Sequence[Union[_int, SymInt]], output_padding: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt]) -> Tensor: ... +@overload +def _native_batch_norm_legit(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], running_mean: Tensor, running_var: Tensor, training: _bool, momentum: _float, eps: _float, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> Tuple[Tensor, Tensor, Tensor]: ... +@overload +def _native_batch_norm_legit(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], training: _bool, momentum: _float, eps: _float, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> Tuple[Tensor, Tensor, Tensor]: ... +def _native_batch_norm_legit_no_training(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], running_mean: Tensor, running_var: Tensor, momentum: _float, eps: _float) -> Tuple[Tensor, Tensor, Tensor]: ... +def _native_multi_head_attention(query: Tensor, key: Tensor, value: Tensor, embed_dim: _int, num_head: _int, qkv_weight: Tensor, qkv_bias: Tensor, proj_weight: Tensor, proj_bias: Tensor, mask: Optional[Tensor] = None, need_weights: _bool = True, average_attn_weights: _bool = True, mask_type: Optional[_int] = None) -> Tuple[Tensor, Tensor]: ... +def _neg_view(input: Tensor) -> Tensor: ... +def _neg_view_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def _nested_from_padded(padded: Tensor, cpu_nested_shape_example: Tensor, fuse_transform_0213: _bool = False) -> Tensor: ... +def _nested_from_padded_and_nested_example(padded: Tensor, nt_example: Tensor) -> Tensor: ... +def _nested_get_jagged_dummy(any: Tensor) -> Tensor: ... +def _nested_get_lengths(input: Tensor) -> Tensor: ... 
+def _nested_get_offsets(input: Tensor) -> Tensor: ... +def _nested_get_ragged_idx(input: Tensor) -> _int: ... +def _nested_get_values(input: Tensor) -> Tensor: ... +def _nested_get_values_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def _nested_tensor_from_mask(t: Tensor, mask: Tensor, mask_check: _bool = True) -> Tensor: ... +def _nested_tensor_from_mask_left_aligned(t: Tensor, mask: Tensor) -> _bool: ... +def _nested_tensor_from_tensor_list(list: Union[Tuple[Tensor, ...], List[Tensor]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = None) -> Tensor: ... +def _nested_tensor_softmax_with_shape(input: Tensor, query: Tensor) -> Tensor: ... +def _nested_view_from_buffer(input: Tensor, nested_size: Tensor, nested_strides: Tensor, offsets: Tensor) -> Tensor: ... +def _nested_view_from_buffer_copy(input: Tensor, nested_size: Tensor, nested_strides: Tensor, offsets: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def _nested_view_from_jagged(input: Tensor, offsets: Tensor, dummy: Tensor, lengths: Optional[Tensor] = None, ragged_idx: _int = 1) -> Tensor: ... +def _nested_view_from_jagged_copy(input: Tensor, offsets: Tensor, dummy: Tensor, lengths: Optional[Tensor] = None, ragged_idx: _int = 1, *, out: Optional[Tensor] = None) -> Tensor: ... +def _nnpack_available() -> _bool: ... +def _nnpack_spatial_convolution(input: Tensor, weight: Tensor, bias: Optional[Tensor], padding: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]], stride: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1) -> Tensor: ... +def _pack_padded_sequence(input: Tensor, lengths: Tensor, batch_first: _bool) -> Tuple[Tensor, Tensor]: ... +def _pad_packed_sequence(data: Tensor, batch_sizes: Tensor, batch_first: _bool, padding_value: Union[Number, _complex], total_length: _int) -> Tuple[Tensor, Tensor]: ... +def _pin_memory(input: Tensor, device: Optional[Optional[DeviceLikeType]] = None) -> Tensor: ... +def _prelu_kernel(input: Tensor, weight: Tensor) -> Tensor: ... +def _print(s: str) -> None: ... +def _propagate_xla_data(input: Tensor, output: Tensor) -> None: ... +def _remove_batch_dim(input: Tensor, level: _int, batch_size: _int, out_dim: _int) -> Tensor: ... +def _reshape_alias_copy(input: Tensor, size: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor] = None) -> Tensor: ... +def _reshape_from_tensor(input: Tensor, shape: Tensor) -> Tensor: ... +def _resize_output_(input: Tensor, size: Sequence[Union[_int, SymInt]], device: Optional[DeviceLikeType]) -> Tensor: ... +def _rowwise_prune(weight: Tensor, mask: Tensor, compressed_indices_dtype: _dtype) -> Tuple[Tensor, Tensor]: ... +def _sample_dirichlet(input: Tensor, generator: Optional[Generator] = None) -> Tensor: ... +def _saturate_weight_to_fp16(weight: Tensor) -> Tensor: ... +def _scaled_dot_product_attention_math(query: Tensor, key: Tensor, value: Tensor, attn_mask: Optional[Tensor] = None, dropout_p: _float = 0.0, is_causal: _bool = False, dropout_mask: Optional[Tensor] = None, *, scale: Optional[_float] = None) -> Tuple[Tensor, Tensor]: ... +def _scaled_dot_product_cudnn_attention(query: Tensor, key: Tensor, value: Tensor, dropout_p: _float = 0.0, is_causal: _bool = False, return_debug_mask: _bool = False, *, scale: Optional[_float] = None) -> torch.return_types._scaled_dot_product_cudnn_attention: ... 
+def _scaled_dot_product_efficient_attention(query: Tensor, key: Tensor, value: Tensor, attn_bias: Optional[Tensor], compute_log_sumexp: _bool, dropout_p: _float = 0.0, is_causal: _bool = False, *, scale: Optional[_float] = None) -> torch.return_types._scaled_dot_product_efficient_attention: ... +def _scaled_dot_product_flash_attention(query: Tensor, key: Tensor, value: Tensor, dropout_p: _float = 0.0, is_causal: _bool = False, return_debug_mask: _bool = False, *, scale: Optional[_float] = None) -> torch.return_types._scaled_dot_product_flash_attention: ... +def _scaled_dot_product_flash_attention_for_cpu(query: Tensor, key: Tensor, value: Tensor, dropout_p: _float = 0.0, is_causal: _bool = False, *, attn_mask: Optional[Tensor] = None, scale: Optional[_float] = None) -> torch.return_types._scaled_dot_product_flash_attention_for_cpu: ... +def _scaled_mm(input: Tensor, mat2: Tensor, *, bias: Optional[Tensor] = None, out_dtype: Optional[_dtype] = None, scale_a: Optional[Tensor] = None, scale_b: Optional[Tensor] = None, scale_result: Optional[Tensor] = None, use_fast_accum: _bool = False, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> Tuple[Tensor, Tensor]: ... +def _shape_as_tensor(input: Tensor) -> Tensor: ... +def _sobol_engine_draw(quasi: Tensor, n: _int, sobolstate: Tensor, dimension: _int, num_generated: _int, dtype: Optional[_dtype]) -> Tuple[Tensor, Tensor]: ... +def _sobol_engine_ff_(input: Tensor, n: _int, sobolstate: Tensor, dimension: _int, num_generated: _int) -> Tensor: ... +def _sobol_engine_initialize_state_(input: Tensor, dimension: _int) -> Tensor: ... +def _sobol_engine_scramble_(input: Tensor, ltm: Tensor, dimension: _int) -> Tensor: ... +def _softmax(input: Tensor, dim: _int, half_to_float: _bool, *, out: Optional[Tensor] = None) -> Tensor: ... +def _softmax_backward_data(grad_output: Tensor, output: Tensor, dim: _int, input_dtype: _dtype, *, grad_input: Optional[Tensor] = None) -> Tensor: ... +def _sparse_broadcast_to(input: Tensor, size: _size) -> Tensor: ... +def _sparse_broadcast_to_copy(input: Tensor, size: _size, *, out: Optional[Tensor] = None) -> Tensor: ... +def _sparse_csr_prod(input: Tensor, dim: Union[_int, _size], keepdim: _bool = False, *, dtype: Optional[_dtype] = None) -> Tensor: ... +def _sparse_csr_sum(input: Tensor, dim: Union[_int, _size], keepdim: _bool = False, *, dtype: Optional[_dtype] = None) -> Tensor: ... +def _sparse_log_softmax_backward_data(grad_output: Tensor, output: Tensor, dim: _int, input: Tensor) -> Tensor: ... +def _sparse_semi_structured_linear(input: Tensor, weight: Tensor, meta: Tensor, *, bias: Optional[Tensor] = None, activation: Optional[str] = None, out_dtype: Optional[_dtype] = None) -> Tensor: ... +def _sparse_softmax_backward_data(grad_output: Tensor, output: Tensor, dim: _int, input: Tensor) -> Tensor: ... +def _sparse_sparse_matmul(input: Tensor, other: Tensor) -> Tensor: ... +@overload +def _sparse_sum(input: Tensor) -> Tensor: ... +@overload +def _sparse_sum(input: Tensor, *, dtype: _dtype) -> Tensor: ... +@overload +def _sparse_sum(input: Tensor, dim: Union[_int, _size]) -> Tensor: ... +@overload +def _sparse_sum(input: Tensor, dim: Union[_int, _size], *, dtype: _dtype) -> Tensor: ... +def _stack(tensors: Union[Tuple[Tensor, ...], List[Tensor]], dim: _int = 0, *, out: Optional[Tensor] = None) -> Tensor: ... +def _standard_gamma(input: Tensor, generator: Optional[Generator] = None) -> Tensor: ... +def _standard_gamma_grad(input: Tensor, output: Tensor) -> Tensor: ... 
+def _sync(t: Tensor) -> None: ... +@overload +def _test_autograd_multiple_dispatch(input: Tensor) -> Tensor: ... +@overload +def _test_autograd_multiple_dispatch(input: Tensor, b: _bool) -> Tensor: ... +def _test_autograd_multiple_dispatch_view(input: Tensor) -> Tensor: ... +def _test_autograd_multiple_dispatch_view_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def _test_check_tensor(input: Tensor) -> Tensor: ... +def _test_functorch_fallback(input: Tensor, other: Tensor) -> Tensor: ... +def _test_parallel_materialize(input: Tensor, num_parallel: _int, skip_first: _bool = False) -> Tensor: ... +def _test_serialization_subcmul(input: Tensor, other: Tensor, alpha: Union[Number, _complex] = 1) -> Tensor: ... +def _to_cpu(tensors: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: ... +def _to_functional_tensor(t: Tensor) -> Tensor: ... +def _to_sparse_semi_structured(dense: Tensor) -> Tuple[Tensor, Tensor]: ... +def _transform_bias_rescale_qkv(qkv: Tensor, qkv_bias: Tensor, num_heads: _int) -> Tuple[Tensor, Tensor, Tensor]: ... +def _transformer_encoder_layer_fwd(src: Tensor, embed_dim: _int, num_heads: _int, qkv_weight: Tensor, qkv_bias: Tensor, proj_weight: Tensor, proj_bias: Tensor, use_gelu: _bool, norm_first: _bool, eps: _float, norm_weight_1: Tensor, norm_bias_1: Tensor, norm_weight_2: Tensor, norm_bias_2: Tensor, ffn_weight_1: Tensor, ffn_bias_1: Tensor, ffn_weight_2: Tensor, ffn_bias_2: Tensor, mask: Optional[Tensor] = None, mask_type: Optional[_int] = None) -> Tensor: ... +def _trilinear(i1: Tensor, i2: Tensor, i3: Tensor, expand1: _size, expand2: _size, expand3: _size, sumdim: _size, unroll_dim: _int = 1) -> Tensor: ... +def _triton_multi_head_attention(query: Tensor, key: Tensor, value: Tensor, embed_dim: _int, num_head: _int, qkv_weight: Tensor, qkv_bias: Tensor, proj_weight: Tensor, proj_bias: Tensor, mask: Optional[Tensor] = None) -> Tensor: ... +def _triton_scaled_dot_attention(q: Tensor, k: Tensor, v: Tensor, dropout_p: _float = 0.0) -> Tensor: ... +def _unique(input: Tensor, sorted: _bool = True, return_inverse: _bool = False) -> Tuple[Tensor, Tensor]: ... +def _unique2(input: Tensor, sorted: _bool = True, return_inverse: _bool = False, return_counts: _bool = False) -> Tuple[Tensor, Tensor, Tensor]: ... +def _unpack_dual(dual: Tensor, level: _int) -> torch.return_types._unpack_dual: ... +def _unsafe_index(input: Tensor, indices: Optional[Union[Tuple[Tensor, ...], List[Tensor]]]) -> Tensor: ... +def _unsafe_index_put(input: Tensor, indices: Optional[Union[Tuple[Tensor, ...], List[Tensor]]], values: Tensor, accumulate: _bool = False) -> Tensor: ... +@overload +def _use_cudnn_ctc_loss(log_probs: Tensor, targets: Tensor, input_lengths: Tensor, target_lengths: Tensor, blank: _int) -> _bool: ... +@overload +def _use_cudnn_ctc_loss(log_probs: Tensor, targets: Tensor, input_lengths: _size, target_lengths: _size, blank: _int) -> _bool: ... +def _use_cudnn_rnn_flatten_weight() -> _bool: ... +def _validate_compressed_sparse_indices(is_crow: _bool, compressed_idx: Tensor, plain_idx: Tensor, cdim: _int, dim: _int, nnz: _int) -> None: ... +def _validate_sparse_bsc_tensor_args(ccol_indices: Tensor, row_indices: Tensor, values: Tensor, size: _size) -> None: ... +def _validate_sparse_bsr_tensor_args(crow_indices: Tensor, col_indices: Tensor, values: Tensor, size: _size) -> None: ... +def _validate_sparse_compressed_tensor_args(compressed_indices: Tensor, plain_indices: Tensor, values: Tensor, size: _size, layout: _layout) -> None: ... 
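+# Hedged sketch: `_unique2` above backs the public torch.unique when inverse
+# indices and counts are both requested; small example of the public API:
+#
+#     >>> x = torch.tensor([1, 3, 2, 3, 1])
+#     >>> values, inverse, counts = torch.unique(x, sorted=True, return_inverse=True, return_counts=True)
+#     >>> values
+#     tensor([1, 2, 3])
+#     >>> inverse                           # index of each element of x in `values`
+#     tensor([0, 2, 1, 2, 0])
+#     >>> counts
+#     tensor([2, 1, 2])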
+def _validate_sparse_coo_tensor_args(indices: Tensor, values: Tensor, size: _size, is_coalesced: Optional[_bool] = None) -> None: ... +def _validate_sparse_csc_tensor_args(ccol_indices: Tensor, row_indices: Tensor, values: Tensor, size: _size) -> None: ... +def _validate_sparse_csr_tensor_args(crow_indices: Tensor, col_indices: Tensor, values: Tensor, size: _size) -> None: ... +def _values_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def _weight_int4pack_mm(input: Tensor, mat2: Tensor, qGroupSize: _int, qScaleAndZeros: Tensor) -> Tensor: ... +def _weight_int8pack_mm(input: Tensor, mat2: Tensor, scales: Tensor) -> Tensor: ... +def _weight_norm(v: Tensor, g: Tensor, dim: _int = 0) -> Tensor: ... +def _weight_norm_interface(v: Tensor, g: Tensor, dim: _int = 0) -> Tuple[Tensor, Tensor]: ... +def abs(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + abs(input, *, out=None) -> Tensor + + Computes the absolute value of each element in :attr:`input`. + + .. math:: + \text{out}_{i} = |\text{input}_{i}| + + Args: + input (Tensor): the input tensor. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> torch.abs(torch.tensor([-1, -2, 3])) + tensor([ 1, 2, 3]) + """ + ... +def abs_(input: Tensor) -> Tensor: ... +def absolute(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + absolute(input, *, out=None) -> Tensor + + Alias for :func:`torch.abs` + """ + ... +def acos(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + acos(input, *, out=None) -> Tensor + + Computes the inverse cosine of each element in :attr:`input`. + + .. math:: + \text{out}_{i} = \cos^{-1}(\text{input}_{i}) + + Args: + input (Tensor): the input tensor. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randn(4) + >>> a + tensor([ 0.3348, -0.5889, 0.2005, -0.1584]) + >>> torch.acos(a) + tensor([ 1.2294, 2.2004, 1.3690, 1.7298]) + """ + ... +def acos_(input: Tensor) -> Tensor: ... +def acosh(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + acosh(input, *, out=None) -> Tensor + + Returns a new tensor with the inverse hyperbolic cosine of the elements of :attr:`input`. + + .. math:: + \text{out}_{i} = \cosh^{-1}(\text{input}_{i}) + + Note: + The domain of the inverse hyperbolic cosine is `[1, inf)` and values outside this range + will be mapped to ``NaN``, except for `+ INF` for which the output is mapped to `+ INF`. + + Args: + input (Tensor): the input tensor. + + Keyword arguments: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randn(4).uniform_(1, 2) + >>> a + tensor([ 1.3192, 1.9915, 1.9674, 1.7151 ]) + >>> torch.acosh(a) + tensor([ 0.7791, 1.3120, 1.2979, 1.1341 ]) + """ + ... +def acosh_(input: Tensor) -> Tensor: ... +def adaptive_avg_pool1d(input: Tensor, output_size: Union[_int, _size]) -> Tensor: ... +def adaptive_max_pool1d(input: Tensor, output_size: Union[_int, _size]) -> Tuple[Tensor, Tensor]: ... +@overload +def add(input: Union[Tensor, Number, _complex], other: Union[Tensor, Number, _complex], *, alpha: Optional[Union[Number, _complex]] = 1, out: Optional[Tensor] = None) -> Tensor: + r""" + add(input, other, *, alpha=1, out=None) -> Tensor + + Adds :attr:`other`, scaled by :attr:`alpha`, to :attr:`input`. + + .. math:: + \text{{out}}_i = \text{{input}}_i + \text{{alpha}} \times \text{{other}}_i + + + Supports :ref:`broadcasting to a common shape `, + :ref:`type promotion `, and integer, float, and complex inputs. 
+ + Args: + input (Tensor): the input tensor. + other (Tensor or Number): the tensor or number to add to :attr:`input`. + + Keyword arguments: + alpha (Number): the multiplier for :attr:`other`. + out (Tensor, optional): the output tensor. + + Examples:: + + >>> a = torch.randn(4) + >>> a + tensor([ 0.0202, 1.0985, 1.3506, -0.6056]) + >>> torch.add(a, 20) + tensor([ 20.0202, 21.0985, 21.3506, 19.3944]) + + >>> b = torch.randn(4) + >>> b + tensor([-0.9732, -0.3497, 0.6245, 0.4022]) + >>> c = torch.randn(4, 1) + >>> c + tensor([[ 0.3743], + [-1.7724], + [-0.5811], + [-0.8017]]) + >>> torch.add(b, c, alpha=10) + tensor([[ 2.7695, 3.3930, 4.3672, 4.1450], + [-18.6971, -18.0736, -17.0994, -17.3216], + [ -6.7845, -6.1610, -5.1868, -5.4090], + [ -8.9902, -8.3667, -7.3925, -7.6147]]) + """ + ... +@overload +def add(self: Tensor, alpha: Union[Number, _complex], other: Tensor) -> Tensor: + r""" + add(input, other, *, alpha=1, out=None) -> Tensor + + Adds :attr:`other`, scaled by :attr:`alpha`, to :attr:`input`. + + .. math:: + \text{{out}}_i = \text{{input}}_i + \text{{alpha}} \times \text{{other}}_i + + + Supports :ref:`broadcasting to a common shape `, + :ref:`type promotion `, and integer, float, and complex inputs. + + Args: + input (Tensor): the input tensor. + other (Tensor or Number): the tensor or number to add to :attr:`input`. + + Keyword arguments: + alpha (Number): the multiplier for :attr:`other`. + out (Tensor, optional): the output tensor. + + Examples:: + + >>> a = torch.randn(4) + >>> a + tensor([ 0.0202, 1.0985, 1.3506, -0.6056]) + >>> torch.add(a, 20) + tensor([ 20.0202, 21.0985, 21.3506, 19.3944]) + + >>> b = torch.randn(4) + >>> b + tensor([-0.9732, -0.3497, 0.6245, 0.4022]) + >>> c = torch.randn(4, 1) + >>> c + tensor([[ 0.3743], + [-1.7724], + [-0.5811], + [-0.8017]]) + >>> torch.add(b, c, alpha=10) + tensor([[ 2.7695, 3.3930, 4.3672, 4.1450], + [-18.6971, -18.0736, -17.0994, -17.3216], + [ -6.7845, -6.1610, -5.1868, -5.4090], + [ -8.9902, -8.3667, -7.3925, -7.6147]]) + """ + ... +@overload +def add(self: Tensor, alpha: Union[Number, _complex], other: Tensor, *, out: Tensor) -> Tensor: + r""" + add(input, other, *, alpha=1, out=None) -> Tensor + + Adds :attr:`other`, scaled by :attr:`alpha`, to :attr:`input`. + + .. math:: + \text{{out}}_i = \text{{input}}_i + \text{{alpha}} \times \text{{other}}_i + + + Supports :ref:`broadcasting to a common shape `, + :ref:`type promotion `, and integer, float, and complex inputs. + + Args: + input (Tensor): the input tensor. + other (Tensor or Number): the tensor or number to add to :attr:`input`. + + Keyword arguments: + alpha (Number): the multiplier for :attr:`other`. + out (Tensor, optional): the output tensor. + + Examples:: + + >>> a = torch.randn(4) + >>> a + tensor([ 0.0202, 1.0985, 1.3506, -0.6056]) + >>> torch.add(a, 20) + tensor([ 20.0202, 21.0985, 21.3506, 19.3944]) + + >>> b = torch.randn(4) + >>> b + tensor([-0.9732, -0.3497, 0.6245, 0.4022]) + >>> c = torch.randn(4, 1) + >>> c + tensor([[ 0.3743], + [-1.7724], + [-0.5811], + [-0.8017]]) + >>> torch.add(b, c, alpha=10) + tensor([[ 2.7695, 3.3930, 4.3672, 4.1450], + [-18.6971, -18.0736, -17.0994, -17.3216], + [ -6.7845, -6.1610, -5.1868, -5.4090], + [ -8.9902, -8.3667, -7.3925, -7.6147]]) + """ + ... 
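+# Hedged sketch: per the torch.add docstring above, `alpha` scales `other` before
+# the broadcasting elementwise sum; equivalence check (illustrative only):
+#
+#     >>> b = torch.randn(4)
+#     >>> c = torch.randn(4, 1)
+#     >>> torch.allclose(torch.add(b, c, alpha=10), b + 10 * c)
+#     True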
+@overload +def addbmm(beta: Union[Number, _complex], self: Tensor, alpha: Union[Number, _complex], batch1: Tensor, batch2: Tensor) -> Tensor: + r""" + addbmm(input, batch1, batch2, *, beta=1, alpha=1, out=None) -> Tensor + + Performs a batch matrix-matrix product of matrices stored + in :attr:`batch1` and :attr:`batch2`, + with a reduced add step (all matrix multiplications get accumulated + along the first dimension). + :attr:`input` is added to the final result. + + :attr:`batch1` and :attr:`batch2` must be 3-D tensors each containing the + same number of matrices. + + If :attr:`batch1` is a :math:`(b \times n \times m)` tensor, :attr:`batch2` is a + :math:`(b \times m \times p)` tensor, :attr:`input` must be + :ref:`broadcastable ` with a :math:`(n \times p)` tensor + and :attr:`out` will be a :math:`(n \times p)` tensor. + + .. math:: + out = \beta\ \text{input} + \alpha\ (\sum_{i=0}^{b-1} \text{batch1}_i \mathbin{@} \text{batch2}_i) + + If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in + it will not be propagated. + + For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and :attr:`alpha` + must be real numbers, otherwise they should be integers. + + This operator supports :ref:`TensorFloat32`. + + On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision` for backward. + + Args: + batch1 (Tensor): the first batch of matrices to be multiplied + batch2 (Tensor): the second batch of matrices to be multiplied + + Keyword args: + beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`) + input (Tensor): matrix to be added + alpha (Number, optional): multiplier for `batch1 @ batch2` (:math:`\alpha`) + out (Tensor, optional): the output tensor. + + Example:: + + >>> M = torch.randn(3, 5) + >>> batch1 = torch.randn(10, 3, 4) + >>> batch2 = torch.randn(10, 4, 5) + >>> torch.addbmm(M, batch1, batch2) + tensor([[ 6.6311, 0.0503, 6.9768, -12.0362, -2.1653], + [ -4.8185, -1.4255, -6.6760, 8.9453, 2.5743], + [ -3.8202, 4.3691, 1.0943, -1.1109, 5.4730]]) + """ + ... +@overload +def addbmm(beta: Union[Number, _complex], self: Tensor, alpha: Union[Number, _complex], batch1: Tensor, batch2: Tensor, *, out: Tensor) -> Tensor: + r""" + addbmm(input, batch1, batch2, *, beta=1, alpha=1, out=None) -> Tensor + + Performs a batch matrix-matrix product of matrices stored + in :attr:`batch1` and :attr:`batch2`, + with a reduced add step (all matrix multiplications get accumulated + along the first dimension). + :attr:`input` is added to the final result. + + :attr:`batch1` and :attr:`batch2` must be 3-D tensors each containing the + same number of matrices. + + If :attr:`batch1` is a :math:`(b \times n \times m)` tensor, :attr:`batch2` is a + :math:`(b \times m \times p)` tensor, :attr:`input` must be + :ref:`broadcastable ` with a :math:`(n \times p)` tensor + and :attr:`out` will be a :math:`(n \times p)` tensor. + + .. math:: + out = \beta\ \text{input} + \alpha\ (\sum_{i=0}^{b-1} \text{batch1}_i \mathbin{@} \text{batch2}_i) + + If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in + it will not be propagated. + + For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and :attr:`alpha` + must be real numbers, otherwise they should be integers. + + This operator supports :ref:`TensorFloat32`. + + On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision` for backward. 
+ + Args: + batch1 (Tensor): the first batch of matrices to be multiplied + batch2 (Tensor): the second batch of matrices to be multiplied + + Keyword args: + beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`) + input (Tensor): matrix to be added + alpha (Number, optional): multiplier for `batch1 @ batch2` (:math:`\alpha`) + out (Tensor, optional): the output tensor. + + Example:: + + >>> M = torch.randn(3, 5) + >>> batch1 = torch.randn(10, 3, 4) + >>> batch2 = torch.randn(10, 4, 5) + >>> torch.addbmm(M, batch1, batch2) + tensor([[ 6.6311, 0.0503, 6.9768, -12.0362, -2.1653], + [ -4.8185, -1.4255, -6.6760, 8.9453, 2.5743], + [ -3.8202, 4.3691, 1.0943, -1.1109, 5.4730]]) + """ + ... +@overload +def addbmm(input: Tensor, batch1: Tensor, batch2: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1, out: Optional[Tensor] = None) -> Tensor: + r""" + addbmm(input, batch1, batch2, *, beta=1, alpha=1, out=None) -> Tensor + + Performs a batch matrix-matrix product of matrices stored + in :attr:`batch1` and :attr:`batch2`, + with a reduced add step (all matrix multiplications get accumulated + along the first dimension). + :attr:`input` is added to the final result. + + :attr:`batch1` and :attr:`batch2` must be 3-D tensors each containing the + same number of matrices. + + If :attr:`batch1` is a :math:`(b \times n \times m)` tensor, :attr:`batch2` is a + :math:`(b \times m \times p)` tensor, :attr:`input` must be + :ref:`broadcastable ` with a :math:`(n \times p)` tensor + and :attr:`out` will be a :math:`(n \times p)` tensor. + + .. math:: + out = \beta\ \text{input} + \alpha\ (\sum_{i=0}^{b-1} \text{batch1}_i \mathbin{@} \text{batch2}_i) + + If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in + it will not be propagated. + + For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and :attr:`alpha` + must be real numbers, otherwise they should be integers. + + This operator supports :ref:`TensorFloat32`. + + On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision` for backward. + + Args: + batch1 (Tensor): the first batch of matrices to be multiplied + batch2 (Tensor): the second batch of matrices to be multiplied + + Keyword args: + beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`) + input (Tensor): matrix to be added + alpha (Number, optional): multiplier for `batch1 @ batch2` (:math:`\alpha`) + out (Tensor, optional): the output tensor. + + Example:: + + >>> M = torch.randn(3, 5) + >>> batch1 = torch.randn(10, 3, 4) + >>> batch2 = torch.randn(10, 4, 5) + >>> torch.addbmm(M, batch1, batch2) + tensor([[ 6.6311, 0.0503, 6.9768, -12.0362, -2.1653], + [ -4.8185, -1.4255, -6.6760, 8.9453, 2.5743], + [ -3.8202, 4.3691, 1.0943, -1.1109, 5.4730]]) + """ + ... +@overload +def addbmm(beta: Union[Number, _complex], self: Tensor, batch1: Tensor, batch2: Tensor) -> Tensor: + r""" + addbmm(input, batch1, batch2, *, beta=1, alpha=1, out=None) -> Tensor + + Performs a batch matrix-matrix product of matrices stored + in :attr:`batch1` and :attr:`batch2`, + with a reduced add step (all matrix multiplications get accumulated + along the first dimension). + :attr:`input` is added to the final result. + + :attr:`batch1` and :attr:`batch2` must be 3-D tensors each containing the + same number of matrices. 
+ + If :attr:`batch1` is a :math:`(b \times n \times m)` tensor, :attr:`batch2` is a + :math:`(b \times m \times p)` tensor, :attr:`input` must be + :ref:`broadcastable ` with a :math:`(n \times p)` tensor + and :attr:`out` will be a :math:`(n \times p)` tensor. + + .. math:: + out = \beta\ \text{input} + \alpha\ (\sum_{i=0}^{b-1} \text{batch1}_i \mathbin{@} \text{batch2}_i) + + If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in + it will not be propagated. + + For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and :attr:`alpha` + must be real numbers, otherwise they should be integers. + + This operator supports :ref:`TensorFloat32`. + + On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision` for backward. + + Args: + batch1 (Tensor): the first batch of matrices to be multiplied + batch2 (Tensor): the second batch of matrices to be multiplied + + Keyword args: + beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`) + input (Tensor): matrix to be added + alpha (Number, optional): multiplier for `batch1 @ batch2` (:math:`\alpha`) + out (Tensor, optional): the output tensor. + + Example:: + + >>> M = torch.randn(3, 5) + >>> batch1 = torch.randn(10, 3, 4) + >>> batch2 = torch.randn(10, 4, 5) + >>> torch.addbmm(M, batch1, batch2) + tensor([[ 6.6311, 0.0503, 6.9768, -12.0362, -2.1653], + [ -4.8185, -1.4255, -6.6760, 8.9453, 2.5743], + [ -3.8202, 4.3691, 1.0943, -1.1109, 5.4730]]) + """ + ... +@overload +def addbmm(beta: Union[Number, _complex], self: Tensor, batch1: Tensor, batch2: Tensor, *, out: Tensor) -> Tensor: + r""" + addbmm(input, batch1, batch2, *, beta=1, alpha=1, out=None) -> Tensor + + Performs a batch matrix-matrix product of matrices stored + in :attr:`batch1` and :attr:`batch2`, + with a reduced add step (all matrix multiplications get accumulated + along the first dimension). + :attr:`input` is added to the final result. + + :attr:`batch1` and :attr:`batch2` must be 3-D tensors each containing the + same number of matrices. + + If :attr:`batch1` is a :math:`(b \times n \times m)` tensor, :attr:`batch2` is a + :math:`(b \times m \times p)` tensor, :attr:`input` must be + :ref:`broadcastable ` with a :math:`(n \times p)` tensor + and :attr:`out` will be a :math:`(n \times p)` tensor. + + .. math:: + out = \beta\ \text{input} + \alpha\ (\sum_{i=0}^{b-1} \text{batch1}_i \mathbin{@} \text{batch2}_i) + + If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in + it will not be propagated. + + For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and :attr:`alpha` + must be real numbers, otherwise they should be integers. + + This operator supports :ref:`TensorFloat32`. + + On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision` for backward. + + Args: + batch1 (Tensor): the first batch of matrices to be multiplied + batch2 (Tensor): the second batch of matrices to be multiplied + + Keyword args: + beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`) + input (Tensor): matrix to be added + alpha (Number, optional): multiplier for `batch1 @ batch2` (:math:`\alpha`) + out (Tensor, optional): the output tensor. 
+ + Example:: + + >>> M = torch.randn(3, 5) + >>> batch1 = torch.randn(10, 3, 4) + >>> batch2 = torch.randn(10, 4, 5) + >>> torch.addbmm(M, batch1, batch2) + tensor([[ 6.6311, 0.0503, 6.9768, -12.0362, -2.1653], + [ -4.8185, -1.4255, -6.6760, 8.9453, 2.5743], + [ -3.8202, 4.3691, 1.0943, -1.1109, 5.4730]]) + """ + ... +@overload +def addcdiv(self: Tensor, value: Union[Number, _complex], tensor1: Tensor, tensor2: Tensor) -> Tensor: + r""" + addcdiv(input, tensor1, tensor2, *, value=1, out=None) -> Tensor + + Performs the element-wise division of :attr:`tensor1` by :attr:`tensor2`, + multiplies the result by the scalar :attr:`value` and adds it to :attr:`input`. + + .. warning:: + Integer division with addcdiv is no longer supported, and in a future + release addcdiv will perform a true division of tensor1 and tensor2. + The historic addcdiv behavior can be implemented as + (input + value * torch.trunc(tensor1 / tensor2)).to(input.dtype) + for integer inputs and as (input + value * tensor1 / tensor2) for float inputs. + The future addcdiv behavior is just the latter implementation: + (input + value * tensor1 / tensor2), for all dtypes. + + .. math:: + \text{out}_i = \text{input}_i + \text{value} \times \frac{\text{tensor1}_i}{\text{tensor2}_i} + + + The shapes of :attr:`input`, :attr:`tensor1`, and :attr:`tensor2` must be + :ref:`broadcastable `. + + For inputs of type `FloatTensor` or `DoubleTensor`, :attr:`value` must be + a real number, otherwise an integer. + + Args: + input (Tensor): the tensor to be added + tensor1 (Tensor): the numerator tensor + tensor2 (Tensor): the denominator tensor + + Keyword args: + value (Number, optional): multiplier for :math:`\text{tensor1} / \text{tensor2}` + out (Tensor, optional): the output tensor. + + Example:: + + >>> t = torch.randn(1, 3) + >>> t1 = torch.randn(3, 1) + >>> t2 = torch.randn(1, 3) + >>> torch.addcdiv(t, t1, t2, value=0.1) + tensor([[-0.2312, -3.6496, 0.1312], + [-1.0428, 3.4292, -0.1030], + [-0.5369, -0.9829, 0.0430]]) + """ + ... +@overload +def addcdiv(self: Tensor, value: Union[Number, _complex], tensor1: Tensor, tensor2: Tensor, *, out: Tensor) -> Tensor: + r""" + addcdiv(input, tensor1, tensor2, *, value=1, out=None) -> Tensor + + Performs the element-wise division of :attr:`tensor1` by :attr:`tensor2`, + multiplies the result by the scalar :attr:`value` and adds it to :attr:`input`. + + .. warning:: + Integer division with addcdiv is no longer supported, and in a future + release addcdiv will perform a true division of tensor1 and tensor2. + The historic addcdiv behavior can be implemented as + (input + value * torch.trunc(tensor1 / tensor2)).to(input.dtype) + for integer inputs and as (input + value * tensor1 / tensor2) for float inputs. + The future addcdiv behavior is just the latter implementation: + (input + value * tensor1 / tensor2), for all dtypes. + + .. math:: + \text{out}_i = \text{input}_i + \text{value} \times \frac{\text{tensor1}_i}{\text{tensor2}_i} + + + The shapes of :attr:`input`, :attr:`tensor1`, and :attr:`tensor2` must be + :ref:`broadcastable `. + + For inputs of type `FloatTensor` or `DoubleTensor`, :attr:`value` must be + a real number, otherwise an integer. + + Args: + input (Tensor): the tensor to be added + tensor1 (Tensor): the numerator tensor + tensor2 (Tensor): the denominator tensor + + Keyword args: + value (Number, optional): multiplier for :math:`\text{tensor1} / \text{tensor2}` + out (Tensor, optional): the output tensor. 
+ + Example:: + + >>> t = torch.randn(1, 3) + >>> t1 = torch.randn(3, 1) + >>> t2 = torch.randn(1, 3) + >>> torch.addcdiv(t, t1, t2, value=0.1) + tensor([[-0.2312, -3.6496, 0.1312], + [-1.0428, 3.4292, -0.1030], + [-0.5369, -0.9829, 0.0430]]) + """ + ... +@overload +def addcdiv(input: Tensor, tensor1: Tensor, tensor2: Tensor, *, value: Union[Number, _complex] = 1, out: Optional[Tensor] = None) -> Tensor: + r""" + addcdiv(input, tensor1, tensor2, *, value=1, out=None) -> Tensor + + Performs the element-wise division of :attr:`tensor1` by :attr:`tensor2`, + multiplies the result by the scalar :attr:`value` and adds it to :attr:`input`. + + .. warning:: + Integer division with addcdiv is no longer supported, and in a future + release addcdiv will perform a true division of tensor1 and tensor2. + The historic addcdiv behavior can be implemented as + (input + value * torch.trunc(tensor1 / tensor2)).to(input.dtype) + for integer inputs and as (input + value * tensor1 / tensor2) for float inputs. + The future addcdiv behavior is just the latter implementation: + (input + value * tensor1 / tensor2), for all dtypes. + + .. math:: + \text{out}_i = \text{input}_i + \text{value} \times \frac{\text{tensor1}_i}{\text{tensor2}_i} + + + The shapes of :attr:`input`, :attr:`tensor1`, and :attr:`tensor2` must be + :ref:`broadcastable `. + + For inputs of type `FloatTensor` or `DoubleTensor`, :attr:`value` must be + a real number, otherwise an integer. + + Args: + input (Tensor): the tensor to be added + tensor1 (Tensor): the numerator tensor + tensor2 (Tensor): the denominator tensor + + Keyword args: + value (Number, optional): multiplier for :math:`\text{tensor1} / \text{tensor2}` + out (Tensor, optional): the output tensor. + + Example:: + + >>> t = torch.randn(1, 3) + >>> t1 = torch.randn(3, 1) + >>> t2 = torch.randn(1, 3) + >>> torch.addcdiv(t, t1, t2, value=0.1) + tensor([[-0.2312, -3.6496, 0.1312], + [-1.0428, 3.4292, -0.1030], + [-0.5369, -0.9829, 0.0430]]) + """ + ... +@overload +def addcmul(self: Tensor, value: Union[Number, _complex], tensor1: Tensor, tensor2: Tensor) -> Tensor: + r""" + addcmul(input, tensor1, tensor2, *, value=1, out=None) -> Tensor + + Performs the element-wise multiplication of :attr:`tensor1` + by :attr:`tensor2`, multiplies the result by the scalar :attr:`value` + and adds it to :attr:`input`. + + .. math:: + \text{out}_i = \text{input}_i + \text{value} \times \text{tensor1}_i \times \text{tensor2}_i + + The shapes of :attr:`tensor`, :attr:`tensor1`, and :attr:`tensor2` must be + :ref:`broadcastable `. + + For inputs of type `FloatTensor` or `DoubleTensor`, :attr:`value` must be + a real number, otherwise an integer. + + Args: + input (Tensor): the tensor to be added + tensor1 (Tensor): the tensor to be multiplied + tensor2 (Tensor): the tensor to be multiplied + + Keyword args: + value (Number, optional): multiplier for :math:`tensor1 .* tensor2` + out (Tensor, optional): the output tensor. + + Example:: + + >>> t = torch.randn(1, 3) + >>> t1 = torch.randn(3, 1) + >>> t2 = torch.randn(1, 3) + >>> torch.addcmul(t, t1, t2, value=0.1) + tensor([[-0.8635, -0.6391, 1.6174], + [-0.7617, -0.5879, 1.7388], + [-0.8353, -0.6249, 1.6511]]) + """ + ... 
+@overload +def addcmul(self: Tensor, value: Union[Number, _complex], tensor1: Tensor, tensor2: Tensor, *, out: Tensor) -> Tensor: + r""" + addcmul(input, tensor1, tensor2, *, value=1, out=None) -> Tensor + + Performs the element-wise multiplication of :attr:`tensor1` + by :attr:`tensor2`, multiplies the result by the scalar :attr:`value` + and adds it to :attr:`input`. + + .. math:: + \text{out}_i = \text{input}_i + \text{value} \times \text{tensor1}_i \times \text{tensor2}_i + + The shapes of :attr:`tensor`, :attr:`tensor1`, and :attr:`tensor2` must be + :ref:`broadcastable `. + + For inputs of type `FloatTensor` or `DoubleTensor`, :attr:`value` must be + a real number, otherwise an integer. + + Args: + input (Tensor): the tensor to be added + tensor1 (Tensor): the tensor to be multiplied + tensor2 (Tensor): the tensor to be multiplied + + Keyword args: + value (Number, optional): multiplier for :math:`tensor1 .* tensor2` + out (Tensor, optional): the output tensor. + + Example:: + + >>> t = torch.randn(1, 3) + >>> t1 = torch.randn(3, 1) + >>> t2 = torch.randn(1, 3) + >>> torch.addcmul(t, t1, t2, value=0.1) + tensor([[-0.8635, -0.6391, 1.6174], + [-0.7617, -0.5879, 1.7388], + [-0.8353, -0.6249, 1.6511]]) + """ + ... +@overload +def addcmul(input: Tensor, tensor1: Tensor, tensor2: Tensor, *, value: Union[Number, _complex] = 1, out: Optional[Tensor] = None) -> Tensor: + r""" + addcmul(input, tensor1, tensor2, *, value=1, out=None) -> Tensor + + Performs the element-wise multiplication of :attr:`tensor1` + by :attr:`tensor2`, multiplies the result by the scalar :attr:`value` + and adds it to :attr:`input`. + + .. math:: + \text{out}_i = \text{input}_i + \text{value} \times \text{tensor1}_i \times \text{tensor2}_i + + The shapes of :attr:`tensor`, :attr:`tensor1`, and :attr:`tensor2` must be + :ref:`broadcastable `. + + For inputs of type `FloatTensor` or `DoubleTensor`, :attr:`value` must be + a real number, otherwise an integer. + + Args: + input (Tensor): the tensor to be added + tensor1 (Tensor): the tensor to be multiplied + tensor2 (Tensor): the tensor to be multiplied + + Keyword args: + value (Number, optional): multiplier for :math:`tensor1 .* tensor2` + out (Tensor, optional): the output tensor. + + Example:: + + >>> t = torch.randn(1, 3) + >>> t1 = torch.randn(3, 1) + >>> t2 = torch.randn(1, 3) + >>> torch.addcmul(t, t1, t2, value=0.1) + tensor([[-0.8635, -0.6391, 1.6174], + [-0.7617, -0.5879, 1.7388], + [-0.8353, -0.6249, 1.6511]]) + """ + ... +@overload +def addmm(beta: Union[Number, _complex], self: Tensor, alpha: Union[Number, _complex], mat1: Tensor, mat2: Tensor) -> Tensor: + r""" + addmm(input, mat1, mat2, *, beta=1, alpha=1, out=None) -> Tensor + + Performs a matrix multiplication of the matrices :attr:`mat1` and :attr:`mat2`. + The matrix :attr:`input` is added to the final result. + + If :attr:`mat1` is a :math:`(n \times m)` tensor, :attr:`mat2` is a + :math:`(m \times p)` tensor, then :attr:`input` must be + :ref:`broadcastable ` with a :math:`(n \times p)` tensor + and :attr:`out` will be a :math:`(n \times p)` tensor. + + :attr:`alpha` and :attr:`beta` are scaling factors on matrix-vector product between + :attr:`mat1` and :attr:`mat2` and the added matrix :attr:`input` respectively. + + .. math:: + \text{out} = \beta\ \text{input} + \alpha\ (\text{mat1}_i \mathbin{@} \text{mat2}_i) + + If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in + it will not be propagated. 
+ + For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and + :attr:`alpha` must be real numbers, otherwise they should be integers. + + This operation has support for arguments with :ref:`sparse layouts`. If + :attr:`input` is sparse the result will have the same layout and if :attr:`out` + is provided it must have the same layout as :attr:`input`. + + + .. warning:: + Sparse support is a beta feature and some layout(s)/dtype/device combinations may not be supported, + or may not have autograd support. If you notice missing functionality please + open a feature request. + + This operator supports :ref:`TensorFloat32`. + + On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision` for backward. + + Args: + input (Tensor): matrix to be added + mat1 (Tensor): the first matrix to be matrix multiplied + mat2 (Tensor): the second matrix to be matrix multiplied + + Keyword args: + beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`) + alpha (Number, optional): multiplier for :math:`mat1 @ mat2` (:math:`\alpha`) + out (Tensor, optional): the output tensor. + + Example:: + + >>> M = torch.randn(2, 3) + >>> mat1 = torch.randn(2, 3) + >>> mat2 = torch.randn(3, 3) + >>> torch.addmm(M, mat1, mat2) + tensor([[-4.8716, 1.4671, -1.3746], + [ 0.7573, -3.9555, -2.8681]]) + """ + ... +@overload +def addmm(beta: Union[Number, _complex], self: Tensor, alpha: Union[Number, _complex], mat1: Tensor, mat2: Tensor, *, out: Tensor) -> Tensor: + r""" + addmm(input, mat1, mat2, *, beta=1, alpha=1, out=None) -> Tensor + + Performs a matrix multiplication of the matrices :attr:`mat1` and :attr:`mat2`. + The matrix :attr:`input` is added to the final result. + + If :attr:`mat1` is a :math:`(n \times m)` tensor, :attr:`mat2` is a + :math:`(m \times p)` tensor, then :attr:`input` must be + :ref:`broadcastable ` with a :math:`(n \times p)` tensor + and :attr:`out` will be a :math:`(n \times p)` tensor. + + :attr:`alpha` and :attr:`beta` are scaling factors on matrix-vector product between + :attr:`mat1` and :attr:`mat2` and the added matrix :attr:`input` respectively. + + .. math:: + \text{out} = \beta\ \text{input} + \alpha\ (\text{mat1}_i \mathbin{@} \text{mat2}_i) + + If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in + it will not be propagated. + + For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and + :attr:`alpha` must be real numbers, otherwise they should be integers. + + This operation has support for arguments with :ref:`sparse layouts`. If + :attr:`input` is sparse the result will have the same layout and if :attr:`out` + is provided it must have the same layout as :attr:`input`. + + + .. warning:: + Sparse support is a beta feature and some layout(s)/dtype/device combinations may not be supported, + or may not have autograd support. If you notice missing functionality please + open a feature request. + + This operator supports :ref:`TensorFloat32`. + + On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision` for backward. + + Args: + input (Tensor): matrix to be added + mat1 (Tensor): the first matrix to be matrix multiplied + mat2 (Tensor): the second matrix to be matrix multiplied + + Keyword args: + beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`) + alpha (Number, optional): multiplier for :math:`mat1 @ mat2` (:math:`\alpha`) + out (Tensor, optional): the output tensor. 
+ + Example:: + + >>> M = torch.randn(2, 3) + >>> mat1 = torch.randn(2, 3) + >>> mat2 = torch.randn(3, 3) + >>> torch.addmm(M, mat1, mat2) + tensor([[-4.8716, 1.4671, -1.3746], + [ 0.7573, -3.9555, -2.8681]]) + """ + ... +@overload +def addmm(input: Tensor, mat1: Tensor, mat2: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1, out: Optional[Tensor] = None) -> Tensor: + r""" + addmm(input, mat1, mat2, *, beta=1, alpha=1, out=None) -> Tensor + + Performs a matrix multiplication of the matrices :attr:`mat1` and :attr:`mat2`. + The matrix :attr:`input` is added to the final result. + + If :attr:`mat1` is a :math:`(n \times m)` tensor, :attr:`mat2` is a + :math:`(m \times p)` tensor, then :attr:`input` must be + :ref:`broadcastable ` with a :math:`(n \times p)` tensor + and :attr:`out` will be a :math:`(n \times p)` tensor. + + :attr:`alpha` and :attr:`beta` are scaling factors on matrix-vector product between + :attr:`mat1` and :attr:`mat2` and the added matrix :attr:`input` respectively. + + .. math:: + \text{out} = \beta\ \text{input} + \alpha\ (\text{mat1}_i \mathbin{@} \text{mat2}_i) + + If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in + it will not be propagated. + + For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and + :attr:`alpha` must be real numbers, otherwise they should be integers. + + This operation has support for arguments with :ref:`sparse layouts`. If + :attr:`input` is sparse the result will have the same layout and if :attr:`out` + is provided it must have the same layout as :attr:`input`. + + + .. warning:: + Sparse support is a beta feature and some layout(s)/dtype/device combinations may not be supported, + or may not have autograd support. If you notice missing functionality please + open a feature request. + + This operator supports :ref:`TensorFloat32`. + + On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision` for backward. + + Args: + input (Tensor): matrix to be added + mat1 (Tensor): the first matrix to be matrix multiplied + mat2 (Tensor): the second matrix to be matrix multiplied + + Keyword args: + beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`) + alpha (Number, optional): multiplier for :math:`mat1 @ mat2` (:math:`\alpha`) + out (Tensor, optional): the output tensor. + + Example:: + + >>> M = torch.randn(2, 3) + >>> mat1 = torch.randn(2, 3) + >>> mat2 = torch.randn(3, 3) + >>> torch.addmm(M, mat1, mat2) + tensor([[-4.8716, 1.4671, -1.3746], + [ 0.7573, -3.9555, -2.8681]]) + """ + ... +@overload +def addmm(beta: Union[Number, _complex], self: Tensor, mat1: Tensor, mat2: Tensor) -> Tensor: + r""" + addmm(input, mat1, mat2, *, beta=1, alpha=1, out=None) -> Tensor + + Performs a matrix multiplication of the matrices :attr:`mat1` and :attr:`mat2`. + The matrix :attr:`input` is added to the final result. + + If :attr:`mat1` is a :math:`(n \times m)` tensor, :attr:`mat2` is a + :math:`(m \times p)` tensor, then :attr:`input` must be + :ref:`broadcastable ` with a :math:`(n \times p)` tensor + and :attr:`out` will be a :math:`(n \times p)` tensor. + + :attr:`alpha` and :attr:`beta` are scaling factors on matrix-vector product between + :attr:`mat1` and :attr:`mat2` and the added matrix :attr:`input` respectively. + + .. 
math:: + \text{out} = \beta\ \text{input} + \alpha\ (\text{mat1}_i \mathbin{@} \text{mat2}_i) + + If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in + it will not be propagated. + + For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and + :attr:`alpha` must be real numbers, otherwise they should be integers. + + This operation has support for arguments with :ref:`sparse layouts`. If + :attr:`input` is sparse the result will have the same layout and if :attr:`out` + is provided it must have the same layout as :attr:`input`. + + + .. warning:: + Sparse support is a beta feature and some layout(s)/dtype/device combinations may not be supported, + or may not have autograd support. If you notice missing functionality please + open a feature request. + + This operator supports :ref:`TensorFloat32`. + + On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision` for backward. + + Args: + input (Tensor): matrix to be added + mat1 (Tensor): the first matrix to be matrix multiplied + mat2 (Tensor): the second matrix to be matrix multiplied + + Keyword args: + beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`) + alpha (Number, optional): multiplier for :math:`mat1 @ mat2` (:math:`\alpha`) + out (Tensor, optional): the output tensor. + + Example:: + + >>> M = torch.randn(2, 3) + >>> mat1 = torch.randn(2, 3) + >>> mat2 = torch.randn(3, 3) + >>> torch.addmm(M, mat1, mat2) + tensor([[-4.8716, 1.4671, -1.3746], + [ 0.7573, -3.9555, -2.8681]]) + """ + ... +@overload +def addmm(beta: Union[Number, _complex], self: Tensor, mat1: Tensor, mat2: Tensor, *, out: Tensor) -> Tensor: + r""" + addmm(input, mat1, mat2, *, beta=1, alpha=1, out=None) -> Tensor + + Performs a matrix multiplication of the matrices :attr:`mat1` and :attr:`mat2`. + The matrix :attr:`input` is added to the final result. + + If :attr:`mat1` is a :math:`(n \times m)` tensor, :attr:`mat2` is a + :math:`(m \times p)` tensor, then :attr:`input` must be + :ref:`broadcastable ` with a :math:`(n \times p)` tensor + and :attr:`out` will be a :math:`(n \times p)` tensor. + + :attr:`alpha` and :attr:`beta` are scaling factors on matrix-vector product between + :attr:`mat1` and :attr:`mat2` and the added matrix :attr:`input` respectively. + + .. math:: + \text{out} = \beta\ \text{input} + \alpha\ (\text{mat1}_i \mathbin{@} \text{mat2}_i) + + If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in + it will not be propagated. + + For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and + :attr:`alpha` must be real numbers, otherwise they should be integers. + + This operation has support for arguments with :ref:`sparse layouts`. If + :attr:`input` is sparse the result will have the same layout and if :attr:`out` + is provided it must have the same layout as :attr:`input`. + + + .. warning:: + Sparse support is a beta feature and some layout(s)/dtype/device combinations may not be supported, + or may not have autograd support. If you notice missing functionality please + open a feature request. + + This operator supports :ref:`TensorFloat32`. + + On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision` for backward. 
+ + Args: + input (Tensor): matrix to be added + mat1 (Tensor): the first matrix to be matrix multiplied + mat2 (Tensor): the second matrix to be matrix multiplied + + Keyword args: + beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`) + alpha (Number, optional): multiplier for :math:`mat1 @ mat2` (:math:`\alpha`) + out (Tensor, optional): the output tensor. + + Example:: + + >>> M = torch.randn(2, 3) + >>> mat1 = torch.randn(2, 3) + >>> mat2 = torch.randn(3, 3) + >>> torch.addmm(M, mat1, mat2) + tensor([[-4.8716, 1.4671, -1.3746], + [ 0.7573, -3.9555, -2.8681]]) + """ + ... +@overload +def addmv(beta: Union[Number, _complex], self: Tensor, alpha: Union[Number, _complex], mat: Tensor, vec: Tensor) -> Tensor: + r""" + addmv(input, mat, vec, *, beta=1, alpha=1, out=None) -> Tensor + + Performs a matrix-vector product of the matrix :attr:`mat` and + the vector :attr:`vec`. + The vector :attr:`input` is added to the final result. + + If :attr:`mat` is a :math:`(n \times m)` tensor, :attr:`vec` is a 1-D tensor of + size `m`, then :attr:`input` must be + :ref:`broadcastable ` with a 1-D tensor of size `n` and + :attr:`out` will be 1-D tensor of size `n`. + + :attr:`alpha` and :attr:`beta` are scaling factors on matrix-vector product between + :attr:`mat` and :attr:`vec` and the added tensor :attr:`input` respectively. + + .. math:: + \text{out} = \beta\ \text{input} + \alpha\ (\text{mat} \mathbin{@} \text{vec}) + + If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in + it will not be propagated. + + For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and + :attr:`alpha` must be real numbers, otherwise they should be integers. + + Args: + input (Tensor): vector to be added + mat (Tensor): matrix to be matrix multiplied + vec (Tensor): vector to be matrix multiplied + + Keyword args: + beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`) + alpha (Number, optional): multiplier for :math:`mat @ vec` (:math:`\alpha`) + out (Tensor, optional): the output tensor. + + Example:: + + >>> M = torch.randn(2) + >>> mat = torch.randn(2, 3) + >>> vec = torch.randn(3) + >>> torch.addmv(M, mat, vec) + tensor([-0.3768, -5.5565]) + """ + ... +@overload +def addmv(beta: Union[Number, _complex], self: Tensor, alpha: Union[Number, _complex], mat: Tensor, vec: Tensor, *, out: Tensor) -> Tensor: + r""" + addmv(input, mat, vec, *, beta=1, alpha=1, out=None) -> Tensor + + Performs a matrix-vector product of the matrix :attr:`mat` and + the vector :attr:`vec`. + The vector :attr:`input` is added to the final result. + + If :attr:`mat` is a :math:`(n \times m)` tensor, :attr:`vec` is a 1-D tensor of + size `m`, then :attr:`input` must be + :ref:`broadcastable ` with a 1-D tensor of size `n` and + :attr:`out` will be 1-D tensor of size `n`. + + :attr:`alpha` and :attr:`beta` are scaling factors on matrix-vector product between + :attr:`mat` and :attr:`vec` and the added tensor :attr:`input` respectively. + + .. math:: + \text{out} = \beta\ \text{input} + \alpha\ (\text{mat} \mathbin{@} \text{vec}) + + If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in + it will not be propagated. + + For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and + :attr:`alpha` must be real numbers, otherwise they should be integers. 
+ + Args: + input (Tensor): vector to be added + mat (Tensor): matrix to be matrix multiplied + vec (Tensor): vector to be matrix multiplied + + Keyword args: + beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`) + alpha (Number, optional): multiplier for :math:`mat @ vec` (:math:`\alpha`) + out (Tensor, optional): the output tensor. + + Example:: + + >>> M = torch.randn(2) + >>> mat = torch.randn(2, 3) + >>> vec = torch.randn(3) + >>> torch.addmv(M, mat, vec) + tensor([-0.3768, -5.5565]) + """ + ... +@overload +def addmv(input: Tensor, mat: Tensor, vec: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1, out: Optional[Tensor] = None) -> Tensor: + r""" + addmv(input, mat, vec, *, beta=1, alpha=1, out=None) -> Tensor + + Performs a matrix-vector product of the matrix :attr:`mat` and + the vector :attr:`vec`. + The vector :attr:`input` is added to the final result. + + If :attr:`mat` is a :math:`(n \times m)` tensor, :attr:`vec` is a 1-D tensor of + size `m`, then :attr:`input` must be + :ref:`broadcastable ` with a 1-D tensor of size `n` and + :attr:`out` will be 1-D tensor of size `n`. + + :attr:`alpha` and :attr:`beta` are scaling factors on matrix-vector product between + :attr:`mat` and :attr:`vec` and the added tensor :attr:`input` respectively. + + .. math:: + \text{out} = \beta\ \text{input} + \alpha\ (\text{mat} \mathbin{@} \text{vec}) + + If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in + it will not be propagated. + + For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and + :attr:`alpha` must be real numbers, otherwise they should be integers. + + Args: + input (Tensor): vector to be added + mat (Tensor): matrix to be matrix multiplied + vec (Tensor): vector to be matrix multiplied + + Keyword args: + beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`) + alpha (Number, optional): multiplier for :math:`mat @ vec` (:math:`\alpha`) + out (Tensor, optional): the output tensor. + + Example:: + + >>> M = torch.randn(2) + >>> mat = torch.randn(2, 3) + >>> vec = torch.randn(3) + >>> torch.addmv(M, mat, vec) + tensor([-0.3768, -5.5565]) + """ + ... +@overload +def addmv(beta: Union[Number, _complex], self: Tensor, mat: Tensor, vec: Tensor) -> Tensor: + r""" + addmv(input, mat, vec, *, beta=1, alpha=1, out=None) -> Tensor + + Performs a matrix-vector product of the matrix :attr:`mat` and + the vector :attr:`vec`. + The vector :attr:`input` is added to the final result. + + If :attr:`mat` is a :math:`(n \times m)` tensor, :attr:`vec` is a 1-D tensor of + size `m`, then :attr:`input` must be + :ref:`broadcastable ` with a 1-D tensor of size `n` and + :attr:`out` will be 1-D tensor of size `n`. + + :attr:`alpha` and :attr:`beta` are scaling factors on matrix-vector product between + :attr:`mat` and :attr:`vec` and the added tensor :attr:`input` respectively. + + .. math:: + \text{out} = \beta\ \text{input} + \alpha\ (\text{mat} \mathbin{@} \text{vec}) + + If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in + it will not be propagated. + + For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and + :attr:`alpha` must be real numbers, otherwise they should be integers. 
+ + Args: + input (Tensor): vector to be added + mat (Tensor): matrix to be matrix multiplied + vec (Tensor): vector to be matrix multiplied + + Keyword args: + beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`) + alpha (Number, optional): multiplier for :math:`mat @ vec` (:math:`\alpha`) + out (Tensor, optional): the output tensor. + + Example:: + + >>> M = torch.randn(2) + >>> mat = torch.randn(2, 3) + >>> vec = torch.randn(3) + >>> torch.addmv(M, mat, vec) + tensor([-0.3768, -5.5565]) + """ + ... +@overload +def addmv(beta: Union[Number, _complex], self: Tensor, mat: Tensor, vec: Tensor, *, out: Tensor) -> Tensor: + r""" + addmv(input, mat, vec, *, beta=1, alpha=1, out=None) -> Tensor + + Performs a matrix-vector product of the matrix :attr:`mat` and + the vector :attr:`vec`. + The vector :attr:`input` is added to the final result. + + If :attr:`mat` is a :math:`(n \times m)` tensor, :attr:`vec` is a 1-D tensor of + size `m`, then :attr:`input` must be + :ref:`broadcastable ` with a 1-D tensor of size `n` and + :attr:`out` will be 1-D tensor of size `n`. + + :attr:`alpha` and :attr:`beta` are scaling factors on matrix-vector product between + :attr:`mat` and :attr:`vec` and the added tensor :attr:`input` respectively. + + .. math:: + \text{out} = \beta\ \text{input} + \alpha\ (\text{mat} \mathbin{@} \text{vec}) + + If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in + it will not be propagated. + + For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and + :attr:`alpha` must be real numbers, otherwise they should be integers. + + Args: + input (Tensor): vector to be added + mat (Tensor): matrix to be matrix multiplied + vec (Tensor): vector to be matrix multiplied + + Keyword args: + beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`) + alpha (Number, optional): multiplier for :math:`mat @ vec` (:math:`\alpha`) + out (Tensor, optional): the output tensor. + + Example:: + + >>> M = torch.randn(2) + >>> mat = torch.randn(2, 3) + >>> vec = torch.randn(3) + >>> torch.addmv(M, mat, vec) + tensor([-0.3768, -5.5565]) + """ + ... +@overload +def addmv_(beta: Union[Number, _complex], self: Tensor, alpha: Union[Number, _complex], mat: Tensor, vec: Tensor) -> Tensor: ... +@overload +def addmv_(input: Tensor, mat: Tensor, vec: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1) -> Tensor: ... +@overload +def addmv_(beta: Union[Number, _complex], self: Tensor, mat: Tensor, vec: Tensor) -> Tensor: ... +@overload +def addr(beta: Union[Number, _complex], self: Tensor, alpha: Union[Number, _complex], vec1: Tensor, vec2: Tensor) -> Tensor: + r""" + addr(input, vec1, vec2, *, beta=1, alpha=1, out=None) -> Tensor + + Performs the outer-product of vectors :attr:`vec1` and :attr:`vec2` + and adds it to the matrix :attr:`input`. + + Optional values :attr:`beta` and :attr:`alpha` are scaling factors on the + outer product between :attr:`vec1` and :attr:`vec2` and the added matrix + :attr:`input` respectively. + + .. math:: + \text{out} = \beta\ \text{input} + \alpha\ (\text{vec1} \otimes \text{vec2}) + + If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in + it will not be propagated. + + If :attr:`vec1` is a vector of size `n` and :attr:`vec2` is a vector + of size `m`, then :attr:`input` must be + :ref:`broadcastable ` with a matrix of size + :math:`(n \times m)` and :attr:`out` will be a matrix of size + :math:`(n \times m)`. 
+ + Args: + input (Tensor): matrix to be added + vec1 (Tensor): the first vector of the outer product + vec2 (Tensor): the second vector of the outer product + + Keyword args: + beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`) + alpha (Number, optional): multiplier for :math:`\text{vec1} \otimes \text{vec2}` (:math:`\alpha`) + out (Tensor, optional): the output tensor. + + Example:: + + >>> vec1 = torch.arange(1., 4.) + >>> vec2 = torch.arange(1., 3.) + >>> M = torch.zeros(3, 2) + >>> torch.addr(M, vec1, vec2) + tensor([[ 1., 2.], + [ 2., 4.], + [ 3., 6.]]) + """ + ... +@overload +def addr(beta: Union[Number, _complex], self: Tensor, alpha: Union[Number, _complex], vec1: Tensor, vec2: Tensor, *, out: Tensor) -> Tensor: + r""" + addr(input, vec1, vec2, *, beta=1, alpha=1, out=None) -> Tensor + + Performs the outer-product of vectors :attr:`vec1` and :attr:`vec2` + and adds it to the matrix :attr:`input`. + + Optional values :attr:`beta` and :attr:`alpha` are scaling factors on the + outer product between :attr:`vec1` and :attr:`vec2` and the added matrix + :attr:`input` respectively. + + .. math:: + \text{out} = \beta\ \text{input} + \alpha\ (\text{vec1} \otimes \text{vec2}) + + If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in + it will not be propagated. + + If :attr:`vec1` is a vector of size `n` and :attr:`vec2` is a vector + of size `m`, then :attr:`input` must be + :ref:`broadcastable ` with a matrix of size + :math:`(n \times m)` and :attr:`out` will be a matrix of size + :math:`(n \times m)`. + + Args: + input (Tensor): matrix to be added + vec1 (Tensor): the first vector of the outer product + vec2 (Tensor): the second vector of the outer product + + Keyword args: + beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`) + alpha (Number, optional): multiplier for :math:`\text{vec1} \otimes \text{vec2}` (:math:`\alpha`) + out (Tensor, optional): the output tensor. + + Example:: + + >>> vec1 = torch.arange(1., 4.) + >>> vec2 = torch.arange(1., 3.) + >>> M = torch.zeros(3, 2) + >>> torch.addr(M, vec1, vec2) + tensor([[ 1., 2.], + [ 2., 4.], + [ 3., 6.]]) + """ + ... +@overload +def addr(input: Tensor, vec1: Tensor, vec2: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1, out: Optional[Tensor] = None) -> Tensor: + r""" + addr(input, vec1, vec2, *, beta=1, alpha=1, out=None) -> Tensor + + Performs the outer-product of vectors :attr:`vec1` and :attr:`vec2` + and adds it to the matrix :attr:`input`. + + Optional values :attr:`beta` and :attr:`alpha` are scaling factors on the + outer product between :attr:`vec1` and :attr:`vec2` and the added matrix + :attr:`input` respectively. + + .. math:: + \text{out} = \beta\ \text{input} + \alpha\ (\text{vec1} \otimes \text{vec2}) + + If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in + it will not be propagated. + + If :attr:`vec1` is a vector of size `n` and :attr:`vec2` is a vector + of size `m`, then :attr:`input` must be + :ref:`broadcastable ` with a matrix of size + :math:`(n \times m)` and :attr:`out` will be a matrix of size + :math:`(n \times m)`. 
+ + Args: + input (Tensor): matrix to be added + vec1 (Tensor): the first vector of the outer product + vec2 (Tensor): the second vector of the outer product + + Keyword args: + beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`) + alpha (Number, optional): multiplier for :math:`\text{vec1} \otimes \text{vec2}` (:math:`\alpha`) + out (Tensor, optional): the output tensor. + + Example:: + + >>> vec1 = torch.arange(1., 4.) + >>> vec2 = torch.arange(1., 3.) + >>> M = torch.zeros(3, 2) + >>> torch.addr(M, vec1, vec2) + tensor([[ 1., 2.], + [ 2., 4.], + [ 3., 6.]]) + """ + ... +@overload +def addr(beta: Union[Number, _complex], self: Tensor, vec1: Tensor, vec2: Tensor) -> Tensor: + r""" + addr(input, vec1, vec2, *, beta=1, alpha=1, out=None) -> Tensor + + Performs the outer-product of vectors :attr:`vec1` and :attr:`vec2` + and adds it to the matrix :attr:`input`. + + Optional values :attr:`beta` and :attr:`alpha` are scaling factors on the + outer product between :attr:`vec1` and :attr:`vec2` and the added matrix + :attr:`input` respectively. + + .. math:: + \text{out} = \beta\ \text{input} + \alpha\ (\text{vec1} \otimes \text{vec2}) + + If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in + it will not be propagated. + + If :attr:`vec1` is a vector of size `n` and :attr:`vec2` is a vector + of size `m`, then :attr:`input` must be + :ref:`broadcastable ` with a matrix of size + :math:`(n \times m)` and :attr:`out` will be a matrix of size + :math:`(n \times m)`. + + Args: + input (Tensor): matrix to be added + vec1 (Tensor): the first vector of the outer product + vec2 (Tensor): the second vector of the outer product + + Keyword args: + beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`) + alpha (Number, optional): multiplier for :math:`\text{vec1} \otimes \text{vec2}` (:math:`\alpha`) + out (Tensor, optional): the output tensor. + + Example:: + + >>> vec1 = torch.arange(1., 4.) + >>> vec2 = torch.arange(1., 3.) + >>> M = torch.zeros(3, 2) + >>> torch.addr(M, vec1, vec2) + tensor([[ 1., 2.], + [ 2., 4.], + [ 3., 6.]]) + """ + ... +@overload +def addr(beta: Union[Number, _complex], self: Tensor, vec1: Tensor, vec2: Tensor, *, out: Tensor) -> Tensor: + r""" + addr(input, vec1, vec2, *, beta=1, alpha=1, out=None) -> Tensor + + Performs the outer-product of vectors :attr:`vec1` and :attr:`vec2` + and adds it to the matrix :attr:`input`. + + Optional values :attr:`beta` and :attr:`alpha` are scaling factors on the + outer product between :attr:`vec1` and :attr:`vec2` and the added matrix + :attr:`input` respectively. + + .. math:: + \text{out} = \beta\ \text{input} + \alpha\ (\text{vec1} \otimes \text{vec2}) + + If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in + it will not be propagated. + + If :attr:`vec1` is a vector of size `n` and :attr:`vec2` is a vector + of size `m`, then :attr:`input` must be + :ref:`broadcastable ` with a matrix of size + :math:`(n \times m)` and :attr:`out` will be a matrix of size + :math:`(n \times m)`. + + Args: + input (Tensor): matrix to be added + vec1 (Tensor): the first vector of the outer product + vec2 (Tensor): the second vector of the outer product + + Keyword args: + beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`) + alpha (Number, optional): multiplier for :math:`\text{vec1} \otimes \text{vec2}` (:math:`\alpha`) + out (Tensor, optional): the output tensor. + + Example:: + + >>> vec1 = torch.arange(1., 4.) + >>> vec2 = torch.arange(1., 3.) 
+ >>> M = torch.zeros(3, 2) + >>> torch.addr(M, vec1, vec2) + tensor([[ 1., 2.], + [ 2., 4.], + [ 3., 6.]]) + """ + ... +def adjoint(input: Tensor) -> Tensor: + r""" + adjoint(Tensor) -> Tensor + Returns a view of the tensor conjugated and with the last two dimensions transposed. + + ``x.adjoint()`` is equivalent to ``x.transpose(-2, -1).conj()`` for complex tensors and + to ``x.transpose(-2, -1)`` for real tensors. + + Example:: + >>> x = torch.arange(4, dtype=torch.float) + >>> A = torch.complex(x, x).reshape(2, 2) + >>> A + tensor([[0.+0.j, 1.+1.j], + [2.+2.j, 3.+3.j]]) + >>> A.adjoint() + tensor([[0.-0.j, 2.-2.j], + [1.-1.j, 3.-3.j]]) + >>> (A.adjoint() == A.mH).all() + tensor(True) + """ + ... +def affine_grid_generator(theta: Tensor, size: Sequence[Union[_int, SymInt]], align_corners: _bool) -> Tensor: ... +def alias_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + Performs the same operation as :func:`torch.alias`, but all output tensors + are freshly created instead of aliasing the input. + """ + ... +@overload +def all(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + all(input) -> Tensor + + Tests if all elements in :attr:`input` evaluate to `True`. + + .. note:: This function matches the behaviour of NumPy in returning + output of dtype `bool` for all supported dtypes except `uint8`. + For `uint8` the dtype of output is `uint8` itself. + + Example:: + + >>> a = torch.rand(1, 2).bool() + >>> a + tensor([[False, True]], dtype=torch.bool) + >>> torch.all(a) + tensor(False, dtype=torch.bool) + >>> a = torch.arange(0, 3) + >>> a + tensor([0, 1, 2]) + >>> torch.all(a) + tensor(False) + + .. function:: all(input, dim, keepdim=False, *, out=None) -> Tensor + :noindex: + + For each row of :attr:`input` in the given dimension :attr:`dim`, + returns `True` if all elements in the row evaluate to `True` and `False` otherwise. + + + If :attr:`keepdim` is ``True``, the output tensor is of the same size + as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the + output tensor having 1 (or ``len(dim)``) fewer dimension(s). + + + Args: + input (Tensor): the input tensor. + dim (int or tuple of ints): the dimension or dimensions to reduce. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.rand(4, 2).bool() + >>> a + tensor([[True, True], + [True, False], + [True, True], + [True, True]], dtype=torch.bool) + >>> torch.all(a, dim=1) + tensor([ True, False, True, True], dtype=torch.bool) + >>> torch.all(a, dim=0) + tensor([ True, False], dtype=torch.bool) + """ + ... +@overload +def all(input: Tensor, dim: Optional[_size] = None, keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: + r""" + all(input) -> Tensor + + Tests if all elements in :attr:`input` evaluate to `True`. + + .. note:: This function matches the behaviour of NumPy in returning + output of dtype `bool` for all supported dtypes except `uint8`. + For `uint8` the dtype of output is `uint8` itself. + + Example:: + + >>> a = torch.rand(1, 2).bool() + >>> a + tensor([[False, True]], dtype=torch.bool) + >>> torch.all(a) + tensor(False, dtype=torch.bool) + >>> a = torch.arange(0, 3) + >>> a + tensor([0, 1, 2]) + >>> torch.all(a) + tensor(False) + + .. 
function:: all(input, dim, keepdim=False, *, out=None) -> Tensor + :noindex: + + For each row of :attr:`input` in the given dimension :attr:`dim`, + returns `True` if all elements in the row evaluate to `True` and `False` otherwise. + + + If :attr:`keepdim` is ``True``, the output tensor is of the same size + as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the + output tensor having 1 (or ``len(dim)``) fewer dimension(s). + + + Args: + input (Tensor): the input tensor. + dim (int or tuple of ints): the dimension or dimensions to reduce. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.rand(4, 2).bool() + >>> a + tensor([[True, True], + [True, False], + [True, True], + [True, True]], dtype=torch.bool) + >>> torch.all(a, dim=1) + tensor([ True, False, True, True], dtype=torch.bool) + >>> torch.all(a, dim=0) + tensor([ True, False], dtype=torch.bool) + """ + ... +@overload +def all(input: Tensor, dim: _int, keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: + r""" + all(input) -> Tensor + + Tests if all elements in :attr:`input` evaluate to `True`. + + .. note:: This function matches the behaviour of NumPy in returning + output of dtype `bool` for all supported dtypes except `uint8`. + For `uint8` the dtype of output is `uint8` itself. + + Example:: + + >>> a = torch.rand(1, 2).bool() + >>> a + tensor([[False, True]], dtype=torch.bool) + >>> torch.all(a) + tensor(False, dtype=torch.bool) + >>> a = torch.arange(0, 3) + >>> a + tensor([0, 1, 2]) + >>> torch.all(a) + tensor(False) + + .. function:: all(input, dim, keepdim=False, *, out=None) -> Tensor + :noindex: + + For each row of :attr:`input` in the given dimension :attr:`dim`, + returns `True` if all elements in the row evaluate to `True` and `False` otherwise. + + + If :attr:`keepdim` is ``True``, the output tensor is of the same size + as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the + output tensor having 1 (or ``len(dim)``) fewer dimension(s). + + + Args: + input (Tensor): the input tensor. + dim (int or tuple of ints): the dimension or dimensions to reduce. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.rand(4, 2).bool() + >>> a + tensor([[True, True], + [True, False], + [True, True], + [True, True]], dtype=torch.bool) + >>> torch.all(a, dim=1) + tensor([ True, False, True, True], dtype=torch.bool) + >>> torch.all(a, dim=0) + tensor([ True, False], dtype=torch.bool) + """ + ... +@overload +def all(input: Tensor, dim: Union[str, ellipsis, None], keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: + r""" + all(input) -> Tensor + + Tests if all elements in :attr:`input` evaluate to `True`. + + .. note:: This function matches the behaviour of NumPy in returning + output of dtype `bool` for all supported dtypes except `uint8`. + For `uint8` the dtype of output is `uint8` itself. + + Example:: + + >>> a = torch.rand(1, 2).bool() + >>> a + tensor([[False, True]], dtype=torch.bool) + >>> torch.all(a) + tensor(False, dtype=torch.bool) + >>> a = torch.arange(0, 3) + >>> a + tensor([0, 1, 2]) + >>> torch.all(a) + tensor(False) + + .. 
function:: all(input, dim, keepdim=False, *, out=None) -> Tensor + :noindex: + + For each row of :attr:`input` in the given dimension :attr:`dim`, + returns `True` if all elements in the row evaluate to `True` and `False` otherwise. + + + If :attr:`keepdim` is ``True``, the output tensor is of the same size + as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the + output tensor having 1 (or ``len(dim)``) fewer dimension(s). + + + Args: + input (Tensor): the input tensor. + dim (int or tuple of ints): the dimension or dimensions to reduce. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.rand(4, 2).bool() + >>> a + tensor([[True, True], + [True, False], + [True, True], + [True, True]], dtype=torch.bool) + >>> torch.all(a, dim=1) + tensor([ True, False, True, True], dtype=torch.bool) + >>> torch.all(a, dim=0) + tensor([ True, False], dtype=torch.bool) + """ + ... +def allclose(input: Tensor, other: Tensor, rtol: _float = 1e-05, atol: _float = 1e-08, equal_nan: _bool = False) -> _bool: + r""" + allclose(input, other, rtol=1e-05, atol=1e-08, equal_nan=False) -> bool + + This function checks if :attr:`input` and :attr:`other` satisfy the condition: + + .. math:: + \lvert \text{input} - \text{other} \rvert \leq \texttt{atol} + \texttt{rtol} \times \lvert \text{other} \rvert + + elementwise, for all elements of :attr:`input` and :attr:`other`. The behaviour of this function is analogous to + `numpy.allclose `_ + + Args: + input (Tensor): first tensor to compare + other (Tensor): second tensor to compare + atol (float, optional): absolute tolerance. Default: 1e-08 + rtol (float, optional): relative tolerance. Default: 1e-05 + equal_nan (bool, optional): if ``True``, then two ``NaN`` s will be considered equal. Default: ``False`` + + Example:: + + >>> torch.allclose(torch.tensor([10000., 1e-07]), torch.tensor([10000.1, 1e-08])) + False + >>> torch.allclose(torch.tensor([10000., 1e-08]), torch.tensor([10000.1, 1e-09])) + True + >>> torch.allclose(torch.tensor([1.0, float('nan')]), torch.tensor([1.0, float('nan')])) + False + >>> torch.allclose(torch.tensor([1.0, float('nan')]), torch.tensor([1.0, float('nan')]), equal_nan=True) + True + """ + ... +def alpha_dropout(input: Tensor, p: _float, train: _bool) -> Tensor: ... +def alpha_dropout_(input: Tensor, p: _float, train: _bool) -> Tensor: ... +def amax(input: Tensor, dim: Union[_int, _size] = (), keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: + r""" + amax(input, dim, keepdim=False, *, out=None) -> Tensor + + Returns the maximum value of each slice of the :attr:`input` tensor in the given + dimension(s) :attr:`dim`. + + .. note:: + The difference between ``max``/``min`` and ``amax``/``amin`` is: + - ``amax``/``amin`` supports reducing on multiple dimensions, + - ``amax``/``amin`` does not return indices, + - ``amax``/``amin`` evenly distributes gradient between equal values, + while ``max(dim)``/``min(dim)`` propagates gradient only to a single + index in the source tensor. + + + If :attr:`keepdim` is ``True``, the output tensor is of the same size + as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the + output tensor having 1 (or ``len(dim)``) fewer dimension(s). 
+ + + Args: + input (Tensor): the input tensor. + dim (int or tuple of ints): the dimension or dimensions to reduce. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randn(4, 4) + >>> a + tensor([[ 0.8177, 1.4878, -0.2491, 0.9130], + [-0.7158, 1.1775, 2.0992, 0.4817], + [-0.0053, 0.0164, -1.3738, -0.0507], + [ 1.9700, 1.1106, -1.0318, -1.0816]]) + >>> torch.amax(a, 1) + tensor([1.4878, 2.0992, 0.0164, 1.9700]) + """ + ... +def amin(input: Tensor, dim: Union[_int, _size] = (), keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: + r""" + amin(input, dim, keepdim=False, *, out=None) -> Tensor + + Returns the minimum value of each slice of the :attr:`input` tensor in the given + dimension(s) :attr:`dim`. + + .. note:: + The difference between ``max``/``min`` and ``amax``/``amin`` is: + - ``amax``/``amin`` supports reducing on multiple dimensions, + - ``amax``/``amin`` does not return indices, + - ``amax``/``amin`` evenly distributes gradient between equal values, + while ``max(dim)``/``min(dim)`` propagates gradient only to a single + index in the source tensor. + + + If :attr:`keepdim` is ``True``, the output tensor is of the same size + as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the + output tensor having 1 (or ``len(dim)``) fewer dimension(s). + + + Args: + input (Tensor): the input tensor. + dim (int or tuple of ints): the dimension or dimensions to reduce. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randn(4, 4) + >>> a + tensor([[ 0.6451, -0.4866, 0.2987, -1.3312], + [-0.5744, 1.2980, 1.8397, -0.2713], + [ 0.9128, 0.9214, -1.7268, -0.2995], + [ 0.9023, 0.4853, 0.9075, -1.6165]]) + >>> torch.amin(a, 1) + tensor([-1.3312, -0.5744, -1.7268, -1.6165]) + """ + ... +def aminmax(input: Tensor, *, dim: Optional[_int] = None, keepdim: _bool = False, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.aminmax: + r""" + aminmax(input, *, dim=None, keepdim=False, out=None) -> (Tensor min, Tensor max) + + Computes the minimum and maximum values of the :attr:`input` tensor. + + Args: + input (Tensor): + The input tensor + + Keyword Args: + dim (Optional[int]): + The dimension along which to compute the values. If `None`, + computes the values over the entire :attr:`input` tensor. + Default is `None`. + keepdim (bool): + If `True`, the reduced dimensions will be kept in the output + tensor as dimensions with size 1 for broadcasting, otherwise + they will be removed, as if calling (:func:`torch.squeeze`). + Default is `False`. + out (Optional[Tuple[Tensor, Tensor]]): + Optional tensors on which to write the result. Must have the same + shape and dtype as the expected output. + Default is `None`. + + Returns: + A named tuple `(min, max)` containing the minimum and maximum values. + + Raises: + RuntimeError + If any of the dimensions to compute the values over has size 0. + + .. note:: + NaN values are propagated to the output if at least one value is NaN. + + .. 
seealso:: + :func:`torch.amin` computes just the minimum value + :func:`torch.amax` computes just the maximum value + + Example:: + + >>> torch.aminmax(torch.tensor([1, -3, 5])) + torch.return_types.aminmax( + min=tensor(-3), + max=tensor(5)) + + >>> # aminmax propagates NaNs + >>> torch.aminmax(torch.tensor([1, -3, 5, torch.nan])) + torch.return_types.aminmax( + min=tensor(nan), + max=tensor(nan)) + + >>> t = torch.arange(10).view(2, 5) + >>> t + tensor([[0, 1, 2, 3, 4], + [5, 6, 7, 8, 9]]) + >>> t.aminmax(dim=0, keepdim=True) + torch.return_types.aminmax( + min=tensor([[0, 1, 2, 3, 4]]), + max=tensor([[5, 6, 7, 8, 9]])) + """ + ... +def angle(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + angle(input, *, out=None) -> Tensor + + Computes the element-wise angle (in radians) of the given :attr:`input` tensor. + + .. math:: + \text{out}_{i} = angle(\text{input}_{i}) + + Args: + input (Tensor): the input tensor. + + Keyword args: + out (Tensor, optional): the output tensor. + + .. note:: Starting in PyTorch 1.8, angle returns pi for negative real numbers, + zero for non-negative real numbers, and propagates NaNs. Previously + the function would return zero for all real numbers and not propagate + floating-point NaNs. + + Example:: + + >>> torch.angle(torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j]))*180/3.14159 + tensor([ 135., 135, -45]) + """ + ... +@overload +def any(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + any(input) -> Tensor + + Tests if any element in :attr:`input` evaluates to `True`. + + .. note:: This function matches the behaviour of NumPy in returning + output of dtype `bool` for all supported dtypes except `uint8`. + For `uint8` the dtype of output is `uint8` itself. + + Example:: + + >>> a = torch.rand(1, 2).bool() + >>> a + tensor([[False, True]], dtype=torch.bool) + >>> torch.any(a) + tensor(True, dtype=torch.bool) + >>> a = torch.arange(0, 3) + >>> a + tensor([0, 1, 2]) + >>> torch.any(a) + tensor(True) + + .. function:: any(input, dim, keepdim=False, *, out=None) -> Tensor + :noindex: + + For each row of :attr:`input` in the given dimension :attr:`dim`, + returns `True` if any element in the row evaluate to `True` and `False` otherwise. + + + If :attr:`keepdim` is ``True``, the output tensor is of the same size + as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the + output tensor having 1 (or ``len(dim)``) fewer dimension(s). + + + Args: + input (Tensor): the input tensor. + dim (int or tuple of ints): the dimension or dimensions to reduce. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randn(4, 2) < 0 + >>> a + tensor([[ True, True], + [False, True], + [ True, True], + [False, False]]) + >>> torch.any(a, 1) + tensor([ True, True, True, False]) + >>> torch.any(a, 0) + tensor([True, True]) + """ + ... +@overload +def any(input: Tensor, dim: Optional[_size] = None, keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: + r""" + any(input) -> Tensor + + Tests if any element in :attr:`input` evaluates to `True`. + + .. note:: This function matches the behaviour of NumPy in returning + output of dtype `bool` for all supported dtypes except `uint8`. + For `uint8` the dtype of output is `uint8` itself. 
+ + Example:: + + >>> a = torch.rand(1, 2).bool() + >>> a + tensor([[False, True]], dtype=torch.bool) + >>> torch.any(a) + tensor(True, dtype=torch.bool) + >>> a = torch.arange(0, 3) + >>> a + tensor([0, 1, 2]) + >>> torch.any(a) + tensor(True) + + .. function:: any(input, dim, keepdim=False, *, out=None) -> Tensor + :noindex: + + For each row of :attr:`input` in the given dimension :attr:`dim`, + returns `True` if any element in the row evaluate to `True` and `False` otherwise. + + + If :attr:`keepdim` is ``True``, the output tensor is of the same size + as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the + output tensor having 1 (or ``len(dim)``) fewer dimension(s). + + + Args: + input (Tensor): the input tensor. + dim (int or tuple of ints): the dimension or dimensions to reduce. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randn(4, 2) < 0 + >>> a + tensor([[ True, True], + [False, True], + [ True, True], + [False, False]]) + >>> torch.any(a, 1) + tensor([ True, True, True, False]) + >>> torch.any(a, 0) + tensor([True, True]) + """ + ... +@overload +def any(input: Tensor, dim: _int, keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: + r""" + any(input) -> Tensor + + Tests if any element in :attr:`input` evaluates to `True`. + + .. note:: This function matches the behaviour of NumPy in returning + output of dtype `bool` for all supported dtypes except `uint8`. + For `uint8` the dtype of output is `uint8` itself. + + Example:: + + >>> a = torch.rand(1, 2).bool() + >>> a + tensor([[False, True]], dtype=torch.bool) + >>> torch.any(a) + tensor(True, dtype=torch.bool) + >>> a = torch.arange(0, 3) + >>> a + tensor([0, 1, 2]) + >>> torch.any(a) + tensor(True) + + .. function:: any(input, dim, keepdim=False, *, out=None) -> Tensor + :noindex: + + For each row of :attr:`input` in the given dimension :attr:`dim`, + returns `True` if any element in the row evaluate to `True` and `False` otherwise. + + + If :attr:`keepdim` is ``True``, the output tensor is of the same size + as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the + output tensor having 1 (or ``len(dim)``) fewer dimension(s). + + + Args: + input (Tensor): the input tensor. + dim (int or tuple of ints): the dimension or dimensions to reduce. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randn(4, 2) < 0 + >>> a + tensor([[ True, True], + [False, True], + [ True, True], + [False, False]]) + >>> torch.any(a, 1) + tensor([ True, True, True, False]) + >>> torch.any(a, 0) + tensor([True, True]) + """ + ... +@overload +def any(input: Tensor, dim: Union[str, ellipsis, None], keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: + r""" + any(input) -> Tensor + + Tests if any element in :attr:`input` evaluates to `True`. + + .. note:: This function matches the behaviour of NumPy in returning + output of dtype `bool` for all supported dtypes except `uint8`. + For `uint8` the dtype of output is `uint8` itself. 
+ + Example:: + + >>> a = torch.rand(1, 2).bool() + >>> a + tensor([[False, True]], dtype=torch.bool) + >>> torch.any(a) + tensor(True, dtype=torch.bool) + >>> a = torch.arange(0, 3) + >>> a + tensor([0, 1, 2]) + >>> torch.any(a) + tensor(True) + + .. function:: any(input, dim, keepdim=False, *, out=None) -> Tensor + :noindex: + + For each row of :attr:`input` in the given dimension :attr:`dim`, + returns `True` if any element in the row evaluate to `True` and `False` otherwise. + + + If :attr:`keepdim` is ``True``, the output tensor is of the same size + as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the + output tensor having 1 (or ``len(dim)``) fewer dimension(s). + + + Args: + input (Tensor): the input tensor. + dim (int or tuple of ints): the dimension or dimensions to reduce. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randn(4, 2) < 0 + >>> a + tensor([[ True, True], + [False, True], + [ True, True], + [False, False]]) + >>> torch.any(a, 1) + tensor([ True, True, True, False]) + >>> torch.any(a, 0) + tensor([True, True]) + """ + ... +@overload +def arange(start: Number, end: Number, step: Number, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, pin_memory: _bool = False) -> Tensor: + r""" + arange(start=0, end, step=1, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + Returns a 1-D tensor of size :math:`\left\lceil \frac{\text{end} - \text{start}}{\text{step}} \right\rceil` + with values from the interval ``[start, end)`` taken with common difference + :attr:`step` beginning from `start`. + + Note that non-integer :attr:`step` is subject to floating point rounding errors when + comparing against :attr:`end`; to avoid inconsistency, we advise subtracting a small epsilon from :attr:`end` + in such cases. + + .. math:: + \text{out}_{{i+1}} = \text{out}_{i} + \text{step} + + Args: + start (Number): the starting value for the set of points. Default: ``0``. + end (Number): the ending value for the set of points + step (Number): the gap between each pair of adjacent points. Default: ``1``. + + Keyword args: + out (Tensor, optional): the output tensor. + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). If `dtype` is not given, infer the data type from the other input + arguments. If any of `start`, `end`, or `stop` are floating-point, the + `dtype` is inferred to be the default dtype, see + :meth:`~torch.get_default_dtype`. Otherwise, the `dtype` is inferred to + be `torch.int64`. + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. 
+ + Example:: + + >>> torch.arange(5) + tensor([ 0, 1, 2, 3, 4]) + >>> torch.arange(1, 4) + tensor([ 1, 2, 3]) + >>> torch.arange(1, 2.5, 0.5) + tensor([ 1.0000, 1.5000, 2.0000]) + """ + ... +@overload +def arange(start: Number, end: Number, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, pin_memory: _bool = False) -> Tensor: + r""" + arange(start=0, end, step=1, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + Returns a 1-D tensor of size :math:`\left\lceil \frac{\text{end} - \text{start}}{\text{step}} \right\rceil` + with values from the interval ``[start, end)`` taken with common difference + :attr:`step` beginning from `start`. + + Note that non-integer :attr:`step` is subject to floating point rounding errors when + comparing against :attr:`end`; to avoid inconsistency, we advise subtracting a small epsilon from :attr:`end` + in such cases. + + .. math:: + \text{out}_{{i+1}} = \text{out}_{i} + \text{step} + + Args: + start (Number): the starting value for the set of points. Default: ``0``. + end (Number): the ending value for the set of points + step (Number): the gap between each pair of adjacent points. Default: ``1``. + + Keyword args: + out (Tensor, optional): the output tensor. + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). If `dtype` is not given, infer the data type from the other input + arguments. If any of `start`, `end`, or `stop` are floating-point, the + `dtype` is inferred to be the default dtype, see + :meth:`~torch.get_default_dtype`. Otherwise, the `dtype` is inferred to + be `torch.int64`. + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + + Example:: + + >>> torch.arange(5) + tensor([ 0, 1, 2, 3, 4]) + >>> torch.arange(1, 4) + tensor([ 1, 2, 3]) + >>> torch.arange(1, 2.5, 0.5) + tensor([ 1.0000, 1.5000, 2.0000]) + """ + ... +@overload +def arange(end: Number, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, pin_memory: _bool = False) -> Tensor: + r""" + arange(start=0, end, step=1, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + Returns a 1-D tensor of size :math:`\left\lceil \frac{\text{end} - \text{start}}{\text{step}} \right\rceil` + with values from the interval ``[start, end)`` taken with common difference + :attr:`step` beginning from `start`. + + Note that non-integer :attr:`step` is subject to floating point rounding errors when + comparing against :attr:`end`; to avoid inconsistency, we advise subtracting a small epsilon from :attr:`end` + in such cases. + + .. math:: + \text{out}_{{i+1}} = \text{out}_{i} + \text{step} + + Args: + start (Number): the starting value for the set of points. Default: ``0``. 
+ end (Number): the ending value for the set of points + step (Number): the gap between each pair of adjacent points. Default: ``1``. + + Keyword args: + out (Tensor, optional): the output tensor. + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). If `dtype` is not given, infer the data type from the other input + arguments. If any of `start`, `end`, or `stop` are floating-point, the + `dtype` is inferred to be the default dtype, see + :meth:`~torch.get_default_dtype`. Otherwise, the `dtype` is inferred to + be `torch.int64`. + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + + Example:: + + >>> torch.arange(5) + tensor([ 0, 1, 2, 3, 4]) + >>> torch.arange(1, 4) + tensor([ 1, 2, 3]) + >>> torch.arange(1, 2.5, 0.5) + tensor([ 1.0000, 1.5000, 2.0000]) + """ + ... +@overload +def arange(end: Union[Number, _complex], *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + arange(start=0, end, step=1, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + Returns a 1-D tensor of size :math:`\left\lceil \frac{\text{end} - \text{start}}{\text{step}} \right\rceil` + with values from the interval ``[start, end)`` taken with common difference + :attr:`step` beginning from `start`. + + Note that non-integer :attr:`step` is subject to floating point rounding errors when + comparing against :attr:`end`; to avoid inconsistency, we advise subtracting a small epsilon from :attr:`end` + in such cases. + + .. math:: + \text{out}_{{i+1}} = \text{out}_{i} + \text{step} + + Args: + start (Number): the starting value for the set of points. Default: ``0``. + end (Number): the ending value for the set of points + step (Number): the gap between each pair of adjacent points. Default: ``1``. + + Keyword args: + out (Tensor, optional): the output tensor. + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). If `dtype` is not given, infer the data type from the other input + arguments. If any of `start`, `end`, or `stop` are floating-point, the + `dtype` is inferred to be the default dtype, see + :meth:`~torch.get_default_dtype`. Otherwise, the `dtype` is inferred to + be `torch.int64`. + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. 
+ requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + + Example:: + + >>> torch.arange(5) + tensor([ 0, 1, 2, 3, 4]) + >>> torch.arange(1, 4) + tensor([ 1, 2, 3]) + >>> torch.arange(1, 2.5, 0.5) + tensor([ 1.0000, 1.5000, 2.0000]) + """ + ... +@overload +def arange(start: Union[Number, _complex], end: Union[Number, _complex], *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + arange(start=0, end, step=1, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + Returns a 1-D tensor of size :math:`\left\lceil \frac{\text{end} - \text{start}}{\text{step}} \right\rceil` + with values from the interval ``[start, end)`` taken with common difference + :attr:`step` beginning from `start`. + + Note that non-integer :attr:`step` is subject to floating point rounding errors when + comparing against :attr:`end`; to avoid inconsistency, we advise subtracting a small epsilon from :attr:`end` + in such cases. + + .. math:: + \text{out}_{{i+1}} = \text{out}_{i} + \text{step} + + Args: + start (Number): the starting value for the set of points. Default: ``0``. + end (Number): the ending value for the set of points + step (Number): the gap between each pair of adjacent points. Default: ``1``. + + Keyword args: + out (Tensor, optional): the output tensor. + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). If `dtype` is not given, infer the data type from the other input + arguments. If any of `start`, `end`, or `stop` are floating-point, the + `dtype` is inferred to be the default dtype, see + :meth:`~torch.get_default_dtype`. Otherwise, the `dtype` is inferred to + be `torch.int64`. + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + + Example:: + + >>> torch.arange(5) + tensor([ 0, 1, 2, 3, 4]) + >>> torch.arange(1, 4) + tensor([ 1, 2, 3]) + >>> torch.arange(1, 2.5, 0.5) + tensor([ 1.0000, 1.5000, 2.0000]) + """ + ... +@overload +def arange(start: Union[Number, _complex], end: Union[Number, _complex], step: Union[Number, _complex] = 1, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + arange(start=0, end, step=1, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + Returns a 1-D tensor of size :math:`\left\lceil \frac{\text{end} - \text{start}}{\text{step}} \right\rceil` + with values from the interval ``[start, end)`` taken with common difference + :attr:`step` beginning from `start`. 
+ + Note that non-integer :attr:`step` is subject to floating point rounding errors when + comparing against :attr:`end`; to avoid inconsistency, we advise subtracting a small epsilon from :attr:`end` + in such cases. + + .. math:: + \text{out}_{{i+1}} = \text{out}_{i} + \text{step} + + Args: + start (Number): the starting value for the set of points. Default: ``0``. + end (Number): the ending value for the set of points + step (Number): the gap between each pair of adjacent points. Default: ``1``. + + Keyword args: + out (Tensor, optional): the output tensor. + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). If `dtype` is not given, infer the data type from the other input + arguments. If any of `start`, `end`, or `stop` are floating-point, the + `dtype` is inferred to be the default dtype, see + :meth:`~torch.get_default_dtype`. Otherwise, the `dtype` is inferred to + be `torch.int64`. + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + + Example:: + + >>> torch.arange(5) + tensor([ 0, 1, 2, 3, 4]) + >>> torch.arange(1, 4) + tensor([ 1, 2, 3]) + >>> torch.arange(1, 2.5, 0.5) + tensor([ 1.0000, 1.5000, 2.0000]) + """ + ... +def arccos(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + arccos(input, *, out=None) -> Tensor + + Alias for :func:`torch.acos`. + """ + ... +def arccos_(input: Tensor) -> Tensor: ... +def arccosh(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + arccosh(input, *, out=None) -> Tensor + + Alias for :func:`torch.acosh`. + """ + ... +def arccosh_(input: Tensor) -> Tensor: ... +def arcsin(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + arcsin(input, *, out=None) -> Tensor + + Alias for :func:`torch.asin`. + """ + ... +def arcsin_(input: Tensor) -> Tensor: ... +def arcsinh(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + arcsinh(input, *, out=None) -> Tensor + + Alias for :func:`torch.asinh`. + """ + ... +def arcsinh_(input: Tensor) -> Tensor: ... +def arctan(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + arctan(input, *, out=None) -> Tensor + + Alias for :func:`torch.atan`. + """ + ... +def arctan2(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + arctan2(input, other, *, out=None) -> Tensor + Alias for :func:`torch.atan2`. + """ + ... +def arctan_(input: Tensor) -> Tensor: ... +def arctanh(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + arctanh(input, *, out=None) -> Tensor + + Alias for :func:`torch.atanh`. + """ + ... +def arctanh_(input: Tensor) -> Tensor: ... +def argmax(input: Tensor, dim: Optional[_int] = None, keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: + r""" + argmax(input) -> LongTensor + + Returns the indices of the maximum value of all elements in the :attr:`input` tensor. + + This is the second value returned by :meth:`torch.max`. See its + documentation for the exact semantics of this method. 
+ + .. note:: If there are multiple maximal values then the indices of the first maximal value are returned. + + Args: + input (Tensor): the input tensor. + + Example:: + + >>> a = torch.randn(4, 4) + >>> a + tensor([[ 1.3398, 0.2663, -0.2686, 0.2450], + [-0.7401, -0.8805, -0.3402, -1.1936], + [ 0.4907, -1.3948, -1.0691, -0.3132], + [-1.6092, 0.5419, -0.2993, 0.3195]]) + >>> torch.argmax(a) + tensor(0) + + .. function:: argmax(input, dim, keepdim=False) -> LongTensor + :noindex: + + Returns the indices of the maximum values of a tensor across a dimension. + + This is the second value returned by :meth:`torch.max`. See its + documentation for the exact semantics of this method. + + Args: + input (Tensor): the input tensor. + dim (int): the dimension to reduce. If ``None``, the argmax of the flattened input is returned. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + + Example:: + + >>> a = torch.randn(4, 4) + >>> a + tensor([[ 1.3398, 0.2663, -0.2686, 0.2450], + [-0.7401, -0.8805, -0.3402, -1.1936], + [ 0.4907, -1.3948, -1.0691, -0.3132], + [-1.6092, 0.5419, -0.2993, 0.3195]]) + >>> torch.argmax(a, dim=1) + tensor([ 0, 2, 0, 1]) + """ + ... +def argmin(input: Tensor, dim: Optional[_int] = None, keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: + r""" + argmin(input, dim=None, keepdim=False) -> LongTensor + + Returns the indices of the minimum value(s) of the flattened tensor or along a dimension + + This is the second value returned by :meth:`torch.min`. See its + documentation for the exact semantics of this method. + + .. note:: If there are multiple minimal values then the indices of the first minimal value are returned. + + Args: + input (Tensor): the input tensor. + dim (int): the dimension to reduce. If ``None``, the argmin of the flattened input is returned. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + + Example:: + + >>> a = torch.randn(4, 4) + >>> a + tensor([[ 0.1139, 0.2254, -0.1381, 0.3687], + [ 1.0100, -1.1975, -0.0102, -0.4732], + [-0.9240, 0.1207, -0.7506, -1.0213], + [ 1.7809, -1.2960, 0.9384, 0.1438]]) + >>> torch.argmin(a) + tensor(13) + >>> torch.argmin(a, dim=1) + tensor([ 2, 1, 3, 1]) + >>> torch.argmin(a, dim=1, keepdim=True) + tensor([[2], + [1], + [3], + [1]]) + """ + ... +@overload +def argsort(input: Tensor, *, stable: _bool, dim: _int = -1, descending: _bool = False) -> Tensor: + r""" + argsort(input, dim=-1, descending=False, stable=False) -> Tensor + + Returns the indices that sort a tensor along a given dimension in ascending + order by value. + + This is the second value returned by :meth:`torch.sort`. See its documentation + for the exact semantics of this method. + + If :attr:`stable` is ``True`` then the sorting routine becomes stable, preserving + the order of equivalent elements. If ``False``, the relative order of values + which compare equal is not guaranteed. ``True`` is slower. + + Args: + input (Tensor): the input tensor. + dim (int, optional): the dimension to sort along + descending (bool, optional): controls the sorting order (ascending or descending) + stable (bool, optional): controls the relative order of equivalent elements + + Example:: + + >>> a = torch.randn(4, 4) + >>> a + tensor([[ 0.0785, 1.5267, -0.8521, 0.4065], + [ 0.1598, 0.0788, -0.0745, -1.2700], + [ 1.2208, 1.0722, -0.7064, 1.2564], + [ 0.0669, -0.2318, -0.8229, -0.9280]]) + + + >>> torch.argsort(a, dim=1) + tensor([[2, 0, 3, 1], + [3, 2, 1, 0], + [2, 1, 0, 3], + [3, 2, 1, 0]]) + """ + ... 
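+# --- Editorial usage sketch (not part of the generated stub) -------------------------------
+# A minimal illustration of the `torch.argsort` overloads declared around this note, using
+# only public `torch` APIs: `argsort` returns the same indices as `torch.sort(...).indices`,
+# and `stable=True` preserves the original order of elements that compare equal.
+#
+#     >>> import torch
+#     >>> x = torch.tensor([3, 1, 3, 2])
+#     >>> torch.argsort(x, stable=True)          # ties (the two 3s) keep their input order
+#     tensor([1, 3, 0, 2])
+#     >>> torch.equal(torch.argsort(x, stable=True), torch.sort(x, stable=True).indices)
+#     True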
+@overload +def argsort(input: Tensor, dim: _int = -1, descending: _bool = False) -> Tensor: + r""" + argsort(input, dim=-1, descending=False, stable=False) -> Tensor + + Returns the indices that sort a tensor along a given dimension in ascending + order by value. + + This is the second value returned by :meth:`torch.sort`. See its documentation + for the exact semantics of this method. + + If :attr:`stable` is ``True`` then the sorting routine becomes stable, preserving + the order of equivalent elements. If ``False``, the relative order of values + which compare equal is not guaranteed. ``True`` is slower. + + Args: + input (Tensor): the input tensor. + dim (int, optional): the dimension to sort along + descending (bool, optional): controls the sorting order (ascending or descending) + stable (bool, optional): controls the relative order of equivalent elements + + Example:: + + >>> a = torch.randn(4, 4) + >>> a + tensor([[ 0.0785, 1.5267, -0.8521, 0.4065], + [ 0.1598, 0.0788, -0.0745, -1.2700], + [ 1.2208, 1.0722, -0.7064, 1.2564], + [ 0.0669, -0.2318, -0.8229, -0.9280]]) + + + >>> torch.argsort(a, dim=1) + tensor([[2, 0, 3, 1], + [3, 2, 1, 0], + [2, 1, 0, 3], + [3, 2, 1, 0]]) + """ + ... +@overload +def argsort(input: Tensor, dim: Union[str, ellipsis, None], descending: _bool = False) -> Tensor: + r""" + argsort(input, dim=-1, descending=False, stable=False) -> Tensor + + Returns the indices that sort a tensor along a given dimension in ascending + order by value. + + This is the second value returned by :meth:`torch.sort`. See its documentation + for the exact semantics of this method. + + If :attr:`stable` is ``True`` then the sorting routine becomes stable, preserving + the order of equivalent elements. If ``False``, the relative order of values + which compare equal is not guaranteed. ``True`` is slower. + + Args: + input (Tensor): the input tensor. + dim (int, optional): the dimension to sort along + descending (bool, optional): controls the sorting order (ascending or descending) + stable (bool, optional): controls the relative order of equivalent elements + + Example:: + + >>> a = torch.randn(4, 4) + >>> a + tensor([[ 0.0785, 1.5267, -0.8521, 0.4065], + [ 0.1598, 0.0788, -0.0745, -1.2700], + [ 1.2208, 1.0722, -0.7064, 1.2564], + [ 0.0669, -0.2318, -0.8229, -0.9280]]) + + + >>> torch.argsort(a, dim=1) + tensor([[2, 0, 3, 1], + [3, 2, 1, 0], + [2, 1, 0, 3], + [3, 2, 1, 0]]) + """ + ... +def argwhere(input: Tensor) -> Tensor: + r""" + argwhere(input) -> Tensor + + Returns a tensor containing the indices of all non-zero elements of + :attr:`input`. Each row in the result contains the indices of a non-zero + element in :attr:`input`. The result is sorted lexicographically, with + the last index changing the fastest (C-style). + + If :attr:`input` has :math:`n` dimensions, then the resulting indices tensor + :attr:`out` is of size :math:`(z \times n)`, where :math:`z` is the total number of + non-zero elements in the :attr:`input` tensor. + + .. note:: + This function is similar to NumPy's `argwhere`. + + When :attr:`input` is on CUDA, this function causes host-device synchronization. + + Args: + {input} + + Example:: + + >>> t = torch.tensor([1, 0, 1]) + >>> torch.argwhere(t) + tensor([[0], + [2]]) + >>> t = torch.tensor([[1, 0, 1], [0, 1, 1]]) + >>> torch.argwhere(t) + tensor([[0, 0], + [0, 2], + [1, 1], + [1, 2]]) + """ + ... 
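+# --- Editorial usage sketch (not part of the generated stub) -------------------------------
+# The rows returned by `torch.argwhere` above index the non-zero elements, so they can be fed
+# back through advanced indexing to recover those elements (illustrative only):
+#
+#     >>> import torch
+#     >>> t = torch.tensor([[1, 0, 1], [0, 1, 1]])
+#     >>> idx = torch.argwhere(t)                # shape (z, n): one row per non-zero element
+#     >>> t[idx[:, 0], idx[:, 1]]
+#     tensor([1, 1, 1, 1])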
+def as_strided(input: Tensor, size: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], storage_offset: Optional[Union[_int, SymInt]] = None) -> Tensor: + r""" + as_strided(input, size, stride, storage_offset=None) -> Tensor + + Create a view of an existing `torch.Tensor` :attr:`input` with specified + :attr:`size`, :attr:`stride` and :attr:`storage_offset`. + + .. warning:: + Prefer using other view functions, like :meth:`torch.Tensor.expand`, + to setting a view's strides manually with `as_strided`, as this + function's behavior depends on the implementation of a tensor's storage. + The constructed view of the storage must only refer to elements within + the storage or a runtime error will be thrown, and if the view is + "overlapped" (with multiple indices referring to the same element in + memory) its behavior is undefined. + + Args: + input (Tensor): the input tensor. + size (tuple or ints): the shape of the output tensor + stride (tuple or ints): the stride of the output tensor + storage_offset (int, optional): the offset in the underlying storage of the output tensor. + If ``None``, the storage_offset of the output tensor will match the input tensor. + + Example:: + + >>> x = torch.randn(3, 3) + >>> x + tensor([[ 0.9039, 0.6291, 1.0795], + [ 0.1586, 2.1939, -0.4900], + [-0.1909, -0.7503, 1.9355]]) + >>> t = torch.as_strided(x, (2, 2), (1, 2)) + >>> t + tensor([[0.9039, 1.0795], + [0.6291, 0.1586]]) + >>> t = torch.as_strided(x, (2, 2), (1, 2), 1) + tensor([[0.6291, 0.1586], + [1.0795, 2.1939]]) + """ + ... +def as_strided_(input: Tensor, size: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], storage_offset: Optional[Union[_int, SymInt]] = None) -> Tensor: ... +def as_strided_copy(input: Tensor, size: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], storage_offset: Optional[Union[_int, SymInt]] = None, *, out: Optional[Tensor] = None) -> Tensor: + r""" + Performs the same operation as :func:`torch.as_strided`, but all output tensors + are freshly created instead of aliasing the input. + """ + ... +def as_strided_scatter(input: Tensor, src: Tensor, size: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], storage_offset: Optional[Union[_int, SymInt]] = None) -> Tensor: + r""" + as_strided_scatter(input, src, size, stride, storage_offset=None) -> Tensor + + Embeds the values of the :attr:`src` tensor into :attr:`input` along + the elements corresponding to the result of calling + input.as_strided(size, stride, storage_offset). + + This function returns a tensor with fresh storage; it does not + return a view. + + Args: + input (Tensor): the input tensor. + size (tuple or ints): the shape of the output tensor + stride (tuple or ints): the stride of the output tensor + storage_offset (int, optional): the offset in the underlying storage of the output tensor + + .. note:: + + :attr:`src` must be of the proper size in order to be embedded + into :attr:`input`. Specifically, it should have the same shape as + `torch.as_strided(input, size, stride, storage_offset)` + + Example:: + + >>> a = torch.arange(4).reshape(2, 2) + 1 + >>> a + tensor([[1, 2], + [3, 4]]) + >>> b = torch.zeros(3, 3) + >>> b + tensor([[0., 0., 0.], + [0., 0., 0.], + [0., 0., 0.]]) + >>> torch.as_strided_scatter(b, a, (2, 2), (1, 2)) + tensor([[1., 3., 2.], + [4., 0., 0.], + [0., 0., 0.]]) + """ + ... 
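+# --- Editorial usage sketch (not part of the generated stub) -------------------------------
+# `torch.as_strided` above can express overlapping "sliding window" views by reusing a stride
+# of 1 along the new axis; because the windows overlap, treat the result as read-only (see the
+# warning in its docstring). Illustrative only:
+#
+#     >>> import torch
+#     >>> x = torch.arange(5.)                   # contiguous 1-D tensor, stride (1,)
+#     >>> torch.as_strided(x, (3, 3), (1, 1))    # element [i][j] reads x[i + j]
+#     tensor([[0., 1., 2.],
+#             [1., 2., 3.],
+#             [2., 3., 4.]])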
+def as_tensor(data: Any, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None) -> Tensor: + r""" + as_tensor(data, dtype=None, device=None) -> Tensor + + Converts :attr:`data` into a tensor, sharing data and preserving autograd + history if possible. + + If :attr:`data` is already a tensor with the requested dtype and device + then :attr:`data` itself is returned, but if :attr:`data` is a + tensor with a different dtype or device then it's copied as if using + `data.to(dtype=dtype, device=device)`. + + If :attr:`data` is a NumPy array (an ndarray) with the same dtype and device then a + tensor is constructed using :func:`torch.from_numpy`. + + .. seealso:: + + :func:`torch.tensor` never shares its data and creates a new "leaf tensor" (see :doc:`/notes/autograd`). + + + Args: + data (array_like): Initial data for the tensor. Can be a list, tuple, + NumPy ``ndarray``, scalar, and other types. + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, infers data type from :attr:`data`. + device (:class:`torch.device`, optional): the device of the constructed tensor. If None and data is a tensor + then the device of data is used. If None and data is not a tensor then + the result tensor is constructed on the current device. + + + Example:: + + >>> a = numpy.array([1, 2, 3]) + >>> t = torch.as_tensor(a) + >>> t + tensor([ 1, 2, 3]) + >>> t[0] = -1 + >>> a + array([-1, 2, 3]) + + >>> a = numpy.array([1, 2, 3]) + >>> t = torch.as_tensor(a, device=torch.device('cuda')) + >>> t + tensor([ 1, 2, 3]) + >>> t[0] = -1 + >>> a + array([1, 2, 3]) + """ + ... +def asarray(obj: Any, *, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, copy: Optional[_bool] = None, requires_grad: _bool = False) -> Tensor: + r""" + asarray(obj, *, dtype=None, device=None, copy=None, requires_grad=False) -> Tensor + + Converts :attr:`obj` to a tensor. + + :attr:`obj` can be one of: + + 1. a tensor + 2. a NumPy array or a NumPy scalar + 3. a DLPack capsule + 4. an object that implements Python's buffer protocol + 5. a scalar + 6. a sequence of scalars + + When :attr:`obj` is a tensor, NumPy array, or DLPack capsule the returned tensor will, + by default, not require a gradient, have the same datatype as :attr:`obj`, be on the + same device, and share memory with it. These properties can be controlled with the + :attr:`dtype`, :attr:`device`, :attr:`copy`, and :attr:`requires_grad` keyword arguments. + If the returned tensor is of a different datatype, on a different device, or a copy is + requested then it will not share its memory with :attr:`obj`. If :attr:`requires_grad` + is ``True`` then the returned tensor will require a gradient, and if :attr:`obj` is + also a tensor with an autograd history then the returned tensor will have the same history. + + When :attr:`obj` is not a tensor, NumPy array, or DLPack capsule but implements Python's + buffer protocol then the buffer is interpreted as an array of bytes grouped according to + the size of the datatype passed to the :attr:`dtype` keyword argument. (If no datatype is + passed then the default floating point datatype is used, instead.) The returned tensor + will have the specified datatype (or default floating point datatype if none is specified) + and, by default, be on the CPU device and share memory with the buffer. + + When :attr:`obj` is a NumPy scalar, the returned tensor will be a 0-dimensional tensor on + the CPU and that doesn't share its memory (i.e. ``copy=True``). 
By default datatype will
+ be the PyTorch datatype corresponding to the NumPy's scalar's datatype.
+
+ When :attr:`obj` is none of the above but a scalar, or a sequence of scalars then the
+ returned tensor will, by default, infer its datatype from the scalar values, be on the
+ current default device, and not share its memory.
+
+ .. seealso::
+
+ :func:`torch.tensor` creates a tensor that always copies the data from the input object.
+ :func:`torch.from_numpy` creates a tensor that always shares memory from NumPy arrays.
+ :func:`torch.frombuffer` creates a tensor that always shares memory from objects that
+ implement the buffer protocol.
+ :func:`torch.from_dlpack` creates a tensor that always shares memory from
+ DLPack capsules.
+
+ Args:
+ obj (object): a tensor, NumPy array, DLPack Capsule, object that implements Python's
+ buffer protocol, scalar, or sequence of scalars.
+
+ Keyword args:
+ dtype (:class:`torch.dtype`, optional): the datatype of the returned tensor.
+ Default: ``None``, which causes the datatype of the returned tensor to be
+ inferred from :attr:`obj`.
+ copy (bool, optional): controls whether the returned tensor shares memory with :attr:`obj`.
+ Default: ``None``, which causes the returned tensor to share memory with :attr:`obj`
+ whenever possible. If ``True`` then the returned tensor does not share its memory.
+ If ``False`` then the returned tensor shares its memory with :attr:`obj` and an
+ error is thrown if it cannot.
+ device (:class:`torch.device`, optional): the device of the returned tensor.
+ Default: ``None``, which causes the device of :attr:`obj` to be used. Or, if
+ :attr:`obj` is a Python sequence, the current default device will be used.
+ requires_grad (bool, optional): whether the returned tensor requires grad.
+ Default: ``False``, which causes the returned tensor not to require a gradient.
+ If ``True``, then the returned tensor will require a gradient, and if :attr:`obj`
+ is also a tensor with an autograd history then the returned tensor will have
+ the same history.
+
+ Example::
+
+ >>> a = torch.tensor([1, 2, 3])
+ >>> # Shares memory with tensor 'a'
+ >>> b = torch.asarray(a)
+ >>> a.data_ptr() == b.data_ptr()
+ True
+ >>> # Forces memory copy
+ >>> c = torch.asarray(a, copy=True)
+ >>> a.data_ptr() == c.data_ptr()
+ False
+
+ >>> a = torch.tensor([1., 2., 3.], requires_grad=True)
+ >>> b = a + 2
+ >>> b
+ tensor([3., 4., 5.], grad_fn=<AddBackward0>)
+ >>> # Shares memory with tensor 'b', with no grad
+ >>> c = torch.asarray(b)
+ >>> c
+ tensor([3., 4., 5.])
+ >>> # Shares memory with tensor 'b', retaining autograd history
+ >>> d = torch.asarray(b, requires_grad=True)
+ >>> d
+ tensor([3., 4., 5.], grad_fn=<AddBackward0>)
+
+ >>> array = numpy.array([1, 2, 3])
+ >>> # Shares memory with array 'array'
+ >>> t1 = torch.asarray(array)
+ >>> array.__array_interface__['data'][0] == t1.data_ptr()
+ True
+ >>> # Copies memory due to dtype mismatch
+ >>> t2 = torch.asarray(array, dtype=torch.float32)
+ >>> array.__array_interface__['data'][0] == t2.data_ptr()
+ False
+
+ >>> scalar = numpy.float64(0.5)
+ >>> torch.asarray(scalar)
+ tensor(0.5000, dtype=torch.float64)
+ """
+ ...
+def asin(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
+ r"""
+ asin(input, *, out=None) -> Tensor
+
+ Returns a new tensor with the arcsine of the elements of :attr:`input`.
+
+ .. math::
+ \text{out}_{i} = \sin^{-1}(\text{input}_{i})
+
+ Args:
+ input (Tensor): the input tensor.
+
+ Keyword args:
+ out (Tensor, optional): the output tensor.
+ + Example:: + + >>> a = torch.randn(4) + >>> a + tensor([-0.5962, 1.4985, -0.4396, 1.4525]) + >>> torch.asin(a) + tensor([-0.6387, nan, -0.4552, nan]) + """ + ... +def asin_(input: Tensor) -> Tensor: ... +def asinh(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + asinh(input, *, out=None) -> Tensor + + Returns a new tensor with the inverse hyperbolic sine of the elements of :attr:`input`. + + .. math:: + \text{out}_{i} = \sinh^{-1}(\text{input}_{i}) + + Args: + input (Tensor): the input tensor. + + Keyword arguments: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randn(4) + >>> a + tensor([ 0.1606, -1.4267, -1.0899, -1.0250 ]) + >>> torch.asinh(a) + tensor([ 0.1599, -1.1534, -0.9435, -0.8990 ]) + """ + ... +def asinh_(input: Tensor) -> Tensor: ... +def atan(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + atan(input, *, out=None) -> Tensor + + Returns a new tensor with the arctangent of the elements of :attr:`input`. + + .. math:: + \text{out}_{i} = \tan^{-1}(\text{input}_{i}) + + Args: + input (Tensor): the input tensor. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randn(4) + >>> a + tensor([ 0.2341, 0.2539, -0.6256, -0.6448]) + >>> torch.atan(a) + tensor([ 0.2299, 0.2487, -0.5591, -0.5727]) + """ + ... +def atan2(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + atan2(input, other, *, out=None) -> Tensor + + Element-wise arctangent of :math:`\text{input}_{i} / \text{other}_{i}` + with consideration of the quadrant. Returns a new tensor with the signed angles + in radians between vector :math:`(\text{other}_{i}, \text{input}_{i})` + and vector :math:`(1, 0)`. (Note that :math:`\text{other}_{i}`, the second + parameter, is the x-coordinate, while :math:`\text{input}_{i}`, the first + parameter, is the y-coordinate.) + + The shapes of ``input`` and ``other`` must be + :ref:`broadcastable `. + + Args: + input (Tensor): the first input tensor + other (Tensor): the second input tensor + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randn(4) + >>> a + tensor([ 0.9041, 0.0196, -0.3108, -2.4423]) + >>> torch.atan2(a, torch.randn(4)) + tensor([ 0.9833, 0.0811, -1.9743, -1.4151]) + """ + ... +def atan_(input: Tensor) -> Tensor: ... +def atanh(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + atanh(input, *, out=None) -> Tensor + + Returns a new tensor with the inverse hyperbolic tangent of the elements of :attr:`input`. + + Note: + The domain of the inverse hyperbolic tangent is `(-1, 1)` and values outside this range + will be mapped to ``NaN``, except for the values `1` and `-1` for which the output is + mapped to `+/-INF` respectively. + + .. math:: + \text{out}_{i} = \tanh^{-1}(\text{input}_{i}) + + Args: + input (Tensor): the input tensor. + + Keyword arguments: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randn(4).uniform_(-1, 1) + >>> a + tensor([ -0.9385, 0.2968, -0.8591, -0.1871 ]) + >>> torch.atanh(a) + tensor([ -1.7253, 0.3060, -1.2899, -0.1893 ]) + """ + ... +def atanh_(input: Tensor) -> Tensor: ... +def avg_pool1d(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size] = (), padding: Union[_int, _size] = 0, ceil_mode: _bool = False, count_include_pad: _bool = True) -> Tensor: ... 
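+# --- Editorial usage sketch (not part of the generated stub) -------------------------------
+# `torch.avg_pool1d` above ships without a docstring in this stub; it averages over 1-D
+# windows of length `kernel_size` on a (minibatch, channels, length) input, stepping by
+# `stride` (which defaults to `kernel_size`). Illustrative only:
+#
+#     >>> import torch
+#     >>> x = torch.tensor([[[1., 2., 3., 4., 5., 6.]]])   # shape (1, 1, 6)
+#     >>> torch.avg_pool1d(x, kernel_size=3, stride=3)     # windows: [1,2,3] and [4,5,6]
+#     tensor([[[2., 5.]]])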
+@overload +def baddbmm(beta: Union[Number, _complex], self: Tensor, alpha: Union[Number, _complex], batch1: Tensor, batch2: Tensor) -> Tensor: + r""" + baddbmm(input, batch1, batch2, *, beta=1, alpha=1, out=None) -> Tensor + + Performs a batch matrix-matrix product of matrices in :attr:`batch1` + and :attr:`batch2`. + :attr:`input` is added to the final result. + + :attr:`batch1` and :attr:`batch2` must be 3-D tensors each containing the same + number of matrices. + + If :attr:`batch1` is a :math:`(b \times n \times m)` tensor, :attr:`batch2` is a + :math:`(b \times m \times p)` tensor, then :attr:`input` must be + :ref:`broadcastable ` with a + :math:`(b \times n \times p)` tensor and :attr:`out` will be a + :math:`(b \times n \times p)` tensor. Both :attr:`alpha` and :attr:`beta` mean the + same as the scaling factors used in :meth:`torch.addbmm`. + + .. math:: + \text{out}_i = \beta\ \text{input}_i + \alpha\ (\text{batch1}_i \mathbin{@} \text{batch2}_i) + + If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in + it will not be propagated. + + For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and + :attr:`alpha` must be real numbers, otherwise they should be integers. + + This operator supports :ref:`TensorFloat32`. + + On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision` for backward. + + Args: + input (Tensor): the tensor to be added + batch1 (Tensor): the first batch of matrices to be multiplied + batch2 (Tensor): the second batch of matrices to be multiplied + + Keyword args: + beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`) + alpha (Number, optional): multiplier for :math:`\text{batch1} \mathbin{@} \text{batch2}` (:math:`\alpha`) + out (Tensor, optional): the output tensor. + + Example:: + + >>> M = torch.randn(10, 3, 5) + >>> batch1 = torch.randn(10, 3, 4) + >>> batch2 = torch.randn(10, 4, 5) + >>> torch.baddbmm(M, batch1, batch2).size() + torch.Size([10, 3, 5]) + """ + ... +@overload +def baddbmm(beta: Union[Number, _complex], self: Tensor, alpha: Union[Number, _complex], batch1: Tensor, batch2: Tensor, *, out: Tensor) -> Tensor: + r""" + baddbmm(input, batch1, batch2, *, beta=1, alpha=1, out=None) -> Tensor + + Performs a batch matrix-matrix product of matrices in :attr:`batch1` + and :attr:`batch2`. + :attr:`input` is added to the final result. + + :attr:`batch1` and :attr:`batch2` must be 3-D tensors each containing the same + number of matrices. + + If :attr:`batch1` is a :math:`(b \times n \times m)` tensor, :attr:`batch2` is a + :math:`(b \times m \times p)` tensor, then :attr:`input` must be + :ref:`broadcastable ` with a + :math:`(b \times n \times p)` tensor and :attr:`out` will be a + :math:`(b \times n \times p)` tensor. Both :attr:`alpha` and :attr:`beta` mean the + same as the scaling factors used in :meth:`torch.addbmm`. + + .. math:: + \text{out}_i = \beta\ \text{input}_i + \alpha\ (\text{batch1}_i \mathbin{@} \text{batch2}_i) + + If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in + it will not be propagated. + + For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and + :attr:`alpha` must be real numbers, otherwise they should be integers. + + This operator supports :ref:`TensorFloat32`. + + On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision` for backward. 
+ + Args: + input (Tensor): the tensor to be added + batch1 (Tensor): the first batch of matrices to be multiplied + batch2 (Tensor): the second batch of matrices to be multiplied + + Keyword args: + beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`) + alpha (Number, optional): multiplier for :math:`\text{batch1} \mathbin{@} \text{batch2}` (:math:`\alpha`) + out (Tensor, optional): the output tensor. + + Example:: + + >>> M = torch.randn(10, 3, 5) + >>> batch1 = torch.randn(10, 3, 4) + >>> batch2 = torch.randn(10, 4, 5) + >>> torch.baddbmm(M, batch1, batch2).size() + torch.Size([10, 3, 5]) + """ + ... +@overload +def baddbmm(input: Tensor, batch1: Tensor, batch2: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1, out: Optional[Tensor] = None) -> Tensor: + r""" + baddbmm(input, batch1, batch2, *, beta=1, alpha=1, out=None) -> Tensor + + Performs a batch matrix-matrix product of matrices in :attr:`batch1` + and :attr:`batch2`. + :attr:`input` is added to the final result. + + :attr:`batch1` and :attr:`batch2` must be 3-D tensors each containing the same + number of matrices. + + If :attr:`batch1` is a :math:`(b \times n \times m)` tensor, :attr:`batch2` is a + :math:`(b \times m \times p)` tensor, then :attr:`input` must be + :ref:`broadcastable ` with a + :math:`(b \times n \times p)` tensor and :attr:`out` will be a + :math:`(b \times n \times p)` tensor. Both :attr:`alpha` and :attr:`beta` mean the + same as the scaling factors used in :meth:`torch.addbmm`. + + .. math:: + \text{out}_i = \beta\ \text{input}_i + \alpha\ (\text{batch1}_i \mathbin{@} \text{batch2}_i) + + If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in + it will not be propagated. + + For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and + :attr:`alpha` must be real numbers, otherwise they should be integers. + + This operator supports :ref:`TensorFloat32`. + + On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision` for backward. + + Args: + input (Tensor): the tensor to be added + batch1 (Tensor): the first batch of matrices to be multiplied + batch2 (Tensor): the second batch of matrices to be multiplied + + Keyword args: + beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`) + alpha (Number, optional): multiplier for :math:`\text{batch1} \mathbin{@} \text{batch2}` (:math:`\alpha`) + out (Tensor, optional): the output tensor. + + Example:: + + >>> M = torch.randn(10, 3, 5) + >>> batch1 = torch.randn(10, 3, 4) + >>> batch2 = torch.randn(10, 4, 5) + >>> torch.baddbmm(M, batch1, batch2).size() + torch.Size([10, 3, 5]) + """ + ... +@overload +def baddbmm(beta: Union[Number, _complex], self: Tensor, batch1: Tensor, batch2: Tensor) -> Tensor: + r""" + baddbmm(input, batch1, batch2, *, beta=1, alpha=1, out=None) -> Tensor + + Performs a batch matrix-matrix product of matrices in :attr:`batch1` + and :attr:`batch2`. + :attr:`input` is added to the final result. + + :attr:`batch1` and :attr:`batch2` must be 3-D tensors each containing the same + number of matrices. + + If :attr:`batch1` is a :math:`(b \times n \times m)` tensor, :attr:`batch2` is a + :math:`(b \times m \times p)` tensor, then :attr:`input` must be + :ref:`broadcastable ` with a + :math:`(b \times n \times p)` tensor and :attr:`out` will be a + :math:`(b \times n \times p)` tensor. Both :attr:`alpha` and :attr:`beta` mean the + same as the scaling factors used in :meth:`torch.addbmm`. + + .. 
math:: + \text{out}_i = \beta\ \text{input}_i + \alpha\ (\text{batch1}_i \mathbin{@} \text{batch2}_i) + + If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in + it will not be propagated. + + For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and + :attr:`alpha` must be real numbers, otherwise they should be integers. + + This operator supports :ref:`TensorFloat32`. + + On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision` for backward. + + Args: + input (Tensor): the tensor to be added + batch1 (Tensor): the first batch of matrices to be multiplied + batch2 (Tensor): the second batch of matrices to be multiplied + + Keyword args: + beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`) + alpha (Number, optional): multiplier for :math:`\text{batch1} \mathbin{@} \text{batch2}` (:math:`\alpha`) + out (Tensor, optional): the output tensor. + + Example:: + + >>> M = torch.randn(10, 3, 5) + >>> batch1 = torch.randn(10, 3, 4) + >>> batch2 = torch.randn(10, 4, 5) + >>> torch.baddbmm(M, batch1, batch2).size() + torch.Size([10, 3, 5]) + """ + ... +@overload +def baddbmm(beta: Union[Number, _complex], self: Tensor, batch1: Tensor, batch2: Tensor, *, out: Tensor) -> Tensor: + r""" + baddbmm(input, batch1, batch2, *, beta=1, alpha=1, out=None) -> Tensor + + Performs a batch matrix-matrix product of matrices in :attr:`batch1` + and :attr:`batch2`. + :attr:`input` is added to the final result. + + :attr:`batch1` and :attr:`batch2` must be 3-D tensors each containing the same + number of matrices. + + If :attr:`batch1` is a :math:`(b \times n \times m)` tensor, :attr:`batch2` is a + :math:`(b \times m \times p)` tensor, then :attr:`input` must be + :ref:`broadcastable ` with a + :math:`(b \times n \times p)` tensor and :attr:`out` will be a + :math:`(b \times n \times p)` tensor. Both :attr:`alpha` and :attr:`beta` mean the + same as the scaling factors used in :meth:`torch.addbmm`. + + .. math:: + \text{out}_i = \beta\ \text{input}_i + \alpha\ (\text{batch1}_i \mathbin{@} \text{batch2}_i) + + If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in + it will not be propagated. + + For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and + :attr:`alpha` must be real numbers, otherwise they should be integers. + + This operator supports :ref:`TensorFloat32`. + + On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision` for backward. + + Args: + input (Tensor): the tensor to be added + batch1 (Tensor): the first batch of matrices to be multiplied + batch2 (Tensor): the second batch of matrices to be multiplied + + Keyword args: + beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`) + alpha (Number, optional): multiplier for :math:`\text{batch1} \mathbin{@} \text{batch2}` (:math:`\alpha`) + out (Tensor, optional): the output tensor. + + Example:: + + >>> M = torch.randn(10, 3, 5) + >>> batch1 = torch.randn(10, 3, 4) + >>> batch2 = torch.randn(10, 4, 5) + >>> torch.baddbmm(M, batch1, batch2).size() + torch.Size([10, 3, 5]) + """ + ... 
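+# NOTE: illustrative sketch, not part of the generated stub. It spells out the
+# baddbmm formula documented above, out_i = beta * input_i + alpha * (batch1_i @ batch2_i),
+# by checking it against an explicit torch.bmm:
+#
+# >>> M = torch.randn(10, 3, 5)
+# >>> batch1 = torch.randn(10, 3, 4)
+# >>> batch2 = torch.randn(10, 4, 5)
+# >>> out = torch.baddbmm(M, batch1, batch2, beta=0.5, alpha=2.0)
+# >>> ref = 0.5 * M + 2.0 * torch.bmm(batch1, batch2)
+# >>> torch.allclose(out, ref)   # True up to floating-point tolerance
+# True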
+@overload +def bartlett_window(window_length: _int, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + bartlett_window(window_length, periodic=True, *, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + Bartlett window function. + + .. math:: + w[n] = 1 - \left| \frac{2n}{N-1} - 1 \right| = \begin{cases} + \frac{2n}{N - 1} & \text{if } 0 \leq n \leq \frac{N - 1}{2} \\ + 2 - \frac{2n}{N - 1} & \text{if } \frac{N - 1}{2} < n < N \\ + \end{cases}, + + where :math:`N` is the full window size. + + The input :attr:`window_length` is a positive integer controlling the + returned window size. :attr:`periodic` flag determines whether the returned + window trims off the last duplicate value from the symmetric window and is + ready to be used as a periodic window with functions like + :meth:`torch.stft`. Therefore, if :attr:`periodic` is true, the :math:`N` in + above formula is in fact :math:`\text{window\_length} + 1`. Also, we always have + ``torch.bartlett_window(L, periodic=True)`` equal to + ``torch.bartlett_window(L + 1, periodic=False)[:-1])``. + + .. note:: + If :attr:`window_length` :math:`=1`, the returned window contains a single value 1. + + Arguments: + window_length (int): the size of returned window + periodic (bool, optional): If True, returns a window to be used as periodic + function. If False, return a symmetric window. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). Only floating point types are supported. + layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only + ``torch.strided`` (dense layout) is supported. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + + Returns: + Tensor: A 1-D tensor of size :math:`(\text{window\_length},)` containing the window + """ + ... +@overload +def bartlett_window(window_length: _int, periodic: _bool, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + bartlett_window(window_length, periodic=True, *, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + Bartlett window function. + + .. math:: + w[n] = 1 - \left| \frac{2n}{N-1} - 1 \right| = \begin{cases} + \frac{2n}{N - 1} & \text{if } 0 \leq n \leq \frac{N - 1}{2} \\ + 2 - \frac{2n}{N - 1} & \text{if } \frac{N - 1}{2} < n < N \\ + \end{cases}, + + where :math:`N` is the full window size. + + The input :attr:`window_length` is a positive integer controlling the + returned window size. :attr:`periodic` flag determines whether the returned + window trims off the last duplicate value from the symmetric window and is + ready to be used as a periodic window with functions like + :meth:`torch.stft`. 
Therefore, if :attr:`periodic` is true, the :math:`N` in + above formula is in fact :math:`\text{window\_length} + 1`. Also, we always have + ``torch.bartlett_window(L, periodic=True)`` equal to + ``torch.bartlett_window(L + 1, periodic=False)[:-1])``. + + .. note:: + If :attr:`window_length` :math:`=1`, the returned window contains a single value 1. + + Arguments: + window_length (int): the size of returned window + periodic (bool, optional): If True, returns a window to be used as periodic + function. If False, return a symmetric window. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). Only floating point types are supported. + layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only + ``torch.strided`` (dense layout) is supported. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + + Returns: + Tensor: A 1-D tensor of size :math:`(\text{window\_length},)` containing the window + """ + ... +def batch_norm(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], running_mean: Optional[Tensor], running_var: Optional[Tensor], training: _bool, momentum: _float, eps: _float, cudnn_enabled: _bool) -> Tensor: ... +def batch_norm_backward_elemt(grad_out: Tensor, input: Tensor, mean: Tensor, invstd: Tensor, weight: Optional[Tensor], sum_dy: Tensor, sum_dy_xmu: Tensor, count: Tensor) -> Tensor: ... +def batch_norm_backward_reduce(grad_out: Tensor, input: Tensor, mean: Tensor, invstd: Tensor, weight: Optional[Tensor], input_g: _bool, weight_g: _bool, bias_g: _bool) -> Tuple[Tensor, Tensor, Tensor, Tensor]: ... +def batch_norm_elemt(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], mean: Tensor, invstd: Tensor, eps: _float, *, out: Optional[Tensor] = None) -> Tensor: ... +def batch_norm_gather_stats(input: Tensor, mean: Tensor, invstd: Tensor, running_mean: Optional[Tensor], running_var: Optional[Tensor], momentum: _float, eps: _float, count: _int) -> Tuple[Tensor, Tensor]: ... +def batch_norm_gather_stats_with_counts(input: Tensor, mean: Tensor, invstd: Tensor, running_mean: Optional[Tensor], running_var: Optional[Tensor], momentum: _float, eps: _float, counts: Tensor) -> Tuple[Tensor, Tensor]: ... +def batch_norm_stats(input: Tensor, eps: _float) -> Tuple[Tensor, Tensor]: ... +def batch_norm_update_stats(input: Tensor, running_mean: Optional[Tensor], running_var: Optional[Tensor], momentum: _float) -> Tuple[Tensor, Tensor]: ... +@overload +def bernoulli(input: Tensor, *, generator: Optional[Generator] = None, out: Optional[Tensor] = None) -> Tensor: + r""" + bernoulli(input, *, generator=None, out=None) -> Tensor + + Draws binary random numbers (0 or 1) from a Bernoulli distribution. + + The :attr:`input` tensor should be a tensor containing probabilities + to be used for drawing the binary random number. + Hence, all values in :attr:`input` have to be in the range: + :math:`0 \leq \text{input}_i \leq 1`. 
+ + The :math:`\text{i}^{th}` element of the output tensor will draw a + value :math:`1` according to the :math:`\text{i}^{th}` probability value given + in :attr:`input`. + + .. math:: + \text{out}_{i} \sim \mathrm{Bernoulli}(p = \text{input}_{i}) + + The returned :attr:`out` tensor only has values 0 or 1 and is of the same + shape as :attr:`input`. + + :attr:`out` can have integral ``dtype``, but :attr:`input` must have floating + point ``dtype``. + + Args: + input (Tensor): the input tensor of probability values for the Bernoulli distribution + + Keyword args: + generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.empty(3, 3).uniform_(0, 1) # generate a uniform random matrix with range [0, 1] + >>> a + tensor([[ 0.1737, 0.0950, 0.3609], + [ 0.7148, 0.0289, 0.2676], + [ 0.9456, 0.8937, 0.7202]]) + >>> torch.bernoulli(a) + tensor([[ 1., 0., 0.], + [ 0., 0., 0.], + [ 1., 1., 1.]]) + + >>> a = torch.ones(3, 3) # probability of drawing "1" is 1 + >>> torch.bernoulli(a) + tensor([[ 1., 1., 1.], + [ 1., 1., 1.], + [ 1., 1., 1.]]) + >>> a = torch.zeros(3, 3) # probability of drawing "1" is 0 + >>> torch.bernoulli(a) + tensor([[ 0., 0., 0.], + [ 0., 0., 0.], + [ 0., 0., 0.]]) + """ + ... +@overload +def bernoulli(input: Tensor, p: _float, *, generator: Optional[Generator] = None) -> Tensor: + r""" + bernoulli(input, *, generator=None, out=None) -> Tensor + + Draws binary random numbers (0 or 1) from a Bernoulli distribution. + + The :attr:`input` tensor should be a tensor containing probabilities + to be used for drawing the binary random number. + Hence, all values in :attr:`input` have to be in the range: + :math:`0 \leq \text{input}_i \leq 1`. + + The :math:`\text{i}^{th}` element of the output tensor will draw a + value :math:`1` according to the :math:`\text{i}^{th}` probability value given + in :attr:`input`. + + .. math:: + \text{out}_{i} \sim \mathrm{Bernoulli}(p = \text{input}_{i}) + + The returned :attr:`out` tensor only has values 0 or 1 and is of the same + shape as :attr:`input`. + + :attr:`out` can have integral ``dtype``, but :attr:`input` must have floating + point ``dtype``. + + Args: + input (Tensor): the input tensor of probability values for the Bernoulli distribution + + Keyword args: + generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.empty(3, 3).uniform_(0, 1) # generate a uniform random matrix with range [0, 1] + >>> a + tensor([[ 0.1737, 0.0950, 0.3609], + [ 0.7148, 0.0289, 0.2676], + [ 0.9456, 0.8937, 0.7202]]) + >>> torch.bernoulli(a) + tensor([[ 1., 0., 0.], + [ 0., 0., 0.], + [ 1., 1., 1.]]) + + >>> a = torch.ones(3, 3) # probability of drawing "1" is 1 + >>> torch.bernoulli(a) + tensor([[ 1., 1., 1.], + [ 1., 1., 1.], + [ 1., 1., 1.]]) + >>> a = torch.zeros(3, 3) # probability of drawing "1" is 0 + >>> torch.bernoulli(a) + tensor([[ 0., 0., 0.], + [ 0., 0., 0.], + [ 0., 0., 0.]]) + """ + ... +def bilinear(input1: Tensor, input2: Tensor, weight: Tensor, bias: Optional[Tensor] = None) -> Tensor: ... +def binary_cross_entropy_with_logits(input: Tensor, target: Tensor, weight: Optional[Tensor] = None, pos_weight: Optional[Tensor] = None, reduction: _int = 1) -> Tensor: ... 
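+# NOTE: illustrative sketch, not part of the generated stub. The bernoulli
+# docstrings above accept an optional generator; seeding a torch.Generator makes
+# the draws reproducible (the exact 0/1 pattern still depends on the backend).
+#
+# >>> g = torch.Generator().manual_seed(0)
+# >>> probs = torch.full((2, 3), 0.5)
+# >>> torch.bernoulli(probs, generator=g)   # 0/1 samples, same shape as probs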
+def bincount(input: Tensor, weights: Optional[Tensor] = None, minlength: _int = 0) -> Tensor: + r""" + bincount(input, weights=None, minlength=0) -> Tensor + + Count the frequency of each value in an array of non-negative ints. + + The number of bins (size 1) is one larger than the largest value in + :attr:`input` unless :attr:`input` is empty, in which case the result is a + tensor of size 0. If :attr:`minlength` is specified, the number of bins is at least + :attr:`minlength` and if :attr:`input` is empty, then the result is tensor of size + :attr:`minlength` filled with zeros. If ``n`` is the value at position ``i``, + ``out[n] += weights[i]`` if :attr:`weights` is specified else + ``out[n] += 1``. + + Note: + This operation may produce nondeterministic gradients when given tensors on a CUDA device. See :doc:`/notes/randomness` for more information. + + Arguments: + input (Tensor): 1-d int tensor + weights (Tensor): optional, weight for each value in the input tensor. + Should be of same size as input tensor. + minlength (int): optional, minimum number of bins. Should be non-negative. + + Returns: + output (Tensor): a tensor of shape ``Size([max(input) + 1])`` if + :attr:`input` is non-empty, else ``Size(0)`` + + Example:: + + >>> input = torch.randint(0, 8, (5,), dtype=torch.int64) + >>> weights = torch.linspace(0, 1, steps=5) + >>> input, weights + (tensor([4, 3, 6, 3, 4]), + tensor([ 0.0000, 0.2500, 0.5000, 0.7500, 1.0000]) + + >>> torch.bincount(input) + tensor([0, 0, 0, 2, 2, 0, 1]) + + >>> input.bincount(weights) + tensor([0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 0.0000, 0.5000]) + """ + ... +def binomial(count: Tensor, prob: Tensor, generator: Optional[Generator] = None) -> Tensor: ... +@overload +def bitwise_and(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + bitwise_and(input, other, *, out=None) -> Tensor + + Computes the bitwise AND of :attr:`input` and :attr:`other`. The input tensor must be of + integral or Boolean types. For bool tensors, it computes the logical AND. + + Args: + input: the first input tensor + other: the second input tensor + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> torch.bitwise_and(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8)) + tensor([1, 0, 3], dtype=torch.int8) + >>> torch.bitwise_and(torch.tensor([True, True, False]), torch.tensor([False, True, False])) + tensor([ False, True, False]) + """ + ... +@overload +def bitwise_and(self: Union[Number, _complex], other: Tensor) -> Tensor: + r""" + bitwise_and(input, other, *, out=None) -> Tensor + + Computes the bitwise AND of :attr:`input` and :attr:`other`. The input tensor must be of + integral or Boolean types. For bool tensors, it computes the logical AND. + + Args: + input: the first input tensor + other: the second input tensor + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> torch.bitwise_and(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8)) + tensor([1, 0, 3], dtype=torch.int8) + >>> torch.bitwise_and(torch.tensor([True, True, False]), torch.tensor([False, True, False])) + tensor([ False, True, False]) + """ + ... +@overload +def bitwise_and(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: + r""" + bitwise_and(input, other, *, out=None) -> Tensor + + Computes the bitwise AND of :attr:`input` and :attr:`other`. The input tensor must be of + integral or Boolean types. 
For bool tensors, it computes the logical AND. + + Args: + input: the first input tensor + other: the second input tensor + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> torch.bitwise_and(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8)) + tensor([1, 0, 3], dtype=torch.int8) + >>> torch.bitwise_and(torch.tensor([True, True, False]), torch.tensor([False, True, False])) + tensor([ False, True, False]) + """ + ... +@overload +def bitwise_left_shift(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + bitwise_left_shift(input, other, *, out=None) -> Tensor + + Computes the left arithmetic shift of :attr:`input` by :attr:`other` bits. + The input tensor must be of integral type. This operator supports + :ref:`broadcasting to a common shape ` and + :ref:`type promotion `. + + The operation applied is: + + .. math:: + \text{out}_i = \text{input}_i << \text{other}_i + + Args: + input (Tensor or Scalar): the first input tensor + other (Tensor or Scalar): the second input tensor + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> torch.bitwise_left_shift(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8)) + tensor([-2, -2, 24], dtype=torch.int8) + """ + ... +@overload +def bitwise_left_shift(self: Union[Number, _complex], other: Tensor) -> Tensor: + r""" + bitwise_left_shift(input, other, *, out=None) -> Tensor + + Computes the left arithmetic shift of :attr:`input` by :attr:`other` bits. + The input tensor must be of integral type. This operator supports + :ref:`broadcasting to a common shape ` and + :ref:`type promotion `. + + The operation applied is: + + .. math:: + \text{out}_i = \text{input}_i << \text{other}_i + + Args: + input (Tensor or Scalar): the first input tensor + other (Tensor or Scalar): the second input tensor + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> torch.bitwise_left_shift(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8)) + tensor([-2, -2, 24], dtype=torch.int8) + """ + ... +@overload +def bitwise_left_shift(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: + r""" + bitwise_left_shift(input, other, *, out=None) -> Tensor + + Computes the left arithmetic shift of :attr:`input` by :attr:`other` bits. + The input tensor must be of integral type. This operator supports + :ref:`broadcasting to a common shape ` and + :ref:`type promotion `. + + The operation applied is: + + .. math:: + \text{out}_i = \text{input}_i << \text{other}_i + + Args: + input (Tensor or Scalar): the first input tensor + other (Tensor or Scalar): the second input tensor + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> torch.bitwise_left_shift(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8)) + tensor([-2, -2, 24], dtype=torch.int8) + """ + ... +def bitwise_not(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + bitwise_not(input, *, out=None) -> Tensor + + Computes the bitwise NOT of the given input tensor. The input tensor must be of + integral or Boolean types. For bool tensors, it computes the logical NOT. + + Args: + input (Tensor): the input tensor. + + Keyword args: + out (Tensor, optional): the output tensor. 
+ + Example:: + + >>> torch.bitwise_not(torch.tensor([-1, -2, 3], dtype=torch.int8)) + tensor([ 0, 1, -4], dtype=torch.int8) + """ + ... +@overload +def bitwise_or(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + bitwise_or(input, other, *, out=None) -> Tensor + + Computes the bitwise OR of :attr:`input` and :attr:`other`. The input tensor must be of + integral or Boolean types. For bool tensors, it computes the logical OR. + + Args: + input: the first input tensor + other: the second input tensor + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> torch.bitwise_or(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8)) + tensor([-1, -2, 3], dtype=torch.int8) + >>> torch.bitwise_or(torch.tensor([True, True, False]), torch.tensor([False, True, False])) + tensor([ True, True, False]) + """ + ... +@overload +def bitwise_or(self: Union[Number, _complex], other: Tensor) -> Tensor: + r""" + bitwise_or(input, other, *, out=None) -> Tensor + + Computes the bitwise OR of :attr:`input` and :attr:`other`. The input tensor must be of + integral or Boolean types. For bool tensors, it computes the logical OR. + + Args: + input: the first input tensor + other: the second input tensor + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> torch.bitwise_or(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8)) + tensor([-1, -2, 3], dtype=torch.int8) + >>> torch.bitwise_or(torch.tensor([True, True, False]), torch.tensor([False, True, False])) + tensor([ True, True, False]) + """ + ... +@overload +def bitwise_or(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: + r""" + bitwise_or(input, other, *, out=None) -> Tensor + + Computes the bitwise OR of :attr:`input` and :attr:`other`. The input tensor must be of + integral or Boolean types. For bool tensors, it computes the logical OR. + + Args: + input: the first input tensor + other: the second input tensor + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> torch.bitwise_or(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8)) + tensor([-1, -2, 3], dtype=torch.int8) + >>> torch.bitwise_or(torch.tensor([True, True, False]), torch.tensor([False, True, False])) + tensor([ True, True, False]) + """ + ... +@overload +def bitwise_right_shift(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + bitwise_right_shift(input, other, *, out=None) -> Tensor + + Computes the right arithmetic shift of :attr:`input` by :attr:`other` bits. + The input tensor must be of integral type. This operator supports + :ref:`broadcasting to a common shape ` and + :ref:`type promotion `. + In any case, if the value of the right operand is negative or is greater + or equal to the number of bits in the promoted left operand, the behavior is undefined. + + The operation applied is: + + .. math:: + \text{out}_i = \text{input}_i >> \text{other}_i + + Args: + input (Tensor or Scalar): the first input tensor + other (Tensor or Scalar): the second input tensor + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> torch.bitwise_right_shift(torch.tensor([-2, -7, 31], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8)) + tensor([-1, -7, 3], dtype=torch.int8) + """ + ... 
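+# NOTE: illustrative sketch, not part of the generated stub. It restates the
+# arithmetic-shift semantics documented above: a left shift by ``n`` multiplies
+# by 2**n (within the dtype's range), and a right shift on signed integers is an
+# arithmetic shift, i.e. floor division by 2**n with the sign preserved.
+#
+# >>> x = torch.tensor([-2, -7, 31], dtype=torch.int8)
+# >>> torch.bitwise_left_shift(x, 1)    # same as x * 2
+# tensor([ -4, -14,  62], dtype=torch.int8)
+# >>> torch.bitwise_right_shift(x, 1)   # floor division by 2
+# tensor([ -1,  -4,  15], dtype=torch.int8)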
+@overload +def bitwise_right_shift(self: Union[Number, _complex], other: Tensor) -> Tensor: + r""" + bitwise_right_shift(input, other, *, out=None) -> Tensor + + Computes the right arithmetic shift of :attr:`input` by :attr:`other` bits. + The input tensor must be of integral type. This operator supports + :ref:`broadcasting to a common shape ` and + :ref:`type promotion `. + In any case, if the value of the right operand is negative or is greater + or equal to the number of bits in the promoted left operand, the behavior is undefined. + + The operation applied is: + + .. math:: + \text{out}_i = \text{input}_i >> \text{other}_i + + Args: + input (Tensor or Scalar): the first input tensor + other (Tensor or Scalar): the second input tensor + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> torch.bitwise_right_shift(torch.tensor([-2, -7, 31], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8)) + tensor([-1, -7, 3], dtype=torch.int8) + """ + ... +@overload +def bitwise_right_shift(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: + r""" + bitwise_right_shift(input, other, *, out=None) -> Tensor + + Computes the right arithmetic shift of :attr:`input` by :attr:`other` bits. + The input tensor must be of integral type. This operator supports + :ref:`broadcasting to a common shape ` and + :ref:`type promotion `. + In any case, if the value of the right operand is negative or is greater + or equal to the number of bits in the promoted left operand, the behavior is undefined. + + The operation applied is: + + .. math:: + \text{out}_i = \text{input}_i >> \text{other}_i + + Args: + input (Tensor or Scalar): the first input tensor + other (Tensor or Scalar): the second input tensor + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> torch.bitwise_right_shift(torch.tensor([-2, -7, 31], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8)) + tensor([-1, -7, 3], dtype=torch.int8) + """ + ... +@overload +def bitwise_xor(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + bitwise_xor(input, other, *, out=None) -> Tensor + + Computes the bitwise XOR of :attr:`input` and :attr:`other`. The input tensor must be of + integral or Boolean types. For bool tensors, it computes the logical XOR. + + Args: + input: the first input tensor + other: the second input tensor + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> torch.bitwise_xor(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8)) + tensor([-2, -2, 0], dtype=torch.int8) + >>> torch.bitwise_xor(torch.tensor([True, True, False]), torch.tensor([False, True, False])) + tensor([ True, False, False]) + """ + ... +@overload +def bitwise_xor(self: Union[Number, _complex], other: Tensor) -> Tensor: + r""" + bitwise_xor(input, other, *, out=None) -> Tensor + + Computes the bitwise XOR of :attr:`input` and :attr:`other`. The input tensor must be of + integral or Boolean types. For bool tensors, it computes the logical XOR. + + Args: + input: the first input tensor + other: the second input tensor + + Keyword args: + out (Tensor, optional): the output tensor. 
+ + Example:: + + >>> torch.bitwise_xor(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8)) + tensor([-2, -2, 0], dtype=torch.int8) + >>> torch.bitwise_xor(torch.tensor([True, True, False]), torch.tensor([False, True, False])) + tensor([ True, False, False]) + """ + ... +@overload +def bitwise_xor(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: + r""" + bitwise_xor(input, other, *, out=None) -> Tensor + + Computes the bitwise XOR of :attr:`input` and :attr:`other`. The input tensor must be of + integral or Boolean types. For bool tensors, it computes the logical XOR. + + Args: + input: the first input tensor + other: the second input tensor + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> torch.bitwise_xor(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8)) + tensor([-2, -2, 0], dtype=torch.int8) + >>> torch.bitwise_xor(torch.tensor([True, True, False]), torch.tensor([False, True, False])) + tensor([ True, False, False]) + """ + ... +@overload +def blackman_window(window_length: _int, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + blackman_window(window_length, periodic=True, *, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + Blackman window function. + + .. math:: + w[n] = 0.42 - 0.5 \cos \left( \frac{2 \pi n}{N - 1} \right) + 0.08 \cos \left( \frac{4 \pi n}{N - 1} \right) + + where :math:`N` is the full window size. + + The input :attr:`window_length` is a positive integer controlling the + returned window size. :attr:`periodic` flag determines whether the returned + window trims off the last duplicate value from the symmetric window and is + ready to be used as a periodic window with functions like + :meth:`torch.stft`. Therefore, if :attr:`periodic` is true, the :math:`N` in + above formula is in fact :math:`\text{window\_length} + 1`. Also, we always have + ``torch.blackman_window(L, periodic=True)`` equal to + ``torch.blackman_window(L + 1, periodic=False)[:-1])``. + + .. note:: + If :attr:`window_length` :math:`=1`, the returned window contains a single value 1. + + Arguments: + window_length (int): the size of returned window + periodic (bool, optional): If True, returns a window to be used as periodic + function. If False, return a symmetric window. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). Only floating point types are supported. + layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only + ``torch.strided`` (dense layout) is supported. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + + Returns: + Tensor: A 1-D tensor of size :math:`(\text{window\_length},)` containing the window + """ + ... 
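+# NOTE: illustrative sketch, not part of the generated stub. It checks the
+# periodic/symmetric identity stated in the window docstrings above, which holds
+# for bartlett_window and blackman_window alike:
+#
+# >>> L = 10
+# >>> torch.allclose(torch.blackman_window(L, periodic=True),
+# ...                torch.blackman_window(L + 1, periodic=False)[:-1])
+# True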
+@overload +def blackman_window(window_length: _int, periodic: _bool, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + blackman_window(window_length, periodic=True, *, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + Blackman window function. + + .. math:: + w[n] = 0.42 - 0.5 \cos \left( \frac{2 \pi n}{N - 1} \right) + 0.08 \cos \left( \frac{4 \pi n}{N - 1} \right) + + where :math:`N` is the full window size. + + The input :attr:`window_length` is a positive integer controlling the + returned window size. :attr:`periodic` flag determines whether the returned + window trims off the last duplicate value from the symmetric window and is + ready to be used as a periodic window with functions like + :meth:`torch.stft`. Therefore, if :attr:`periodic` is true, the :math:`N` in + above formula is in fact :math:`\text{window\_length} + 1`. Also, we always have + ``torch.blackman_window(L, periodic=True)`` equal to + ``torch.blackman_window(L + 1, periodic=False)[:-1])``. + + .. note:: + If :attr:`window_length` :math:`=1`, the returned window contains a single value 1. + + Arguments: + window_length (int): the size of returned window + periodic (bool, optional): If True, returns a window to be used as periodic + function. If False, return a symmetric window. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). Only floating point types are supported. + layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only + ``torch.strided`` (dense layout) is supported. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + + Returns: + Tensor: A 1-D tensor of size :math:`(\text{window\_length},)` containing the window + """ + ... +def bmm(input: Tensor, mat2: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + bmm(input, mat2, *, out=None) -> Tensor + + Performs a batch matrix-matrix product of matrices stored in :attr:`input` + and :attr:`mat2`. + + :attr:`input` and :attr:`mat2` must be 3-D tensors each containing + the same number of matrices. + + If :attr:`input` is a :math:`(b \times n \times m)` tensor, :attr:`mat2` is a + :math:`(b \times m \times p)` tensor, :attr:`out` will be a + :math:`(b \times n \times p)` tensor. + + .. math:: + \text{out}_i = \text{input}_i \mathbin{@} \text{mat2}_i + + This operator supports :ref:`TensorFloat32`. + + On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision` for backward. + + .. note:: This function does not :ref:`broadcast `. + For broadcasting matrix products, see :func:`torch.matmul`. + + Args: + input (Tensor): the first batch of matrices to be multiplied + mat2 (Tensor): the second batch of matrices to be multiplied + + Keyword Args: + out (Tensor, optional): the output tensor. 
+ + Example:: + + >>> input = torch.randn(10, 3, 4) + >>> mat2 = torch.randn(10, 4, 5) + >>> res = torch.bmm(input, mat2) + >>> res.size() + torch.Size([10, 3, 5]) + """ + ... +def broadcast_to(input: Tensor, size: Sequence[Union[_int, SymInt]]) -> Tensor: + r""" + broadcast_to(input, shape) -> Tensor + + Broadcasts :attr:`input` to the shape :attr:`\shape`. + Equivalent to calling ``input.expand(shape)``. See :meth:`~Tensor.expand` for details. + + Args: + input (Tensor): the input tensor. + shape (list, tuple, or :class:`torch.Size`): the new shape. + + Example:: + + >>> x = torch.tensor([1, 2, 3]) + >>> torch.broadcast_to(x, (3, 3)) + tensor([[1, 2, 3], + [1, 2, 3], + [1, 2, 3]]) + """ + ... +@overload +def bucketize(input: Tensor, boundaries: Tensor, *, out_int32: _bool = False, right: _bool = False, out: Optional[Tensor] = None) -> Tensor: + r""" + bucketize(input, boundaries, *, out_int32=False, right=False, out=None) -> Tensor + + Returns the indices of the buckets to which each value in the :attr:`input` belongs, where the + boundaries of the buckets are set by :attr:`boundaries`. Return a new tensor with the same size + as :attr:`input`. If :attr:`right` is False (default), then the left boundary is open. Note that + this behavior is opposite the behavior of + `numpy.digitize `_. + More formally, the returned index satisfies the following rules: + + .. list-table:: + :widths: 15 85 + :header-rows: 1 + + * - :attr:`right` + - *returned index satisfies* + * - False + - ``boundaries[i-1] < input[m][n]...[l][x] <= boundaries[i]`` + * - True + - ``boundaries[i-1] <= input[m][n]...[l][x] < boundaries[i]`` + + Args: + input (Tensor or Scalar): N-D tensor or a Scalar containing the search value(s). + boundaries (Tensor): 1-D tensor, must contain a strictly increasing sequence, or the return value is undefined. + + Keyword args: + out_int32 (bool, optional): indicate the output data type. torch.int32 if True, torch.int64 otherwise. + Default value is False, i.e. default output data type is torch.int64. + right (bool, optional): if False, return the first suitable location that is found. If True, return the + last such index. If no suitable index found, return 0 for non-numerical value + (eg. nan, inf) or the size of :attr:`boundaries` (one pass the last index). + In other words, if False, gets the lower bound index for each value in :attr:`input` + from :attr:`boundaries`. If True, gets the upper bound index instead. + Default value is False. + out (Tensor, optional): the output tensor, must be the same size as :attr:`input` if provided. + + + Example:: + + >>> boundaries = torch.tensor([1, 3, 5, 7, 9]) + >>> boundaries + tensor([1, 3, 5, 7, 9]) + >>> v = torch.tensor([[3, 6, 9], [3, 6, 9]]) + >>> v + tensor([[3, 6, 9], + [3, 6, 9]]) + >>> torch.bucketize(v, boundaries) + tensor([[1, 3, 4], + [1, 3, 4]]) + >>> torch.bucketize(v, boundaries, right=True) + tensor([[2, 3, 5], + [2, 3, 5]]) + """ + ... +@overload +def bucketize(self: Union[Number, _complex], boundaries: Tensor, *, out_int32: _bool = False, right: _bool = False) -> Tensor: + r""" + bucketize(input, boundaries, *, out_int32=False, right=False, out=None) -> Tensor + + Returns the indices of the buckets to which each value in the :attr:`input` belongs, where the + boundaries of the buckets are set by :attr:`boundaries`. Return a new tensor with the same size + as :attr:`input`. If :attr:`right` is False (default), then the left boundary is open. Note that + this behavior is opposite the behavior of + `numpy.digitize `_. 
+ More formally, the returned index satisfies the following rules: + + .. list-table:: + :widths: 15 85 + :header-rows: 1 + + * - :attr:`right` + - *returned index satisfies* + * - False + - ``boundaries[i-1] < input[m][n]...[l][x] <= boundaries[i]`` + * - True + - ``boundaries[i-1] <= input[m][n]...[l][x] < boundaries[i]`` + + Args: + input (Tensor or Scalar): N-D tensor or a Scalar containing the search value(s). + boundaries (Tensor): 1-D tensor, must contain a strictly increasing sequence, or the return value is undefined. + + Keyword args: + out_int32 (bool, optional): indicate the output data type. torch.int32 if True, torch.int64 otherwise. + Default value is False, i.e. default output data type is torch.int64. + right (bool, optional): if False, return the first suitable location that is found. If True, return the + last such index. If no suitable index found, return 0 for non-numerical value + (eg. nan, inf) or the size of :attr:`boundaries` (one pass the last index). + In other words, if False, gets the lower bound index for each value in :attr:`input` + from :attr:`boundaries`. If True, gets the upper bound index instead. + Default value is False. + out (Tensor, optional): the output tensor, must be the same size as :attr:`input` if provided. + + + Example:: + + >>> boundaries = torch.tensor([1, 3, 5, 7, 9]) + >>> boundaries + tensor([1, 3, 5, 7, 9]) + >>> v = torch.tensor([[3, 6, 9], [3, 6, 9]]) + >>> v + tensor([[3, 6, 9], + [3, 6, 9]]) + >>> torch.bucketize(v, boundaries) + tensor([[1, 3, 4], + [1, 3, 4]]) + >>> torch.bucketize(v, boundaries, right=True) + tensor([[2, 3, 5], + [2, 3, 5]]) + """ + ... +def can_cast(from_: _dtype, to: _dtype) -> _bool: + r""" + can_cast(from, to) -> bool + + Determines if a type conversion is allowed under PyTorch casting rules + described in the type promotion :ref:`documentation `. + + Args: + from (dtype): The original :class:`torch.dtype`. + to (dtype): The target :class:`torch.dtype`. + + Example:: + + >>> torch.can_cast(torch.double, torch.float) + True + >>> torch.can_cast(torch.float, torch.int) + False + """ + ... +@overload +def cat(tensors: Union[Tuple[Tensor, ...], List[Tensor]], dim: _int = 0, *, out: Optional[Tensor] = None) -> Tensor: + r""" + cat(tensors, dim=0, *, out=None) -> Tensor + + Concatenates the given sequence of :attr:`seq` tensors in the given dimension. + All tensors must either have the same shape (except in the concatenating + dimension) or be a 1-D empty tensor with size ``(0,)``. + + :func:`torch.cat` can be seen as an inverse operation for :func:`torch.split` + and :func:`torch.chunk`. + + :func:`torch.cat` can be best understood via examples. + + .. seealso:: + + :func:`torch.stack` concatenates the given sequence along a new dimension. + + Args: + tensors (sequence of Tensors): any python sequence of tensors of the same type. + Non-empty tensors provided must have the same shape, except in the + cat dimension. + dim (int, optional): the dimension over which the tensors are concatenated + + Keyword args: + out (Tensor, optional): the output tensor. 
+ + Example:: + + >>> x = torch.randn(2, 3) + >>> x + tensor([[ 0.6580, -1.0969, -0.4614], + [-0.1034, -0.5790, 0.1497]]) + >>> torch.cat((x, x, x), 0) + tensor([[ 0.6580, -1.0969, -0.4614], + [-0.1034, -0.5790, 0.1497], + [ 0.6580, -1.0969, -0.4614], + [-0.1034, -0.5790, 0.1497], + [ 0.6580, -1.0969, -0.4614], + [-0.1034, -0.5790, 0.1497]]) + >>> torch.cat((x, x, x), 1) + tensor([[ 0.6580, -1.0969, -0.4614, 0.6580, -1.0969, -0.4614, 0.6580, + -1.0969, -0.4614], + [-0.1034, -0.5790, 0.1497, -0.1034, -0.5790, 0.1497, -0.1034, + -0.5790, 0.1497]]) + """ + ... +@overload +def cat(tensors: Union[Tuple[Tensor, ...], List[Tensor]], dim: Union[str, ellipsis, None], *, out: Optional[Tensor] = None) -> Tensor: + r""" + cat(tensors, dim=0, *, out=None) -> Tensor + + Concatenates the given sequence of :attr:`seq` tensors in the given dimension. + All tensors must either have the same shape (except in the concatenating + dimension) or be a 1-D empty tensor with size ``(0,)``. + + :func:`torch.cat` can be seen as an inverse operation for :func:`torch.split` + and :func:`torch.chunk`. + + :func:`torch.cat` can be best understood via examples. + + .. seealso:: + + :func:`torch.stack` concatenates the given sequence along a new dimension. + + Args: + tensors (sequence of Tensors): any python sequence of tensors of the same type. + Non-empty tensors provided must have the same shape, except in the + cat dimension. + dim (int, optional): the dimension over which the tensors are concatenated + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> x = torch.randn(2, 3) + >>> x + tensor([[ 0.6580, -1.0969, -0.4614], + [-0.1034, -0.5790, 0.1497]]) + >>> torch.cat((x, x, x), 0) + tensor([[ 0.6580, -1.0969, -0.4614], + [-0.1034, -0.5790, 0.1497], + [ 0.6580, -1.0969, -0.4614], + [-0.1034, -0.5790, 0.1497], + [ 0.6580, -1.0969, -0.4614], + [-0.1034, -0.5790, 0.1497]]) + >>> torch.cat((x, x, x), 1) + tensor([[ 0.6580, -1.0969, -0.4614, 0.6580, -1.0969, -0.4614, 0.6580, + -1.0969, -0.4614], + [-0.1034, -0.5790, 0.1497, -0.1034, -0.5790, 0.1497, -0.1034, + -0.5790, 0.1497]]) + """ + ... +def ccol_indices_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def ceil(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + ceil(input, *, out=None) -> Tensor + + Returns a new tensor with the ceil of the elements of :attr:`input`, + the smallest integer greater than or equal to each element. + + For integer inputs, follows the array-api convention of returning a + copy of the input tensor. + + .. math:: + \text{out}_{i} = \left\lceil \text{input}_{i} \right\rceil + + Args: + input (Tensor): the input tensor. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randn(4) + >>> a + tensor([-0.6341, -1.4208, -1.0900, 0.5826]) + >>> torch.ceil(a) + tensor([-0., -1., -1., 1.]) + """ + ... +def ceil_(input: Tensor) -> Tensor: ... +def celu(input: Tensor, alpha: Union[Number, _complex] = 1.0) -> Tensor: ... +def celu_(input: Tensor, alpha: Union[Number, _complex] = 1.0) -> Tensor: ... +def channel_shuffle(input: Tensor, groups: Union[_int, SymInt]) -> Tensor: ... +def cholesky(input: Tensor, upper: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: + r""" + cholesky(input, upper=False, *, out=None) -> Tensor + + Computes the Cholesky decomposition of a symmetric positive-definite + matrix :math:`A` or for batches of symmetric positive-definite matrices. 
+ + If :attr:`upper` is ``True``, the returned matrix ``U`` is upper-triangular, and + the decomposition has the form: + + .. math:: + + A = U^TU + + If :attr:`upper` is ``False``, the returned matrix ``L`` is lower-triangular, and + the decomposition has the form: + + .. math:: + + A = LL^T + + If :attr:`upper` is ``True``, and :math:`A` is a batch of symmetric positive-definite + matrices, then the returned tensor will be composed of upper-triangular Cholesky factors + of each of the individual matrices. Similarly, when :attr:`upper` is ``False``, the returned + tensor will be composed of lower-triangular Cholesky factors of each of the individual + matrices. + + .. warning:: + + :func:`torch.cholesky` is deprecated in favor of :func:`torch.linalg.cholesky` + and will be removed in a future PyTorch release. + + ``L = torch.cholesky(A)`` should be replaced with + + .. code:: python + + L = torch.linalg.cholesky(A) + + ``U = torch.cholesky(A, upper=True)`` should be replaced with + + .. code:: python + + U = torch.linalg.cholesky(A).mH + + This transform will produce equivalent results for all valid (symmetric positive definite) inputs. + + Args: + input (Tensor): the input tensor :math:`A` of size :math:`(*, n, n)` where `*` is zero or more + batch dimensions consisting of symmetric positive-definite matrices. + upper (bool, optional): flag that indicates whether to return a + upper or lower triangular matrix. Default: ``False`` + + Keyword args: + out (Tensor, optional): the output matrix + + Example:: + + >>> a = torch.randn(3, 3) + >>> a = a @ a.mT + 1e-3 # make symmetric positive-definite + >>> l = torch.cholesky(a) + >>> a + tensor([[ 2.4112, -0.7486, 1.4551], + [-0.7486, 1.3544, 0.1294], + [ 1.4551, 0.1294, 1.6724]]) + >>> l + tensor([[ 1.5528, 0.0000, 0.0000], + [-0.4821, 1.0592, 0.0000], + [ 0.9371, 0.5487, 0.7023]]) + >>> l @ l.mT + tensor([[ 2.4112, -0.7486, 1.4551], + [-0.7486, 1.3544, 0.1294], + [ 1.4551, 0.1294, 1.6724]]) + >>> a = torch.randn(3, 2, 2) # Example for batched input + >>> a = a @ a.mT + 1e-03 # make symmetric positive-definite + >>> l = torch.cholesky(a) + >>> z = l @ l.mT + >>> torch.dist(z, a) + tensor(2.3842e-07) + """ + ... +def cholesky_inverse(input: Tensor, upper: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: + r""" + cholesky_inverse(L, upper=False, *, out=None) -> Tensor + + Computes the inverse of a complex Hermitian or real symmetric + positive-definite matrix given its Cholesky decomposition. + + Let :math:`A` be a complex Hermitian or real symmetric positive-definite matrix, + and :math:`L` its Cholesky decomposition such that: + + .. math:: + + A = LL^{\text{H}} + + where :math:`L^{\text{H}}` is the conjugate transpose when :math:`L` is complex, + and the transpose when :math:`L` is real-valued. + + Computes the inverse matrix :math:`A^{-1}`. + + Supports input of float, double, cfloat and cdouble dtypes. + Also supports batches of matrices, and if :math:`A` is a batch of matrices + then the output has the same batch dimensions. + + Args: + L (Tensor): tensor of shape `(*, n, n)` where `*` is zero or more batch dimensions + consisting of lower or upper triangular Cholesky decompositions of + symmetric or Hermitian positive-definite matrices. + upper (bool, optional): flag that indicates whether :math:`L` is lower triangular + or upper triangular. Default: ``False`` + + Keyword args: + out (Tensor, optional): output tensor. Ignored if `None`. Default: `None`. 
+ + Example:: + + >>> A = torch.randn(3, 3) + >>> A = A @ A.T + torch.eye(3) * 1e-3 # Creates a symmetric positive-definite matrix + >>> L = torch.linalg.cholesky(A) # Extract Cholesky decomposition + >>> torch.cholesky_inverse(L) + tensor([[ 1.9314, 1.2251, -0.0889], + [ 1.2251, 2.4439, 0.2122], + [-0.0889, 0.2122, 0.1412]]) + >>> A.inverse() + tensor([[ 1.9314, 1.2251, -0.0889], + [ 1.2251, 2.4439, 0.2122], + [-0.0889, 0.2122, 0.1412]]) + + >>> A = torch.randn(3, 2, 2, dtype=torch.complex64) + >>> A = A @ A.mH + torch.eye(2) * 1e-3 # Batch of Hermitian positive-definite matrices + >>> L = torch.linalg.cholesky(A) + >>> torch.dist(torch.inverse(A), torch.cholesky_inverse(L)) + tensor(5.6358e-7) + """ + ... +def cholesky_solve(input: Tensor, input2: Tensor, upper: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: + r""" + cholesky_solve(B, L, upper=False, *, out=None) -> Tensor + + Computes the solution of a system of linear equations with complex Hermitian + or real symmetric positive-definite lhs given its Cholesky decomposition. + + Let :math:`A` be a complex Hermitian or real symmetric positive-definite matrix, + and :math:`L` its Cholesky decomposition such that: + + .. math:: + + A = LL^{\text{H}} + + where :math:`L^{\text{H}}` is the conjugate transpose when :math:`L` is complex, + and the transpose when :math:`L` is real-valued. + + Returns the solution :math:`X` of the following linear system: + + .. math:: + + AX = B + + Supports inputs of float, double, cfloat and cdouble dtypes. + Also supports batches of matrices, and if :math:`A` or :math:`B` is a batch of matrices + then the output has the same batch dimensions. + + Args: + B (Tensor): right-hand side tensor of shape `(*, n, k)` + where :math:`*` is zero or more batch dimensions + L (Tensor): tensor of shape `(*, n, n)` where `*` is zero or more batch dimensions + consisting of lower or upper triangular Cholesky decompositions of + symmetric or Hermitian positive-definite matrices. + upper (bool, optional): flag that indicates whether :math:`L` is lower triangular + or upper triangular. Default: ``False``. + + Keyword args: + out (Tensor, optional): output tensor. Ignored if `None`. Default: `None`. + + Example:: + + >>> A = torch.randn(3, 3) + >>> A = A @ A.T + torch.eye(3) * 1e-3 # Creates a symmetric positive-definite matrix + >>> L = torch.linalg.cholesky(A) # Extract Cholesky decomposition + >>> B = torch.randn(3, 2) + >>> torch.cholesky_solve(B, L) + tensor([[ -8.1625, 19.6097], + [ -5.8398, 14.2387], + [ -4.3771, 10.4173]]) + >>> A.inverse() @ B + tensor([[ -8.1626, 19.6097], + [ -5.8398, 14.2387], + [ -4.3771, 10.4173]]) + + >>> A = torch.randn(3, 2, 2, dtype=torch.complex64) + >>> A = A @ A.mH + torch.eye(2) * 1e-3 # Batch of Hermitian positive-definite matrices + >>> L = torch.linalg.cholesky(A) + >>> B = torch.randn(2, 1, dtype=torch.complex64) + >>> X = torch.cholesky_solve(B, L) + >>> torch.dist(X, A.inverse() @ B) + tensor(1.6881e-5) + """ + ... +def choose_qparams_optimized(input: Tensor, numel: _int, n_bins: _int, ratio: _float, bit_width: _int) -> Tuple[Tensor, Tensor]: ... +def chunk(input: Tensor, chunks: _int, dim: _int = 0) -> Tuple[Tensor, ...]: + r""" + chunk(input, chunks, dim=0) -> List of Tensors + + Attempts to split a tensor into the specified number of chunks. Each chunk is a view of + the input tensor. + + + .. note:: + + This function may return fewer than the specified number of chunks! + + .. 
seealso:: + + :func:`torch.tensor_split` a function that always returns exactly the specified number of chunks + + If the tensor size along the given dimension :attr:`dim` is divisible by :attr:`chunks`, + all returned chunks will be the same size. + If the tensor size along the given dimension :attr:`dim` is not divisible by :attr:`chunks`, + all returned chunks will be the same size, except the last one. + If such division is not possible, this function may return fewer + than the specified number of chunks. + + Arguments: + input (Tensor): the tensor to split + chunks (int): number of chunks to return + dim (int): dimension along which to split the tensor + + Example: + >>> torch.arange(11).chunk(6) + (tensor([0, 1]), + tensor([2, 3]), + tensor([4, 5]), + tensor([6, 7]), + tensor([8, 9]), + tensor([10])) + >>> torch.arange(12).chunk(6) + (tensor([0, 1]), + tensor([2, 3]), + tensor([4, 5]), + tensor([6, 7]), + tensor([8, 9]), + tensor([10, 11])) + >>> torch.arange(13).chunk(6) + (tensor([0, 1, 2]), + tensor([3, 4, 5]), + tensor([6, 7, 8]), + tensor([ 9, 10, 11]), + tensor([12])) + """ + ... +@overload +def clamp(input: Tensor, min: Optional[Tensor] = None, max: Optional[Tensor] = None, *, out: Optional[Tensor] = None) -> Tensor: + r""" + clamp(input, min=None, max=None, *, out=None) -> Tensor + + Clamps all elements in :attr:`input` into the range `[` :attr:`min`, :attr:`max` `]`. + Letting min_value and max_value be :attr:`min` and :attr:`max`, respectively, this returns: + + .. math:: + y_i = \min(\max(x_i, \text{min\_value}_i), \text{max\_value}_i) + + If :attr:`min` is ``None``, there is no lower bound. + Or, if :attr:`max` is ``None`` there is no upper bound. + + + .. note:: + If :attr:`min` is greater than :attr:`max` :func:`torch.clamp(..., min, max) ` + sets all elements in :attr:`input` to the value of :attr:`max`. + + Args: + input (Tensor): the input tensor. + min (Number or Tensor, optional): lower-bound of the range to be clamped to + max (Number or Tensor, optional): upper-bound of the range to be clamped to + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randn(4) + >>> a + tensor([-1.7120, 0.1734, -0.0478, -0.0922]) + >>> torch.clamp(a, min=-0.5, max=0.5) + tensor([-0.5000, 0.1734, -0.0478, -0.0922]) + + >>> min = torch.linspace(-1, 1, steps=4) + >>> torch.clamp(a, min=min) + tensor([-1.0000, 0.1734, 0.3333, 1.0000]) + """ + ... +@overload +def clamp(input: Tensor, min: Optional[Union[Number, _complex]] = None, max: Optional[Union[Number, _complex]] = None, *, out: Optional[Tensor] = None) -> Tensor: + r""" + clamp(input, min=None, max=None, *, out=None) -> Tensor + + Clamps all elements in :attr:`input` into the range `[` :attr:`min`, :attr:`max` `]`. + Letting min_value and max_value be :attr:`min` and :attr:`max`, respectively, this returns: + + .. math:: + y_i = \min(\max(x_i, \text{min\_value}_i), \text{max\_value}_i) + + If :attr:`min` is ``None``, there is no lower bound. + Or, if :attr:`max` is ``None`` there is no upper bound. + + + .. note:: + If :attr:`min` is greater than :attr:`max` :func:`torch.clamp(..., min, max) ` + sets all elements in :attr:`input` to the value of :attr:`max`. + + Args: + input (Tensor): the input tensor. + min (Number or Tensor, optional): lower-bound of the range to be clamped to + max (Number or Tensor, optional): upper-bound of the range to be clamped to + + Keyword args: + out (Tensor, optional): the output tensor. 
+ + Example:: + + >>> a = torch.randn(4) + >>> a + tensor([-1.7120, 0.1734, -0.0478, -0.0922]) + >>> torch.clamp(a, min=-0.5, max=0.5) + tensor([-0.5000, 0.1734, -0.0478, -0.0922]) + + >>> min = torch.linspace(-1, 1, steps=4) + >>> torch.clamp(a, min=min) + tensor([-1.0000, 0.1734, 0.3333, 1.0000]) + """ + ... +@overload +def clamp_(input: Tensor, min: Optional[Tensor] = None, max: Optional[Tensor] = None) -> Tensor: ... +@overload +def clamp_(input: Tensor, min: Optional[Union[Number, _complex]] = None, max: Optional[Union[Number, _complex]] = None) -> Tensor: ... +@overload +def clamp_max(input: Tensor, max: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def clamp_max(input: Tensor, max: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def clamp_max_(input: Tensor, max: Tensor) -> Tensor: ... +@overload +def clamp_max_(input: Tensor, max: Union[Number, _complex]) -> Tensor: ... +@overload +def clamp_min(input: Tensor, min: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def clamp_min(input: Tensor, min: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def clamp_min_(input: Tensor, min: Tensor) -> Tensor: ... +@overload +def clamp_min_(input: Tensor, min: Union[Number, _complex]) -> Tensor: ... +@overload +def clip(input: Tensor, min: Optional[Tensor] = None, max: Optional[Tensor] = None, *, out: Optional[Tensor] = None) -> Tensor: + r""" + clip(input, min=None, max=None, *, out=None) -> Tensor + + Alias for :func:`torch.clamp`. + """ + ... +@overload +def clip(input: Tensor, min: Optional[Union[Number, _complex]] = None, max: Optional[Union[Number, _complex]] = None, *, out: Optional[Tensor] = None) -> Tensor: + r""" + clip(input, min=None, max=None, *, out=None) -> Tensor + + Alias for :func:`torch.clamp`. + """ + ... +@overload +def clip_(input: Tensor, min: Optional[Tensor] = None, max: Optional[Tensor] = None) -> Tensor: ... +@overload +def clip_(input: Tensor, min: Optional[Union[Number, _complex]] = None, max: Optional[Union[Number, _complex]] = None) -> Tensor: ... +def clone(input: Tensor, *, memory_format: Optional[memory_format] = None) -> Tensor: + r""" + clone(input, *, memory_format=torch.preserve_format) -> Tensor + + Returns a copy of :attr:`input`. + + .. note:: + + This function is differentiable, so gradients will flow back from the + result of this operation to :attr:`input`. To create a tensor without an + autograd relationship to :attr:`input` see :meth:`~Tensor.detach`. + + Args: + input (Tensor): the input tensor. + + Keyword args: + memory_format (:class:`torch.memory_format`, optional): the desired memory format of + returned tensor. Default: ``torch.preserve_format``. + """ + ... +def col_indices_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + Performs the same operation as :func:`torch.col_indices`, but all output tensors + are freshly created instead of aliasing the input. + """ + ... +def column_stack(tensors: Union[Tuple[Tensor, ...], List[Tensor]], *, out: Optional[Tensor] = None) -> Tensor: + r""" + column_stack(tensors, *, out=None) -> Tensor + + Creates a new tensor by horizontally stacking the tensors in :attr:`tensors`. + + Equivalent to ``torch.hstack(tensors)``, except each zero or one dimensional tensor ``t`` + in :attr:`tensors` is first reshaped into a ``(t.numel(), 1)`` column before being stacked horizontally. 
+ + Args: + tensors (sequence of Tensors): sequence of tensors to concatenate + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.tensor([1, 2, 3]) + >>> b = torch.tensor([4, 5, 6]) + >>> torch.column_stack((a, b)) + tensor([[1, 4], + [2, 5], + [3, 6]]) + >>> a = torch.arange(5) + >>> b = torch.arange(10).reshape(5, 2) + >>> torch.column_stack((a, b, b)) + tensor([[0, 0, 1, 0, 1], + [1, 2, 3, 2, 3], + [2, 4, 5, 4, 5], + [3, 6, 7, 6, 7], + [4, 8, 9, 8, 9]]) + """ + ... +def combinations(input: Tensor, r: _int = 2, with_replacement: _bool = False) -> Tensor: + r""" + combinations(input, r=2, with_replacement=False) -> seq + + Compute combinations of length :math:`r` of the given tensor. The behavior is similar to + python's `itertools.combinations` when `with_replacement` is set to `False`, and + `itertools.combinations_with_replacement` when `with_replacement` is set to `True`. + + Arguments: + input (Tensor): 1D vector. + r (int, optional): number of elements to combine + with_replacement (bool, optional): whether to allow duplication in combination + + Returns: + Tensor: A tensor equivalent to converting all the input tensors into lists, do + `itertools.combinations` or `itertools.combinations_with_replacement` on these + lists, and finally convert the resulting list into tensor. + + Example:: + + >>> a = [1, 2, 3] + >>> list(itertools.combinations(a, r=2)) + [(1, 2), (1, 3), (2, 3)] + >>> list(itertools.combinations(a, r=3)) + [(1, 2, 3)] + >>> list(itertools.combinations_with_replacement(a, r=2)) + [(1, 1), (1, 2), (1, 3), (2, 2), (2, 3), (3, 3)] + >>> tensor_a = torch.tensor(a) + >>> torch.combinations(tensor_a) + tensor([[1, 2], + [1, 3], + [2, 3]]) + >>> torch.combinations(tensor_a, r=3) + tensor([[1, 2, 3]]) + >>> torch.combinations(tensor_a, with_replacement=True) + tensor([[1, 1], + [1, 2], + [1, 3], + [2, 2], + [2, 3], + [3, 3]]) + """ + ... +def complex(real: Tensor, imag: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + complex(real, imag, *, out=None) -> Tensor + + Constructs a complex tensor with its real part equal to :attr:`real` and its + imaginary part equal to :attr:`imag`. + + Args: + real (Tensor): The real part of the complex tensor. Must be half, float or double. + imag (Tensor): The imaginary part of the complex tensor. Must be same dtype + as :attr:`real`. + + Keyword args: + out (Tensor): If the inputs are ``torch.float32``, must be + ``torch.complex64``. If the inputs are ``torch.float64``, must be + ``torch.complex128``. + + Example:: + + >>> real = torch.tensor([1, 2], dtype=torch.float32) + >>> imag = torch.tensor([3, 4], dtype=torch.float32) + >>> z = torch.complex(real, imag) + >>> z + tensor([(1.+3.j), (2.+4.j)]) + >>> z.dtype + torch.complex64 + """ + ... +@overload +def concat(tensors: Union[Tuple[Tensor, ...], List[Tensor]], dim: _int = 0, *, out: Optional[Tensor] = None) -> Tensor: + r""" + concat(tensors, dim=0, *, out=None) -> Tensor + + Alias of :func:`torch.cat`. + """ + ... +@overload +def concat(tensors: Union[Tuple[Tensor, ...], List[Tensor]], dim: Union[str, ellipsis, None], *, out: Optional[Tensor] = None) -> Tensor: + r""" + concat(tensors, dim=0, *, out=None) -> Tensor + + Alias of :func:`torch.cat`. + """ + ... +@overload +def concatenate(tensors: Union[Tuple[Tensor, ...], List[Tensor]], dim: _int = 0, *, out: Optional[Tensor] = None) -> Tensor: + r""" + concatenate(tensors, axis=0, out=None) -> Tensor + + Alias of :func:`torch.cat`. + """ + ... 
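+# A minimal usage sketch for the aliases documented above, assuming `torch.concat`
+# and `torch.concatenate` behave exactly like `torch.cat` (as their docstrings state):
+#
+# >>> t = torch.ones(2, 3)
+# >>> torch.cat((t, t), dim=0).shape          # torch.Size([4, 3])
+# >>> torch.concat((t, t), dim=0).shape       # torch.Size([4, 3])
+# >>> torch.concatenate((t, t), dim=0).shape  # torch.Size([4, 3])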
+@overload +def concatenate(tensors: Union[Tuple[Tensor, ...], List[Tensor]], dim: Union[str, ellipsis, None], *, out: Optional[Tensor] = None) -> Tensor: + r""" + concatenate(tensors, axis=0, out=None) -> Tensor + + Alias of :func:`torch.cat`. + """ + ... +def conj(input: Tensor) -> Tensor: + r""" + conj(input) -> Tensor + + Returns a view of :attr:`input` with a flipped conjugate bit. If :attr:`input` has a non-complex dtype, + this function just returns :attr:`input`. + + .. note:: + :func:`torch.conj` performs a lazy conjugation, but the actual conjugated tensor can be materialized + at any time using :func:`torch.resolve_conj`. + + .. warning:: In the future, :func:`torch.conj` may return a non-writeable view for an :attr:`input` of + non-complex dtype. It's recommended that programs not modify the tensor returned by :func:`torch.conj_physical` + when :attr:`input` is of non-complex dtype to be compatible with this change. + + Args: + input (Tensor): the input tensor. + + Example:: + + >>> x = torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j]) + >>> x.is_conj() + False + >>> y = torch.conj(x) + >>> y.is_conj() + True + """ + ... +def conj_physical(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + conj_physical(input, *, out=None) -> Tensor + + Computes the element-wise conjugate of the given :attr:`input` tensor. + If :attr:`input` has a non-complex dtype, this function just returns :attr:`input`. + + .. note:: + This performs the conjugate operation regardless of the fact conjugate bit is set or not. + + .. warning:: In the future, :func:`torch.conj_physical` may return a non-writeable view for an :attr:`input` of + non-complex dtype. It's recommended that programs not modify the tensor returned by :func:`torch.conj_physical` + when :attr:`input` is of non-complex dtype to be compatible with this change. + + .. math:: + \text{out}_{i} = conj(\text{input}_{i}) + + Args: + input (Tensor): the input tensor. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> torch.conj_physical(torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j])) + tensor([-1 - 1j, -2 - 2j, 3 + 3j]) + """ + ... +def conj_physical_(input: Tensor) -> Tensor: ... +def constant_pad_nd(input: Tensor, pad: Sequence[Union[_int, SymInt]], value: Union[Number, _complex] = 0) -> Tensor: ... +@overload +def conv1d(input: Tensor, weight: Tensor, bias: Optional[Tensor] = None, stride: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, padding: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 0, dilation: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, groups: Union[_int, SymInt] = 1) -> Tensor: ... +@overload +def conv1d(input: Tensor, weight: Tensor, bias: Optional[Tensor] = None, stride: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, padding: str = "valid", dilation: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, groups: Union[_int, SymInt] = 1) -> Tensor: ... +@overload +def conv2d(input: Tensor, weight: Tensor, bias: Optional[Tensor] = None, stride: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, padding: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 0, dilation: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, groups: Union[_int, SymInt] = 1) -> Tensor: ... 
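+# The conv overloads above and below differ only in how padding is given: a number
+# (or sequence) versus the strings "valid"/"same". A sketch, assuming both forms
+# dispatch to the same convolution as torch.nn.functional.conv2d:
+#
+# >>> x = torch.randn(1, 3, 8, 8)
+# >>> w = torch.randn(6, 3, 3, 3)
+# >>> torch.conv2d(x, w, padding=1).shape       # torch.Size([1, 6, 8, 8])
+# >>> torch.conv2d(x, w, padding="same").shape  # torch.Size([1, 6, 8, 8])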
+@overload +def conv2d(input: Tensor, weight: Tensor, bias: Optional[Tensor] = None, stride: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, padding: str = "valid", dilation: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, groups: Union[_int, SymInt] = 1) -> Tensor: ... +@overload +def conv3d(input: Tensor, weight: Tensor, bias: Optional[Tensor] = None, stride: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, padding: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 0, dilation: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, groups: Union[_int, SymInt] = 1) -> Tensor: ... +@overload +def conv3d(input: Tensor, weight: Tensor, bias: Optional[Tensor] = None, stride: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, padding: str = "valid", dilation: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, groups: Union[_int, SymInt] = 1) -> Tensor: ... +def conv_tbc(input: Tensor, weight: Tensor, bias: Tensor, pad: _int = 0) -> Tensor: ... +def conv_transpose1d(input: Tensor, weight: Tensor, bias: Optional[Tensor] = None, stride: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, padding: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 0, output_padding: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 0, groups: Union[_int, SymInt] = 1, dilation: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1) -> Tensor: ... +def conv_transpose2d(input: Tensor, weight: Tensor, bias: Optional[Tensor] = None, stride: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, padding: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 0, output_padding: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 0, groups: Union[_int, SymInt] = 1, dilation: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1) -> Tensor: ... +def conv_transpose3d(input: Tensor, weight: Tensor, bias: Optional[Tensor] = None, stride: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, padding: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 0, output_padding: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 0, groups: Union[_int, SymInt] = 1, dilation: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1) -> Tensor: ... +def convolution(input: Tensor, weight: Tensor, bias: Optional[Tensor], stride: Sequence[Union[_int, SymInt]], padding: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], transposed: _bool, output_padding: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt]) -> Tensor: ... +@overload +def copysign(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + copysign(input, other, *, out=None) -> Tensor + + Create a new floating-point tensor with the magnitude of :attr:`input` and the sign of :attr:`other`, elementwise. + + .. math:: + \text{out}_{i} = \begin{cases} + -|\text{input}_{i}| & \text{if } \text{other}_{i} \leq -0.0 \\ + |\text{input}_{i}| & \text{if } \text{other}_{i} \geq 0.0 \\ + \end{cases} + + + Supports :ref:`broadcasting to a common shape `, + and integer and float inputs. + + Args: + input (Tensor): magnitudes. + other (Tensor or Number): contains value(s) whose signbit(s) are + applied to the magnitudes in :attr:`input`. + + Keyword args: + out (Tensor, optional): the output tensor. 
+ + Example:: + + >>> a = torch.randn(5) + >>> a + tensor([-1.2557, -0.0026, -0.5387, 0.4740, -0.9244]) + >>> torch.copysign(a, 1) + tensor([1.2557, 0.0026, 0.5387, 0.4740, 0.9244]) + >>> a = torch.randn(4, 4) + >>> a + tensor([[ 0.7079, 0.2778, -1.0249, 0.5719], + [-0.0059, -0.2600, -0.4475, -1.3948], + [ 0.3667, -0.9567, -2.5757, -0.1751], + [ 0.2046, -0.0742, 0.2998, -0.1054]]) + >>> b = torch.randn(4) + tensor([ 0.2373, 0.3120, 0.3190, -1.1128]) + >>> torch.copysign(a, b) + tensor([[ 0.7079, 0.2778, 1.0249, -0.5719], + [ 0.0059, 0.2600, 0.4475, -1.3948], + [ 0.3667, 0.9567, 2.5757, -0.1751], + [ 0.2046, 0.0742, 0.2998, -0.1054]]) + >>> a = torch.tensor([1.]) + >>> b = torch.tensor([-0.]) + >>> torch.copysign(a, b) + tensor([-1.]) + + .. note:: + copysign handles signed zeros. If the other argument has a negative zero (-0), + the corresponding output value will be negative. + """ + ... +@overload +def copysign(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: + r""" + copysign(input, other, *, out=None) -> Tensor + + Create a new floating-point tensor with the magnitude of :attr:`input` and the sign of :attr:`other`, elementwise. + + .. math:: + \text{out}_{i} = \begin{cases} + -|\text{input}_{i}| & \text{if } \text{other}_{i} \leq -0.0 \\ + |\text{input}_{i}| & \text{if } \text{other}_{i} \geq 0.0 \\ + \end{cases} + + + Supports :ref:`broadcasting to a common shape `, + and integer and float inputs. + + Args: + input (Tensor): magnitudes. + other (Tensor or Number): contains value(s) whose signbit(s) are + applied to the magnitudes in :attr:`input`. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randn(5) + >>> a + tensor([-1.2557, -0.0026, -0.5387, 0.4740, -0.9244]) + >>> torch.copysign(a, 1) + tensor([1.2557, 0.0026, 0.5387, 0.4740, 0.9244]) + >>> a = torch.randn(4, 4) + >>> a + tensor([[ 0.7079, 0.2778, -1.0249, 0.5719], + [-0.0059, -0.2600, -0.4475, -1.3948], + [ 0.3667, -0.9567, -2.5757, -0.1751], + [ 0.2046, -0.0742, 0.2998, -0.1054]]) + >>> b = torch.randn(4) + tensor([ 0.2373, 0.3120, 0.3190, -1.1128]) + >>> torch.copysign(a, b) + tensor([[ 0.7079, 0.2778, 1.0249, -0.5719], + [ 0.0059, 0.2600, 0.4475, -1.3948], + [ 0.3667, 0.9567, 2.5757, -0.1751], + [ 0.2046, 0.0742, 0.2998, -0.1054]]) + >>> a = torch.tensor([1.]) + >>> b = torch.tensor([-0.]) + >>> torch.copysign(a, b) + tensor([-1.]) + + .. note:: + copysign handles signed zeros. If the other argument has a negative zero (-0), + the corresponding output value will be negative. + """ + ... +def corrcoef(input: Tensor) -> Tensor: + r""" + corrcoef(input) -> Tensor + + Estimates the Pearson product-moment correlation coefficient matrix of the variables given by the :attr:`input` matrix, + where rows are the variables and columns are the observations. + + .. note:: + + The correlation coefficient matrix R is computed using the covariance matrix C as given by + :math:`R_{ij} = \frac{ C_{ij} } { \sqrt{ C_{ii} * C_{jj} } }` + + .. note:: + + Due to floating point rounding, the resulting array may not be Hermitian and its diagonal elements may not be 1. + The real and imaginary values are clipped to the interval [-1, 1] in an attempt to improve this situation. + + Args: + input (Tensor): A 2D matrix containing multiple variables and observations, or a + Scalar or 1D vector representing a single variable. + + Returns: + (Tensor) The correlation coefficient matrix of the variables. + + .. seealso:: + + :func:`torch.cov` covariance matrix. 
+ + Example:: + + >>> x = torch.tensor([[0, 1, 2], [2, 1, 0]]) + >>> torch.corrcoef(x) + tensor([[ 1., -1.], + [-1., 1.]]) + >>> x = torch.randn(2, 4) + >>> x + tensor([[-0.2678, -0.0908, -0.3766, 0.2780], + [-0.5812, 0.1535, 0.2387, 0.2350]]) + >>> torch.corrcoef(x) + tensor([[1.0000, 0.3582], + [0.3582, 1.0000]]) + >>> torch.corrcoef(x[0]) + tensor(1.) + """ + ... +def cos(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + cos(input, *, out=None) -> Tensor + + Returns a new tensor with the cosine of the elements of :attr:`input`. + + .. math:: + \text{out}_{i} = \cos(\text{input}_{i}) + + Args: + input (Tensor): the input tensor. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randn(4) + >>> a + tensor([ 1.4309, 1.2706, -0.8562, 0.9796]) + >>> torch.cos(a) + tensor([ 0.1395, 0.2957, 0.6553, 0.5574]) + """ + ... +def cos_(input: Tensor) -> Tensor: ... +def cosh(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + cosh(input, *, out=None) -> Tensor + + Returns a new tensor with the hyperbolic cosine of the elements of + :attr:`input`. + + .. math:: + \text{out}_{i} = \cosh(\text{input}_{i}) + + Args: + input (Tensor): the input tensor. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randn(4) + >>> a + tensor([ 0.1632, 1.1835, -0.6979, -0.7325]) + >>> torch.cosh(a) + tensor([ 1.0133, 1.7860, 1.2536, 1.2805]) + + .. note:: + When :attr:`input` is on the CPU, the implementation of torch.cosh may use + the Sleef library, which rounds very large results to infinity or negative + infinity. See `here `_ for details. + """ + ... +def cosh_(input: Tensor) -> Tensor: ... +def cosine_embedding_loss(input1: Tensor, input2: Tensor, target: Tensor, margin: _float = 0.0, reduction: _int = 1) -> Tensor: ... +def cosine_similarity(x1: Tensor, x2: Tensor, dim: _int = 1, eps: _float = 1e-08) -> Tensor: ... +@overload +def count_nonzero(input: Tensor, dim: Optional[_int] = None) -> Tensor: + r""" + count_nonzero(input, dim=None) -> Tensor + + Counts the number of non-zero values in the tensor :attr:`input` along the given :attr:`dim`. + If no dim is specified then all non-zeros in the tensor are counted. + + Args: + input (Tensor): the input tensor. + dim (int or tuple of ints, optional): Dim or tuple of dims along which to count non-zeros. + + Example:: + + >>> x = torch.zeros(3,3) + >>> x[torch.randn(3,3) > 0.5] = 1 + >>> x + tensor([[0., 1., 1.], + [0., 0., 0.], + [0., 0., 1.]]) + >>> torch.count_nonzero(x) + tensor(3) + >>> torch.count_nonzero(x, dim=0) + tensor([0, 1, 2]) + """ + ... +@overload +def count_nonzero(input: Tensor, dim: _size) -> Tensor: + r""" + count_nonzero(input, dim=None) -> Tensor + + Counts the number of non-zero values in the tensor :attr:`input` along the given :attr:`dim`. + If no dim is specified then all non-zeros in the tensor are counted. + + Args: + input (Tensor): the input tensor. + dim (int or tuple of ints, optional): Dim or tuple of dims along which to count non-zeros. + + Example:: + + >>> x = torch.zeros(3,3) + >>> x[torch.randn(3,3) > 0.5] = 1 + >>> x + tensor([[0., 1., 1.], + [0., 0., 0.], + [0., 0., 1.]]) + >>> torch.count_nonzero(x) + tensor(3) + >>> torch.count_nonzero(x, dim=0) + tensor([0, 1, 2]) + """ + ... 
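+# Quick illustration of the two count_nonzero overloads above (no dim, a single dim,
+# or a tuple of dims); the values shown are for this specific input:
+#
+# >>> t = torch.tensor([[1, 0, 2], [0, 0, 3]])
+# >>> torch.count_nonzero(t)              # tensor(3)
+# >>> torch.count_nonzero(t, dim=0)       # tensor([1, 0, 2])
+# >>> torch.count_nonzero(t, dim=(0, 1))  # tensor(3)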
+def cov(input: Tensor, *, correction: _int = 1, fweights: Optional[Tensor] = None, aweights: Optional[Tensor] = None) -> Tensor: + r""" + cov(input, *, correction=1, fweights=None, aweights=None) -> Tensor + + Estimates the covariance matrix of the variables given by the :attr:`input` matrix, where rows are + the variables and columns are the observations. + + A covariance matrix is a square matrix giving the covariance of each pair of variables. The diagonal contains + the variance of each variable (covariance of a variable with itself). By definition, if :attr:`input` represents + a single variable (Scalar or 1D) then its variance is returned. + + The sample covariance of the variables :math:`x` and :math:`y` is given by: + + .. math:: + \text{cov}(x,y) = \frac{\sum^{N}_{i = 1}(x_{i} - \bar{x})(y_{i} - \bar{y})}{\max(0,~N~-~\delta N)} + + where :math:`\bar{x}` and :math:`\bar{y}` are the simple means of the :math:`x` and :math:`y` respectively, and + :math:`\delta N` is the :attr:`correction`. + + If :attr:`fweights` and/or :attr:`aweights` are provided, the weighted covariance + is calculated, which is given by: + + .. math:: + \text{cov}_w(x,y) = \frac{\sum^{N}_{i = 1}w_i(x_{i} - \mu_x^*)(y_{i} - \mu_y^*)} + {\max(0,~\sum^{N}_{i = 1}w_i~-~\frac{\sum^{N}_{i = 1}w_ia_i}{\sum^{N}_{i = 1}w_i}~\delta N)} + + where :math:`w` denotes :attr:`fweights` or :attr:`aweights` (``f`` and ``a`` for brevity) based on whichever is + provided, or :math:`w = f \times a` if both are provided, and + :math:`\mu_x^* = \frac{\sum^{N}_{i = 1}w_ix_{i} }{\sum^{N}_{i = 1}w_i}` is the weighted mean of the variable. If not + provided, ``f`` and/or ``a`` can be seen as a :math:`\mathbb{1}` vector of appropriate size. + + Args: + input (Tensor): A 2D matrix containing multiple variables and observations, or a + Scalar or 1D vector representing a single variable. + + Keyword Args: + correction (int, optional): difference between the sample size and sample degrees of freedom. + Defaults to Bessel's correction, ``correction = 1`` which returns the unbiased estimate, + even if both :attr:`fweights` and :attr:`aweights` are specified. ``correction = 0`` + will return the simple average. Defaults to ``1``. + fweights (tensor, optional): A Scalar or 1D tensor of observation vector frequencies representing the number of + times each observation should be repeated. Its numel must equal the number of columns of :attr:`input`. + Must have integral dtype. Ignored if ``None``. Defaults to ``None``. + aweights (tensor, optional): A Scalar or 1D array of observation vector weights. + These relative weights are typically large for observations considered “important” and smaller for + observations considered less “important”. Its numel must equal the number of columns of :attr:`input`. + Must have floating point dtype. Ignored if ``None``. Defaults to ``None``. + + Returns: + (Tensor) The covariance matrix of the variables. + + .. seealso:: + + :func:`torch.corrcoef` normalized covariance matrix. + + Example:: + >>> x = torch.tensor([[0, 2], [1, 1], [2, 0]]).T + >>> x + tensor([[0, 1, 2], + [2, 1, 0]]) + >>> torch.cov(x) + tensor([[ 1., -1.], + [-1., 1.]]) + >>> torch.cov(x, correction=0) + tensor([[ 0.6667, -0.6667], + [-0.6667, 0.6667]]) + >>> fw = torch.randint(1, 10, (3,)) + >>> fw + tensor([1, 6, 9]) + >>> aw = torch.rand(3) + >>> aw + tensor([0.4282, 0.0255, 0.4144]) + >>> torch.cov(x, fweights=fw, aweights=aw) + tensor([[ 0.4169, -0.4169], + [-0.4169, 0.4169]]) + """ + ... 
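+# A sketch of the corrcoef/cov relationship quoted in the docstrings above,
+# R_ij = C_ij / sqrt(C_ii * C_jj), checked on the same example as torch.cov:
+#
+# >>> x = torch.tensor([[0., 1., 2.], [2., 1., 0.]])
+# >>> C = torch.cov(x)
+# >>> d = torch.sqrt(torch.diag(C))
+# >>> torch.allclose(C / torch.outer(d, d), torch.corrcoef(x))  # True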
+def cross(input: Tensor, other: Tensor, dim: Optional[_int] = None, *, out: Optional[Tensor] = None) -> Tensor: + r""" + cross(input, other, dim=None, *, out=None) -> Tensor + + + Returns the cross product of vectors in dimension :attr:`dim` of :attr:`input` + and :attr:`other`. + + Supports input of float, double, cfloat and cdouble dtypes. Also supports batches + of vectors, for which it computes the product along the dimension :attr:`dim`. + In this case, the output has the same batch dimensions as the inputs. + + .. warning:: + If :attr:`dim` is not given, it defaults to the first dimension found + with the size 3. Note that this might be unexpected. + + This behavior is deprecated and will be changed to match that of :func:`torch.linalg.cross` + in a future release. + + .. seealso:: + :func:`torch.linalg.cross` which has dim=-1 as default. + + + Args: + input (Tensor): the input tensor. + other (Tensor): the second input tensor + dim (int, optional): the dimension to take the cross-product in. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randn(4, 3) + >>> a + tensor([[-0.3956, 1.1455, 1.6895], + [-0.5849, 1.3672, 0.3599], + [-1.1626, 0.7180, -0.0521], + [-0.1339, 0.9902, -2.0225]]) + >>> b = torch.randn(4, 3) + >>> b + tensor([[-0.0257, -1.4725, -1.2251], + [-1.1479, -0.7005, -1.9757], + [-1.3904, 0.3726, -1.1836], + [-0.9688, -0.7153, 0.2159]]) + >>> torch.cross(a, b, dim=1) + tensor([[ 1.0844, -0.5281, 0.6120], + [-2.4490, -1.5687, 1.9792], + [-0.8304, -1.3037, 0.5650], + [-1.2329, 1.9883, 1.0551]]) + >>> torch.cross(a, b) + tensor([[ 1.0844, -0.5281, 0.6120], + [-2.4490, -1.5687, 1.9792], + [-0.8304, -1.3037, 0.5650], + [-1.2329, 1.9883, 1.0551]]) + """ + ... +def crow_indices_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + Performs the same operation as :func:`torch.crow_indices`, but all output tensors + are freshly created instead of aliasing the input. + """ + ... +@overload +def ctc_loss(log_probs: Tensor, targets: Tensor, input_lengths: _size, target_lengths: _size, blank: _int = 0, reduction: _int = 1, zero_infinity: _bool = False) -> Tensor: ... +@overload +def ctc_loss(log_probs: Tensor, targets: Tensor, input_lengths: Tensor, target_lengths: Tensor, blank: _int = 0, reduction: _int = 1, zero_infinity: _bool = False) -> Tensor: ... +def cudnn_affine_grid_generator(theta: Tensor, N: _int, C: _int, H: _int, W: _int) -> Tensor: ... +def cudnn_batch_norm(input: Tensor, weight: Tensor, bias: Optional[Tensor], running_mean: Optional[Tensor], running_var: Optional[Tensor], training: _bool, exponential_average_factor: _float, epsilon: _float) -> Tuple[Tensor, Tensor, Tensor, Tensor]: ... +def cudnn_convolution(input: Tensor, weight: Tensor, padding: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt], benchmark: _bool, deterministic: _bool, allow_tf32: _bool, *, out: Optional[Tensor] = None) -> Tensor: ... +def cudnn_convolution_add_relu(input: Tensor, weight: Tensor, z: Tensor, alpha: Optional[Union[Number, _complex]], bias: Optional[Tensor], stride: Sequence[Union[_int, SymInt]], padding: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt]) -> Tensor: ... 
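+# Referring to the dim caveat in the torch.cross docstring above: with no dim given,
+# torch.cross picks the first dimension of size 3, whereas torch.linalg.cross uses
+# dim=-1. For a (4, 3) input both choices coincide (a sketch):
+#
+# >>> a, b = torch.randn(4, 3), torch.randn(4, 3)
+# >>> torch.allclose(torch.cross(a, b, dim=1), torch.linalg.cross(a, b))  # True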
+def cudnn_convolution_relu(input: Tensor, weight: Tensor, bias: Optional[Tensor], stride: Sequence[Union[_int, SymInt]], padding: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt]) -> Tensor: ...
+def cudnn_convolution_transpose(input: Tensor, weight: Tensor, padding: Sequence[Union[_int, SymInt]], output_padding: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt], benchmark: _bool, deterministic: _bool, allow_tf32: _bool) -> Tensor: ...
+def cudnn_grid_sampler(input: Tensor, grid: Tensor) -> Tensor: ...
+def cudnn_is_acceptable(input: Tensor) -> _bool: ...
+@overload
+def cummax(input: Tensor, dim: _int, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.cummax:
+    r"""
+    cummax(input, dim, *, out=None) -> (Tensor, LongTensor)
+
+    Returns a namedtuple ``(values, indices)`` where ``values`` is the cumulative maximum of
+    elements of :attr:`input` in the dimension :attr:`dim`. And ``indices`` is the index
+    location of each maximum value found in the dimension :attr:`dim`.
+
+    .. math::
+        y_i = max(x_1, x_2, x_3, \dots, x_i)
+
+    Args:
+        input (Tensor): the input tensor.
+        dim (int): the dimension to do the operation over
+
+    Keyword args:
+        out (tuple, optional): the result tuple of two output tensors (values, indices)
+
+    Example::
+
+        >>> a = torch.randn(10)
+        >>> a
+        tensor([-0.3449, -1.5447,  0.0685, -1.5104, -1.1706,  0.2259,  1.4696, -1.3284,
+                 1.9946, -0.8209])
+        >>> torch.cummax(a, dim=0)
+        torch.return_types.cummax(
+        values=tensor([-0.3449, -0.3449,  0.0685,  0.0685,  0.0685,  0.2259,  1.4696,  1.4696,
+                 1.9946,  1.9946]),
+        indices=tensor([0, 0, 2, 2, 2, 5, 6, 6, 8, 8]))
+    """
+    ...
+@overload
+def cummax(input: Tensor, dim: Union[str, ellipsis, None], *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.cummax:
+    r"""
+    cummax(input, dim, *, out=None) -> (Tensor, LongTensor)
+
+    Returns a namedtuple ``(values, indices)`` where ``values`` is the cumulative maximum of
+    elements of :attr:`input` in the dimension :attr:`dim`. And ``indices`` is the index
+    location of each maximum value found in the dimension :attr:`dim`.
+
+    .. math::
+        y_i = max(x_1, x_2, x_3, \dots, x_i)
+
+    Args:
+        input (Tensor): the input tensor.
+        dim (int): the dimension to do the operation over
+
+    Keyword args:
+        out (tuple, optional): the result tuple of two output tensors (values, indices)
+
+    Example::
+
+        >>> a = torch.randn(10)
+        >>> a
+        tensor([-0.3449, -1.5447,  0.0685, -1.5104, -1.1706,  0.2259,  1.4696, -1.3284,
+                 1.9946, -0.8209])
+        >>> torch.cummax(a, dim=0)
+        torch.return_types.cummax(
+        values=tensor([-0.3449, -0.3449,  0.0685,  0.0685,  0.0685,  0.2259,  1.4696,  1.4696,
+                 1.9946,  1.9946]),
+        indices=tensor([0, 0, 2, 2, 2, 5, 6, 6, 8, 8]))
+    """
+    ...
+@overload
+def cummin(input: Tensor, dim: _int, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.cummin:
+    r"""
+    cummin(input, dim, *, out=None) -> (Tensor, LongTensor)
+
+    Returns a namedtuple ``(values, indices)`` where ``values`` is the cumulative minimum of
+    elements of :attr:`input` in the dimension :attr:`dim`. And ``indices`` is the index
+    location of each minimum value found in the dimension :attr:`dim`.
+
+    .. math::
+        y_i = min(x_1, x_2, x_3, \dots, x_i)
+
+    Args:
+        input (Tensor): the input tensor.
+        dim (int): the dimension to do the operation over
+
+    Keyword args:
+        out (tuple, optional): the result tuple of two output tensors (values, indices)
+
+    Example::
+
+        >>> a = torch.randn(10)
+        >>> a
+        tensor([-0.2284, -0.6628,  0.0975,  0.2680, -1.3298, -0.4220, -0.3885,  1.1762,
+                 0.9165,  1.6684])
+        >>> torch.cummin(a, dim=0)
+        torch.return_types.cummin(
+        values=tensor([-0.2284, -0.6628, -0.6628, -0.6628, -1.3298, -1.3298, -1.3298, -1.3298,
+                -1.3298, -1.3298]),
+        indices=tensor([0, 1, 1, 1, 4, 4, 4, 4, 4, 4]))
+    """
+    ...
+@overload
+def cummin(input: Tensor, dim: Union[str, ellipsis, None], *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.cummin:
+    r"""
+    cummin(input, dim, *, out=None) -> (Tensor, LongTensor)
+
+    Returns a namedtuple ``(values, indices)`` where ``values`` is the cumulative minimum of
+    elements of :attr:`input` in the dimension :attr:`dim`. And ``indices`` is the index
+    location of each minimum value found in the dimension :attr:`dim`.
+
+    .. math::
+        y_i = min(x_1, x_2, x_3, \dots, x_i)
+
+    Args:
+        input (Tensor): the input tensor.
+        dim (int): the dimension to do the operation over
+
+    Keyword args:
+        out (tuple, optional): the result tuple of two output tensors (values, indices)
+
+    Example::
+
+        >>> a = torch.randn(10)
+        >>> a
+        tensor([-0.2284, -0.6628,  0.0975,  0.2680, -1.3298, -0.4220, -0.3885,  1.1762,
+                 0.9165,  1.6684])
+        >>> torch.cummin(a, dim=0)
+        torch.return_types.cummin(
+        values=tensor([-0.2284, -0.6628, -0.6628, -0.6628, -1.3298, -1.3298, -1.3298, -1.3298,
+                -1.3298, -1.3298]),
+        indices=tensor([0, 1, 1, 1, 4, 4, 4, 4, 4, 4]))
+    """
+    ...
+@overload
+def cumprod(input: Tensor, dim: _int, *, dtype: Optional[_dtype] = None, out: Optional[Tensor] = None) -> Tensor:
+    r"""
+    cumprod(input, dim, *, dtype=None, out=None) -> Tensor
+
+    Returns the cumulative product of elements of :attr:`input` in the dimension
+    :attr:`dim`.
+
+    For example, if :attr:`input` is a vector of size N, the result will also be
+    a vector of size N, with elements.
+
+    .. math::
+        y_i = x_1 \times x_2\times x_3\times \dots \times x_i
+
+    Args:
+        input (Tensor): the input tensor.
+        dim (int): the dimension to do the operation over
+
+    Keyword args:
+        dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
+            If specified, the input tensor is casted to :attr:`dtype` before the operation
+            is performed. This is useful for preventing data type overflows. Default: None.
+        out (Tensor, optional): the output tensor.
+
+    Example::
+
+        >>> a = torch.randn(10)
+        >>> a
+        tensor([ 0.6001,  0.2069, -0.1919,  0.9792,  0.6727,  1.0062,  0.4126,
+                -0.2129, -0.4206,  0.1968])
+        >>> torch.cumprod(a, dim=0)
+        tensor([ 0.6001,  0.1241, -0.0238, -0.0233, -0.0157, -0.0158, -0.0065,
+                 0.0014, -0.0006, -0.0001])
+
+        >>> a[5] = 0.0
+        >>> torch.cumprod(a, dim=0)
+        tensor([ 0.6001,  0.1241, -0.0238, -0.0233, -0.0157, -0.0000, -0.0000,
+                 0.0000, -0.0000, -0.0000])
+    """
+    ...
+@overload
+def cumprod(input: Tensor, dim: Union[str, ellipsis, None], *, dtype: Optional[_dtype] = None, out: Optional[Tensor] = None) -> Tensor:
+    r"""
+    cumprod(input, dim, *, dtype=None, out=None) -> Tensor
+
+    Returns the cumulative product of elements of :attr:`input` in the dimension
+    :attr:`dim`.
+
+    For example, if :attr:`input` is a vector of size N, the result will also be
+    a vector of size N, with elements.
+
+    .. math::
+        y_i = x_1 \times x_2\times x_3\times \dots \times x_i
+
+    Args:
+        input (Tensor): the input tensor.
+ dim (int): the dimension to do the operation over + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + If specified, the input tensor is casted to :attr:`dtype` before the operation + is performed. This is useful for preventing data type overflows. Default: None. + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randn(10) + >>> a + tensor([ 0.6001, 0.2069, -0.1919, 0.9792, 0.6727, 1.0062, 0.4126, + -0.2129, -0.4206, 0.1968]) + >>> torch.cumprod(a, dim=0) + tensor([ 0.6001, 0.1241, -0.0238, -0.0233, -0.0157, -0.0158, -0.0065, + 0.0014, -0.0006, -0.0001]) + + >>> a[5] = 0.0 + >>> torch.cumprod(a, dim=0) + tensor([ 0.6001, 0.1241, -0.0238, -0.0233, -0.0157, -0.0000, -0.0000, + 0.0000, -0.0000, -0.0000]) + """ + ... +@overload +def cumsum(input: Tensor, dim: _int, *, dtype: Optional[_dtype] = None, out: Optional[Tensor] = None) -> Tensor: + r""" + cumsum(input, dim, *, dtype=None, out=None) -> Tensor + + Returns the cumulative sum of elements of :attr:`input` in the dimension + :attr:`dim`. + + For example, if :attr:`input` is a vector of size N, the result will also be + a vector of size N, with elements. + + .. math:: + y_i = x_1 + x_2 + x_3 + \dots + x_i + + Args: + input (Tensor): the input tensor. + dim (int): the dimension to do the operation over + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + If specified, the input tensor is casted to :attr:`dtype` before the operation + is performed. This is useful for preventing data type overflows. Default: None. + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randint(1, 20, (10,)) + >>> a + tensor([13, 7, 3, 10, 13, 3, 15, 10, 9, 10]) + >>> torch.cumsum(a, dim=0) + tensor([13, 20, 23, 33, 46, 49, 64, 74, 83, 93]) + """ + ... +@overload +def cumsum(input: Tensor, dim: Union[str, ellipsis, None], *, dtype: Optional[_dtype] = None, out: Optional[Tensor] = None) -> Tensor: + r""" + cumsum(input, dim, *, dtype=None, out=None) -> Tensor + + Returns the cumulative sum of elements of :attr:`input` in the dimension + :attr:`dim`. + + For example, if :attr:`input` is a vector of size N, the result will also be + a vector of size N, with elements. + + .. math:: + y_i = x_1 + x_2 + x_3 + \dots + x_i + + Args: + input (Tensor): the input tensor. + dim (int): the dimension to do the operation over + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + If specified, the input tensor is casted to :attr:`dtype` before the operation + is performed. This is useful for preventing data type overflows. Default: None. + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randint(1, 20, (10,)) + >>> a + tensor([13, 7, 3, 10, 13, 3, 15, 10, 9, 10]) + >>> torch.cumsum(a, dim=0) + tensor([13, 20, 23, 33, 46, 49, 64, 74, 83, 93]) + """ + ... +@overload +def cumulative_trapezoid(y: Tensor, x: Tensor, *, dim: _int = -1) -> Tensor: + r""" + cumulative_trapezoid(y, x=None, *, dx=None, dim=-1) -> Tensor + + Cumulatively computes the `trapezoidal rule `_ + along :attr:`dim`. By default the spacing between elements is assumed to be 1, but + :attr:`dx` can be used to specify a different constant spacing, and :attr:`x` can be + used to specify arbitrary spacing along :attr:`dim`. + + For more details, please read :func:`torch.trapezoid`. 
The difference between :func:`torch.trapezoid` + and this function is that, :func:`torch.trapezoid` returns a value for each integration, + where as this function returns a cumulative value for every spacing within the integration. This + is analogous to how `.sum` returns a value and `.cumsum` returns a cumulative sum. + + Arguments: + y (Tensor): Values to use when computing the trapezoidal rule. + x (Tensor): If specified, defines spacing between values as specified above. + + Keyword arguments: + dx (float): constant spacing between values. If neither :attr:`x` or :attr:`dx` + are specified then this defaults to 1. Effectively multiplies the result by its value. + dim (int): The dimension along which to compute the trapezoidal rule. + The last (inner-most) dimension by default. + + Examples:: + + >>> # Cumulatively computes the trapezoidal rule in 1D, spacing is implicitly 1. + >>> y = torch.tensor([1, 5, 10]) + >>> torch.cumulative_trapezoid(y) + tensor([3., 10.5]) + + >>> # Computes the same trapezoidal rule directly up to each element to verify + >>> (1 + 5) / 2 + 3.0 + >>> (1 + 10 + 10) / 2 + 10.5 + + >>> # Cumulatively computes the trapezoidal rule in 1D with constant spacing of 2 + >>> # NOTE: the result is the same as before, but multiplied by 2 + >>> torch.cumulative_trapezoid(y, dx=2) + tensor([6., 21.]) + + >>> # Cumulatively computes the trapezoidal rule in 1D with arbitrary spacing + >>> x = torch.tensor([1, 3, 6]) + >>> torch.cumulative_trapezoid(y, x) + tensor([6., 28.5]) + + >>> # Computes the same trapezoidal rule directly up to each element to verify + >>> ((3 - 1) * (1 + 5)) / 2 + 6.0 + >>> ((3 - 1) * (1 + 5) + (6 - 3) * (5 + 10)) / 2 + 28.5 + + >>> # Cumulatively computes the trapezoidal rule for each row of a 3x3 matrix + >>> y = torch.arange(9).reshape(3, 3) + tensor([[0, 1, 2], + [3, 4, 5], + [6, 7, 8]]) + >>> torch.cumulative_trapezoid(y) + tensor([[ 0.5, 2.], + [ 3.5, 8.], + [ 6.5, 14.]]) + + >>> # Cumulatively computes the trapezoidal rule for each column of the matrix + >>> torch.cumulative_trapezoid(y, dim=0) + tensor([[ 1.5, 2.5, 3.5], + [ 6.0, 8.0, 10.0]]) + + >>> # Cumulatively computes the trapezoidal rule for each row of a 3x3 ones matrix + >>> # with the same arbitrary spacing + >>> y = torch.ones(3, 3) + >>> x = torch.tensor([1, 3, 6]) + >>> torch.cumulative_trapezoid(y, x) + tensor([[2., 5.], + [2., 5.], + [2., 5.]]) + + >>> # Cumulatively computes the trapezoidal rule for each row of a 3x3 ones matrix + >>> # with different arbitrary spacing per row + >>> y = torch.ones(3, 3) + >>> x = torch.tensor([[1, 2, 3], [1, 3, 5], [1, 4, 7]]) + >>> torch.cumulative_trapezoid(y, x) + tensor([[1., 2.], + [2., 4.], + [3., 6.]]) + """ + ... +@overload +def cumulative_trapezoid(y: Tensor, *, dx: Union[Number, _complex] = 1, dim: _int = -1) -> Tensor: + r""" + cumulative_trapezoid(y, x=None, *, dx=None, dim=-1) -> Tensor + + Cumulatively computes the `trapezoidal rule `_ + along :attr:`dim`. By default the spacing between elements is assumed to be 1, but + :attr:`dx` can be used to specify a different constant spacing, and :attr:`x` can be + used to specify arbitrary spacing along :attr:`dim`. + + For more details, please read :func:`torch.trapezoid`. The difference between :func:`torch.trapezoid` + and this function is that, :func:`torch.trapezoid` returns a value for each integration, + where as this function returns a cumulative value for every spacing within the integration. 
This + is analogous to how `.sum` returns a value and `.cumsum` returns a cumulative sum. + + Arguments: + y (Tensor): Values to use when computing the trapezoidal rule. + x (Tensor): If specified, defines spacing between values as specified above. + + Keyword arguments: + dx (float): constant spacing between values. If neither :attr:`x` or :attr:`dx` + are specified then this defaults to 1. Effectively multiplies the result by its value. + dim (int): The dimension along which to compute the trapezoidal rule. + The last (inner-most) dimension by default. + + Examples:: + + >>> # Cumulatively computes the trapezoidal rule in 1D, spacing is implicitly 1. + >>> y = torch.tensor([1, 5, 10]) + >>> torch.cumulative_trapezoid(y) + tensor([3., 10.5]) + + >>> # Computes the same trapezoidal rule directly up to each element to verify + >>> (1 + 5) / 2 + 3.0 + >>> (1 + 10 + 10) / 2 + 10.5 + + >>> # Cumulatively computes the trapezoidal rule in 1D with constant spacing of 2 + >>> # NOTE: the result is the same as before, but multiplied by 2 + >>> torch.cumulative_trapezoid(y, dx=2) + tensor([6., 21.]) + + >>> # Cumulatively computes the trapezoidal rule in 1D with arbitrary spacing + >>> x = torch.tensor([1, 3, 6]) + >>> torch.cumulative_trapezoid(y, x) + tensor([6., 28.5]) + + >>> # Computes the same trapezoidal rule directly up to each element to verify + >>> ((3 - 1) * (1 + 5)) / 2 + 6.0 + >>> ((3 - 1) * (1 + 5) + (6 - 3) * (5 + 10)) / 2 + 28.5 + + >>> # Cumulatively computes the trapezoidal rule for each row of a 3x3 matrix + >>> y = torch.arange(9).reshape(3, 3) + tensor([[0, 1, 2], + [3, 4, 5], + [6, 7, 8]]) + >>> torch.cumulative_trapezoid(y) + tensor([[ 0.5, 2.], + [ 3.5, 8.], + [ 6.5, 14.]]) + + >>> # Cumulatively computes the trapezoidal rule for each column of the matrix + >>> torch.cumulative_trapezoid(y, dim=0) + tensor([[ 1.5, 2.5, 3.5], + [ 6.0, 8.0, 10.0]]) + + >>> # Cumulatively computes the trapezoidal rule for each row of a 3x3 ones matrix + >>> # with the same arbitrary spacing + >>> y = torch.ones(3, 3) + >>> x = torch.tensor([1, 3, 6]) + >>> torch.cumulative_trapezoid(y, x) + tensor([[2., 5.], + [2., 5.], + [2., 5.]]) + + >>> # Cumulatively computes the trapezoidal rule for each row of a 3x3 ones matrix + >>> # with different arbitrary spacing per row + >>> y = torch.ones(3, 3) + >>> x = torch.tensor([[1, 2, 3], [1, 3, 5], [1, 4, 7]]) + >>> torch.cumulative_trapezoid(y, x) + tensor([[1., 2.], + [2., 4.], + [3., 6.]]) + """ + ... +def deg2rad(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + deg2rad(input, *, out=None) -> Tensor + + Returns a new tensor with each of the elements of :attr:`input` + converted from angles in degrees to radians. + + Args: + input (Tensor): the input tensor. + + Keyword arguments: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.tensor([[180.0, -180.0], [360.0, -360.0], [90.0, -90.0]]) + >>> torch.deg2rad(a) + tensor([[ 3.1416, -3.1416], + [ 6.2832, -6.2832], + [ 1.5708, -1.5708]]) + """ + ... +def deg2rad_(input: Tensor) -> Tensor: ... +@overload +def dequantize(input: Tensor) -> Tensor: + r""" + dequantize(tensor) -> Tensor + + Returns an fp32 Tensor by dequantizing a quantized Tensor + + Args: + tensor (Tensor): A quantized Tensor + + .. function:: dequantize(tensors) -> sequence of Tensors + :noindex: + + Given a list of quantized Tensors, dequantize them and return a list of fp32 Tensors + + Args: + tensors (sequence of Tensors): A list of quantized Tensors + """ + ... 
+@overload +def dequantize(tensors: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: + r""" + dequantize(tensor) -> Tensor + + Returns an fp32 Tensor by dequantizing a quantized Tensor + + Args: + tensor (Tensor): A quantized Tensor + + .. function:: dequantize(tensors) -> sequence of Tensors + :noindex: + + Given a list of quantized Tensors, dequantize them and return a list of fp32 Tensors + + Args: + tensors (sequence of Tensors): A list of quantized Tensors + """ + ... +def det(input: Tensor) -> Tensor: + r""" + det(input) -> Tensor + + Alias for :func:`torch.linalg.det` + """ + ... +def detach(input: Tensor) -> Tensor: ... +def detach_(input: Tensor) -> Tensor: ... +def detach_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + Performs the same operation as :func:`torch.detach`, but all output tensors + are freshly created instead of aliasing the input. + """ + ... +def diag(input: Tensor, diagonal: _int = 0, *, out: Optional[Tensor] = None) -> Tensor: + r""" + diag(input, diagonal=0, *, out=None) -> Tensor + + - If :attr:`input` is a vector (1-D tensor), then returns a 2-D square tensor + with the elements of :attr:`input` as the diagonal. + - If :attr:`input` is a matrix (2-D tensor), then returns a 1-D tensor with + the diagonal elements of :attr:`input`. + + The argument :attr:`diagonal` controls which diagonal to consider: + + - If :attr:`diagonal` = 0, it is the main diagonal. + - If :attr:`diagonal` > 0, it is above the main diagonal. + - If :attr:`diagonal` < 0, it is below the main diagonal. + + Args: + input (Tensor): the input tensor. + diagonal (int, optional): the diagonal to consider + + Keyword args: + out (Tensor, optional): the output tensor. + + .. seealso:: + + :func:`torch.diagonal` always returns the diagonal of its input. + + :func:`torch.diagflat` always constructs a tensor with diagonal elements + specified by the input. + + Examples: + + Get the square matrix where the input vector is the diagonal:: + + >>> a = torch.randn(3) + >>> a + tensor([ 0.5950,-0.0872, 2.3298]) + >>> torch.diag(a) + tensor([[ 0.5950, 0.0000, 0.0000], + [ 0.0000,-0.0872, 0.0000], + [ 0.0000, 0.0000, 2.3298]]) + >>> torch.diag(a, 1) + tensor([[ 0.0000, 0.5950, 0.0000, 0.0000], + [ 0.0000, 0.0000,-0.0872, 0.0000], + [ 0.0000, 0.0000, 0.0000, 2.3298], + [ 0.0000, 0.0000, 0.0000, 0.0000]]) + + Get the k-th diagonal of a given matrix:: + + >>> a = torch.randn(3, 3) + >>> a + tensor([[-0.4264, 0.0255,-0.1064], + [ 0.8795,-0.2429, 0.1374], + [ 0.1029,-0.6482,-1.6300]]) + >>> torch.diag(a, 0) + tensor([-0.4264,-0.2429,-1.6300]) + >>> torch.diag(a, 1) + tensor([ 0.0255, 0.1374]) + """ + ... +def diag_embed(input: Tensor, offset: _int = 0, dim1: _int = -2, dim2: _int = -1) -> Tensor: + r""" + diag_embed(input, offset=0, dim1=-2, dim2=-1) -> Tensor + + Creates a tensor whose diagonals of certain 2D planes (specified by + :attr:`dim1` and :attr:`dim2`) are filled by :attr:`input`. + To facilitate creating batched diagonal matrices, the 2D planes formed by + the last two dimensions of the returned tensor are chosen by default. + + The argument :attr:`offset` controls which diagonal to consider: + + - If :attr:`offset` = 0, it is the main diagonal. + - If :attr:`offset` > 0, it is above the main diagonal. + - If :attr:`offset` < 0, it is below the main diagonal. + + The size of the new matrix will be calculated to make the specified diagonal + of the size of the last input dimension. 
+ Note that for :attr:`offset` other than :math:`0`, the order of :attr:`dim1` + and :attr:`dim2` matters. Exchanging them is equivalent to changing the + sign of :attr:`offset`. + + Applying :meth:`torch.diagonal` to the output of this function with + the same arguments yields a matrix identical to input. However, + :meth:`torch.diagonal` has different default dimensions, so those + need to be explicitly specified. + + Args: + input (Tensor): the input tensor. Must be at least 1-dimensional. + offset (int, optional): which diagonal to consider. Default: 0 + (main diagonal). + dim1 (int, optional): first dimension with respect to which to + take diagonal. Default: -2. + dim2 (int, optional): second dimension with respect to which to + take diagonal. Default: -1. + + Example:: + + >>> a = torch.randn(2, 3) + >>> torch.diag_embed(a) + tensor([[[ 1.5410, 0.0000, 0.0000], + [ 0.0000, -0.2934, 0.0000], + [ 0.0000, 0.0000, -2.1788]], + + [[ 0.5684, 0.0000, 0.0000], + [ 0.0000, -1.0845, 0.0000], + [ 0.0000, 0.0000, -1.3986]]]) + + >>> torch.diag_embed(a, offset=1, dim1=0, dim2=2) + tensor([[[ 0.0000, 1.5410, 0.0000, 0.0000], + [ 0.0000, 0.5684, 0.0000, 0.0000]], + + [[ 0.0000, 0.0000, -0.2934, 0.0000], + [ 0.0000, 0.0000, -1.0845, 0.0000]], + + [[ 0.0000, 0.0000, 0.0000, -2.1788], + [ 0.0000, 0.0000, 0.0000, -1.3986]], + + [[ 0.0000, 0.0000, 0.0000, 0.0000], + [ 0.0000, 0.0000, 0.0000, 0.0000]]]) + """ + ... +def diagflat(input: Tensor, offset: _int = 0) -> Tensor: + r""" + diagflat(input, offset=0) -> Tensor + + - If :attr:`input` is a vector (1-D tensor), then returns a 2-D square tensor + with the elements of :attr:`input` as the diagonal. + - If :attr:`input` is a tensor with more than one dimension, then returns a + 2-D tensor with diagonal elements equal to a flattened :attr:`input`. + + The argument :attr:`offset` controls which diagonal to consider: + + - If :attr:`offset` = 0, it is the main diagonal. + - If :attr:`offset` > 0, it is above the main diagonal. + - If :attr:`offset` < 0, it is below the main diagonal. + + Args: + input (Tensor): the input tensor. + offset (int, optional): the diagonal to consider. Default: 0 (main + diagonal). + + Examples:: + + >>> a = torch.randn(3) + >>> a + tensor([-0.2956, -0.9068, 0.1695]) + >>> torch.diagflat(a) + tensor([[-0.2956, 0.0000, 0.0000], + [ 0.0000, -0.9068, 0.0000], + [ 0.0000, 0.0000, 0.1695]]) + >>> torch.diagflat(a, 1) + tensor([[ 0.0000, -0.2956, 0.0000, 0.0000], + [ 0.0000, 0.0000, -0.9068, 0.0000], + [ 0.0000, 0.0000, 0.0000, 0.1695], + [ 0.0000, 0.0000, 0.0000, 0.0000]]) + + >>> a = torch.randn(2, 2) + >>> a + tensor([[ 0.2094, -0.3018], + [-0.1516, 1.9342]]) + >>> torch.diagflat(a) + tensor([[ 0.2094, 0.0000, 0.0000, 0.0000], + [ 0.0000, -0.3018, 0.0000, 0.0000], + [ 0.0000, 0.0000, -0.1516, 0.0000], + [ 0.0000, 0.0000, 0.0000, 1.9342]]) + """ + ... +@overload +def diagonal(input: Tensor, offset: _int = 0, dim1: _int = 0, dim2: _int = 1) -> Tensor: + r""" + diagonal(input, offset=0, dim1=0, dim2=1) -> Tensor + + Returns a partial view of :attr:`input` with the its diagonal elements + with respect to :attr:`dim1` and :attr:`dim2` appended as a dimension + at the end of the shape. + + The argument :attr:`offset` controls which diagonal to consider: + + - If :attr:`offset` = 0, it is the main diagonal. + - If :attr:`offset` > 0, it is above the main diagonal. + - If :attr:`offset` < 0, it is below the main diagonal. 
+ + Applying :meth:`torch.diag_embed` to the output of this function with + the same arguments yields a diagonal matrix with the diagonal entries + of the input. However, :meth:`torch.diag_embed` has different default + dimensions, so those need to be explicitly specified. + + Args: + input (Tensor): the input tensor. Must be at least 2-dimensional. + offset (int, optional): which diagonal to consider. Default: 0 + (main diagonal). + dim1 (int, optional): first dimension with respect to which to + take diagonal. Default: 0. + dim2 (int, optional): second dimension with respect to which to + take diagonal. Default: 1. + + .. note:: To take a batch diagonal, pass in dim1=-2, dim2=-1. + + Examples:: + + >>> a = torch.randn(3, 3) + >>> a + tensor([[-1.0854, 1.1431, -0.1752], + [ 0.8536, -0.0905, 0.0360], + [ 0.6927, -0.3735, -0.4945]]) + + + >>> torch.diagonal(a, 0) + tensor([-1.0854, -0.0905, -0.4945]) + + + >>> torch.diagonal(a, 1) + tensor([ 1.1431, 0.0360]) + + + >>> x = torch.randn(2, 5, 4, 2) + >>> torch.diagonal(x, offset=-1, dim1=1, dim2=2) + tensor([[[-1.2631, 0.3755, -1.5977, -1.8172], + [-1.1065, 1.0401, -0.2235, -0.7938]], + + [[-1.7325, -0.3081, 0.6166, 0.2335], + [ 1.0500, 0.7336, -0.3836, -1.1015]]]) + """ + ... +@overload +def diagonal(input: Tensor, *, outdim: Union[str, ellipsis, None], dim1: Union[str, ellipsis, None], dim2: Union[str, ellipsis, None], offset: _int = 0) -> Tensor: + r""" + diagonal(input, offset=0, dim1=0, dim2=1) -> Tensor + + Returns a partial view of :attr:`input` with the its diagonal elements + with respect to :attr:`dim1` and :attr:`dim2` appended as a dimension + at the end of the shape. + + The argument :attr:`offset` controls which diagonal to consider: + + - If :attr:`offset` = 0, it is the main diagonal. + - If :attr:`offset` > 0, it is above the main diagonal. + - If :attr:`offset` < 0, it is below the main diagonal. + + Applying :meth:`torch.diag_embed` to the output of this function with + the same arguments yields a diagonal matrix with the diagonal entries + of the input. However, :meth:`torch.diag_embed` has different default + dimensions, so those need to be explicitly specified. + + Args: + input (Tensor): the input tensor. Must be at least 2-dimensional. + offset (int, optional): which diagonal to consider. Default: 0 + (main diagonal). + dim1 (int, optional): first dimension with respect to which to + take diagonal. Default: 0. + dim2 (int, optional): second dimension with respect to which to + take diagonal. Default: 1. + + .. note:: To take a batch diagonal, pass in dim1=-2, dim2=-1. + + Examples:: + + >>> a = torch.randn(3, 3) + >>> a + tensor([[-1.0854, 1.1431, -0.1752], + [ 0.8536, -0.0905, 0.0360], + [ 0.6927, -0.3735, -0.4945]]) + + + >>> torch.diagonal(a, 0) + tensor([-1.0854, -0.0905, -0.4945]) + + + >>> torch.diagonal(a, 1) + tensor([ 1.1431, 0.0360]) + + + >>> x = torch.randn(2, 5, 4, 2) + >>> torch.diagonal(x, offset=-1, dim1=1, dim2=2) + tensor([[[-1.2631, 0.3755, -1.5977, -1.8172], + [-1.1065, 1.0401, -0.2235, -0.7938]], + + [[-1.7325, -0.3081, 0.6166, 0.2335], + [ 1.0500, 0.7336, -0.3836, -1.1015]]]) + """ + ... +def diagonal_copy(input: Tensor, offset: _int = 0, dim1: _int = 0, dim2: _int = 1, *, out: Optional[Tensor] = None) -> Tensor: + r""" + Performs the same operation as :func:`torch.diagonal`, but all output tensors + are freshly created instead of aliasing the input. + """ + ... 
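+# The round trip described in the diag_embed and diagonal docstrings above: embedding
+# a batch of diagonals and reading them back with the same offset/dim arguments
+# recovers the original values.
+#
+# >>> a = torch.randn(2, 3)
+# >>> torch.equal(torch.diagonal(torch.diag_embed(a), offset=0, dim1=-2, dim2=-1), a)  # True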
+def diagonal_scatter(input: Tensor, src: Tensor, offset: _int = 0, dim1: _int = 0, dim2: _int = 1) -> Tensor: + r""" + diagonal_scatter(input, src, offset=0, dim1=0, dim2=1) -> Tensor + + Embeds the values of the :attr:`src` tensor into :attr:`input` along + the diagonal elements of :attr:`input`, with respect to :attr:`dim1` + and :attr:`dim2`. + + This function returns a tensor with fresh storage; it does not + return a view. + + The argument :attr:`offset` controls which diagonal to consider: + + - If :attr:`offset` = 0, it is the main diagonal. + - If :attr:`offset` > 0, it is above the main diagonal. + - If :attr:`offset` < 0, it is below the main diagonal. + + Args: + input (Tensor): the input tensor. Must be at least 2-dimensional. + src (Tensor): the tensor to embed into :attr:`input`. + offset (int, optional): which diagonal to consider. Default: 0 + (main diagonal). + dim1 (int, optional): first dimension with respect to which to + take diagonal. Default: 0. + dim2 (int, optional): second dimension with respect to which to + take diagonal. Default: 1. + + .. note:: + + :attr:`src` must be of the proper size in order to be embedded + into :attr:`input`. Specifically, it should have the same shape as + ``torch.diagonal(input, offset, dim1, dim2)`` + + Examples:: + + >>> a = torch.zeros(3, 3) + >>> a + tensor([[0., 0., 0.], + [0., 0., 0.], + [0., 0., 0.]]) + + >>> torch.diagonal_scatter(a, torch.ones(3), 0) + tensor([[1., 0., 0.], + [0., 1., 0.], + [0., 0., 1.]]) + + >>> torch.diagonal_scatter(a, torch.ones(2), 1) + tensor([[0., 1., 0.], + [0., 0., 1.], + [0., 0., 0.]]) + """ + ... +def diff(input: Tensor, n: _int = 1, dim: _int = -1, prepend: Optional[Tensor] = None, append: Optional[Tensor] = None, *, out: Optional[Tensor] = None) -> Tensor: + r""" + diff(input, n=1, dim=-1, prepend=None, append=None) -> Tensor + + Computes the n-th forward difference along the given dimension. + + The first-order differences are given by `out[i] = input[i + 1] - input[i]`. Higher-order + differences are calculated by using :func:`torch.diff` recursively. + + Args: + input (Tensor): the tensor to compute the differences on + n (int, optional): the number of times to recursively compute the difference + dim (int, optional): the dimension to compute the difference along. + Default is the last dimension. + prepend, append (Tensor, optional): values to prepend or append to + :attr:`input` along :attr:`dim` before computing the difference. + Their dimensions must be equivalent to that of input, and their shapes + must match input's shape except on :attr:`dim`. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.tensor([1, 3, 2]) + >>> torch.diff(a) + tensor([ 2, -1]) + >>> b = torch.tensor([4, 5]) + >>> torch.diff(a, append=b) + tensor([ 2, -1, 2, 1]) + >>> c = torch.tensor([[1, 2, 3], [3, 4, 5]]) + >>> torch.diff(c, dim=0) + tensor([[2, 2, 2]]) + >>> torch.diff(c, dim=1) + tensor([[1, 1], + [1, 1]]) + """ + ... +def digamma(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + digamma(input, *, out=None) -> Tensor + + Alias for :func:`torch.special.digamma`. + """ + ... +def dist(input: Tensor, other: Tensor, p: Union[Number, _complex] = 2) -> Tensor: + r""" + dist(input, other, p=2) -> Tensor + + Returns the p-norm of (:attr:`input` - :attr:`other`) + + The shapes of :attr:`input` and :attr:`other` must be + :ref:`broadcastable `. + + Args: + input (Tensor): the input tensor. 
+ other (Tensor): the Right-hand-side input tensor + p (float, optional): the norm to be computed + + Example:: + + >>> x = torch.randn(4) + >>> x + tensor([-1.5393, -0.8675, 0.5916, 1.6321]) + >>> y = torch.randn(4) + >>> y + tensor([ 0.0967, -1.0511, 0.6295, 0.8360]) + >>> torch.dist(x, y, 3.5) + tensor(1.6727) + >>> torch.dist(x, y, 3) + tensor(1.6973) + >>> torch.dist(x, y, 0) + tensor(4.) + >>> torch.dist(x, y, 1) + tensor(2.6537) + """ + ... +def div(input: Union[Tensor, Number], other: Union[Tensor, Number], *, rounding_mode: Optional[str] = None, out: Optional[Tensor] = None) -> Tensor: + r""" + div(input, other, *, rounding_mode=None, out=None) -> Tensor + + Divides each element of the input ``input`` by the corresponding element of + :attr:`other`. + + .. math:: + \text{out}_i = \frac{\text{input}_i}{\text{other}_i} + + .. note:: + By default, this performs a "true" division like Python 3. + See the :attr:`rounding_mode` argument for floor division. + + Supports :ref:`broadcasting to a common shape `, + :ref:`type promotion `, and integer, float, and complex inputs. + Always promotes integer types to the default scalar type. + + Args: + input (Tensor): the dividend + other (Tensor or Number): the divisor + + Keyword args: + rounding_mode (str, optional): Type of rounding applied to the result: + + * None - default behavior. Performs no rounding and, if both :attr:`input` and + :attr:`other` are integer types, promotes the inputs to the default scalar type. + Equivalent to true division in Python (the ``/`` operator) and NumPy's ``np.true_divide``. + * ``"trunc"`` - rounds the results of the division towards zero. + Equivalent to C-style integer division. + * ``"floor"`` - rounds the results of the division down. + Equivalent to floor division in Python (the ``//`` operator) and NumPy's ``np.floor_divide``. + + out (Tensor, optional): the output tensor. + + Examples:: + + >>> x = torch.tensor([ 0.3810, 1.2774, -0.2972, -0.3719, 0.4637]) + >>> torch.div(x, 0.5) + tensor([ 0.7620, 2.5548, -0.5944, -0.7438, 0.9274]) + + >>> a = torch.tensor([[-0.3711, -1.9353, -0.4605, -0.2917], + ... [ 0.1815, -1.0111, 0.9805, -1.5923], + ... [ 0.1062, 1.4581, 0.7759, -1.2344], + ... [-0.1830, -0.0313, 1.1908, -1.4757]]) + >>> b = torch.tensor([ 0.8032, 0.2930, -0.8113, -0.2308]) + >>> torch.div(a, b) + tensor([[-0.4620, -6.6051, 0.5676, 1.2639], + [ 0.2260, -3.4509, -1.2086, 6.8990], + [ 0.1322, 4.9764, -0.9564, 5.3484], + [-0.2278, -0.1068, -1.4678, 6.3938]]) + + >>> torch.div(a, b, rounding_mode='trunc') + tensor([[-0., -6., 0., 1.], + [ 0., -3., -1., 6.], + [ 0., 4., -0., 5.], + [-0., -0., -1., 6.]]) + + >>> torch.div(a, b, rounding_mode='floor') + tensor([[-1., -7., 0., 1.], + [ 0., -4., -2., 6.], + [ 0., 4., -1., 5.], + [-1., -1., -2., 6.]]) + """ + ... +@overload +def divide(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + divide(input, other, *, rounding_mode=None, out=None) -> Tensor + + Alias for :func:`torch.div`. + """ + ... +@overload +def divide(input: Tensor, other: Tensor, *, rounding_mode: Optional[str], out: Optional[Tensor] = None) -> Tensor: + r""" + divide(input, other, *, rounding_mode=None, out=None) -> Tensor + + Alias for :func:`torch.div`. + """ + ... +@overload +def divide(input: Tensor, other: Union[Number, _complex], *, rounding_mode: Optional[str]) -> Tensor: + r""" + divide(input, other, *, rounding_mode=None, out=None) -> Tensor + + Alias for :func:`torch.div`. + """ + ... 
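+# A small, hand-checkable sketch of the rounding_mode keyword documented for
+# torch.div above (torch.divide is an alias); dividing by 2 keeps every result exact.
+#
+#   >>> import torch
+#   >>> a = torch.tensor([7., -7.])
+#   >>> torch.div(a, 2)                           # true division: tensor([ 3.5000, -3.5000])
+#   >>> torch.div(a, 2, rounding_mode='trunc')    # toward zero:   tensor([ 3., -3.])
+#   >>> torch.divide(a, 2, rounding_mode='floor') # toward -inf:   tensor([ 3., -4.])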
+@overload +def divide(input: Tensor, other: Union[Number, _complex]) -> Tensor: + r""" + divide(input, other, *, rounding_mode=None, out=None) -> Tensor + + Alias for :func:`torch.div`. + """ + ... +def dot(input: Tensor, tensor: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + dot(input, other, *, out=None) -> Tensor + + Computes the dot product of two 1D tensors. + + .. note:: + + Unlike NumPy's dot, torch.dot intentionally only supports computing the dot product + of two 1D tensors with the same number of elements. + + Args: + input (Tensor): first tensor in the dot product, must be 1D. + other (Tensor): second tensor in the dot product, must be 1D. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> torch.dot(torch.tensor([2, 3]), torch.tensor([2, 1])) + tensor(7) + """ + ... +def dropout(input: Tensor, p: _float, train: _bool) -> Tensor: ... +def dropout_(input: Tensor, p: _float, train: _bool) -> Tensor: ... +def dsmm(input: Tensor, mat2: Tensor) -> Tensor: ... +@overload +def dsplit(input: Tensor, sections: _int) -> Tuple[Tensor, ...]: + r""" + dsplit(input, indices_or_sections) -> List of Tensors + + Splits :attr:`input`, a tensor with three or more dimensions, into multiple tensors + depthwise according to :attr:`indices_or_sections`. Each split is a view of + :attr:`input`. + + This is equivalent to calling torch.tensor_split(input, indices_or_sections, dim=2) + (the split dimension is 2), except that if :attr:`indices_or_sections` is an integer + it must evenly divide the split dimension or a runtime error will be thrown. + + This function is based on NumPy's :func:`numpy.dsplit`. + + Args: + input (Tensor): tensor to split. + indices_or_sections (int or list or tuple of ints): See argument in :func:`torch.tensor_split`. + + Example:: + >>> t = torch.arange(16.0).reshape(2, 2, 4) + >>> t + tensor([[[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.]], + [[ 8., 9., 10., 11.], + [12., 13., 14., 15.]]]) + >>> torch.dsplit(t, 2) + (tensor([[[ 0., 1.], + [ 4., 5.]], + [[ 8., 9.], + [12., 13.]]]), + tensor([[[ 2., 3.], + [ 6., 7.]], + [[10., 11.], + [14., 15.]]])) + + >>> torch.dsplit(t, [3, 6]) + (tensor([[[ 0., 1., 2.], + [ 4., 5., 6.]], + [[ 8., 9., 10.], + [12., 13., 14.]]]), + tensor([[[ 3.], + [ 7.]], + [[11.], + [15.]]]), + tensor([], size=(2, 2, 0))) + """ + ... +@overload +def dsplit(input: Tensor, indices: _size) -> Tuple[Tensor, ...]: + r""" + dsplit(input, indices_or_sections) -> List of Tensors + + Splits :attr:`input`, a tensor with three or more dimensions, into multiple tensors + depthwise according to :attr:`indices_or_sections`. Each split is a view of + :attr:`input`. + + This is equivalent to calling torch.tensor_split(input, indices_or_sections, dim=2) + (the split dimension is 2), except that if :attr:`indices_or_sections` is an integer + it must evenly divide the split dimension or a runtime error will be thrown. + + This function is based on NumPy's :func:`numpy.dsplit`. + + Args: + input (Tensor): tensor to split. + indices_or_sections (int or list or tuple of ints): See argument in :func:`torch.tensor_split`. 
+ + Example:: + >>> t = torch.arange(16.0).reshape(2, 2, 4) + >>> t + tensor([[[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.]], + [[ 8., 9., 10., 11.], + [12., 13., 14., 15.]]]) + >>> torch.dsplit(t, 2) + (tensor([[[ 0., 1.], + [ 4., 5.]], + [[ 8., 9.], + [12., 13.]]]), + tensor([[[ 2., 3.], + [ 6., 7.]], + [[10., 11.], + [14., 15.]]])) + + >>> torch.dsplit(t, [3, 6]) + (tensor([[[ 0., 1., 2.], + [ 4., 5., 6.]], + [[ 8., 9., 10.], + [12., 13., 14.]]]), + tensor([[[ 3.], + [ 7.]], + [[11.], + [15.]]]), + tensor([], size=(2, 2, 0))) + """ + ... +def dstack(tensors: Union[Tuple[Tensor, ...], List[Tensor]], *, out: Optional[Tensor] = None) -> Tensor: + r""" + dstack(tensors, *, out=None) -> Tensor + + Stack tensors in sequence depthwise (along third axis). + + This is equivalent to concatenation along the third axis after 1-D and 2-D tensors have been reshaped by :func:`torch.atleast_3d`. + + Args: + tensors (sequence of Tensors): sequence of tensors to concatenate + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.tensor([1, 2, 3]) + >>> b = torch.tensor([4, 5, 6]) + >>> torch.dstack((a,b)) + tensor([[[1, 4], + [2, 5], + [3, 6]]]) + >>> a = torch.tensor([[1],[2],[3]]) + >>> b = torch.tensor([[4],[5],[6]]) + >>> torch.dstack((a,b)) + tensor([[[1, 4]], + [[2, 5]], + [[3, 6]]]) + """ + ... +def embedding(weight: Tensor, indices: Tensor, padding_idx: Union[_int, SymInt] = -1, scale_grad_by_freq: _bool = False, sparse: _bool = False) -> Tensor: ... +@overload +def embedding_bag(weight: Tensor, indices: Tensor, offsets: Tensor, scale_grad_by_freq: _bool, mode: _int, sparse: _bool, per_sample_weights: Optional[Tensor], include_last_offset: _bool, padding_idx: Optional[_int]) -> Tuple[Tensor, Tensor, Tensor, Tensor]: ... +@overload +def embedding_bag(weight: Tensor, indices: Tensor, offsets: Tensor, scale_grad_by_freq: _bool = False, mode: _int = 0, sparse: _bool = False, per_sample_weights: Optional[Tensor] = None, include_last_offset: _bool = False) -> Tuple[Tensor, Tensor, Tensor, Tensor]: ... +def embedding_renorm_(input: Tensor, indices: Tensor, max_norm: _float, norm_type: _float) -> Tensor: ... +@overload +def empty(size: Sequence[Union[_int, SymInt]], *, memory_format: Optional[memory_format] = None, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + empty(*size, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False, memory_format=torch.contiguous_format) -> Tensor + + Returns a tensor filled with uninitialized data. The shape of the tensor is + defined by the variable argument :attr:`size`. + + .. note:: + If :func:`torch.use_deterministic_algorithms()` and + :attr:`torch.utils.deterministic.fill_uninitialized_memory` are both set to + ``True``, the output tensor is initialized to prevent any possible + nondeterministic behavior from using the data as an input to an operation. + Floating point and complex tensors are filled with NaN, and integer tensors + are filled with the maximum value. + + Args: + size (int...): a sequence of integers defining the shape of the output tensor. + Can be a variable number of arguments or a collection like a list or tuple. + + Keyword args: + out (Tensor, optional): the output tensor. + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. 
+ Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + pin_memory (bool, optional): If set, returned tensor would be allocated in + the pinned memory. Works only for CPU tensors. Default: ``False``. + memory_format (:class:`torch.memory_format`, optional): the desired memory format of + returned Tensor. Default: ``torch.contiguous_format``. + + Example:: + + >>> torch.empty((2,3), dtype=torch.int64) + tensor([[ 9.4064e+13, 2.8000e+01, 9.3493e+13], + [ 7.5751e+18, 7.1428e+18, 7.5955e+18]]) + """ + ... +@overload +def empty(*size: _int, memory_format: Optional[memory_format] = None, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + empty(*size, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False, memory_format=torch.contiguous_format) -> Tensor + + Returns a tensor filled with uninitialized data. The shape of the tensor is + defined by the variable argument :attr:`size`. + + .. note:: + If :func:`torch.use_deterministic_algorithms()` and + :attr:`torch.utils.deterministic.fill_uninitialized_memory` are both set to + ``True``, the output tensor is initialized to prevent any possible + nondeterministic behavior from using the data as an input to an operation. + Floating point and complex tensors are filled with NaN, and integer tensors + are filled with the maximum value. + + Args: + size (int...): a sequence of integers defining the shape of the output tensor. + Can be a variable number of arguments or a collection like a list or tuple. + + Keyword args: + out (Tensor, optional): the output tensor. + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + pin_memory (bool, optional): If set, returned tensor would be allocated in + the pinned memory. Works only for CPU tensors. Default: ``False``. + memory_format (:class:`torch.memory_format`, optional): the desired memory format of + returned Tensor. Default: ``torch.contiguous_format``. + + Example:: + + >>> torch.empty((2,3), dtype=torch.int64) + tensor([[ 9.4064e+13, 2.8000e+01, 9.3493e+13], + [ 7.5751e+18, 7.1428e+18, 7.5955e+18]]) + """ + ... 
+@overload +def empty(size: _size, *, names: Optional[Sequence[Union[str, ellipsis, None]]], memory_format: Optional[memory_format] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + empty(*size, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False, memory_format=torch.contiguous_format) -> Tensor + + Returns a tensor filled with uninitialized data. The shape of the tensor is + defined by the variable argument :attr:`size`. + + .. note:: + If :func:`torch.use_deterministic_algorithms()` and + :attr:`torch.utils.deterministic.fill_uninitialized_memory` are both set to + ``True``, the output tensor is initialized to prevent any possible + nondeterministic behavior from using the data as an input to an operation. + Floating point and complex tensors are filled with NaN, and integer tensors + are filled with the maximum value. + + Args: + size (int...): a sequence of integers defining the shape of the output tensor. + Can be a variable number of arguments or a collection like a list or tuple. + + Keyword args: + out (Tensor, optional): the output tensor. + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + pin_memory (bool, optional): If set, returned tensor would be allocated in + the pinned memory. Works only for CPU tensors. Default: ``False``. + memory_format (:class:`torch.memory_format`, optional): the desired memory format of + returned Tensor. Default: ``torch.contiguous_format``. + + Example:: + + >>> torch.empty((2,3), dtype=torch.int64) + tensor([[ 9.4064e+13, 2.8000e+01, 9.3493e+13], + [ 7.5751e+18, 7.1428e+18, 7.5955e+18]]) + """ + ... +@overload +def empty(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], memory_format: Optional[memory_format] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + empty(*size, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False, memory_format=torch.contiguous_format) -> Tensor + + Returns a tensor filled with uninitialized data. The shape of the tensor is + defined by the variable argument :attr:`size`. + + .. note:: + If :func:`torch.use_deterministic_algorithms()` and + :attr:`torch.utils.deterministic.fill_uninitialized_memory` are both set to + ``True``, the output tensor is initialized to prevent any possible + nondeterministic behavior from using the data as an input to an operation. + Floating point and complex tensors are filled with NaN, and integer tensors + are filled with the maximum value. 
+ + Args: + size (int...): a sequence of integers defining the shape of the output tensor. + Can be a variable number of arguments or a collection like a list or tuple. + + Keyword args: + out (Tensor, optional): the output tensor. + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + pin_memory (bool, optional): If set, returned tensor would be allocated in + the pinned memory. Works only for CPU tensors. Default: ``False``. + memory_format (:class:`torch.memory_format`, optional): the desired memory format of + returned Tensor. Default: ``torch.contiguous_format``. + + Example:: + + >>> torch.empty((2,3), dtype=torch.int64) + tensor([[ 9.4064e+13, 2.8000e+01, 9.3493e+13], + [ 7.5751e+18, 7.1428e+18, 7.5955e+18]]) + """ + ... +def empty_like(input: Tensor, *, memory_format: Optional[memory_format] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + empty_like(input, *, dtype=None, layout=None, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor + + Returns an uninitialized tensor with the same size as :attr:`input`. + ``torch.empty_like(input)`` is equivalent to + ``torch.empty(input.size(), dtype=input.dtype, layout=input.layout, device=input.device)``. + + .. note:: + If :func:`torch.use_deterministic_algorithms()` and + :attr:`torch.utils.deterministic.fill_uninitialized_memory` are both set to + ``True``, the output tensor is initialized to prevent any possible + nondeterministic behavior from using the data as an input to an operation. + Floating point and complex tensors are filled with NaN, and integer tensors + are filled with the maximum value. + + Args: + input (Tensor): the size of :attr:`input` will determine size of the output tensor. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned Tensor. + Default: if ``None``, defaults to the dtype of :attr:`input`. + layout (:class:`torch.layout`, optional): the desired layout of returned tensor. + Default: if ``None``, defaults to the layout of :attr:`input`. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, defaults to the device of :attr:`input`. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + memory_format (:class:`torch.memory_format`, optional): the desired memory format of + returned Tensor. Default: ``torch.preserve_format``. + + Example:: + + >>> a=torch.empty((2,3), dtype=torch.int32, device = 'cuda') + >>> torch.empty_like(a) + tensor([[0, 0, 0], + [0, 0, 0]], device='cuda:0', dtype=torch.int32) + """ + ... 
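+# A short sketch of torch.empty / torch.empty_like as documented above: the contents
+# are uninitialized, so only shape, dtype and device are meaningful to inspect.
+# Kept CPU-only here to stay device-agnostic.
+#
+#   >>> import torch
+#   >>> torch.empty(2, 3).shape                     # varargs form
+#   torch.Size([2, 3])
+#   >>> src = torch.zeros(2, 3, dtype=torch.int32)
+#   >>> out = torch.empty_like(src)                 # mirrors src's size and dtype
+#   >>> (out.shape, out.dtype) == (src.shape, src.dtype)
+#   True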
+def empty_permuted(size: Sequence[Union[_int, SymInt]], physical_layout: _size, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + empty_permuted(size, physical_layout, *, dtype=None, layout=None, device=None, requires_grad=False, pin_memory=False) -> Tensor + + Creates an uninitialized, non-overlapping and dense tensor with the + specified :attr:`size`, with :attr:`physical_layout` specifying how the + dimensions are physically laid out in memory (each logical dimension is listed + from outermost to innermost). :attr:`physical_layout` is a generalization + of NCHW/NHWC notation: if each dimension is assigned a number according to + what order they occur in size (N=0, C=1, H=2, W=3), then NCHW is ``(0, 1, 2, 3)`` + while NHWC is ``(0, 2, 3, 1)``. Equivalently, the strides of the output + tensor ``t`` are such that ``t.stride(physical_layout[i]) == contiguous_strides[i]`` + (notably, this function is *not* equivalent to ``torch.empty(size).permute(physical_layout)``). + + Unlike :func:`torch.empty_strided`, this is guaranteed to produce a dense + tensor with no overlaps. If possible, prefer using this function over + :func:`torch.empty_strided` or manual use of :func:`torch.as_strided`. + + .. note:: + If :func:`torch.use_deterministic_algorithms()` and + :attr:`torch.utils.deterministic.fill_uninitialized_memory` are both set to + ``True``, the output tensor is initialized to prevent any possible + nondeterministic behavior from using the data as an input to an operation. + Floating point and complex tensors are filled with NaN, and integer tensors + are filled with the maximum value. + + Args: + size (tuple of int): the shape of the output tensor + physical_layout (tuple of int): the ordering of dimensions physically in memory + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + pin_memory (bool, optional): If set, returned tensor would be allocated in + the pinned memory. Works only for CPU tensors. Default: ``False``. + + Examples: + + >>> torch.empty((2, 3, 5, 7)).stride() + (105, 35, 7, 1) + >>> torch.empty_permuted((2, 3, 5, 7), (0, 1, 2, 3)).stride() + (105, 35, 7, 1) + >>> torch.empty((2, 3, 5, 7), memory_format=torch.channels_last).stride() + (105, 1, 21, 3) + >>> torch.empty_permuted((2, 3, 5, 7), (0, 2, 3, 1)).stride() + (105, 1, 21, 3) + >>> torch.empty_permuted((2, 3, 5, 7), (0, 2, 3, 1)).dim_order() + (0, 2, 3, 1) + """ + ... +def empty_quantized(size: _size, qtensor: Tensor, *, memory_format: Optional[memory_format] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... 
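+# A sketch reinforcing the stride relationship documented for empty_permuted above:
+# physical_layout (0, 2, 3, 1) reproduces NHWC (channels_last) strides directly,
+# without permuting a contiguous tensor. Values match the docstring's own example.
+#
+#   >>> import torch
+#   >>> t = torch.empty_permuted((2, 3, 5, 7), (0, 2, 3, 1))
+#   >>> t.stride()
+#   (105, 1, 21, 3)
+#   >>> t.stride() == torch.empty((2, 3, 5, 7), memory_format=torch.channels_last).stride()
+#   True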
+def empty_strided(size: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + empty_strided(size, stride, *, dtype=None, layout=None, device=None, requires_grad=False, pin_memory=False) -> Tensor + + Creates a tensor with the specified :attr:`size` and :attr:`stride` and filled with undefined data. + + .. warning:: + If the constructed tensor is "overlapped" (with multiple indices referring to the same element + in memory) its behavior is undefined. + + .. note:: + If :func:`torch.use_deterministic_algorithms()` and + :attr:`torch.utils.deterministic.fill_uninitialized_memory` are both set to + ``True``, the output tensor is initialized to prevent any possible + nondeterministic behavior from using the data as an input to an operation. + Floating point and complex tensors are filled with NaN, and integer tensors + are filled with the maximum value. + + Args: + size (tuple of int): the shape of the output tensor + stride (tuple of int): the strides of the output tensor + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + pin_memory (bool, optional): If set, returned tensor would be allocated in + the pinned memory. Works only for CPU tensors. Default: ``False``. + + Example:: + + >>> a = torch.empty_strided((2, 3), (1, 2)) + >>> a + tensor([[8.9683e-44, 4.4842e-44, 5.1239e+07], + [0.0000e+00, 0.0000e+00, 3.0705e-41]]) + >>> a.stride() + (1, 2) + >>> a.size() + torch.Size([2, 3]) + """ + ... +@overload +def eq(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + eq(input, other, *, out=None) -> Tensor + + Computes element-wise equality + + The second argument can be a number or a tensor whose shape is + :ref:`broadcastable ` with the first argument. + + Args: + input (Tensor): the tensor to compare + other (Tensor or float): the tensor or value to compare + + Keyword args: + out (Tensor, optional): the output tensor. + + Returns: + A boolean tensor that is True where :attr:`input` is equal to :attr:`other` and False elsewhere + + Example:: + + >>> torch.eq(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]])) + tensor([[ True, False], + [False, True]]) + """ + ... +@overload +def eq(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: + r""" + eq(input, other, *, out=None) -> Tensor + + Computes element-wise equality + + The second argument can be a number or a tensor whose shape is + :ref:`broadcastable ` with the first argument. + + Args: + input (Tensor): the tensor to compare + other (Tensor or float): the tensor or value to compare + + Keyword args: + out (Tensor, optional): the output tensor. 
+ + Returns: + A boolean tensor that is True where :attr:`input` is equal to :attr:`other` and False elsewhere + + Example:: + + >>> torch.eq(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]])) + tensor([[ True, False], + [False, True]]) + """ + ... +def equal(input: Tensor, other: Tensor) -> _bool: + r""" + equal(input, other) -> bool + + ``True`` if two tensors have the same size and elements, ``False`` otherwise. + + Example:: + + >>> torch.equal(torch.tensor([1, 2]), torch.tensor([1, 2])) + True + """ + ... +def erf(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + erf(input, *, out=None) -> Tensor + + Alias for :func:`torch.special.erf`. + """ + ... +def erf_(input: Tensor) -> Tensor: ... +def erfc(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + erfc(input, *, out=None) -> Tensor + + Alias for :func:`torch.special.erfc`. + """ + ... +def erfc_(input: Tensor) -> Tensor: ... +def erfinv(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + erfinv(input, *, out=None) -> Tensor + + Alias for :func:`torch.special.erfinv`. + """ + ... +def exp(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + exp(input, *, out=None) -> Tensor + + Returns a new tensor with the exponential of the elements + of the input tensor :attr:`input`. + + .. math:: + y_{i} = e^{x_{i}} + + Args: + input (Tensor): the input tensor. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> torch.exp(torch.tensor([0, math.log(2.)])) + tensor([ 1., 2.]) + """ + ... +def exp2(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + exp2(input, *, out=None) -> Tensor + + Alias for :func:`torch.special.exp2`. + """ + ... +def exp2_(input: Tensor) -> Tensor: ... +def exp_(input: Tensor) -> Tensor: ... +def expand_copy(input: Tensor, size: Sequence[Union[_int, SymInt]], *, implicit: _bool = False, out: Optional[Tensor] = None) -> Tensor: + r""" + Performs the same operation as :func:`torch.expand`, but all output tensors + are freshly created instead of aliasing the input. + """ + ... +def expm1(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + expm1(input, *, out=None) -> Tensor + + Alias for :func:`torch.special.expm1`. + """ + ... +def expm1_(input: Tensor) -> Tensor: ... +@overload +def eye(n: Union[_int, SymInt], *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + eye(n, m=None, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + Returns a 2-D tensor with ones on the diagonal and zeros elsewhere. + + Args: + n (int): the number of rows + m (int, optional): the number of columns with default being :attr:`n` + + Keyword arguments: + out (Tensor, optional): the output tensor. + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. 
+ requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + + Returns: + Tensor: A 2-D tensor with ones on the diagonal and zeros elsewhere + + Example:: + + >>> torch.eye(3) + tensor([[ 1., 0., 0.], + [ 0., 1., 0.], + [ 0., 0., 1.]]) + """ + ... +@overload +def eye(n: Union[_int, SymInt], m: Union[_int, SymInt], *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + eye(n, m=None, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + Returns a 2-D tensor with ones on the diagonal and zeros elsewhere. + + Args: + n (int): the number of rows + m (int, optional): the number of columns with default being :attr:`n` + + Keyword arguments: + out (Tensor, optional): the output tensor. + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + + Returns: + Tensor: A 2-D tensor with ones on the diagonal and zeros elsewhere + + Example:: + + >>> torch.eye(3) + tensor([[ 1., 0., 0.], + [ 0., 1., 0.], + [ 0., 0., 1.]]) + """ + ... +def fake_quantize_per_channel_affine(input: Tensor, scale: Tensor, zero_point: Tensor, axis: _int, quant_min: _int, quant_max: _int) -> Tensor: + r""" + fake_quantize_per_channel_affine(input, scale, zero_point, axis, quant_min, quant_max) -> Tensor + + Returns a new tensor with the data in :attr:`input` fake quantized per channel using :attr:`scale`, + :attr:`zero_point`, :attr:`quant_min` and :attr:`quant_max`, across the channel specified by :attr:`axis`. + + .. math:: + \text{output} = ( + min( + \text{quant\_max}, + max( + \text{quant\_min}, + \text{std::nearby\_int}(\text{input} / \text{scale}) + \text{zero\_point} + ) + ) - \text{zero\_point} + ) \times \text{scale} + + Args: + input (Tensor): the input value(s), in ``torch.float32`` + scale (Tensor): quantization scale, per channel in ``torch.float32`` + zero_point (Tensor): quantization zero_point, per channel in ``torch.int32`` or ``torch.half`` or ``torch.float32`` + axis (int32): channel axis + quant_min (int64): lower bound of the quantized domain + quant_max (int64): upper bound of the quantized domain + + Returns: + Tensor: A newly fake_quantized per channel ``torch.float32`` tensor + + Example:: + + >>> x = torch.randn(2, 2, 2) + >>> x + tensor([[[-0.2525, -0.0466], + [ 0.3491, -0.2168]], + + [[-0.5906, 1.6258], + [ 0.6444, -0.0542]]]) + >>> scales = (torch.randn(2) + 1) * 0.05 + >>> scales + tensor([0.0475, 0.0486]) + >>> zero_points = torch.zeros(2).to(torch.int32) + >>> zero_points + tensor([0, 0]) + >>> torch.fake_quantize_per_channel_affine(x, scales, zero_points, 1, 0, 255) + tensor([[[0.0000, 0.0000], + [0.3405, 0.0000]], + + [[0.0000, 1.6134], + [0.6323, 0.0000]]]) + """ + ... 
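+# A deterministic sketch of the per-channel fake-quantization formula above, with
+# axis=0 so each row uses its own scale; zero_points are zero, so values simply snap
+# to multiples of the per-row scale. Printed values assume the default four-decimal
+# formatting and may display slightly differently across versions.
+#
+#   >>> import torch
+#   >>> x = torch.tensor([[0.04, 0.26], [0.04, 0.26]])
+#   >>> scales = torch.tensor([0.1, 0.01])
+#   >>> zero_points = torch.zeros(2, dtype=torch.int32)
+#   >>> torch.fake_quantize_per_channel_affine(x, scales, zero_points, 0, 0, 255)
+#   tensor([[0.0000, 0.3000],
+#           [0.0400, 0.2600]])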
+@overload +def fake_quantize_per_tensor_affine(input: Tensor, scale: _float, zero_point: _int, quant_min: _int, quant_max: _int) -> Tensor: + r""" + fake_quantize_per_tensor_affine(input, scale, zero_point, quant_min, quant_max) -> Tensor + + Returns a new tensor with the data in :attr:`input` fake quantized using :attr:`scale`, + :attr:`zero_point`, :attr:`quant_min` and :attr:`quant_max`. + + .. math:: + \text{output} = ( + min( + \text{quant\_max}, + max( + \text{quant\_min}, + \text{std::nearby\_int}(\text{input} / \text{scale}) + \text{zero\_point} + ) + ) - \text{zero\_point} + ) \times \text{scale} + + Args: + input (Tensor): the input value(s), ``torch.float32`` tensor + scale (double scalar or ``float32`` Tensor): quantization scale + zero_point (int64 scalar or ``int32`` Tensor): quantization zero_point + quant_min (int64): lower bound of the quantized domain + quant_max (int64): upper bound of the quantized domain + + Returns: + Tensor: A newly fake_quantized ``torch.float32`` tensor + + Example:: + + >>> x = torch.randn(4) + >>> x + tensor([ 0.0552, 0.9730, 0.3973, -1.0780]) + >>> torch.fake_quantize_per_tensor_affine(x, 0.1, 0, 0, 255) + tensor([0.1000, 1.0000, 0.4000, 0.0000]) + >>> torch.fake_quantize_per_tensor_affine(x, torch.tensor(0.1), torch.tensor(0), 0, 255) + tensor([0.1000, 1.0000, 0.4000, 0.0000]) + """ + ... +@overload +def fake_quantize_per_tensor_affine(input: Tensor, scale: Tensor, zero_point: Tensor, quant_min: _int, quant_max: _int) -> Tensor: + r""" + fake_quantize_per_tensor_affine(input, scale, zero_point, quant_min, quant_max) -> Tensor + + Returns a new tensor with the data in :attr:`input` fake quantized using :attr:`scale`, + :attr:`zero_point`, :attr:`quant_min` and :attr:`quant_max`. + + .. math:: + \text{output} = ( + min( + \text{quant\_max}, + max( + \text{quant\_min}, + \text{std::nearby\_int}(\text{input} / \text{scale}) + \text{zero\_point} + ) + ) - \text{zero\_point} + ) \times \text{scale} + + Args: + input (Tensor): the input value(s), ``torch.float32`` tensor + scale (double scalar or ``float32`` Tensor): quantization scale + zero_point (int64 scalar or ``int32`` Tensor): quantization zero_point + quant_min (int64): lower bound of the quantized domain + quant_max (int64): upper bound of the quantized domain + + Returns: + Tensor: A newly fake_quantized ``torch.float32`` tensor + + Example:: + + >>> x = torch.randn(4) + >>> x + tensor([ 0.0552, 0.9730, 0.3973, -1.0780]) + >>> torch.fake_quantize_per_tensor_affine(x, 0.1, 0, 0, 255) + tensor([0.1000, 1.0000, 0.4000, 0.0000]) + >>> torch.fake_quantize_per_tensor_affine(x, torch.tensor(0.1), torch.tensor(0), 0, 255) + tensor([0.1000, 1.0000, 0.4000, 0.0000]) + """ + ... +def fbgemm_linear_fp16_weight(input: Tensor, packed_weight: Tensor, bias: Tensor) -> Tensor: ... +def fbgemm_linear_fp16_weight_fp32_activation(input: Tensor, packed_weight: Tensor, bias: Tensor) -> Tensor: ... +def fbgemm_linear_int8_weight(input: Tensor, weight: Tensor, packed: Tensor, col_offsets: Tensor, weight_scale: Union[Number, _complex], weight_zero_point: Union[Number, _complex], bias: Tensor) -> Tensor: ... +def fbgemm_linear_int8_weight_fp32_activation(input: Tensor, weight: Tensor, packed: Tensor, col_offsets: Tensor, weight_scale: Union[Number, _complex], weight_zero_point: Union[Number, _complex], bias: Tensor) -> Tensor: ... +def fbgemm_linear_quantize_weight(input: Tensor) -> Tuple[Tensor, Tensor, _float, _int]: ... +def fbgemm_pack_gemm_matrix_fp16(input: Tensor) -> Tensor: ... 
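+# A matching per-tensor sketch (scale=0.1, zero_point=0, quant range [0, 255]):
+# negatives clamp to quant_min and large values clamp to quant_max * scale.
+# Display formatting assumes the default print options.
+#
+#   >>> import torch
+#   >>> x = torch.tensor([-0.30, 0.04, 0.26, 30.0])
+#   >>> torch.fake_quantize_per_tensor_affine(x, 0.1, 0, 0, 255)
+#   tensor([ 0.0000,  0.0000,  0.3000, 25.5000])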
+@overload +def fbgemm_pack_quantized_matrix(input: Tensor) -> Tensor: ... +@overload +def fbgemm_pack_quantized_matrix(input: Tensor, K: _int, N: _int) -> Tensor: ... +def feature_alpha_dropout(input: Tensor, p: _float, train: _bool) -> Tensor: ... +def feature_alpha_dropout_(input: Tensor, p: _float, train: _bool) -> Tensor: ... +def feature_dropout(input: Tensor, p: _float, train: _bool) -> Tensor: ... +def feature_dropout_(input: Tensor, p: _float, train: _bool) -> Tensor: ... +@overload +def fill(input: Tensor, value: Tensor) -> Tensor: ... +@overload +def fill(input: Tensor, value: Union[Number, _complex]) -> Tensor: ... +@overload +def fill_(input: Tensor, value: Tensor) -> Tensor: ... +@overload +def fill_(input: Tensor, value: Union[Number, _complex]) -> Tensor: ... +def fix(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + fix(input, *, out=None) -> Tensor + + Alias for :func:`torch.trunc` + """ + ... +def fix_(input: Tensor) -> Tensor: ... +@overload +def flatten(input: Tensor, start_dim: _int = 0, end_dim: _int = -1) -> Tensor: + r""" + flatten(input, start_dim=0, end_dim=-1) -> Tensor + + Flattens :attr:`input` by reshaping it into a one-dimensional tensor. If :attr:`start_dim` or :attr:`end_dim` + are passed, only dimensions starting with :attr:`start_dim` and ending with :attr:`end_dim` are flattened. + The order of elements in :attr:`input` is unchanged. + + Unlike NumPy's flatten, which always copies input's data, this function may return the original object, a view, + or copy. If no dimensions are flattened, then the original object :attr:`input` is returned. Otherwise, if input can + be viewed as the flattened shape, then that view is returned. Finally, only if the input cannot be viewed as the + flattened shape is input's data copied. See :meth:`torch.Tensor.view` for details on when a view will be returned. + + .. note:: + Flattening a zero-dimensional tensor will return a one-dimensional view. + + Args: + input (Tensor): the input tensor. + start_dim (int): the first dim to flatten + end_dim (int): the last dim to flatten + + Example:: + + >>> t = torch.tensor([[[1, 2], + ... [3, 4]], + ... [[5, 6], + ... [7, 8]]]) + >>> torch.flatten(t) + tensor([1, 2, 3, 4, 5, 6, 7, 8]) + >>> torch.flatten(t, start_dim=1) + tensor([[1, 2, 3, 4], + [5, 6, 7, 8]]) + """ + ... +@overload +def flatten(input: Tensor, start_dim: _int, end_dim: _int, out_dim: Union[str, ellipsis, None]) -> Tensor: + r""" + flatten(input, start_dim=0, end_dim=-1) -> Tensor + + Flattens :attr:`input` by reshaping it into a one-dimensional tensor. If :attr:`start_dim` or :attr:`end_dim` + are passed, only dimensions starting with :attr:`start_dim` and ending with :attr:`end_dim` are flattened. + The order of elements in :attr:`input` is unchanged. + + Unlike NumPy's flatten, which always copies input's data, this function may return the original object, a view, + or copy. If no dimensions are flattened, then the original object :attr:`input` is returned. Otherwise, if input can + be viewed as the flattened shape, then that view is returned. Finally, only if the input cannot be viewed as the + flattened shape is input's data copied. See :meth:`torch.Tensor.view` for details on when a view will be returned. + + .. note:: + Flattening a zero-dimensional tensor will return a one-dimensional view. + + Args: + input (Tensor): the input tensor. + start_dim (int): the first dim to flatten + end_dim (int): the last dim to flatten + + Example:: + + >>> t = torch.tensor([[[1, 2], + ... [3, 4]], + ... 
[[5, 6], + ... [7, 8]]]) + >>> torch.flatten(t) + tensor([1, 2, 3, 4, 5, 6, 7, 8]) + >>> torch.flatten(t, start_dim=1) + tensor([[1, 2, 3, 4], + [5, 6, 7, 8]]) + """ + ... +@overload +def flatten(input: Tensor, start_dim: Union[str, ellipsis, None], end_dim: Union[str, ellipsis, None], out_dim: Union[str, ellipsis, None]) -> Tensor: + r""" + flatten(input, start_dim=0, end_dim=-1) -> Tensor + + Flattens :attr:`input` by reshaping it into a one-dimensional tensor. If :attr:`start_dim` or :attr:`end_dim` + are passed, only dimensions starting with :attr:`start_dim` and ending with :attr:`end_dim` are flattened. + The order of elements in :attr:`input` is unchanged. + + Unlike NumPy's flatten, which always copies input's data, this function may return the original object, a view, + or copy. If no dimensions are flattened, then the original object :attr:`input` is returned. Otherwise, if input can + be viewed as the flattened shape, then that view is returned. Finally, only if the input cannot be viewed as the + flattened shape is input's data copied. See :meth:`torch.Tensor.view` for details on when a view will be returned. + + .. note:: + Flattening a zero-dimensional tensor will return a one-dimensional view. + + Args: + input (Tensor): the input tensor. + start_dim (int): the first dim to flatten + end_dim (int): the last dim to flatten + + Example:: + + >>> t = torch.tensor([[[1, 2], + ... [3, 4]], + ... [[5, 6], + ... [7, 8]]]) + >>> torch.flatten(t) + tensor([1, 2, 3, 4, 5, 6, 7, 8]) + >>> torch.flatten(t, start_dim=1) + tensor([[1, 2, 3, 4], + [5, 6, 7, 8]]) + """ + ... +@overload +def flatten(input: Tensor, dims: Sequence[Union[str, ellipsis, None]], out_dim: Union[str, ellipsis, None]) -> Tensor: + r""" + flatten(input, start_dim=0, end_dim=-1) -> Tensor + + Flattens :attr:`input` by reshaping it into a one-dimensional tensor. If :attr:`start_dim` or :attr:`end_dim` + are passed, only dimensions starting with :attr:`start_dim` and ending with :attr:`end_dim` are flattened. + The order of elements in :attr:`input` is unchanged. + + Unlike NumPy's flatten, which always copies input's data, this function may return the original object, a view, + or copy. If no dimensions are flattened, then the original object :attr:`input` is returned. Otherwise, if input can + be viewed as the flattened shape, then that view is returned. Finally, only if the input cannot be viewed as the + flattened shape is input's data copied. See :meth:`torch.Tensor.view` for details on when a view will be returned. + + .. note:: + Flattening a zero-dimensional tensor will return a one-dimensional view. + + Args: + input (Tensor): the input tensor. + start_dim (int): the first dim to flatten + end_dim (int): the last dim to flatten + + Example:: + + >>> t = torch.tensor([[[1, 2], + ... [3, 4]], + ... [[5, 6], + ... [7, 8]]]) + >>> torch.flatten(t) + tensor([1, 2, 3, 4, 5, 6, 7, 8]) + >>> torch.flatten(t, start_dim=1) + tensor([[1, 2, 3, 4], + [5, 6, 7, 8]]) + """ + ... +def flip(input: Tensor, dims: _size) -> Tensor: + r""" + flip(input, dims) -> Tensor + + Reverse the order of an n-D tensor along given axis in dims. + + .. note:: + `torch.flip` makes a copy of :attr:`input`'s data. This is different from NumPy's `np.flip`, + which returns a view in constant time. Since copying a tensor's data is more work than viewing that data, + `torch.flip` is expected to be slower than `np.flip`. + + Args: + input (Tensor): the input tensor. 
+ dims (a list or tuple): axis to flip on + + Example:: + + >>> x = torch.arange(8).view(2, 2, 2) + >>> x + tensor([[[ 0, 1], + [ 2, 3]], + + [[ 4, 5], + [ 6, 7]]]) + >>> torch.flip(x, [0, 1]) + tensor([[[ 6, 7], + [ 4, 5]], + + [[ 2, 3], + [ 0, 1]]]) + """ + ... +def fliplr(input: Tensor) -> Tensor: + r""" + fliplr(input) -> Tensor + + Flip tensor in the left/right direction, returning a new tensor. + + Flip the entries in each row in the left/right direction. + Columns are preserved, but appear in a different order than before. + + Note: + Requires the tensor to be at least 2-D. + + .. note:: + `torch.fliplr` makes a copy of :attr:`input`'s data. This is different from NumPy's `np.fliplr`, + which returns a view in constant time. Since copying a tensor's data is more work than viewing that data, + `torch.fliplr` is expected to be slower than `np.fliplr`. + + Args: + input (Tensor): Must be at least 2-dimensional. + + Example:: + + >>> x = torch.arange(4).view(2, 2) + >>> x + tensor([[0, 1], + [2, 3]]) + >>> torch.fliplr(x) + tensor([[1, 0], + [3, 2]]) + """ + ... +def flipud(input: Tensor) -> Tensor: + r""" + flipud(input) -> Tensor + + Flip tensor in the up/down direction, returning a new tensor. + + Flip the entries in each column in the up/down direction. + Rows are preserved, but appear in a different order than before. + + Note: + Requires the tensor to be at least 1-D. + + .. note:: + `torch.flipud` makes a copy of :attr:`input`'s data. This is different from NumPy's `np.flipud`, + which returns a view in constant time. Since copying a tensor's data is more work than viewing that data, + `torch.flipud` is expected to be slower than `np.flipud`. + + Args: + input (Tensor): Must be at least 1-dimensional. + + Example:: + + >>> x = torch.arange(4).view(2, 2) + >>> x + tensor([[0, 1], + [2, 3]]) + >>> torch.flipud(x) + tensor([[2, 3], + [0, 1]]) + """ + ... +@overload +def float_power(input: Tensor, exponent: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + float_power(input, exponent, *, out=None) -> Tensor + + Raises :attr:`input` to the power of :attr:`exponent`, elementwise, in double precision. + If neither input is complex returns a ``torch.float64`` tensor, + and if one or more inputs is complex returns a ``torch.complex128`` tensor. + + .. note:: + This function always computes in double precision, unlike :func:`torch.pow`, + which implements more typical :ref:`type promotion `. + This is useful when the computation needs to be performed in a wider or more precise dtype, + or the results of the computation may contain fractional values not representable in the input dtypes, + like when an integer base is raised to a negative integer exponent. + + Args: + input (Tensor or Number): the base value(s) + exponent (Tensor or Number): the exponent value(s) + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randint(10, (4,)) + >>> a + tensor([6, 4, 7, 1]) + >>> torch.float_power(a, 2) + tensor([36., 16., 49., 1.], dtype=torch.float64) + + >>> a = torch.arange(1, 5) + >>> a + tensor([ 1, 2, 3, 4]) + >>> exp = torch.tensor([2, -3, 4, -5]) + >>> exp + tensor([ 2, -3, 4, -5]) + >>> torch.float_power(a, exp) + tensor([1.0000e+00, 1.2500e-01, 8.1000e+01, 9.7656e-04], dtype=torch.float64) + """ + ... 
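+# A deterministic sketch of torch.float_power's documented behavior: the computation
+# runs in float64, so an integer base raised to a negative exponent still yields
+# fractional results (shown here with the default four-decimal formatting).
+#
+#   >>> import torch
+#   >>> torch.float_power(torch.tensor([1, 2, 3, 4]), -2)
+#   tensor([1.0000, 0.2500, 0.1111, 0.0625], dtype=torch.float64)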
+@overload +def float_power(self: Union[Number, _complex], exponent: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + float_power(input, exponent, *, out=None) -> Tensor + + Raises :attr:`input` to the power of :attr:`exponent`, elementwise, in double precision. + If neither input is complex returns a ``torch.float64`` tensor, + and if one or more inputs is complex returns a ``torch.complex128`` tensor. + + .. note:: + This function always computes in double precision, unlike :func:`torch.pow`, + which implements more typical :ref:`type promotion `. + This is useful when the computation needs to be performed in a wider or more precise dtype, + or the results of the computation may contain fractional values not representable in the input dtypes, + like when an integer base is raised to a negative integer exponent. + + Args: + input (Tensor or Number): the base value(s) + exponent (Tensor or Number): the exponent value(s) + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randint(10, (4,)) + >>> a + tensor([6, 4, 7, 1]) + >>> torch.float_power(a, 2) + tensor([36., 16., 49., 1.], dtype=torch.float64) + + >>> a = torch.arange(1, 5) + >>> a + tensor([ 1, 2, 3, 4]) + >>> exp = torch.tensor([2, -3, 4, -5]) + >>> exp + tensor([ 2, -3, 4, -5]) + >>> torch.float_power(a, exp) + tensor([1.0000e+00, 1.2500e-01, 8.1000e+01, 9.7656e-04], dtype=torch.float64) + """ + ... +@overload +def float_power(input: Tensor, exponent: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: + r""" + float_power(input, exponent, *, out=None) -> Tensor + + Raises :attr:`input` to the power of :attr:`exponent`, elementwise, in double precision. + If neither input is complex returns a ``torch.float64`` tensor, + and if one or more inputs is complex returns a ``torch.complex128`` tensor. + + .. note:: + This function always computes in double precision, unlike :func:`torch.pow`, + which implements more typical :ref:`type promotion `. + This is useful when the computation needs to be performed in a wider or more precise dtype, + or the results of the computation may contain fractional values not representable in the input dtypes, + like when an integer base is raised to a negative integer exponent. + + Args: + input (Tensor or Number): the base value(s) + exponent (Tensor or Number): the exponent value(s) + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randint(10, (4,)) + >>> a + tensor([6, 4, 7, 1]) + >>> torch.float_power(a, 2) + tensor([36., 16., 49., 1.], dtype=torch.float64) + + >>> a = torch.arange(1, 5) + >>> a + tensor([ 1, 2, 3, 4]) + >>> exp = torch.tensor([2, -3, 4, -5]) + >>> exp + tensor([ 2, -3, 4, -5]) + >>> torch.float_power(a, exp) + tensor([1.0000e+00, 1.2500e-01, 8.1000e+01, 9.7656e-04], dtype=torch.float64) + """ + ... +def floor(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + floor(input, *, out=None) -> Tensor + + Returns a new tensor with the floor of the elements of :attr:`input`, + the largest integer less than or equal to each element. + + For integer inputs, follows the array-api convention of returning a + copy of the input tensor. + + .. math:: + \text{out}_{i} = \left\lfloor \text{input}_{i} \right\rfloor + + Args: + input (Tensor): the input tensor. + + Keyword args: + out (Tensor, optional): the output tensor. 
+ + Example:: + + >>> a = torch.randn(4) + >>> a + tensor([-0.8166, 1.5308, -0.2530, -0.2091]) + >>> torch.floor(a) + tensor([-1., 1., -1., -1.]) + """ + ... +def floor_(input: Tensor) -> Tensor: ... +def floor_divide(input: Union[Tensor, Number], other: Union[Tensor, Number], *, out: Optional[Tensor] = None) -> Tensor: + r""" + floor_divide(input, other, *, out=None) -> Tensor + + .. note:: + + Before PyTorch 1.13 :func:`torch.floor_divide` incorrectly performed + truncation division. To restore the previous behavior use + :func:`torch.div` with ``rounding_mode='trunc'``. + + Computes :attr:`input` divided by :attr:`other`, elementwise, and floors + the result. + + .. math:: + \text{{out}}_i = \text{floor} \left( \frac{{\text{{input}}_i}}{{\text{{other}}_i}} \right) + + + + Supports broadcasting to a common shape, type promotion, and integer and float inputs. + + Args: + input (Tensor or Number): the dividend + other (Tensor or Number): the divisor + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.tensor([4.0, 3.0]) + >>> b = torch.tensor([2.0, 2.0]) + >>> torch.floor_divide(a, b) + tensor([2.0, 1.0]) + >>> torch.floor_divide(a, 1.4) + tensor([2.0, 2.0]) + """ + ... +def fmax(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + fmax(input, other, *, out=None) -> Tensor + + Computes the element-wise maximum of :attr:`input` and :attr:`other`. + + This is like :func:`torch.maximum` except it handles NaNs differently: + if exactly one of the two elements being compared is a NaN then the non-NaN element is taken as the maximum. + Only if both elements are NaN is NaN propagated. + + This function is a wrapper around C++'s ``std::fmax`` and is similar to NumPy's ``fmax`` function. + + Supports :ref:`broadcasting to a common shape `, + :ref:`type promotion `, and integer and floating-point inputs. + + Args: + input (Tensor): the input tensor. + other (Tensor): the second input tensor + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.tensor([9.7, float('nan'), 3.1, float('nan')]) + >>> b = torch.tensor([-2.2, 0.5, float('nan'), float('nan')]) + >>> torch.fmax(a, b) + tensor([9.7000, 0.5000, 3.1000, nan]) + """ + ... +def fmin(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + fmin(input, other, *, out=None) -> Tensor + + Computes the element-wise minimum of :attr:`input` and :attr:`other`. + + This is like :func:`torch.minimum` except it handles NaNs differently: + if exactly one of the two elements being compared is a NaN then the non-NaN element is taken as the minimum. + Only if both elements are NaN is NaN propagated. + + This function is a wrapper around C++'s ``std::fmin`` and is similar to NumPy's ``fmin`` function. + + Supports :ref:`broadcasting to a common shape `, + :ref:`type promotion `, and integer and floating-point inputs. + + Args: + input (Tensor): the input tensor. + other (Tensor): the second input tensor + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.tensor([2.2, float('nan'), 2.1, float('nan')]) + >>> b = torch.tensor([-9.3, 0.1, float('nan'), float('nan')]) + >>> torch.fmin(a, b) + tensor([-9.3000, 0.1000, 2.1000, nan]) + """ + ... +@overload +def fmod(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + fmod(input, other, *, out=None) -> Tensor + + Applies C++'s `std::fmod `_ entrywise. 
+ The result has the same sign as the dividend :attr:`input` and its absolute value + is less than that of :attr:`other`. + + This function may be defined in terms of :func:`torch.div` as + + .. code:: python + + torch.fmod(a, b) == a - a.div(b, rounding_mode="trunc") * b + + Supports :ref:`broadcasting to a common shape `, + :ref:`type promotion `, and integer and float inputs. + + .. note:: + + When the divisor is zero, returns ``NaN`` for floating point dtypes + on both CPU and GPU; raises ``RuntimeError`` for integer division by + zero on CPU; Integer division by zero on GPU may return any value. + + .. note:: + + Complex inputs are not supported. In some cases, it is not mathematically + possible to satisfy the definition of a modulo operation with complex numbers. + + .. seealso:: + + :func:`torch.remainder` which implements Python's modulus operator. + This one is defined using division rounding down the result. + + Args: + input (Tensor): the dividend + other (Tensor or Scalar): the divisor + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> torch.fmod(torch.tensor([-3., -2, -1, 1, 2, 3]), 2) + tensor([-1., -0., -1., 1., 0., 1.]) + >>> torch.fmod(torch.tensor([1, 2, 3, 4, 5]), -1.5) + tensor([1.0000, 0.5000, 0.0000, 1.0000, 0.5000]) + """ + ... +@overload +def fmod(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: + r""" + fmod(input, other, *, out=None) -> Tensor + + Applies C++'s `std::fmod `_ entrywise. + The result has the same sign as the dividend :attr:`input` and its absolute value + is less than that of :attr:`other`. + + This function may be defined in terms of :func:`torch.div` as + + .. code:: python + + torch.fmod(a, b) == a - a.div(b, rounding_mode="trunc") * b + + Supports :ref:`broadcasting to a common shape `, + :ref:`type promotion `, and integer and float inputs. + + .. note:: + + When the divisor is zero, returns ``NaN`` for floating point dtypes + on both CPU and GPU; raises ``RuntimeError`` for integer division by + zero on CPU; Integer division by zero on GPU may return any value. + + .. note:: + + Complex inputs are not supported. In some cases, it is not mathematically + possible to satisfy the definition of a modulo operation with complex numbers. + + .. seealso:: + + :func:`torch.remainder` which implements Python's modulus operator. + This one is defined using division rounding down the result. + + Args: + input (Tensor): the dividend + other (Tensor or Scalar): the divisor + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> torch.fmod(torch.tensor([-3., -2, -1, 1, 2, 3]), 2) + tensor([-1., -0., -1., 1., 0., 1.]) + >>> torch.fmod(torch.tensor([1, 2, 3, 4, 5]), -1.5) + tensor([1.0000, 0.5000, 0.0000, 1.0000, 0.5000]) + """ + ... +def frac(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + frac(input, *, out=None) -> Tensor + + Computes the fractional portion of each element in :attr:`input`. + + .. math:: + \text{out}_{i} = \text{input}_{i} - \left\lfloor |\text{input}_{i}| \right\rfloor * \operatorname{sgn}(\text{input}_{i}) + + Example:: + + >>> torch.frac(torch.tensor([1, 2.5, -3.2])) + tensor([ 0.0000, 0.5000, -0.2000]) + """ + ... +def frac_(input: Tensor) -> Tensor: ... 
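+# NOTE: illustrative sketch, not part of the generated stub. The fmod overloads
+# above point to torch.remainder for Python-style modulus; a minimal doctest-style
+# comparison of the two sign conventions (outputs assume standard PyTorch semantics):
+#
+# >>> x = torch.tensor([-3., -2., -1., 1., 2., 3.])
+# >>> torch.fmod(x, 2)       # result takes the sign of the dividend
+# tensor([-1., -0., -1.,  1.,  0.,  1.])
+# >>> torch.remainder(x, 2)  # result takes the sign of the divisor
+# tensor([1., 0., 1., 1., 0., 1.])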
+def frexp(input: Tensor, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.frexp: + r""" + frexp(input, *, out=None) -> (Tensor mantissa, Tensor exponent) + + Decomposes :attr:`input` into mantissa and exponent tensors + such that :math:`\text{input} = \text{mantissa} \times 2^{\text{exponent}}`. + + The range of mantissa is the open interval (-1, 1). + + Supports float inputs. + + Args: + input (Tensor): the input tensor + + + Keyword args: + out (tuple, optional): the output tensors + + Example:: + + >>> x = torch.arange(9.) + >>> mantissa, exponent = torch.frexp(x) + >>> mantissa + tensor([0.0000, 0.5000, 0.5000, 0.7500, 0.5000, 0.6250, 0.7500, 0.8750, 0.5000]) + >>> exponent + tensor([0, 1, 2, 2, 3, 3, 3, 3, 4], dtype=torch.int32) + >>> torch.ldexp(mantissa, exponent) + tensor([0., 1., 2., 3., 4., 5., 6., 7., 8.]) + """ + ... +def frobenius_norm(input: Tensor, dim: Union[_int, _size], keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: ... +def from_file(filename: str, shared: Optional[_bool] = None, size: Optional[_int] = 0, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + from_file(filename, shared=None, size=0, *, dtype=None, layout=None, device=None, pin_memory=False) + + Creates a CPU tensor with a storage backed by a memory-mapped file. + + If ``shared`` is True, then memory is shared between processes. All changes are written to the file. + If ``shared`` is False, then changes to the tensor do not affect the file. + + ``size`` is the number of elements in the Tensor. If ``shared`` is ``False``, then the file must contain + at least ``size * sizeof(dtype)`` bytes. If ``shared`` is ``True`` the file will be created if needed. + + .. note:: + Only CPU tensors can be mapped to files. + + .. note:: + For now, tensors with storages backed by a memory-mapped file cannot be created in pinned memory. + + + Args: + filename (str): file name to map + shared (bool): whether to share memory (whether ``MAP_SHARED`` or ``MAP_PRIVATE`` is passed to the + underlying `mmap(2) call `_) + size (int): number of elements in the tensor + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + pin_memory (bool, optional): If set, returned tensor would be allocated in + the pinned memory. Works only for CPU tensors. Default: ``False``. + + Example:: + >>> t = torch.randn(2, 5, dtype=torch.float64) + >>> t.numpy().tofile('storage.pt') + >>> t_mapped = torch.from_file('storage.pt', shared=False, size=10, dtype=torch.float64) + """ + ... +def from_numpy(ndarray) -> Tensor: + r""" + from_numpy(ndarray) -> Tensor + + Creates a :class:`Tensor` from a :class:`numpy.ndarray`. + + The returned tensor and :attr:`ndarray` share the same memory. Modifications to + the tensor will be reflected in the :attr:`ndarray` and vice versa. 
The returned + tensor is not resizable. + + It currently accepts :attr:`ndarray` with dtypes of ``numpy.float64``, + ``numpy.float32``, ``numpy.float16``, ``numpy.complex64``, ``numpy.complex128``, + ``numpy.int64``, ``numpy.int32``, ``numpy.int16``, ``numpy.int8``, ``numpy.uint8``, + and ``bool``. + + .. warning:: + Writing to a tensor created from a read-only NumPy array is not supported and will result in undefined behavior. + + Example:: + + >>> a = numpy.array([1, 2, 3]) + >>> t = torch.from_numpy(a) + >>> t + tensor([ 1, 2, 3]) + >>> t[0] = -1 + >>> a + array([-1, 2, 3]) + """ + ... +def frombuffer(buffer: Any, *, dtype: _dtype, count: int = -1, offset: int = 0, requires_grad: _bool = False) -> Tensor: + r""" + frombuffer(buffer, *, dtype, count=-1, offset=0, requires_grad=False) -> Tensor + + Creates a 1-dimensional :class:`Tensor` from an object that implements + the Python buffer protocol. + + Skips the first :attr:`offset` bytes in the buffer, and interprets the rest of + the raw bytes as a 1-dimensional tensor of type :attr:`dtype` with :attr:`count` + elements. + + Note that either of the following must be true: + + 1. :attr:`count` is a positive non-zero number, and the total number of bytes + in the buffer is more than :attr:`offset` plus :attr:`count` times the size + (in bytes) of :attr:`dtype`. + + 2. :attr:`count` is negative, and the length (number of bytes) of the buffer + subtracted by the :attr:`offset` is a multiple of the size (in bytes) of + :attr:`dtype`. + + The returned tensor and buffer share the same memory. Modifications to + the tensor will be reflected in the buffer and vice versa. The returned + tensor is not resizable. + + .. note:: + This function increments the reference count for the object that + owns the shared memory. Therefore, such memory will not be deallocated + before the returned tensor goes out of scope. + + .. warning:: + This function's behavior is undefined when passed an object implementing + the buffer protocol whose data is not on the CPU. Doing so is likely to + cause a segmentation fault. + + .. warning:: + This function does not try to infer the :attr:`dtype` (hence, it is not + optional). Passing a different :attr:`dtype` than its source may result + in unexpected behavior. + + Args: + buffer (object): a Python object that exposes the buffer interface. + + Keyword args: + dtype (:class:`torch.dtype`): the desired data type of returned tensor. + count (int, optional): the number of desired elements to be read. + If negative, all the elements (until the end of the buffer) will be + read. Default: -1. + offset (int, optional): the number of bytes to skip at the start of + the buffer. Default: 0. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + + Example:: + + >>> import array + >>> a = array.array('i', [1, 2, 3]) + >>> t = torch.frombuffer(a, dtype=torch.int32) + >>> t + tensor([ 1, 2, 3]) + >>> t[0] = -1 + >>> a + array([-1, 2, 3]) + + >>> # Interprets the signed char bytes as 32-bit integers. + >>> # Each 4 signed char elements will be interpreted as + >>> # 1 signed 32-bit integer. + >>> import array + >>> a = array.array('b', [-1, 0, 0, 0]) + >>> torch.frombuffer(a, dtype=torch.int32) + tensor([255], dtype=torch.int32) + """ + ... 
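+# NOTE: illustrative sketch, not part of the generated stub. The frombuffer
+# docstring above describes offset in bytes and count in elements; a short
+# doctest-style example, assuming native byte order for both the array and the dtype:
+#
+# >>> import array
+# >>> a = array.array('h', [1, 2, 3, 4])  # four int16 values -> 8 bytes
+# >>> torch.frombuffer(a, dtype=torch.int16, offset=2, count=2)  # skip 2 bytes, read 2 elements
+# tensor([2, 3], dtype=torch.int16)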
+@overload +def full(size: _size, fill_value: Union[Number, _complex], *, out: Optional[Tensor] = None, layout: _layout = strided, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, pin_memory: _bool = False) -> Tensor: + r""" + full(size, fill_value, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + Creates a tensor of size :attr:`size` filled with :attr:`fill_value`. The + tensor's dtype is inferred from :attr:`fill_value`. + + Args: + size (int...): a list, tuple, or :class:`torch.Size` of integers defining the + shape of the output tensor. + fill_value (Scalar): the value to fill the output tensor with. + + Keyword args: + out (Tensor, optional): the output tensor. + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + + Example:: + + >>> torch.full((2, 3), 3.141592) + tensor([[ 3.1416, 3.1416, 3.1416], + [ 3.1416, 3.1416, 3.1416]]) + """ + ... +@overload +def full(size: _size, fill_value: Union[Number, _complex], *, names: List[Union[str, None]], layout: _layout = strided, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, pin_memory: _bool = False) -> Tensor: + r""" + full(size, fill_value, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + Creates a tensor of size :attr:`size` filled with :attr:`fill_value`. The + tensor's dtype is inferred from :attr:`fill_value`. + + Args: + size (int...): a list, tuple, or :class:`torch.Size` of integers defining the + shape of the output tensor. + fill_value (Scalar): the value to fill the output tensor with. + + Keyword args: + out (Tensor, optional): the output tensor. + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + + Example:: + + >>> torch.full((2, 3), 3.141592) + tensor([[ 3.1416, 3.1416, 3.1416], + [ 3.1416, 3.1416, 3.1416]]) + """ + ... 
+@overload +def full(size: Sequence[Union[_int, SymInt]], fill_value: Union[Number, _complex], *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + full(size, fill_value, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + Creates a tensor of size :attr:`size` filled with :attr:`fill_value`. The + tensor's dtype is inferred from :attr:`fill_value`. + + Args: + size (int...): a list, tuple, or :class:`torch.Size` of integers defining the + shape of the output tensor. + fill_value (Scalar): the value to fill the output tensor with. + + Keyword args: + out (Tensor, optional): the output tensor. + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + + Example:: + + >>> torch.full((2, 3), 3.141592) + tensor([[ 3.1416, 3.1416, 3.1416], + [ 3.1416, 3.1416, 3.1416]]) + """ + ... +@overload +def full(size: _size, fill_value: Union[Number, _complex], *, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + full(size, fill_value, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + Creates a tensor of size :attr:`size` filled with :attr:`fill_value`. The + tensor's dtype is inferred from :attr:`fill_value`. + + Args: + size (int...): a list, tuple, or :class:`torch.Size` of integers defining the + shape of the output tensor. + fill_value (Scalar): the value to fill the output tensor with. + + Keyword args: + out (Tensor, optional): the output tensor. + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + + Example:: + + >>> torch.full((2, 3), 3.141592) + tensor([[ 3.1416, 3.1416, 3.1416], + [ 3.1416, 3.1416, 3.1416]]) + """ + ... 
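+# NOTE: illustrative sketch, not part of the generated stub. The full overloads
+# above state that the dtype is inferred from fill_value when dtype is not given;
+# a minimal doctest-style example, assuming the default floating dtype is torch.float32:
+#
+# >>> torch.full((2, 2), 7).dtype     # integral fill value -> torch.int64
+# torch.int64
+# >>> torch.full((2, 2), 7.0).dtype   # floating fill value -> default float dtype
+# torch.float32
+# >>> torch.full((2, 2), True).dtype  # boolean fill value -> torch.bool
+# torch.bool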
+def full_like(input: Tensor, fill_value: Union[Number, _complex], *, memory_format: Optional[memory_format] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + full_like(input, fill_value, \*, dtype=None, layout=torch.strided, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor + + Returns a tensor with the same size as :attr:`input` filled with :attr:`fill_value`. + ``torch.full_like(input, fill_value)`` is equivalent to + ``torch.full(input.size(), fill_value, dtype=input.dtype, layout=input.layout, device=input.device)``. + + Args: + input (Tensor): the size of :attr:`input` will determine size of the output tensor. + fill_value: the number to fill the output tensor with. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned Tensor. + Default: if ``None``, defaults to the dtype of :attr:`input`. + layout (:class:`torch.layout`, optional): the desired layout of returned tensor. + Default: if ``None``, defaults to the layout of :attr:`input`. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, defaults to the device of :attr:`input`. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + memory_format (:class:`torch.memory_format`, optional): the desired memory format of + returned Tensor. Default: ``torch.preserve_format``. + """ + ... +def fused_moving_avg_obs_fake_quant(input: Tensor, observer_on: Tensor, fake_quant_on: Tensor, running_min: Tensor, running_max: Tensor, scale: Tensor, zero_point: Tensor, averaging_const: _float, quant_min: _int, quant_max: _int, ch_axis: _int, per_row_fake_quant: _bool = False, symmetric_quant: _bool = False) -> Tensor: ... +@overload +def gather(input: Tensor, dim: _int, index: Tensor, *, sparse_grad: _bool = False, out: Optional[Tensor] = None) -> Tensor: + r""" + gather(input, dim, index, *, sparse_grad=False, out=None) -> Tensor + + Gathers values along an axis specified by `dim`. + + For a 3-D tensor the output is specified by:: + + out[i][j][k] = input[index[i][j][k]][j][k] # if dim == 0 + out[i][j][k] = input[i][index[i][j][k]][k] # if dim == 1 + out[i][j][k] = input[i][j][index[i][j][k]] # if dim == 2 + + :attr:`input` and :attr:`index` must have the same number of dimensions. + It is also required that ``index.size(d) <= input.size(d)`` for all + dimensions ``d != dim``. :attr:`out` will have the same shape as :attr:`index`. + Note that ``input`` and ``index`` do not broadcast against each other. + + Args: + input (Tensor): the source tensor + dim (int): the axis along which to index + index (LongTensor): the indices of elements to gather + + Keyword arguments: + sparse_grad (bool, optional): If ``True``, gradient w.r.t. :attr:`input` will be a sparse tensor. + out (Tensor, optional): the destination tensor + + Example:: + + >>> t = torch.tensor([[1, 2], [3, 4]]) + >>> torch.gather(t, 1, torch.tensor([[0, 0], [1, 0]])) + tensor([[ 1, 1], + [ 4, 3]]) + """ + ... +@overload +def gather(input: Tensor, dim: Union[str, ellipsis, None], index: Tensor, *, sparse_grad: _bool = False, out: Optional[Tensor] = None) -> Tensor: + r""" + gather(input, dim, index, *, sparse_grad=False, out=None) -> Tensor + + Gathers values along an axis specified by `dim`. 
+ + For a 3-D tensor the output is specified by:: + + out[i][j][k] = input[index[i][j][k]][j][k] # if dim == 0 + out[i][j][k] = input[i][index[i][j][k]][k] # if dim == 1 + out[i][j][k] = input[i][j][index[i][j][k]] # if dim == 2 + + :attr:`input` and :attr:`index` must have the same number of dimensions. + It is also required that ``index.size(d) <= input.size(d)`` for all + dimensions ``d != dim``. :attr:`out` will have the same shape as :attr:`index`. + Note that ``input`` and ``index`` do not broadcast against each other. + + Args: + input (Tensor): the source tensor + dim (int): the axis along which to index + index (LongTensor): the indices of elements to gather + + Keyword arguments: + sparse_grad (bool, optional): If ``True``, gradient w.r.t. :attr:`input` will be a sparse tensor. + out (Tensor, optional): the destination tensor + + Example:: + + >>> t = torch.tensor([[1, 2], [3, 4]]) + >>> torch.gather(t, 1, torch.tensor([[0, 0], [1, 0]])) + tensor([[ 1, 1], + [ 4, 3]]) + """ + ... +def gcd(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + gcd(input, other, *, out=None) -> Tensor + + Computes the element-wise greatest common divisor (GCD) of :attr:`input` and :attr:`other`. + + Both :attr:`input` and :attr:`other` must have integer types. + + .. note:: + This defines :math:`gcd(0, 0) = 0`. + + Args: + input (Tensor): the input tensor. + other (Tensor): the second input tensor + + Keyword arguments: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.tensor([5, 10, 15]) + >>> b = torch.tensor([3, 4, 5]) + >>> torch.gcd(a, b) + tensor([1, 2, 5]) + >>> c = torch.tensor([3]) + >>> torch.gcd(a, c) + tensor([1, 1, 3]) + """ + ... +def gcd_(input: Tensor, other: Tensor) -> Tensor: ... +@overload +def ge(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + ge(input, other, *, out=None) -> Tensor + + Computes :math:`\text{input} \geq \text{other}` element-wise. + + + The second argument can be a number or a tensor whose shape is + :ref:`broadcastable ` with the first argument. + + Args: + input (Tensor): the tensor to compare + other (Tensor or float): the tensor or value to compare + + Keyword args: + out (Tensor, optional): the output tensor. + + Returns: + A boolean tensor that is True where :attr:`input` is greater than or equal to :attr:`other` and False elsewhere + + Example:: + + >>> torch.ge(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]])) + tensor([[True, True], [False, True]]) + """ + ... +@overload +def ge(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: + r""" + ge(input, other, *, out=None) -> Tensor + + Computes :math:`\text{input} \geq \text{other}` element-wise. + + + The second argument can be a number or a tensor whose shape is + :ref:`broadcastable ` with the first argument. + + Args: + input (Tensor): the tensor to compare + other (Tensor or float): the tensor or value to compare + + Keyword args: + out (Tensor, optional): the output tensor. + + Returns: + A boolean tensor that is True where :attr:`input` is greater than or equal to :attr:`other` and False elsewhere + + Example:: + + >>> torch.ge(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]])) + tensor([[True, True], [False, True]]) + """ + ... 
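+# NOTE: illustrative sketch, not part of the generated stub. The gather docstring
+# above only demonstrates dim=1, and the ge docstring notes that the second argument
+# may be a plain number; two short doctest-style examples of those cases:
+#
+# >>> t = torch.tensor([[1, 2], [3, 4]])
+# >>> torch.gather(t, 0, torch.tensor([[0, 1], [1, 0]]))  # out[i][j] = t[index[i][j]][j]
+# tensor([[1, 4],
+#         [3, 2]])
+# >>> torch.ge(torch.tensor([1, 2, 3]), 2)  # compare a tensor against a Python scalar
+# tensor([False,  True,  True])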
+def geqrf(input: Tensor, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.geqrf: + r""" + geqrf(input, *, out=None) -> (Tensor, Tensor) + + This is a low-level function for calling LAPACK's geqrf directly. This function + returns a namedtuple (a, tau) as defined in `LAPACK documentation for geqrf`_ . + + Computes a QR decomposition of :attr:`input`. + Both `Q` and `R` matrices are stored in the same output tensor `a`. + The elements of `R` are stored on and above the diagonal. + Elementary reflectors (or Householder vectors) implicitly defining matrix `Q` + are stored below the diagonal. + The results of this function can be used together with :func:`torch.linalg.householder_product` + to obtain the `Q` matrix or + with :func:`torch.ormqr`, which uses an implicit representation of the `Q` matrix, + for an efficient matrix-matrix multiplication. + + See `LAPACK documentation for geqrf`_ for further details. + + .. note:: + See also :func:`torch.linalg.qr`, which computes Q and R matrices, and :func:`torch.linalg.lstsq` + with the ``driver="gels"`` option for a function that can solve matrix equations using a QR decomposition. + + Args: + input (Tensor): the input matrix + + Keyword args: + out (tuple, optional): the output tuple of (Tensor, Tensor). Ignored if `None`. Default: `None`. + + .. _LAPACK documentation for geqrf: + http://www.netlib.org/lapack/explore-html/df/dc5/group__variants_g_ecomputational_ga3766ea903391b5cf9008132f7440ec7b.html + """ + ... +def ger(input: Tensor, vec2: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + ger(input, vec2, *, out=None) -> Tensor + + Alias of :func:`torch.outer`. + + .. warning:: + This function is deprecated and will be removed in a future PyTorch release. + Use :func:`torch.outer` instead. + """ + ... +def get_default_dtype() -> _dtype: + r""" + get_default_dtype() -> torch.dtype + + Get the current default floating point :class:`torch.dtype`. + + Example:: + + >>> torch.get_default_dtype() # initial default for floating point is torch.float32 + torch.float32 + >>> torch.set_default_dtype(torch.float64) + >>> torch.get_default_dtype() # default is now changed to torch.float64 + torch.float64 + """ + ... +def get_num_interop_threads() -> _int: + r""" + get_num_interop_threads() -> int + + Returns the number of threads used for inter-op parallelism on CPU + (e.g. in JIT interpreter) + """ + ... +def get_num_threads() -> _int: + r""" + get_num_threads() -> int + + Returns the number of threads used for parallelizing CPU operations + """ + ... +@overload +def gradient(input: Tensor, *, spacing: Optional[Union[Number, _complex]] = None, dim: Optional[_int] = None, edge_order: _int = 1) -> Tuple[Tensor, ...]: + r""" + gradient(input, *, spacing=1, dim=None, edge_order=1) -> List of Tensors + + Estimates the gradient of a function :math:`g : \mathbb{R}^n \rightarrow \mathbb{R}` in + one or more dimensions using the `second-order accurate central differences method + `_ and + either first or second order estimates at the boundaries. + + The gradient of :math:`g` is estimated using samples. By default, when :attr:`spacing` is not + specified, the samples are entirely described by :attr:`input`, and the mapping of input coordinates + to an output is the same as the tensor's mapping of indices to values. For example, for a three-dimensional + :attr:`input` the function described is :math:`g : \mathbb{R}^3 \rightarrow \mathbb{R}`, and + :math:`g(1, 2, 3)\ == input[1, 2, 3]`. 
+ + When :attr:`spacing` is specified, it modifies the relationship between :attr:`input` and input coordinates. + This is detailed in the "Keyword Arguments" section below. + + The gradient is estimated by estimating each partial derivative of :math:`g` independently. This estimation is + accurate if :math:`g` is in :math:`C^3` (it has at least 3 continuous derivatives), and the estimation can be + improved by providing closer samples. Mathematically, the value at each interior point of a partial derivative + is estimated using `Taylor’s theorem with remainder `_. + Letting :math:`x` be an interior point with :math:`x-h_l` and :math:`x+h_r` be points neighboring + it to the left and right respectively, :math:`f(x+h_r)` and :math:`f(x-h_l)` can be estimated using: + + .. math:: + \begin{aligned} + f(x+h_r) = f(x) + h_r f'(x) + {h_r}^2 \frac{f''(x)}{2} + {h_r}^3 \frac{f'''(\xi_1)}{6}, \xi_1 \in (x, x+h_r) \\ + f(x-h_l) = f(x) - h_l f'(x) + {h_l}^2 \frac{f''(x)}{2} - {h_l}^3 \frac{f'''(\xi_2)}{6}, \xi_2 \in (x, x-h_l) \\ + \end{aligned} + + Using the fact that :math:`f \in C^3` and solving the linear system, we derive: + + .. math:: + f'(x) \approx \frac{ {h_l}^2 f(x+h_r) - {h_r}^2 f(x-h_l) + + ({h_r}^2-{h_l}^2 ) f(x) }{ {h_r} {h_l}^2 + {h_r}^2 {h_l} } + + .. note:: + We estimate the gradient of functions in complex domain + :math:`g : \mathbb{C}^n \rightarrow \mathbb{C}` in the same way. + + The value of each partial derivative at the boundary points is computed differently. See edge_order below. + + Args: + input (``Tensor``): the tensor that represents the values of the function + + Keyword args: + spacing (``scalar``, ``list of scalar``, ``list of Tensor``, optional): :attr:`spacing` can be used to modify + how the :attr:`input` tensor's indices relate to sample coordinates. If :attr:`spacing` is a scalar then + the indices are multiplied by the scalar to produce the coordinates. For example, if :attr:`spacing=2` the + indices (1, 2, 3) become coordinates (2, 4, 6). If :attr:`spacing` is a list of scalars then the corresponding + indices are multiplied. For example, if :attr:`spacing=(2, -1, 3)` the indices (1, 2, 3) become coordinates (2, -2, 9). + Finally, if :attr:`spacing` is a list of one-dimensional tensors then each tensor specifies the coordinates for + the corresponding dimension. For example, if the indices are (1, 2, 3) and the tensors are (t0, t1, t2), then + the coordinates are (t0[1], t1[2], t2[3]) + + dim (``int``, ``list of int``, optional): the dimension or dimensions to approximate the gradient over. By default + the partial gradient in every dimension is computed. Note that when :attr:`dim` is specified the elements of + the :attr:`spacing` argument must correspond with the specified dims." + + edge_order (``int``, optional): 1 or 2, for `first-order + `_ or + `second-order `_ + estimation of the boundary ("edge") values, respectively. + + Examples:: + + >>> # Estimates the gradient of f(x)=x^2 at points [-2, -1, 2, 4] + >>> coordinates = (torch.tensor([-2., -1., 1., 4.]),) + >>> values = torch.tensor([4., 1., 1., 16.], ) + >>> torch.gradient(values, spacing = coordinates) + (tensor([-3., -2., 2., 5.]),) + + >>> # Estimates the gradient of the R^2 -> R function whose samples are + >>> # described by the tensor t. Implicit coordinates are [0, 1] for the outermost + >>> # dimension and [0, 1, 2, 3] for the innermost dimension, and function estimates + >>> # partial derivative for both dimensions. 
+ >>> t = torch.tensor([[1, 2, 4, 8], [10, 20, 40, 80]]) + >>> torch.gradient(t) + (tensor([[ 9., 18., 36., 72.], + [ 9., 18., 36., 72.]]), + tensor([[ 1.0000, 1.5000, 3.0000, 4.0000], + [10.0000, 15.0000, 30.0000, 40.0000]])) + + >>> # A scalar value for spacing modifies the relationship between tensor indices + >>> # and input coordinates by multiplying the indices to find the + >>> # coordinates. For example, below the indices of the innermost + >>> # 0, 1, 2, 3 translate to coordinates of [0, 2, 4, 6], and the indices of + >>> # the outermost dimension 0, 1 translate to coordinates of [0, 2]. + >>> torch.gradient(t, spacing = 2.0) # dim = None (implicitly [0, 1]) + (tensor([[ 4.5000, 9.0000, 18.0000, 36.0000], + [ 4.5000, 9.0000, 18.0000, 36.0000]]), + tensor([[ 0.5000, 0.7500, 1.5000, 2.0000], + [ 5.0000, 7.5000, 15.0000, 20.0000]])) + >>> # doubling the spacing between samples halves the estimated partial gradients. + + >>> + >>> # Estimates only the partial derivative for dimension 1 + >>> torch.gradient(t, dim = 1) # spacing = None (implicitly 1.) + (tensor([[ 1.0000, 1.5000, 3.0000, 4.0000], + [10.0000, 15.0000, 30.0000, 40.0000]]),) + + >>> # When spacing is a list of scalars, the relationship between the tensor + >>> # indices and input coordinates changes based on dimension. + >>> # For example, below, the indices of the innermost dimension 0, 1, 2, 3 translate + >>> # to coordinates of [0, 3, 6, 9], and the indices of the outermost dimension + >>> # 0, 1 translate to coordinates of [0, 2]. + >>> torch.gradient(t, spacing = [3., 2.]) + (tensor([[ 4.5000, 9.0000, 18.0000, 36.0000], + [ 4.5000, 9.0000, 18.0000, 36.0000]]), + tensor([[ 0.3333, 0.5000, 1.0000, 1.3333], + [ 3.3333, 5.0000, 10.0000, 13.3333]])) + + >>> # The following example is a replication of the previous one with explicit + >>> # coordinates. + >>> coords = (torch.tensor([0, 2]), torch.tensor([0, 3, 6, 9])) + >>> torch.gradient(t, spacing = coords) + (tensor([[ 4.5000, 9.0000, 18.0000, 36.0000], + [ 4.5000, 9.0000, 18.0000, 36.0000]]), + tensor([[ 0.3333, 0.5000, 1.0000, 1.3333], + [ 3.3333, 5.0000, 10.0000, 13.3333]])) + """ + ... +@overload +def gradient(input: Tensor, *, spacing: Sequence[Union[Number, _complex]], dim: Optional[_int] = None, edge_order: _int = 1) -> Tuple[Tensor, ...]: + r""" + gradient(input, *, spacing=1, dim=None, edge_order=1) -> List of Tensors + + Estimates the gradient of a function :math:`g : \mathbb{R}^n \rightarrow \mathbb{R}` in + one or more dimensions using the `second-order accurate central differences method + `_ and + either first or second order estimates at the boundaries. + + The gradient of :math:`g` is estimated using samples. By default, when :attr:`spacing` is not + specified, the samples are entirely described by :attr:`input`, and the mapping of input coordinates + to an output is the same as the tensor's mapping of indices to values. For example, for a three-dimensional + :attr:`input` the function described is :math:`g : \mathbb{R}^3 \rightarrow \mathbb{R}`, and + :math:`g(1, 2, 3)\ == input[1, 2, 3]`. + + When :attr:`spacing` is specified, it modifies the relationship between :attr:`input` and input coordinates. + This is detailed in the "Keyword Arguments" section below. + + The gradient is estimated by estimating each partial derivative of :math:`g` independently. This estimation is + accurate if :math:`g` is in :math:`C^3` (it has at least 3 continuous derivatives), and the estimation can be + improved by providing closer samples. 
Mathematically, the value at each interior point of a partial derivative + is estimated using `Taylor’s theorem with remainder `_. + Letting :math:`x` be an interior point with :math:`x-h_l` and :math:`x+h_r` be points neighboring + it to the left and right respectively, :math:`f(x+h_r)` and :math:`f(x-h_l)` can be estimated using: + + .. math:: + \begin{aligned} + f(x+h_r) = f(x) + h_r f'(x) + {h_r}^2 \frac{f''(x)}{2} + {h_r}^3 \frac{f'''(\xi_1)}{6}, \xi_1 \in (x, x+h_r) \\ + f(x-h_l) = f(x) - h_l f'(x) + {h_l}^2 \frac{f''(x)}{2} - {h_l}^3 \frac{f'''(\xi_2)}{6}, \xi_2 \in (x, x-h_l) \\ + \end{aligned} + + Using the fact that :math:`f \in C^3` and solving the linear system, we derive: + + .. math:: + f'(x) \approx \frac{ {h_l}^2 f(x+h_r) - {h_r}^2 f(x-h_l) + + ({h_r}^2-{h_l}^2 ) f(x) }{ {h_r} {h_l}^2 + {h_r}^2 {h_l} } + + .. note:: + We estimate the gradient of functions in complex domain + :math:`g : \mathbb{C}^n \rightarrow \mathbb{C}` in the same way. + + The value of each partial derivative at the boundary points is computed differently. See edge_order below. + + Args: + input (``Tensor``): the tensor that represents the values of the function + + Keyword args: + spacing (``scalar``, ``list of scalar``, ``list of Tensor``, optional): :attr:`spacing` can be used to modify + how the :attr:`input` tensor's indices relate to sample coordinates. If :attr:`spacing` is a scalar then + the indices are multiplied by the scalar to produce the coordinates. For example, if :attr:`spacing=2` the + indices (1, 2, 3) become coordinates (2, 4, 6). If :attr:`spacing` is a list of scalars then the corresponding + indices are multiplied. For example, if :attr:`spacing=(2, -1, 3)` the indices (1, 2, 3) become coordinates (2, -2, 9). + Finally, if :attr:`spacing` is a list of one-dimensional tensors then each tensor specifies the coordinates for + the corresponding dimension. For example, if the indices are (1, 2, 3) and the tensors are (t0, t1, t2), then + the coordinates are (t0[1], t1[2], t2[3]) + + dim (``int``, ``list of int``, optional): the dimension or dimensions to approximate the gradient over. By default + the partial gradient in every dimension is computed. Note that when :attr:`dim` is specified the elements of + the :attr:`spacing` argument must correspond with the specified dims." + + edge_order (``int``, optional): 1 or 2, for `first-order + `_ or + `second-order `_ + estimation of the boundary ("edge") values, respectively. + + Examples:: + + >>> # Estimates the gradient of f(x)=x^2 at points [-2, -1, 2, 4] + >>> coordinates = (torch.tensor([-2., -1., 1., 4.]),) + >>> values = torch.tensor([4., 1., 1., 16.], ) + >>> torch.gradient(values, spacing = coordinates) + (tensor([-3., -2., 2., 5.]),) + + >>> # Estimates the gradient of the R^2 -> R function whose samples are + >>> # described by the tensor t. Implicit coordinates are [0, 1] for the outermost + >>> # dimension and [0, 1, 2, 3] for the innermost dimension, and function estimates + >>> # partial derivative for both dimensions. + >>> t = torch.tensor([[1, 2, 4, 8], [10, 20, 40, 80]]) + >>> torch.gradient(t) + (tensor([[ 9., 18., 36., 72.], + [ 9., 18., 36., 72.]]), + tensor([[ 1.0000, 1.5000, 3.0000, 4.0000], + [10.0000, 15.0000, 30.0000, 40.0000]])) + + >>> # A scalar value for spacing modifies the relationship between tensor indices + >>> # and input coordinates by multiplying the indices to find the + >>> # coordinates. 
For example, below the indices of the innermost + >>> # 0, 1, 2, 3 translate to coordinates of [0, 2, 4, 6], and the indices of + >>> # the outermost dimension 0, 1 translate to coordinates of [0, 2]. + >>> torch.gradient(t, spacing = 2.0) # dim = None (implicitly [0, 1]) + (tensor([[ 4.5000, 9.0000, 18.0000, 36.0000], + [ 4.5000, 9.0000, 18.0000, 36.0000]]), + tensor([[ 0.5000, 0.7500, 1.5000, 2.0000], + [ 5.0000, 7.5000, 15.0000, 20.0000]])) + >>> # doubling the spacing between samples halves the estimated partial gradients. + + >>> + >>> # Estimates only the partial derivative for dimension 1 + >>> torch.gradient(t, dim = 1) # spacing = None (implicitly 1.) + (tensor([[ 1.0000, 1.5000, 3.0000, 4.0000], + [10.0000, 15.0000, 30.0000, 40.0000]]),) + + >>> # When spacing is a list of scalars, the relationship between the tensor + >>> # indices and input coordinates changes based on dimension. + >>> # For example, below, the indices of the innermost dimension 0, 1, 2, 3 translate + >>> # to coordinates of [0, 3, 6, 9], and the indices of the outermost dimension + >>> # 0, 1 translate to coordinates of [0, 2]. + >>> torch.gradient(t, spacing = [3., 2.]) + (tensor([[ 4.5000, 9.0000, 18.0000, 36.0000], + [ 4.5000, 9.0000, 18.0000, 36.0000]]), + tensor([[ 0.3333, 0.5000, 1.0000, 1.3333], + [ 3.3333, 5.0000, 10.0000, 13.3333]])) + + >>> # The following example is a replication of the previous one with explicit + >>> # coordinates. + >>> coords = (torch.tensor([0, 2]), torch.tensor([0, 3, 6, 9])) + >>> torch.gradient(t, spacing = coords) + (tensor([[ 4.5000, 9.0000, 18.0000, 36.0000], + [ 4.5000, 9.0000, 18.0000, 36.0000]]), + tensor([[ 0.3333, 0.5000, 1.0000, 1.3333], + [ 3.3333, 5.0000, 10.0000, 13.3333]])) + """ + ... +@overload +def gradient(input: Tensor, *, spacing: Sequence[Union[Number, _complex]], dim: _size, edge_order: _int = 1) -> Tuple[Tensor, ...]: + r""" + gradient(input, *, spacing=1, dim=None, edge_order=1) -> List of Tensors + + Estimates the gradient of a function :math:`g : \mathbb{R}^n \rightarrow \mathbb{R}` in + one or more dimensions using the `second-order accurate central differences method + `_ and + either first or second order estimates at the boundaries. + + The gradient of :math:`g` is estimated using samples. By default, when :attr:`spacing` is not + specified, the samples are entirely described by :attr:`input`, and the mapping of input coordinates + to an output is the same as the tensor's mapping of indices to values. For example, for a three-dimensional + :attr:`input` the function described is :math:`g : \mathbb{R}^3 \rightarrow \mathbb{R}`, and + :math:`g(1, 2, 3)\ == input[1, 2, 3]`. + + When :attr:`spacing` is specified, it modifies the relationship between :attr:`input` and input coordinates. + This is detailed in the "Keyword Arguments" section below. + + The gradient is estimated by estimating each partial derivative of :math:`g` independently. This estimation is + accurate if :math:`g` is in :math:`C^3` (it has at least 3 continuous derivatives), and the estimation can be + improved by providing closer samples. Mathematically, the value at each interior point of a partial derivative + is estimated using `Taylor’s theorem with remainder `_. + Letting :math:`x` be an interior point with :math:`x-h_l` and :math:`x+h_r` be points neighboring + it to the left and right respectively, :math:`f(x+h_r)` and :math:`f(x-h_l)` can be estimated using: + + .. 
math:: + \begin{aligned} + f(x+h_r) = f(x) + h_r f'(x) + {h_r}^2 \frac{f''(x)}{2} + {h_r}^3 \frac{f'''(\xi_1)}{6}, \xi_1 \in (x, x+h_r) \\ + f(x-h_l) = f(x) - h_l f'(x) + {h_l}^2 \frac{f''(x)}{2} - {h_l}^3 \frac{f'''(\xi_2)}{6}, \xi_2 \in (x, x-h_l) \\ + \end{aligned} + + Using the fact that :math:`f \in C^3` and solving the linear system, we derive: + + .. math:: + f'(x) \approx \frac{ {h_l}^2 f(x+h_r) - {h_r}^2 f(x-h_l) + + ({h_r}^2-{h_l}^2 ) f(x) }{ {h_r} {h_l}^2 + {h_r}^2 {h_l} } + + .. note:: + We estimate the gradient of functions in complex domain + :math:`g : \mathbb{C}^n \rightarrow \mathbb{C}` in the same way. + + The value of each partial derivative at the boundary points is computed differently. See edge_order below. + + Args: + input (``Tensor``): the tensor that represents the values of the function + + Keyword args: + spacing (``scalar``, ``list of scalar``, ``list of Tensor``, optional): :attr:`spacing` can be used to modify + how the :attr:`input` tensor's indices relate to sample coordinates. If :attr:`spacing` is a scalar then + the indices are multiplied by the scalar to produce the coordinates. For example, if :attr:`spacing=2` the + indices (1, 2, 3) become coordinates (2, 4, 6). If :attr:`spacing` is a list of scalars then the corresponding + indices are multiplied. For example, if :attr:`spacing=(2, -1, 3)` the indices (1, 2, 3) become coordinates (2, -2, 9). + Finally, if :attr:`spacing` is a list of one-dimensional tensors then each tensor specifies the coordinates for + the corresponding dimension. For example, if the indices are (1, 2, 3) and the tensors are (t0, t1, t2), then + the coordinates are (t0[1], t1[2], t2[3]) + + dim (``int``, ``list of int``, optional): the dimension or dimensions to approximate the gradient over. By default + the partial gradient in every dimension is computed. Note that when :attr:`dim` is specified the elements of + the :attr:`spacing` argument must correspond with the specified dims." + + edge_order (``int``, optional): 1 or 2, for `first-order + `_ or + `second-order `_ + estimation of the boundary ("edge") values, respectively. + + Examples:: + + >>> # Estimates the gradient of f(x)=x^2 at points [-2, -1, 2, 4] + >>> coordinates = (torch.tensor([-2., -1., 1., 4.]),) + >>> values = torch.tensor([4., 1., 1., 16.], ) + >>> torch.gradient(values, spacing = coordinates) + (tensor([-3., -2., 2., 5.]),) + + >>> # Estimates the gradient of the R^2 -> R function whose samples are + >>> # described by the tensor t. Implicit coordinates are [0, 1] for the outermost + >>> # dimension and [0, 1, 2, 3] for the innermost dimension, and function estimates + >>> # partial derivative for both dimensions. + >>> t = torch.tensor([[1, 2, 4, 8], [10, 20, 40, 80]]) + >>> torch.gradient(t) + (tensor([[ 9., 18., 36., 72.], + [ 9., 18., 36., 72.]]), + tensor([[ 1.0000, 1.5000, 3.0000, 4.0000], + [10.0000, 15.0000, 30.0000, 40.0000]])) + + >>> # A scalar value for spacing modifies the relationship between tensor indices + >>> # and input coordinates by multiplying the indices to find the + >>> # coordinates. For example, below the indices of the innermost + >>> # 0, 1, 2, 3 translate to coordinates of [0, 2, 4, 6], and the indices of + >>> # the outermost dimension 0, 1 translate to coordinates of [0, 2]. 
+ >>> torch.gradient(t, spacing = 2.0) # dim = None (implicitly [0, 1]) + (tensor([[ 4.5000, 9.0000, 18.0000, 36.0000], + [ 4.5000, 9.0000, 18.0000, 36.0000]]), + tensor([[ 0.5000, 0.7500, 1.5000, 2.0000], + [ 5.0000, 7.5000, 15.0000, 20.0000]])) + >>> # doubling the spacing between samples halves the estimated partial gradients. + + >>> + >>> # Estimates only the partial derivative for dimension 1 + >>> torch.gradient(t, dim = 1) # spacing = None (implicitly 1.) + (tensor([[ 1.0000, 1.5000, 3.0000, 4.0000], + [10.0000, 15.0000, 30.0000, 40.0000]]),) + + >>> # When spacing is a list of scalars, the relationship between the tensor + >>> # indices and input coordinates changes based on dimension. + >>> # For example, below, the indices of the innermost dimension 0, 1, 2, 3 translate + >>> # to coordinates of [0, 3, 6, 9], and the indices of the outermost dimension + >>> # 0, 1 translate to coordinates of [0, 2]. + >>> torch.gradient(t, spacing = [3., 2.]) + (tensor([[ 4.5000, 9.0000, 18.0000, 36.0000], + [ 4.5000, 9.0000, 18.0000, 36.0000]]), + tensor([[ 0.3333, 0.5000, 1.0000, 1.3333], + [ 3.3333, 5.0000, 10.0000, 13.3333]])) + + >>> # The following example is a replication of the previous one with explicit + >>> # coordinates. + >>> coords = (torch.tensor([0, 2]), torch.tensor([0, 3, 6, 9])) + >>> torch.gradient(t, spacing = coords) + (tensor([[ 4.5000, 9.0000, 18.0000, 36.0000], + [ 4.5000, 9.0000, 18.0000, 36.0000]]), + tensor([[ 0.3333, 0.5000, 1.0000, 1.3333], + [ 3.3333, 5.0000, 10.0000, 13.3333]])) + """ + ... +@overload +def gradient(input: Tensor, *, spacing: Union[Tuple[Tensor, ...], List[Tensor]], dim: Optional[_int] = None, edge_order: _int = 1) -> Tuple[Tensor, ...]: + r""" + gradient(input, *, spacing=1, dim=None, edge_order=1) -> List of Tensors + + Estimates the gradient of a function :math:`g : \mathbb{R}^n \rightarrow \mathbb{R}` in + one or more dimensions using the `second-order accurate central differences method + `_ and + either first or second order estimates at the boundaries. + + The gradient of :math:`g` is estimated using samples. By default, when :attr:`spacing` is not + specified, the samples are entirely described by :attr:`input`, and the mapping of input coordinates + to an output is the same as the tensor's mapping of indices to values. For example, for a three-dimensional + :attr:`input` the function described is :math:`g : \mathbb{R}^3 \rightarrow \mathbb{R}`, and + :math:`g(1, 2, 3)\ == input[1, 2, 3]`. + + When :attr:`spacing` is specified, it modifies the relationship between :attr:`input` and input coordinates. + This is detailed in the "Keyword Arguments" section below. + + The gradient is estimated by estimating each partial derivative of :math:`g` independently. This estimation is + accurate if :math:`g` is in :math:`C^3` (it has at least 3 continuous derivatives), and the estimation can be + improved by providing closer samples. Mathematically, the value at each interior point of a partial derivative + is estimated using `Taylor’s theorem with remainder `_. + Letting :math:`x` be an interior point with :math:`x-h_l` and :math:`x+h_r` be points neighboring + it to the left and right respectively, :math:`f(x+h_r)` and :math:`f(x-h_l)` can be estimated using: + + .. 
math:: + \begin{aligned} + f(x+h_r) = f(x) + h_r f'(x) + {h_r}^2 \frac{f''(x)}{2} + {h_r}^3 \frac{f'''(\xi_1)}{6}, \xi_1 \in (x, x+h_r) \\ + f(x-h_l) = f(x) - h_l f'(x) + {h_l}^2 \frac{f''(x)}{2} - {h_l}^3 \frac{f'''(\xi_2)}{6}, \xi_2 \in (x, x-h_l) \\ + \end{aligned} + + Using the fact that :math:`f \in C^3` and solving the linear system, we derive: + + .. math:: + f'(x) \approx \frac{ {h_l}^2 f(x+h_r) - {h_r}^2 f(x-h_l) + + ({h_r}^2-{h_l}^2 ) f(x) }{ {h_r} {h_l}^2 + {h_r}^2 {h_l} } + + .. note:: + We estimate the gradient of functions in complex domain + :math:`g : \mathbb{C}^n \rightarrow \mathbb{C}` in the same way. + + The value of each partial derivative at the boundary points is computed differently. See edge_order below. + + Args: + input (``Tensor``): the tensor that represents the values of the function + + Keyword args: + spacing (``scalar``, ``list of scalar``, ``list of Tensor``, optional): :attr:`spacing` can be used to modify + how the :attr:`input` tensor's indices relate to sample coordinates. If :attr:`spacing` is a scalar then + the indices are multiplied by the scalar to produce the coordinates. For example, if :attr:`spacing=2` the + indices (1, 2, 3) become coordinates (2, 4, 6). If :attr:`spacing` is a list of scalars then the corresponding + indices are multiplied. For example, if :attr:`spacing=(2, -1, 3)` the indices (1, 2, 3) become coordinates (2, -2, 9). + Finally, if :attr:`spacing` is a list of one-dimensional tensors then each tensor specifies the coordinates for + the corresponding dimension. For example, if the indices are (1, 2, 3) and the tensors are (t0, t1, t2), then + the coordinates are (t0[1], t1[2], t2[3]) + + dim (``int``, ``list of int``, optional): the dimension or dimensions to approximate the gradient over. By default + the partial gradient in every dimension is computed. Note that when :attr:`dim` is specified the elements of + the :attr:`spacing` argument must correspond with the specified dims." + + edge_order (``int``, optional): 1 or 2, for `first-order + `_ or + `second-order `_ + estimation of the boundary ("edge") values, respectively. + + Examples:: + + >>> # Estimates the gradient of f(x)=x^2 at points [-2, -1, 2, 4] + >>> coordinates = (torch.tensor([-2., -1., 1., 4.]),) + >>> values = torch.tensor([4., 1., 1., 16.], ) + >>> torch.gradient(values, spacing = coordinates) + (tensor([-3., -2., 2., 5.]),) + + >>> # Estimates the gradient of the R^2 -> R function whose samples are + >>> # described by the tensor t. Implicit coordinates are [0, 1] for the outermost + >>> # dimension and [0, 1, 2, 3] for the innermost dimension, and function estimates + >>> # partial derivative for both dimensions. + >>> t = torch.tensor([[1, 2, 4, 8], [10, 20, 40, 80]]) + >>> torch.gradient(t) + (tensor([[ 9., 18., 36., 72.], + [ 9., 18., 36., 72.]]), + tensor([[ 1.0000, 1.5000, 3.0000, 4.0000], + [10.0000, 15.0000, 30.0000, 40.0000]])) + + >>> # A scalar value for spacing modifies the relationship between tensor indices + >>> # and input coordinates by multiplying the indices to find the + >>> # coordinates. For example, below the indices of the innermost + >>> # 0, 1, 2, 3 translate to coordinates of [0, 2, 4, 6], and the indices of + >>> # the outermost dimension 0, 1 translate to coordinates of [0, 2]. 
+ >>> torch.gradient(t, spacing = 2.0) # dim = None (implicitly [0, 1]) + (tensor([[ 4.5000, 9.0000, 18.0000, 36.0000], + [ 4.5000, 9.0000, 18.0000, 36.0000]]), + tensor([[ 0.5000, 0.7500, 1.5000, 2.0000], + [ 5.0000, 7.5000, 15.0000, 20.0000]])) + >>> # doubling the spacing between samples halves the estimated partial gradients. + + >>> + >>> # Estimates only the partial derivative for dimension 1 + >>> torch.gradient(t, dim = 1) # spacing = None (implicitly 1.) + (tensor([[ 1.0000, 1.5000, 3.0000, 4.0000], + [10.0000, 15.0000, 30.0000, 40.0000]]),) + + >>> # When spacing is a list of scalars, the relationship between the tensor + >>> # indices and input coordinates changes based on dimension. + >>> # For example, below, the indices of the innermost dimension 0, 1, 2, 3 translate + >>> # to coordinates of [0, 3, 6, 9], and the indices of the outermost dimension + >>> # 0, 1 translate to coordinates of [0, 2]. + >>> torch.gradient(t, spacing = [3., 2.]) + (tensor([[ 4.5000, 9.0000, 18.0000, 36.0000], + [ 4.5000, 9.0000, 18.0000, 36.0000]]), + tensor([[ 0.3333, 0.5000, 1.0000, 1.3333], + [ 3.3333, 5.0000, 10.0000, 13.3333]])) + + >>> # The following example is a replication of the previous one with explicit + >>> # coordinates. + >>> coords = (torch.tensor([0, 2]), torch.tensor([0, 3, 6, 9])) + >>> torch.gradient(t, spacing = coords) + (tensor([[ 4.5000, 9.0000, 18.0000, 36.0000], + [ 4.5000, 9.0000, 18.0000, 36.0000]]), + tensor([[ 0.3333, 0.5000, 1.0000, 1.3333], + [ 3.3333, 5.0000, 10.0000, 13.3333]])) + """ + ... +@overload +def gradient(input: Tensor, *, spacing: Union[Number, _complex], dim: _size, edge_order: _int = 1) -> Tuple[Tensor, ...]: + r""" + gradient(input, *, spacing=1, dim=None, edge_order=1) -> List of Tensors + + Estimates the gradient of a function :math:`g : \mathbb{R}^n \rightarrow \mathbb{R}` in + one or more dimensions using the `second-order accurate central differences method + `_ and + either first or second order estimates at the boundaries. + + The gradient of :math:`g` is estimated using samples. By default, when :attr:`spacing` is not + specified, the samples are entirely described by :attr:`input`, and the mapping of input coordinates + to an output is the same as the tensor's mapping of indices to values. For example, for a three-dimensional + :attr:`input` the function described is :math:`g : \mathbb{R}^3 \rightarrow \mathbb{R}`, and + :math:`g(1, 2, 3)\ == input[1, 2, 3]`. + + When :attr:`spacing` is specified, it modifies the relationship between :attr:`input` and input coordinates. + This is detailed in the "Keyword Arguments" section below. + + The gradient is estimated by estimating each partial derivative of :math:`g` independently. This estimation is + accurate if :math:`g` is in :math:`C^3` (it has at least 3 continuous derivatives), and the estimation can be + improved by providing closer samples. Mathematically, the value at each interior point of a partial derivative + is estimated using `Taylor’s theorem with remainder `_. + Letting :math:`x` be an interior point with :math:`x-h_l` and :math:`x+h_r` be points neighboring + it to the left and right respectively, :math:`f(x+h_r)` and :math:`f(x-h_l)` can be estimated using: + + .. 
math:: + \begin{aligned} + f(x+h_r) = f(x) + h_r f'(x) + {h_r}^2 \frac{f''(x)}{2} + {h_r}^3 \frac{f'''(\xi_1)}{6}, \xi_1 \in (x, x+h_r) \\ + f(x-h_l) = f(x) - h_l f'(x) + {h_l}^2 \frac{f''(x)}{2} - {h_l}^3 \frac{f'''(\xi_2)}{6}, \xi_2 \in (x, x-h_l) \\ + \end{aligned} + + Using the fact that :math:`f \in C^3` and solving the linear system, we derive: + + .. math:: + f'(x) \approx \frac{ {h_l}^2 f(x+h_r) - {h_r}^2 f(x-h_l) + + ({h_r}^2-{h_l}^2 ) f(x) }{ {h_r} {h_l}^2 + {h_r}^2 {h_l} } + + .. note:: + We estimate the gradient of functions in complex domain + :math:`g : \mathbb{C}^n \rightarrow \mathbb{C}` in the same way. + + The value of each partial derivative at the boundary points is computed differently. See edge_order below. + + Args: + input (``Tensor``): the tensor that represents the values of the function + + Keyword args: + spacing (``scalar``, ``list of scalar``, ``list of Tensor``, optional): :attr:`spacing` can be used to modify + how the :attr:`input` tensor's indices relate to sample coordinates. If :attr:`spacing` is a scalar then + the indices are multiplied by the scalar to produce the coordinates. For example, if :attr:`spacing=2` the + indices (1, 2, 3) become coordinates (2, 4, 6). If :attr:`spacing` is a list of scalars then the corresponding + indices are multiplied. For example, if :attr:`spacing=(2, -1, 3)` the indices (1, 2, 3) become coordinates (2, -2, 9). + Finally, if :attr:`spacing` is a list of one-dimensional tensors then each tensor specifies the coordinates for + the corresponding dimension. For example, if the indices are (1, 2, 3) and the tensors are (t0, t1, t2), then + the coordinates are (t0[1], t1[2], t2[3]) + + dim (``int``, ``list of int``, optional): the dimension or dimensions to approximate the gradient over. By default + the partial gradient in every dimension is computed. Note that when :attr:`dim` is specified the elements of + the :attr:`spacing` argument must correspond with the specified dims." + + edge_order (``int``, optional): 1 or 2, for `first-order + `_ or + `second-order `_ + estimation of the boundary ("edge") values, respectively. + + Examples:: + + >>> # Estimates the gradient of f(x)=x^2 at points [-2, -1, 2, 4] + >>> coordinates = (torch.tensor([-2., -1., 1., 4.]),) + >>> values = torch.tensor([4., 1., 1., 16.], ) + >>> torch.gradient(values, spacing = coordinates) + (tensor([-3., -2., 2., 5.]),) + + >>> # Estimates the gradient of the R^2 -> R function whose samples are + >>> # described by the tensor t. Implicit coordinates are [0, 1] for the outermost + >>> # dimension and [0, 1, 2, 3] for the innermost dimension, and function estimates + >>> # partial derivative for both dimensions. + >>> t = torch.tensor([[1, 2, 4, 8], [10, 20, 40, 80]]) + >>> torch.gradient(t) + (tensor([[ 9., 18., 36., 72.], + [ 9., 18., 36., 72.]]), + tensor([[ 1.0000, 1.5000, 3.0000, 4.0000], + [10.0000, 15.0000, 30.0000, 40.0000]])) + + >>> # A scalar value for spacing modifies the relationship between tensor indices + >>> # and input coordinates by multiplying the indices to find the + >>> # coordinates. For example, below the indices of the innermost + >>> # 0, 1, 2, 3 translate to coordinates of [0, 2, 4, 6], and the indices of + >>> # the outermost dimension 0, 1 translate to coordinates of [0, 2]. 
+ >>> torch.gradient(t, spacing = 2.0) # dim = None (implicitly [0, 1]) + (tensor([[ 4.5000, 9.0000, 18.0000, 36.0000], + [ 4.5000, 9.0000, 18.0000, 36.0000]]), + tensor([[ 0.5000, 0.7500, 1.5000, 2.0000], + [ 5.0000, 7.5000, 15.0000, 20.0000]])) + >>> # doubling the spacing between samples halves the estimated partial gradients. + + >>> + >>> # Estimates only the partial derivative for dimension 1 + >>> torch.gradient(t, dim = 1) # spacing = None (implicitly 1.) + (tensor([[ 1.0000, 1.5000, 3.0000, 4.0000], + [10.0000, 15.0000, 30.0000, 40.0000]]),) + + >>> # When spacing is a list of scalars, the relationship between the tensor + >>> # indices and input coordinates changes based on dimension. + >>> # For example, below, the indices of the innermost dimension 0, 1, 2, 3 translate + >>> # to coordinates of [0, 3, 6, 9], and the indices of the outermost dimension + >>> # 0, 1 translate to coordinates of [0, 2]. + >>> torch.gradient(t, spacing = [3., 2.]) + (tensor([[ 4.5000, 9.0000, 18.0000, 36.0000], + [ 4.5000, 9.0000, 18.0000, 36.0000]]), + tensor([[ 0.3333, 0.5000, 1.0000, 1.3333], + [ 3.3333, 5.0000, 10.0000, 13.3333]])) + + >>> # The following example is a replication of the previous one with explicit + >>> # coordinates. + >>> coords = (torch.tensor([0, 2]), torch.tensor([0, 3, 6, 9])) + >>> torch.gradient(t, spacing = coords) + (tensor([[ 4.5000, 9.0000, 18.0000, 36.0000], + [ 4.5000, 9.0000, 18.0000, 36.0000]]), + tensor([[ 0.3333, 0.5000, 1.0000, 1.3333], + [ 3.3333, 5.0000, 10.0000, 13.3333]])) + """ + ... +@overload +def gradient(input: Tensor, *, spacing: Union[Tuple[Tensor, ...], List[Tensor]], dim: _size, edge_order: _int = 1) -> Tuple[Tensor, ...]: + r""" + gradient(input, *, spacing=1, dim=None, edge_order=1) -> List of Tensors + + Estimates the gradient of a function :math:`g : \mathbb{R}^n \rightarrow \mathbb{R}` in + one or more dimensions using the `second-order accurate central differences method + `_ and + either first or second order estimates at the boundaries. + + The gradient of :math:`g` is estimated using samples. By default, when :attr:`spacing` is not + specified, the samples are entirely described by :attr:`input`, and the mapping of input coordinates + to an output is the same as the tensor's mapping of indices to values. For example, for a three-dimensional + :attr:`input` the function described is :math:`g : \mathbb{R}^3 \rightarrow \mathbb{R}`, and + :math:`g(1, 2, 3)\ == input[1, 2, 3]`. + + When :attr:`spacing` is specified, it modifies the relationship between :attr:`input` and input coordinates. + This is detailed in the "Keyword Arguments" section below. + + The gradient is estimated by estimating each partial derivative of :math:`g` independently. This estimation is + accurate if :math:`g` is in :math:`C^3` (it has at least 3 continuous derivatives), and the estimation can be + improved by providing closer samples. Mathematically, the value at each interior point of a partial derivative + is estimated using `Taylor’s theorem with remainder `_. + Letting :math:`x` be an interior point with :math:`x-h_l` and :math:`x+h_r` be points neighboring + it to the left and right respectively, :math:`f(x+h_r)` and :math:`f(x-h_l)` can be estimated using: + + .. 
math:: + \begin{aligned} + f(x+h_r) = f(x) + h_r f'(x) + {h_r}^2 \frac{f''(x)}{2} + {h_r}^3 \frac{f'''(\xi_1)}{6}, \xi_1 \in (x, x+h_r) \\ + f(x-h_l) = f(x) - h_l f'(x) + {h_l}^2 \frac{f''(x)}{2} - {h_l}^3 \frac{f'''(\xi_2)}{6}, \xi_2 \in (x, x-h_l) \\ + \end{aligned} + + Using the fact that :math:`f \in C^3` and solving the linear system, we derive: + + .. math:: + f'(x) \approx \frac{ {h_l}^2 f(x+h_r) - {h_r}^2 f(x-h_l) + + ({h_r}^2-{h_l}^2 ) f(x) }{ {h_r} {h_l}^2 + {h_r}^2 {h_l} } + + .. note:: + We estimate the gradient of functions in complex domain + :math:`g : \mathbb{C}^n \rightarrow \mathbb{C}` in the same way. + + The value of each partial derivative at the boundary points is computed differently. See edge_order below. + + Args: + input (``Tensor``): the tensor that represents the values of the function + + Keyword args: + spacing (``scalar``, ``list of scalar``, ``list of Tensor``, optional): :attr:`spacing` can be used to modify + how the :attr:`input` tensor's indices relate to sample coordinates. If :attr:`spacing` is a scalar then + the indices are multiplied by the scalar to produce the coordinates. For example, if :attr:`spacing=2` the + indices (1, 2, 3) become coordinates (2, 4, 6). If :attr:`spacing` is a list of scalars then the corresponding + indices are multiplied. For example, if :attr:`spacing=(2, -1, 3)` the indices (1, 2, 3) become coordinates (2, -2, 9). + Finally, if :attr:`spacing` is a list of one-dimensional tensors then each tensor specifies the coordinates for + the corresponding dimension. For example, if the indices are (1, 2, 3) and the tensors are (t0, t1, t2), then + the coordinates are (t0[1], t1[2], t2[3]) + + dim (``int``, ``list of int``, optional): the dimension or dimensions to approximate the gradient over. By default + the partial gradient in every dimension is computed. Note that when :attr:`dim` is specified the elements of + the :attr:`spacing` argument must correspond with the specified dims." + + edge_order (``int``, optional): 1 or 2, for `first-order + `_ or + `second-order `_ + estimation of the boundary ("edge") values, respectively. + + Examples:: + + >>> # Estimates the gradient of f(x)=x^2 at points [-2, -1, 2, 4] + >>> coordinates = (torch.tensor([-2., -1., 1., 4.]),) + >>> values = torch.tensor([4., 1., 1., 16.], ) + >>> torch.gradient(values, spacing = coordinates) + (tensor([-3., -2., 2., 5.]),) + + >>> # Estimates the gradient of the R^2 -> R function whose samples are + >>> # described by the tensor t. Implicit coordinates are [0, 1] for the outermost + >>> # dimension and [0, 1, 2, 3] for the innermost dimension, and function estimates + >>> # partial derivative for both dimensions. + >>> t = torch.tensor([[1, 2, 4, 8], [10, 20, 40, 80]]) + >>> torch.gradient(t) + (tensor([[ 9., 18., 36., 72.], + [ 9., 18., 36., 72.]]), + tensor([[ 1.0000, 1.5000, 3.0000, 4.0000], + [10.0000, 15.0000, 30.0000, 40.0000]])) + + >>> # A scalar value for spacing modifies the relationship between tensor indices + >>> # and input coordinates by multiplying the indices to find the + >>> # coordinates. For example, below the indices of the innermost + >>> # 0, 1, 2, 3 translate to coordinates of [0, 2, 4, 6], and the indices of + >>> # the outermost dimension 0, 1 translate to coordinates of [0, 2]. 
+ >>> torch.gradient(t, spacing = 2.0) # dim = None (implicitly [0, 1]) + (tensor([[ 4.5000, 9.0000, 18.0000, 36.0000], + [ 4.5000, 9.0000, 18.0000, 36.0000]]), + tensor([[ 0.5000, 0.7500, 1.5000, 2.0000], + [ 5.0000, 7.5000, 15.0000, 20.0000]])) + >>> # doubling the spacing between samples halves the estimated partial gradients. + + >>> + >>> # Estimates only the partial derivative for dimension 1 + >>> torch.gradient(t, dim = 1) # spacing = None (implicitly 1.) + (tensor([[ 1.0000, 1.5000, 3.0000, 4.0000], + [10.0000, 15.0000, 30.0000, 40.0000]]),) + + >>> # When spacing is a list of scalars, the relationship between the tensor + >>> # indices and input coordinates changes based on dimension. + >>> # For example, below, the indices of the innermost dimension 0, 1, 2, 3 translate + >>> # to coordinates of [0, 3, 6, 9], and the indices of the outermost dimension + >>> # 0, 1 translate to coordinates of [0, 2]. + >>> torch.gradient(t, spacing = [3., 2.]) + (tensor([[ 4.5000, 9.0000, 18.0000, 36.0000], + [ 4.5000, 9.0000, 18.0000, 36.0000]]), + tensor([[ 0.3333, 0.5000, 1.0000, 1.3333], + [ 3.3333, 5.0000, 10.0000, 13.3333]])) + + >>> # The following example is a replication of the previous one with explicit + >>> # coordinates. + >>> coords = (torch.tensor([0, 2]), torch.tensor([0, 3, 6, 9])) + >>> torch.gradient(t, spacing = coords) + (tensor([[ 4.5000, 9.0000, 18.0000, 36.0000], + [ 4.5000, 9.0000, 18.0000, 36.0000]]), + tensor([[ 0.3333, 0.5000, 1.0000, 1.3333], + [ 3.3333, 5.0000, 10.0000, 13.3333]])) + """ + ... +@overload +def gradient(input: Tensor, *, dim: _size, edge_order: _int = 1) -> Tuple[Tensor, ...]: + r""" + gradient(input, *, spacing=1, dim=None, edge_order=1) -> List of Tensors + + Estimates the gradient of a function :math:`g : \mathbb{R}^n \rightarrow \mathbb{R}` in + one or more dimensions using the `second-order accurate central differences method + `_ and + either first or second order estimates at the boundaries. + + The gradient of :math:`g` is estimated using samples. By default, when :attr:`spacing` is not + specified, the samples are entirely described by :attr:`input`, and the mapping of input coordinates + to an output is the same as the tensor's mapping of indices to values. For example, for a three-dimensional + :attr:`input` the function described is :math:`g : \mathbb{R}^3 \rightarrow \mathbb{R}`, and + :math:`g(1, 2, 3)\ == input[1, 2, 3]`. + + When :attr:`spacing` is specified, it modifies the relationship between :attr:`input` and input coordinates. + This is detailed in the "Keyword Arguments" section below. + + The gradient is estimated by estimating each partial derivative of :math:`g` independently. This estimation is + accurate if :math:`g` is in :math:`C^3` (it has at least 3 continuous derivatives), and the estimation can be + improved by providing closer samples. Mathematically, the value at each interior point of a partial derivative + is estimated using `Taylor’s theorem with remainder `_. + Letting :math:`x` be an interior point with :math:`x-h_l` and :math:`x+h_r` be points neighboring + it to the left and right respectively, :math:`f(x+h_r)` and :math:`f(x-h_l)` can be estimated using: + + .. 
math:: + \begin{aligned} + f(x+h_r) = f(x) + h_r f'(x) + {h_r}^2 \frac{f''(x)}{2} + {h_r}^3 \frac{f'''(\xi_1)}{6}, \xi_1 \in (x, x+h_r) \\ + f(x-h_l) = f(x) - h_l f'(x) + {h_l}^2 \frac{f''(x)}{2} - {h_l}^3 \frac{f'''(\xi_2)}{6}, \xi_2 \in (x, x-h_l) \\ + \end{aligned} + + Using the fact that :math:`f \in C^3` and solving the linear system, we derive: + + .. math:: + f'(x) \approx \frac{ {h_l}^2 f(x+h_r) - {h_r}^2 f(x-h_l) + + ({h_r}^2-{h_l}^2 ) f(x) }{ {h_r} {h_l}^2 + {h_r}^2 {h_l} } + + .. note:: + We estimate the gradient of functions in complex domain + :math:`g : \mathbb{C}^n \rightarrow \mathbb{C}` in the same way. + + The value of each partial derivative at the boundary points is computed differently. See edge_order below. + + Args: + input (``Tensor``): the tensor that represents the values of the function + + Keyword args: + spacing (``scalar``, ``list of scalar``, ``list of Tensor``, optional): :attr:`spacing` can be used to modify + how the :attr:`input` tensor's indices relate to sample coordinates. If :attr:`spacing` is a scalar then + the indices are multiplied by the scalar to produce the coordinates. For example, if :attr:`spacing=2` the + indices (1, 2, 3) become coordinates (2, 4, 6). If :attr:`spacing` is a list of scalars then the corresponding + indices are multiplied. For example, if :attr:`spacing=(2, -1, 3)` the indices (1, 2, 3) become coordinates (2, -2, 9). + Finally, if :attr:`spacing` is a list of one-dimensional tensors then each tensor specifies the coordinates for + the corresponding dimension. For example, if the indices are (1, 2, 3) and the tensors are (t0, t1, t2), then + the coordinates are (t0[1], t1[2], t2[3]) + + dim (``int``, ``list of int``, optional): the dimension or dimensions to approximate the gradient over. By default + the partial gradient in every dimension is computed. Note that when :attr:`dim` is specified the elements of + the :attr:`spacing` argument must correspond with the specified dims." + + edge_order (``int``, optional): 1 or 2, for `first-order + `_ or + `second-order `_ + estimation of the boundary ("edge") values, respectively. + + Examples:: + + >>> # Estimates the gradient of f(x)=x^2 at points [-2, -1, 2, 4] + >>> coordinates = (torch.tensor([-2., -1., 1., 4.]),) + >>> values = torch.tensor([4., 1., 1., 16.], ) + >>> torch.gradient(values, spacing = coordinates) + (tensor([-3., -2., 2., 5.]),) + + >>> # Estimates the gradient of the R^2 -> R function whose samples are + >>> # described by the tensor t. Implicit coordinates are [0, 1] for the outermost + >>> # dimension and [0, 1, 2, 3] for the innermost dimension, and function estimates + >>> # partial derivative for both dimensions. + >>> t = torch.tensor([[1, 2, 4, 8], [10, 20, 40, 80]]) + >>> torch.gradient(t) + (tensor([[ 9., 18., 36., 72.], + [ 9., 18., 36., 72.]]), + tensor([[ 1.0000, 1.5000, 3.0000, 4.0000], + [10.0000, 15.0000, 30.0000, 40.0000]])) + + >>> # A scalar value for spacing modifies the relationship between tensor indices + >>> # and input coordinates by multiplying the indices to find the + >>> # coordinates. For example, below the indices of the innermost + >>> # 0, 1, 2, 3 translate to coordinates of [0, 2, 4, 6], and the indices of + >>> # the outermost dimension 0, 1 translate to coordinates of [0, 2]. 
+ >>> torch.gradient(t, spacing = 2.0) # dim = None (implicitly [0, 1]) + (tensor([[ 4.5000, 9.0000, 18.0000, 36.0000], + [ 4.5000, 9.0000, 18.0000, 36.0000]]), + tensor([[ 0.5000, 0.7500, 1.5000, 2.0000], + [ 5.0000, 7.5000, 15.0000, 20.0000]])) + >>> # doubling the spacing between samples halves the estimated partial gradients. + + >>> + >>> # Estimates only the partial derivative for dimension 1 + >>> torch.gradient(t, dim = 1) # spacing = None (implicitly 1.) + (tensor([[ 1.0000, 1.5000, 3.0000, 4.0000], + [10.0000, 15.0000, 30.0000, 40.0000]]),) + + >>> # When spacing is a list of scalars, the relationship between the tensor + >>> # indices and input coordinates changes based on dimension. + >>> # For example, below, the indices of the innermost dimension 0, 1, 2, 3 translate + >>> # to coordinates of [0, 3, 6, 9], and the indices of the outermost dimension + >>> # 0, 1 translate to coordinates of [0, 2]. + >>> torch.gradient(t, spacing = [3., 2.]) + (tensor([[ 4.5000, 9.0000, 18.0000, 36.0000], + [ 4.5000, 9.0000, 18.0000, 36.0000]]), + tensor([[ 0.3333, 0.5000, 1.0000, 1.3333], + [ 3.3333, 5.0000, 10.0000, 13.3333]])) + + >>> # The following example is a replication of the previous one with explicit + >>> # coordinates. + >>> coords = (torch.tensor([0, 2]), torch.tensor([0, 3, 6, 9])) + >>> torch.gradient(t, spacing = coords) + (tensor([[ 4.5000, 9.0000, 18.0000, 36.0000], + [ 4.5000, 9.0000, 18.0000, 36.0000]]), + tensor([[ 0.3333, 0.5000, 1.0000, 1.3333], + [ 3.3333, 5.0000, 10.0000, 13.3333]])) + """ + ... +@overload +def greater(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + greater(input, other, *, out=None) -> Tensor + + Alias for :func:`torch.gt`. + """ + ... +@overload +def greater(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: + r""" + greater(input, other, *, out=None) -> Tensor + + Alias for :func:`torch.gt`. + """ + ... +@overload +def greater_equal(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + greater_equal(input, other, *, out=None) -> Tensor + + Alias for :func:`torch.ge`. + """ + ... +@overload +def greater_equal(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: + r""" + greater_equal(input, other, *, out=None) -> Tensor + + Alias for :func:`torch.ge`. + """ + ... +def grid_sampler(input: Tensor, grid: Tensor, interpolation_mode: _int, padding_mode: _int, align_corners: _bool) -> Tensor: ... +def grid_sampler_2d(input: Tensor, grid: Tensor, interpolation_mode: _int, padding_mode: _int, align_corners: _bool) -> Tensor: ... +def grid_sampler_3d(input: Tensor, grid: Tensor, interpolation_mode: _int, padding_mode: _int, align_corners: _bool) -> Tensor: ... +def group_norm(input: Tensor, num_groups: _int, weight: Optional[Tensor] = None, bias: Optional[Tensor] = None, eps: _float = 1e-05, cudnn_enabled: _bool = True) -> Tensor: ... +@overload +def gru(data: Tensor, batch_sizes: Tensor, hx: Tensor, params: Union[Tuple[Tensor, ...], List[Tensor]], has_biases: _bool, num_layers: _int, dropout: _float, train: _bool, bidirectional: _bool) -> Tuple[Tensor, Tensor]: ... +@overload +def gru(input: Tensor, hx: Tensor, params: Union[Tuple[Tensor, ...], List[Tensor]], has_biases: _bool, num_layers: _int, dropout: _float, train: _bool, bidirectional: _bool, batch_first: _bool) -> Tuple[Tensor, Tensor]: ... 
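+# Worked check of the non-uniform central-difference formula documented for
+# torch.gradient above (illustrative sketch; assumes a runtime torch install and
+# reuses the f(x) = x**2 sample points from the docstring example):
+#
+#   x = torch.tensor([-2., -1., 1., 4.])      # sample coordinates
+#   f = x ** 2                                # tensor([ 4.,  1.,  1., 16.])
+#   h_l, h_r = 1.0, 2.0                       # spacings around the interior point x = -1
+#   est = (h_l**2 * f[2] - h_r**2 * f[0] + (h_r**2 - h_l**2) * f[1]) \
+#         / (h_r * h_l**2 + h_r**2 * h_l)     # -> -2.0
+#   torch.gradient(f, spacing=(x,))           # -> (tensor([-3., -2.,  2.,  5.]),)
+#
+# The hand-computed interior estimate matches the second entry returned by torch.gradient;
+# the boundary values (-3. and 5.) come from the one-sided edge_order=1 estimates.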
+def gru_cell(input: Tensor, hx: Tensor, w_ih: Tensor, w_hh: Tensor, b_ih: Optional[Tensor] = None, b_hh: Optional[Tensor] = None) -> Tensor: ... +@overload +def gt(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + gt(input, other, *, out=None) -> Tensor + + Computes :math:`\text{input} > \text{other}` element-wise. + + + The second argument can be a number or a tensor whose shape is + :ref:`broadcastable ` with the first argument. + + Args: + input (Tensor): the tensor to compare + other (Tensor or float): the tensor or value to compare + + Keyword args: + out (Tensor, optional): the output tensor. + + Returns: + A boolean tensor that is True where :attr:`input` is greater than :attr:`other` and False elsewhere + + Example:: + + >>> torch.gt(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]])) + tensor([[False, True], [False, False]]) + """ + ... +@overload +def gt(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: + r""" + gt(input, other, *, out=None) -> Tensor + + Computes :math:`\text{input} > \text{other}` element-wise. + + + The second argument can be a number or a tensor whose shape is + :ref:`broadcastable ` with the first argument. + + Args: + input (Tensor): the tensor to compare + other (Tensor or float): the tensor or value to compare + + Keyword args: + out (Tensor, optional): the output tensor. + + Returns: + A boolean tensor that is True where :attr:`input` is greater than :attr:`other` and False elsewhere + + Example:: + + >>> torch.gt(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]])) + tensor([[False, True], [False, False]]) + """ + ... +@overload +def hamming_window(window_length: _int, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + hamming_window(window_length, periodic=True, alpha=0.54, beta=0.46, *, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + Hamming window function. + + .. math:: + w[n] = \alpha - \beta\ \cos \left( \frac{2 \pi n}{N - 1} \right), + + where :math:`N` is the full window size. + + The input :attr:`window_length` is a positive integer controlling the + returned window size. :attr:`periodic` flag determines whether the returned + window trims off the last duplicate value from the symmetric window and is + ready to be used as a periodic window with functions like + :meth:`torch.stft`. Therefore, if :attr:`periodic` is true, the :math:`N` in + above formula is in fact :math:`\text{window\_length} + 1`. Also, we always have + ``torch.hamming_window(L, periodic=True)`` equal to + ``torch.hamming_window(L + 1, periodic=False)[:-1])``. + + .. note:: + If :attr:`window_length` :math:`=1`, the returned window contains a single value 1. + + .. note:: + This is a generalized version of :meth:`torch.hann_window`. + + Arguments: + window_length (int): the size of returned window + periodic (bool, optional): If True, returns a window to be used as periodic + function. If False, return a symmetric window. + alpha (float, optional): The coefficient :math:`\alpha` in the equation above + beta (float, optional): The coefficient :math:`\beta` in the equation above + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). 
Only floating point types are supported. + layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only + ``torch.strided`` (dense layout) is supported. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + + Returns: + Tensor: A 1-D tensor of size :math:`(\text{window\_length},)` containing the window. + """ + ... +@overload +def hamming_window(window_length: _int, periodic: _bool, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + hamming_window(window_length, periodic=True, alpha=0.54, beta=0.46, *, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + Hamming window function. + + .. math:: + w[n] = \alpha - \beta\ \cos \left( \frac{2 \pi n}{N - 1} \right), + + where :math:`N` is the full window size. + + The input :attr:`window_length` is a positive integer controlling the + returned window size. :attr:`periodic` flag determines whether the returned + window trims off the last duplicate value from the symmetric window and is + ready to be used as a periodic window with functions like + :meth:`torch.stft`. Therefore, if :attr:`periodic` is true, the :math:`N` in + above formula is in fact :math:`\text{window\_length} + 1`. Also, we always have + ``torch.hamming_window(L, periodic=True)`` equal to + ``torch.hamming_window(L + 1, periodic=False)[:-1])``. + + .. note:: + If :attr:`window_length` :math:`=1`, the returned window contains a single value 1. + + .. note:: + This is a generalized version of :meth:`torch.hann_window`. + + Arguments: + window_length (int): the size of returned window + periodic (bool, optional): If True, returns a window to be used as periodic + function. If False, return a symmetric window. + alpha (float, optional): The coefficient :math:`\alpha` in the equation above + beta (float, optional): The coefficient :math:`\beta` in the equation above + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). Only floating point types are supported. + layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only + ``torch.strided`` (dense layout) is supported. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + + Returns: + Tensor: A 1-D tensor of size :math:`(\text{window\_length},)` containing the window. + """ + ... 
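+# Quick numerical illustration of the periodic/symmetric identity stated in the
+# hamming_window docstring above (a sketch under the assumption that torch is
+# importable at runtime; L = 5 is an arbitrary choice):
+#
+#   L = 5
+#   periodic = torch.hamming_window(L, periodic=True)
+#   symmetric = torch.hamming_window(L + 1, periodic=False)[:-1]
+#   torch.allclose(periodic, symmetric)       # -> True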
+@overload +def hamming_window(window_length: _int, periodic: _bool, alpha: _float, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + hamming_window(window_length, periodic=True, alpha=0.54, beta=0.46, *, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + Hamming window function. + + .. math:: + w[n] = \alpha - \beta\ \cos \left( \frac{2 \pi n}{N - 1} \right), + + where :math:`N` is the full window size. + + The input :attr:`window_length` is a positive integer controlling the + returned window size. :attr:`periodic` flag determines whether the returned + window trims off the last duplicate value from the symmetric window and is + ready to be used as a periodic window with functions like + :meth:`torch.stft`. Therefore, if :attr:`periodic` is true, the :math:`N` in + above formula is in fact :math:`\text{window\_length} + 1`. Also, we always have + ``torch.hamming_window(L, periodic=True)`` equal to + ``torch.hamming_window(L + 1, periodic=False)[:-1])``. + + .. note:: + If :attr:`window_length` :math:`=1`, the returned window contains a single value 1. + + .. note:: + This is a generalized version of :meth:`torch.hann_window`. + + Arguments: + window_length (int): the size of returned window + periodic (bool, optional): If True, returns a window to be used as periodic + function. If False, return a symmetric window. + alpha (float, optional): The coefficient :math:`\alpha` in the equation above + beta (float, optional): The coefficient :math:`\beta` in the equation above + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). Only floating point types are supported. + layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only + ``torch.strided`` (dense layout) is supported. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + + Returns: + Tensor: A 1-D tensor of size :math:`(\text{window\_length},)` containing the window. + """ + ... +@overload +def hamming_window(window_length: _int, periodic: _bool, alpha: _float, beta: _float, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + hamming_window(window_length, periodic=True, alpha=0.54, beta=0.46, *, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + Hamming window function. + + .. math:: + w[n] = \alpha - \beta\ \cos \left( \frac{2 \pi n}{N - 1} \right), + + where :math:`N` is the full window size. + + The input :attr:`window_length` is a positive integer controlling the + returned window size. :attr:`periodic` flag determines whether the returned + window trims off the last duplicate value from the symmetric window and is + ready to be used as a periodic window with functions like + :meth:`torch.stft`. 
Therefore, if :attr:`periodic` is true, the :math:`N` in + above formula is in fact :math:`\text{window\_length} + 1`. Also, we always have + ``torch.hamming_window(L, periodic=True)`` equal to + ``torch.hamming_window(L + 1, periodic=False)[:-1])``. + + .. note:: + If :attr:`window_length` :math:`=1`, the returned window contains a single value 1. + + .. note:: + This is a generalized version of :meth:`torch.hann_window`. + + Arguments: + window_length (int): the size of returned window + periodic (bool, optional): If True, returns a window to be used as periodic + function. If False, return a symmetric window. + alpha (float, optional): The coefficient :math:`\alpha` in the equation above + beta (float, optional): The coefficient :math:`\beta` in the equation above + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). Only floating point types are supported. + layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only + ``torch.strided`` (dense layout) is supported. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + + Returns: + Tensor: A 1-D tensor of size :math:`(\text{window\_length},)` containing the window. + """ + ... +@overload +def hann_window(window_length: _int, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + hann_window(window_length, periodic=True, *, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + Hann window function. + + .. math:: + w[n] = \frac{1}{2}\ \left[1 - \cos \left( \frac{2 \pi n}{N - 1} \right)\right] = + \sin^2 \left( \frac{\pi n}{N - 1} \right), + + where :math:`N` is the full window size. + + The input :attr:`window_length` is a positive integer controlling the + returned window size. :attr:`periodic` flag determines whether the returned + window trims off the last duplicate value from the symmetric window and is + ready to be used as a periodic window with functions like + :meth:`torch.stft`. Therefore, if :attr:`periodic` is true, the :math:`N` in + above formula is in fact :math:`\text{window\_length} + 1`. Also, we always have + ``torch.hann_window(L, periodic=True)`` equal to + ``torch.hann_window(L + 1, periodic=False)[:-1])``. + + .. note:: + If :attr:`window_length` :math:`=1`, the returned window contains a single value 1. + + Arguments: + window_length (int): the size of returned window + periodic (bool, optional): If True, returns a window to be used as periodic + function. If False, return a symmetric window. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). Only floating point types are supported. + layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only + ``torch.strided`` (dense layout) is supported. 
+ device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + + Returns: + Tensor: A 1-D tensor of size :math:`(\text{window\_length},)` containing the window + """ + ... +@overload +def hann_window(window_length: _int, periodic: _bool, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + hann_window(window_length, periodic=True, *, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + Hann window function. + + .. math:: + w[n] = \frac{1}{2}\ \left[1 - \cos \left( \frac{2 \pi n}{N - 1} \right)\right] = + \sin^2 \left( \frac{\pi n}{N - 1} \right), + + where :math:`N` is the full window size. + + The input :attr:`window_length` is a positive integer controlling the + returned window size. :attr:`periodic` flag determines whether the returned + window trims off the last duplicate value from the symmetric window and is + ready to be used as a periodic window with functions like + :meth:`torch.stft`. Therefore, if :attr:`periodic` is true, the :math:`N` in + above formula is in fact :math:`\text{window\_length} + 1`. Also, we always have + ``torch.hann_window(L, periodic=True)`` equal to + ``torch.hann_window(L + 1, periodic=False)[:-1])``. + + .. note:: + If :attr:`window_length` :math:`=1`, the returned window contains a single value 1. + + Arguments: + window_length (int): the size of returned window + periodic (bool, optional): If True, returns a window to be used as periodic + function. If False, return a symmetric window. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). Only floating point types are supported. + layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only + ``torch.strided`` (dense layout) is supported. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + + Returns: + Tensor: A 1-D tensor of size :math:`(\text{window\_length},)` containing the window + """ + ... +def hardshrink(input: Tensor, lambd: Union[Number, _complex] = 0.5, *, out: Optional[Tensor] = None) -> Tensor: ... +def heaviside(input: Tensor, values: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + heaviside(input, values, *, out=None) -> Tensor + + Computes the Heaviside step function for each element in :attr:`input`. + The Heaviside step function is defined as: + + .. math:: + \text{{heaviside}}(input, values) = \begin{cases} + 0, & \text{if input < 0}\\ + values, & \text{if input == 0}\\ + 1, & \text{if input > 0} + \end{cases} + + + Args: + input (Tensor): the input tensor. 
+ values (Tensor): The values to use where :attr:`input` is zero. + + Keyword arguments: + out (Tensor, optional): the output tensor. + + Example:: + + >>> input = torch.tensor([-1.5, 0, 2.0]) + >>> values = torch.tensor([0.5]) + >>> torch.heaviside(input, values) + tensor([0.0000, 0.5000, 1.0000]) + >>> values = torch.tensor([1.2, -2.0, 3.5]) + >>> torch.heaviside(input, values) + tensor([0., -2., 1.]) + """ + ... +def hinge_embedding_loss(input: Tensor, target: Tensor, margin: _float = 1.0, reduction: _int = 1) -> Tensor: ... +def histc(input: Tensor, bins: _int = 100, min: Union[Number, _complex] = 0, max: Union[Number, _complex] = 0, *, out: Optional[Tensor] = None) -> Tensor: + r""" + histc(input, bins=100, min=0, max=0, *, out=None) -> Tensor + + Computes the histogram of a tensor. + + The elements are sorted into equal width bins between :attr:`min` and + :attr:`max`. If :attr:`min` and :attr:`max` are both zero, the minimum and + maximum values of the data are used. + + Elements lower than min and higher than max and ``NaN`` elements are ignored. + + Args: + input (Tensor): the input tensor. + bins (int): number of histogram bins + min (Scalar): lower end of the range (inclusive) + max (Scalar): upper end of the range (inclusive) + + Keyword args: + out (Tensor, optional): the output tensor. + + Returns: + Tensor: Histogram represented as a tensor + + Example:: + + >>> torch.histc(torch.tensor([1., 2, 1]), bins=4, min=0, max=3) + tensor([ 0., 2., 1., 0.]) + """ + ... +@overload +def histogram(input: Tensor, bins: Tensor, *, weight: Optional[Tensor] = None, density: _bool = False, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.histogram: + r""" + histogram(input, bins, *, range=None, weight=None, density=False, out=None) -> (Tensor, Tensor) + + Computes a histogram of the values in a tensor. + + :attr:`bins` can be an integer or a 1D tensor. + + If :attr:`bins` is an int, it specifies the number of equal-width bins. + By default, the lower and upper range of the bins is determined by the + minimum and maximum elements of the input tensor. The :attr:`range` + argument can be provided to specify a range for the bins. + + If :attr:`bins` is a 1D tensor, it specifies the sequence of bin edges + including the rightmost edge. It should contain at least 2 elements + and its elements should be increasing. + + Args: + input (Tensor): the input tensor. + bins: int or 1D Tensor. If int, defines the number of equal-width bins. If tensor, + defines the sequence of bin edges including the rightmost edge. + + Keyword args: + range (tuple of float): Defines the range of the bins. + weight (Tensor): If provided, weight should have the same shape as input. Each value in + input contributes its associated weight towards its bin's result. + density (bool): If False, the result will contain the count (or total weight) in each bin. + If True, the result is the value of the probability density function over the bins, + normalized such that the integral over the range of the bins is 1. + out (Tensor, optional): the output tensor. (tuple, optional): The result tuple of two output tensors (hist, bin_edges). + + Returns: + hist (Tensor): 1D Tensor containing the values of the histogram. + bin_edges(Tensor): 1D Tensor containing the edges of the histogram bins. 
+ + Example:: + + >>> torch.histogram(torch.tensor([1., 2, 1]), bins=4, range=(0., 3.), weight=torch.tensor([1., 2., 4.])) + (tensor([ 0., 5., 2., 0.]), tensor([0., 0.75, 1.5, 2.25, 3.])) + >>> torch.histogram(torch.tensor([1., 2, 1]), bins=4, range=(0., 3.), weight=torch.tensor([1., 2., 4.]), density=True) + (tensor([ 0., 0.9524, 0.3810, 0.]), tensor([0., 0.75, 1.5, 2.25, 3.])) + """ + ... +@overload +def histogram(input: Tensor, bins: _int = 100, *, range: Optional[Sequence[_float]] = None, weight: Optional[Tensor] = None, density: _bool = False, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.histogram: + r""" + histogram(input, bins, *, range=None, weight=None, density=False, out=None) -> (Tensor, Tensor) + + Computes a histogram of the values in a tensor. + + :attr:`bins` can be an integer or a 1D tensor. + + If :attr:`bins` is an int, it specifies the number of equal-width bins. + By default, the lower and upper range of the bins is determined by the + minimum and maximum elements of the input tensor. The :attr:`range` + argument can be provided to specify a range for the bins. + + If :attr:`bins` is a 1D tensor, it specifies the sequence of bin edges + including the rightmost edge. It should contain at least 2 elements + and its elements should be increasing. + + Args: + input (Tensor): the input tensor. + bins: int or 1D Tensor. If int, defines the number of equal-width bins. If tensor, + defines the sequence of bin edges including the rightmost edge. + + Keyword args: + range (tuple of float): Defines the range of the bins. + weight (Tensor): If provided, weight should have the same shape as input. Each value in + input contributes its associated weight towards its bin's result. + density (bool): If False, the result will contain the count (or total weight) in each bin. + If True, the result is the value of the probability density function over the bins, + normalized such that the integral over the range of the bins is 1. + out (Tensor, optional): the output tensor. (tuple, optional): The result tuple of two output tensors (hist, bin_edges). + + Returns: + hist (Tensor): 1D Tensor containing the values of the histogram. + bin_edges(Tensor): 1D Tensor containing the edges of the histogram bins. + + Example:: + + >>> torch.histogram(torch.tensor([1., 2, 1]), bins=4, range=(0., 3.), weight=torch.tensor([1., 2., 4.])) + (tensor([ 0., 5., 2., 0.]), tensor([0., 0.75, 1.5, 2.25, 3.])) + >>> torch.histogram(torch.tensor([1., 2, 1]), bins=4, range=(0., 3.), weight=torch.tensor([1., 2., 4.]), density=True) + (tensor([ 0., 0.9524, 0.3810, 0.]), tensor([0., 0.75, 1.5, 2.25, 3.])) + """ + ... +@overload +def histogramdd(input: Tensor, bins: _int, range: Optional[Sequence[_float]] = None, weight: Optional[Tensor] = None, density: _bool = False) -> torch.return_types.histogramdd: + r""" + histogramdd(input, bins, *, range=None, weight=None, density=False, out=None) -> (Tensor, Tensor[]) + + Computes a multi-dimensional histogram of the values in a tensor. + + Interprets the elements of an input tensor whose innermost dimension has size N + as a collection of N-dimensional points. Maps each of the points into a set of + N-dimensional bins and returns the number of points (or total weight) in each bin. + + :attr:`input` must be a tensor with at least 2 dimensions. + If input has shape (M, N), each of its M rows defines a point in N-dimensional space. + If input has three or more dimensions, all but the last dimension are flattened. 
+ + Each dimension is independently associated with its own strictly increasing sequence + of bin edges. Bin edges may be specified explicitly by passing a sequence of 1D + tensors. Alternatively, bin edges may be constructed automatically by passing a + sequence of integers specifying the number of equal-width bins in each dimension. + + For each N-dimensional point in input: + - Each of its coordinates is binned independently among the bin edges + corresponding to its dimension + - Binning results are combined to identify the N-dimensional bin (if any) + into which the point falls + - If the point falls into a bin, the bin's count (or total weight) is incremented + - Points which do not fall into any bin do not contribute to the output + + :attr:`bins` can be a sequence of N 1D tensors, a sequence of N ints, or a single int. + + If :attr:`bins` is a sequence of N 1D tensors, it explicitly specifies the N sequences + of bin edges. Each 1D tensor should contain a strictly increasing sequence with at + least one element. A sequence of K bin edges defines K-1 bins, explicitly specifying + the left and right edges of all bins. Every bin is exclusive of its left edge. Only + the rightmost bin is inclusive of its right edge. + + If :attr:`bins` is a sequence of N ints, it specifies the number of equal-width bins + in each dimension. By default, the leftmost and rightmost bin edges in each dimension + are determined by the minimum and maximum elements of the input tensor in the + corresponding dimension. The :attr:`range` argument can be provided to manually + specify the leftmost and rightmost bin edges in each dimension. + + If :attr:`bins` is an int, it specifies the number of equal-width bins for all dimensions. + + .. note:: + See also :func:`torch.histogram`, which specifically computes 1D histograms. + While :func:`torch.histogramdd` infers the dimensionality of its bins and + binned values from the shape of :attr:`input`, :func:`torch.histogram` + accepts and flattens :attr:`input` of any shape. + + Args: + input (Tensor): the input tensor. + bins: Tensor[], int[], or int. + If Tensor[], defines the sequences of bin edges. + If int[], defines the number of equal-width bins in each dimension. + If int, defines the number of equal-width bins for all dimensions. + Keyword args: + range (sequence of float): Defines the leftmost and rightmost bin edges + in each dimension. + weight (Tensor): By default, each value in the input has weight 1. If a weight + tensor is passed, each N-dimensional coordinate in input + contributes its associated weight towards its bin's result. + The weight tensor should have the same shape as the :attr:`input` + tensor excluding its innermost dimension N. + density (bool): If False (default), the result will contain the count (or total weight) + in each bin. If True, each count (weight) is divided by the total count + (total weight), then divided by the volume of its associated bin. + Returns: + hist (Tensor): N-dimensional Tensor containing the values of the histogram. + bin_edges(Tensor[]): sequence of N 1D Tensors containing the bin edges. + + Example:: + >>> torch.histogramdd(torch.tensor([[0., 1.], [1., 0.], [2., 0.], [2., 2.]]), bins=[3, 3], + ... 
weight=torch.tensor([1., 2., 4., 8.])) + torch.return_types.histogramdd( + hist=tensor([[0., 1., 0.], + [2., 0., 0.], + [4., 0., 8.]]), + bin_edges=(tensor([0.0000, 0.6667, 1.3333, 2.0000]), + tensor([0.0000, 0.6667, 1.3333, 2.0000]))) + + >>> torch.histogramdd(torch.tensor([[0., 0.], [1., 1.], [2., 2.]]), bins=[2, 2], + ... range=[0., 1., 0., 1.], density=True) + torch.return_types.histogramdd( + hist=tensor([[2., 0.], + [0., 2.]]), + bin_edges=(tensor([0.0000, 0.5000, 1.0000]), + tensor([0.0000, 0.5000, 1.0000]))) + """ + ... +@overload +def histogramdd(input: Tensor, bins: _size, range: Optional[Sequence[_float]] = None, weight: Optional[Tensor] = None, density: _bool = False) -> torch.return_types.histogramdd: + r""" + histogramdd(input, bins, *, range=None, weight=None, density=False, out=None) -> (Tensor, Tensor[]) + + Computes a multi-dimensional histogram of the values in a tensor. + + Interprets the elements of an input tensor whose innermost dimension has size N + as a collection of N-dimensional points. Maps each of the points into a set of + N-dimensional bins and returns the number of points (or total weight) in each bin. + + :attr:`input` must be a tensor with at least 2 dimensions. + If input has shape (M, N), each of its M rows defines a point in N-dimensional space. + If input has three or more dimensions, all but the last dimension are flattened. + + Each dimension is independently associated with its own strictly increasing sequence + of bin edges. Bin edges may be specified explicitly by passing a sequence of 1D + tensors. Alternatively, bin edges may be constructed automatically by passing a + sequence of integers specifying the number of equal-width bins in each dimension. + + For each N-dimensional point in input: + - Each of its coordinates is binned independently among the bin edges + corresponding to its dimension + - Binning results are combined to identify the N-dimensional bin (if any) + into which the point falls + - If the point falls into a bin, the bin's count (or total weight) is incremented + - Points which do not fall into any bin do not contribute to the output + + :attr:`bins` can be a sequence of N 1D tensors, a sequence of N ints, or a single int. + + If :attr:`bins` is a sequence of N 1D tensors, it explicitly specifies the N sequences + of bin edges. Each 1D tensor should contain a strictly increasing sequence with at + least one element. A sequence of K bin edges defines K-1 bins, explicitly specifying + the left and right edges of all bins. Every bin is exclusive of its left edge. Only + the rightmost bin is inclusive of its right edge. + + If :attr:`bins` is a sequence of N ints, it specifies the number of equal-width bins + in each dimension. By default, the leftmost and rightmost bin edges in each dimension + are determined by the minimum and maximum elements of the input tensor in the + corresponding dimension. The :attr:`range` argument can be provided to manually + specify the leftmost and rightmost bin edges in each dimension. + + If :attr:`bins` is an int, it specifies the number of equal-width bins for all dimensions. + + .. note:: + See also :func:`torch.histogram`, which specifically computes 1D histograms. + While :func:`torch.histogramdd` infers the dimensionality of its bins and + binned values from the shape of :attr:`input`, :func:`torch.histogram` + accepts and flattens :attr:`input` of any shape. + + Args: + input (Tensor): the input tensor. + bins: Tensor[], int[], or int. + If Tensor[], defines the sequences of bin edges. 
+ If int[], defines the number of equal-width bins in each dimension. + If int, defines the number of equal-width bins for all dimensions. + Keyword args: + range (sequence of float): Defines the leftmost and rightmost bin edges + in each dimension. + weight (Tensor): By default, each value in the input has weight 1. If a weight + tensor is passed, each N-dimensional coordinate in input + contributes its associated weight towards its bin's result. + The weight tensor should have the same shape as the :attr:`input` + tensor excluding its innermost dimension N. + density (bool): If False (default), the result will contain the count (or total weight) + in each bin. If True, each count (weight) is divided by the total count + (total weight), then divided by the volume of its associated bin. + Returns: + hist (Tensor): N-dimensional Tensor containing the values of the histogram. + bin_edges(Tensor[]): sequence of N 1D Tensors containing the bin edges. + + Example:: + >>> torch.histogramdd(torch.tensor([[0., 1.], [1., 0.], [2., 0.], [2., 2.]]), bins=[3, 3], + ... weight=torch.tensor([1., 2., 4., 8.])) + torch.return_types.histogramdd( + hist=tensor([[0., 1., 0.], + [2., 0., 0.], + [4., 0., 8.]]), + bin_edges=(tensor([0.0000, 0.6667, 1.3333, 2.0000]), + tensor([0.0000, 0.6667, 1.3333, 2.0000]))) + + >>> torch.histogramdd(torch.tensor([[0., 0.], [1., 1.], [2., 2.]]), bins=[2, 2], + ... range=[0., 1., 0., 1.], density=True) + torch.return_types.histogramdd( + hist=tensor([[2., 0.], + [0., 2.]]), + bin_edges=(tensor([0.0000, 0.5000, 1.0000]), + tensor([0.0000, 0.5000, 1.0000]))) + """ + ... +@overload +def histogramdd(input: Tensor, bins: Union[Tuple[Tensor, ...], List[Tensor]], range: Optional[Sequence[_float]] = None, weight: Optional[Tensor] = None, density: _bool = False) -> torch.return_types.histogramdd: + r""" + histogramdd(input, bins, *, range=None, weight=None, density=False, out=None) -> (Tensor, Tensor[]) + + Computes a multi-dimensional histogram of the values in a tensor. + + Interprets the elements of an input tensor whose innermost dimension has size N + as a collection of N-dimensional points. Maps each of the points into a set of + N-dimensional bins and returns the number of points (or total weight) in each bin. + + :attr:`input` must be a tensor with at least 2 dimensions. + If input has shape (M, N), each of its M rows defines a point in N-dimensional space. + If input has three or more dimensions, all but the last dimension are flattened. + + Each dimension is independently associated with its own strictly increasing sequence + of bin edges. Bin edges may be specified explicitly by passing a sequence of 1D + tensors. Alternatively, bin edges may be constructed automatically by passing a + sequence of integers specifying the number of equal-width bins in each dimension. + + For each N-dimensional point in input: + - Each of its coordinates is binned independently among the bin edges + corresponding to its dimension + - Binning results are combined to identify the N-dimensional bin (if any) + into which the point falls + - If the point falls into a bin, the bin's count (or total weight) is incremented + - Points which do not fall into any bin do not contribute to the output + + :attr:`bins` can be a sequence of N 1D tensors, a sequence of N ints, or a single int. + + If :attr:`bins` is a sequence of N 1D tensors, it explicitly specifies the N sequences + of bin edges. Each 1D tensor should contain a strictly increasing sequence with at + least one element. 
A sequence of K bin edges defines K-1 bins, explicitly specifying + the left and right edges of all bins. Every bin is exclusive of its left edge. Only + the rightmost bin is inclusive of its right edge. + + If :attr:`bins` is a sequence of N ints, it specifies the number of equal-width bins + in each dimension. By default, the leftmost and rightmost bin edges in each dimension + are determined by the minimum and maximum elements of the input tensor in the + corresponding dimension. The :attr:`range` argument can be provided to manually + specify the leftmost and rightmost bin edges in each dimension. + + If :attr:`bins` is an int, it specifies the number of equal-width bins for all dimensions. + + .. note:: + See also :func:`torch.histogram`, which specifically computes 1D histograms. + While :func:`torch.histogramdd` infers the dimensionality of its bins and + binned values from the shape of :attr:`input`, :func:`torch.histogram` + accepts and flattens :attr:`input` of any shape. + + Args: + input (Tensor): the input tensor. + bins: Tensor[], int[], or int. + If Tensor[], defines the sequences of bin edges. + If int[], defines the number of equal-width bins in each dimension. + If int, defines the number of equal-width bins for all dimensions. + Keyword args: + range (sequence of float): Defines the leftmost and rightmost bin edges + in each dimension. + weight (Tensor): By default, each value in the input has weight 1. If a weight + tensor is passed, each N-dimensional coordinate in input + contributes its associated weight towards its bin's result. + The weight tensor should have the same shape as the :attr:`input` + tensor excluding its innermost dimension N. + density (bool): If False (default), the result will contain the count (or total weight) + in each bin. If True, each count (weight) is divided by the total count + (total weight), then divided by the volume of its associated bin. + Returns: + hist (Tensor): N-dimensional Tensor containing the values of the histogram. + bin_edges(Tensor[]): sequence of N 1D Tensors containing the bin edges. + + Example:: + >>> torch.histogramdd(torch.tensor([[0., 1.], [1., 0.], [2., 0.], [2., 2.]]), bins=[3, 3], + ... weight=torch.tensor([1., 2., 4., 8.])) + torch.return_types.histogramdd( + hist=tensor([[0., 1., 0.], + [2., 0., 0.], + [4., 0., 8.]]), + bin_edges=(tensor([0.0000, 0.6667, 1.3333, 2.0000]), + tensor([0.0000, 0.6667, 1.3333, 2.0000]))) + + >>> torch.histogramdd(torch.tensor([[0., 0.], [1., 1.], [2., 2.]]), bins=[2, 2], + ... range=[0., 1., 0., 1.], density=True) + torch.return_types.histogramdd( + hist=tensor([[2., 0.], + [0., 2.]]), + bin_edges=(tensor([0.0000, 0.5000, 1.0000]), + tensor([0.0000, 0.5000, 1.0000]))) + """ + ... +def hsmm(input: Tensor, mat2: Tensor) -> Tensor: ... +@overload +def hsplit(input: Tensor, sections: _int) -> Tuple[Tensor, ...]: + r""" + hsplit(input, indices_or_sections) -> List of Tensors + + Splits :attr:`input`, a tensor with one or more dimensions, into multiple tensors + horizontally according to :attr:`indices_or_sections`. Each split is a view of + :attr:`input`. 
+ + If :attr:`input` is one dimensional this is equivalent to calling + torch.tensor_split(input, indices_or_sections, dim=0) (the split dimension is + zero), and if :attr:`input` has two or more dimensions it's equivalent to calling + torch.tensor_split(input, indices_or_sections, dim=1) (the split dimension is 1), + except that if :attr:`indices_or_sections` is an integer it must evenly divide + the split dimension or a runtime error will be thrown. + + This function is based on NumPy's :func:`numpy.hsplit`. + + Args: + input (Tensor): tensor to split. + indices_or_sections (int or list or tuple of ints): See argument in :func:`torch.tensor_split`. + + Example:: + >>> t = torch.arange(16.0).reshape(4,4) + >>> t + tensor([[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.], + [ 8., 9., 10., 11.], + [12., 13., 14., 15.]]) + >>> torch.hsplit(t, 2) + (tensor([[ 0., 1.], + [ 4., 5.], + [ 8., 9.], + [12., 13.]]), + tensor([[ 2., 3.], + [ 6., 7.], + [10., 11.], + [14., 15.]])) + >>> torch.hsplit(t, [3, 6]) + (tensor([[ 0., 1., 2.], + [ 4., 5., 6.], + [ 8., 9., 10.], + [12., 13., 14.]]), + tensor([[ 3.], + [ 7.], + [11.], + [15.]]), + tensor([], size=(4, 0))) + """ + ... +@overload +def hsplit(input: Tensor, indices: _size) -> Tuple[Tensor, ...]: + r""" + hsplit(input, indices_or_sections) -> List of Tensors + + Splits :attr:`input`, a tensor with one or more dimensions, into multiple tensors + horizontally according to :attr:`indices_or_sections`. Each split is a view of + :attr:`input`. + + If :attr:`input` is one dimensional this is equivalent to calling + torch.tensor_split(input, indices_or_sections, dim=0) (the split dimension is + zero), and if :attr:`input` has two or more dimensions it's equivalent to calling + torch.tensor_split(input, indices_or_sections, dim=1) (the split dimension is 1), + except that if :attr:`indices_or_sections` is an integer it must evenly divide + the split dimension or a runtime error will be thrown. + + This function is based on NumPy's :func:`numpy.hsplit`. + + Args: + input (Tensor): tensor to split. + indices_or_sections (int or list or tuple of ints): See argument in :func:`torch.tensor_split`. + + Example:: + >>> t = torch.arange(16.0).reshape(4,4) + >>> t + tensor([[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.], + [ 8., 9., 10., 11.], + [12., 13., 14., 15.]]) + >>> torch.hsplit(t, 2) + (tensor([[ 0., 1.], + [ 4., 5.], + [ 8., 9.], + [12., 13.]]), + tensor([[ 2., 3.], + [ 6., 7.], + [10., 11.], + [14., 15.]])) + >>> torch.hsplit(t, [3, 6]) + (tensor([[ 0., 1., 2.], + [ 4., 5., 6.], + [ 8., 9., 10.], + [12., 13., 14.]]), + tensor([[ 3.], + [ 7.], + [11.], + [15.]]), + tensor([], size=(4, 0))) + """ + ... +def hspmm(mat1: Tensor, mat2: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + hspmm(mat1, mat2, *, out=None) -> Tensor + + Performs a matrix multiplication of a :ref:`sparse COO matrix + ` :attr:`mat1` and a strided matrix :attr:`mat2`. The + result is a (1 + 1)-dimensional :ref:`hybrid COO matrix + `. + + Args: + mat1 (Tensor): the first sparse matrix to be matrix multiplied + mat2 (Tensor): the second strided matrix to be matrix multiplied + + Keyword args: + out (Tensor, optional): the output tensor. + """ + ... +def hstack(tensors: Union[Tuple[Tensor, ...], List[Tensor]], *, out: Optional[Tensor] = None) -> Tensor: + r""" + hstack(tensors, *, out=None) -> Tensor + + Stack tensors in sequence horizontally (column wise). + + This is equivalent to concatenation along the first axis for 1-D tensors, and along the second axis for all other tensors. 
+
+    Args:
+        tensors (sequence of Tensors): sequence of tensors to concatenate
+
+    Keyword args:
+        out (Tensor, optional): the output tensor.
+
+    Example::
+
+        >>> a = torch.tensor([1, 2, 3])
+        >>> b = torch.tensor([4, 5, 6])
+        >>> torch.hstack((a,b))
+        tensor([1, 2, 3, 4, 5, 6])
+        >>> a = torch.tensor([[1],[2],[3]])
+        >>> b = torch.tensor([[4],[5],[6]])
+        >>> torch.hstack((a,b))
+        tensor([[1, 4],
+                [2, 5],
+                [3, 6]])
+    """
+    ...
+def hypot(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
+    r"""
+    hypot(input, other, *, out=None) -> Tensor
+
+    Given the legs of a right triangle, return its hypotenuse.
+
+    .. math::
+        \text{out}_{i} = \sqrt{\text{input}_{i}^{2} + \text{other}_{i}^{2}}
+
+    The shapes of ``input`` and ``other`` must be
+    :ref:`broadcastable <broadcasting-semantics>`.
+
+    Args:
+        input (Tensor): the first input tensor
+        other (Tensor): the second input tensor
+
+    Keyword args:
+        out (Tensor, optional): the output tensor.
+
+    Example::
+
+        >>> a = torch.hypot(torch.tensor([4.0]), torch.tensor([3.0, 4.0, 5.0]))
+        tensor([5.0000, 5.6569, 6.4031])
+    """
+    ...
+def i0(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
+    r"""
+    i0(input, *, out=None) -> Tensor
+
+    Alias for :func:`torch.special.i0`.
+    """
+    ...
+def i0_(input: Tensor) -> Tensor: ...
+def igamma(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
+    r"""
+    igamma(input, other, *, out=None) -> Tensor
+
+    Alias for :func:`torch.special.gammainc`.
+    """
+    ...
+def igammac(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
+    r"""
+    igammac(input, other, *, out=None) -> Tensor
+
+    Alias for :func:`torch.special.gammaincc`.
+    """
+    ...
+def imag(input: Tensor) -> Tensor:
+    r"""
+    imag(input) -> Tensor
+
+    Returns a new tensor containing imaginary values of the :attr:`self` tensor.
+    The returned tensor and :attr:`self` share the same underlying storage.
+
+    .. warning::
+        :func:`imag` is only supported for tensors with complex dtypes.
+
+    Args:
+        input (Tensor): the input tensor.
+
+    Example::
+
+        >>> x=torch.randn(4, dtype=torch.cfloat)
+        >>> x
+        tensor([(0.3100+0.3553j), (-0.5445-0.7896j), (-1.6492-0.0633j), (-0.0638-0.8119j)])
+        >>> x.imag
+        tensor([ 0.3553, -0.7896, -0.0633, -0.8119])
+    """
+    ...
+@overload
+def index_add(input: Tensor, dim: _int, index: Tensor, source: Tensor, *, alpha: Union[Number, _complex] = 1, out: Optional[Tensor] = None) -> Tensor:
+    r"""
+    index_add(input, dim, index, source, *, alpha=1, out=None) -> Tensor
+
+    See :meth:`~Tensor.index_add_` for function description.
+    """
+    ...
+@overload
+def index_add(input: Tensor, dim: Union[str, ellipsis, None], index: Tensor, source: Tensor, *, alpha: Union[Number, _complex] = 1) -> Tensor:
+    r"""
+    index_add(input, dim, index, source, *, alpha=1, out=None) -> Tensor
+
+    See :meth:`~Tensor.index_add_` for function description.
+    """
+    ...
+@overload
+def index_copy(input: Tensor, dim: _int, index: Tensor, source: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
+    r"""
+    index_copy(input, dim, index, source, *, out=None) -> Tensor
+
+    See :meth:`~Tensor.index_copy_` for function description.
+    """
+    ...
+@overload
+def index_copy(input: Tensor, dim: Union[str, ellipsis, None], index: Tensor, source: Tensor) -> Tensor:
+    r"""
+    index_copy(input, dim, index, source, *, out=None) -> Tensor
+
+    See :meth:`~Tensor.index_copy_` for function description.
+    """
+    ...
+@overload
+def index_fill(input: Tensor, dim: _int, index: Tensor, value: Tensor) -> Tensor: ...
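+# Illustrative sketch (values chosen arbitrarily) of torch.index_fill, whose
+# overloads are declared around this point: it fills the rows of ``x`` selected
+# by ``idx`` with a scalar value and leaves the remaining rows untouched.
+#
+#     import torch
+#     x = torch.zeros(3, 3)
+#     idx = torch.tensor([0, 2])
+#     y = torch.index_fill(x, 0, idx, -1.0)     # rows 0 and 2 become -1
+#     assert torch.equal(y[idx], torch.full((2, 3), -1.0))
+#     assert torch.equal(y[1], torch.zeros(3))  # row 1 keeps its zeros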
+@overload +def index_fill(input: Tensor, dim: Union[str, ellipsis, None], index: Tensor, value: Tensor) -> Tensor: ... +@overload +def index_fill(input: Tensor, dim: _int, index: Tensor, value: Union[Number, _complex]) -> Tensor: ... +@overload +def index_fill(input: Tensor, dim: Union[str, ellipsis, None], index: Tensor, value: Union[Number, _complex]) -> Tensor: ... +def index_put(input: Tensor, indices: Optional[Union[Tuple[Tensor, ...], List[Tensor]]], values: Tensor, accumulate: _bool = False) -> Tensor: ... +def index_put_(input: Tensor, indices: Optional[Union[Tuple[Tensor, ...], List[Tensor]]], values: Tensor, accumulate: _bool = False) -> Tensor: ... +def index_reduce(input: Tensor, dim: _int, index: Tensor, source: Tensor, reduce: str, *, include_self: _bool = True, out: Optional[Tensor] = None) -> Tensor: + r""" + index_reduce(input, dim, index, source, reduce, *, include_self=True, out=None) -> Tensor + + See :meth:`~Tensor.index_reduce_` for function description. + """ + ... +@overload +def index_select(input: Tensor, dim: _int, index: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + index_select(input, dim, index, *, out=None) -> Tensor + + Returns a new tensor which indexes the :attr:`input` tensor along dimension + :attr:`dim` using the entries in :attr:`index` which is a `LongTensor`. + + The returned tensor has the same number of dimensions as the original tensor + (:attr:`input`). The :attr:`dim`\ th dimension has the same size as the length + of :attr:`index`; other dimensions have the same size as in the original tensor. + + .. note:: The returned tensor does **not** use the same storage as the original + tensor. If :attr:`out` has a different shape than expected, we + silently change it to the correct shape, reallocating the underlying + storage if necessary. + + Args: + input (Tensor): the input tensor. + dim (int): the dimension in which we index + index (IntTensor or LongTensor): the 1-D tensor containing the indices to index + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> x = torch.randn(3, 4) + >>> x + tensor([[ 0.1427, 0.0231, -0.5414, -1.0009], + [-0.4664, 0.2647, -0.1228, -1.1068], + [-1.1734, -0.6571, 0.7230, -0.6004]]) + >>> indices = torch.tensor([0, 2]) + >>> torch.index_select(x, 0, indices) + tensor([[ 0.1427, 0.0231, -0.5414, -1.0009], + [-1.1734, -0.6571, 0.7230, -0.6004]]) + >>> torch.index_select(x, 1, indices) + tensor([[ 0.1427, -0.5414], + [-0.4664, -0.1228], + [-1.1734, 0.7230]]) + """ + ... +@overload +def index_select(input: Tensor, dim: Union[str, ellipsis, None], index: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + index_select(input, dim, index, *, out=None) -> Tensor + + Returns a new tensor which indexes the :attr:`input` tensor along dimension + :attr:`dim` using the entries in :attr:`index` which is a `LongTensor`. + + The returned tensor has the same number of dimensions as the original tensor + (:attr:`input`). The :attr:`dim`\ th dimension has the same size as the length + of :attr:`index`; other dimensions have the same size as in the original tensor. + + .. note:: The returned tensor does **not** use the same storage as the original + tensor. If :attr:`out` has a different shape than expected, we + silently change it to the correct shape, reallocating the underlying + storage if necessary. + + Args: + input (Tensor): the input tensor. 
+ dim (int): the dimension in which we index + index (IntTensor or LongTensor): the 1-D tensor containing the indices to index + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> x = torch.randn(3, 4) + >>> x + tensor([[ 0.1427, 0.0231, -0.5414, -1.0009], + [-0.4664, 0.2647, -0.1228, -1.1068], + [-1.1734, -0.6571, 0.7230, -0.6004]]) + >>> indices = torch.tensor([0, 2]) + >>> torch.index_select(x, 0, indices) + tensor([[ 0.1427, 0.0231, -0.5414, -1.0009], + [-1.1734, -0.6571, 0.7230, -0.6004]]) + >>> torch.index_select(x, 1, indices) + tensor([[ 0.1427, -0.5414], + [-0.4664, -0.1228], + [-1.1734, 0.7230]]) + """ + ... +def indices_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + Performs the same operation as :func:`torch.indices`, but all output tensors + are freshly created instead of aliasing the input. + """ + ... +def init_num_threads() -> None: ... +def inner(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + inner(input, other, *, out=None) -> Tensor + + Computes the dot product for 1D tensors. For higher dimensions, sums the product + of elements from :attr:`input` and :attr:`other` along their last dimension. + + .. note:: + + If either :attr:`input` or :attr:`other` is a scalar, the result is equivalent + to `torch.mul(input, other)`. + + If both :attr:`input` and :attr:`other` are non-scalars, the size of their last + dimension must match and the result is equivalent to `torch.tensordot(input, + other, dims=([-1], [-1]))` + + Args: + input (Tensor): First input tensor + other (Tensor): Second input tensor + + Keyword args: + out (Tensor, optional): Optional output tensor to write result into. The output + shape is `input.shape[:-1] + other.shape[:-1]`. + + Example:: + + # Dot product + >>> torch.inner(torch.tensor([1, 2, 3]), torch.tensor([0, 2, 1])) + tensor(7) + + # Multidimensional input tensors + >>> a = torch.randn(2, 3) + >>> a + tensor([[0.8173, 1.0874, 1.1784], + [0.3279, 0.1234, 2.7894]]) + >>> b = torch.randn(2, 4, 3) + >>> b + tensor([[[-0.4682, -0.7159, 0.1506], + [ 0.4034, -0.3657, 1.0387], + [ 0.9892, -0.6684, 0.1774], + [ 0.9482, 1.3261, 0.3917]], + + [[ 0.4537, 0.7493, 1.1724], + [ 0.2291, 0.5749, -0.2267], + [-0.7920, 0.3607, -0.3701], + [ 1.3666, -0.5850, -1.7242]]]) + >>> torch.inner(a, b) + tensor([[[-0.9837, 1.1560, 0.2907, 2.6785], + [ 2.5671, 0.5452, -0.6912, -1.5509]], + + [[ 0.1782, 2.9843, 0.7366, 1.5672], + [ 3.5115, -0.4864, -1.2476, -4.4337]]]) + + # Scalar input + >>> torch.inner(a, torch.tensor(2)) + tensor([[1.6347, 2.1748, 2.3567], + [0.6558, 0.2469, 5.5787]]) + """ + ... +def instance_norm(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], running_mean: Optional[Tensor], running_var: Optional[Tensor], use_input_stats: _bool, momentum: _float, eps: _float, cudnn_enabled: _bool) -> Tensor: ... +def int_repr(input: Tensor) -> Tensor: ... +def inverse(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + inverse(input, *, out=None) -> Tensor + + Alias for :func:`torch.linalg.inv` + """ + ... +def is_complex(input: Tensor) -> _bool: + r""" + is_complex(input) -> (bool) + + Returns True if the data type of :attr:`input` is a complex data type i.e., + one of ``torch.complex64``, and ``torch.complex128``. + + Args: + input (Tensor): the input tensor. + """ + ... +def is_conj(input: Tensor) -> _bool: + r""" + is_conj(input) -> (bool) + + Returns True if the :attr:`input` is a conjugated tensor, i.e. its conjugate bit is set to `True`. 
+ + Args: + input (Tensor): the input tensor. + """ + ... +def is_distributed(input: Tensor) -> _bool: ... +def is_floating_point(input: Tensor) -> _bool: + r""" + is_floating_point(input) -> (bool) + + Returns True if the data type of :attr:`input` is a floating point data type i.e., + one of ``torch.float64``, ``torch.float32``, ``torch.float16``, and ``torch.bfloat16``. + + Args: + input (Tensor): the input tensor. + """ + ... +def is_grad_enabled() -> _bool: + r""" + is_grad_enabled() -> (bool) + + Returns True if grad mode is currently enabled. + """ + ... +def is_inference(input: Tensor) -> _bool: + r""" + is_inference(input) -> (bool) + + Returns True if :attr:`input` is an inference tensor. + + A non-view tensor is an inference tensor if and only if it was + allocated during inference mode. A view tensor is an inference + tensor if and only if the tensor it is a view of is an inference tensor. + + For details on inference mode please see + `Inference Mode `_. + + Args: + input (Tensor): the input tensor. + """ + ... +def is_inference_mode_enabled() -> _bool: + r""" + is_inference_mode_enabled() -> (bool) + + Returns True if inference mode is currently enabled. + """ + ... +def is_neg(input: Tensor) -> _bool: ... +def is_nonzero(input: Tensor) -> _bool: + r""" + is_nonzero(input) -> (bool) + + Returns True if the :attr:`input` is a single element tensor which is not equal to zero + after type conversions. + i.e. not equal to ``torch.tensor([0.])`` or ``torch.tensor([0])`` or + ``torch.tensor([False])``. + Throws a ``RuntimeError`` if ``torch.numel() != 1`` (even in case + of sparse tensors). + + Args: + input (Tensor): the input tensor. + + Examples:: + + >>> torch.is_nonzero(torch.tensor([0.])) + False + >>> torch.is_nonzero(torch.tensor([1.5])) + True + >>> torch.is_nonzero(torch.tensor([False])) + False + >>> torch.is_nonzero(torch.tensor([3])) + True + >>> torch.is_nonzero(torch.tensor([1, 3, 5])) + Traceback (most recent call last): + ... + RuntimeError: bool value of Tensor with more than one value is ambiguous + >>> torch.is_nonzero(torch.tensor([])) + Traceback (most recent call last): + ... + RuntimeError: bool value of Tensor with no values is ambiguous + """ + ... +def is_same_size(input: Tensor, other: Tensor) -> _bool: ... +def is_signed(input: Tensor) -> _bool: ... +def is_vulkan_available() -> _bool: ... +def isclose(input: Tensor, other: Tensor, rtol: _float = 1e-05, atol: _float = 1e-08, equal_nan: _bool = False) -> Tensor: + r""" + isclose(input, other, rtol=1e-05, atol=1e-08, equal_nan=False) -> Tensor + + Returns a new tensor with boolean elements representing if each element of + :attr:`input` is "close" to the corresponding element of :attr:`other`. + Closeness is defined as: + + .. math:: + \lvert \text{input} - \text{other} \rvert \leq \texttt{atol} + \texttt{rtol} \times \lvert \text{other} \rvert + + + where :attr:`input` and :attr:`other` are finite. Where :attr:`input` + and/or :attr:`other` are nonfinite they are close if and only if + they are equal, with NaNs being considered equal to each other when + :attr:`equal_nan` is True. + + Args: + input (Tensor): first tensor to compare + other (Tensor): second tensor to compare + atol (float, optional): absolute tolerance. Default: 1e-08 + rtol (float, optional): relative tolerance. Default: 1e-05 + equal_nan (bool, optional): if ``True``, then two ``NaN`` s will be considered equal. 
Default: ``False`` + + Examples:: + + >>> torch.isclose(torch.tensor((1., 2, 3)), torch.tensor((1 + 1e-10, 3, 4))) + tensor([ True, False, False]) + >>> torch.isclose(torch.tensor((float('inf'), 4)), torch.tensor((float('inf'), 6)), rtol=.5) + tensor([True, True]) + """ + ... +def isfinite(input: Tensor) -> Tensor: + r""" + isfinite(input) -> Tensor + + Returns a new tensor with boolean elements representing if each element is `finite` or not. + + Real values are finite when they are not NaN, negative infinity, or infinity. + Complex values are finite when both their real and imaginary parts are finite. + + Args: + input (Tensor): the input tensor. + + Returns: + A boolean tensor that is True where :attr:`input` is finite and False elsewhere + + Example:: + + >>> torch.isfinite(torch.tensor([1, float('inf'), 2, float('-inf'), float('nan')])) + tensor([True, False, True, False, False]) + """ + ... +@overload +def isin(elements: Tensor, test_elements: Tensor, *, assume_unique: _bool = False, invert: _bool = False, out: Optional[Tensor] = None) -> Tensor: + r""" + isin(elements, test_elements, *, assume_unique=False, invert=False) -> Tensor + + Tests if each element of :attr:`elements` is in :attr:`test_elements`. Returns + a boolean tensor of the same shape as :attr:`elements` that is True for elements + in :attr:`test_elements` and False otherwise. + + .. note:: + One of :attr:`elements` or :attr:`test_elements` can be a scalar, but not both. + + Args: + elements (Tensor or Scalar): Input elements + test_elements (Tensor or Scalar): Values against which to test for each input element + assume_unique (bool, optional): If True, assumes both :attr:`elements` and + :attr:`test_elements` contain unique elements, which can speed up the + calculation. Default: False + invert (bool, optional): If True, inverts the boolean return tensor, resulting in True + values for elements *not* in :attr:`test_elements`. Default: False + + Returns: + A boolean tensor of the same shape as :attr:`elements` that is True for elements in + :attr:`test_elements` and False otherwise + + Example: + >>> torch.isin(torch.tensor([[1, 2], [3, 4]]), torch.tensor([2, 3])) + tensor([[False, True], + [ True, False]]) + """ + ... +@overload +def isin(element: Union[Number, _complex], test_elements: Tensor, *, assume_unique: _bool = False, invert: _bool = False, out: Optional[Tensor] = None) -> Tensor: + r""" + isin(elements, test_elements, *, assume_unique=False, invert=False) -> Tensor + + Tests if each element of :attr:`elements` is in :attr:`test_elements`. Returns + a boolean tensor of the same shape as :attr:`elements` that is True for elements + in :attr:`test_elements` and False otherwise. + + .. note:: + One of :attr:`elements` or :attr:`test_elements` can be a scalar, but not both. + + Args: + elements (Tensor or Scalar): Input elements + test_elements (Tensor or Scalar): Values against which to test for each input element + assume_unique (bool, optional): If True, assumes both :attr:`elements` and + :attr:`test_elements` contain unique elements, which can speed up the + calculation. Default: False + invert (bool, optional): If True, inverts the boolean return tensor, resulting in True + values for elements *not* in :attr:`test_elements`. 
Default: False + + Returns: + A boolean tensor of the same shape as :attr:`elements` that is True for elements in + :attr:`test_elements` and False otherwise + + Example: + >>> torch.isin(torch.tensor([[1, 2], [3, 4]]), torch.tensor([2, 3])) + tensor([[False, True], + [ True, False]]) + """ + ... +@overload +def isin(elements: Tensor, test_element: Union[Number, _complex], *, assume_unique: _bool = False, invert: _bool = False, out: Optional[Tensor] = None) -> Tensor: + r""" + isin(elements, test_elements, *, assume_unique=False, invert=False) -> Tensor + + Tests if each element of :attr:`elements` is in :attr:`test_elements`. Returns + a boolean tensor of the same shape as :attr:`elements` that is True for elements + in :attr:`test_elements` and False otherwise. + + .. note:: + One of :attr:`elements` or :attr:`test_elements` can be a scalar, but not both. + + Args: + elements (Tensor or Scalar): Input elements + test_elements (Tensor or Scalar): Values against which to test for each input element + assume_unique (bool, optional): If True, assumes both :attr:`elements` and + :attr:`test_elements` contain unique elements, which can speed up the + calculation. Default: False + invert (bool, optional): If True, inverts the boolean return tensor, resulting in True + values for elements *not* in :attr:`test_elements`. Default: False + + Returns: + A boolean tensor of the same shape as :attr:`elements` that is True for elements in + :attr:`test_elements` and False otherwise + + Example: + >>> torch.isin(torch.tensor([[1, 2], [3, 4]]), torch.tensor([2, 3])) + tensor([[False, True], + [ True, False]]) + """ + ... +def isinf(input: Tensor) -> Tensor: + r""" + isinf(input) -> Tensor + + Tests if each element of :attr:`input` is infinite + (positive or negative infinity) or not. + + .. note:: + Complex values are infinite when their real or imaginary part is + infinite. + + Args: + input (Tensor): the input tensor. + + Returns: + A boolean tensor that is True where :attr:`input` is infinite and False elsewhere + + Example:: + + >>> torch.isinf(torch.tensor([1, float('inf'), 2, float('-inf'), float('nan')])) + tensor([False, True, False, True, False]) + """ + ... +def isnan(input: Tensor) -> Tensor: + r""" + isnan(input) -> Tensor + + Returns a new tensor with boolean elements representing if each element of :attr:`input` + is NaN or not. Complex values are considered NaN when either their real + and/or imaginary part is NaN. + + Arguments: + input (Tensor): the input tensor. + + Returns: + A boolean tensor that is True where :attr:`input` is NaN and False elsewhere + + Example:: + + >>> torch.isnan(torch.tensor([1, float('nan'), 2])) + tensor([False, True, False]) + """ + ... +def isneginf(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + isneginf(input, *, out=None) -> Tensor + Tests if each element of :attr:`input` is negative infinity or not. + + Args: + input (Tensor): the input tensor. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.tensor([-float('inf'), float('inf'), 1.2]) + >>> torch.isneginf(a) + tensor([ True, False, False]) + """ + ... +def isposinf(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + isposinf(input, *, out=None) -> Tensor + Tests if each element of :attr:`input` is positive infinity or not. + + Args: + input (Tensor): the input tensor. + + Keyword args: + out (Tensor, optional): the output tensor. 
+ + Example:: + + >>> a = torch.tensor([-float('inf'), float('inf'), 1.2]) + >>> torch.isposinf(a) + tensor([False, True, False]) + """ + ... +def isreal(input: Tensor) -> Tensor: + r""" + isreal(input) -> Tensor + + Returns a new tensor with boolean elements representing if each element of :attr:`input` is real-valued or not. + All real-valued types are considered real. Complex values are considered real when their imaginary part is 0. + + Arguments: + input (Tensor): the input tensor. + + Returns: + A boolean tensor that is True where :attr:`input` is real and False elsewhere + + Example:: + + >>> torch.isreal(torch.tensor([1, 1+1j, 2+0j])) + tensor([True, False, True]) + """ + ... +def istft(input: Tensor, n_fft: _int, hop_length: Optional[_int] = None, win_length: Optional[_int] = None, window: Optional[Tensor] = None, center: _bool = True, normalized: _bool = False, onesided: Optional[_bool] = None, length: Optional[_int] = None, return_complex: _bool = False) -> Tensor: ... +@overload +def kaiser_window(window_length: _int, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + kaiser_window(window_length, periodic=True, beta=12.0, *, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + Computes the Kaiser window with window length :attr:`window_length` and shape parameter :attr:`beta`. + + Let I_0 be the zeroth order modified Bessel function of the first kind (see :func:`torch.i0`) and + ``N = L - 1`` if :attr:`periodic` is False and ``L`` if :attr:`periodic` is True, + where ``L`` is the :attr:`window_length`. This function computes: + + .. math:: + out_i = I_0 \left( \beta \sqrt{1 - \left( {\frac{i - N/2}{N/2}} \right) ^2 } \right) / I_0( \beta ) + + Calling ``torch.kaiser_window(L, B, periodic=True)`` is equivalent to calling + ``torch.kaiser_window(L + 1, B, periodic=False)[:-1])``. + The :attr:`periodic` argument is intended as a helpful shorthand + to produce a periodic window as input to functions like :func:`torch.stft`. + + .. note:: + If :attr:`window_length` is one, then the returned window is a single element tensor containing a one. + + + Args: + window_length (int): length of the window. + periodic (bool, optional): If True, returns a periodic window suitable for use in spectral analysis. + If False, returns a symmetric window suitable for use in filter design. + beta (float, optional): shape parameter for the window. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only + ``torch.strided`` (dense layout) is supported. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + """ + ... 
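+# Illustrative sketch (window length and beta chosen arbitrarily) of the
+# periodic/symmetric relationship stated in the kaiser_window docstring above:
+# a periodic window of length L equals a symmetric window of length L + 1 with
+# its last sample dropped.
+#
+#     import torch
+#     L, beta = 8, 12.0
+#     periodic = torch.kaiser_window(L, periodic=True, beta=beta)
+#     symmetric = torch.kaiser_window(L + 1, periodic=False, beta=beta)
+#     assert torch.allclose(periodic, symmetric[:-1])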
+@overload +def kaiser_window(window_length: _int, periodic: _bool, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + kaiser_window(window_length, periodic=True, beta=12.0, *, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + Computes the Kaiser window with window length :attr:`window_length` and shape parameter :attr:`beta`. + + Let I_0 be the zeroth order modified Bessel function of the first kind (see :func:`torch.i0`) and + ``N = L - 1`` if :attr:`periodic` is False and ``L`` if :attr:`periodic` is True, + where ``L`` is the :attr:`window_length`. This function computes: + + .. math:: + out_i = I_0 \left( \beta \sqrt{1 - \left( {\frac{i - N/2}{N/2}} \right) ^2 } \right) / I_0( \beta ) + + Calling ``torch.kaiser_window(L, B, periodic=True)`` is equivalent to calling + ``torch.kaiser_window(L + 1, B, periodic=False)[:-1])``. + The :attr:`periodic` argument is intended as a helpful shorthand + to produce a periodic window as input to functions like :func:`torch.stft`. + + .. note:: + If :attr:`window_length` is one, then the returned window is a single element tensor containing a one. + + + Args: + window_length (int): length of the window. + periodic (bool, optional): If True, returns a periodic window suitable for use in spectral analysis. + If False, returns a symmetric window suitable for use in filter design. + beta (float, optional): shape parameter for the window. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only + ``torch.strided`` (dense layout) is supported. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + """ + ... +@overload +def kaiser_window(window_length: _int, periodic: _bool, beta: _float, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + kaiser_window(window_length, periodic=True, beta=12.0, *, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + Computes the Kaiser window with window length :attr:`window_length` and shape parameter :attr:`beta`. + + Let I_0 be the zeroth order modified Bessel function of the first kind (see :func:`torch.i0`) and + ``N = L - 1`` if :attr:`periodic` is False and ``L`` if :attr:`periodic` is True, + where ``L`` is the :attr:`window_length`. This function computes: + + .. math:: + out_i = I_0 \left( \beta \sqrt{1 - \left( {\frac{i - N/2}{N/2}} \right) ^2 } \right) / I_0( \beta ) + + Calling ``torch.kaiser_window(L, B, periodic=True)`` is equivalent to calling + ``torch.kaiser_window(L + 1, B, periodic=False)[:-1])``. 
+ The :attr:`periodic` argument is intended as a helpful shorthand + to produce a periodic window as input to functions like :func:`torch.stft`. + + .. note:: + If :attr:`window_length` is one, then the returned window is a single element tensor containing a one. + + + Args: + window_length (int): length of the window. + periodic (bool, optional): If True, returns a periodic window suitable for use in spectral analysis. + If False, returns a symmetric window suitable for use in filter design. + beta (float, optional): shape parameter for the window. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only + ``torch.strided`` (dense layout) is supported. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + """ + ... +def kl_div(input: Tensor, target: Tensor, reduction: _int = 1, *, log_target: _bool = False) -> Tensor: ... +def kron(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + kron(input, other, *, out=None) -> Tensor + + Computes the Kronecker product, denoted by :math:`\otimes`, of :attr:`input` and :attr:`other`. + + If :attr:`input` is a :math:`(a_0 \times a_1 \times \dots \times a_n)` tensor and :attr:`other` is a + :math:`(b_0 \times b_1 \times \dots \times b_n)` tensor, the result will be a + :math:`(a_0*b_0 \times a_1*b_1 \times \dots \times a_n*b_n)` tensor with the following entries: + + .. math:: + (\text{input} \otimes \text{other})_{k_0, k_1, \dots, k_n} = + \text{input}_{i_0, i_1, \dots, i_n} * \text{other}_{j_0, j_1, \dots, j_n}, + + where :math:`k_t = i_t * b_t + j_t` for :math:`0 \leq t \leq n`. + If one tensor has fewer dimensions than the other it is unsqueezed until it has the same number of dimensions. + + Supports real-valued and complex-valued inputs. + + .. note:: + This function generalizes the typical definition of the Kronecker product for two matrices to two tensors, + as described above. When :attr:`input` is a :math:`(m \times n)` matrix and :attr:`other` is a + :math:`(p \times q)` matrix, the result will be a :math:`(p*m \times q*n)` block matrix: + + .. math:: + \mathbf{A} \otimes \mathbf{B}=\begin{bmatrix} + a_{11} \mathbf{B} & \cdots & a_{1 n} \mathbf{B} \\ + \vdots & \ddots & \vdots \\ + a_{m 1} \mathbf{B} & \cdots & a_{m n} \mathbf{B} \end{bmatrix} + + where :attr:`input` is :math:`\mathbf{A}` and :attr:`other` is :math:`\mathbf{B}`. + + Arguments: + input (Tensor) + other (Tensor) + + Keyword args: + out (Tensor, optional): The output tensor. Ignored if ``None``. Default: ``None`` + + Examples:: + + >>> mat1 = torch.eye(2) + >>> mat2 = torch.ones(2, 2) + >>> torch.kron(mat1, mat2) + tensor([[1., 1., 0., 0.], + [1., 1., 0., 0.], + [0., 0., 1., 1.], + [0., 0., 1., 1.]]) + + >>> mat1 = torch.eye(2) + >>> mat2 = torch.arange(1, 5).reshape(2, 2) + >>> torch.kron(mat1, mat2) + tensor([[1., 2., 0., 0.], + [3., 4., 0., 0.], + [0., 0., 1., 2.], + [0., 0., 3., 4.]]) + """ + ... 
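+# Illustrative sketch (small arbitrary matrices) of the block structure of
+# torch.kron described above: the result has shape (m*p, n*q) and its (i, j)
+# block equals input[i, j] * other.
+#
+#     import torch
+#     A = torch.tensor([[1., 2.], [3., 4.]])
+#     B = torch.eye(2)
+#     K = torch.kron(A, B)
+#     assert K.shape == (A.shape[0] * B.shape[0], A.shape[1] * B.shape[1])
+#     assert torch.equal(K[:2, :2], A[0, 0] * B)   # top-left block is a_00 * B
+#     assert torch.equal(K[2:, 2:], A[1, 1] * B)   # bottom-right block is a_11 * B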
+@overload +def kthvalue(input: Tensor, k: _int, dim: _int = -1, keepdim: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.kthvalue: + r""" + kthvalue(input, k, dim=None, keepdim=False, *, out=None) -> (Tensor, LongTensor) + + Returns a namedtuple ``(values, indices)`` where ``values`` is the :attr:`k` th + smallest element of each row of the :attr:`input` tensor in the given dimension + :attr:`dim`. And ``indices`` is the index location of each element found. + + If :attr:`dim` is not given, the last dimension of the `input` is chosen. + + If :attr:`keepdim` is ``True``, both the :attr:`values` and :attr:`indices` tensors + are the same size as :attr:`input`, except in the dimension :attr:`dim` where + they are of size 1. Otherwise, :attr:`dim` is squeezed + (see :func:`torch.squeeze`), resulting in both the :attr:`values` and + :attr:`indices` tensors having 1 fewer dimension than the :attr:`input` tensor. + + .. note:: + When :attr:`input` is a CUDA tensor and there are multiple valid + :attr:`k` th values, this function may nondeterministically return + :attr:`indices` for any of them. + + Args: + input (Tensor): the input tensor. + k (int): k for the k-th smallest element + dim (int, optional): the dimension to find the kth value along + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + + Keyword args: + out (tuple, optional): the output tuple of (Tensor, LongTensor) + can be optionally given to be used as output buffers + + Example:: + + >>> x = torch.arange(1., 6.) + >>> x + tensor([ 1., 2., 3., 4., 5.]) + >>> torch.kthvalue(x, 4) + torch.return_types.kthvalue(values=tensor(4.), indices=tensor(3)) + + >>> x=torch.arange(1.,7.).resize_(2,3) + >>> x + tensor([[ 1., 2., 3.], + [ 4., 5., 6.]]) + >>> torch.kthvalue(x, 2, 0, True) + torch.return_types.kthvalue(values=tensor([[4., 5., 6.]]), indices=tensor([[1, 1, 1]])) + """ + ... +@overload +def kthvalue(input: Tensor, k: _int, dim: Union[str, ellipsis, None], keepdim: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.kthvalue: + r""" + kthvalue(input, k, dim=None, keepdim=False, *, out=None) -> (Tensor, LongTensor) + + Returns a namedtuple ``(values, indices)`` where ``values`` is the :attr:`k` th + smallest element of each row of the :attr:`input` tensor in the given dimension + :attr:`dim`. And ``indices`` is the index location of each element found. + + If :attr:`dim` is not given, the last dimension of the `input` is chosen. + + If :attr:`keepdim` is ``True``, both the :attr:`values` and :attr:`indices` tensors + are the same size as :attr:`input`, except in the dimension :attr:`dim` where + they are of size 1. Otherwise, :attr:`dim` is squeezed + (see :func:`torch.squeeze`), resulting in both the :attr:`values` and + :attr:`indices` tensors having 1 fewer dimension than the :attr:`input` tensor. + + .. note:: + When :attr:`input` is a CUDA tensor and there are multiple valid + :attr:`k` th values, this function may nondeterministically return + :attr:`indices` for any of them. + + Args: + input (Tensor): the input tensor. + k (int): k for the k-th smallest element + dim (int, optional): the dimension to find the kth value along + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + + Keyword args: + out (tuple, optional): the output tuple of (Tensor, LongTensor) + can be optionally given to be used as output buffers + + Example:: + + >>> x = torch.arange(1., 6.) 
+ >>> x + tensor([ 1., 2., 3., 4., 5.]) + >>> torch.kthvalue(x, 4) + torch.return_types.kthvalue(values=tensor(4.), indices=tensor(3)) + + >>> x=torch.arange(1.,7.).resize_(2,3) + >>> x + tensor([[ 1., 2., 3.], + [ 4., 5., 6.]]) + >>> torch.kthvalue(x, 2, 0, True) + torch.return_types.kthvalue(values=tensor([[4., 5., 6.]]), indices=tensor([[1, 1, 1]])) + """ + ... +def layer_norm(input: Tensor, normalized_shape: Sequence[Union[_int, SymInt]], weight: Optional[Tensor] = None, bias: Optional[Tensor] = None, eps: _float = 1e-05, cudnn_enable: _bool = True) -> Tensor: ... +def lcm(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + lcm(input, other, *, out=None) -> Tensor + + Computes the element-wise least common multiple (LCM) of :attr:`input` and :attr:`other`. + + Both :attr:`input` and :attr:`other` must have integer types. + + .. note:: + This defines :math:`lcm(0, 0) = 0` and :math:`lcm(0, a) = 0`. + + Args: + input (Tensor): the input tensor. + other (Tensor): the second input tensor + + Keyword arguments: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.tensor([5, 10, 15]) + >>> b = torch.tensor([3, 4, 5]) + >>> torch.lcm(a, b) + tensor([15, 20, 15]) + >>> c = torch.tensor([3]) + >>> torch.lcm(a, c) + tensor([15, 30, 15]) + """ + ... +def lcm_(input: Tensor, other: Tensor) -> Tensor: ... +def ldexp(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + ldexp(input, other, *, out=None) -> Tensor + + Multiplies :attr:`input` by 2 ** :attr:`other`. + + .. math:: + \text{{out}}_i = \text{{input}}_i * 2^\text{{other}}_i + + + Typically this function is used to construct floating point numbers by multiplying + mantissas in :attr:`input` with integral powers of two created from the exponents + in :attr:`other`. + + Args: + input (Tensor): the input tensor. + other (Tensor): a tensor of exponents, typically integers. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> torch.ldexp(torch.tensor([1.]), torch.tensor([1])) + tensor([2.]) + >>> torch.ldexp(torch.tensor([1.0]), torch.tensor([1, 2, 3, 4])) + tensor([ 2., 4., 8., 16.]) + """ + ... +def ldexp_(input: Tensor, other: Tensor) -> Tensor: ... +@overload +def le(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + le(input, other, *, out=None) -> Tensor + + Computes :math:`\text{input} \leq \text{other}` element-wise. + + + The second argument can be a number or a tensor whose shape is + :ref:`broadcastable ` with the first argument. + + Args: + input (Tensor): the tensor to compare + other (Tensor or Scalar): the tensor or value to compare + + Keyword args: + out (Tensor, optional): the output tensor. + + Returns: + A boolean tensor that is True where :attr:`input` is less than or equal to + :attr:`other` and False elsewhere + + Example:: + + >>> torch.le(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]])) + tensor([[True, False], [True, True]]) + """ + ... +@overload +def le(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: + r""" + le(input, other, *, out=None) -> Tensor + + Computes :math:`\text{input} \leq \text{other}` element-wise. + + + The second argument can be a number or a tensor whose shape is + :ref:`broadcastable ` with the first argument. + + Args: + input (Tensor): the tensor to compare + other (Tensor or Scalar): the tensor or value to compare + + Keyword args: + out (Tensor, optional): the output tensor. 
+ + Returns: + A boolean tensor that is True where :attr:`input` is less than or equal to + :attr:`other` and False elsewhere + + Example:: + + >>> torch.le(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]])) + tensor([[True, False], [True, True]]) + """ + ... +@overload +def lerp(input: Tensor, end: Tensor, weight: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + lerp(input, end, weight, *, out=None) + + Does a linear interpolation of two tensors :attr:`start` (given by :attr:`input`) and :attr:`end` based + on a scalar or tensor :attr:`weight` and returns the resulting :attr:`out` tensor. + + .. math:: + \text{out}_i = \text{start}_i + \text{weight}_i \times (\text{end}_i - \text{start}_i) + + The shapes of :attr:`start` and :attr:`end` must be + :ref:`broadcastable `. If :attr:`weight` is a tensor, then + the shapes of :attr:`weight`, :attr:`start`, and :attr:`end` must be :ref:`broadcastable `. + + Args: + input (Tensor): the tensor with the starting points + end (Tensor): the tensor with the ending points + weight (float or tensor): the weight for the interpolation formula + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> start = torch.arange(1., 5.) + >>> end = torch.empty(4).fill_(10) + >>> start + tensor([ 1., 2., 3., 4.]) + >>> end + tensor([ 10., 10., 10., 10.]) + >>> torch.lerp(start, end, 0.5) + tensor([ 5.5000, 6.0000, 6.5000, 7.0000]) + >>> torch.lerp(start, end, torch.full_like(start, 0.5)) + tensor([ 5.5000, 6.0000, 6.5000, 7.0000]) + """ + ... +@overload +def lerp(input: Tensor, end: Tensor, weight: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: + r""" + lerp(input, end, weight, *, out=None) + + Does a linear interpolation of two tensors :attr:`start` (given by :attr:`input`) and :attr:`end` based + on a scalar or tensor :attr:`weight` and returns the resulting :attr:`out` tensor. + + .. math:: + \text{out}_i = \text{start}_i + \text{weight}_i \times (\text{end}_i - \text{start}_i) + + The shapes of :attr:`start` and :attr:`end` must be + :ref:`broadcastable `. If :attr:`weight` is a tensor, then + the shapes of :attr:`weight`, :attr:`start`, and :attr:`end` must be :ref:`broadcastable `. + + Args: + input (Tensor): the tensor with the starting points + end (Tensor): the tensor with the ending points + weight (float or tensor): the weight for the interpolation formula + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> start = torch.arange(1., 5.) + >>> end = torch.empty(4).fill_(10) + >>> start + tensor([ 1., 2., 3., 4.]) + >>> end + tensor([ 10., 10., 10., 10.]) + >>> torch.lerp(start, end, 0.5) + tensor([ 5.5000, 6.0000, 6.5000, 7.0000]) + >>> torch.lerp(start, end, torch.full_like(start, 0.5)) + tensor([ 5.5000, 6.0000, 6.5000, 7.0000]) + """ + ... +@overload +def less(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + less(input, other, *, out=None) -> Tensor + + Alias for :func:`torch.lt`. + """ + ... +@overload +def less(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: + r""" + less(input, other, *, out=None) -> Tensor + + Alias for :func:`torch.lt`. + """ + ... +@overload +def less_equal(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + less_equal(input, other, *, out=None) -> Tensor + + Alias for :func:`torch.le`. + """ + ... 
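+# Illustrative sketch (arbitrary values) of the scalar overload of
+# torch.less_equal declared below, which is an alias for torch.le and
+# broadcasts the scalar against the tensor:
+#
+#     import torch
+#     t = torch.tensor([1, 2, 3, 4])
+#     mask = torch.less_equal(t, 2)              # same result as torch.le(t, 2)
+#     assert mask.tolist() == [True, True, False, False]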
+@overload +def less_equal(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: + r""" + less_equal(input, other, *, out=None) -> Tensor + + Alias for :func:`torch.le`. + """ + ... +def lgamma(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + lgamma(input, *, out=None) -> Tensor + + Computes the natural logarithm of the absolute value of the gamma function on :attr:`input`. + + .. math:: + \text{out}_{i} = \ln |\Gamma(\text{input}_{i})| + + Args: + input (Tensor): the input tensor. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.arange(0.5, 2, 0.5) + >>> torch.lgamma(a) + tensor([ 0.5724, 0.0000, -0.1208]) + """ + ... +@overload +def linspace(start: Number, end: Number, steps: Optional[_int] = None, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, pin_memory: _bool = False) -> Tensor: + r""" + linspace(start, end, steps, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + Creates a one-dimensional tensor of size :attr:`steps` whose values are evenly + spaced from :attr:`start` to :attr:`end`, inclusive. That is, the value are: + + .. math:: + (\text{start}, + \text{start} + \frac{\text{end} - \text{start}}{\text{steps} - 1}, + \ldots, + \text{start} + (\text{steps} - 2) * \frac{\text{end} - \text{start}}{\text{steps} - 1}, + \text{end}) + + + From PyTorch 1.11 linspace requires the steps argument. Use steps=100 to restore the previous behavior. + + Args: + start (float or Tensor): the starting value for the set of points. If `Tensor`, it must be 0-dimensional + end (float or Tensor): the ending value for the set of points. If `Tensor`, it must be 0-dimensional + steps (int): size of the constructed tensor + + Keyword arguments: + out (Tensor, optional): the output tensor. + dtype (torch.dtype, optional): the data type to perform the computation in. + Default: if None, uses the global default dtype (see torch.get_default_dtype()) + when both :attr:`start` and :attr:`end` are real, + and corresponding complex dtype when either is complex. + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + + + Example:: + + >>> torch.linspace(3, 10, steps=5) + tensor([ 3.0000, 4.7500, 6.5000, 8.2500, 10.0000]) + >>> torch.linspace(-10, 10, steps=5) + tensor([-10., -5., 0., 5., 10.]) + >>> torch.linspace(start=-10, end=10, steps=5) + tensor([-10., -5., 0., 5., 10.]) + >>> torch.linspace(start=-10, end=10, steps=1) + tensor([-10.]) + """ + ... 
+@overload +def linspace(start: Tensor, end: Tensor, steps: _int, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + linspace(start, end, steps, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + Creates a one-dimensional tensor of size :attr:`steps` whose values are evenly + spaced from :attr:`start` to :attr:`end`, inclusive. That is, the value are: + + .. math:: + (\text{start}, + \text{start} + \frac{\text{end} - \text{start}}{\text{steps} - 1}, + \ldots, + \text{start} + (\text{steps} - 2) * \frac{\text{end} - \text{start}}{\text{steps} - 1}, + \text{end}) + + + From PyTorch 1.11 linspace requires the steps argument. Use steps=100 to restore the previous behavior. + + Args: + start (float or Tensor): the starting value for the set of points. If `Tensor`, it must be 0-dimensional + end (float or Tensor): the ending value for the set of points. If `Tensor`, it must be 0-dimensional + steps (int): size of the constructed tensor + + Keyword arguments: + out (Tensor, optional): the output tensor. + dtype (torch.dtype, optional): the data type to perform the computation in. + Default: if None, uses the global default dtype (see torch.get_default_dtype()) + when both :attr:`start` and :attr:`end` are real, + and corresponding complex dtype when either is complex. + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + + + Example:: + + >>> torch.linspace(3, 10, steps=5) + tensor([ 3.0000, 4.7500, 6.5000, 8.2500, 10.0000]) + >>> torch.linspace(-10, 10, steps=5) + tensor([-10., -5., 0., 5., 10.]) + >>> torch.linspace(start=-10, end=10, steps=5) + tensor([-10., -5., 0., 5., 10.]) + >>> torch.linspace(start=-10, end=10, steps=1) + tensor([-10.]) + """ + ... +@overload +def linspace(start: Union[Number, _complex], end: Tensor, steps: _int, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + linspace(start, end, steps, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + Creates a one-dimensional tensor of size :attr:`steps` whose values are evenly + spaced from :attr:`start` to :attr:`end`, inclusive. That is, the value are: + + .. math:: + (\text{start}, + \text{start} + \frac{\text{end} - \text{start}}{\text{steps} - 1}, + \ldots, + \text{start} + (\text{steps} - 2) * \frac{\text{end} - \text{start}}{\text{steps} - 1}, + \text{end}) + + + From PyTorch 1.11 linspace requires the steps argument. Use steps=100 to restore the previous behavior. + + Args: + start (float or Tensor): the starting value for the set of points. If `Tensor`, it must be 0-dimensional + end (float or Tensor): the ending value for the set of points. 
If `Tensor`, it must be 0-dimensional + steps (int): size of the constructed tensor + + Keyword arguments: + out (Tensor, optional): the output tensor. + dtype (torch.dtype, optional): the data type to perform the computation in. + Default: if None, uses the global default dtype (see torch.get_default_dtype()) + when both :attr:`start` and :attr:`end` are real, + and corresponding complex dtype when either is complex. + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + + + Example:: + + >>> torch.linspace(3, 10, steps=5) + tensor([ 3.0000, 4.7500, 6.5000, 8.2500, 10.0000]) + >>> torch.linspace(-10, 10, steps=5) + tensor([-10., -5., 0., 5., 10.]) + >>> torch.linspace(start=-10, end=10, steps=5) + tensor([-10., -5., 0., 5., 10.]) + >>> torch.linspace(start=-10, end=10, steps=1) + tensor([-10.]) + """ + ... +@overload +def linspace(start: Tensor, end: Union[Number, _complex], steps: _int, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + linspace(start, end, steps, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + Creates a one-dimensional tensor of size :attr:`steps` whose values are evenly + spaced from :attr:`start` to :attr:`end`, inclusive. That is, the value are: + + .. math:: + (\text{start}, + \text{start} + \frac{\text{end} - \text{start}}{\text{steps} - 1}, + \ldots, + \text{start} + (\text{steps} - 2) * \frac{\text{end} - \text{start}}{\text{steps} - 1}, + \text{end}) + + + From PyTorch 1.11 linspace requires the steps argument. Use steps=100 to restore the previous behavior. + + Args: + start (float or Tensor): the starting value for the set of points. If `Tensor`, it must be 0-dimensional + end (float or Tensor): the ending value for the set of points. If `Tensor`, it must be 0-dimensional + steps (int): size of the constructed tensor + + Keyword arguments: + out (Tensor, optional): the output tensor. + dtype (torch.dtype, optional): the data type to perform the computation in. + Default: if None, uses the global default dtype (see torch.get_default_dtype()) + when both :attr:`start` and :attr:`end` are real, + and corresponding complex dtype when either is complex. + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. 
+ + + Example:: + + >>> torch.linspace(3, 10, steps=5) + tensor([ 3.0000, 4.7500, 6.5000, 8.2500, 10.0000]) + >>> torch.linspace(-10, 10, steps=5) + tensor([-10., -5., 0., 5., 10.]) + >>> torch.linspace(start=-10, end=10, steps=5) + tensor([-10., -5., 0., 5., 10.]) + >>> torch.linspace(start=-10, end=10, steps=1) + tensor([-10.]) + """ + ... +@overload +def linspace(start: Union[Number, _complex], end: Union[Number, _complex], steps: _int, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + linspace(start, end, steps, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + Creates a one-dimensional tensor of size :attr:`steps` whose values are evenly + spaced from :attr:`start` to :attr:`end`, inclusive. That is, the value are: + + .. math:: + (\text{start}, + \text{start} + \frac{\text{end} - \text{start}}{\text{steps} - 1}, + \ldots, + \text{start} + (\text{steps} - 2) * \frac{\text{end} - \text{start}}{\text{steps} - 1}, + \text{end}) + + + From PyTorch 1.11 linspace requires the steps argument. Use steps=100 to restore the previous behavior. + + Args: + start (float or Tensor): the starting value for the set of points. If `Tensor`, it must be 0-dimensional + end (float or Tensor): the ending value for the set of points. If `Tensor`, it must be 0-dimensional + steps (int): size of the constructed tensor + + Keyword arguments: + out (Tensor, optional): the output tensor. + dtype (torch.dtype, optional): the data type to perform the computation in. + Default: if None, uses the global default dtype (see torch.get_default_dtype()) + when both :attr:`start` and :attr:`end` are real, + and corresponding complex dtype when either is complex. + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + + + Example:: + + >>> torch.linspace(3, 10, steps=5) + tensor([ 3.0000, 4.7500, 6.5000, 8.2500, 10.0000]) + >>> torch.linspace(-10, 10, steps=5) + tensor([-10., -5., 0., 5., 10.]) + >>> torch.linspace(start=-10, end=10, steps=5) + tensor([-10., -5., 0., 5., 10.]) + >>> torch.linspace(start=-10, end=10, steps=1) + tensor([-10.]) + """ + ... +def log(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + log(input, *, out=None) -> Tensor + + Returns a new tensor with the natural logarithm of the elements + of :attr:`input`. + + .. math:: + y_{i} = \log_{e} (x_{i}) + + + Args: + input (Tensor): the input tensor. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.rand(5) * 5 + >>> a + tensor([4.7767, 4.3234, 1.2156, 0.2411, 4.5739]) + >>> torch.log(a) + tensor([ 1.5637, 1.4640, 0.1952, -1.4226, 1.5204]) + """ + ... +def log10(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + log10(input, *, out=None) -> Tensor + + Returns a new tensor with the logarithm to the base 10 of the elements + of :attr:`input`. + + .. 
math:: + y_{i} = \log_{10} (x_{i}) + + + Args: + input (Tensor): the input tensor. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.rand(5) + >>> a + tensor([ 0.5224, 0.9354, 0.7257, 0.1301, 0.2251]) + + + >>> torch.log10(a) + tensor([-0.2820, -0.0290, -0.1392, -0.8857, -0.6476]) + """ + ... +def log10_(input: Tensor) -> Tensor: ... +def log1p(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + log1p(input, *, out=None) -> Tensor + + Returns a new tensor with the natural logarithm of (1 + :attr:`input`). + + .. math:: + y_i = \log_{e} (x_i + 1) + + .. note:: This function is more accurate than :func:`torch.log` for small + values of :attr:`input` + + Args: + input (Tensor): the input tensor. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randn(5) + >>> a + tensor([-1.0090, -0.9923, 1.0249, -0.5372, 0.2492]) + >>> torch.log1p(a) + tensor([ nan, -4.8653, 0.7055, -0.7705, 0.2225]) + """ + ... +def log1p_(input: Tensor) -> Tensor: ... +def log2(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + log2(input, *, out=None) -> Tensor + + Returns a new tensor with the logarithm to the base 2 of the elements + of :attr:`input`. + + .. math:: + y_{i} = \log_{2} (x_{i}) + + + Args: + input (Tensor): the input tensor. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.rand(5) + >>> a + tensor([ 0.8419, 0.8003, 0.9971, 0.5287, 0.0490]) + + + >>> torch.log2(a) + tensor([-0.2483, -0.3213, -0.0042, -0.9196, -4.3504]) + """ + ... +def log2_(input: Tensor) -> Tensor: ... +def log_(input: Tensor) -> Tensor: ... +@overload +def log_softmax(input: Tensor, dim: _int, dtype: Optional[_dtype] = None, *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def log_softmax(input: Tensor, dim: Union[str, ellipsis, None], *, dtype: Optional[_dtype] = None) -> Tensor: ... +def logaddexp(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + logaddexp(input, other, *, out=None) -> Tensor + + Logarithm of the sum of exponentiations of the inputs. + + Calculates pointwise :math:`\log\left(e^x + e^y\right)`. This function is useful + in statistics where the calculated probabilities of events may be so small as to + exceed the range of normal floating point numbers. In such cases the logarithm + of the calculated probability is stored. This function allows adding + probabilities stored in such a fashion. + + This op should be disambiguated with :func:`torch.logsumexp` which performs a + reduction on a single tensor. + + Args: + input (Tensor): the input tensor. + other (Tensor): the second input tensor + + Keyword arguments: + out (Tensor, optional): the output tensor. + + Example:: + + >>> torch.logaddexp(torch.tensor([-1.0]), torch.tensor([-1.0, -2, -3])) + tensor([-0.3069, -0.6867, -0.8731]) + >>> torch.logaddexp(torch.tensor([-100.0, -200, -300]), torch.tensor([-1.0, -2, -3])) + tensor([-1., -2., -3.]) + >>> torch.logaddexp(torch.tensor([1.0, 2000, 30000]), torch.tensor([-1.0, -2, -3])) + tensor([1.1269e+00, 2.0000e+03, 3.0000e+04]) + """ + ... +def logaddexp2(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + logaddexp2(input, other, *, out=None) -> Tensor + + Logarithm of the sum of exponentiations of the inputs in base-2. + + Calculates pointwise :math:`\log_2\left(2^x + 2^y\right)`. See + :func:`torch.logaddexp` for more details. + + Args: + input (Tensor): the input tensor. 
+ other (Tensor): the second input tensor + + Keyword arguments: + out (Tensor, optional): the output tensor. + """ + ... +@overload +def logcumsumexp(input: Tensor, dim: _int, *, out: Optional[Tensor] = None) -> Tensor: + r""" + logcumsumexp(input, dim, *, out=None) -> Tensor + Returns the logarithm of the cumulative summation of the exponentiation of + elements of :attr:`input` in the dimension :attr:`dim`. + + For summation index :math:`j` given by `dim` and other indices :math:`i`, the result is + + .. math:: + \text{logcumsumexp}(x)_{ij} = \log \sum\limits_{j=0}^{i} \exp(x_{ij}) + + Args: + input (Tensor): the input tensor. + dim (int): the dimension to do the operation over + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randn(10) + >>> torch.logcumsumexp(a, dim=0) + tensor([-0.42296738, -0.04462666, 0.86278635, 0.94622083, 1.05277811, + 1.39202815, 1.83525007, 1.84492621, 2.06084887, 2.06844475])) + """ + ... +@overload +def logcumsumexp(input: Tensor, dim: Union[str, ellipsis, None], *, out: Optional[Tensor] = None) -> Tensor: + r""" + logcumsumexp(input, dim, *, out=None) -> Tensor + Returns the logarithm of the cumulative summation of the exponentiation of + elements of :attr:`input` in the dimension :attr:`dim`. + + For summation index :math:`j` given by `dim` and other indices :math:`i`, the result is + + .. math:: + \text{logcumsumexp}(x)_{ij} = \log \sum\limits_{j=0}^{i} \exp(x_{ij}) + + Args: + input (Tensor): the input tensor. + dim (int): the dimension to do the operation over + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randn(10) + >>> torch.logcumsumexp(a, dim=0) + tensor([-0.42296738, -0.04462666, 0.86278635, 0.94622083, 1.05277811, + 1.39202815, 1.83525007, 1.84492621, 2.06084887, 2.06844475])) + """ + ... +def logdet(input: Tensor) -> Tensor: + r""" + logdet(input) -> Tensor + + Calculates log determinant of a square matrix or batches of square matrices. + + It returns ``-inf`` if the input has a determinant of zero, and ``NaN`` if it has + a negative determinant. + + .. note:: + Backward through :meth:`logdet` internally uses SVD results when :attr:`input` + is not invertible. In this case, double backward through :meth:`logdet` will + be unstable in when :attr:`input` doesn't have distinct singular values. See + :func:`torch.linalg.svd` for details. + + .. seealso:: + + :func:`torch.linalg.slogdet` computes the sign (resp. angle) and natural logarithm of the + absolute value of the determinant of real-valued (resp. complex) square matrices. + + Arguments: + input (Tensor): the input tensor of size ``(*, n, n)`` where ``*`` is zero or more + batch dimensions. + + Example:: + + >>> A = torch.randn(3, 3) + >>> torch.det(A) + tensor(0.2611) + >>> torch.logdet(A) + tensor(-1.3430) + >>> A + tensor([[[ 0.9254, -0.6213], + [-0.5787, 1.6843]], + + [[ 0.3242, -0.9665], + [ 0.4539, -0.0887]], + + [[ 1.1336, -0.4025], + [-0.7089, 0.9032]]]) + >>> A.det() + tensor([1.1990, 0.4099, 0.7386]) + >>> A.det().log() + tensor([ 0.1815, -0.8917, -0.3031]) + """ + ... +def logical_and(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + logical_and(input, other, *, out=None) -> Tensor + + Computes the element-wise logical AND of the given input tensors. Zeros are treated as ``False`` and nonzeros are + treated as ``True``. + + Args: + input (Tensor): the input tensor. 
+ other (Tensor): the tensor to compute AND with + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> torch.logical_and(torch.tensor([True, False, True]), torch.tensor([True, False, False])) + tensor([ True, False, False]) + >>> a = torch.tensor([0, 1, 10, 0], dtype=torch.int8) + >>> b = torch.tensor([4, 0, 1, 0], dtype=torch.int8) + >>> torch.logical_and(a, b) + tensor([False, False, True, False]) + >>> torch.logical_and(a.double(), b.double()) + tensor([False, False, True, False]) + >>> torch.logical_and(a.double(), b) + tensor([False, False, True, False]) + >>> torch.logical_and(a, b, out=torch.empty(4, dtype=torch.bool)) + tensor([False, False, True, False]) + """ + ... +def logical_not(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + logical_not(input, *, out=None) -> Tensor + + Computes the element-wise logical NOT of the given input tensor. If not specified, the output tensor will have the bool + dtype. If the input tensor is not a bool tensor, zeros are treated as ``False`` and non-zeros are treated as ``True``. + + Args: + input (Tensor): the input tensor. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> torch.logical_not(torch.tensor([True, False])) + tensor([False, True]) + >>> torch.logical_not(torch.tensor([0, 1, -10], dtype=torch.int8)) + tensor([ True, False, False]) + >>> torch.logical_not(torch.tensor([0., 1.5, -10.], dtype=torch.double)) + tensor([ True, False, False]) + >>> torch.logical_not(torch.tensor([0., 1., -10.], dtype=torch.double), out=torch.empty(3, dtype=torch.int16)) + tensor([1, 0, 0], dtype=torch.int16) + """ + ... +def logical_or(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + logical_or(input, other, *, out=None) -> Tensor + + Computes the element-wise logical OR of the given input tensors. Zeros are treated as ``False`` and nonzeros are + treated as ``True``. + + Args: + input (Tensor): the input tensor. + other (Tensor): the tensor to compute OR with + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> torch.logical_or(torch.tensor([True, False, True]), torch.tensor([True, False, False])) + tensor([ True, False, True]) + >>> a = torch.tensor([0, 1, 10, 0], dtype=torch.int8) + >>> b = torch.tensor([4, 0, 1, 0], dtype=torch.int8) + >>> torch.logical_or(a, b) + tensor([ True, True, True, False]) + >>> torch.logical_or(a.double(), b.double()) + tensor([ True, True, True, False]) + >>> torch.logical_or(a.double(), b) + tensor([ True, True, True, False]) + >>> torch.logical_or(a, b, out=torch.empty(4, dtype=torch.bool)) + tensor([ True, True, True, False]) + """ + ... +def logical_xor(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + logical_xor(input, other, *, out=None) -> Tensor + + Computes the element-wise logical XOR of the given input tensors. Zeros are treated as ``False`` and nonzeros are + treated as ``True``. + + Args: + input (Tensor): the input tensor. + other (Tensor): the tensor to compute XOR with + + Keyword args: + out (Tensor, optional): the output tensor. 
+ + Example:: + + >>> torch.logical_xor(torch.tensor([True, False, True]), torch.tensor([True, False, False])) + tensor([False, False, True]) + >>> a = torch.tensor([0, 1, 10, 0], dtype=torch.int8) + >>> b = torch.tensor([4, 0, 1, 0], dtype=torch.int8) + >>> torch.logical_xor(a, b) + tensor([ True, True, False, False]) + >>> torch.logical_xor(a.double(), b.double()) + tensor([ True, True, False, False]) + >>> torch.logical_xor(a.double(), b) + tensor([ True, True, False, False]) + >>> torch.logical_xor(a, b, out=torch.empty(4, dtype=torch.bool)) + tensor([ True, True, False, False]) + """ + ... +def logit(input: Tensor, eps: Optional[_float] = None, *, out: Optional[Tensor] = None) -> Tensor: + r""" + logit(input, eps=None, *, out=None) -> Tensor + + Alias for :func:`torch.special.logit`. + """ + ... +def logit_(input: Tensor, eps: Optional[_float] = None) -> Tensor: ... +@overload +def logspace(start: Number, end: Number, steps: Optional[_int] = None, base: _float = 10.0, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, pin_memory: _bool = False) -> Tensor: + r""" + logspace(start, end, steps, base=10.0, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + + Creates a one-dimensional tensor of size :attr:`steps` whose values are evenly + spaced from :math:`{{\text{{base}}}}^{{\text{{start}}}}` to + :math:`{{\text{{base}}}}^{{\text{{end}}}}`, inclusive, on a logarithmic scale + with base :attr:`base`. That is, the values are: + + .. math:: + (\text{base}^{\text{start}}, + \text{base}^{(\text{start} + \frac{\text{end} - \text{start}}{ \text{steps} - 1})}, + \ldots, + \text{base}^{(\text{start} + (\text{steps} - 2) * \frac{\text{end} - \text{start}}{ \text{steps} - 1})}, + \text{base}^{\text{end}}) + + + + From PyTorch 1.11 logspace requires the steps argument. Use steps=100 to restore the previous behavior. + + Args: + start (float or Tensor): the starting value for the set of points. If `Tensor`, it must be 0-dimensional + end (float or Tensor): the ending value for the set of points. If `Tensor`, it must be 0-dimensional + steps (int): size of the constructed tensor + base (float, optional): base of the logarithm function. Default: ``10.0``. + + Keyword arguments: + out (Tensor, optional): the output tensor. + dtype (torch.dtype, optional): the data type to perform the computation in. + Default: if None, uses the global default dtype (see torch.get_default_dtype()) + when both :attr:`start` and :attr:`end` are real, + and corresponding complex dtype when either is complex. + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. 
+ + Example:: + + >>> torch.logspace(start=-10, end=10, steps=5) + tensor([ 1.0000e-10, 1.0000e-05, 1.0000e+00, 1.0000e+05, 1.0000e+10]) + >>> torch.logspace(start=0.1, end=1.0, steps=5) + tensor([ 1.2589, 2.1135, 3.5481, 5.9566, 10.0000]) + >>> torch.logspace(start=0.1, end=1.0, steps=1) + tensor([1.2589]) + >>> torch.logspace(start=2, end=2, steps=1, base=2) + tensor([4.0]) + """ + ... +@overload +def logspace(start: Tensor, end: Tensor, steps: _int, base: _float = 10.0, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + logspace(start, end, steps, base=10.0, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + + Creates a one-dimensional tensor of size :attr:`steps` whose values are evenly + spaced from :math:`{{\text{{base}}}}^{{\text{{start}}}}` to + :math:`{{\text{{base}}}}^{{\text{{end}}}}`, inclusive, on a logarithmic scale + with base :attr:`base`. That is, the values are: + + .. math:: + (\text{base}^{\text{start}}, + \text{base}^{(\text{start} + \frac{\text{end} - \text{start}}{ \text{steps} - 1})}, + \ldots, + \text{base}^{(\text{start} + (\text{steps} - 2) * \frac{\text{end} - \text{start}}{ \text{steps} - 1})}, + \text{base}^{\text{end}}) + + + + From PyTorch 1.11 logspace requires the steps argument. Use steps=100 to restore the previous behavior. + + Args: + start (float or Tensor): the starting value for the set of points. If `Tensor`, it must be 0-dimensional + end (float or Tensor): the ending value for the set of points. If `Tensor`, it must be 0-dimensional + steps (int): size of the constructed tensor + base (float, optional): base of the logarithm function. Default: ``10.0``. + + Keyword arguments: + out (Tensor, optional): the output tensor. + dtype (torch.dtype, optional): the data type to perform the computation in. + Default: if None, uses the global default dtype (see torch.get_default_dtype()) + when both :attr:`start` and :attr:`end` are real, + and corresponding complex dtype when either is complex. + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + + Example:: + + >>> torch.logspace(start=-10, end=10, steps=5) + tensor([ 1.0000e-10, 1.0000e-05, 1.0000e+00, 1.0000e+05, 1.0000e+10]) + >>> torch.logspace(start=0.1, end=1.0, steps=5) + tensor([ 1.2589, 2.1135, 3.5481, 5.9566, 10.0000]) + >>> torch.logspace(start=0.1, end=1.0, steps=1) + tensor([1.2589]) + >>> torch.logspace(start=2, end=2, steps=1, base=2) + tensor([4.0]) + """ + ... 
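+# Usage sketch for torch.log_softmax and torch.logaddexp2, which are declared above
+# without inline examples. The tensors and shapes below are illustrative assumptions,
+# not taken from the generated docstrings.
+#
+#     import torch
+#
+#     x = torch.randn(2, 3)
+#     lp = torch.log_softmax(x, dim=1)      # log-probabilities along dim 1
+#     row_sums = lp.exp().sum(dim=1)        # ~1.0 per row
+#
+#     a = torch.tensor([-1.0, -2.0, -3.0])
+#     torch.logaddexp2(a, a)                # log2(2**a + 2**a) == a + 1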
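+# Usage sketch for the linspace / logspace overloads above and below: start and end
+# may be Python scalars or 0-dimensional tensors (as their docstrings state). The
+# sample values are illustrative assumptions.
+#
+#     import torch
+#
+#     torch.logspace(-2.0, 2.0, steps=5)               # 1e-2, 1e-1, 1e0, 1e1, 1e2
+#     s, e = torch.tensor(0.0), torch.tensor(3.0)      # 0-dim tensor endpoints
+#     torch.logspace(s, e, steps=4, base=2.0)          # 1., 2., 4., 8.
+#     torch.linspace(s, e, steps=4)                    # 0., 1., 2., 3.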
+@overload +def logspace(start: Union[Number, _complex], end: Tensor, steps: _int, base: _float = 10.0, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + logspace(start, end, steps, base=10.0, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + + Creates a one-dimensional tensor of size :attr:`steps` whose values are evenly + spaced from :math:`{{\text{{base}}}}^{{\text{{start}}}}` to + :math:`{{\text{{base}}}}^{{\text{{end}}}}`, inclusive, on a logarithmic scale + with base :attr:`base`. That is, the values are: + + .. math:: + (\text{base}^{\text{start}}, + \text{base}^{(\text{start} + \frac{\text{end} - \text{start}}{ \text{steps} - 1})}, + \ldots, + \text{base}^{(\text{start} + (\text{steps} - 2) * \frac{\text{end} - \text{start}}{ \text{steps} - 1})}, + \text{base}^{\text{end}}) + + + + From PyTorch 1.11 logspace requires the steps argument. Use steps=100 to restore the previous behavior. + + Args: + start (float or Tensor): the starting value for the set of points. If `Tensor`, it must be 0-dimensional + end (float or Tensor): the ending value for the set of points. If `Tensor`, it must be 0-dimensional + steps (int): size of the constructed tensor + base (float, optional): base of the logarithm function. Default: ``10.0``. + + Keyword arguments: + out (Tensor, optional): the output tensor. + dtype (torch.dtype, optional): the data type to perform the computation in. + Default: if None, uses the global default dtype (see torch.get_default_dtype()) + when both :attr:`start` and :attr:`end` are real, + and corresponding complex dtype when either is complex. + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + + Example:: + + >>> torch.logspace(start=-10, end=10, steps=5) + tensor([ 1.0000e-10, 1.0000e-05, 1.0000e+00, 1.0000e+05, 1.0000e+10]) + >>> torch.logspace(start=0.1, end=1.0, steps=5) + tensor([ 1.2589, 2.1135, 3.5481, 5.9566, 10.0000]) + >>> torch.logspace(start=0.1, end=1.0, steps=1) + tensor([1.2589]) + >>> torch.logspace(start=2, end=2, steps=1, base=2) + tensor([4.0]) + """ + ... +@overload +def logspace(start: Tensor, end: Union[Number, _complex], steps: _int, base: _float = 10.0, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + logspace(start, end, steps, base=10.0, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + + Creates a one-dimensional tensor of size :attr:`steps` whose values are evenly + spaced from :math:`{{\text{{base}}}}^{{\text{{start}}}}` to + :math:`{{\text{{base}}}}^{{\text{{end}}}}`, inclusive, on a logarithmic scale + with base :attr:`base`. That is, the values are: + + .. 
math:: + (\text{base}^{\text{start}}, + \text{base}^{(\text{start} + \frac{\text{end} - \text{start}}{ \text{steps} - 1})}, + \ldots, + \text{base}^{(\text{start} + (\text{steps} - 2) * \frac{\text{end} - \text{start}}{ \text{steps} - 1})}, + \text{base}^{\text{end}}) + + + + From PyTorch 1.11 logspace requires the steps argument. Use steps=100 to restore the previous behavior. + + Args: + start (float or Tensor): the starting value for the set of points. If `Tensor`, it must be 0-dimensional + end (float or Tensor): the ending value for the set of points. If `Tensor`, it must be 0-dimensional + steps (int): size of the constructed tensor + base (float, optional): base of the logarithm function. Default: ``10.0``. + + Keyword arguments: + out (Tensor, optional): the output tensor. + dtype (torch.dtype, optional): the data type to perform the computation in. + Default: if None, uses the global default dtype (see torch.get_default_dtype()) + when both :attr:`start` and :attr:`end` are real, + and corresponding complex dtype when either is complex. + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + + Example:: + + >>> torch.logspace(start=-10, end=10, steps=5) + tensor([ 1.0000e-10, 1.0000e-05, 1.0000e+00, 1.0000e+05, 1.0000e+10]) + >>> torch.logspace(start=0.1, end=1.0, steps=5) + tensor([ 1.2589, 2.1135, 3.5481, 5.9566, 10.0000]) + >>> torch.logspace(start=0.1, end=1.0, steps=1) + tensor([1.2589]) + >>> torch.logspace(start=2, end=2, steps=1, base=2) + tensor([4.0]) + """ + ... +@overload +def logspace(start: Union[Number, _complex], end: Union[Number, _complex], steps: _int, base: _float = 10.0, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + logspace(start, end, steps, base=10.0, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + + Creates a one-dimensional tensor of size :attr:`steps` whose values are evenly + spaced from :math:`{{\text{{base}}}}^{{\text{{start}}}}` to + :math:`{{\text{{base}}}}^{{\text{{end}}}}`, inclusive, on a logarithmic scale + with base :attr:`base`. That is, the values are: + + .. math:: + (\text{base}^{\text{start}}, + \text{base}^{(\text{start} + \frac{\text{end} - \text{start}}{ \text{steps} - 1})}, + \ldots, + \text{base}^{(\text{start} + (\text{steps} - 2) * \frac{\text{end} - \text{start}}{ \text{steps} - 1})}, + \text{base}^{\text{end}}) + + + + From PyTorch 1.11 logspace requires the steps argument. Use steps=100 to restore the previous behavior. + + Args: + start (float or Tensor): the starting value for the set of points. If `Tensor`, it must be 0-dimensional + end (float or Tensor): the ending value for the set of points. If `Tensor`, it must be 0-dimensional + steps (int): size of the constructed tensor + base (float, optional): base of the logarithm function. Default: ``10.0``. 
+ + Keyword arguments: + out (Tensor, optional): the output tensor. + dtype (torch.dtype, optional): the data type to perform the computation in. + Default: if None, uses the global default dtype (see torch.get_default_dtype()) + when both :attr:`start` and :attr:`end` are real, + and corresponding complex dtype when either is complex. + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + + Example:: + + >>> torch.logspace(start=-10, end=10, steps=5) + tensor([ 1.0000e-10, 1.0000e-05, 1.0000e+00, 1.0000e+05, 1.0000e+10]) + >>> torch.logspace(start=0.1, end=1.0, steps=5) + tensor([ 1.2589, 2.1135, 3.5481, 5.9566, 10.0000]) + >>> torch.logspace(start=0.1, end=1.0, steps=1) + tensor([1.2589]) + >>> torch.logspace(start=2, end=2, steps=1, base=2) + tensor([4.0]) + """ + ... +@overload +def logsumexp(input: Tensor, dim: Union[_int, _size], keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: + r""" + logsumexp(input, dim, keepdim=False, *, out=None) + + Returns the log of summed exponentials of each row of the :attr:`input` + tensor in the given dimension :attr:`dim`. The computation is numerically + stabilized. + + For summation index :math:`j` given by `dim` and other indices :math:`i`, the result is + + .. math:: + \text{logsumexp}(x)_{i} = \log \sum_j \exp(x_{ij}) + + + If :attr:`keepdim` is ``True``, the output tensor is of the same size + as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the + output tensor having 1 (or ``len(dim)``) fewer dimension(s). + + + Args: + input (Tensor): the input tensor. + + dim (int or tuple of ints, optional): the dimension or dimensions to reduce. + If ``None``, all dimensions are reduced. + + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randn(3, 3) + >>> torch.logsumexp(a, 1) + tensor([1.4907, 1.0593, 1.5696]) + >>> torch.dist(torch.logsumexp(a, 1), torch.log(torch.sum(torch.exp(a), 1))) + tensor(1.6859e-07) + """ + ... +@overload +def logsumexp(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: + r""" + logsumexp(input, dim, keepdim=False, *, out=None) + + Returns the log of summed exponentials of each row of the :attr:`input` + tensor in the given dimension :attr:`dim`. The computation is numerically + stabilized. + + For summation index :math:`j` given by `dim` and other indices :math:`i`, the result is + + .. math:: + \text{logsumexp}(x)_{i} = \log \sum_j \exp(x_{ij}) + + + If :attr:`keepdim` is ``True``, the output tensor is of the same size + as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the + output tensor having 1 (or ``len(dim)``) fewer dimension(s). + + + Args: + input (Tensor): the input tensor. 
+ + dim (int or tuple of ints, optional): the dimension or dimensions to reduce. + If ``None``, all dimensions are reduced. + + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randn(3, 3) + >>> torch.logsumexp(a, 1) + tensor([1.4907, 1.0593, 1.5696]) + >>> torch.dist(torch.logsumexp(a, 1), torch.log(torch.sum(torch.exp(a), 1))) + tensor(1.6859e-07) + """ + ... +@overload +def lstm(data: Tensor, batch_sizes: Tensor, hx: Union[Tuple[Tensor, ...], List[Tensor]], params: Union[Tuple[Tensor, ...], List[Tensor]], has_biases: _bool, num_layers: _int, dropout: _float, train: _bool, bidirectional: _bool) -> Tuple[Tensor, Tensor, Tensor]: ... +@overload +def lstm(input: Tensor, hx: Union[Tuple[Tensor, ...], List[Tensor]], params: Union[Tuple[Tensor, ...], List[Tensor]], has_biases: _bool, num_layers: _int, dropout: _float, train: _bool, bidirectional: _bool, batch_first: _bool) -> Tuple[Tensor, Tensor, Tensor]: ... +def lstm_cell(input: Tensor, hx: Union[Tuple[Tensor, ...], List[Tensor]], w_ih: Tensor, w_hh: Tensor, b_ih: Optional[Tensor] = None, b_hh: Optional[Tensor] = None) -> Tuple[Tensor, Tensor]: ... +@overload +def lt(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + lt(input, other, *, out=None) -> Tensor + + Computes :math:`\text{input} < \text{other}` element-wise. + + + The second argument can be a number or a tensor whose shape is + :ref:`broadcastable ` with the first argument. + + Args: + input (Tensor): the tensor to compare + other (Tensor or float): the tensor or value to compare + + Keyword args: + out (Tensor, optional): the output tensor. + + Returns: + A boolean tensor that is True where :attr:`input` is less than :attr:`other` and False elsewhere + + Example:: + + >>> torch.lt(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]])) + tensor([[False, False], [True, False]]) + """ + ... +@overload +def lt(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: + r""" + lt(input, other, *, out=None) -> Tensor + + Computes :math:`\text{input} < \text{other}` element-wise. + + + The second argument can be a number or a tensor whose shape is + :ref:`broadcastable ` with the first argument. + + Args: + input (Tensor): the tensor to compare + other (Tensor or float): the tensor or value to compare + + Keyword args: + out (Tensor, optional): the output tensor. + + Returns: + A boolean tensor that is True where :attr:`input` is less than :attr:`other` and False elsewhere + + Example:: + + >>> torch.lt(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]])) + tensor([[False, False], [True, False]]) + """ + ... +def lu_solve(input: Tensor, LU_data: Tensor, LU_pivots: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + lu_solve(b, LU_data, LU_pivots, *, out=None) -> Tensor + + Returns the LU solve of the linear system :math:`Ax = b` using the partially pivoted + LU factorization of A from :func:`~linalg.lu_factor`. + + This function supports ``float``, ``double``, ``cfloat`` and ``cdouble`` dtypes for :attr:`input`. + + .. warning:: + + :func:`torch.lu_solve` is deprecated in favor of :func:`torch.linalg.lu_solve`. + :func:`torch.lu_solve` will be removed in a future PyTorch release. + ``X = torch.lu_solve(B, LU, pivots)`` should be replaced with + + .. 
code:: python + + X = linalg.lu_solve(LU, pivots, B) + + Arguments: + b (Tensor): the RHS tensor of size :math:`(*, m, k)`, where :math:`*` + is zero or more batch dimensions. + LU_data (Tensor): the pivoted LU factorization of A from :meth:`~linalg.lu_factor` of size :math:`(*, m, m)`, + where :math:`*` is zero or more batch dimensions. + LU_pivots (IntTensor): the pivots of the LU factorization from :meth:`~linalg.lu_factor` of size :math:`(*, m)`, + where :math:`*` is zero or more batch dimensions. + The batch dimensions of :attr:`LU_pivots` must be equal to the batch dimensions of + :attr:`LU_data`. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> A = torch.randn(2, 3, 3) + >>> b = torch.randn(2, 3, 1) + >>> LU, pivots = torch.linalg.lu_factor(A) + >>> x = torch.lu_solve(b, LU, pivots) + >>> torch.dist(A @ x, b) + tensor(1.00000e-07 * + 2.8312) + """ + ... +def lu_unpack(LU_data: Tensor, LU_pivots: Tensor, unpack_data: _bool = True, unpack_pivots: _bool = True, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.lu_unpack: + r""" + lu_unpack(LU_data, LU_pivots, unpack_data=True, unpack_pivots=True, *, out=None) -> (Tensor, Tensor, Tensor) + + Unpacks the LU decomposition returned by :func:`~linalg.lu_factor` into the `P, L, U` matrices. + + .. seealso:: + + :func:`~linalg.lu` returns the matrices from the LU decomposition. Its gradient formula is more efficient + than that of doing :func:`~linalg.lu_factor` followed by :func:`~linalg.lu_unpack`. + + Args: + LU_data (Tensor): the packed LU factorization data + LU_pivots (Tensor): the packed LU factorization pivots + unpack_data (bool): flag indicating if the data should be unpacked. + If ``False``, then the returned ``L`` and ``U`` are empty tensors. + Default: ``True`` + unpack_pivots (bool): flag indicating if the pivots should be unpacked into a permutation matrix ``P``. + If ``False``, then the returned ``P`` is an empty tensor. + Default: ``True`` + + Keyword args: + out (tuple, optional): output tuple of three tensors. Ignored if `None`. + + Returns: + A namedtuple ``(P, L, U)`` + + Examples:: + + >>> A = torch.randn(2, 3, 3) + >>> LU, pivots = torch.linalg.lu_factor(A) + >>> P, L, U = torch.lu_unpack(LU, pivots) + >>> # We can recover A from the factorization + >>> A_ = P @ L @ U + >>> torch.allclose(A, A_) + True + + >>> # LU factorization of a rectangular matrix: + >>> A = torch.randn(2, 3, 2) + >>> LU, pivots = torch.linalg.lu_factor(A) + >>> P, L, U = torch.lu_unpack(LU, pivots) + >>> # P, L, U are the same as returned by linalg.lu + >>> P_, L_, U_ = torch.linalg.lu(A) + >>> torch.allclose(P, P_) and torch.allclose(L, L_) and torch.allclose(U, U_) + True + """ + ... +def margin_ranking_loss(input1: Tensor, input2: Tensor, target: Tensor, margin: _float = 0.0, reduction: _int = 1) -> Tensor: ... +@overload +def masked_fill(input: Tensor, mask: Tensor, value: Tensor) -> Tensor: ... +@overload +def masked_fill(input: Tensor, mask: Tensor, value: Union[Number, _complex]) -> Tensor: ... +def masked_scatter(input: Tensor, mask: Tensor, source: Tensor) -> Tensor: ... +def masked_select(input: Tensor, mask: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + masked_select(input, mask, *, out=None) -> Tensor + + Returns a new 1-D tensor which indexes the :attr:`input` tensor according to + the boolean mask :attr:`mask` which is a `BoolTensor`. 
+ + The shapes of the :attr:`mask` tensor and the :attr:`input` tensor don't need + to match, but they must be :ref:`broadcastable `. + + .. note:: The returned tensor does **not** use the same storage + as the original tensor + + Args: + input (Tensor): the input tensor. + mask (BoolTensor): the tensor containing the binary mask to index with + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> x = torch.randn(3, 4) + >>> x + tensor([[ 0.3552, -2.3825, -0.8297, 0.3477], + [-1.2035, 1.2252, 0.5002, 0.6248], + [ 0.1307, -2.0608, 0.1244, 2.0139]]) + >>> mask = x.ge(0.5) + >>> mask + tensor([[False, False, False, False], + [False, True, True, True], + [False, False, False, True]]) + >>> torch.masked_select(x, mask) + tensor([ 1.2252, 0.5002, 0.6248, 2.0139]) + """ + ... +def matmul(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + matmul(input, other, *, out=None) -> Tensor + + Matrix product of two tensors. + + The behavior depends on the dimensionality of the tensors as follows: + + - If both tensors are 1-dimensional, the dot product (scalar) is returned. + - If both arguments are 2-dimensional, the matrix-matrix product is returned. + - If the first argument is 1-dimensional and the second argument is 2-dimensional, + a 1 is prepended to its dimension for the purpose of the matrix multiply. + After the matrix multiply, the prepended dimension is removed. + - If the first argument is 2-dimensional and the second argument is 1-dimensional, + the matrix-vector product is returned. + - If both arguments are at least 1-dimensional and at least one argument is + N-dimensional (where N > 2), then a batched matrix multiply is returned. If the first + argument is 1-dimensional, a 1 is prepended to its dimension for the purpose of the + batched matrix multiply and removed after. If the second argument is 1-dimensional, a + 1 is appended to its dimension for the purpose of the batched matrix multiple and removed after. + The non-matrix (i.e. batch) dimensions are :ref:`broadcasted ` (and thus + must be broadcastable). For example, if :attr:`input` is a + :math:`(j \times 1 \times n \times n)` tensor and :attr:`other` is a :math:`(k \times n \times n)` + tensor, :attr:`out` will be a :math:`(j \times k \times n \times n)` tensor. + + Note that the broadcasting logic only looks at the batch dimensions when determining if the inputs + are broadcastable, and not the matrix dimensions. For example, if :attr:`input` is a + :math:`(j \times 1 \times n \times m)` tensor and :attr:`other` is a :math:`(k \times m \times p)` + tensor, these inputs are valid for broadcasting even though the final two dimensions (i.e. the + matrix dimensions) are different. :attr:`out` will be a :math:`(j \times k \times n \times p)` tensor. + + This operation has support for arguments with :ref:`sparse layouts`. In particular the + matrix-matrix (both arguments 2-dimensional) supports sparse arguments with the same restrictions + as :func:`torch.mm` + + + .. warning:: + Sparse support is a beta feature and some layout(s)/dtype/device combinations may not be supported, + or may not have autograd support. If you notice missing functionality please + open a feature request. + + This operator supports :ref:`TensorFloat32`. + + On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision` for backward. + + .. note:: + + The 1-dimensional dot product version of this function does not support an :attr:`out` parameter. 
+ + Arguments: + input (Tensor): the first tensor to be multiplied + other (Tensor): the second tensor to be multiplied + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> # vector x vector + >>> tensor1 = torch.randn(3) + >>> tensor2 = torch.randn(3) + >>> torch.matmul(tensor1, tensor2).size() + torch.Size([]) + >>> # matrix x vector + >>> tensor1 = torch.randn(3, 4) + >>> tensor2 = torch.randn(4) + >>> torch.matmul(tensor1, tensor2).size() + torch.Size([3]) + >>> # batched matrix x broadcasted vector + >>> tensor1 = torch.randn(10, 3, 4) + >>> tensor2 = torch.randn(4) + >>> torch.matmul(tensor1, tensor2).size() + torch.Size([10, 3]) + >>> # batched matrix x batched matrix + >>> tensor1 = torch.randn(10, 3, 4) + >>> tensor2 = torch.randn(10, 4, 5) + >>> torch.matmul(tensor1, tensor2).size() + torch.Size([10, 3, 5]) + >>> # batched matrix x broadcasted matrix + >>> tensor1 = torch.randn(10, 3, 4) + >>> tensor2 = torch.randn(4, 5) + >>> torch.matmul(tensor1, tensor2).size() + torch.Size([10, 3, 5]) + """ + ... +def matrix_exp(input: Tensor) -> Tensor: + r""" + matrix_exp(A) -> Tensor + + Alias for :func:`torch.linalg.matrix_exp`. + """ + ... +def matrix_power(input: Tensor, n: _int, *, out: Optional[Tensor] = None) -> Tensor: + r""" + matrix_power(input, n, *, out=None) -> Tensor + + Alias for :func:`torch.linalg.matrix_power` + """ + ... +@overload +def max(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + max(input) -> Tensor + + Returns the maximum value of all elements in the ``input`` tensor. + + .. warning:: + This function produces deterministic (sub)gradients unlike ``max(dim=0)`` + + Args: + input (Tensor): the input tensor. + + Example:: + + >>> a = torch.randn(1, 3) + >>> a + tensor([[ 0.6763, 0.7445, -2.2369]]) + >>> torch.max(a) + tensor(0.7445) + + .. function:: max(input, dim, keepdim=False, *, out=None) -> (Tensor, LongTensor) + :noindex: + + Returns a namedtuple ``(values, indices)`` where ``values`` is the maximum + value of each row of the :attr:`input` tensor in the given dimension + :attr:`dim`. And ``indices`` is the index location of each maximum value found + (argmax). + + If ``keepdim`` is ``True``, the output tensors are of the same size + as ``input`` except in the dimension ``dim`` where they are of size 1. + Otherwise, ``dim`` is squeezed (see :func:`torch.squeeze`), resulting + in the output tensors having 1 fewer dimension than ``input``. + + .. note:: If there are multiple maximal values in a reduced row then + the indices of the first maximal value are returned. + + Args: + input (Tensor): the input tensor. + dim (int): the dimension to reduce. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. Default: ``False``. + + Keyword args: + out (tuple, optional): the result tuple of two output tensors (max, max_indices) + + Example:: + + >>> a = torch.randn(4, 4) + >>> a + tensor([[-1.2360, -0.2942, -0.1222, 0.8475], + [ 1.1949, -1.1127, -2.2379, -0.6702], + [ 1.5717, -0.9207, 0.1297, -1.8768], + [-0.6172, 1.0036, -0.6060, -0.2432]]) + >>> torch.max(a, 1) + torch.return_types.max(values=tensor([0.8475, 1.1949, 1.5717, 1.0036]), indices=tensor([3, 0, 0, 1])) + + .. function:: max(input, other, *, out=None) -> Tensor + :noindex: + + See :func:`torch.maximum`. + """ + ... +@overload +def max(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + max(input) -> Tensor + + Returns the maximum value of all elements in the ``input`` tensor. + + .. 
warning:: + This function produces deterministic (sub)gradients unlike ``max(dim=0)`` + + Args: + input (Tensor): the input tensor. + + Example:: + + >>> a = torch.randn(1, 3) + >>> a + tensor([[ 0.6763, 0.7445, -2.2369]]) + >>> torch.max(a) + tensor(0.7445) + + .. function:: max(input, dim, keepdim=False, *, out=None) -> (Tensor, LongTensor) + :noindex: + + Returns a namedtuple ``(values, indices)`` where ``values`` is the maximum + value of each row of the :attr:`input` tensor in the given dimension + :attr:`dim`. And ``indices`` is the index location of each maximum value found + (argmax). + + If ``keepdim`` is ``True``, the output tensors are of the same size + as ``input`` except in the dimension ``dim`` where they are of size 1. + Otherwise, ``dim`` is squeezed (see :func:`torch.squeeze`), resulting + in the output tensors having 1 fewer dimension than ``input``. + + .. note:: If there are multiple maximal values in a reduced row then + the indices of the first maximal value are returned. + + Args: + input (Tensor): the input tensor. + dim (int): the dimension to reduce. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. Default: ``False``. + + Keyword args: + out (tuple, optional): the result tuple of two output tensors (max, max_indices) + + Example:: + + >>> a = torch.randn(4, 4) + >>> a + tensor([[-1.2360, -0.2942, -0.1222, 0.8475], + [ 1.1949, -1.1127, -2.2379, -0.6702], + [ 1.5717, -0.9207, 0.1297, -1.8768], + [-0.6172, 1.0036, -0.6060, -0.2432]]) + >>> torch.max(a, 1) + torch.return_types.max(values=tensor([0.8475, 1.1949, 1.5717, 1.0036]), indices=tensor([3, 0, 0, 1])) + + .. function:: max(input, other, *, out=None) -> Tensor + :noindex: + + See :func:`torch.maximum`. + """ + ... +@overload +def max(input: Tensor, dim: _int, keepdim: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.max: + r""" + max(input) -> Tensor + + Returns the maximum value of all elements in the ``input`` tensor. + + .. warning:: + This function produces deterministic (sub)gradients unlike ``max(dim=0)`` + + Args: + input (Tensor): the input tensor. + + Example:: + + >>> a = torch.randn(1, 3) + >>> a + tensor([[ 0.6763, 0.7445, -2.2369]]) + >>> torch.max(a) + tensor(0.7445) + + .. function:: max(input, dim, keepdim=False, *, out=None) -> (Tensor, LongTensor) + :noindex: + + Returns a namedtuple ``(values, indices)`` where ``values`` is the maximum + value of each row of the :attr:`input` tensor in the given dimension + :attr:`dim`. And ``indices`` is the index location of each maximum value found + (argmax). + + If ``keepdim`` is ``True``, the output tensors are of the same size + as ``input`` except in the dimension ``dim`` where they are of size 1. + Otherwise, ``dim`` is squeezed (see :func:`torch.squeeze`), resulting + in the output tensors having 1 fewer dimension than ``input``. + + .. note:: If there are multiple maximal values in a reduced row then + the indices of the first maximal value are returned. + + Args: + input (Tensor): the input tensor. + dim (int): the dimension to reduce. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. Default: ``False``. 
+ + Keyword args: + out (tuple, optional): the result tuple of two output tensors (max, max_indices) + + Example:: + + >>> a = torch.randn(4, 4) + >>> a + tensor([[-1.2360, -0.2942, -0.1222, 0.8475], + [ 1.1949, -1.1127, -2.2379, -0.6702], + [ 1.5717, -0.9207, 0.1297, -1.8768], + [-0.6172, 1.0036, -0.6060, -0.2432]]) + >>> torch.max(a, 1) + torch.return_types.max(values=tensor([0.8475, 1.1949, 1.5717, 1.0036]), indices=tensor([3, 0, 0, 1])) + + .. function:: max(input, other, *, out=None) -> Tensor + :noindex: + + See :func:`torch.maximum`. + """ + ... +@overload +def max(input: Tensor, dim: Union[str, ellipsis, None], keepdim: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.max: + r""" + max(input) -> Tensor + + Returns the maximum value of all elements in the ``input`` tensor. + + .. warning:: + This function produces deterministic (sub)gradients unlike ``max(dim=0)`` + + Args: + input (Tensor): the input tensor. + + Example:: + + >>> a = torch.randn(1, 3) + >>> a + tensor([[ 0.6763, 0.7445, -2.2369]]) + >>> torch.max(a) + tensor(0.7445) + + .. function:: max(input, dim, keepdim=False, *, out=None) -> (Tensor, LongTensor) + :noindex: + + Returns a namedtuple ``(values, indices)`` where ``values`` is the maximum + value of each row of the :attr:`input` tensor in the given dimension + :attr:`dim`. And ``indices`` is the index location of each maximum value found + (argmax). + + If ``keepdim`` is ``True``, the output tensors are of the same size + as ``input`` except in the dimension ``dim`` where they are of size 1. + Otherwise, ``dim`` is squeezed (see :func:`torch.squeeze`), resulting + in the output tensors having 1 fewer dimension than ``input``. + + .. note:: If there are multiple maximal values in a reduced row then + the indices of the first maximal value are returned. + + Args: + input (Tensor): the input tensor. + dim (int): the dimension to reduce. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. Default: ``False``. + + Keyword args: + out (tuple, optional): the result tuple of two output tensors (max, max_indices) + + Example:: + + >>> a = torch.randn(4, 4) + >>> a + tensor([[-1.2360, -0.2942, -0.1222, 0.8475], + [ 1.1949, -1.1127, -2.2379, -0.6702], + [ 1.5717, -0.9207, 0.1297, -1.8768], + [-0.6172, 1.0036, -0.6060, -0.2432]]) + >>> torch.max(a, 1) + torch.return_types.max(values=tensor([0.8475, 1.1949, 1.5717, 1.0036]), indices=tensor([3, 0, 0, 1])) + + .. function:: max(input, other, *, out=None) -> Tensor + :noindex: + + See :func:`torch.maximum`. + """ + ... +def max_pool1d(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size] = (), padding: Union[_int, _size] = 0, dilation: Union[_int, _size] = 1, ceil_mode: _bool = False) -> Tensor: ... +def max_pool1d_with_indices(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size] = (), padding: Union[_int, _size] = 0, dilation: Union[_int, _size] = 1, ceil_mode: _bool = False) -> Tuple[Tensor, Tensor]: ... +def max_pool2d(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size] = (), padding: Union[_int, _size] = 0, dilation: Union[_int, _size] = 1, ceil_mode: _bool = False) -> Tensor: ... +def max_pool3d(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size] = (), padding: Union[_int, _size] = 0, dilation: Union[_int, _size] = 1, ceil_mode: _bool = False) -> Tensor: ... 
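+# Usage sketch for the max_pool1d / max_pool2d / max_pool3d bindings above, which
+# carry no docstrings here. Inputs follow the usual (N, C, ...) layout; the sizes
+# below are illustrative assumptions.
+#
+#     import torch
+#
+#     x = torch.randn(1, 3, 32, 32)                                  # (N, C, H, W)
+#     y = torch.max_pool2d(x, kernel_size=2)                         # stride defaults to kernel_size -> (1, 3, 16, 16)
+#     z = torch.max_pool2d(x, kernel_size=3, stride=2, padding=1)    # (1, 3, 16, 16)
+#
+#     seq = torch.randn(1, 4, 10)                                    # (N, C, L)
+#     vals, idx = torch.max_pool1d_with_indices(seq, kernel_size=2)  # both (1, 4, 5)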
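+# Usage sketch for torch.masked_fill and torch.masked_scatter, declared above without
+# docstrings. The tensors below are illustrative assumptions.
+#
+#     import torch
+#
+#     x = torch.zeros(2, 3)
+#     mask = torch.tensor([[True, False, True],
+#                          [False, True, False]])
+#     torch.masked_fill(x, mask, -1.0)       # -1.0 where mask is True, 0.0 elsewhere
+#     src = torch.arange(6, dtype=torch.float32)
+#     torch.masked_scatter(x, mask, src)     # True positions take 0., 1., 2. in order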
+def maximum(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + maximum(input, other, *, out=None) -> Tensor + + Computes the element-wise maximum of :attr:`input` and :attr:`other`. + + .. note:: + If one of the elements being compared is a NaN, then that element is returned. + :func:`maximum` is not supported for tensors with complex dtypes. + + Args: + input (Tensor): the input tensor. + other (Tensor): the second input tensor + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.tensor((1, 2, -1)) + >>> b = torch.tensor((3, 0, 4)) + >>> torch.maximum(a, b) + tensor([3, 2, 4]) + """ + ... +@overload +def mean(input: Tensor, *, dtype: Optional[_dtype] = None) -> Tensor: + r""" + mean(input, *, dtype=None) -> Tensor + + Returns the mean value of all elements in the :attr:`input` tensor. Input must be floating point or complex. + + Args: + input (Tensor): + the input tensor, either of floating point or complex dtype + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + If specified, the input tensor is casted to :attr:`dtype` before the operation + is performed. This is useful for preventing data type overflows. Default: None. + + Example:: + + >>> a = torch.randn(1, 3) + >>> a + tensor([[ 0.2294, -0.5481, 1.3288]]) + >>> torch.mean(a) + tensor(0.3367) + + .. function:: mean(input, dim, keepdim=False, *, dtype=None, out=None) -> Tensor + :noindex: + + Returns the mean value of each row of the :attr:`input` tensor in the given + dimension :attr:`dim`. If :attr:`dim` is a list of dimensions, + reduce over all of them. + + + If :attr:`keepdim` is ``True``, the output tensor is of the same size + as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the + output tensor having 1 (or ``len(dim)``) fewer dimension(s). + + + Args: + input (Tensor): the input tensor. + dim (int or tuple of ints): the dimension or dimensions to reduce. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + If specified, the input tensor is casted to :attr:`dtype` before the operation + is performed. This is useful for preventing data type overflows. Default: None. + out (Tensor, optional): the output tensor. + + .. seealso:: + + :func:`torch.nanmean` computes the mean value of `non-NaN` elements. + + Example:: + + >>> a = torch.randn(4, 4) + >>> a + tensor([[-0.3841, 0.6320, 0.4254, -0.7384], + [-0.9644, 1.0131, -0.6549, -1.4279], + [-0.2951, -1.3350, -0.7694, 0.5600], + [ 1.0842, -0.9580, 0.3623, 0.2343]]) + >>> torch.mean(a, 1) + tensor([-0.0163, -0.5085, -0.4599, 0.1807]) + >>> torch.mean(a, 1, True) + tensor([[-0.0163], + [-0.5085], + [-0.4599], + [ 0.1807]]) + """ + ... +@overload +def mean(input: Tensor, dim: Optional[Union[_int, _size]], keepdim: _bool = False, *, dtype: Optional[_dtype] = None, out: Optional[Tensor] = None) -> Tensor: + r""" + mean(input, *, dtype=None) -> Tensor + + Returns the mean value of all elements in the :attr:`input` tensor. Input must be floating point or complex. + + Args: + input (Tensor): + the input tensor, either of floating point or complex dtype + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. 
+ If specified, the input tensor is casted to :attr:`dtype` before the operation + is performed. This is useful for preventing data type overflows. Default: None. + + Example:: + + >>> a = torch.randn(1, 3) + >>> a + tensor([[ 0.2294, -0.5481, 1.3288]]) + >>> torch.mean(a) + tensor(0.3367) + + .. function:: mean(input, dim, keepdim=False, *, dtype=None, out=None) -> Tensor + :noindex: + + Returns the mean value of each row of the :attr:`input` tensor in the given + dimension :attr:`dim`. If :attr:`dim` is a list of dimensions, + reduce over all of them. + + + If :attr:`keepdim` is ``True``, the output tensor is of the same size + as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the + output tensor having 1 (or ``len(dim)``) fewer dimension(s). + + + Args: + input (Tensor): the input tensor. + dim (int or tuple of ints): the dimension or dimensions to reduce. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + If specified, the input tensor is casted to :attr:`dtype` before the operation + is performed. This is useful for preventing data type overflows. Default: None. + out (Tensor, optional): the output tensor. + + .. seealso:: + + :func:`torch.nanmean` computes the mean value of `non-NaN` elements. + + Example:: + + >>> a = torch.randn(4, 4) + >>> a + tensor([[-0.3841, 0.6320, 0.4254, -0.7384], + [-0.9644, 1.0131, -0.6549, -1.4279], + [-0.2951, -1.3350, -0.7694, 0.5600], + [ 1.0842, -0.9580, 0.3623, 0.2343]]) + >>> torch.mean(a, 1) + tensor([-0.0163, -0.5085, -0.4599, 0.1807]) + >>> torch.mean(a, 1, True) + tensor([[-0.0163], + [-0.5085], + [-0.4599], + [ 0.1807]]) + """ + ... +@overload +def mean(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], keepdim: _bool = False, *, dtype: Optional[_dtype] = None, out: Optional[Tensor] = None) -> Tensor: + r""" + mean(input, *, dtype=None) -> Tensor + + Returns the mean value of all elements in the :attr:`input` tensor. Input must be floating point or complex. + + Args: + input (Tensor): + the input tensor, either of floating point or complex dtype + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + If specified, the input tensor is casted to :attr:`dtype` before the operation + is performed. This is useful for preventing data type overflows. Default: None. + + Example:: + + >>> a = torch.randn(1, 3) + >>> a + tensor([[ 0.2294, -0.5481, 1.3288]]) + >>> torch.mean(a) + tensor(0.3367) + + .. function:: mean(input, dim, keepdim=False, *, dtype=None, out=None) -> Tensor + :noindex: + + Returns the mean value of each row of the :attr:`input` tensor in the given + dimension :attr:`dim`. If :attr:`dim` is a list of dimensions, + reduce over all of them. + + + If :attr:`keepdim` is ``True``, the output tensor is of the same size + as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the + output tensor having 1 (or ``len(dim)``) fewer dimension(s). + + + Args: + input (Tensor): the input tensor. + dim (int or tuple of ints): the dimension or dimensions to reduce. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. 
+ If specified, the input tensor is casted to :attr:`dtype` before the operation + is performed. This is useful for preventing data type overflows. Default: None. + out (Tensor, optional): the output tensor. + + .. seealso:: + + :func:`torch.nanmean` computes the mean value of `non-NaN` elements. + + Example:: + + >>> a = torch.randn(4, 4) + >>> a + tensor([[-0.3841, 0.6320, 0.4254, -0.7384], + [-0.9644, 1.0131, -0.6549, -1.4279], + [-0.2951, -1.3350, -0.7694, 0.5600], + [ 1.0842, -0.9580, 0.3623, 0.2343]]) + >>> torch.mean(a, 1) + tensor([-0.0163, -0.5085, -0.4599, 0.1807]) + >>> torch.mean(a, 1, True) + tensor([[-0.0163], + [-0.5085], + [-0.4599], + [ 0.1807]]) + """ + ... +@overload +def median(input: Tensor) -> Tensor: + r""" + median(input) -> Tensor + + Returns the median of the values in :attr:`input`. + + .. note:: + The median is not unique for :attr:`input` tensors with an even number + of elements. In this case the lower of the two medians is returned. To + compute the mean of both medians, use :func:`torch.quantile` with ``q=0.5`` instead. + + .. warning:: + This function produces deterministic (sub)gradients unlike ``median(dim=0)`` + + Args: + input (Tensor): the input tensor. + + Example:: + + >>> a = torch.randn(1, 3) + >>> a + tensor([[ 1.5219, -1.5212, 0.2202]]) + >>> torch.median(a) + tensor(0.2202) + + .. function:: median(input, dim=-1, keepdim=False, *, out=None) -> (Tensor, LongTensor) + :noindex: + + Returns a namedtuple ``(values, indices)`` where ``values`` contains the median of each row of :attr:`input` + in the dimension :attr:`dim`, and ``indices`` contains the index of the median values found in the dimension :attr:`dim`. + + By default, :attr:`dim` is the last dimension of the :attr:`input` tensor. + + If :attr:`keepdim` is ``True``, the output tensors are of the same size + as :attr:`input` except in the dimension :attr:`dim` where they are of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in + the outputs tensor having 1 fewer dimension than :attr:`input`. + + .. note:: + The median is not unique for :attr:`input` tensors with an even number + of elements in the dimension :attr:`dim`. In this case the lower of the + two medians is returned. To compute the mean of both medians in + :attr:`input`, use :func:`torch.quantile` with ``q=0.5`` instead. + + .. warning:: + ``indices`` does not necessarily contain the first occurrence of each + median value found, unless it is unique. + The exact implementation details are device-specific. + Do not expect the same result when run on CPU and GPU in general. + For the same reason do not expect the gradients to be deterministic. + + Args: + input (Tensor): the input tensor. + dim (int): the dimension to reduce. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + + Keyword args: + out ((Tensor, Tensor), optional): The first tensor will be populated with the median values and the second + tensor, which must have dtype long, with their indices in the dimension + :attr:`dim` of :attr:`input`. + + Example:: + + >>> a = torch.randn(4, 5) + >>> a + tensor([[ 0.2505, -0.3982, -0.9948, 0.3518, -1.3131], + [ 0.3180, -0.6993, 1.0436, 0.0438, 0.2270], + [-0.2751, 0.7303, 0.2192, 0.3321, 0.2488], + [ 1.0778, -1.9510, 0.7048, 0.4742, -0.7125]]) + >>> torch.median(a, 1) + torch.return_types.median(values=tensor([-0.3982, 0.2270, 0.2488, 0.4742]), indices=tensor([1, 4, 4, 3])) + """ + ... 
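+# Usage sketch for torch.maximum, declared above: per the note in its docstring, if
+# either element being compared is NaN, the result is NaN. Sample values are
+# illustrative assumptions.
+#
+#     import torch
+#
+#     a = torch.tensor([1.0, float("nan"), 3.0])
+#     b = torch.tensor([2.0, 0.0, 1.0])
+#     torch.maximum(a, b)                    # tensor([2., nan, 3.])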
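+# Usage sketch for the torch.median overloads above and below: the no-dim form reduces
+# over all elements (returning the lower of the two middle values for an even count),
+# while the dim form returns a (values, indices) namedtuple. The tensor below is an
+# illustrative assumption.
+#
+#     import torch
+#
+#     a = torch.tensor([[1.0, 5.0, 2.0],
+#                       [4.0, 3.0, 6.0]])
+#     torch.median(a)                        # tensor(3.) -- lower of the two middle values
+#     values, indices = torch.median(a, dim=1)
+#     # values  -> tensor([2., 4.]); indices -> tensor([2, 0])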
+@overload +def median(input: Tensor, dim: _int, keepdim: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.median: + r""" + median(input) -> Tensor + + Returns the median of the values in :attr:`input`. + + .. note:: + The median is not unique for :attr:`input` tensors with an even number + of elements. In this case the lower of the two medians is returned. To + compute the mean of both medians, use :func:`torch.quantile` with ``q=0.5`` instead. + + .. warning:: + This function produces deterministic (sub)gradients unlike ``median(dim=0)`` + + Args: + input (Tensor): the input tensor. + + Example:: + + >>> a = torch.randn(1, 3) + >>> a + tensor([[ 1.5219, -1.5212, 0.2202]]) + >>> torch.median(a) + tensor(0.2202) + + .. function:: median(input, dim=-1, keepdim=False, *, out=None) -> (Tensor, LongTensor) + :noindex: + + Returns a namedtuple ``(values, indices)`` where ``values`` contains the median of each row of :attr:`input` + in the dimension :attr:`dim`, and ``indices`` contains the index of the median values found in the dimension :attr:`dim`. + + By default, :attr:`dim` is the last dimension of the :attr:`input` tensor. + + If :attr:`keepdim` is ``True``, the output tensors are of the same size + as :attr:`input` except in the dimension :attr:`dim` where they are of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in + the outputs tensor having 1 fewer dimension than :attr:`input`. + + .. note:: + The median is not unique for :attr:`input` tensors with an even number + of elements in the dimension :attr:`dim`. In this case the lower of the + two medians is returned. To compute the mean of both medians in + :attr:`input`, use :func:`torch.quantile` with ``q=0.5`` instead. + + .. warning:: + ``indices`` does not necessarily contain the first occurrence of each + median value found, unless it is unique. + The exact implementation details are device-specific. + Do not expect the same result when run on CPU and GPU in general. + For the same reason do not expect the gradients to be deterministic. + + Args: + input (Tensor): the input tensor. + dim (int): the dimension to reduce. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + + Keyword args: + out ((Tensor, Tensor), optional): The first tensor will be populated with the median values and the second + tensor, which must have dtype long, with their indices in the dimension + :attr:`dim` of :attr:`input`. + + Example:: + + >>> a = torch.randn(4, 5) + >>> a + tensor([[ 0.2505, -0.3982, -0.9948, 0.3518, -1.3131], + [ 0.3180, -0.6993, 1.0436, 0.0438, 0.2270], + [-0.2751, 0.7303, 0.2192, 0.3321, 0.2488], + [ 1.0778, -1.9510, 0.7048, 0.4742, -0.7125]]) + >>> torch.median(a, 1) + torch.return_types.median(values=tensor([-0.3982, 0.2270, 0.2488, 0.4742]), indices=tensor([1, 4, 4, 3])) + """ + ... +@overload +def median(input: Tensor, dim: Union[str, ellipsis, None], keepdim: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.median: + r""" + median(input) -> Tensor + + Returns the median of the values in :attr:`input`. + + .. note:: + The median is not unique for :attr:`input` tensors with an even number + of elements. In this case the lower of the two medians is returned. To + compute the mean of both medians, use :func:`torch.quantile` with ``q=0.5`` instead. + + .. 
warning:: + This function produces deterministic (sub)gradients unlike ``median(dim=0)`` + + Args: + input (Tensor): the input tensor. + + Example:: + + >>> a = torch.randn(1, 3) + >>> a + tensor([[ 1.5219, -1.5212, 0.2202]]) + >>> torch.median(a) + tensor(0.2202) + + .. function:: median(input, dim=-1, keepdim=False, *, out=None) -> (Tensor, LongTensor) + :noindex: + + Returns a namedtuple ``(values, indices)`` where ``values`` contains the median of each row of :attr:`input` + in the dimension :attr:`dim`, and ``indices`` contains the index of the median values found in the dimension :attr:`dim`. + + By default, :attr:`dim` is the last dimension of the :attr:`input` tensor. + + If :attr:`keepdim` is ``True``, the output tensors are of the same size + as :attr:`input` except in the dimension :attr:`dim` where they are of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in + the outputs tensor having 1 fewer dimension than :attr:`input`. + + .. note:: + The median is not unique for :attr:`input` tensors with an even number + of elements in the dimension :attr:`dim`. In this case the lower of the + two medians is returned. To compute the mean of both medians in + :attr:`input`, use :func:`torch.quantile` with ``q=0.5`` instead. + + .. warning:: + ``indices`` does not necessarily contain the first occurrence of each + median value found, unless it is unique. + The exact implementation details are device-specific. + Do not expect the same result when run on CPU and GPU in general. + For the same reason do not expect the gradients to be deterministic. + + Args: + input (Tensor): the input tensor. + dim (int): the dimension to reduce. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + + Keyword args: + out ((Tensor, Tensor), optional): The first tensor will be populated with the median values and the second + tensor, which must have dtype long, with their indices in the dimension + :attr:`dim` of :attr:`input`. + + Example:: + + >>> a = torch.randn(4, 5) + >>> a + tensor([[ 0.2505, -0.3982, -0.9948, 0.3518, -1.3131], + [ 0.3180, -0.6993, 1.0436, 0.0438, 0.2270], + [-0.2751, 0.7303, 0.2192, 0.3321, 0.2488], + [ 1.0778, -1.9510, 0.7048, 0.4742, -0.7125]]) + >>> torch.median(a, 1) + torch.return_types.median(values=tensor([-0.3982, 0.2270, 0.2488, 0.4742]), indices=tensor([1, 4, 4, 3])) + """ + ... +@overload +def min(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + min(input) -> Tensor + + Returns the minimum value of all elements in the :attr:`input` tensor. + + .. warning:: + This function produces deterministic (sub)gradients unlike ``min(dim=0)`` + + Args: + input (Tensor): the input tensor. + + Example:: + + >>> a = torch.randn(1, 3) + >>> a + tensor([[ 0.6750, 1.0857, 1.7197]]) + >>> torch.min(a) + tensor(0.6750) + + .. function:: min(input, dim, keepdim=False, *, out=None) -> (Tensor, LongTensor) + :noindex: + + Returns a namedtuple ``(values, indices)`` where ``values`` is the minimum + value of each row of the :attr:`input` tensor in the given dimension + :attr:`dim`. And ``indices`` is the index location of each minimum value found + (argmin). + + If :attr:`keepdim` is ``True``, the output tensors are of the same size as + :attr:`input` except in the dimension :attr:`dim` where they are of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in + the output tensors having 1 fewer dimension than :attr:`input`. + + .. 
note:: If there are multiple minimal values in a reduced row then + the indices of the first minimal value are returned. + + Args: + input (Tensor): the input tensor. + dim (int): the dimension to reduce. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + + Keyword args: + out (tuple, optional): the tuple of two output tensors (min, min_indices) + + Example:: + + >>> a = torch.randn(4, 4) + >>> a + tensor([[-0.6248, 1.1334, -1.1899, -0.2803], + [-1.4644, -0.2635, -0.3651, 0.6134], + [ 0.2457, 0.0384, 1.0128, 0.7015], + [-0.1153, 2.9849, 2.1458, 0.5788]]) + >>> torch.min(a, 1) + torch.return_types.min(values=tensor([-1.1899, -1.4644, 0.0384, -0.1153]), indices=tensor([2, 0, 1, 0])) + + .. function:: min(input, other, *, out=None) -> Tensor + :noindex: + + See :func:`torch.minimum`. + """ + ... +@overload +def min(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + min(input) -> Tensor + + Returns the minimum value of all elements in the :attr:`input` tensor. + + .. warning:: + This function produces deterministic (sub)gradients unlike ``min(dim=0)`` + + Args: + input (Tensor): the input tensor. + + Example:: + + >>> a = torch.randn(1, 3) + >>> a + tensor([[ 0.6750, 1.0857, 1.7197]]) + >>> torch.min(a) + tensor(0.6750) + + .. function:: min(input, dim, keepdim=False, *, out=None) -> (Tensor, LongTensor) + :noindex: + + Returns a namedtuple ``(values, indices)`` where ``values`` is the minimum + value of each row of the :attr:`input` tensor in the given dimension + :attr:`dim`. And ``indices`` is the index location of each minimum value found + (argmin). + + If :attr:`keepdim` is ``True``, the output tensors are of the same size as + :attr:`input` except in the dimension :attr:`dim` where they are of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in + the output tensors having 1 fewer dimension than :attr:`input`. + + .. note:: If there are multiple minimal values in a reduced row then + the indices of the first minimal value are returned. + + Args: + input (Tensor): the input tensor. + dim (int): the dimension to reduce. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + + Keyword args: + out (tuple, optional): the tuple of two output tensors (min, min_indices) + + Example:: + + >>> a = torch.randn(4, 4) + >>> a + tensor([[-0.6248, 1.1334, -1.1899, -0.2803], + [-1.4644, -0.2635, -0.3651, 0.6134], + [ 0.2457, 0.0384, 1.0128, 0.7015], + [-0.1153, 2.9849, 2.1458, 0.5788]]) + >>> torch.min(a, 1) + torch.return_types.min(values=tensor([-1.1899, -1.4644, 0.0384, -0.1153]), indices=tensor([2, 0, 1, 0])) + + .. function:: min(input, other, *, out=None) -> Tensor + :noindex: + + See :func:`torch.minimum`. + """ + ... +@overload +def min(input: Tensor, dim: _int, keepdim: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.min: + r""" + min(input) -> Tensor + + Returns the minimum value of all elements in the :attr:`input` tensor. + + .. warning:: + This function produces deterministic (sub)gradients unlike ``min(dim=0)`` + + Args: + input (Tensor): the input tensor. + + Example:: + + >>> a = torch.randn(1, 3) + >>> a + tensor([[ 0.6750, 1.0857, 1.7197]]) + >>> torch.min(a) + tensor(0.6750) + + .. 
function:: min(input, dim, keepdim=False, *, out=None) -> (Tensor, LongTensor) + :noindex: + + Returns a namedtuple ``(values, indices)`` where ``values`` is the minimum + value of each row of the :attr:`input` tensor in the given dimension + :attr:`dim`. And ``indices`` is the index location of each minimum value found + (argmin). + + If :attr:`keepdim` is ``True``, the output tensors are of the same size as + :attr:`input` except in the dimension :attr:`dim` where they are of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in + the output tensors having 1 fewer dimension than :attr:`input`. + + .. note:: If there are multiple minimal values in a reduced row then + the indices of the first minimal value are returned. + + Args: + input (Tensor): the input tensor. + dim (int): the dimension to reduce. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + + Keyword args: + out (tuple, optional): the tuple of two output tensors (min, min_indices) + + Example:: + + >>> a = torch.randn(4, 4) + >>> a + tensor([[-0.6248, 1.1334, -1.1899, -0.2803], + [-1.4644, -0.2635, -0.3651, 0.6134], + [ 0.2457, 0.0384, 1.0128, 0.7015], + [-0.1153, 2.9849, 2.1458, 0.5788]]) + >>> torch.min(a, 1) + torch.return_types.min(values=tensor([-1.1899, -1.4644, 0.0384, -0.1153]), indices=tensor([2, 0, 1, 0])) + + .. function:: min(input, other, *, out=None) -> Tensor + :noindex: + + See :func:`torch.minimum`. + """ + ... +@overload +def min(input: Tensor, dim: Union[str, ellipsis, None], keepdim: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.min: + r""" + min(input) -> Tensor + + Returns the minimum value of all elements in the :attr:`input` tensor. + + .. warning:: + This function produces deterministic (sub)gradients unlike ``min(dim=0)`` + + Args: + input (Tensor): the input tensor. + + Example:: + + >>> a = torch.randn(1, 3) + >>> a + tensor([[ 0.6750, 1.0857, 1.7197]]) + >>> torch.min(a) + tensor(0.6750) + + .. function:: min(input, dim, keepdim=False, *, out=None) -> (Tensor, LongTensor) + :noindex: + + Returns a namedtuple ``(values, indices)`` where ``values`` is the minimum + value of each row of the :attr:`input` tensor in the given dimension + :attr:`dim`. And ``indices`` is the index location of each minimum value found + (argmin). + + If :attr:`keepdim` is ``True``, the output tensors are of the same size as + :attr:`input` except in the dimension :attr:`dim` where they are of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in + the output tensors having 1 fewer dimension than :attr:`input`. + + .. note:: If there are multiple minimal values in a reduced row then + the indices of the first minimal value are returned. + + Args: + input (Tensor): the input tensor. + dim (int): the dimension to reduce. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + + Keyword args: + out (tuple, optional): the tuple of two output tensors (min, min_indices) + + Example:: + + >>> a = torch.randn(4, 4) + >>> a + tensor([[-0.6248, 1.1334, -1.1899, -0.2803], + [-1.4644, -0.2635, -0.3651, 0.6134], + [ 0.2457, 0.0384, 1.0128, 0.7015], + [-0.1153, 2.9849, 2.1458, 0.5788]]) + >>> torch.min(a, 1) + torch.return_types.min(values=tensor([-1.1899, -1.4644, 0.0384, -0.1153]), indices=tensor([2, 0, 1, 0])) + + .. function:: min(input, other, *, out=None) -> Tensor + :noindex: + + See :func:`torch.minimum`. + """ + ... 
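+# --- Editor's note (illustrative only; not part of the generated stub) ---
+# A small sketch of the two torch.min forms documented above: the full
+# reduction returns a single scalar tensor, while the dim-reducing overload
+# returns a (values, indices) namedtuple. The input is fixed, so the printed
+# results should be reproducible.
+#
+#     >>> import torch
+#     >>> x = torch.tensor([[3., 1., 2.], [0., 5., 4.]])
+#     >>> torch.min(x)                   # minimum over all elements
+#     tensor(0.)
+#     >>> values, indices = torch.min(x, dim=1)
+#     >>> values, indices                # per-row minima and their positions
+#     (tensor([1., 0.]), tensor([1, 0]))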
+def minimum(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + minimum(input, other, *, out=None) -> Tensor + + Computes the element-wise minimum of :attr:`input` and :attr:`other`. + + .. note:: + If one of the elements being compared is a NaN, then that element is returned. + :func:`minimum` is not supported for tensors with complex dtypes. + + Args: + input (Tensor): the input tensor. + other (Tensor): the second input tensor + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.tensor((1, 2, -1)) + >>> b = torch.tensor((3, 0, 4)) + >>> torch.minimum(a, b) + tensor([1, 0, -1]) + """ + ... +def miopen_batch_norm(input: Tensor, weight: Tensor, bias: Optional[Tensor], running_mean: Optional[Tensor], running_var: Optional[Tensor], training: _bool, exponential_average_factor: _float, epsilon: _float) -> Tuple[Tensor, Tensor, Tensor]: ... +def miopen_convolution(input: Tensor, weight: Tensor, bias: Optional[Tensor], padding: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt], benchmark: _bool, deterministic: _bool) -> Tensor: ... +def miopen_convolution_add_relu(input: Tensor, weight: Tensor, z: Tensor, alpha: Optional[Union[Number, _complex]], bias: Optional[Tensor], stride: Sequence[Union[_int, SymInt]], padding: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt]) -> Tensor: ... +def miopen_convolution_relu(input: Tensor, weight: Tensor, bias: Optional[Tensor], stride: Sequence[Union[_int, SymInt]], padding: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt]) -> Tensor: ... +def miopen_convolution_transpose(input: Tensor, weight: Tensor, bias: Optional[Tensor], padding: Sequence[Union[_int, SymInt]], output_padding: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt], benchmark: _bool, deterministic: _bool) -> Tensor: ... +def miopen_depthwise_convolution(input: Tensor, weight: Tensor, bias: Optional[Tensor], padding: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt], benchmark: _bool, deterministic: _bool) -> Tensor: ... +def miopen_rnn(input: Tensor, weight: Union[Tuple[Tensor, ...], List[Tensor]], weight_stride0: _int, hx: Tensor, cx: Optional[Tensor], mode: _int, hidden_size: _int, num_layers: _int, batch_first: _bool, dropout: _float, train: _bool, bidirectional: _bool, batch_sizes: _size, dropout_state: Optional[Tensor]) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]: ... +def mkldnn_adaptive_avg_pool2d(input: Tensor, output_size: Union[_int, _size], *, out: Optional[Tensor] = None) -> Tensor: ... +def mkldnn_convolution(input: Tensor, weight: Tensor, bias: Optional[Tensor], padding: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt]) -> Tensor: ... +def mkldnn_linear_backward_weights(grad_output: Tensor, input: Tensor, weight: Tensor, bias_defined: _bool) -> Tuple[Tensor, Tensor]: ... +def mkldnn_max_pool2d(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size] = (), padding: Union[_int, _size] = 0, dilation: Union[_int, _size] = 1, ceil_mode: _bool = False) -> Tensor: ... 
+def mkldnn_max_pool3d(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size] = (), padding: Union[_int, _size] = 0, dilation: Union[_int, _size] = 1, ceil_mode: _bool = False) -> Tensor: ... +def mkldnn_rnn_layer(input: Tensor, weight0: Tensor, weight1: Tensor, weight2: Tensor, weight3: Tensor, hx_: Tensor, cx_: Tensor, reverse: _bool, batch_sizes: _size, mode: _int, hidden_size: _int, num_layers: _int, has_biases: _bool, bidirectional: _bool, batch_first: _bool, train: _bool) -> Tuple[Tensor, Tensor, Tensor, Tensor]: ... +def mm(input: Tensor, mat2: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + mm(input, mat2, *, out=None) -> Tensor + + Performs a matrix multiplication of the matrices :attr:`input` and :attr:`mat2`. + + If :attr:`input` is a :math:`(n \times m)` tensor, :attr:`mat2` is a + :math:`(m \times p)` tensor, :attr:`out` will be a :math:`(n \times p)` tensor. + + .. note:: This function does not :ref:`broadcast `. + For broadcasting matrix products, see :func:`torch.matmul`. + + Supports strided and sparse 2-D tensors as inputs, autograd with + respect to strided inputs. + + This operation has support for arguments with :ref:`sparse layouts`. + If :attr:`out` is provided it's layout will be used. Otherwise, the result + layout will be deduced from that of :attr:`input`. + + + .. warning:: + Sparse support is a beta feature and some layout(s)/dtype/device combinations may not be supported, + or may not have autograd support. If you notice missing functionality please + open a feature request. + + This operator supports :ref:`TensorFloat32`. + + On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision` for backward. + + Args: + input (Tensor): the first matrix to be matrix multiplied + mat2 (Tensor): the second matrix to be matrix multiplied + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> mat1 = torch.randn(2, 3) + >>> mat2 = torch.randn(3, 3) + >>> torch.mm(mat1, mat2) + tensor([[ 0.4851, 0.5037, -0.3633], + [-0.0760, -3.6705, 2.4784]]) + """ + ... +@overload +def mode(input: Tensor, dim: _int = -1, keepdim: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.mode: + r""" + mode(input, dim=-1, keepdim=False, *, out=None) -> (Tensor, LongTensor) + + Returns a namedtuple ``(values, indices)`` where ``values`` is the mode + value of each row of the :attr:`input` tensor in the given dimension + :attr:`dim`, i.e. a value which appears most often + in that row, and ``indices`` is the index location of each mode value found. + + By default, :attr:`dim` is the last dimension of the :attr:`input` tensor. + + If :attr:`keepdim` is ``True``, the output tensors are of the same size as + :attr:`input` except in the dimension :attr:`dim` where they are of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting + in the output tensors having 1 fewer dimension than :attr:`input`. + + .. note:: This function is not defined for ``torch.cuda.Tensor`` yet. + + Args: + input (Tensor): the input tensor. + dim (int): the dimension to reduce. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. 
+ + Keyword args: + out (tuple, optional): the result tuple of two output tensors (values, indices) + + Example:: + + >>> b = torch.tensor( + [[0, 0, 0, 2, 0, 0, 2], + [0, 3, 0, 0, 2, 0, 1], + [2, 2, 2, 0, 0, 0, 3], + [2, 2, 3, 0, 1, 1, 0], + [1, 1, 0, 0, 2, 0, 2]]) + >>> torch.mode(b, 0) + torch.return_types.mode( + values=tensor([0, 2, 0, 0, 0, 0, 2]), + indices=tensor([1, 3, 4, 4, 2, 4, 4])) + """ + ... +@overload +def mode(input: Tensor, dim: Union[str, ellipsis, None], keepdim: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.mode: + r""" + mode(input, dim=-1, keepdim=False, *, out=None) -> (Tensor, LongTensor) + + Returns a namedtuple ``(values, indices)`` where ``values`` is the mode + value of each row of the :attr:`input` tensor in the given dimension + :attr:`dim`, i.e. a value which appears most often + in that row, and ``indices`` is the index location of each mode value found. + + By default, :attr:`dim` is the last dimension of the :attr:`input` tensor. + + If :attr:`keepdim` is ``True``, the output tensors are of the same size as + :attr:`input` except in the dimension :attr:`dim` where they are of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting + in the output tensors having 1 fewer dimension than :attr:`input`. + + .. note:: This function is not defined for ``torch.cuda.Tensor`` yet. + + Args: + input (Tensor): the input tensor. + dim (int): the dimension to reduce. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + + Keyword args: + out (tuple, optional): the result tuple of two output tensors (values, indices) + + Example:: + + >>> b = torch.tensor( + [[0, 0, 0, 2, 0, 0, 2], + [0, 3, 0, 0, 2, 0, 1], + [2, 2, 2, 0, 0, 0, 3], + [2, 2, 3, 0, 1, 1, 0], + [1, 1, 0, 0, 2, 0, 2]]) + >>> torch.mode(b, 0) + torch.return_types.mode( + values=tensor([0, 2, 0, 0, 0, 0, 2]), + indices=tensor([1, 3, 4, 4, 2, 4, 4])) + """ + ... +@overload +def moveaxis(input: Tensor, source: _int, destination: _int) -> Tensor: + r""" + moveaxis(input, source, destination) -> Tensor + + Alias for :func:`torch.movedim`. + + This function is equivalent to NumPy's moveaxis function. + + Examples:: + + >>> t = torch.randn(3,2,1) + >>> t + tensor([[[-0.3362], + [-0.8437]], + + [[-0.9627], + [ 0.1727]], + + [[ 0.5173], + [-0.1398]]]) + >>> torch.moveaxis(t, 1, 0).shape + torch.Size([2, 3, 1]) + >>> torch.moveaxis(t, 1, 0) + tensor([[[-0.3362], + [-0.9627], + [ 0.5173]], + + [[-0.8437], + [ 0.1727], + [-0.1398]]]) + >>> torch.moveaxis(t, (1, 2), (0, 1)).shape + torch.Size([2, 1, 3]) + >>> torch.moveaxis(t, (1, 2), (0, 1)) + tensor([[[-0.3362, -0.9627, 0.5173]], + + [[-0.8437, 0.1727, -0.1398]]]) + """ + ... +@overload +def moveaxis(input: Tensor, source: _size, destination: _size) -> Tensor: + r""" + moveaxis(input, source, destination) -> Tensor + + Alias for :func:`torch.movedim`. + + This function is equivalent to NumPy's moveaxis function. + + Examples:: + + >>> t = torch.randn(3,2,1) + >>> t + tensor([[[-0.3362], + [-0.8437]], + + [[-0.9627], + [ 0.1727]], + + [[ 0.5173], + [-0.1398]]]) + >>> torch.moveaxis(t, 1, 0).shape + torch.Size([2, 3, 1]) + >>> torch.moveaxis(t, 1, 0) + tensor([[[-0.3362], + [-0.9627], + [ 0.5173]], + + [[-0.8437], + [ 0.1727], + [-0.1398]]]) + >>> torch.moveaxis(t, (1, 2), (0, 1)).shape + torch.Size([2, 1, 3]) + >>> torch.moveaxis(t, (1, 2), (0, 1)) + tensor([[[-0.3362, -0.9627, 0.5173]], + + [[-0.8437, 0.1727, -0.1398]]]) + """ + ... 
+@overload +def movedim(input: Tensor, source: _int, destination: _int) -> Tensor: + r""" + movedim(input, source, destination) -> Tensor + + Moves the dimension(s) of :attr:`input` at the position(s) in :attr:`source` + to the position(s) in :attr:`destination`. + + Other dimensions of :attr:`input` that are not explicitly moved remain in + their original order and appear at the positions not specified in :attr:`destination`. + + Args: + input (Tensor): the input tensor. + source (int or tuple of ints): Original positions of the dims to move. These must be unique. + destination (int or tuple of ints): Destination positions for each of the original dims. These must also be unique. + + Examples:: + + >>> t = torch.randn(3,2,1) + >>> t + tensor([[[-0.3362], + [-0.8437]], + + [[-0.9627], + [ 0.1727]], + + [[ 0.5173], + [-0.1398]]]) + >>> torch.movedim(t, 1, 0).shape + torch.Size([2, 3, 1]) + >>> torch.movedim(t, 1, 0) + tensor([[[-0.3362], + [-0.9627], + [ 0.5173]], + + [[-0.8437], + [ 0.1727], + [-0.1398]]]) + >>> torch.movedim(t, (1, 2), (0, 1)).shape + torch.Size([2, 1, 3]) + >>> torch.movedim(t, (1, 2), (0, 1)) + tensor([[[-0.3362, -0.9627, 0.5173]], + + [[-0.8437, 0.1727, -0.1398]]]) + """ + ... +@overload +def movedim(input: Tensor, source: _size, destination: _size) -> Tensor: + r""" + movedim(input, source, destination) -> Tensor + + Moves the dimension(s) of :attr:`input` at the position(s) in :attr:`source` + to the position(s) in :attr:`destination`. + + Other dimensions of :attr:`input` that are not explicitly moved remain in + their original order and appear at the positions not specified in :attr:`destination`. + + Args: + input (Tensor): the input tensor. + source (int or tuple of ints): Original positions of the dims to move. These must be unique. + destination (int or tuple of ints): Destination positions for each of the original dims. These must also be unique. + + Examples:: + + >>> t = torch.randn(3,2,1) + >>> t + tensor([[[-0.3362], + [-0.8437]], + + [[-0.9627], + [ 0.1727]], + + [[ 0.5173], + [-0.1398]]]) + >>> torch.movedim(t, 1, 0).shape + torch.Size([2, 3, 1]) + >>> torch.movedim(t, 1, 0) + tensor([[[-0.3362], + [-0.9627], + [ 0.5173]], + + [[-0.8437], + [ 0.1727], + [-0.1398]]]) + >>> torch.movedim(t, (1, 2), (0, 1)).shape + torch.Size([2, 1, 3]) + >>> torch.movedim(t, (1, 2), (0, 1)) + tensor([[[-0.3362, -0.9627, 0.5173]], + + [[-0.8437, 0.1727, -0.1398]]]) + """ + ... +def msort(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + msort(input, *, out=None) -> Tensor + + Sorts the elements of the :attr:`input` tensor along its first dimension + in ascending order by value. + + .. note:: `torch.msort(t)` is equivalent to `torch.sort(t, dim=0)[0]`. + See also :func:`torch.sort`. + + Args: + input (Tensor): the input tensor. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> t = torch.randn(3, 4) + >>> t + tensor([[-0.1321, 0.4370, -1.2631, -1.1289], + [-2.0527, -1.1250, 0.2275, 0.3077], + [-0.0881, -0.1259, -0.5495, 1.0284]]) + >>> torch.msort(t) + tensor([[-2.0527, -1.1250, -1.2631, -1.1289], + [-0.1321, -0.1259, -0.5495, 0.3077], + [-0.0881, 0.4370, 0.2275, 1.0284]]) + """ + ... +def mul(input: Union[Tensor, Number, _complex], other: Union[Tensor, Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: + r""" + mul(input, other, *, out=None) -> Tensor + + Multiplies :attr:`input` by :attr:`other`. + + + .. 
math:: + \text{out}_i = \text{input}_i \times \text{other}_i + + + Supports :ref:`broadcasting to a common shape `, + :ref:`type promotion `, and integer, float, and complex inputs. + + Args: + input (Tensor): the input tensor. + other (Tensor or Number) - the tensor or number to multiply input by. + + Keyword args: + out (Tensor, optional): the output tensor. + + Examples:: + + >>> a = torch.randn(3) + >>> a + tensor([ 0.2015, -0.4255, 2.6087]) + >>> torch.mul(a, 100) + tensor([ 20.1494, -42.5491, 260.8663]) + + >>> b = torch.randn(4, 1) + >>> b + tensor([[ 1.1207], + [-0.3137], + [ 0.0700], + [ 0.8378]]) + >>> c = torch.randn(1, 4) + >>> c + tensor([[ 0.5146, 0.1216, -0.5244, 2.2382]]) + >>> torch.mul(b, c) + tensor([[ 0.5767, 0.1363, -0.5877, 2.5083], + [-0.1614, -0.0382, 0.1645, -0.7021], + [ 0.0360, 0.0085, -0.0367, 0.1567], + [ 0.4312, 0.1019, -0.4394, 1.8753]]) + """ + ... +def multinomial(input: Tensor, num_samples: _int, replacement: _bool = False, *, generator: Optional[Generator] = None, out: Optional[Tensor] = None) -> Tensor: + r""" + multinomial(input, num_samples, replacement=False, *, generator=None, out=None) -> LongTensor + + Returns a tensor where each row contains :attr:`num_samples` indices sampled + from the multinomial (a stricter definition would be multivariate, + refer to torch.distributions.multinomial.Multinomial for more details) + probability distribution located in the corresponding row + of tensor :attr:`input`. + + .. note:: + The rows of :attr:`input` do not need to sum to one (in which case we use + the values as weights), but must be non-negative, finite and have + a non-zero sum. + + Indices are ordered from left to right according to when each was sampled + (first samples are placed in first column). + + If :attr:`input` is a vector, :attr:`out` is a vector of size :attr:`num_samples`. + + If :attr:`input` is a matrix with `m` rows, :attr:`out` is an matrix of shape + :math:`(m \times \text{num\_samples})`. + + If replacement is ``True``, samples are drawn with replacement. + + If not, they are drawn without replacement, which means that when a + sample index is drawn for a row, it cannot be drawn again for that row. + + .. note:: + When drawn without replacement, :attr:`num_samples` must be lower than + number of non-zero elements in :attr:`input` (or the min number of non-zero + elements in each row of :attr:`input` if it is a matrix). + + Args: + input (Tensor): the input tensor containing probabilities + num_samples (int): number of samples to draw + replacement (bool, optional): whether to draw with replacement or not + + Keyword args: + generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling + out (Tensor, optional): the output tensor. + + Example:: + + >>> weights = torch.tensor([0, 10, 3, 0], dtype=torch.float) # create a tensor of weights + >>> torch.multinomial(weights, 2) + tensor([1, 2]) + >>> torch.multinomial(weights, 4) # ERROR! + RuntimeError: invalid argument 2: invalid multinomial distribution (with replacement=False, + not enough non-negative category to sample) at ../aten/src/TH/generic/THTensorRandom.cpp:320 + >>> torch.multinomial(weights, 4, replacement=True) + tensor([ 2, 1, 1, 1]) + """ + ... +@overload +def multiply(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + multiply(input, other, *, out=None) + + Alias for :func:`torch.mul`. + """ + ... 
+@overload +def multiply(input: Tensor, other: Union[Number, _complex]) -> Tensor: + r""" + multiply(input, other, *, out=None) + + Alias for :func:`torch.mul`. + """ + ... +def mv(input: Tensor, vec: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + mv(input, vec, *, out=None) -> Tensor + + Performs a matrix-vector product of the matrix :attr:`input` and the vector + :attr:`vec`. + + If :attr:`input` is a :math:`(n \times m)` tensor, :attr:`vec` is a 1-D tensor of + size :math:`m`, :attr:`out` will be 1-D of size :math:`n`. + + .. note:: This function does not :ref:`broadcast `. + + Args: + input (Tensor): matrix to be multiplied + vec (Tensor): vector to be multiplied + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> mat = torch.randn(2, 3) + >>> vec = torch.randn(3) + >>> torch.mv(mat, vec) + tensor([ 1.0404, -0.6361]) + """ + ... +def mvlgamma(input: Tensor, p: _int, *, out: Optional[Tensor] = None) -> Tensor: + r""" + mvlgamma(input, p, *, out=None) -> Tensor + + Alias for :func:`torch.special.multigammaln`. + """ + ... +def nan_to_num(input: Tensor, nan: Optional[_float] = None, posinf: Optional[_float] = None, neginf: Optional[_float] = None, *, out: Optional[Tensor] = None) -> Tensor: + r""" + nan_to_num(input, nan=0.0, posinf=None, neginf=None, *, out=None) -> Tensor + + Replaces :literal:`NaN`, positive infinity, and negative infinity values in :attr:`input` + with the values specified by :attr:`nan`, :attr:`posinf`, and :attr:`neginf`, respectively. + By default, :literal:`NaN`\ s are replaced with zero, positive infinity is replaced with the + greatest finite value representable by :attr:`input`'s dtype, and negative infinity + is replaced with the least finite value representable by :attr:`input`'s dtype. + + Args: + input (Tensor): the input tensor. + nan (Number, optional): the value to replace :literal:`NaN`\s with. Default is zero. + posinf (Number, optional): if a Number, the value to replace positive infinity values with. + If None, positive infinity values are replaced with the greatest finite value representable by :attr:`input`'s dtype. + Default is None. + neginf (Number, optional): if a Number, the value to replace negative infinity values with. + If None, negative infinity values are replaced with the lowest finite value representable by :attr:`input`'s dtype. + Default is None. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> x = torch.tensor([float('nan'), float('inf'), -float('inf'), 3.14]) + >>> torch.nan_to_num(x) + tensor([ 0.0000e+00, 3.4028e+38, -3.4028e+38, 3.1400e+00]) + >>> torch.nan_to_num(x, nan=2.0) + tensor([ 2.0000e+00, 3.4028e+38, -3.4028e+38, 3.1400e+00]) + >>> torch.nan_to_num(x, nan=2.0, posinf=1.0) + tensor([ 2.0000e+00, 1.0000e+00, -3.4028e+38, 3.1400e+00]) + """ + ... +def nan_to_num_(input: Tensor, nan: Optional[_float] = None, posinf: Optional[_float] = None, neginf: Optional[_float] = None) -> Tensor: ... +def nanmean(input: Tensor, dim: Optional[Union[_int, _size]] = None, keepdim: _bool = False, *, dtype: Optional[_dtype] = None, out: Optional[Tensor] = None) -> Tensor: + r""" + nanmean(input, dim=None, keepdim=False, *, dtype=None, out=None) -> Tensor + + Computes the mean of all `non-NaN` elements along the specified dimensions. + + This function is identical to :func:`torch.mean` when there are no `NaN` values + in the :attr:`input` tensor. 
In the presence of `NaN`, :func:`torch.mean` will + propagate the `NaN` to the output whereas :func:`torch.nanmean` will ignore the + `NaN` values (`torch.nanmean(a)` is equivalent to `torch.mean(a[~a.isnan()])`). + + + If :attr:`keepdim` is ``True``, the output tensor is of the same size + as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the + output tensor having 1 (or ``len(dim)``) fewer dimension(s). + + + Args: + input (Tensor): the input tensor. + + dim (int or tuple of ints, optional): the dimension or dimensions to reduce. + If ``None``, all dimensions are reduced. + + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + If specified, the input tensor is casted to :attr:`dtype` before the operation + is performed. This is useful for preventing data type overflows. Default: None. + out (Tensor, optional): the output tensor. + + .. seealso:: + + :func:`torch.mean` computes the mean value, propagating `NaN`. + + Example:: + + >>> x = torch.tensor([[torch.nan, 1, 2], [1, 2, 3]]) + >>> x.mean() + tensor(nan) + >>> x.nanmean() + tensor(1.8000) + >>> x.mean(dim=0) + tensor([ nan, 1.5000, 2.5000]) + >>> x.nanmean(dim=0) + tensor([1.0000, 1.5000, 2.5000]) + + # If all elements in the reduced dimensions are NaN then the result is NaN + >>> torch.tensor([torch.nan]).nanmean() + tensor(nan) + """ + ... +@overload +def nanmedian(input: Tensor) -> Tensor: + r""" + nanmedian(input) -> Tensor + + Returns the median of the values in :attr:`input`, ignoring ``NaN`` values. + + This function is identical to :func:`torch.median` when there are no ``NaN`` values in :attr:`input`. + When :attr:`input` has one or more ``NaN`` values, :func:`torch.median` will always return ``NaN``, + while this function will return the median of the non-``NaN`` elements in :attr:`input`. + If all the elements in :attr:`input` are ``NaN`` it will also return ``NaN``. + + Args: + input (Tensor): the input tensor. + + Example:: + + >>> a = torch.tensor([1, float('nan'), 3, 2]) + >>> a.median() + tensor(nan) + >>> a.nanmedian() + tensor(2.) + + .. function:: nanmedian(input, dim=-1, keepdim=False, *, out=None) -> (Tensor, LongTensor) + :noindex: + + Returns a namedtuple ``(values, indices)`` where ``values`` contains the median of each row of :attr:`input` + in the dimension :attr:`dim`, ignoring ``NaN`` values, and ``indices`` contains the index of the median values + found in the dimension :attr:`dim`. + + This function is identical to :func:`torch.median` when there are no ``NaN`` values in a reduced row. When a reduced row has + one or more ``NaN`` values, :func:`torch.median` will always reduce it to ``NaN``, while this function will reduce it to the + median of the non-``NaN`` elements. If all the elements in a reduced row are ``NaN`` then it will be reduced to ``NaN``, too. + + Args: + input (Tensor): the input tensor. + dim (int): the dimension to reduce. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + + Keyword args: + out ((Tensor, Tensor), optional): The first tensor will be populated with the median values and the second + tensor, which must have dtype long, with their indices in the dimension + :attr:`dim` of :attr:`input`. 
+ + Example:: + + >>> a = torch.tensor([[2, 3, 1], [float('nan'), 1, float('nan')]]) + >>> a + tensor([[2., 3., 1.], + [nan, 1., nan]]) + >>> a.median(0) + torch.return_types.median(values=tensor([nan, 1., nan]), indices=tensor([1, 1, 1])) + >>> a.nanmedian(0) + torch.return_types.nanmedian(values=tensor([2., 1., 1.]), indices=tensor([0, 1, 0])) + """ + ... +@overload +def nanmedian(input: Tensor, dim: _int, keepdim: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.nanmedian: + r""" + nanmedian(input) -> Tensor + + Returns the median of the values in :attr:`input`, ignoring ``NaN`` values. + + This function is identical to :func:`torch.median` when there are no ``NaN`` values in :attr:`input`. + When :attr:`input` has one or more ``NaN`` values, :func:`torch.median` will always return ``NaN``, + while this function will return the median of the non-``NaN`` elements in :attr:`input`. + If all the elements in :attr:`input` are ``NaN`` it will also return ``NaN``. + + Args: + input (Tensor): the input tensor. + + Example:: + + >>> a = torch.tensor([1, float('nan'), 3, 2]) + >>> a.median() + tensor(nan) + >>> a.nanmedian() + tensor(2.) + + .. function:: nanmedian(input, dim=-1, keepdim=False, *, out=None) -> (Tensor, LongTensor) + :noindex: + + Returns a namedtuple ``(values, indices)`` where ``values`` contains the median of each row of :attr:`input` + in the dimension :attr:`dim`, ignoring ``NaN`` values, and ``indices`` contains the index of the median values + found in the dimension :attr:`dim`. + + This function is identical to :func:`torch.median` when there are no ``NaN`` values in a reduced row. When a reduced row has + one or more ``NaN`` values, :func:`torch.median` will always reduce it to ``NaN``, while this function will reduce it to the + median of the non-``NaN`` elements. If all the elements in a reduced row are ``NaN`` then it will be reduced to ``NaN``, too. + + Args: + input (Tensor): the input tensor. + dim (int): the dimension to reduce. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + + Keyword args: + out ((Tensor, Tensor), optional): The first tensor will be populated with the median values and the second + tensor, which must have dtype long, with their indices in the dimension + :attr:`dim` of :attr:`input`. + + Example:: + + >>> a = torch.tensor([[2, 3, 1], [float('nan'), 1, float('nan')]]) + >>> a + tensor([[2., 3., 1.], + [nan, 1., nan]]) + >>> a.median(0) + torch.return_types.median(values=tensor([nan, 1., nan]), indices=tensor([1, 1, 1])) + >>> a.nanmedian(0) + torch.return_types.nanmedian(values=tensor([2., 1., 1.]), indices=tensor([0, 1, 0])) + """ + ... +@overload +def nanmedian(input: Tensor, dim: Union[str, ellipsis, None], keepdim: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.nanmedian: + r""" + nanmedian(input) -> Tensor + + Returns the median of the values in :attr:`input`, ignoring ``NaN`` values. + + This function is identical to :func:`torch.median` when there are no ``NaN`` values in :attr:`input`. + When :attr:`input` has one or more ``NaN`` values, :func:`torch.median` will always return ``NaN``, + while this function will return the median of the non-``NaN`` elements in :attr:`input`. + If all the elements in :attr:`input` are ``NaN`` it will also return ``NaN``. + + Args: + input (Tensor): the input tensor. 
+ + Example:: + + >>> a = torch.tensor([1, float('nan'), 3, 2]) + >>> a.median() + tensor(nan) + >>> a.nanmedian() + tensor(2.) + + .. function:: nanmedian(input, dim=-1, keepdim=False, *, out=None) -> (Tensor, LongTensor) + :noindex: + + Returns a namedtuple ``(values, indices)`` where ``values`` contains the median of each row of :attr:`input` + in the dimension :attr:`dim`, ignoring ``NaN`` values, and ``indices`` contains the index of the median values + found in the dimension :attr:`dim`. + + This function is identical to :func:`torch.median` when there are no ``NaN`` values in a reduced row. When a reduced row has + one or more ``NaN`` values, :func:`torch.median` will always reduce it to ``NaN``, while this function will reduce it to the + median of the non-``NaN`` elements. If all the elements in a reduced row are ``NaN`` then it will be reduced to ``NaN``, too. + + Args: + input (Tensor): the input tensor. + dim (int): the dimension to reduce. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + + Keyword args: + out ((Tensor, Tensor), optional): The first tensor will be populated with the median values and the second + tensor, which must have dtype long, with their indices in the dimension + :attr:`dim` of :attr:`input`. + + Example:: + + >>> a = torch.tensor([[2, 3, 1], [float('nan'), 1, float('nan')]]) + >>> a + tensor([[2., 3., 1.], + [nan, 1., nan]]) + >>> a.median(0) + torch.return_types.median(values=tensor([nan, 1., nan]), indices=tensor([1, 1, 1])) + >>> a.nanmedian(0) + torch.return_types.nanmedian(values=tensor([2., 1., 1.]), indices=tensor([0, 1, 0])) + """ + ... +@overload +def nanquantile(input: Tensor, q: Tensor, dim: Optional[_int] = None, keepdim: _bool = False, *, interpolation: str = "linear", out: Optional[Tensor] = None) -> Tensor: + r""" + nanquantile(input, q, dim=None, keepdim=False, *, interpolation='linear', out=None) -> Tensor + + This is a variant of :func:`torch.quantile` that "ignores" ``NaN`` values, + computing the quantiles :attr:`q` as if ``NaN`` values in :attr:`input` did + not exist. If all values in a reduced row are ``NaN`` then the quantiles for + that reduction will be ``NaN``. See the documentation for :func:`torch.quantile`. + + Args: + input (Tensor): the input tensor. + q (float or Tensor): a scalar or 1D tensor of quantile values in the range [0, 1] + dim (int): the dimension to reduce. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + + Keyword arguments: + interpolation (str): interpolation method to use when the desired quantile lies between two data points. + Can be ``linear``, ``lower``, ``higher``, ``midpoint`` and ``nearest``. + Default is ``linear``. + out (Tensor, optional): the output tensor. + + Example:: + + >>> t = torch.tensor([float('nan'), 1, 2]) + >>> t.quantile(0.5) + tensor(nan) + >>> t.nanquantile(0.5) + tensor(1.5000) + >>> t = torch.tensor([[float('nan'), float('nan')], [1, 2]]) + >>> t + tensor([[nan, nan], + [1., 2.]]) + >>> t.nanquantile(0.5, dim=0) + tensor([1., 2.]) + >>> t.nanquantile(0.5, dim=1) + tensor([ nan, 1.5000]) + """ + ... 
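+# --- Editor's note (illustrative only; not part of the generated stub) ---
+# A minimal sketch contrasting the NaN-ignoring reductions documented above
+# with their plain counterparts on one fixed input. Inputs are fixed, so the
+# printed results should be reproducible.
+#
+#     >>> import torch
+#     >>> t = torch.tensor([1.0, float('nan'), 2.0, 4.0])
+#     >>> torch.mean(t), torch.nanmean(t)
+#     (tensor(nan), tensor(2.3333))
+#     >>> torch.median(t), torch.nanmedian(t)
+#     (tensor(nan), tensor(2.))
+#     >>> torch.nanquantile(t, 0.5)
+#     tensor(2.)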
+@overload +def nanquantile(input: Tensor, q: _float, dim: Optional[_int] = None, keepdim: _bool = False, *, interpolation: str = "linear", out: Optional[Tensor] = None) -> Tensor: + r""" + nanquantile(input, q, dim=None, keepdim=False, *, interpolation='linear', out=None) -> Tensor + + This is a variant of :func:`torch.quantile` that "ignores" ``NaN`` values, + computing the quantiles :attr:`q` as if ``NaN`` values in :attr:`input` did + not exist. If all values in a reduced row are ``NaN`` then the quantiles for + that reduction will be ``NaN``. See the documentation for :func:`torch.quantile`. + + Args: + input (Tensor): the input tensor. + q (float or Tensor): a scalar or 1D tensor of quantile values in the range [0, 1] + dim (int): the dimension to reduce. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + + Keyword arguments: + interpolation (str): interpolation method to use when the desired quantile lies between two data points. + Can be ``linear``, ``lower``, ``higher``, ``midpoint`` and ``nearest``. + Default is ``linear``. + out (Tensor, optional): the output tensor. + + Example:: + + >>> t = torch.tensor([float('nan'), 1, 2]) + >>> t.quantile(0.5) + tensor(nan) + >>> t.nanquantile(0.5) + tensor(1.5000) + >>> t = torch.tensor([[float('nan'), float('nan')], [1, 2]]) + >>> t + tensor([[nan, nan], + [1., 2.]]) + >>> t.nanquantile(0.5, dim=0) + tensor([1., 2.]) + >>> t.nanquantile(0.5, dim=1) + tensor([ nan, 1.5000]) + """ + ... +def nansum(input: Tensor, dim: Optional[Union[_int, _size]] = None, keepdim: _bool = False, *, dtype: Optional[_dtype] = None, out: Optional[Tensor] = None) -> Tensor: + r""" + nansum(input, *, dtype=None) -> Tensor + + Returns the sum of all elements, treating Not a Numbers (NaNs) as zero. + + Args: + input (Tensor): the input tensor. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + If specified, the input tensor is casted to :attr:`dtype` before the operation + is performed. This is useful for preventing data type overflows. Default: None. + + Example:: + + >>> a = torch.tensor([1., 2., float('nan'), 4.]) + >>> torch.nansum(a) + tensor(7.) + + .. function:: nansum(input, dim, keepdim=False, *, dtype=None) -> Tensor + :noindex: + + Returns the sum of each row of the :attr:`input` tensor in the given + dimension :attr:`dim`, treating Not a Numbers (NaNs) as zero. + If :attr:`dim` is a list of dimensions, reduce over all of them. + + + If :attr:`keepdim` is ``True``, the output tensor is of the same size + as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the + output tensor having 1 (or ``len(dim)``) fewer dimension(s). + + + Args: + input (Tensor): the input tensor. + + dim (int or tuple of ints, optional): the dimension or dimensions to reduce. + If ``None``, all dimensions are reduced. + + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + If specified, the input tensor is casted to :attr:`dtype` before the operation + is performed. This is useful for preventing data type overflows. Default: None. + + Example:: + + >>> torch.nansum(torch.tensor([1., float("nan")])) + 1.0 + >>> a = torch.tensor([[1, 2], [3., float("nan")]]) + >>> torch.nansum(a) + tensor(6.) 
+ >>> torch.nansum(a, dim=0) + tensor([4., 2.]) + >>> torch.nansum(a, dim=1) + tensor([3., 3.]) + """ + ... +@overload +def narrow(input: Tensor, dim: _int, start: Tensor, length: Union[_int, SymInt]) -> Tensor: + r""" + narrow(input, dim, start, length) -> Tensor + + Returns a new tensor that is a narrowed version of :attr:`input` tensor. The + dimension :attr:`dim` is input from :attr:`start` to ``start + length``. The + returned tensor and :attr:`input` tensor share the same underlying storage. + + Args: + input (Tensor): the tensor to narrow + dim (int): the dimension along which to narrow + start (int or Tensor): index of the element to start the narrowed dimension + from. Can be negative, which means indexing from the end of `dim`. If + `Tensor`, it must be an 0-dim integral `Tensor` (bools not allowed) + length (int): length of the narrowed dimension, must be weakly positive + + Example:: + + >>> x = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) + >>> torch.narrow(x, 0, 0, 2) + tensor([[ 1, 2, 3], + [ 4, 5, 6]]) + >>> torch.narrow(x, 1, 1, 2) + tensor([[ 2, 3], + [ 5, 6], + [ 8, 9]]) + >>> torch.narrow(x, -1, torch.tensor(-1), 1) + tensor([[3], + [6], + [9]]) + """ + ... +@overload +def narrow(input: Tensor, dim: _int, start: Union[_int, SymInt], length: Union[_int, SymInt]) -> Tensor: + r""" + narrow(input, dim, start, length) -> Tensor + + Returns a new tensor that is a narrowed version of :attr:`input` tensor. The + dimension :attr:`dim` is input from :attr:`start` to ``start + length``. The + returned tensor and :attr:`input` tensor share the same underlying storage. + + Args: + input (Tensor): the tensor to narrow + dim (int): the dimension along which to narrow + start (int or Tensor): index of the element to start the narrowed dimension + from. Can be negative, which means indexing from the end of `dim`. If + `Tensor`, it must be an 0-dim integral `Tensor` (bools not allowed) + length (int): length of the narrowed dimension, must be weakly positive + + Example:: + + >>> x = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) + >>> torch.narrow(x, 0, 0, 2) + tensor([[ 1, 2, 3], + [ 4, 5, 6]]) + >>> torch.narrow(x, 1, 1, 2) + tensor([[ 2, 3], + [ 5, 6], + [ 8, 9]]) + >>> torch.narrow(x, -1, torch.tensor(-1), 1) + tensor([[3], + [6], + [9]]) + """ + ... +def narrow_copy(input: Tensor, dim: _int, start: Union[_int, SymInt], length: Union[_int, SymInt], *, out: Optional[Tensor] = None) -> Tensor: + r""" + narrow_copy(input, dim, start, length, *, out=None) -> Tensor + + Same as :meth:`Tensor.narrow` except this returns a copy rather + than shared storage. This is primarily for sparse tensors, which + do not have a shared-storage narrow method. + + Args: + input (Tensor): the tensor to narrow + dim (int): the dimension along which to narrow + start (int): index of the element to start the narrowed dimension from. Can + be negative, which means indexing from the end of `dim` + length (int): length of the narrowed dimension, must be weakly positive + + Keyword args: + out (Tensor, optional): the output tensor. 
+ + Example:: + + >>> x = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) + >>> torch.narrow_copy(x, 0, 0, 2) + tensor([[ 1, 2, 3], + [ 4, 5, 6]]) + >>> torch.narrow_copy(x, 1, 1, 2) + tensor([[ 2, 3], + [ 5, 6], + [ 8, 9]]) + >>> s = torch.arange(16).reshape(2, 2, 2, 2).to_sparse(2) + >>> torch.narrow_copy(s, 0, 0, 1) + tensor(indices=tensor([[0, 0], + [0, 1]]), + values=tensor([[[0, 1], + [2, 3]], + + [[4, 5], + [6, 7]]]), + size=(1, 2, 2, 2), nnz=2, layout=torch.sparse_coo) + + .. seealso:: + + :func:`torch.narrow` for a non copy variant + """ + ... +def native_batch_norm(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], running_mean: Optional[Tensor], running_var: Optional[Tensor], training: _bool, momentum: _float, eps: _float, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> Tuple[Tensor, Tensor, Tensor]: ... +def native_channel_shuffle(input: Tensor, groups: Union[_int, SymInt]) -> Tensor: ... +def native_dropout(input: Tensor, p: _float, train: Optional[_bool]) -> Tuple[Tensor, Tensor]: ... +def native_group_norm(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], N: Union[_int, SymInt], C: Union[_int, SymInt], HxW: Union[_int, SymInt], group: _int, eps: _float) -> Tuple[Tensor, Tensor, Tensor]: ... +def native_layer_norm(input: Tensor, normalized_shape: Sequence[Union[_int, SymInt]], weight: Optional[Tensor], bias: Optional[Tensor], eps: _float) -> Tuple[Tensor, Tensor, Tensor]: ... +@overload +def native_norm(input: Tensor, p: Optional[Union[Number, _complex]], dim: Union[_int, _size], keepdim: _bool, dtype: Optional[_dtype]) -> Tensor: ... +@overload +def native_norm(input: Tensor, p: Union[Number, _complex] = 2) -> Tensor: ... +@overload +def ne(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + ne(input, other, *, out=None) -> Tensor + + Computes :math:`\text{input} \neq \text{other}` element-wise. + + + The second argument can be a number or a tensor whose shape is + :ref:`broadcastable ` with the first argument. + + Args: + input (Tensor): the tensor to compare + other (Tensor or float): the tensor or value to compare + + Keyword args: + out (Tensor, optional): the output tensor. + + Returns: + A boolean tensor that is True where :attr:`input` is not equal to :attr:`other` and False elsewhere + + Example:: + + >>> torch.ne(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]])) + tensor([[False, True], [True, False]]) + """ + ... +@overload +def ne(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: + r""" + ne(input, other, *, out=None) -> Tensor + + Computes :math:`\text{input} \neq \text{other}` element-wise. + + + The second argument can be a number or a tensor whose shape is + :ref:`broadcastable ` with the first argument. + + Args: + input (Tensor): the tensor to compare + other (Tensor or float): the tensor or value to compare + + Keyword args: + out (Tensor, optional): the output tensor. + + Returns: + A boolean tensor that is True where :attr:`input` is not equal to :attr:`other` and False elsewhere + + Example:: + + >>> torch.ne(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]])) + tensor([[False, True], [True, False]]) + """ + ... +def neg(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + neg(input, *, out=None) -> Tensor + + Returns a new tensor with the negative of the elements of :attr:`input`. + + .. math:: + \text{out} = -1 \times \text{input} + + Args: + input (Tensor): the input tensor. 
+ + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randn(5) + >>> a + tensor([ 0.0090, -0.2262, -0.0682, -0.2866, 0.3940]) + >>> torch.neg(a) + tensor([-0.0090, 0.2262, 0.0682, 0.2866, -0.3940]) + """ + ... +def neg_(input: Tensor) -> Tensor: ... +def negative(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + negative(input, *, out=None) -> Tensor + + Alias for :func:`torch.neg` + """ + ... +def negative_(input: Tensor) -> Tensor: ... +def nextafter(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + nextafter(input, other, *, out=None) -> Tensor + + Return the next floating-point value after :attr:`input` towards :attr:`other`, elementwise. + + The shapes of ``input`` and ``other`` must be + :ref:`broadcastable `. + + Args: + input (Tensor): the first input tensor + other (Tensor): the second input tensor + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> eps = torch.finfo(torch.float32).eps + >>> torch.nextafter(torch.tensor([1.0, 2.0]), torch.tensor([2.0, 1.0])) == torch.tensor([eps + 1, 2 - eps]) + tensor([True, True]) + """ + ... +@overload +def nonzero(input: Tensor, *, as_tuple: Literal[False] = False, out: Optional[Tensor] = None) -> Tensor: + r""" + nonzero(input, *, out=None, as_tuple=False) -> LongTensor or tuple of LongTensors + + .. note:: + :func:`torch.nonzero(..., as_tuple=False) ` (default) returns a + 2-D tensor where each row is the index for a nonzero value. + + :func:`torch.nonzero(..., as_tuple=True) ` returns a tuple of 1-D + index tensors, allowing for advanced indexing, so ``x[x.nonzero(as_tuple=True)]`` + gives all nonzero values of tensor ``x``. Of the returned tuple, each index tensor + contains nonzero indices for a certain dimension. + + See below for more details on the two behaviors. + + When :attr:`input` is on CUDA, :func:`torch.nonzero() ` causes + host-device synchronization. + + **When** :attr:`as_tuple` **is** ``False`` **(default)**: + + Returns a tensor containing the indices of all non-zero elements of + :attr:`input`. Each row in the result contains the indices of a non-zero + element in :attr:`input`. The result is sorted lexicographically, with + the last index changing the fastest (C-style). + + If :attr:`input` has :math:`n` dimensions, then the resulting indices tensor + :attr:`out` is of size :math:`(z \times n)`, where :math:`z` is the total number of + non-zero elements in the :attr:`input` tensor. + + **When** :attr:`as_tuple` **is** ``True``: + + Returns a tuple of 1-D tensors, one for each dimension in :attr:`input`, + each containing the indices (in that dimension) of all non-zero elements of + :attr:`input` . + + If :attr:`input` has :math:`n` dimensions, then the resulting tuple contains :math:`n` + tensors of size :math:`z`, where :math:`z` is the total number of + non-zero elements in the :attr:`input` tensor. + + As a special case, when :attr:`input` has zero dimensions and a nonzero scalar + value, it is treated as a one-dimensional tensor with one element. + + Args: + input (Tensor): the input tensor. + + Keyword args: + out (LongTensor, optional): the output tensor containing indices + + Returns: + LongTensor or tuple of LongTensor: If :attr:`as_tuple` is ``False``, the output + tensor containing indices. If :attr:`as_tuple` is ``True``, one 1-D tensor for + each dimension, containing the indices of each nonzero element along that + dimension. 
+ + Example:: + + >>> torch.nonzero(torch.tensor([1, 1, 1, 0, 1])) + tensor([[ 0], + [ 1], + [ 2], + [ 4]]) + >>> torch.nonzero(torch.tensor([[0.6, 0.0, 0.0, 0.0], + ... [0.0, 0.4, 0.0, 0.0], + ... [0.0, 0.0, 1.2, 0.0], + ... [0.0, 0.0, 0.0,-0.4]])) + tensor([[ 0, 0], + [ 1, 1], + [ 2, 2], + [ 3, 3]]) + >>> torch.nonzero(torch.tensor([1, 1, 1, 0, 1]), as_tuple=True) + (tensor([0, 1, 2, 4]),) + >>> torch.nonzero(torch.tensor([[0.6, 0.0, 0.0, 0.0], + ... [0.0, 0.4, 0.0, 0.0], + ... [0.0, 0.0, 1.2, 0.0], + ... [0.0, 0.0, 0.0,-0.4]]), as_tuple=True) + (tensor([0, 1, 2, 3]), tensor([0, 1, 2, 3])) + >>> torch.nonzero(torch.tensor(5), as_tuple=True) + (tensor([0]),) + """ + ... +@overload +def nonzero(input: Tensor, *, as_tuple: Literal[True]) -> Tuple[Tensor, ...]: + r""" + nonzero(input, *, out=None, as_tuple=False) -> LongTensor or tuple of LongTensors + + .. note:: + :func:`torch.nonzero(..., as_tuple=False) ` (default) returns a + 2-D tensor where each row is the index for a nonzero value. + + :func:`torch.nonzero(..., as_tuple=True) ` returns a tuple of 1-D + index tensors, allowing for advanced indexing, so ``x[x.nonzero(as_tuple=True)]`` + gives all nonzero values of tensor ``x``. Of the returned tuple, each index tensor + contains nonzero indices for a certain dimension. + + See below for more details on the two behaviors. + + When :attr:`input` is on CUDA, :func:`torch.nonzero() ` causes + host-device synchronization. + + **When** :attr:`as_tuple` **is** ``False`` **(default)**: + + Returns a tensor containing the indices of all non-zero elements of + :attr:`input`. Each row in the result contains the indices of a non-zero + element in :attr:`input`. The result is sorted lexicographically, with + the last index changing the fastest (C-style). + + If :attr:`input` has :math:`n` dimensions, then the resulting indices tensor + :attr:`out` is of size :math:`(z \times n)`, where :math:`z` is the total number of + non-zero elements in the :attr:`input` tensor. + + **When** :attr:`as_tuple` **is** ``True``: + + Returns a tuple of 1-D tensors, one for each dimension in :attr:`input`, + each containing the indices (in that dimension) of all non-zero elements of + :attr:`input` . + + If :attr:`input` has :math:`n` dimensions, then the resulting tuple contains :math:`n` + tensors of size :math:`z`, where :math:`z` is the total number of + non-zero elements in the :attr:`input` tensor. + + As a special case, when :attr:`input` has zero dimensions and a nonzero scalar + value, it is treated as a one-dimensional tensor with one element. + + Args: + input (Tensor): the input tensor. + + Keyword args: + out (LongTensor, optional): the output tensor containing indices + + Returns: + LongTensor or tuple of LongTensor: If :attr:`as_tuple` is ``False``, the output + tensor containing indices. If :attr:`as_tuple` is ``True``, one 1-D tensor for + each dimension, containing the indices of each nonzero element along that + dimension. + + Example:: + + >>> torch.nonzero(torch.tensor([1, 1, 1, 0, 1])) + tensor([[ 0], + [ 1], + [ 2], + [ 4]]) + >>> torch.nonzero(torch.tensor([[0.6, 0.0, 0.0, 0.0], + ... [0.0, 0.4, 0.0, 0.0], + ... [0.0, 0.0, 1.2, 0.0], + ... [0.0, 0.0, 0.0,-0.4]])) + tensor([[ 0, 0], + [ 1, 1], + [ 2, 2], + [ 3, 3]]) + >>> torch.nonzero(torch.tensor([1, 1, 1, 0, 1]), as_tuple=True) + (tensor([0, 1, 2, 4]),) + >>> torch.nonzero(torch.tensor([[0.6, 0.0, 0.0, 0.0], + ... [0.0, 0.4, 0.0, 0.0], + ... [0.0, 0.0, 1.2, 0.0], + ... 
[0.0, 0.0, 0.0,-0.4]]), as_tuple=True) + (tensor([0, 1, 2, 3]), tensor([0, 1, 2, 3])) + >>> torch.nonzero(torch.tensor(5), as_tuple=True) + (tensor([0]),) + """ + ... +def nonzero_static(input: Tensor, *, size: _int, fill_value: _int = -1, out: Optional[Tensor] = None) -> Tensor: ... +def norm_except_dim(v: Tensor, pow: _int = 2, dim: _int = 0) -> Tensor: ... +@overload +def normal(mean: Tensor, std: Tensor, *, generator: Optional[Generator] = None, out: Optional[Tensor] = None) -> Tensor: + r""" + normal(mean, std, *, generator=None, out=None) -> Tensor + + Returns a tensor of random numbers drawn from separate normal distributions + whose mean and standard deviation are given. + + The :attr:`mean` is a tensor with the mean of + each output element's normal distribution + + The :attr:`std` is a tensor with the standard deviation of + each output element's normal distribution + + The shapes of :attr:`mean` and :attr:`std` don't need to match, but the + total number of elements in each tensor need to be the same. + + .. note:: When the shapes do not match, the shape of :attr:`mean` + is used as the shape for the returned output tensor + + .. note:: When :attr:`std` is a CUDA tensor, this function synchronizes + its device with the CPU. + + Args: + mean (Tensor): the tensor of per-element means + std (Tensor): the tensor of per-element standard deviations + + Keyword args: + generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling + out (Tensor, optional): the output tensor. + + Example:: + + >>> torch.normal(mean=torch.arange(1., 11.), std=torch.arange(1, 0, -0.1)) + tensor([ 1.0425, 3.5672, 2.7969, 4.2925, 4.7229, 6.2134, + 8.0505, 8.1408, 9.0563, 10.0566]) + + .. function:: normal(mean=0.0, std, *, out=None) -> Tensor + :noindex: + + Similar to the function above, but the means are shared among all drawn + elements. + + Args: + mean (float, optional): the mean for all distributions + std (Tensor): the tensor of per-element standard deviations + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> torch.normal(mean=0.5, std=torch.arange(1., 6.)) + tensor([-1.2793, -1.0732, -2.0687, 5.1177, -1.2303]) + + .. function:: normal(mean, std=1.0, *, out=None) -> Tensor + :noindex: + + Similar to the function above, but the standard deviations are shared among + all drawn elements. + + Args: + mean (Tensor): the tensor of per-element means + std (float, optional): the standard deviation for all distributions + + Keyword args: + out (Tensor, optional): the output tensor + + Example:: + + >>> torch.normal(mean=torch.arange(1., 6.)) + tensor([ 1.1552, 2.6148, 2.6535, 5.8318, 4.2361]) + + .. function:: normal(mean, std, size, *, out=None) -> Tensor + :noindex: + + Similar to the function above, but the means and standard deviations are shared + among all drawn elements. The resulting tensor has size given by :attr:`size`. + + Args: + mean (float): the mean for all distributions + std (float): the standard deviation for all distributions + size (int...): a sequence of integers defining the shape of the output tensor. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> torch.normal(2, 3, size=(1, 4)) + tensor([[-1.3987, -1.9544, 3.6048, 0.7909]]) + """ + ... 
+@overload +def normal(mean: Tensor, std: _float = 1, *, generator: Optional[Generator] = None, out: Optional[Tensor] = None) -> Tensor: + r""" + normal(mean, std, *, generator=None, out=None) -> Tensor + + Returns a tensor of random numbers drawn from separate normal distributions + whose mean and standard deviation are given. + + The :attr:`mean` is a tensor with the mean of + each output element's normal distribution + + The :attr:`std` is a tensor with the standard deviation of + each output element's normal distribution + + The shapes of :attr:`mean` and :attr:`std` don't need to match, but the + total number of elements in each tensor need to be the same. + + .. note:: When the shapes do not match, the shape of :attr:`mean` + is used as the shape for the returned output tensor + + .. note:: When :attr:`std` is a CUDA tensor, this function synchronizes + its device with the CPU. + + Args: + mean (Tensor): the tensor of per-element means + std (Tensor): the tensor of per-element standard deviations + + Keyword args: + generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling + out (Tensor, optional): the output tensor. + + Example:: + + >>> torch.normal(mean=torch.arange(1., 11.), std=torch.arange(1, 0, -0.1)) + tensor([ 1.0425, 3.5672, 2.7969, 4.2925, 4.7229, 6.2134, + 8.0505, 8.1408, 9.0563, 10.0566]) + + .. function:: normal(mean=0.0, std, *, out=None) -> Tensor + :noindex: + + Similar to the function above, but the means are shared among all drawn + elements. + + Args: + mean (float, optional): the mean for all distributions + std (Tensor): the tensor of per-element standard deviations + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> torch.normal(mean=0.5, std=torch.arange(1., 6.)) + tensor([-1.2793, -1.0732, -2.0687, 5.1177, -1.2303]) + + .. function:: normal(mean, std=1.0, *, out=None) -> Tensor + :noindex: + + Similar to the function above, but the standard deviations are shared among + all drawn elements. + + Args: + mean (Tensor): the tensor of per-element means + std (float, optional): the standard deviation for all distributions + + Keyword args: + out (Tensor, optional): the output tensor + + Example:: + + >>> torch.normal(mean=torch.arange(1., 6.)) + tensor([ 1.1552, 2.6148, 2.6535, 5.8318, 4.2361]) + + .. function:: normal(mean, std, size, *, out=None) -> Tensor + :noindex: + + Similar to the function above, but the means and standard deviations are shared + among all drawn elements. The resulting tensor has size given by :attr:`size`. + + Args: + mean (float): the mean for all distributions + std (float): the standard deviation for all distributions + size (int...): a sequence of integers defining the shape of the output tensor. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> torch.normal(2, 3, size=(1, 4)) + tensor([[-1.3987, -1.9544, 3.6048, 0.7909]]) + """ + ... +@overload +def normal(mean: _float, std: Tensor, *, generator: Optional[Generator] = None, out: Optional[Tensor] = None) -> Tensor: + r""" + normal(mean, std, *, generator=None, out=None) -> Tensor + + Returns a tensor of random numbers drawn from separate normal distributions + whose mean and standard deviation are given. 
+ + The :attr:`mean` is a tensor with the mean of + each output element's normal distribution + + The :attr:`std` is a tensor with the standard deviation of + each output element's normal distribution + + The shapes of :attr:`mean` and :attr:`std` don't need to match, but the + total number of elements in each tensor need to be the same. + + .. note:: When the shapes do not match, the shape of :attr:`mean` + is used as the shape for the returned output tensor + + .. note:: When :attr:`std` is a CUDA tensor, this function synchronizes + its device with the CPU. + + Args: + mean (Tensor): the tensor of per-element means + std (Tensor): the tensor of per-element standard deviations + + Keyword args: + generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling + out (Tensor, optional): the output tensor. + + Example:: + + >>> torch.normal(mean=torch.arange(1., 11.), std=torch.arange(1, 0, -0.1)) + tensor([ 1.0425, 3.5672, 2.7969, 4.2925, 4.7229, 6.2134, + 8.0505, 8.1408, 9.0563, 10.0566]) + + .. function:: normal(mean=0.0, std, *, out=None) -> Tensor + :noindex: + + Similar to the function above, but the means are shared among all drawn + elements. + + Args: + mean (float, optional): the mean for all distributions + std (Tensor): the tensor of per-element standard deviations + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> torch.normal(mean=0.5, std=torch.arange(1., 6.)) + tensor([-1.2793, -1.0732, -2.0687, 5.1177, -1.2303]) + + .. function:: normal(mean, std=1.0, *, out=None) -> Tensor + :noindex: + + Similar to the function above, but the standard deviations are shared among + all drawn elements. + + Args: + mean (Tensor): the tensor of per-element means + std (float, optional): the standard deviation for all distributions + + Keyword args: + out (Tensor, optional): the output tensor + + Example:: + + >>> torch.normal(mean=torch.arange(1., 6.)) + tensor([ 1.1552, 2.6148, 2.6535, 5.8318, 4.2361]) + + .. function:: normal(mean, std, size, *, out=None) -> Tensor + :noindex: + + Similar to the function above, but the means and standard deviations are shared + among all drawn elements. The resulting tensor has size given by :attr:`size`. + + Args: + mean (float): the mean for all distributions + std (float): the standard deviation for all distributions + size (int...): a sequence of integers defining the shape of the output tensor. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> torch.normal(2, 3, size=(1, 4)) + tensor([[-1.3987, -1.9544, 3.6048, 0.7909]]) + """ + ... +@overload +def normal(mean: _float, std: _float, size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator] = None, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + normal(mean, std, *, generator=None, out=None) -> Tensor + + Returns a tensor of random numbers drawn from separate normal distributions + whose mean and standard deviation are given. + + The :attr:`mean` is a tensor with the mean of + each output element's normal distribution + + The :attr:`std` is a tensor with the standard deviation of + each output element's normal distribution + + The shapes of :attr:`mean` and :attr:`std` don't need to match, but the + total number of elements in each tensor need to be the same. + + .. 
note:: When the shapes do not match, the shape of :attr:`mean` + is used as the shape for the returned output tensor + + .. note:: When :attr:`std` is a CUDA tensor, this function synchronizes + its device with the CPU. + + Args: + mean (Tensor): the tensor of per-element means + std (Tensor): the tensor of per-element standard deviations + + Keyword args: + generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling + out (Tensor, optional): the output tensor. + + Example:: + + >>> torch.normal(mean=torch.arange(1., 11.), std=torch.arange(1, 0, -0.1)) + tensor([ 1.0425, 3.5672, 2.7969, 4.2925, 4.7229, 6.2134, + 8.0505, 8.1408, 9.0563, 10.0566]) + + .. function:: normal(mean=0.0, std, *, out=None) -> Tensor + :noindex: + + Similar to the function above, but the means are shared among all drawn + elements. + + Args: + mean (float, optional): the mean for all distributions + std (Tensor): the tensor of per-element standard deviations + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> torch.normal(mean=0.5, std=torch.arange(1., 6.)) + tensor([-1.2793, -1.0732, -2.0687, 5.1177, -1.2303]) + + .. function:: normal(mean, std=1.0, *, out=None) -> Tensor + :noindex: + + Similar to the function above, but the standard deviations are shared among + all drawn elements. + + Args: + mean (Tensor): the tensor of per-element means + std (float, optional): the standard deviation for all distributions + + Keyword args: + out (Tensor, optional): the output tensor + + Example:: + + >>> torch.normal(mean=torch.arange(1., 6.)) + tensor([ 1.1552, 2.6148, 2.6535, 5.8318, 4.2361]) + + .. function:: normal(mean, std, size, *, out=None) -> Tensor + :noindex: + + Similar to the function above, but the means and standard deviations are shared + among all drawn elements. The resulting tensor has size given by :attr:`size`. + + Args: + mean (float): the mean for all distributions + std (float): the standard deviation for all distributions + size (int...): a sequence of integers defining the shape of the output tensor. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> torch.normal(2, 3, size=(1, 4)) + tensor([[-1.3987, -1.9544, 3.6048, 0.7909]]) + """ + ... +@overload +def not_equal(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + not_equal(input, other, *, out=None) -> Tensor + + Alias for :func:`torch.ne`. + """ + ... +@overload +def not_equal(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: + r""" + not_equal(input, other, *, out=None) -> Tensor + + Alias for :func:`torch.ne`. + """ + ... +@overload +def nuclear_norm(input: Tensor, dim: Union[_int, _size], keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def nuclear_norm(input: Tensor, keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: ... +def numel(self: Tensor) -> _int: + r""" + numel(input) -> int + + Returns the total number of elements in the :attr:`input` tensor. + + Args: + input (Tensor): the input tensor. + + Example:: + + >>> a = torch.randn(1, 2, 3, 4, 5) + >>> torch.numel(a) + 120 + >>> a = torch.zeros(4,4) + >>> torch.numel(a) + 16 + """ + ... 
+@overload +def ones(size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + ones(*size, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + Returns a tensor filled with the scalar value `1`, with the shape defined + by the variable argument :attr:`size`. + + Args: + size (int...): a sequence of integers defining the shape of the output tensor. + Can be a variable number of arguments or a collection like a list or tuple. + + Keyword arguments: + out (Tensor, optional): the output tensor. + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + + Example:: + + >>> torch.ones(2, 3) + tensor([[ 1., 1., 1.], + [ 1., 1., 1.]]) + + >>> torch.ones(5) + tensor([ 1., 1., 1., 1., 1.]) + """ + ... +@overload +def ones(*size: _int, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + ones(*size, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + Returns a tensor filled with the scalar value `1`, with the shape defined + by the variable argument :attr:`size`. + + Args: + size (int...): a sequence of integers defining the shape of the output tensor. + Can be a variable number of arguments or a collection like a list or tuple. + + Keyword arguments: + out (Tensor, optional): the output tensor. + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + + Example:: + + >>> torch.ones(2, 3) + tensor([[ 1., 1., 1.], + [ 1., 1., 1.]]) + + >>> torch.ones(5) + tensor([ 1., 1., 1., 1., 1.]) + """ + ... 
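+# A brief sketch of the two call styles covered by the :func:`torch.ones` overloads
+# above: a variadic list of ints and a single sequence produce the same tensor.
+#
+#     >>> torch.equal(torch.ones(2, 3), torch.ones((2, 3)))
+#     True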
+@overload +def ones(size: _size, *, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + ones(*size, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + Returns a tensor filled with the scalar value `1`, with the shape defined + by the variable argument :attr:`size`. + + Args: + size (int...): a sequence of integers defining the shape of the output tensor. + Can be a variable number of arguments or a collection like a list or tuple. + + Keyword arguments: + out (Tensor, optional): the output tensor. + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + + Example:: + + >>> torch.ones(2, 3) + tensor([[ 1., 1., 1.], + [ 1., 1., 1.]]) + + >>> torch.ones(5) + tensor([ 1., 1., 1., 1., 1.]) + """ + ... +@overload +def ones(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + ones(*size, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + Returns a tensor filled with the scalar value `1`, with the shape defined + by the variable argument :attr:`size`. + + Args: + size (int...): a sequence of integers defining the shape of the output tensor. + Can be a variable number of arguments or a collection like a list or tuple. + + Keyword arguments: + out (Tensor, optional): the output tensor. + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + + Example:: + + >>> torch.ones(2, 3) + tensor([[ 1., 1., 1.], + [ 1., 1., 1.]]) + + >>> torch.ones(5) + tensor([ 1., 1., 1., 1., 1.]) + """ + ... 
+def ones_like(input: Tensor, *, memory_format: Optional[memory_format] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + ones_like(input, *, dtype=None, layout=None, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor + + Returns a tensor filled with the scalar value `1`, with the same size as + :attr:`input`. ``torch.ones_like(input)`` is equivalent to + ``torch.ones(input.size(), dtype=input.dtype, layout=input.layout, device=input.device)``. + + .. warning:: + As of 0.4, this function does not support an :attr:`out` keyword. As an alternative, + the old ``torch.ones_like(input, out=output)`` is equivalent to + ``torch.ones(input.size(), out=output)``. + + Args: + input (Tensor): the size of :attr:`input` will determine size of the output tensor. + + Keyword arguments: + dtype (:class:`torch.dtype`, optional): the desired data type of returned Tensor. + Default: if ``None``, defaults to the dtype of :attr:`input`. + layout (:class:`torch.layout`, optional): the desired layout of returned tensor. + Default: if ``None``, defaults to the layout of :attr:`input`. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, defaults to the device of :attr:`input`. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + memory_format (:class:`torch.memory_format`, optional): the desired memory format of + returned Tensor. Default: ``torch.preserve_format``. + + Example:: + + >>> input = torch.empty(2, 3) + >>> torch.ones_like(input) + tensor([[ 1., 1., 1.], + [ 1., 1., 1.]]) + """ + ... +def orgqr(input: Tensor, input2: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + orgqr(input, tau) -> Tensor + + Alias for :func:`torch.linalg.householder_product`. + """ + ... +def ormqr(input: Tensor, input2: Tensor, input3: Tensor, left: _bool = True, transpose: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: + r""" + ormqr(input, tau, other, left=True, transpose=False, *, out=None) -> Tensor + + Computes the matrix-matrix multiplication of a product of Householder matrices with a general matrix. + + Multiplies a :math:`m \times n` matrix `C` (given by :attr:`other`) with a matrix `Q`, + where `Q` is represented using Householder reflectors `(input, tau)`. + See `Representation of Orthogonal or Unitary Matrices`_ for further details. + + If :attr:`left` is `True` then `op(Q)` times `C` is computed, otherwise the result is `C` times `op(Q)`. + When :attr:`left` is `True`, the implicit matrix `Q` has size :math:`m \times m`. + It has size :math:`n \times n` otherwise. + If :attr:`transpose` is `True` then `op` is the conjugate transpose operation, otherwise it's a no-op. + + Supports inputs of float, double, cfloat and cdouble dtypes. + Also supports batched inputs, and, if the input is batched, the output is batched with the same dimensions. + + .. seealso:: + :func:`torch.geqrf` can be used to form the Householder representation `(input, tau)` of matrix `Q` + from the QR decomposition. + + .. note:: + This function supports backward but it is only fast when ``(input, tau)`` do not require gradients + and/or ``tau.size(-1)`` is very small. 
+ `` + + Args: + input (Tensor): tensor of shape `(*, mn, k)` where `*` is zero or more batch dimensions + and `mn` equals to `m` or `n` depending on the :attr:`left`. + tau (Tensor): tensor of shape `(*, min(mn, k))` where `*` is zero or more batch dimensions. + other (Tensor): tensor of shape `(*, m, n)` where `*` is zero or more batch dimensions. + left (bool): controls the order of multiplication. + transpose (bool): controls whether the matrix `Q` is conjugate transposed or not. + + Keyword args: + out (Tensor, optional): the output Tensor. Ignored if `None`. Default: `None`. + + .. _Representation of Orthogonal or Unitary Matrices: + https://www.netlib.org/lapack/lug/node128.html + """ + ... +def outer(input: Tensor, vec2: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + outer(input, vec2, *, out=None) -> Tensor + + Outer product of :attr:`input` and :attr:`vec2`. + If :attr:`input` is a vector of size :math:`n` and :attr:`vec2` is a vector of + size :math:`m`, then :attr:`out` must be a matrix of size :math:`(n \times m)`. + + .. note:: This function does not :ref:`broadcast `. + + Args: + input (Tensor): 1-D input vector + vec2 (Tensor): 1-D input vector + + Keyword args: + out (Tensor, optional): optional output matrix + + Example:: + + >>> v1 = torch.arange(1., 5.) + >>> v2 = torch.arange(1., 4.) + >>> torch.outer(v1, v2) + tensor([[ 1., 2., 3.], + [ 2., 4., 6.], + [ 3., 6., 9.], + [ 4., 8., 12.]]) + """ + ... +def pairwise_distance(x1: Tensor, x2: Tensor, p: _float = 2, eps: _float = 1e-06, keepdim: _bool = False) -> Tensor: ... +def pdist(input: Tensor, p: _float = 2) -> Tensor: ... +def permute(input: Tensor, dims: _size) -> Tensor: + r""" + permute(input, dims) -> Tensor + + Returns a view of the original tensor :attr:`input` with its dimensions permuted. + + Args: + input (Tensor): the input tensor. + dims (tuple of int): The desired ordering of dimensions + + Example: + >>> x = torch.randn(2, 3, 5) + >>> x.size() + torch.Size([2, 3, 5]) + >>> torch.permute(x, (2, 0, 1)).size() + torch.Size([5, 2, 3]) + """ + ... +def permute_copy(input: Tensor, dims: _size, *, out: Optional[Tensor] = None) -> Tensor: + r""" + Performs the same operation as :func:`torch.permute`, but all output tensors + are freshly created instead of aliasing the input. + """ + ... +def pinverse(input: Tensor, rcond: _float = 1e-15) -> Tensor: + r""" + pinverse(input, rcond=1e-15) -> Tensor + + Alias for :func:`torch.linalg.pinv` + """ + ... +def pixel_shuffle(input: Tensor, upscale_factor: _int) -> Tensor: ... +def pixel_unshuffle(input: Tensor, downscale_factor: _int) -> Tensor: ... +def poisson(input: Tensor, generator: Optional[Generator] = None) -> Tensor: + r""" + poisson(input, generator=None) -> Tensor + + Returns a tensor of the same size as :attr:`input` with each element + sampled from a Poisson distribution with rate parameter given by the corresponding + element in :attr:`input` i.e., + + .. math:: + \text{out}_i \sim \text{Poisson}(\text{input}_i) + + :attr:`input` must be non-negative. + + Args: + input (Tensor): the input tensor containing the rates of the Poisson distribution + + Keyword args: + generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling + + Example:: + + >>> rates = torch.rand(4, 4) * 5 # rate parameter between 0 and 5 + >>> torch.poisson(rates) + tensor([[9., 1., 3., 5.], + [8., 6., 6., 0.], + [0., 4., 5., 3.], + [2., 1., 4., 2.]]) + """ + ... 
+def poisson_nll_loss(input: Tensor, target: Tensor, log_input: _bool, full: _bool, eps: _float, reduction: _int) -> Tensor: ... +def polar(abs: Tensor, angle: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + polar(abs, angle, *, out=None) -> Tensor + + Constructs a complex tensor whose elements are Cartesian coordinates + corresponding to the polar coordinates with absolute value :attr:`abs` and angle + :attr:`angle`. + + .. math:: + \text{out} = \text{abs} \cdot \cos(\text{angle}) + \text{abs} \cdot \sin(\text{angle}) \cdot j + + .. note:: + `torch.polar` is similar to + `std::polar `_ + and does not compute the polar decomposition + of a complex tensor like Python's `cmath.polar` and SciPy's `linalg.polar` do. + The behavior of this function is undefined if `abs` is negative or NaN, or if `angle` is + infinite. + + + Args: + abs (Tensor): The absolute value the complex tensor. Must be float or double. + angle (Tensor): The angle of the complex tensor. Must be same dtype as + :attr:`abs`. + + Keyword args: + out (Tensor): If the inputs are ``torch.float32``, must be + ``torch.complex64``. If the inputs are ``torch.float64``, must be + ``torch.complex128``. + + Example:: + + >>> import numpy as np + >>> abs = torch.tensor([1, 2], dtype=torch.float64) + >>> angle = torch.tensor([np.pi / 2, 5 * np.pi / 4], dtype=torch.float64) + >>> z = torch.polar(abs, angle) + >>> z + tensor([(0.0000+1.0000j), (-1.4142-1.4142j)], dtype=torch.complex128) + """ + ... +def polygamma(n: _int, input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + polygamma(n, input, *, out=None) -> Tensor + + Alias for :func:`torch.special.polygamma`. + """ + ... +def positive(input: Tensor) -> Tensor: + r""" + positive(input) -> Tensor + + Returns :attr:`input`. + Throws a runtime error if :attr:`input` is a bool tensor. + + Args: + input (Tensor): the input tensor. + + Example:: + + >>> t = torch.randn(5) + >>> t + tensor([ 0.0090, -0.2262, -0.0682, -0.2866, 0.3940]) + >>> torch.positive(t) + tensor([ 0.0090, -0.2262, -0.0682, -0.2866, 0.3940]) + """ + ... +@overload +def pow(input: Tensor, exponent: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + pow(input, exponent, *, out=None) -> Tensor + + Takes the power of each element in :attr:`input` with :attr:`exponent` and + returns a tensor with the result. + + :attr:`exponent` can be either a single ``float`` number or a `Tensor` + with the same number of elements as :attr:`input`. + + When :attr:`exponent` is a scalar value, the operation applied is: + + .. math:: + \text{out}_i = x_i ^ \text{exponent} + + When :attr:`exponent` is a tensor, the operation applied is: + + .. math:: + \text{out}_i = x_i ^ {\text{exponent}_i} + + When :attr:`exponent` is a tensor, the shapes of :attr:`input` + and :attr:`exponent` must be :ref:`broadcastable `. + + Args: + input (Tensor): the input tensor. + exponent (float or tensor): the exponent value + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randn(4) + >>> a + tensor([ 0.4331, 1.2475, 0.6834, -0.2791]) + >>> torch.pow(a, 2) + tensor([ 0.1875, 1.5561, 0.4670, 0.0779]) + >>> exp = torch.arange(1., 5.) + + >>> a = torch.arange(1., 5.) + >>> a + tensor([ 1., 2., 3., 4.]) + >>> exp + tensor([ 1., 2., 3., 4.]) + >>> torch.pow(a, exp) + tensor([ 1., 4., 27., 256.]) + + .. function:: pow(self, exponent, *, out=None) -> Tensor + :noindex: + + :attr:`self` is a scalar ``float`` value, and :attr:`exponent` is a tensor. 
+ The returned tensor :attr:`out` is of the same shape as :attr:`exponent` + + The operation applied is: + + .. math:: + \text{out}_i = \text{self} ^ {\text{exponent}_i} + + Args: + self (float): the scalar base value for the power operation + exponent (Tensor): the exponent tensor + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> exp = torch.arange(1., 5.) + >>> base = 2 + >>> torch.pow(base, exp) + tensor([ 2., 4., 8., 16.]) + """ + ... +@overload +def pow(self: Union[Number, _complex], exponent: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + pow(input, exponent, *, out=None) -> Tensor + + Takes the power of each element in :attr:`input` with :attr:`exponent` and + returns a tensor with the result. + + :attr:`exponent` can be either a single ``float`` number or a `Tensor` + with the same number of elements as :attr:`input`. + + When :attr:`exponent` is a scalar value, the operation applied is: + + .. math:: + \text{out}_i = x_i ^ \text{exponent} + + When :attr:`exponent` is a tensor, the operation applied is: + + .. math:: + \text{out}_i = x_i ^ {\text{exponent}_i} + + When :attr:`exponent` is a tensor, the shapes of :attr:`input` + and :attr:`exponent` must be :ref:`broadcastable `. + + Args: + input (Tensor): the input tensor. + exponent (float or tensor): the exponent value + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randn(4) + >>> a + tensor([ 0.4331, 1.2475, 0.6834, -0.2791]) + >>> torch.pow(a, 2) + tensor([ 0.1875, 1.5561, 0.4670, 0.0779]) + >>> exp = torch.arange(1., 5.) + + >>> a = torch.arange(1., 5.) + >>> a + tensor([ 1., 2., 3., 4.]) + >>> exp + tensor([ 1., 2., 3., 4.]) + >>> torch.pow(a, exp) + tensor([ 1., 4., 27., 256.]) + + .. function:: pow(self, exponent, *, out=None) -> Tensor + :noindex: + + :attr:`self` is a scalar ``float`` value, and :attr:`exponent` is a tensor. + The returned tensor :attr:`out` is of the same shape as :attr:`exponent` + + The operation applied is: + + .. math:: + \text{out}_i = \text{self} ^ {\text{exponent}_i} + + Args: + self (float): the scalar base value for the power operation + exponent (Tensor): the exponent tensor + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> exp = torch.arange(1., 5.) + >>> base = 2 + >>> torch.pow(base, exp) + tensor([ 2., 4., 8., 16.]) + """ + ... +@overload +def pow(input: Tensor, exponent: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: + r""" + pow(input, exponent, *, out=None) -> Tensor + + Takes the power of each element in :attr:`input` with :attr:`exponent` and + returns a tensor with the result. + + :attr:`exponent` can be either a single ``float`` number or a `Tensor` + with the same number of elements as :attr:`input`. + + When :attr:`exponent` is a scalar value, the operation applied is: + + .. math:: + \text{out}_i = x_i ^ \text{exponent} + + When :attr:`exponent` is a tensor, the operation applied is: + + .. math:: + \text{out}_i = x_i ^ {\text{exponent}_i} + + When :attr:`exponent` is a tensor, the shapes of :attr:`input` + and :attr:`exponent` must be :ref:`broadcastable `. + + Args: + input (Tensor): the input tensor. + exponent (float or tensor): the exponent value + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randn(4) + >>> a + tensor([ 0.4331, 1.2475, 0.6834, -0.2791]) + >>> torch.pow(a, 2) + tensor([ 0.1875, 1.5561, 0.4670, 0.0779]) + >>> exp = torch.arange(1., 5.) 
+ + >>> a = torch.arange(1., 5.) + >>> a + tensor([ 1., 2., 3., 4.]) + >>> exp + tensor([ 1., 2., 3., 4.]) + >>> torch.pow(a, exp) + tensor([ 1., 4., 27., 256.]) + + .. function:: pow(self, exponent, *, out=None) -> Tensor + :noindex: + + :attr:`self` is a scalar ``float`` value, and :attr:`exponent` is a tensor. + The returned tensor :attr:`out` is of the same shape as :attr:`exponent` + + The operation applied is: + + .. math:: + \text{out}_i = \text{self} ^ {\text{exponent}_i} + + Args: + self (float): the scalar base value for the power operation + exponent (Tensor): the exponent tensor + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> exp = torch.arange(1., 5.) + >>> base = 2 + >>> torch.pow(base, exp) + tensor([ 2., 4., 8., 16.]) + """ + ... +def prelu(input: Tensor, weight: Tensor) -> Tensor: ... +@overload +def prod(input: Tensor, *, dtype: Optional[_dtype] = None) -> Tensor: + r""" + prod(input, *, dtype=None) -> Tensor + + Returns the product of all elements in the :attr:`input` tensor. + + Args: + input (Tensor): the input tensor. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + If specified, the input tensor is casted to :attr:`dtype` before the operation + is performed. This is useful for preventing data type overflows. Default: None. + + Example:: + + >>> a = torch.randn(1, 3) + >>> a + tensor([[-0.8020, 0.5428, -1.5854]]) + >>> torch.prod(a) + tensor(0.6902) + + .. function:: prod(input, dim, keepdim=False, *, dtype=None) -> Tensor + :noindex: + + Returns the product of each row of the :attr:`input` tensor in the given + dimension :attr:`dim`. + + If :attr:`keepdim` is ``True``, the output tensor is of the same size + as :attr:`input` except in the dimension :attr:`dim` where it is of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in + the output tensor having 1 fewer dimension than :attr:`input`. + + Args: + input (Tensor): the input tensor. + dim (int): the dimension to reduce. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + If specified, the input tensor is casted to :attr:`dtype` before the operation + is performed. This is useful for preventing data type overflows. Default: None. + + Example:: + + >>> a = torch.randn(4, 2) + >>> a + tensor([[ 0.5261, -0.3837], + [ 1.1857, -0.2498], + [-1.1646, 0.0705], + [ 1.1131, -1.0629]]) + >>> torch.prod(a, 1) + tensor([-0.2018, -0.2962, -0.0821, -1.1831]) + """ + ... +@overload +def prod(input: Tensor, dim: _int, keepdim: _bool = False, *, dtype: Optional[_dtype] = None, out: Optional[Tensor] = None) -> Tensor: + r""" + prod(input, *, dtype=None) -> Tensor + + Returns the product of all elements in the :attr:`input` tensor. + + Args: + input (Tensor): the input tensor. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + If specified, the input tensor is casted to :attr:`dtype` before the operation + is performed. This is useful for preventing data type overflows. Default: None. + + Example:: + + >>> a = torch.randn(1, 3) + >>> a + tensor([[-0.8020, 0.5428, -1.5854]]) + >>> torch.prod(a) + tensor(0.6902) + + .. function:: prod(input, dim, keepdim=False, *, dtype=None) -> Tensor + :noindex: + + Returns the product of each row of the :attr:`input` tensor in the given + dimension :attr:`dim`. 
+ + If :attr:`keepdim` is ``True``, the output tensor is of the same size + as :attr:`input` except in the dimension :attr:`dim` where it is of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in + the output tensor having 1 fewer dimension than :attr:`input`. + + Args: + input (Tensor): the input tensor. + dim (int): the dimension to reduce. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + If specified, the input tensor is casted to :attr:`dtype` before the operation + is performed. This is useful for preventing data type overflows. Default: None. + + Example:: + + >>> a = torch.randn(4, 2) + >>> a + tensor([[ 0.5261, -0.3837], + [ 1.1857, -0.2498], + [-1.1646, 0.0705], + [ 1.1131, -1.0629]]) + >>> torch.prod(a, 1) + tensor([-0.2018, -0.2962, -0.0821, -1.1831]) + """ + ... +@overload +def prod(input: Tensor, dim: Union[str, ellipsis, None], keepdim: _bool = False, *, dtype: Optional[_dtype] = None, out: Optional[Tensor] = None) -> Tensor: + r""" + prod(input, *, dtype=None) -> Tensor + + Returns the product of all elements in the :attr:`input` tensor. + + Args: + input (Tensor): the input tensor. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + If specified, the input tensor is casted to :attr:`dtype` before the operation + is performed. This is useful for preventing data type overflows. Default: None. + + Example:: + + >>> a = torch.randn(1, 3) + >>> a + tensor([[-0.8020, 0.5428, -1.5854]]) + >>> torch.prod(a) + tensor(0.6902) + + .. function:: prod(input, dim, keepdim=False, *, dtype=None) -> Tensor + :noindex: + + Returns the product of each row of the :attr:`input` tensor in the given + dimension :attr:`dim`. + + If :attr:`keepdim` is ``True``, the output tensor is of the same size + as :attr:`input` except in the dimension :attr:`dim` where it is of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in + the output tensor having 1 fewer dimension than :attr:`input`. + + Args: + input (Tensor): the input tensor. + dim (int): the dimension to reduce. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + If specified, the input tensor is casted to :attr:`dtype` before the operation + is performed. This is useful for preventing data type overflows. Default: None. + + Example:: + + >>> a = torch.randn(4, 2) + >>> a + tensor([[ 0.5261, -0.3837], + [ 1.1857, -0.2498], + [-1.1646, 0.0705], + [ 1.1131, -1.0629]]) + >>> torch.prod(a, 1) + tensor([-0.2018, -0.2962, -0.0821, -1.1831]) + """ + ... +def promote_types(type1: _dtype, type2: _dtype) -> _dtype: + r""" + promote_types(type1, type2) -> dtype + + Returns the :class:`torch.dtype` with the smallest size and scalar kind that is + not smaller nor of lower kind than either `type1` or `type2`. See type promotion + :ref:`documentation ` for more information on the type + promotion logic. + + Args: + type1 (:class:`torch.dtype`) + type2 (:class:`torch.dtype`) + + Example:: + + >>> torch.promote_types(torch.int32, torch.float32) + torch.float32 + >>> torch.promote_types(torch.uint8, torch.long) + torch.long + """ + ... +def put(input: Tensor, index: Tensor, source: Tensor, accumulate: _bool = False) -> Tensor: ... +def q_per_channel_axis(input: Tensor) -> _int: ... 
+def q_per_channel_scales(input: Tensor) -> Tensor: ...
+def q_per_channel_zero_points(input: Tensor) -> Tensor: ...
+def q_scale(input: Tensor) -> _float: ...
+def q_zero_point(input: Tensor) -> _int: ...
+def qr(input: Tensor, some: _bool = True, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.qr:
+    r"""
+    qr(input, some=True, *, out=None) -> (Tensor, Tensor)
+
+    Computes the QR decomposition of a matrix or a batch of matrices :attr:`input`,
+    and returns a namedtuple (Q, R) of tensors such that :math:`\text{input} = Q R`
+    with :math:`Q` being an orthogonal matrix or batch of orthogonal matrices and
+    :math:`R` being an upper triangular matrix or batch of upper triangular matrices.
+
+    If :attr:`some` is ``True``, then this function returns the thin (reduced) QR factorization.
+    Otherwise, if :attr:`some` is ``False``, this function returns the complete QR factorization.
+
+    .. warning::
+
+        :func:`torch.qr` is deprecated in favor of :func:`torch.linalg.qr`
+        and will be removed in a future PyTorch release. The boolean parameter :attr:`some` has been
+        replaced with a string parameter :attr:`mode`.
+
+        ``Q, R = torch.qr(A)`` should be replaced with
+
+        .. code:: python
+
+            Q, R = torch.linalg.qr(A)
+
+        ``Q, R = torch.qr(A, some=False)`` should be replaced with
+
+        .. code:: python
+
+            Q, R = torch.linalg.qr(A, mode="complete")
+
+    .. warning::
+        If you plan to backpropagate through QR, note that the current backward implementation
+        is only well-defined when the first :math:`\min(input.size(-1), input.size(-2))`
+        columns of :attr:`input` are linearly independent.
+        This behavior will probably change once QR supports pivoting.
+
+    .. note:: This function uses LAPACK for CPU inputs and MAGMA for CUDA inputs,
+              and may produce different (valid) decompositions on different device types
+              or different platforms.
+
+    Args:
+        input (Tensor): the input tensor of size :math:`(*, m, n)` where `*` is zero or more
+                    batch dimensions consisting of matrices of dimension :math:`m \times n`.
+        some (bool, optional): Set to ``True`` for reduced QR decomposition and ``False`` for
+                    complete QR decomposition. If `k = min(m, n)` then:
+
+                    * ``some=True``: returns `(Q, R)` with dimensions (m, k), (k, n) (default)
+
+                    * ``some=False``: returns `(Q, R)` with dimensions (m, m), (m, n)
+
+    Keyword args:
+        out (tuple, optional): tuple of `Q` and `R` tensors.
+                    The dimensions of `Q` and `R` are detailed in the description of :attr:`some` above.
+
+    Example::
+
+        >>> a = torch.tensor([[12., -51, 4], [6, 167, -68], [-4, 24, -41]])
+        >>> q, r = torch.qr(a)
+        >>> q
+        tensor([[-0.8571,  0.3943,  0.3314],
+                [-0.4286, -0.9029, -0.0343],
+                [ 0.2857, -0.1714,  0.9429]])
+        >>> r
+        tensor([[ -14.0000,  -21.0000,   14.0000],
+                [   0.0000, -175.0000,   70.0000],
+                [   0.0000,    0.0000,  -35.0000]])
+        >>> torch.mm(q, r).round()
+        tensor([[  12.,  -51.,    4.],
+                [   6.,  167.,  -68.],
+                [  -4.,   24.,  -41.]])
+        >>> torch.mm(q.t(), q).round()
+        tensor([[ 1.,  0.,  0.],
+                [ 0.,  1., -0.],
+                [ 0., -0.,  1.]])
+        >>> a = torch.randn(3, 4, 5)
+        >>> q, r = torch.qr(a, some=False)
+        >>> torch.allclose(torch.matmul(q, r), a)
+        True
+        >>> torch.allclose(torch.matmul(q.mT, q), torch.eye(4))
+        True
+    """
+    ...
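+# A migration sketch following the deprecation warning in the :func:`torch.qr`
+# docstring above, using the same matrix as that example:
+#
+#     >>> a = torch.tensor([[12., -51, 4], [6, 167, -68], [-4, 24, -41]])
+#     >>> Q, R = torch.linalg.qr(a)                     # reduced, like some=True
+#     >>> Q2, R2 = torch.linalg.qr(a, mode="complete")  # like some=False
+#     >>> torch.allclose(Q @ R, a)
+#     True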
+@overload +def quantile(input: Tensor, q: Tensor, dim: Optional[_int] = None, keepdim: _bool = False, *, interpolation: str = "linear", out: Optional[Tensor] = None) -> Tensor: + r""" + quantile(input, q, dim=None, keepdim=False, *, interpolation='linear', out=None) -> Tensor + + Computes the q-th quantiles of each row of the :attr:`input` tensor along the dimension :attr:`dim`. + + To compute the quantile, we map q in [0, 1] to the range of indices [0, n] to find the location + of the quantile in the sorted input. If the quantile lies between two data points ``a < b`` with + indices ``i`` and ``j`` in the sorted order, result is computed according to the given + :attr:`interpolation` method as follows: + + - ``linear``: ``a + (b - a) * fraction``, where ``fraction`` is the fractional part of the computed quantile index. + - ``lower``: ``a``. + - ``higher``: ``b``. + - ``nearest``: ``a`` or ``b``, whichever's index is closer to the computed quantile index (rounding down for .5 fractions). + - ``midpoint``: ``(a + b) / 2``. + + If :attr:`q` is a 1D tensor, the first dimension of the output represents the quantiles and has size + equal to the size of :attr:`q`, the remaining dimensions are what remains from the reduction. + + .. note:: + By default :attr:`dim` is ``None`` resulting in the :attr:`input` tensor being flattened before computation. + + Args: + input (Tensor): the input tensor. + q (float or Tensor): a scalar or 1D tensor of values in the range [0, 1]. + dim (int): the dimension to reduce. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + + Keyword arguments: + interpolation (str): interpolation method to use when the desired quantile lies between two data points. + Can be ``linear``, ``lower``, ``higher``, ``midpoint`` and ``nearest``. + Default is ``linear``. + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randn(2, 3) + >>> a + tensor([[ 0.0795, -1.2117, 0.9765], + [ 1.1707, 0.6706, 0.4884]]) + >>> q = torch.tensor([0.25, 0.5, 0.75]) + >>> torch.quantile(a, q, dim=1, keepdim=True) + tensor([[[-0.5661], + [ 0.5795]], + + [[ 0.0795], + [ 0.6706]], + + [[ 0.5280], + [ 0.9206]]]) + >>> torch.quantile(a, q, dim=1, keepdim=True).shape + torch.Size([3, 2, 1]) + >>> a = torch.arange(4.) + >>> a + tensor([0., 1., 2., 3.]) + >>> torch.quantile(a, 0.6, interpolation='linear') + tensor(1.8000) + >>> torch.quantile(a, 0.6, interpolation='lower') + tensor(1.) + >>> torch.quantile(a, 0.6, interpolation='higher') + tensor(2.) + >>> torch.quantile(a, 0.6, interpolation='midpoint') + tensor(1.5000) + >>> torch.quantile(a, 0.6, interpolation='nearest') + tensor(2.) + >>> torch.quantile(a, 0.4, interpolation='nearest') + tensor(1.) + """ + ... +@overload +def quantile(input: Tensor, q: _float, dim: Optional[_int] = None, keepdim: _bool = False, *, interpolation: str = "linear", out: Optional[Tensor] = None) -> Tensor: + r""" + quantile(input, q, dim=None, keepdim=False, *, interpolation='linear', out=None) -> Tensor + + Computes the q-th quantiles of each row of the :attr:`input` tensor along the dimension :attr:`dim`. + + To compute the quantile, we map q in [0, 1] to the range of indices [0, n] to find the location + of the quantile in the sorted input. 
If the quantile lies between two data points ``a < b`` with + indices ``i`` and ``j`` in the sorted order, result is computed according to the given + :attr:`interpolation` method as follows: + + - ``linear``: ``a + (b - a) * fraction``, where ``fraction`` is the fractional part of the computed quantile index. + - ``lower``: ``a``. + - ``higher``: ``b``. + - ``nearest``: ``a`` or ``b``, whichever's index is closer to the computed quantile index (rounding down for .5 fractions). + - ``midpoint``: ``(a + b) / 2``. + + If :attr:`q` is a 1D tensor, the first dimension of the output represents the quantiles and has size + equal to the size of :attr:`q`, the remaining dimensions are what remains from the reduction. + + .. note:: + By default :attr:`dim` is ``None`` resulting in the :attr:`input` tensor being flattened before computation. + + Args: + input (Tensor): the input tensor. + q (float or Tensor): a scalar or 1D tensor of values in the range [0, 1]. + dim (int): the dimension to reduce. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + + Keyword arguments: + interpolation (str): interpolation method to use when the desired quantile lies between two data points. + Can be ``linear``, ``lower``, ``higher``, ``midpoint`` and ``nearest``. + Default is ``linear``. + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randn(2, 3) + >>> a + tensor([[ 0.0795, -1.2117, 0.9765], + [ 1.1707, 0.6706, 0.4884]]) + >>> q = torch.tensor([0.25, 0.5, 0.75]) + >>> torch.quantile(a, q, dim=1, keepdim=True) + tensor([[[-0.5661], + [ 0.5795]], + + [[ 0.0795], + [ 0.6706]], + + [[ 0.5280], + [ 0.9206]]]) + >>> torch.quantile(a, q, dim=1, keepdim=True).shape + torch.Size([3, 2, 1]) + >>> a = torch.arange(4.) + >>> a + tensor([0., 1., 2., 3.]) + >>> torch.quantile(a, 0.6, interpolation='linear') + tensor(1.8000) + >>> torch.quantile(a, 0.6, interpolation='lower') + tensor(1.) + >>> torch.quantile(a, 0.6, interpolation='higher') + tensor(2.) + >>> torch.quantile(a, 0.6, interpolation='midpoint') + tensor(1.5000) + >>> torch.quantile(a, 0.6, interpolation='nearest') + tensor(2.) + >>> torch.quantile(a, 0.4, interpolation='nearest') + tensor(1.) + """ + ... +def quantize_per_channel(input: Tensor, scales: Tensor, zero_points: Tensor, axis: _int, dtype: _dtype) -> Tensor: + r""" + quantize_per_channel(input, scales, zero_points, axis, dtype) -> Tensor + + Converts a float tensor to a per-channel quantized tensor with given scales and zero points. + + Arguments: + input (Tensor): float tensor to quantize + scales (Tensor): float 1D tensor of scales to use, size should match ``input.size(axis)`` + zero_points (int): integer 1D tensor of offset to use, size should match ``input.size(axis)`` + axis (int): dimension on which apply per-channel quantization + dtype (:class:`torch.dtype`): the desired data type of returned tensor. 
+ Has to be one of the quantized dtypes: ``torch.quint8``, ``torch.qint8``, ``torch.qint32`` + + Returns: + Tensor: A newly quantized tensor + + Example:: + + >>> x = torch.tensor([[-1.0, 0.0], [1.0, 2.0]]) + >>> torch.quantize_per_channel(x, torch.tensor([0.1, 0.01]), torch.tensor([10, 0]), 0, torch.quint8) + tensor([[-1., 0.], + [ 1., 2.]], size=(2, 2), dtype=torch.quint8, + quantization_scheme=torch.per_channel_affine, + scale=tensor([0.1000, 0.0100], dtype=torch.float64), + zero_point=tensor([10, 0]), axis=0) + >>> torch.quantize_per_channel(x, torch.tensor([0.1, 0.01]), torch.tensor([10, 0]), 0, torch.quint8).int_repr() + tensor([[ 0, 10], + [100, 200]], dtype=torch.uint8) + """ + ... +@overload +def quantize_per_tensor(input: Tensor, scale: Tensor, zero_point: Tensor, dtype: _dtype) -> Tensor: + r""" + quantize_per_tensor(input, scale, zero_point, dtype) -> Tensor + + Converts a float tensor to a quantized tensor with given scale and zero point. + + Arguments: + input (Tensor): float tensor or list of tensors to quantize + scale (float or Tensor): scale to apply in quantization formula + zero_point (int or Tensor): offset in integer value that maps to float zero + dtype (:class:`torch.dtype`): the desired data type of returned tensor. + Has to be one of the quantized dtypes: ``torch.quint8``, ``torch.qint8``, ``torch.qint32`` + + Returns: + Tensor: A newly quantized tensor or list of quantized tensors. + + Example:: + + >>> torch.quantize_per_tensor(torch.tensor([-1.0, 0.0, 1.0, 2.0]), 0.1, 10, torch.quint8) + tensor([-1., 0., 1., 2.], size=(4,), dtype=torch.quint8, + quantization_scheme=torch.per_tensor_affine, scale=0.1, zero_point=10) + >>> torch.quantize_per_tensor(torch.tensor([-1.0, 0.0, 1.0, 2.0]), 0.1, 10, torch.quint8).int_repr() + tensor([ 0, 10, 20, 30], dtype=torch.uint8) + >>> torch.quantize_per_tensor([torch.tensor([-1.0, 0.0]), torch.tensor([-2.0, 2.0])], + >>> torch.tensor([0.1, 0.2]), torch.tensor([10, 20]), torch.quint8) + (tensor([-1., 0.], size=(2,), dtype=torch.quint8, + quantization_scheme=torch.per_tensor_affine, scale=0.1, zero_point=10), + tensor([-2., 2.], size=(2,), dtype=torch.quint8, + quantization_scheme=torch.per_tensor_affine, scale=0.2, zero_point=20)) + >>> torch.quantize_per_tensor(torch.tensor([-1.0, 0.0, 1.0, 2.0]), torch.tensor(0.1), torch.tensor(10), torch.quint8) + tensor([-1., 0., 1., 2.], size=(4,), dtype=torch.quint8, + quantization_scheme=torch.per_tensor_affine, scale=0.10, zero_point=10) + """ + ... +@overload +def quantize_per_tensor(input: Tensor, scale: _float, zero_point: _int, dtype: _dtype) -> Tensor: + r""" + quantize_per_tensor(input, scale, zero_point, dtype) -> Tensor + + Converts a float tensor to a quantized tensor with given scale and zero point. + + Arguments: + input (Tensor): float tensor or list of tensors to quantize + scale (float or Tensor): scale to apply in quantization formula + zero_point (int or Tensor): offset in integer value that maps to float zero + dtype (:class:`torch.dtype`): the desired data type of returned tensor. + Has to be one of the quantized dtypes: ``torch.quint8``, ``torch.qint8``, ``torch.qint32`` + + Returns: + Tensor: A newly quantized tensor or list of quantized tensors. 
+ + Example:: + + >>> torch.quantize_per_tensor(torch.tensor([-1.0, 0.0, 1.0, 2.0]), 0.1, 10, torch.quint8) + tensor([-1., 0., 1., 2.], size=(4,), dtype=torch.quint8, + quantization_scheme=torch.per_tensor_affine, scale=0.1, zero_point=10) + >>> torch.quantize_per_tensor(torch.tensor([-1.0, 0.0, 1.0, 2.0]), 0.1, 10, torch.quint8).int_repr() + tensor([ 0, 10, 20, 30], dtype=torch.uint8) + >>> torch.quantize_per_tensor([torch.tensor([-1.0, 0.0]), torch.tensor([-2.0, 2.0])], + >>> torch.tensor([0.1, 0.2]), torch.tensor([10, 20]), torch.quint8) + (tensor([-1., 0.], size=(2,), dtype=torch.quint8, + quantization_scheme=torch.per_tensor_affine, scale=0.1, zero_point=10), + tensor([-2., 2.], size=(2,), dtype=torch.quint8, + quantization_scheme=torch.per_tensor_affine, scale=0.2, zero_point=20)) + >>> torch.quantize_per_tensor(torch.tensor([-1.0, 0.0, 1.0, 2.0]), torch.tensor(0.1), torch.tensor(10), torch.quint8) + tensor([-1., 0., 1., 2.], size=(4,), dtype=torch.quint8, + quantization_scheme=torch.per_tensor_affine, scale=0.10, zero_point=10) + """ + ... +@overload +def quantize_per_tensor(tensors: Union[Tuple[Tensor, ...], List[Tensor]], scales: Tensor, zero_points: Tensor, dtype: _dtype) -> Tuple[Tensor, ...]: + r""" + quantize_per_tensor(input, scale, zero_point, dtype) -> Tensor + + Converts a float tensor to a quantized tensor with given scale and zero point. + + Arguments: + input (Tensor): float tensor or list of tensors to quantize + scale (float or Tensor): scale to apply in quantization formula + zero_point (int or Tensor): offset in integer value that maps to float zero + dtype (:class:`torch.dtype`): the desired data type of returned tensor. + Has to be one of the quantized dtypes: ``torch.quint8``, ``torch.qint8``, ``torch.qint32`` + + Returns: + Tensor: A newly quantized tensor or list of quantized tensors. + + Example:: + + >>> torch.quantize_per_tensor(torch.tensor([-1.0, 0.0, 1.0, 2.0]), 0.1, 10, torch.quint8) + tensor([-1., 0., 1., 2.], size=(4,), dtype=torch.quint8, + quantization_scheme=torch.per_tensor_affine, scale=0.1, zero_point=10) + >>> torch.quantize_per_tensor(torch.tensor([-1.0, 0.0, 1.0, 2.0]), 0.1, 10, torch.quint8).int_repr() + tensor([ 0, 10, 20, 30], dtype=torch.uint8) + >>> torch.quantize_per_tensor([torch.tensor([-1.0, 0.0]), torch.tensor([-2.0, 2.0])], + >>> torch.tensor([0.1, 0.2]), torch.tensor([10, 20]), torch.quint8) + (tensor([-1., 0.], size=(2,), dtype=torch.quint8, + quantization_scheme=torch.per_tensor_affine, scale=0.1, zero_point=10), + tensor([-2., 2.], size=(2,), dtype=torch.quint8, + quantization_scheme=torch.per_tensor_affine, scale=0.2, zero_point=20)) + >>> torch.quantize_per_tensor(torch.tensor([-1.0, 0.0, 1.0, 2.0]), torch.tensor(0.1), torch.tensor(10), torch.quint8) + tensor([-1., 0., 1., 2.], size=(4,), dtype=torch.quint8, + quantization_scheme=torch.per_tensor_affine, scale=0.10, zero_point=10) + """ + ... +def quantize_per_tensor_dynamic(input: Tensor, dtype: _dtype, reduce_range: _bool) -> Tensor: + r""" + quantize_per_tensor_dynamic(input, dtype, reduce_range) -> Tensor + + Converts a float tensor to a quantized tensor with scale and zero_point calculated + dynamically based on the input. + + Arguments: + input (Tensor): float tensor or list of tensors to quantize + dtype (:class:`torch.dtype`): the desired data type of returned tensor. 
+ Has to be one of the quantized dtypes: ``torch.quint8``, ``torch.qint8`` + reduce_range (bool): a flag to indicate whether to reduce the range of quantized + data by 1 bit, it's required to avoid instruction overflow for some hardwares + + Returns: + Tensor: A newly (dynamically) quantized tensor + + Example:: + + >>> t = torch.quantize_per_tensor_dynamic(torch.tensor([-1.0, 0.0, 1.0, 2.0]), torch.quint8, False) + >>> print(t) + tensor([-1., 0., 1., 2.], size=(4,), dtype=torch.quint8, + quantization_scheme=torch.per_tensor_affine, scale=0.011764705882352941, + zero_point=85) + >>> t.int_repr() + tensor([ 0, 85, 170, 255], dtype=torch.uint8) + """ + ... +def quantized_batch_norm(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], mean: Tensor, var: Tensor, eps: _float, output_scale: _float, output_zero_point: _int) -> Tensor: + r""" + quantized_batch_norm(input, weight=None, bias=None, mean, var, eps, output_scale, output_zero_point) -> Tensor + + Applies batch normalization on a 4D (NCHW) quantized tensor. + + .. math:: + + y = \frac{x - \mathrm{E}[x]}{\sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta + + Arguments: + input (Tensor): quantized tensor + weight (Tensor): float tensor that corresponds to the gamma, size C + bias (Tensor): float tensor that corresponds to the beta, size C + mean (Tensor): float mean value in batch normalization, size C + var (Tensor): float tensor for variance, size C + eps (float): a value added to the denominator for numerical stability. + output_scale (float): output quantized tensor scale + output_zero_point (int): output quantized tensor zero_point + + Returns: + Tensor: A quantized tensor with batch normalization applied. + + Example:: + + >>> qx = torch.quantize_per_tensor(torch.rand(2, 2, 2, 2), 1.5, 3, torch.quint8) + >>> torch.quantized_batch_norm(qx, torch.ones(2), torch.zeros(2), torch.rand(2), torch.rand(2), 0.00001, 0.2, 2) + tensor([[[[-0.2000, -0.2000], + [ 1.6000, -0.2000]], + + [[-0.4000, -0.4000], + [-0.4000, 0.6000]]], + + + [[[-0.2000, -0.2000], + [-0.2000, -0.2000]], + + [[ 0.6000, -0.4000], + [ 0.6000, -0.4000]]]], size=(2, 2, 2, 2), dtype=torch.quint8, + quantization_scheme=torch.per_tensor_affine, scale=0.2, zero_point=2) + """ + ... +def quantized_gru_cell(input: Tensor, hx: Tensor, w_ih: Tensor, w_hh: Tensor, b_ih: Tensor, b_hh: Tensor, packed_ih: Tensor, packed_hh: Tensor, col_offsets_ih: Tensor, col_offsets_hh: Tensor, scale_ih: Union[Number, _complex], scale_hh: Union[Number, _complex], zero_point_ih: Union[Number, _complex], zero_point_hh: Union[Number, _complex]) -> Tensor: ... +def quantized_lstm_cell(input: Tensor, hx: Union[Tuple[Tensor, ...], List[Tensor]], w_ih: Tensor, w_hh: Tensor, b_ih: Tensor, b_hh: Tensor, packed_ih: Tensor, packed_hh: Tensor, col_offsets_ih: Tensor, col_offsets_hh: Tensor, scale_ih: Union[Number, _complex], scale_hh: Union[Number, _complex], zero_point_ih: Union[Number, _complex], zero_point_hh: Union[Number, _complex]) -> Tuple[Tensor, Tensor]: ... +def quantized_max_pool1d(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size] = (), padding: Union[_int, _size] = 0, dilation: Union[_int, _size] = 1, ceil_mode: _bool = False) -> Tensor: + r""" + quantized_max_pool1d(input, kernel_size, stride=[], padding=0, dilation=1, ceil_mode=False) -> Tensor + + Applies a 1D max pooling over an input quantized tensor composed of several input planes. 
+ + Arguments: + input (Tensor): quantized tensor + kernel_size (list of int): the size of the sliding window + stride (``list of int``, optional): the stride of the sliding window + padding (``list of int``, optional): padding to be added on both sides, must be >= 0 and <= kernel_size / 2 + dilation (``list of int``, optional): The stride between elements within a sliding window, must be > 0. Default 1 + ceil_mode (bool, optional): If True, will use ceil instead of floor to compute the output shape. + Defaults to False. + + + Returns: + Tensor: A quantized tensor with max_pool1d applied. + + Example:: + + >>> qx = torch.quantize_per_tensor(torch.rand(2, 2), 1.5, 3, torch.quint8) + >>> torch.quantized_max_pool1d(qx, [2]) + tensor([[0.0000], + [1.5000]], size=(2, 1), dtype=torch.quint8, + quantization_scheme=torch.per_tensor_affine, scale=1.5, zero_point=3) + """ + ... +def quantized_max_pool2d(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size] = (), padding: Union[_int, _size] = 0, dilation: Union[_int, _size] = 1, ceil_mode: _bool = False) -> Tensor: + r""" + quantized_max_pool2d(input, kernel_size, stride=[], padding=0, dilation=1, ceil_mode=False) -> Tensor + + Applies a 2D max pooling over an input quantized tensor composed of several input planes. + + Arguments: + input (Tensor): quantized tensor + kernel_size (``list of int``): the size of the sliding window + stride (``list of int``, optional): the stride of the sliding window + padding (``list of int``, optional): padding to be added on both sides, must be >= 0 and <= kernel_size / 2 + dilation (``list of int``, optional): The stride between elements within a sliding window, must be > 0. Default 1 + ceil_mode (bool, optional): If True, will use ceil instead of floor to compute the output shape. + Defaults to False. + + + Returns: + Tensor: A quantized tensor with max_pool2d applied. + + Example:: + + >>> qx = torch.quantize_per_tensor(torch.rand(2, 2, 2, 2), 1.5, 3, torch.quint8) + >>> torch.quantized_max_pool2d(qx, [2,2]) + tensor([[[[1.5000]], + + [[1.5000]]], + + + [[[0.0000]], + + [[0.0000]]]], size=(2, 2, 1, 1), dtype=torch.quint8, + quantization_scheme=torch.per_tensor_affine, scale=1.5, zero_point=3) + """ + ... +def quantized_max_pool3d(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size] = (), padding: Union[_int, _size] = 0, dilation: Union[_int, _size] = 1, ceil_mode: _bool = False) -> Tensor: ... +def quantized_rnn_relu_cell(input: Tensor, hx: Tensor, w_ih: Tensor, w_hh: Tensor, b_ih: Tensor, b_hh: Tensor, packed_ih: Tensor, packed_hh: Tensor, col_offsets_ih: Tensor, col_offsets_hh: Tensor, scale_ih: Union[Number, _complex], scale_hh: Union[Number, _complex], zero_point_ih: Union[Number, _complex], zero_point_hh: Union[Number, _complex]) -> Tensor: ... +def quantized_rnn_tanh_cell(input: Tensor, hx: Tensor, w_ih: Tensor, w_hh: Tensor, b_ih: Tensor, b_hh: Tensor, packed_ih: Tensor, packed_hh: Tensor, col_offsets_ih: Tensor, col_offsets_hh: Tensor, scale_ih: Union[Number, _complex], scale_hh: Union[Number, _complex], zero_point_ih: Union[Number, _complex], zero_point_hh: Union[Number, _complex]) -> Tensor: ... +def rad2deg(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + rad2deg(input, *, out=None) -> Tensor + + Returns a new tensor with each of the elements of :attr:`input` + converted from angles in radians to degrees. + + Args: + input (Tensor): the input tensor. + + Keyword arguments: + out (Tensor, optional): the output tensor. 
+ + Example:: + + >>> a = torch.tensor([[3.142, -3.142], [6.283, -6.283], [1.570, -1.570]]) + >>> torch.rad2deg(a) + tensor([[ 180.0233, -180.0233], + [ 359.9894, -359.9894], + [ 89.9544, -89.9544]]) + """ + ... +def rad2deg_(input: Tensor) -> Tensor: ... +@overload +def rand(size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator], names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + rand(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False) -> Tensor + + Returns a tensor filled with random numbers from a uniform distribution + on the interval :math:`[0, 1)` + + The shape of the tensor is defined by the variable argument :attr:`size`. + + Args: + size (int...): a sequence of integers defining the shape of the output tensor. + Can be a variable number of arguments or a collection like a list or tuple. + + Keyword args: + generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling + out (Tensor, optional): the output tensor. + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + pin_memory (bool, optional): If set, returned tensor would be allocated in + the pinned memory. Works only for CPU tensors. Default: ``False``. + + Example:: + + >>> torch.rand(4) + tensor([ 0.5204, 0.2503, 0.3525, 0.5673]) + >>> torch.rand(2, 3) + tensor([[ 0.8237, 0.5781, 0.6879], + [ 0.3816, 0.7249, 0.0998]]) + """ + ... +@overload +def rand(*size: _int, generator: Optional[Generator], names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + rand(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False) -> Tensor + + Returns a tensor filled with random numbers from a uniform distribution + on the interval :math:`[0, 1)` + + The shape of the tensor is defined by the variable argument :attr:`size`. + + Args: + size (int...): a sequence of integers defining the shape of the output tensor. + Can be a variable number of arguments or a collection like a list or tuple. + + Keyword args: + generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling + out (Tensor, optional): the output tensor. + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. 
+ Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + pin_memory (bool, optional): If set, returned tensor would be allocated in + the pinned memory. Works only for CPU tensors. Default: ``False``. + + Example:: + + >>> torch.rand(4) + tensor([ 0.5204, 0.2503, 0.3525, 0.5673]) + >>> torch.rand(2, 3) + tensor([[ 0.8237, 0.5781, 0.6879], + [ 0.3816, 0.7249, 0.0998]]) + """ + ... +@overload +def rand(size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator], out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + rand(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False) -> Tensor + + Returns a tensor filled with random numbers from a uniform distribution + on the interval :math:`[0, 1)` + + The shape of the tensor is defined by the variable argument :attr:`size`. + + Args: + size (int...): a sequence of integers defining the shape of the output tensor. + Can be a variable number of arguments or a collection like a list or tuple. + + Keyword args: + generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling + out (Tensor, optional): the output tensor. + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + pin_memory (bool, optional): If set, returned tensor would be allocated in + the pinned memory. Works only for CPU tensors. Default: ``False``. + + Example:: + + >>> torch.rand(4) + tensor([ 0.5204, 0.2503, 0.3525, 0.5673]) + >>> torch.rand(2, 3) + tensor([[ 0.8237, 0.5781, 0.6879], + [ 0.3816, 0.7249, 0.0998]]) + """ + ... +@overload +def rand(*size: _int, generator: Optional[Generator], out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + rand(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False) -> Tensor + + Returns a tensor filled with random numbers from a uniform distribution + on the interval :math:`[0, 1)` + + The shape of the tensor is defined by the variable argument :attr:`size`. + + Args: + size (int...): a sequence of integers defining the shape of the output tensor. 
+ Can be a variable number of arguments or a collection like a list or tuple. + + Keyword args: + generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling + out (Tensor, optional): the output tensor. + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + pin_memory (bool, optional): If set, returned tensor would be allocated in + the pinned memory. Works only for CPU tensors. Default: ``False``. + + Example:: + + >>> torch.rand(4) + tensor([ 0.5204, 0.2503, 0.3525, 0.5673]) + >>> torch.rand(2, 3) + tensor([[ 0.8237, 0.5781, 0.6879], + [ 0.3816, 0.7249, 0.0998]]) + """ + ... +@overload +def rand(size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + rand(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False) -> Tensor + + Returns a tensor filled with random numbers from a uniform distribution + on the interval :math:`[0, 1)` + + The shape of the tensor is defined by the variable argument :attr:`size`. + + Args: + size (int...): a sequence of integers defining the shape of the output tensor. + Can be a variable number of arguments or a collection like a list or tuple. + + Keyword args: + generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling + out (Tensor, optional): the output tensor. + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + pin_memory (bool, optional): If set, returned tensor would be allocated in + the pinned memory. Works only for CPU tensors. Default: ``False``. + + Example:: + + >>> torch.rand(4) + tensor([ 0.5204, 0.2503, 0.3525, 0.5673]) + >>> torch.rand(2, 3) + tensor([[ 0.8237, 0.5781, 0.6879], + [ 0.3816, 0.7249, 0.0998]]) + """ + ... 
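+# A minimal usage sketch for the ``torch.rand`` overloads documented above; the
+# seeded ``torch.Generator``, shapes, and dtypes are illustrative assumptions:
+#
+#     >>> import torch
+#     >>> g = torch.Generator().manual_seed(0)
+#     >>> torch.rand(2, 3, generator=g)            # varargs size, reproducible via the generator
+#     >>> torch.rand((2, 3), dtype=torch.float64)  # size given as a sequence, explicit dtype
+#     >>> buf = torch.empty(4)
+#     >>> torch.rand(4, out=buf)                   # writes into a preallocated tensor and returns it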
+@overload +def rand(*size: _int, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + rand(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False) -> Tensor + + Returns a tensor filled with random numbers from a uniform distribution + on the interval :math:`[0, 1)` + + The shape of the tensor is defined by the variable argument :attr:`size`. + + Args: + size (int...): a sequence of integers defining the shape of the output tensor. + Can be a variable number of arguments or a collection like a list or tuple. + + Keyword args: + generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling + out (Tensor, optional): the output tensor. + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + pin_memory (bool, optional): If set, returned tensor would be allocated in + the pinned memory. Works only for CPU tensors. Default: ``False``. + + Example:: + + >>> torch.rand(4) + tensor([ 0.5204, 0.2503, 0.3525, 0.5673]) + >>> torch.rand(2, 3) + tensor([[ 0.8237, 0.5781, 0.6879], + [ 0.3816, 0.7249, 0.0998]]) + """ + ... +@overload +def rand(size: Sequence[Union[_int, SymInt]], *, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + rand(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False) -> Tensor + + Returns a tensor filled with random numbers from a uniform distribution + on the interval :math:`[0, 1)` + + The shape of the tensor is defined by the variable argument :attr:`size`. + + Args: + size (int...): a sequence of integers defining the shape of the output tensor. + Can be a variable number of arguments or a collection like a list or tuple. + + Keyword args: + generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling + out (Tensor, optional): the output tensor. + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. 
+ requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + pin_memory (bool, optional): If set, returned tensor would be allocated in + the pinned memory. Works only for CPU tensors. Default: ``False``. + + Example:: + + >>> torch.rand(4) + tensor([ 0.5204, 0.2503, 0.3525, 0.5673]) + >>> torch.rand(2, 3) + tensor([[ 0.8237, 0.5781, 0.6879], + [ 0.3816, 0.7249, 0.0998]]) + """ + ... +@overload +def rand(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + rand(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False) -> Tensor + + Returns a tensor filled with random numbers from a uniform distribution + on the interval :math:`[0, 1)` + + The shape of the tensor is defined by the variable argument :attr:`size`. + + Args: + size (int...): a sequence of integers defining the shape of the output tensor. + Can be a variable number of arguments or a collection like a list or tuple. + + Keyword args: + generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling + out (Tensor, optional): the output tensor. + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + pin_memory (bool, optional): If set, returned tensor would be allocated in + the pinned memory. Works only for CPU tensors. Default: ``False``. + + Example:: + + >>> torch.rand(4) + tensor([ 0.5204, 0.2503, 0.3525, 0.5673]) + >>> torch.rand(2, 3) + tensor([[ 0.8237, 0.5781, 0.6879], + [ 0.3816, 0.7249, 0.0998]]) + """ + ... +def rand_like(input: Tensor, *, memory_format: Optional[memory_format] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + rand_like(input, *, dtype=None, layout=None, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor + + Returns a tensor with the same size as :attr:`input` that is filled with + random numbers from a uniform distribution on the interval :math:`[0, 1)`. + ``torch.rand_like(input)`` is equivalent to + ``torch.rand(input.size(), dtype=input.dtype, layout=input.layout, device=input.device)``. + + Args: + input (Tensor): the size of :attr:`input` will determine size of the output tensor. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned Tensor. + Default: if ``None``, defaults to the dtype of :attr:`input`. + layout (:class:`torch.layout`, optional): the desired layout of returned tensor. + Default: if ``None``, defaults to the layout of :attr:`input`. 
+ device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, defaults to the device of :attr:`input`. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + memory_format (:class:`torch.memory_format`, optional): the desired memory format of + returned Tensor. Default: ``torch.preserve_format``. + """ + ... +@overload +def randint(low: _int, high: _int, size: _size, *, generator: Optional[Generator] = None, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, pin_memory: _bool = False) -> Tensor: + r""" + randint(low=0, high, size, \*, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + Returns a tensor filled with random integers generated uniformly + between :attr:`low` (inclusive) and :attr:`high` (exclusive). + + The shape of the tensor is defined by the variable argument :attr:`size`. + + .. note:: + With the global dtype default (``torch.float32``), this function returns + a tensor with dtype ``torch.int64``. + + Args: + low (int, optional): Lowest integer to be drawn from the distribution. Default: 0. + high (int): One above the highest integer to be drawn from the distribution. + size (tuple): a tuple defining the shape of the output tensor. + + Keyword args: + generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling + out (Tensor, optional): the output tensor. + dtype (`torch.dtype`, optional) - the desired data type of returned tensor. Default: if ``None``, + this function returns a tensor with dtype ``torch.int64``. + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + + Example:: + + >>> torch.randint(3, 5, (3,)) + tensor([4, 3, 4]) + + + >>> torch.randint(10, (2, 2)) + tensor([[0, 2], + [5, 5]]) + + + >>> torch.randint(3, 10, (2, 2)) + tensor([[4, 5], + [6, 7]]) + """ + ... +@overload +def randint(high: _int, size: _size, *, generator: Optional[Generator] = None, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, pin_memory: _bool = False) -> Tensor: + r""" + randint(low=0, high, size, \*, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + Returns a tensor filled with random integers generated uniformly + between :attr:`low` (inclusive) and :attr:`high` (exclusive). + + The shape of the tensor is defined by the variable argument :attr:`size`. + + .. note:: + With the global dtype default (``torch.float32``), this function returns + a tensor with dtype ``torch.int64``. + + Args: + low (int, optional): Lowest integer to be drawn from the distribution. Default: 0. + high (int): One above the highest integer to be drawn from the distribution. + size (tuple): a tuple defining the shape of the output tensor. 
+ + Keyword args: + generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling + out (Tensor, optional): the output tensor. + dtype (`torch.dtype`, optional) - the desired data type of returned tensor. Default: if ``None``, + this function returns a tensor with dtype ``torch.int64``. + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + + Example:: + + >>> torch.randint(3, 5, (3,)) + tensor([4, 3, 4]) + + + >>> torch.randint(10, (2, 2)) + tensor([[0, 2], + [5, 5]]) + + + >>> torch.randint(3, 10, (2, 2)) + tensor([[4, 5], + [6, 7]]) + """ + ... +@overload +def randint(high: Union[_int, SymInt], size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator], out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + randint(low=0, high, size, \*, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + Returns a tensor filled with random integers generated uniformly + between :attr:`low` (inclusive) and :attr:`high` (exclusive). + + The shape of the tensor is defined by the variable argument :attr:`size`. + + .. note:: + With the global dtype default (``torch.float32``), this function returns + a tensor with dtype ``torch.int64``. + + Args: + low (int, optional): Lowest integer to be drawn from the distribution. Default: 0. + high (int): One above the highest integer to be drawn from the distribution. + size (tuple): a tuple defining the shape of the output tensor. + + Keyword args: + generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling + out (Tensor, optional): the output tensor. + dtype (`torch.dtype`, optional) - the desired data type of returned tensor. Default: if ``None``, + this function returns a tensor with dtype ``torch.int64``. + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + + Example:: + + >>> torch.randint(3, 5, (3,)) + tensor([4, 3, 4]) + + + >>> torch.randint(10, (2, 2)) + tensor([[0, 2], + [5, 5]]) + + + >>> torch.randint(3, 10, (2, 2)) + tensor([[4, 5], + [6, 7]]) + """ + ... 
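+# A minimal usage sketch for ``torch.randint`` as documented above; the bounds,
+# shapes, and dtype below are illustrative assumptions:
+#
+#     >>> import torch
+#     >>> torch.randint(10, (2, 2))                    # low defaults to 0, values drawn from [0, 10)
+#     >>> torch.randint(3, 10, (2, 2))                 # values drawn from [3, 10)
+#     >>> torch.randint(0, 2, (4,), dtype=torch.bool)  # explicit dtype, e.g. random boolean flags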
+@overload +def randint(high: Union[_int, SymInt], size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + randint(low=0, high, size, \*, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + Returns a tensor filled with random integers generated uniformly + between :attr:`low` (inclusive) and :attr:`high` (exclusive). + + The shape of the tensor is defined by the variable argument :attr:`size`. + + .. note:: + With the global dtype default (``torch.float32``), this function returns + a tensor with dtype ``torch.int64``. + + Args: + low (int, optional): Lowest integer to be drawn from the distribution. Default: 0. + high (int): One above the highest integer to be drawn from the distribution. + size (tuple): a tuple defining the shape of the output tensor. + + Keyword args: + generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling + out (Tensor, optional): the output tensor. + dtype (`torch.dtype`, optional) - the desired data type of returned tensor. Default: if ``None``, + this function returns a tensor with dtype ``torch.int64``. + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + + Example:: + + >>> torch.randint(3, 5, (3,)) + tensor([4, 3, 4]) + + + >>> torch.randint(10, (2, 2)) + tensor([[0, 2], + [5, 5]]) + + + >>> torch.randint(3, 10, (2, 2)) + tensor([[4, 5], + [6, 7]]) + """ + ... +@overload +def randint(low: Union[_int, SymInt], high: Union[_int, SymInt], size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator], out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + randint(low=0, high, size, \*, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + Returns a tensor filled with random integers generated uniformly + between :attr:`low` (inclusive) and :attr:`high` (exclusive). + + The shape of the tensor is defined by the variable argument :attr:`size`. + + .. note:: + With the global dtype default (``torch.float32``), this function returns + a tensor with dtype ``torch.int64``. + + Args: + low (int, optional): Lowest integer to be drawn from the distribution. Default: 0. + high (int): One above the highest integer to be drawn from the distribution. + size (tuple): a tuple defining the shape of the output tensor. + + Keyword args: + generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling + out (Tensor, optional): the output tensor. + dtype (`torch.dtype`, optional) - the desired data type of returned tensor. Default: if ``None``, + this function returns a tensor with dtype ``torch.int64``. 
+ layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + + Example:: + + >>> torch.randint(3, 5, (3,)) + tensor([4, 3, 4]) + + + >>> torch.randint(10, (2, 2)) + tensor([[0, 2], + [5, 5]]) + + + >>> torch.randint(3, 10, (2, 2)) + tensor([[4, 5], + [6, 7]]) + """ + ... +@overload +def randint(low: Union[_int, SymInt], high: Union[_int, SymInt], size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + randint(low=0, high, size, \*, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + Returns a tensor filled with random integers generated uniformly + between :attr:`low` (inclusive) and :attr:`high` (exclusive). + + The shape of the tensor is defined by the variable argument :attr:`size`. + + .. note:: + With the global dtype default (``torch.float32``), this function returns + a tensor with dtype ``torch.int64``. + + Args: + low (int, optional): Lowest integer to be drawn from the distribution. Default: 0. + high (int): One above the highest integer to be drawn from the distribution. + size (tuple): a tuple defining the shape of the output tensor. + + Keyword args: + generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling + out (Tensor, optional): the output tensor. + dtype (`torch.dtype`, optional) - the desired data type of returned tensor. Default: if ``None``, + this function returns a tensor with dtype ``torch.int64``. + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + + Example:: + + >>> torch.randint(3, 5, (3,)) + tensor([4, 3, 4]) + + + >>> torch.randint(10, (2, 2)) + tensor([[0, 2], + [5, 5]]) + + + >>> torch.randint(3, 10, (2, 2)) + tensor([[4, 5], + [6, 7]]) + """ + ... 
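+# A short sketch of the keyword-only ``generator=`` and ``out=`` arguments
+# accepted by the ``torch.randint`` overloads above; the seed and the
+# preallocated buffer are illustrative assumptions:
+#
+#     >>> import torch
+#     >>> g = torch.Generator().manual_seed(42)
+#     >>> buf = torch.empty(2, 2, dtype=torch.int64)
+#     >>> torch.randint(0, 100, (2, 2), generator=g, out=buf)  # fills ``buf`` in place and returns it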
+@overload +def randint_like(input: Tensor, high: Union[_int, SymInt], *, memory_format: Optional[memory_format] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + randint_like(input, low=0, high, \*, dtype=None, layout=torch.strided, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor + + Returns a tensor with the same shape as Tensor :attr:`input` filled with + random integers generated uniformly between :attr:`low` (inclusive) and + :attr:`high` (exclusive). + + .. note: + With the global dtype default (``torch.float32``), this function returns + a tensor with dtype ``torch.int64``. + + Args: + input (Tensor): the size of :attr:`input` will determine size of the output tensor. + low (int, optional): Lowest integer to be drawn from the distribution. Default: 0. + high (int): One above the highest integer to be drawn from the distribution. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned Tensor. + Default: if ``None``, defaults to the dtype of :attr:`input`. + layout (:class:`torch.layout`, optional): the desired layout of returned tensor. + Default: if ``None``, defaults to the layout of :attr:`input`. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, defaults to the device of :attr:`input`. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + memory_format (:class:`torch.memory_format`, optional): the desired memory format of + returned Tensor. Default: ``torch.preserve_format``. + """ + ... +@overload +def randint_like(input: Tensor, low: Union[_int, SymInt], high: Union[_int, SymInt], *, memory_format: Optional[memory_format] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + randint_like(input, low=0, high, \*, dtype=None, layout=torch.strided, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor + + Returns a tensor with the same shape as Tensor :attr:`input` filled with + random integers generated uniformly between :attr:`low` (inclusive) and + :attr:`high` (exclusive). + + .. note: + With the global dtype default (``torch.float32``), this function returns + a tensor with dtype ``torch.int64``. + + Args: + input (Tensor): the size of :attr:`input` will determine size of the output tensor. + low (int, optional): Lowest integer to be drawn from the distribution. Default: 0. + high (int): One above the highest integer to be drawn from the distribution. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned Tensor. + Default: if ``None``, defaults to the dtype of :attr:`input`. + layout (:class:`torch.layout`, optional): the desired layout of returned tensor. + Default: if ``None``, defaults to the layout of :attr:`input`. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, defaults to the device of :attr:`input`. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + memory_format (:class:`torch.memory_format`, optional): the desired memory format of + returned Tensor. 
Default: ``torch.preserve_format``. + """ + ... +@overload +def randn(size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator], names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + randn(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False) -> Tensor + + + Returns a tensor filled with random numbers from a normal distribution + with mean `0` and variance `1` (also called the standard normal + distribution). + + .. math:: + \text{out}_{i} \sim \mathcal{N}(0, 1) + + For complex dtypes, the tensor is i.i.d. sampled from a `complex normal distribution`_ with zero mean and + unit variance as + + .. math:: + \text{out}_{i} \sim \mathcal{CN}(0, 1) + + This is equivalent to separately sampling the real :math:`(\operatorname{Re})` and imaginary + :math:`(\operatorname{Im})` part of :math:`\text{out}_i` as + + .. math:: + \operatorname{Re}(\text{out}_{i}) \sim \mathcal{N}(0, \frac{1}{2}),\quad + \operatorname{Im}(\text{out}_{i}) \sim \mathcal{N}(0, \frac{1}{2}) + + The shape of the tensor is defined by the variable argument :attr:`size`. + + + Args: + size (int...): a sequence of integers defining the shape of the output tensor. + Can be a variable number of arguments or a collection like a list or tuple. + + Keyword args: + generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling + out (Tensor, optional): the output tensor. + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + pin_memory (bool, optional): If set, returned tensor would be allocated in + the pinned memory. Works only for CPU tensors. Default: ``False``. + + Example:: + + >>> torch.randn(4) + tensor([-2.1436, 0.9966, 2.3426, -0.6366]) + >>> torch.randn(2, 3) + tensor([[ 1.5954, 2.8929, -1.0923], + [ 1.1719, -0.4709, -0.1996]]) + + .. _complex normal distribution: https://en.wikipedia.org/wiki/Complex_normal_distribution + """ + ... +@overload +def randn(*size: _int, generator: Optional[Generator], names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + randn(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False) -> Tensor + + + Returns a tensor filled with random numbers from a normal distribution + with mean `0` and variance `1` (also called the standard normal + distribution). + + .. math:: + \text{out}_{i} \sim \mathcal{N}(0, 1) + + For complex dtypes, the tensor is i.i.d. 
sampled from a `complex normal distribution`_ with zero mean and + unit variance as + + .. math:: + \text{out}_{i} \sim \mathcal{CN}(0, 1) + + This is equivalent to separately sampling the real :math:`(\operatorname{Re})` and imaginary + :math:`(\operatorname{Im})` part of :math:`\text{out}_i` as + + .. math:: + \operatorname{Re}(\text{out}_{i}) \sim \mathcal{N}(0, \frac{1}{2}),\quad + \operatorname{Im}(\text{out}_{i}) \sim \mathcal{N}(0, \frac{1}{2}) + + The shape of the tensor is defined by the variable argument :attr:`size`. + + + Args: + size (int...): a sequence of integers defining the shape of the output tensor. + Can be a variable number of arguments or a collection like a list or tuple. + + Keyword args: + generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling + out (Tensor, optional): the output tensor. + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + pin_memory (bool, optional): If set, returned tensor would be allocated in + the pinned memory. Works only for CPU tensors. Default: ``False``. + + Example:: + + >>> torch.randn(4) + tensor([-2.1436, 0.9966, 2.3426, -0.6366]) + >>> torch.randn(2, 3) + tensor([[ 1.5954, 2.8929, -1.0923], + [ 1.1719, -0.4709, -0.1996]]) + + .. _complex normal distribution: https://en.wikipedia.org/wiki/Complex_normal_distribution + """ + ... +@overload +def randn(size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator], out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + randn(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False) -> Tensor + + + Returns a tensor filled with random numbers from a normal distribution + with mean `0` and variance `1` (also called the standard normal + distribution). + + .. math:: + \text{out}_{i} \sim \mathcal{N}(0, 1) + + For complex dtypes, the tensor is i.i.d. sampled from a `complex normal distribution`_ with zero mean and + unit variance as + + .. math:: + \text{out}_{i} \sim \mathcal{CN}(0, 1) + + This is equivalent to separately sampling the real :math:`(\operatorname{Re})` and imaginary + :math:`(\operatorname{Im})` part of :math:`\text{out}_i` as + + .. math:: + \operatorname{Re}(\text{out}_{i}) \sim \mathcal{N}(0, \frac{1}{2}),\quad + \operatorname{Im}(\text{out}_{i}) \sim \mathcal{N}(0, \frac{1}{2}) + + The shape of the tensor is defined by the variable argument :attr:`size`. + + + Args: + size (int...): a sequence of integers defining the shape of the output tensor. + Can be a variable number of arguments or a collection like a list or tuple. 
+ + Keyword args: + generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling + out (Tensor, optional): the output tensor. + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + pin_memory (bool, optional): If set, returned tensor would be allocated in + the pinned memory. Works only for CPU tensors. Default: ``False``. + + Example:: + + >>> torch.randn(4) + tensor([-2.1436, 0.9966, 2.3426, -0.6366]) + >>> torch.randn(2, 3) + tensor([[ 1.5954, 2.8929, -1.0923], + [ 1.1719, -0.4709, -0.1996]]) + + .. _complex normal distribution: https://en.wikipedia.org/wiki/Complex_normal_distribution + """ + ... +@overload +def randn(*size: _int, generator: Optional[Generator], out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + randn(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False) -> Tensor + + + Returns a tensor filled with random numbers from a normal distribution + with mean `0` and variance `1` (also called the standard normal + distribution). + + .. math:: + \text{out}_{i} \sim \mathcal{N}(0, 1) + + For complex dtypes, the tensor is i.i.d. sampled from a `complex normal distribution`_ with zero mean and + unit variance as + + .. math:: + \text{out}_{i} \sim \mathcal{CN}(0, 1) + + This is equivalent to separately sampling the real :math:`(\operatorname{Re})` and imaginary + :math:`(\operatorname{Im})` part of :math:`\text{out}_i` as + + .. math:: + \operatorname{Re}(\text{out}_{i}) \sim \mathcal{N}(0, \frac{1}{2}),\quad + \operatorname{Im}(\text{out}_{i}) \sim \mathcal{N}(0, \frac{1}{2}) + + The shape of the tensor is defined by the variable argument :attr:`size`. + + + Args: + size (int...): a sequence of integers defining the shape of the output tensor. + Can be a variable number of arguments or a collection like a list or tuple. + + Keyword args: + generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling + out (Tensor, optional): the output tensor. + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. 
Default: ``False``. + pin_memory (bool, optional): If set, returned tensor would be allocated in + the pinned memory. Works only for CPU tensors. Default: ``False``. + + Example:: + + >>> torch.randn(4) + tensor([-2.1436, 0.9966, 2.3426, -0.6366]) + >>> torch.randn(2, 3) + tensor([[ 1.5954, 2.8929, -1.0923], + [ 1.1719, -0.4709, -0.1996]]) + + .. _complex normal distribution: https://en.wikipedia.org/wiki/Complex_normal_distribution + """ + ... +@overload +def randn(size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + randn(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False) -> Tensor + + + Returns a tensor filled with random numbers from a normal distribution + with mean `0` and variance `1` (also called the standard normal + distribution). + + .. math:: + \text{out}_{i} \sim \mathcal{N}(0, 1) + + For complex dtypes, the tensor is i.i.d. sampled from a `complex normal distribution`_ with zero mean and + unit variance as + + .. math:: + \text{out}_{i} \sim \mathcal{CN}(0, 1) + + This is equivalent to separately sampling the real :math:`(\operatorname{Re})` and imaginary + :math:`(\operatorname{Im})` part of :math:`\text{out}_i` as + + .. math:: + \operatorname{Re}(\text{out}_{i}) \sim \mathcal{N}(0, \frac{1}{2}),\quad + \operatorname{Im}(\text{out}_{i}) \sim \mathcal{N}(0, \frac{1}{2}) + + The shape of the tensor is defined by the variable argument :attr:`size`. + + + Args: + size (int...): a sequence of integers defining the shape of the output tensor. + Can be a variable number of arguments or a collection like a list or tuple. + + Keyword args: + generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling + out (Tensor, optional): the output tensor. + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + pin_memory (bool, optional): If set, returned tensor would be allocated in + the pinned memory. Works only for CPU tensors. Default: ``False``. + + Example:: + + >>> torch.randn(4) + tensor([-2.1436, 0.9966, 2.3426, -0.6366]) + >>> torch.randn(2, 3) + tensor([[ 1.5954, 2.8929, -1.0923], + [ 1.1719, -0.4709, -0.1996]]) + + .. _complex normal distribution: https://en.wikipedia.org/wiki/Complex_normal_distribution + """ + ... 
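+# A minimal usage sketch for ``torch.randn``, including the complex-dtype
+# sampling described above; the seed, shapes, and dtypes are illustrative
+# assumptions:
+#
+#     >>> import torch
+#     >>> g = torch.Generator().manual_seed(0)
+#     >>> torch.randn(2, 3, generator=g)      # standard normal, reproducible via the generator
+#     >>> torch.randn(4, dtype=torch.cfloat)  # complex normal: real/imag parts each ~ N(0, 1/2)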
+@overload +def randn(*size: _int, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + randn(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False) -> Tensor + + + Returns a tensor filled with random numbers from a normal distribution + with mean `0` and variance `1` (also called the standard normal + distribution). + + .. math:: + \text{out}_{i} \sim \mathcal{N}(0, 1) + + For complex dtypes, the tensor is i.i.d. sampled from a `complex normal distribution`_ with zero mean and + unit variance as + + .. math:: + \text{out}_{i} \sim \mathcal{CN}(0, 1) + + This is equivalent to separately sampling the real :math:`(\operatorname{Re})` and imaginary + :math:`(\operatorname{Im})` part of :math:`\text{out}_i` as + + .. math:: + \operatorname{Re}(\text{out}_{i}) \sim \mathcal{N}(0, \frac{1}{2}),\quad + \operatorname{Im}(\text{out}_{i}) \sim \mathcal{N}(0, \frac{1}{2}) + + The shape of the tensor is defined by the variable argument :attr:`size`. + + + Args: + size (int...): a sequence of integers defining the shape of the output tensor. + Can be a variable number of arguments or a collection like a list or tuple. + + Keyword args: + generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling + out (Tensor, optional): the output tensor. + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + pin_memory (bool, optional): If set, returned tensor would be allocated in + the pinned memory. Works only for CPU tensors. Default: ``False``. + + Example:: + + >>> torch.randn(4) + tensor([-2.1436, 0.9966, 2.3426, -0.6366]) + >>> torch.randn(2, 3) + tensor([[ 1.5954, 2.8929, -1.0923], + [ 1.1719, -0.4709, -0.1996]]) + + .. _complex normal distribution: https://en.wikipedia.org/wiki/Complex_normal_distribution + """ + ... +@overload +def randn(size: Sequence[Union[_int, SymInt]], *, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + randn(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False) -> Tensor + + + Returns a tensor filled with random numbers from a normal distribution + with mean `0` and variance `1` (also called the standard normal + distribution). + + .. math:: + \text{out}_{i} \sim \mathcal{N}(0, 1) + + For complex dtypes, the tensor is i.i.d. sampled from a `complex normal distribution`_ with zero mean and + unit variance as + + .. 
math:: + \text{out}_{i} \sim \mathcal{CN}(0, 1) + + This is equivalent to separately sampling the real :math:`(\operatorname{Re})` and imaginary + :math:`(\operatorname{Im})` part of :math:`\text{out}_i` as + + .. math:: + \operatorname{Re}(\text{out}_{i}) \sim \mathcal{N}(0, \frac{1}{2}),\quad + \operatorname{Im}(\text{out}_{i}) \sim \mathcal{N}(0, \frac{1}{2}) + + The shape of the tensor is defined by the variable argument :attr:`size`. + + + Args: + size (int...): a sequence of integers defining the shape of the output tensor. + Can be a variable number of arguments or a collection like a list or tuple. + + Keyword args: + generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling + out (Tensor, optional): the output tensor. + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + pin_memory (bool, optional): If set, returned tensor would be allocated in + the pinned memory. Works only for CPU tensors. Default: ``False``. + + Example:: + + >>> torch.randn(4) + tensor([-2.1436, 0.9966, 2.3426, -0.6366]) + >>> torch.randn(2, 3) + tensor([[ 1.5954, 2.8929, -1.0923], + [ 1.1719, -0.4709, -0.1996]]) + + .. _complex normal distribution: https://en.wikipedia.org/wiki/Complex_normal_distribution + """ + ... +@overload +def randn(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + randn(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False) -> Tensor + + + Returns a tensor filled with random numbers from a normal distribution + with mean `0` and variance `1` (also called the standard normal + distribution). + + .. math:: + \text{out}_{i} \sim \mathcal{N}(0, 1) + + For complex dtypes, the tensor is i.i.d. sampled from a `complex normal distribution`_ with zero mean and + unit variance as + + .. math:: + \text{out}_{i} \sim \mathcal{CN}(0, 1) + + This is equivalent to separately sampling the real :math:`(\operatorname{Re})` and imaginary + :math:`(\operatorname{Im})` part of :math:`\text{out}_i` as + + .. math:: + \operatorname{Re}(\text{out}_{i}) \sim \mathcal{N}(0, \frac{1}{2}),\quad + \operatorname{Im}(\text{out}_{i}) \sim \mathcal{N}(0, \frac{1}{2}) + + The shape of the tensor is defined by the variable argument :attr:`size`. + + + Args: + size (int...): a sequence of integers defining the shape of the output tensor. + Can be a variable number of arguments or a collection like a list or tuple. + + Keyword args: + generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling + out (Tensor, optional): the output tensor. + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. 
+ Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + pin_memory (bool, optional): If set, returned tensor would be allocated in + the pinned memory. Works only for CPU tensors. Default: ``False``. + + Example:: + + >>> torch.randn(4) + tensor([-2.1436, 0.9966, 2.3426, -0.6366]) + >>> torch.randn(2, 3) + tensor([[ 1.5954, 2.8929, -1.0923], + [ 1.1719, -0.4709, -0.1996]]) + + .. _complex normal distribution: https://en.wikipedia.org/wiki/Complex_normal_distribution + """ + ... +def randn_like(input: Tensor, *, memory_format: Optional[memory_format] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + randn_like(input, *, dtype=None, layout=None, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor + + Returns a tensor with the same size as :attr:`input` that is filled with + random numbers from a normal distribution with mean 0 and variance 1. Please refer to :func:`torch.randn` for the + sampling process of complex dtypes. ``torch.randn_like(input)`` is equivalent to + ``torch.randn(input.size(), dtype=input.dtype, layout=input.layout, device=input.device)``. + + Args: + input (Tensor): the size of :attr:`input` will determine size of the output tensor. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned Tensor. + Default: if ``None``, defaults to the dtype of :attr:`input`. + layout (:class:`torch.layout`, optional): the desired layout of returned tensor. + Default: if ``None``, defaults to the layout of :attr:`input`. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, defaults to the device of :attr:`input`. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + memory_format (:class:`torch.memory_format`, optional): the desired memory format of + returned Tensor. Default: ``torch.preserve_format``. + """ + ... +@overload +def randperm(n: Union[_int, SymInt], *, generator: Optional[Generator], out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + randperm(n, *, generator=None, out=None, dtype=torch.int64,layout=torch.strided, device=None, requires_grad=False, pin_memory=False) -> Tensor + + Returns a random permutation of integers from ``0`` to ``n - 1``. + + Args: + n (int): the upper bound (exclusive) + + Keyword args: + generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling + out (Tensor, optional): the output tensor. + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: ``torch.int64``. 
+ layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + pin_memory (bool, optional): If set, returned tensor would be allocated in + the pinned memory. Works only for CPU tensors. Default: ``False``. + + Example:: + + >>> torch.randperm(4) + tensor([2, 1, 0, 3]) + """ + ... +@overload +def randperm(n: Union[_int, SymInt], *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + randperm(n, *, generator=None, out=None, dtype=torch.int64,layout=torch.strided, device=None, requires_grad=False, pin_memory=False) -> Tensor + + Returns a random permutation of integers from ``0`` to ``n - 1``. + + Args: + n (int): the upper bound (exclusive) + + Keyword args: + generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling + out (Tensor, optional): the output tensor. + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: ``torch.int64``. + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + pin_memory (bool, optional): If set, returned tensor would be allocated in + the pinned memory. Works only for CPU tensors. Default: ``False``. + + Example:: + + >>> torch.randperm(4) + tensor([2, 1, 0, 3]) + """ + ... +def range(start: Number, end: Number, step: Number = 1, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, pin_memory: _bool = False) -> Tensor: + r""" + range(start=0, end, step=1, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + Returns a 1-D tensor of size :math:`\left\lfloor \frac{\text{end} - \text{start}}{\text{step}} \right\rfloor + 1` + with values from :attr:`start` to :attr:`end` with step :attr:`step`. Step is + the gap between two values in the tensor. + + .. math:: + \text{out}_{i+1} = \text{out}_i + \text{step}. + + .. warning:: + This function is deprecated and will be removed in a future release because its behavior is inconsistent with + Python's range builtin. Instead, use :func:`torch.arange`, which produces values in [start, end). + + Args: + start (float): the starting value for the set of points. Default: ``0``. + end (float): the ending value for the set of points + step (float): the gap between each pair of adjacent points. Default: ``1``. + + Keyword args: + out (Tensor, optional): the output tensor. 
+ dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). If `dtype` is not given, infer the data type from the other input + arguments. If any of `start`, `end`, or `stop` are floating-point, the + `dtype` is inferred to be the default dtype, see + :meth:`~torch.get_default_dtype`. Otherwise, the `dtype` is inferred to + be `torch.int64`. + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + + Example:: + + >>> torch.range(1, 4) + tensor([ 1., 2., 3., 4.]) + >>> torch.range(1, 4, 0.5) + tensor([ 1.0000, 1.5000, 2.0000, 2.5000, 3.0000, 3.5000, 4.0000]) + """ + ... +def ravel(input: Tensor) -> Tensor: + r""" + ravel(input) -> Tensor + + Return a contiguous flattened tensor. A copy is made only if needed. + + Args: + input (Tensor): the input tensor. + + Example:: + + >>> t = torch.tensor([[[1, 2], + ... [3, 4]], + ... [[5, 6], + ... [7, 8]]]) + >>> torch.ravel(t) + tensor([1, 2, 3, 4, 5, 6, 7, 8]) + """ + ... +def real(input: Tensor) -> Tensor: + r""" + real(input) -> Tensor + + Returns a new tensor containing real values of the :attr:`self` tensor. + The returned tensor and :attr:`self` share the same underlying storage. + + Args: + input (Tensor): the input tensor. + + Example:: + + >>> x=torch.randn(4, dtype=torch.cfloat) + >>> x + tensor([(0.3100+0.3553j), (-0.5445-0.7896j), (-1.6492-0.0633j), (-0.0638-0.8119j)]) + >>> x.real + tensor([ 0.3100, -0.5445, -1.6492, -0.0638]) + """ + ... +def reciprocal(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + reciprocal(input, *, out=None) -> Tensor + + Returns a new tensor with the reciprocal of the elements of :attr:`input` + + .. math:: + \text{out}_{i} = \frac{1}{\text{input}_{i}} + + .. note:: + Unlike NumPy's reciprocal, torch.reciprocal supports integral inputs. Integral + inputs to reciprocal are automatically :ref:`promoted ` to + the default scalar type. + + Args: + input (Tensor): the input tensor. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randn(4) + >>> a + tensor([-0.4595, -2.1219, -1.4314, 0.7298]) + >>> torch.reciprocal(a) + tensor([-2.1763, -0.4713, -0.6986, 1.3702]) + """ + ... +def reciprocal_(input: Tensor) -> Tensor: ... +def relu(input: Tensor) -> Tensor: ... +def relu_(input: Tensor) -> Tensor: ... +@overload +def remainder(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + remainder(input, other, *, out=None) -> Tensor + + Computes + `Python's modulus operation `_ + entrywise. The result has the same sign as the divisor :attr:`other` and its absolute value + is less than that of :attr:`other`. + + It may also be defined in terms of :func:`torch.div` as + + .. code:: python + + torch.remainder(a, b) == a - a.div(b, rounding_mode="floor") * b + + Supports :ref:`broadcasting to a common shape `, + :ref:`type promotion `, and integer and float inputs. + + .. note:: + Complex inputs are not supported. 
In some cases, it is not mathematically + possible to satisfy the definition of a modulo operation with complex numbers. + See :func:`torch.fmod` for how division by zero is handled. + + .. seealso:: + + :func:`torch.fmod` which implements C++'s `std::fmod `_. + This one is defined in terms of division rounding towards zero. + + Args: + input (Tensor or Scalar): the dividend + other (Tensor or Scalar): the divisor + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> torch.remainder(torch.tensor([-3., -2, -1, 1, 2, 3]), 2) + tensor([ 1., 0., 1., 1., 0., 1.]) + >>> torch.remainder(torch.tensor([1, 2, 3, 4, 5]), -1.5) + tensor([ -0.5000, -1.0000, 0.0000, -0.5000, -1.0000 ]) + """ + ... +@overload +def remainder(self: Union[Number, _complex], other: Tensor) -> Tensor: + r""" + remainder(input, other, *, out=None) -> Tensor + + Computes + `Python's modulus operation `_ + entrywise. The result has the same sign as the divisor :attr:`other` and its absolute value + is less than that of :attr:`other`. + + It may also be defined in terms of :func:`torch.div` as + + .. code:: python + + torch.remainder(a, b) == a - a.div(b, rounding_mode="floor") * b + + Supports :ref:`broadcasting to a common shape `, + :ref:`type promotion `, and integer and float inputs. + + .. note:: + Complex inputs are not supported. In some cases, it is not mathematically + possible to satisfy the definition of a modulo operation with complex numbers. + See :func:`torch.fmod` for how division by zero is handled. + + .. seealso:: + + :func:`torch.fmod` which implements C++'s `std::fmod `_. + This one is defined in terms of division rounding towards zero. + + Args: + input (Tensor or Scalar): the dividend + other (Tensor or Scalar): the divisor + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> torch.remainder(torch.tensor([-3., -2, -1, 1, 2, 3]), 2) + tensor([ 1., 0., 1., 1., 0., 1.]) + >>> torch.remainder(torch.tensor([1, 2, 3, 4, 5]), -1.5) + tensor([ -0.5000, -1.0000, 0.0000, -0.5000, -1.0000 ]) + """ + ... +@overload +def remainder(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: + r""" + remainder(input, other, *, out=None) -> Tensor + + Computes + `Python's modulus operation `_ + entrywise. The result has the same sign as the divisor :attr:`other` and its absolute value + is less than that of :attr:`other`. + + It may also be defined in terms of :func:`torch.div` as + + .. code:: python + + torch.remainder(a, b) == a - a.div(b, rounding_mode="floor") * b + + Supports :ref:`broadcasting to a common shape `, + :ref:`type promotion `, and integer and float inputs. + + .. note:: + Complex inputs are not supported. In some cases, it is not mathematically + possible to satisfy the definition of a modulo operation with complex numbers. + See :func:`torch.fmod` for how division by zero is handled. + + .. seealso:: + + :func:`torch.fmod` which implements C++'s `std::fmod `_. + This one is defined in terms of division rounding towards zero. + + Args: + input (Tensor or Scalar): the dividend + other (Tensor or Scalar): the divisor + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> torch.remainder(torch.tensor([-3., -2, -1, 1, 2, 3]), 2) + tensor([ 1., 0., 1., 1., 0., 1.]) + >>> torch.remainder(torch.tensor([1, 2, 3, 4, 5]), -1.5) + tensor([ -0.5000, -1.0000, 0.0000, -0.5000, -1.0000 ]) + """ + ... 
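+# A minimal illustrative sketch of the torch.remainder semantics documented above; the
+# `_remainder_sketch` helper name is hypothetical, values are taken from the example.
+def _remainder_sketch() -> None:
+    import torch
+    a = torch.tensor([-3.0, -2.0, -1.0, 1.0, 2.0, 3.0])
+    r = torch.remainder(a, 2)
+    # The result takes the sign of the divisor (Python-style modulus).
+    assert torch.equal(r, torch.tensor([1.0, 0.0, 1.0, 1.0, 0.0, 1.0]))
+    # Identity from the docstring: remainder(a, b) == a - a.div(b, rounding_mode="floor") * b
+    assert torch.equal(r, a - a.div(2, rounding_mode="floor") * 2)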
+def renorm(input: Tensor, p: Union[Number, _complex], dim: _int, maxnorm: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: + r""" + renorm(input, p, dim, maxnorm, *, out=None) -> Tensor + + Returns a tensor where each sub-tensor of :attr:`input` along dimension + :attr:`dim` is normalized such that the `p`-norm of the sub-tensor is lower + than the value :attr:`maxnorm` + + .. note:: If the norm of a row is lower than `maxnorm`, the row is unchanged + + Args: + input (Tensor): the input tensor. + p (float): the power for the norm computation + dim (int): the dimension to slice over to get the sub-tensors + maxnorm (float): the maximum norm to keep each sub-tensor under + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> x = torch.ones(3, 3) + >>> x[1].fill_(2) + tensor([ 2., 2., 2.]) + >>> x[2].fill_(3) + tensor([ 3., 3., 3.]) + >>> x + tensor([[ 1., 1., 1.], + [ 2., 2., 2.], + [ 3., 3., 3.]]) + >>> torch.renorm(x, 1, 0, 5) + tensor([[ 1.0000, 1.0000, 1.0000], + [ 1.6667, 1.6667, 1.6667], + [ 1.6667, 1.6667, 1.6667]]) + """ + ... +@overload +def repeat_interleave(input: Tensor, repeats: Tensor, dim: Optional[_int] = None, *, output_size: Optional[Union[_int, SymInt]] = None) -> Tensor: + r""" + repeat_interleave(input, repeats, dim=None, *, output_size=None) -> Tensor + + Repeat elements of a tensor. + + .. warning:: + + This is different from :meth:`torch.Tensor.repeat` but similar to ``numpy.repeat``. + + Args: + input (Tensor): the input tensor. + repeats (Tensor or int): The number of repetitions for each element. + repeats is broadcasted to fit the shape of the given axis. + dim (int, optional): The dimension along which to repeat values. + By default, use the flattened input array, and return a flat output + array. + + Keyword args: + output_size (int, optional): Total output size for the given axis + ( e.g. sum of repeats). If given, it will avoid stream synchronization + needed to calculate output shape of the tensor. + + Returns: + Tensor: Repeated tensor which has the same shape as input, except along the given axis. + + Example:: + + >>> x = torch.tensor([1, 2, 3]) + >>> x.repeat_interleave(2) + tensor([1, 1, 2, 2, 3, 3]) + >>> y = torch.tensor([[1, 2], [3, 4]]) + >>> torch.repeat_interleave(y, 2) + tensor([1, 1, 2, 2, 3, 3, 4, 4]) + >>> torch.repeat_interleave(y, 3, dim=1) + tensor([[1, 1, 1, 2, 2, 2], + [3, 3, 3, 4, 4, 4]]) + >>> torch.repeat_interleave(y, torch.tensor([1, 2]), dim=0) + tensor([[1, 2], + [3, 4], + [3, 4]]) + >>> torch.repeat_interleave(y, torch.tensor([1, 2]), dim=0, output_size=3) + tensor([[1, 2], + [3, 4], + [3, 4]]) + + If the `repeats` is `tensor([n1, n2, n3, ...])`, then the output will be + `tensor([0, 0, ..., 1, 1, ..., 2, 2, ..., ...])` where `0` appears `n1` times, + `1` appears `n2` times, `2` appears `n3` times, etc. + + .. function:: repeat_interleave(repeats, *) -> Tensor + :noindex: + + Repeats 0 repeats[0] times, 1 repeats[1] times, 2 repeats[2] times, etc. + + Args: + repeats (Tensor): The number of repetitions for each element. + + Returns: + Tensor: Repeated tensor of size `sum(repeats)`. + + Example:: + + >>> torch.repeat_interleave(torch.tensor([1, 2, 3])) + tensor([0, 1, 1, 2, 2, 2]) + """ + ... +@overload +def repeat_interleave(repeats: Tensor, *, output_size: Optional[Union[_int, SymInt]] = None) -> Tensor: + r""" + repeat_interleave(input, repeats, dim=None, *, output_size=None) -> Tensor + + Repeat elements of a tensor. + + .. 
warning:: + + This is different from :meth:`torch.Tensor.repeat` but similar to ``numpy.repeat``. + + Args: + input (Tensor): the input tensor. + repeats (Tensor or int): The number of repetitions for each element. + repeats is broadcasted to fit the shape of the given axis. + dim (int, optional): The dimension along which to repeat values. + By default, use the flattened input array, and return a flat output + array. + + Keyword args: + output_size (int, optional): Total output size for the given axis + ( e.g. sum of repeats). If given, it will avoid stream synchronization + needed to calculate output shape of the tensor. + + Returns: + Tensor: Repeated tensor which has the same shape as input, except along the given axis. + + Example:: + + >>> x = torch.tensor([1, 2, 3]) + >>> x.repeat_interleave(2) + tensor([1, 1, 2, 2, 3, 3]) + >>> y = torch.tensor([[1, 2], [3, 4]]) + >>> torch.repeat_interleave(y, 2) + tensor([1, 1, 2, 2, 3, 3, 4, 4]) + >>> torch.repeat_interleave(y, 3, dim=1) + tensor([[1, 1, 1, 2, 2, 2], + [3, 3, 3, 4, 4, 4]]) + >>> torch.repeat_interleave(y, torch.tensor([1, 2]), dim=0) + tensor([[1, 2], + [3, 4], + [3, 4]]) + >>> torch.repeat_interleave(y, torch.tensor([1, 2]), dim=0, output_size=3) + tensor([[1, 2], + [3, 4], + [3, 4]]) + + If the `repeats` is `tensor([n1, n2, n3, ...])`, then the output will be + `tensor([0, 0, ..., 1, 1, ..., 2, 2, ..., ...])` where `0` appears `n1` times, + `1` appears `n2` times, `2` appears `n3` times, etc. + + .. function:: repeat_interleave(repeats, *) -> Tensor + :noindex: + + Repeats 0 repeats[0] times, 1 repeats[1] times, 2 repeats[2] times, etc. + + Args: + repeats (Tensor): The number of repetitions for each element. + + Returns: + Tensor: Repeated tensor of size `sum(repeats)`. + + Example:: + + >>> torch.repeat_interleave(torch.tensor([1, 2, 3])) + tensor([0, 1, 1, 2, 2, 2]) + """ + ... +@overload +def repeat_interleave(input: Tensor, repeats: Union[_int, SymInt], dim: Optional[_int] = None, *, output_size: Optional[Union[_int, SymInt]] = None) -> Tensor: + r""" + repeat_interleave(input, repeats, dim=None, *, output_size=None) -> Tensor + + Repeat elements of a tensor. + + .. warning:: + + This is different from :meth:`torch.Tensor.repeat` but similar to ``numpy.repeat``. + + Args: + input (Tensor): the input tensor. + repeats (Tensor or int): The number of repetitions for each element. + repeats is broadcasted to fit the shape of the given axis. + dim (int, optional): The dimension along which to repeat values. + By default, use the flattened input array, and return a flat output + array. + + Keyword args: + output_size (int, optional): Total output size for the given axis + ( e.g. sum of repeats). If given, it will avoid stream synchronization + needed to calculate output shape of the tensor. + + Returns: + Tensor: Repeated tensor which has the same shape as input, except along the given axis. 
+ + Example:: + + >>> x = torch.tensor([1, 2, 3]) + >>> x.repeat_interleave(2) + tensor([1, 1, 2, 2, 3, 3]) + >>> y = torch.tensor([[1, 2], [3, 4]]) + >>> torch.repeat_interleave(y, 2) + tensor([1, 1, 2, 2, 3, 3, 4, 4]) + >>> torch.repeat_interleave(y, 3, dim=1) + tensor([[1, 1, 1, 2, 2, 2], + [3, 3, 3, 4, 4, 4]]) + >>> torch.repeat_interleave(y, torch.tensor([1, 2]), dim=0) + tensor([[1, 2], + [3, 4], + [3, 4]]) + >>> torch.repeat_interleave(y, torch.tensor([1, 2]), dim=0, output_size=3) + tensor([[1, 2], + [3, 4], + [3, 4]]) + + If the `repeats` is `tensor([n1, n2, n3, ...])`, then the output will be + `tensor([0, 0, ..., 1, 1, ..., 2, 2, ..., ...])` where `0` appears `n1` times, + `1` appears `n2` times, `2` appears `n3` times, etc. + + .. function:: repeat_interleave(repeats, *) -> Tensor + :noindex: + + Repeats 0 repeats[0] times, 1 repeats[1] times, 2 repeats[2] times, etc. + + Args: + repeats (Tensor): The number of repetitions for each element. + + Returns: + Tensor: Repeated tensor of size `sum(repeats)`. + + Example:: + + >>> torch.repeat_interleave(torch.tensor([1, 2, 3])) + tensor([0, 1, 1, 2, 2, 2]) + """ + ... +def reshape(input: Tensor, shape: Sequence[Union[_int, SymInt]]) -> Tensor: + r""" + reshape(input, shape) -> Tensor + + Returns a tensor with the same data and number of elements as :attr:`input`, + but with the specified shape. When possible, the returned tensor will be a view + of :attr:`input`. Otherwise, it will be a copy. Contiguous inputs and inputs + with compatible strides can be reshaped without copying, but you should not + depend on the copying vs. viewing behavior. + + See :meth:`torch.Tensor.view` on when it is possible to return a view. + + A single dimension may be -1, in which case it's inferred from the remaining + dimensions and the number of elements in :attr:`input`. + + Args: + input (Tensor): the tensor to be reshaped + shape (tuple of int): the new shape + + Example:: + + >>> a = torch.arange(4.) + >>> torch.reshape(a, (2, 2)) + tensor([[ 0., 1.], + [ 2., 3.]]) + >>> b = torch.tensor([[0, 1], [2, 3]]) + >>> torch.reshape(b, (-1,)) + tensor([ 0, 1, 2, 3]) + """ + ... +def resize_as_(input: Tensor, the_template: Tensor, *, memory_format: Optional[memory_format] = None) -> Tensor: ... +def resize_as_sparse_(input: Tensor, the_template: Tensor) -> Tensor: ... +def resolve_conj(input: Tensor) -> Tensor: + r""" + resolve_conj(input) -> Tensor + + Returns a new tensor with materialized conjugation if :attr:`input`'s conjugate bit is set to `True`, + else returns :attr:`input`. The output tensor will always have its conjugate bit set to `False`. + + Args: + input (Tensor): the input tensor. + + Example:: + + >>> x = torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j]) + >>> y = x.conj() + >>> y.is_conj() + True + >>> z = y.resolve_conj() + >>> z + tensor([-1 - 1j, -2 - 2j, 3 + 3j]) + >>> z.is_conj() + False + """ + ... +def resolve_neg(input: Tensor) -> Tensor: + r""" + resolve_neg(input) -> Tensor + + Returns a new tensor with materialized negation if :attr:`input`'s negative bit is set to `True`, + else returns :attr:`input`. The output tensor will always have its negative bit set to `False`. + + Args: + input (Tensor): the input tensor. + + Example:: + + >>> x = torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j]) + >>> y = x.conj() + >>> z = y.imag + >>> z.is_neg() + True + >>> out = z.resolve_neg() + >>> out + tensor([-1., -2., 3.]) + >>> out.is_neg() + False + """ + ... 
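+# A minimal illustrative sketch of resolve_conj/resolve_neg as documented above; the
+# `_resolve_conj_sketch` helper name is hypothetical, values are taken from the example.
+def _resolve_conj_sketch() -> None:
+    import torch
+    x = torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j])
+    y = x.conj()                      # lazy: only the conjugate bit is set
+    assert y.is_conj()
+    z = torch.resolve_conj(y)         # materializes the conjugation
+    assert not z.is_conj()
+    assert torch.equal(z, torch.tensor([-1 - 1j, -2 - 2j, 3 + 3j]))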
+@overload +def result_type(tensor: Tensor, other: Tensor) -> _dtype: + r""" + result_type(tensor1, tensor2) -> dtype + + Returns the :class:`torch.dtype` that would result from performing an arithmetic + operation on the provided input tensors. See type promotion :ref:`documentation ` + for more information on the type promotion logic. + + Args: + tensor1 (Tensor or Number): an input tensor or number + tensor2 (Tensor or Number): an input tensor or number + + Example:: + + >>> torch.result_type(torch.tensor([1, 2], dtype=torch.int), 1.0) + torch.float32 + >>> torch.result_type(torch.tensor([1, 2], dtype=torch.uint8), torch.tensor(1)) + torch.uint8 + """ + ... +@overload +def result_type(scalar: Union[Number, _complex], tensor: Tensor) -> _dtype: + r""" + result_type(tensor1, tensor2) -> dtype + + Returns the :class:`torch.dtype` that would result from performing an arithmetic + operation on the provided input tensors. See type promotion :ref:`documentation ` + for more information on the type promotion logic. + + Args: + tensor1 (Tensor or Number): an input tensor or number + tensor2 (Tensor or Number): an input tensor or number + + Example:: + + >>> torch.result_type(torch.tensor([1, 2], dtype=torch.int), 1.0) + torch.float32 + >>> torch.result_type(torch.tensor([1, 2], dtype=torch.uint8), torch.tensor(1)) + torch.uint8 + """ + ... +@overload +def result_type(tensor: Tensor, other: Union[Number, _complex]) -> _dtype: + r""" + result_type(tensor1, tensor2) -> dtype + + Returns the :class:`torch.dtype` that would result from performing an arithmetic + operation on the provided input tensors. See type promotion :ref:`documentation ` + for more information on the type promotion logic. + + Args: + tensor1 (Tensor or Number): an input tensor or number + tensor2 (Tensor or Number): an input tensor or number + + Example:: + + >>> torch.result_type(torch.tensor([1, 2], dtype=torch.int), 1.0) + torch.float32 + >>> torch.result_type(torch.tensor([1, 2], dtype=torch.uint8), torch.tensor(1)) + torch.uint8 + """ + ... +@overload +def result_type(scalar1: Union[Number, _complex], scalar2: Union[Number, _complex]) -> _dtype: + r""" + result_type(tensor1, tensor2) -> dtype + + Returns the :class:`torch.dtype` that would result from performing an arithmetic + operation on the provided input tensors. See type promotion :ref:`documentation ` + for more information on the type promotion logic. + + Args: + tensor1 (Tensor or Number): an input tensor or number + tensor2 (Tensor or Number): an input tensor or number + + Example:: + + >>> torch.result_type(torch.tensor([1, 2], dtype=torch.int), 1.0) + torch.float32 + >>> torch.result_type(torch.tensor([1, 2], dtype=torch.uint8), torch.tensor(1)) + torch.uint8 + """ + ... +@overload +def rnn_relu(data: Tensor, batch_sizes: Tensor, hx: Tensor, params: Union[Tuple[Tensor, ...], List[Tensor]], has_biases: _bool, num_layers: _int, dropout: _float, train: _bool, bidirectional: _bool) -> Tuple[Tensor, Tensor]: ... +@overload +def rnn_relu(input: Tensor, hx: Tensor, params: Union[Tuple[Tensor, ...], List[Tensor]], has_biases: _bool, num_layers: _int, dropout: _float, train: _bool, bidirectional: _bool, batch_first: _bool) -> Tuple[Tensor, Tensor]: ... +def rnn_relu_cell(input: Tensor, hx: Tensor, w_ih: Tensor, w_hh: Tensor, b_ih: Optional[Tensor] = None, b_hh: Optional[Tensor] = None) -> Tensor: ... 
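+# A minimal illustrative sketch of torch.result_type promotion as documented above; the
+# `_result_type_sketch` helper name is hypothetical, values are taken from the example.
+def _result_type_sketch() -> None:
+    import torch
+    # Mixing an int tensor with a Python float promotes to the default float dtype.
+    assert torch.result_type(torch.tensor([1, 2], dtype=torch.int), 1.0) == torch.float32
+    # A zero-dim int64 tensor does not promote a uint8 tensor.
+    assert torch.result_type(torch.tensor([1, 2], dtype=torch.uint8), torch.tensor(1)) == torch.uint8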
+@overload +def rnn_tanh(data: Tensor, batch_sizes: Tensor, hx: Tensor, params: Union[Tuple[Tensor, ...], List[Tensor]], has_biases: _bool, num_layers: _int, dropout: _float, train: _bool, bidirectional: _bool) -> Tuple[Tensor, Tensor]: ... +@overload +def rnn_tanh(input: Tensor, hx: Tensor, params: Union[Tuple[Tensor, ...], List[Tensor]], has_biases: _bool, num_layers: _int, dropout: _float, train: _bool, bidirectional: _bool, batch_first: _bool) -> Tuple[Tensor, Tensor]: ... +def rnn_tanh_cell(input: Tensor, hx: Tensor, w_ih: Tensor, w_hh: Tensor, b_ih: Optional[Tensor] = None, b_hh: Optional[Tensor] = None) -> Tensor: ... +def roll(input: Tensor, shifts: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]], dims: Union[_int, _size] = ()) -> Tensor: + r""" + roll(input, shifts, dims=None) -> Tensor + + Roll the tensor :attr:`input` along the given dimension(s). Elements that are + shifted beyond the last position are re-introduced at the first position. If + :attr:`dims` is `None`, the tensor will be flattened before rolling and then + restored to the original shape. + + Args: + input (Tensor): the input tensor. + shifts (int or tuple of ints): The number of places by which the elements + of the tensor are shifted. If shifts is a tuple, dims must be a tuple of + the same size, and each dimension will be rolled by the corresponding + value + dims (int or tuple of ints): Axis along which to roll + + Example:: + + >>> x = torch.tensor([1, 2, 3, 4, 5, 6, 7, 8]).view(4, 2) + >>> x + tensor([[1, 2], + [3, 4], + [5, 6], + [7, 8]]) + >>> torch.roll(x, 1) + tensor([[8, 1], + [2, 3], + [4, 5], + [6, 7]]) + >>> torch.roll(x, 1, 0) + tensor([[7, 8], + [1, 2], + [3, 4], + [5, 6]]) + >>> torch.roll(x, -1, 0) + tensor([[3, 4], + [5, 6], + [7, 8], + [1, 2]]) + >>> torch.roll(x, shifts=(2, 1), dims=(0, 1)) + tensor([[6, 5], + [8, 7], + [2, 1], + [4, 3]]) + """ + ... +def rot90(input: Tensor, k: _int = 1, dims: _size = (0,1)) -> Tensor: + r""" + rot90(input, k=1, dims=[0,1]) -> Tensor + + Rotate an n-D tensor by 90 degrees in the plane specified by dims axis. + Rotation direction is from the first towards the second axis if k > 0, and from the second towards the first for k < 0. + + Args: + input (Tensor): the input tensor. + k (int): number of times to rotate. Default value is 1 + dims (a list or tuple): axis to rotate. Default value is [0, 1] + + Example:: + + >>> x = torch.arange(4).view(2, 2) + >>> x + tensor([[0, 1], + [2, 3]]) + >>> torch.rot90(x, 1, [0, 1]) + tensor([[1, 3], + [0, 2]]) + + >>> x = torch.arange(8).view(2, 2, 2) + >>> x + tensor([[[0, 1], + [2, 3]], + + [[4, 5], + [6, 7]]]) + >>> torch.rot90(x, 1, [1, 2]) + tensor([[[1, 3], + [0, 2]], + + [[5, 7], + [4, 6]]]) + """ + ... +@overload +def round(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + round(input, *, decimals=0, out=None) -> Tensor + + Rounds elements of :attr:`input` to the nearest integer. + + For integer inputs, follows the array-api convention of returning a + copy of the input tensor. + The return type of output is same as that of input's dtype. + + .. note:: + This function implements the "round half to even" to + break ties when a number is equidistant from two + integers (e.g. `round(2.5)` is 2). + + When the :attr:\`decimals\` argument is specified the + algorithm used is similar to NumPy's `around`. This + algorithm is fast but inexact and it can easily + overflow for low precision dtypes. + Eg. `round(tensor([10000], dtype=torch.float16), decimals=3)` is `inf`. + + .. 
seealso:: + :func:`torch.ceil`, which rounds up. + :func:`torch.floor`, which rounds down. + :func:`torch.trunc`, which rounds towards zero. + + Args: + input (Tensor): the input tensor. + decimals (int): Number of decimal places to round to (default: 0). + If decimals is negative, it specifies the number of positions + to the left of the decimal point. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> torch.round(torch.tensor((4.7, -2.3, 9.1, -7.7))) + tensor([ 5., -2., 9., -8.]) + + >>> # Values equidistant from two integers are rounded towards the + >>> # the nearest even value (zero is treated as even) + >>> torch.round(torch.tensor([-0.5, 0.5, 1.5, 2.5])) + tensor([-0., 0., 2., 2.]) + + >>> # A positive decimals argument rounds to the to that decimal place + >>> torch.round(torch.tensor([0.1234567]), decimals=3) + tensor([0.1230]) + + >>> # A negative decimals argument rounds to the left of the decimal + >>> torch.round(torch.tensor([1200.1234567]), decimals=-3) + tensor([1000.]) + """ + ... +@overload +def round(input: Tensor, *, decimals: _int, out: Optional[Tensor] = None) -> Tensor: + r""" + round(input, *, decimals=0, out=None) -> Tensor + + Rounds elements of :attr:`input` to the nearest integer. + + For integer inputs, follows the array-api convention of returning a + copy of the input tensor. + The return type of output is same as that of input's dtype. + + .. note:: + This function implements the "round half to even" to + break ties when a number is equidistant from two + integers (e.g. `round(2.5)` is 2). + + When the :attr:\`decimals\` argument is specified the + algorithm used is similar to NumPy's `around`. This + algorithm is fast but inexact and it can easily + overflow for low precision dtypes. + Eg. `round(tensor([10000], dtype=torch.float16), decimals=3)` is `inf`. + + .. seealso:: + :func:`torch.ceil`, which rounds up. + :func:`torch.floor`, which rounds down. + :func:`torch.trunc`, which rounds towards zero. + + Args: + input (Tensor): the input tensor. + decimals (int): Number of decimal places to round to (default: 0). + If decimals is negative, it specifies the number of positions + to the left of the decimal point. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> torch.round(torch.tensor((4.7, -2.3, 9.1, -7.7))) + tensor([ 5., -2., 9., -8.]) + + >>> # Values equidistant from two integers are rounded towards the + >>> # the nearest even value (zero is treated as even) + >>> torch.round(torch.tensor([-0.5, 0.5, 1.5, 2.5])) + tensor([-0., 0., 2., 2.]) + + >>> # A positive decimals argument rounds to the to that decimal place + >>> torch.round(torch.tensor([0.1234567]), decimals=3) + tensor([0.1230]) + + >>> # A negative decimals argument rounds to the left of the decimal + >>> torch.round(torch.tensor([1200.1234567]), decimals=-3) + tensor([1000.]) + """ + ... +@overload +def round_(input: Tensor) -> Tensor: ... +@overload +def round_(input: Tensor, *, decimals: _int) -> Tensor: ... +def row_indices_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def row_stack(tensors: Union[Tuple[Tensor, ...], List[Tensor]], *, out: Optional[Tensor] = None) -> Tensor: + r""" + row_stack(tensors, *, out=None) -> Tensor + + Alias of :func:`torch.vstack`. + """ + ... +def rrelu(input: Tensor, lower: Union[Number, _complex] = 0.125, upper: Union[Number, _complex] = 0.3333333333333333, training: _bool = False, generator: Optional[Generator] = None) -> Tensor: ... 
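+# A minimal illustrative sketch of torch.round's half-to-even tie-breaking and the
+# decimals keyword as documented above; the `_round_sketch` helper name is hypothetical,
+# values are taken from the example.
+def _round_sketch() -> None:
+    import torch
+    # Ties are rounded to the nearest even value ("round half to even").
+    assert torch.equal(torch.round(torch.tensor([-0.5, 0.5, 1.5, 2.5])),
+                       torch.tensor([-0.0, 0.0, 2.0, 2.0]))
+    # Negative decimals round to the left of the decimal point.
+    assert torch.equal(torch.round(torch.tensor([1200.1234567]), decimals=-3),
+                       torch.tensor([1000.0]))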
+def rrelu_(input: Tensor, lower: Union[Number, _complex] = 0.125, upper: Union[Number, _complex] = 0.3333333333333333, training: _bool = False, generator: Optional[Generator] = None) -> Tensor: ... +def rsqrt(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + rsqrt(input, *, out=None) -> Tensor + + Returns a new tensor with the reciprocal of the square-root of each of + the elements of :attr:`input`. + + .. math:: + \text{out}_{i} = \frac{1}{\sqrt{\text{input}_{i}}} + + Args: + input (Tensor): the input tensor. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randn(4) + >>> a + tensor([-0.0370, 0.2970, 1.5420, -0.9105]) + >>> torch.rsqrt(a) + tensor([ nan, 1.8351, 0.8053, nan]) + """ + ... +def rsqrt_(input: Tensor) -> Tensor: ... +@overload +def rsub(input: Tensor, other: Tensor, *, alpha: Union[Number, _complex] = 1) -> Tensor: ... +@overload +def rsub(input: Tensor, other: Union[Number, _complex], alpha: Union[Number, _complex] = 1) -> Tensor: ... +def saddmm(input: Tensor, mat1: Tensor, mat2: Tensor, *, beta: Number = 1, alpha: Number = 1, out: Optional[Tensor] = None) -> Tensor: ... +def scalar_tensor(s: Union[Number, _complex], *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +@overload +def scatter(input: Tensor, dim: _int, index: Tensor, src: Tensor, *, reduce: str, out: Optional[Tensor] = None) -> Tensor: + r""" + scatter(input, dim, index, src) -> Tensor + + Out-of-place version of :meth:`torch.Tensor.scatter_` + """ + ... +@overload +def scatter(input: Tensor, dim: _int, index: Tensor, src: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + scatter(input, dim, index, src) -> Tensor + + Out-of-place version of :meth:`torch.Tensor.scatter_` + """ + ... +@overload +def scatter(input: Tensor, dim: _int, index: Tensor, value: Union[Number, _complex], *, reduce: str, out: Optional[Tensor] = None) -> Tensor: + r""" + scatter(input, dim, index, src) -> Tensor + + Out-of-place version of :meth:`torch.Tensor.scatter_` + """ + ... +@overload +def scatter(input: Tensor, dim: Union[str, ellipsis, None], index: Tensor, src: Tensor) -> Tensor: + r""" + scatter(input, dim, index, src) -> Tensor + + Out-of-place version of :meth:`torch.Tensor.scatter_` + """ + ... +@overload +def scatter(input: Tensor, dim: _int, index: Tensor, value: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: + r""" + scatter(input, dim, index, src) -> Tensor + + Out-of-place version of :meth:`torch.Tensor.scatter_` + """ + ... +@overload +def scatter(input: Tensor, dim: Union[str, ellipsis, None], index: Tensor, value: Union[Number, _complex]) -> Tensor: + r""" + scatter(input, dim, index, src) -> Tensor + + Out-of-place version of :meth:`torch.Tensor.scatter_` + """ + ... +@overload +def scatter_add(input: Tensor, dim: _int, index: Tensor, src: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + scatter_add(input, dim, index, src) -> Tensor + + Out-of-place version of :meth:`torch.Tensor.scatter_add_` + """ + ... +@overload +def scatter_add(input: Tensor, dim: Union[str, ellipsis, None], index: Tensor, src: Tensor) -> Tensor: + r""" + scatter_add(input, dim, index, src) -> Tensor + + Out-of-place version of :meth:`torch.Tensor.scatter_add_` + """ + ... 
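+# A minimal illustrative sketch of the out-of-place torch.scatter referenced above,
+# assuming the usual torch.Tensor.scatter_ semantics (for dim=0,
+# out[index[i][j]][j] = src[i][j]); the `_scatter_sketch` helper name is hypothetical.
+def _scatter_sketch() -> None:
+    import torch
+    src = torch.arange(1, 6).reshape(1, 5)          # tensor([[1, 2, 3, 4, 5]])
+    index = torch.tensor([[0, 1, 2, 0]])
+    out = torch.scatter(torch.zeros(3, 5, dtype=src.dtype), 0, index, src)
+    assert torch.equal(out, torch.tensor([[1, 0, 0, 4, 0],
+                                          [0, 2, 0, 0, 0],
+                                          [0, 0, 3, 0, 0]]))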
+def scatter_reduce(input: Tensor, dim: _int, index: Tensor, src: Tensor, reduce: str, *, include_self: _bool = True, out: Optional[Tensor] = None) -> Tensor: + r""" + scatter_reduce(input, dim, index, src, reduce, *, include_self=True) -> Tensor + + Out-of-place version of :meth:`torch.Tensor.scatter_reduce_` + """ + ... +@overload +def searchsorted(sorted_sequence: Tensor, input: Tensor, *, out_int32: _bool = False, right: _bool = False, side: Optional[str] = None, sorter: Optional[Tensor] = None, out: Optional[Tensor] = None) -> Tensor: + r""" + searchsorted(sorted_sequence, values, *, out_int32=False, right=False, side=None, out=None, sorter=None) -> Tensor + + Find the indices from the *innermost* dimension of :attr:`sorted_sequence` such that, if the + corresponding values in :attr:`values` were inserted before the indices, when sorted, the order + of the corresponding *innermost* dimension within :attr:`sorted_sequence` would be preserved. + Return a new tensor with the same size as :attr:`values`. More formally, + the returned index satisfies the following rules: + + .. list-table:: + :widths: 12 10 78 + :header-rows: 1 + + * - :attr:`sorted_sequence` + - :attr:`right` + - *returned index satisfies* + * - 1-D + - False + - ``sorted_sequence[i-1] < values[m][n]...[l][x] <= sorted_sequence[i]`` + * - 1-D + - True + - ``sorted_sequence[i-1] <= values[m][n]...[l][x] < sorted_sequence[i]`` + * - N-D + - False + - ``sorted_sequence[m][n]...[l][i-1] < values[m][n]...[l][x] <= sorted_sequence[m][n]...[l][i]`` + * - N-D + - True + - ``sorted_sequence[m][n]...[l][i-1] <= values[m][n]...[l][x] < sorted_sequence[m][n]...[l][i]`` + + Args: + sorted_sequence (Tensor): N-D or 1-D tensor, containing monotonically increasing sequence on the *innermost* + dimension unless :attr:`sorter` is provided, in which case the sequence does not + need to be sorted + values (Tensor or Scalar): N-D tensor or a Scalar containing the search value(s). + + Keyword args: + out_int32 (bool, optional): indicate the output data type. torch.int32 if True, torch.int64 otherwise. + Default value is False, i.e. default output data type is torch.int64. + right (bool, optional): if False, return the first suitable location that is found. If True, return the + last such index. If no suitable index found, return 0 for non-numerical value + (eg. nan, inf) or the size of *innermost* dimension within :attr:`sorted_sequence` + (one pass the last index of the *innermost* dimension). In other words, if False, + gets the lower bound index for each value in :attr:`values` on the corresponding + *innermost* dimension of the :attr:`sorted_sequence`. If True, gets the upper + bound index instead. Default value is False. :attr:`side` does the same and is + preferred. It will error if :attr:`side` is set to "left" while this is True. + side (str, optional): the same as :attr:`right` but preferred. "left" corresponds to False for :attr:`right` + and "right" corresponds to True for :attr:`right`. It will error if this is set to + "left" while :attr:`right` is True. Default value is None. + out (Tensor, optional): the output tensor, must be the same size as :attr:`values` if provided. 
+ sorter (LongTensor, optional): if provided, a tensor matching the shape of the unsorted + :attr:`sorted_sequence` containing a sequence of indices that sort it in the + ascending order on the innermost dimension + + + Example:: + + >>> sorted_sequence = torch.tensor([[1, 3, 5, 7, 9], [2, 4, 6, 8, 10]]) + >>> sorted_sequence + tensor([[ 1, 3, 5, 7, 9], + [ 2, 4, 6, 8, 10]]) + >>> values = torch.tensor([[3, 6, 9], [3, 6, 9]]) + >>> values + tensor([[3, 6, 9], + [3, 6, 9]]) + >>> torch.searchsorted(sorted_sequence, values) + tensor([[1, 3, 4], + [1, 2, 4]]) + >>> torch.searchsorted(sorted_sequence, values, side='right') + tensor([[2, 3, 5], + [1, 3, 4]]) + + >>> sorted_sequence_1d = torch.tensor([1, 3, 5, 7, 9]) + >>> sorted_sequence_1d + tensor([1, 3, 5, 7, 9]) + >>> torch.searchsorted(sorted_sequence_1d, values) + tensor([[1, 3, 4], + [1, 3, 4]]) + """ + ... +@overload +def searchsorted(sorted_sequence: Tensor, self: Union[Number, _complex], *, out_int32: _bool = False, right: _bool = False, side: Optional[str] = None, sorter: Optional[Tensor] = None, out: Optional[Tensor] = None) -> Tensor: + r""" + searchsorted(sorted_sequence, values, *, out_int32=False, right=False, side=None, out=None, sorter=None) -> Tensor + + Find the indices from the *innermost* dimension of :attr:`sorted_sequence` such that, if the + corresponding values in :attr:`values` were inserted before the indices, when sorted, the order + of the corresponding *innermost* dimension within :attr:`sorted_sequence` would be preserved. + Return a new tensor with the same size as :attr:`values`. More formally, + the returned index satisfies the following rules: + + .. list-table:: + :widths: 12 10 78 + :header-rows: 1 + + * - :attr:`sorted_sequence` + - :attr:`right` + - *returned index satisfies* + * - 1-D + - False + - ``sorted_sequence[i-1] < values[m][n]...[l][x] <= sorted_sequence[i]`` + * - 1-D + - True + - ``sorted_sequence[i-1] <= values[m][n]...[l][x] < sorted_sequence[i]`` + * - N-D + - False + - ``sorted_sequence[m][n]...[l][i-1] < values[m][n]...[l][x] <= sorted_sequence[m][n]...[l][i]`` + * - N-D + - True + - ``sorted_sequence[m][n]...[l][i-1] <= values[m][n]...[l][x] < sorted_sequence[m][n]...[l][i]`` + + Args: + sorted_sequence (Tensor): N-D or 1-D tensor, containing monotonically increasing sequence on the *innermost* + dimension unless :attr:`sorter` is provided, in which case the sequence does not + need to be sorted + values (Tensor or Scalar): N-D tensor or a Scalar containing the search value(s). + + Keyword args: + out_int32 (bool, optional): indicate the output data type. torch.int32 if True, torch.int64 otherwise. + Default value is False, i.e. default output data type is torch.int64. + right (bool, optional): if False, return the first suitable location that is found. If True, return the + last such index. If no suitable index found, return 0 for non-numerical value + (eg. nan, inf) or the size of *innermost* dimension within :attr:`sorted_sequence` + (one pass the last index of the *innermost* dimension). In other words, if False, + gets the lower bound index for each value in :attr:`values` on the corresponding + *innermost* dimension of the :attr:`sorted_sequence`. If True, gets the upper + bound index instead. Default value is False. :attr:`side` does the same and is + preferred. It will error if :attr:`side` is set to "left" while this is True. + side (str, optional): the same as :attr:`right` but preferred. 
"left" corresponds to False for :attr:`right` + and "right" corresponds to True for :attr:`right`. It will error if this is set to + "left" while :attr:`right` is True. Default value is None. + out (Tensor, optional): the output tensor, must be the same size as :attr:`values` if provided. + sorter (LongTensor, optional): if provided, a tensor matching the shape of the unsorted + :attr:`sorted_sequence` containing a sequence of indices that sort it in the + ascending order on the innermost dimension + + + Example:: + + >>> sorted_sequence = torch.tensor([[1, 3, 5, 7, 9], [2, 4, 6, 8, 10]]) + >>> sorted_sequence + tensor([[ 1, 3, 5, 7, 9], + [ 2, 4, 6, 8, 10]]) + >>> values = torch.tensor([[3, 6, 9], [3, 6, 9]]) + >>> values + tensor([[3, 6, 9], + [3, 6, 9]]) + >>> torch.searchsorted(sorted_sequence, values) + tensor([[1, 3, 4], + [1, 2, 4]]) + >>> torch.searchsorted(sorted_sequence, values, side='right') + tensor([[2, 3, 5], + [1, 3, 4]]) + + >>> sorted_sequence_1d = torch.tensor([1, 3, 5, 7, 9]) + >>> sorted_sequence_1d + tensor([1, 3, 5, 7, 9]) + >>> torch.searchsorted(sorted_sequence_1d, values) + tensor([[1, 3, 4], + [1, 3, 4]]) + """ + ... +def segment_reduce(data: Tensor, reduce: str, *, lengths: Optional[Tensor] = None, indices: Optional[Tensor] = None, offsets: Optional[Tensor] = None, axis: _int = 0, unsafe: _bool = False, initial: Optional[Union[Number, _complex]] = None) -> Tensor: ... +@overload +def select(input: Tensor, dim: _int, index: Union[_int, SymInt]) -> Tensor: + r""" + select(input, dim, index) -> Tensor + + Slices the :attr:`input` tensor along the selected dimension at the given index. + This function returns a view of the original tensor with the given dimension removed. + + .. note:: If :attr:`input` is a sparse tensor and returning a view of + the tensor is not possible, a RuntimeError exception is + raised. In this is the case, consider using + :func:`torch.select_copy` function. + + Args: + input (Tensor): the input tensor. + dim (int): the dimension to slice + index (int): the index to select with + + .. note:: + + :meth:`select` is equivalent to slicing. For example, + ``tensor.select(0, index)`` is equivalent to ``tensor[index]`` and + ``tensor.select(2, index)`` is equivalent to ``tensor[:,:,index]``. + """ + ... +@overload +def select(input: Tensor, dim: Union[str, ellipsis, None], index: _int) -> Tensor: + r""" + select(input, dim, index) -> Tensor + + Slices the :attr:`input` tensor along the selected dimension at the given index. + This function returns a view of the original tensor with the given dimension removed. + + .. note:: If :attr:`input` is a sparse tensor and returning a view of + the tensor is not possible, a RuntimeError exception is + raised. In this is the case, consider using + :func:`torch.select_copy` function. + + Args: + input (Tensor): the input tensor. + dim (int): the dimension to slice + index (int): the index to select with + + .. note:: + + :meth:`select` is equivalent to slicing. For example, + ``tensor.select(0, index)`` is equivalent to ``tensor[index]`` and + ``tensor.select(2, index)`` is equivalent to ``tensor[:,:,index]``. + """ + ... +def select_copy(input: Tensor, dim: _int, index: Union[_int, SymInt], *, out: Optional[Tensor] = None) -> Tensor: + r""" + Performs the same operation as :func:`torch.select`, but all output tensors + are freshly created instead of aliasing the input. + """ + ... 
+def select_scatter(input: Tensor, src: Tensor, dim: _int, index: Union[_int, SymInt]) -> Tensor: + r""" + select_scatter(input, src, dim, index) -> Tensor + + Embeds the values of the :attr:`src` tensor into :attr:`input` at the given index. + This function returns a tensor with fresh storage; it does not create a view. + + + Args: + input (Tensor): the input tensor. + src (Tensor): The tensor to embed into :attr:`input` + dim (int): the dimension to insert the slice into. + index (int): the index to select with + + .. note:: + + :attr:`src` must be of the proper size in order to be embedded + into :attr:`input`. Specifically, it should have the same shape as + ``torch.select(input, dim, index)`` + + Example:: + + >>> a = torch.zeros(2, 2) + >>> b = torch.ones(2) + >>> a.select_scatter(b, 0, 0) + tensor([[1., 1.], + [0., 0.]]) + """ + ... +def selu(input: Tensor) -> Tensor: ... +def selu_(input: Tensor) -> Tensor: ... +def set_flush_denormal(mode: _bool) -> _bool: + r""" + set_flush_denormal(mode) -> bool + + Disables denormal floating numbers on CPU. + + Returns ``True`` if your system supports flushing denormal numbers and it + successfully configures flush denormal mode. :meth:`~torch.set_flush_denormal` + is supported on x86 architectures supporting SSE3 and AArch64 architecture. + + Args: + mode (bool): Controls whether to enable flush denormal mode or not + + Example:: + + >>> torch.set_flush_denormal(True) + True + >>> torch.tensor([1e-323], dtype=torch.float64) + tensor([ 0.], dtype=torch.float64) + >>> torch.set_flush_denormal(False) + True + >>> torch.tensor([1e-323], dtype=torch.float64) + tensor(9.88131e-324 * + [ 1.0000], dtype=torch.float64) + """ + ... +def set_num_interop_threads(num: _int) -> None: + r""" + set_num_interop_threads(int) + + Sets the number of threads used for interop parallelism + (e.g. in JIT interpreter) on CPU. + + .. warning:: + Can only be called once and before any inter-op parallel work + is started (e.g. JIT execution). + """ + ... +def set_num_threads(num: _int) -> None: + r""" + set_num_threads(int) + + Sets the number of threads used for intraop parallelism on CPU. + + .. warning:: + To ensure that the correct number of threads is used, set_num_threads + must be called before running eager, JIT or autograd code. + """ + ... +def sgn(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + sgn(input, *, out=None) -> Tensor + + This function is an extension of torch.sign() to complex tensors. + It computes a new tensor whose elements have + the same angles as the corresponding elements of :attr:`input` and + absolute values (i.e. magnitudes) of one for complex tensors and + is equivalent to torch.sign() for non-complex tensors. + + .. math:: + \text{out}_{i} = \begin{cases} + 0 & |\text{{input}}_i| == 0 \\ + \frac{{\text{{input}}_i}}{|{\text{{input}}_i}|} & \text{otherwise} + \end{cases} + + + Args: + input (Tensor): the input tensor. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> t = torch.tensor([3+4j, 7-24j, 0, 1+2j]) + >>> t.sgn() + tensor([0.6000+0.8000j, 0.2800-0.9600j, 0.0000+0.0000j, 0.4472+0.8944j]) + """ + ... +def sigmoid(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + sigmoid(input, *, out=None) -> Tensor + + Alias for :func:`torch.special.expit`. + """ + ... +def sigmoid_(input: Tensor) -> Tensor: ... 
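+# A minimal illustrative sketch of torch.select_scatter as documented above; the
+# `_select_scatter_sketch` helper name is hypothetical, values are taken from the example.
+def _select_scatter_sketch() -> None:
+    import torch
+    a = torch.zeros(2, 2)
+    b = torch.ones(2)
+    # Writes b into row 0 of a fresh copy; `a` itself is left untouched.
+    out = torch.select_scatter(a, b, 0, 0)
+    assert torch.equal(out, torch.tensor([[1.0, 1.0], [0.0, 0.0]]))
+    assert torch.equal(a, torch.zeros(2, 2))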
+def sign(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + sign(input, *, out=None) -> Tensor + + Returns a new tensor with the signs of the elements of :attr:`input`. + + .. math:: + \text{out}_{i} = \operatorname{sgn}(\text{input}_{i}) + + Args: + input (Tensor): the input tensor. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.tensor([0.7, -1.2, 0., 2.3]) + >>> a + tensor([ 0.7000, -1.2000, 0.0000, 2.3000]) + >>> torch.sign(a) + tensor([ 1., -1., 0., 1.]) + """ + ... +def signbit(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + signbit(input, *, out=None) -> Tensor + + Tests if each element of :attr:`input` has its sign bit set or not. + + Args: + input (Tensor): the input tensor. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.tensor([0.7, -1.2, 0., 2.3]) + >>> torch.signbit(a) + tensor([ False, True, False, False]) + >>> a = torch.tensor([-0.0, 0.0]) + >>> torch.signbit(a) + tensor([ True, False]) + + .. note:: + signbit handles signed zeros, so negative zero (-0) returns True. + """ + ... +def sin(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + sin(input, *, out=None) -> Tensor + + Returns a new tensor with the sine of the elements of :attr:`input`. + + .. math:: + \text{out}_{i} = \sin(\text{input}_{i}) + + Args: + input (Tensor): the input tensor. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randn(4) + >>> a + tensor([-0.5461, 0.1347, -2.7266, -0.2746]) + >>> torch.sin(a) + tensor([-0.5194, 0.1343, -0.4032, -0.2711]) + """ + ... +def sin_(input: Tensor) -> Tensor: ... +def sinc(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + sinc(input, *, out=None) -> Tensor + + Alias for :func:`torch.special.sinc`. + """ + ... +def sinc_(input: Tensor) -> Tensor: ... +def sinh(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + sinh(input, *, out=None) -> Tensor + + Returns a new tensor with the hyperbolic sine of the elements of + :attr:`input`. + + .. math:: + \text{out}_{i} = \sinh(\text{input}_{i}) + + Args: + input (Tensor): the input tensor. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randn(4) + >>> a + tensor([ 0.5380, -0.8632, -0.1265, 0.9399]) + >>> torch.sinh(a) + tensor([ 0.5644, -0.9744, -0.1268, 1.0845]) + + .. note:: + When :attr:`input` is on the CPU, the implementation of torch.sinh may use + the Sleef library, which rounds very large results to infinity or negative + infinity. See `here `_ for details. + """ + ... +def sinh_(input: Tensor) -> Tensor: ... +def slice_copy(input: Tensor, dim: _int = 0, start: Optional[Union[_int, SymInt]] = None, end: Optional[Union[_int, SymInt]] = None, step: Union[_int, SymInt] = 1, *, out: Optional[Tensor] = None) -> Tensor: + r""" + Performs the same operation as :func:`torch.slice`, but all output tensors + are freshly created instead of aliasing the input. + """ + ... +def slice_inverse(input: Tensor, src: Tensor, dim: _int = 0, start: Optional[Union[_int, SymInt]] = None, end: Optional[Union[_int, SymInt]] = None, step: Union[_int, SymInt] = 1) -> Tensor: ... 
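+# A minimal illustrative sketch contrasting torch.sign and torch.signbit as documented
+# above; the `_sign_signbit_sketch` helper name is hypothetical, values are taken from
+# the examples.
+def _sign_signbit_sketch() -> None:
+    import torch
+    a = torch.tensor([0.7, -1.2, 0.0, 2.3])
+    assert torch.equal(torch.sign(a), torch.tensor([1.0, -1.0, 0.0, 1.0]))
+    assert torch.equal(torch.signbit(a), torch.tensor([False, True, False, False]))
+    # signbit distinguishes negative zero; sign does not.
+    assert torch.equal(torch.signbit(torch.tensor([-0.0, 0.0])),
+                       torch.tensor([True, False]))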
+def slice_scatter(input: Tensor, src: Tensor, dim: _int = 0, start: Optional[Union[_int, SymInt]] = None, end: Optional[Union[_int, SymInt]] = None, step: Union[_int, SymInt] = 1, *, out: Optional[Tensor] = None) -> Tensor: + r""" + slice_scatter(input, src, dim=0, start=None, end=None, step=1) -> Tensor + + Embeds the values of the :attr:`src` tensor into :attr:`input` at the given + dimension. + This function returns a tensor with fresh storage; it does not create a view. + + + Args: + input (Tensor): the input tensor. + src (Tensor): The tensor to embed into :attr:`input` + dim (int): the dimension to insert the slice into + start (Optional[int]): the start index of where to insert the slice + end (Optional[int]): the end index of where to insert the slice + step (int): the how many elements to skip in + + Example:: + + >>> a = torch.zeros(8, 8) + >>> b = torch.ones(2, 8) + >>> a.slice_scatter(b, start=6) + tensor([[0., 0., 0., 0., 0., 0., 0., 0.], + [0., 0., 0., 0., 0., 0., 0., 0.], + [0., 0., 0., 0., 0., 0., 0., 0.], + [0., 0., 0., 0., 0., 0., 0., 0.], + [0., 0., 0., 0., 0., 0., 0., 0.], + [0., 0., 0., 0., 0., 0., 0., 0.], + [1., 1., 1., 1., 1., 1., 1., 1.], + [1., 1., 1., 1., 1., 1., 1., 1.]]) + + >>> b = torch.ones(8, 2) + >>> a.slice_scatter(b, dim=1, start=2, end=6, step=2) + tensor([[0., 0., 1., 0., 1., 0., 0., 0.], + [0., 0., 1., 0., 1., 0., 0., 0.], + [0., 0., 1., 0., 1., 0., 0., 0.], + [0., 0., 1., 0., 1., 0., 0., 0.], + [0., 0., 1., 0., 1., 0., 0., 0.], + [0., 0., 1., 0., 1., 0., 0., 0.], + [0., 0., 1., 0., 1., 0., 0., 0.], + [0., 0., 1., 0., 1., 0., 0., 0.]]) + """ + ... +def slogdet(input: Tensor, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.slogdet: + r""" + slogdet(input) -> (Tensor, Tensor) + + Alias for :func:`torch.linalg.slogdet` + """ + ... +def smm(input: Tensor, mat2: Tensor) -> Tensor: + r""" + smm(input, mat) -> Tensor + + Performs a matrix multiplication of the sparse matrix :attr:`input` + with the dense matrix :attr:`mat`. + + Args: + input (Tensor): a sparse matrix to be matrix multiplied + mat (Tensor): a dense matrix to be matrix multiplied + """ + ... +@overload +def softmax(input: Tensor, dim: _int, dtype: Optional[_dtype] = None, *, out: Optional[Tensor] = None) -> Tensor: + r""" + softmax(input, dim, *, dtype=None) -> Tensor + + Alias for :func:`torch.nn.functional.softmax`. + """ + ... +@overload +def softmax(input: Tensor, dim: Union[str, ellipsis, None], *, dtype: Optional[_dtype] = None) -> Tensor: + r""" + softmax(input, dim, *, dtype=None) -> Tensor + + Alias for :func:`torch.nn.functional.softmax`. + """ + ... +@overload +def sort(input: Tensor, *, stable: Optional[_bool], dim: _int = -1, descending: _bool = False, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.sort: + r""" + sort(input, dim=-1, descending=False, stable=False, *, out=None) -> (Tensor, LongTensor) + + Sorts the elements of the :attr:`input` tensor along a given dimension + in ascending order by value. + + If :attr:`dim` is not given, the last dimension of the `input` is chosen. + + If :attr:`descending` is ``True`` then the elements are sorted in descending + order by value. + + If :attr:`stable` is ``True`` then the sorting routine becomes stable, preserving + the order of equivalent elements. + + A namedtuple of (values, indices) is returned, where the `values` are the + sorted values and `indices` are the indices of the elements in the original + `input` tensor. 
+ + Args: + input (Tensor): the input tensor. + dim (int, optional): the dimension to sort along + descending (bool, optional): controls the sorting order (ascending or descending) + stable (bool, optional): makes the sorting routine stable, which guarantees that the order + of equivalent elements is preserved. + + Keyword args: + out (tuple, optional): the output tuple of (`Tensor`, `LongTensor`) that can + be optionally given to be used as output buffers + + Example:: + + >>> x = torch.randn(3, 4) + >>> sorted, indices = torch.sort(x) + >>> sorted + tensor([[-0.2162, 0.0608, 0.6719, 2.3332], + [-0.5793, 0.0061, 0.6058, 0.9497], + [-0.5071, 0.3343, 0.9553, 1.0960]]) + >>> indices + tensor([[ 1, 0, 2, 3], + [ 3, 1, 0, 2], + [ 0, 3, 1, 2]]) + + >>> sorted, indices = torch.sort(x, 0) + >>> sorted + tensor([[-0.5071, -0.2162, 0.6719, -0.5793], + [ 0.0608, 0.0061, 0.9497, 0.3343], + [ 0.6058, 0.9553, 1.0960, 2.3332]]) + >>> indices + tensor([[ 2, 0, 0, 1], + [ 0, 1, 1, 2], + [ 1, 2, 2, 0]]) + >>> x = torch.tensor([0, 1] * 9) + >>> x.sort() + torch.return_types.sort( + values=tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1]), + indices=tensor([ 2, 16, 4, 6, 14, 8, 0, 10, 12, 9, 17, 15, 13, 11, 7, 5, 3, 1])) + >>> x.sort(stable=True) + torch.return_types.sort( + values=tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1]), + indices=tensor([ 0, 2, 4, 6, 8, 10, 12, 14, 16, 1, 3, 5, 7, 9, 11, 13, 15, 17])) + """ + ... +@overload +def sort(input: Tensor, dim: _int = -1, descending: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.sort: + r""" + sort(input, dim=-1, descending=False, stable=False, *, out=None) -> (Tensor, LongTensor) + + Sorts the elements of the :attr:`input` tensor along a given dimension + in ascending order by value. + + If :attr:`dim` is not given, the last dimension of the `input` is chosen. + + If :attr:`descending` is ``True`` then the elements are sorted in descending + order by value. + + If :attr:`stable` is ``True`` then the sorting routine becomes stable, preserving + the order of equivalent elements. + + A namedtuple of (values, indices) is returned, where the `values` are the + sorted values and `indices` are the indices of the elements in the original + `input` tensor. + + Args: + input (Tensor): the input tensor. + dim (int, optional): the dimension to sort along + descending (bool, optional): controls the sorting order (ascending or descending) + stable (bool, optional): makes the sorting routine stable, which guarantees that the order + of equivalent elements is preserved. 
+ + Keyword args: + out (tuple, optional): the output tuple of (`Tensor`, `LongTensor`) that can + be optionally given to be used as output buffers + + Example:: + + >>> x = torch.randn(3, 4) + >>> sorted, indices = torch.sort(x) + >>> sorted + tensor([[-0.2162, 0.0608, 0.6719, 2.3332], + [-0.5793, 0.0061, 0.6058, 0.9497], + [-0.5071, 0.3343, 0.9553, 1.0960]]) + >>> indices + tensor([[ 1, 0, 2, 3], + [ 3, 1, 0, 2], + [ 0, 3, 1, 2]]) + + >>> sorted, indices = torch.sort(x, 0) + >>> sorted + tensor([[-0.5071, -0.2162, 0.6719, -0.5793], + [ 0.0608, 0.0061, 0.9497, 0.3343], + [ 0.6058, 0.9553, 1.0960, 2.3332]]) + >>> indices + tensor([[ 2, 0, 0, 1], + [ 0, 1, 1, 2], + [ 1, 2, 2, 0]]) + >>> x = torch.tensor([0, 1] * 9) + >>> x.sort() + torch.return_types.sort( + values=tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1]), + indices=tensor([ 2, 16, 4, 6, 14, 8, 0, 10, 12, 9, 17, 15, 13, 11, 7, 5, 3, 1])) + >>> x.sort(stable=True) + torch.return_types.sort( + values=tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1]), + indices=tensor([ 0, 2, 4, 6, 8, 10, 12, 14, 16, 1, 3, 5, 7, 9, 11, 13, 15, 17])) + """ + ... +@overload +def sort(input: Tensor, *, stable: Optional[_bool], dim: Union[str, ellipsis, None], descending: _bool = False, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.sort: + r""" + sort(input, dim=-1, descending=False, stable=False, *, out=None) -> (Tensor, LongTensor) + + Sorts the elements of the :attr:`input` tensor along a given dimension + in ascending order by value. + + If :attr:`dim` is not given, the last dimension of the `input` is chosen. + + If :attr:`descending` is ``True`` then the elements are sorted in descending + order by value. + + If :attr:`stable` is ``True`` then the sorting routine becomes stable, preserving + the order of equivalent elements. + + A namedtuple of (values, indices) is returned, where the `values` are the + sorted values and `indices` are the indices of the elements in the original + `input` tensor. + + Args: + input (Tensor): the input tensor. + dim (int, optional): the dimension to sort along + descending (bool, optional): controls the sorting order (ascending or descending) + stable (bool, optional): makes the sorting routine stable, which guarantees that the order + of equivalent elements is preserved. + + Keyword args: + out (tuple, optional): the output tuple of (`Tensor`, `LongTensor`) that can + be optionally given to be used as output buffers + + Example:: + + >>> x = torch.randn(3, 4) + >>> sorted, indices = torch.sort(x) + >>> sorted + tensor([[-0.2162, 0.0608, 0.6719, 2.3332], + [-0.5793, 0.0061, 0.6058, 0.9497], + [-0.5071, 0.3343, 0.9553, 1.0960]]) + >>> indices + tensor([[ 1, 0, 2, 3], + [ 3, 1, 0, 2], + [ 0, 3, 1, 2]]) + + >>> sorted, indices = torch.sort(x, 0) + >>> sorted + tensor([[-0.5071, -0.2162, 0.6719, -0.5793], + [ 0.0608, 0.0061, 0.9497, 0.3343], + [ 0.6058, 0.9553, 1.0960, 2.3332]]) + >>> indices + tensor([[ 2, 0, 0, 1], + [ 0, 1, 1, 2], + [ 1, 2, 2, 0]]) + >>> x = torch.tensor([0, 1] * 9) + >>> x.sort() + torch.return_types.sort( + values=tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1]), + indices=tensor([ 2, 16, 4, 6, 14, 8, 0, 10, 12, 9, 17, 15, 13, 11, 7, 5, 3, 1])) + >>> x.sort(stable=True) + torch.return_types.sort( + values=tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1]), + indices=tensor([ 0, 2, 4, 6, 8, 10, 12, 14, 16, 1, 3, 5, 7, 9, 11, 13, 15, 17])) + """ + ... 
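+# Editorial sketch (not part of the generated stubs): torch.sort returns a
+# namedtuple, so the two outputs can be used by name as well as by unpacking:
+#
+#     >>> t = torch.tensor([3, 1, 2])
+#     >>> result = torch.sort(t, descending=True)
+#     >>> result.values
+#     tensor([3, 2, 1])
+#     >>> result.indices
+#     tensor([0, 2, 1])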
+@overload +def sort(input: Tensor, dim: Union[str, ellipsis, None], descending: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.sort: + r""" + sort(input, dim=-1, descending=False, stable=False, *, out=None) -> (Tensor, LongTensor) + + Sorts the elements of the :attr:`input` tensor along a given dimension + in ascending order by value. + + If :attr:`dim` is not given, the last dimension of the `input` is chosen. + + If :attr:`descending` is ``True`` then the elements are sorted in descending + order by value. + + If :attr:`stable` is ``True`` then the sorting routine becomes stable, preserving + the order of equivalent elements. + + A namedtuple of (values, indices) is returned, where the `values` are the + sorted values and `indices` are the indices of the elements in the original + `input` tensor. + + Args: + input (Tensor): the input tensor. + dim (int, optional): the dimension to sort along + descending (bool, optional): controls the sorting order (ascending or descending) + stable (bool, optional): makes the sorting routine stable, which guarantees that the order + of equivalent elements is preserved. + + Keyword args: + out (tuple, optional): the output tuple of (`Tensor`, `LongTensor`) that can + be optionally given to be used as output buffers + + Example:: + + >>> x = torch.randn(3, 4) + >>> sorted, indices = torch.sort(x) + >>> sorted + tensor([[-0.2162, 0.0608, 0.6719, 2.3332], + [-0.5793, 0.0061, 0.6058, 0.9497], + [-0.5071, 0.3343, 0.9553, 1.0960]]) + >>> indices + tensor([[ 1, 0, 2, 3], + [ 3, 1, 0, 2], + [ 0, 3, 1, 2]]) + + >>> sorted, indices = torch.sort(x, 0) + >>> sorted + tensor([[-0.5071, -0.2162, 0.6719, -0.5793], + [ 0.0608, 0.0061, 0.9497, 0.3343], + [ 0.6058, 0.9553, 1.0960, 2.3332]]) + >>> indices + tensor([[ 2, 0, 0, 1], + [ 0, 1, 1, 2], + [ 1, 2, 2, 0]]) + >>> x = torch.tensor([0, 1] * 9) + >>> x.sort() + torch.return_types.sort( + values=tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1]), + indices=tensor([ 2, 16, 4, 6, 14, 8, 0, 10, 12, 9, 17, 15, 13, 11, 7, 5, 3, 1])) + >>> x.sort(stable=True) + torch.return_types.sort( + values=tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1]), + indices=tensor([ 0, 2, 4, 6, 8, 10, 12, 14, 16, 1, 3, 5, 7, 9, 11, 13, 15, 17])) + """ + ... +def sparse_bsc_tensor(ccol_indices: Union[Tensor, List], row_indices: Union[Tensor, List], values: Union[Tensor, List], size: Optional[_size] = None, *, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, check_invariants: Optional[_bool] = None) -> Tensor: + r""" + sparse_bsc_tensor(ccol_indices, row_indices, values, size=None, *, dtype=None, device=None, requires_grad=False, check_invariants=None) -> Tensor + + Constructs a :ref:`sparse tensor in BSC (Block Compressed Sparse + Column)) ` with specified 2-dimensional blocks at the + given :attr:`ccol_indices` and :attr:`row_indices`. Sparse matrix + multiplication operations in BSC format are typically faster than that + for sparse tensors in COO format. Make you have a look at :ref:`the + note on the data type of the indices `. + + .. note:: + + If the ``device`` argument is not specified the device of the given + :attr:`values` and indices tensor(s) must match. If, however, the + argument is specified the input Tensors will be converted to the + given device and in turn determine the device of the constructed + sparse tensor. 
+ + Args: + ccol_indices (array_like): (B+1)-dimensional array of size + ``(*batchsize, ncolblocks + 1)``. The last element of each + batch is the number of non-zeros. This tensor encodes the + index in values and row_indices depending on where the given + column starts. Each successive number in the tensor subtracted + by the number before it denotes the number of elements in a + given column. + row_indices (array_like): Row block co-ordinates of each block in + values. (B+1)-dimensional tensor with the same length + as values. + values (array_list): Initial blocks for the tensor. Can be a list, + tuple, NumPy ``ndarray``, and other types that + represents a (1 + 2 + K)-dimensional tensor where ``K`` is the + number of dense dimensions. + size (list, tuple, :class:`torch.Size`, optional): Size of the + sparse tensor: ``(*batchsize, nrows * blocksize[0], ncols * + blocksize[1], *densesize)`` If not provided, the size will be + inferred as the minimum size big enough to hold all non-zero + blocks. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of + returned tensor. Default: if None, infers data type from + :attr:`values`. + device (:class:`torch.device`, optional): the desired device of + returned tensor. Default: if None, uses the current device + for the default tensor type (see + :func:`torch.set_default_device`). :attr:`device` will be + the CPU for CPU tensor types and the current CUDA device for + CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + check_invariants (bool, optional): If sparse tensor invariants are checked. + Default: as returned by :func:`torch.sparse.check_sparse_tensor_invariants.is_enabled`, + initially False. + + Example:: + >>> ccol_indices = [0, 1, 2] + >>> row_indices = [0, 1] + >>> values = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]] + >>> torch.sparse_bsc_tensor(torch.tensor(ccol_indices, dtype=torch.int64), + ... torch.tensor(row_indices, dtype=torch.int64), + ... torch.tensor(values), dtype=torch.double) + tensor(ccol_indices=tensor([0, 1, 2]), + row_indices=tensor([0, 1]), + values=tensor([[[1., 2.], + [3., 4.]], + [[5., 6.], + [7., 8.]]]), size=(2, 2), nnz=2, dtype=torch.float64, + layout=torch.sparse_bsc) + """ + ... +def sparse_bsr_tensor(crow_indices: Union[Tensor, List], col_indices: Union[Tensor, List], values: Union[Tensor, List], size: Optional[_size] = None, *, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, check_invariants: Optional[_bool] = None) -> Tensor: + r""" + sparse_bsr_tensor(crow_indices, col_indices, values, size=None, *, dtype=None, device=None, requires_grad=False, check_invariants=None) -> Tensor + + Constructs a :ref:`sparse tensor in BSR (Block Compressed Sparse Row)) + ` with specified 2-dimensional blocks at the given + :attr:`crow_indices` and :attr:`col_indices`. Sparse matrix + multiplication operations in BSR format are typically faster than that + for sparse tensors in COO format. Make you have a look at :ref:`the + note on the data type of the indices `. + + .. note:: + + If the ``device`` argument is not specified the device of the given + :attr:`values` and indices tensor(s) must match. If, however, the + argument is specified the input Tensors will be converted to the + given device and in turn determine the device of the constructed + sparse tensor. 
+ + Args: + crow_indices (array_like): (B+1)-dimensional array of size + ``(*batchsize, nrowblocks + 1)``. The last element of each + batch is the number of non-zeros. This tensor encodes the + block index in values and col_indices depending on where the + given row block starts. Each successive number in the tensor + subtracted by the number before it denotes the number of + blocks in a given row. + col_indices (array_like): Column block co-ordinates of each block + in values. (B+1)-dimensional tensor with the same length as + values. + values (array_list): Initial values for the tensor. Can be a list, + tuple, NumPy ``ndarray``, scalar, and other types that + represents a (1 + 2 + K)-dimensional tensor where ``K`` is the + number of dense dimensions. + size (list, tuple, :class:`torch.Size`, optional): Size of the + sparse tensor: ``(*batchsize, nrows * blocksize[0], ncols * + blocksize[1], *densesize)`` where ``blocksize == + values.shape[1:3]``. If not provided, the size will be + inferred as the minimum size big enough to hold all non-zero + blocks. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of + returned tensor. Default: if None, infers data type from + :attr:`values`. + device (:class:`torch.device`, optional): the desired device of + returned tensor. Default: if None, uses the current device + for the default tensor type (see + :func:`torch.set_default_device`). :attr:`device` will be + the CPU for CPU tensor types and the current CUDA device for + CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + check_invariants (bool, optional): If sparse tensor invariants are checked. + Default: as returned by :func:`torch.sparse.check_sparse_tensor_invariants.is_enabled`, + initially False. + + Example:: + >>> crow_indices = [0, 1, 2] + >>> col_indices = [0, 1] + >>> values = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]] + >>> torch.sparse_bsr_tensor(torch.tensor(crow_indices, dtype=torch.int64), + ... torch.tensor(col_indices, dtype=torch.int64), + ... torch.tensor(values), dtype=torch.double) + tensor(crow_indices=tensor([0, 1, 2]), + col_indices=tensor([0, 1]), + values=tensor([[[1., 2.], + [3., 4.]], + [[5., 6.], + [7., 8.]]]), size=(2, 2), nnz=2, dtype=torch.float64, + layout=torch.sparse_bsr) + """ + ... +def sparse_compressed_tensor(compressed_indices: Union[Tensor, List], plain_indices: Union[Tensor, List], values: Union[Tensor, List], size: Optional[_size] = None, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, check_invariants: Optional[_bool] = None) -> Tensor: + r""" + sparse_compressed_tensor(compressed_indices, plain_indices, values, size=None, *, dtype=None, layout=None, device=None, requires_grad=False, check_invariants=None) -> Tensor + + Constructs a :ref:`sparse tensor in Compressed Sparse format - CSR, + CSC, BSR, or BSC - ` with specified values at + the given :attr:`compressed_indices` and :attr:`plain_indices`. Sparse + matrix multiplication operations in Compressed Sparse format are + typically faster than that for sparse tensors in COO format. Make you + have a look at :ref:`the note on the data type of the indices + `. + + .. note:: + + If the ``device`` argument is not specified the device of the given + :attr:`values` and indices tensor(s) must match. 
If, however, the
+    argument is specified the input Tensors will be converted to the
+    given device and in turn determine the device of the constructed
+    sparse tensor.
+
+    Args:
+        compressed_indices (array_like): (B+1)-dimensional array of size
+            ``(*batchsize, compressed_dim_size + 1)``. The last element of
+            each batch is the number of non-zero elements or blocks. This
+            tensor encodes the index in ``values`` and ``plain_indices``
+            depending on where the given compressed dimension (row or
+            column) starts. Each successive number in the tensor
+            subtracted by the number before it denotes the number of
+            elements or blocks in a given compressed dimension.
+        plain_indices (array_like): Plain dimension (column or row)
+            co-ordinates of each element or block in values. (B+1)-dimensional
+            tensor with the same length as values.
+
+        values (array_like): Initial values for the tensor. Can be a list,
+            tuple, NumPy ``ndarray``, scalar, and other types that
+            represent a (1+K)-dimensional (for CSR and CSC layouts) or
+            (1+2+K)-dimensional tensor (for BSR and BSC layouts) where
+            ``K`` is the number of dense dimensions.
+        size (list, tuple, :class:`torch.Size`, optional): Size of the
+            sparse tensor: ``(*batchsize, nrows * blocksize[0], ncols *
+            blocksize[1], *densesize)`` where ``blocksize[0] ==
+            blocksize[1] == 1`` for CSR and CSC formats. If not provided,
+            the size will be inferred as the minimum size big enough to
+            hold all non-zero elements or blocks.
+
+    Keyword args:
+        dtype (:class:`torch.dtype`, optional): the desired data type of
+            returned tensor. Default: if None, infers data type from
+            :attr:`values`.
+        layout (:class:`torch.layout`, required): the desired layout of
+            returned tensor: :attr:`torch.sparse_csr`,
+            :attr:`torch.sparse_csc`, :attr:`torch.sparse_bsr`, or
+            :attr:`torch.sparse_bsc`.
+        device (:class:`torch.device`, optional): the desired device of
+            returned tensor. Default: if None, uses the current device
+            for the default tensor type (see
+            :func:`torch.set_default_device`). :attr:`device` will be
+            the CPU for CPU tensor types and the current CUDA device for
+            CUDA tensor types.
+        requires_grad (bool, optional): If autograd should record operations on the
+            returned tensor. Default: ``False``.
+        check_invariants (bool, optional): If sparse tensor invariants are checked.
+            Default: as returned by :func:`torch.sparse.check_sparse_tensor_invariants.is_enabled`,
+            initially False.
+
+    Example::
+        >>> compressed_indices = [0, 2, 4]
+        >>> plain_indices = [0, 1, 0, 1]
+        >>> values = [1, 2, 3, 4]
+        >>> torch.sparse_compressed_tensor(torch.tensor(compressed_indices, dtype=torch.int64),
+        ...                                torch.tensor(plain_indices, dtype=torch.int64),
+        ...                                torch.tensor(values), dtype=torch.double, layout=torch.sparse_csr)
+        tensor(crow_indices=tensor([0, 2, 4]),
+               col_indices=tensor([0, 1, 0, 1]),
+               values=tensor([1., 2., 3., 4.]), size=(2, 2), nnz=4,
+               dtype=torch.float64, layout=torch.sparse_csr)
+    """
+    ...
+def sparse_coo_tensor(indices: Tensor, values: Union[Tensor, List], size: Optional[_size] = None, *, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, check_invariants: Optional[_bool] = None, is_coalesced: Optional[_bool] = None) -> Tensor:
+    r"""
+    sparse_coo_tensor(indices, values, size=None, *, dtype=None, device=None, requires_grad=False, check_invariants=None, is_coalesced=None) -> Tensor
+
+    Constructs a :ref:`sparse tensor in COO(rdinate) format
+    ` with specified values at the given
+    :attr:`indices`.
+
+    .. 
note::
+
+        This function returns an :ref:`uncoalesced tensor
+        ` when :attr:`is_coalesced` is
+        unspecified or ``None``.
+
+    .. note::
+
+        If the ``device`` argument is not specified the device of the given
+        :attr:`values` and indices tensor(s) must match. If, however, the
+        argument is specified the input Tensors will be converted to the
+        given device and in turn determine the device of the constructed
+        sparse tensor.
+
+    Args:
+        indices (array_like): Initial data for the tensor. Can be a list, tuple,
+            NumPy ``ndarray``, scalar, and other types. Will be cast to a :class:`torch.LongTensor`
+            internally. The indices are the coordinates of the non-zero values in the matrix, and thus
+            should be two-dimensional where the first dimension is the number of tensor dimensions and
+            the second dimension is the number of non-zero values.
+        values (array_like): Initial values for the tensor. Can be a list, tuple,
+            NumPy ``ndarray``, scalar, and other types.
+        size (list, tuple, or :class:`torch.Size`, optional): Size of the sparse tensor. If not
+            provided the size will be inferred as the minimum size big enough to hold all non-zero
+            elements.
+
+    Keyword args:
+        dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
+            Default: if None, infers data type from :attr:`values`.
+        device (:class:`torch.device`, optional): the desired device of returned tensor.
+            Default: if None, uses the current device for the default tensor type
+            (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
+            for CPU tensor types and the current CUDA device for CUDA tensor types.
+        requires_grad (bool, optional): If autograd should record operations on the
+            returned tensor. Default: ``False``.
+        check_invariants (bool, optional): If sparse tensor invariants are checked.
+            Default: as returned by :func:`torch.sparse.check_sparse_tensor_invariants.is_enabled`,
+            initially False.
+        is_coalesced (bool, optional): When ``True``, the caller is
+            responsible for providing tensor indices that correspond to a
+            coalesced tensor. If the :attr:`check_invariants` flag is
+            False, no error will be raised if the prerequisites are not
+            met and this will lead to silently incorrect results. To force
+            coalescence, please use :meth:`coalesce` on the resulting
+            Tensor.
+            Default: None: except for trivial cases (e.g. nnz < 2) the
+            resulting Tensor has is_coalesced set to ``False``.
+
+    Example::
+
+        >>> i = torch.tensor([[0, 1, 1],
+        ...                   [2, 0, 2]])
+        >>> v = torch.tensor([3, 4, 5], dtype=torch.float32)
+        >>> torch.sparse_coo_tensor(i, v, [2, 4])
+        tensor(indices=tensor([[0, 1, 1],
+                               [2, 0, 2]]),
+               values=tensor([3., 4., 5.]),
+               size=(2, 4), nnz=3, layout=torch.sparse_coo)
+
+        >>> torch.sparse_coo_tensor(i, v)  # Shape inference
+        tensor(indices=tensor([[0, 1, 1],
+                               [2, 0, 2]]),
+               values=tensor([3., 4., 5.]),
+               size=(2, 3), nnz=3, layout=torch.sparse_coo)
+
+        >>> torch.sparse_coo_tensor(i, v, [2, 4],
+        ...                         dtype=torch.float64,
+        ...                         device=torch.device('cuda:0'))
+        tensor(indices=tensor([[0, 1, 1],
+                               [2, 0, 2]]),
+               values=tensor([3., 4., 5.]),
+               device='cuda:0', size=(2, 4), nnz=3, dtype=torch.float64,
+               layout=torch.sparse_coo)
+
+        # Create an empty sparse tensor with the following invariants:
+        # 1. sparse_dim + dense_dim = len(SparseTensor.shape)
+        # 2. SparseTensor._indices().shape = (sparse_dim, nnz)
+        # 3. 
SparseTensor._values().shape = (nnz, SparseTensor.shape[sparse_dim:]) + # + # For instance, to create an empty sparse tensor with nnz = 0, dense_dim = 0 and + # sparse_dim = 1 (hence indices is a 2D tensor of shape = (1, 0)) + >>> S = torch.sparse_coo_tensor(torch.empty([1, 0]), [], [1]) + tensor(indices=tensor([], size=(1, 0)), + values=tensor([], size=(0,)), + size=(1,), nnz=0, layout=torch.sparse_coo) + + # and to create an empty sparse tensor with nnz = 0, dense_dim = 1 and + # sparse_dim = 1 + >>> S = torch.sparse_coo_tensor(torch.empty([1, 0]), torch.empty([0, 2]), [1, 2]) + tensor(indices=tensor([], size=(1, 0)), + values=tensor([], size=(0, 2)), + size=(1, 2), nnz=0, layout=torch.sparse_coo) + + .. _torch.sparse: https://pytorch.org/docs/stable/sparse.html + """ + ... +def sparse_csc_tensor(ccol_indices: Union[Tensor, List], row_indices: Union[Tensor, List], values: Union[Tensor, List], size: Optional[_size] = None, *, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, check_invariants: Optional[_bool] = None) -> Tensor: + r""" + sparse_csc_tensor(ccol_indices, row_indices, values, size=None, *, dtype=None, device=None, requires_grad=False, check_invariants=None) -> Tensor + + Constructs a :ref:`sparse tensor in CSC (Compressed Sparse Column) + ` with specified values at the given + :attr:`ccol_indices` and :attr:`row_indices`. Sparse matrix + multiplication operations in CSC format are typically faster than that + for sparse tensors in COO format. Make you have a look at :ref:`the + note on the data type of the indices `. + + .. note:: + + If the ``device`` argument is not specified the device of the given + :attr:`values` and indices tensor(s) must match. If, however, the + argument is specified the input Tensors will be converted to the + given device and in turn determine the device of the constructed + sparse tensor. + + Args: + ccol_indices (array_like): (B+1)-dimensional array of size + ``(*batchsize, ncols + 1)``. The last element of each batch + is the number of non-zeros. This tensor encodes the index in + values and row_indices depending on where the given column + starts. Each successive number in the tensor subtracted by the + number before it denotes the number of elements in a given + column. + row_indices (array_like): Row co-ordinates of each element in + values. (B+1)-dimensional tensor with the same length as + values. + values (array_list): Initial values for the tensor. Can be a list, + tuple, NumPy ``ndarray``, scalar, and other types that + represents a (1+K)-dimensional tensor where ``K`` is the number + of dense dimensions. + size (list, tuple, :class:`torch.Size`, optional): Size of the + sparse tensor: ``(*batchsize, nrows, ncols, *densesize)``. If + not provided, the size will be inferred as the minimum size + big enough to hold all non-zero elements. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of + returned tensor. Default: if None, infers data type from + :attr:`values`. + device (:class:`torch.device`, optional): the desired device of + returned tensor. Default: if None, uses the current device + for the default tensor type (see + :func:`torch.set_default_device`). :attr:`device` will be + the CPU for CPU tensor types and the current CUDA device for + CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + check_invariants (bool, optional): If sparse tensor invariants are checked. 
+ Default: as returned by :func:`torch.sparse.check_sparse_tensor_invariants.is_enabled`, + initially False. + + Example:: + >>> ccol_indices = [0, 2, 4] + >>> row_indices = [0, 1, 0, 1] + >>> values = [1, 2, 3, 4] + >>> torch.sparse_csc_tensor(torch.tensor(ccol_indices, dtype=torch.int64), + ... torch.tensor(row_indices, dtype=torch.int64), + ... torch.tensor(values), dtype=torch.double) + tensor(ccol_indices=tensor([0, 2, 4]), + row_indices=tensor([0, 1, 0, 1]), + values=tensor([1., 2., 3., 4.]), size=(2, 2), nnz=4, + dtype=torch.float64, layout=torch.sparse_csc) + """ + ... +def sparse_csr_tensor(crow_indices: Union[Tensor, List], col_indices: Union[Tensor, List], values: Union[Tensor, List], size: Optional[_size] = None, *, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, check_invariants: Optional[_bool] = None) -> Tensor: + r""" + sparse_csr_tensor(crow_indices, col_indices, values, size=None, *, dtype=None, device=None, requires_grad=False, check_invariants=None) -> Tensor + + Constructs a :ref:`sparse tensor in CSR (Compressed Sparse Row) ` with specified + values at the given :attr:`crow_indices` and :attr:`col_indices`. Sparse matrix multiplication operations + in CSR format are typically faster than that for sparse tensors in COO format. Make you have a look + at :ref:`the note on the data type of the indices `. + + .. note:: + + If the ``device`` argument is not specified the device of the given + :attr:`values` and indices tensor(s) must match. If, however, the + argument is specified the input Tensors will be converted to the + given device and in turn determine the device of the constructed + sparse tensor. + + Args: + crow_indices (array_like): (B+1)-dimensional array of size + ``(*batchsize, nrows + 1)``. The last element of each batch + is the number of non-zeros. This tensor encodes the index in + values and col_indices depending on where the given row + starts. Each successive number in the tensor subtracted by the + number before it denotes the number of elements in a given + row. + col_indices (array_like): Column co-ordinates of each element in + values. (B+1)-dimensional tensor with the same length + as values. + values (array_list): Initial values for the tensor. Can be a list, + tuple, NumPy ``ndarray``, scalar, and other types that + represents a (1+K)-dimensional tensor where ``K`` is the number + of dense dimensions. + size (list, tuple, :class:`torch.Size`, optional): Size of the + sparse tensor: ``(*batchsize, nrows, ncols, *densesize)``. If + not provided, the size will be inferred as the minimum size + big enough to hold all non-zero elements. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of + returned tensor. Default: if None, infers data type from + :attr:`values`. + device (:class:`torch.device`, optional): the desired device of + returned tensor. Default: if None, uses the current device + for the default tensor type (see + :func:`torch.set_default_device`). :attr:`device` will be + the CPU for CPU tensor types and the current CUDA device for + CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + check_invariants (bool, optional): If sparse tensor invariants are checked. + Default: as returned by :func:`torch.sparse.check_sparse_tensor_invariants.is_enabled`, + initially False. 
+ + Example:: + >>> crow_indices = [0, 2, 4] + >>> col_indices = [0, 1, 0, 1] + >>> values = [1, 2, 3, 4] + >>> torch.sparse_csr_tensor(torch.tensor(crow_indices, dtype=torch.int64), + ... torch.tensor(col_indices, dtype=torch.int64), + ... torch.tensor(values), dtype=torch.double) + tensor(crow_indices=tensor([0, 2, 4]), + col_indices=tensor([0, 1, 0, 1]), + values=tensor([1., 2., 3., 4.]), size=(2, 2), nnz=4, + dtype=torch.float64, layout=torch.sparse_csr) + """ + ... +def split_copy(input: Tensor, split_size: Union[_int, SymInt], dim: _int = 0, *, out: Union[Tuple[Tensor, ...], List[Tensor], None] = None) -> None: + r""" + Performs the same operation as :func:`torch.split`, but all output tensors + are freshly created instead of aliasing the input. + """ + ... +def split_with_sizes(input: Tensor, split_sizes: Sequence[Union[_int, SymInt]], dim: _int = 0) -> Tuple[Tensor, ...]: ... +def split_with_sizes_copy(input: Tensor, split_sizes: Sequence[Union[_int, SymInt]], dim: _int = 0, *, out: Union[Tuple[Tensor, ...], List[Tensor], None] = None) -> None: + r""" + Performs the same operation as :func:`torch.split_with_sizes`, but all output tensors + are freshly created instead of aliasing the input. + """ + ... +def spmm(input: Tensor, mat2: Tensor) -> Tensor: ... +def sqrt(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + sqrt(input, *, out=None) -> Tensor + + Returns a new tensor with the square-root of the elements of :attr:`input`. + + .. math:: + \text{out}_{i} = \sqrt{\text{input}_{i}} + + Args: + input (Tensor): the input tensor. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randn(4) + >>> a + tensor([-2.0755, 1.0226, 0.0831, 0.4806]) + >>> torch.sqrt(a) + tensor([ nan, 1.0112, 0.2883, 0.6933]) + """ + ... +def sqrt_(input: Tensor) -> Tensor: ... +def square(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + square(input, *, out=None) -> Tensor + + Returns a new tensor with the square of the elements of :attr:`input`. + + Args: + input (Tensor): the input tensor. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randn(4) + >>> a + tensor([-2.0755, 1.0226, 0.0831, 0.4806]) + >>> torch.square(a) + tensor([ 4.3077, 1.0457, 0.0069, 0.2310]) + """ + ... +def square_(input: Tensor) -> Tensor: ... +@overload +def squeeze(input: Tensor) -> Tensor: + r""" + squeeze(input, dim=None) -> Tensor + + Returns a tensor with all specified dimensions of :attr:`input` of size `1` removed. + + For example, if `input` is of shape: + :math:`(A \times 1 \times B \times C \times 1 \times D)` then the `input.squeeze()` + will be of shape: :math:`(A \times B \times C \times D)`. + + When :attr:`dim` is given, a squeeze operation is done only in the given + dimension(s). If `input` is of shape: :math:`(A \times 1 \times B)`, + ``squeeze(input, 0)`` leaves the tensor unchanged, but ``squeeze(input, 1)`` + will squeeze the tensor to the shape :math:`(A \times B)`. + + .. note:: The returned tensor shares the storage with the input tensor, + so changing the contents of one will change the contents of the other. + + .. warning:: If the tensor has a batch dimension of size 1, then `squeeze(input)` + will also remove the batch dimension, which can lead to unexpected + errors. Consider specifying only the dims you wish to be squeezed. + + Args: + input (Tensor): the input tensor. + dim (int or tuple of ints, optional): if given, the input will be squeezed + only in the specified dimensions. 
+ + .. versionchanged:: 2.0 + :attr:`dim` now accepts tuples of dimensions. + + Example:: + + >>> x = torch.zeros(2, 1, 2, 1, 2) + >>> x.size() + torch.Size([2, 1, 2, 1, 2]) + >>> y = torch.squeeze(x) + >>> y.size() + torch.Size([2, 2, 2]) + >>> y = torch.squeeze(x, 0) + >>> y.size() + torch.Size([2, 1, 2, 1, 2]) + >>> y = torch.squeeze(x, 1) + >>> y.size() + torch.Size([2, 2, 1, 2]) + >>> y = torch.squeeze(x, (1, 2, 3)) + torch.Size([2, 2, 2]) + """ + ... +@overload +def squeeze(input: Tensor, dim: _int) -> Tensor: + r""" + squeeze(input, dim=None) -> Tensor + + Returns a tensor with all specified dimensions of :attr:`input` of size `1` removed. + + For example, if `input` is of shape: + :math:`(A \times 1 \times B \times C \times 1 \times D)` then the `input.squeeze()` + will be of shape: :math:`(A \times B \times C \times D)`. + + When :attr:`dim` is given, a squeeze operation is done only in the given + dimension(s). If `input` is of shape: :math:`(A \times 1 \times B)`, + ``squeeze(input, 0)`` leaves the tensor unchanged, but ``squeeze(input, 1)`` + will squeeze the tensor to the shape :math:`(A \times B)`. + + .. note:: The returned tensor shares the storage with the input tensor, + so changing the contents of one will change the contents of the other. + + .. warning:: If the tensor has a batch dimension of size 1, then `squeeze(input)` + will also remove the batch dimension, which can lead to unexpected + errors. Consider specifying only the dims you wish to be squeezed. + + Args: + input (Tensor): the input tensor. + dim (int or tuple of ints, optional): if given, the input will be squeezed + only in the specified dimensions. + + .. versionchanged:: 2.0 + :attr:`dim` now accepts tuples of dimensions. + + Example:: + + >>> x = torch.zeros(2, 1, 2, 1, 2) + >>> x.size() + torch.Size([2, 1, 2, 1, 2]) + >>> y = torch.squeeze(x) + >>> y.size() + torch.Size([2, 2, 2]) + >>> y = torch.squeeze(x, 0) + >>> y.size() + torch.Size([2, 1, 2, 1, 2]) + >>> y = torch.squeeze(x, 1) + >>> y.size() + torch.Size([2, 2, 1, 2]) + >>> y = torch.squeeze(x, (1, 2, 3)) + torch.Size([2, 2, 2]) + """ + ... +@overload +def squeeze(input: Tensor, dim: _size) -> Tensor: + r""" + squeeze(input, dim=None) -> Tensor + + Returns a tensor with all specified dimensions of :attr:`input` of size `1` removed. + + For example, if `input` is of shape: + :math:`(A \times 1 \times B \times C \times 1 \times D)` then the `input.squeeze()` + will be of shape: :math:`(A \times B \times C \times D)`. + + When :attr:`dim` is given, a squeeze operation is done only in the given + dimension(s). If `input` is of shape: :math:`(A \times 1 \times B)`, + ``squeeze(input, 0)`` leaves the tensor unchanged, but ``squeeze(input, 1)`` + will squeeze the tensor to the shape :math:`(A \times B)`. + + .. note:: The returned tensor shares the storage with the input tensor, + so changing the contents of one will change the contents of the other. + + .. warning:: If the tensor has a batch dimension of size 1, then `squeeze(input)` + will also remove the batch dimension, which can lead to unexpected + errors. Consider specifying only the dims you wish to be squeezed. + + Args: + input (Tensor): the input tensor. + dim (int or tuple of ints, optional): if given, the input will be squeezed + only in the specified dimensions. + + .. versionchanged:: 2.0 + :attr:`dim` now accepts tuples of dimensions. 
+ + Example:: + + >>> x = torch.zeros(2, 1, 2, 1, 2) + >>> x.size() + torch.Size([2, 1, 2, 1, 2]) + >>> y = torch.squeeze(x) + >>> y.size() + torch.Size([2, 2, 2]) + >>> y = torch.squeeze(x, 0) + >>> y.size() + torch.Size([2, 1, 2, 1, 2]) + >>> y = torch.squeeze(x, 1) + >>> y.size() + torch.Size([2, 2, 1, 2]) + >>> y = torch.squeeze(x, (1, 2, 3)) + torch.Size([2, 2, 2]) + """ + ... +@overload +def squeeze(input: Tensor, dim: Union[str, ellipsis, None]) -> Tensor: + r""" + squeeze(input, dim=None) -> Tensor + + Returns a tensor with all specified dimensions of :attr:`input` of size `1` removed. + + For example, if `input` is of shape: + :math:`(A \times 1 \times B \times C \times 1 \times D)` then the `input.squeeze()` + will be of shape: :math:`(A \times B \times C \times D)`. + + When :attr:`dim` is given, a squeeze operation is done only in the given + dimension(s). If `input` is of shape: :math:`(A \times 1 \times B)`, + ``squeeze(input, 0)`` leaves the tensor unchanged, but ``squeeze(input, 1)`` + will squeeze the tensor to the shape :math:`(A \times B)`. + + .. note:: The returned tensor shares the storage with the input tensor, + so changing the contents of one will change the contents of the other. + + .. warning:: If the tensor has a batch dimension of size 1, then `squeeze(input)` + will also remove the batch dimension, which can lead to unexpected + errors. Consider specifying only the dims you wish to be squeezed. + + Args: + input (Tensor): the input tensor. + dim (int or tuple of ints, optional): if given, the input will be squeezed + only in the specified dimensions. + + .. versionchanged:: 2.0 + :attr:`dim` now accepts tuples of dimensions. + + Example:: + + >>> x = torch.zeros(2, 1, 2, 1, 2) + >>> x.size() + torch.Size([2, 1, 2, 1, 2]) + >>> y = torch.squeeze(x) + >>> y.size() + torch.Size([2, 2, 2]) + >>> y = torch.squeeze(x, 0) + >>> y.size() + torch.Size([2, 1, 2, 1, 2]) + >>> y = torch.squeeze(x, 1) + >>> y.size() + torch.Size([2, 2, 1, 2]) + >>> y = torch.squeeze(x, (1, 2, 3)) + torch.Size([2, 2, 2]) + """ + ... +@overload +def squeeze_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + Performs the same operation as :func:`torch.squeeze`, but all output tensors + are freshly created instead of aliasing the input. + """ + ... +@overload +def squeeze_copy(input: Tensor, dim: _int, *, out: Optional[Tensor] = None) -> Tensor: + r""" + Performs the same operation as :func:`torch.squeeze`, but all output tensors + are freshly created instead of aliasing the input. + """ + ... +@overload +def squeeze_copy(input: Tensor, dim: _size, *, out: Optional[Tensor] = None) -> Tensor: + r""" + Performs the same operation as :func:`torch.squeeze`, but all output tensors + are freshly created instead of aliasing the input. + """ + ... +@overload +def sspaddmm(beta: Union[Number, _complex], self: Tensor, alpha: Union[Number, _complex], mat1: Tensor, mat2: Tensor) -> Tensor: + r""" + sspaddmm(input, mat1, mat2, *, beta=1, alpha=1, out=None) -> Tensor + + Matrix multiplies a sparse tensor :attr:`mat1` with a dense tensor + :attr:`mat2`, then adds the sparse tensor :attr:`input` to the result. + + Note: This function is equivalent to :func:`torch.addmm`, except + :attr:`input` and :attr:`mat1` are sparse. 
+ + Args: + input (Tensor): a sparse matrix to be added + mat1 (Tensor): a sparse matrix to be matrix multiplied + mat2 (Tensor): a dense matrix to be matrix multiplied + + Keyword args: + beta (Number, optional): multiplier for :attr:`mat` (:math:`\beta`) + alpha (Number, optional): multiplier for :math:`mat1 @ mat2` (:math:`\alpha`) + out (Tensor, optional): the output tensor. + """ + ... +@overload +def sspaddmm(input: Tensor, mat1: Tensor, mat2: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1, out: Optional[Tensor] = None) -> Tensor: + r""" + sspaddmm(input, mat1, mat2, *, beta=1, alpha=1, out=None) -> Tensor + + Matrix multiplies a sparse tensor :attr:`mat1` with a dense tensor + :attr:`mat2`, then adds the sparse tensor :attr:`input` to the result. + + Note: This function is equivalent to :func:`torch.addmm`, except + :attr:`input` and :attr:`mat1` are sparse. + + Args: + input (Tensor): a sparse matrix to be added + mat1 (Tensor): a sparse matrix to be matrix multiplied + mat2 (Tensor): a dense matrix to be matrix multiplied + + Keyword args: + beta (Number, optional): multiplier for :attr:`mat` (:math:`\beta`) + alpha (Number, optional): multiplier for :math:`mat1 @ mat2` (:math:`\alpha`) + out (Tensor, optional): the output tensor. + """ + ... +@overload +def sspaddmm(beta: Union[Number, _complex], self: Tensor, mat1: Tensor, mat2: Tensor) -> Tensor: + r""" + sspaddmm(input, mat1, mat2, *, beta=1, alpha=1, out=None) -> Tensor + + Matrix multiplies a sparse tensor :attr:`mat1` with a dense tensor + :attr:`mat2`, then adds the sparse tensor :attr:`input` to the result. + + Note: This function is equivalent to :func:`torch.addmm`, except + :attr:`input` and :attr:`mat1` are sparse. + + Args: + input (Tensor): a sparse matrix to be added + mat1 (Tensor): a sparse matrix to be matrix multiplied + mat2 (Tensor): a dense matrix to be matrix multiplied + + Keyword args: + beta (Number, optional): multiplier for :attr:`mat` (:math:`\beta`) + alpha (Number, optional): multiplier for :math:`mat1 @ mat2` (:math:`\alpha`) + out (Tensor, optional): the output tensor. + """ + ... +def stack(tensors: Union[Tuple[Tensor, ...], List[Tensor]], dim: _int = 0, *, out: Optional[Tensor] = None) -> Tensor: + r""" + stack(tensors, dim=0, *, out=None) -> Tensor + + Concatenates a sequence of tensors along a new dimension. + + All tensors need to be of the same size. + + .. seealso:: + + :func:`torch.cat` concatenates the given sequence along an existing dimension. + + Arguments: + tensors (sequence of Tensors): sequence of tensors to concatenate + dim (int, optional): dimension to insert. Has to be between 0 and the number + of dimensions of concatenated tensors (inclusive). Default: 0 + + Keyword args: + out (Tensor, optional): the output tensor. 
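+
+    .. note::
+        A minimal illustrative sketch of the shape requirement, contrasted with
+        :func:`torch.cat`, which only needs matching sizes outside :attr:`dim`::
+
+            >>> a, b = torch.zeros(2, 3), torch.zeros(2, 4)
+            >>> torch.cat((a, b), dim=1).shape
+            torch.Size([2, 7])
+            >>> torch.stack((a, b))  # shapes differ, so this raises a RuntimeError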
+ + Example:: + + >>> x = torch.randn(2, 3) + >>> x + tensor([[ 0.3367, 0.1288, 0.2345], + [ 0.2303, -1.1229, -0.1863]]) + >>> x = torch.stack((x, x)) # same as torch.stack((x, x), dim=0) + >>> x + tensor([[[ 0.3367, 0.1288, 0.2345], + [ 0.2303, -1.1229, -0.1863]], + + [[ 0.3367, 0.1288, 0.2345], + [ 0.2303, -1.1229, -0.1863]]]) + >>> x.size() + torch.Size([2, 2, 3]) + >>> x = torch.stack((x, x), dim=1) + tensor([[[ 0.3367, 0.1288, 0.2345], + [ 0.3367, 0.1288, 0.2345]], + + [[ 0.2303, -1.1229, -0.1863], + [ 0.2303, -1.1229, -0.1863]]]) + >>> x = torch.stack((x, x), dim=2) + tensor([[[ 0.3367, 0.3367], + [ 0.1288, 0.1288], + [ 0.2345, 0.2345]], + + [[ 0.2303, 0.2303], + [-1.1229, -1.1229], + [-0.1863, -0.1863]]]) + >>> x = torch.stack((x, x), dim=-1) + tensor([[[ 0.3367, 0.3367], + [ 0.1288, 0.1288], + [ 0.2345, 0.2345]], + + [[ 0.2303, 0.2303], + [-1.1229, -1.1229], + [-0.1863, -0.1863]]]) + """ + ... +@overload +def std(input: Tensor, dim: Optional[Union[_int, _size]], unbiased: _bool = True, keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: + r""" + std(input, dim=None, *, correction=1, keepdim=False, out=None) -> Tensor + + Calculates the standard deviation over the dimensions specified by :attr:`dim`. + :attr:`dim` can be a single dimension, list of dimensions, or ``None`` to + reduce over all dimensions. + + The standard deviation (:math:`\sigma`) is calculated as + + .. math:: \sigma = \sqrt{\frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2} + + where :math:`x` is the sample set of elements, :math:`\bar{x}` is the + sample mean, :math:`N` is the number of samples and :math:`\delta N` is + the :attr:`correction`. + + + + If :attr:`keepdim` is ``True``, the output tensor is of the same size + as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the + output tensor having 1 (or ``len(dim)``) fewer dimension(s). + + + Args: + input (Tensor): the input tensor. + dim (int or tuple of ints): the dimension or dimensions to reduce. + + Keyword args: + correction (int): difference between the sample size and sample degrees of freedom. + Defaults to `Bessel's correction`_, ``correction=1``. + + .. versionchanged:: 2.0 + Previously this argument was called ``unbiased`` and was a boolean + with ``True`` corresponding to ``correction=1`` and ``False`` being + ``correction=0``. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + out (Tensor, optional): the output tensor. + + Example: + + >>> a = torch.tensor( + ... [[ 0.2035, 1.2959, 1.8101, -0.4644], + ... [ 1.5027, -0.3270, 0.5905, 0.6538], + ... [-1.5745, 1.3330, -0.5596, -0.6548], + ... [ 0.1264, -0.5080, 1.6420, 0.1992]]) + >>> torch.std(a, dim=1, keepdim=True) + tensor([[1.0311], + [0.7477], + [1.2204], + [0.9087]]) + + .. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction + """ + ... +@overload +def std(input: Tensor, dim: Optional[Union[_int, _size]] = None, *, correction: Optional[Union[Number, _complex]] = None, keepdim: _bool = False, out: Optional[Tensor] = None) -> Tensor: + r""" + std(input, dim=None, *, correction=1, keepdim=False, out=None) -> Tensor + + Calculates the standard deviation over the dimensions specified by :attr:`dim`. + :attr:`dim` can be a single dimension, list of dimensions, or ``None`` to + reduce over all dimensions. + + The standard deviation (:math:`\sigma`) is calculated as + + .. 
math:: \sigma = \sqrt{\frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2} + + where :math:`x` is the sample set of elements, :math:`\bar{x}` is the + sample mean, :math:`N` is the number of samples and :math:`\delta N` is + the :attr:`correction`. + + + + If :attr:`keepdim` is ``True``, the output tensor is of the same size + as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the + output tensor having 1 (or ``len(dim)``) fewer dimension(s). + + + Args: + input (Tensor): the input tensor. + dim (int or tuple of ints): the dimension or dimensions to reduce. + + Keyword args: + correction (int): difference between the sample size and sample degrees of freedom. + Defaults to `Bessel's correction`_, ``correction=1``. + + .. versionchanged:: 2.0 + Previously this argument was called ``unbiased`` and was a boolean + with ``True`` corresponding to ``correction=1`` and ``False`` being + ``correction=0``. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + out (Tensor, optional): the output tensor. + + Example: + + >>> a = torch.tensor( + ... [[ 0.2035, 1.2959, 1.8101, -0.4644], + ... [ 1.5027, -0.3270, 0.5905, 0.6538], + ... [-1.5745, 1.3330, -0.5596, -0.6548], + ... [ 0.1264, -0.5080, 1.6420, 0.1992]]) + >>> torch.std(a, dim=1, keepdim=True) + tensor([[1.0311], + [0.7477], + [1.2204], + [0.9087]]) + + .. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction + """ + ... +@overload +def std(input: Tensor, unbiased: _bool = True) -> Tensor: + r""" + std(input, dim=None, *, correction=1, keepdim=False, out=None) -> Tensor + + Calculates the standard deviation over the dimensions specified by :attr:`dim`. + :attr:`dim` can be a single dimension, list of dimensions, or ``None`` to + reduce over all dimensions. + + The standard deviation (:math:`\sigma`) is calculated as + + .. math:: \sigma = \sqrt{\frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2} + + where :math:`x` is the sample set of elements, :math:`\bar{x}` is the + sample mean, :math:`N` is the number of samples and :math:`\delta N` is + the :attr:`correction`. + + + + If :attr:`keepdim` is ``True``, the output tensor is of the same size + as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the + output tensor having 1 (or ``len(dim)``) fewer dimension(s). + + + Args: + input (Tensor): the input tensor. + dim (int or tuple of ints): the dimension or dimensions to reduce. + + Keyword args: + correction (int): difference between the sample size and sample degrees of freedom. + Defaults to `Bessel's correction`_, ``correction=1``. + + .. versionchanged:: 2.0 + Previously this argument was called ``unbiased`` and was a boolean + with ``True`` corresponding to ``correction=1`` and ``False`` being + ``correction=0``. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + out (Tensor, optional): the output tensor. + + Example: + + >>> a = torch.tensor( + ... [[ 0.2035, 1.2959, 1.8101, -0.4644], + ... [ 1.5027, -0.3270, 0.5905, 0.6538], + ... [-1.5745, 1.3330, -0.5596, -0.6548], + ... [ 0.1264, -0.5080, 1.6420, 0.1992]]) + >>> torch.std(a, dim=1, keepdim=True) + tensor([[1.0311], + [0.7477], + [1.2204], + [0.9087]]) + + .. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction + """ + ... 
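+# Editorial sketch (not part of the generated stubs): the effect of the
+# ``correction`` keyword documented above, for a sample whose squared
+# deviations from the mean sum to 5.0:
+#
+#     >>> t = torch.tensor([1., 2., 3., 4.])
+#     >>> torch.std(t)                  # correction=1 (Bessel): sqrt(5 / 3)
+#     tensor(1.2910)
+#     >>> torch.std(t, correction=0)    # population std: sqrt(5 / 4)
+#     tensor(1.1180)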
+@overload +def std(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], *, correction: Optional[Union[Number, _complex]] = None, keepdim: _bool = False, out: Optional[Tensor] = None) -> Tensor: + r""" + std(input, dim=None, *, correction=1, keepdim=False, out=None) -> Tensor + + Calculates the standard deviation over the dimensions specified by :attr:`dim`. + :attr:`dim` can be a single dimension, list of dimensions, or ``None`` to + reduce over all dimensions. + + The standard deviation (:math:`\sigma`) is calculated as + + .. math:: \sigma = \sqrt{\frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2} + + where :math:`x` is the sample set of elements, :math:`\bar{x}` is the + sample mean, :math:`N` is the number of samples and :math:`\delta N` is + the :attr:`correction`. + + + + If :attr:`keepdim` is ``True``, the output tensor is of the same size + as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the + output tensor having 1 (or ``len(dim)``) fewer dimension(s). + + + Args: + input (Tensor): the input tensor. + dim (int or tuple of ints): the dimension or dimensions to reduce. + + Keyword args: + correction (int): difference between the sample size and sample degrees of freedom. + Defaults to `Bessel's correction`_, ``correction=1``. + + .. versionchanged:: 2.0 + Previously this argument was called ``unbiased`` and was a boolean + with ``True`` corresponding to ``correction=1`` and ``False`` being + ``correction=0``. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + out (Tensor, optional): the output tensor. + + Example: + + >>> a = torch.tensor( + ... [[ 0.2035, 1.2959, 1.8101, -0.4644], + ... [ 1.5027, -0.3270, 0.5905, 0.6538], + ... [-1.5745, 1.3330, -0.5596, -0.6548], + ... [ 0.1264, -0.5080, 1.6420, 0.1992]]) + >>> torch.std(a, dim=1, keepdim=True) + tensor([[1.0311], + [0.7477], + [1.2204], + [0.9087]]) + + .. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction + """ + ... +@overload +def std(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], unbiased: _bool = True, keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: + r""" + std(input, dim=None, *, correction=1, keepdim=False, out=None) -> Tensor + + Calculates the standard deviation over the dimensions specified by :attr:`dim`. + :attr:`dim` can be a single dimension, list of dimensions, or ``None`` to + reduce over all dimensions. + + The standard deviation (:math:`\sigma`) is calculated as + + .. math:: \sigma = \sqrt{\frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2} + + where :math:`x` is the sample set of elements, :math:`\bar{x}` is the + sample mean, :math:`N` is the number of samples and :math:`\delta N` is + the :attr:`correction`. + + + + If :attr:`keepdim` is ``True``, the output tensor is of the same size + as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the + output tensor having 1 (or ``len(dim)``) fewer dimension(s). + + + Args: + input (Tensor): the input tensor. + dim (int or tuple of ints): the dimension or dimensions to reduce. + + Keyword args: + correction (int): difference between the sample size and sample degrees of freedom. + Defaults to `Bessel's correction`_, ``correction=1``. + + .. 
versionchanged:: 2.0 + Previously this argument was called ``unbiased`` and was a boolean + with ``True`` corresponding to ``correction=1`` and ``False`` being + ``correction=0``. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + out (Tensor, optional): the output tensor. + + Example: + + >>> a = torch.tensor( + ... [[ 0.2035, 1.2959, 1.8101, -0.4644], + ... [ 1.5027, -0.3270, 0.5905, 0.6538], + ... [-1.5745, 1.3330, -0.5596, -0.6548], + ... [ 0.1264, -0.5080, 1.6420, 0.1992]]) + >>> torch.std(a, dim=1, keepdim=True) + tensor([[1.0311], + [0.7477], + [1.2204], + [0.9087]]) + + .. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction + """ + ... +@overload +def std_mean(input: Tensor, dim: Optional[Union[_int, _size]], unbiased: _bool = True, keepdim: _bool = False) -> Tuple[Tensor, Tensor]: + r""" + std_mean(input, dim=None, *, correction=1, keepdim=False, out=None) -> (Tensor, Tensor) + + Calculates the standard deviation and mean over the dimensions specified by + :attr:`dim`. :attr:`dim` can be a single dimension, list of dimensions, or + ``None`` to reduce over all dimensions. + + The standard deviation (:math:`\sigma`) is calculated as + + .. math:: \sigma = \sqrt{\frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2} + + where :math:`x` is the sample set of elements, :math:`\bar{x}` is the + sample mean, :math:`N` is the number of samples and :math:`\delta N` is + the :attr:`correction`. + + + + + If :attr:`keepdim` is ``True``, the output tensor is of the same size + as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the + output tensor having 1 (or ``len(dim)``) fewer dimension(s). + + + Args: + input (Tensor): the input tensor. + + dim (int or tuple of ints, optional): the dimension or dimensions to reduce. + If ``None``, all dimensions are reduced. + + + Keyword args: + correction (int): difference between the sample size and sample degrees of freedom. + Defaults to `Bessel's correction`_, ``correction=1``. + + .. versionchanged:: 2.0 + Previously this argument was called ``unbiased`` and was a boolean + with ``True`` corresponding to ``correction=1`` and ``False`` being + ``correction=0``. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + out (Tensor, optional): the output tensor. + + Returns: + A tuple (std, mean) containing the standard deviation and mean. + + Example: + + >>> a = torch.tensor( + ... [[ 0.2035, 1.2959, 1.8101, -0.4644], + ... [ 1.5027, -0.3270, 0.5905, 0.6538], + ... [-1.5745, 1.3330, -0.5596, -0.6548], + ... [ 0.1264, -0.5080, 1.6420, 0.1992]]) + >>> torch.std_mean(a, dim=0, keepdim=True) + (tensor([[1.2620, 1.0028, 1.0957, 0.6038]]), + tensor([[ 0.0645, 0.4485, 0.8707, -0.0665]])) + + .. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction + """ + ... +@overload +def std_mean(input: Tensor, dim: Optional[Union[_int, _size]] = None, *, correction: Optional[Union[Number, _complex]] = None, keepdim: _bool = False) -> Tuple[Tensor, Tensor]: + r""" + std_mean(input, dim=None, *, correction=1, keepdim=False, out=None) -> (Tensor, Tensor) + + Calculates the standard deviation and mean over the dimensions specified by + :attr:`dim`. :attr:`dim` can be a single dimension, list of dimensions, or + ``None`` to reduce over all dimensions. + + The standard deviation (:math:`\sigma`) is calculated as + + .. 
math:: \sigma = \sqrt{\frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2} + + where :math:`x` is the sample set of elements, :math:`\bar{x}` is the + sample mean, :math:`N` is the number of samples and :math:`\delta N` is + the :attr:`correction`. + + + + + If :attr:`keepdim` is ``True``, the output tensor is of the same size + as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the + output tensor having 1 (or ``len(dim)``) fewer dimension(s). + + + Args: + input (Tensor): the input tensor. + + dim (int or tuple of ints, optional): the dimension or dimensions to reduce. + If ``None``, all dimensions are reduced. + + + Keyword args: + correction (int): difference between the sample size and sample degrees of freedom. + Defaults to `Bessel's correction`_, ``correction=1``. + + .. versionchanged:: 2.0 + Previously this argument was called ``unbiased`` and was a boolean + with ``True`` corresponding to ``correction=1`` and ``False`` being + ``correction=0``. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + out (Tensor, optional): the output tensor. + + Returns: + A tuple (std, mean) containing the standard deviation and mean. + + Example: + + >>> a = torch.tensor( + ... [[ 0.2035, 1.2959, 1.8101, -0.4644], + ... [ 1.5027, -0.3270, 0.5905, 0.6538], + ... [-1.5745, 1.3330, -0.5596, -0.6548], + ... [ 0.1264, -0.5080, 1.6420, 0.1992]]) + >>> torch.std_mean(a, dim=0, keepdim=True) + (tensor([[1.2620, 1.0028, 1.0957, 0.6038]]), + tensor([[ 0.0645, 0.4485, 0.8707, -0.0665]])) + + .. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction + """ + ... +@overload +def std_mean(input: Tensor, unbiased: _bool = True) -> Tuple[Tensor, Tensor]: + r""" + std_mean(input, dim=None, *, correction=1, keepdim=False, out=None) -> (Tensor, Tensor) + + Calculates the standard deviation and mean over the dimensions specified by + :attr:`dim`. :attr:`dim` can be a single dimension, list of dimensions, or + ``None`` to reduce over all dimensions. + + The standard deviation (:math:`\sigma`) is calculated as + + .. math:: \sigma = \sqrt{\frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2} + + where :math:`x` is the sample set of elements, :math:`\bar{x}` is the + sample mean, :math:`N` is the number of samples and :math:`\delta N` is + the :attr:`correction`. + + + + + If :attr:`keepdim` is ``True``, the output tensor is of the same size + as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the + output tensor having 1 (or ``len(dim)``) fewer dimension(s). + + + Args: + input (Tensor): the input tensor. + + dim (int or tuple of ints, optional): the dimension or dimensions to reduce. + If ``None``, all dimensions are reduced. + + + Keyword args: + correction (int): difference between the sample size and sample degrees of freedom. + Defaults to `Bessel's correction`_, ``correction=1``. + + .. versionchanged:: 2.0 + Previously this argument was called ``unbiased`` and was a boolean + with ``True`` corresponding to ``correction=1`` and ``False`` being + ``correction=0``. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + out (Tensor, optional): the output tensor. + + Returns: + A tuple (std, mean) containing the standard deviation and mean. + + Example: + + >>> a = torch.tensor( + ... 
[[ 0.2035, 1.2959, 1.8101, -0.4644], + ... [ 1.5027, -0.3270, 0.5905, 0.6538], + ... [-1.5745, 1.3330, -0.5596, -0.6548], + ... [ 0.1264, -0.5080, 1.6420, 0.1992]]) + >>> torch.std_mean(a, dim=0, keepdim=True) + (tensor([[1.2620, 1.0028, 1.0957, 0.6038]]), + tensor([[ 0.0645, 0.4485, 0.8707, -0.0665]])) + + .. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction + """ + ... +@overload +def std_mean(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], *, correction: Optional[Union[Number, _complex]] = None, keepdim: _bool = False) -> Tuple[Tensor, Tensor]: + r""" + std_mean(input, dim=None, *, correction=1, keepdim=False, out=None) -> (Tensor, Tensor) + + Calculates the standard deviation and mean over the dimensions specified by + :attr:`dim`. :attr:`dim` can be a single dimension, list of dimensions, or + ``None`` to reduce over all dimensions. + + The standard deviation (:math:`\sigma`) is calculated as + + .. math:: \sigma = \sqrt{\frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2} + + where :math:`x` is the sample set of elements, :math:`\bar{x}` is the + sample mean, :math:`N` is the number of samples and :math:`\delta N` is + the :attr:`correction`. + + + + + If :attr:`keepdim` is ``True``, the output tensor is of the same size + as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the + output tensor having 1 (or ``len(dim)``) fewer dimension(s). + + + Args: + input (Tensor): the input tensor. + + dim (int or tuple of ints, optional): the dimension or dimensions to reduce. + If ``None``, all dimensions are reduced. + + + Keyword args: + correction (int): difference between the sample size and sample degrees of freedom. + Defaults to `Bessel's correction`_, ``correction=1``. + + .. versionchanged:: 2.0 + Previously this argument was called ``unbiased`` and was a boolean + with ``True`` corresponding to ``correction=1`` and ``False`` being + ``correction=0``. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + out (Tensor, optional): the output tensor. + + Returns: + A tuple (std, mean) containing the standard deviation and mean. + + Example: + + >>> a = torch.tensor( + ... [[ 0.2035, 1.2959, 1.8101, -0.4644], + ... [ 1.5027, -0.3270, 0.5905, 0.6538], + ... [-1.5745, 1.3330, -0.5596, -0.6548], + ... [ 0.1264, -0.5080, 1.6420, 0.1992]]) + >>> torch.std_mean(a, dim=0, keepdim=True) + (tensor([[1.2620, 1.0028, 1.0957, 0.6038]]), + tensor([[ 0.0645, 0.4485, 0.8707, -0.0665]])) + + .. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction + """ + ... +@overload +def std_mean(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], unbiased: _bool = True, keepdim: _bool = False) -> Tuple[Tensor, Tensor]: + r""" + std_mean(input, dim=None, *, correction=1, keepdim=False, out=None) -> (Tensor, Tensor) + + Calculates the standard deviation and mean over the dimensions specified by + :attr:`dim`. :attr:`dim` can be a single dimension, list of dimensions, or + ``None`` to reduce over all dimensions. + + The standard deviation (:math:`\sigma`) is calculated as + + .. math:: \sigma = \sqrt{\frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2} + + where :math:`x` is the sample set of elements, :math:`\bar{x}` is the + sample mean, :math:`N` is the number of samples and :math:`\delta N` is + the :attr:`correction`. 
+ + + + + If :attr:`keepdim` is ``True``, the output tensor is of the same size + as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the + output tensor having 1 (or ``len(dim)``) fewer dimension(s). + + + Args: + input (Tensor): the input tensor. + + dim (int or tuple of ints, optional): the dimension or dimensions to reduce. + If ``None``, all dimensions are reduced. + + + Keyword args: + correction (int): difference between the sample size and sample degrees of freedom. + Defaults to `Bessel's correction`_, ``correction=1``. + + .. versionchanged:: 2.0 + Previously this argument was called ``unbiased`` and was a boolean + with ``True`` corresponding to ``correction=1`` and ``False`` being + ``correction=0``. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + out (Tensor, optional): the output tensor. + + Returns: + A tuple (std, mean) containing the standard deviation and mean. + + Example: + + >>> a = torch.tensor( + ... [[ 0.2035, 1.2959, 1.8101, -0.4644], + ... [ 1.5027, -0.3270, 0.5905, 0.6538], + ... [-1.5745, 1.3330, -0.5596, -0.6548], + ... [ 0.1264, -0.5080, 1.6420, 0.1992]]) + >>> torch.std_mean(a, dim=0, keepdim=True) + (tensor([[1.2620, 1.0028, 1.0957, 0.6038]]), + tensor([[ 0.0645, 0.4485, 0.8707, -0.0665]])) + + .. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction + """ + ... +@overload +def sub(input: Union[Tensor, Number, _complex], other: Union[Tensor, Number, _complex], *, alpha: Optional[Union[Number, _complex]] = 1, out: Optional[Tensor] = None) -> Tensor: + r""" + sub(input, other, *, alpha=1, out=None) -> Tensor + + Subtracts :attr:`other`, scaled by :attr:`alpha`, from :attr:`input`. + + .. math:: + \text{{out}}_i = \text{{input}}_i - \text{{alpha}} \times \text{{other}}_i + + + Supports :ref:`broadcasting to a common shape `, + :ref:`type promotion `, and integer, float, and complex inputs. + + Args: + input (Tensor): the input tensor. + other (Tensor or Number): the tensor or number to subtract from :attr:`input`. + + Keyword args: + alpha (Number): the multiplier for :attr:`other`. + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.tensor((1, 2)) + >>> b = torch.tensor((0, 1)) + >>> torch.sub(a, b, alpha=2) + tensor([1, 0]) + """ + ... +@overload +def sub(self: Tensor, alpha: Union[Number, _complex], other: Tensor) -> Tensor: + r""" + sub(input, other, *, alpha=1, out=None) -> Tensor + + Subtracts :attr:`other`, scaled by :attr:`alpha`, from :attr:`input`. + + .. math:: + \text{{out}}_i = \text{{input}}_i - \text{{alpha}} \times \text{{other}}_i + + + Supports :ref:`broadcasting to a common shape `, + :ref:`type promotion `, and integer, float, and complex inputs. + + Args: + input (Tensor): the input tensor. + other (Tensor or Number): the tensor or number to subtract from :attr:`input`. + + Keyword args: + alpha (Number): the multiplier for :attr:`other`. + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.tensor((1, 2)) + >>> b = torch.tensor((0, 1)) + >>> torch.sub(a, b, alpha=2) + tensor([1, 0]) + """ + ... +@overload +def sub(self: Tensor, alpha: Union[Number, _complex], other: Tensor, *, out: Tensor) -> Tensor: + r""" + sub(input, other, *, alpha=1, out=None) -> Tensor + + Subtracts :attr:`other`, scaled by :attr:`alpha`, from :attr:`input`. + + .. 
math:: + \text{{out}}_i = \text{{input}}_i - \text{{alpha}} \times \text{{other}}_i + + + Supports :ref:`broadcasting to a common shape `, + :ref:`type promotion `, and integer, float, and complex inputs. + + Args: + input (Tensor): the input tensor. + other (Tensor or Number): the tensor or number to subtract from :attr:`input`. + + Keyword args: + alpha (Number): the multiplier for :attr:`other`. + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.tensor((1, 2)) + >>> b = torch.tensor((0, 1)) + >>> torch.sub(a, b, alpha=2) + tensor([1, 0]) + """ + ... +@overload +def subtract(input: Tensor, other: Tensor, *, alpha: Union[Number, _complex] = 1, out: Optional[Tensor] = None) -> Tensor: + r""" + subtract(input, other, *, alpha=1, out=None) -> Tensor + + Alias for :func:`torch.sub`. + """ + ... +@overload +def subtract(input: Tensor, other: Union[Number, _complex], alpha: Union[Number, _complex] = 1) -> Tensor: + r""" + subtract(input, other, *, alpha=1, out=None) -> Tensor + + Alias for :func:`torch.sub`. + """ + ... +@overload +def sum(input: Tensor, *, dtype: Optional[_dtype] = None) -> Tensor: + r""" + sum(input, *, dtype=None) -> Tensor + + Returns the sum of all elements in the :attr:`input` tensor. + + Args: + input (Tensor): the input tensor. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + If specified, the input tensor is casted to :attr:`dtype` before the operation + is performed. This is useful for preventing data type overflows. Default: None. + + Example:: + + >>> a = torch.randn(1, 3) + >>> a + tensor([[ 0.1133, -0.9567, 0.2958]]) + >>> torch.sum(a) + tensor(-0.5475) + + .. function:: sum(input, dim, keepdim=False, *, dtype=None) -> Tensor + :noindex: + + Returns the sum of each row of the :attr:`input` tensor in the given + dimension :attr:`dim`. If :attr:`dim` is a list of dimensions, + reduce over all of them. + + + If :attr:`keepdim` is ``True``, the output tensor is of the same size + as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the + output tensor having 1 (or ``len(dim)``) fewer dimension(s). + + + Args: + input (Tensor): the input tensor. + + dim (int or tuple of ints, optional): the dimension or dimensions to reduce. + If ``None``, all dimensions are reduced. + + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + If specified, the input tensor is casted to :attr:`dtype` before the operation + is performed. This is useful for preventing data type overflows. Default: None. + + Example:: + + >>> a = torch.randn(4, 4) + >>> a + tensor([[ 0.0569, -0.2475, 0.0737, -0.3429], + [-0.2993, 0.9138, 0.9337, -1.6864], + [ 0.1132, 0.7892, -0.1003, 0.5688], + [ 0.3637, -0.9906, -0.4752, -1.5197]]) + >>> torch.sum(a, 1) + tensor([-0.4598, -0.1381, 1.3708, -2.6217]) + >>> b = torch.arange(4 * 5 * 6).view(4, 5, 6) + >>> torch.sum(b, (2, 1)) + tensor([ 435., 1335., 2235., 3135.]) + """ + ... +@overload +def sum(input: Tensor, dim: Optional[Union[_int, _size]], keepdim: _bool = False, *, dtype: Optional[_dtype] = None, out: Optional[Tensor] = None) -> Tensor: + r""" + sum(input, *, dtype=None) -> Tensor + + Returns the sum of all elements in the :attr:`input` tensor. + + Args: + input (Tensor): the input tensor. 
+ + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + If specified, the input tensor is casted to :attr:`dtype` before the operation + is performed. This is useful for preventing data type overflows. Default: None. + + Example:: + + >>> a = torch.randn(1, 3) + >>> a + tensor([[ 0.1133, -0.9567, 0.2958]]) + >>> torch.sum(a) + tensor(-0.5475) + + .. function:: sum(input, dim, keepdim=False, *, dtype=None) -> Tensor + :noindex: + + Returns the sum of each row of the :attr:`input` tensor in the given + dimension :attr:`dim`. If :attr:`dim` is a list of dimensions, + reduce over all of them. + + + If :attr:`keepdim` is ``True``, the output tensor is of the same size + as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the + output tensor having 1 (or ``len(dim)``) fewer dimension(s). + + + Args: + input (Tensor): the input tensor. + + dim (int or tuple of ints, optional): the dimension or dimensions to reduce. + If ``None``, all dimensions are reduced. + + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + If specified, the input tensor is casted to :attr:`dtype` before the operation + is performed. This is useful for preventing data type overflows. Default: None. + + Example:: + + >>> a = torch.randn(4, 4) + >>> a + tensor([[ 0.0569, -0.2475, 0.0737, -0.3429], + [-0.2993, 0.9138, 0.9337, -1.6864], + [ 0.1132, 0.7892, -0.1003, 0.5688], + [ 0.3637, -0.9906, -0.4752, -1.5197]]) + >>> torch.sum(a, 1) + tensor([-0.4598, -0.1381, 1.3708, -2.6217]) + >>> b = torch.arange(4 * 5 * 6).view(4, 5, 6) + >>> torch.sum(b, (2, 1)) + tensor([ 435., 1335., 2235., 3135.]) + """ + ... +@overload +def sum(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], keepdim: _bool = False, *, dtype: Optional[_dtype] = None, out: Optional[Tensor] = None) -> Tensor: + r""" + sum(input, *, dtype=None) -> Tensor + + Returns the sum of all elements in the :attr:`input` tensor. + + Args: + input (Tensor): the input tensor. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + If specified, the input tensor is casted to :attr:`dtype` before the operation + is performed. This is useful for preventing data type overflows. Default: None. + + Example:: + + >>> a = torch.randn(1, 3) + >>> a + tensor([[ 0.1133, -0.9567, 0.2958]]) + >>> torch.sum(a) + tensor(-0.5475) + + .. function:: sum(input, dim, keepdim=False, *, dtype=None) -> Tensor + :noindex: + + Returns the sum of each row of the :attr:`input` tensor in the given + dimension :attr:`dim`. If :attr:`dim` is a list of dimensions, + reduce over all of them. + + + If :attr:`keepdim` is ``True``, the output tensor is of the same size + as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the + output tensor having 1 (or ``len(dim)``) fewer dimension(s). + + + Args: + input (Tensor): the input tensor. + + dim (int or tuple of ints, optional): the dimension or dimensions to reduce. + If ``None``, all dimensions are reduced. + + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. 
+ If specified, the input tensor is casted to :attr:`dtype` before the operation + is performed. This is useful for preventing data type overflows. Default: None. + + Example:: + + >>> a = torch.randn(4, 4) + >>> a + tensor([[ 0.0569, -0.2475, 0.0737, -0.3429], + [-0.2993, 0.9138, 0.9337, -1.6864], + [ 0.1132, 0.7892, -0.1003, 0.5688], + [ 0.3637, -0.9906, -0.4752, -1.5197]]) + >>> torch.sum(a, 1) + tensor([-0.4598, -0.1381, 1.3708, -2.6217]) + >>> b = torch.arange(4 * 5 * 6).view(4, 5, 6) + >>> torch.sum(b, (2, 1)) + tensor([ 435., 1335., 2235., 3135.]) + """ + ... +def svd(input: Tensor, some: _bool = True, compute_uv: _bool = True, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.svd: + r""" + svd(input, some=True, compute_uv=True, *, out=None) -> (Tensor, Tensor, Tensor) + + Computes the singular value decomposition of either a matrix or batch of + matrices :attr:`input`. The singular value decomposition is represented as a + namedtuple `(U, S, V)`, such that :attr:`input` :math:`= U \text{diag}(S) V^{\text{H}}`. + where :math:`V^{\text{H}}` is the transpose of `V` for real inputs, + and the conjugate transpose of `V` for complex inputs. + If :attr:`input` is a batch of matrices, then `U`, `S`, and `V` are also + batched with the same batch dimensions as :attr:`input`. + + If :attr:`some` is `True` (default), the method returns the reduced singular + value decomposition. In this case, if the last two dimensions of :attr:`input` are + `m` and `n`, then the returned `U` and `V` matrices will contain only + `min(n, m)` orthonormal columns. + + If :attr:`compute_uv` is `False`, the returned `U` and `V` will be + zero-filled matrices of shape `(m, m)` and `(n, n)` + respectively, and the same device as :attr:`input`. The argument :attr:`some` + has no effect when :attr:`compute_uv` is `False`. + + Supports :attr:`input` of float, double, cfloat and cdouble data types. + The dtypes of `U` and `V` are the same as :attr:`input`'s. `S` will + always be real-valued, even if :attr:`input` is complex. + + .. warning:: + + :func:`torch.svd` is deprecated in favor of :func:`torch.linalg.svd` + and will be removed in a future PyTorch release. + + ``U, S, V = torch.svd(A, some=some, compute_uv=True)`` (default) should be replaced with + + .. code:: python + + U, S, Vh = torch.linalg.svd(A, full_matrices=not some) + V = Vh.mH + + ``_, S, _ = torch.svd(A, some=some, compute_uv=False)`` should be replaced with + + .. code:: python + + S = torch.linalg.svdvals(A) + + .. note:: Differences with :func:`torch.linalg.svd`: + + * :attr:`some` is the opposite of + :func:`torch.linalg.svd`'s :attr:`full_matrices`. Note that + default value for both is `True`, so the default behavior is + effectively the opposite. + * :func:`torch.svd` returns `V`, whereas :func:`torch.linalg.svd` returns + `Vh`, that is, :math:`V^{\text{H}}`. + * If :attr:`compute_uv` is `False`, :func:`torch.svd` returns zero-filled + tensors for `U` and `Vh`, whereas :func:`torch.linalg.svd` returns + empty tensors. + + .. note:: The singular values are returned in descending order. If :attr:`input` is a batch of matrices, + then the singular values of each matrix in the batch are returned in descending order. + + .. note:: The `S` tensor can only be used to compute gradients if :attr:`compute_uv` is `True`. + + .. 
note:: When :attr:`some` is `False`, the gradients on `U[..., :, min(m, n):]` + and `V[..., :, min(m, n):]` will be ignored in the backward pass, as those vectors + can be arbitrary bases of the corresponding subspaces. + + .. note:: The implementation of :func:`torch.linalg.svd` on CPU uses LAPACK's routine `?gesdd` + (a divide-and-conquer algorithm) instead of `?gesvd` for speed. Analogously, + on GPU, it uses cuSOLVER's routines `gesvdj` and `gesvdjBatched` on CUDA 10.1.243 + and later, and MAGMA's routine `gesdd` on earlier versions of CUDA. + + .. note:: The returned `U` will not be contiguous. The matrix (or batch of matrices) will + be represented as a column-major matrix (i.e. Fortran-contiguous). + + .. warning:: The gradients with respect to `U` and `V` will only be finite when the input does not + have zero nor repeated singular values. + + .. warning:: If the distance between any two singular values is close to zero, the gradients with respect to + `U` and `V` will be numerically unstable, as they depends on + :math:`\frac{1}{\min_{i \neq j} \sigma_i^2 - \sigma_j^2}`. The same happens when the matrix + has small singular values, as these gradients also depend on `S^{-1}`. + + .. warning:: For complex-valued :attr:`input` the singular value decomposition is not unique, + as `U` and `V` may be multiplied by an arbitrary phase factor :math:`e^{i \phi}` on every column. + The same happens when :attr:`input` has repeated singular values, where one may multiply + the columns of the spanning subspace in `U` and `V` by a rotation matrix + and `the resulting vectors will span the same subspace`_. + Different platforms, like NumPy, or inputs on different device types, + may produce different `U` and `V` tensors. + + Args: + input (Tensor): the input tensor of size `(*, m, n)` where `*` is zero or more + batch dimensions consisting of `(m, n)` matrices. + some (bool, optional): controls whether to compute the reduced or full decomposition, and + consequently, the shape of returned `U` and `V`. Default: `True`. + compute_uv (bool, optional): controls whether to compute `U` and `V`. Default: `True`. + + Keyword args: + out (tuple, optional): the output tuple of tensors + + Example:: + + >>> a = torch.randn(5, 3) + >>> a + tensor([[ 0.2364, -0.7752, 0.6372], + [ 1.7201, 0.7394, -0.0504], + [-0.3371, -1.0584, 0.5296], + [ 0.3550, -0.4022, 1.5569], + [ 0.2445, -0.0158, 1.1414]]) + >>> u, s, v = torch.svd(a) + >>> u + tensor([[ 0.4027, 0.0287, 0.5434], + [-0.1946, 0.8833, 0.3679], + [ 0.4296, -0.2890, 0.5261], + [ 0.6604, 0.2717, -0.2618], + [ 0.4234, 0.2481, -0.4733]]) + >>> s + tensor([2.3289, 2.0315, 0.7806]) + >>> v + tensor([[-0.0199, 0.8766, 0.4809], + [-0.5080, 0.4054, -0.7600], + [ 0.8611, 0.2594, -0.4373]]) + >>> torch.dist(a, torch.mm(torch.mm(u, torch.diag(s)), v.t())) + tensor(8.6531e-07) + >>> a_big = torch.randn(7, 5, 3) + >>> u, s, v = torch.svd(a_big) + >>> torch.dist(a_big, torch.matmul(torch.matmul(u, torch.diag_embed(s)), v.mT)) + tensor(2.6503e-06) + + .. _the resulting vectors will span the same subspace: + (https://en.wikipedia.org/wiki/Singular_value_decomposition#Singular_values,_singular_vectors,_and_their_relation_to_the_SVD) + """ + ... +def swapaxes(input: Tensor, axis0: _int, axis1: _int) -> Tensor: + r""" + swapaxes(input, axis0, axis1) -> Tensor + + Alias for :func:`torch.transpose`. + + This function is equivalent to NumPy's swapaxes function. 
+ + Examples:: + + >>> x = torch.tensor([[[0,1],[2,3]],[[4,5],[6,7]]]) + >>> x + tensor([[[0, 1], + [2, 3]], + + [[4, 5], + [6, 7]]]) + >>> torch.swapaxes(x, 0, 1) + tensor([[[0, 1], + [4, 5]], + + [[2, 3], + [6, 7]]]) + >>> torch.swapaxes(x, 0, 2) + tensor([[[0, 4], + [2, 6]], + + [[1, 5], + [3, 7]]]) + """ + ... +def swapdims(input: Tensor, dim0: _int, dim1: _int) -> Tensor: + r""" + swapdims(input, dim0, dim1) -> Tensor + + Alias for :func:`torch.transpose`. + + This function is equivalent to NumPy's swapaxes function. + + Examples:: + + >>> x = torch.tensor([[[0,1],[2,3]],[[4,5],[6,7]]]) + >>> x + tensor([[[0, 1], + [2, 3]], + + [[4, 5], + [6, 7]]]) + >>> torch.swapdims(x, 0, 1) + tensor([[[0, 1], + [4, 5]], + + [[2, 3], + [6, 7]]]) + >>> torch.swapdims(x, 0, 2) + tensor([[[0, 4], + [2, 6]], + + [[1, 5], + [3, 7]]]) + """ + ... +def sym_constrain_range(size: Union[Number, _complex], *, min: Optional[_int] = None, max: Optional[_int] = None) -> None: ... +def sym_constrain_range_for_size(size: Union[Number, _complex], *, min: Optional[_int] = None, max: Optional[_int] = None) -> None: ... +def t(input: Tensor) -> Tensor: + r""" + t(input) -> Tensor + + Expects :attr:`input` to be <= 2-D tensor and transposes dimensions 0 + and 1. + + 0-D and 1-D tensors are returned as is. When input is a 2-D tensor this + is equivalent to ``transpose(input, 0, 1)``. + + Args: + input (Tensor): the input tensor. + + Example:: + + >>> x = torch.randn(()) + >>> x + tensor(0.1995) + >>> torch.t(x) + tensor(0.1995) + >>> x = torch.randn(3) + >>> x + tensor([ 2.4320, -0.4608, 0.7702]) + >>> torch.t(x) + tensor([ 2.4320, -0.4608, 0.7702]) + >>> x = torch.randn(2, 3) + >>> x + tensor([[ 0.4875, 0.9158, -0.5872], + [ 0.3938, -0.6929, 0.6932]]) + >>> torch.t(x) + tensor([[ 0.4875, 0.3938], + [ 0.9158, -0.6929], + [-0.5872, 0.6932]]) + + See also :func:`torch.transpose`. + """ + ... +def t_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + Performs the same operation as :func:`torch.t`, but all output tensors + are freshly created instead of aliasing the input. + """ + ... +def take(input: Tensor, index: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + take(input, index) -> Tensor + + Returns a new tensor with the elements of :attr:`input` at the given indices. + The input tensor is treated as if it were viewed as a 1-D tensor. The result + takes the same shape as the indices. + + Args: + input (Tensor): the input tensor. + index (LongTensor): the indices into tensor + + Example:: + + >>> src = torch.tensor([[4, 3, 5], + ... [6, 7, 8]]) + >>> torch.take(src, torch.tensor([0, 2, 5])) + tensor([ 4, 5, 8]) + """ + ... +def take_along_dim(input: Tensor, indices: Tensor, dim: Optional[_int] = None, *, out: Optional[Tensor] = None) -> Tensor: + r""" + take_along_dim(input, indices, dim=None, *, out=None) -> Tensor + + Selects values from :attr:`input` at the 1-dimensional indices from :attr:`indices` along the given :attr:`dim`. + + If :attr:`dim` is None, the input array is treated as if it has been flattened to 1d. + + Functions that return indices along a dimension, like :func:`torch.argmax` and :func:`torch.argsort`, + are designed to work with this function. See the examples below. + + .. note:: + This function is similar to NumPy's `take_along_axis`. + See also :func:`torch.gather`. + + Args: + input (Tensor): the input tensor. + indices (tensor): the indices into :attr:`input`. Must have long dtype. + dim (int, optional): dimension to select along. 
+ + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> t = torch.tensor([[10, 30, 20], [60, 40, 50]]) + >>> max_idx = torch.argmax(t) + >>> torch.take_along_dim(t, max_idx) + tensor([60]) + >>> sorted_idx = torch.argsort(t, dim=1) + >>> torch.take_along_dim(t, sorted_idx, dim=1) + tensor([[10, 20, 30], + [40, 50, 60]]) + """ + ... +def tan(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + tan(input, *, out=None) -> Tensor + + Returns a new tensor with the tangent of the elements of :attr:`input`. + + .. math:: + \text{out}_{i} = \tan(\text{input}_{i}) + + Args: + input (Tensor): the input tensor. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randn(4) + >>> a + tensor([-1.2027, -1.7687, 0.4412, -1.3856]) + >>> torch.tan(a) + tensor([-2.5930, 4.9859, 0.4722, -5.3366]) + """ + ... +def tan_(input: Tensor) -> Tensor: ... +def tanh(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + tanh(input, *, out=None) -> Tensor + + Returns a new tensor with the hyperbolic tangent of the elements + of :attr:`input`. + + .. math:: + \text{out}_{i} = \tanh(\text{input}_{i}) + + Args: + input (Tensor): the input tensor. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randn(4) + >>> a + tensor([ 0.8986, -0.7279, 1.1745, 0.2611]) + >>> torch.tanh(a) + tensor([ 0.7156, -0.6218, 0.8257, 0.2553]) + """ + ... +def tanh_(input: Tensor) -> Tensor: ... +def tensor(data: Any, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, pin_memory: _bool = False) -> Tensor: + r""" + tensor(data, *, dtype=None, device=None, requires_grad=False, pin_memory=False) -> Tensor + + Constructs a tensor with no autograd history (also known as a "leaf tensor", see :doc:`/notes/autograd`) by copying :attr:`data`. + + .. warning:: + + When working with tensors prefer using :func:`torch.Tensor.clone`, + :func:`torch.Tensor.detach`, and :func:`torch.Tensor.requires_grad_` for + readability. Letting `t` be a tensor, ``torch.tensor(t)`` is equivalent to + ``t.clone().detach()``, and ``torch.tensor(t, requires_grad=True)`` + is equivalent to ``t.clone().detach().requires_grad_(True)``. + + .. seealso:: + + :func:`torch.as_tensor` preserves autograd history and avoids copies where possible. + :func:`torch.from_numpy` creates a tensor that shares storage with a NumPy array. + + Args: + data (array_like): Initial data for the tensor. Can be a list, tuple, + NumPy ``ndarray``, scalar, and other types. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, infers data type from :attr:`data`. + device (:class:`torch.device`, optional): the device of the constructed tensor. If None and data is a tensor + then the device of data is used. If None and data is not a tensor then + the result tensor is constructed on the current device. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + pin_memory (bool, optional): If set, returned tensor would be allocated in + the pinned memory. Works only for CPU tensors. Default: ``False``. + + + Example:: + + >>> torch.tensor([[0.1, 1.2], [2.2, 3.1], [4.9, 5.2]]) + tensor([[ 0.1000, 1.2000], + [ 2.2000, 3.1000], + [ 4.9000, 5.2000]]) + + >>> torch.tensor([0, 1]) # Type inference on data + tensor([ 0, 1]) + + >>> torch.tensor([[0.11111, 0.222222, 0.3333333]], + ... 
dtype=torch.float64, + ... device=torch.device('cuda:0')) # creates a double tensor on a CUDA device + tensor([[ 0.1111, 0.2222, 0.3333]], dtype=torch.float64, device='cuda:0') + + >>> torch.tensor(3.14159) # Create a zero-dimensional (scalar) tensor + tensor(3.1416) + + >>> torch.tensor([]) # Create an empty tensor (of size (0,)) + tensor([]) + """ + ... +@overload +def tensor_split(input: Tensor, tensor_indices_or_sections: Tensor, dim: _int = 0) -> Tuple[Tensor, ...]: + r""" + tensor_split(input, indices_or_sections, dim=0) -> List of Tensors + + Splits a tensor into multiple sub-tensors, all of which are views of :attr:`input`, + along dimension :attr:`dim` according to the indices or number of sections specified + by :attr:`indices_or_sections`. This function is based on NumPy's + :func:`numpy.array_split`. + + Args: + input (Tensor): the tensor to split + indices_or_sections (Tensor, int or list or tuple of ints): + If :attr:`indices_or_sections` is an integer ``n`` or a zero dimensional long tensor + with value ``n``, :attr:`input` is split into ``n`` sections along dimension :attr:`dim`. + If :attr:`input` is divisible by ``n`` along dimension :attr:`dim`, each + section will be of equal size, :code:`input.size(dim) / n`. If :attr:`input` + is not divisible by ``n``, the sizes of the first :code:`int(input.size(dim) % n)` + sections will have size :code:`int(input.size(dim) / n) + 1`, and the rest will + have size :code:`int(input.size(dim) / n)`. + + If :attr:`indices_or_sections` is a list or tuple of ints, or a one-dimensional long + tensor, then :attr:`input` is split along dimension :attr:`dim` at each of the indices + in the list, tuple or tensor. For instance, :code:`indices_or_sections=[2, 3]` and :code:`dim=0` + would result in the tensors :code:`input[:2]`, :code:`input[2:3]`, and :code:`input[3:]`. + + If :attr:`indices_or_sections` is a tensor, it must be a zero-dimensional or one-dimensional + long tensor on the CPU. + + dim (int, optional): dimension along which to split the tensor. Default: ``0`` + + Example:: + + >>> x = torch.arange(8) + >>> torch.tensor_split(x, 3) + (tensor([0, 1, 2]), tensor([3, 4, 5]), tensor([6, 7])) + + >>> x = torch.arange(7) + >>> torch.tensor_split(x, 3) + (tensor([0, 1, 2]), tensor([3, 4]), tensor([5, 6])) + >>> torch.tensor_split(x, (1, 6)) + (tensor([0]), tensor([1, 2, 3, 4, 5]), tensor([6])) + + >>> x = torch.arange(14).reshape(2, 7) + >>> x + tensor([[ 0, 1, 2, 3, 4, 5, 6], + [ 7, 8, 9, 10, 11, 12, 13]]) + >>> torch.tensor_split(x, 3, dim=1) + (tensor([[0, 1, 2], + [7, 8, 9]]), + tensor([[ 3, 4], + [10, 11]]), + tensor([[ 5, 6], + [12, 13]])) + >>> torch.tensor_split(x, (1, 6), dim=1) + (tensor([[0], + [7]]), + tensor([[ 1, 2, 3, 4, 5], + [ 8, 9, 10, 11, 12]]), + tensor([[ 6], + [13]])) + """ + ... +@overload +def tensor_split(input: Tensor, sections: Union[_int, SymInt], dim: _int = 0) -> Tuple[Tensor, ...]: + r""" + tensor_split(input, indices_or_sections, dim=0) -> List of Tensors + + Splits a tensor into multiple sub-tensors, all of which are views of :attr:`input`, + along dimension :attr:`dim` according to the indices or number of sections specified + by :attr:`indices_or_sections`. This function is based on NumPy's + :func:`numpy.array_split`. + + Args: + input (Tensor): the tensor to split + indices_or_sections (Tensor, int or list or tuple of ints): + If :attr:`indices_or_sections` is an integer ``n`` or a zero dimensional long tensor + with value ``n``, :attr:`input` is split into ``n`` sections along dimension :attr:`dim`. 
+ If :attr:`input` is divisible by ``n`` along dimension :attr:`dim`, each + section will be of equal size, :code:`input.size(dim) / n`. If :attr:`input` + is not divisible by ``n``, the sizes of the first :code:`int(input.size(dim) % n)` + sections will have size :code:`int(input.size(dim) / n) + 1`, and the rest will + have size :code:`int(input.size(dim) / n)`. + + If :attr:`indices_or_sections` is a list or tuple of ints, or a one-dimensional long + tensor, then :attr:`input` is split along dimension :attr:`dim` at each of the indices + in the list, tuple or tensor. For instance, :code:`indices_or_sections=[2, 3]` and :code:`dim=0` + would result in the tensors :code:`input[:2]`, :code:`input[2:3]`, and :code:`input[3:]`. + + If :attr:`indices_or_sections` is a tensor, it must be a zero-dimensional or one-dimensional + long tensor on the CPU. + + dim (int, optional): dimension along which to split the tensor. Default: ``0`` + + Example:: + + >>> x = torch.arange(8) + >>> torch.tensor_split(x, 3) + (tensor([0, 1, 2]), tensor([3, 4, 5]), tensor([6, 7])) + + >>> x = torch.arange(7) + >>> torch.tensor_split(x, 3) + (tensor([0, 1, 2]), tensor([3, 4]), tensor([5, 6])) + >>> torch.tensor_split(x, (1, 6)) + (tensor([0]), tensor([1, 2, 3, 4, 5]), tensor([6])) + + >>> x = torch.arange(14).reshape(2, 7) + >>> x + tensor([[ 0, 1, 2, 3, 4, 5, 6], + [ 7, 8, 9, 10, 11, 12, 13]]) + >>> torch.tensor_split(x, 3, dim=1) + (tensor([[0, 1, 2], + [7, 8, 9]]), + tensor([[ 3, 4], + [10, 11]]), + tensor([[ 5, 6], + [12, 13]])) + >>> torch.tensor_split(x, (1, 6), dim=1) + (tensor([[0], + [7]]), + tensor([[ 1, 2, 3, 4, 5], + [ 8, 9, 10, 11, 12]]), + tensor([[ 6], + [13]])) + """ + ... +@overload +def tensor_split(input: Tensor, indices: Sequence[Union[_int, SymInt]], dim: _int = 0) -> Tuple[Tensor, ...]: + r""" + tensor_split(input, indices_or_sections, dim=0) -> List of Tensors + + Splits a tensor into multiple sub-tensors, all of which are views of :attr:`input`, + along dimension :attr:`dim` according to the indices or number of sections specified + by :attr:`indices_or_sections`. This function is based on NumPy's + :func:`numpy.array_split`. + + Args: + input (Tensor): the tensor to split + indices_or_sections (Tensor, int or list or tuple of ints): + If :attr:`indices_or_sections` is an integer ``n`` or a zero dimensional long tensor + with value ``n``, :attr:`input` is split into ``n`` sections along dimension :attr:`dim`. + If :attr:`input` is divisible by ``n`` along dimension :attr:`dim`, each + section will be of equal size, :code:`input.size(dim) / n`. If :attr:`input` + is not divisible by ``n``, the sizes of the first :code:`int(input.size(dim) % n)` + sections will have size :code:`int(input.size(dim) / n) + 1`, and the rest will + have size :code:`int(input.size(dim) / n)`. + + If :attr:`indices_or_sections` is a list or tuple of ints, or a one-dimensional long + tensor, then :attr:`input` is split along dimension :attr:`dim` at each of the indices + in the list, tuple or tensor. For instance, :code:`indices_or_sections=[2, 3]` and :code:`dim=0` + would result in the tensors :code:`input[:2]`, :code:`input[2:3]`, and :code:`input[3:]`. + + If :attr:`indices_or_sections` is a tensor, it must be a zero-dimensional or one-dimensional + long tensor on the CPU. + + dim (int, optional): dimension along which to split the tensor. 
Default: ``0`` + + Example:: + + >>> x = torch.arange(8) + >>> torch.tensor_split(x, 3) + (tensor([0, 1, 2]), tensor([3, 4, 5]), tensor([6, 7])) + + >>> x = torch.arange(7) + >>> torch.tensor_split(x, 3) + (tensor([0, 1, 2]), tensor([3, 4]), tensor([5, 6])) + >>> torch.tensor_split(x, (1, 6)) + (tensor([0]), tensor([1, 2, 3, 4, 5]), tensor([6])) + + >>> x = torch.arange(14).reshape(2, 7) + >>> x + tensor([[ 0, 1, 2, 3, 4, 5, 6], + [ 7, 8, 9, 10, 11, 12, 13]]) + >>> torch.tensor_split(x, 3, dim=1) + (tensor([[0, 1, 2], + [7, 8, 9]]), + tensor([[ 3, 4], + [10, 11]]), + tensor([[ 5, 6], + [12, 13]])) + >>> torch.tensor_split(x, (1, 6), dim=1) + (tensor([[0], + [7]]), + tensor([[ 1, 2, 3, 4, 5], + [ 8, 9, 10, 11, 12]]), + tensor([[ 6], + [13]])) + """ + ... +def threshold(input: Tensor, threshold: Union[Number, _complex], value: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: ... +def threshold_(input: Tensor, threshold: Union[Number, _complex], value: Union[Number, _complex]) -> Tensor: ... +def tile(input: Tensor, dims: Sequence[Union[_int, SymInt]]) -> Tensor: + r""" + tile(input, dims) -> Tensor + + Constructs a tensor by repeating the elements of :attr:`input`. + The :attr:`dims` argument specifies the number of repetitions + in each dimension. + + If :attr:`dims` specifies fewer dimensions than :attr:`input` has, then + ones are prepended to :attr:`dims` until all dimensions are specified. + For example, if :attr:`input` has shape (8, 6, 4, 2) and :attr:`dims` + is (2, 2), then :attr:`dims` is treated as (1, 1, 2, 2). + + Analogously, if :attr:`input` has fewer dimensions than :attr:`dims` + specifies, then :attr:`input` is treated as if it were unsqueezed at + dimension zero until it has as many dimensions as :attr:`dims` specifies. + For example, if :attr:`input` has shape (4, 2) and :attr:`dims` + is (3, 3, 2, 2), then :attr:`input` is treated as if it had the + shape (1, 1, 4, 2). + + .. note:: + + This function is similar to NumPy's tile function. + + Args: + input (Tensor): the tensor whose elements to repeat. + dims (tuple): the number of repetitions per dimension. + + Example:: + + >>> x = torch.tensor([1, 2, 3]) + >>> x.tile((2,)) + tensor([1, 2, 3, 1, 2, 3]) + >>> y = torch.tensor([[1, 2], [3, 4]]) + >>> torch.tile(y, (2, 2)) + tensor([[1, 2, 1, 2], + [3, 4, 3, 4], + [1, 2, 1, 2], + [3, 4, 3, 4]]) + """ + ... +def topk(input: Tensor, k: Union[_int, SymInt], dim: _int = -1, largest: _bool = True, sorted: _bool = True, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.topk: + r""" + topk(input, k, dim=None, largest=True, sorted=True, *, out=None) -> (Tensor, LongTensor) + + Returns the :attr:`k` largest elements of the given :attr:`input` tensor along + a given dimension. + + If :attr:`dim` is not given, the last dimension of the `input` is chosen. + + If :attr:`largest` is ``False`` then the `k` smallest elements are returned. + + A namedtuple of `(values, indices)` is returned with the `values` and + `indices` of the largest `k` elements of each row of the `input` tensor in the + given dimension `dim`. + + The boolean option :attr:`sorted` if ``True``, will make sure that the returned + `k` elements are themselves sorted + + Args: + input (Tensor): the input tensor. 
+ k (int): the k in "top-k" + dim (int, optional): the dimension to sort along + largest (bool, optional): controls whether to return largest or + smallest elements + sorted (bool, optional): controls whether to return the elements + in sorted order + + Keyword args: + out (tuple, optional): the output tuple of (Tensor, LongTensor) that can be + optionally given to be used as output buffers + + Example:: + + >>> x = torch.arange(1., 6.) + >>> x + tensor([ 1., 2., 3., 4., 5.]) + >>> torch.topk(x, 3) + torch.return_types.topk(values=tensor([5., 4., 3.]), indices=tensor([4, 3, 2])) + """ + ... +def trace(input: Tensor) -> Tensor: + r""" + trace(input) -> Tensor + + Returns the sum of the elements of the diagonal of the input 2-D matrix. + + Example:: + + >>> x = torch.arange(1., 10.).view(3, 3) + >>> x + tensor([[ 1., 2., 3.], + [ 4., 5., 6.], + [ 7., 8., 9.]]) + >>> torch.trace(x) + tensor(15.) + """ + ... +@overload +def transpose(input: Tensor, dim0: _int, dim1: _int) -> Tensor: + r""" + transpose(input, dim0, dim1) -> Tensor + + Returns a tensor that is a transposed version of :attr:`input`. + The given dimensions :attr:`dim0` and :attr:`dim1` are swapped. + + If :attr:`input` is a strided tensor then the resulting :attr:`out` + tensor shares its underlying storage with the :attr:`input` tensor, so + changing the content of one would change the content of the other. + + If :attr:`input` is a :ref:`sparse tensor ` then the + resulting :attr:`out` tensor *does not* share the underlying storage + with the :attr:`input` tensor. + + If :attr:`input` is a :ref:`sparse tensor ` with compressed + layout (SparseCSR, SparseBSR, SparseCSC or SparseBSC) the arguments + :attr:`dim0` and :attr:`dim1` must be both batch dimensions, or must + both be sparse dimensions. The batch dimensions of a sparse tensor are the + dimensions preceding the sparse dimensions. + + .. note:: + Transpositions which interchange the sparse dimensions of a `SparseCSR` + or `SparseCSC` layout tensor will result in the layout changing between + the two options. Transposition of the sparse dimensions of a ` SparseBSR` + or `SparseBSC` layout tensor will likewise generate a result with the + opposite layout. + + + Args: + input (Tensor): the input tensor. + dim0 (int): the first dimension to be transposed + dim1 (int): the second dimension to be transposed + + Example:: + + >>> x = torch.randn(2, 3) + >>> x + tensor([[ 1.0028, -0.9893, 0.5809], + [-0.1669, 0.7299, 0.4942]]) + >>> torch.transpose(x, 0, 1) + tensor([[ 1.0028, -0.1669], + [-0.9893, 0.7299], + [ 0.5809, 0.4942]]) + + See also :func:`torch.t`. + """ + ... +@overload +def transpose(input: Tensor, dim0: Union[str, ellipsis, None], dim1: Union[str, ellipsis, None]) -> Tensor: + r""" + transpose(input, dim0, dim1) -> Tensor + + Returns a tensor that is a transposed version of :attr:`input`. + The given dimensions :attr:`dim0` and :attr:`dim1` are swapped. + + If :attr:`input` is a strided tensor then the resulting :attr:`out` + tensor shares its underlying storage with the :attr:`input` tensor, so + changing the content of one would change the content of the other. + + If :attr:`input` is a :ref:`sparse tensor ` then the + resulting :attr:`out` tensor *does not* share the underlying storage + with the :attr:`input` tensor. + + If :attr:`input` is a :ref:`sparse tensor ` with compressed + layout (SparseCSR, SparseBSR, SparseCSC or SparseBSC) the arguments + :attr:`dim0` and :attr:`dim1` must be both batch dimensions, or must + both be sparse dimensions. 
The batch dimensions of a sparse tensor are the + dimensions preceding the sparse dimensions. + + .. note:: + Transpositions which interchange the sparse dimensions of a `SparseCSR` + or `SparseCSC` layout tensor will result in the layout changing between + the two options. Transposition of the sparse dimensions of a ` SparseBSR` + or `SparseBSC` layout tensor will likewise generate a result with the + opposite layout. + + + Args: + input (Tensor): the input tensor. + dim0 (int): the first dimension to be transposed + dim1 (int): the second dimension to be transposed + + Example:: + + >>> x = torch.randn(2, 3) + >>> x + tensor([[ 1.0028, -0.9893, 0.5809], + [-0.1669, 0.7299, 0.4942]]) + >>> torch.transpose(x, 0, 1) + tensor([[ 1.0028, -0.1669], + [-0.9893, 0.7299], + [ 0.5809, 0.4942]]) + + See also :func:`torch.t`. + """ + ... +def transpose_copy(input: Tensor, dim0: _int, dim1: _int, *, out: Optional[Tensor] = None) -> Tensor: + r""" + Performs the same operation as :func:`torch.transpose`, but all output tensors + are freshly created instead of aliasing the input. + """ + ... +@overload +def trapezoid(y: Tensor, x: Tensor, *, dim: _int = -1) -> Tensor: + r""" + trapezoid(y, x=None, *, dx=None, dim=-1) -> Tensor + + Computes the `trapezoidal rule `_ along + :attr:`dim`. By default the spacing between elements is assumed to be 1, but + :attr:`dx` can be used to specify a different constant spacing, and :attr:`x` can be + used to specify arbitrary spacing along :attr:`dim`. + + + Assuming :attr:`y` is a one-dimensional tensor with elements :math:`{y_0, y_1, ..., y_n}`, + the default computation is + + .. math:: + \begin{aligned} + \sum_{i = 1}^{n-1} \frac{1}{2} (y_i + y_{i-1}) + \end{aligned} + + When :attr:`dx` is specified the computation becomes + + .. math:: + \begin{aligned} + \sum_{i = 1}^{n-1} \frac{\Delta x}{2} (y_i + y_{i-1}) + \end{aligned} + + effectively multiplying the result by :attr:`dx`. When :attr:`x` is specified, + assuming :attr:`x` is also a one-dimensional tensor with + elements :math:`{x_0, x_1, ..., x_n}`, the computation becomes + + .. math:: + \begin{aligned} + \sum_{i = 1}^{n-1} \frac{(x_i - x_{i-1})}{2} (y_i + y_{i-1}) + \end{aligned} + + When :attr:`x` and :attr:`y` have the same size, the computation is as described above and no broadcasting is needed. + The broadcasting behavior of this function is as follows when their sizes are different. For both :attr:`x` + and :attr:`y`, the function computes the difference between consecutive elements along + dimension :attr:`dim`. This effectively creates two tensors, `x_diff` and `y_diff`, that have + the same shape as the original tensors except their lengths along the dimension :attr:`dim` is reduced by 1. + After that, those two tensors are broadcast together to compute final output as part of the trapezoidal rule. + See the examples below for details. + + .. note:: + The trapezoidal rule is a technique for approximating the definite integral of a function + by averaging its left and right Riemann sums. The approximation becomes more accurate as + the resolution of the partition increases. + + Arguments: + y (Tensor): Values to use when computing the trapezoidal rule. + x (Tensor): If specified, defines spacing between values as specified above. + + Keyword arguments: + dx (float): constant spacing between values. If neither :attr:`x` or :attr:`dx` + are specified then this defaults to 1. Effectively multiplies the result by its value. + dim (int): The dimension along which to compute the trapezoidal rule. 
+ The last (inner-most) dimension by default. + + Examples:: + + >>> # Computes the trapezoidal rule in 1D, spacing is implicitly 1 + >>> y = torch.tensor([1, 5, 10]) + >>> torch.trapezoid(y) + tensor(10.5) + + >>> # Computes the same trapezoidal rule directly to verify + >>> (1 + 10 + 10) / 2 + 10.5 + + >>> # Computes the trapezoidal rule in 1D with constant spacing of 2 + >>> # NOTE: the result is the same as before, but multiplied by 2 + >>> torch.trapezoid(y, dx=2) + 21.0 + + >>> # Computes the trapezoidal rule in 1D with arbitrary spacing + >>> x = torch.tensor([1, 3, 6]) + >>> torch.trapezoid(y, x) + 28.5 + + >>> # Computes the same trapezoidal rule directly to verify + >>> ((3 - 1) * (1 + 5) + (6 - 3) * (5 + 10)) / 2 + 28.5 + + >>> # Computes the trapezoidal rule for each row of a 3x3 matrix + >>> y = torch.arange(9).reshape(3, 3) + tensor([[0, 1, 2], + [3, 4, 5], + [6, 7, 8]]) + >>> torch.trapezoid(y) + tensor([ 2., 8., 14.]) + + >>> # Computes the trapezoidal rule for each column of the matrix + >>> torch.trapezoid(y, dim=0) + tensor([ 6., 8., 10.]) + + >>> # Computes the trapezoidal rule for each row of a 3x3 ones matrix + >>> # with the same arbitrary spacing + >>> y = torch.ones(3, 3) + >>> x = torch.tensor([1, 3, 6]) + >>> torch.trapezoid(y, x) + array([5., 5., 5.]) + + >>> # Computes the trapezoidal rule for each row of a 3x3 ones matrix + >>> # with different arbitrary spacing per row + >>> y = torch.ones(3, 3) + >>> x = torch.tensor([[1, 2, 3], [1, 3, 5], [1, 4, 7]]) + >>> torch.trapezoid(y, x) + array([2., 4., 6.]) + """ + ... +@overload +def trapezoid(y: Tensor, *, dx: Union[Number, _complex] = 1, dim: _int = -1) -> Tensor: + r""" + trapezoid(y, x=None, *, dx=None, dim=-1) -> Tensor + + Computes the `trapezoidal rule `_ along + :attr:`dim`. By default the spacing between elements is assumed to be 1, but + :attr:`dx` can be used to specify a different constant spacing, and :attr:`x` can be + used to specify arbitrary spacing along :attr:`dim`. + + + Assuming :attr:`y` is a one-dimensional tensor with elements :math:`{y_0, y_1, ..., y_n}`, + the default computation is + + .. math:: + \begin{aligned} + \sum_{i = 1}^{n-1} \frac{1}{2} (y_i + y_{i-1}) + \end{aligned} + + When :attr:`dx` is specified the computation becomes + + .. math:: + \begin{aligned} + \sum_{i = 1}^{n-1} \frac{\Delta x}{2} (y_i + y_{i-1}) + \end{aligned} + + effectively multiplying the result by :attr:`dx`. When :attr:`x` is specified, + assuming :attr:`x` is also a one-dimensional tensor with + elements :math:`{x_0, x_1, ..., x_n}`, the computation becomes + + .. math:: + \begin{aligned} + \sum_{i = 1}^{n-1} \frac{(x_i - x_{i-1})}{2} (y_i + y_{i-1}) + \end{aligned} + + When :attr:`x` and :attr:`y` have the same size, the computation is as described above and no broadcasting is needed. + The broadcasting behavior of this function is as follows when their sizes are different. For both :attr:`x` + and :attr:`y`, the function computes the difference between consecutive elements along + dimension :attr:`dim`. This effectively creates two tensors, `x_diff` and `y_diff`, that have + the same shape as the original tensors except their lengths along the dimension :attr:`dim` is reduced by 1. + After that, those two tensors are broadcast together to compute final output as part of the trapezoidal rule. + See the examples below for details. + + .. note:: + The trapezoidal rule is a technique for approximating the definite integral of a function + by averaging its left and right Riemann sums. 
The approximation becomes more accurate as + the resolution of the partition increases. + + Arguments: + y (Tensor): Values to use when computing the trapezoidal rule. + x (Tensor): If specified, defines spacing between values as specified above. + + Keyword arguments: + dx (float): constant spacing between values. If neither :attr:`x` or :attr:`dx` + are specified then this defaults to 1. Effectively multiplies the result by its value. + dim (int): The dimension along which to compute the trapezoidal rule. + The last (inner-most) dimension by default. + + Examples:: + + >>> # Computes the trapezoidal rule in 1D, spacing is implicitly 1 + >>> y = torch.tensor([1, 5, 10]) + >>> torch.trapezoid(y) + tensor(10.5) + + >>> # Computes the same trapezoidal rule directly to verify + >>> (1 + 10 + 10) / 2 + 10.5 + + >>> # Computes the trapezoidal rule in 1D with constant spacing of 2 + >>> # NOTE: the result is the same as before, but multiplied by 2 + >>> torch.trapezoid(y, dx=2) + 21.0 + + >>> # Computes the trapezoidal rule in 1D with arbitrary spacing + >>> x = torch.tensor([1, 3, 6]) + >>> torch.trapezoid(y, x) + 28.5 + + >>> # Computes the same trapezoidal rule directly to verify + >>> ((3 - 1) * (1 + 5) + (6 - 3) * (5 + 10)) / 2 + 28.5 + + >>> # Computes the trapezoidal rule for each row of a 3x3 matrix + >>> y = torch.arange(9).reshape(3, 3) + tensor([[0, 1, 2], + [3, 4, 5], + [6, 7, 8]]) + >>> torch.trapezoid(y) + tensor([ 2., 8., 14.]) + + >>> # Computes the trapezoidal rule for each column of the matrix + >>> torch.trapezoid(y, dim=0) + tensor([ 6., 8., 10.]) + + >>> # Computes the trapezoidal rule for each row of a 3x3 ones matrix + >>> # with the same arbitrary spacing + >>> y = torch.ones(3, 3) + >>> x = torch.tensor([1, 3, 6]) + >>> torch.trapezoid(y, x) + array([5., 5., 5.]) + + >>> # Computes the trapezoidal rule for each row of a 3x3 ones matrix + >>> # with different arbitrary spacing per row + >>> y = torch.ones(3, 3) + >>> x = torch.tensor([[1, 2, 3], [1, 3, 5], [1, 4, 7]]) + >>> torch.trapezoid(y, x) + array([2., 4., 6.]) + """ + ... +@overload +def trapz(y: Tensor, *, dx: _float = 1, dim: _int = -1) -> Tensor: + r""" + trapz(y, x, *, dim=-1) -> Tensor + + Alias for :func:`torch.trapezoid`. + """ + ... +@overload +def trapz(y: Tensor, x: Tensor, *, dim: _int = -1) -> Tensor: + r""" + trapz(y, x, *, dim=-1) -> Tensor + + Alias for :func:`torch.trapezoid`. + """ + ... +def triangular_solve(input: Tensor, A: Tensor, upper: _bool = True, transpose: _bool = False, unitriangular: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.triangular_solve: + r""" + triangular_solve(b, A, upper=True, transpose=False, unitriangular=False, *, out=None) -> (Tensor, Tensor) + + Solves a system of equations with a square upper or lower triangular invertible matrix :math:`A` + and multiple right-hand sides :math:`b`. + + In symbols, it solves :math:`AX = b` and assumes :math:`A` is square upper-triangular + (or lower-triangular if :attr:`upper`\ `= False`) and does not have zeros on the diagonal. + + `torch.triangular_solve(b, A)` can take in 2D inputs `b, A` or inputs that are + batches of 2D matrices. If the inputs are batches, then returns + batched outputs `X` + + If the diagonal of :attr:`A` contains zeros or elements that are very close to zero and + :attr:`unitriangular`\ `= False` (default) or if the input matrix is badly conditioned, + the result may contain `NaN` s. + + Supports input of float, double, cfloat and cdouble data types. 
+ + .. warning:: + + :func:`torch.triangular_solve` is deprecated in favor of :func:`torch.linalg.solve_triangular` + and will be removed in a future PyTorch release. + :func:`torch.linalg.solve_triangular` has its arguments reversed and does not return a + copy of one of the inputs. + + ``X = torch.triangular_solve(B, A).solution`` should be replaced with + + .. code:: python + + X = torch.linalg.solve_triangular(A, B) + + Args: + b (Tensor): multiple right-hand sides of size :math:`(*, m, k)` where + :math:`*` is zero of more batch dimensions + A (Tensor): the input triangular coefficient matrix of size :math:`(*, m, m)` + where :math:`*` is zero or more batch dimensions + upper (bool, optional): whether :math:`A` is upper or lower triangular. Default: ``True``. + transpose (bool, optional): solves `op(A)X = b` where `op(A) = A^T` if this flag is ``True``, + and `op(A) = A` if it is ``False``. Default: ``False``. + unitriangular (bool, optional): whether :math:`A` is unit triangular. + If True, the diagonal elements of :math:`A` are assumed to be + 1 and not referenced from :math:`A`. Default: ``False``. + + Keyword args: + out ((Tensor, Tensor), optional): tuple of two tensors to write + the output to. Ignored if `None`. Default: `None`. + + Returns: + A namedtuple `(solution, cloned_coefficient)` where `cloned_coefficient` + is a clone of :math:`A` and `solution` is the solution :math:`X` to :math:`AX = b` + (or whatever variant of the system of equations, depending on the keyword arguments.) + + Examples:: + + >>> A = torch.randn(2, 2).triu() + >>> A + tensor([[ 1.1527, -1.0753], + [ 0.0000, 0.7986]]) + >>> b = torch.randn(2, 3) + >>> b + tensor([[-0.0210, 2.3513, -1.5492], + [ 1.5429, 0.7403, -1.0243]]) + >>> torch.triangular_solve(b, A) + torch.return_types.triangular_solve( + solution=tensor([[ 1.7841, 2.9046, -2.5405], + [ 1.9320, 0.9270, -1.2826]]), + cloned_coefficient=tensor([[ 1.1527, -1.0753], + [ 0.0000, 0.7986]])) + """ + ... +def tril(input: Tensor, diagonal: _int = 0, *, out: Optional[Tensor] = None) -> Tensor: + r""" + tril(input, diagonal=0, *, out=None) -> Tensor + + Returns the lower triangular part of the matrix (2-D tensor) or batch of matrices + :attr:`input`, the other elements of the result tensor :attr:`out` are set to 0. + + The lower triangular part of the matrix is defined as the elements on and + below the diagonal. + + The argument :attr:`diagonal` controls which diagonal to consider. If + :attr:`diagonal` = 0, all elements on and below the main diagonal are + retained. A positive value includes just as many diagonals above the main + diagonal, and similarly a negative value excludes just as many diagonals below + the main diagonal. The main diagonal are the set of indices + :math:`\lbrace (i, i) \rbrace` for :math:`i \in [0, \min\{d_{1}, d_{2}\} - 1]` where + :math:`d_{1}, d_{2}` are the dimensions of the matrix. + + Args: + input (Tensor): the input tensor. + diagonal (int, optional): the diagonal to consider + + Keyword args: + out (Tensor, optional): the output tensor. 
+ + Example:: + + >>> a = torch.randn(3, 3) + >>> a + tensor([[-1.0813, -0.8619, 0.7105], + [ 0.0935, 0.1380, 2.2112], + [-0.3409, -0.9828, 0.0289]]) + >>> torch.tril(a) + tensor([[-1.0813, 0.0000, 0.0000], + [ 0.0935, 0.1380, 0.0000], + [-0.3409, -0.9828, 0.0289]]) + + >>> b = torch.randn(4, 6) + >>> b + tensor([[ 1.2219, 0.5653, -0.2521, -0.2345, 1.2544, 0.3461], + [ 0.4785, -0.4477, 0.6049, 0.6368, 0.8775, 0.7145], + [ 1.1502, 3.2716, -1.1243, -0.5413, 0.3615, 0.6864], + [-0.0614, -0.7344, -1.3164, -0.7648, -1.4024, 0.0978]]) + >>> torch.tril(b, diagonal=1) + tensor([[ 1.2219, 0.5653, 0.0000, 0.0000, 0.0000, 0.0000], + [ 0.4785, -0.4477, 0.6049, 0.0000, 0.0000, 0.0000], + [ 1.1502, 3.2716, -1.1243, -0.5413, 0.0000, 0.0000], + [-0.0614, -0.7344, -1.3164, -0.7648, -1.4024, 0.0000]]) + >>> torch.tril(b, diagonal=-1) + tensor([[ 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000], + [ 0.4785, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000], + [ 1.1502, 3.2716, 0.0000, 0.0000, 0.0000, 0.0000], + [-0.0614, -0.7344, -1.3164, 0.0000, 0.0000, 0.0000]]) + """ + ... +def tril_indices(row: _int, col: _int, offset: _int = 0, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + tril_indices(row, col, offset=0, *, dtype=torch.long, device='cpu', layout=torch.strided) -> Tensor + + Returns the indices of the lower triangular part of a :attr:`row`-by- + :attr:`col` matrix in a 2-by-N Tensor, where the first row contains row + coordinates of all indices and the second row contains column coordinates. + Indices are ordered based on rows and then columns. + + The lower triangular part of the matrix is defined as the elements on and + below the diagonal. + + The argument :attr:`offset` controls which diagonal to consider. If + :attr:`offset` = 0, all elements on and below the main diagonal are + retained. A positive value includes just as many diagonals above the main + diagonal, and similarly a negative value excludes just as many diagonals below + the main diagonal. The main diagonal are the set of indices + :math:`\lbrace (i, i) \rbrace` for :math:`i \in [0, \min\{d_{1}, d_{2}\} - 1]` + where :math:`d_{1}, d_{2}` are the dimensions of the matrix. + + .. note:: + When running on CUDA, ``row * col`` must be less than :math:`2^{59}` to + prevent overflow during calculation. + + Args: + row (``int``): number of rows in the 2-D matrix. + col (``int``): number of columns in the 2-D matrix. + offset (``int``): diagonal offset from the main diagonal. + Default: if not provided, 0. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, ``torch.long``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + layout (:class:`torch.layout`, optional): currently only support ``torch.strided``. + + Example:: + + >>> a = torch.tril_indices(3, 3) + >>> a + tensor([[0, 1, 1, 2, 2, 2], + [0, 0, 1, 0, 1, 2]]) + + >>> a = torch.tril_indices(4, 3, -1) + >>> a + tensor([[1, 2, 2, 3, 3, 3], + [0, 0, 1, 0, 1, 2]]) + + >>> a = torch.tril_indices(4, 3, 1) + >>> a + tensor([[0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3], + [0, 1, 0, 1, 2, 0, 1, 2, 0, 1, 2]]) + """ + ... 
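+# Illustrative sketch (not part of the generated stub): the docstrings above
+# describe torch.tril and torch.tril_indices; the hypothetical helper below
+# shows how the 2-by-N coordinate tensor returned by tril_indices can be used
+# to gather and rebuild the lower triangle of a matrix.  The helper name and
+# sample values are assumptions made for illustration only.
+def _example_tril_indices_usage() -> None:  # hypothetical helper, illustration only
+    import torch
+
+    a = torch.arange(9.0).reshape(3, 3)
+    idx = torch.tril_indices(3, 3)        # shape (2, 6): row coords, then column coords
+
+    # Gather the lower-triangular entries with advanced indexing.
+    lower_entries = a[idx[0], idx[1]]
+
+    # Scatter them back into a zero matrix; the result matches torch.tril.
+    rebuilt = torch.zeros_like(a)
+    rebuilt[idx[0], idx[1]] = lower_entries
+    assert torch.equal(rebuilt, torch.tril(a))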
+def triplet_margin_loss(anchor: Tensor, positive: Tensor, negative: Tensor, margin: _float = 1.0, p: _float = 2, eps: _float = 1e-06, swap: _bool = False, reduction: _int = 1) -> Tensor: ... +def triu(input: Tensor, diagonal: _int = 0, *, out: Optional[Tensor] = None) -> Tensor: + r""" + triu(input, diagonal=0, *, out=None) -> Tensor + + Returns the upper triangular part of a matrix (2-D tensor) or batch of matrices + :attr:`input`, the other elements of the result tensor :attr:`out` are set to 0. + + The upper triangular part of the matrix is defined as the elements on and + above the diagonal. + + The argument :attr:`diagonal` controls which diagonal to consider. If + :attr:`diagonal` = 0, all elements on and above the main diagonal are + retained. A positive value excludes just as many diagonals above the main + diagonal, and similarly a negative value includes just as many diagonals below + the main diagonal. The main diagonal are the set of indices + :math:`\lbrace (i, i) \rbrace` for :math:`i \in [0, \min\{d_{1}, d_{2}\} - 1]` where + :math:`d_{1}, d_{2}` are the dimensions of the matrix. + + Args: + input (Tensor): the input tensor. + diagonal (int, optional): the diagonal to consider + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randn(3, 3) + >>> a + tensor([[ 0.2309, 0.5207, 2.0049], + [ 0.2072, -1.0680, 0.6602], + [ 0.3480, -0.5211, -0.4573]]) + >>> torch.triu(a) + tensor([[ 0.2309, 0.5207, 2.0049], + [ 0.0000, -1.0680, 0.6602], + [ 0.0000, 0.0000, -0.4573]]) + >>> torch.triu(a, diagonal=1) + tensor([[ 0.0000, 0.5207, 2.0049], + [ 0.0000, 0.0000, 0.6602], + [ 0.0000, 0.0000, 0.0000]]) + >>> torch.triu(a, diagonal=-1) + tensor([[ 0.2309, 0.5207, 2.0049], + [ 0.2072, -1.0680, 0.6602], + [ 0.0000, -0.5211, -0.4573]]) + + >>> b = torch.randn(4, 6) + >>> b + tensor([[ 0.5876, -0.0794, -1.8373, 0.6654, 0.2604, 1.5235], + [-0.2447, 0.9556, -1.2919, 1.3378, -0.1768, -1.0857], + [ 0.4333, 0.3146, 0.6576, -1.0432, 0.9348, -0.4410], + [-0.9888, 1.0679, -1.3337, -1.6556, 0.4798, 0.2830]]) + >>> torch.triu(b, diagonal=1) + tensor([[ 0.0000, -0.0794, -1.8373, 0.6654, 0.2604, 1.5235], + [ 0.0000, 0.0000, -1.2919, 1.3378, -0.1768, -1.0857], + [ 0.0000, 0.0000, 0.0000, -1.0432, 0.9348, -0.4410], + [ 0.0000, 0.0000, 0.0000, 0.0000, 0.4798, 0.2830]]) + >>> torch.triu(b, diagonal=-1) + tensor([[ 0.5876, -0.0794, -1.8373, 0.6654, 0.2604, 1.5235], + [-0.2447, 0.9556, -1.2919, 1.3378, -0.1768, -1.0857], + [ 0.0000, 0.3146, 0.6576, -1.0432, 0.9348, -0.4410], + [ 0.0000, 0.0000, -1.3337, -1.6556, 0.4798, 0.2830]]) + """ + ... +def triu_indices(row: _int, col: _int, offset: _int = 0, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + triu_indices(row, col, offset=0, *, dtype=torch.long, device='cpu', layout=torch.strided) -> Tensor + + Returns the indices of the upper triangular part of a :attr:`row` by + :attr:`col` matrix in a 2-by-N Tensor, where the first row contains row + coordinates of all indices and the second row contains column coordinates. + Indices are ordered based on rows and then columns. + + The upper triangular part of the matrix is defined as the elements on and + above the diagonal. + + The argument :attr:`offset` controls which diagonal to consider. If + :attr:`offset` = 0, all elements on and above the main diagonal are + retained. 
A positive value excludes just as many diagonals above the main + diagonal, and similarly a negative value includes just as many diagonals below + the main diagonal. The main diagonal are the set of indices + :math:`\lbrace (i, i) \rbrace` for :math:`i \in [0, \min\{d_{1}, d_{2}\} - 1]` + where :math:`d_{1}, d_{2}` are the dimensions of the matrix. + + .. note:: + When running on CUDA, ``row * col`` must be less than :math:`2^{59}` to + prevent overflow during calculation. + + Args: + row (``int``): number of rows in the 2-D matrix. + col (``int``): number of columns in the 2-D matrix. + offset (``int``): diagonal offset from the main diagonal. + Default: if not provided, 0. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, ``torch.long``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + layout (:class:`torch.layout`, optional): currently only support ``torch.strided``. + + Example:: + + >>> a = torch.triu_indices(3, 3) + >>> a + tensor([[0, 0, 0, 1, 1, 2], + [0, 1, 2, 1, 2, 2]]) + + >>> a = torch.triu_indices(4, 3, -1) + >>> a + tensor([[0, 0, 0, 1, 1, 1, 2, 2, 3], + [0, 1, 2, 0, 1, 2, 1, 2, 2]]) + + >>> a = torch.triu_indices(4, 3, 1) + >>> a + tensor([[0, 0, 1], + [1, 2, 2]]) + """ + ... +def true_divide(input: Union[Tensor, Number], other: Union[Tensor, Number], *, out: Optional[Tensor] = None) -> Tensor: + r""" + true_divide(dividend, divisor, *, out) -> Tensor + + Alias for :func:`torch.div` with ``rounding_mode=None``. + """ + ... +def trunc(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + trunc(input, *, out=None) -> Tensor + + Returns a new tensor with the truncated integer values of + the elements of :attr:`input`. + + For integer inputs, follows the array-api convention of returning a + copy of the input tensor. + + Args: + input (Tensor): the input tensor. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randn(4) + >>> a + tensor([ 3.4742, 0.5466, -0.8008, -0.9079]) + >>> torch.trunc(a) + tensor([ 3., 0., -0., -0.]) + """ + ... +def trunc_(input: Tensor) -> Tensor: ... +@overload +def unbind(input: Tensor, dim: _int = 0) -> Tuple[Tensor, ...]: + r""" + unbind(input, dim=0) -> seq + + Removes a tensor dimension. + + Returns a tuple of all slices along a given dimension, already without it. + + Arguments: + input (Tensor): the tensor to unbind + dim (int): dimension to remove + + Example:: + + >>> torch.unbind(torch.tensor([[1, 2, 3], + >>> [4, 5, 6], + >>> [7, 8, 9]])) + (tensor([1, 2, 3]), tensor([4, 5, 6]), tensor([7, 8, 9])) + """ + ... +@overload +def unbind(input: Tensor, dim: Union[str, ellipsis, None]) -> Tuple[Tensor, ...]: + r""" + unbind(input, dim=0) -> seq + + Removes a tensor dimension. + + Returns a tuple of all slices along a given dimension, already without it. + + Arguments: + input (Tensor): the tensor to unbind + dim (int): dimension to remove + + Example:: + + >>> torch.unbind(torch.tensor([[1, 2, 3], + >>> [4, 5, 6], + >>> [7, 8, 9]])) + (tensor([1, 2, 3]), tensor([4, 5, 6]), tensor([7, 8, 9])) + """ + ... 
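+# Illustrative sketch (not part of the generated stub): torch.unbind, documented
+# above, returns every slice of the input along the chosen dimension with that
+# dimension removed.  The hypothetical helper below checks that reading against
+# plain indexing; its name and sample tensor are assumptions for illustration.
+def _example_unbind_usage() -> None:  # hypothetical helper, illustration only
+    import torch
+
+    t = torch.tensor([[1, 2, 3],
+                      [4, 5, 6],
+                      [7, 8, 9]])
+
+    rows = torch.unbind(t, dim=0)         # tuple of three 1-D tensors
+    assert len(rows) == 3
+    assert torch.equal(rows[1], torch.tensor([4, 5, 6]))
+
+    # Unbinding along dim=1 yields the columns, each with that dimension dropped.
+    cols = torch.unbind(t, dim=1)
+    for j, col in enumerate(cols):
+        assert torch.equal(col, t[:, j])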
+def unbind_copy(input: Tensor, dim: _int = 0, *, out: Union[Tuple[Tensor, ...], List[Tensor], None] = None) -> None: + r""" + Performs the same operation as :func:`torch.unbind`, but all output tensors + are freshly created instead of aliasing the input. + """ + ... +@overload +def unflatten(input: Tensor, dim: Union[str, ellipsis, None], sizes: Sequence[Union[_int, SymInt]], names: Sequence[Union[str, ellipsis, None]]) -> Tensor: + r""" + unflatten(input, dim, sizes) -> Tensor + + Expands a dimension of the input tensor over multiple dimensions. + + .. seealso:: + + :func:`torch.flatten` the inverse of this function. It coalesces several dimensions into one. + + Args: + input (Tensor): the input tensor. + dim (int): Dimension to be unflattened, specified as an index into + ``input.shape``. + sizes (Tuple[int]): New shape of the unflattened dimension. + One of its elements can be `-1` in which case the corresponding output + dimension is inferred. Otherwise, the product of ``sizes`` *must* + equal ``input.shape[dim]``. + + Returns: + A View of input with the specified dimension unflattened. + + Examples:: + >>> torch.unflatten(torch.randn(3, 4, 1), 1, (2, 2)).shape + torch.Size([3, 2, 2, 1]) + >>> torch.unflatten(torch.randn(3, 4, 1), 1, (-1, 2)).shape + torch.Size([3, 2, 2, 1]) + >>> torch.unflatten(torch.randn(5, 12, 3), -2, (2, 2, 3, 1, 1)).shape + torch.Size([5, 2, 2, 3, 1, 1, 3]) + """ + ... +@overload +def unflatten(input: Tensor, dim: _int, sizes: Sequence[Union[_int, SymInt]]) -> Tensor: + r""" + unflatten(input, dim, sizes) -> Tensor + + Expands a dimension of the input tensor over multiple dimensions. + + .. seealso:: + + :func:`torch.flatten` the inverse of this function. It coalesces several dimensions into one. + + Args: + input (Tensor): the input tensor. + dim (int): Dimension to be unflattened, specified as an index into + ``input.shape``. + sizes (Tuple[int]): New shape of the unflattened dimension. + One of its elements can be `-1` in which case the corresponding output + dimension is inferred. Otherwise, the product of ``sizes`` *must* + equal ``input.shape[dim]``. + + Returns: + A View of input with the specified dimension unflattened. + + Examples:: + >>> torch.unflatten(torch.randn(3, 4, 1), 1, (2, 2)).shape + torch.Size([3, 2, 2, 1]) + >>> torch.unflatten(torch.randn(3, 4, 1), 1, (-1, 2)).shape + torch.Size([3, 2, 2, 1]) + >>> torch.unflatten(torch.randn(5, 12, 3), -2, (2, 2, 3, 1, 1)).shape + torch.Size([5, 2, 2, 3, 1, 1, 3]) + """ + ... +def unfold_copy(input: Tensor, dimension: _int, size: _int, step: _int, *, out: Optional[Tensor] = None) -> Tensor: + r""" + Performs the same operation as :func:`torch.unfold`, but all output tensors + are freshly created instead of aliasing the input. + """ + ... +def unique_dim(input: Tensor, dim: _int, sorted: _bool = True, return_inverse: _bool = False, return_counts: _bool = False) -> Tuple[Tensor, Tensor, Tensor]: ... +def unsafe_chunk(input: Tensor, chunks: _int, dim: _int = 0) -> Tuple[Tensor, ...]: + r""" + unsafe_chunk(input, chunks, dim=0) -> List of Tensors + + Works like :func:`torch.chunk` but without enforcing the autograd restrictions + on inplace modification of the outputs. + + .. warning:: + This function is safe to use as long as only the input, or only the outputs + are modified inplace after calling this function. It is user's + responsibility to ensure that is the case. If both the input and one or more + of the outputs are modified inplace, gradients computed by autograd will be + silently incorrect. 
+ """ + ... +def unsafe_split(input: Tensor, split_size: Union[_int, SymInt], dim: _int = 0) -> Tuple[Tensor, ...]: + r""" + unsafe_split(tensor, split_size_or_sections, dim=0) -> List of Tensors + + Works like :func:`torch.split` but without enforcing the autograd restrictions + on inplace modification of the outputs. + + .. warning:: + This function is safe to use as long as only the input, or only the outputs + are modified inplace after calling this function. It is user's + responsibility to ensure that is the case. If both the input and one or more + of the outputs are modified inplace, gradients computed by autograd will be + silently incorrect. + """ + ... +def unsafe_split_with_sizes(input: Tensor, split_sizes: Sequence[Union[_int, SymInt]], dim: _int = 0) -> Tuple[Tensor, ...]: ... +def unsqueeze(input: Tensor, dim: _int) -> Tensor: + r""" + unsqueeze(input, dim) -> Tensor + + Returns a new tensor with a dimension of size one inserted at the + specified position. + + The returned tensor shares the same underlying data with this tensor. + + A :attr:`dim` value within the range ``[-input.dim() - 1, input.dim() + 1)`` + can be used. Negative :attr:`dim` will correspond to :meth:`unsqueeze` + applied at :attr:`dim` = ``dim + input.dim() + 1``. + + Args: + input (Tensor): the input tensor. + dim (int): the index at which to insert the singleton dimension + + Example:: + + >>> x = torch.tensor([1, 2, 3, 4]) + >>> torch.unsqueeze(x, 0) + tensor([[ 1, 2, 3, 4]]) + >>> torch.unsqueeze(x, 1) + tensor([[ 1], + [ 2], + [ 3], + [ 4]]) + """ + ... +def unsqueeze_copy(input: Tensor, dim: _int, *, out: Optional[Tensor] = None) -> Tensor: + r""" + Performs the same operation as :func:`torch.unsqueeze`, but all output tensors + are freshly created instead of aliasing the input. + """ + ... +def values_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + Performs the same operation as :func:`torch.values`, but all output tensors + are freshly created instead of aliasing the input. + """ + ... +def vander(x: Tensor, N: Optional[_int] = None, increasing: _bool = False) -> Tensor: + r""" + vander(x, N=None, increasing=False) -> Tensor + + Generates a Vandermonde matrix. + + The columns of the output matrix are elementwise powers of the input vector :math:`x^{(N-1)}, x^{(N-2)}, ..., x^0`. + If increasing is True, the order of the columns is reversed :math:`x^0, x^1, ..., x^{(N-1)}`. Such a + matrix with a geometric progression in each row is named for Alexandre-Theophile Vandermonde. + + Arguments: + x (Tensor): 1-D input tensor. + N (int, optional): Number of columns in the output. If N is not specified, + a square array is returned :math:`(N = len(x))`. + increasing (bool, optional): Order of the powers of the columns. If True, + the powers increase from left to right, if False (the default) they are reversed. + + Returns: + Tensor: Vandermonde matrix. If increasing is False, the first column is :math:`x^{(N-1)}`, + the second :math:`x^{(N-2)}` and so forth. If increasing is True, the columns + are :math:`x^0, x^1, ..., x^{(N-1)}`. + + Example:: + + >>> x = torch.tensor([1, 2, 3, 5]) + >>> torch.vander(x) + tensor([[ 1, 1, 1, 1], + [ 8, 4, 2, 1], + [ 27, 9, 3, 1], + [125, 25, 5, 1]]) + >>> torch.vander(x, N=3) + tensor([[ 1, 1, 1], + [ 4, 2, 1], + [ 9, 3, 1], + [25, 5, 1]]) + >>> torch.vander(x, N=3, increasing=True) + tensor([[ 1, 1, 1], + [ 1, 2, 4], + [ 1, 3, 9], + [ 1, 5, 25]]) + """ + ... 
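+# Illustrative sketch (not part of the generated stub): the torch.vander
+# docstring above defines the output columns as decreasing powers of the input
+# vector.  The hypothetical helper below reproduces that definition with
+# broadcasting and checks the increasing=True ordering; the helper name and
+# sample values are assumptions for illustration.
+def _example_vander_usage() -> None:  # hypothetical helper, illustration only
+    import torch
+
+    x = torch.tensor([1, 2, 3, 5])
+    N = 3
+
+    # Default ordering: columns are x**(N-1), ..., x**1, x**0.
+    powers = torch.arange(N - 1, -1, -1)          # tensor([2, 1, 0])
+    manual = x.unsqueeze(-1) ** powers
+    assert torch.equal(torch.vander(x, N=N), manual)
+
+    # increasing=True reverses the column order.
+    assert torch.equal(torch.vander(x, N=N, increasing=True),
+                       torch.flip(manual, dims=[-1]))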
+@overload +def var(input: Tensor, dim: Optional[Union[_int, _size]], unbiased: _bool = True, keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: + r""" + var(input, dim=None, *, correction=1, keepdim=False, out=None) -> Tensor + + Calculates the variance over the dimensions specified by :attr:`dim`. :attr:`dim` + can be a single dimension, list of dimensions, or ``None`` to reduce over all + dimensions. + + The variance (:math:`\sigma^2`) is calculated as + + .. math:: \sigma^2 = \frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2 + + where :math:`x` is the sample set of elements, :math:`\bar{x}` is the + sample mean, :math:`N` is the number of samples and :math:`\delta N` is + the :attr:`correction`. + + + + If :attr:`keepdim` is ``True``, the output tensor is of the same size + as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the + output tensor having 1 (or ``len(dim)``) fewer dimension(s). + + + Args: + input (Tensor): the input tensor. + + dim (int or tuple of ints, optional): the dimension or dimensions to reduce. + If ``None``, all dimensions are reduced. + + + Keyword args: + correction (int): difference between the sample size and sample degrees of freedom. + Defaults to `Bessel's correction`_, ``correction=1``. + + .. versionchanged:: 2.0 + Previously this argument was called ``unbiased`` and was a boolean + with ``True`` corresponding to ``correction=1`` and ``False`` being + ``correction=0``. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + out (Tensor, optional): the output tensor. + + Example: + + >>> a = torch.tensor( + ... [[ 0.2035, 1.2959, 1.8101, -0.4644], + ... [ 1.5027, -0.3270, 0.5905, 0.6538], + ... [-1.5745, 1.3330, -0.5596, -0.6548], + ... [ 0.1264, -0.5080, 1.6420, 0.1992]]) + >>> torch.var(a, dim=1, keepdim=True) + tensor([[1.0631], + [0.5590], + [1.4893], + [0.8258]]) + + .. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction + """ + ... +@overload +def var(input: Tensor, dim: Optional[Union[_int, _size]] = None, *, correction: Optional[Union[Number, _complex]] = None, keepdim: _bool = False, out: Optional[Tensor] = None) -> Tensor: + r""" + var(input, dim=None, *, correction=1, keepdim=False, out=None) -> Tensor + + Calculates the variance over the dimensions specified by :attr:`dim`. :attr:`dim` + can be a single dimension, list of dimensions, or ``None`` to reduce over all + dimensions. + + The variance (:math:`\sigma^2`) is calculated as + + .. math:: \sigma^2 = \frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2 + + where :math:`x` is the sample set of elements, :math:`\bar{x}` is the + sample mean, :math:`N` is the number of samples and :math:`\delta N` is + the :attr:`correction`. + + + + If :attr:`keepdim` is ``True``, the output tensor is of the same size + as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the + output tensor having 1 (or ``len(dim)``) fewer dimension(s). + + + Args: + input (Tensor): the input tensor. + + dim (int or tuple of ints, optional): the dimension or dimensions to reduce. + If ``None``, all dimensions are reduced. + + + Keyword args: + correction (int): difference between the sample size and sample degrees of freedom. + Defaults to `Bessel's correction`_, ``correction=1``. + + .. 
versionchanged:: 2.0 + Previously this argument was called ``unbiased`` and was a boolean + with ``True`` corresponding to ``correction=1`` and ``False`` being + ``correction=0``. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + out (Tensor, optional): the output tensor. + + Example: + + >>> a = torch.tensor( + ... [[ 0.2035, 1.2959, 1.8101, -0.4644], + ... [ 1.5027, -0.3270, 0.5905, 0.6538], + ... [-1.5745, 1.3330, -0.5596, -0.6548], + ... [ 0.1264, -0.5080, 1.6420, 0.1992]]) + >>> torch.var(a, dim=1, keepdim=True) + tensor([[1.0631], + [0.5590], + [1.4893], + [0.8258]]) + + .. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction + """ + ... +@overload +def var(input: Tensor, unbiased: _bool = True) -> Tensor: + r""" + var(input, dim=None, *, correction=1, keepdim=False, out=None) -> Tensor + + Calculates the variance over the dimensions specified by :attr:`dim`. :attr:`dim` + can be a single dimension, list of dimensions, or ``None`` to reduce over all + dimensions. + + The variance (:math:`\sigma^2`) is calculated as + + .. math:: \sigma^2 = \frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2 + + where :math:`x` is the sample set of elements, :math:`\bar{x}` is the + sample mean, :math:`N` is the number of samples and :math:`\delta N` is + the :attr:`correction`. + + + + If :attr:`keepdim` is ``True``, the output tensor is of the same size + as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the + output tensor having 1 (or ``len(dim)``) fewer dimension(s). + + + Args: + input (Tensor): the input tensor. + + dim (int or tuple of ints, optional): the dimension or dimensions to reduce. + If ``None``, all dimensions are reduced. + + + Keyword args: + correction (int): difference between the sample size and sample degrees of freedom. + Defaults to `Bessel's correction`_, ``correction=1``. + + .. versionchanged:: 2.0 + Previously this argument was called ``unbiased`` and was a boolean + with ``True`` corresponding to ``correction=1`` and ``False`` being + ``correction=0``. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + out (Tensor, optional): the output tensor. + + Example: + + >>> a = torch.tensor( + ... [[ 0.2035, 1.2959, 1.8101, -0.4644], + ... [ 1.5027, -0.3270, 0.5905, 0.6538], + ... [-1.5745, 1.3330, -0.5596, -0.6548], + ... [ 0.1264, -0.5080, 1.6420, 0.1992]]) + >>> torch.var(a, dim=1, keepdim=True) + tensor([[1.0631], + [0.5590], + [1.4893], + [0.8258]]) + + .. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction + """ + ... +@overload +def var(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], *, correction: Optional[Union[Number, _complex]] = None, keepdim: _bool = False, out: Optional[Tensor] = None) -> Tensor: + r""" + var(input, dim=None, *, correction=1, keepdim=False, out=None) -> Tensor + + Calculates the variance over the dimensions specified by :attr:`dim`. :attr:`dim` + can be a single dimension, list of dimensions, or ``None`` to reduce over all + dimensions. + + The variance (:math:`\sigma^2`) is calculated as + + .. math:: \sigma^2 = \frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2 + + where :math:`x` is the sample set of elements, :math:`\bar{x}` is the + sample mean, :math:`N` is the number of samples and :math:`\delta N` is + the :attr:`correction`. 
+ + + + If :attr:`keepdim` is ``True``, the output tensor is of the same size + as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the + output tensor having 1 (or ``len(dim)``) fewer dimension(s). + + + Args: + input (Tensor): the input tensor. + + dim (int or tuple of ints, optional): the dimension or dimensions to reduce. + If ``None``, all dimensions are reduced. + + + Keyword args: + correction (int): difference between the sample size and sample degrees of freedom. + Defaults to `Bessel's correction`_, ``correction=1``. + + .. versionchanged:: 2.0 + Previously this argument was called ``unbiased`` and was a boolean + with ``True`` corresponding to ``correction=1`` and ``False`` being + ``correction=0``. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + out (Tensor, optional): the output tensor. + + Example: + + >>> a = torch.tensor( + ... [[ 0.2035, 1.2959, 1.8101, -0.4644], + ... [ 1.5027, -0.3270, 0.5905, 0.6538], + ... [-1.5745, 1.3330, -0.5596, -0.6548], + ... [ 0.1264, -0.5080, 1.6420, 0.1992]]) + >>> torch.var(a, dim=1, keepdim=True) + tensor([[1.0631], + [0.5590], + [1.4893], + [0.8258]]) + + .. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction + """ + ... +@overload +def var(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], unbiased: _bool = True, keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: + r""" + var(input, dim=None, *, correction=1, keepdim=False, out=None) -> Tensor + + Calculates the variance over the dimensions specified by :attr:`dim`. :attr:`dim` + can be a single dimension, list of dimensions, or ``None`` to reduce over all + dimensions. + + The variance (:math:`\sigma^2`) is calculated as + + .. math:: \sigma^2 = \frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2 + + where :math:`x` is the sample set of elements, :math:`\bar{x}` is the + sample mean, :math:`N` is the number of samples and :math:`\delta N` is + the :attr:`correction`. + + + + If :attr:`keepdim` is ``True``, the output tensor is of the same size + as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the + output tensor having 1 (or ``len(dim)``) fewer dimension(s). + + + Args: + input (Tensor): the input tensor. + + dim (int or tuple of ints, optional): the dimension or dimensions to reduce. + If ``None``, all dimensions are reduced. + + + Keyword args: + correction (int): difference between the sample size and sample degrees of freedom. + Defaults to `Bessel's correction`_, ``correction=1``. + + .. versionchanged:: 2.0 + Previously this argument was called ``unbiased`` and was a boolean + with ``True`` corresponding to ``correction=1`` and ``False`` being + ``correction=0``. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + out (Tensor, optional): the output tensor. + + Example: + + >>> a = torch.tensor( + ... [[ 0.2035, 1.2959, 1.8101, -0.4644], + ... [ 1.5027, -0.3270, 0.5905, 0.6538], + ... [-1.5745, 1.3330, -0.5596, -0.6548], + ... [ 0.1264, -0.5080, 1.6420, 0.1992]]) + >>> torch.var(a, dim=1, keepdim=True) + tensor([[1.0631], + [0.5590], + [1.4893], + [0.8258]]) + + .. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction + """ + ... 
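+# Illustrative sketch (not part of the generated stub): torch.var, documented
+# above, divides the summed squared deviations by max(0, N - correction).  The
+# hypothetical helper below reproduces that formula directly for the default
+# correction=1 (Bessel's correction) and for correction=0; the helper name and
+# sample data are assumptions for illustration.
+def _example_var_correction_usage() -> None:  # hypothetical helper, illustration only
+    import torch
+
+    a = torch.tensor([[0.2035,  1.2959,  1.8101, -0.4644],
+                      [1.5027, -0.3270,  0.5905,  0.6538]])
+    n = a.shape[1]
+
+    # Default correction=1: divide by N - 1 (sample variance).
+    manual = ((a - a.mean(dim=1, keepdim=True)) ** 2).sum(dim=1) / (n - 1)
+    assert torch.allclose(torch.var(a, dim=1), manual)
+
+    # correction=0: divide by N (population variance).
+    assert torch.allclose(torch.var(a, dim=1, correction=0),
+                          manual * (n - 1) / n)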
+@overload +def var_mean(input: Tensor, dim: Optional[Union[_int, _size]], unbiased: _bool = True, keepdim: _bool = False) -> Tuple[Tensor, Tensor]: + r""" + var_mean(input, dim=None, *, correction=1, keepdim=False, out=None) -> (Tensor, Tensor) + + Calculates the variance and mean over the dimensions specified by :attr:`dim`. + :attr:`dim` can be a single dimension, list of dimensions, or ``None`` to + reduce over all dimensions. + + The variance (:math:`\sigma^2`) is calculated as + + .. math:: \sigma^2 = \frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2 + + where :math:`x` is the sample set of elements, :math:`\bar{x}` is the + sample mean, :math:`N` is the number of samples and :math:`\delta N` is + the :attr:`correction`. + + + + If :attr:`keepdim` is ``True``, the output tensor is of the same size + as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the + output tensor having 1 (or ``len(dim)``) fewer dimension(s). + + + Args: + input (Tensor): the input tensor. + + dim (int or tuple of ints, optional): the dimension or dimensions to reduce. + If ``None``, all dimensions are reduced. + + + Keyword args: + correction (int): difference between the sample size and sample degrees of freedom. + Defaults to `Bessel's correction`_, ``correction=1``. + + .. versionchanged:: 2.0 + Previously this argument was called ``unbiased`` and was a boolean + with ``True`` corresponding to ``correction=1`` and ``False`` being + ``correction=0``. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + out (Tensor, optional): the output tensor. + + Returns: + A tuple (var, mean) containing the variance and mean. + + Example: + + >>> a = torch.tensor( + ... [[ 0.2035, 1.2959, 1.8101, -0.4644], + ... [ 1.5027, -0.3270, 0.5905, 0.6538], + ... [-1.5745, 1.3330, -0.5596, -0.6548], + ... [ 0.1264, -0.5080, 1.6420, 0.1992]]) + >>> torch.var_mean(a, dim=0, keepdim=True) + (tensor([[1.5926, 1.0056, 1.2005, 0.3646]]), + tensor([[ 0.0645, 0.4485, 0.8707, -0.0665]])) + + .. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction + """ + ... +@overload +def var_mean(input: Tensor, dim: Optional[Union[_int, _size]] = None, *, correction: Optional[Union[Number, _complex]] = None, keepdim: _bool = False) -> Tuple[Tensor, Tensor]: + r""" + var_mean(input, dim=None, *, correction=1, keepdim=False, out=None) -> (Tensor, Tensor) + + Calculates the variance and mean over the dimensions specified by :attr:`dim`. + :attr:`dim` can be a single dimension, list of dimensions, or ``None`` to + reduce over all dimensions. + + The variance (:math:`\sigma^2`) is calculated as + + .. math:: \sigma^2 = \frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2 + + where :math:`x` is the sample set of elements, :math:`\bar{x}` is the + sample mean, :math:`N` is the number of samples and :math:`\delta N` is + the :attr:`correction`. + + + + If :attr:`keepdim` is ``True``, the output tensor is of the same size + as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the + output tensor having 1 (or ``len(dim)``) fewer dimension(s). + + + Args: + input (Tensor): the input tensor. + + dim (int or tuple of ints, optional): the dimension or dimensions to reduce. + If ``None``, all dimensions are reduced. 
+ + + Keyword args: + correction (int): difference between the sample size and sample degrees of freedom. + Defaults to `Bessel's correction`_, ``correction=1``. + + .. versionchanged:: 2.0 + Previously this argument was called ``unbiased`` and was a boolean + with ``True`` corresponding to ``correction=1`` and ``False`` being + ``correction=0``. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + out (Tensor, optional): the output tensor. + + Returns: + A tuple (var, mean) containing the variance and mean. + + Example: + + >>> a = torch.tensor( + ... [[ 0.2035, 1.2959, 1.8101, -0.4644], + ... [ 1.5027, -0.3270, 0.5905, 0.6538], + ... [-1.5745, 1.3330, -0.5596, -0.6548], + ... [ 0.1264, -0.5080, 1.6420, 0.1992]]) + >>> torch.var_mean(a, dim=0, keepdim=True) + (tensor([[1.5926, 1.0056, 1.2005, 0.3646]]), + tensor([[ 0.0645, 0.4485, 0.8707, -0.0665]])) + + .. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction + """ + ... +@overload +def var_mean(input: Tensor, unbiased: _bool = True) -> Tuple[Tensor, Tensor]: + r""" + var_mean(input, dim=None, *, correction=1, keepdim=False, out=None) -> (Tensor, Tensor) + + Calculates the variance and mean over the dimensions specified by :attr:`dim`. + :attr:`dim` can be a single dimension, list of dimensions, or ``None`` to + reduce over all dimensions. + + The variance (:math:`\sigma^2`) is calculated as + + .. math:: \sigma^2 = \frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2 + + where :math:`x` is the sample set of elements, :math:`\bar{x}` is the + sample mean, :math:`N` is the number of samples and :math:`\delta N` is + the :attr:`correction`. + + + + If :attr:`keepdim` is ``True``, the output tensor is of the same size + as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the + output tensor having 1 (or ``len(dim)``) fewer dimension(s). + + + Args: + input (Tensor): the input tensor. + + dim (int or tuple of ints, optional): the dimension or dimensions to reduce. + If ``None``, all dimensions are reduced. + + + Keyword args: + correction (int): difference between the sample size and sample degrees of freedom. + Defaults to `Bessel's correction`_, ``correction=1``. + + .. versionchanged:: 2.0 + Previously this argument was called ``unbiased`` and was a boolean + with ``True`` corresponding to ``correction=1`` and ``False`` being + ``correction=0``. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + out (Tensor, optional): the output tensor. + + Returns: + A tuple (var, mean) containing the variance and mean. + + Example: + + >>> a = torch.tensor( + ... [[ 0.2035, 1.2959, 1.8101, -0.4644], + ... [ 1.5027, -0.3270, 0.5905, 0.6538], + ... [-1.5745, 1.3330, -0.5596, -0.6548], + ... [ 0.1264, -0.5080, 1.6420, 0.1992]]) + >>> torch.var_mean(a, dim=0, keepdim=True) + (tensor([[1.5926, 1.0056, 1.2005, 0.3646]]), + tensor([[ 0.0645, 0.4485, 0.8707, -0.0665]])) + + .. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction + """ + ... +@overload +def var_mean(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], *, correction: Optional[Union[Number, _complex]] = None, keepdim: _bool = False) -> Tuple[Tensor, Tensor]: + r""" + var_mean(input, dim=None, *, correction=1, keepdim=False, out=None) -> (Tensor, Tensor) + + Calculates the variance and mean over the dimensions specified by :attr:`dim`. 
+ :attr:`dim` can be a single dimension, list of dimensions, or ``None`` to + reduce over all dimensions. + + The variance (:math:`\sigma^2`) is calculated as + + .. math:: \sigma^2 = \frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2 + + where :math:`x` is the sample set of elements, :math:`\bar{x}` is the + sample mean, :math:`N` is the number of samples and :math:`\delta N` is + the :attr:`correction`. + + + + If :attr:`keepdim` is ``True``, the output tensor is of the same size + as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the + output tensor having 1 (or ``len(dim)``) fewer dimension(s). + + + Args: + input (Tensor): the input tensor. + + dim (int or tuple of ints, optional): the dimension or dimensions to reduce. + If ``None``, all dimensions are reduced. + + + Keyword args: + correction (int): difference between the sample size and sample degrees of freedom. + Defaults to `Bessel's correction`_, ``correction=1``. + + .. versionchanged:: 2.0 + Previously this argument was called ``unbiased`` and was a boolean + with ``True`` corresponding to ``correction=1`` and ``False`` being + ``correction=0``. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + out (Tensor, optional): the output tensor. + + Returns: + A tuple (var, mean) containing the variance and mean. + + Example: + + >>> a = torch.tensor( + ... [[ 0.2035, 1.2959, 1.8101, -0.4644], + ... [ 1.5027, -0.3270, 0.5905, 0.6538], + ... [-1.5745, 1.3330, -0.5596, -0.6548], + ... [ 0.1264, -0.5080, 1.6420, 0.1992]]) + >>> torch.var_mean(a, dim=0, keepdim=True) + (tensor([[1.5926, 1.0056, 1.2005, 0.3646]]), + tensor([[ 0.0645, 0.4485, 0.8707, -0.0665]])) + + .. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction + """ + ... +@overload +def var_mean(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], unbiased: _bool = True, keepdim: _bool = False) -> Tuple[Tensor, Tensor]: + r""" + var_mean(input, dim=None, *, correction=1, keepdim=False, out=None) -> (Tensor, Tensor) + + Calculates the variance and mean over the dimensions specified by :attr:`dim`. + :attr:`dim` can be a single dimension, list of dimensions, or ``None`` to + reduce over all dimensions. + + The variance (:math:`\sigma^2`) is calculated as + + .. math:: \sigma^2 = \frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2 + + where :math:`x` is the sample set of elements, :math:`\bar{x}` is the + sample mean, :math:`N` is the number of samples and :math:`\delta N` is + the :attr:`correction`. + + + + If :attr:`keepdim` is ``True``, the output tensor is of the same size + as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the + output tensor having 1 (or ``len(dim)``) fewer dimension(s). + + + Args: + input (Tensor): the input tensor. + + dim (int or tuple of ints, optional): the dimension or dimensions to reduce. + If ``None``, all dimensions are reduced. + + + Keyword args: + correction (int): difference between the sample size and sample degrees of freedom. + Defaults to `Bessel's correction`_, ``correction=1``. + + .. versionchanged:: 2.0 + Previously this argument was called ``unbiased`` and was a boolean + with ``True`` corresponding to ``correction=1`` and ``False`` being + ``correction=0``. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. 
+ out (Tensor, optional): the output tensor. + + Returns: + A tuple (var, mean) containing the variance and mean. + + Example: + + >>> a = torch.tensor( + ... [[ 0.2035, 1.2959, 1.8101, -0.4644], + ... [ 1.5027, -0.3270, 0.5905, 0.6538], + ... [-1.5745, 1.3330, -0.5596, -0.6548], + ... [ 0.1264, -0.5080, 1.6420, 0.1992]]) + >>> torch.var_mean(a, dim=0, keepdim=True) + (tensor([[1.5926, 1.0056, 1.2005, 0.3646]]), + tensor([[ 0.0645, 0.4485, 0.8707, -0.0665]])) + + .. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction + """ + ... +def vdot(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + vdot(input, other, *, out=None) -> Tensor + + Computes the dot product of two 1D vectors along a dimension. + + In symbols, this function computes + + .. math:: + + \sum_{i=1}^n \overline{x_i}y_i. + + where :math:`\overline{x_i}` denotes the conjugate for complex + vectors, and it is the identity for real vectors. + + .. note:: + + Unlike NumPy's vdot, torch.vdot intentionally only supports computing the dot product + of two 1D tensors with the same number of elements. + + .. seealso:: + + :func:`torch.linalg.vecdot` computes the dot product of two batches of vectors along a dimension. + + Args: + input (Tensor): first tensor in the dot product, must be 1D. Its conjugate is used if it's complex. + other (Tensor): second tensor in the dot product, must be 1D. + + Keyword args: + + .. note:: out (Tensor, optional): the output tensor. + + + Example:: + + >>> torch.vdot(torch.tensor([2, 3]), torch.tensor([2, 1])) + tensor(7) + >>> a = torch.tensor((1 +2j, 3 - 1j)) + >>> b = torch.tensor((2 +1j, 4 - 0j)) + >>> torch.vdot(a, b) + tensor([16.+1.j]) + >>> torch.vdot(b, a) + tensor([16.-1.j]) + """ + ... +def view_as_complex(input: Tensor) -> Tensor: + r""" + view_as_complex(input) -> Tensor + + Returns a view of :attr:`input` as a complex tensor. For an input complex + tensor of :attr:`size` :math:`m1, m2, \dots, mi, 2`, this function returns a + new complex tensor of :attr:`size` :math:`m1, m2, \dots, mi` where the last + dimension of the input tensor is expected to represent the real and imaginary + components of complex numbers. + + .. warning:: + :func:`view_as_complex` is only supported for tensors with + :class:`torch.dtype` ``torch.float64`` and ``torch.float32``. The input is + expected to have the last dimension of :attr:`size` 2. In addition, the + tensor must have a `stride` of 1 for its last dimension. The strides of all + other dimensions must be even numbers. + + Args: + input (Tensor): the input tensor. + + Example:: + + >>> x=torch.randn(4, 2) + >>> x + tensor([[ 1.6116, -0.5772], + [-1.4606, -0.9120], + [ 0.0786, -1.7497], + [-0.6561, -1.6623]]) + >>> torch.view_as_complex(x) + tensor([(1.6116-0.5772j), (-1.4606-0.9120j), (0.0786-1.7497j), (-0.6561-1.6623j)]) + """ + ... +def view_as_complex_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + Performs the same operation as :func:`torch.view_as_complex`, but all output tensors + are freshly created instead of aliasing the input. + """ + ... +def view_as_real(input: Tensor) -> Tensor: + r""" + view_as_real(input) -> Tensor + + Returns a view of :attr:`input` as a real tensor. For an input complex tensor of + :attr:`size` :math:`m1, m2, \dots, mi`, this function returns a new + real tensor of size :math:`m1, m2, \dots, mi, 2`, where the last dimension of size 2 + represents the real and imaginary components of complex numbers. + + .. 
warning:: + :func:`view_as_real` is only supported for tensors with ``complex dtypes``. + + Args: + input (Tensor): the input tensor. + + Example:: + + >>> x=torch.randn(4, dtype=torch.cfloat) + >>> x + tensor([(0.4737-0.3839j), (-0.2098-0.6699j), (0.3470-0.9451j), (-0.5174-1.3136j)]) + >>> torch.view_as_real(x) + tensor([[ 0.4737, -0.3839], + [-0.2098, -0.6699], + [ 0.3470, -0.9451], + [-0.5174, -1.3136]]) + """ + ... +def view_as_real_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + Performs the same operation as :func:`torch.view_as_real`, but all output tensors + are freshly created instead of aliasing the input. + """ + ... +@overload +def view_copy(input: Tensor, dtype: _dtype, *, out: Optional[Tensor] = None) -> Tensor: + r""" + Performs the same operation as :func:`torch.view`, but all output tensors + are freshly created instead of aliasing the input. + """ + ... +@overload +def view_copy(input: Tensor, size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor] = None) -> Tensor: + r""" + Performs the same operation as :func:`torch.view`, but all output tensors + are freshly created instead of aliasing the input. + """ + ... +@overload +def vsplit(input: Tensor, sections: _int) -> Tuple[Tensor, ...]: + r""" + vsplit(input, indices_or_sections) -> List of Tensors + + Splits :attr:`input`, a tensor with two or more dimensions, into multiple tensors + vertically according to :attr:`indices_or_sections`. Each split is a view of + :attr:`input`. + + This is equivalent to calling torch.tensor_split(input, indices_or_sections, dim=0) + (the split dimension is 0), except that if :attr:`indices_or_sections` is an integer + it must evenly divide the split dimension or a runtime error will be thrown. + + This function is based on NumPy's :func:`numpy.vsplit`. + + Args: + input (Tensor): tensor to split. + indices_or_sections (int or list or tuple of ints): See argument in :func:`torch.tensor_split`. + + Example:: + >>> t = torch.arange(16.0).reshape(4,4) + >>> t + tensor([[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.], + [ 8., 9., 10., 11.], + [12., 13., 14., 15.]]) + >>> torch.vsplit(t, 2) + (tensor([[0., 1., 2., 3.], + [4., 5., 6., 7.]]), + tensor([[ 8., 9., 10., 11.], + [12., 13., 14., 15.]])) + >>> torch.vsplit(t, [3, 6]) + (tensor([[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.], + [ 8., 9., 10., 11.]]), + tensor([[12., 13., 14., 15.]]), + tensor([], size=(0, 4))) + """ + ... +@overload +def vsplit(input: Tensor, indices: _size) -> Tuple[Tensor, ...]: + r""" + vsplit(input, indices_or_sections) -> List of Tensors + + Splits :attr:`input`, a tensor with two or more dimensions, into multiple tensors + vertically according to :attr:`indices_or_sections`. Each split is a view of + :attr:`input`. + + This is equivalent to calling torch.tensor_split(input, indices_or_sections, dim=0) + (the split dimension is 0), except that if :attr:`indices_or_sections` is an integer + it must evenly divide the split dimension or a runtime error will be thrown. + + This function is based on NumPy's :func:`numpy.vsplit`. + + Args: + input (Tensor): tensor to split. + indices_or_sections (int or list or tuple of ints): See argument in :func:`torch.tensor_split`. 
+ + Example:: + >>> t = torch.arange(16.0).reshape(4,4) + >>> t + tensor([[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.], + [ 8., 9., 10., 11.], + [12., 13., 14., 15.]]) + >>> torch.vsplit(t, 2) + (tensor([[0., 1., 2., 3.], + [4., 5., 6., 7.]]), + tensor([[ 8., 9., 10., 11.], + [12., 13., 14., 15.]])) + >>> torch.vsplit(t, [3, 6]) + (tensor([[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.], + [ 8., 9., 10., 11.]]), + tensor([[12., 13., 14., 15.]]), + tensor([], size=(0, 4))) + """ + ... +def vstack(tensors: Union[Tuple[Tensor, ...], List[Tensor]], *, out: Optional[Tensor] = None) -> Tensor: + r""" + vstack(tensors, *, out=None) -> Tensor + + Stack tensors in sequence vertically (row wise). + + This is equivalent to concatenation along the first axis after all 1-D tensors have been reshaped by :func:`torch.atleast_2d`. + + Args: + tensors (sequence of Tensors): sequence of tensors to concatenate + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.tensor([1, 2, 3]) + >>> b = torch.tensor([4, 5, 6]) + >>> torch.vstack((a,b)) + tensor([[1, 2, 3], + [4, 5, 6]]) + >>> a = torch.tensor([[1],[2],[3]]) + >>> b = torch.tensor([[4],[5],[6]]) + >>> torch.vstack((a,b)) + tensor([[1], + [2], + [3], + [4], + [5], + [6]]) + """ + ... +@overload +def where(condition: Tensor) -> Tuple[Tensor, ...]: + r""" + where(condition, input, other, *, out=None) -> Tensor + + Return a tensor of elements selected from either :attr:`input` or :attr:`other`, depending on :attr:`condition`. + + The operation is defined as: + + .. math:: + \text{out}_i = \begin{cases} + \text{input}_i & \text{if } \text{condition}_i \\ + \text{other}_i & \text{otherwise} \\ + \end{cases} + + .. note:: + The tensors :attr:`condition`, :attr:`input`, :attr:`other` must be :ref:`broadcastable `. + + Arguments: + condition (BoolTensor): When True (nonzero), yield input, otherwise yield other + input (Tensor or Scalar): value (if :attr:`input` is a scalar) or values selected at indices + where :attr:`condition` is ``True`` + other (Tensor or Scalar): value (if :attr:`other` is a scalar) or values selected at indices + where :attr:`condition` is ``False`` + + Keyword args: + out (Tensor, optional): the output tensor. + + Returns: + Tensor: A tensor of shape equal to the broadcasted shape of :attr:`condition`, :attr:`input`, :attr:`other` + + Example:: + + >>> x = torch.randn(3, 2) + >>> y = torch.ones(3, 2) + >>> x + tensor([[-0.4620, 0.3139], + [ 0.3898, -0.7197], + [ 0.0478, -0.1657]]) + >>> torch.where(x > 0, 1.0, 0.0) + tensor([[0., 1.], + [1., 0.], + [1., 0.]]) + >>> torch.where(x > 0, x, y) + tensor([[ 1.0000, 0.3139], + [ 0.3898, 1.0000], + [ 0.0478, 1.0000]]) + >>> x = torch.randn(2, 2, dtype=torch.double) + >>> x + tensor([[ 1.0779, 0.0383], + [-0.8785, -1.1089]], dtype=torch.float64) + >>> torch.where(x > 0, x, 0.) + tensor([[1.0779, 0.0383], + [0.0000, 0.0000]], dtype=torch.float64) + + .. function:: where(condition) -> tuple of LongTensor + :noindex: + + ``torch.where(condition)`` is identical to + ``torch.nonzero(condition, as_tuple=True)``. + + .. note:: + See also :func:`torch.nonzero`. + """ + ... +@overload +def where(condition: Tensor, input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + where(condition, input, other, *, out=None) -> Tensor + + Return a tensor of elements selected from either :attr:`input` or :attr:`other`, depending on :attr:`condition`. + + The operation is defined as: + + .. 
math:: + \text{out}_i = \begin{cases} + \text{input}_i & \text{if } \text{condition}_i \\ + \text{other}_i & \text{otherwise} \\ + \end{cases} + + .. note:: + The tensors :attr:`condition`, :attr:`input`, :attr:`other` must be :ref:`broadcastable `. + + Arguments: + condition (BoolTensor): When True (nonzero), yield input, otherwise yield other + input (Tensor or Scalar): value (if :attr:`input` is a scalar) or values selected at indices + where :attr:`condition` is ``True`` + other (Tensor or Scalar): value (if :attr:`other` is a scalar) or values selected at indices + where :attr:`condition` is ``False`` + + Keyword args: + out (Tensor, optional): the output tensor. + + Returns: + Tensor: A tensor of shape equal to the broadcasted shape of :attr:`condition`, :attr:`input`, :attr:`other` + + Example:: + + >>> x = torch.randn(3, 2) + >>> y = torch.ones(3, 2) + >>> x + tensor([[-0.4620, 0.3139], + [ 0.3898, -0.7197], + [ 0.0478, -0.1657]]) + >>> torch.where(x > 0, 1.0, 0.0) + tensor([[0., 1.], + [1., 0.], + [1., 0.]]) + >>> torch.where(x > 0, x, y) + tensor([[ 1.0000, 0.3139], + [ 0.3898, 1.0000], + [ 0.0478, 1.0000]]) + >>> x = torch.randn(2, 2, dtype=torch.double) + >>> x + tensor([[ 1.0779, 0.0383], + [-0.8785, -1.1089]], dtype=torch.float64) + >>> torch.where(x > 0, x, 0.) + tensor([[1.0779, 0.0383], + [0.0000, 0.0000]], dtype=torch.float64) + + .. function:: where(condition) -> tuple of LongTensor + :noindex: + + ``torch.where(condition)`` is identical to + ``torch.nonzero(condition, as_tuple=True)``. + + .. note:: + See also :func:`torch.nonzero`. + """ + ... +@overload +def where(condition: Tensor, self: Union[Number, _complex], other: Tensor) -> Tensor: + r""" + where(condition, input, other, *, out=None) -> Tensor + + Return a tensor of elements selected from either :attr:`input` or :attr:`other`, depending on :attr:`condition`. + + The operation is defined as: + + .. math:: + \text{out}_i = \begin{cases} + \text{input}_i & \text{if } \text{condition}_i \\ + \text{other}_i & \text{otherwise} \\ + \end{cases} + + .. note:: + The tensors :attr:`condition`, :attr:`input`, :attr:`other` must be :ref:`broadcastable `. + + Arguments: + condition (BoolTensor): When True (nonzero), yield input, otherwise yield other + input (Tensor or Scalar): value (if :attr:`input` is a scalar) or values selected at indices + where :attr:`condition` is ``True`` + other (Tensor or Scalar): value (if :attr:`other` is a scalar) or values selected at indices + where :attr:`condition` is ``False`` + + Keyword args: + out (Tensor, optional): the output tensor. + + Returns: + Tensor: A tensor of shape equal to the broadcasted shape of :attr:`condition`, :attr:`input`, :attr:`other` + + Example:: + + >>> x = torch.randn(3, 2) + >>> y = torch.ones(3, 2) + >>> x + tensor([[-0.4620, 0.3139], + [ 0.3898, -0.7197], + [ 0.0478, -0.1657]]) + >>> torch.where(x > 0, 1.0, 0.0) + tensor([[0., 1.], + [1., 0.], + [1., 0.]]) + >>> torch.where(x > 0, x, y) + tensor([[ 1.0000, 0.3139], + [ 0.3898, 1.0000], + [ 0.0478, 1.0000]]) + >>> x = torch.randn(2, 2, dtype=torch.double) + >>> x + tensor([[ 1.0779, 0.0383], + [-0.8785, -1.1089]], dtype=torch.float64) + >>> torch.where(x > 0, x, 0.) + tensor([[1.0779, 0.0383], + [0.0000, 0.0000]], dtype=torch.float64) + + .. function:: where(condition) -> tuple of LongTensor + :noindex: + + ``torch.where(condition)`` is identical to + ``torch.nonzero(condition, as_tuple=True)``. + + .. note:: + See also :func:`torch.nonzero`. + """ + ... 
+@overload +def where(condition: Tensor, input: Tensor, other: Union[Number, _complex]) -> Tensor: + r""" + where(condition, input, other, *, out=None) -> Tensor + + Return a tensor of elements selected from either :attr:`input` or :attr:`other`, depending on :attr:`condition`. + + The operation is defined as: + + .. math:: + \text{out}_i = \begin{cases} + \text{input}_i & \text{if } \text{condition}_i \\ + \text{other}_i & \text{otherwise} \\ + \end{cases} + + .. note:: + The tensors :attr:`condition`, :attr:`input`, :attr:`other` must be :ref:`broadcastable `. + + Arguments: + condition (BoolTensor): When True (nonzero), yield input, otherwise yield other + input (Tensor or Scalar): value (if :attr:`input` is a scalar) or values selected at indices + where :attr:`condition` is ``True`` + other (Tensor or Scalar): value (if :attr:`other` is a scalar) or values selected at indices + where :attr:`condition` is ``False`` + + Keyword args: + out (Tensor, optional): the output tensor. + + Returns: + Tensor: A tensor of shape equal to the broadcasted shape of :attr:`condition`, :attr:`input`, :attr:`other` + + Example:: + + >>> x = torch.randn(3, 2) + >>> y = torch.ones(3, 2) + >>> x + tensor([[-0.4620, 0.3139], + [ 0.3898, -0.7197], + [ 0.0478, -0.1657]]) + >>> torch.where(x > 0, 1.0, 0.0) + tensor([[0., 1.], + [1., 0.], + [1., 0.]]) + >>> torch.where(x > 0, x, y) + tensor([[ 1.0000, 0.3139], + [ 0.3898, 1.0000], + [ 0.0478, 1.0000]]) + >>> x = torch.randn(2, 2, dtype=torch.double) + >>> x + tensor([[ 1.0779, 0.0383], + [-0.8785, -1.1089]], dtype=torch.float64) + >>> torch.where(x > 0, x, 0.) + tensor([[1.0779, 0.0383], + [0.0000, 0.0000]], dtype=torch.float64) + + .. function:: where(condition) -> tuple of LongTensor + :noindex: + + ``torch.where(condition)`` is identical to + ``torch.nonzero(condition, as_tuple=True)``. + + .. note:: + See also :func:`torch.nonzero`. + """ + ... +@overload +def where(condition: Tensor, self: Union[Number, _complex], other: Union[Number, _complex]) -> Tensor: + r""" + where(condition, input, other, *, out=None) -> Tensor + + Return a tensor of elements selected from either :attr:`input` or :attr:`other`, depending on :attr:`condition`. + + The operation is defined as: + + .. math:: + \text{out}_i = \begin{cases} + \text{input}_i & \text{if } \text{condition}_i \\ + \text{other}_i & \text{otherwise} \\ + \end{cases} + + .. note:: + The tensors :attr:`condition`, :attr:`input`, :attr:`other` must be :ref:`broadcastable `. + + Arguments: + condition (BoolTensor): When True (nonzero), yield input, otherwise yield other + input (Tensor or Scalar): value (if :attr:`input` is a scalar) or values selected at indices + where :attr:`condition` is ``True`` + other (Tensor or Scalar): value (if :attr:`other` is a scalar) or values selected at indices + where :attr:`condition` is ``False`` + + Keyword args: + out (Tensor, optional): the output tensor. + + Returns: + Tensor: A tensor of shape equal to the broadcasted shape of :attr:`condition`, :attr:`input`, :attr:`other` + + Example:: + + >>> x = torch.randn(3, 2) + >>> y = torch.ones(3, 2) + >>> x + tensor([[-0.4620, 0.3139], + [ 0.3898, -0.7197], + [ 0.0478, -0.1657]]) + >>> torch.where(x > 0, 1.0, 0.0) + tensor([[0., 1.], + [1., 0.], + [1., 0.]]) + >>> torch.where(x > 0, x, y) + tensor([[ 1.0000, 0.3139], + [ 0.3898, 1.0000], + [ 0.0478, 1.0000]]) + >>> x = torch.randn(2, 2, dtype=torch.double) + >>> x + tensor([[ 1.0779, 0.0383], + [-0.8785, -1.1089]], dtype=torch.float64) + >>> torch.where(x > 0, x, 0.) 
+ tensor([[1.0779, 0.0383], + [0.0000, 0.0000]], dtype=torch.float64) + + .. function:: where(condition) -> tuple of LongTensor + :noindex: + + ``torch.where(condition)`` is identical to + ``torch.nonzero(condition, as_tuple=True)``. + + .. note:: + See also :func:`torch.nonzero`. + """ + ... +@overload +def xlogy(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + xlogy(input, other, *, out=None) -> Tensor + + Alias for :func:`torch.special.xlogy`. + """ + ... +@overload +def xlogy(self: Union[Number, _complex], other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + xlogy(input, other, *, out=None) -> Tensor + + Alias for :func:`torch.special.xlogy`. + """ + ... +@overload +def xlogy(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: + r""" + xlogy(input, other, *, out=None) -> Tensor + + Alias for :func:`torch.special.xlogy`. + """ + ... +@overload +def xlogy_(input: Tensor, other: Tensor) -> Tensor: ... +@overload +def xlogy_(input: Tensor, other: Union[Number, _complex]) -> Tensor: ... +def zero_(input: Tensor) -> Tensor: ... +@overload +def zeros(size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + zeros(*size, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + Returns a tensor filled with the scalar value `0`, with the shape defined + by the variable argument :attr:`size`. + + Args: + size (int...): a sequence of integers defining the shape of the output tensor. + Can be a variable number of arguments or a collection like a list or tuple. + + Keyword args: + out (Tensor, optional): the output tensor. + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + + Example:: + + >>> torch.zeros(2, 3) + tensor([[ 0., 0., 0.], + [ 0., 0., 0.]]) + + >>> torch.zeros(5) + tensor([ 0., 0., 0., 0., 0.]) + """ + ... +@overload +def zeros(*size: _int, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + zeros(*size, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + Returns a tensor filled with the scalar value `0`, with the shape defined + by the variable argument :attr:`size`. + + Args: + size (int...): a sequence of integers defining the shape of the output tensor. + Can be a variable number of arguments or a collection like a list or tuple. + + Keyword args: + out (Tensor, optional): the output tensor. 
+ dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + + Example:: + + >>> torch.zeros(2, 3) + tensor([[ 0., 0., 0.], + [ 0., 0., 0.]]) + + >>> torch.zeros(5) + tensor([ 0., 0., 0., 0., 0.]) + """ + ... +@overload +def zeros(size: _size, *, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + zeros(*size, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + Returns a tensor filled with the scalar value `0`, with the shape defined + by the variable argument :attr:`size`. + + Args: + size (int...): a sequence of integers defining the shape of the output tensor. + Can be a variable number of arguments or a collection like a list or tuple. + + Keyword args: + out (Tensor, optional): the output tensor. + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + + Example:: + + >>> torch.zeros(2, 3) + tensor([[ 0., 0., 0.], + [ 0., 0., 0.]]) + + >>> torch.zeros(5) + tensor([ 0., 0., 0., 0., 0.]) + """ + ... +@overload +def zeros(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + zeros(*size, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + Returns a tensor filled with the scalar value `0`, with the shape defined + by the variable argument :attr:`size`. + + Args: + size (int...): a sequence of integers defining the shape of the output tensor. + Can be a variable number of arguments or a collection like a list or tuple. + + Keyword args: + out (Tensor, optional): the output tensor. + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. 
+ device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + + Example:: + + >>> torch.zeros(2, 3) + tensor([[ 0., 0., 0.], + [ 0., 0., 0.]]) + + >>> torch.zeros(5) + tensor([ 0., 0., 0., 0., 0.]) + """ + ... +def zeros_like(input: Tensor, *, memory_format: Optional[memory_format] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + zeros_like(input, *, dtype=None, layout=None, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor + + Returns a tensor filled with the scalar value `0`, with the same size as + :attr:`input`. ``torch.zeros_like(input)`` is equivalent to + ``torch.zeros(input.size(), dtype=input.dtype, layout=input.layout, device=input.device)``. + + .. warning:: + As of 0.4, this function does not support an :attr:`out` keyword. As an alternative, + the old ``torch.zeros_like(input, out=output)`` is equivalent to + ``torch.zeros(input.size(), out=output)``. + + Args: + input (Tensor): the size of :attr:`input` will determine size of the output tensor. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned Tensor. + Default: if ``None``, defaults to the dtype of :attr:`input`. + layout (:class:`torch.layout`, optional): the desired layout of returned tensor. + Default: if ``None``, defaults to the layout of :attr:`input`. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, defaults to the device of :attr:`input`. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + memory_format (:class:`torch.memory_format`, optional): the desired memory format of + returned Tensor. Default: ``torch.preserve_format``. + + Example:: + + >>> input = torch.empty(2, 3) + >>> torch.zeros_like(input) + tensor([[ 0., 0., 0.], + [ 0., 0., 0.]]) + """ + ... 
+ +__all__ = ['__and__', '__lshift__', '__or__', '__rshift__', '__xor__', '_adaptive_avg_pool2d', + '_adaptive_avg_pool3d', '_add_batch_dim', '_add_relu', '_add_relu_', '_addmm_activation', + '_aminmax', '_amp_foreach_non_finite_check_and_unscale_', '_amp_update_scale_', '_assert_async', + '_assert_scalar', '_assert_tensor_metadata', '_batch_norm_impl_index', '_cast_Byte', '_cast_Char', + '_cast_Double', '_cast_Float', '_cast_Half', '_cast_Int', '_cast_Long', '_cast_Short', + '_choose_qparams_per_tensor', '_chunk_cat', '_coalesce', '_compute_linear_combination', '_conj', + '_conj_copy', '_conj_physical', '_convert_indices_from_coo_to_csr', + '_convert_indices_from_csr_to_coo', '_convert_weight_to_int4pack', '_convolution', + '_convolution_mode', '_copy_from', '_copy_from_and_resize', '_cslt_compress', '_cslt_sparse_mm', + '_cslt_sparse_mm_search', '_ctc_loss', '_cudnn_ctc_loss', '_cudnn_init_dropout_state', + '_cudnn_rnn', '_cudnn_rnn_flatten_weight', '_cufft_clear_plan_cache', + '_cufft_get_plan_cache_max_size', '_cufft_get_plan_cache_size', '_cufft_set_plan_cache_max_size', + '_cummax_helper', '_cummin_helper', '_debug_has_internal_overlap', '_dim_arange', + '_dirichlet_grad', '_disable_functionalization', '_efficientzerotensor', '_embedding_bag', + '_embedding_bag_forward_only', '_empty_affine_quantized', '_empty_per_channel_affine_quantized', + '_enable_functionalization', '_euclidean_dist', '_fake_quantize_learnable_per_channel_affine', + '_fake_quantize_learnable_per_tensor_affine', + '_fake_quantize_per_tensor_affine_cachemask_tensor_qparams', + '_fake_quantize_per_tensor_affine_cachemask_tensor_qparams', '_fft_c2c', '_fft_c2r', '_fft_r2c', + '_fill_mem_eff_dropout_mask_', '_foobar', '_foreach_abs', '_foreach_abs_', '_foreach_acos', + '_foreach_acos_', '_foreach_add', '_foreach_add_', '_foreach_addcdiv', '_foreach_addcdiv_', + '_foreach_addcmul', '_foreach_addcmul_', '_foreach_asin', '_foreach_asin_', '_foreach_atan', + '_foreach_atan_', '_foreach_ceil', '_foreach_ceil_', '_foreach_clamp_max', '_foreach_clamp_max_', + '_foreach_clamp_min', '_foreach_clamp_min_', '_foreach_copy_', '_foreach_cos', '_foreach_cos_', + '_foreach_cosh', '_foreach_cosh_', '_foreach_div', '_foreach_div_', '_foreach_erf', + '_foreach_erf_', '_foreach_erfc', '_foreach_erfc_', '_foreach_exp', '_foreach_exp_', + '_foreach_expm1', '_foreach_expm1_', '_foreach_floor', '_foreach_floor_', '_foreach_frac', + '_foreach_frac_', '_foreach_lerp', '_foreach_lerp_', '_foreach_lgamma', '_foreach_lgamma_', + '_foreach_log', '_foreach_log10', '_foreach_log10_', '_foreach_log1p', '_foreach_log1p_', + '_foreach_log2', '_foreach_log2_', '_foreach_log_', '_foreach_maximum', '_foreach_maximum_', + '_foreach_minimum', '_foreach_minimum_', '_foreach_mul', '_foreach_mul_', '_foreach_neg', + '_foreach_neg_', '_foreach_norm', '_foreach_pow', '_foreach_pow_', '_foreach_reciprocal', + '_foreach_reciprocal_', '_foreach_round', '_foreach_round_', '_foreach_sigmoid', + '_foreach_sigmoid_', '_foreach_sign', '_foreach_sign_', '_foreach_sin', '_foreach_sin_', + '_foreach_sinh', '_foreach_sinh_', '_foreach_sqrt', '_foreach_sqrt_', '_foreach_sub', + '_foreach_sub_', '_foreach_tan', '_foreach_tan_', '_foreach_tanh', '_foreach_tanh_', + '_foreach_trunc', '_foreach_trunc_', '_foreach_zero_', '_from_functional_tensor', + '_functional_assert_async', '_functional_assert_scalar', '_functional_sym_constrain_range', + '_functional_sym_constrain_range_for_size', + '_functionalize_are_all_mutations_hidden_from_autograd', + 
'_functionalize_are_all_mutations_under_no_grad_or_inference_mode', '_functionalize_commit_update', + '_functionalize_mark_mutation_hidden_from_autograd', '_functionalize_replace', + '_functionalize_sync', '_fused_adam_', '_fused_adamw_', '_fused_dropout', + '_fused_moving_avg_obs_fq_helper', '_fused_moving_avg_obs_fq_helper', '_fused_sdp_choice', + '_fused_sgd_', '_fw_primal_copy', '_grid_sampler_2d_cpu_fallback', + '_has_compatible_shallow_copy_type', '_histogramdd_bin_edges', '_histogramdd_from_bin_cts', + '_histogramdd_from_bin_tensors', '_index_put_impl_', '_indices_copy', '_int_mm', '_is_all_true', + '_is_any_true', '_is_functional_tensor', '_is_zerotensor', '_lazy_clone', '_linalg_check_errors', + '_linalg_det', '_linalg_det', '_linalg_eigh', '_linalg_eigh', '_linalg_slogdet', '_linalg_slogdet', + '_linalg_solve_ex', '_linalg_solve_ex', '_linalg_svd', '_linalg_svd', '_log_softmax', + '_log_softmax_backward_data', '_logcumsumexp', '_lstm_mps', '_lu_with_info', '_lu_with_info', + '_make_dep_token', '_make_dual', '_make_dual_copy', '_make_per_channel_quantized_tensor', + '_make_per_tensor_quantized_tensor', '_masked_scale', '_masked_softmax', '_mixed_dtypes_linear', + '_mkldnn_reshape', '_mkldnn_transpose', '_mkldnn_transpose_', '_mps_convolution', + '_mps_convolution_transpose', '_native_batch_norm_legit', '_native_batch_norm_legit_no_training', + '_native_multi_head_attention', '_neg_view', '_neg_view_copy', '_nested_from_padded', + '_nested_from_padded_and_nested_example', '_nested_get_jagged_dummy', '_nested_get_lengths', + '_nested_get_offsets', '_nested_get_ragged_idx', '_nested_get_values', '_nested_get_values_copy', + '_nested_tensor_from_mask', '_nested_tensor_from_mask_left_aligned', + '_nested_tensor_from_tensor_list', '_nested_tensor_softmax_with_shape', '_nested_view_from_buffer', + '_nested_view_from_buffer_copy', '_nested_view_from_jagged', '_nested_view_from_jagged_copy', + '_nnpack_available', '_nnpack_spatial_convolution', '_pack_padded_sequence', + '_pad_packed_sequence', '_pin_memory', '_prelu_kernel', '_print', '_propagate_xla_data', + '_remove_batch_dim', '_reshape_alias_copy', '_reshape_from_tensor', '_resize_output_', + '_rowwise_prune', '_sample_dirichlet', '_saturate_weight_to_fp16', + '_scaled_dot_product_attention_math', '_scaled_dot_product_cudnn_attention', + '_scaled_dot_product_cudnn_attention', '_scaled_dot_product_efficient_attention', + '_scaled_dot_product_efficient_attention', '_scaled_dot_product_flash_attention', + '_scaled_dot_product_flash_attention', '_scaled_dot_product_flash_attention_for_cpu', + '_scaled_dot_product_flash_attention_for_cpu', '_scaled_mm', '_shape_as_tensor', + '_sobol_engine_draw', '_sobol_engine_ff_', '_sobol_engine_initialize_state_', + '_sobol_engine_scramble_', '_softmax', '_softmax_backward_data', '_sparse_broadcast_to', + '_sparse_broadcast_to_copy', '_sparse_csr_prod', '_sparse_csr_sum', + '_sparse_log_softmax_backward_data', '_sparse_semi_structured_linear', + '_sparse_softmax_backward_data', '_sparse_sparse_matmul', '_sparse_sum', '_stack', + '_standard_gamma', '_standard_gamma_grad', '_sync', '_test_autograd_multiple_dispatch', + '_test_autograd_multiple_dispatch_view', '_test_autograd_multiple_dispatch_view_copy', + '_test_check_tensor', '_test_functorch_fallback', '_test_parallel_materialize', + '_test_serialization_subcmul', '_to_cpu', '_to_functional_tensor', '_to_sparse_semi_structured', + '_transform_bias_rescale_qkv', '_transformer_encoder_layer_fwd', '_trilinear', + '_triton_multi_head_attention', 
'_triton_scaled_dot_attention', '_unique', '_unique2', + '_unpack_dual', '_unpack_dual', '_unsafe_index', '_unsafe_index_put', '_use_cudnn_ctc_loss', + '_use_cudnn_rnn_flatten_weight', '_validate_compressed_sparse_indices', + '_validate_sparse_bsc_tensor_args', '_validate_sparse_bsr_tensor_args', + '_validate_sparse_compressed_tensor_args', '_validate_sparse_coo_tensor_args', + '_validate_sparse_csc_tensor_args', '_validate_sparse_csr_tensor_args', '_values_copy', + '_weight_int4pack_mm', '_weight_int8pack_mm', '_weight_norm', '_weight_norm_interface', 'abs', + 'abs_', 'absolute', 'acos', 'acos_', 'acosh', 'acosh_', 'adaptive_avg_pool1d', + 'adaptive_max_pool1d', 'add', 'addbmm', 'addcdiv', 'addcmul', 'addmm', 'addmv', 'addmv_', 'addr', + 'adjoint', 'affine_grid_generator', 'alias_copy', 'all', 'allclose', 'alpha_dropout', + 'alpha_dropout_', 'amax', 'amin', 'aminmax', 'aminmax', 'angle', 'any', 'arange', 'arccos', + 'arccos_', 'arccosh', 'arccosh_', 'arcsin', 'arcsin_', 'arcsinh', 'arcsinh_', 'arctan', 'arctan2', + 'arctan_', 'arctanh', 'arctanh_', 'argmax', 'argmin', 'argsort', 'argwhere', 'as_strided', + 'as_strided_', 'as_strided_copy', 'as_strided_scatter', 'as_tensor', 'asarray', 'asin', 'asin_', + 'asinh', 'asinh_', 'atan', 'atan2', 'atan_', 'atanh', 'atanh_', 'avg_pool1d', 'baddbmm', + 'bartlett_window', 'batch_norm', 'batch_norm_backward_elemt', 'batch_norm_backward_reduce', + 'batch_norm_elemt', 'batch_norm_gather_stats', 'batch_norm_gather_stats_with_counts', + 'batch_norm_stats', 'batch_norm_update_stats', 'bernoulli', 'bilinear', + 'binary_cross_entropy_with_logits', 'bincount', 'binomial', 'bitwise_and', 'bitwise_left_shift', + 'bitwise_not', 'bitwise_or', 'bitwise_right_shift', 'bitwise_xor', 'blackman_window', 'bmm', + 'broadcast_to', 'bucketize', 'can_cast', 'cat', 'ccol_indices_copy', 'ceil', 'ceil_', 'celu', + 'celu_', 'channel_shuffle', 'cholesky', 'cholesky_inverse', 'cholesky_solve', + 'choose_qparams_optimized', 'chunk', 'clamp', 'clamp_', 'clamp_max', 'clamp_max_', 'clamp_min', + 'clamp_min_', 'clip', 'clip_', 'clone', 'col_indices_copy', 'column_stack', 'combinations', + 'complex', 'concat', 'concatenate', 'conj', 'conj_physical', 'conj_physical_', 'constant_pad_nd', + 'conv1d', 'conv2d', 'conv3d', 'conv_tbc', 'conv_transpose1d', 'conv_transpose2d', + 'conv_transpose3d', 'convolution', 'copysign', 'corrcoef', 'cos', 'cos_', 'cosh', 'cosh_', + 'cosine_embedding_loss', 'cosine_similarity', 'count_nonzero', 'cov', 'cross', 'crow_indices_copy', + 'ctc_loss', 'cudnn_affine_grid_generator', 'cudnn_batch_norm', 'cudnn_convolution', + 'cudnn_convolution_add_relu', 'cudnn_convolution_relu', 'cudnn_convolution_transpose', + 'cudnn_grid_sampler', 'cudnn_is_acceptable', 'cummax', 'cummax', 'cummin', 'cummin', 'cumprod', + 'cumsum', 'cumulative_trapezoid', 'deg2rad', 'deg2rad_', 'dequantize', 'det', 'detach', 'detach_', + 'detach_copy', 'diag', 'diag_embed', 'diagflat', 'diagonal', 'diagonal_copy', 'diagonal_scatter', + 'diff', 'digamma', 'dist', 'div', 'divide', 'dot', 'dropout', 'dropout_', 'dsmm', 'dsplit', + 'dstack', 'embedding', 'embedding_bag', 'embedding_renorm_', 'empty', 'empty_like', + 'empty_permuted', 'empty_quantized', 'empty_strided', 'eq', 'equal', 'erf', 'erf_', 'erfc', + 'erfc_', 'erfinv', 'exp', 'exp2', 'exp2_', 'exp_', 'expand_copy', 'expm1', 'expm1_', 'eye', + 'fake_quantize_per_channel_affine', 'fake_quantize_per_tensor_affine', 'fbgemm_linear_fp16_weight', + 'fbgemm_linear_fp16_weight_fp32_activation', 'fbgemm_linear_int8_weight', + 
'fbgemm_linear_int8_weight_fp32_activation', 'fbgemm_linear_quantize_weight', + 'fbgemm_pack_gemm_matrix_fp16', 'fbgemm_pack_quantized_matrix', 'feature_alpha_dropout', + 'feature_alpha_dropout_', 'feature_dropout', 'feature_dropout_', 'fill', 'fill_', 'fix', 'fix_', + 'flatten', 'flip', 'fliplr', 'flipud', 'float_power', 'floor', 'floor_', 'floor_divide', 'fmax', + 'fmin', 'fmod', 'frac', 'frac_', 'frexp', 'frexp', 'frobenius_norm', 'from_file', 'from_numpy', + 'frombuffer', 'full', 'full_like', 'fused_moving_avg_obs_fake_quant', 'gather', 'gcd', 'gcd_', + 'ge', 'geqrf', 'geqrf', 'ger', 'get_default_dtype', 'get_num_interop_threads', 'get_num_threads', + 'gradient', 'greater', 'greater_equal', 'grid_sampler', 'grid_sampler_2d', 'grid_sampler_3d', + 'group_norm', 'gru', 'gru_cell', 'gt', 'hamming_window', 'hann_window', 'hardshrink', 'heaviside', + 'hinge_embedding_loss', 'histc', 'histogram', 'histogram', 'histogramdd', 'histogramdd', 'hsmm', + 'hsplit', 'hspmm', 'hstack', 'hypot', 'i0', 'i0_', 'igamma', 'igammac', 'imag', 'index_add', + 'index_copy', 'index_fill', 'index_put', 'index_put_', 'index_reduce', 'index_select', + 'indices_copy', 'init_num_threads', 'inner', 'instance_norm', 'int_repr', 'inverse', 'is_complex', + 'is_conj', 'is_distributed', 'is_floating_point', 'is_grad_enabled', 'is_inference', + 'is_inference_mode_enabled', 'is_neg', 'is_nonzero', 'is_same_size', 'is_signed', + 'is_vulkan_available', 'isclose', 'isfinite', 'isin', 'isinf', 'isnan', 'isneginf', 'isposinf', + 'isreal', 'istft', 'kaiser_window', 'kl_div', 'kron', 'kthvalue', 'kthvalue', 'layer_norm', 'lcm', + 'lcm_', 'ldexp', 'ldexp_', 'le', 'lerp', 'less', 'less_equal', 'lgamma', 'linspace', 'log', + 'log10', 'log10_', 'log1p', 'log1p_', 'log2', 'log2_', 'log_', 'log_softmax', 'logaddexp', + 'logaddexp2', 'logcumsumexp', 'logdet', 'logical_and', 'logical_not', 'logical_or', 'logical_xor', + 'logit', 'logit_', 'logspace', 'logsumexp', 'lstm', 'lstm_cell', 'lt', 'lu_solve', 'lu_unpack', + 'lu_unpack', 'margin_ranking_loss', 'masked_fill', 'masked_scatter', 'masked_select', 'matmul', + 'matrix_exp', 'matrix_power', 'max', 'max', 'max_pool1d', 'max_pool1d_with_indices', 'max_pool2d', + 'max_pool3d', 'maximum', 'mean', 'median', 'median', 'min', 'min', 'minimum', 'miopen_batch_norm', + 'miopen_convolution', 'miopen_convolution_add_relu', 'miopen_convolution_relu', + 'miopen_convolution_transpose', 'miopen_depthwise_convolution', 'miopen_rnn', + 'mkldnn_adaptive_avg_pool2d', 'mkldnn_convolution', 'mkldnn_linear_backward_weights', + 'mkldnn_max_pool2d', 'mkldnn_max_pool3d', 'mkldnn_rnn_layer', 'mm', 'mode', 'mode', 'moveaxis', + 'movedim', 'msort', 'mul', 'multinomial', 'multiply', 'mv', 'mvlgamma', 'nan_to_num', + 'nan_to_num_', 'nanmean', 'nanmedian', 'nanmedian', 'nanquantile', 'nansum', 'narrow', + 'narrow_copy', 'native_batch_norm', 'native_channel_shuffle', 'native_dropout', + 'native_group_norm', 'native_layer_norm', 'native_norm', 'ne', 'neg', 'neg_', 'negative', + 'negative_', 'nextafter', 'nonzero', 'nonzero_static', 'norm_except_dim', 'normal', 'not_equal', + 'nuclear_norm', 'numel', 'ones', 'ones_like', 'orgqr', 'ormqr', 'outer', 'pairwise_distance', + 'pdist', 'permute', 'permute_copy', 'pinverse', 'pixel_shuffle', 'pixel_unshuffle', 'poisson', + 'poisson_nll_loss', 'polar', 'polygamma', 'positive', 'pow', 'prelu', 'prod', 'promote_types', + 'put', 'q_per_channel_axis', 'q_per_channel_scales', 'q_per_channel_zero_points', 'q_scale', + 'q_zero_point', 'qr', 'qr', 'quantile', 'quantize_per_channel', 
'quantize_per_tensor', + 'quantize_per_tensor_dynamic', 'quantized_batch_norm', 'quantized_gru_cell', 'quantized_lstm_cell', + 'quantized_max_pool1d', 'quantized_max_pool2d', 'quantized_max_pool3d', 'quantized_rnn_relu_cell', + 'quantized_rnn_tanh_cell', 'rad2deg', 'rad2deg_', 'rand', 'rand_like', 'randint', 'randint_like', + 'randn', 'randn_like', 'randperm', 'range', 'ravel', 'real', 'reciprocal', 'reciprocal_', 'relu', + 'relu_', 'remainder', 'renorm', 'repeat_interleave', 'reshape', 'resize_as_', 'resize_as_sparse_', + 'resolve_conj', 'resolve_neg', 'result_type', 'rnn_relu', 'rnn_relu_cell', 'rnn_tanh', + 'rnn_tanh_cell', 'roll', 'rot90', 'round', 'round_', 'row_indices_copy', 'row_stack', 'rrelu', + 'rrelu_', 'rsqrt', 'rsqrt_', 'rsub', 'saddmm', 'scalar_tensor', 'scatter', 'scatter_add', + 'scatter_reduce', 'searchsorted', 'segment_reduce', 'select', 'select_copy', 'select_scatter', + 'selu', 'selu_', 'set_flush_denormal', 'set_num_interop_threads', 'set_num_threads', 'sgn', + 'sigmoid', 'sigmoid_', 'sign', 'signbit', 'sin', 'sin_', 'sinc', 'sinc_', 'sinh', 'sinh_', + 'slice_copy', 'slice_inverse', 'slice_scatter', 'slogdet', 'slogdet', 'smm', 'softmax', 'sort', + 'sort', 'sparse_bsc_tensor', 'sparse_bsr_tensor', 'sparse_compressed_tensor', 'sparse_coo_tensor', + 'sparse_csc_tensor', 'sparse_csr_tensor', 'split_copy', 'split_with_sizes', + 'split_with_sizes_copy', 'spmm', 'sqrt', 'sqrt_', 'square', 'square_', 'squeeze', 'squeeze_copy', + 'sspaddmm', 'stack', 'std', 'std_mean', 'sub', 'subtract', 'sum', 'svd', 'svd', 'swapaxes', + 'swapdims', 'sym_constrain_range', 'sym_constrain_range_for_size', 't', 't_copy', 'take', + 'take_along_dim', 'tan', 'tan_', 'tanh', 'tanh_', 'tensor', 'tensor_split', 'threshold', + 'threshold_', 'tile', 'topk', 'topk', 'trace', 'transpose', 'transpose_copy', 'trapezoid', 'trapz', + 'triangular_solve', 'triangular_solve', 'tril', 'tril_indices', 'triplet_margin_loss', 'triu', + 'triu_indices', 'true_divide', 'trunc', 'trunc_', 'unbind', 'unbind_copy', 'unflatten', + 'unfold_copy', 'unique_dim', 'unsafe_chunk', 'unsafe_split', 'unsafe_split_with_sizes', + 'unsqueeze', 'unsqueeze_copy', 'values_copy', 'vander', 'var', 'var_mean', 'vdot', + 'view_as_complex', 'view_as_complex_copy', 'view_as_real', 'view_as_real_copy', 'view_copy', + 'vsplit', 'vstack', 'where', 'xlogy', 'xlogy_', 'zero_', 'zeros', 'zeros_like'] diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_C/__init__.pyi b/llmeval-env/lib/python3.10/site-packages/torch/_C/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..0eefb36a2a67b2b3aa719abafa1499d33ce5a34d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_C/__init__.pyi @@ -0,0 +1,10976 @@ +# @generated from torch/_C/__init__.pyi.in +# mypy: disable-error-code="type-arg" + +import builtins +from enum import Enum, IntEnum +from pathlib import Path +from typing import ( + Any, + AnyStr, + BinaryIO, + Callable, + ContextManager, + Dict, + Generic, + Iterable, + Iterator, + List, + Literal, + NamedTuple, + Optional, + Protocol, + Sequence, + Set, + SupportsIndex, + Tuple, + Type, + TypeVar, + Union, + overload, + runtime_checkable, +) +from typing_extensions import ParamSpec + +import torch +from torch import inf, SymInt, Tensor +from torch.autograd.graph import Node as _Node +from torch.package import PackageExporter +from torch.storage import UntypedStorage, TypedStorage +from torch.types import ( + _bool, + _complex, + _device, + _dispatchkey, + _dtype, + _float, + _int, + _layout, + 
_qscheme, + _size, + Device, + Number, + Storage, +) + +from torch._prims_common import DeviceLikeType + +# This module is defined in torch/csrc/Module.cpp + +from . import _functorch, _lazy, _lazy_ts_backend, _nn, _onnx, _VariableFunctions, _cpu, _aoti, _verbose + +K = TypeVar("K") +T = TypeVar("T") +S = TypeVar("S", bound="torch.Tensor") +P = ParamSpec("P") +ReturnVal = TypeVar("ReturnVal", covariant=True) # return value (always covariant) +_T_co = TypeVar("_T_co", covariant=True) + + +@runtime_checkable +class _NestedSequence(Protocol[_T_co]): + """A protocol for representing nested sequences. + + References:: + `numpy._typing._NestedSequence` + + """ + + def __len__(self, /) -> builtins.int: ... + def __getitem__(self, index: builtins.int, /) -> _T_co | _NestedSequence[_T_co]: ... + def __contains__(self, x: builtins.object, /) -> builtins.bool: ... + def __iter__(self, /) -> Iterator[_T_co | _NestedSequence[_T_co]]: ... + def __reversed__(self, /) -> Iterator[_T_co | _NestedSequence[_T_co]]: ... + def count(self, value: Any, /) -> builtins.int: ... + def index(self, value: Any, /) -> builtins.int: ... + + +# Defined in torch/csrc/Device.cpp +class device: + type: str # THPDevice_type + index: _int # THPDevice_index + + def __get__(self, instance, owner=None) -> device: ... + + # THPDevice_pynew + @overload + def __init__(self, device: DeviceLikeType) -> None: ... + @overload + def __init__(self, type: str, index: _int) -> None: ... + + # Uncomment if we ever make torch.device a decorator + # def __call__(self, func: T) -> T: ... + + def __enter__(self) -> device: ... + def __exit__(self, exc_type, exc_val, exc_tb) -> None: ... + def __reduce__(self) -> Tuple[Any, ...]: ... # THPDevice_reduce + +# Defined in torch/csrc/Stream.cpp +class Stream: + stream_id: _int # Stream id + device_index: _int + device_type: _int + + device: device # The device of the stream + +# Defined in torch/csrc/Size.cpp +class Size(Tuple[_int, ...]): + # TODO: __reduce__ + + @overload # type: ignore[override] + def __getitem__(self: Size, key: _int) -> _int: ... + @overload + def __getitem__(self: Size, key: slice) -> Size: ... + def numel(self: Size) -> _int: ... + +# Defined in torch/csrc/Dtype.cpp +class dtype: + # TODO: __reduce__ + is_floating_point: _bool + is_complex: _bool + is_signed: _bool + itemsize: _int + def to_real(self) -> dtype: ... + def to_complex(self) -> dtype: ... + +# Defined in torch/csrc/TypeInfo.cpp +class iinfo: + bits: _int + min: _int + max: _int + dtype: str + + def __init__(self, dtype: _dtype) -> None: ... + +class finfo: + bits: _int + min: _float + max: _float + eps: _float + tiny: _float + smallest_normal: _float + resolution: _float + dtype: str + + @overload + def __init__(self, dtype: _dtype) -> None: ... + @overload + def __init__(self) -> None: ... + +float32: dtype = ... +float: dtype = ... +float64: dtype = ... +double: dtype = ... +float16: dtype = ... +bfloat16: dtype = ... +float8_e4m3fn: dtype = ... +float8_e4m3fnuz: dtype = ... +float8_e5m2: dtype = ... +float8_e5m2fnuz: dtype = ... +half: dtype = ... +uint8: dtype = ... +uint16: dtype = ... +uint32: dtype = ... +uint64: dtype = ... +int8: dtype = ... +int16: dtype = ... +short: dtype = ... +int32: dtype = ... +int: dtype = ... +int64: dtype = ... +long: dtype = ... +complex32: dtype = ... +complex64: dtype = ... +chalf: dtype = ... +cfloat: dtype = ... +complex128: dtype = ... +cdouble: dtype = ... +quint8: dtype = ... +qint8: dtype = ... +qint32: dtype = ... +bool: dtype = ... +quint4x2: dtype = ... 
+quint2x4: dtype = ... +bits1x8: dtype = ... +bits2x4: dtype = ... +bits4x2: dtype = ... +bits8: dtype = ... +bits16: dtype = ... + +# Defined in torch/csrc/Layout.cpp +class layout: ... + +# Defined in torch/csrc/utils/disable_torch_function.cpp +def DisableTorchFunction(): ... +def DisableTorchFunctionSubclass(): ... + +# Defined in torch/csrc/utils/tensor_layouts.cpp +strided: layout = ... +sparse_coo: layout = ... +sparse_csr: layout = ... +sparse_csc: layout = ... +sparse_bsr: layout = ... +sparse_bsc: layout = ... +_mkldnn: layout = ... +jagged: layout = ... + +# Defined in torch/csrc/MemoryFormat.cpp +class memory_format: ... + +# Defined in torch/csrc/utils/tensor_memoryformats.cpp +contiguous_format: memory_format = ... +channels_last: memory_format = ... +channels_last_3d: memory_format = ... +preserve_format: memory_format = ... + +# Defined in torch/csrc/QScheme.cpp +class qscheme: ... + +# Defined in torch/csrc/utils/tensor_qschemes.h +per_tensor_affine: qscheme = ... +per_channel_affine: qscheme = ... +per_tensor_symmetric: qscheme = ... +per_channel_symmetric: qscheme = ... +per_channel_affine_float_qparams: qscheme = ... + +# Defined in torch/csrc/autograd/python_function.cpp +class _FunctionBase: + saved_tensors: Tuple[Tensor] + _raw_saved_tensors: Tuple[Any] + next_functions: Tuple[Tuple[Any, _int], ...] + needs_input_grad: Tuple[_bool] + metadata: dict + _materialize_non_diff_grads: _bool + # skip adding type hints for the fields that have wrappers defined + # in torch/autograd/function.py + +# Defined in torch/csrc/autograd/python_legacy_variable.cpp +class _LegacyVariableBase(Tensor): # inherits from Tensor to appease mypy + def __init__( + self, + data: Optional[Tensor] = ..., + requires_grad: Optional[_bool] = ..., + volatile: Optional[_bool] = ..., + _grad_fn: Optional[_FunctionBase] = ..., + ) -> None: ... + +# Defined in torch/csrc/jit/python/init.cpp +class IODescriptor: ... +class JITException: ... + +class Future(Generic[T]): + def __init__(self, devices: List[device]) -> None: ... + def done(self) -> _bool: ... + def value(self) -> T: ... + def wait(self) -> T: ... + def add_done_callback(self, callback: Callable) -> None: ... + def then(self, callback: Callable) -> Future[T]: ... + def set_result(self, result: T) -> None: ... + def _set_unwrap_func(self, callback: Callable) -> None: ... + +class _Await: + def __init__(self) -> None: ... + def fn(self) -> Callable: ... + def args(self) -> Tuple[Any, ...]: ... + def is_nowait(self) -> _bool: ... + +def _jit_set_num_profiled_runs(num: _size) -> _size: ... + +# Defined in torch/csrc/jit/passes/mobile_optimizer_type.h +class _MobileOptimizerType: ... + +CONV_BN_FUSION: _MobileOptimizerType +INSERT_FOLD_PREPACK_OPS: _MobileOptimizerType +REMOVE_DROPOUT: _MobileOptimizerType +FUSE_ADD_RELU: _MobileOptimizerType +HOIST_CONV_PACKED_PARAMS: _MobileOptimizerType +VULKAN_AUTOMATIC_GPU_TRANSFER: _MobileOptimizerType + +def fork(*args: Any, **kwargs: Any) -> Future: ... +def wait(fut: Future) -> Any: ... +def _awaitable(*args: Any, **kwargs: Any) -> _Await: ... +def _awaitable_wait(aw: _Await) -> Any: ... +def _awaitable_nowait(x: Any) -> _Await: ... +def _collect_all(futures: List[Future]) -> Future: ... +def _set_print_stack_traces_on_fatal_signal(print: _bool) -> None: ... +def unify_type_list(types: List[JitType]) -> JitType: ... +def _freeze_module( + module: ScriptModule, + preserved_attrs: List[str] = [], + freeze_interfaces: _bool = True, + preserveParameters: _bool = True, +) -> ScriptModule: ... 
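+# NOTE (illustrative comment, not part of the generated bindings): the Future class and
+# the fork/wait functions above back the public torch.jit.fork / torch.jit.wait API.
+# A minimal sketch, assuming a standard torch install:
+#
+#     import torch
+#
+#     def heavy(x: torch.Tensor) -> torch.Tensor:
+#         return x @ x.t()
+#
+#     fut = torch.jit.fork(heavy, torch.randn(4, 4))  # schedules heavy() asynchronously
+#     result = torch.jit.wait(fut)                    # blocks until the Future resolves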
+def _jit_pass_optimize_frozen_graph(Graph, optimize_numerics: _bool = True) -> None: ... +def _jit_pass_optimize_for_inference( + module: torch.jit.ScriptModule, + other_methods: List[str] = [], +) -> None: ... +def _jit_pass_fold_frozen_conv_bn(graph: Graph): ... +def _jit_pass_fold_frozen_conv_add_or_sub(graph: Graph): ... +def _jit_pass_fold_frozen_conv_mul_or_div(graph: Graph): ... +def _jit_pass_fuse_frozen_conv_add_relu(graph: Graph): ... +def _jit_pass_concat_frozen_linear(graph: Graph): ... +def _jit_pass_convert_frozen_ops_to_mkldnn(graph: Graph): ... +def _jit_pass_transpose_frozen_linear(graph: Graph): ... +def _jit_pass_remove_dropout(module: torch.jit.ScriptModule): ... +def _is_tracing() -> _bool: ... +def _jit_init() -> _bool: ... +def _jit_flatten(arg: Any) -> Tuple[List[Tensor], IODescriptor]: ... +def _jit_unflatten(vars: List[Tensor], desc: IODescriptor) -> Any: ... +def _jit_get_operation(op_name: str) -> Tuple[Callable, List[str]]: ... +def _get_operation_overload( + op_name: str, + op_overload_name: str, +) -> Tuple[Callable, Callable, List[Any]]: ... +def _get_schema(op_name: str, overload_name: str) -> FunctionSchema: ... +def _jit_pass_optimize_for_mobile( + module: torch.jit.ScriptModule, + optimization_blocklist: Set[_MobileOptimizerType], + preserved_methods: List[AnyStr], +) -> torch.jit.ScriptModule: ... +def _clone_module_with_class( + module: torch.jit.ScriptModule, + ignored_methods: List[AnyStr], + ignored_attributes: List[AnyStr], +) -> torch.jit.ScriptModule: ... +def _jit_pass_vulkan_optimize_for_mobile( + module: torch.jit.ScriptModule, + optimization_blocklist: Set[_MobileOptimizerType], + preserved_methods: List[AnyStr], +) -> torch.jit.ScriptModule: ... +def _jit_pass_metal_optimize_for_mobile( + module: torch.jit.ScriptModule, + preserved_methods: List[AnyStr], +) -> torch.jit.ScriptModule: ... +def _jit_pass_inline(Graph) -> None: ... +def _jit_pass_constant_propagation(Graph) -> None: ... +def _jit_pass_propagate_shapes_on_graph(Graph) -> None: ... +def _jit_register_decomposition_for_schema(schema: FunctionSchema, Graph) -> None: ... +def _jit_erase_non_input_shape_information(Graph) -> None: ... +def _jit_get_schemas_for_operator(name: str) -> List[FunctionSchema]: ... +def _jit_get_all_schemas() -> List[FunctionSchema]: ... +def _jit_check_alias_annotation( + g: Graph, + args: Tuple[Any, ...], + unqualified_op_name: str, +): ... +def _jit_can_fuse_on_cpu() -> _bool: ... +def _jit_can_fuse_on_gpu() -> _bool: ... +def _jit_can_fuse_on_cpu_legacy() -> _bool: ... +def _debug_get_fusion_group_inlining() -> _bool: ... +def _debug_set_fusion_group_inlining(enable: _bool): ... +def _jit_texpr_fuser_enabled() -> _bool: ... +def _jit_nvfuser_enabled() -> _bool: ... +def _jit_llga_enabled() -> _bool: ... +def _jit_set_llga_enabled(enable: _bool): ... +def _llvm_enabled() -> _bool: ... +def _jit_override_can_fuse_on_cpu(override: _bool): ... +def _jit_override_can_fuse_on_gpu(override: _bool): ... +def _jit_override_can_fuse_on_cpu_legacy(override: _bool): ... +def _jit_set_symbolic_shapes_test_mode(override: _bool): ... +def _jit_symbolic_shapes_test_mode_enabled() -> _bool: ... +def _jit_set_texpr_fuser_enabled(enable: _bool): ... +def _jit_set_te_must_use_llvm_cpu(use_llvm: _bool): ... +def _jit_set_nvfuser_enabled(enable: _bool) -> _bool: ... +def _jit_cat_wo_conditionals(optimize_cat: _bool): ... +def _jit_opt_conditionals(opt_conds: _bool): ... +def _jit_pass_canonicalize(graph: Graph, keep_unique_names: _bool = True): ... 
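+# NOTE (illustrative comment, not part of the generated bindings): the fuser toggles
+# declared above are internal; a hedged sketch of querying/flipping them directly
+# (the public torch.jit.fuser(...) context manager is usually the better entry point):
+#
+#     import torch
+#
+#     was_on = torch._C._jit_texpr_fuser_enabled()     # query current fuser state
+#     torch._C._jit_override_can_fuse_on_cpu(True)     # allow CPU fusion groups
+#     torch._C._jit_set_texpr_fuser_enabled(True)      # enable the TensorExpr fuser
+#     # ... run scripted code ...
+#     torch._C._jit_set_texpr_fuser_enabled(was_on)    # restore the previous state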
+def _jit_pass_erase_shape_information(graph: Graph): ... +def _jit_pass_fold_convbn(module: torch.jit.ScriptModule): ... +def _jit_pass_insert_observers( + module: torch.jit.ScriptModule, + method_name: str, + qconfig_dict: Dict[str, Any], + inplace: _bool, + quant_type: _int, +): ... +def _jit_pass_insert_quant_dequant( + module: torch.jit.ScriptModule, + method_name: str, + inplace: _bool, + debug: _bool, + quant_type: _int, +): ... +def _jit_pass_insert_quant_dequant_for_ondevice_ptq( + module: torch.jit.ScriptModule, + method_name: str, + inplace: _bool, + debug: _bool, + quant_type: _int, +): ... +def _jit_pass_quant_finalize( + module: torch.jit.ScriptModule, + quant_type: _int, + preserved_attrs: Sequence[str], +): ... +def _jit_pass_quant_finalize_for_ondevice_ptq( + module: torch.jit.ScriptModule, + quant_type: _int, + method_name: str, +): ... +def _jit_pass_insert_observer_method_for_ondevice_ptq( + module: torch.jit.ScriptModule, + method_name: str, + qconfig_dict: Dict[str, Any], + inplace: _bool, + quant_type: _int, +): ... +def _jit_set_profiling_executor(profiling_flag: _bool) -> _bool: ... +def _jit_set_profiling_mode(profiling_flag: _bool) -> _bool: ... +def _jit_set_fusion_strategy( + strategy: List[Tuple[str, _int]], +) -> List[Tuple[str, _int]]: ... +def _jit_try_infer_type(obj: Any) -> InferredType: ... +def _jit_get_trigger_value(trigger_name: str) -> _int: ... + +# Defined in torch/csrc/jit/python/script_init.cpp +ResolutionCallback = Callable[[str], Callable[..., Any]] + +# Defined in torch/csrc/jit/python/script_init.cpp +# and torch/csrc/jit/python/init.cpp +def _create_function_from_graph(qualname: str, graph: Graph) -> ScriptFunction: ... +def _debug_set_autodiff_subgraph_inlining(disabled: _bool) -> None: ... +def _ivalue_tags_match(lhs: ScriptModule, rhs: ScriptModule) -> _bool: ... +def _jit_assert_is_instance(obj: Any, type: JitType): ... +def _jit_clear_class_registry() -> None: ... +def _jit_set_emit_hooks( + ModuleHook: Optional[Callable], + FunctionHook: Optional[Callable], +) -> None: ... +def _jit_get_emit_hooks() -> Tuple[Callable, Callable]: ... +def _load_for_lite_interpreter( + filename: Union[str, Path], + map_location: Optional[DeviceLikeType], +): ... +def _load_for_lite_interpreter_from_buffer( + buffer: BinaryIO, + map_location: Optional[DeviceLikeType], +): ... +def _export_operator_list(module: LiteScriptModule): ... +def _quantize_ondevice_ptq_dynamic(module: LiteScriptModule, method_name: str): ... +def _get_model_bytecode_version(filename: Union[str, Path]) -> _int: ... +def _get_model_bytecode_version_from_buffer(buffer: BinaryIO) -> _int: ... +def _backport_for_mobile( + filename_input: Union[str, Path], + filename_output: Union[str, Path], + to_version: _int, +) -> None: ... +def _backport_for_mobile_from_buffer( + buffer: BinaryIO, + filename_output: Union[str, Path], + to_version: _int, +) -> None: ... +def _backport_for_mobile_to_buffer( + filename_input: Union[str, Path], + to_version: _int, +) -> bytes: ... +def _backport_for_mobile_from_buffer_to_buffer( + buffer: BinaryIO, + to_version: _int, +) -> bytes: ... +def _get_model_ops_and_info(filename: Union[str, Path]): ... +def _get_model_ops_and_info_from_buffer(buffer: BinaryIO): ... +def _get_mobile_model_contained_types(filename: Union[str, Path]): ... +def _get_mobile_model_contained_types_from_buffer(buffer: BinaryIO): ... +def _logging_set_logger(logger: LoggerBase) -> LoggerBase: ... +def _get_graph_executor_optimize(optimize: Optional[_bool] = None) -> _bool: ... 
+def _set_graph_executor_optimize(optimize: _bool): ... +def _export_opnames(module: ScriptModule) -> List[str]: ... +def _create_function_from_trace( + qualname: str, + func: Callable[..., Any], + input_tuple: Tuple[Any, ...], + var_lookup_fn: Callable[[Tensor], str], + strict: _bool, + force_outplace: _bool, + argument_names: List[str], +) -> Tuple[Graph, Stack]: ... +def _create_function_from_trace_with_dict( + qualname: str, + func: Callable[..., Any], + input_dict: Dict[str, Any], + var_lookup_fn: Callable[[Tensor], str], + strict: _bool, + force_outplace: _bool, + argument_names: List[str], +) -> Tuple[Graph, Stack]: ... +def _jit_is_script_object(obj: Any) -> _bool: ... +def _last_executed_optimized_graph() -> Graph: ... +def parse_type_comment(comment: str) -> Decl: ... +def _get_upgraders_map_size() -> _int: ... +def _get_upgraders_entry_map() -> Dict[str, str]: ... +def _dump_upgraders_map() -> Dict[str, str]: ... +def _test_only_populate_upgraders(content: Dict[str, str]) -> None: ... +def _test_only_remove_upgraders(content: Dict[str, str]) -> None: ... +def merge_type_from_type_comment( + decl: Decl, + type_annotation_decl: Decl, + is_method: _bool, +) -> Decl: ... +def parse_ir(input: str, parse_tensor_constants: _bool = False) -> Graph: ... +def parse_schema(schema: str) -> FunctionSchema: ... +def get_device(input: Tensor) -> _int: ... +def _resolve_type_from_object( + obj: Any, + range: SourceRange, + rcb: ResolutionCallback, +) -> JitType: ... +def _create_module_with_type(ty: JitType) -> ScriptModule: ... +def _create_object_with_type(ty: ClassType) -> ScriptObject: ... +def _run_emit_module_hook(m: ScriptModule): ... +def _replace_overloaded_method_decl( + overload_decl: Decl, + implementation_def: Def, + new_name: str, +) -> Def: ... +def _jit_pass_lower_all_tuples(graph: Graph) -> None: ... +def _jit_pass_onnx_set_dynamic_input_shape( + graph: Graph, + dynamic_axes: Dict[str, Dict[_int, str]], + input_names: List[str], +) -> None: ... +def _jit_pass_onnx_graph_shape_type_inference( + graph: Graph, + params_dict: Dict[str, IValue], + opset_version: _int, +) -> None: ... +def _jit_pass_onnx_assign_output_shape( + graph: Graph, + tensors: List[Tensor], + desc: IODescriptor, + onnx_shape_inference: _bool, + is_script: _bool, + opset_version: _int, +) -> None: ... +def _jit_pass_onnx_remove_inplace_ops_for_onnx( + graph: Graph, + module: Optional[ScriptModule] = None, +) -> None: ... +def _jit_pass_remove_inplace_ops(graph: Graph) -> None: ... +def _jit_pass_canonicalize_graph_fuser_ops(graph: Graph) -> None: ... +def _jit_pass_peephole( + graph: Graph, + disable_shape_peepholes: _bool = False, +) -> None: ... +def _jit_pass_onnx_autograd_function_process(graph: Graph) -> None: ... +def _jit_pass_fuse_addmm(graph: Graph) -> None: ... +def _jit_pass_onnx_preprocess(graph: Graph) -> None: ... +def _jit_pass_prepare_division_for_onnx(graph: Graph) -> None: ... +def _jit_pass_onnx_remove_print(graph: Graph) -> None: ... +def _jit_pass_onnx_preprocess_caffe2(graph: Graph) -> None: ... +def _jit_pass_onnx_unpack_quantized_weights( + graph: Graph, + paramsDict: Dict[str, IValue], + caffe2: _bool, +) -> Dict[str, IValue]: ... +def _jit_pass_onnx_quantization_insert_permutes( + graph: Graph, + paramsDict: Dict[str, IValue], +) -> Dict[str, IValue]: ... +def _jit_pass_custom_pattern_based_rewrite_graph( + pattern: str, + fused_node_name: str, + graph: Graph, +) -> None: ... +def _jit_onnx_list_model_parameters( + module: ScriptModule, +) -> Tuple[ScriptModule, List[IValue]]: ... 
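+# NOTE (illustrative comment, not part of the generated bindings): parse_schema above
+# returns a FunctionSchema (declared further down in this stub); a minimal sketch using
+# the standard aten::add.Tensor signature string:
+#
+#     import torch
+#
+#     schema = torch._C.parse_schema(
+#         "aten::add.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor"
+#     )
+#     print(schema.name)                             # 'aten::add'
+#     print([arg.name for arg in schema.arguments])  # ['self', 'other', 'alpha']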
+def _jit_pass_erase_number_types(graph: Graph) -> None: ... +def _jit_pass_onnx_lint(graph: Graph) -> None: ... +def _jit_pass_onnx( + graph: Graph, + _jit_pass_onnx: _onnx.OperatorExportTypes, +) -> Graph: ... +def _jit_pass_onnx_scalar_type_analysis( + graph: Graph, + lowprecision_cast: _bool, + opset_version: _int, +) -> None: ... +def _jit_pass_onnx_peephole( + graph: Graph, + opset_version: _int, + fixed_batch_size: _bool, +) -> None: ... +def _jit_pass_dce_allow_deleting_nodes_with_side_effects(graph: Graph) -> None: ... +def _jit_pass_onnx_function_substitution(graph: Graph) -> None: ... +def _jit_pass_onnx_function_extraction( + graph: Graph, + module_names: Set[str], + param_names: List[str], +) -> Dict[Node, Dict[str, str]]: ... +def _jit_pass_onnx_clear_scope_records() -> None: ... +def _jit_pass_onnx_track_scope_attributes( + graph: Graph, + onnx_attrs: Dict[str, Any], +) -> None: ... +def _jit_is_onnx_log_enabled() -> _bool: ... +def _jit_set_onnx_log_enabled(enabled: _bool) -> None: ... +def _jit_set_onnx_log_output_stream(stream_name: str) -> None: ... +def _jit_onnx_log(*args: Any) -> None: ... +def _jit_pass_lower_graph(graph: Graph, m: Module) -> Tuple[Graph, List[IValue]]: ... +def _jit_pass_inline_fork_wait(graph: Graph) -> None: ... +def _jit_pass_onnx_deduplicate_initializers( + graph: Graph, + params_dict: Dict[str, IValue], + is_train: _bool, +) -> Dict[str, IValue]: ... +def _jit_pass_onnx_eval_peephole( + graph: Graph, + paramsDict: Dict[str, IValue], +) -> Dict[str, IValue]: ... +def _jit_pass_onnx_constant_fold( + graph: Graph, + paramsDict: Dict[str, IValue], + opset_version: _int, +) -> Dict[str, IValue]: ... +def _jit_pass_onnx_eliminate_unused_items( + graph: Graph, + paramsDict: Dict[str, IValue], +) -> Dict[str, IValue]: ... +def _jit_pass_onnx_cast_all_constant_to_floating(graph: Graph) -> None: ... +def _jit_pass_filter_non_tensor_arguments( + params: Dict[str, IValue], +) -> Dict[str, Tensor]: ... +def _jit_decay_packed_param_input_types(graph: Graph) -> None: ... +def _jit_pass_onnx_node_shape_type_inference( + n: Node, + paramsDict: Dict[str, IValue], + opset_version: _int, +) -> None: ... +def _jit_onnx_convert_pattern_from_subblock( + block: Block, + n: Node, + env: Dict[Value, Value], +) -> List[Value]: ... +def _jit_pass_onnx_block( + old_block: Block, + new_block: Block, + operator_export_type: _onnx.OperatorExportTypes, + env: Dict[Value, Value], + is_sub_block: _bool, +) -> Dict[Value, Value]: ... +def _jit_pass_onnx_assign_scoped_names_for_node_and_value(graph: Graph) -> None: ... +def _jit_pass_fixup_onnx_controlflow_node( + n: Node, + opset_version: _int, +) -> List[Value]: ... +def _jit_onnx_create_full_scope_name(class_name: str, variable_name: str) -> str: ... +def _compile_graph_to_code_table(name: str, graph: Graph) -> IValue: ... +def _generate_upgraders_graph() -> Dict[str, Graph]: ... +def _calculate_package_version_based_on_upgraders(val: _bool): ... +def _get_version_calculator_flag() -> _bool: ... +def _jit_script_interface_compile( + name: str, + class_def: ClassDef, + rcb: ResolutionCallback, + is_module: _bool, +): ... +def _jit_script_compile_overload( + qualname: str, + overload_decl: Decl, + implementation_def: Def, + rcb: ResolutionCallback, + implementation_defaults: Dict[str, Any], + signature: Any, +): ... +def _jit_script_compile( + qual_name: str, + definition: Def, + rcb: ResolutionCallback, + defaults: Dict[str, Any], +): ... 
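+# NOTE (illustrative comment, not part of the generated bindings): the _jit_pass_onnx_*
+# passes above are driven internally by the public torch.onnx.export API. A hedged
+# sketch ("m.onnx" is a hypothetical output path):
+#
+#     import torch
+#
+#     class M(torch.nn.Module):
+#         def forward(self, x):
+#             return x.relu() + 1
+#
+#     torch.onnx.export(M(), (torch.randn(2, 3),), "m.onnx", opset_version=17)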
+def _jit_script_class_compile( + qual_name: str, + definition: ClassDef, + defaults: Dict[str, Dict[str, Any]], + rcb: ResolutionCallback, +): ... +def _parse_source_def(src: str) -> Def: ... +def import_ir_module( + cu: CompilationUnit, + filename: Union[str, Path], + map_location: Optional[DeviceLikeType], + extra_files: Dict[str, Any], +) -> ScriptModule: ... +def import_ir_module_from_buffer( + cu: CompilationUnit, + buffer: BinaryIO, + map_location: Optional[DeviceLikeType], + extra_files: Dict[str, Any], +) -> ScriptModule: ... +def _import_ir_module_from_package( + cu: CompilationUnit, + reader: PyTorchFileReader, + storage_context: DeserializationStorageContext, + map_location: Optional[DeviceLikeType], + ts_id: str, +) -> ScriptModule: ... +def _assign_output_shapes(graph: Graph, inputs: List[Tensor]) -> Graph: ... +def _check_onnx_proto(proto: str) -> None: ... +def _propagate_and_assign_input_shapes( + graph: Graph, + inputs: Tuple[Tensor, ...], + param_count_list: List[_int], + with_grad: _bool, + propagate: _bool, +) -> Graph: ... + +# Defined in torch/csrc/jit/runtime/graph_executor.h +class GraphExecutorState: ... + +# Defined in torch/torch/csrc/jit/ir/alias_analysis.h +class AliasDb: + def __str__(self) -> str: ... + +class _InsertPoint: + def __enter__(self) -> None: ... + def __exit__(self, *args) -> None: ... + +# Defined in torch/csrc/jit/ir/ir.h +class Use: + @property + def user(self) -> Node: ... + @property + def offset(self) -> _int: ... + def isAfter(self, other: Use) -> _bool: ... + +# Defined in torch/csrc/jit/ir/ir.h +class Value: + def type(self) -> JitType: ... + def setType(self, t: JitType) -> Value: ... + def setTypeAs(self, other: Value) -> Value: ... + def inferTypeFrom(self, t: Tensor) -> None: ... + def debugName(self) -> str: ... + def setDebugName(self, name: str) -> None: ... + def unique(self) -> _int: ... + def offset(self) -> _int: ... + def node(self) -> Node: ... + def uses(self) -> List[Use]: ... + def replaceAllUsesWith(self, val: Value) -> None: ... + def replaceAllUsesAfterNodeWith(self, node: Node, val: Value) -> None: ... + def requires_grad(self) -> _bool: ... + def requiresGrad(self) -> _bool: ... + def copyMetadata(self, other: Value) -> Value: ... + def isCompleteTensor(self) -> _bool: ... + def toIValue(self) -> IValue: ... + +# Defined in torch/csrc/jit/ir/ir.h +class Block: + def inputs(self) -> Iterator[Value]: ... + def outputs(self) -> Iterator[Value]: ... + def nodes(self) -> Iterator[Node]: ... + def paramNode(self) -> Node: ... + def returnNode(self) -> Node: ... + def owningNode(self) -> Node: ... + def registerOutput(self, n: Value) -> _int: ... + def addNode(self, name: str, inputs: Sequence[Value]) -> Node: ... + +# Defined in torch/csrc/jit/ir/ir.h +class Node: + def __getitem__(self, key: str) -> Any: ... + def schema(self) -> str: ... + def input(self) -> Value: ... + def inputs(self) -> Iterator[Value]: ... + def inputsAt(self, idx: _int) -> Value: ... + def inputsSize(self) -> _int: ... + def output(self) -> Value: ... + def outputs(self) -> Iterator[Value]: ... + def outputsAt(self, idx: _int) -> Value: ... + def outputsSize(self) -> _int: ... + def hasMultipleOutputs(self) -> _bool: ... + def blocks(self) -> List[Block]: ... + def addBlock(self) -> Block: ... + def mustBeNone(self) -> _bool: ... + def matches(self, pattern: str) -> _bool: ... + def kind(self) -> str: ... + def kindOf(self, name: str) -> str: ... + def addInput(self, name: str) -> Value: ... 
+ def replaceInput(self, i: _int, newValue: Value) -> Value: ... + def replaceInputWith(self, from_: Value, to: Value) -> None: ... + def replaceAllUsesWith(self, n: Node) -> None: ... + def insertBefore(self, n: Node) -> Node: ... + def insertAfter(self, n: Node) -> Node: ... + def isBefore(self, n: Node) -> _bool: ... + def isAfter(self, n: Node) -> _bool: ... + def moveBefore(self, n: Node) -> None: ... + def moveAfter(self, n: Node) -> None: ... + def removeInput(self, i: _int) -> None: ... + def removeAllInputs(self, i: _int) -> None: ... + def hasUses(self) -> _bool: ... + def eraseOutput(self, i: _int) -> None: ... + def addOutput(self) -> Value: ... + def scopeName(self) -> str: ... + def isNondeterministic(self) -> _bool: ... + def copyAttributes(self, rhs: Node) -> Node: ... + def copyMetadata(self, rhs: Node) -> Node: ... + def hasAttributes(self) -> _bool: ... + def hasAttribute(self, name: str) -> _bool: ... + def removeAttribute(self, attr: str) -> Node: ... + def namedInput(self, name: str) -> Value: ... + def sourceRange(self) -> SourceRange: ... + def owningBlock(self) -> Block: ... + def findNode(self, kind: str, recurse: _bool = True) -> Node: ... + def findAllNodes(self, kind: str, recurse: _bool = True) -> List[Node]: ... + def getModuleHierarchy(self) -> str: ... + def prev(self) -> Node: ... + def destroy(self) -> None: ... + def attributeNames(self) -> List[str]: ... + + # Accessors for attributes as types. + def f(self, name: str) -> _float: ... + def f_(self, name: str, val: _float) -> Node: ... + def fs(self, name: str) -> List[_float]: ... + def fs_(self, name: str, val: List[_float]) -> Node: ... + def c(self, name: str) -> complex: ... + def c_(self, name: str, val: complex) -> Node: ... + def s(self, name: str) -> str: ... + def s_(self, name: str, val: str) -> Node: ... + def ss(self, name: str) -> List[str]: ... + def ss_(self, name: str, val: List[str]) -> Node: ... + def i(self, name: str) -> _int: ... + def i_(self, name: str, val: _int) -> Node: ... + # Cannot define "is" like this because it's a reserved keyword in python. + # def is(self, name: str) -> List[_int]: ... + # def is_(self, name: str, val: List[_int]) -> Node: ... + def g(self, name: str) -> Graph: ... + def g_(self, name: str, val: Graph) -> Node: ... + def gs(self, name: str) -> List[Graph]: ... + def gs_(self, name: str, val: List[Graph]) -> Node: ... + def ival(self, name: str) -> IValue: ... + def ival_(self, name: str, val: IValue) -> Node: ... + def t(self, name: str) -> Tensor: ... + def t_(self, name: str, val: Tensor) -> Node: ... + def ts(self, name: str) -> List[Tensor]: ... + def ts_(self, name: str, val: List[Tensor]) -> Node: ... + def ty(self, name: str) -> JitType: ... + def ty_(self, name: str, val: JitType) -> Node: ... + def tys(self, name: str) -> List[JitType]: ... + def tys_(self, name: str, val: List[JitType]) -> Node: ... + +# Defined in torch/torch/csrc/jit/ir/ir.h +class Graph: + def inputs(self) -> Iterator[Value]: ... + def outputs(self) -> Iterator[Value]: ... + def nodes(self) -> Iterator[Node]: ... + def param_node(self) -> Node: ... + def return_node(self) -> Node: ... + def addInput(self, name: str = "") -> Value: ... + def eraseInput(self, i: _int) -> None: ... + def registerOutput(self, n: Value) -> _int: ... + def eraseOutput(self, i: _int) -> None: ... + def create(self, name: str, args, num_outputs: _int) -> Node: ... + def appendNode(self, n: Node) -> Node: ... + def prependNode(self, n: Node) -> Node: ... + def insertNode(self, n: Node) -> Node: ... 
+ def block(self) -> Block: ... + def lint(self) -> None: ... + def alias_db(self) -> AliasDb: ... + def setInsertPoint(self, n: Union[Block, Node]) -> None: ... + def insert_point_guard(self, n: Union[Block, Node]) -> _InsertPoint: ... + def insertPoint(self) -> Node: ... + def insertGraph(self, callee: Graph, inputs: List[Value]) -> List[Value]: ... + def makeMultiOutputIntoTuple(self) -> None: ... + def copy(self) -> Graph: ... + +# Defined in torch/aten/src/ATen/core/alias_info.h +class AliasInfo: + is_write: _bool + before_set: Set[str] + after_set: Set[str] + +# Defined in torch/aten/src/ATen/core/function_schema.h +class Argument: + name: str + type: JitType + default_value: Optional[Any] + def has_default_value(self) -> _bool: ... + kwarg_only: _bool + is_out: _bool + alias_info: Optional[AliasInfo] + +class FunctionSchema: + arguments: List[Argument] + returns: List[Argument] + name: str + overload_name: str + +class _UpgraderEntry: + bumped_at_version: _int + upgrader_name: str + old_schema: str + def __init__( + self, + bumped_at_version: _int, + upgrader_name: str, + old_schema: str, + ) -> None: ... + +class _UpgraderRange: + min_version: _int + max_version: _int + +def _get_max_operator_version() -> _int: ... +def _get_operator_version_map() -> Dict[str, List[_UpgraderEntry]]: ... +def _get_upgrader_ranges(name: str) -> List[_UpgraderRange]: ... +def _test_only_add_entry_to_op_version(op_name: str, entry: _UpgraderEntry) -> None: ... +def _test_only_remove_entry_to_op_version(op_name: str) -> None: ... + +# Defined in torch/csrc/jit/python/script_init.cpp +class ScriptModuleSerializer: + def __init__(self, export_writer: PyTorchFileWriter) -> None: ... + def serialize(self, model: ScriptModule, script_module_id: _int) -> None: ... + def write_files(self) -> None: ... + def storage_context(self) -> SerializationStorageContext: ... + +# Defined in torch/csrc/jit/python/script_init.cpp +class SerializationStorageContext: + def __init__(self) -> None: ... + def has_storage(self, storage: Storage) -> _bool: ... + def get_or_add_storage(self, storage: Storage) -> _int: ... + +# Defined in torch/csrc/jit/python/script_init.cpp +class DeserializationStorageContext: + def __init__(self) -> None: ... + def get_storage(self, name: str, dtype: _dtype) -> Tensor: ... + def has_storage(self, name: str) -> _bool: ... + def add_storage(self, name: str, tensor: Tensor) -> _int: ... + +# Defined in torch/csrc/jit/python/script_init.cpp +class ConcreteModuleTypeBuilder: + def __init__(self, obj: Any) -> None: ... + def set_module_dict(self): ... + def set_module_list(self): ... + def set_parameter_list(self): ... + def set_parameter_dict(self): ... + def add_attribute( + self, + name: str, + ty: JitType, + is_param: _bool, + is_buffer: _bool, + ): ... + def add_module(self, name: str, meta: ConcreteModuleType): ... + def add_constant(self, name: str, value: Any): ... + def add_overload(self, method_name: str, overloaded_method_names: List[str]): ... + def add_builtin_function(self, name: str, symbol_name: str): ... + def add_failed_attribute(self, name: str, failure_reason: str): ... + def add_function_attribute( + self, + name: str, + ty: JitType, + func: Callable[..., Any], + ): ... + def add_ignored_attribute(self, name: str): ... + def add_ignored_attributes(self, names: List[str]): ... + def add_forward_hook(self, hook: Callable[..., Any]): ... + def add_forward_pre_hook(self, pre_hook: Callable[..., Any]): ... + +class ConcreteModuleType: + def get_constants(self) -> Dict[str, Any]: ... 
+ def equals(self, other: ConcreteModuleType) -> _bool: ... + @staticmethod + def from_jit_type(ty: JitType) -> ConcreteModuleType: ... + +class CallStack: + def __init__(self, name: str, range: SourceRange): ... + +class ErrorReport: + def __init__(self, range: SourceRange) -> None: ... + def what(self) -> str: ... + @staticmethod + def call_stack() -> str: ... + +class CompilationUnit: + def __init__(self, lang: str = ..., _frames_up: _int = ...) -> None: ... + def find_function(self, name: str) -> ScriptFunction: ... + def __getattr__(self, name: str) -> ScriptFunction: ... + def define( + self, + script: str, + rcb: ResolutionCallback = ..., + _frames_up: _int = ..., + ): ... + def get_interface(self, name: str) -> InterfaceType: ... + def get_functions(self) -> List[ScriptFunction]: ... + def create_function( + self, + name: str, + graph: Graph, + shouldMangle: _bool = ..., + ) -> ScriptFunction: ... + def get_class(self, name: str) -> ClassType: ... + +class ScriptObject: + def setattr(self, name: str, value: Any): ... + +class ScriptModule(ScriptObject): + def _method_names(self) -> List[str]: ... + def _get_method(self, name: str) -> ScriptMethod: ... + +class LiteScriptModule: + def __call__(self, *input): ... + def find_method(self, method_name: str): ... + def forward(self, *input) -> List[str]: ... + def run_method(self, method_name: str, *input): ... + +# NOTE: switch to collections.abc.Callable in python 3.9 +class ScriptFunction(Generic[P, ReturnVal]): + def __call__(self, *args: P.args, **kwargs: P.kwargs) -> ReturnVal: ... + def save(self, filename: str, _extra_files: Dict[str, bytes]) -> None: ... + def save_to_buffer(self, _extra_files: Dict[str, bytes]) -> bytes: ... + @property + def graph(self) -> Graph: ... + def inlined_graph(self) -> Graph: ... + def schema(self) -> FunctionSchema: ... + def code(self) -> str: ... + def name(self) -> str: ... + @property + def qualified_name(self) -> str: ... + +# NOTE: switch to collections.abc.Callable in python 3.9 +class ScriptMethod(Generic[P, ReturnVal]): + graph: Graph + def __call__(self, *args: P.args, **kwargs: P.kwargs) -> ReturnVal: ... + @property + def owner(self) -> ScriptModule: ... + @property + def name(self) -> str: ... + +class ScriptDict(Generic[K, T]): + def __init__(self, dict: Dict[K, T]) -> None: ... + def __len__(self) -> _int: ... + def __contains__(self, key: K) -> _bool: ... + def __getitem__(self, key: K) -> T: ... + def __setitem__(self, key: K, value: T) -> None: ... + def __delitem__(self, key: K) -> None: ... + def __iter__(self) -> Iterator[K]: ... + def items(self) -> Iterator[tuple[K, T]]: ... + def keys(self) -> Iterator[K]: ... + +class ScriptList(Generic[T]): + def __init__(self, list: List[T]) -> None: ... + def __len__(self) -> _int: ... + def __contains__(self, item: T) -> _bool: ... + @overload + def __getitem__(self, idx: _int) -> T: ... + @overload + def __getitem__(self, idx: slice) -> ScriptList[T]: ... + @overload + def __setitem__(self, idx: _int, value: T) -> None: ... + @overload + def __setitem__(self, idx: slice, value: List[T]) -> None: ... + def __delitem__(self, idx: _int) -> None: ... + def __iter__(self) -> Iterator[T]: ... + def count(self, value: T) -> _int: ... + def remove(self, value: T) -> None: ... + def append(self, value: T) -> None: ... + def clear(self) -> None: ... + @overload + def extend(self, values: List[T]) -> None: ... + @overload + def extend(self, values: Iterable[T]) -> None: ... + @overload + def pop(self) -> T: ... 
+ @overload + def pop(self, idx: _int) -> T: ... + +class ModuleDict: + def __init__(self, mod: ScriptModule) -> None: ... + def items(self) -> List[Tuple[str, Any]]: ... + +class ParameterDict: + def __init__(self, mod: ScriptModule) -> None: ... + +class BufferDict: + def __init__(self, mod: ScriptModule) -> None: ... + +# Defined in torch/csrc/jit/api/module.h +class Module: ... + +# Defined in torch/csrc/Module.cpp +def _initExtension(shm_manager_path: str) -> None: ... # THPModule_initExtension +def _autograd_init() -> _bool: ... # THPAutograd_initExtension +def _add_docstr(obj: T, doc_obj: str) -> T: ... # THPModule_addDocStr +def _init_names(arg: Sequence[Type]) -> None: ... # THPModule_initNames +def _has_distributed() -> _bool: ... # THPModule_hasDistributed +def _set_default_tensor_type(type) -> None: ... # THPModule_setDefaultTensorType +def _set_default_dtype(d: _dtype) -> None: ... # THPModule_setDefaultDtype +def _infer_size(arg1: Size, arg2: Size) -> Size: ... # THPModule_inferSize +def _crash_if_csrc_asan() -> _int: ... # THPModule_crashIfCsrcASAN +def _crash_if_csrc_ubsan() -> _int: ... # THPModule_crashIfCsrcUBSAN +def _crash_if_aten_asan() -> _int: ... # THPModule_crashIfATenASAN +def _show_config() -> str: ... # THPModule_showConfig +def _cxx_flags() -> str: ... # THPModule_cxxFlags +def _parallel_info() -> str: ... # THPModule_parallelInfo +def _get_cpu_capability() -> str: ... # THPModule_getCpuCapability +def _set_backcompat_broadcast_warn( + arg: _bool, +) -> None: ... # THPModule_setBackcompatBroadcastWarn +def _get_backcompat_broadcast_warn() -> _bool: ... # THPModule_getBackcompatBroadcastWarn +def _set_backcompat_keepdim_warn( + arg: _bool, +) -> None: ... # THPModule_setBackcompatKeepdimWarn +def _get_backcompat_keepdim_warn() -> _bool: ... # THPModule_getBackcompatKeepdimWarn +def get_num_thread() -> _int: ... # THPModule_getNumThreads +def set_num_threads(nthreads: _int) -> None: ... # THPModule_setNumThreads +def get_num_interop_threads() -> _int: ... # THPModule_getNumInteropThreads +def set_num_interop_threads( + nthreads: _int, +) -> None: ... # THPModule_setNumInteropThreads +def _get_cudnn_enabled() -> _bool: ... # THPModule_userEnabledCuDNN +def _set_cudnn_enabled(arg: _bool) -> None: ... # THPModule_setUserEnabledCuDNN +def _get_flash_sdp_enabled() -> _bool: ... # THPModule_userEnabledFusedSDP +def _set_sdp_use_flash(arg: _bool) -> None: ... # THPModule_setSDPUseFlash +def _get_mem_efficient_sdp_enabled() -> _bool: ... # THPModule_userEnabledMathSDP +def _set_sdp_use_mem_efficient( + arg: _bool, +) -> None: ... # THPModule_setSDPUseMemEfficient +def _get_math_sdp_enabled() -> _bool: ... # THPModule_userEnabledMathSDP +def _set_sdp_use_math(arg: _bool) -> None: ... # THPModule_setSDPUseMath +def _get_cudnn_sdp_enabled() -> _bool: ... # THPModule_userEnabledMathSDP +def _set_sdp_use_cudnn(arg: _bool) -> None: ... # THPModule_setSDPUseMath +def _get_mkldnn_enabled() -> _bool: ... # THPModule_userEnabledMkldnn +def _set_mkldnn_enabled(arg: _bool) -> None: ... # THPModule_setUserEnabledMkldnn +def _get_cudnn_benchmark() -> _bool: ... # THPModule_benchmarkCuDNN +def _set_cudnn_benchmark(arg: _bool) -> None: ... # THPModule_setBenchmarkCuDNN +def _get_cudnn_deterministic() -> _bool: ... # THPModule_deterministicCuDNN +def _set_cudnn_deterministic(arg: _bool) -> None: ... # THPModule_setDeterministicCuDNN +def _get_deterministic_algorithms() -> _bool: ... # THPModule_deterministicAlgorithms +def _get_deterministic_algorithms_warn_only() -> _bool: ... 
# THPModule_deterministicAlgorithmsWarnOnly +def _set_deterministic_algorithms( + mode: _bool, + *, + warn_only: _bool = ..., +) -> None: ... # THPModule_setDeterministicAlgorithms +def _get_deterministic_fill_uninitialized_memory() -> _bool: ... # THPModule_deterministicFillUninitializedMemory +def _set_deterministic_fill_uninitialized_memory(arg: _bool) -> None: ... # THPModule_setDeterministicFillUninitializedMemory +def _get_nnpack_enabled() -> _bool: ... # THPModule_userEnabledNNPACK +def _set_nnpack_enabled(arg: _bool) -> None: ... # THPModule_setUserEnabledNNPACK +def _get_warnAlways() -> _bool: ... # THPModule_warnAlways +def _set_warnAlways(arg: _bool) -> None: ... # THPModule_setWarnAlways +def _get_cudnn_allow_tf32() -> _bool: ... # THPModule_allowTF32CuDNN +def _set_cudnn_allow_tf32(arg: _bool) -> None: ... # THPModule_setAllowTF32CuDNN +def _get_cublas_allow_tf32() -> _bool: ... # THPModule_allowTF32CuBLAS +def _set_cublas_allow_tf32(arg: _bool) -> None: ... # THPModule_setAllowTF32CuBLAS +def _get_float32_matmul_precision() -> str: ... # THPModule_float32MatmulPrecision +def _set_float32_matmul_precision( + arg: str, +) -> None: ... # THPModule_setFloat32MatmulPrecision +def _get_cublas_allow_fp16_reduced_precision_reduction() -> _bool: ... # THPModule_allowFP16ReductionCuBLAS +def _set_cublas_allow_fp16_reduced_precision_reduction( + arg: _bool, +) -> None: ... # THPModule_setAllowFP16ReductionCuBLAS +def _get_cublas_allow_bf16_reduced_precision_reduction() -> _bool: ... # THPModule_allowBF16ReductionCuBLAS +def _set_cublas_allow_bf16_reduced_precision_reduction( + arg: _bool, +) -> None: ... # THPModule_setAllowBF16ReductionCuBLAS +def _set_conj(x: Tensor, conj: _bool) -> None: ... +def _set_neg(x: Tensor, neg: _bool) -> None: ... +def _set_meta_in_tls_dispatch_include(meta_in_tls: _bool) -> None: ... +def _meta_in_tls_dispatch_include() -> _bool: ... +def _stash_obj_in_tls(key: str, arg: Any) -> None: ... +def _get_obj_in_tls(key: str) -> Any: ... +def _is_key_in_tls(key: str) -> _bool: ... +def _select_conv_backend(*args, **kwargs) -> ConvBackend: ... +def _conv_determine_backend_memory_format( + input: Tensor, + weight: Tensor, + backend: ConvBackend, +) -> memory_format: ... +def _has_storage(x: Tensor) -> _bool: ... +def _construct_storage_from_data_pointer(data_ptr: _int, device: torch.device, size: _int) -> Storage: ... +def _should_allow_numbers_as_tensors(func_name: str) -> _bool: ... +def _group_tensors_by_device_and_dtype(nested_tensorlists: List[List[Optional[Tensor]]], with_indices: _bool = False) -> Dict[Tuple[torch.device, str], Tuple[List[List[Optional[Tensor]]], List[_int]]]: ... + +# NB: There is no Capsule type in typing, see +# https://code.activestate.com/lists/python-dev/139675/ +def _to_dlpack(data: Tensor) -> Any: ... # THPModule_toDLPack +def _from_dlpack(data: Any) -> Tensor: ... # THPModule_fromDLPack +def _get_cpp_backtrace( + frames_to_skip: _int, + maximum_number_of_frames: _int, +) -> str: ... # THPModule_getCppBacktrace +def set_flush_denormal(arg: _bool) -> _bool: ... # THPModule_setFlushDenormal +def get_default_dtype() -> _dtype: ... # THPModule_getDefaultDtype +def _get_default_device() -> str: ... # THPModule_getDefaultDevice +def _get_qengine() -> _int: ... # THPModule_qEngine +def _set_qengine(qengine: _int) -> None: ... # THPModule_setQEngine +def _supported_qengines() -> List[_int]: ... # THPModule_supportedQEngines +def _is_xnnpack_enabled() -> _bool: ... 
# THPModule_isEnabledXNNPACK +def _check_sparse_tensor_invariants() -> _bool: ... # THPModule_checkSparseTensorInvariants +def _set_check_sparse_tensor_invariants( + arg: _bool, +) -> None: ... # THPModule_setCheckSparseTensorInvariants +def _set_default_mobile_cpu_allocator() -> None: ... # THPModule_setDefaultMobileCPUAllocator +def _unset_default_mobile_cpu_allocator() -> None: ... # THPModule_unsetDefaultMobileCPUAllocator +def _is_torch_function_enabled() -> _bool: ... # THPModule_isEnabledTorchFunction +def _has_torch_function( + args: Iterable[Any], +) -> _bool: ... # THPModule_has_torch_function +def _has_torch_function_unary(Any) -> _bool: ... # THPModule_has_torch_function_unary +def _has_torch_function_variadic( + *args: Any, +) -> _bool: ... # THPModule_has_torch_function_variadic +def _vmapmode_increment_nesting() -> _int: ... # THPModule_vmapmode_increment_nesting +def _vmapmode_decrement_nesting() -> _int: ... # THPModule_vmapmode_decrement_nesting +def _log_api_usage_once(str) -> None: ... # LogAPIUsageOnceFromPython +def _log_api_usage_metadata(event: str, metadata_map: Dict[str, str]) -> None: ... # LogAPIUsageMetadataFromPython +def _demangle(str) -> str: ... # c10::demangle +def _disabled_torch_function_impl( + func: Callable, + types: Iterable[Type], + args: Tuple, + kwargs: Dict, +) -> Any: ... # THPModule_disable_torch_function +def _disabled_torch_dispatch_impl( + func: Callable, + types: Iterable[Type], + args: Tuple, + kwargs: Dict, +) -> Any: ... # THPModule_disable_dispatch_function +def _get_linalg_preferred_backend() -> torch._C._LinalgBackend: ... +def _set_linalg_preferred_backend(arg: torch._C._LinalgBackend): ... + +class _LinalgBackend: + Default: _LinalgBackend + Cusolver: _LinalgBackend + Magma: _LinalgBackend + +class ConvBackend(Enum): ... + +class Tag(Enum): + core: _int = 0 + data_dependent_output: _int = 1 + dynamic_output_shape: _int = 2 + generated: _int = 3 + inplace_view: _int = 4 + needs_fixed_stride_order: _int = 5 + nondeterministic_bitwise: _int = 6 + nondeterministic_seeded: _int = 7 + pointwise: _int = 8 + pt2_compliant_tag: _int = 9 + view_copy: _int = 10 + +# Defined in `valgrind.h` and `callgrind.h` respectively. +def _valgrind_supported_platform() -> _bool: ... # NVALGRIND +def _valgrind_toggle() -> None: ... # CALLGRIND_TOGGLE_COLLECT +def _valgrind_toggle_and_dump_stats() -> None: ... # CALLGRIND_TOGGLE_COLLECT and CALLGRIND_DUMP_STATS + +has_openmp: _bool +has_mkl: _bool +_has_mps: _bool +has_lapack: _bool +_has_cuda: _bool +_has_magma: _bool +_has_xpu: _bool +_has_mkldnn: _bool +_has_cudnn: _bool +has_spectral: _bool +_GLIBCXX_USE_CXX11_ABI: _bool +default_generator: Generator + +# Defined in torch/csrc/autograd/init.cpp +def _set_grad_enabled(enabled: _bool) -> None: ... +def is_grad_enabled() -> _bool: ... +def _set_fwd_grad_enabled(enabled: _bool) -> None: ... +def _is_fwd_grad_enabled() -> _bool: ... +def is_inference_mode_enabled() -> _bool: ... +def set_autocast_enabled(enabled: _bool) -> None: ... +def is_autocast_enabled() -> _bool: ... +def clear_autocast_cache() -> None: ... +def set_autocast_cpu_enabled(enabled: _bool) -> None: ... +def is_autocast_cpu_enabled() -> _bool: ... +def _is_any_autocast_enabled() -> _bool: ... +def set_autocast_cpu_dtype(dtype: _dtype) -> None: ... +def set_autocast_gpu_dtype(dtype: _dtype) -> None: ... +def get_autocast_cpu_dtype() -> _dtype: ... +def get_autocast_gpu_dtype() -> _dtype: ... +def autocast_increment_nesting() -> _int: ... +def autocast_decrement_nesting() -> _int: ... 
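+# Illustrative sketch, not part of the generated stub: the autograd/autocast
+# toggles above are what the public context managers drive, so user code would
+# normally go through `torch.no_grad` / `torch.autocast` rather than calling
+# the underscore bindings directly. Only public wrappers are used below.
+#
+#     >>> import torch
+#     >>> with torch.no_grad():              # flips the grad-enabled flag above
+#     ...     torch.is_grad_enabled()
+#     False
+#     >>> with torch.autocast(device_type="cpu", dtype=torch.bfloat16):
+#     ...     _ = torch.mm(torch.randn(2, 2), torch.randn(2, 2))  # runs under CPU autocast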
+def is_autocast_cache_enabled() -> _bool: ... +def set_autocast_cache_enabled(enabled: _bool) -> None: ... +def _increment_version(tensor: Tensor) -> None: ... +def set_anomaly_enabled(enabled: _bool, check_nan: _bool = True) -> None: ... +def is_anomaly_enabled() -> _bool: ... +def is_anomaly_check_nan_enabled() -> _bool: ... +def _is_multithreading_enabled() -> _bool: ... +def _set_multithreading_enabled(enabled: _bool) -> None: ... +def _set_view_replay_enabled(enabled: _bool) -> None: ... +def _is_view_replay_enabled() -> _bool: ... +def _enter_dual_level() -> _int: ... +def _exit_dual_level(level: _int) -> None: ... +def _make_dual(tensor: Tensor, tangent: Tensor, level: _int) -> Tensor: ... +def _unpack_dual(tensor: Tensor, level: _int) -> Tensor: ... +def __set_forward_AD_enabled(enabled: _bool) -> None: ... +def __is_forward_AD_enabled() -> _bool: ... +def _register_default_hooks(pack_hook: Callable, unpack_hook: Callable) -> None: ... +def _reset_default_hooks() -> None: ... +def _is_torch_function_mode_enabled() -> _bool: ... +def _set_torch_function_mode(cls: Any) -> None: ... +def _push_on_torch_function_stack(cls: Any) -> None: ... +def _pop_torch_function_stack() -> Any: ... +def _get_function_stack_at(idx: _int) -> Any: ... +def _len_torch_function_stack() -> _int: ... +def _set_torch_dispatch_mode(cls: Any) -> None: ... +def _push_on_torch_dispatch_stack(cls: Any) -> None: ... +def _pop_torch_dispatch_stack(mode_key: Optional[torch._C._TorchDispatchModeKey] = None) -> Any: ... +def _get_dispatch_mode(mode_key: Optional[torch._C._TorchDispatchModeKey]) -> Any: ... +def _unset_dispatch_mode(mode: torch._C._TorchDispatchModeKey) -> Any: ... +def _set_dispatch_mode(mode: Any) -> None: ... +def _get_dispatch_stack_at(idx: _int) -> Any: ... +def _len_torch_dispatch_stack() -> _int: ... + +class _DisableTorchDispatch: + def __init__(self): ... + def __enter__(self): ... + def __exit__(self, exc_type, exc_value, traceback): ... + +class _EnableTorchFunction: + def __init__(self): ... + def __enter__(self): ... + def __exit__(self, exc_type, exc_value, traceback): ... + +class _EnablePythonDispatcher: + def __init__(self): ... + def __enter__(self): ... + def __exit__(self, exc_type, exc_value, traceback): ... + +class _DisablePythonDispatcher: + def __init__(self): ... + def __enter__(self): ... + def __exit__(self, exc_type, exc_value, traceback): ... + +class _EnablePreDispatch: + def __init__(self): ... + def __enter__(self): ... + def __exit__(self, exc_type, exc_value, traceback): ... + +class _DisableFuncTorch: + def __init__(self): ... + def __enter__(self): ... + def __exit__(self, exc_type, exc_value, traceback): ... + +class _DisableAutocast: + def __init__(self): ... + def __enter__(self): ... + def __exit__(self, exc_type, exc_value, traceback): ... + +class _InferenceMode: + def __init__(self, enabled: _bool): ... + def __enter__(self): ... + def __exit__(self, exc_type, exc_value, traceback): ... + +def _set_autograd_fallback_mode(mode: str) -> None: ... +def _get_autograd_fallback_mode() -> str: ... + +# Defined in torch/csrc/jit/python/script_init.cpp +class LoggerBase: ... +class NoopLogger(LoggerBase): ... +class LockingLogger(LoggerBase): ... + +class AggregationType(Enum): + SUM = 0 + AVG = 1 + +class FileCheck: + def run(self, test_string: str) -> None: ... + def check(self, test_string: str) -> FileCheck: ... + def check_not(self, test_string: str) -> FileCheck: ... + def check_same(self, test_string: str) -> FileCheck: ... 
+ def check_next(self, test_string: str) -> FileCheck: ... + def check_count( + self, + test_string: str, + count: _int, + exactly: _bool = False, + ) -> FileCheck: ... + def check_dag(self, test_string: str) -> FileCheck: ... + def check_source_highlighted(self, test_string: str) -> FileCheck: ... + def check_regex(self, test_string: str) -> FileCheck: ... + +# Defined in torch/csrc/jit/python/init.cpp +class PyTorchFileReader: + @overload + def __init__(self, name: str) -> None: ... + @overload + def __init__(self, buffer: BinaryIO) -> None: ... + def get_record(self, name: str) -> bytes: ... + def serialization_id(self) -> str: ... + +class PyTorchFileWriter: + @overload + def __init__(self, name: str) -> None: ... + @overload + def __init__(self, buffer: BinaryIO) -> None: ... + def write_record(self, name: str, data: Union[Storage, bytes, _int], size: _int) -> None: ... + def write_end_of_file(self) -> None: ... + def set_min_version(self, version: _int) -> None: ... + def get_all_written_records(self) -> List[str]: ... + def archive_name(self) -> str: ... + def serialization_id(self) -> str: ... + +def _jit_get_inline_everything_mode() -> _bool: ... +def _jit_set_inline_everything_mode(enabled: _bool) -> None: ... +def _jit_get_logging_option() -> str: ... +def _jit_set_logging_option(option: str) -> None: ... +def _jit_set_logging_stream(stream_name: str) -> None: ... +def _jit_pass_cse(Graph) -> _bool: ... +def _jit_pass_dce(Graph) -> None: ... +def _jit_pass_lint(Graph) -> None: ... + +# Defined in torch/csrc/jit/python/python_custom_class.cpp +def _get_custom_class_python_wrapper(name: str, attr: str) -> Any: ... + +# Defined in torch/csrc/Module.cpp +def _rename_privateuse1_backend(backend: str) -> None: ... +def _get_privateuse1_backend_name() -> str: ... + +# Defined in torch/csrc/Generator.cpp +class Generator: + device: _device + def __init__(self, device: Optional[DeviceLikeType] = None) -> None: ... + def get_state(self) -> Tensor: ... + def set_state(self, _new_state: Tensor) -> Generator: ... + def set_offset(self, offset: _int) -> Generator: ... + def get_offset(self) -> _int: ... + def manual_seed(self, seed: _int) -> Generator: ... + def seed(self) -> _int: ... + def initial_seed(self) -> _int: ... + +# Defined in torch/csrc/utils/python_dispatch.cpp + +class _DispatchOperatorHandle: + def schema(self) -> FunctionSchema: ... + def debug(self) -> str: ... + +class _DispatchModule: + def def_(self, schema: str, alias: str = "") -> _DispatchModule: ... + def def_legacy(self, schema: str) -> _DispatchModule: ... + def def_name_t_t( + self, + name: str, + dispatch: str, + debug: str = "default_def_name_t_t", + ) -> _DispatchModule: ... + def def_schema_t_t( + self, + schema: str, + dispatch: str, + alias: str, + debug: str = "default_def_schema_t_t", + ) -> _DispatchModule: ... + def impl_t_t( + self, + name: str, + dispatch: str, + debug: str = "impl_t_t", + ) -> _DispatchModule: ... + def impl(self, name: str, dispatch: str, func: Callable) -> _DispatchModule: ... + def define(self, schema: str, alias: str = "") -> _DispatchModule: ... + def fallback_fallthrough(self, dispatch: str = "") -> _DispatchModule: ... + +def _dispatch_library( + kind: str, + name: str, + dispatch: str, + file: str = "", + linenum: Any = 0, +) -> _DispatchModule: ... +def _dispatch_dump(name: str) -> str: ... +def _dispatch_dump_table(name: str) -> str: ... +def _dispatch_check_invariants(name: str) -> None: ... +def _dispatch_check_all_invariants() -> None: ... 
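+# Illustrative sketch, not part of the generated stub: the `Generator` class
+# declared above is exposed publicly as `torch.Generator`; `manual_seed` and
+# `set_state` return the generator itself, so a state snapshot can be used to
+# replay a random draw.
+#
+#     >>> import torch
+#     >>> g = torch.Generator(device="cpu").manual_seed(42)
+#     >>> state = g.get_state()                 # ByteTensor snapshot of the RNG state
+#     >>> a = torch.rand(3, generator=g)
+#     >>> _ = g.set_state(state)                # rewind the generator
+#     >>> torch.equal(torch.rand(3, generator=g), a)
+#     True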
+def _dispatch_call_boxed(handle: _DispatchOperatorHandle, *args, **kwargs) -> Any: ... +def _dispatch_find_schema_or_throw(name: str, overload_name: str) -> _DispatchOperatorHandle: ... +def _dispatch_set_report_error_callback(handle: _DispatchOperatorHandle, callback: Callable) -> None: ... +def _dispatch_has_kernel(name: str) -> _bool: ... +def _dispatch_has_kernel_for_dispatch_key( + name: str, + dispatch: _dispatchkey, +) -> _bool: ... +def _dispatch_has_kernel_for_any_dispatch_key( + name: str, + dispatch_key_set: DispatchKeySet, +) -> _bool: ... +def _dispatch_has_computed_kernel_for_dispatch_key( + name: str, + dispatch: _dispatchkey, +) -> _bool: ... +def _dispatch_find_dangling_impls() -> List[str]: ... +def _dispatch_get_all_op_names() -> List[str]: ... +def _dispatch_tls_set_dispatch_key_excluded( + dispatch: _dispatchkey, + val: _bool, +) -> None: ... +def _dispatch_tls_is_dispatch_key_excluded(dispatch: _dispatchkey) -> _bool: ... +def _dispatch_tls_set_dispatch_key_included( + dispatch: _dispatchkey, + val: _bool, +) -> None: ... +def _dispatch_tls_is_dispatch_key_included(dispatch: _dispatchkey) -> _bool: ... +def _dispatch_isTensorSubclassLike(tensor: Tensor) -> _bool: ... +def _dispatch_key_name(dispatch: _dispatchkey) -> str: ... +def _dispatch_key_for_device(device_type: str) -> str: ... +def _parse_dispatch_key(key: str) -> Optional[DispatchKey]: ... +def _dispatch_key_parse(dispatch: _dispatchkey) -> DispatchKey: ... +def _dispatch_num_backends() -> _int: ... +def _dispatch_pystub(name: str, overload: str) -> Optional[Tuple[str, str]]: ... +def _dispatch_is_alias_key(dispatch: _dispatchkey) -> _bool: ... +def _functionality_to_backend_keys(dispatch: _dispatchkey) -> List[DispatchKey]: ... +def _functionalization_reapply_views_tls() -> _bool: ... + +class DispatchKey(Enum): + Undefined: DispatchKey = ... + FPGA: DispatchKey = ... + ORT: DispatchKey = ... + Vulkan: DispatchKey = ... + Metal: DispatchKey = ... + MKLDNN: DispatchKey = ... + OpenGL: DispatchKey = ... + OpenCL: DispatchKey = ... + IDEEP: DispatchKey = ... + CustomRNGKeyId: DispatchKey = ... + MkldnnCPU: DispatchKey = ... + Sparse: DispatchKey = ... + SparseCsr: DispatchKey = ... + NestedTensor: DispatchKey = ... + Dense: DispatchKey = ... + PreDispatch: DispatchKey = ... + Python: DispatchKey = ... + FuncTorchDynamicLayerBackMode: DispatchKey = ... + ZeroTensor: DispatchKey = ... + Conjugate: DispatchKey = ... + Negative: DispatchKey = ... + BackendSelect: DispatchKey = ... + Named: DispatchKey = ... + AutogradOther: DispatchKey = ... + AutogradFunctionality: DispatchKey = ... + AutogradNestedTensor: DispatchKey = ... + Tracer: DispatchKey = ... + Autocast: DispatchKey = ... + Batched: DispatchKey = ... + VmapMode: DispatchKey = ... + FuncTorchGradWrapper: DispatchKey = ... + FuncTorchBatched: DispatchKey = ... + BatchedNestedTensor: DispatchKey = ... + FuncTorchVmapMode: DispatchKey = ... + FuncTorchDynamicLayerFrontMode: DispatchKey = ... + Functionalize: DispatchKey = ... + TESTING_ONLY_GenericWrapper: DispatchKey = ... + TESTING_ONLY_GenericMode: DispatchKey = ... + ADInplaceOrView: DispatchKey = ... + Autograd: DispatchKey = ... + CompositeImplicitAutograd: DispatchKey = ... + CompositeImplicitAutogradNestedTensor: DispatchKey = ... + CompositeExplicitAutograd: DispatchKey = ... + CompositeExplicitAutogradNonFunctional: DispatchKey = ... + FuncTorchBatchedDecomposition: DispatchKey = ... + CPU: DispatchKey = ... + CUDA: DispatchKey = ... + HIP: DispatchKey = ... + XLA: DispatchKey = ... 
+ MTIA: DispatchKey = ... + MPS: DispatchKey = ... + IPU: DispatchKey = ... + XPU: DispatchKey = ... + HPU: DispatchKey = ... + VE: DispatchKey = ... + Lazy: DispatchKey = ... + Meta: DispatchKey = ... + PrivateUse1: DispatchKey = ... + PrivateUse2: DispatchKey = ... + PrivateUse3: DispatchKey = ... + QuantizedCPU: DispatchKey = ... + QuantizedCUDA: DispatchKey = ... + QuantizedHIP: DispatchKey = ... + QuantizedXLA: DispatchKey = ... + QuantizedMTIA: DispatchKey = ... + QuantizedMPS: DispatchKey = ... + QuantizedIPU: DispatchKey = ... + QuantizedXPU: DispatchKey = ... + QuantizedHPU: DispatchKey = ... + QuantizedVE: DispatchKey = ... + QuantizedLazy: DispatchKey = ... + QuantizedMeta: DispatchKey = ... + QuantizedPrivateUse1: DispatchKey = ... + QuantizedPrivateUse2: DispatchKey = ... + QuantizedPrivateUse3: DispatchKey = ... + SparseCPU: DispatchKey = ... + SparseCUDA: DispatchKey = ... + SparseHIP: DispatchKey = ... + SparseXLA: DispatchKey = ... + SparseMTIA: DispatchKey = ... + SparseMPS: DispatchKey = ... + SparseIPU: DispatchKey = ... + SparseXPU: DispatchKey = ... + SparseHPU: DispatchKey = ... + SparseVE: DispatchKey = ... + SparseLazy: DispatchKey = ... + SparseMeta: DispatchKey = ... + SparsePrivateUse1: DispatchKey = ... + SparsePrivateUse2: DispatchKey = ... + SparsePrivateUse3: DispatchKey = ... + SparseCsrCPU: DispatchKey = ... + SparseCsrCUDA: DispatchKey = ... + SparseCsrHIP: DispatchKey = ... + SparseCsrXLA: DispatchKey = ... + SparseCsrMTIA: DispatchKey = ... + SparseCsrMPS: DispatchKey = ... + SparseCsrIPU: DispatchKey = ... + SparseCsrXPU: DispatchKey = ... + SparseCsrHPU: DispatchKey = ... + SparseCsrVE: DispatchKey = ... + SparseCsrLazy: DispatchKey = ... + SparseCsrMeta: DispatchKey = ... + SparseCsrPrivateUse1: DispatchKey = ... + SparseCsrPrivateUse2: DispatchKey = ... + SparseCsrPrivateUse3: DispatchKey = ... + NestedTensorCPU: DispatchKey = ... + NestedTensorCUDA: DispatchKey = ... + NestedTensorHIP: DispatchKey = ... + NestedTensorXLA: DispatchKey = ... + NestedTensorMTIA: DispatchKey = ... + NestedTensorMPS: DispatchKey = ... + NestedTensorIPU: DispatchKey = ... + NestedTensorXPU: DispatchKey = ... + NestedTensorHPU: DispatchKey = ... + NestedTensorVE: DispatchKey = ... + NestedTensorLazy: DispatchKey = ... + NestedTensorMeta: DispatchKey = ... + NestedTensorPrivateUse1: DispatchKey = ... + NestedTensorPrivateUse2: DispatchKey = ... + NestedTensorPrivateUse3: DispatchKey = ... + AutogradCPU: DispatchKey = ... + AutogradCUDA: DispatchKey = ... + AutogradHIP: DispatchKey = ... + AutogradXLA: DispatchKey = ... + AutogradMTIA: DispatchKey = ... + AutogradMPS: DispatchKey = ... + AutogradIPU: DispatchKey = ... + AutogradXPU: DispatchKey = ... + AutogradHPU: DispatchKey = ... + AutogradVE: DispatchKey = ... + AutogradLazy: DispatchKey = ... + AutogradMeta: DispatchKey = ... + AutogradPrivateUse1: DispatchKey = ... + AutogradPrivateUse2: DispatchKey = ... + AutogradPrivateUse3: DispatchKey = ... + +class DispatchKeySet: + def __init__(self, key: DispatchKey) -> None: ... + def __or__(self, other: DispatchKeySet) -> DispatchKeySet: ... + def __sub__(self, other: DispatchKeySet) -> DispatchKeySet: ... + def __and__(self, other: DispatchKeySet) -> DispatchKeySet: ... + def highestPriorityTypeId(self) -> DispatchKey: ... + def has(self, k: _dispatchkey) -> _bool: ... + def add(self, k: _dispatchkey) -> DispatchKeySet: ... + def remove(self, k: _dispatchkey) -> DispatchKeySet: ... + def __repr__(self) -> str: ... 
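+# Illustrative sketch, not part of the generated stub: per the signatures above,
+# `DispatchKeySet.add`/`remove` and the set operators return new sets rather
+# than mutating in place, and `_dispatch_keys` (declared below) reports the
+# keyset carried by a live tensor.
+#
+#     >>> import torch
+#     >>> from torch._C import DispatchKey, DispatchKeySet, _dispatch_keys
+#     >>> ks = DispatchKeySet(DispatchKey.CPU).add(DispatchKey.AutogradCPU)
+#     >>> ks.has(DispatchKey.CPU), ks.has(DispatchKey.CUDA)
+#     (True, False)
+#     >>> _dispatch_keys(torch.ones(1)).has(DispatchKey.CPU)
+#     True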
+ +_dispatch_autogradother_backends: DispatchKeySet +_additional_keys_to_prop_for_wrapper_tensors: DispatchKeySet + +def _dispatch_has_backend_fallback(dispatch: _dispatchkey) -> _bool: ... +def _dispatch_keyset_full_after(t: _dispatchkey) -> DispatchKeySet: ... +def _dispatch_keyset_full() -> DispatchKeySet: ... +def _dispatch_keyset_to_string(keyset: DispatchKeySet) -> str: ... +def _dispatch_get_backend_keyset_from_autograd( + dispatch: _dispatchkey, +) -> DispatchKeySet: ... +def _dispatch_keys(tensor: Tensor) -> DispatchKeySet: ... +def _dispatch_tls_local_exclude_set() -> DispatchKeySet: ... +def _dispatch_tls_local_include_set() -> DispatchKeySet: ... +def _dispatch_is_included_in_alias( + dispatch_a: _dispatchkey, + dispatch_b: _dispatchkey, +) -> _bool: ... +def _propagate_xla_data(a: Tensor, b: Tensor) -> None: ... +def _replace_(a: Tensor, b: Tensor) -> None: ... +def _commit_update(a: Tensor) -> None: ... + +class _ExcludeDispatchKeyGuard: + def __init__(self, keyset: DispatchKeySet): ... + def __enter__(self): ... + def __exit__(self, exc_type, exc_value, traceback): ... + +class _IncludeDispatchKeyGuard: + def __init__(self, k: DispatchKey): ... + def __enter__(self): ... + def __exit__(self, exc_type, exc_value, traceback): ... + +class _ForceDispatchKeyGuard: + def __init__(self, include: DispatchKeySet, exclude: DispatchKeySet): ... + def __enter__(self): ... + def __exit__(self, exc_type, exc_value, traceback): ... + +class _AutoDispatchBelowAutograd: + def __init__(self): ... + def __enter__(self): ... + def __exit__(self, exc_type, exc_value, traceback): ... + +def _dispatch_print_registrations_for_dispatch_key(dispatch_key: str = "") -> None: ... +def _dispatch_get_registrations_for_dispatch_key( + dispatch_key: str = "", +) -> List[str]: ... +def _are_functorch_transforms_active() -> _bool: ... + +# Define in torch/csrc/autograd/init.cpp +def _set_python_dispatcher(dispatcher: object) -> None: ... + +def _get_nested_int(id: _int, coeff: _int) -> SymInt: ... + +def _get_constant_bool_symnode(val: _bool) -> Any: ... + +class _TorchDispatchModeKey(Enum): + FAKE: _TorchDispatchModeKey = ... + PROXY: _TorchDispatchModeKey = ... + FUNCTIONAL: _TorchDispatchModeKey = ... + +class _SetExcludeDispatchKeyGuard: + def __init__(self, k: DispatchKey, enabled: _bool): ... + def __enter__(self): ... + def __exit__(self, exc_type, exc_value, traceback): ... + +# Defined in torch/csrc/utils/init.cpp +class BenchmarkConfig: + num_calling_threads: _int + num_worker_threads: _int + num_warmup_iters: _int + num_iters: _int + profiler_output_path: str + +class BenchmarkExecutionStats: + latency_avg_ms: _float + num_iters: _int + +class ThroughputBenchmark: + def __init__(self, module: Any) -> None: ... + def add_input(self, *args: Any, **kwargs: Any) -> None: ... + def run_once(self, *args: Any, **kwargs: Any) -> Any: ... + def benchmark(self, config: BenchmarkConfig) -> BenchmarkExecutionStats: ... + +# Defined in torch/csrc/Storage.cpp +class StorageBase(object): ... + +# TODO: where +class DoubleTensor(Tensor): ... +class FloatTensor(Tensor): ... +class BFloat16Tensor(Tensor): ... +class LongTensor(Tensor): ... +class IntTensor(Tensor): ... +class ShortTensor(Tensor): ... +class HalfTensor(Tensor): ... +class CharTensor(Tensor): ... +class ByteTensor(Tensor): ... +class BoolTensor(Tensor): ... + +# Defined in torch/csrc/autograd/python_engine.cpp +class _ImperativeEngine: + def queue_callback(self, callback: Callable[[], None]) -> None: ... 
+ def run_backward(self, *args: Any, **kwargs: Any) -> Tuple[Tensor, ...]: ... + def is_checkpoint_valid(self) -> _bool: ... + +# Defined in torch/csrc/autograd/python_variable.cpp +class _TensorMeta(type): ... + +# Defined in torch/csrc/autograd/python_variable.cpp +class TensorBase(metaclass=_TensorMeta): + requires_grad: _bool + retains_grad: _bool + shape: Size + data: Tensor + names: List[str] + device: _device + dtype: _dtype + layout: _layout + real: Tensor + imag: Tensor + T: Tensor + H: Tensor + mT: Tensor + mH: Tensor + ndim: _int + output_nr: _int + _version: _int + _base: Optional[Tensor] + _cdata: _int + grad_fn: Optional[_Node] + _grad_fn: Any + _grad: Optional[Tensor] + grad: Optional[Tensor] + _backward_hooks: Optional[Dict[_int, Callable[[Tensor], Optional[Tensor]]]] + nbytes: _int + itemsize: _int + _has_symbolic_sizes_strides: _bool + def __abs__(self) -> Tensor: ... + def __add__(self, other: Any) -> Tensor: ... + @overload + def __and__(self, other: Tensor) -> Tensor: ... + @overload + def __and__(self, other: Union[Number, _complex]) -> Tensor: ... + @overload + def __and__(self, other: Any) -> Tensor: ... + def __bool__(self) -> builtins.bool: ... + def __complex__(self) -> builtins.complex: ... + def __div__(self, other: Any) -> Tensor: ... + def __eq__(self, other: Any) -> Tensor: ... # type: ignore[override] + def __float__(self) -> builtins.float: ... + def __floordiv__(self, other: Any) -> Tensor: ... + def __ge__(self, other: Any) -> Tensor: ... + def __getitem__(self, indices: Union[Union[SupportsIndex, Union[None, _bool, _int, slice, ellipsis, Tensor], _NestedSequence[Union[None, _bool, _int, slice, ellipsis, Tensor]]], tuple[Union[SupportsIndex, Union[None, _bool, _int, slice, ellipsis, Tensor], _NestedSequence[Union[None, _bool, _int, slice, ellipsis, Tensor]]], ...]]) -> Tensor: ... + def __gt__(self, other: Any) -> Tensor: ... + def __iadd__(self, other: Any) -> Tensor: ... + @overload + def __iand__(self, other: Tensor) -> Tensor: ... + @overload + def __iand__(self, other: Union[Number, _complex]) -> Tensor: ... + @overload + def __iand__(self, other: Any) -> Tensor: ... + def __idiv__(self, other: Any) -> Tensor: ... + def __ifloordiv__(self, other: Any) -> Tensor: ... + @overload + def __ilshift__(self, other: Tensor) -> Tensor: ... + @overload + def __ilshift__(self, other: Union[Number, _complex]) -> Tensor: ... + @overload + def __ilshift__(self, other: Any) -> Tensor: ... + def __imod__(self, other: Any) -> Tensor: ... + def __imul__(self, other: Any) -> Tensor: ... + def __index__(self) -> builtins.int: ... + @overload + def __init__(self, *args: Any, device: Optional[DeviceLikeType] = None) -> None: ... + @overload + def __init__(self, storage: Storage) -> None: ... + @overload + def __init__(self, other: Tensor) -> None: ... + @overload + def __init__(self, size: _size, *, device: Optional[DeviceLikeType] = None) -> None: ... + def __int__(self) -> builtins.int: ... + def __invert__(self) -> Tensor: ... + @overload + def __ior__(self, other: Tensor) -> Tensor: ... + @overload + def __ior__(self, other: Union[Number, _complex]) -> Tensor: ... + @overload + def __ior__(self, other: Any) -> Tensor: ... + @overload + def __irshift__(self, other: Tensor) -> Tensor: ... + @overload + def __irshift__(self, other: Union[Number, _complex]) -> Tensor: ... + @overload + def __irshift__(self, other: Any) -> Tensor: ... + def __isub__(self, other: Any) -> Tensor: ... + @overload + def __ixor__(self, other: Tensor) -> Tensor: ... 
+ @overload + def __ixor__(self, other: Union[Number, _complex]) -> Tensor: ... + @overload + def __ixor__(self, other: Any) -> Tensor: ... + def __le__(self, other: Any) -> Tensor: ... + def __long__(self) -> builtins.int: ... + @overload + def __lshift__(self, other: Tensor) -> Tensor: ... + @overload + def __lshift__(self, other: Union[Number, _complex]) -> Tensor: ... + @overload + def __lshift__(self, other: Any) -> Tensor: ... + def __lt__(self, other: Any) -> Tensor: ... + def __matmul__(self, other: Any) -> Tensor: ... + def __mod__(self, other: Any) -> Tensor: ... + def __mul__(self, other: Any) -> Tensor: ... + def __ne__(self, other: Any) -> Tensor: ... # type: ignore[override] + def __neg__(self) -> Tensor: ... + def __new__(self, *args, **kwargs) -> Tensor: ... + def __nonzero__(self) -> builtins.bool: ... + @overload + def __or__(self, other: Tensor) -> Tensor: ... + @overload + def __or__(self, other: Union[Number, _complex]) -> Tensor: ... + @overload + def __or__(self, other: Any) -> Tensor: ... + def __pow__(self, other: Any) -> Tensor: ... + def __radd__(self, other: Any) -> Tensor: ... + def __rand__(self, other: Any) -> Tensor: ... + def __rfloordiv__(self, other: Any) -> Tensor: ... + def __rmul__(self, other: Any) -> Tensor: ... + def __ror__(self, other: Any) -> Tensor: ... + def __rpow__(self, other: Any) -> Tensor: ... + @overload + def __rshift__(self, other: Tensor) -> Tensor: ... + @overload + def __rshift__(self, other: Union[Number, _complex]) -> Tensor: ... + @overload + def __rshift__(self, other: Any) -> Tensor: ... + def __rsub__(self, other: Any) -> Tensor: ... + def __rtruediv__(self, other: Any) -> Tensor: ... + def __rxor__(self, other: Any) -> Tensor: ... + def __setitem__(self, indices: Union[Union[SupportsIndex, Union[None, _bool, _int, slice, ellipsis, Tensor], _NestedSequence[Union[None, _bool, _int, slice, ellipsis, Tensor]]], tuple[Union[SupportsIndex, Union[None, _bool, _int, slice, ellipsis, Tensor], _NestedSequence[Union[None, _bool, _int, slice, ellipsis, Tensor]]], ...]], val: Union[Tensor, Number]) -> None: ... + def __sub__(self, other: Any) -> Tensor: ... + def __truediv__(self, other: Any) -> Tensor: ... + @overload + def __xor__(self, other: Tensor) -> Tensor: ... + @overload + def __xor__(self, other: Union[Number, _complex]) -> Tensor: ... + @overload + def __xor__(self, other: Any) -> Tensor: ... + def _addmm_activation(self, mat1: Tensor, mat2: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1, use_gelu: _bool = False) -> Tensor: ... + def _autocast_to_full_precision(self, cuda_enabled: _bool, cpu_enabled: _bool) -> Tensor: ... + def _autocast_to_reduced_precision(self, cuda_enabled: _bool, cpu_enabled: _bool, cuda_dtype: _dtype, cpu_dtype: _dtype) -> Tensor: ... + def _coalesced_(self, coalesced: _bool) -> Tensor: ... + def _conj(self) -> Tensor: ... + def _conj_physical(self) -> Tensor: ... + def _dimI(self) -> _int: ... + def _dimV(self) -> _int: ... + def _indices(self) -> Tensor: ... + def _is_all_true(self) -> Tensor: ... + def _is_any_true(self) -> Tensor: ... + def _is_view(self) -> _bool: ... + def _is_zerotensor(self) -> _bool: ... + def _lazy_clone(self) -> Tensor: ... + @staticmethod + def _make_subclass(cls: Type[S], data: Tensor, require_grad: _bool = False, dispatch_strides: _bool = False, dispatch_device: _bool = False, device_for_backend_keys: Optional[_device] = None) -> S: ... + def _neg_view(self) -> Tensor: ... + def _nested_tensor_size(self) -> Tensor: ... 
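+    # Illustrative sketch, not part of the generated stub: the `__getitem__` /
+    # `__setitem__` signatures above accept ints, slices, `None` (new axis),
+    # ellipsis, bool/int tensors, and nested sequences of these.
+    #
+    #     >>> t = torch.arange(6).reshape(2, 3)
+    #     >>> t[0, 1].item()
+    #     1
+    #     >>> t[:, None].shape          # None inserts a size-1 dimension
+    #     torch.Size([2, 1, 3])
+    #     >>> t[t > 3] = 0              # boolean-mask assignment via __setitem__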
+ def _nested_tensor_storage_offsets(self) -> Tensor: ... + def _nested_tensor_strides(self) -> Tensor: ... + def _nnz(self) -> _int: ... + def _sparse_mask_projection(self, mask: Tensor, accumulate_matches: _bool = False) -> Tensor: ... + def _to_dense(self, dtype: Optional[_dtype] = None, masked_grad: Optional[_bool] = None) -> Tensor: ... + @overload + def _to_sparse(self, *, layout: Optional[_layout] = None, blocksize: Optional[Union[_int, _size]] = None, dense_dim: Optional[_int] = None) -> Tensor: ... + @overload + def _to_sparse(self, sparse_dim: _int) -> Tensor: ... + def _to_sparse_bsc(self, blocksize: Union[_int, _size], dense_dim: Optional[_int] = None) -> Tensor: ... + def _to_sparse_bsr(self, blocksize: Union[_int, _size], dense_dim: Optional[_int] = None) -> Tensor: ... + def _to_sparse_csc(self, dense_dim: Optional[_int] = None) -> Tensor: ... + def _to_sparse_csr(self, dense_dim: Optional[_int] = None) -> Tensor: ... + def _values(self) -> Tensor: ... + def abs(self) -> Tensor: + r""" + abs() -> Tensor + + See :func:`torch.abs` + """ + ... + def abs_(self) -> Tensor: + r""" + abs_() -> Tensor + + In-place version of :meth:`~Tensor.abs` + """ + ... + def absolute(self) -> Tensor: + r""" + absolute() -> Tensor + + Alias for :func:`abs` + """ + ... + def absolute_(self) -> Tensor: + r""" + absolute_() -> Tensor + + In-place version of :meth:`~Tensor.absolute` + Alias for :func:`abs_` + """ + ... + def acos(self) -> Tensor: + r""" + acos() -> Tensor + + See :func:`torch.acos` + """ + ... + def acos_(self) -> Tensor: + r""" + acos_() -> Tensor + + In-place version of :meth:`~Tensor.acos` + """ + ... + def acosh(self) -> Tensor: + r""" + acosh() -> Tensor + + See :func:`torch.acosh` + """ + ... + def acosh_(self) -> Tensor: + r""" + acosh_() -> Tensor + + In-place version of :meth:`~Tensor.acosh` + """ + ... + def add(self, other: Union[Tensor, Number, _complex, torch.SymInt, torch.SymFloat], *, alpha: Optional[Union[Number, _complex]] = 1, out: Optional[Tensor] = None) -> Tensor: + r""" + add(other, *, alpha=1) -> Tensor + + Add a scalar or tensor to :attr:`self` tensor. If both :attr:`alpha` + and :attr:`other` are specified, each element of :attr:`other` is scaled by + :attr:`alpha` before being used. + + When :attr:`other` is a tensor, the shape of :attr:`other` must be + :ref:`broadcastable ` with the shape of the underlying + tensor + + See :func:`torch.add` + """ + ... + def add_(self, other: Union[Tensor, Number, _complex, torch.SymInt, torch.SymFloat], *, alpha: Optional[Union[Number, _complex]] = 1) -> Tensor: + r""" + add_(other, *, alpha=1) -> Tensor + + In-place version of :meth:`~Tensor.add` + """ + ... + def addbmm(self, batch1: Tensor, batch2: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1) -> Tensor: + r""" + addbmm(batch1, batch2, *, beta=1, alpha=1) -> Tensor + + See :func:`torch.addbmm` + """ + ... + def addbmm_(self, batch1: Tensor, batch2: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1) -> Tensor: + r""" + addbmm_(batch1, batch2, *, beta=1, alpha=1) -> Tensor + + In-place version of :meth:`~Tensor.addbmm` + """ + ... + def addcdiv(self, tensor1: Tensor, tensor2: Tensor, *, value: Union[Number, _complex] = 1) -> Tensor: + r""" + addcdiv(tensor1, tensor2, *, value=1) -> Tensor + + See :func:`torch.addcdiv` + """ + ... 
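+    # Illustrative sketch, not part of the generated stub: `add` scales `other`
+    # by `alpha` before adding, and the trailing-underscore variants mutate
+    # `self` in place and return it.
+    #
+    #     >>> a = torch.tensor([1., 2., 3.])
+    #     >>> a.add(torch.ones(3), alpha=10)
+    #     tensor([11., 12., 13.])
+    #     >>> a.add_(5)                  # in-place, returns self
+    #     tensor([6., 7., 8.])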
+ def addcdiv_(self, tensor1: Tensor, tensor2: Tensor, *, value: Union[Number, _complex] = 1) -> Tensor: + r""" + addcdiv_(tensor1, tensor2, *, value=1) -> Tensor + + In-place version of :meth:`~Tensor.addcdiv` + """ + ... + def addcmul(self, tensor1: Tensor, tensor2: Tensor, *, value: Union[Number, _complex] = 1) -> Tensor: + r""" + addcmul(tensor1, tensor2, *, value=1) -> Tensor + + See :func:`torch.addcmul` + """ + ... + def addcmul_(self, tensor1: Tensor, tensor2: Tensor, *, value: Union[Number, _complex] = 1) -> Tensor: + r""" + addcmul_(tensor1, tensor2, *, value=1) -> Tensor + + In-place version of :meth:`~Tensor.addcmul` + """ + ... + def addmm(self, mat1: Tensor, mat2: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1) -> Tensor: + r""" + addmm(mat1, mat2, *, beta=1, alpha=1) -> Tensor + + See :func:`torch.addmm` + """ + ... + def addmm_(self, mat1: Tensor, mat2: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1) -> Tensor: + r""" + addmm_(mat1, mat2, *, beta=1, alpha=1) -> Tensor + + In-place version of :meth:`~Tensor.addmm` + """ + ... + def addmv(self, mat: Tensor, vec: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1) -> Tensor: + r""" + addmv(mat, vec, *, beta=1, alpha=1) -> Tensor + + See :func:`torch.addmv` + """ + ... + def addmv_(self, mat: Tensor, vec: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1) -> Tensor: + r""" + addmv_(mat, vec, *, beta=1, alpha=1) -> Tensor + + In-place version of :meth:`~Tensor.addmv` + """ + ... + def addr(self, vec1: Tensor, vec2: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1) -> Tensor: + r""" + addr(vec1, vec2, *, beta=1, alpha=1) -> Tensor + + See :func:`torch.addr` + """ + ... + def addr_(self, vec1: Tensor, vec2: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1) -> Tensor: + r""" + addr_(vec1, vec2, *, beta=1, alpha=1) -> Tensor + + In-place version of :meth:`~Tensor.addr` + """ + ... + def adjoint(self) -> Tensor: + r""" + adjoint() -> Tensor + + Alias for :func:`adjoint` + """ + ... + def align_as(self, other: Tensor) -> Tensor: + r""" + align_as(other) -> Tensor + + Permutes the dimensions of the :attr:`self` tensor to match the dimension order + in the :attr:`other` tensor, adding size-one dims for any new names. + + This operation is useful for explicit broadcasting by names (see examples). + + All of the dims of :attr:`self` must be named in order to use this method. + The resulting tensor is a view on the original tensor. + + All dimension names of :attr:`self` must be present in ``other.names``. + :attr:`other` may contain named dimensions that are not in ``self.names``; + the output tensor has a size-one dimension for each of those new names. + + To align a tensor to a specific order, use :meth:`~Tensor.align_to`. 
+ + Examples:: + + # Example 1: Applying a mask + >>> mask = torch.randint(2, [127, 128], dtype=torch.bool).refine_names('W', 'H') + >>> imgs = torch.randn(32, 128, 127, 3, names=('N', 'H', 'W', 'C')) + >>> imgs.masked_fill_(mask.align_as(imgs), 0) + + + # Example 2: Applying a per-channel-scale + >>> def scale_channels(input, scale): + >>> scale = scale.refine_names('C') + >>> return input * scale.align_as(input) + + >>> num_channels = 3 + >>> scale = torch.randn(num_channels, names=('C',)) + >>> imgs = torch.rand(32, 128, 128, num_channels, names=('N', 'H', 'W', 'C')) + >>> more_imgs = torch.rand(32, num_channels, 128, 128, names=('N', 'C', 'H', 'W')) + >>> videos = torch.randn(3, num_channels, 128, 128, 128, names=('N', 'C', 'H', 'W', 'D')) + + # scale_channels is agnostic to the dimension order of the input + >>> scale_channels(imgs, scale) + >>> scale_channels(more_imgs, scale) + >>> scale_channels(videos, scale) + + .. warning:: + The named tensor API is experimental and subject to change. + """ + ... + @overload + def align_to(self, order: Sequence[Union[str, ellipsis, None]], ellipsis_idx: _int) -> Tensor: ... + @overload + def align_to(self, names: Sequence[Union[str, ellipsis, None]]) -> Tensor: ... + @overload + def all(self) -> Tensor: + r""" + all(dim=None, keepdim=False) -> Tensor + + See :func:`torch.all` + """ + ... + @overload + def all(self, dim: Optional[_size] = None, keepdim: _bool = False) -> Tensor: + r""" + all(dim=None, keepdim=False) -> Tensor + + See :func:`torch.all` + """ + ... + @overload + def all(self, dim: _int, keepdim: _bool = False) -> Tensor: + r""" + all(dim=None, keepdim=False) -> Tensor + + See :func:`torch.all` + """ + ... + @overload + def all(self, dim: Union[str, ellipsis, None], keepdim: _bool = False) -> Tensor: + r""" + all(dim=None, keepdim=False) -> Tensor + + See :func:`torch.all` + """ + ... + def allclose(self, other: Tensor, rtol: _float = 1e-05, atol: _float = 1e-08, equal_nan: _bool = False) -> _bool: + r""" + allclose(other, rtol=1e-05, atol=1e-08, equal_nan=False) -> Tensor + + See :func:`torch.allclose` + """ + ... + def amax(self, dim: Union[_int, _size] = (), keepdim: _bool = False) -> Tensor: + r""" + amax(dim=None, keepdim=False) -> Tensor + + See :func:`torch.amax` + """ + ... + def amin(self, dim: Union[_int, _size] = (), keepdim: _bool = False) -> Tensor: + r""" + amin(dim=None, keepdim=False) -> Tensor + + See :func:`torch.amin` + """ + ... + def aminmax(self, *, dim: Optional[_int] = None, keepdim: _bool = False) -> torch.return_types.aminmax: + r""" + aminmax(*, dim=None, keepdim=False) -> (Tensor min, Tensor max) + + See :func:`torch.aminmax` + """ + ... + def angle(self) -> Tensor: + r""" + angle() -> Tensor + + See :func:`torch.angle` + """ + ... + @overload + def any(self) -> Tensor: + r""" + any(dim=None, keepdim=False) -> Tensor + + See :func:`torch.any` + """ + ... + @overload + def any(self, dim: Optional[_size] = None, keepdim: _bool = False) -> Tensor: + r""" + any(dim=None, keepdim=False) -> Tensor + + See :func:`torch.any` + """ + ... + @overload + def any(self, dim: _int, keepdim: _bool = False) -> Tensor: + r""" + any(dim=None, keepdim=False) -> Tensor + + See :func:`torch.any` + """ + ... + @overload + def any(self, dim: Union[str, ellipsis, None], keepdim: _bool = False) -> Tensor: + r""" + any(dim=None, keepdim=False) -> Tensor + + See :func:`torch.any` + """ + ... 
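+    # Illustrative sketch, not part of the generated stub: the reduction methods
+    # above (`all`, `any`, `amin`, `amax`, ...) reduce over every dimension when
+    # `dim` is omitted, and keep the reduced dimension as size 1 when
+    # `keepdim=True`.
+    #
+    #     >>> t = torch.tensor([[True, False], [True, True]])
+    #     >>> t.all(dim=1)
+    #     tensor([False,  True])
+    #     >>> t.any()
+    #     tensor(True)
+    #     >>> t.all(dim=1, keepdim=True).shape
+    #     torch.Size([2, 1])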
+ def apply_(self, callable: Callable) -> Tensor: + r""" + apply_(callable) -> Tensor + + Applies the function :attr:`callable` to each element in the tensor, replacing + each element with the value returned by :attr:`callable`. + + .. note:: + + This function only works with CPU tensors and should not be used in code + sections that require high performance. + """ + ... + def arccos(self) -> Tensor: + r""" + arccos() -> Tensor + + See :func:`torch.arccos` + """ + ... + def arccos_(self) -> Tensor: + r""" + arccos_() -> Tensor + + In-place version of :meth:`~Tensor.arccos` + """ + ... + def arccosh(self) -> Tensor: + r""" + acosh() -> Tensor + + See :func:`torch.arccosh` + """ + ... + def arccosh_(self) -> Tensor: + r""" + acosh_() -> Tensor + + In-place version of :meth:`~Tensor.arccosh` + """ + ... + def arcsin(self) -> Tensor: + r""" + arcsin() -> Tensor + + See :func:`torch.arcsin` + """ + ... + def arcsin_(self) -> Tensor: + r""" + arcsin_() -> Tensor + + In-place version of :meth:`~Tensor.arcsin` + """ + ... + def arcsinh(self) -> Tensor: + r""" + arcsinh() -> Tensor + + See :func:`torch.arcsinh` + """ + ... + def arcsinh_(self) -> Tensor: + r""" + arcsinh_() -> Tensor + + In-place version of :meth:`~Tensor.arcsinh` + """ + ... + def arctan(self) -> Tensor: + r""" + arctan() -> Tensor + + See :func:`torch.arctan` + """ + ... + def arctan2(self, other: Tensor) -> Tensor: + r""" + arctan2(other) -> Tensor + + See :func:`torch.arctan2` + """ + ... + def arctan2_(self, other: Tensor) -> Tensor: + r""" + atan2_(other) -> Tensor + + In-place version of :meth:`~Tensor.arctan2` + """ + ... + def arctan_(self) -> Tensor: + r""" + arctan_() -> Tensor + + In-place version of :meth:`~Tensor.arctan` + """ + ... + def arctanh(self) -> Tensor: + r""" + arctanh() -> Tensor + + See :func:`torch.arctanh` + """ + ... + def arctanh_(self) -> Tensor: + r""" + arctanh_(other) -> Tensor + + In-place version of :meth:`~Tensor.arctanh` + """ + ... + def argmax(self, dim: Optional[_int] = None, keepdim: _bool = False) -> Tensor: + r""" + argmax(dim=None, keepdim=False) -> LongTensor + + See :func:`torch.argmax` + """ + ... + def argmin(self, dim: Optional[_int] = None, keepdim: _bool = False) -> Tensor: + r""" + argmin(dim=None, keepdim=False) -> LongTensor + + See :func:`torch.argmin` + """ + ... + @overload + def argsort(self, *, stable: _bool, dim: _int = -1, descending: _bool = False) -> Tensor: + r""" + argsort(dim=-1, descending=False) -> LongTensor + + See :func:`torch.argsort` + """ + ... + @overload + def argsort(self, dim: _int = -1, descending: _bool = False) -> Tensor: + r""" + argsort(dim=-1, descending=False) -> LongTensor + + See :func:`torch.argsort` + """ + ... + @overload + def argsort(self, dim: Union[str, ellipsis, None], descending: _bool = False) -> Tensor: + r""" + argsort(dim=-1, descending=False) -> LongTensor + + See :func:`torch.argsort` + """ + ... + def argwhere(self) -> Tensor: + r""" + argwhere() -> Tensor + + See :func:`torch.argwhere` + """ + ... + def as_strided(self, size: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], storage_offset: Optional[Union[_int, SymInt]] = None) -> Tensor: + r""" + as_strided(size, stride, storage_offset=None) -> Tensor + + See :func:`torch.as_strided` + """ + ... 
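+    # Illustrative sketch, not part of the generated stub: `argmax`/`argmin`
+    # return integer (Long) index tensors, and `argsort` returns the indices
+    # that would sort along `dim`.
+    #
+    #     >>> x = torch.tensor([[1, 9], [7, 3]])
+    #     >>> x.argmax(dim=1)
+    #     tensor([1, 0])
+    #     >>> x.argsort(dim=1, descending=True)
+    #     tensor([[1, 0],
+    #             [0, 1]])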
+ def as_strided_(self, size: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], storage_offset: Optional[Union[_int, SymInt]] = None) -> Tensor: + r""" + as_strided_(size, stride, storage_offset=None) -> Tensor + + In-place version of :meth:`~Tensor.as_strided` + """ + ... + def as_strided_scatter(self, src: Tensor, size: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], storage_offset: Optional[Union[_int, SymInt]] = None) -> Tensor: + r""" + as_strided_scatter(src, size, stride, storage_offset=None) -> Tensor + + See :func:`torch.as_strided_scatter` + """ + ... + def as_subclass(self, cls: Type[S]) -> S: + r""" + as_subclass(cls) -> Tensor + + Makes a ``cls`` instance with the same data pointer as ``self``. Changes + in the output mirror changes in ``self``, and the output stays attached + to the autograd graph. ``cls`` must be a subclass of ``Tensor``. + """ + ... + def asin(self) -> Tensor: + r""" + asin() -> Tensor + + See :func:`torch.asin` + """ + ... + def asin_(self) -> Tensor: + r""" + asin_() -> Tensor + + In-place version of :meth:`~Tensor.asin` + """ + ... + def asinh(self) -> Tensor: + r""" + asinh() -> Tensor + + See :func:`torch.asinh` + """ + ... + def asinh_(self) -> Tensor: + r""" + asinh_() -> Tensor + + In-place version of :meth:`~Tensor.asinh` + """ + ... + def atan(self) -> Tensor: + r""" + atan() -> Tensor + + See :func:`torch.atan` + """ + ... + def atan2(self, other: Tensor) -> Tensor: + r""" + atan2(other) -> Tensor + + See :func:`torch.atan2` + """ + ... + def atan2_(self, other: Tensor) -> Tensor: + r""" + atan2_(other) -> Tensor + + In-place version of :meth:`~Tensor.atan2` + """ + ... + def atan_(self) -> Tensor: + r""" + atan_() -> Tensor + + In-place version of :meth:`~Tensor.atan` + """ + ... + def atanh(self) -> Tensor: + r""" + atanh() -> Tensor + + See :func:`torch.atanh` + """ + ... + def atanh_(self) -> Tensor: + r""" + atanh_(other) -> Tensor + + In-place version of :meth:`~Tensor.atanh` + """ + ... + def baddbmm(self, batch1: Tensor, batch2: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1) -> Tensor: + r""" + baddbmm(batch1, batch2, *, beta=1, alpha=1) -> Tensor + + See :func:`torch.baddbmm` + """ + ... + def baddbmm_(self, batch1: Tensor, batch2: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1) -> Tensor: + r""" + baddbmm_(batch1, batch2, *, beta=1, alpha=1) -> Tensor + + In-place version of :meth:`~Tensor.baddbmm` + """ + ... + @overload + def bernoulli(self, *, generator: Optional[Generator] = None) -> Tensor: + r""" + bernoulli(*, generator=None) -> Tensor + + Returns a result tensor where each :math:`\texttt{result[i]}` is independently + sampled from :math:`\text{Bernoulli}(\texttt{self[i]})`. :attr:`self` must have + floating point ``dtype``, and the result will have the same ``dtype``. + + See :func:`torch.bernoulli` + """ + ... + @overload + def bernoulli(self, p: _float, *, generator: Optional[Generator] = None) -> Tensor: + r""" + bernoulli(*, generator=None) -> Tensor + + Returns a result tensor where each :math:`\texttt{result[i]}` is independently + sampled from :math:`\text{Bernoulli}(\texttt{self[i]})`. :attr:`self` must have + floating point ``dtype``, and the result will have the same ``dtype``. + + See :func:`torch.bernoulli` + """ + ... 
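+    # Illustrative sketch, not part of the generated stub: `bernoulli` treats the
+    # elements of `self` as probabilities and draws independent 0/1 samples of
+    # the same shape and dtype; passing a `Generator` makes the draw reproducible.
+    #
+    #     >>> probs = torch.full((2, 3), 0.5)
+    #     >>> g = torch.Generator().manual_seed(0)
+    #     >>> sample = probs.bernoulli(generator=g)    # entries are 0. or 1.
+    #     >>> sample.shape
+    #     torch.Size([2, 3])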
+ @overload + def bernoulli_(self, p: Tensor, *, generator: Optional[Generator] = None) -> Tensor: + r""" + bernoulli_(p=0.5, *, generator=None) -> Tensor + + Fills each location of :attr:`self` with an independent sample from + :math:`\text{Bernoulli}(\texttt{p})`. :attr:`self` can have integral + ``dtype``. + + :attr:`p` should either be a scalar or tensor containing probabilities to be + used for drawing the binary random number. + + If it is a tensor, the :math:`\text{i}^{th}` element of :attr:`self` tensor + will be set to a value sampled from + :math:`\text{Bernoulli}(\texttt{p\_tensor[i]})`. In this case `p` must have + floating point ``dtype``. + + See also :meth:`~Tensor.bernoulli` and :func:`torch.bernoulli` + """ + ... + @overload + def bernoulli_(self, p: _float = 0.5, *, generator: Optional[Generator] = None) -> Tensor: + r""" + bernoulli_(p=0.5, *, generator=None) -> Tensor + + Fills each location of :attr:`self` with an independent sample from + :math:`\text{Bernoulli}(\texttt{p})`. :attr:`self` can have integral + ``dtype``. + + :attr:`p` should either be a scalar or tensor containing probabilities to be + used for drawing the binary random number. + + If it is a tensor, the :math:`\text{i}^{th}` element of :attr:`self` tensor + will be set to a value sampled from + :math:`\text{Bernoulli}(\texttt{p\_tensor[i]})`. In this case `p` must have + floating point ``dtype``. + + See also :meth:`~Tensor.bernoulli` and :func:`torch.bernoulli` + """ + ... + def bfloat16(self) -> Tensor: + r""" + bfloat16(memory_format=torch.preserve_format) -> Tensor + ``self.bfloat16()`` is equivalent to ``self.to(torch.bfloat16)``. See :func:`to`. + + Args: + memory_format (:class:`torch.memory_format`, optional): the desired memory format of + returned Tensor. Default: ``torch.preserve_format``. + """ + ... + def bincount(self, weights: Optional[Tensor] = None, minlength: _int = 0) -> Tensor: + r""" + bincount(weights=None, minlength=0) -> Tensor + + See :func:`torch.bincount` + """ + ... + @overload + def bitwise_and(self, other: Tensor) -> Tensor: + r""" + bitwise_and() -> Tensor + + See :func:`torch.bitwise_and` + """ + ... + @overload + def bitwise_and(self, other: Union[Number, _complex]) -> Tensor: + r""" + bitwise_and() -> Tensor + + See :func:`torch.bitwise_and` + """ + ... + @overload + def bitwise_and_(self, other: Tensor) -> Tensor: + r""" + bitwise_and_() -> Tensor + + In-place version of :meth:`~Tensor.bitwise_and` + """ + ... + @overload + def bitwise_and_(self, other: Union[Number, _complex]) -> Tensor: + r""" + bitwise_and_() -> Tensor + + In-place version of :meth:`~Tensor.bitwise_and` + """ + ... + @overload + def bitwise_left_shift(self, other: Tensor) -> Tensor: + r""" + bitwise_left_shift(other) -> Tensor + + See :func:`torch.bitwise_left_shift` + """ + ... + @overload + def bitwise_left_shift(self, other: Union[Number, _complex]) -> Tensor: + r""" + bitwise_left_shift(other) -> Tensor + + See :func:`torch.bitwise_left_shift` + """ + ... + @overload + def bitwise_left_shift_(self, other: Tensor) -> Tensor: + r""" + bitwise_left_shift_(other) -> Tensor + + In-place version of :meth:`~Tensor.bitwise_left_shift` + """ + ... + @overload + def bitwise_left_shift_(self, other: Union[Number, _complex]) -> Tensor: + r""" + bitwise_left_shift_(other) -> Tensor + + In-place version of :meth:`~Tensor.bitwise_left_shift` + """ + ... + def bitwise_not(self) -> Tensor: + r""" + bitwise_not() -> Tensor + + See :func:`torch.bitwise_not` + """ + ... 
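+    # Illustrative sketch, not part of the generated stub: per the two overloads,
+    # the bitwise methods above accept either a tensor or a plain scalar operand
+    # and operate on integral or boolean dtypes.
+    #
+    #     >>> a = torch.tensor([0b1100], dtype=torch.int32)
+    #     >>> a.bitwise_and(torch.tensor([0b1010], dtype=torch.int32))
+    #     tensor([8], dtype=torch.int32)
+    #     >>> a.bitwise_left_shift(1)
+    #     tensor([24], dtype=torch.int32)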
+ def bitwise_not_(self) -> Tensor: + r""" + bitwise_not_() -> Tensor + + In-place version of :meth:`~Tensor.bitwise_not` + """ + ... + @overload + def bitwise_or(self, other: Tensor) -> Tensor: + r""" + bitwise_or() -> Tensor + + See :func:`torch.bitwise_or` + """ + ... + @overload + def bitwise_or(self, other: Union[Number, _complex]) -> Tensor: + r""" + bitwise_or() -> Tensor + + See :func:`torch.bitwise_or` + """ + ... + @overload + def bitwise_or_(self, other: Tensor) -> Tensor: + r""" + bitwise_or_() -> Tensor + + In-place version of :meth:`~Tensor.bitwise_or` + """ + ... + @overload + def bitwise_or_(self, other: Union[Number, _complex]) -> Tensor: + r""" + bitwise_or_() -> Tensor + + In-place version of :meth:`~Tensor.bitwise_or` + """ + ... + @overload + def bitwise_right_shift(self, other: Tensor) -> Tensor: + r""" + bitwise_right_shift(other) -> Tensor + + See :func:`torch.bitwise_right_shift` + """ + ... + @overload + def bitwise_right_shift(self, other: Union[Number, _complex]) -> Tensor: + r""" + bitwise_right_shift(other) -> Tensor + + See :func:`torch.bitwise_right_shift` + """ + ... + @overload + def bitwise_right_shift_(self, other: Tensor) -> Tensor: + r""" + bitwise_right_shift_(other) -> Tensor + + In-place version of :meth:`~Tensor.bitwise_right_shift` + """ + ... + @overload + def bitwise_right_shift_(self, other: Union[Number, _complex]) -> Tensor: + r""" + bitwise_right_shift_(other) -> Tensor + + In-place version of :meth:`~Tensor.bitwise_right_shift` + """ + ... + @overload + def bitwise_xor(self, other: Tensor) -> Tensor: + r""" + bitwise_xor() -> Tensor + + See :func:`torch.bitwise_xor` + """ + ... + @overload + def bitwise_xor(self, other: Union[Number, _complex]) -> Tensor: + r""" + bitwise_xor() -> Tensor + + See :func:`torch.bitwise_xor` + """ + ... + @overload + def bitwise_xor_(self, other: Tensor) -> Tensor: + r""" + bitwise_xor_() -> Tensor + + In-place version of :meth:`~Tensor.bitwise_xor` + """ + ... + @overload + def bitwise_xor_(self, other: Union[Number, _complex]) -> Tensor: + r""" + bitwise_xor_() -> Tensor + + In-place version of :meth:`~Tensor.bitwise_xor` + """ + ... + def bmm(self, mat2: Tensor) -> Tensor: + r""" + bmm(batch2) -> Tensor + + See :func:`torch.bmm` + """ + ... + def bool(self) -> Tensor: + r""" + bool(memory_format=torch.preserve_format) -> Tensor + + ``self.bool()`` is equivalent to ``self.to(torch.bool)``. See :func:`to`. + + Args: + memory_format (:class:`torch.memory_format`, optional): the desired memory format of + returned Tensor. Default: ``torch.preserve_format``. + """ + ... + @overload + def broadcast_to(self, size: Sequence[Union[_int, SymInt]]) -> Tensor: + r""" + broadcast_to(shape) -> Tensor + + See :func:`torch.broadcast_to`. + """ + ... + @overload + def broadcast_to(self, *size: _int) -> Tensor: + r""" + broadcast_to(shape) -> Tensor + + See :func:`torch.broadcast_to`. + """ + ... + def byte(self) -> Tensor: + r""" + byte(memory_format=torch.preserve_format) -> Tensor + + ``self.byte()`` is equivalent to ``self.to(torch.uint8)``. See :func:`to`. + + Args: + memory_format (:class:`torch.memory_format`, optional): the desired memory format of + returned Tensor. Default: ``torch.preserve_format``. + """ + ... + def cauchy_(self, median: _float = 0, sigma: _float = 1, *, generator: Optional[Generator] = None) -> Tensor: + r""" + cauchy_(median=0, sigma=1, *, generator=None) -> Tensor + + Fills the tensor with numbers drawn from the Cauchy distribution: + + .. 
math:: + + f(x) = \dfrac{1}{\pi} \dfrac{\sigma}{(x - \text{median})^2 + \sigma^2} + + .. note:: + Sigma (:math:`\sigma`) is used to denote the scale parameter in Cauchy distribution. + """ + ... + def ccol_indices(self) -> Tensor: ... + def ceil(self) -> Tensor: + r""" + ceil() -> Tensor + + See :func:`torch.ceil` + """ + ... + def ceil_(self) -> Tensor: + r""" + ceil_() -> Tensor + + In-place version of :meth:`~Tensor.ceil` + """ + ... + def chalf(self, *, memory_format: Optional[memory_format] = None) -> Tensor: + r""" + chalf(memory_format=torch.preserve_format) -> Tensor + + ``self.chalf()`` is equivalent to ``self.to(torch.complex32)``. See :func:`to`. + + Args: + memory_format (:class:`torch.memory_format`, optional): the desired memory format of + returned Tensor. Default: ``torch.preserve_format``. + """ + ... + def char(self) -> Tensor: + r""" + char(memory_format=torch.preserve_format) -> Tensor + + ``self.char()`` is equivalent to ``self.to(torch.int8)``. See :func:`to`. + + Args: + memory_format (:class:`torch.memory_format`, optional): the desired memory format of + returned Tensor. Default: ``torch.preserve_format``. + """ + ... + def cholesky(self, upper: _bool = False) -> Tensor: + r""" + cholesky(upper=False) -> Tensor + + See :func:`torch.cholesky` + """ + ... + def cholesky_inverse(self, upper: _bool = False) -> Tensor: + r""" + cholesky_inverse(upper=False) -> Tensor + + See :func:`torch.cholesky_inverse` + """ + ... + def cholesky_solve(self, input2: Tensor, upper: _bool = False) -> Tensor: + r""" + cholesky_solve(input2, upper=False) -> Tensor + + See :func:`torch.cholesky_solve` + """ + ... + def chunk(self, chunks: _int, dim: _int = 0) -> Tuple[Tensor, ...]: + r""" + chunk(chunks, dim=0) -> List of Tensors + + See :func:`torch.chunk` + """ + ... + @overload + def clamp(self, min: Optional[Tensor] = None, max: Optional[Tensor] = None) -> Tensor: + r""" + clamp(min=None, max=None) -> Tensor + + See :func:`torch.clamp` + """ + ... + @overload + def clamp(self, min: Optional[Union[Number, _complex]] = None, max: Optional[Union[Number, _complex]] = None) -> Tensor: + r""" + clamp(min=None, max=None) -> Tensor + + See :func:`torch.clamp` + """ + ... + @overload + def clamp_(self, min: Optional[Tensor] = None, max: Optional[Tensor] = None) -> Tensor: + r""" + clamp_(min=None, max=None) -> Tensor + + In-place version of :meth:`~Tensor.clamp` + """ + ... + @overload + def clamp_(self, min: Optional[Union[Number, _complex]] = None, max: Optional[Union[Number, _complex]] = None) -> Tensor: + r""" + clamp_(min=None, max=None) -> Tensor + + In-place version of :meth:`~Tensor.clamp` + """ + ... + @overload + def clamp_max(self, max: Tensor) -> Tensor: ... + @overload + def clamp_max(self, max: Union[Number, _complex]) -> Tensor: ... + @overload + def clamp_max_(self, max: Tensor) -> Tensor: ... + @overload + def clamp_max_(self, max: Union[Number, _complex]) -> Tensor: ... + @overload + def clamp_min(self, min: Tensor) -> Tensor: ... + @overload + def clamp_min(self, min: Union[Number, _complex]) -> Tensor: ... + @overload + def clamp_min_(self, min: Tensor) -> Tensor: ... + @overload + def clamp_min_(self, min: Union[Number, _complex]) -> Tensor: ... + @overload + def clip(self, min: Optional[Tensor] = None, max: Optional[Tensor] = None) -> Tensor: + r""" + clip(min=None, max=None) -> Tensor + + Alias for :meth:`~Tensor.clamp`. + """ + ... 
+ @overload + def clip(self, min: Optional[Union[Number, _complex]] = None, max: Optional[Union[Number, _complex]] = None) -> Tensor: + r""" + clip(min=None, max=None) -> Tensor + + Alias for :meth:`~Tensor.clamp`. + """ + ... + @overload + def clip_(self, min: Optional[Tensor] = None, max: Optional[Tensor] = None) -> Tensor: + r""" + clip_(min=None, max=None) -> Tensor + + Alias for :meth:`~Tensor.clamp_`. + """ + ... + @overload + def clip_(self, min: Optional[Union[Number, _complex]] = None, max: Optional[Union[Number, _complex]] = None) -> Tensor: + r""" + clip_(min=None, max=None) -> Tensor + + Alias for :meth:`~Tensor.clamp_`. + """ + ... + def clone(self, *, memory_format: Optional[memory_format] = None) -> Tensor: + r""" + clone(*, memory_format=torch.preserve_format) -> Tensor + + See :func:`torch.clone` + """ + ... + def coalesce(self) -> Tensor: + r""" + coalesce() -> Tensor + + Returns a coalesced copy of :attr:`self` if :attr:`self` is an + :ref:`uncoalesced tensor `. + + Returns :attr:`self` if :attr:`self` is a coalesced tensor. + + .. warning:: + Throws an error if :attr:`self` is not a sparse COO tensor. + """ + ... + def col_indices(self) -> Tensor: + r""" + col_indices() -> IntTensor + + Returns the tensor containing the column indices of the :attr:`self` + tensor when :attr:`self` is a sparse CSR tensor of layout ``sparse_csr``. + The ``col_indices`` tensor is strictly of shape (:attr:`self`.nnz()) + and of type ``int32`` or ``int64``. When using MKL routines such as sparse + matrix multiplication, it is necessary to use ``int32`` indexing in order + to avoid downcasting and potentially losing information. + + Example:: + >>> csr = torch.eye(5,5).to_sparse_csr() + >>> csr.col_indices() + tensor([0, 1, 2, 3, 4], dtype=torch.int32) + """ + ... + def conj(self) -> Tensor: + r""" + conj() -> Tensor + + See :func:`torch.conj` + """ + ... + def conj_physical(self) -> Tensor: + r""" + conj_physical() -> Tensor + + See :func:`torch.conj_physical` + """ + ... + def conj_physical_(self) -> Tensor: + r""" + conj_physical_() -> Tensor + + In-place version of :meth:`~Tensor.conj_physical` + """ + ... + def contiguous(self, memory_format=torch.contiguous_format) -> Tensor: + r""" + contiguous(memory_format=torch.contiguous_format) -> Tensor + + Returns a contiguous in memory tensor containing the same data as :attr:`self` tensor. If + :attr:`self` tensor is already in the specified memory format, this function returns the + :attr:`self` tensor. + + Args: + memory_format (:class:`torch.memory_format`, optional): the desired memory format of + returned Tensor. Default: ``torch.contiguous_format``. + """ + ... + def copy_(self, src: Tensor, non_blocking: _bool = False) -> Tensor: + r""" + copy_(src, non_blocking=False) -> Tensor + + Copies the elements from :attr:`src` into :attr:`self` tensor and returns + :attr:`self`. + + The :attr:`src` tensor must be :ref:`broadcastable ` + with the :attr:`self` tensor. It may be of a different data type or reside on a + different device. + + Args: + src (Tensor): the source tensor to copy from + non_blocking (bool): if ``True`` and this copy is between CPU and GPU, + the copy may occur asynchronously with respect to the host. For other + cases, this argument has no effect. + """ + ... + @overload + def copysign(self, other: Tensor) -> Tensor: + r""" + copysign(other) -> Tensor + + See :func:`torch.copysign` + """ + ... 
+ @overload + def copysign(self, other: Union[Number, _complex]) -> Tensor: + r""" + copysign(other) -> Tensor + + See :func:`torch.copysign` + """ + ... + @overload + def copysign_(self, other: Tensor) -> Tensor: + r""" + copysign_(other) -> Tensor + + In-place version of :meth:`~Tensor.copysign` + """ + ... + @overload + def copysign_(self, other: Union[Number, _complex]) -> Tensor: + r""" + copysign_(other) -> Tensor + + In-place version of :meth:`~Tensor.copysign` + """ + ... + def corrcoef(self) -> Tensor: + r""" + corrcoef() -> Tensor + + See :func:`torch.corrcoef` + """ + ... + def cos(self) -> Tensor: + r""" + cos() -> Tensor + + See :func:`torch.cos` + """ + ... + def cos_(self) -> Tensor: + r""" + cos_() -> Tensor + + In-place version of :meth:`~Tensor.cos` + """ + ... + def cosh(self) -> Tensor: + r""" + cosh() -> Tensor + + See :func:`torch.cosh` + """ + ... + def cosh_(self) -> Tensor: + r""" + cosh_() -> Tensor + + In-place version of :meth:`~Tensor.cosh` + """ + ... + @overload + def count_nonzero(self, dim: Optional[_int] = None) -> Tensor: + r""" + count_nonzero(dim=None) -> Tensor + + See :func:`torch.count_nonzero` + """ + ... + @overload + def count_nonzero(self, dim: _size) -> Tensor: + r""" + count_nonzero(dim=None) -> Tensor + + See :func:`torch.count_nonzero` + """ + ... + @overload + def count_nonzero(self, *dim: _int) -> Tensor: + r""" + count_nonzero(dim=None) -> Tensor + + See :func:`torch.count_nonzero` + """ + ... + def cov(self, *, correction: _int = 1, fweights: Optional[Tensor] = None, aweights: Optional[Tensor] = None) -> Tensor: + r""" + cov(*, correction=1, fweights=None, aweights=None) -> Tensor + + See :func:`torch.cov` + """ + ... + def cpu(self, memory_format: torch.memory_format = torch.preserve_format) -> Tensor: + r""" + cpu(memory_format=torch.preserve_format) -> Tensor + + Returns a copy of this object in CPU memory. + + If this object is already in CPU memory and on the correct device, + then no copy is performed and the original object is returned. + + Args: + memory_format (:class:`torch.memory_format`, optional): the desired memory format of + returned Tensor. Default: ``torch.preserve_format``. + """ + ... + def cross(self, other: Tensor, dim: Optional[_int] = None) -> Tensor: + r""" + cross(other, dim=None) -> Tensor + + See :func:`torch.cross` + """ + ... + def crow_indices(self) -> Tensor: + r""" + crow_indices() -> IntTensor + + Returns the tensor containing the compressed row indices of the :attr:`self` + tensor when :attr:`self` is a sparse CSR tensor of layout ``sparse_csr``. + The ``crow_indices`` tensor is strictly of shape (:attr:`self`.size(0) + 1) + and of type ``int32`` or ``int64``. When using MKL routines such as sparse + matrix multiplication, it is necessary to use ``int32`` indexing in order + to avoid downcasting and potentially losing information. + + Example:: + >>> csr = torch.eye(5,5).to_sparse_csr() + >>> csr.crow_indices() + tensor([0, 1, 2, 3, 4, 5], dtype=torch.int32) + """ + ... + def cuda(self, device: Optional[Union[_device, _int, str]] = None, non_blocking: _bool = False, memory_format: torch.memory_format = torch.preserve_format) -> Tensor: + r""" + cuda(device=None, non_blocking=False, memory_format=torch.preserve_format) -> Tensor + + Returns a copy of this object in CUDA memory. + + If this object is already in CUDA memory and on the correct device, + then no copy is performed and the original object is returned. + + Args: + device (:class:`torch.device`): The destination GPU device. 
+ Defaults to the current CUDA device. + non_blocking (bool): If ``True`` and the source is in pinned memory, + the copy will be asynchronous with respect to the host. + Otherwise, the argument has no effect. Default: ``False``. + memory_format (:class:`torch.memory_format`, optional): the desired memory format of + returned Tensor. Default: ``torch.preserve_format``. + """ + ... + @overload + def cummax(self, dim: _int) -> torch.return_types.cummax: + r""" + cummax(dim) -> (Tensor, Tensor) + + See :func:`torch.cummax` + """ + ... + @overload + def cummax(self, dim: Union[str, ellipsis, None]) -> torch.return_types.cummax: + r""" + cummax(dim) -> (Tensor, Tensor) + + See :func:`torch.cummax` + """ + ... + @overload + def cummin(self, dim: _int) -> torch.return_types.cummin: + r""" + cummin(dim) -> (Tensor, Tensor) + + See :func:`torch.cummin` + """ + ... + @overload + def cummin(self, dim: Union[str, ellipsis, None]) -> torch.return_types.cummin: + r""" + cummin(dim) -> (Tensor, Tensor) + + See :func:`torch.cummin` + """ + ... + @overload + def cumprod(self, dim: _int, *, dtype: Optional[_dtype] = None) -> Tensor: + r""" + cumprod(dim, dtype=None) -> Tensor + + See :func:`torch.cumprod` + """ + ... + @overload + def cumprod(self, dim: Union[str, ellipsis, None], *, dtype: Optional[_dtype] = None) -> Tensor: + r""" + cumprod(dim, dtype=None) -> Tensor + + See :func:`torch.cumprod` + """ + ... + @overload + def cumprod_(self, dim: _int, *, dtype: Optional[_dtype] = None) -> Tensor: + r""" + cumprod_(dim, dtype=None) -> Tensor + + In-place version of :meth:`~Tensor.cumprod` + """ + ... + @overload + def cumprod_(self, dim: Union[str, ellipsis, None], *, dtype: Optional[_dtype] = None) -> Tensor: + r""" + cumprod_(dim, dtype=None) -> Tensor + + In-place version of :meth:`~Tensor.cumprod` + """ + ... + @overload + def cumsum(self, dim: _int, *, dtype: Optional[_dtype] = None) -> Tensor: + r""" + cumsum(dim, dtype=None) -> Tensor + + See :func:`torch.cumsum` + """ + ... + @overload + def cumsum(self, dim: Union[str, ellipsis, None], *, dtype: Optional[_dtype] = None) -> Tensor: + r""" + cumsum(dim, dtype=None) -> Tensor + + See :func:`torch.cumsum` + """ + ... + @overload + def cumsum_(self, dim: _int, *, dtype: Optional[_dtype] = None) -> Tensor: + r""" + cumsum_(dim, dtype=None) -> Tensor + + In-place version of :meth:`~Tensor.cumsum` + """ + ... + @overload + def cumsum_(self, dim: Union[str, ellipsis, None], *, dtype: Optional[_dtype] = None) -> Tensor: + r""" + cumsum_(dim, dtype=None) -> Tensor + + In-place version of :meth:`~Tensor.cumsum` + """ + ... + def data_ptr(self) -> _int: + r""" + data_ptr() -> int + + Returns the address of the first element of :attr:`self` tensor. + """ + ... + def deg2rad(self) -> Tensor: + r""" + deg2rad() -> Tensor + + See :func:`torch.deg2rad` + """ + ... + def deg2rad_(self) -> Tensor: + r""" + deg2rad_() -> Tensor + + In-place version of :meth:`~Tensor.deg2rad` + """ + ... + def dense_dim(self) -> _int: + r""" + dense_dim() -> int + + Return the number of dense dimensions in a :ref:`sparse tensor ` :attr:`self`. + + .. note:: + Returns ``len(self.shape)`` if :attr:`self` is not a sparse tensor. + + See also :meth:`Tensor.sparse_dim` and :ref:`hybrid tensors `. + """ + ... + def dequantize(self) -> Tensor: + r""" + dequantize() -> Tensor + + Given a quantized Tensor, dequantize it and return the dequantized float Tensor. + """ + ... + def det(self) -> Tensor: + r""" + det() -> Tensor + + See :func:`torch.det` + """ + ... + def detach(self) -> Tensor: ... 
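+    # A minimal sketch of the named-tuple results returned by ``cummax``/``cummin`` and the
+    # plain tensor returned by ``cumsum`` above; the sample values are illustrative:
+    #
+    #   >>> t = torch.tensor([1., 3., 2.])
+    #   >>> values, indices = t.cummax(dim=0)
+    #   >>> values, indices
+    #   (tensor([1., 3., 3.]), tensor([0, 1, 1]))
+    #   >>> t.cumsum(dim=0)
+    #   tensor([1., 4., 6.])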
+ def detach_(self) -> Tensor: ... + def diag(self, diagonal: _int = 0) -> Tensor: + r""" + diag(diagonal=0) -> Tensor + + See :func:`torch.diag` + """ + ... + def diag_embed(self, offset: _int = 0, dim1: _int = -2, dim2: _int = -1) -> Tensor: + r""" + diag_embed(offset=0, dim1=-2, dim2=-1) -> Tensor + + See :func:`torch.diag_embed` + """ + ... + def diagflat(self, offset: _int = 0) -> Tensor: + r""" + diagflat(offset=0) -> Tensor + + See :func:`torch.diagflat` + """ + ... + @overload + def diagonal(self, *, outdim: Union[str, ellipsis, None], dim1: Union[str, ellipsis, None], dim2: Union[str, ellipsis, None], offset: _int = 0) -> Tensor: + r""" + diagonal(offset=0, dim1=0, dim2=1) -> Tensor + + See :func:`torch.diagonal` + """ + ... + @overload + def diagonal(self, offset: _int = 0, dim1: _int = 0, dim2: _int = 1) -> Tensor: + r""" + diagonal(offset=0, dim1=0, dim2=1) -> Tensor + + See :func:`torch.diagonal` + """ + ... + def diagonal_scatter(self, src: Tensor, offset: _int = 0, dim1: _int = 0, dim2: _int = 1) -> Tensor: + r""" + diagonal_scatter(src, offset=0, dim1=0, dim2=1) -> Tensor + + See :func:`torch.diagonal_scatter` + """ + ... + def diff(self, n: _int = 1, dim: _int = -1, prepend: Optional[Tensor] = None, append: Optional[Tensor] = None) -> Tensor: + r""" + diff(n=1, dim=-1, prepend=None, append=None) -> Tensor + + See :func:`torch.diff` + """ + ... + def digamma(self) -> Tensor: + r""" + digamma() -> Tensor + + See :func:`torch.digamma` + """ + ... + def digamma_(self) -> Tensor: + r""" + digamma_() -> Tensor + + In-place version of :meth:`~Tensor.digamma` + """ + ... + def dim(self) -> _int: + r""" + dim() -> int + + Returns the number of dimensions of :attr:`self` tensor. + """ + ... + def dist(self, other: Tensor, p: Union[Number, _complex] = 2) -> Tensor: + r""" + dist(other, p=2) -> Tensor + + See :func:`torch.dist` + """ + ... + def div(self, other: Union[Tensor, Number], *, rounding_mode: Optional[str] = None) -> Tensor: + r""" + div(value, *, rounding_mode=None) -> Tensor + + See :func:`torch.div` + """ + ... + def div_(self, other: Union[Tensor, Number], *, rounding_mode: Optional[str] = None) -> Tensor: + r""" + div_(value, *, rounding_mode=None) -> Tensor + + In-place version of :meth:`~Tensor.div` + """ + ... + @overload + def divide(self, other: Tensor) -> Tensor: + r""" + divide(value, *, rounding_mode=None) -> Tensor + + See :func:`torch.divide` + """ + ... + @overload + def divide(self, other: Tensor, *, rounding_mode: Optional[str]) -> Tensor: + r""" + divide(value, *, rounding_mode=None) -> Tensor + + See :func:`torch.divide` + """ + ... + @overload + def divide(self, other: Union[Number, _complex], *, rounding_mode: Optional[str]) -> Tensor: + r""" + divide(value, *, rounding_mode=None) -> Tensor + + See :func:`torch.divide` + """ + ... + @overload + def divide(self, other: Union[Number, _complex]) -> Tensor: + r""" + divide(value, *, rounding_mode=None) -> Tensor + + See :func:`torch.divide` + """ + ... + @overload + def divide_(self, other: Tensor) -> Tensor: + r""" + divide_(value, *, rounding_mode=None) -> Tensor + + In-place version of :meth:`~Tensor.divide` + """ + ... + @overload + def divide_(self, other: Tensor, *, rounding_mode: Optional[str]) -> Tensor: + r""" + divide_(value, *, rounding_mode=None) -> Tensor + + In-place version of :meth:`~Tensor.divide` + """ + ... 
+ @overload + def divide_(self, other: Union[Number, _complex], *, rounding_mode: Optional[str]) -> Tensor: + r""" + divide_(value, *, rounding_mode=None) -> Tensor + + In-place version of :meth:`~Tensor.divide` + """ + ... + @overload + def divide_(self, other: Union[Number, _complex]) -> Tensor: + r""" + divide_(value, *, rounding_mode=None) -> Tensor + + In-place version of :meth:`~Tensor.divide` + """ + ... + def dot(self, tensor: Tensor) -> Tensor: + r""" + dot(other) -> Tensor + + See :func:`torch.dot` + """ + ... + def double(self) -> Tensor: + r""" + double(memory_format=torch.preserve_format) -> Tensor + + ``self.double()`` is equivalent to ``self.to(torch.float64)``. See :func:`to`. + + Args: + memory_format (:class:`torch.memory_format`, optional): the desired memory format of + returned Tensor. Default: ``torch.preserve_format``. + """ + ... + @overload + def dsplit(self, sections: _int) -> Tuple[Tensor, ...]: + r""" + dsplit(split_size_or_sections) -> List of Tensors + + See :func:`torch.dsplit` + """ + ... + @overload + def dsplit(self, indices: _size) -> Tuple[Tensor, ...]: + r""" + dsplit(split_size_or_sections) -> List of Tensors + + See :func:`torch.dsplit` + """ + ... + @overload + def dsplit(self, *indices: _int) -> Tuple[Tensor, ...]: + r""" + dsplit(split_size_or_sections) -> List of Tensors + + See :func:`torch.dsplit` + """ + ... + def element_size(self) -> _int: + r""" + element_size() -> int + + Returns the size in bytes of an individual element. + + Example:: + + >>> torch.tensor([]).element_size() + 4 + >>> torch.tensor([], dtype=torch.uint8).element_size() + 1 + """ + ... + @overload + def eq(self, other: Tensor) -> Tensor: + r""" + eq(other) -> Tensor + + See :func:`torch.eq` + """ + ... + @overload + def eq(self, other: Union[Number, _complex]) -> Tensor: + r""" + eq(other) -> Tensor + + See :func:`torch.eq` + """ + ... + @overload + def eq_(self, other: Tensor) -> Tensor: + r""" + eq_(other) -> Tensor + + In-place version of :meth:`~Tensor.eq` + """ + ... + @overload + def eq_(self, other: Union[Number, _complex]) -> Tensor: + r""" + eq_(other) -> Tensor + + In-place version of :meth:`~Tensor.eq` + """ + ... + def equal(self, other: Tensor) -> _bool: + r""" + equal(other) -> bool + + See :func:`torch.equal` + """ + ... + def erf(self) -> Tensor: + r""" + erf() -> Tensor + + See :func:`torch.erf` + """ + ... + def erf_(self) -> Tensor: + r""" + erf_() -> Tensor + + In-place version of :meth:`~Tensor.erf` + """ + ... + def erfc(self) -> Tensor: + r""" + erfc() -> Tensor + + See :func:`torch.erfc` + """ + ... + def erfc_(self) -> Tensor: + r""" + erfc_() -> Tensor + + In-place version of :meth:`~Tensor.erfc` + """ + ... + def erfinv(self) -> Tensor: + r""" + erfinv() -> Tensor + + See :func:`torch.erfinv` + """ + ... + def erfinv_(self) -> Tensor: + r""" + erfinv_() -> Tensor + + In-place version of :meth:`~Tensor.erfinv` + """ + ... + def exp(self) -> Tensor: + r""" + exp() -> Tensor + + See :func:`torch.exp` + """ + ... + def exp2(self) -> Tensor: + r""" + exp2() -> Tensor + + See :func:`torch.exp2` + """ + ... + def exp2_(self) -> Tensor: + r""" + exp2_() -> Tensor + + In-place version of :meth:`~Tensor.exp2` + """ + ... + def exp_(self) -> Tensor: + r""" + exp_() -> Tensor + + In-place version of :meth:`~Tensor.exp` + """ + ... 
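+    # A minimal sketch of the ``div``/``divide`` rounding modes distinguished by the
+    # overloads above; the sample values are illustrative:
+    #
+    #   >>> a = torch.tensor([7., -7.])
+    #   >>> a.div(2)                            # true division
+    #   tensor([ 3.5000, -3.5000])
+    #   >>> a.div(2, rounding_mode='trunc')     # round toward zero
+    #   tensor([ 3., -3.])
+    #   >>> a.div(2, rounding_mode='floor')     # round toward negative infinity
+    #   tensor([ 3., -4.])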
+ @overload + def expand(self, size: Sequence[Union[_int, SymInt]], *, implicit: _bool = False) -> Tensor: + r""" + expand(*sizes) -> Tensor + + Returns a new view of the :attr:`self` tensor with singleton dimensions expanded + to a larger size. + + Passing -1 as the size for a dimension means not changing the size of + that dimension. + + Tensor can be also expanded to a larger number of dimensions, and the + new ones will be appended at the front. For the new dimensions, the + size cannot be set to -1. + + Expanding a tensor does not allocate new memory, but only creates a + new view on the existing tensor where a dimension of size one is + expanded to a larger size by setting the ``stride`` to 0. Any dimension + of size 1 can be expanded to an arbitrary value without allocating new + memory. + + Args: + *sizes (torch.Size or int...): the desired expanded size + + .. warning:: + + More than one element of an expanded tensor may refer to a single + memory location. As a result, in-place operations (especially ones that + are vectorized) may result in incorrect behavior. If you need to write + to the tensors, please clone them first. + + Example:: + + >>> x = torch.tensor([[1], [2], [3]]) + >>> x.size() + torch.Size([3, 1]) + >>> x.expand(3, 4) + tensor([[ 1, 1, 1, 1], + [ 2, 2, 2, 2], + [ 3, 3, 3, 3]]) + >>> x.expand(-1, 4) # -1 means not changing the size of that dimension + tensor([[ 1, 1, 1, 1], + [ 2, 2, 2, 2], + [ 3, 3, 3, 3]]) + """ + ... + @overload + def expand(self, *size: _int, implicit: _bool = False) -> Tensor: + r""" + expand(*sizes) -> Tensor + + Returns a new view of the :attr:`self` tensor with singleton dimensions expanded + to a larger size. + + Passing -1 as the size for a dimension means not changing the size of + that dimension. + + Tensor can be also expanded to a larger number of dimensions, and the + new ones will be appended at the front. For the new dimensions, the + size cannot be set to -1. + + Expanding a tensor does not allocate new memory, but only creates a + new view on the existing tensor where a dimension of size one is + expanded to a larger size by setting the ``stride`` to 0. Any dimension + of size 1 can be expanded to an arbitrary value without allocating new + memory. + + Args: + *sizes (torch.Size or int...): the desired expanded size + + .. warning:: + + More than one element of an expanded tensor may refer to a single + memory location. As a result, in-place operations (especially ones that + are vectorized) may result in incorrect behavior. If you need to write + to the tensors, please clone them first. + + Example:: + + >>> x = torch.tensor([[1], [2], [3]]) + >>> x.size() + torch.Size([3, 1]) + >>> x.expand(3, 4) + tensor([[ 1, 1, 1, 1], + [ 2, 2, 2, 2], + [ 3, 3, 3, 3]]) + >>> x.expand(-1, 4) # -1 means not changing the size of that dimension + tensor([[ 1, 1, 1, 1], + [ 2, 2, 2, 2], + [ 3, 3, 3, 3]]) + """ + ... + def expand_as(self, other: Tensor) -> Tensor: + r""" + expand_as(other) -> Tensor + + Expand this tensor to the same size as :attr:`other`. + ``self.expand_as(other)`` is equivalent to ``self.expand(other.size())``. + + Please see :meth:`~Tensor.expand` for more information about ``expand``. + + Args: + other (:class:`torch.Tensor`): The result tensor has the same size + as :attr:`other`. + """ + ... + def expm1(self) -> Tensor: + r""" + expm1() -> Tensor + + See :func:`torch.expm1` + """ + ... + def expm1_(self) -> Tensor: + r""" + expm1_() -> Tensor + + In-place version of :meth:`~Tensor.expm1` + """ + ... 
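+    # A minimal sketch of ``expand_as`` above, which is shorthand for
+    # ``self.expand(other.size())``; the tensors here are illustrative:
+    #
+    #   >>> col = torch.tensor([[1], [2], [3]])
+    #   >>> other = torch.zeros(3, 4)
+    #   >>> col.expand_as(other)
+    #   tensor([[1, 1, 1, 1],
+    #           [2, 2, 2, 2],
+    #           [3, 3, 3, 3]])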
+ def exponential_(self, lambd: _float = 1, *, generator: Optional[Generator] = None) -> Tensor: + r""" + exponential_(lambd=1, *, generator=None) -> Tensor + + Fills :attr:`self` tensor with elements drawn from the PDF (probability density function): + + .. math:: + + f(x) = \lambda e^{-\lambda x}, x > 0 + + .. note:: + In probability theory, exponential distribution is supported on interval [0, :math:`\inf`) (i.e., :math:`x >= 0`) + implying that zero can be sampled from the exponential distribution. + However, :func:`torch.Tensor.exponential_` does not sample zero, + which means that its actual support is the interval (0, :math:`\inf`). + + Note that :func:`torch.distributions.exponential.Exponential` is supported on the interval [0, :math:`\inf`) and can sample zero. + """ + ... + @overload + def fill_(self, value: Tensor) -> Tensor: + r""" + fill_(value) -> Tensor + + Fills :attr:`self` tensor with the specified value. + """ + ... + @overload + def fill_(self, value: Union[Number, _complex]) -> Tensor: + r""" + fill_(value) -> Tensor + + Fills :attr:`self` tensor with the specified value. + """ + ... + def fill_diagonal_(self, fill_value: Union[Number, _complex], wrap: _bool = False) -> Tensor: + r""" + fill_diagonal_(fill_value, wrap=False) -> Tensor + + Fill the main diagonal of a tensor that has at least 2-dimensions. + When dims>2, all dimensions of input must be of equal length. + This function modifies the input tensor in-place, and returns the input tensor. + + Arguments: + fill_value (Scalar): the fill value + wrap (bool): the diagonal 'wrapped' after N columns for tall matrices. + + Example:: + + >>> a = torch.zeros(3, 3) + >>> a.fill_diagonal_(5) + tensor([[5., 0., 0.], + [0., 5., 0.], + [0., 0., 5.]]) + >>> b = torch.zeros(7, 3) + >>> b.fill_diagonal_(5) + tensor([[5., 0., 0.], + [0., 5., 0.], + [0., 0., 5.], + [0., 0., 0.], + [0., 0., 0.], + [0., 0., 0.], + [0., 0., 0.]]) + >>> c = torch.zeros(7, 3) + >>> c.fill_diagonal_(5, wrap=True) + tensor([[5., 0., 0.], + [0., 5., 0.], + [0., 0., 5.], + [0., 0., 0.], + [5., 0., 0.], + [0., 5., 0.], + [0., 0., 5.]]) + """ + ... + def fix(self) -> Tensor: + r""" + fix() -> Tensor + + See :func:`torch.fix`. + """ + ... + def fix_(self) -> Tensor: + r""" + fix_() -> Tensor + + In-place version of :meth:`~Tensor.fix` + """ + ... + @overload + def flatten(self, start_dim: _int = 0, end_dim: _int = -1) -> Tensor: + r""" + flatten(start_dim=0, end_dim=-1) -> Tensor + + See :func:`torch.flatten` + """ + ... + @overload + def flatten(self, start_dim: _int, end_dim: _int, out_dim: Union[str, ellipsis, None]) -> Tensor: + r""" + flatten(start_dim=0, end_dim=-1) -> Tensor + + See :func:`torch.flatten` + """ + ... + @overload + def flatten(self, start_dim: Union[str, ellipsis, None], end_dim: Union[str, ellipsis, None], out_dim: Union[str, ellipsis, None]) -> Tensor: + r""" + flatten(start_dim=0, end_dim=-1) -> Tensor + + See :func:`torch.flatten` + """ + ... + @overload + def flatten(self, dims: Sequence[Union[str, ellipsis, None]], out_dim: Union[str, ellipsis, None]) -> Tensor: + r""" + flatten(start_dim=0, end_dim=-1) -> Tensor + + See :func:`torch.flatten` + """ + ... + @overload + def flip(self, dims: _size) -> Tensor: + r""" + flip(dims) -> Tensor + + See :func:`torch.flip` + """ + ... + @overload + def flip(self, *dims: _int) -> Tensor: + r""" + flip(dims) -> Tensor + + See :func:`torch.flip` + """ + ... + def fliplr(self) -> Tensor: + r""" + fliplr() -> Tensor + + See :func:`torch.fliplr` + """ + ... 
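+    # A minimal sketch of ``exponential_`` above, assuming an illustrative rate; samples
+    # lie in (0, inf) and their mean is approximately ``1 / lambd``:
+    #
+    #   >>> samples = torch.empty(100000).exponential_(lambd=2.0)
+    #   >>> bool((samples > 0).all())          # zero is never drawn
+    #   True
+    #   >>> samples.mean()                     # roughly 0.5 == 1 / lambd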
+ def flipud(self) -> Tensor: + r""" + flipud() -> Tensor + + See :func:`torch.flipud` + """ + ... + def float(self) -> Tensor: + r""" + float(memory_format=torch.preserve_format) -> Tensor + + ``self.float()`` is equivalent to ``self.to(torch.float32)``. See :func:`to`. + + Args: + memory_format (:class:`torch.memory_format`, optional): the desired memory format of + returned Tensor. Default: ``torch.preserve_format``. + """ + ... + @overload + def float_power(self, exponent: Tensor) -> Tensor: + r""" + float_power(exponent) -> Tensor + + See :func:`torch.float_power` + """ + ... + @overload + def float_power(self, exponent: Union[Number, _complex]) -> Tensor: + r""" + float_power(exponent) -> Tensor + + See :func:`torch.float_power` + """ + ... + @overload + def float_power_(self, exponent: Tensor) -> Tensor: + r""" + float_power_(exponent) -> Tensor + + In-place version of :meth:`~Tensor.float_power` + """ + ... + @overload + def float_power_(self, exponent: Union[Number, _complex]) -> Tensor: + r""" + float_power_(exponent) -> Tensor + + In-place version of :meth:`~Tensor.float_power` + """ + ... + def floor(self) -> Tensor: + r""" + floor() -> Tensor + + See :func:`torch.floor` + """ + ... + def floor_(self) -> Tensor: + r""" + floor_() -> Tensor + + In-place version of :meth:`~Tensor.floor` + """ + ... + def floor_divide(self, other: Union[Tensor, Number, torch.SymInt, torch.SymFloat], *, out: Optional[Tensor] = None) -> Tensor: + r""" + floor_divide(value) -> Tensor + + See :func:`torch.floor_divide` + """ + ... + def floor_divide_(self, other: Union[Tensor, Number, torch.SymInt, torch.SymFloat]) -> Tensor: + r""" + floor_divide_(value) -> Tensor + + In-place version of :meth:`~Tensor.floor_divide` + """ + ... + def fmax(self, other: Tensor) -> Tensor: + r""" + fmax(other) -> Tensor + + See :func:`torch.fmax` + """ + ... + def fmin(self, other: Tensor) -> Tensor: + r""" + fmin(other) -> Tensor + + See :func:`torch.fmin` + """ + ... + @overload + def fmod(self, other: Tensor) -> Tensor: + r""" + fmod(divisor) -> Tensor + + See :func:`torch.fmod` + """ + ... + @overload + def fmod(self, other: Union[Number, _complex]) -> Tensor: + r""" + fmod(divisor) -> Tensor + + See :func:`torch.fmod` + """ + ... + @overload + def fmod_(self, other: Tensor) -> Tensor: + r""" + fmod_(divisor) -> Tensor + + In-place version of :meth:`~Tensor.fmod` + """ + ... + @overload + def fmod_(self, other: Union[Number, _complex]) -> Tensor: + r""" + fmod_(divisor) -> Tensor + + In-place version of :meth:`~Tensor.fmod` + """ + ... + def frac(self) -> Tensor: + r""" + frac() -> Tensor + + See :func:`torch.frac` + """ + ... + def frac_(self) -> Tensor: + r""" + frac_() -> Tensor + + In-place version of :meth:`~Tensor.frac` + """ + ... + def frexp(self) -> torch.return_types.frexp: + r""" + frexp(input) -> (Tensor mantissa, Tensor exponent) + + See :func:`torch.frexp` + """ + ... + @overload + def gather(self, dim: _int, index: Tensor, *, sparse_grad: _bool = False) -> Tensor: + r""" + gather(dim, index) -> Tensor + + See :func:`torch.gather` + """ + ... + @overload + def gather(self, dim: Union[str, ellipsis, None], index: Tensor, *, sparse_grad: _bool = False) -> Tensor: + r""" + gather(dim, index) -> Tensor + + See :func:`torch.gather` + """ + ... + def gcd(self, other: Tensor) -> Tensor: + r""" + gcd(other) -> Tensor + + See :func:`torch.gcd` + """ + ... + def gcd_(self, other: Tensor) -> Tensor: + r""" + gcd_(other) -> Tensor + + In-place version of :meth:`~Tensor.gcd` + """ + ... 
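+    # A minimal sketch of ``gather`` above along ``dim=1``, where
+    # ``out[i][j] = src[i][idx[i][j]]``; the inputs are illustrative:
+    #
+    #   >>> src = torch.tensor([[1, 2], [3, 4]])
+    #   >>> idx = torch.tensor([[0, 0], [1, 0]])
+    #   >>> src.gather(1, idx)
+    #   tensor([[1, 1],
+    #           [4, 3]])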
+ @overload + def ge(self, other: Tensor) -> Tensor: + r""" + ge(other) -> Tensor + + See :func:`torch.ge`. + """ + ... + @overload + def ge(self, other: Union[Number, _complex]) -> Tensor: + r""" + ge(other) -> Tensor + + See :func:`torch.ge`. + """ + ... + @overload + def ge_(self, other: Tensor) -> Tensor: + r""" + ge_(other) -> Tensor + + In-place version of :meth:`~Tensor.ge`. + """ + ... + @overload + def ge_(self, other: Union[Number, _complex]) -> Tensor: + r""" + ge_(other) -> Tensor + + In-place version of :meth:`~Tensor.ge`. + """ + ... + def geometric_(self, p: _float, *, generator: Optional[Generator] = None) -> Tensor: + r""" + geometric_(p, *, generator=None) -> Tensor + + Fills :attr:`self` tensor with elements drawn from the geometric distribution: + + .. math:: + + P(X=k) = (1 - p)^{k - 1} p, k = 1, 2, ... + + .. note:: + :func:`torch.Tensor.geometric_` `k`-th trial is the first success hence draws samples in :math:`\{1, 2, \ldots\}`, whereas + :func:`torch.distributions.geometric.Geometric` :math:`(k+1)`-th trial is the first success + hence draws samples in :math:`\{0, 1, \ldots\}`. + """ + ... + def geqrf(self) -> torch.return_types.geqrf: + r""" + geqrf() -> (Tensor, Tensor) + + See :func:`torch.geqrf` + """ + ... + def ger(self, vec2: Tensor) -> Tensor: + r""" + ger(vec2) -> Tensor + + See :func:`torch.ger` + """ + ... + def get_device(self) -> _int: + r""" + get_device() -> Device ordinal (Integer) + + For CUDA tensors, this function returns the device ordinal of the GPU on which the tensor resides. + For CPU tensors, this function returns `-1`. + + Example:: + + >>> x = torch.randn(3, 4, 5, device='cuda:0') + >>> x.get_device() + 0 + >>> x.cpu().get_device() + -1 + """ + ... + @overload + def greater(self, other: Tensor) -> Tensor: + r""" + greater(other) -> Tensor + + See :func:`torch.greater`. + """ + ... + @overload + def greater(self, other: Union[Number, _complex]) -> Tensor: + r""" + greater(other) -> Tensor + + See :func:`torch.greater`. + """ + ... + @overload + def greater_(self, other: Tensor) -> Tensor: + r""" + greater_(other) -> Tensor + + In-place version of :meth:`~Tensor.greater`. + """ + ... + @overload + def greater_(self, other: Union[Number, _complex]) -> Tensor: + r""" + greater_(other) -> Tensor + + In-place version of :meth:`~Tensor.greater`. + """ + ... + @overload + def greater_equal(self, other: Tensor) -> Tensor: + r""" + greater_equal(other) -> Tensor + + See :func:`torch.greater_equal`. + """ + ... + @overload + def greater_equal(self, other: Union[Number, _complex]) -> Tensor: + r""" + greater_equal(other) -> Tensor + + See :func:`torch.greater_equal`. + """ + ... + @overload + def greater_equal_(self, other: Tensor) -> Tensor: + r""" + greater_equal_(other) -> Tensor + + In-place version of :meth:`~Tensor.greater_equal`. + """ + ... + @overload + def greater_equal_(self, other: Union[Number, _complex]) -> Tensor: + r""" + greater_equal_(other) -> Tensor + + In-place version of :meth:`~Tensor.greater_equal`. + """ + ... + @overload + def gt(self, other: Tensor) -> Tensor: + r""" + gt(other) -> Tensor + + See :func:`torch.gt`. + """ + ... + @overload + def gt(self, other: Union[Number, _complex]) -> Tensor: + r""" + gt(other) -> Tensor + + See :func:`torch.gt`. + """ + ... + @overload + def gt_(self, other: Tensor) -> Tensor: + r""" + gt_(other) -> Tensor + + In-place version of :meth:`~Tensor.gt`. + """ + ... 
+ @overload + def gt_(self, other: Union[Number, _complex]) -> Tensor: + r""" + gt_(other) -> Tensor + + In-place version of :meth:`~Tensor.gt`. + """ + ... + def half(self) -> Tensor: + r""" + half(memory_format=torch.preserve_format) -> Tensor + + ``self.half()`` is equivalent to ``self.to(torch.float16)``. See :func:`to`. + + Args: + memory_format (:class:`torch.memory_format`, optional): the desired memory format of + returned Tensor. Default: ``torch.preserve_format``. + """ + ... + def hardshrink(self, lambd: Union[Number, _complex] = 0.5) -> Tensor: + r""" + hardshrink(lambd=0.5) -> Tensor + + See :func:`torch.nn.functional.hardshrink` + """ + ... + def has_names(self) -> _bool: + r""" + Is ``True`` if any of this tensor's dimensions are named. Otherwise, is ``False``. + """ + ... + def heaviside(self, values: Tensor) -> Tensor: + r""" + heaviside(values) -> Tensor + + See :func:`torch.heaviside` + """ + ... + def heaviside_(self, values: Tensor) -> Tensor: + r""" + heaviside_(values) -> Tensor + + In-place version of :meth:`~Tensor.heaviside` + """ + ... + def histc(self, bins: _int = 100, min: Union[Number, _complex] = 0, max: Union[Number, _complex] = 0) -> Tensor: + r""" + histc(bins=100, min=0, max=0) -> Tensor + + See :func:`torch.histc` + """ + ... + @overload + def histogram(self, bins: Tensor, *, weight: Optional[Tensor] = None, density: _bool = False) -> torch.return_types.histogram: + r""" + histogram(input, bins, *, range=None, weight=None, density=False) -> (Tensor, Tensor) + + See :func:`torch.histogram` + """ + ... + @overload + def histogram(self, bins: _int = 100, *, range: Optional[Sequence[_float]] = None, weight: Optional[Tensor] = None, density: _bool = False) -> torch.return_types.histogram: + r""" + histogram(input, bins, *, range=None, weight=None, density=False) -> (Tensor, Tensor) + + See :func:`torch.histogram` + """ + ... + @overload + def hsplit(self, sections: _int) -> Tuple[Tensor, ...]: + r""" + hsplit(split_size_or_sections) -> List of Tensors + + See :func:`torch.hsplit` + """ + ... + @overload + def hsplit(self, indices: _size) -> Tuple[Tensor, ...]: + r""" + hsplit(split_size_or_sections) -> List of Tensors + + See :func:`torch.hsplit` + """ + ... + @overload + def hsplit(self, *indices: _int) -> Tuple[Tensor, ...]: + r""" + hsplit(split_size_or_sections) -> List of Tensors + + See :func:`torch.hsplit` + """ + ... + def hypot(self, other: Tensor) -> Tensor: + r""" + hypot(other) -> Tensor + + See :func:`torch.hypot` + """ + ... + def hypot_(self, other: Tensor) -> Tensor: + r""" + hypot_(other) -> Tensor + + In-place version of :meth:`~Tensor.hypot` + """ + ... + def i0(self) -> Tensor: + r""" + i0() -> Tensor + + See :func:`torch.i0` + """ + ... + def i0_(self) -> Tensor: + r""" + i0_() -> Tensor + + In-place version of :meth:`~Tensor.i0` + """ + ... + def igamma(self, other: Tensor) -> Tensor: + r""" + igamma(other) -> Tensor + + See :func:`torch.igamma` + """ + ... + def igamma_(self, other: Tensor) -> Tensor: + r""" + igamma_(other) -> Tensor + + In-place version of :meth:`~Tensor.igamma` + """ + ... + def igammac(self, other: Tensor) -> Tensor: + r""" + igammac(other) -> Tensor + See :func:`torch.igammac` + """ + ... + def igammac_(self, other: Tensor) -> Tensor: + r""" + igammac_(other) -> Tensor + In-place version of :meth:`~Tensor.igammac` + """ + ... 
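+    # A minimal sketch of ``histc`` above, using four equal-width bins over [0, 3];
+    # the input values are illustrative:
+    #
+    #   >>> t = torch.tensor([1., 2., 1.])
+    #   >>> t.histc(bins=4, min=0, max=3)
+    #   tensor([0., 2., 1., 0.])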
+ @overload + def index_add(self, dim: _int, index: Tensor, source: Tensor, *, alpha: Union[Number, _complex] = 1) -> Tensor: + r""" + index_add(dim, index, source, *, alpha=1) -> Tensor + + Out-of-place version of :meth:`torch.Tensor.index_add_`. + """ + ... + @overload + def index_add(self, dim: Union[str, ellipsis, None], index: Tensor, source: Tensor, *, alpha: Union[Number, _complex] = 1) -> Tensor: + r""" + index_add(dim, index, source, *, alpha=1) -> Tensor + + Out-of-place version of :meth:`torch.Tensor.index_add_`. + """ + ... + def index_add_(self, dim: _int, index: Tensor, source: Tensor, *, alpha: Union[Number, _complex] = 1) -> Tensor: + r""" + index_add_(dim, index, source, *, alpha=1) -> Tensor + + Accumulate the elements of :attr:`alpha` times ``source`` into the :attr:`self` + tensor by adding to the indices in the order given in :attr:`index`. For example, + if ``dim == 0``, ``index[i] == j``, and ``alpha=-1``, then the ``i``\ th row of + ``source`` is subtracted from the ``j``\ th row of :attr:`self`. + + The :attr:`dim`\ th dimension of ``source`` must have the same size as the + length of :attr:`index` (which must be a vector), and all other dimensions must + match :attr:`self`, or an error will be raised. + + For a 3-D tensor the output is given as:: + + self[index[i], :, :] += alpha * src[i, :, :] # if dim == 0 + self[:, index[i], :] += alpha * src[:, i, :] # if dim == 1 + self[:, :, index[i]] += alpha * src[:, :, i] # if dim == 2 + + Note: + This operation may behave nondeterministically when given tensors on a CUDA device. See :doc:`/notes/randomness` for more information. + + Args: + dim (int): dimension along which to index + index (Tensor): indices of ``source`` to select from, + should have dtype either `torch.int64` or `torch.int32` + source (Tensor): the tensor containing values to add + + Keyword args: + alpha (Number): the scalar multiplier for ``source`` + + Example:: + + >>> x = torch.ones(5, 3) + >>> t = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=torch.float) + >>> index = torch.tensor([0, 4, 2]) + >>> x.index_add_(0, index, t) + tensor([[ 2., 3., 4.], + [ 1., 1., 1.], + [ 8., 9., 10.], + [ 1., 1., 1.], + [ 5., 6., 7.]]) + >>> x.index_add_(0, index, t, alpha=-1) + tensor([[ 1., 1., 1.], + [ 1., 1., 1.], + [ 1., 1., 1.], + [ 1., 1., 1.], + [ 1., 1., 1.]]) + """ + ... + @overload + def index_copy(self, dim: _int, index: Tensor, source: Tensor) -> Tensor: + r""" + index_copy(dim, index, tensor2) -> Tensor + + Out-of-place version of :meth:`torch.Tensor.index_copy_`. + """ + ... + @overload + def index_copy(self, dim: Union[str, ellipsis, None], index: Tensor, source: Tensor) -> Tensor: + r""" + index_copy(dim, index, tensor2) -> Tensor + + Out-of-place version of :meth:`torch.Tensor.index_copy_`. + """ + ... + @overload + def index_copy_(self, dim: _int, index: Tensor, source: Tensor) -> Tensor: + r""" + index_copy_(dim, index, tensor) -> Tensor + + Copies the elements of :attr:`tensor` into the :attr:`self` tensor by selecting + the indices in the order given in :attr:`index`. For example, if ``dim == 0`` + and ``index[i] == j``, then the ``i``\ th row of :attr:`tensor` is copied to the + ``j``\ th row of :attr:`self`. + + The :attr:`dim`\ th dimension of :attr:`tensor` must have the same size as the + length of :attr:`index` (which must be a vector), and all other dimensions must + match :attr:`self`, or an error will be raised. + + .. 
note:: + If :attr:`index` contains duplicate entries, multiple elements from + :attr:`tensor` will be copied to the same index of :attr:`self`. The result + is nondeterministic since it depends on which copy occurs last. + + Args: + dim (int): dimension along which to index + index (LongTensor): indices of :attr:`tensor` to select from + tensor (Tensor): the tensor containing values to copy + + Example:: + + >>> x = torch.zeros(5, 3) + >>> t = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=torch.float) + >>> index = torch.tensor([0, 4, 2]) + >>> x.index_copy_(0, index, t) + tensor([[ 1., 2., 3.], + [ 0., 0., 0.], + [ 7., 8., 9.], + [ 0., 0., 0.], + [ 4., 5., 6.]]) + """ + ... + @overload + def index_copy_(self, dim: Union[str, ellipsis, None], index: Tensor, source: Tensor) -> Tensor: + r""" + index_copy_(dim, index, tensor) -> Tensor + + Copies the elements of :attr:`tensor` into the :attr:`self` tensor by selecting + the indices in the order given in :attr:`index`. For example, if ``dim == 0`` + and ``index[i] == j``, then the ``i``\ th row of :attr:`tensor` is copied to the + ``j``\ th row of :attr:`self`. + + The :attr:`dim`\ th dimension of :attr:`tensor` must have the same size as the + length of :attr:`index` (which must be a vector), and all other dimensions must + match :attr:`self`, or an error will be raised. + + .. note:: + If :attr:`index` contains duplicate entries, multiple elements from + :attr:`tensor` will be copied to the same index of :attr:`self`. The result + is nondeterministic since it depends on which copy occurs last. + + Args: + dim (int): dimension along which to index + index (LongTensor): indices of :attr:`tensor` to select from + tensor (Tensor): the tensor containing values to copy + + Example:: + + >>> x = torch.zeros(5, 3) + >>> t = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=torch.float) + >>> index = torch.tensor([0, 4, 2]) + >>> x.index_copy_(0, index, t) + tensor([[ 1., 2., 3.], + [ 0., 0., 0.], + [ 7., 8., 9.], + [ 0., 0., 0.], + [ 4., 5., 6.]]) + """ + ... + @overload + def index_fill(self, dim: _int, index: Tensor, value: Tensor) -> Tensor: + r""" + index_fill(dim, index, value) -> Tensor + + Out-of-place version of :meth:`torch.Tensor.index_fill_`. + """ + ... + @overload + def index_fill(self, dim: Union[str, ellipsis, None], index: Tensor, value: Tensor) -> Tensor: + r""" + index_fill(dim, index, value) -> Tensor + + Out-of-place version of :meth:`torch.Tensor.index_fill_`. + """ + ... + @overload + def index_fill(self, dim: _int, index: Tensor, value: Union[Number, _complex]) -> Tensor: + r""" + index_fill(dim, index, value) -> Tensor + + Out-of-place version of :meth:`torch.Tensor.index_fill_`. + """ + ... + @overload + def index_fill(self, dim: Union[str, ellipsis, None], index: Tensor, value: Union[Number, _complex]) -> Tensor: + r""" + index_fill(dim, index, value) -> Tensor + + Out-of-place version of :meth:`torch.Tensor.index_fill_`. + """ + ... + @overload + def index_fill_(self, dim: _int, index: Tensor, value: Tensor) -> Tensor: + r""" + index_fill_(dim, index, value) -> Tensor + + Fills the elements of the :attr:`self` tensor with value :attr:`value` by + selecting the indices in the order given in :attr:`index`. 
+ + Args: + dim (int): dimension along which to index + index (LongTensor): indices of :attr:`self` tensor to fill in + value (float): the value to fill with + + Example:: + >>> x = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=torch.float) + >>> index = torch.tensor([0, 2]) + >>> x.index_fill_(1, index, -1) + tensor([[-1., 2., -1.], + [-1., 5., -1.], + [-1., 8., -1.]]) + """ + ... + @overload + def index_fill_(self, dim: Union[str, ellipsis, None], index: Tensor, value: Tensor) -> Tensor: + r""" + index_fill_(dim, index, value) -> Tensor + + Fills the elements of the :attr:`self` tensor with value :attr:`value` by + selecting the indices in the order given in :attr:`index`. + + Args: + dim (int): dimension along which to index + index (LongTensor): indices of :attr:`self` tensor to fill in + value (float): the value to fill with + + Example:: + >>> x = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=torch.float) + >>> index = torch.tensor([0, 2]) + >>> x.index_fill_(1, index, -1) + tensor([[-1., 2., -1.], + [-1., 5., -1.], + [-1., 8., -1.]]) + """ + ... + @overload + def index_fill_(self, dim: _int, index: Tensor, value: Union[Number, _complex]) -> Tensor: + r""" + index_fill_(dim, index, value) -> Tensor + + Fills the elements of the :attr:`self` tensor with value :attr:`value` by + selecting the indices in the order given in :attr:`index`. + + Args: + dim (int): dimension along which to index + index (LongTensor): indices of :attr:`self` tensor to fill in + value (float): the value to fill with + + Example:: + >>> x = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=torch.float) + >>> index = torch.tensor([0, 2]) + >>> x.index_fill_(1, index, -1) + tensor([[-1., 2., -1.], + [-1., 5., -1.], + [-1., 8., -1.]]) + """ + ... + @overload + def index_fill_(self, dim: Union[str, ellipsis, None], index: Tensor, value: Union[Number, _complex]) -> Tensor: + r""" + index_fill_(dim, index, value) -> Tensor + + Fills the elements of the :attr:`self` tensor with value :attr:`value` by + selecting the indices in the order given in :attr:`index`. + + Args: + dim (int): dimension along which to index + index (LongTensor): indices of :attr:`self` tensor to fill in + value (float): the value to fill with + + Example:: + >>> x = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=torch.float) + >>> index = torch.tensor([0, 2]) + >>> x.index_fill_(1, index, -1) + tensor([[-1., 2., -1.], + [-1., 5., -1.], + [-1., 8., -1.]]) + """ + ... + def index_put(self, indices: Optional[Union[Tuple[Tensor, ...], List[Tensor]]], values: Tensor, accumulate: _bool = False) -> Tensor: + r""" + index_put(indices, values, accumulate=False) -> Tensor + + Out-place version of :meth:`~Tensor.index_put_`. + """ + ... + def index_put_(self, indices: Optional[Union[Tuple[Tensor, ...], List[Tensor]]], values: Tensor, accumulate: _bool = False) -> Tensor: + r""" + index_put_(indices, values, accumulate=False) -> Tensor + + Puts values from the tensor :attr:`values` into the tensor :attr:`self` using + the indices specified in :attr:`indices` (which is a tuple of Tensors). The + expression ``tensor.index_put_(indices, values)`` is equivalent to + ``tensor[indices] = values``. Returns :attr:`self`. + + If :attr:`accumulate` is ``True``, the elements in :attr:`values` are added to + :attr:`self`. If accumulate is ``False``, the behavior is undefined if indices + contain duplicate elements. + + Args: + indices (tuple of LongTensor): tensors used to index into `self`. + values (Tensor): tensor of same dtype as `self`. 
+ accumulate (bool): whether to accumulate into self + """ + ... + def index_reduce(self, dim: _int, index: Tensor, source: Tensor, reduce: str, *, include_self: _bool = True) -> Tensor: ... + def index_reduce_(self, dim: _int, index: Tensor, source: Tensor, reduce: str, *, include_self: _bool = True) -> Tensor: + r""" + index_reduce_(dim, index, source, reduce, *, include_self=True) -> Tensor + + Accumulate the elements of ``source`` into the :attr:`self` + tensor by accumulating to the indices in the order given in :attr:`index` + using the reduction given by the ``reduce`` argument. For example, if ``dim == 0``, + ``index[i] == j``, ``reduce == prod`` and ``include_self == True`` then the ``i``\ th + row of ``source`` is multiplied by the ``j``\ th row of :attr:`self`. If + :obj:`include_self="True"`, the values in the :attr:`self` tensor are included + in the reduction, otherwise, rows in the :attr:`self` tensor that are accumulated + to are treated as if they were filled with the reduction identites. + + The :attr:`dim`\ th dimension of ``source`` must have the same size as the + length of :attr:`index` (which must be a vector), and all other dimensions must + match :attr:`self`, or an error will be raised. + + For a 3-D tensor with :obj:`reduce="prod"` and :obj:`include_self=True` the + output is given as:: + + self[index[i], :, :] *= src[i, :, :] # if dim == 0 + self[:, index[i], :] *= src[:, i, :] # if dim == 1 + self[:, :, index[i]] *= src[:, :, i] # if dim == 2 + + Note: + This operation may behave nondeterministically when given tensors on a CUDA device. See :doc:`/notes/randomness` for more information. + + .. note:: + + This function only supports floating point tensors. + + .. warning:: + + This function is in beta and may change in the near future. + + Args: + dim (int): dimension along which to index + index (Tensor): indices of ``source`` to select from, + should have dtype either `torch.int64` or `torch.int32` + source (FloatTensor): the tensor containing values to accumulate + reduce (str): the reduction operation to apply + (:obj:`"prod"`, :obj:`"mean"`, :obj:`"amax"`, :obj:`"amin"`) + + Keyword args: + include_self (bool): whether the elements from the ``self`` tensor are + included in the reduction + + Example:: + + >>> x = torch.empty(5, 3).fill_(2) + >>> t = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]], dtype=torch.float) + >>> index = torch.tensor([0, 4, 2, 0]) + >>> x.index_reduce_(0, index, t, 'prod') + tensor([[20., 44., 72.], + [ 2., 2., 2.], + [14., 16., 18.], + [ 2., 2., 2.], + [ 8., 10., 12.]]) + >>> x = torch.empty(5, 3).fill_(2) + >>> x.index_reduce_(0, index, t, 'prod', include_self=False) + tensor([[10., 22., 36.], + [ 2., 2., 2.], + [ 7., 8., 9.], + [ 2., 2., 2.], + [ 4., 5., 6.]]) + """ + ... + @overload + def index_select(self, dim: _int, index: Tensor) -> Tensor: + r""" + index_select(dim, index) -> Tensor + + See :func:`torch.index_select` + """ + ... + @overload + def index_select(self, dim: Union[str, ellipsis, None], index: Tensor) -> Tensor: + r""" + index_select(dim, index) -> Tensor + + See :func:`torch.index_select` + """ + ... + def indices(self) -> Tensor: + r""" + indices() -> Tensor + + Return the indices tensor of a :ref:`sparse COO tensor `. + + .. warning:: + Throws an error if :attr:`self` is not a sparse COO tensor. + + See also :meth:`Tensor.values`. + + .. note:: + This method can only be called on a coalesced sparse tensor. See + :meth:`Tensor.coalesce` for details. + """ + ... 
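+    # A minimal sketch of ``index_put_`` above, which is equivalent to the indexing
+    # assignment ``t[rows, cols] = vals``; the tensors here are illustrative:
+    #
+    #   >>> t = torch.zeros(3, 3)
+    #   >>> rows, cols = torch.tensor([0, 2]), torch.tensor([1, 1])
+    #   >>> t.index_put_((rows, cols), torch.tensor([4., 5.]))
+    #   tensor([[0., 4., 0.],
+    #           [0., 0., 0.],
+    #           [0., 5., 0.]])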
+ def inner(self, other: Tensor) -> Tensor: + r""" + inner(other) -> Tensor + + See :func:`torch.inner`. + """ + ... + def int(self) -> Tensor: + r""" + int(memory_format=torch.preserve_format) -> Tensor + + ``self.int()`` is equivalent to ``self.to(torch.int32)``. See :func:`to`. + + Args: + memory_format (:class:`torch.memory_format`, optional): the desired memory format of + returned Tensor. Default: ``torch.preserve_format``. + """ + ... + def int_repr(self) -> Tensor: + r""" + int_repr() -> Tensor + + Given a quantized Tensor, + ``self.int_repr()`` returns a CPU Tensor with uint8_t as data type that stores the + underlying uint8_t values of the given Tensor. + """ + ... + def inverse(self) -> Tensor: + r""" + inverse() -> Tensor + + See :func:`torch.inverse` + """ + ... + def is_coalesced(self) -> _bool: + r""" + is_coalesced() -> bool + + Returns ``True`` if :attr:`self` is a :ref:`sparse COO tensor + ` that is coalesced, ``False`` otherwise. + + .. warning:: + Throws an error if :attr:`self` is not a sparse COO tensor. + + See :meth:`coalesce` and :ref:`uncoalesced tensors `. + """ + ... + def is_complex(self) -> _bool: + r""" + is_complex() -> bool + + Returns True if the data type of :attr:`self` is a complex data type. + """ + ... + def is_conj(self) -> _bool: + r""" + is_conj() -> bool + + Returns True if the conjugate bit of :attr:`self` is set to true. + """ + ... + def is_contiguous(self, memory_format=torch.contiguous_format) -> _bool: + r""" + is_contiguous(memory_format=torch.contiguous_format) -> bool + + Returns True if :attr:`self` tensor is contiguous in memory in the order specified + by memory format. + + Args: + memory_format (:class:`torch.memory_format`, optional): Specifies memory allocation + order. Default: ``torch.contiguous_format``. + """ + ... + is_cpu: _bool + r"""Is ``True`` if the Tensor is stored on the CPU, ``False`` otherwise.""" + is_cuda: _bool + r"""Is ``True`` if the Tensor is stored on the GPU, ``False`` otherwise.""" + def is_distributed(self) -> _bool: ... + def is_floating_point(self) -> _bool: + r""" + is_floating_point() -> bool + + Returns True if the data type of :attr:`self` is a floating point data type. + """ + ... + def is_inference(self) -> _bool: + r""" + is_inference() -> bool + + See :func:`torch.is_inference` + """ + ... + is_ipu: _bool + r"""Is ``True`` if the Tensor is stored on the IPU, ``False`` otherwise.""" + is_leaf: _bool + r"""All Tensors that have :attr:`requires_grad` which is ``False`` will be leaf Tensors by convention. + + For Tensors that have :attr:`requires_grad` which is ``True``, they will be leaf Tensors if they were + created by the user. This means that they are not the result of an operation and so + :attr:`grad_fn` is None. + + Only leaf Tensors will have their :attr:`grad` populated during a call to :func:`backward`. + To get :attr:`grad` populated for non-leaf Tensors, you can use :func:`retain_grad`. 
+ + Example:: + + >>> a = torch.rand(10, requires_grad=True) + >>> a.is_leaf + True + >>> b = torch.rand(10, requires_grad=True).cuda() + >>> b.is_leaf + False + # b was created by the operation that cast a cpu Tensor into a cuda Tensor + >>> c = torch.rand(10, requires_grad=True) + 2 + >>> c.is_leaf + False + # c was created by the addition operation + >>> d = torch.rand(10).cuda() + >>> d.is_leaf + True + # d does not require gradients and so has no operation creating it (that is tracked by the autograd engine) + >>> e = torch.rand(10).cuda().requires_grad_() + >>> e.is_leaf + True + # e requires gradients and has no operations creating it + >>> f = torch.rand(10, requires_grad=True, device="cuda") + >>> f.is_leaf + True + # f requires grad, has no operation creating it""" + is_meta: _bool + r"""Is ``True`` if the Tensor is a meta tensor, ``False`` otherwise. Meta tensors + are like normal tensors, but they carry no data.""" + is_mkldnn: _bool + is_mps: _bool + r"""Is ``True`` if the Tensor is stored on the MPS device, ``False`` otherwise.""" + is_mtia: _bool + def is_neg(self) -> _bool: + r""" + is_neg() -> bool + + Returns True if the negative bit of :attr:`self` is set to true. + """ + ... + is_nested: _bool + def is_nonzero(self) -> _bool: ... + is_ort: _bool + def is_pinned(self, device: Optional[Optional[DeviceLikeType]] = None) -> _bool: + r""" + Returns true if this tensor resides in pinned memory. + """ + ... + is_quantized: _bool + r"""Is ``True`` if the Tensor is quantized, ``False`` otherwise.""" + def is_same_size(self, other: Tensor) -> _bool: ... + def is_set_to(self, tensor: Tensor) -> _bool: + r""" + is_set_to(tensor) -> bool + + Returns True if both tensors are pointing to the exact same memory (same + storage, offset, size and stride). + """ + ... + def is_signed(self) -> _bool: + r""" + is_signed() -> bool + + Returns True if the data type of :attr:`self` is a signed data type. + """ + ... + is_sparse: _bool + r"""Is ``True`` if the Tensor uses sparse COO storage layout, ``False`` otherwise.""" + is_sparse_csr: _bool + r"""Is ``True`` if the Tensor uses sparse CSR storage layout, ``False`` otherwise.""" + is_vulkan: _bool + def isclose(self, other: Tensor, rtol: _float = 1e-05, atol: _float = 1e-08, equal_nan: _bool = False) -> Tensor: + r""" + isclose(other, rtol=1e-05, atol=1e-08, equal_nan=False) -> Tensor + + See :func:`torch.isclose` + """ + ... + def isfinite(self) -> Tensor: + r""" + isfinite() -> Tensor + + See :func:`torch.isfinite` + """ + ... + def isinf(self) -> Tensor: + r""" + isinf() -> Tensor + + See :func:`torch.isinf` + """ + ... + def isnan(self) -> Tensor: + r""" + isnan() -> Tensor + + See :func:`torch.isnan` + """ + ... + def isneginf(self) -> Tensor: + r""" + isneginf() -> Tensor + + See :func:`torch.isneginf` + """ + ... + def isposinf(self) -> Tensor: + r""" + isposinf() -> Tensor + + See :func:`torch.isposinf` + """ + ... + def isreal(self) -> Tensor: + r""" + isreal() -> Tensor + + See :func:`torch.isreal` + """ + ... + def istft(self, n_fft: _int, hop_length: Optional[_int] = None, win_length: Optional[_int] = None, window: Optional[Tensor] = None, center: _bool = True, normalized: _bool = False, onesided: Optional[_bool] = None, length: Optional[_int] = None, return_complex: _bool = False) -> Tensor: + r""" + istft(n_fft, hop_length=None, win_length=None, window=None, + center=True, normalized=False, onesided=True, length=None) -> Tensor + + See :func:`torch.istft` + """ + ... 
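+    # A minimal sketch of ``isclose`` above with the default ``rtol``/``atol`` and the
+    # effect of ``equal_nan``; the inputs are illustrative:
+    #
+    #   >>> a = torch.tensor([1.0, 2.0, float('nan')])
+    #   >>> b = torch.tensor([1.0, 2.1, float('nan')])
+    #   >>> a.isclose(b)
+    #   tensor([ True, False, False])
+    #   >>> a.isclose(b, equal_nan=True)
+    #   tensor([ True, False,  True])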
+ def item(self) -> Number: + r""" + item() -> number + + Returns the value of this tensor as a standard Python number. This only works + for tensors with one element. For other cases, see :meth:`~Tensor.tolist`. + + This operation is not differentiable. + + Example:: + + >>> x = torch.tensor([1.0]) + >>> x.item() + 1.0 + """ + ... + def kron(self, other: Tensor) -> Tensor: + r""" + kron(other) -> Tensor + + See :func:`torch.kron` + """ + ... + @overload + def kthvalue(self, k: _int, dim: _int = -1, keepdim: _bool = False) -> torch.return_types.kthvalue: + r""" + kthvalue(k, dim=None, keepdim=False) -> (Tensor, LongTensor) + + See :func:`torch.kthvalue` + """ + ... + @overload + def kthvalue(self, k: _int, dim: Union[str, ellipsis, None], keepdim: _bool = False) -> torch.return_types.kthvalue: + r""" + kthvalue(k, dim=None, keepdim=False) -> (Tensor, LongTensor) + + See :func:`torch.kthvalue` + """ + ... + def lcm(self, other: Tensor) -> Tensor: + r""" + lcm(other) -> Tensor + + See :func:`torch.lcm` + """ + ... + def lcm_(self, other: Tensor) -> Tensor: + r""" + lcm_(other) -> Tensor + + In-place version of :meth:`~Tensor.lcm` + """ + ... + def ldexp(self, other: Tensor) -> Tensor: + r""" + ldexp(other) -> Tensor + + See :func:`torch.ldexp` + """ + ... + def ldexp_(self, other: Tensor) -> Tensor: + r""" + ldexp_(other) -> Tensor + + In-place version of :meth:`~Tensor.ldexp` + """ + ... + @overload + def le(self, other: Tensor) -> Tensor: + r""" + le(other) -> Tensor + + See :func:`torch.le`. + """ + ... + @overload + def le(self, other: Union[Number, _complex]) -> Tensor: + r""" + le(other) -> Tensor + + See :func:`torch.le`. + """ + ... + @overload + def le_(self, other: Tensor) -> Tensor: + r""" + le_(other) -> Tensor + + In-place version of :meth:`~Tensor.le`. + """ + ... + @overload + def le_(self, other: Union[Number, _complex]) -> Tensor: + r""" + le_(other) -> Tensor + + In-place version of :meth:`~Tensor.le`. + """ + ... + @overload + def lerp(self, end: Tensor, weight: Tensor) -> Tensor: + r""" + lerp(end, weight) -> Tensor + + See :func:`torch.lerp` + """ + ... + @overload + def lerp(self, end: Tensor, weight: Union[Number, _complex]) -> Tensor: + r""" + lerp(end, weight) -> Tensor + + See :func:`torch.lerp` + """ + ... + @overload + def lerp_(self, end: Tensor, weight: Tensor) -> Tensor: + r""" + lerp_(end, weight) -> Tensor + + In-place version of :meth:`~Tensor.lerp` + """ + ... + @overload + def lerp_(self, end: Tensor, weight: Union[Number, _complex]) -> Tensor: + r""" + lerp_(end, weight) -> Tensor + + In-place version of :meth:`~Tensor.lerp` + """ + ... + @overload + def less(self, other: Tensor) -> Tensor: + r""" + lt(other) -> Tensor + + See :func:`torch.less`. + """ + ... + @overload + def less(self, other: Union[Number, _complex]) -> Tensor: + r""" + lt(other) -> Tensor + + See :func:`torch.less`. + """ + ... + @overload + def less_(self, other: Tensor) -> Tensor: + r""" + less_(other) -> Tensor + + In-place version of :meth:`~Tensor.less`. + """ + ... + @overload + def less_(self, other: Union[Number, _complex]) -> Tensor: + r""" + less_(other) -> Tensor + + In-place version of :meth:`~Tensor.less`. + """ + ... + @overload + def less_equal(self, other: Tensor) -> Tensor: + r""" + less_equal(other) -> Tensor + + See :func:`torch.less_equal`. + """ + ... + @overload + def less_equal(self, other: Union[Number, _complex]) -> Tensor: + r""" + less_equal(other) -> Tensor + + See :func:`torch.less_equal`. + """ + ... 
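+ # A minimal doctest-style sketch of ``Tensor.lerp`` (illustrative only); it
+ # computes the linear interpolation ``self + weight * (end - self)``, with the
+ # weight given either as a scalar or per element:
+ #
+ #     >>> start = torch.zeros(3)
+ #     >>> end = torch.full((3,), 10.0)
+ #     >>> start.lerp(end, 0.25)
+ #     tensor([2.5000, 2.5000, 2.5000])
+ #     >>> start.lerp(end, torch.tensor([0.0, 0.5, 1.0]))
+ #     tensor([ 0.,  5., 10.])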
+ @overload + def less_equal_(self, other: Tensor) -> Tensor: + r""" + less_equal_(other) -> Tensor + + In-place version of :meth:`~Tensor.less_equal`. + """ + ... + @overload + def less_equal_(self, other: Union[Number, _complex]) -> Tensor: + r""" + less_equal_(other) -> Tensor + + In-place version of :meth:`~Tensor.less_equal`. + """ + ... + def lgamma(self) -> Tensor: + r""" + lgamma() -> Tensor + + See :func:`torch.lgamma` + """ + ... + def lgamma_(self) -> Tensor: + r""" + lgamma_() -> Tensor + + In-place version of :meth:`~Tensor.lgamma` + """ + ... + def log(self) -> Tensor: + r""" + log() -> Tensor + + See :func:`torch.log` + """ + ... + def log10(self) -> Tensor: + r""" + log10() -> Tensor + + See :func:`torch.log10` + """ + ... + def log10_(self) -> Tensor: + r""" + log10_() -> Tensor + + In-place version of :meth:`~Tensor.log10` + """ + ... + def log1p(self) -> Tensor: + r""" + log1p() -> Tensor + + See :func:`torch.log1p` + """ + ... + def log1p_(self) -> Tensor: + r""" + log1p_() -> Tensor + + In-place version of :meth:`~Tensor.log1p` + """ + ... + def log2(self) -> Tensor: + r""" + log2() -> Tensor + + See :func:`torch.log2` + """ + ... + def log2_(self) -> Tensor: + r""" + log2_() -> Tensor + + In-place version of :meth:`~Tensor.log2` + """ + ... + def log_(self) -> Tensor: + r""" + log_() -> Tensor + + In-place version of :meth:`~Tensor.log` + """ + ... + def log_normal_(self, mean: _float = 1, std: _float = 2, *, generator: Optional[Generator] = None) -> Tensor: + r""" + log_normal_(mean=1, std=2, *, generator=None) + + Fills :attr:`self` tensor with numbers samples from the log-normal distribution + parameterized by the given mean :math:`\mu` and standard deviation + :math:`\sigma`. Note that :attr:`mean` and :attr:`std` are the mean and + standard deviation of the underlying normal distribution, and not of the + returned distribution: + + .. math:: + + f(x) = \dfrac{1}{x \sigma \sqrt{2\pi}}\ e^{-\frac{(\ln x - \mu)^2}{2\sigma^2}} + """ + ... + @overload + def log_softmax(self, dim: _int, dtype: Optional[_dtype] = None) -> Tensor: ... + @overload + def log_softmax(self, dim: Union[str, ellipsis, None], *, dtype: Optional[_dtype] = None) -> Tensor: ... + def logaddexp(self, other: Tensor) -> Tensor: + r""" + logaddexp(other) -> Tensor + + See :func:`torch.logaddexp` + """ + ... + def logaddexp2(self, other: Tensor) -> Tensor: + r""" + logaddexp2(other) -> Tensor + + See :func:`torch.logaddexp2` + """ + ... + @overload + def logcumsumexp(self, dim: _int) -> Tensor: + r""" + logcumsumexp(dim) -> Tensor + + See :func:`torch.logcumsumexp` + """ + ... + @overload + def logcumsumexp(self, dim: Union[str, ellipsis, None]) -> Tensor: + r""" + logcumsumexp(dim) -> Tensor + + See :func:`torch.logcumsumexp` + """ + ... + def logdet(self) -> Tensor: + r""" + logdet() -> Tensor + + See :func:`torch.logdet` + """ + ... + def logical_and(self, other: Tensor) -> Tensor: + r""" + logical_and() -> Tensor + + See :func:`torch.logical_and` + """ + ... + def logical_and_(self, other: Tensor) -> Tensor: + r""" + logical_and_() -> Tensor + + In-place version of :meth:`~Tensor.logical_and` + """ + ... + def logical_not(self) -> Tensor: + r""" + logical_not() -> Tensor + + See :func:`torch.logical_not` + """ + ... + def logical_not_(self) -> Tensor: + r""" + logical_not_() -> Tensor + + In-place version of :meth:`~Tensor.logical_not` + """ + ... + def logical_or(self, other: Tensor) -> Tensor: + r""" + logical_or() -> Tensor + + See :func:`torch.logical_or` + """ + ... 
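+ # A minimal doctest-style sketch of ``Tensor.logaddexp`` (illustrative only;
+ # printed values rounded); it evaluates ``log(exp(self) + exp(other))`` without
+ # the underflow that the naive formula suffers from:
+ #
+ #     >>> a = torch.tensor([-1000.0, 0.0])
+ #     >>> b = torch.tensor([-1000.0, 0.0])
+ #     >>> a.logaddexp(b)
+ #     tensor([-999.3069,    0.6931])
+ #     >>> torch.log(a.exp() + b.exp())  # exp(-1000) underflows to 0
+ #     tensor([  -inf, 0.6931])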
+ def logical_or_(self, other: Tensor) -> Tensor: + r""" + logical_or_() -> Tensor + + In-place version of :meth:`~Tensor.logical_or` + """ + ... + def logical_xor(self, other: Tensor) -> Tensor: + r""" + logical_xor() -> Tensor + + See :func:`torch.logical_xor` + """ + ... + def logical_xor_(self, other: Tensor) -> Tensor: + r""" + logical_xor_() -> Tensor + + In-place version of :meth:`~Tensor.logical_xor` + """ + ... + def logit(self, eps: Optional[_float] = None) -> Tensor: + r""" + logit() -> Tensor + + See :func:`torch.logit` + """ + ... + def logit_(self, eps: Optional[_float] = None) -> Tensor: + r""" + logit_() -> Tensor + + In-place version of :meth:`~Tensor.logit` + """ + ... + @overload + def logsumexp(self, dim: Union[_int, _size], keepdim: _bool = False) -> Tensor: + r""" + logsumexp(dim, keepdim=False) -> Tensor + + See :func:`torch.logsumexp` + """ + ... + @overload + def logsumexp(self, dim: Sequence[Union[str, ellipsis, None]], keepdim: _bool = False) -> Tensor: + r""" + logsumexp(dim, keepdim=False) -> Tensor + + See :func:`torch.logsumexp` + """ + ... + def long(self) -> Tensor: + r""" + long(memory_format=torch.preserve_format) -> Tensor + + ``self.long()`` is equivalent to ``self.to(torch.int64)``. See :func:`to`. + + Args: + memory_format (:class:`torch.memory_format`, optional): the desired memory format of + returned Tensor. Default: ``torch.preserve_format``. + """ + ... + @overload + def lt(self, other: Tensor) -> Tensor: + r""" + lt(other) -> Tensor + + See :func:`torch.lt`. + """ + ... + @overload + def lt(self, other: Union[Number, _complex]) -> Tensor: + r""" + lt(other) -> Tensor + + See :func:`torch.lt`. + """ + ... + @overload + def lt_(self, other: Tensor) -> Tensor: + r""" + lt_(other) -> Tensor + + In-place version of :meth:`~Tensor.lt`. + """ + ... + @overload + def lt_(self, other: Union[Number, _complex]) -> Tensor: + r""" + lt_(other) -> Tensor + + In-place version of :meth:`~Tensor.lt`. + """ + ... + def lu_solve(self, LU_data: Tensor, LU_pivots: Tensor) -> Tensor: + r""" + lu_solve(LU_data, LU_pivots) -> Tensor + + See :func:`torch.lu_solve` + """ + ... + def map2_(self, x: Tensor, y: Tensor, callable: Callable) -> Tensor: ... + def map_(self, tensor: Tensor, callable: Callable) -> Tensor: + r""" + map_(tensor, callable) + + Applies :attr:`callable` for each element in :attr:`self` tensor and the given + :attr:`tensor` and stores the results in :attr:`self` tensor. :attr:`self` tensor and + the given :attr:`tensor` must be :ref:`broadcastable `. + + The :attr:`callable` should have the signature:: + + def callable(a, b) -> number + """ + ... + @overload + def masked_fill(self, mask: Tensor, value: Tensor) -> Tensor: + r""" + masked_fill(mask, value) -> Tensor + + Out-of-place version of :meth:`torch.Tensor.masked_fill_` + """ + ... + @overload + def masked_fill(self, mask: Tensor, value: Union[Number, _complex]) -> Tensor: + r""" + masked_fill(mask, value) -> Tensor + + Out-of-place version of :meth:`torch.Tensor.masked_fill_` + """ + ... + @overload + def masked_fill_(self, mask: Tensor, value: Tensor) -> Tensor: + r""" + masked_fill_(mask, value) + + Fills elements of :attr:`self` tensor with :attr:`value` where :attr:`mask` is + True. The shape of :attr:`mask` must be + :ref:`broadcastable ` with the shape of the underlying + tensor. + + Args: + mask (BoolTensor): the boolean mask + value (float): the value to fill in with + """ + ... 
+ @overload + def masked_fill_(self, mask: Tensor, value: Union[Number, _complex]) -> Tensor: + r""" + masked_fill_(mask, value) + + Fills elements of :attr:`self` tensor with :attr:`value` where :attr:`mask` is + True. The shape of :attr:`mask` must be + :ref:`broadcastable ` with the shape of the underlying + tensor. + + Args: + mask (BoolTensor): the boolean mask + value (float): the value to fill in with + """ + ... + def masked_scatter(self, mask: Tensor, source: Tensor) -> Tensor: + r""" + masked_scatter(mask, tensor) -> Tensor + + Out-of-place version of :meth:`torch.Tensor.masked_scatter_` + + .. note:: + + The inputs :attr:`self` and :attr:`mask` + :ref:`broadcast `. + + Example: + + >>> self = torch.tensor([0, 0, 0, 0, 0]) + >>> mask = torch.tensor([[0, 0, 0, 1, 1], [1, 1, 0, 1, 1]]) + >>> source = torch.tensor([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]) + >>> self.masked_scatter(mask, source) + tensor([[0, 0, 0, 0, 1], + [2, 3, 0, 4, 5]]) + """ + ... + def masked_scatter_(self, mask: Tensor, source: Tensor) -> Tensor: + r""" + masked_scatter_(mask, source) + + Copies elements from :attr:`source` into :attr:`self` tensor at positions where + the :attr:`mask` is True. Elements from :attr:`source` are copied into :attr:`self` + starting at position 0 of :attr:`source` and continuing in order one-by-one for each + occurrence of :attr:`mask` being True. + The shape of :attr:`mask` must be :ref:`broadcastable ` + with the shape of the underlying tensor. The :attr:`source` should have at least + as many elements as the number of ones in :attr:`mask`. + + Args: + mask (BoolTensor): the boolean mask + source (Tensor): the tensor to copy from + + .. note:: + + The :attr:`mask` operates on the :attr:`self` tensor, not on the given + :attr:`source` tensor. + + Example: + + >>> self = torch.tensor([[0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]) + >>> mask = torch.tensor([[0, 0, 0, 1, 1], [1, 1, 0, 1, 1]]) + >>> source = torch.tensor([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]) + >>> self.masked_scatter_(mask, source) + tensor([[0, 0, 0, 0, 1], + [2, 3, 0, 4, 5]]) + """ + ... + def masked_select(self, mask: Tensor) -> Tensor: + r""" + masked_select(mask) -> Tensor + + See :func:`torch.masked_select` + """ + ... + def matmul(self, other: Tensor) -> Tensor: + r""" + matmul(tensor2) -> Tensor + + See :func:`torch.matmul` + """ + ... + def matrix_exp(self) -> Tensor: + r""" + matrix_exp() -> Tensor + + See :func:`torch.matrix_exp` + """ + ... + def matrix_power(self, n: _int) -> Tensor: + r""" + matrix_power(n) -> Tensor + + .. note:: :meth:`~Tensor.matrix_power` is deprecated, use :func:`torch.linalg.matrix_power` instead. + + Alias for :func:`torch.linalg.matrix_power` + """ + ... + @overload + def max(self) -> Tensor: + r""" + max(dim=None, keepdim=False) -> Tensor or (Tensor, Tensor) + + See :func:`torch.max` + """ + ... + @overload + def max(self, other: Tensor) -> Tensor: + r""" + max(dim=None, keepdim=False) -> Tensor or (Tensor, Tensor) + + See :func:`torch.max` + """ + ... + @overload + def max(self, dim: _int, keepdim: _bool = False) -> torch.return_types.max: + r""" + max(dim=None, keepdim=False) -> Tensor or (Tensor, Tensor) + + See :func:`torch.max` + """ + ... + @overload + def max(self, dim: Union[str, ellipsis, None], keepdim: _bool = False) -> torch.return_types.max: + r""" + max(dim=None, keepdim=False) -> Tensor or (Tensor, Tensor) + + See :func:`torch.max` + """ + ... + def maximum(self, other: Tensor) -> Tensor: + r""" + maximum(other) -> Tensor + + See :func:`torch.maximum` + """ + ... 
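+ # A minimal doctest-style sketch of ``Tensor.masked_fill_`` (illustrative
+ # only), showing that the boolean mask broadcasts against ``self``:
+ #
+ #     >>> x = torch.zeros(2, 3)
+ #     >>> mask = torch.tensor([True, False, True])  # shape (3,), broadcasts over rows
+ #     >>> x.masked_fill_(mask, 7.0)
+ #     tensor([[7., 0., 7.],
+ #             [7., 0., 7.]])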
+ @overload + def mean(self, *, dtype: Optional[_dtype] = None) -> Tensor: + r""" + mean(dim=None, keepdim=False, *, dtype=None) -> Tensor + + See :func:`torch.mean` + """ + ... + @overload + def mean(self, dim: Optional[Union[_int, _size]], keepdim: _bool = False, *, dtype: Optional[_dtype] = None) -> Tensor: + r""" + mean(dim=None, keepdim=False, *, dtype=None) -> Tensor + + See :func:`torch.mean` + """ + ... + @overload + def mean(self, dim: Sequence[Union[str, ellipsis, None]], keepdim: _bool = False, *, dtype: Optional[_dtype] = None) -> Tensor: + r""" + mean(dim=None, keepdim=False, *, dtype=None) -> Tensor + + See :func:`torch.mean` + """ + ... + @overload + def median(self) -> Tensor: + r""" + median(dim=None, keepdim=False) -> (Tensor, LongTensor) + + See :func:`torch.median` + """ + ... + @overload + def median(self, dim: _int, keepdim: _bool = False) -> torch.return_types.median: + r""" + median(dim=None, keepdim=False) -> (Tensor, LongTensor) + + See :func:`torch.median` + """ + ... + @overload + def median(self, dim: Union[str, ellipsis, None], keepdim: _bool = False) -> torch.return_types.median: + r""" + median(dim=None, keepdim=False) -> (Tensor, LongTensor) + + See :func:`torch.median` + """ + ... + @overload + def min(self) -> Tensor: + r""" + min(dim=None, keepdim=False) -> Tensor or (Tensor, Tensor) + + See :func:`torch.min` + """ + ... + @overload + def min(self, other: Tensor) -> Tensor: + r""" + min(dim=None, keepdim=False) -> Tensor or (Tensor, Tensor) + + See :func:`torch.min` + """ + ... + @overload + def min(self, dim: _int, keepdim: _bool = False) -> torch.return_types.min: + r""" + min(dim=None, keepdim=False) -> Tensor or (Tensor, Tensor) + + See :func:`torch.min` + """ + ... + @overload + def min(self, dim: Union[str, ellipsis, None], keepdim: _bool = False) -> torch.return_types.min: + r""" + min(dim=None, keepdim=False) -> Tensor or (Tensor, Tensor) + + See :func:`torch.min` + """ + ... + def minimum(self, other: Tensor) -> Tensor: + r""" + minimum(other) -> Tensor + + See :func:`torch.minimum` + """ + ... + def mm(self, mat2: Tensor) -> Tensor: + r""" + mm(mat2) -> Tensor + + See :func:`torch.mm` + """ + ... + @overload + def mode(self, dim: _int = -1, keepdim: _bool = False) -> torch.return_types.mode: + r""" + mode(dim=None, keepdim=False) -> (Tensor, LongTensor) + + See :func:`torch.mode` + """ + ... + @overload + def mode(self, dim: Union[str, ellipsis, None], keepdim: _bool = False) -> torch.return_types.mode: + r""" + mode(dim=None, keepdim=False) -> (Tensor, LongTensor) + + See :func:`torch.mode` + """ + ... + @overload + def moveaxis(self, source: _int, destination: _int) -> Tensor: + r""" + moveaxis(source, destination) -> Tensor + + See :func:`torch.moveaxis` + """ + ... + @overload + def moveaxis(self, source: _size, destination: _size) -> Tensor: + r""" + moveaxis(source, destination) -> Tensor + + See :func:`torch.moveaxis` + """ + ... + @overload + def movedim(self, source: _int, destination: _int) -> Tensor: + r""" + movedim(source, destination) -> Tensor + + See :func:`torch.movedim` + """ + ... + @overload + def movedim(self, source: _size, destination: _size) -> Tensor: + r""" + movedim(source, destination) -> Tensor + + See :func:`torch.movedim` + """ + ... + def msort(self) -> Tensor: + r""" + msort() -> Tensor + + See :func:`torch.msort` + """ + ... + def mul(self, other: Union[Tensor, Number, _complex, torch.SymInt, torch.SymFloat], *, out: Optional[Tensor] = None) -> Tensor: + r""" + mul(value) -> Tensor + + See :func:`torch.mul`. 
+ """ + ... + def mul_(self, other: Union[Tensor, Number, _complex, torch.SymInt, torch.SymFloat]) -> Tensor: + r""" + mul_(value) -> Tensor + + In-place version of :meth:`~Tensor.mul`. + """ + ... + def multinomial(self, num_samples: _int, replacement: _bool = False, *, generator: Optional[Generator] = None) -> Tensor: + r""" + multinomial(num_samples, replacement=False, *, generator=None) -> Tensor + + See :func:`torch.multinomial` + """ + ... + @overload + def multiply(self, other: Tensor) -> Tensor: + r""" + multiply(value) -> Tensor + + See :func:`torch.multiply`. + """ + ... + @overload + def multiply(self, other: Union[Number, _complex]) -> Tensor: + r""" + multiply(value) -> Tensor + + See :func:`torch.multiply`. + """ + ... + @overload + def multiply_(self, other: Tensor) -> Tensor: + r""" + multiply_(value) -> Tensor + + In-place version of :meth:`~Tensor.multiply`. + """ + ... + @overload + def multiply_(self, other: Union[Number, _complex]) -> Tensor: + r""" + multiply_(value) -> Tensor + + In-place version of :meth:`~Tensor.multiply`. + """ + ... + def mv(self, vec: Tensor) -> Tensor: + r""" + mv(vec) -> Tensor + + See :func:`torch.mv` + """ + ... + def mvlgamma(self, p: _int) -> Tensor: + r""" + mvlgamma(p) -> Tensor + + See :func:`torch.mvlgamma` + """ + ... + def mvlgamma_(self, p: _int) -> Tensor: + r""" + mvlgamma_(p) -> Tensor + + In-place version of :meth:`~Tensor.mvlgamma` + """ + ... + def nan_to_num(self, nan: Optional[_float] = None, posinf: Optional[_float] = None, neginf: Optional[_float] = None) -> Tensor: + r""" + nan_to_num(nan=0.0, posinf=None, neginf=None) -> Tensor + + See :func:`torch.nan_to_num`. + """ + ... + def nan_to_num_(self, nan: Optional[_float] = None, posinf: Optional[_float] = None, neginf: Optional[_float] = None) -> Tensor: + r""" + nan_to_num_(nan=0.0, posinf=None, neginf=None) -> Tensor + + In-place version of :meth:`~Tensor.nan_to_num`. + """ + ... + def nanmean(self, dim: Optional[Union[_int, _size]] = None, keepdim: _bool = False, *, dtype: Optional[_dtype] = None) -> Tensor: + r""" + nanmean(dim=None, keepdim=False, *, dtype=None) -> Tensor + + See :func:`torch.nanmean` + """ + ... + @overload + def nanmedian(self) -> Tensor: + r""" + nanmedian(dim=None, keepdim=False) -> (Tensor, LongTensor) + + See :func:`torch.nanmedian` + """ + ... + @overload + def nanmedian(self, dim: _int, keepdim: _bool = False) -> torch.return_types.nanmedian: + r""" + nanmedian(dim=None, keepdim=False) -> (Tensor, LongTensor) + + See :func:`torch.nanmedian` + """ + ... + @overload + def nanmedian(self, dim: Union[str, ellipsis, None], keepdim: _bool = False) -> torch.return_types.nanmedian: + r""" + nanmedian(dim=None, keepdim=False) -> (Tensor, LongTensor) + + See :func:`torch.nanmedian` + """ + ... + @overload + def nanquantile(self, q: Tensor, dim: Optional[_int] = None, keepdim: _bool = False, *, interpolation: str = "linear") -> Tensor: + r""" + nanquantile(q, dim=None, keepdim=False, *, interpolation='linear') -> Tensor + + See :func:`torch.nanquantile` + """ + ... + @overload + def nanquantile(self, q: _float, dim: Optional[_int] = None, keepdim: _bool = False, *, interpolation: str = "linear") -> Tensor: + r""" + nanquantile(q, dim=None, keepdim=False, *, interpolation='linear') -> Tensor + + See :func:`torch.nanquantile` + """ + ... 
+ def nansum(self, dim: Optional[Union[_int, _size]] = None, keepdim: _bool = False, *, dtype: Optional[_dtype] = None) -> Tensor: + r""" + nansum(dim=None, keepdim=False, dtype=None) -> Tensor + + See :func:`torch.nansum` + """ + ... + @overload + def narrow(self, dim: _int, start: Tensor, length: Union[_int, SymInt]) -> Tensor: + r""" + narrow(dimension, start, length) -> Tensor + + See :func:`torch.narrow`. + """ + ... + @overload + def narrow(self, dim: _int, start: Union[_int, SymInt], length: Union[_int, SymInt]) -> Tensor: + r""" + narrow(dimension, start, length) -> Tensor + + See :func:`torch.narrow`. + """ + ... + def narrow_copy(self, dim: _int, start: Union[_int, SymInt], length: Union[_int, SymInt]) -> Tensor: + r""" + narrow_copy(dimension, start, length) -> Tensor + + See :func:`torch.narrow_copy`. + """ + ... + def ndimension(self) -> _int: + r""" + ndimension() -> int + + Alias for :meth:`~Tensor.dim()` + """ + ... + @overload + def ne(self, other: Tensor) -> Tensor: + r""" + ne(other) -> Tensor + + See :func:`torch.ne`. + """ + ... + @overload + def ne(self, other: Union[Number, _complex]) -> Tensor: + r""" + ne(other) -> Tensor + + See :func:`torch.ne`. + """ + ... + @overload + def ne_(self, other: Tensor) -> Tensor: + r""" + ne_(other) -> Tensor + + In-place version of :meth:`~Tensor.ne`. + """ + ... + @overload + def ne_(self, other: Union[Number, _complex]) -> Tensor: + r""" + ne_(other) -> Tensor + + In-place version of :meth:`~Tensor.ne`. + """ + ... + def neg(self) -> Tensor: + r""" + neg() -> Tensor + + See :func:`torch.neg` + """ + ... + def neg_(self) -> Tensor: + r""" + neg_() -> Tensor + + In-place version of :meth:`~Tensor.neg` + """ + ... + def negative(self) -> Tensor: + r""" + negative() -> Tensor + + See :func:`torch.negative` + """ + ... + def negative_(self) -> Tensor: + r""" + negative_() -> Tensor + + In-place version of :meth:`~Tensor.negative` + """ + ... + def nelement(self) -> _int: + r""" + nelement() -> int + + Alias for :meth:`~Tensor.numel` + """ + ... + @overload + def new(self, *args: Any, device: Optional[DeviceLikeType] = None) -> Tensor: ... + @overload + def new(self, storage: Storage) -> Tensor: ... + @overload + def new(self, other: Tensor) -> Tensor: ... + @overload + def new(self, size: _size, *, device: Optional[DeviceLikeType] = None) -> Tensor: ... + @overload + def new_empty(self, size: Sequence[Union[_int, SymInt]], *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + new_empty(size, *, dtype=None, device=None, requires_grad=False, layout=torch.strided, pin_memory=False) -> Tensor + + + Returns a Tensor of size :attr:`size` filled with uninitialized data. + By default, the returned Tensor has the same :class:`torch.dtype` and + :class:`torch.device` as this tensor. + + Args: + size (int...): a list, tuple, or :class:`torch.Size` of integers defining the + shape of the output tensor. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired type of returned tensor. + Default: if None, same :class:`torch.dtype` as this tensor. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if None, same :class:`torch.device` as this tensor. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. 
+ layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + pin_memory (bool, optional): If set, returned tensor would be allocated in + the pinned memory. Works only for CPU tensors. Default: ``False``. + + Example:: + + >>> tensor = torch.ones(()) + >>> tensor.new_empty((2, 3)) + tensor([[ 5.8182e-18, 4.5765e-41, -1.0545e+30], + [ 3.0949e-41, 4.4842e-44, 0.0000e+00]]) + """ + ... + @overload + def new_empty(self, *size: _int, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + new_empty(size, *, dtype=None, device=None, requires_grad=False, layout=torch.strided, pin_memory=False) -> Tensor + + + Returns a Tensor of size :attr:`size` filled with uninitialized data. + By default, the returned Tensor has the same :class:`torch.dtype` and + :class:`torch.device` as this tensor. + + Args: + size (int...): a list, tuple, or :class:`torch.Size` of integers defining the + shape of the output tensor. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired type of returned tensor. + Default: if None, same :class:`torch.dtype` as this tensor. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if None, same :class:`torch.device` as this tensor. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + pin_memory (bool, optional): If set, returned tensor would be allocated in + the pinned memory. Works only for CPU tensors. Default: ``False``. + + Example:: + + >>> tensor = torch.ones(()) + >>> tensor.new_empty((2, 3)) + tensor([[ 5.8182e-18, 4.5765e-41, -1.0545e+30], + [ 3.0949e-41, 4.4842e-44, 0.0000e+00]]) + """ + ... + def new_empty_strided(self, size: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + new_empty_strided(size, stride, dtype=None, device=None, requires_grad=False, layout=torch.strided, pin_memory=False) -> Tensor + + + Returns a Tensor of size :attr:`size` and strides :attr:`stride` filled with + uninitialized data. By default, the returned Tensor has the same + :class:`torch.dtype` and :class:`torch.device` as this tensor. + + Args: + size (int...): a list, tuple, or :class:`torch.Size` of integers defining the + shape of the output tensor. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired type of returned tensor. + Default: if None, same :class:`torch.dtype` as this tensor. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if None, same :class:`torch.device` as this tensor. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + pin_memory (bool, optional): If set, returned tensor would be allocated in + the pinned memory. Works only for CPU tensors. Default: ``False``. 
+ + Example:: + + >>> tensor = torch.ones(()) + >>> tensor.new_empty_strided((2, 3), (3, 1)) + tensor([[ 5.8182e-18, 4.5765e-41, -1.0545e+30], + [ 3.0949e-41, 4.4842e-44, 0.0000e+00]]) + """ + ... + def new_full(self, size: Sequence[Union[_int, SymInt]], fill_value: Union[Number, _complex], *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + new_full(size, fill_value, *, dtype=None, device=None, requires_grad=False, layout=torch.strided, pin_memory=False) -> Tensor + + + Returns a Tensor of size :attr:`size` filled with :attr:`fill_value`. + By default, the returned Tensor has the same :class:`torch.dtype` and + :class:`torch.device` as this tensor. + + Args: + fill_value (scalar): the number to fill the output tensor with. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired type of returned tensor. + Default: if None, same :class:`torch.dtype` as this tensor. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if None, same :class:`torch.device` as this tensor. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + pin_memory (bool, optional): If set, returned tensor would be allocated in + the pinned memory. Works only for CPU tensors. Default: ``False``. + + Example:: + + >>> tensor = torch.ones((2,), dtype=torch.float64) + >>> tensor.new_full((3, 4), 3.141592) + tensor([[ 3.1416, 3.1416, 3.1416, 3.1416], + [ 3.1416, 3.1416, 3.1416, 3.1416], + [ 3.1416, 3.1416, 3.1416, 3.1416]], dtype=torch.float64) + """ + ... + @overload + def new_ones(self, size: _size, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, pin_memory: _bool = False) -> Tensor: + r""" + new_ones(size, *, dtype=None, device=None, requires_grad=False, layout=torch.strided, pin_memory=False) -> Tensor + + + Returns a Tensor of size :attr:`size` filled with ``1``. + By default, the returned Tensor has the same :class:`torch.dtype` and + :class:`torch.device` as this tensor. + + Args: + size (int...): a list, tuple, or :class:`torch.Size` of integers defining the + shape of the output tensor. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired type of returned tensor. + Default: if None, same :class:`torch.dtype` as this tensor. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if None, same :class:`torch.device` as this tensor. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + pin_memory (bool, optional): If set, returned tensor would be allocated in + the pinned memory. Works only for CPU tensors. Default: ``False``. + + Example:: + + >>> tensor = torch.tensor((), dtype=torch.int32) + >>> tensor.new_ones((2, 3)) + tensor([[ 1, 1, 1], + [ 1, 1, 1]], dtype=torch.int32) + """ + ... 
+ @overload
+ def new_ones(self, size: Sequence[Union[_int, SymInt]], *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
+ r"""
+ new_ones(size, *, dtype=None, device=None, requires_grad=False, layout=torch.strided, pin_memory=False) -> Tensor
+
+
+ Returns a Tensor of size :attr:`size` filled with ``1``.
+ By default, the returned Tensor has the same :class:`torch.dtype` and
+ :class:`torch.device` as this tensor.
+
+ Args:
+ size (int...): a list, tuple, or :class:`torch.Size` of integers defining the
+ shape of the output tensor.
+
+ Keyword args:
+ dtype (:class:`torch.dtype`, optional): the desired type of returned tensor.
+ Default: if None, same :class:`torch.dtype` as this tensor.
+ device (:class:`torch.device`, optional): the desired device of returned tensor.
+ Default: if None, same :class:`torch.device` as this tensor.
+ requires_grad (bool, optional): If autograd should record operations on the
+ returned tensor. Default: ``False``.
+ layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
+ Default: ``torch.strided``.
+ pin_memory (bool, optional): If set, returned tensor would be allocated in
+ the pinned memory. Works only for CPU tensors. Default: ``False``.
+
+ Example::
+
+ >>> tensor = torch.tensor((), dtype=torch.int32)
+ >>> tensor.new_ones((2, 3))
+ tensor([[ 1, 1, 1],
+ [ 1, 1, 1]], dtype=torch.int32)
+ """
+ ...
+ @overload
+ def new_ones(self, *size: _int, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
+ r"""
+ new_ones(size, *, dtype=None, device=None, requires_grad=False, layout=torch.strided, pin_memory=False) -> Tensor
+
+
+ Returns a Tensor of size :attr:`size` filled with ``1``.
+ By default, the returned Tensor has the same :class:`torch.dtype` and
+ :class:`torch.device` as this tensor.
+
+ Args:
+ size (int...): a list, tuple, or :class:`torch.Size` of integers defining the
+ shape of the output tensor.
+
+ Keyword args:
+ dtype (:class:`torch.dtype`, optional): the desired type of returned tensor.
+ Default: if None, same :class:`torch.dtype` as this tensor.
+ device (:class:`torch.device`, optional): the desired device of returned tensor.
+ Default: if None, same :class:`torch.device` as this tensor.
+ requires_grad (bool, optional): If autograd should record operations on the
+ returned tensor. Default: ``False``.
+ layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
+ Default: ``torch.strided``.
+ pin_memory (bool, optional): If set, returned tensor would be allocated in
+ the pinned memory. Works only for CPU tensors. Default: ``False``.
+
+ Example::
+
+ >>> tensor = torch.tensor((), dtype=torch.int32)
+ >>> tensor.new_ones((2, 3))
+ tensor([[ 1, 1, 1],
+ [ 1, 1, 1]], dtype=torch.int32)
+ """
+ ...
+ def new_tensor(self, data: Any, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, pin_memory: _bool = False) -> Tensor:
+ r"""
+ new_tensor(data, *, dtype=None, device=None, requires_grad=False, layout=torch.strided, pin_memory=False) -> Tensor
+
+
+ Returns a new Tensor with :attr:`data` as the tensor data.
+ By default, the returned Tensor has the same :class:`torch.dtype` and
+ :class:`torch.device` as this tensor.
+
+ .. warning::
+
+ :func:`new_tensor` always copies :attr:`data`. If you have a Tensor
+ ``data`` and want to avoid a copy, use :func:`torch.Tensor.requires_grad_`
+ or :func:`torch.Tensor.detach`.
+ If you have a numpy array and want to avoid a copy, use
+ :func:`torch.from_numpy`.
+
+ .. warning::
+
+ When data is a tensor `x`, :func:`new_tensor()` reads out 'the data' from whatever it is passed,
+ and constructs a leaf variable. Therefore ``tensor.new_tensor(x)`` is equivalent to ``x.clone().detach()``
+ and ``tensor.new_tensor(x, requires_grad=True)`` is equivalent to ``x.clone().detach().requires_grad_(True)``.
+ The equivalents using ``clone()`` and ``detach()`` are recommended.
+
+ Args:
+ data (array_like): The returned Tensor copies :attr:`data`.
+
+ Keyword args:
+ dtype (:class:`torch.dtype`, optional): the desired type of returned tensor.
+ Default: if None, same :class:`torch.dtype` as this tensor.
+ device (:class:`torch.device`, optional): the desired device of returned tensor.
+ Default: if None, same :class:`torch.device` as this tensor.
+ requires_grad (bool, optional): If autograd should record operations on the
+ returned tensor. Default: ``False``.
+ layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
+ Default: ``torch.strided``.
+ pin_memory (bool, optional): If set, returned tensor would be allocated in
+ the pinned memory. Works only for CPU tensors. Default: ``False``.
+
+ Example::
+
+ >>> tensor = torch.ones((2,), dtype=torch.int8)
+ >>> data = [[0, 1], [2, 3]]
+ >>> tensor.new_tensor(data)
+ tensor([[ 0, 1],
+ [ 2, 3]], dtype=torch.int8)
+ """
+ ...
+ @overload
+ def new_zeros(self, size: Sequence[Union[_int, SymInt]], *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
+ r"""
+ new_zeros(size, *, dtype=None, device=None, requires_grad=False, layout=torch.strided, pin_memory=False) -> Tensor
+
+
+ Returns a Tensor of size :attr:`size` filled with ``0``.
+ By default, the returned Tensor has the same :class:`torch.dtype` and
+ :class:`torch.device` as this tensor.
+
+ Args:
+ size (int...): a list, tuple, or :class:`torch.Size` of integers defining the
+ shape of the output tensor.
+
+ Keyword args:
+ dtype (:class:`torch.dtype`, optional): the desired type of returned tensor.
+ Default: if None, same :class:`torch.dtype` as this tensor.
+ device (:class:`torch.device`, optional): the desired device of returned tensor.
+ Default: if None, same :class:`torch.device` as this tensor.
+ requires_grad (bool, optional): If autograd should record operations on the
+ returned tensor. Default: ``False``.
+ layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
+ Default: ``torch.strided``.
+ pin_memory (bool, optional): If set, returned tensor would be allocated in
+ the pinned memory. Works only for CPU tensors. Default: ``False``.
+
+ Example::
+
+ >>> tensor = torch.tensor((), dtype=torch.float64)
+ >>> tensor.new_zeros((2, 3))
+ tensor([[ 0., 0., 0.],
+ [ 0., 0., 0.]], dtype=torch.float64)
+ """
+ ...
+ @overload + def new_zeros(self, *size: _int, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + new_zeros(size, *, dtype=None, device=None, requires_grad=False, layout=torch.strided, pin_memory=False) -> Tensor + + + Returns a Tensor of size :attr:`size` filled with ``0``. + By default, the returned Tensor has the same :class:`torch.dtype` and + :class:`torch.device` as this tensor. + + Args: + size (int...): a list, tuple, or :class:`torch.Size` of integers defining the + shape of the output tensor. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired type of returned tensor. + Default: if None, same :class:`torch.dtype` as this tensor. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if None, same :class:`torch.device` as this tensor. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + pin_memory (bool, optional): If set, returned tensor would be allocated in + the pinned memory. Works only for CPU tensors. Default: ``False``. + + Example:: + + >>> tensor = torch.tensor((), dtype=torch.float64) + >>> tensor.new_zeros((2, 3)) + tensor([[ 0., 0., 0.], + [ 0., 0., 0.]], dtype=torch.float64) + """ + ... + def nextafter(self, other: Tensor) -> Tensor: + r""" + nextafter(other) -> Tensor + See :func:`torch.nextafter` + """ + ... + def nextafter_(self, other: Tensor) -> Tensor: + r""" + nextafter_(other) -> Tensor + In-place version of :meth:`~Tensor.nextafter` + """ + ... + @overload + def nonzero(self, *, as_tuple: Literal[False] = False) -> Tensor: + r""" + nonzero() -> LongTensor + + See :func:`torch.nonzero` + """ + ... + @overload + def nonzero(self, *, as_tuple: Literal[True]) -> Tuple[Tensor, ...]: + r""" + nonzero() -> LongTensor + + See :func:`torch.nonzero` + """ + ... + def nonzero_static(self, *, size: _int, fill_value: _int = -1) -> Tensor: + r""" + nonzero_static(input, *, size, fill_value=-1) -> Tensor + + Returns a 2-D tensor where each row is the index for a non-zero value. + The returned Tensor has the same `torch.dtype` as `torch.nonzero()`. + + Args: + input (Tensor): the input tensor to count non-zero elements. + + Keyword args: + size (int): the size of non-zero elements expected to be included in the out + tensor. Pad the out tensor with `fill_value` if the `size` is larger + than total number of non-zero elements, truncate out tensor if `size` + is smaller. The size must be a non-negative integer. + fill_value (int): the value to fill the output tensor with when `size` is larger + than the total number of non-zero elements. Default is `-1` to represent + invalid index. 
+ + Example: + + # Example 1: Padding + >>> input_tensor = torch.tensor([[1, 0], [3, 2]]) + >>> static_size = 4 + >>> t = torch.nonzero_static(input_tensor, size = static_size) + tensor([[ 0, 0], + [ 1, 0], + [ 1, 1], + [ -1, -1]], dtype=torch.int64) + + # Example 2: Truncating + >>> input_tensor = torch.tensor([[1, 0], [3, 2]]) + >>> static_size = 2 + >>> t = torch.nonzero_static(input_tensor, size = static_size) + tensor([[ 0, 0], + [ 1, 0]], dtype=torch.int64) + + # Example 3: 0 size + >>> input_tensor = torch.tensor([10]) + >>> static_size = 0 + >>> t = torch.nonzero_static(input_tensor, size = static_size) + tensor([], size=(0, 1), dtype=torch.int64) + + # Example 4: 0 rank input + >>> input_tensor = torch.tensor(10) + >>> static_size = 2 + >>> t = torch.nonzero_static(input_tensor, size = static_size) + tensor([], size=(2, 0), dtype=torch.int64) + """ + ... + def normal_(self, mean: _float = 0, std: _float = 1, *, generator: Optional[Generator] = None) -> Tensor: + r""" + normal_(mean=0, std=1, *, generator=None) -> Tensor + + Fills :attr:`self` tensor with elements samples from the normal distribution + parameterized by :attr:`mean` and :attr:`std`. + """ + ... + @overload + def not_equal(self, other: Tensor) -> Tensor: + r""" + not_equal(other) -> Tensor + + See :func:`torch.not_equal`. + """ + ... + @overload + def not_equal(self, other: Union[Number, _complex]) -> Tensor: + r""" + not_equal(other) -> Tensor + + See :func:`torch.not_equal`. + """ + ... + @overload + def not_equal_(self, other: Tensor) -> Tensor: + r""" + not_equal_(other) -> Tensor + + In-place version of :meth:`~Tensor.not_equal`. + """ + ... + @overload + def not_equal_(self, other: Union[Number, _complex]) -> Tensor: + r""" + not_equal_(other) -> Tensor + + In-place version of :meth:`~Tensor.not_equal`. + """ + ... + def numel(self) -> _int: + r""" + numel() -> int + + See :func:`torch.numel` + """ + ... + def numpy(self, *, force: _bool = False) -> Any: + r""" + numpy(*, force=False) -> numpy.ndarray + + Returns the tensor as a NumPy :class:`ndarray`. + + If :attr:`force` is ``False`` (the default), the conversion + is performed only if the tensor is on the CPU, does not require grad, + does not have its conjugate bit set, and is a dtype and layout that + NumPy supports. The returned ndarray and the tensor will share their + storage, so changes to the tensor will be reflected in the ndarray + and vice versa. + + If :attr:`force` is ``True`` this is equivalent to + calling ``t.detach().cpu().resolve_conj().resolve_neg().numpy()``. + If the tensor isn't on the CPU or the conjugate or negative bit is set, + the tensor won't share its storage with the returned ndarray. + Setting :attr:`force` to ``True`` can be a useful shorthand. + + Args: + force (bool): if ``True``, the ndarray may be a copy of the tensor + instead of always sharing memory, defaults to ``False``. + """ + ... + def orgqr(self, input2: Tensor) -> Tensor: + r""" + orgqr(input2) -> Tensor + + See :func:`torch.orgqr` + """ + ... + def ormqr(self, input2: Tensor, input3: Tensor, left: _bool = True, transpose: _bool = False) -> Tensor: + r""" + ormqr(input2, input3, left=True, transpose=False) -> Tensor + + See :func:`torch.ormqr` + """ + ... + def outer(self, vec2: Tensor) -> Tensor: + r""" + outer(vec2) -> Tensor + + See :func:`torch.outer`. + """ + ... + @overload + def permute(self, dims: _size) -> Tensor: + r""" + permute(*dims) -> Tensor + + See :func:`torch.permute` + """ + ... 
+ @overload + def permute(self, *dims: _int) -> Tensor: + r""" + permute(*dims) -> Tensor + + See :func:`torch.permute` + """ + ... + def pin_memory(self, device: Optional[Optional[DeviceLikeType]] = None) -> Tensor: + r""" + pin_memory() -> Tensor + + Copies the tensor to pinned memory, if it's not already pinned. + """ + ... + def pinverse(self, rcond: _float = 1e-15) -> Tensor: + r""" + pinverse() -> Tensor + + See :func:`torch.pinverse` + """ + ... + def polygamma(self, n: _int) -> Tensor: + r""" + polygamma(n) -> Tensor + + See :func:`torch.polygamma` + """ + ... + def polygamma_(self, n: _int) -> Tensor: + r""" + polygamma_(n) -> Tensor + + In-place version of :meth:`~Tensor.polygamma` + """ + ... + def positive(self) -> Tensor: + r""" + positive() -> Tensor + + See :func:`torch.positive` + """ + ... + @overload + def pow(self, exponent: Tensor) -> Tensor: + r""" + pow(exponent) -> Tensor + + See :func:`torch.pow` + """ + ... + @overload + def pow(self, exponent: Union[Number, _complex]) -> Tensor: + r""" + pow(exponent) -> Tensor + + See :func:`torch.pow` + """ + ... + @overload + def pow_(self, exponent: Tensor) -> Tensor: + r""" + pow_(exponent) -> Tensor + + In-place version of :meth:`~Tensor.pow` + """ + ... + @overload + def pow_(self, exponent: Union[Number, _complex]) -> Tensor: + r""" + pow_(exponent) -> Tensor + + In-place version of :meth:`~Tensor.pow` + """ + ... + def prelu(self, weight: Tensor) -> Tensor: ... + @overload + def prod(self, *, dtype: Optional[_dtype] = None) -> Tensor: + r""" + prod(dim=None, keepdim=False, dtype=None) -> Tensor + + See :func:`torch.prod` + """ + ... + @overload + def prod(self, dim: _int, keepdim: _bool = False, *, dtype: Optional[_dtype] = None) -> Tensor: + r""" + prod(dim=None, keepdim=False, dtype=None) -> Tensor + + See :func:`torch.prod` + """ + ... + @overload + def prod(self, dim: Union[str, ellipsis, None], keepdim: _bool = False, *, dtype: Optional[_dtype] = None) -> Tensor: + r""" + prod(dim=None, keepdim=False, dtype=None) -> Tensor + + See :func:`torch.prod` + """ + ... + def put(self, index: Tensor, source: Tensor, accumulate: _bool = False) -> Tensor: + r""" + put(input, index, source, accumulate=False) -> Tensor + + Out-of-place version of :meth:`torch.Tensor.put_`. + `input` corresponds to `self` in :meth:`torch.Tensor.put_`. + """ + ... + def put_(self, index: Tensor, source: Tensor, accumulate: _bool = False) -> Tensor: + r""" + put_(index, source, accumulate=False) -> Tensor + + Copies the elements from :attr:`source` into the positions specified by + :attr:`index`. For the purpose of indexing, the :attr:`self` tensor is treated as if + it were a 1-D tensor. + + :attr:`index` and :attr:`source` need to have the same number of elements, but not necessarily + the same shape. + + If :attr:`accumulate` is ``True``, the elements in :attr:`source` are added to + :attr:`self`. If accumulate is ``False``, the behavior is undefined if :attr:`index` + contain duplicate elements. + + Args: + index (LongTensor): the indices into self + source (Tensor): the tensor containing values to copy from + accumulate (bool): whether to accumulate into self + + Example:: + + >>> src = torch.tensor([[4, 3, 5], + ... [6, 7, 8]]) + >>> src.put_(torch.tensor([1, 3]), torch.tensor([9, 10])) + tensor([[ 4, 9, 5], + [ 10, 7, 8]]) + """ + ... 
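+ # A minimal doctest-style sketch of ``Tensor.permute`` (illustrative only); it
+ # returns a view with the dimensions reordered rather than a copy, so the
+ # result is generally non-contiguous:
+ #
+ #     >>> x = torch.arange(24).reshape(2, 3, 4)
+ #     >>> y = x.permute(2, 0, 1)  # dim 2 first, then dim 0, then dim 1
+ #     >>> y.shape
+ #     torch.Size([4, 2, 3])
+ #     >>> y.is_contiguous()
+ #     False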
+ def q_per_channel_axis(self) -> _int: + r""" + q_per_channel_axis() -> int + + Given a Tensor quantized by linear (affine) per-channel quantization, + returns the index of dimension on which per-channel quantization is applied. + """ + ... + def q_per_channel_scales(self) -> Tensor: + r""" + q_per_channel_scales() -> Tensor + + Given a Tensor quantized by linear (affine) per-channel quantization, + returns a Tensor of scales of the underlying quantizer. It has the number of + elements that matches the corresponding dimensions (from q_per_channel_axis) of + the tensor. + """ + ... + def q_per_channel_zero_points(self) -> Tensor: + r""" + q_per_channel_zero_points() -> Tensor + + Given a Tensor quantized by linear (affine) per-channel quantization, + returns a tensor of zero_points of the underlying quantizer. It has the number of + elements that matches the corresponding dimensions (from q_per_channel_axis) of + the tensor. + """ + ... + def q_scale(self) -> _float: + r""" + q_scale() -> float + + Given a Tensor quantized by linear(affine) quantization, + returns the scale of the underlying quantizer(). + """ + ... + def q_zero_point(self) -> _int: + r""" + q_zero_point() -> int + + Given a Tensor quantized by linear(affine) quantization, + returns the zero_point of the underlying quantizer(). + """ + ... + def qr(self, some: _bool = True) -> torch.return_types.qr: + r""" + qr(some=True) -> (Tensor, Tensor) + + See :func:`torch.qr` + """ + ... + def qscheme(self) -> _qscheme: + r""" + qscheme() -> torch.qscheme + + Returns the quantization scheme of a given QTensor. + """ + ... + @overload + def quantile(self, q: Tensor, dim: Optional[_int] = None, keepdim: _bool = False, *, interpolation: str = "linear") -> Tensor: + r""" + quantile(q, dim=None, keepdim=False, *, interpolation='linear') -> Tensor + + See :func:`torch.quantile` + """ + ... + @overload + def quantile(self, q: _float, dim: Optional[_int] = None, keepdim: _bool = False, *, interpolation: str = "linear") -> Tensor: + r""" + quantile(q, dim=None, keepdim=False, *, interpolation='linear') -> Tensor + + See :func:`torch.quantile` + """ + ... + def rad2deg(self) -> Tensor: + r""" + rad2deg() -> Tensor + + See :func:`torch.rad2deg` + """ + ... + def rad2deg_(self) -> Tensor: + r""" + rad2deg_() -> Tensor + + In-place version of :meth:`~Tensor.rad2deg` + """ + ... + @overload + def random_(self, *, generator: Optional[Generator] = None) -> Tensor: + r""" + random_(from=0, to=None, *, generator=None) -> Tensor + + Fills :attr:`self` tensor with numbers sampled from the discrete uniform + distribution over ``[from, to - 1]``. If not specified, the values are usually + only bounded by :attr:`self` tensor's data type. However, for floating point + types, if unspecified, range will be ``[0, 2^mantissa]`` to ensure that every + value is representable. For example, `torch.tensor(1, dtype=torch.double).random_()` + will be uniform in ``[0, 2^53]``. + """ + ... + @overload + def random_(self, from_: _int, to: Optional[_int], *, generator: Optional[Generator] = None) -> Tensor: + r""" + random_(from=0, to=None, *, generator=None) -> Tensor + + Fills :attr:`self` tensor with numbers sampled from the discrete uniform + distribution over ``[from, to - 1]``. If not specified, the values are usually + only bounded by :attr:`self` tensor's data type. However, for floating point + types, if unspecified, range will be ``[0, 2^mantissa]`` to ensure that every + value is representable. 
For example, `torch.tensor(1, dtype=torch.double).random_()` + will be uniform in ``[0, 2^53]``. + """ + ... + @overload + def random_(self, to: _int, *, generator: Optional[Generator] = None) -> Tensor: + r""" + random_(from=0, to=None, *, generator=None) -> Tensor + + Fills :attr:`self` tensor with numbers sampled from the discrete uniform + distribution over ``[from, to - 1]``. If not specified, the values are usually + only bounded by :attr:`self` tensor's data type. However, for floating point + types, if unspecified, range will be ``[0, 2^mantissa]`` to ensure that every + value is representable. For example, `torch.tensor(1, dtype=torch.double).random_()` + will be uniform in ``[0, 2^53]``. + """ + ... + def ravel(self) -> Tensor: + r""" + ravel() -> Tensor + + see :func:`torch.ravel` + """ + ... + def reciprocal(self) -> Tensor: + r""" + reciprocal() -> Tensor + + See :func:`torch.reciprocal` + """ + ... + def reciprocal_(self) -> Tensor: + r""" + reciprocal_() -> Tensor + + In-place version of :meth:`~Tensor.reciprocal` + """ + ... + def record_stream(self, s: Stream) -> None: + r""" + record_stream(stream) + + Marks the tensor as having been used by this stream. When the tensor + is deallocated, ensure the tensor memory is not reused for another tensor + until all work queued on :attr:`stream` at the time of deallocation is + complete. + + .. note:: + + The caching allocator is aware of only the stream where a tensor was + allocated. Due to the awareness, it already correctly manages the life + cycle of tensors on only one stream. But if a tensor is used on a stream + different from the stream of origin, the allocator might reuse the memory + unexpectedly. Calling this method lets the allocator know which streams + have used the tensor. + + .. warning:: + + This method is most suitable for use cases where you are providing a + function that created a tensor on a side stream, and want users to be able + to make use of the tensor without having to think carefully about stream + safety when making use of them. These safety guarantees come at some + performance and predictability cost (analogous to the tradeoff between GC + and manual memory management), so if you are in a situation where + you manage the full lifetime of your tensors, you may consider instead + manually managing CUDA events so that calling this method is not necessary. + In particular, when you call this method, on later allocations the + allocator will poll the recorded stream to see if all operations have + completed yet; you can potentially race with side stream computation and + non-deterministically reuse or fail to reuse memory for an allocation. + + You can safely use tensors allocated on side streams without + :meth:`~Tensor.record_stream`; you must manually ensure that + any non-creation stream uses of a tensor are synced back to the creation + stream before you deallocate the tensor. As the CUDA caching allocator + guarantees that the memory will only be reused with the same creation stream, + this is sufficient to ensure that writes to future reallocations of the + memory will be delayed until non-creation stream uses are done. + (Counterintuitively, you may observe that on the CPU side we have already + reallocated the tensor, even though CUDA kernels on the old tensor are + still in progress. This is fine, because CUDA operations on the new + tensor will appropriately wait for the old operations to complete, as they + are all on the same stream.) 
+ + Concretely, this looks like this:: + + with torch.cuda.stream(s0): + x = torch.zeros(N) + + s1.wait_stream(s0) + with torch.cuda.stream(s1): + y = some_comm_op(x) + + ... some compute on s0 ... + + # synchronize creation stream s0 to side stream s1 + # before deallocating x + s0.wait_stream(s1) + del x + + Note that some discretion is required when deciding when to perform + ``s0.wait_stream(s1)``. In particular, if we were to wait immediately + after ``some_comm_op``, there wouldn't be any point in having the side + stream; it would be equivalent to have run ``some_comm_op`` on ``s0``. + Instead, the synchronization must be placed at some appropriate, later + point in time where you expect the side stream ``s1`` to have finished + work. This location is typically identified via profiling, e.g., using + Chrome traces produced + :meth:`torch.autograd.profiler.profile.export_chrome_trace`. If you + place the wait too early, work on s0 will block until ``s1`` has finished, + preventing further overlapping of communication and computation. If you + place the wait too late, you will use more memory than is strictly + necessary (as you are keeping ``x`` live for longer.) For a concrete + example of how this guidance can be applied in practice, see this post: + `FSDP and CUDACachingAllocator + `_. + """ + ... + def refine_names(self, names: Sequence[Union[str, ellipsis, None]]) -> Tensor: ... + def relu(self) -> Tensor: ... + def relu_(self) -> Tensor: ... + @overload + def remainder(self, other: Tensor) -> Tensor: + r""" + remainder(divisor) -> Tensor + + See :func:`torch.remainder` + """ + ... + @overload + def remainder(self, other: Union[Number, _complex]) -> Tensor: + r""" + remainder(divisor) -> Tensor + + See :func:`torch.remainder` + """ + ... + @overload + def remainder_(self, other: Tensor) -> Tensor: + r""" + remainder_(divisor) -> Tensor + + In-place version of :meth:`~Tensor.remainder` + """ + ... + @overload + def remainder_(self, other: Union[Number, _complex]) -> Tensor: + r""" + remainder_(divisor) -> Tensor + + In-place version of :meth:`~Tensor.remainder` + """ + ... + def rename(self, names: Optional[Sequence[Union[str, ellipsis, None]]]) -> Tensor: ... + def rename_(self, names: Optional[Sequence[Union[str, ellipsis, None]]]) -> Tensor: ... + def renorm(self, p: Union[Number, _complex], dim: _int, maxnorm: Union[Number, _complex]) -> Tensor: + r""" + renorm(p, dim, maxnorm) -> Tensor + + See :func:`torch.renorm` + """ + ... + def renorm_(self, p: Union[Number, _complex], dim: _int, maxnorm: Union[Number, _complex]) -> Tensor: + r""" + renorm_(p, dim, maxnorm) -> Tensor + + In-place version of :meth:`~Tensor.renorm` + """ + ... + @overload + def repeat(self, repeats: Sequence[Union[_int, SymInt]]) -> Tensor: + r""" + repeat(*sizes) -> Tensor + + Repeats this tensor along the specified dimensions. + + Unlike :meth:`~Tensor.expand`, this function copies the tensor's data. + + .. warning:: + + :meth:`~Tensor.repeat` behaves differently from + `numpy.repeat `_, + but is more similar to + `numpy.tile `_. + For the operator similar to `numpy.repeat`, see :func:`torch.repeat_interleave`. + + Args: + sizes (torch.Size or int...): The number of times to repeat this tensor along each + dimension + + Example:: + + >>> x = torch.tensor([1, 2, 3]) + >>> x.repeat(4, 2) + tensor([[ 1, 2, 3, 1, 2, 3], + [ 1, 2, 3, 1, 2, 3], + [ 1, 2, 3, 1, 2, 3], + [ 1, 2, 3, 1, 2, 3]]) + >>> x.repeat(4, 2, 1).size() + torch.Size([4, 2, 3]) + """ + ... 
+ @overload + def repeat(self, *repeats: _int) -> Tensor: + r""" + repeat(*sizes) -> Tensor + + Repeats this tensor along the specified dimensions. + + Unlike :meth:`~Tensor.expand`, this function copies the tensor's data. + + .. warning:: + + :meth:`~Tensor.repeat` behaves differently from + `numpy.repeat `_, + but is more similar to + `numpy.tile `_. + For the operator similar to `numpy.repeat`, see :func:`torch.repeat_interleave`. + + Args: + sizes (torch.Size or int...): The number of times to repeat this tensor along each + dimension + + Example:: + + >>> x = torch.tensor([1, 2, 3]) + >>> x.repeat(4, 2) + tensor([[ 1, 2, 3, 1, 2, 3], + [ 1, 2, 3, 1, 2, 3], + [ 1, 2, 3, 1, 2, 3], + [ 1, 2, 3, 1, 2, 3]]) + >>> x.repeat(4, 2, 1).size() + torch.Size([4, 2, 3]) + """ + ... + @overload + def repeat_interleave(self, repeats: Tensor, dim: Optional[_int] = None, *, output_size: Optional[Union[_int, SymInt]] = None) -> Tensor: + r""" + repeat_interleave(repeats, dim=None, *, output_size=None) -> Tensor + + See :func:`torch.repeat_interleave`. + """ + ... + @overload + def repeat_interleave(self, repeats: Union[_int, SymInt], dim: Optional[_int] = None, *, output_size: Optional[Union[_int, SymInt]] = None) -> Tensor: + r""" + repeat_interleave(repeats, dim=None, *, output_size=None) -> Tensor + + See :func:`torch.repeat_interleave`. + """ + ... + def requires_grad_(self, mode: _bool = True) -> Tensor: + r""" + requires_grad_(requires_grad=True) -> Tensor + + Change if autograd should record operations on this tensor: sets this tensor's + :attr:`requires_grad` attribute in-place. Returns this tensor. + + :func:`requires_grad_`'s main use case is to tell autograd to begin recording + operations on a Tensor ``tensor``. If ``tensor`` has ``requires_grad=False`` + (because it was obtained through a DataLoader, or required preprocessing or + initialization), ``tensor.requires_grad_()`` makes it so that autograd will + begin to record operations on ``tensor``. + + Args: + requires_grad (bool): If autograd should record operations on this tensor. + Default: ``True``. + + Example:: + + >>> # Let's say we want to preprocess some saved weights and use + >>> # the result as new weights. + >>> saved_weights = [0.1, 0.2, 0.3, 0.25] + >>> loaded_weights = torch.tensor(saved_weights) + >>> weights = preprocess(loaded_weights) # some function + >>> weights + tensor([-0.5503, 0.4926, -2.1158, -0.8303]) + + >>> # Now, start to record operations done to weights + >>> weights.requires_grad_() + >>> out = weights.pow(2).sum() + >>> out.backward() + >>> weights.grad + tensor([-1.1007, 0.9853, -4.2316, -1.6606]) + """ + ... + @overload + def reshape(self, shape: Sequence[Union[_int, SymInt]]) -> Tensor: + r""" + reshape(*shape) -> Tensor + + Returns a tensor with the same data and number of elements as :attr:`self` + but with the specified shape. This method returns a view if :attr:`shape` is + compatible with the current shape. See :meth:`torch.Tensor.view` on when it is + possible to return a view. + + See :func:`torch.reshape` + + Args: + shape (tuple of ints or int...): the desired shape + """ + ... + @overload + def reshape(self, *shape: _int) -> Tensor: + r""" + reshape(*shape) -> Tensor + + Returns a tensor with the same data and number of elements as :attr:`self` + but with the specified shape. This method returns a view if :attr:`shape` is + compatible with the current shape. See :meth:`torch.Tensor.view` on when it is + possible to return a view. 
+ + See :func:`torch.reshape` + + Args: + shape (tuple of ints or int...): the desired shape + """ + ... + def reshape_as(self, other: Tensor) -> Tensor: + r""" + reshape_as(other) -> Tensor + + Returns this tensor as the same shape as :attr:`other`. + ``self.reshape_as(other)`` is equivalent to ``self.reshape(other.sizes())``. + This method returns a view if ``other.sizes()`` is compatible with the current + shape. See :meth:`torch.Tensor.view` on when it is possible to return a view. + + Please see :meth:`reshape` for more information about ``reshape``. + + Args: + other (:class:`torch.Tensor`): The result tensor has the same shape + as :attr:`other`. + """ + ... + @overload + def resize_(self, size: Sequence[Union[_int, SymInt]], *, memory_format: Optional[memory_format] = None) -> Tensor: + r""" + resize_(*sizes, memory_format=torch.contiguous_format) -> Tensor + + Resizes :attr:`self` tensor to the specified size. If the number of elements is + larger than the current storage size, then the underlying storage is resized + to fit the new number of elements. If the number of elements is smaller, the + underlying storage is not changed. Existing elements are preserved but any new + memory is uninitialized. + + .. warning:: + + This is a low-level method. The storage is reinterpreted as C-contiguous, + ignoring the current strides (unless the target size equals the current + size, in which case the tensor is left unchanged). For most purposes, you + will instead want to use :meth:`~Tensor.view()`, which checks for + contiguity, or :meth:`~Tensor.reshape()`, which copies data if needed. To + change the size in-place with custom strides, see :meth:`~Tensor.set_()`. + + .. note:: + + If :func:`torch.use_deterministic_algorithms()` and + :attr:`torch.utils.deterministic.fill_uninitialized_memory` are both set to + ``True``, new elements are initialized to prevent nondeterministic behavior + from using the result as an input to an operation. Floating point and + complex values are set to NaN, and integer values are set to the maximum + value. + + Args: + sizes (torch.Size or int...): the desired size + memory_format (:class:`torch.memory_format`, optional): the desired memory format of + Tensor. Default: ``torch.contiguous_format``. Note that memory format of + :attr:`self` is going to be unaffected if ``self.size()`` matches ``sizes``. + + Example:: + + >>> x = torch.tensor([[1, 2], [3, 4], [5, 6]]) + >>> x.resize_(2, 2) + tensor([[ 1, 2], + [ 3, 4]]) + """ + ... + @overload + def resize_(self, *size: _int, memory_format: Optional[memory_format] = None) -> Tensor: + r""" + resize_(*sizes, memory_format=torch.contiguous_format) -> Tensor + + Resizes :attr:`self` tensor to the specified size. If the number of elements is + larger than the current storage size, then the underlying storage is resized + to fit the new number of elements. If the number of elements is smaller, the + underlying storage is not changed. Existing elements are preserved but any new + memory is uninitialized. + + .. warning:: + + This is a low-level method. The storage is reinterpreted as C-contiguous, + ignoring the current strides (unless the target size equals the current + size, in which case the tensor is left unchanged). For most purposes, you + will instead want to use :meth:`~Tensor.view()`, which checks for + contiguity, or :meth:`~Tensor.reshape()`, which copies data if needed. To + change the size in-place with custom strides, see :meth:`~Tensor.set_()`. + + .. 
note:: + + If :func:`torch.use_deterministic_algorithms()` and + :attr:`torch.utils.deterministic.fill_uninitialized_memory` are both set to + ``True``, new elements are initialized to prevent nondeterministic behavior + from using the result as an input to an operation. Floating point and + complex values are set to NaN, and integer values are set to the maximum + value. + + Args: + sizes (torch.Size or int...): the desired size + memory_format (:class:`torch.memory_format`, optional): the desired memory format of + Tensor. Default: ``torch.contiguous_format``. Note that memory format of + :attr:`self` is going to be unaffected if ``self.size()`` matches ``sizes``. + + Example:: + + >>> x = torch.tensor([[1, 2], [3, 4], [5, 6]]) + >>> x.resize_(2, 2) + tensor([[ 1, 2], + [ 3, 4]]) + """ + ... + def resize_as_(self, the_template: Tensor, *, memory_format: Optional[memory_format] = None) -> Tensor: + r""" + resize_as_(tensor, memory_format=torch.contiguous_format) -> Tensor + + Resizes the :attr:`self` tensor to be the same size as the specified + :attr:`tensor`. This is equivalent to ``self.resize_(tensor.size())``. + + Args: + memory_format (:class:`torch.memory_format`, optional): the desired memory format of + Tensor. Default: ``torch.contiguous_format``. Note that memory format of + :attr:`self` is going to be unaffected if ``self.size()`` matches ``tensor.size()``. + """ + ... + def resize_as_sparse_(self, the_template: Tensor) -> Tensor: ... + def resolve_conj(self) -> Tensor: + r""" + resolve_conj() -> Tensor + + See :func:`torch.resolve_conj` + """ + ... + def resolve_neg(self) -> Tensor: + r""" + resolve_neg() -> Tensor + + See :func:`torch.resolve_neg` + """ + ... + def retain_grad(self) -> None: + r""" + retain_grad() -> None + + Enables this Tensor to have their :attr:`grad` populated during + :func:`backward`. This is a no-op for leaf tensors. + """ + ... + def roll(self, shifts: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]], dims: Union[_int, _size] = ()) -> Tensor: + r""" + roll(shifts, dims) -> Tensor + + See :func:`torch.roll` + """ + ... + def rot90(self, k: _int = 1, dims: _size = (0,1)) -> Tensor: + r""" + rot90(k, dims) -> Tensor + + See :func:`torch.rot90` + """ + ... + @overload + def round(self) -> Tensor: + r""" + round(decimals=0) -> Tensor + + See :func:`torch.round` + """ + ... + @overload + def round(self, *, decimals: _int) -> Tensor: + r""" + round(decimals=0) -> Tensor + + See :func:`torch.round` + """ + ... + @overload + def round_(self) -> Tensor: + r""" + round_(decimals=0) -> Tensor + + In-place version of :meth:`~Tensor.round` + """ + ... + @overload + def round_(self, *, decimals: _int) -> Tensor: + r""" + round_(decimals=0) -> Tensor + + In-place version of :meth:`~Tensor.round` + """ + ... + def row_indices(self) -> Tensor: ... + def rsqrt(self) -> Tensor: + r""" + rsqrt() -> Tensor + + See :func:`torch.rsqrt` + """ + ... + def rsqrt_(self) -> Tensor: + r""" + rsqrt_() -> Tensor + + In-place version of :meth:`~Tensor.rsqrt` + """ + ... + @overload + def scatter(self, dim: _int, index: Tensor, src: Tensor) -> Tensor: + r""" + scatter(dim, index, src) -> Tensor + + Out-of-place version of :meth:`torch.Tensor.scatter_` + """ + ... + @overload + def scatter(self, dim: _int, index: Tensor, src: Tensor, *, reduce: str) -> Tensor: + r""" + scatter(dim, index, src) -> Tensor + + Out-of-place version of :meth:`torch.Tensor.scatter_` + """ + ... 
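+    # --- Illustrative usage sketch (editor's addition, not generated output) ---
+    # ``scatter`` is the out-of-place form of ``scatter_``: it returns a new tensor and leaves
+    # the base tensor untouched. A minimal sketch, with shapes and values assumed for illustration:
+    #
+    #     base = torch.zeros(3, 5, dtype=torch.int64)
+    #     index = torch.tensor([[0, 1, 2, 0]])
+    #     src = torch.arange(1, 5).reshape(1, 4)
+    #     out = base.scatter(0, index, src)   # out[index[0][j]][j] = src[0][j]; base stays all zeros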
+ @overload + def scatter(self, dim: _int, index: Tensor, value: Union[Number, _complex], *, reduce: str) -> Tensor: + r""" + scatter(dim, index, src) -> Tensor + + Out-of-place version of :meth:`torch.Tensor.scatter_` + """ + ... + @overload + def scatter(self, dim: Union[str, ellipsis, None], index: Tensor, src: Tensor) -> Tensor: + r""" + scatter(dim, index, src) -> Tensor + + Out-of-place version of :meth:`torch.Tensor.scatter_` + """ + ... + @overload + def scatter(self, dim: _int, index: Tensor, value: Union[Number, _complex]) -> Tensor: + r""" + scatter(dim, index, src) -> Tensor + + Out-of-place version of :meth:`torch.Tensor.scatter_` + """ + ... + @overload + def scatter(self, dim: Union[str, ellipsis, None], index: Tensor, value: Union[Number, _complex]) -> Tensor: + r""" + scatter(dim, index, src) -> Tensor + + Out-of-place version of :meth:`torch.Tensor.scatter_` + """ + ... + @overload + def scatter_(self, dim: _int, index: Tensor, src: Tensor) -> Tensor: + r""" + scatter_(dim, index, src, *, reduce=None) -> Tensor + + Writes all values from the tensor :attr:`src` into :attr:`self` at the indices + specified in the :attr:`index` tensor. For each value in :attr:`src`, its output + index is specified by its index in :attr:`src` for ``dimension != dim`` and by + the corresponding value in :attr:`index` for ``dimension = dim``. + + For a 3-D tensor, :attr:`self` is updated as:: + + self[index[i][j][k]][j][k] = src[i][j][k] # if dim == 0 + self[i][index[i][j][k]][k] = src[i][j][k] # if dim == 1 + self[i][j][index[i][j][k]] = src[i][j][k] # if dim == 2 + + This is the reverse operation of the manner described in :meth:`~Tensor.gather`. + + :attr:`self`, :attr:`index` and :attr:`src` (if it is a Tensor) should all have + the same number of dimensions. It is also required that + ``index.size(d) <= src.size(d)`` for all dimensions ``d``, and that + ``index.size(d) <= self.size(d)`` for all dimensions ``d != dim``. + Note that ``index`` and ``src`` do not broadcast. + + Moreover, as for :meth:`~Tensor.gather`, the values of :attr:`index` must be + between ``0`` and ``self.size(dim) - 1`` inclusive. + + .. warning:: + + When indices are not unique, the behavior is non-deterministic (one of the + values from ``src`` will be picked arbitrarily) and the gradient will be + incorrect (it will be propagated to all locations in the source that + correspond to the same index)! + + .. note:: + + The backward pass is implemented only for ``src.shape == index.shape``. + + Additionally accepts an optional :attr:`reduce` argument that allows + specification of an optional reduction operation, which is applied to all + values in the tensor :attr:`src` into :attr:`self` at the indices + specified in the :attr:`index`. For each value in :attr:`src`, the reduction + operation is applied to an index in :attr:`self` which is specified by + its index in :attr:`src` for ``dimension != dim`` and by the corresponding + value in :attr:`index` for ``dimension = dim``. + + Given a 3-D tensor and reduction using the multiplication operation, :attr:`self` + is updated as:: + + self[index[i][j][k]][j][k] *= src[i][j][k] # if dim == 0 + self[i][index[i][j][k]][k] *= src[i][j][k] # if dim == 1 + self[i][j][index[i][j][k]] *= src[i][j][k] # if dim == 2 + + Reducing with the addition operation is the same as using + :meth:`~torch.Tensor.scatter_add_`. + + .. warning:: + The reduce argument with Tensor ``src`` is deprecated and will be removed in + a future PyTorch release. 
Please use :meth:`~torch.Tensor.scatter_reduce_` + instead for more reduction options. + + Args: + dim (int): the axis along which to index + index (LongTensor): the indices of elements to scatter, can be either empty + or of the same dimensionality as ``src``. When empty, the operation + returns ``self`` unchanged. + src (Tensor): the source element(s) to scatter. + + Keyword args: + reduce (str, optional): reduction operation to apply, can be either + ``'add'`` or ``'multiply'``. + + Example:: + + >>> src = torch.arange(1, 11).reshape((2, 5)) + >>> src + tensor([[ 1, 2, 3, 4, 5], + [ 6, 7, 8, 9, 10]]) + >>> index = torch.tensor([[0, 1, 2, 0]]) + >>> torch.zeros(3, 5, dtype=src.dtype).scatter_(0, index, src) + tensor([[1, 0, 0, 4, 0], + [0, 2, 0, 0, 0], + [0, 0, 3, 0, 0]]) + >>> index = torch.tensor([[0, 1, 2], [0, 1, 4]]) + >>> torch.zeros(3, 5, dtype=src.dtype).scatter_(1, index, src) + tensor([[1, 2, 3, 0, 0], + [6, 7, 0, 0, 8], + [0, 0, 0, 0, 0]]) + + >>> torch.full((2, 4), 2.).scatter_(1, torch.tensor([[2], [3]]), + ... 1.23, reduce='multiply') + tensor([[2.0000, 2.0000, 2.4600, 2.0000], + [2.0000, 2.0000, 2.0000, 2.4600]]) + >>> torch.full((2, 4), 2.).scatter_(1, torch.tensor([[2], [3]]), + ... 1.23, reduce='add') + tensor([[2.0000, 2.0000, 3.2300, 2.0000], + [2.0000, 2.0000, 2.0000, 3.2300]]) + + .. function:: scatter_(dim, index, value, *, reduce=None) -> Tensor: + :noindex: + + Writes the value from :attr:`value` into :attr:`self` at the indices + specified in the :attr:`index` tensor. This operation is equivalent to the previous version, + with the :attr:`src` tensor filled entirely with :attr:`value`. + + Args: + dim (int): the axis along which to index + index (LongTensor): the indices of elements to scatter, can be either empty + or of the same dimensionality as ``src``. When empty, the operation + returns ``self`` unchanged. + value (Scalar): the value to scatter. + + Keyword args: + reduce (str, optional): reduction operation to apply, can be either + ``'add'`` or ``'multiply'``. + + Example:: + + >>> index = torch.tensor([[0, 1]]) + >>> value = 2 + >>> torch.zeros(3, 5).scatter_(0, index, value) + tensor([[2., 0., 0., 0., 0.], + [0., 2., 0., 0., 0.], + [0., 0., 0., 0., 0.]]) + """ + ... + @overload + def scatter_(self, dim: _int, index: Tensor, src: Tensor, *, reduce: str) -> Tensor: + r""" + scatter_(dim, index, src, *, reduce=None) -> Tensor + + Writes all values from the tensor :attr:`src` into :attr:`self` at the indices + specified in the :attr:`index` tensor. For each value in :attr:`src`, its output + index is specified by its index in :attr:`src` for ``dimension != dim`` and by + the corresponding value in :attr:`index` for ``dimension = dim``. + + For a 3-D tensor, :attr:`self` is updated as:: + + self[index[i][j][k]][j][k] = src[i][j][k] # if dim == 0 + self[i][index[i][j][k]][k] = src[i][j][k] # if dim == 1 + self[i][j][index[i][j][k]] = src[i][j][k] # if dim == 2 + + This is the reverse operation of the manner described in :meth:`~Tensor.gather`. + + :attr:`self`, :attr:`index` and :attr:`src` (if it is a Tensor) should all have + the same number of dimensions. It is also required that + ``index.size(d) <= src.size(d)`` for all dimensions ``d``, and that + ``index.size(d) <= self.size(d)`` for all dimensions ``d != dim``. + Note that ``index`` and ``src`` do not broadcast. + + Moreover, as for :meth:`~Tensor.gather`, the values of :attr:`index` must be + between ``0`` and ``self.size(dim) - 1`` inclusive. + + .. 
warning:: + + When indices are not unique, the behavior is non-deterministic (one of the + values from ``src`` will be picked arbitrarily) and the gradient will be + incorrect (it will be propagated to all locations in the source that + correspond to the same index)! + + .. note:: + + The backward pass is implemented only for ``src.shape == index.shape``. + + Additionally accepts an optional :attr:`reduce` argument that allows + specification of an optional reduction operation, which is applied to all + values in the tensor :attr:`src` into :attr:`self` at the indices + specified in the :attr:`index`. For each value in :attr:`src`, the reduction + operation is applied to an index in :attr:`self` which is specified by + its index in :attr:`src` for ``dimension != dim`` and by the corresponding + value in :attr:`index` for ``dimension = dim``. + + Given a 3-D tensor and reduction using the multiplication operation, :attr:`self` + is updated as:: + + self[index[i][j][k]][j][k] *= src[i][j][k] # if dim == 0 + self[i][index[i][j][k]][k] *= src[i][j][k] # if dim == 1 + self[i][j][index[i][j][k]] *= src[i][j][k] # if dim == 2 + + Reducing with the addition operation is the same as using + :meth:`~torch.Tensor.scatter_add_`. + + .. warning:: + The reduce argument with Tensor ``src`` is deprecated and will be removed in + a future PyTorch release. Please use :meth:`~torch.Tensor.scatter_reduce_` + instead for more reduction options. + + Args: + dim (int): the axis along which to index + index (LongTensor): the indices of elements to scatter, can be either empty + or of the same dimensionality as ``src``. When empty, the operation + returns ``self`` unchanged. + src (Tensor): the source element(s) to scatter. + + Keyword args: + reduce (str, optional): reduction operation to apply, can be either + ``'add'`` or ``'multiply'``. + + Example:: + + >>> src = torch.arange(1, 11).reshape((2, 5)) + >>> src + tensor([[ 1, 2, 3, 4, 5], + [ 6, 7, 8, 9, 10]]) + >>> index = torch.tensor([[0, 1, 2, 0]]) + >>> torch.zeros(3, 5, dtype=src.dtype).scatter_(0, index, src) + tensor([[1, 0, 0, 4, 0], + [0, 2, 0, 0, 0], + [0, 0, 3, 0, 0]]) + >>> index = torch.tensor([[0, 1, 2], [0, 1, 4]]) + >>> torch.zeros(3, 5, dtype=src.dtype).scatter_(1, index, src) + tensor([[1, 2, 3, 0, 0], + [6, 7, 0, 0, 8], + [0, 0, 0, 0, 0]]) + + >>> torch.full((2, 4), 2.).scatter_(1, torch.tensor([[2], [3]]), + ... 1.23, reduce='multiply') + tensor([[2.0000, 2.0000, 2.4600, 2.0000], + [2.0000, 2.0000, 2.0000, 2.4600]]) + >>> torch.full((2, 4), 2.).scatter_(1, torch.tensor([[2], [3]]), + ... 1.23, reduce='add') + tensor([[2.0000, 2.0000, 3.2300, 2.0000], + [2.0000, 2.0000, 2.0000, 3.2300]]) + + .. function:: scatter_(dim, index, value, *, reduce=None) -> Tensor: + :noindex: + + Writes the value from :attr:`value` into :attr:`self` at the indices + specified in the :attr:`index` tensor. This operation is equivalent to the previous version, + with the :attr:`src` tensor filled entirely with :attr:`value`. + + Args: + dim (int): the axis along which to index + index (LongTensor): the indices of elements to scatter, can be either empty + or of the same dimensionality as ``src``. When empty, the operation + returns ``self`` unchanged. + value (Scalar): the value to scatter. + + Keyword args: + reduce (str, optional): reduction operation to apply, can be either + ``'add'`` or ``'multiply'``. 
+ + Example:: + + >>> index = torch.tensor([[0, 1]]) + >>> value = 2 + >>> torch.zeros(3, 5).scatter_(0, index, value) + tensor([[2., 0., 0., 0., 0.], + [0., 2., 0., 0., 0.], + [0., 0., 0., 0., 0.]]) + """ + ... + @overload + def scatter_(self, dim: _int, index: Tensor, value: Union[Number, _complex], *, reduce: str) -> Tensor: + r""" + scatter_(dim, index, src, *, reduce=None) -> Tensor + + Writes all values from the tensor :attr:`src` into :attr:`self` at the indices + specified in the :attr:`index` tensor. For each value in :attr:`src`, its output + index is specified by its index in :attr:`src` for ``dimension != dim`` and by + the corresponding value in :attr:`index` for ``dimension = dim``. + + For a 3-D tensor, :attr:`self` is updated as:: + + self[index[i][j][k]][j][k] = src[i][j][k] # if dim == 0 + self[i][index[i][j][k]][k] = src[i][j][k] # if dim == 1 + self[i][j][index[i][j][k]] = src[i][j][k] # if dim == 2 + + This is the reverse operation of the manner described in :meth:`~Tensor.gather`. + + :attr:`self`, :attr:`index` and :attr:`src` (if it is a Tensor) should all have + the same number of dimensions. It is also required that + ``index.size(d) <= src.size(d)`` for all dimensions ``d``, and that + ``index.size(d) <= self.size(d)`` for all dimensions ``d != dim``. + Note that ``index`` and ``src`` do not broadcast. + + Moreover, as for :meth:`~Tensor.gather`, the values of :attr:`index` must be + between ``0`` and ``self.size(dim) - 1`` inclusive. + + .. warning:: + + When indices are not unique, the behavior is non-deterministic (one of the + values from ``src`` will be picked arbitrarily) and the gradient will be + incorrect (it will be propagated to all locations in the source that + correspond to the same index)! + + .. note:: + + The backward pass is implemented only for ``src.shape == index.shape``. + + Additionally accepts an optional :attr:`reduce` argument that allows + specification of an optional reduction operation, which is applied to all + values in the tensor :attr:`src` into :attr:`self` at the indices + specified in the :attr:`index`. For each value in :attr:`src`, the reduction + operation is applied to an index in :attr:`self` which is specified by + its index in :attr:`src` for ``dimension != dim`` and by the corresponding + value in :attr:`index` for ``dimension = dim``. + + Given a 3-D tensor and reduction using the multiplication operation, :attr:`self` + is updated as:: + + self[index[i][j][k]][j][k] *= src[i][j][k] # if dim == 0 + self[i][index[i][j][k]][k] *= src[i][j][k] # if dim == 1 + self[i][j][index[i][j][k]] *= src[i][j][k] # if dim == 2 + + Reducing with the addition operation is the same as using + :meth:`~torch.Tensor.scatter_add_`. + + .. warning:: + The reduce argument with Tensor ``src`` is deprecated and will be removed in + a future PyTorch release. Please use :meth:`~torch.Tensor.scatter_reduce_` + instead for more reduction options. + + Args: + dim (int): the axis along which to index + index (LongTensor): the indices of elements to scatter, can be either empty + or of the same dimensionality as ``src``. When empty, the operation + returns ``self`` unchanged. + src (Tensor): the source element(s) to scatter. + + Keyword args: + reduce (str, optional): reduction operation to apply, can be either + ``'add'`` or ``'multiply'``. 
+ + Example:: + + >>> src = torch.arange(1, 11).reshape((2, 5)) + >>> src + tensor([[ 1, 2, 3, 4, 5], + [ 6, 7, 8, 9, 10]]) + >>> index = torch.tensor([[0, 1, 2, 0]]) + >>> torch.zeros(3, 5, dtype=src.dtype).scatter_(0, index, src) + tensor([[1, 0, 0, 4, 0], + [0, 2, 0, 0, 0], + [0, 0, 3, 0, 0]]) + >>> index = torch.tensor([[0, 1, 2], [0, 1, 4]]) + >>> torch.zeros(3, 5, dtype=src.dtype).scatter_(1, index, src) + tensor([[1, 2, 3, 0, 0], + [6, 7, 0, 0, 8], + [0, 0, 0, 0, 0]]) + + >>> torch.full((2, 4), 2.).scatter_(1, torch.tensor([[2], [3]]), + ... 1.23, reduce='multiply') + tensor([[2.0000, 2.0000, 2.4600, 2.0000], + [2.0000, 2.0000, 2.0000, 2.4600]]) + >>> torch.full((2, 4), 2.).scatter_(1, torch.tensor([[2], [3]]), + ... 1.23, reduce='add') + tensor([[2.0000, 2.0000, 3.2300, 2.0000], + [2.0000, 2.0000, 2.0000, 3.2300]]) + + .. function:: scatter_(dim, index, value, *, reduce=None) -> Tensor: + :noindex: + + Writes the value from :attr:`value` into :attr:`self` at the indices + specified in the :attr:`index` tensor. This operation is equivalent to the previous version, + with the :attr:`src` tensor filled entirely with :attr:`value`. + + Args: + dim (int): the axis along which to index + index (LongTensor): the indices of elements to scatter, can be either empty + or of the same dimensionality as ``src``. When empty, the operation + returns ``self`` unchanged. + value (Scalar): the value to scatter. + + Keyword args: + reduce (str, optional): reduction operation to apply, can be either + ``'add'`` or ``'multiply'``. + + Example:: + + >>> index = torch.tensor([[0, 1]]) + >>> value = 2 + >>> torch.zeros(3, 5).scatter_(0, index, value) + tensor([[2., 0., 0., 0., 0.], + [0., 2., 0., 0., 0.], + [0., 0., 0., 0., 0.]]) + """ + ... + @overload + def scatter_(self, dim: _int, index: Tensor, value: Union[Number, _complex]) -> Tensor: + r""" + scatter_(dim, index, src, *, reduce=None) -> Tensor + + Writes all values from the tensor :attr:`src` into :attr:`self` at the indices + specified in the :attr:`index` tensor. For each value in :attr:`src`, its output + index is specified by its index in :attr:`src` for ``dimension != dim`` and by + the corresponding value in :attr:`index` for ``dimension = dim``. + + For a 3-D tensor, :attr:`self` is updated as:: + + self[index[i][j][k]][j][k] = src[i][j][k] # if dim == 0 + self[i][index[i][j][k]][k] = src[i][j][k] # if dim == 1 + self[i][j][index[i][j][k]] = src[i][j][k] # if dim == 2 + + This is the reverse operation of the manner described in :meth:`~Tensor.gather`. + + :attr:`self`, :attr:`index` and :attr:`src` (if it is a Tensor) should all have + the same number of dimensions. It is also required that + ``index.size(d) <= src.size(d)`` for all dimensions ``d``, and that + ``index.size(d) <= self.size(d)`` for all dimensions ``d != dim``. + Note that ``index`` and ``src`` do not broadcast. + + Moreover, as for :meth:`~Tensor.gather`, the values of :attr:`index` must be + between ``0`` and ``self.size(dim) - 1`` inclusive. + + .. warning:: + + When indices are not unique, the behavior is non-deterministic (one of the + values from ``src`` will be picked arbitrarily) and the gradient will be + incorrect (it will be propagated to all locations in the source that + correspond to the same index)! + + .. note:: + + The backward pass is implemented only for ``src.shape == index.shape``. 
+ + Additionally accepts an optional :attr:`reduce` argument that allows + specification of an optional reduction operation, which is applied to all + values in the tensor :attr:`src` into :attr:`self` at the indices + specified in the :attr:`index`. For each value in :attr:`src`, the reduction + operation is applied to an index in :attr:`self` which is specified by + its index in :attr:`src` for ``dimension != dim`` and by the corresponding + value in :attr:`index` for ``dimension = dim``. + + Given a 3-D tensor and reduction using the multiplication operation, :attr:`self` + is updated as:: + + self[index[i][j][k]][j][k] *= src[i][j][k] # if dim == 0 + self[i][index[i][j][k]][k] *= src[i][j][k] # if dim == 1 + self[i][j][index[i][j][k]] *= src[i][j][k] # if dim == 2 + + Reducing with the addition operation is the same as using + :meth:`~torch.Tensor.scatter_add_`. + + .. warning:: + The reduce argument with Tensor ``src`` is deprecated and will be removed in + a future PyTorch release. Please use :meth:`~torch.Tensor.scatter_reduce_` + instead for more reduction options. + + Args: + dim (int): the axis along which to index + index (LongTensor): the indices of elements to scatter, can be either empty + or of the same dimensionality as ``src``. When empty, the operation + returns ``self`` unchanged. + src (Tensor): the source element(s) to scatter. + + Keyword args: + reduce (str, optional): reduction operation to apply, can be either + ``'add'`` or ``'multiply'``. + + Example:: + + >>> src = torch.arange(1, 11).reshape((2, 5)) + >>> src + tensor([[ 1, 2, 3, 4, 5], + [ 6, 7, 8, 9, 10]]) + >>> index = torch.tensor([[0, 1, 2, 0]]) + >>> torch.zeros(3, 5, dtype=src.dtype).scatter_(0, index, src) + tensor([[1, 0, 0, 4, 0], + [0, 2, 0, 0, 0], + [0, 0, 3, 0, 0]]) + >>> index = torch.tensor([[0, 1, 2], [0, 1, 4]]) + >>> torch.zeros(3, 5, dtype=src.dtype).scatter_(1, index, src) + tensor([[1, 2, 3, 0, 0], + [6, 7, 0, 0, 8], + [0, 0, 0, 0, 0]]) + + >>> torch.full((2, 4), 2.).scatter_(1, torch.tensor([[2], [3]]), + ... 1.23, reduce='multiply') + tensor([[2.0000, 2.0000, 2.4600, 2.0000], + [2.0000, 2.0000, 2.0000, 2.4600]]) + >>> torch.full((2, 4), 2.).scatter_(1, torch.tensor([[2], [3]]), + ... 1.23, reduce='add') + tensor([[2.0000, 2.0000, 3.2300, 2.0000], + [2.0000, 2.0000, 2.0000, 3.2300]]) + + .. function:: scatter_(dim, index, value, *, reduce=None) -> Tensor: + :noindex: + + Writes the value from :attr:`value` into :attr:`self` at the indices + specified in the :attr:`index` tensor. This operation is equivalent to the previous version, + with the :attr:`src` tensor filled entirely with :attr:`value`. + + Args: + dim (int): the axis along which to index + index (LongTensor): the indices of elements to scatter, can be either empty + or of the same dimensionality as ``src``. When empty, the operation + returns ``self`` unchanged. + value (Scalar): the value to scatter. + + Keyword args: + reduce (str, optional): reduction operation to apply, can be either + ``'add'`` or ``'multiply'``. + + Example:: + + >>> index = torch.tensor([[0, 1]]) + >>> value = 2 + >>> torch.zeros(3, 5).scatter_(0, index, value) + tensor([[2., 0., 0., 0., 0.], + [0., 2., 0., 0., 0.], + [0., 0., 0., 0., 0.]]) + """ + ... + @overload + def scatter_add(self, dim: _int, index: Tensor, src: Tensor) -> Tensor: + r""" + scatter_add(dim, index, src) -> Tensor + + Out-of-place version of :meth:`torch.Tensor.scatter_add_` + """ + ... 
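+    # --- Illustrative migration sketch (editor's addition; tensors below are assumptions) ---
+    # The docstrings above deprecate ``scatter_(..., reduce=...)`` with a tensor ``src`` in favour
+    # of ``scatter_add_`` / ``scatter_reduce_``. A rough equivalence, for illustration only:
+    #
+    #     t = torch.full((2, 4), 2.)
+    #     idx = torch.tensor([[2], [3]])
+    #     src = torch.full((2, 1), 3.)
+    #     a = t.clone().scatter_(1, idx, src, reduce='add')         # deprecated form
+    #     b = t.clone().scatter_add_(1, idx, src)                   # preferred for addition
+    #     c = t.clone().scatter_reduce_(1, idx, src, reduce='sum')  # most general replacement
+    #     # a, b and c should all equal tensor([[2., 2., 5., 2.], [2., 2., 2., 5.]])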
+ @overload + def scatter_add(self, dim: Union[str, ellipsis, None], index: Tensor, src: Tensor) -> Tensor: + r""" + scatter_add(dim, index, src) -> Tensor + + Out-of-place version of :meth:`torch.Tensor.scatter_add_` + """ + ... + def scatter_add_(self, dim: _int, index: Tensor, src: Tensor) -> Tensor: + r""" + scatter_add_(dim, index, src) -> Tensor + + Adds all values from the tensor :attr:`src` into :attr:`self` at the indices + specified in the :attr:`index` tensor in a similar fashion as + :meth:`~torch.Tensor.scatter_`. For each value in :attr:`src`, it is added to + an index in :attr:`self` which is specified by its index in :attr:`src` + for ``dimension != dim`` and by the corresponding value in :attr:`index` for + ``dimension = dim``. + + For a 3-D tensor, :attr:`self` is updated as:: + + self[index[i][j][k]][j][k] += src[i][j][k] # if dim == 0 + self[i][index[i][j][k]][k] += src[i][j][k] # if dim == 1 + self[i][j][index[i][j][k]] += src[i][j][k] # if dim == 2 + + :attr:`self`, :attr:`index` and :attr:`src` should have same number of + dimensions. It is also required that ``index.size(d) <= src.size(d)`` for all + dimensions ``d``, and that ``index.size(d) <= self.size(d)`` for all dimensions + ``d != dim``. Note that ``index`` and ``src`` do not broadcast. + + Note: + This operation may behave nondeterministically when given tensors on a CUDA device. See :doc:`/notes/randomness` for more information. + + .. note:: + + The backward pass is implemented only for ``src.shape == index.shape``. + + Args: + dim (int): the axis along which to index + index (LongTensor): the indices of elements to scatter and add, can be + either empty or of the same dimensionality as ``src``. When empty, the + operation returns ``self`` unchanged. + src (Tensor): the source elements to scatter and add + + Example:: + + >>> src = torch.ones((2, 5)) + >>> index = torch.tensor([[0, 1, 2, 0, 0]]) + >>> torch.zeros(3, 5, dtype=src.dtype).scatter_add_(0, index, src) + tensor([[1., 0., 0., 1., 1.], + [0., 1., 0., 0., 0.], + [0., 0., 1., 0., 0.]]) + >>> index = torch.tensor([[0, 1, 2, 0, 0], [0, 1, 2, 2, 2]]) + >>> torch.zeros(3, 5, dtype=src.dtype).scatter_add_(0, index, src) + tensor([[2., 0., 0., 1., 1.], + [0., 2., 0., 0., 0.], + [0., 0., 2., 1., 1.]]) + """ + ... + def scatter_reduce(self, dim: _int, index: Tensor, src: Tensor, reduce: str, *, include_self: _bool = True) -> Tensor: + r""" + scatter_reduce(dim, index, src, reduce, *, include_self=True) -> Tensor + + Out-of-place version of :meth:`torch.Tensor.scatter_reduce_` + """ + ... + def scatter_reduce_(self, dim: _int, index: Tensor, src: Tensor, reduce: str, *, include_self: _bool = True) -> Tensor: + r""" + scatter_reduce_(dim, index, src, reduce, *, include_self=True) -> Tensor + + Reduces all values from the :attr:`src` tensor to the indices specified in + the :attr:`index` tensor in the :attr:`self` tensor using the applied reduction + defined via the :attr:`reduce` argument (:obj:`"sum"`, :obj:`"prod"`, :obj:`"mean"`, + :obj:`"amax"`, :obj:`"amin"`). For each value in :attr:`src`, it is reduced to an + index in :attr:`self` which is specified by its index in :attr:`src` for + ``dimension != dim`` and by the corresponding value in :attr:`index` for + ``dimension = dim``. If :obj:`include_self="True"`, the values in the :attr:`self` + tensor are included in the reduction. + + :attr:`self`, :attr:`index` and :attr:`src` should all have + the same number of dimensions. 
It is also required that + ``index.size(d) <= src.size(d)`` for all dimensions ``d``, and that + ``index.size(d) <= self.size(d)`` for all dimensions ``d != dim``. + Note that ``index`` and ``src`` do not broadcast. + + For a 3-D tensor with :obj:`reduce="sum"` and :obj:`include_self=True` the + output is given as:: + + self[index[i][j][k]][j][k] += src[i][j][k] # if dim == 0 + self[i][index[i][j][k]][k] += src[i][j][k] # if dim == 1 + self[i][j][index[i][j][k]] += src[i][j][k] # if dim == 2 + + Note: + This operation may behave nondeterministically when given tensors on a CUDA device. See :doc:`/notes/randomness` for more information. + + .. note:: + + The backward pass is implemented only for ``src.shape == index.shape``. + + .. warning:: + + This function is in beta and may change in the near future. + + Args: + dim (int): the axis along which to index + index (LongTensor): the indices of elements to scatter and reduce. + src (Tensor): the source elements to scatter and reduce + reduce (str): the reduction operation to apply for non-unique indices + (:obj:`"sum"`, :obj:`"prod"`, :obj:`"mean"`, :obj:`"amax"`, :obj:`"amin"`) + include_self (bool): whether elements from the :attr:`self` tensor are + included in the reduction + + Example:: + + >>> src = torch.tensor([1., 2., 3., 4., 5., 6.]) + >>> index = torch.tensor([0, 1, 0, 1, 2, 1]) + >>> input = torch.tensor([1., 2., 3., 4.]) + >>> input.scatter_reduce(0, index, src, reduce="sum") + tensor([5., 14., 8., 4.]) + >>> input.scatter_reduce(0, index, src, reduce="sum", include_self=False) + tensor([4., 12., 5., 4.]) + >>> input2 = torch.tensor([5., 4., 3., 2.]) + >>> input2.scatter_reduce(0, index, src, reduce="amax") + tensor([5., 6., 5., 2.]) + >>> input2.scatter_reduce(0, index, src, reduce="amax", include_self=False) + tensor([3., 6., 5., 2.]) + """ + ... + @overload + def select(self, dim: _int, index: Union[_int, SymInt]) -> Tensor: + r""" + select(dim, index) -> Tensor + + See :func:`torch.select` + """ + ... + @overload + def select(self, dim: Union[str, ellipsis, None], index: _int) -> Tensor: + r""" + select(dim, index) -> Tensor + + See :func:`torch.select` + """ + ... + def select_scatter(self, src: Tensor, dim: _int, index: Union[_int, SymInt]) -> Tensor: + r""" + select_scatter(src, dim, index) -> Tensor + + See :func:`torch.select_scatter` + """ + ... + @overload + def set_(self, storage: Union[Storage, TypedStorage, UntypedStorage], offset: _int, size: _size, stride: _size) -> Tensor: + r""" + set_(source=None, storage_offset=0, size=None, stride=None) -> Tensor + + Sets the underlying storage, size, and strides. If :attr:`source` is a tensor, + :attr:`self` tensor will share the same storage and have the same size and + strides as :attr:`source`. Changes to elements in one tensor will be reflected + in the other. + + If :attr:`source` is a :class:`~torch.Storage`, the method sets the underlying + storage, offset, size, and stride. + + Args: + source (Tensor or Storage): the tensor or storage to use + storage_offset (int, optional): the offset in the storage + size (torch.Size, optional): the desired size. Defaults to the size of the source. + stride (tuple, optional): the desired stride. Defaults to C-contiguous strides. + """ + ... + @overload + def set_(self, storage: Union[Storage, TypedStorage, UntypedStorage]) -> Tensor: + r""" + set_(source=None, storage_offset=0, size=None, stride=None) -> Tensor + + Sets the underlying storage, size, and strides. 
If :attr:`source` is a tensor, + :attr:`self` tensor will share the same storage and have the same size and + strides as :attr:`source`. Changes to elements in one tensor will be reflected + in the other. + + If :attr:`source` is a :class:`~torch.Storage`, the method sets the underlying + storage, offset, size, and stride. + + Args: + source (Tensor or Storage): the tensor or storage to use + storage_offset (int, optional): the offset in the storage + size (torch.Size, optional): the desired size. Defaults to the size of the source. + stride (tuple, optional): the desired stride. Defaults to C-contiguous strides. + """ + ... + def sgn(self) -> Tensor: + r""" + sgn() -> Tensor + + See :func:`torch.sgn` + """ + ... + def sgn_(self) -> Tensor: + r""" + sgn_() -> Tensor + + In-place version of :meth:`~Tensor.sgn` + """ + ... + def short(self) -> Tensor: + r""" + short(memory_format=torch.preserve_format) -> Tensor + + ``self.short()`` is equivalent to ``self.to(torch.int16)``. See :func:`to`. + + Args: + memory_format (:class:`torch.memory_format`, optional): the desired memory format of + returned Tensor. Default: ``torch.preserve_format``. + """ + ... + def sigmoid(self) -> Tensor: + r""" + sigmoid() -> Tensor + + See :func:`torch.sigmoid` + """ + ... + def sigmoid_(self) -> Tensor: + r""" + sigmoid_() -> Tensor + + In-place version of :meth:`~Tensor.sigmoid` + """ + ... + def sign(self) -> Tensor: + r""" + sign() -> Tensor + + See :func:`torch.sign` + """ + ... + def sign_(self) -> Tensor: + r""" + sign_() -> Tensor + + In-place version of :meth:`~Tensor.sign` + """ + ... + def signbit(self) -> Tensor: + r""" + signbit() -> Tensor + + See :func:`torch.signbit` + """ + ... + def sin(self) -> Tensor: + r""" + sin() -> Tensor + + See :func:`torch.sin` + """ + ... + def sin_(self) -> Tensor: + r""" + sin_() -> Tensor + + In-place version of :meth:`~Tensor.sin` + """ + ... + def sinc(self) -> Tensor: + r""" + sinc() -> Tensor + + See :func:`torch.sinc` + """ + ... + def sinc_(self) -> Tensor: + r""" + sinc_() -> Tensor + + In-place version of :meth:`~Tensor.sinc` + """ + ... + def sinh(self) -> Tensor: + r""" + sinh() -> Tensor + + See :func:`torch.sinh` + """ + ... + def sinh_(self) -> Tensor: + r""" + sinh_() -> Tensor + + In-place version of :meth:`~Tensor.sinh` + """ + ... + @overload + def size(self, dim: None = None) -> Size: + r""" + size(dim=None) -> torch.Size or int + + Returns the size of the :attr:`self` tensor. If ``dim`` is not specified, + the returned value is a :class:`torch.Size`, a subclass of :class:`tuple`. + If ``dim`` is specified, returns an int holding the size of that dimension. + + Args: + dim (int, optional): The dimension for which to retrieve the size. + + Example:: + + >>> t = torch.empty(3, 4, 5) + >>> t.size() + torch.Size([3, 4, 5]) + >>> t.size(dim=1) + 4 + """ + ... + @overload + def size(self, dim: _int) -> _int: + r""" + size(dim=None) -> torch.Size or int + + Returns the size of the :attr:`self` tensor. If ``dim`` is not specified, + the returned value is a :class:`torch.Size`, a subclass of :class:`tuple`. + If ``dim`` is specified, returns an int holding the size of that dimension. + + Args: + dim (int, optional): The dimension for which to retrieve the size. + + Example:: + + >>> t = torch.empty(3, 4, 5) + >>> t.size() + torch.Size([3, 4, 5]) + >>> t.size(dim=1) + 4 + """ + ... 
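+    # --- Illustrative usage sketch (editor's addition, not generated output) ---
+    # ``size()`` returns a :class:`torch.Size`, a tuple subclass, so it supports tuple-style
+    # unpacking and indexing; ``t.shape`` exposes the same information as an attribute:
+    #
+    #     t = torch.empty(3, 4, 5)
+    #     n, c, w = t.size()                 # tuple-style unpacking
+    #     t.size(1) == t.shape[1] == 4       # True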
+ def slice_inverse(self, src: Tensor, dim: _int = 0, start: Optional[Union[_int, SymInt]] = None, end: Optional[Union[_int, SymInt]] = None, step: Union[_int, SymInt] = 1) -> Tensor: ... + def slice_scatter(self, src: Tensor, dim: _int = 0, start: Optional[Union[_int, SymInt]] = None, end: Optional[Union[_int, SymInt]] = None, step: Union[_int, SymInt] = 1) -> Tensor: + r""" + slice_scatter(src, dim=0, start=None, end=None, step=1) -> Tensor + + See :func:`torch.slice_scatter` + """ + ... + def slogdet(self) -> torch.return_types.slogdet: + r""" + slogdet() -> (Tensor, Tensor) + + See :func:`torch.slogdet` + """ + ... + def smm(self, mat2: Tensor) -> Tensor: + r""" + smm(mat) -> Tensor + + See :func:`torch.smm` + """ + ... + @overload + def softmax(self, dim: _int, dtype: Optional[_dtype] = None) -> Tensor: + r""" + softmax(dim) -> Tensor + + Alias for :func:`torch.nn.functional.softmax`. + """ + ... + @overload + def softmax(self, dim: Union[str, ellipsis, None], *, dtype: Optional[_dtype] = None) -> Tensor: + r""" + softmax(dim) -> Tensor + + Alias for :func:`torch.nn.functional.softmax`. + """ + ... + @overload + def sort(self, *, stable: Optional[_bool], dim: _int = -1, descending: _bool = False) -> torch.return_types.sort: + r""" + sort(dim=-1, descending=False) -> (Tensor, LongTensor) + + See :func:`torch.sort` + """ + ... + @overload + def sort(self, dim: _int = -1, descending: _bool = False) -> torch.return_types.sort: + r""" + sort(dim=-1, descending=False) -> (Tensor, LongTensor) + + See :func:`torch.sort` + """ + ... + @overload + def sort(self, *, stable: Optional[_bool], dim: Union[str, ellipsis, None], descending: _bool = False) -> torch.return_types.sort: + r""" + sort(dim=-1, descending=False) -> (Tensor, LongTensor) + + See :func:`torch.sort` + """ + ... + @overload + def sort(self, dim: Union[str, ellipsis, None], descending: _bool = False) -> torch.return_types.sort: + r""" + sort(dim=-1, descending=False) -> (Tensor, LongTensor) + + See :func:`torch.sort` + """ + ... + def sparse_dim(self) -> _int: + r""" + sparse_dim() -> int + + Return the number of sparse dimensions in a :ref:`sparse tensor ` :attr:`self`. + + .. note:: + Returns ``0`` if :attr:`self` is not a sparse tensor. + + See also :meth:`Tensor.dense_dim` and :ref:`hybrid tensors `. + """ + ... + def sparse_mask(self, mask: Tensor) -> Tensor: + r""" + sparse_mask(mask) -> Tensor + + Returns a new :ref:`sparse tensor ` with values from a + strided tensor :attr:`self` filtered by the indices of the sparse + tensor :attr:`mask`. The values of :attr:`mask` sparse tensor are + ignored. :attr:`self` and :attr:`mask` tensors must have the same + shape. + + .. note:: + + The returned sparse tensor might contain duplicate values if :attr:`mask` + is not coalesced. It is therefore advisable to pass ``mask.coalesce()`` + if such behavior is not desired. + + .. note:: + + The returned sparse tensor has the same indices as the sparse tensor + :attr:`mask`, even when the corresponding values in :attr:`self` are + zeros. + + Args: + mask (Tensor): a sparse tensor whose indices are used as a filter + + Example:: + + >>> nse = 5 + >>> dims = (5, 5, 2, 2) + >>> I = torch.cat([torch.randint(0, dims[0], size=(nse,)), + ... 
torch.randint(0, dims[1], size=(nse,))], 0).reshape(2, nse) + >>> V = torch.randn(nse, dims[2], dims[3]) + >>> S = torch.sparse_coo_tensor(I, V, dims).coalesce() + >>> D = torch.randn(dims) + >>> D.sparse_mask(S) + tensor(indices=tensor([[0, 0, 0, 2], + [0, 1, 4, 3]]), + values=tensor([[[ 1.6550, 0.2397], + [-0.1611, -0.0779]], + + [[ 0.2326, -1.0558], + [ 1.4711, 1.9678]], + + [[-0.5138, -0.0411], + [ 1.9417, 0.5158]], + + [[ 0.0793, 0.0036], + [-0.2569, -0.1055]]]), + size=(5, 5, 2, 2), nnz=4, layout=torch.sparse_coo) + """ + ... + def sparse_resize_(self, size: _size, sparse_dim: _int, dense_dim: _int) -> Tensor: + r""" + sparse_resize_(size, sparse_dim, dense_dim) -> Tensor + + Resizes :attr:`self` :ref:`sparse tensor ` to the desired + size and the number of sparse and dense dimensions. + + .. note:: + If the number of specified elements in :attr:`self` is zero, then + :attr:`size`, :attr:`sparse_dim`, and :attr:`dense_dim` can be any + size and positive integers such that ``len(size) == sparse_dim + + dense_dim``. + + If :attr:`self` specifies one or more elements, however, then each + dimension in :attr:`size` must not be smaller than the corresponding + dimension of :attr:`self`, :attr:`sparse_dim` must equal the number + of sparse dimensions in :attr:`self`, and :attr:`dense_dim` must + equal the number of dense dimensions in :attr:`self`. + + .. warning:: + Throws an error if :attr:`self` is not a sparse tensor. + + Args: + size (torch.Size): the desired size. If :attr:`self` is non-empty + sparse tensor, the desired size cannot be smaller than the + original size. + sparse_dim (int): the number of sparse dimensions + dense_dim (int): the number of dense dimensions + """ + ... + def sparse_resize_and_clear_(self, size: _size, sparse_dim: _int, dense_dim: _int) -> Tensor: + r""" + sparse_resize_and_clear_(size, sparse_dim, dense_dim) -> Tensor + + Removes all specified elements from a :ref:`sparse tensor + ` :attr:`self` and resizes :attr:`self` to the desired + size and the number of sparse and dense dimensions. + + .. warning: + Throws an error if :attr:`self` is not a sparse tensor. + + Args: + size (torch.Size): the desired size. + sparse_dim (int): the number of sparse dimensions + dense_dim (int): the number of dense dimensions + """ + ... + @overload + def split(self, split_size: _int, dim: _int = 0) -> Sequence[Tensor]: ... + @overload + def split(self, split_size: Tuple[_int, ...], dim: _int = 0) -> Sequence[Tensor]: ... + def split_with_sizes(self, split_sizes: Sequence[Union[_int, SymInt]], dim: _int = 0) -> Tuple[Tensor, ...]: ... + def sqrt(self) -> Tensor: + r""" + sqrt() -> Tensor + + See :func:`torch.sqrt` + """ + ... + def sqrt_(self) -> Tensor: + r""" + sqrt_() -> Tensor + + In-place version of :meth:`~Tensor.sqrt` + """ + ... + def square(self) -> Tensor: + r""" + square() -> Tensor + + See :func:`torch.square` + """ + ... + def square_(self) -> Tensor: + r""" + square_() -> Tensor + + In-place version of :meth:`~Tensor.square` + """ + ... + @overload + def squeeze(self) -> Tensor: + r""" + squeeze(dim=None) -> Tensor + + See :func:`torch.squeeze` + """ + ... + @overload + def squeeze(self, dim: _int) -> Tensor: + r""" + squeeze(dim=None) -> Tensor + + See :func:`torch.squeeze` + """ + ... + @overload + def squeeze(self, dim: _size) -> Tensor: + r""" + squeeze(dim=None) -> Tensor + + See :func:`torch.squeeze` + """ + ... + @overload + def squeeze(self, *dim: _int) -> Tensor: + r""" + squeeze(dim=None) -> Tensor + + See :func:`torch.squeeze` + """ + ... 
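+    # --- Illustrative usage sketch (editor's addition, not generated output) ---
+    # ``squeeze`` removes size-1 dimensions; with an explicit ``dim`` it only touches that
+    # dimension and is a no-op when that dimension's size is not 1:
+    #
+    #     x = torch.zeros(2, 1, 3, 1)
+    #     x.squeeze().shape      # torch.Size([2, 3])
+    #     x.squeeze(1).shape     # torch.Size([2, 3, 1])
+    #     x.squeeze(0).shape     # torch.Size([2, 1, 3, 1])  -- dim 0 has size 2, left unchanged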
+ @overload + def squeeze(self, dim: Union[str, ellipsis, None]) -> Tensor: + r""" + squeeze(dim=None) -> Tensor + + See :func:`torch.squeeze` + """ + ... + @overload + def squeeze_(self) -> Tensor: + r""" + squeeze_(dim=None) -> Tensor + + In-place version of :meth:`~Tensor.squeeze` + """ + ... + @overload + def squeeze_(self, dim: _int) -> Tensor: + r""" + squeeze_(dim=None) -> Tensor + + In-place version of :meth:`~Tensor.squeeze` + """ + ... + @overload + def squeeze_(self, dim: _size) -> Tensor: + r""" + squeeze_(dim=None) -> Tensor + + In-place version of :meth:`~Tensor.squeeze` + """ + ... + @overload + def squeeze_(self, *dim: _int) -> Tensor: + r""" + squeeze_(dim=None) -> Tensor + + In-place version of :meth:`~Tensor.squeeze` + """ + ... + @overload + def squeeze_(self, dim: Union[str, ellipsis, None]) -> Tensor: + r""" + squeeze_(dim=None) -> Tensor + + In-place version of :meth:`~Tensor.squeeze` + """ + ... + def sspaddmm(self, mat1: Tensor, mat2: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1) -> Tensor: + r""" + sspaddmm(mat1, mat2, *, beta=1, alpha=1) -> Tensor + + See :func:`torch.sspaddmm` + """ + ... + @overload + def std(self, dim: Optional[Union[_int, _size]], unbiased: _bool = True, keepdim: _bool = False) -> Tensor: + r""" + std(dim=None, *, correction=1, keepdim=False) -> Tensor + + See :func:`torch.std` + """ + ... + @overload + def std(self, dim: Optional[Union[_int, _size]] = None, *, correction: Optional[Union[Number, _complex]] = None, keepdim: _bool = False) -> Tensor: + r""" + std(dim=None, *, correction=1, keepdim=False) -> Tensor + + See :func:`torch.std` + """ + ... + @overload + def std(self, unbiased: _bool = True) -> Tensor: + r""" + std(dim=None, *, correction=1, keepdim=False) -> Tensor + + See :func:`torch.std` + """ + ... + @overload + def std(self, dim: Sequence[Union[str, ellipsis, None]], unbiased: _bool = True, keepdim: _bool = False) -> Tensor: + r""" + std(dim=None, *, correction=1, keepdim=False) -> Tensor + + See :func:`torch.std` + """ + ... + @overload + def std(self, dim: Sequence[Union[str, ellipsis, None]], *, correction: Optional[Union[Number, _complex]] = None, keepdim: _bool = False) -> Tensor: + r""" + std(dim=None, *, correction=1, keepdim=False) -> Tensor + + See :func:`torch.std` + """ + ... + def untyped_storage(self) -> UntypedStorage: ... + def storage_offset(self) -> _int: + r""" + storage_offset() -> int + + Returns :attr:`self` tensor's offset in the underlying storage in terms of + number of storage elements (not bytes). + + Example:: + + >>> x = torch.tensor([1, 2, 3, 4, 5]) + >>> x.storage_offset() + 0 + >>> x[3:].storage_offset() + 3 + """ + ... + def storage_type(self) -> Storage: ... + @overload + def stride(self, dim: None = None) -> Tuple[_int, ...]: + r""" + stride(dim) -> tuple or int + + Returns the stride of :attr:`self` tensor. + + Stride is the jump necessary to go from one element to the next one in the + specified dimension :attr:`dim`. A tuple of all strides is returned when no + argument is passed in. Otherwise, an integer value is returned as the stride in + the particular dimension :attr:`dim`. + + Args: + dim (int, optional): the desired dimension in which stride is required + + Example:: + + >>> x = torch.tensor([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]]) + >>> x.stride() + (5, 1) + >>> x.stride(0) + 5 + >>> x.stride(-1) + 1 + """ + ... + @overload + def stride(self, dim: _int) -> _int: + r""" + stride(dim) -> tuple or int + + Returns the stride of :attr:`self` tensor. 
+ + Stride is the jump necessary to go from one element to the next one in the + specified dimension :attr:`dim`. A tuple of all strides is returned when no + argument is passed in. Otherwise, an integer value is returned as the stride in + the particular dimension :attr:`dim`. + + Args: + dim (int, optional): the desired dimension in which stride is required + + Example:: + + >>> x = torch.tensor([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]]) + >>> x.stride() + (5, 1) + >>> x.stride(0) + 5 + >>> x.stride(-1) + 1 + """ + ... + def sub(self, other: Union[Tensor, Number, _complex, torch.SymInt, torch.SymFloat], *, alpha: Optional[Union[Number, _complex]] = 1, out: Optional[Tensor] = None) -> Tensor: + r""" + sub(other, *, alpha=1) -> Tensor + + See :func:`torch.sub`. + """ + ... + def sub_(self, other: Union[Tensor, Number, _complex, torch.SymInt, torch.SymFloat], *, alpha: Optional[Union[Number, _complex]] = 1) -> Tensor: + r""" + sub_(other, *, alpha=1) -> Tensor + + In-place version of :meth:`~Tensor.sub` + """ + ... + @overload + def subtract(self, other: Tensor, *, alpha: Union[Number, _complex] = 1) -> Tensor: + r""" + subtract(other, *, alpha=1) -> Tensor + + See :func:`torch.subtract`. + """ + ... + @overload + def subtract(self, other: Union[Number, _complex], alpha: Union[Number, _complex] = 1) -> Tensor: + r""" + subtract(other, *, alpha=1) -> Tensor + + See :func:`torch.subtract`. + """ + ... + @overload + def subtract_(self, other: Tensor, *, alpha: Union[Number, _complex] = 1) -> Tensor: + r""" + subtract_(other, *, alpha=1) -> Tensor + + In-place version of :meth:`~Tensor.subtract`. + """ + ... + @overload + def subtract_(self, other: Union[Number, _complex], alpha: Union[Number, _complex] = 1) -> Tensor: + r""" + subtract_(other, *, alpha=1) -> Tensor + + In-place version of :meth:`~Tensor.subtract`. + """ + ... + @overload + def sum(self, *, dtype: Optional[_dtype] = None) -> Tensor: + r""" + sum(dim=None, keepdim=False, dtype=None) -> Tensor + + See :func:`torch.sum` + """ + ... + @overload + def sum(self, dim: Optional[Union[_int, _size]], keepdim: _bool = False, *, dtype: Optional[_dtype] = None) -> Tensor: + r""" + sum(dim=None, keepdim=False, dtype=None) -> Tensor + + See :func:`torch.sum` + """ + ... + @overload + def sum(self, dim: Sequence[Union[str, ellipsis, None]], keepdim: _bool = False, *, dtype: Optional[_dtype] = None) -> Tensor: + r""" + sum(dim=None, keepdim=False, dtype=None) -> Tensor + + See :func:`torch.sum` + """ + ... + @overload + def sum_to_size(self, size: Sequence[Union[_int, SymInt]]) -> Tensor: + r""" + sum_to_size(*size) -> Tensor + + Sum ``this`` tensor to :attr:`size`. + :attr:`size` must be broadcastable to ``this`` tensor size. + + Args: + size (int...): a sequence of integers defining the shape of the output tensor. + """ + ... + @overload + def sum_to_size(self, *size: _int) -> Tensor: + r""" + sum_to_size(*size) -> Tensor + + Sum ``this`` tensor to :attr:`size`. + :attr:`size` must be broadcastable to ``this`` tensor size. + + Args: + size (int...): a sequence of integers defining the shape of the output tensor. + """ + ... + def svd(self, some: _bool = True, compute_uv: _bool = True) -> torch.return_types.svd: + r""" + svd(some=True, compute_uv=True) -> (Tensor, Tensor, Tensor) + + See :func:`torch.svd` + """ + ... + def swapaxes(self, axis0: _int, axis1: _int) -> Tensor: + r""" + swapaxes(axis0, axis1) -> Tensor + + See :func:`torch.swapaxes` + """ + ... 
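+    # --- Illustrative usage sketch (editor's addition, not generated output) ---
+    # ``sum`` reduces over all elements unless ``dim`` is given; ``keepdim=True`` keeps the
+    # reduced dimension with size 1, which is convenient for later broadcasting:
+    #
+    #     x = torch.ones(2, 3)
+    #     x.sum()                        # tensor(6.)
+    #     x.sum(dim=1)                   # tensor([3., 3.]) with shape (2,)
+    #     x.sum(dim=1, keepdim=True)     # shape (2, 1), so x / x.sum(dim=1, keepdim=True) broadcasts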
+ def swapaxes_(self, axis0: _int, axis1: _int) -> Tensor: + r""" + swapaxes_(axis0, axis1) -> Tensor + + In-place version of :meth:`~Tensor.swapaxes` + """ + ... + def swapdims(self, dim0: _int, dim1: _int) -> Tensor: + r""" + swapdims(dim0, dim1) -> Tensor + + See :func:`torch.swapdims` + """ + ... + def swapdims_(self, dim0: _int, dim1: _int) -> Tensor: + r""" + swapdims_(dim0, dim1) -> Tensor + + In-place version of :meth:`~Tensor.swapdims` + """ + ... + def t(self) -> Tensor: + r""" + t() -> Tensor + + See :func:`torch.t` + """ + ... + def t_(self) -> Tensor: + r""" + t_() -> Tensor + + In-place version of :meth:`~Tensor.t` + """ + ... + def take(self, index: Tensor) -> Tensor: + r""" + take(indices) -> Tensor + + See :func:`torch.take` + """ + ... + def take_along_dim(self, indices: Tensor, dim: Optional[_int] = None) -> Tensor: + r""" + take_along_dim(indices, dim) -> Tensor + + See :func:`torch.take_along_dim` + """ + ... + def tan(self) -> Tensor: + r""" + tan() -> Tensor + + See :func:`torch.tan` + """ + ... + def tan_(self) -> Tensor: + r""" + tan_() -> Tensor + + In-place version of :meth:`~Tensor.tan` + """ + ... + def tanh(self) -> Tensor: + r""" + tanh() -> Tensor + + See :func:`torch.tanh` + """ + ... + def tanh_(self) -> Tensor: + r""" + tanh_() -> Tensor + + In-place version of :meth:`~Tensor.tanh` + """ + ... + @overload + def tensor_split(self, indices: Sequence[Union[_int, SymInt]], dim: _int = 0) -> Tuple[Tensor, ...]: + r""" + tensor_split(indices_or_sections, dim=0) -> List of Tensors + + See :func:`torch.tensor_split` + """ + ... + @overload + def tensor_split(self, tensor_indices_or_sections: Tensor, dim: _int = 0) -> Tuple[Tensor, ...]: + r""" + tensor_split(indices_or_sections, dim=0) -> List of Tensors + + See :func:`torch.tensor_split` + """ + ... + @overload + def tensor_split(self, sections: Union[_int, SymInt], dim: _int = 0) -> Tuple[Tensor, ...]: + r""" + tensor_split(indices_or_sections, dim=0) -> List of Tensors + + See :func:`torch.tensor_split` + """ + ... + @overload + def tile(self, dims: Sequence[Union[_int, SymInt]]) -> Tensor: + r""" + tile(dims) -> Tensor + + See :func:`torch.tile` + """ + ... + @overload + def tile(self, *dims: _int) -> Tensor: + r""" + tile(dims) -> Tensor + + See :func:`torch.tile` + """ + ... + @overload + def to(self, dtype: _dtype, non_blocking: _bool = False, copy: _bool = False, *, memory_format: Optional[torch.memory_format] = None) -> Tensor: + r""" + to(*args, **kwargs) -> Tensor + + Performs Tensor dtype and/or device conversion. A :class:`torch.dtype` and :class:`torch.device` are + inferred from the arguments of ``self.to(*args, **kwargs)``. + + .. note:: + + If the ``self`` Tensor already + has the correct :class:`torch.dtype` and :class:`torch.device`, then ``self`` is returned. + Otherwise, the returned tensor is a copy of ``self`` with the desired + :class:`torch.dtype` and :class:`torch.device`. + + Here are the ways to call ``to``: + + .. method:: to(dtype, non_blocking=False, copy=False, memory_format=torch.preserve_format) -> Tensor + :noindex: + + Returns a Tensor with the specified :attr:`dtype` + + Args: + memory_format (:class:`torch.memory_format`, optional): the desired memory format of + returned Tensor. Default: ``torch.preserve_format``. + + .. method:: to(device=None, dtype=None, non_blocking=False, copy=False, memory_format=torch.preserve_format) -> Tensor + :noindex: + + Returns a Tensor with the specified :attr:`device` and (optional) + :attr:`dtype`. 
If :attr:`dtype` is ``None`` it is inferred to be ``self.dtype``. + When :attr:`non_blocking`, tries to convert asynchronously with respect to + the host if possible, e.g., converting a CPU Tensor with pinned memory to a + CUDA Tensor. + When :attr:`copy` is set, a new Tensor is created even when the Tensor + already matches the desired conversion. + + Args: + memory_format (:class:`torch.memory_format`, optional): the desired memory format of + returned Tensor. Default: ``torch.preserve_format``. + + .. method:: to(other, non_blocking=False, copy=False) -> Tensor + :noindex: + + Returns a Tensor with same :class:`torch.dtype` and :class:`torch.device` as + the Tensor :attr:`other`. When :attr:`non_blocking`, tries to convert + asynchronously with respect to the host if possible, e.g., converting a CPU + Tensor with pinned memory to a CUDA Tensor. + When :attr:`copy` is set, a new Tensor is created even when the Tensor + already matches the desired conversion. + + Example:: + + >>> tensor = torch.randn(2, 2) # Initially dtype=float32, device=cpu + >>> tensor.to(torch.float64) + tensor([[-0.5044, 0.0005], + [ 0.3310, -0.0584]], dtype=torch.float64) + + >>> cuda0 = torch.device('cuda:0') + >>> tensor.to(cuda0) + tensor([[-0.5044, 0.0005], + [ 0.3310, -0.0584]], device='cuda:0') + + >>> tensor.to(cuda0, dtype=torch.float64) + tensor([[-0.5044, 0.0005], + [ 0.3310, -0.0584]], dtype=torch.float64, device='cuda:0') + + >>> other = torch.randn((), dtype=torch.float64, device=cuda0) + >>> tensor.to(other, non_blocking=True) + tensor([[-0.5044, 0.0005], + [ 0.3310, -0.0584]], dtype=torch.float64, device='cuda:0') + """ + ... + @overload + def to(self, device: Optional[DeviceLikeType] = None, dtype: Optional[_dtype] = None, non_blocking: _bool = False, copy: _bool = False, *, memory_format: Optional[torch.memory_format] = None) -> Tensor: + r""" + to(*args, **kwargs) -> Tensor + + Performs Tensor dtype and/or device conversion. A :class:`torch.dtype` and :class:`torch.device` are + inferred from the arguments of ``self.to(*args, **kwargs)``. + + .. note:: + + If the ``self`` Tensor already + has the correct :class:`torch.dtype` and :class:`torch.device`, then ``self`` is returned. + Otherwise, the returned tensor is a copy of ``self`` with the desired + :class:`torch.dtype` and :class:`torch.device`. + + Here are the ways to call ``to``: + + .. method:: to(dtype, non_blocking=False, copy=False, memory_format=torch.preserve_format) -> Tensor + :noindex: + + Returns a Tensor with the specified :attr:`dtype` + + Args: + memory_format (:class:`torch.memory_format`, optional): the desired memory format of + returned Tensor. Default: ``torch.preserve_format``. + + .. method:: to(device=None, dtype=None, non_blocking=False, copy=False, memory_format=torch.preserve_format) -> Tensor + :noindex: + + Returns a Tensor with the specified :attr:`device` and (optional) + :attr:`dtype`. If :attr:`dtype` is ``None`` it is inferred to be ``self.dtype``. + When :attr:`non_blocking`, tries to convert asynchronously with respect to + the host if possible, e.g., converting a CPU Tensor with pinned memory to a + CUDA Tensor. + When :attr:`copy` is set, a new Tensor is created even when the Tensor + already matches the desired conversion. + + Args: + memory_format (:class:`torch.memory_format`, optional): the desired memory format of + returned Tensor. Default: ``torch.preserve_format``. + + .. 
method:: to(other, non_blocking=False, copy=False) -> Tensor + :noindex: + + Returns a Tensor with same :class:`torch.dtype` and :class:`torch.device` as + the Tensor :attr:`other`. When :attr:`non_blocking`, tries to convert + asynchronously with respect to the host if possible, e.g., converting a CPU + Tensor with pinned memory to a CUDA Tensor. + When :attr:`copy` is set, a new Tensor is created even when the Tensor + already matches the desired conversion. + + Example:: + + >>> tensor = torch.randn(2, 2) # Initially dtype=float32, device=cpu + >>> tensor.to(torch.float64) + tensor([[-0.5044, 0.0005], + [ 0.3310, -0.0584]], dtype=torch.float64) + + >>> cuda0 = torch.device('cuda:0') + >>> tensor.to(cuda0) + tensor([[-0.5044, 0.0005], + [ 0.3310, -0.0584]], device='cuda:0') + + >>> tensor.to(cuda0, dtype=torch.float64) + tensor([[-0.5044, 0.0005], + [ 0.3310, -0.0584]], dtype=torch.float64, device='cuda:0') + + >>> other = torch.randn((), dtype=torch.float64, device=cuda0) + >>> tensor.to(other, non_blocking=True) + tensor([[-0.5044, 0.0005], + [ 0.3310, -0.0584]], dtype=torch.float64, device='cuda:0') + """ + ... + @overload + def to(self, other: Tensor, non_blocking: _bool = False, copy: _bool = False, *, memory_format: Optional[torch.memory_format] = None) -> Tensor: + r""" + to(*args, **kwargs) -> Tensor + + Performs Tensor dtype and/or device conversion. A :class:`torch.dtype` and :class:`torch.device` are + inferred from the arguments of ``self.to(*args, **kwargs)``. + + .. note:: + + If the ``self`` Tensor already + has the correct :class:`torch.dtype` and :class:`torch.device`, then ``self`` is returned. + Otherwise, the returned tensor is a copy of ``self`` with the desired + :class:`torch.dtype` and :class:`torch.device`. + + Here are the ways to call ``to``: + + .. method:: to(dtype, non_blocking=False, copy=False, memory_format=torch.preserve_format) -> Tensor + :noindex: + + Returns a Tensor with the specified :attr:`dtype` + + Args: + memory_format (:class:`torch.memory_format`, optional): the desired memory format of + returned Tensor. Default: ``torch.preserve_format``. + + .. method:: to(device=None, dtype=None, non_blocking=False, copy=False, memory_format=torch.preserve_format) -> Tensor + :noindex: + + Returns a Tensor with the specified :attr:`device` and (optional) + :attr:`dtype`. If :attr:`dtype` is ``None`` it is inferred to be ``self.dtype``. + When :attr:`non_blocking`, tries to convert asynchronously with respect to + the host if possible, e.g., converting a CPU Tensor with pinned memory to a + CUDA Tensor. + When :attr:`copy` is set, a new Tensor is created even when the Tensor + already matches the desired conversion. + + Args: + memory_format (:class:`torch.memory_format`, optional): the desired memory format of + returned Tensor. Default: ``torch.preserve_format``. + + .. method:: to(other, non_blocking=False, copy=False) -> Tensor + :noindex: + + Returns a Tensor with same :class:`torch.dtype` and :class:`torch.device` as + the Tensor :attr:`other`. When :attr:`non_blocking`, tries to convert + asynchronously with respect to the host if possible, e.g., converting a CPU + Tensor with pinned memory to a CUDA Tensor. + When :attr:`copy` is set, a new Tensor is created even when the Tensor + already matches the desired conversion. 
+ + Example:: + + >>> tensor = torch.randn(2, 2) # Initially dtype=float32, device=cpu + >>> tensor.to(torch.float64) + tensor([[-0.5044, 0.0005], + [ 0.3310, -0.0584]], dtype=torch.float64) + + >>> cuda0 = torch.device('cuda:0') + >>> tensor.to(cuda0) + tensor([[-0.5044, 0.0005], + [ 0.3310, -0.0584]], device='cuda:0') + + >>> tensor.to(cuda0, dtype=torch.float64) + tensor([[-0.5044, 0.0005], + [ 0.3310, -0.0584]], dtype=torch.float64, device='cuda:0') + + >>> other = torch.randn((), dtype=torch.float64, device=cuda0) + >>> tensor.to(other, non_blocking=True) + tensor([[-0.5044, 0.0005], + [ 0.3310, -0.0584]], dtype=torch.float64, device='cuda:0') + """ + ... + def to_dense(self, dtype: Optional[_dtype] = None, *, masked_grad: Optional[_bool] = None) -> Tensor: + r""" + to_dense(dtype=None, *, masked_grad=True) -> Tensor + + Creates a strided copy of :attr:`self` if :attr:`self` is not a strided tensor, otherwise returns :attr:`self`. + + Keyword args: + {dtype} + masked_grad (bool, optional): If set to ``True`` (default) and + :attr:`self` has a sparse layout then the backward of + :meth:`to_dense` returns ``grad.sparse_mask(self)``. + + Example:: + + >>> s = torch.sparse_coo_tensor( + ... torch.tensor([[1, 1], + ... [0, 2]]), + ... torch.tensor([9, 10]), + ... size=(3, 3)) + >>> s.to_dense() + tensor([[ 0, 0, 0], + [ 9, 0, 10], + [ 0, 0, 0]]) + """ + ... + def to_mkldnn(self, dtype: Optional[_dtype] = None) -> Tensor: + r""" + to_mkldnn() -> Tensor + Returns a copy of the tensor in ``torch.mkldnn`` layout. + """ + ... + def to_padded_tensor(self, padding: _float, output_size: Optional[Sequence[Union[_int, SymInt]]] = None) -> Tensor: + r""" + to_padded_tensor(padding, output_size=None) -> Tensor + See :func:`to_padded_tensor` + """ + ... + @overload + def to_sparse(self, *, layout: Optional[_layout] = None, blocksize: Optional[Union[_int, _size]] = None, dense_dim: Optional[_int] = None) -> Tensor: + r""" + to_sparse(sparseDims) -> Tensor + + Returns a sparse copy of the tensor. PyTorch supports sparse tensors in + :ref:`coordinate format `. + + Args: + sparseDims (int, optional): the number of sparse dimensions to include in the new sparse tensor + + Example:: + + >>> d = torch.tensor([[0, 0, 0], [9, 0, 10], [0, 0, 0]]) + >>> d + tensor([[ 0, 0, 0], + [ 9, 0, 10], + [ 0, 0, 0]]) + >>> d.to_sparse() + tensor(indices=tensor([[1, 1], + [0, 2]]), + values=tensor([ 9, 10]), + size=(3, 3), nnz=2, layout=torch.sparse_coo) + >>> d.to_sparse(1) + tensor(indices=tensor([[1]]), + values=tensor([[ 9, 0, 10]]), + size=(3, 3), nnz=1, layout=torch.sparse_coo) + + .. method:: to_sparse(*, layout=None, blocksize=None, dense_dim=None) -> Tensor + :noindex: + + Returns a sparse tensor with the specified layout and blocksize. If + the :attr:`self` is strided, the number of dense dimensions could be + specified, and a hybrid sparse tensor will be created, with + `dense_dim` dense dimensions and `self.dim() - 2 - dense_dim` batch + dimension. + + .. note:: If the :attr:`self` layout and blocksize parameters match + with the specified layout and blocksize, return + :attr:`self`. Otherwise, return a sparse tensor copy of + :attr:`self`. + + Args: + + layout (:class:`torch.layout`, optional): The desired sparse + layout. One of ``torch.sparse_coo``, ``torch.sparse_csr``, + ``torch.sparse_csc``, ``torch.sparse_bsr``, or + ``torch.sparse_bsc``. Default: if ``None``, + ``torch.sparse_coo``. + + blocksize (list, tuple, :class:`torch.Size`, optional): Block size + of the resulting BSR or BSC tensor. 
For other layouts, + specifying the block size that is not ``None`` will result in a + RuntimeError exception. A block size must be a tuple of length + two such that its items evenly divide the two sparse dimensions. + + dense_dim (int, optional): Number of dense dimensions of the + resulting CSR, CSC, BSR or BSC tensor. This argument should be + used only if :attr:`self` is a strided tensor, and must be a + value between 0 and dimension of :attr:`self` tensor minus two. + + Example:: + + >>> x = torch.tensor([[1, 0], [0, 0], [2, 3]]) + >>> x.to_sparse(layout=torch.sparse_coo) + tensor(indices=tensor([[0, 2, 2], + [0, 0, 1]]), + values=tensor([1, 2, 3]), + size=(3, 2), nnz=3, layout=torch.sparse_coo) + >>> x.to_sparse(layout=torch.sparse_bsr, blocksize=(1, 2)) + tensor(crow_indices=tensor([0, 1, 1, 2]), + col_indices=tensor([0, 0]), + values=tensor([[[1, 0]], + [[2, 3]]]), size=(3, 2), nnz=2, layout=torch.sparse_bsr) + >>> x.to_sparse(layout=torch.sparse_bsr, blocksize=(2, 1)) + RuntimeError: Tensor size(-2) 3 needs to be divisible by blocksize[0] 2 + >>> x.to_sparse(layout=torch.sparse_csr, blocksize=(3, 1)) + RuntimeError: to_sparse for Strided to SparseCsr conversion does not use specified blocksize + + >>> x = torch.tensor([[[1], [0]], [[0], [0]], [[2], [3]]]) + >>> x.to_sparse(layout=torch.sparse_csr, dense_dim=1) + tensor(crow_indices=tensor([0, 1, 1, 3]), + col_indices=tensor([0, 0, 1]), + values=tensor([[1], + [2], + [3]]), size=(3, 2, 1), nnz=3, layout=torch.sparse_csr) + """ + ... + @overload + def to_sparse(self, sparse_dim: _int) -> Tensor: + r""" + to_sparse(sparseDims) -> Tensor + + Returns a sparse copy of the tensor. PyTorch supports sparse tensors in + :ref:`coordinate format `. + + Args: + sparseDims (int, optional): the number of sparse dimensions to include in the new sparse tensor + + Example:: + + >>> d = torch.tensor([[0, 0, 0], [9, 0, 10], [0, 0, 0]]) + >>> d + tensor([[ 0, 0, 0], + [ 9, 0, 10], + [ 0, 0, 0]]) + >>> d.to_sparse() + tensor(indices=tensor([[1, 1], + [0, 2]]), + values=tensor([ 9, 10]), + size=(3, 3), nnz=2, layout=torch.sparse_coo) + >>> d.to_sparse(1) + tensor(indices=tensor([[1]]), + values=tensor([[ 9, 0, 10]]), + size=(3, 3), nnz=1, layout=torch.sparse_coo) + + .. method:: to_sparse(*, layout=None, blocksize=None, dense_dim=None) -> Tensor + :noindex: + + Returns a sparse tensor with the specified layout and blocksize. If + the :attr:`self` is strided, the number of dense dimensions could be + specified, and a hybrid sparse tensor will be created, with + `dense_dim` dense dimensions and `self.dim() - 2 - dense_dim` batch + dimension. + + .. note:: If the :attr:`self` layout and blocksize parameters match + with the specified layout and blocksize, return + :attr:`self`. Otherwise, return a sparse tensor copy of + :attr:`self`. + + Args: + + layout (:class:`torch.layout`, optional): The desired sparse + layout. One of ``torch.sparse_coo``, ``torch.sparse_csr``, + ``torch.sparse_csc``, ``torch.sparse_bsr``, or + ``torch.sparse_bsc``. Default: if ``None``, + ``torch.sparse_coo``. + + blocksize (list, tuple, :class:`torch.Size`, optional): Block size + of the resulting BSR or BSC tensor. For other layouts, + specifying the block size that is not ``None`` will result in a + RuntimeError exception. A block size must be a tuple of length + two such that its items evenly divide the two sparse dimensions. + + dense_dim (int, optional): Number of dense dimensions of the + resulting CSR, CSC, BSR or BSC tensor. 
This argument should be + used only if :attr:`self` is a strided tensor, and must be a + value between 0 and dimension of :attr:`self` tensor minus two. + + Example:: + + >>> x = torch.tensor([[1, 0], [0, 0], [2, 3]]) + >>> x.to_sparse(layout=torch.sparse_coo) + tensor(indices=tensor([[0, 2, 2], + [0, 0, 1]]), + values=tensor([1, 2, 3]), + size=(3, 2), nnz=3, layout=torch.sparse_coo) + >>> x.to_sparse(layout=torch.sparse_bsr, blocksize=(1, 2)) + tensor(crow_indices=tensor([0, 1, 1, 2]), + col_indices=tensor([0, 0]), + values=tensor([[[1, 0]], + [[2, 3]]]), size=(3, 2), nnz=2, layout=torch.sparse_bsr) + >>> x.to_sparse(layout=torch.sparse_bsr, blocksize=(2, 1)) + RuntimeError: Tensor size(-2) 3 needs to be divisible by blocksize[0] 2 + >>> x.to_sparse(layout=torch.sparse_csr, blocksize=(3, 1)) + RuntimeError: to_sparse for Strided to SparseCsr conversion does not use specified blocksize + + >>> x = torch.tensor([[[1], [0]], [[0], [0]], [[2], [3]]]) + >>> x.to_sparse(layout=torch.sparse_csr, dense_dim=1) + tensor(crow_indices=tensor([0, 1, 1, 3]), + col_indices=tensor([0, 0, 1]), + values=tensor([[1], + [2], + [3]]), size=(3, 2, 1), nnz=3, layout=torch.sparse_csr) + """ + ... + def to_sparse_bsc(self, blocksize: Union[_int, _size], dense_dim: Optional[_int] = None) -> Tensor: + r""" + to_sparse_bsc(blocksize, dense_dim) -> Tensor + + Convert a tensor to a block sparse column (BSC) storage format of + given blocksize. If the :attr:`self` is strided, then the number of + dense dimensions could be specified, and a hybrid BSC tensor will be + created, with `dense_dim` dense dimensions and `self.dim() - 2 - + dense_dim` batch dimension. + + Args: + + blocksize (list, tuple, :class:`torch.Size`, optional): Block size + of the resulting BSC tensor. A block size must be a tuple of + length two such that its items evenly divide the two sparse + dimensions. + + dense_dim (int, optional): Number of dense dimensions of the + resulting BSC tensor. This argument should be used only if + :attr:`self` is a strided tensor, and must be a value between 0 + and dimension of :attr:`self` tensor minus two. + + Example:: + + >>> dense = torch.randn(10, 10) + >>> sparse = dense.to_sparse_csr() + >>> sparse_bsc = sparse.to_sparse_bsc((5, 5)) + >>> sparse_bsc.row_indices() + tensor([0, 1, 0, 1]) + + >>> dense = torch.zeros(4, 3, 1) + >>> dense[0:2, 0] = dense[0:2, 2] = dense[2:4, 1] = 1 + >>> dense.to_sparse_bsc((2, 1), 1) + tensor(ccol_indices=tensor([0, 1, 2, 3]), + row_indices=tensor([0, 1, 0]), + values=tensor([[[[1.]], + + [[1.]]], + + + [[[1.]], + + [[1.]]], + + + [[[1.]], + + [[1.]]]]), size=(4, 3, 1), nnz=3, + layout=torch.sparse_bsc) + """ + ... + def to_sparse_bsr(self, blocksize: Union[_int, _size], dense_dim: Optional[_int] = None) -> Tensor: + r""" + to_sparse_bsr(blocksize, dense_dim) -> Tensor + + Convert a tensor to a block sparse row (BSR) storage format of given + blocksize. If the :attr:`self` is strided, then the number of dense + dimensions could be specified, and a hybrid BSR tensor will be + created, with `dense_dim` dense dimensions and `self.dim() - 2 - + dense_dim` batch dimension. + + Args: + + blocksize (list, tuple, :class:`torch.Size`, optional): Block size + of the resulting BSR tensor. A block size must be a tuple of + length two such that its items evenly divide the two sparse + dimensions. + + dense_dim (int, optional): Number of dense dimensions of the + resulting BSR tensor. 
This argument should be used only if + :attr:`self` is a strided tensor, and must be a value between 0 + and dimension of :attr:`self` tensor minus two. + + Example:: + + >>> dense = torch.randn(10, 10) + >>> sparse = dense.to_sparse_csr() + >>> sparse_bsr = sparse.to_sparse_bsr((5, 5)) + >>> sparse_bsr.col_indices() + tensor([0, 1, 0, 1]) + + >>> dense = torch.zeros(4, 3, 1) + >>> dense[0:2, 0] = dense[0:2, 2] = dense[2:4, 1] = 1 + >>> dense.to_sparse_bsr((2, 1), 1) + tensor(crow_indices=tensor([0, 2, 3]), + col_indices=tensor([0, 2, 1]), + values=tensor([[[[1.]], + + [[1.]]], + + + [[[1.]], + + [[1.]]], + + + [[[1.]], + + [[1.]]]]), size=(4, 3, 1), nnz=3, + layout=torch.sparse_bsr) + """ + ... + def to_sparse_csc(self, dense_dim: Optional[_int] = None) -> Tensor: + r""" + to_sparse_csc() -> Tensor + + Convert a tensor to compressed column storage (CSC) format. Except + for strided tensors, only works with 2D tensors. If the :attr:`self` + is strided, then the number of dense dimensions could be specified, + and a hybrid CSC tensor will be created, with `dense_dim` dense + dimensions and `self.dim() - 2 - dense_dim` batch dimension. + + Args: + + dense_dim (int, optional): Number of dense dimensions of the + resulting CSC tensor. This argument should be used only if + :attr:`self` is a strided tensor, and must be a value between 0 + and dimension of :attr:`self` tensor minus two. + + Example:: + + >>> dense = torch.randn(5, 5) + >>> sparse = dense.to_sparse_csc() + >>> sparse._nnz() + 25 + + >>> dense = torch.zeros(3, 3, 1, 1) + >>> dense[0, 0] = dense[1, 2] = dense[2, 1] = 1 + >>> dense.to_sparse_csc(dense_dim=2) + tensor(ccol_indices=tensor([0, 1, 2, 3]), + row_indices=tensor([0, 2, 1]), + values=tensor([[[1.]], + + [[1.]], + + [[1.]]]), size=(3, 3, 1, 1), nnz=3, + layout=torch.sparse_csc) + """ + ... + def to_sparse_csr(self, dense_dim: Optional[_int] = None) -> Tensor: + r""" + to_sparse_csr(dense_dim=None) -> Tensor + + Convert a tensor to compressed row storage format (CSR). Except for + strided tensors, only works with 2D tensors. If the :attr:`self` is + strided, then the number of dense dimensions could be specified, and a + hybrid CSR tensor will be created, with `dense_dim` dense dimensions + and `self.dim() - 2 - dense_dim` batch dimension. + + Args: + + dense_dim (int, optional): Number of dense dimensions of the + resulting CSR tensor. This argument should be used only if + :attr:`self` is a strided tensor, and must be a value between 0 + and dimension of :attr:`self` tensor minus two. + + Example:: + + >>> dense = torch.randn(5, 5) + >>> sparse = dense.to_sparse_csr() + >>> sparse._nnz() + 25 + + >>> dense = torch.zeros(3, 3, 1, 1) + >>> dense[0, 0] = dense[1, 2] = dense[2, 1] = 1 + >>> dense.to_sparse_csr(dense_dim=2) + tensor(crow_indices=tensor([0, 1, 2, 3]), + col_indices=tensor([0, 2, 1]), + values=tensor([[[1.]], + + [[1.]], + + [[1.]]]), size=(3, 3, 1, 1), nnz=3, + layout=torch.sparse_csr) + """ + ... + def tolist(self) -> List: + r""" + tolist() -> list or number + + Returns the tensor as a (nested) list. For scalars, a standard + Python number is returned, just like with :meth:`~Tensor.item`. + Tensors are automatically moved to the CPU first if necessary. + + This operation is not differentiable. + + Examples:: + + >>> a = torch.randn(2, 2) + >>> a.tolist() + [[0.012766935862600803, 0.5415473580360413], + [-0.08909505605697632, 0.7729271650314331]] + >>> a[0,0].tolist() + 0.012766935862600803 + """ + ... 
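+    # NOTE: the conversion methods documented above compose into a simple
+    # dense <-> sparse round trip; the following is an illustrative REPL sketch only
+    # (it assumes a standard ``torch`` install and is not taken from the upstream
+    # docstrings).
+    #
+    #   >>> import torch
+    #   >>> csr = torch.eye(3).to_sparse_csr()          # strided -> compressed row storage
+    #   >>> csr.layout
+    #   torch.sparse_csr
+    #   >>> torch.equal(csr.to_dense(), torch.eye(3))   # lossless round trip back to strided
+    #   True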
+ def topk(self, k: Union[_int, SymInt], dim: _int = -1, largest: _bool = True, sorted: _bool = True) -> torch.return_types.topk: + r""" + topk(k, dim=None, largest=True, sorted=True) -> (Tensor, LongTensor) + + See :func:`torch.topk` + """ + ... + def trace(self) -> Tensor: + r""" + trace() -> Tensor + + See :func:`torch.trace` + """ + ... + @overload + def transpose(self, dim0: _int, dim1: _int) -> Tensor: + r""" + transpose(dim0, dim1) -> Tensor + + See :func:`torch.transpose` + """ + ... + @overload + def transpose(self, dim0: Union[str, ellipsis, None], dim1: Union[str, ellipsis, None]) -> Tensor: + r""" + transpose(dim0, dim1) -> Tensor + + See :func:`torch.transpose` + """ + ... + def transpose_(self, dim0: _int, dim1: _int) -> Tensor: + r""" + transpose_(dim0, dim1) -> Tensor + + In-place version of :meth:`~Tensor.transpose` + """ + ... + def triangular_solve(self, A: Tensor, upper: _bool = True, transpose: _bool = False, unitriangular: _bool = False) -> torch.return_types.triangular_solve: + r""" + triangular_solve(A, upper=True, transpose=False, unitriangular=False) -> (Tensor, Tensor) + + See :func:`torch.triangular_solve` + """ + ... + def tril(self, diagonal: _int = 0) -> Tensor: + r""" + tril(diagonal=0) -> Tensor + + See :func:`torch.tril` + """ + ... + def tril_(self, diagonal: _int = 0) -> Tensor: + r""" + tril_(diagonal=0) -> Tensor + + In-place version of :meth:`~Tensor.tril` + """ + ... + def triu(self, diagonal: _int = 0) -> Tensor: + r""" + triu(diagonal=0) -> Tensor + + See :func:`torch.triu` + """ + ... + def triu_(self, diagonal: _int = 0) -> Tensor: + r""" + triu_(diagonal=0) -> Tensor + + In-place version of :meth:`~Tensor.triu` + """ + ... + def true_divide(self, other: Union[Tensor, Number, torch.SymInt, torch.SymFloat], *, out: Optional[Tensor] = None) -> Tensor: + r""" + true_divide(value) -> Tensor + + See :func:`torch.true_divide` + """ + ... + def true_divide_(self, other: Union[Tensor, Number, torch.SymInt, torch.SymFloat]) -> Tensor: + r""" + true_divide_(value) -> Tensor + + In-place version of :meth:`~Tensor.true_divide_` + """ + ... + def trunc(self) -> Tensor: + r""" + trunc() -> Tensor + + See :func:`torch.trunc` + """ + ... + def trunc_(self) -> Tensor: + r""" + trunc_() -> Tensor + + In-place version of :meth:`~Tensor.trunc` + """ + ... + @overload + def type(self, dtype: None = None, non_blocking: _bool = False) -> str: + r""" + type(dtype=None, non_blocking=False, **kwargs) -> str or Tensor + Returns the type if `dtype` is not provided, else casts this object to + the specified type. + + If this is already of the correct type, no copy is performed and the + original object is returned. + + Args: + dtype (dtype or string): The desired type + non_blocking (bool): If ``True``, and the source is in pinned memory + and destination is on the GPU or vice versa, the copy is performed + asynchronously with respect to the host. Otherwise, the argument + has no effect. + **kwargs: For compatibility, may contain the key ``async`` in place of + the ``non_blocking`` argument. The ``async`` arg is deprecated. + """ + ... + @overload + def type(self, dtype: Union[str, _dtype], non_blocking: _bool = False) -> Tensor: + r""" + type(dtype=None, non_blocking=False, **kwargs) -> str or Tensor + Returns the type if `dtype` is not provided, else casts this object to + the specified type. + + If this is already of the correct type, no copy is performed and the + original object is returned. 
+ + Args: + dtype (dtype or string): The desired type + non_blocking (bool): If ``True``, and the source is in pinned memory + and destination is on the GPU or vice versa, the copy is performed + asynchronously with respect to the host. Otherwise, the argument + has no effect. + **kwargs: For compatibility, may contain the key ``async`` in place of + the ``non_blocking`` argument. The ``async`` arg is deprecated. + """ + ... + def type_as(self, other: Tensor) -> Tensor: + r""" + type_as(tensor) -> Tensor + + Returns this tensor cast to the type of the given tensor. + + This is a no-op if the tensor is already of the correct type. This is + equivalent to ``self.type(tensor.type())`` + + Args: + tensor (Tensor): the tensor which has the desired type + """ + ... + @overload + def unbind(self, dim: _int = 0) -> Tuple[Tensor, ...]: + r""" + unbind(dim=0) -> seq + + See :func:`torch.unbind` + """ + ... + @overload + def unbind(self, dim: Union[str, ellipsis, None]) -> Tuple[Tensor, ...]: + r""" + unbind(dim=0) -> seq + + See :func:`torch.unbind` + """ + ... + @overload + def unflatten(self, dim: Union[str, ellipsis, None], sizes: Sequence[Union[_int, SymInt]], names: Sequence[Union[str, ellipsis, None]]) -> Tensor: ... + @overload + def unflatten(self, dim: _int, sizes: Sequence[Union[_int, SymInt]]) -> Tensor: ... + def unfold(self, dimension: _int, size: _int, step: _int) -> Tensor: + r""" + unfold(dimension, size, step) -> Tensor + + Returns a view of the original tensor which contains all slices of size :attr:`size` from + :attr:`self` tensor in the dimension :attr:`dimension`. + + Step between two slices is given by :attr:`step`. + + If `sizedim` is the size of dimension :attr:`dimension` for :attr:`self`, the size of + dimension :attr:`dimension` in the returned tensor will be + `(sizedim - size) / step + 1`. + + An additional dimension of size :attr:`size` is appended in the returned tensor. + + Args: + dimension (int): dimension in which unfolding happens + size (int): the size of each slice that is unfolded + step (int): the step between each slice + + Example:: + + >>> x = torch.arange(1., 8) + >>> x + tensor([ 1., 2., 3., 4., 5., 6., 7.]) + >>> x.unfold(0, 2, 1) + tensor([[ 1., 2.], + [ 2., 3.], + [ 3., 4.], + [ 4., 5.], + [ 5., 6.], + [ 6., 7.]]) + >>> x.unfold(0, 2, 2) + tensor([[ 1., 2.], + [ 3., 4.], + [ 5., 6.]]) + """ + ... + def uniform_(self, from_: _float = 0, to: _float = 1, *, generator: Optional[Generator] = None) -> Tensor: + r""" + uniform_(from=0, to=1, *, generator=None) -> Tensor + + Fills :attr:`self` tensor with numbers sampled from the continuous uniform + distribution: + + .. math:: + f(x) = \dfrac{1}{\text{to} - \text{from}} + """ + ... + def unsafe_chunk(self, chunks: _int, dim: _int = 0) -> Tuple[Tensor, ...]: + r""" + unsafe_chunk(chunks, dim=0) -> List of Tensors + + See :func:`torch.unsafe_chunk` + """ + ... + def unsafe_split(self, split_size: Union[_int, SymInt], dim: _int = 0) -> Tuple[Tensor, ...]: + r""" + unsafe_split(split_size, dim=0) -> List of Tensors + + See :func:`torch.unsafe_split` + """ + ... + def unsafe_split_with_sizes(self, split_sizes: Sequence[Union[_int, SymInt]], dim: _int = 0) -> Tuple[Tensor, ...]: ... + def unsqueeze(self, dim: _int) -> Tensor: + r""" + unsqueeze(dim) -> Tensor + + See :func:`torch.unsqueeze` + """ + ... + def unsqueeze_(self, dim: _int) -> Tensor: + r""" + unsqueeze_(dim) -> Tensor + + In-place version of :meth:`~Tensor.unsqueeze` + """ + ... 
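+    # NOTE: the ``type``/``type_as`` and ``unsqueeze`` stubs above have no inline
+    # examples; the following is an illustrative REPL sketch only (it assumes a
+    # standard ``torch`` install and is not taken from the upstream docstrings).
+    #
+    #   >>> import torch
+    #   >>> t = torch.zeros(3, dtype=torch.int32)
+    #   >>> t.type()                             # no dtype given: returns the type string
+    #   'torch.IntTensor'
+    #   >>> t.type(torch.float64).dtype          # dtype given: casts and returns a Tensor
+    #   torch.float64
+    #   >>> t.type_as(torch.zeros(1)).dtype      # cast to the other tensor's dtype
+    #   torch.float32
+    #   >>> t.unsqueeze(0).shape                 # insert a new leading dimension
+    #   torch.Size([1, 3])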
+ def values(self) -> Tensor: + r""" + values() -> Tensor + + Return the values tensor of a :ref:`sparse COO tensor `. + + .. warning:: + Throws an error if :attr:`self` is not a sparse COO tensor. + + See also :meth:`Tensor.indices`. + + .. note:: + This method can only be called on a coalesced sparse tensor. See + :meth:`Tensor.coalesce` for details. + """ + ... + @overload + def var(self, dim: Optional[Union[_int, _size]], unbiased: _bool = True, keepdim: _bool = False) -> Tensor: + r""" + var(dim=None, *, correction=1, keepdim=False) -> Tensor + + See :func:`torch.var` + """ + ... + @overload + def var(self, dim: Optional[Union[_int, _size]] = None, *, correction: Optional[Union[Number, _complex]] = None, keepdim: _bool = False) -> Tensor: + r""" + var(dim=None, *, correction=1, keepdim=False) -> Tensor + + See :func:`torch.var` + """ + ... + @overload + def var(self, unbiased: _bool = True) -> Tensor: + r""" + var(dim=None, *, correction=1, keepdim=False) -> Tensor + + See :func:`torch.var` + """ + ... + @overload + def var(self, dim: Sequence[Union[str, ellipsis, None]], unbiased: _bool = True, keepdim: _bool = False) -> Tensor: + r""" + var(dim=None, *, correction=1, keepdim=False) -> Tensor + + See :func:`torch.var` + """ + ... + @overload + def var(self, dim: Sequence[Union[str, ellipsis, None]], *, correction: Optional[Union[Number, _complex]] = None, keepdim: _bool = False) -> Tensor: + r""" + var(dim=None, *, correction=1, keepdim=False) -> Tensor + + See :func:`torch.var` + """ + ... + def vdot(self, other: Tensor) -> Tensor: + r""" + vdot(other) -> Tensor + + See :func:`torch.vdot` + """ + ... + @overload + def view(self, dtype: _dtype) -> Tensor: + r""" + view(*shape) -> Tensor + + Returns a new tensor with the same data as the :attr:`self` tensor but of a + different :attr:`shape`. + + The returned tensor shares the same data and must have the same number + of elements, but may have a different size. For a tensor to be viewed, the new + view size must be compatible with its original size and stride, i.e., each new + view dimension must either be a subspace of an original dimension, or only span + across original dimensions :math:`d, d+1, \dots, d+k` that satisfy the following + contiguity-like condition that :math:`\forall i = d, \dots, d+k-1`, + + .. math:: + + \text{stride}[i] = \text{stride}[i+1] \times \text{size}[i+1] + + Otherwise, it will not be possible to view :attr:`self` tensor as :attr:`shape` + without copying it (e.g., via :meth:`contiguous`). When it is unclear whether a + :meth:`view` can be performed, it is advisable to use :meth:`reshape`, which + returns a view if the shapes are compatible, and copies (equivalent to calling + :meth:`contiguous`) otherwise. + + Args: + shape (torch.Size or int...): the desired size + + Example:: + + >>> x = torch.randn(4, 4) + >>> x.size() + torch.Size([4, 4]) + >>> y = x.view(16) + >>> y.size() + torch.Size([16]) + >>> z = x.view(-1, 8) # the size -1 is inferred from other dimensions + >>> z.size() + torch.Size([2, 8]) + + >>> a = torch.randn(1, 2, 3, 4) + >>> a.size() + torch.Size([1, 2, 3, 4]) + >>> b = a.transpose(1, 2) # Swaps 2nd and 3rd dimension + >>> b.size() + torch.Size([1, 3, 2, 4]) + >>> c = a.view(1, 3, 2, 4) # Does not change tensor layout in memory + >>> c.size() + torch.Size([1, 3, 2, 4]) + >>> torch.equal(b, c) + False + + + .. method:: view(dtype) -> Tensor + :noindex: + + Returns a new tensor with the same data as the :attr:`self` tensor but of a + different :attr:`dtype`. 
+ + If the element size of :attr:`dtype` is different than that of ``self.dtype``, + then the size of the last dimension of the output will be scaled + proportionally. For instance, if :attr:`dtype` element size is twice that of + ``self.dtype``, then each pair of elements in the last dimension of + :attr:`self` will be combined, and the size of the last dimension of the output + will be half that of :attr:`self`. If :attr:`dtype` element size is half that + of ``self.dtype``, then each element in the last dimension of :attr:`self` will + be split in two, and the size of the last dimension of the output will be + double that of :attr:`self`. For this to be possible, the following conditions + must be true: + + * ``self.dim()`` must be greater than 0. + * ``self.stride(-1)`` must be 1. + + Additionally, if the element size of :attr:`dtype` is greater than that of + ``self.dtype``, the following conditions must be true as well: + + * ``self.size(-1)`` must be divisible by the ratio between the element + sizes of the dtypes. + * ``self.storage_offset()`` must be divisible by the ratio between the + element sizes of the dtypes. + * The strides of all dimensions, except the last dimension, must be + divisible by the ratio between the element sizes of the dtypes. + + If any of the above conditions are not met, an error is thrown. + + .. warning:: + + This overload is not supported by TorchScript, and using it in a Torchscript + program will cause undefined behavior. + + + Args: + dtype (:class:`torch.dtype`): the desired dtype + + Example:: + + >>> x = torch.randn(4, 4) + >>> x + tensor([[ 0.9482, -0.0310, 1.4999, -0.5316], + [-0.1520, 0.7472, 0.5617, -0.8649], + [-2.4724, -0.0334, -0.2976, -0.8499], + [-0.2109, 1.9913, -0.9607, -0.6123]]) + >>> x.dtype + torch.float32 + + >>> y = x.view(torch.int32) + >>> y + tensor([[ 1064483442, -1124191867, 1069546515, -1089989247], + [-1105482831, 1061112040, 1057999968, -1084397505], + [-1071760287, -1123489973, -1097310419, -1084649136], + [-1101533110, 1073668768, -1082790149, -1088634448]], + dtype=torch.int32) + >>> y[0, 0] = 1000000000 + >>> x + tensor([[ 0.0047, -0.0310, 1.4999, -0.5316], + [-0.1520, 0.7472, 0.5617, -0.8649], + [-2.4724, -0.0334, -0.2976, -0.8499], + [-0.2109, 1.9913, -0.9607, -0.6123]]) + + >>> x.view(torch.cfloat) + tensor([[ 0.0047-0.0310j, 1.4999-0.5316j], + [-0.1520+0.7472j, 0.5617-0.8649j], + [-2.4724-0.0334j, -0.2976-0.8499j], + [-0.2109+1.9913j, -0.9607-0.6123j]]) + >>> x.view(torch.cfloat).size() + torch.Size([4, 2]) + + >>> x.view(torch.uint8) + tensor([[ 0, 202, 154, 59, 182, 243, 253, 188, 185, 252, 191, 63, 240, 22, + 8, 191], + [227, 165, 27, 190, 128, 72, 63, 63, 146, 203, 15, 63, 22, 106, + 93, 191], + [205, 59, 30, 192, 112, 206, 8, 189, 7, 95, 152, 190, 12, 147, + 89, 191], + [ 43, 246, 87, 190, 235, 226, 254, 63, 111, 240, 117, 191, 177, 191, + 28, 191]], dtype=torch.uint8) + >>> x.view(torch.uint8).size() + torch.Size([4, 16]) + """ + ... + @overload + def view(self, size: Sequence[Union[_int, SymInt]]) -> Tensor: + r""" + view(*shape) -> Tensor + + Returns a new tensor with the same data as the :attr:`self` tensor but of a + different :attr:`shape`. + + The returned tensor shares the same data and must have the same number + of elements, but may have a different size. 
For a tensor to be viewed, the new + view size must be compatible with its original size and stride, i.e., each new + view dimension must either be a subspace of an original dimension, or only span + across original dimensions :math:`d, d+1, \dots, d+k` that satisfy the following + contiguity-like condition that :math:`\forall i = d, \dots, d+k-1`, + + .. math:: + + \text{stride}[i] = \text{stride}[i+1] \times \text{size}[i+1] + + Otherwise, it will not be possible to view :attr:`self` tensor as :attr:`shape` + without copying it (e.g., via :meth:`contiguous`). When it is unclear whether a + :meth:`view` can be performed, it is advisable to use :meth:`reshape`, which + returns a view if the shapes are compatible, and copies (equivalent to calling + :meth:`contiguous`) otherwise. + + Args: + shape (torch.Size or int...): the desired size + + Example:: + + >>> x = torch.randn(4, 4) + >>> x.size() + torch.Size([4, 4]) + >>> y = x.view(16) + >>> y.size() + torch.Size([16]) + >>> z = x.view(-1, 8) # the size -1 is inferred from other dimensions + >>> z.size() + torch.Size([2, 8]) + + >>> a = torch.randn(1, 2, 3, 4) + >>> a.size() + torch.Size([1, 2, 3, 4]) + >>> b = a.transpose(1, 2) # Swaps 2nd and 3rd dimension + >>> b.size() + torch.Size([1, 3, 2, 4]) + >>> c = a.view(1, 3, 2, 4) # Does not change tensor layout in memory + >>> c.size() + torch.Size([1, 3, 2, 4]) + >>> torch.equal(b, c) + False + + + .. method:: view(dtype) -> Tensor + :noindex: + + Returns a new tensor with the same data as the :attr:`self` tensor but of a + different :attr:`dtype`. + + If the element size of :attr:`dtype` is different than that of ``self.dtype``, + then the size of the last dimension of the output will be scaled + proportionally. For instance, if :attr:`dtype` element size is twice that of + ``self.dtype``, then each pair of elements in the last dimension of + :attr:`self` will be combined, and the size of the last dimension of the output + will be half that of :attr:`self`. If :attr:`dtype` element size is half that + of ``self.dtype``, then each element in the last dimension of :attr:`self` will + be split in two, and the size of the last dimension of the output will be + double that of :attr:`self`. For this to be possible, the following conditions + must be true: + + * ``self.dim()`` must be greater than 0. + * ``self.stride(-1)`` must be 1. + + Additionally, if the element size of :attr:`dtype` is greater than that of + ``self.dtype``, the following conditions must be true as well: + + * ``self.size(-1)`` must be divisible by the ratio between the element + sizes of the dtypes. + * ``self.storage_offset()`` must be divisible by the ratio between the + element sizes of the dtypes. + * The strides of all dimensions, except the last dimension, must be + divisible by the ratio between the element sizes of the dtypes. + + If any of the above conditions are not met, an error is thrown. + + .. warning:: + + This overload is not supported by TorchScript, and using it in a Torchscript + program will cause undefined behavior. 
+ + + Args: + dtype (:class:`torch.dtype`): the desired dtype + + Example:: + + >>> x = torch.randn(4, 4) + >>> x + tensor([[ 0.9482, -0.0310, 1.4999, -0.5316], + [-0.1520, 0.7472, 0.5617, -0.8649], + [-2.4724, -0.0334, -0.2976, -0.8499], + [-0.2109, 1.9913, -0.9607, -0.6123]]) + >>> x.dtype + torch.float32 + + >>> y = x.view(torch.int32) + >>> y + tensor([[ 1064483442, -1124191867, 1069546515, -1089989247], + [-1105482831, 1061112040, 1057999968, -1084397505], + [-1071760287, -1123489973, -1097310419, -1084649136], + [-1101533110, 1073668768, -1082790149, -1088634448]], + dtype=torch.int32) + >>> y[0, 0] = 1000000000 + >>> x + tensor([[ 0.0047, -0.0310, 1.4999, -0.5316], + [-0.1520, 0.7472, 0.5617, -0.8649], + [-2.4724, -0.0334, -0.2976, -0.8499], + [-0.2109, 1.9913, -0.9607, -0.6123]]) + + >>> x.view(torch.cfloat) + tensor([[ 0.0047-0.0310j, 1.4999-0.5316j], + [-0.1520+0.7472j, 0.5617-0.8649j], + [-2.4724-0.0334j, -0.2976-0.8499j], + [-0.2109+1.9913j, -0.9607-0.6123j]]) + >>> x.view(torch.cfloat).size() + torch.Size([4, 2]) + + >>> x.view(torch.uint8) + tensor([[ 0, 202, 154, 59, 182, 243, 253, 188, 185, 252, 191, 63, 240, 22, + 8, 191], + [227, 165, 27, 190, 128, 72, 63, 63, 146, 203, 15, 63, 22, 106, + 93, 191], + [205, 59, 30, 192, 112, 206, 8, 189, 7, 95, 152, 190, 12, 147, + 89, 191], + [ 43, 246, 87, 190, 235, 226, 254, 63, 111, 240, 117, 191, 177, 191, + 28, 191]], dtype=torch.uint8) + >>> x.view(torch.uint8).size() + torch.Size([4, 16]) + """ + ... + @overload + def view(self, *size: _int) -> Tensor: + r""" + view(*shape) -> Tensor + + Returns a new tensor with the same data as the :attr:`self` tensor but of a + different :attr:`shape`. + + The returned tensor shares the same data and must have the same number + of elements, but may have a different size. For a tensor to be viewed, the new + view size must be compatible with its original size and stride, i.e., each new + view dimension must either be a subspace of an original dimension, or only span + across original dimensions :math:`d, d+1, \dots, d+k` that satisfy the following + contiguity-like condition that :math:`\forall i = d, \dots, d+k-1`, + + .. math:: + + \text{stride}[i] = \text{stride}[i+1] \times \text{size}[i+1] + + Otherwise, it will not be possible to view :attr:`self` tensor as :attr:`shape` + without copying it (e.g., via :meth:`contiguous`). When it is unclear whether a + :meth:`view` can be performed, it is advisable to use :meth:`reshape`, which + returns a view if the shapes are compatible, and copies (equivalent to calling + :meth:`contiguous`) otherwise. + + Args: + shape (torch.Size or int...): the desired size + + Example:: + + >>> x = torch.randn(4, 4) + >>> x.size() + torch.Size([4, 4]) + >>> y = x.view(16) + >>> y.size() + torch.Size([16]) + >>> z = x.view(-1, 8) # the size -1 is inferred from other dimensions + >>> z.size() + torch.Size([2, 8]) + + >>> a = torch.randn(1, 2, 3, 4) + >>> a.size() + torch.Size([1, 2, 3, 4]) + >>> b = a.transpose(1, 2) # Swaps 2nd and 3rd dimension + >>> b.size() + torch.Size([1, 3, 2, 4]) + >>> c = a.view(1, 3, 2, 4) # Does not change tensor layout in memory + >>> c.size() + torch.Size([1, 3, 2, 4]) + >>> torch.equal(b, c) + False + + + .. method:: view(dtype) -> Tensor + :noindex: + + Returns a new tensor with the same data as the :attr:`self` tensor but of a + different :attr:`dtype`. + + If the element size of :attr:`dtype` is different than that of ``self.dtype``, + then the size of the last dimension of the output will be scaled + proportionally. 
For instance, if :attr:`dtype` element size is twice that of + ``self.dtype``, then each pair of elements in the last dimension of + :attr:`self` will be combined, and the size of the last dimension of the output + will be half that of :attr:`self`. If :attr:`dtype` element size is half that + of ``self.dtype``, then each element in the last dimension of :attr:`self` will + be split in two, and the size of the last dimension of the output will be + double that of :attr:`self`. For this to be possible, the following conditions + must be true: + + * ``self.dim()`` must be greater than 0. + * ``self.stride(-1)`` must be 1. + + Additionally, if the element size of :attr:`dtype` is greater than that of + ``self.dtype``, the following conditions must be true as well: + + * ``self.size(-1)`` must be divisible by the ratio between the element + sizes of the dtypes. + * ``self.storage_offset()`` must be divisible by the ratio between the + element sizes of the dtypes. + * The strides of all dimensions, except the last dimension, must be + divisible by the ratio between the element sizes of the dtypes. + + If any of the above conditions are not met, an error is thrown. + + .. warning:: + + This overload is not supported by TorchScript, and using it in a Torchscript + program will cause undefined behavior. + + + Args: + dtype (:class:`torch.dtype`): the desired dtype + + Example:: + + >>> x = torch.randn(4, 4) + >>> x + tensor([[ 0.9482, -0.0310, 1.4999, -0.5316], + [-0.1520, 0.7472, 0.5617, -0.8649], + [-2.4724, -0.0334, -0.2976, -0.8499], + [-0.2109, 1.9913, -0.9607, -0.6123]]) + >>> x.dtype + torch.float32 + + >>> y = x.view(torch.int32) + >>> y + tensor([[ 1064483442, -1124191867, 1069546515, -1089989247], + [-1105482831, 1061112040, 1057999968, -1084397505], + [-1071760287, -1123489973, -1097310419, -1084649136], + [-1101533110, 1073668768, -1082790149, -1088634448]], + dtype=torch.int32) + >>> y[0, 0] = 1000000000 + >>> x + tensor([[ 0.0047, -0.0310, 1.4999, -0.5316], + [-0.1520, 0.7472, 0.5617, -0.8649], + [-2.4724, -0.0334, -0.2976, -0.8499], + [-0.2109, 1.9913, -0.9607, -0.6123]]) + + >>> x.view(torch.cfloat) + tensor([[ 0.0047-0.0310j, 1.4999-0.5316j], + [-0.1520+0.7472j, 0.5617-0.8649j], + [-2.4724-0.0334j, -0.2976-0.8499j], + [-0.2109+1.9913j, -0.9607-0.6123j]]) + >>> x.view(torch.cfloat).size() + torch.Size([4, 2]) + + >>> x.view(torch.uint8) + tensor([[ 0, 202, 154, 59, 182, 243, 253, 188, 185, 252, 191, 63, 240, 22, + 8, 191], + [227, 165, 27, 190, 128, 72, 63, 63, 146, 203, 15, 63, 22, 106, + 93, 191], + [205, 59, 30, 192, 112, 206, 8, 189, 7, 95, 152, 190, 12, 147, + 89, 191], + [ 43, 246, 87, 190, 235, 226, 254, 63, 111, 240, 117, 191, 177, 191, + 28, 191]], dtype=torch.uint8) + >>> x.view(torch.uint8).size() + torch.Size([4, 16]) + """ + ... + def view_as(self, other: Tensor) -> Tensor: + r""" + view_as(other) -> Tensor + + View this tensor as the same size as :attr:`other`. + ``self.view_as(other)`` is equivalent to ``self.view(other.size())``. + + Please see :meth:`~Tensor.view` for more information about ``view``. + + Args: + other (:class:`torch.Tensor`): The result tensor has the same size + as :attr:`other`. + """ + ... + @overload + def vsplit(self, sections: _int) -> Tuple[Tensor, ...]: + r""" + vsplit(split_size_or_sections) -> List of Tensors + + See :func:`torch.vsplit` + """ + ... + @overload + def vsplit(self, indices: _size) -> Tuple[Tensor, ...]: + r""" + vsplit(split_size_or_sections) -> List of Tensors + + See :func:`torch.vsplit` + """ + ... 
+ @overload + def vsplit(self, *indices: _int) -> Tuple[Tensor, ...]: + r""" + vsplit(split_size_or_sections) -> List of Tensors + + See :func:`torch.vsplit` + """ + ... + @overload + def where(self, condition: Tensor, other: Tensor) -> Tensor: + r""" + where(condition, y) -> Tensor + + ``self.where(condition, y)`` is equivalent to ``torch.where(condition, self, y)``. + See :func:`torch.where` + """ + ... + @overload + def where(self, condition: Tensor, other: Union[Number, _complex]) -> Tensor: + r""" + where(condition, y) -> Tensor + + ``self.where(condition, y)`` is equivalent to ``torch.where(condition, self, y)``. + See :func:`torch.where` + """ + ... + @overload + def xlogy(self, other: Tensor) -> Tensor: + r""" + xlogy(other) -> Tensor + + See :func:`torch.xlogy` + """ + ... + @overload + def xlogy(self, other: Union[Number, _complex]) -> Tensor: + r""" + xlogy(other) -> Tensor + + See :func:`torch.xlogy` + """ + ... + @overload + def xlogy_(self, other: Tensor) -> Tensor: + r""" + xlogy_(other) -> Tensor + + In-place version of :meth:`~Tensor.xlogy` + """ + ... + @overload + def xlogy_(self, other: Union[Number, _complex]) -> Tensor: + r""" + xlogy_(other) -> Tensor + + In-place version of :meth:`~Tensor.xlogy` + """ + ... + def zero_(self) -> Tensor: + r""" + zero_() -> Tensor + + Fills :attr:`self` tensor with zeros. + """ + ... + +_TensorBase = TensorBase + +# Defined in torch/csrc/multiprocessing/init.cpp +def _multiprocessing_init() -> None: ... + +# Defined in torch/csrc/mps/Module.cpp +def _mps_deviceSynchronize() -> None: ... +def _mps_get_default_generator() -> Generator: ... +def _mps_emptyCache() -> None: ... +def _mps_setMemoryFraction(fraction: _float) -> None: ... +def _mps_currentAllocatedMemory() -> _int: ... +def _mps_driverAllocatedMemory() -> _int: ... +def _mps_is_available() -> _bool: ... +def _mps_is_on_macos_or_newer(major: _int, minor: _int) -> _bool: ... +def _mps_profilerStartTrace(mode: str, wait_until_completed: _bool) -> None: ... +def _mps_profilerStopTrace() -> None: ... +def _mps_acquireEvent(enable_timing: _bool) -> _int: ... +def _mps_releaseEvent(event_id: _int) -> None: ... +def _mps_recordEvent(event_id: _int) -> None: ... +def _mps_waitForEvent(event_id: _int) -> None: ... +def _mps_synchronizeEvent(event_id: _int) -> None: ... +def _mps_queryEvent(event_id: _int) -> _bool: ... +def _mps_elapsedTimeOfEvents(start_event_id: _int, end_event_id: _int) -> _float: ... + + +# Defined in torch/csrc/cuda/Module.cpp +def _cuda_getCurrentStream(device: _int) -> Tuple: ... +def _cuda_getCurrentRawStream(device: _int) -> _int: ... +def _cuda_getDefaultStream(device: _int) -> Tuple: ... +def _cuda_getCurrentBlasHandle() -> _int: ... +def _cuda_clearCublasWorkspaces() -> None: ... +def _cuda_setDevice(device: _int) -> None: ... +def _cuda_exchangeDevice(device: _int) -> _int: ... +def _cuda_maybeExchangeDevice(device: _int) -> _int: ... +def _cuda_getDevice() -> _int: ... +def _cuda_getDeviceCount() -> _int: ... +def _cuda_set_sync_debug_mode(warn_level: Union[_int, str]) -> None: ... +def _cuda_get_sync_debug_mode() -> _int: ... +def _cuda_sleep(cycles: _int) -> None: ... +def _cuda_synchronize() -> None: ... +def _cuda_ipc_collect() -> None: ... +def _cuda_getArchFlags() -> Optional[str]: ... +def _cuda_init() -> None: ... +def _cuda_setStream(stream_id: _int, device_index: _int, device_type: _int) -> None: ... +def _cuda_getCompiledVersion() -> _int: ... +def _cuda_cudaHostAllocator() -> _int: ... 
+def _cuda_cudaCachingAllocator_raw_alloc(size: _int, cuda_stream: _int) -> _int: ... +def _cuda_cudaCachingAllocator_raw_delete(ptr: _int) -> None: ... +def _cuda_cudaCachingAllocator_set_allocator_settings(env: str) -> None: ... +def _cuda_beginAllocateCurrentStreamToPool(device: _int, mempool_id: Tuple[_int, _int]) -> None: ... +def _cuda_endAllocateCurrentStreamToPool(device: _int, mempool_id: Tuple[_int, _int]) -> None: ... +def _cuda_releasePool(device: _int, mempool_id: Tuple[_int, _int]) -> None: ... +def _cuda_checkPoolLiveAllocations(device: _int, mempool_id: Tuple[_int, _int], expected_live_allocations: Set) -> _bool: ... +def _cuda_setCheckpointPoolState(device: _int, state: _cuda_CUDAAllocator_AllocatorState, stale_storages: List[_int], storages_to_add_deleters_to: List[_int]) -> None: ... +def _cuda_setMemoryFraction(fraction: _float, device: _int) -> None: ... +def _cuda_emptyCache() -> None: ... +def _cuda_memoryStats(device: _int) -> Dict[str, Any]: ... +def _cuda_resetAccumulatedMemoryStats(device: _int) -> None: ... +def _cuda_resetPeakMemoryStats(device: _int) -> None: ... +def _cuda_memorySnapshot() -> Dict[str, Any]: ... +def _cuda_record_memory_history_legacy( + enabled: _bool, + record_context: _bool, + record_context_cpp: _bool, + alloc_trace_max_entries: _int, + alloc_trace_record_context: _bool, +) -> None: ... +def _cuda_record_memory_history( + enabled: Optional[str], + context: Optional[str], + stacks: str, + max_entries +) -> None: ... +def _cuda_isHistoryEnabled() -> _bool: ... + +def _cuda_getAllocatorBackend() -> str: ... +class _cuda_CUDAAllocator_AllocatorState: + pass +def _cuda_getCheckpointState(device: _int, mempool: Tuple[_int, _int]) -> _cuda_CUDAAllocator_AllocatorState: ... +def _set_cached_tensors_enabled(enabled: _bool) -> None: ... +def _add_cached_tensor(t: Tensor) -> None: ... +def _remove_cached_tensor(t: Tensor) -> None: ... +def _construct_CUDA_Tensor_From_Storage_And_Metadata(metadata: dict, storage: Storage) -> Tensor: ... +def _storage_Use_Count(storage_ptr: _int) -> _int: ... +def _set_storage_access_error_msg(t: Tensor, s: str) -> None: ... +def _free_And_Remove_DeleterFn(storage_ptr: _int) -> None: ... +def _has_Standard_Deleter(storage_ptr: _int) -> _bool: ... + +class _cuda_CUDAAllocator: ... + +def _cuda_customAllocator(alloc_fn: _int, free_fn: _int) -> _cuda_CUDAAllocator: ... +def _cuda_changeCurrentAllocator(allocator: _cuda_CUDAAllocator) -> None: ... +def _cuda_getAllocator() -> _cuda_CUDAAllocator: ... +def _cuda_lock_mutex() -> None: ... +def _cuda_unlock_mutex() -> None: ... +def _cuda_canDeviceAccessPeer(device: _int, peer_device: _int) -> _bool: ... +def _cuda_jiterator_compile_and_launch_kernel( + code_string: str, + kernel_name: str, + return_by_ref: _bool, + num_outputs: _int, + tensors: Tuple, + kwargs: Dict[str, Union[_int, _float, _bool]], +) -> Tensor: ... +def _cuda_get_cudnn_benchmark_limit() -> _int: ... +def _cuda_set_cudnn_benchmark_limit(arg: _int) -> None: ... +def _cuda_get_conv_benchmark_empty_cache() -> _bool: ... +def _cudnn_set_conv_benchmark_empty_cache(enable: _bool) -> None: ... +def _nccl_version() -> _int: ... +def _nccl_version_suffix() -> bytes : ... +def _nccl_unique_id() -> bytes: ... +def _nccl_init_rank(nranks: _int, comm_id: bytes, rank: _int) -> object: ... +def _nccl_reduce( + input: Sequence[Tensor], + output: Tensor, + root: _int, + op: _int, + streams: Optional[Sequence[_CudaStreamBase]], + comms: Optional[Sequence[object]], +) -> None: ... 
+def _nccl_all_reduce( + input: Sequence[Tensor], + output: Sequence[Tensor], + op: _int, + streams: Optional[Sequence[_CudaStreamBase]], + comms: Optional[Sequence[object]], +) -> None: ... +def _nccl_broadcast( + input: Sequence[Tensor], + root: _int, + streams: Optional[Sequence[_CudaStreamBase]], + comms: Optional[Sequence[object]], +) -> None: ... +def _nccl_all_gather( + input: Sequence[Tensor], + output: Sequence[Tensor], + streams: Optional[Sequence[_CudaStreamBase]], + comms: Optional[Sequence[object]], +) -> None: ... +def _nccl_reduce_scatter( + input: Sequence[Tensor], + output: Sequence[Tensor], + op: _int, + streams: Optional[Sequence[_CudaStreamBase]], + comms: Optional[Sequence[object]], +) -> None: ... +def _rocm_is_backward_pass() -> _bool: ... + +class _CudaDeviceProperties: + name: str + major: _int + minor: _int + multi_processor_count: _int + total_memory: _int + is_integrated: _int + is_multi_gpu_board: _int + max_threads_per_multi_processor: _int + gcnArchName: str + +# Functions related to SDPA +class _SDPAParams: + query: Tensor + key: Tensor + value: Tensor + attn_mask: Optional[Tensor] + dropout: _float + is_causal: _bool + def __init__( + self, + query: Tensor, + key: Tensor, + value: Tensor, + attn_mask: Optional[Tensor], + dropout: _float, + is_causal: _bool) -> None: ... + +class _SDPBackend(Enum): + ERROR = -1 + MATH = 0 + FLASH_ATTENTION = 1 + EFFICIENT_ATTENTION = 2 + CUDNN_ATTENTION = 3 + +def _can_use_flash_attention(params: _SDPAParams, debug: _bool) -> _bool: ... +def _can_use_mem_efficient_attention(params: _SDPAParams, debug: _bool) -> _bool: ... + +# Defined in torch/csrc/cuda/python_comm.cpp +def _broadcast(tensor: Tensor, devices: List[_int]) -> List[Tensor]: ... +def _broadcast_out(tensor: Tensor, out_tensors: List[Tensor]) -> List[Tensor]: ... +def _broadcast_coalesced( + tensors: List[Tensor], + devices: List[_int], + buffer_size: _int, +) -> List[List[Tensor]]: ... +def _scatter( + tensor: Tensor, + devices: List[_int], + chunk_sizes: Optional[List[_int]], + dim: _int, + streams: Optional[List[Stream]], +) -> List[Tensor]: ... +def _scatter_out( + tensor: Tensor, + out_tensors: List[Tensor], + dim: _int, + streams: Optional[List[Stream]], +) -> List[Tensor]: ... +def _gather( + tensors: List[Tensor], + dim: _int, + destination_index: Optional[_int], +) -> Tensor: ... +def _gather_out(tensors: List[Tensor], out_tensor: Tensor, dim: _int) -> Tensor: ... + +# Defined in torch/csrc/cuda/Stream.cpp +class _CudaStreamBase(Stream): + stream_id: _int + device_index: _int + device_type: _int + + device: _device + cuda_stream: _int + priority: _int + + def __new__( + self, + priority: _int = 0, + stream_id: _int = 0, + device_index: _int = 0, + stream_ptr: _int = 0, + ) -> _CudaStreamBase: ... + def query(self) -> _bool: ... + def synchronize(self) -> None: ... + def priority_range(self) -> Tuple[_int, _int]: ... + +# Defined in torch/csrc/cuda/Event.cpp +class _CudaEventBase: + device: _device + cuda_event: _int + + def __new__( + cls, + enable_timing: _bool = False, + blocking: _bool = False, + interprocess: _bool = False, + ) -> _CudaEventBase: ... + @classmethod + def from_ipc_handle(cls, device: _device, ipc_handle: bytes) -> _CudaEventBase: ... + def record(self, stream: _CudaStreamBase) -> None: ... + def wait(self, stream: _CudaStreamBase) -> None: ... + def query(self) -> _bool: ... + def elapsed_time(self, other: _CudaEventBase) -> _float: ... + def synchronize(self) -> None: ... + def ipc_handle(self) -> bytes: ... 
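+
+# NOTE: the private _CudaStreamBase/_CudaEventBase bindings above are normally
+# reached through the public torch.cuda.Stream / torch.cuda.Event wrappers. The
+# following is an illustrative timing sketch only; it assumes a CUDA-enabled build
+# and is not taken from the upstream sources.
+#
+#   >>> import torch
+#   >>> s = torch.cuda.Stream()                        # wraps _CudaStreamBase
+#   >>> start = torch.cuda.Event(enable_timing=True)   # wraps _CudaEventBase
+#   >>> end = torch.cuda.Event(enable_timing=True)
+#   >>> a = torch.randn(1024, 1024, device="cuda")
+#   >>> with torch.cuda.stream(s):
+#   ...     start.record()                             # record on the current stream (s)
+#   ...     b = a @ a
+#   ...     end.record()
+#   >>> end.synchronize()                              # block until the recorded work finishes
+#   >>> elapsed_ms = start.elapsed_time(end)           # elapsed time in milliseconds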
+ +# Defined in torch/csrc/cuda/Graph.cpp +class _CUDAGraph: + def capture_begin(self, pool: Optional[Tuple[_int, _int]] = ..., capture_error_mode: str = "global") -> None: ... + def capture_end(self) -> None: ... + def replay(self) -> None: ... + def reset(self) -> None: ... + def pool(self) -> Tuple[_int, _int]: ... + def enable_debug_mode(self) -> None: ... + def debug_dump(self, debug_path: str) -> None: ... + +def _cuda_isCurrentStreamCapturing() -> _bool: ... +def _graph_pool_handle() -> Tuple[_int, _int]: ... + +# Defined in torch/csrc/xpu/Module.cpp +def _xpu_setDevice(device: _int) -> None: ... +def _xpu_exchangeDevice(device: _int) -> _int: ... +def _xpu_maybeExchangeDevice(device: _int) -> _int: ... +def _xpu_getDevice() -> _int: ... +def _xpu_getDeviceCount() -> _int: ... +def _xpu_init() -> None: ... +def _xpu_setStream(stream_id: _int, device_index: _int, device_type: _int) -> None: ... +def _xpu_getCurrentStream(device: _int) -> Tuple: ... +def _xpu_getCurrentRawStream(device: _int) -> _int: ... +def _xpu_synchronize(device: _int) -> None: ... +def _xpu_emptyCache() -> None: ... + +class _XpuDeviceProperties: + name: str + platform_name: str + total_memory: _int + max_compute_units: _int + gpu_eu_count: _int + gpu_subslice_count: _int + max_work_group_size: _int + max_num_sub_groups: _int + sub_group_sizes: List[_int] + type: str + +# Defined in torch/csrc/xpu/Stream.cpp +class _XpuStreamBase(Stream): + stream_id: _int + device_index: _int + device_type: _int + + device: _device + sycl_queue: _int + priority: _int + + def __new__( + cls, + priority: _int = 0, + stream_id: _int = 0, + device_index: _int = 0, + device_type: _int = 0, + ) -> _XpuStreamBase: ... + def query(self) -> _bool: ... + def synchronize(self) -> None: ... + @staticmethod + def priority_range() -> Tuple: ... + +# Defined in torch/csrc/xpu/Event.cpp +class _XpuEventBase: + device: _device + sycl_event: _int + + def __new__(cls, enable_timing: _bool = False) -> _XpuEventBase: ... + def record(self, stream: _XpuEventBase) -> None: ... + def wait(self, stream: _XpuStreamBase) -> None: ... + def query(self) -> _bool: ... + def elapsed_time(self, other: _XpuEventBase) -> _float: ... + def synchronize(self) -> None: ... + +# Defined in torch/csrc/DataLoader.cpp +def _set_worker_signal_handlers( + *arg: Any, +) -> None: ... # THPModule_setWorkerSignalHandlers +def _set_worker_pids( + key: _int, + child_pids: Tuple[_int, ...], +) -> None: ... # THPModule_setWorkerPIDs +def _remove_worker_pids(loader_id: _int) -> None: ... # THPModule_removeWorkerPIDs +def _error_if_any_worker_fails() -> None: ... # THPModule_errorIfAnyWorkerFails + +# Defined in torch/csrc/jit/python/python_tracer.cpp +class TracingState: + def push_scope(self, scope_name: str) -> None: ... + def pop_scope(self) -> None: ... + def current_scope(self) -> str: ... + def set_graph(self, graph: Graph) -> None: ... + def graph(self) -> Graph: ... + +def _create_graph_by_tracing( + func: Callable[..., Any], + inputs: Any, + var_name_lookup_fn: Callable[[Tensor], str], + strict: Any, + force_outplace: Any, + self: Any = None, + argument_names: List[str] = [], +) -> Tuple[Graph, Stack]: ... +def _tracer_warn_use_python(): ... +def _get_tracing_state() -> TracingState: ... + +# Defined in torch/csrc/jit/python/python_ir.cpp +# Not actually defined in python_ir.cpp, not sure where they are. +class IValue: ... + +Stack = List[IValue] + +class JitType: + annotation_str: str + def isSubtypeOf(self, other: JitType) -> _bool: ... 
+ def with_dtype(self, dtype: _dtype) -> JitType: ... + def with_sizes(self, sizes: List[Optional[_int]]) -> JitType: ... + def kind(self) -> str: ... + def scalarType(self) -> Optional[str]: ... + def getElementType(self) -> JitType: ... + def dtype(self) -> Optional[_dtype]: ... + +class InferredType: + def __init__(self, arg: Union[JitType, str]): ... + def type(self) -> JitType: ... + def success(self) -> _bool: ... + def reason(self) -> str: ... + +R = TypeVar("R", bound=JitType) + +class AnyType(JitType): + @staticmethod + def get() -> AnyType: ... + +class NoneType(JitType): + @staticmethod + def get() -> NoneType: ... + +class BoolType(JitType): + @staticmethod + def get() -> BoolType: ... + +class FloatType(JitType): + @staticmethod + def get() -> FloatType: ... + +class ComplexType(JitType): + @staticmethod + def get() -> ComplexType: ... + +class IntType(JitType): + @staticmethod + def get() -> IntType: ... + +class SymIntType(JitType): + @staticmethod + def get() -> SymIntType: ... + +class SymBoolType(JitType): + @staticmethod + def get() -> SymBoolType: ... + +class NumberType(JitType): + @staticmethod + def get() -> NumberType: ... + +class StringType(JitType): + @staticmethod + def get() -> StringType: ... + +class DeviceObjType(JitType): + @staticmethod + def get() -> DeviceObjType: ... + +class _GeneratorType(JitType): + @staticmethod + def get() -> _GeneratorType: ... + +class StreamObjType(JitType): + @staticmethod + def get() -> StreamObjType: ... + +class ListType(JitType): + def __init__(self, a: JitType) -> None: ... + def getElementType(self) -> JitType: ... + @staticmethod + def ofInts() -> ListType: ... + @staticmethod + def ofTensors() -> ListType: ... + @staticmethod + def ofFloats() -> ListType: ... + @staticmethod + def ofComplexDoubles() -> ListType: ... + @staticmethod + def ofBools() -> ListType: ... + @staticmethod + def ofStrings() -> ListType: ... + +class DictType(JitType): + def __init__(self, key: JitType, value: JitType) -> None: ... + def getKeyType(self) -> JitType: ... + def getValueType(self) -> JitType: ... + +class TupleType(JitType): + def __init__(self, a: List[Optional[JitType]]) -> None: ... + def elements(self) -> List[JitType]: ... + +class UnionType(JitType): + def __init__(self, a: List[JitType]) -> None: ... + +class ClassType(JitType): + def __init__(self, qualified_name: str) -> None: ... + +class InterfaceType(JitType): + def __init__(self, qualified_name: str) -> None: ... + def getMethod(self, name: str) -> Optional[FunctionSchema]: ... + def getMethodNames(self) -> List[str]: ... + +class OptionalType(JitType, Generic[R]): + def __init__(self, a: JitType) -> None: ... + def getElementType(self) -> JitType: ... + @staticmethod + def ofTensor() -> OptionalType: ... + +class FutureType(JitType): + def __init__(self, a: JitType) -> None: ... + def getElementType(self) -> JitType: ... + +class AwaitType(JitType): + def __init__(self, a: JitType) -> None: ... + def getElementType(self) -> JitType: ... + +class RRefType(JitType): + def __init__(self, a: JitType) -> None: ... + +class EnumType(JitType): + def __init__( + self, + qualified_name: str, + value_type: JitType, + enum_names_values: List[Any], + ) -> None: ... + +class TensorType(JitType): + @classmethod + def get(cls) -> TensorType: ... + @classmethod + def getInferred(cls) -> TensorType: ... + def with_sizes(self, other: Optional[List[Optional[_int]]]) -> TensorType: ... + def sizes(self) -> Optional[List[_int]]: ... 
+ def varyingSizes(self) -> Optional[List[Optional[_int]]]: ... + def strides(self) -> Optional[List[_int]]: ... + def device(self) -> Optional[_device]: ... + def dim(self) -> _int: ... + def dtype(self) -> Optional[_dtype]: ... + @staticmethod + def create_from_tensor(t: Tensor) -> TensorType: ... + +# Defined in torch/csrc/jit/python/python_tree_views.cpp +class SourceRange: ... +class TreeView: ... + +class Ident(TreeView): + @property + def name(self) -> str: ... + +class ClassDef(TreeView): ... + +class Def(TreeView): + def name(self) -> Ident: ... + +class Decl(TreeView): ... + +# Defined in torch/csrc/distributed/rpc/init.cpp +def _rpc_init() -> _bool: ... + +# Defined in torch/csrc/distributed/autograd/init.cpp +def _dist_autograd_init() -> _bool: ... + +# Defined in torch/csrc/distributed/c10d/init.cpp +def _c10d_init() -> _bool: ... + +# Defined in torch/csrc/distributed/rpc/testing/init.cpp +def _faulty_agent_init() -> _bool: ... +def _register_py_class_for_device(device: str, cls: Any) -> None: ... +def _activate_cuda_trace() -> None: ... + +# Defined in torch/csrc/Module.cpp +def _current_graph_task_id() -> _int: ... +def _current_autograd_node() -> _Node: ... + +# Defined in torch/csrc/Exceptions.cpp +class _OutOfMemoryError(RuntimeError): ... +class _DistError(RuntimeError): ... +class _DistBackendError(RuntimeError): ... +class _DistStoreError(RuntimeError): ... +class _DistNetworkError(RuntimeError): ... + +# Defined in torch/csrc/profiler/init.cpp +class CapturedTraceback: + pass +def gather_traceback(python: _bool, script: _bool, cpp: _bool) -> CapturedTraceback: ... +def symbolize_tracebacks(tracebacks: List[CapturedTraceback]) -> List[Dict[str, Any]]: ... + +def _load_mobile_module_from_file(filename: str): ... +def _load_mobile_module_from_bytes(bytes_: bytes): ... +def _load_jit_module_from_file(filename: str): ... +def _load_jit_module_from_bytes(bytes_: bytes): ... +def _save_mobile_module(m: LiteScriptModule, filename: str): ... +def _save_jit_module(m: ScriptModule, filename: str, extra_files: Dict[str, Any]): ... +def _save_mobile_module_to_bytes(m: LiteScriptModule) -> bytes: ... +def _save_jit_module_to_bytes(m: ScriptModule, extra_files: Dict[str, Any]) -> bytes: ... +def _get_module_info_from_flatbuffer(data: bytes): ... +def _jit_resolve_packet(op_name: str, *args, **kwargs) -> str: ... +def _swap_tensor_impl(t1: Tensor, t2: Tensor): ... +def _save_pickle(obj: Any) -> bytes: ... + +# Defined in torch/csrc/jit/runtime/static/init.cpp +def _jit_to_static_module(graph_or_module: Union[Graph,ScriptModule]) -> Any: ... +def _fuse_to_static_module(graph_or_module: Union[Graph,ScriptModule], min_size: _int) -> Any: ... diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_C/_aoti.pyi b/llmeval-env/lib/python3.10/site-packages/torch/_C/_aoti.pyi new file mode 100644 index 0000000000000000000000000000000000000000..fb0fe49e8a31dd85c6b36e0c312afe6dbb6cef54 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_C/_aoti.pyi @@ -0,0 +1,3 @@ +# Defined in torch/csrc/inductor/aoti_runner/pybind.cpp +class AOTIModelContainerRunnerCpu: ... +class AOTIModelContainerRunnerCuda: ... 
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_C/_functions.pyi b/llmeval-env/lib/python3.10/site-packages/torch/_C/_functions.pyi new file mode 100644 index 0000000000000000000000000000000000000000..151c1077b1a368e63f5b6f5d3ccd4254c2269c36 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_C/_functions.pyi @@ -0,0 +1,11 @@ +from typing import AnyStr, List + +from torch import Tensor + +class UndefinedGrad: + def __init__(self) -> None: ... + def __call__(self, *inputs: Tensor) -> List[Tensor]: ... + +class DelayedError: + def __init__(self, msg: AnyStr, num_inputs: int) -> None: ... + def __call__(self, inputs: List[Tensor]) -> List[Tensor]: ... diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_C/_itt.pyi b/llmeval-env/lib/python3.10/site-packages/torch/_C/_itt.pyi new file mode 100644 index 0000000000000000000000000000000000000000..8a54437f527b994821133532f26598c951631b28 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_C/_itt.pyi @@ -0,0 +1,5 @@ +# Defined in torch/csrc/itt.cpp +def is_available() -> None: ... +def rangePush(message: str) -> None: ... +def rangePop() -> None: ... +def mark(message: str) -> None: ... diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_C/_lazy.pyi b/llmeval-env/lib/python3.10/site-packages/torch/_C/_lazy.pyi new file mode 100644 index 0000000000000000000000000000000000000000..ceaaedee210298b6448ee485ac0e433dc79dd450 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_C/_lazy.pyi @@ -0,0 +1,28 @@ +from typing import List + +from torch import Tensor + +# defined in torch/csrc/lazy/python/init.cpp +def _mark_step(device: str, devices: List[str], wait: bool): ... +def _wait_device_ops(devices: List[str]): ... +def _reset_metrics(): ... +def _counter_names() -> List[str]: ... +def _counter_value(name: str) -> int: ... +def _metrics_report() -> str: ... +def _get_graph_hash(tensors: List[Tensor]) -> str: ... +def _sync_multi( + tensors: List[Tensor], + devices: List[str], + wait: bool = True, + sync_ltc_data: bool = True, +): ... +def _get_tensor_id(tensor: Tensor) -> int: ... +def _get_tensors_text(tensors: List[Tensor]) -> str: ... +def _get_tensors_dot(tensors: List[Tensor]) -> str: ... +def _get_tensors_backend(tensors: List[Tensor]) -> str: ... +def _get_force_fallback() -> str: ... +def _set_force_fallback(newval: str): ... +def _clear_ir_cache(): ... +def _dump_ir_cache(filename: str): ... +def _set_reuse_ir(val: bool): ... +def _get_default_device_type(): ... diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_C/_lazy_ts_backend.pyi b/llmeval-env/lib/python3.10/site-packages/torch/_C/_lazy_ts_backend.pyi new file mode 100644 index 0000000000000000000000000000000000000000..ce833c5ec2e4543f7c668d63ba0cb1cc8de72b5e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_C/_lazy_ts_backend.pyi @@ -0,0 +1,11 @@ +# defined in torch/csrc/lazy/python/init.cpp + +from typing import Any, List, Tuple + +from torch import Tensor + +def _init(): ... +def _get_tensors_ts_device_data_node( + tensors: List[Tensor], +) -> Tuple[List[int], List[Any]]: ... +def _run_cached_graph(hash_str: str, graph_inputs: List[Any]) -> List[Tensor]: ... 
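+
+# Illustrative only, not part of the generated stub: these bindings back the
+# torch._lazy helpers. A hedged sketch, assuming the TorchScript lazy backend
+# and the "lazy" device are available in this build:
+#
+#   import torch._lazy
+#   import torch._lazy.ts_backend
+#   torch._lazy.ts_backend.init()            # calls _init() above
+#   x = torch.randn(2, 2, device="lazy")
+#   y = (x + x).sum()
+#   torch._lazy.mark_step()                  # flushes the pending lazy graph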
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_C/_monitor.pyi b/llmeval-env/lib/python3.10/site-packages/torch/_C/_monitor.pyi new file mode 100644 index 0000000000000000000000000000000000000000..9950a9e8c30a4b38f69ca9627e3980e948dfc213 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_C/_monitor.pyi @@ -0,0 +1,44 @@ +# Defined in torch/csrc/monitor/python_init.cpp + +import datetime +from enum import Enum +from typing import Callable, Dict, List, Union + +class Aggregation(Enum): + VALUE = ... + MEAN = ... + COUNT = ... + SUM = ... + MAX = ... + MIN = ... + +class Stat: + name: str + count: int + def __init__( + self, + name: str, + aggregations: List[Aggregation], + window_size: int, + max_samples: int = -1, + ) -> None: ... + def add(self, v: float) -> None: ... + def get(self) -> Dict[Aggregation, float]: ... + +class Event: + name: str + timestamp: datetime.datetime + data: Dict[str, Union[int, float, bool, str]] + def __init__( + self, + name: str, + timestamp: datetime.datetime, + data: Dict[str, Union[int, float, bool, str]], + ) -> None: ... + +def log_event(e: Event) -> None: ... + +class EventHandlerHandle: ... + +def register_event_handler(handler: Callable[[Event], None]) -> EventHandlerHandle: ... +def unregister_event_handler(handle: EventHandlerHandle) -> None: ... diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_C/_nvtx.pyi b/llmeval-env/lib/python3.10/site-packages/torch/_C/_nvtx.pyi new file mode 100644 index 0000000000000000000000000000000000000000..f7ff779d8ad73cfb6189bb5c0c02127dc96988af --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_C/_nvtx.pyi @@ -0,0 +1,6 @@ +# Defined in torch/csrc/cuda/shared/nvtx.cpp +def rangePushA(message: str) -> int: ... +def rangePop() -> int: ... +def rangeStartA(message: str) -> int: ... +def rangeEnd(int) -> None: ... +def markA(message: str) -> None: ... diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_C/_profiler.pyi b/llmeval-env/lib/python3.10/site-packages/torch/_C/_profiler.pyi new file mode 100644 index 0000000000000000000000000000000000000000..f1b8cda073b0da319d015539c19f80fe459560b9 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_C/_profiler.pyi @@ -0,0 +1,238 @@ +from enum import Enum +from typing import Any, Dict, List, Literal, Optional, Tuple, Union + +from torch._C import device, dtype, layout +from typing_extensions import TypeAlias + +# defined in torch/csrc/profiler/python/init.cpp + +class RecordScope(Enum): + FUNCTION = ... + BACKWARD_FUNCTION = ... + TORCHSCRIPT_FUNCTION = ... + KERNEL_FUNCTION_DTYPE = ... + CUSTOM_CLASS = ... + BUILD_FEATURE = ... + LITE_INTERPRETER = ... + USER_SCOPE = ... + STATIC_RUNTIME_OP = ... + STATIC_RUNTIME_MODEL = ... + +class ProfilerState(Enum): + Disable = ... + CPU = ... + CUDA = ... + NVTX = ... + ITT = ... + KINETO = ... + KINETO_GPU_FALLBACK = ... + KINETO_PRIVATEUSE1_FALLBACK = ... + KINETO_PRIVATEUSE1 = ... + +class ActiveProfilerType(Enum): + NONE = ... + LEGACY = ... + KINETO = ... + NVTX = ... + ITT = ... + +class ProfilerActivity(Enum): + CPU = ... + CUDA = ... + MTIA = ... + PrivateUse1 = ... + +class _EventType(Enum): + TorchOp = ... + Backend = ... + Allocation = ... + OutOfMemory = ... + PyCall = ... + PyCCall = ... + Kineto = ... + +class _ExperimentalConfig: + def __init__( + self, + profiler_metrics: List[str] = ..., + profiler_measure_per_kernel: bool = ..., + verbose: bool = ..., + performance_events: List[str] = ..., + enable_cuda_sync_events: bool = ..., + ) -> None: ... 
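+
+# Illustrative only, not part of the generated stub: _ExperimentalConfig is
+# consumed by the public profiler entry point. A hedged sketch, assuming the
+# experimental_config keyword of torch.profiler.profile and a hypothetical
+# model/inputs pair:
+#
+#   from torch.profiler import profile, ProfilerActivity
+#   cfg = torch._C._profiler._ExperimentalConfig(verbose=True)
+#   with profile(activities=[ProfilerActivity.CPU], experimental_config=cfg) as prof:
+#       model(inputs)  # hypothetical workload
+#   print(prof.key_averages().table())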
+ +class ProfilerConfig: + def __init__( + self, + state: ProfilerState, + report_input_shapes: bool, + profile_memory: bool, + with_stack: bool, + with_flops: bool, + with_modules: bool, + experimental_config: _ExperimentalConfig, + ) -> None: ... + +class _ProfilerEvent: + start_tid: int + start_time_ns: int + children: List[_ProfilerEvent] + + # TODO(robieta): remove in favor of `self.typed` + extra_fields: Union[ + _ExtraFields_TorchOp, + _ExtraFields_Backend, + _ExtraFields_Allocation, + _ExtraFields_OutOfMemory, + _ExtraFields_PyCall, + _ExtraFields_PyCCall, + _ExtraFields_Kineto, + ] + + @property + def typed( + self, + ) -> Union[ + Tuple[Literal[_EventType.TorchOp], _ExtraFields_TorchOp], + Tuple[Literal[_EventType.Backend], _ExtraFields_Backend], + Tuple[Literal[_EventType.Allocation], _ExtraFields_Allocation], + Tuple[Literal[_EventType.OutOfMemory], _ExtraFields_OutOfMemory], + Tuple[Literal[_EventType.PyCall], _ExtraFields_PyCall], + Tuple[Literal[_EventType.PyCCall], _ExtraFields_PyCCall], + Tuple[Literal[_EventType.Kineto], _ExtraFields_Kineto], + ]: ... + @property + def name(self) -> str: ... + @property + def tag(self) -> _EventType: ... + @property + def id(self) -> int: ... + @property + def parent(self) -> Optional[_ProfilerEvent]: ... + @property + def correlation_id(self) -> int: ... + @property + def end_time_ns(self) -> int: ... + @property + def duration_time_ns(self) -> int: ... + +class _TensorMetadata: + impl_ptr: Optional[int] + storage_data_ptr: Optional[int] + id: Optional[int] + + @property + def allocation_id(self) -> Optional[int]: ... + @property + def layout(self) -> layout: ... + @property + def device(self) -> device: ... + @property + def dtype(self) -> dtype: ... + @property + def sizes(self) -> List[int]: ... + @property + def strides(self) -> List[int]: ... + +Scalar: TypeAlias = Union[int, float, bool, complex] +Input: TypeAlias = Optional[Union[_TensorMetadata, List[_TensorMetadata], Scalar]] + +class _ExtraFields_TorchOp: + name: str + sequence_number: int + allow_tf32_cublas: bool + + @property + def inputs(self) -> List[Input]: ... + @property + def scope(self) -> RecordScope: ... + +class _ExtraFields_Backend: ... + +class _ExtraFields_Allocation: + ptr: int + id: Optional[int] + alloc_size: int + total_allocated: int + total_reserved: int + + @property + def allocation_id(self) -> Optional[int]: ... + @property + def device(self) -> device: ... + +class _ExtraFields_OutOfMemory: ... + +class _PyFrameState: + line_number: int + function_name: str + + @property + def file_name(self) -> str: ... + +class _NNModuleInfo: + @property + def self_ptr(self) -> int: ... + @property + def cls_ptr(self) -> int: ... + @property + def cls_name(self) -> str: ... + @property + def parameters( + self, + ) -> List[Tuple[str, _TensorMetadata, Optional[_TensorMetadata]]]: ... + +class _OptimizerInfo: + @property + def parameters( + self, + ) -> List[ + Tuple[ + # Parameter + _TensorMetadata, + # + # Gradient (if present during optimizer.step()) + Optional[_TensorMetadata], + # + # Optimizer state for Parameter as (name, tensor) pairs + List[Tuple[str, _TensorMetadata]], + ] + ]: ... + +class _ExtraFields_PyCCall: + @property + def caller(self) -> _PyFrameState: ... + +class _ExtraFields_PyCall: + @property + def callsite(self) -> _PyFrameState: ... + @property + def caller(self) -> _PyFrameState: ... + @property + def module(self) -> Optional[_NNModuleInfo]: ... + @property + def optimizer(self) -> Optional[_OptimizerInfo]: ... 
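+
+# Illustrative only, not part of the generated stub: a hedged sketch of
+# walking the typed event tree exposed through _ProfilerEvent. The
+# prof.profiler.kineto_results.experimental_event_tree() accessor is an
+# internal, version-dependent assumption here; the workload is hypothetical.
+#
+#   with torch.profiler.profile(with_stack=True) as prof:
+#       model(inputs)  # hypothetical workload
+#   roots = prof.profiler.kineto_results.experimental_event_tree()
+#
+#   def visit(ev, depth=0):
+#       print("  " * depth, ev.tag, ev.name)   # _ProfilerEvent.tag / .name
+#       for child in ev.children:              # _ProfilerEvent.children
+#           visit(child, depth + 1)
+#
+#   for root in roots:
+#       visit(root)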
+ +class _ExtraFields_Kineto: ... + +def _add_execution_trace_observer(output_file_path: str) -> bool: ... +def _remove_execution_trace_observer() -> None: ... +def _enable_execution_trace_observer() -> None: ... +def _disable_execution_trace_observer() -> None: ... +def _set_record_concrete_inputs_enabled_val(val: bool) -> None: ... +def _set_fwd_bwd_enabled_val(val: bool) -> None: ... +def _set_cuda_sync_enabled_val(val: bool) -> None: ... + +class CapturedTraceback: ... + +def gather_traceback(python: bool, script: bool, cpp: bool) -> CapturedTraceback: ... + +# The Dict has name, filename, line +def symbolize_tracebacks( + to_symbolize: List[CapturedTraceback], +) -> List[List[Dict[str, str]]]: ... + +class _RecordFunctionFast: + def __init__(self, name: str) -> None: ... + def __enter__(self) -> None: ... + def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None: ... diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_C/_verbose.pyi b/llmeval-env/lib/python3.10/site-packages/torch/_C/_verbose.pyi new file mode 100644 index 0000000000000000000000000000000000000000..2388ce2bb8a5edd4c7640c374537e71607e5b72e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_C/_verbose.pyi @@ -0,0 +1,3 @@ +# Defined in torch/csrc/utils/verbose.cpp +def mkl_set_verbose(enable: int) -> int: ... +def mkldnn_set_verbose(level: int) -> int: ... diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_prims_common/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/_prims_common/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..08e066a9a72f8633beb5982bffa2a02d472315e3 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_prims_common/__init__.py @@ -0,0 +1,1985 @@ +from __future__ import annotations + +import operator +import warnings +import weakref + +from contextlib import nullcontext +from enum import Enum +from functools import cmp_to_key, reduce +from typing import ( + Any, + Callable, + cast, + List, + NamedTuple, + Optional, + overload, + Sequence, + Tuple, + Type, + TYPE_CHECKING, + Union, +) + +from typing_extensions import TypeAlias + + +if TYPE_CHECKING: + # Import the following modules during type checking to enable code intelligence features, + # such as auto-completion in tools like pylance, even when these modules are not explicitly + # imported in user code. 
+ + import sympy + +import torch +from torch import sym_float, sym_int, sym_max + + +ShapeType: TypeAlias = Union[torch.Size, List[int], Tuple[int, ...]] +StrideType: TypeAlias = Union[List[int], Tuple[int, ...]] +DimsType: TypeAlias = Union[int, List[int], Tuple[int, ...]] +DimsSequenceType: TypeAlias = Union[List[int], Tuple[int, ...]] +# TODO: Type[torch.SymInt], Type[torch.SymFloat] +NumberTypeType: TypeAlias = Union[Type[bool], Type[int], Type[float], Type[complex]] +# TODO: This needs a lot more type annotations +# NumberType = Union[bool, int, float, complex, torch.SymInt, torch.SymFloat] +NumberType: TypeAlias = Union[bool, int, float, complex] +RealNumberType: TypeAlias = Union[bool, int, float] + +Number = (bool, int, float, complex, torch.SymInt, torch.SymFloat) +# I don't call it Integral because numbers.Integral includes bool, but IntLike +# does not +Dim = int +IntLike = (int, torch.SymInt) +FloatLike = (float, torch.SymFloat) +IntWithoutSymInt = int +FloatWithoutSymFloat = float +DeviceLikeType: TypeAlias = Union[str, torch.device, int] +Tensor = torch.Tensor + + +torch_function_passthrough = { + torch.device, + torch.sym_not, + torch.sym_float, + torch.sym_int, + torch.sym_max, + torch.sym_min, + torch._sym_sqrt, # type: ignore[attr-defined] + torch.sym_ite, + torch.Tensor.dim, + torch.Tensor.ndim.__get__, # type: ignore[attr-defined] + torch.Tensor.numel, + torch.Tensor.size, + torch.Tensor.storage_offset, + torch.Tensor.stride, + torch.Tensor.dtype.__get__, # type: ignore[attr-defined] + torch.Tensor.is_sparse.__get__, # type: ignore[attr-defined] + torch.Tensor.shape.__get__, # type: ignore[attr-defined] + torch.Tensor.device.__get__, # type: ignore[attr-defined] + torch.Tensor.requires_grad.__get__, # type: ignore[attr-defined] + torch.Tensor.layout.__get__, # type: ignore[attr-defined] + torch.Tensor.is_contiguous, + # For TorchRefsMode only + torch.Tensor.__format__, + torch.Tensor.__repr__, + torch.Tensor.requires_grad.__get__, # type: ignore[attr-defined] +} + + +TensorLikeType = torch.Tensor +TensorLike = torch.Tensor +TensorSequenceType: TypeAlias = Union[List[TensorLikeType], Tuple[TensorLikeType, ...]] +TensorOrNumberLikeType: TypeAlias = Union[TensorLikeType, NumberType] + +CustomOutParamAnnotation = "__custom_out_param__" + + +def same_shape(a: ShapeType, b: ShapeType, *, allow_rhs_unbacked=False) -> bool: + from torch.fx.experimental.symbolic_shapes import guard_size_oblivious + + if len(a) != len(b): + return False + + for x, y in zip(a, b): + if allow_rhs_unbacked: + # TODO: We should check that the symbols are consistent + # with each other + if isinstance(y, torch.SymInt): + continue + # NB: Naively, you would not expect to have to do an oblivious guard + # here because there is seemingly no broadcasting here, but in fact we + # use this in some situations to determine if we need to do an expand + # on the tensor because they don't line up, so you can definitely end + # up trying to prove u0 != 1 in this situation. 
See + # python test/test_proxy_tensor.py -k test_cumsum_unbacked + if guard_size_oblivious(x != y): + return False + + return True + + +def _maybe_get_pytype(t): + if t is torch.SymFloat: + return float + elif t is torch.SymInt: + return int + elif t is torch.SymBool: + return bool + else: + return t + + +# TODO: look at using torch.testing.assert_close instead with an option +# to just compare metadata +def compare_tensor_meta( + a: TensorLikeType, + b: TensorLikeType, + check_strides=False, + *, + allow_rhs_unbacked=False, + check_conj=True, +): + """ + Checks that two tensor likes have the same shape, + dtype and device. + + In the future this will validate additional metadata, like + strides. + """ + assert isinstance(a, TensorLike) + assert isinstance(b, TensorLike) + + if not same_shape(a.shape, b.shape, allow_rhs_unbacked=allow_rhs_unbacked): + msg = f"Shapes {a.shape} and {b.shape} are not equal!" + raise AssertionError(msg) + + if a.dtype != b.dtype: + msg = f"Dtypes {a.dtype} and {b.dtype} are not equal!" + raise AssertionError(msg) + + if a.device != b.device: + # Handles special cuda:0 vs cuda case + # TODO: we should review why this happens and see about fixing it + if (str(a.device) == "cuda:0" or str(a.device) == "cuda") and ( + str(b.device) == "cuda:0" or str(b.device) == "cuda" + ): + pass + else: + msg = f"Devices {a.device} and {b.device} are not equal!" + raise AssertionError(msg) + + # Stride checking is currently disabled, see https://github.com/pytorch/pytorch/issues/78050 + if check_strides: + same_strides, idx = check_significant_strides(a, b) + if not same_strides: + msg = f"Stride mismatch! Strides are {a.stride()} and {b.stride()} (mismatched at {idx})!" + raise RuntimeError(msg) + + if a.storage_offset() != b.storage_offset(): + msg = f"Storage offset mismatch! Storage offsets are {a.storage_offset()} and {b.storage_offset()}!" + raise RuntimeError(msg) + + if check_conj: + if a.is_conj() != b.is_conj(): + raise RuntimeError( + f"Conj mismatch! is_conj is set to {a.is_conj()} and {b.is_conj()}" + ) + + if a.is_neg() != b.is_neg(): + raise RuntimeError( + f"Neg mismatch! is_neg is set to {a.is_neg()} and {b.is_neg()}" + ) + + +def _check_strides_helper( + a: TensorLikeType, b: TensorLikeType, *, only_cuda=True, significant_only=True +) -> Tuple[bool, Optional[int]]: + # NOTE: only on CUDA because CPU elementwise strides are incorrect in PyTorch + # See https://github.com/pytorch/pytorch/issues/77553 + # Only compares strides that are "meaningful" -- strides for dimensions with length > 1 + # and for tensors with more than one element + if ( + not only_cuda or a.device.type == "cuda" or b.device.type == "cuda" + ) and a.numel() > 0: + for idx in range(a.ndim): + check = not significant_only or a.shape[idx] > 1 + if a.stride()[idx] != b.stride()[idx] and check: + return False, idx + + return True, None + + +def check_significant_strides( + a: TensorLikeType, b: TensorLikeType, *, only_cuda=True +) -> Tuple[bool, Optional[int]]: + return _check_strides_helper(a, b, only_cuda=only_cuda, significant_only=True) + + +def check_all_strides( + a: TensorLikeType, b: TensorLikeType, *, only_cuda=True +) -> Tuple[bool, Optional[int]]: + return _check_strides_helper(a, b, only_cuda=only_cuda, significant_only=False) + + +# This function is equivalent to compute_contiguous() from TensorImpl.cpp +def is_contiguous(a: TensorLikeType) -> bool: + """ + Tests whether a tensor is contiguous or not. 
+ + Tensors are contiguous when they have no elements, + one element, or when they have "nested" strides. + """ + from torch.fx.experimental.symbolic_shapes import guard_size_oblivious + + if guard_size_oblivious(a.numel() < 2): + return True + + expected_stride = 1 + for x, y in reversed(tuple(zip(a.shape, a.stride()))): + # Skips checking strides when a dimension has length 1 + if guard_size_oblivious(x == 1): + continue + + if y != expected_stride: + return False + expected_stride = expected_stride * x + + return True + + +# This function is equivalent to compute_channels_last_contiguous_2d() in TensorImpl.cpp +def is_channels_last_contiguous_2d(a: Tensor) -> bool: + # NHWC or not channels last 2D contiguous + if a.ndim != 4: + return False + + expected_stride = 1 + for idx in (1, 3, 2, 0): + length = a.shape[idx] + if length == 1: + continue + + stride = a.stride()[idx] + if stride != expected_stride: + return False + + expected_stride *= length + + return True + + +def is_channels_last_contiguous_3d(a: Tensor) -> bool: + # NDHWC or not channels last 3D contiguous + if a.ndim != 5: + return False + + expected_stride = 1 + for idx in (1, 4, 3, 2, 0): + length = a.shape[idx] + if length == 1: + continue + + stride = a.stride()[idx] + if stride != expected_stride: + return False + + expected_stride *= length + + return True + + +_memory_formats = { + torch.contiguous_format, + torch.preserve_format, + torch.channels_last, + torch.channels_last_3d, +} + + +def validate_memory_format(memory_format: torch.memory_format): + torch._check( + memory_format in _memory_formats, + lambda: f"Received unknown memory format {memory_format}!", + ) + + +def is_contiguous_for_memory_format( # type: ignore[return] + a: Tensor, *, memory_format: torch.memory_format +) -> bool: + validate_memory_format(memory_format) + + if memory_format == torch.contiguous_format: + return is_contiguous(a) + if memory_format == torch.channels_last: + return is_channels_last_contiguous_2d(a) + if memory_format == torch.channels_last_3d: + return is_channels_last_contiguous_3d(a) + + torch._check( + False, + lambda: f"is_contiguous received unsupported memory format {memory_format}", + ) + + +# NOTE: that tensors with no elements and channels last is ??? +def is_channels_last_contiguous(a: Tensor) -> bool: + """ + True when a tensor is channels-last contiguous. + + This requires that: + + - the tensor is conceptually either 4 (NHWC) or 5 (NDHWC) dimensions + - if we name the tensor's dimensions NCHW or NCDHW, then the strides are such that the + stride of the 'C' dimension (Cs) is 1 and the strides corresponding to + each dimension (Xs) can be ordered Cs <= Ws <= Hs <= (Ds) <= Ns and are + "nested" -- so Ws = Cs * Cl, where Cl is the length of the 'C' dimension, + for example. + """ + return is_channels_last_contiguous_2d(a) or is_channels_last_contiguous_3d(a) + + +def is_non_overlapping_and_dense(a: Tensor) -> bool: + """ + True when a tensor is non-overlapping and dense. + + A tensor is non-overlapping and dense when there exists a permutation of + its dimensions that is contiguous. 
+ """ + + from torch.fx.experimental.symbolic_shapes import guard_size_oblivious + + if a.is_sparse: + return False + + # Short-circuits if the tensor is already contiguous or channels-last contiguous + if is_contiguous(a) or is_channels_last_contiguous(a): + return True + + # The following is equivalent to compute_non_overlapping_and_dense in TensorImpl.cpp + + # Short-circuits for tensors of rank one, which are + # non-overlapping and "dense" if their stride is one + if a.ndim == 1: + return a.stride()[0] == 1 + + # Checks that there exists a permutation of the strides s.t. the tensor would be contiguous + # Sorts (length, stride) pairs by stride + # + # This sort is done in a size-oblivious way, which helps if we do a + # comparison like 2048*u0 > u0; we just want this to return True + # (and not worry about what if u0 is zero). + class K(NamedTuple): + size: int + stride: int + + def __lt__(self, other): + return guard_size_oblivious(self.stride < other.stride) + + def __gt__(self, other): + return guard_size_oblivious(self.stride > other.stride) + + def __le__(self, other): + return guard_size_oblivious(self.stride <= other.stride) + + def __ge__(self, other): + return guard_size_oblivious(self.stride >= other.stride) + + def __eq__(self, other): + return guard_size_oblivious(self.stride == other.stride) + + lengths_and_strides = sorted(map(K, a.shape, a.stride())) + + expected_stride = 1 + for length, stride in lengths_and_strides: + if guard_size_oblivious(length == 1): + continue + + if stride != expected_stride: + return False + + expected_stride *= length + + return True + + +# NOTE: Based on the implementation in TensorIterator.cpp, but note that +# the note [Computing output strides] is incorrect, because it +# says that strides will be preserved even if they are not +# "non overlapping and dense", but this is incorrect. The +# output of elementwise operations are always given +# non overlapping and dense strides. +# This is also INCORRECT because it does not model TensorIterator's +# short-circuit, which can cause different strides. +def compute_elementwise_output_logical_to_physical_perm( + *tensors, _skip_checks=False +) -> List[int]: + from torch.fx.experimental.symbolic_shapes import guard_size_oblivious + + if not _skip_checks and len(tensors) == 0: + msg = "Can't compute elementwise output strides for zero tensors!" + raise ValueError(msg) + + if not _skip_checks: + check_same_shape(*tensors, allow_cpu_scalar_tensors=True) + + # Filters the tensors to actual tensors + if not _skip_checks: + tensors = tuple( + a + for a in tensors + if isinstance(a, TensorLike) and not is_cpu_scalar_tensor(a) + ) + + # Short-circuits for CPU scalar case + if len(tensors) == 0: + return [] + + # Short-circuits for shapes with zero or one dimensions + # TODO: are these necessary? + ndim = tensors[0].ndim + if ndim == 0: + return [] + if ndim == 1: + return [0] + + # Short-circuits if contiguous, following the fake fast path. 
+ # This reduces the number of guards we end up making + # TODO: do channels last too + is_contiguous = True + for t in tensors: + is_contiguous = is_contiguous and t.is_contiguous( + memory_format=torch.contiguous_format + ) + + if is_contiguous: + return list(range(ndim)) + + shape = tensors[0].shape + + def should_swap(idx_a, idx_b): + for tensor in tensors: + stride_a = tensor.stride()[idx_a] + stride_b = tensor.stride()[idx_b] + + if guard_size_oblivious(stride_a == 0) or guard_size_oblivious( + stride_b == 0 + ): + continue + + if guard_size_oblivious(stride_a < stride_b): + return -1 + + if guard_size_oblivious(stride_a > stride_b): + return 1 + + # stride_a == stride_b + if guard_size_oblivious(shape[idx_a] > shape[idx_b]): + return 1 + + # Note: this case is hit if all strides are zero, + # or all strides are equal and all dimensions have the same length + return 0 + + # The "sort" order for the permutation is back-to-front, but + # the natural order for permutations is front-to-back. Do the + # sorting back-to-front and then reverse it on output. + # + # also, note this returns the logical to physical shape permutation + perm = list(reversed(range(ndim))) + + # insertion sort with support for ambiguous comparisons + for i in range(1, ndim): + dim1 = i + for dim0 in reversed(range(i)): + comparison = should_swap(perm[dim0], perm[dim1]) + if comparison > 0: + perm[dim0], perm[dim1] = perm[dim1], perm[dim0] + dim1 = dim0 + elif comparison < 0: + break + + return list(reversed(perm)) + + +def compute_elementwise_output_strides(*tensors) -> Tuple[int, ...]: + """ + Computes the output strides for elementwise operations. + """ + if len(tensors) == 0: + msg = "Can't compute elementwise output strides for zero tensors!" + raise ValueError(msg) + + check_same_shape(*tensors, allow_cpu_scalar_tensors=True) + + # Filters the tensors to actual tensors + tensors = tuple( + a for a in tensors if isinstance(a, TensorLike) and not is_cpu_scalar_tensor(a) + ) + + # Short-circuits for CPU scalar case + if len(tensors) == 0: + return () + + ndim = tensors[0].ndim + shape = tensors[0].shape + + if ndim == 0: + return () + if ndim == 1: + return (1,) + + logical_to_physical_perm = compute_elementwise_output_logical_to_physical_perm( + *tensors, _skip_checks=True + ) + permuted_shape = apply_perm(shape, logical_to_physical_perm) # to physical + + new_strides = make_contiguous_strides_for(permuted_shape) + permuted_strides = apply_perm( + new_strides, invert_perm(logical_to_physical_perm) + ) # to logical + + return tuple(permuted_strides) + + +# Identity permutation is [0, 1, 2] +def apply_perm(inp, perm): + ndim = len(inp) + permuted_inp = [-1] * ndim + for idx, x in enumerate(perm): + permuted_inp[idx] = inp[x] + return permuted_inp + + +def invert_perm(perm): + ndim = len(perm) + new_perm = [-1] * ndim + for idx, x in enumerate(perm): + new_perm[x] = idx + return new_perm + + +# +# Common helper functions +# + + +def validate_dim_length(length: int): + """ + Validates that an object represents a valid + dimension length. + """ + + if isinstance(length, (int, torch.SymInt)): + torch._check_is_size(length) + else: + # sometimes called with sympy expression by inductor + assert length >= 0 + + +def validate_shape(shape: ShapeType): + """ + Validates that a sequence represents a valid shape. + """ + + assert isinstance(shape, Sequence), type(shape) + for l in shape: + validate_dim_length(l) + + +def validate_strides(strides: StrideType): + """ + Verifies the object specifies valid strides. 
+ """ + + assert isinstance(strides, Sequence) + for stride in strides: + assert stride >= 0 + + +def validate_idx(rank: int, idx: int): + """ + Validates that idx is a valid index for the given shape. + Assumes the index is already canonicalized. + """ + + assert isinstance(idx, Dim) + assert isinstance(rank, Dim) + + assert idx >= 0 and idx < rank or idx == 0 + + +def validate_dimension_indices(rank: int, indices: DimsSequenceType): + for idx in indices: + validate_idx(rank, idx) + + +def validate_exclusive_idx(rank: int, ex_idx: int): + """ + Validates that ex_idx is a valid exclusive index + for the given shape. + """ + + assert isinstance(ex_idx, Dim) + assert isinstance(rank, Dim) + assert ex_idx > 0 and ex_idx <= rank + + +# "Wraps" a dim (up to one time) for the given rank, allowing dims to be +# specified using negative indices. If `wrap_scalar` is true then scalar +# tensors of rank 0 will allow dimensions in the range [-1, 0]. Otherwise, +# idx should be in the range [-rank, rank-1]. +def canonicalize_dim(rank: int, idx: int, wrap_scalar: bool = True) -> int: + if rank < 0: + msg = f"Rank cannot be negative but got {rank}" + raise IndexError(msg) + + if rank == 0: + if not wrap_scalar: + msg = f"Dimension specified as {idx} but tensor has no dimensions" + raise IndexError(msg) + rank = 1 + + if idx >= 0 and idx < rank: + return idx + + if idx < 0: + _idx = idx + rank + else: + _idx = idx + + if _idx < 0 or _idx >= rank: + # Same error message as in aten/src/ATen/WrapDimUtils.h:49 + msg = f"Dimension out of range (expected to be in range of [{-rank}, {rank - 1}], but got {idx})" + raise IndexError(msg) + + return _idx + + +# Takes a dimension or sequence of dimensions and "wraps" them, +# mapping negative offsets to positive ones +@overload +def canonicalize_dims( + rank: int, indices: Sequence[int], wrap_scalar: bool = True +) -> Tuple[int, ...]: + pass + + +@overload +def canonicalize_dims(rank: int, indices: int, wrap_scalar: bool = True) -> int: + pass + + +def canonicalize_dims(rank, indices, wrap_scalar=True): + if isinstance(indices, Dim): + return canonicalize_dim(rank, indices, wrap_scalar) + + return tuple(canonicalize_dim(rank, x, wrap_scalar) for x in indices) + + +def is_valid_permutation(rank: int, perm: DimsSequenceType) -> bool: + """ + Validates that perm is a permutation of length rank. + """ + + if not isinstance(perm, Sequence): + return False + + if not (tuple(sorted(perm)) == tuple(range(0, rank))): + return False + + return True + + +def is_same_shape(a: Sequence, b: Sequence) -> bool: + """ + Compares two shapes a and b, returning True if they are the same + (their ranks and corresponding lengths match) and False otherwise. + """ + + return tuple(a) == tuple(b) + + +def is_cpu_scalar_tensor(a: Any) -> bool: + return isinstance(a, TensorLike) and a.ndim == 0 and a.device.type == "cpu" + + +def check_same_device(*args, allow_cpu_scalar_tensors): + """ + Checks that all Tensors in args have the same device. 
+ + Raises a RuntimeError when: + - args contains an object whose type is not Tensor or Number + - two Tensor objects in args have different devices, unless one is a CPU scalar tensor and allow_cpu_scalar_tensors is True + """ + # Short-circuits if all (one or fewer) arguments are trivially on the same device + if len(args) <= 1: + return + + # Note: cannot initialize device to the first arg's device (it may not have one) + device = None + for arg in args: + if isinstance(arg, Number): + continue + elif isinstance(arg, TensorLike): + if allow_cpu_scalar_tensors and is_cpu_scalar_tensor(arg): + continue + + if device is None: + device = arg.device + + if device != arg.device: + msg = ( + "Tensor on device " + + str(arg.device) + + " is not on the expected device " + + str(device) + + "!" + ) + raise RuntimeError(msg) + else: + msg = ( + "Unexpected type when checking for same device, " + str(type(arg)) + "!" + ) + raise RuntimeError(msg) + + +def canonicalize_device(device: DeviceLikeType) -> torch.device: + if isinstance(device, torch.device): + return device + + assert isinstance(device, str) + return torch.device(device) + + +# Asserts if any of the following are true: +# - a non-scalar or non-Tensor is given +# - the shape of any tensors is distinct +def check_same_shape(*args, allow_cpu_scalar_tensors: bool): + """ + Checks that all Tensors in args have the same shape. + + Raises a RuntimeError when: + - args contains an object whose type is not Tensor or Number + - two Tensor objects in args have different devices + """ + shape = None + + for arg in args: + if isinstance(arg, Number): + continue + elif isinstance(arg, TensorLike): + if allow_cpu_scalar_tensors and is_cpu_scalar_tensor(arg): + continue + + if shape is None: + shape = arg.shape + + if not is_same_shape(shape, arg.shape): + msg = f"Shape {arg.shape} is not the expected shape {shape}!" + raise RuntimeError(msg) + else: + msg = ( + "Unexpected type when checking for same shape, " + str(type(arg)) + "!" + ) + raise RuntimeError(msg) + + +# Acquires a common shape, if it exists, from one or more tensor arguments, +# filtering number arguments +def extract_shape(*args, allow_cpu_scalar_tensors: bool) -> Optional[ShapeType]: + shape = None + scalar_shape = None + + for arg in args: + if isinstance(arg, Number): + continue + elif isinstance(arg, TensorLike): + if allow_cpu_scalar_tensors and is_cpu_scalar_tensor(arg): + scalar_shape = arg.shape + continue + + if shape is None: + shape = arg.shape + + if not is_same_shape(shape, arg.shape): + return None + else: + return None + + return shape if shape is not None else scalar_shape + + +# Extracts dimensions that might be passed either as a list/tuple or as varargs. +# A typical case is Tensor.permute . +def extract_dims_from_varargs( + dims: Union[DimsSequenceType, Tuple[DimsSequenceType, ...]] +) -> DimsSequenceType: + if dims and isinstance(dims[0], Sequence): + assert len(dims) == 1 + dims = cast(Tuple[DimsSequenceType], dims) + return dims[0] + else: + return cast(DimsSequenceType, dims) + + +def extract_shape_from_varargs( + shape: Union[ShapeType, Tuple[ShapeType]], + validate=True, +) -> Tuple[int, ...]: + """ + Returns a shape from varargs. + + In PyTorch, operations that accept shapes often accept them as varargs, like + foo(*shape). 
However a user can pass the shape as a sequence of integers, + like this: + + foo(1, 2, 3) + + or as a sequence of integers + + foo((1, 2, 3)) + + In the first case shape will be a tuple of integers, and in the second case it's a tuple + containing a tuple of integers. This validates those inputs and canonicalizes them + to a tuple of integers. + """ + + # Handles tuple unwrapping + if len(shape) == 1 and isinstance(shape[0], Sequence): + shape = shape[0] + + if validate: + validate_shape(shape) # type: ignore[arg-type] + return shape # type: ignore[return-value] + + +def infer_size_shapes(a: ShapeType, b: ShapeType) -> Tuple[int, ...]: + ndim = max(len(a), len(b)) + expandedSizes = [0] * ndim + + for i in range(ndim - 1, -1, -1): + offset = ndim - 1 - i + dimA = len(a) - 1 - offset + dimB = len(b) - 1 - offset + sizeA = a[dimA] if dimA >= 0 else 1 + sizeB = b[dimB] if dimB >= 0 else 1 + + torch._check( + (sizeA == sizeB) or (sizeA == 1) or (sizeB == 1), + lambda: ( + f"The size of tensor a ({sizeA}) must match the size of " + f"tensor b ({sizeB}) at non-jagged dimension {i}" + ), + ) + + # 1s map to the other size (even 0) + expandedSizes[i] = sizeB if sizeA == 1 else sizeA + + return tuple(expandedSizes) + + +def infer_size(shape: ShapeType, numel: int) -> Tuple[int, ...]: + """ + Infers the size of a dim with size -1, if it exists. + Also checks that new shape is compatible with the number of elements. + """ + dim = None + newsize = 1 + for i, d in enumerate(shape): + if d == -1: + torch._check(dim is None, lambda: "only one dimension can be inferred") + dim = i + elif d >= 0: + newsize *= d + else: + torch._check(False, lambda: f"invalid shape dimension {d}") + if dim is None: + torch._check( + numel == newsize, + lambda: f"shape '{list(shape)}' is invalid for input of size {numel}", + ) + else: + from torch.fx.experimental.symbolic_shapes import definitely_true + + torch._check( + newsize != 0, + lambda: ( + f"cannot reshape tensor of 0 elements into shape {list(shape)} because the " + f"unspecified dimension size -1 can be any value and is ambiguous" + if definitely_true(numel == 0) + else f"shape '{list(shape)}' is invalid for input of size {numel}" + ), + ) + torch._check( + numel % newsize == 0, + lambda: f"shape '{list(shape)}' is invalid for input of size {numel}", + ) + # Convert to list to produce a compatible error message with core + # PyTorch, which prints sequences in square brackets. + shape = list(shape) + shape[dim] = numel // newsize + # NB: This is pretty important when you have unbacked SymInts. + # Suppose you have (i0, 12) resizing into (2, -1, 12). The old + # range for i0 is typically [2, inf], which means if you divide + # by two the new range should be [1, inf]. But this is bad news + # if you have an unbacked SymInt: we need to reapply the unsound + # assumption that the size is >= 2. 
+ torch._check_is_size(shape[dim]) + return tuple(shape) + + +_integer_dtypes = ( + torch.uint8, + torch.uint16, + torch.uint32, + torch.uint64, + torch.int8, + torch.int16, + torch.int32, + torch.int64, +) +_low_precision_dtypes = (torch.float16, torch.bfloat16, torch.complex32) +_complex_dtypes = (torch.complex32, torch.complex64, torch.complex128) + + +def is_boolean_dtype(dtype: torch.dtype) -> bool: + assert isinstance(dtype, torch.dtype) + return dtype is torch.bool + + +def is_integer_dtype(dtype: torch.dtype) -> bool: + assert isinstance(dtype, torch.dtype) + return dtype in _integer_dtypes + + +def is_low_precision_dtype(dtype: torch.dtype) -> bool: + assert isinstance(dtype, torch.dtype) + return dtype in _low_precision_dtypes + + +def is_float_dtype(dtype: torch.dtype) -> bool: + assert isinstance(dtype, torch.dtype) + return dtype.is_floating_point + + +def is_complex_dtype(dtype: torch.dtype) -> bool: + assert isinstance(dtype, torch.dtype) + return dtype in _complex_dtypes + + +def is_grad_dtype(dtype: torch.dtype) -> bool: + """ + Checks if the dtype can require a gradient. + """ + return dtype.is_floating_point or is_complex_dtype(dtype) + + +_complex_to_real_dtype_map = { + torch.complex128: torch.float64, + torch.complex64: torch.float32, + torch.complex32: torch.float16, +} + +_real_to_complex_dtype_map = { + torch.float16: torch.complex32, + torch.bfloat16: torch.complex64, + torch.float32: torch.complex64, + torch.float64: torch.complex128, +} + + +def corresponding_real_dtype(dtype: torch.dtype) -> torch.dtype: + return _complex_to_real_dtype_map[dtype] + + +def corresponding_complex_dtype(dtype: torch.dtype) -> torch.dtype: + return _real_to_complex_dtype_map[dtype] + + +def dtype_to_type(dtype: torch.dtype) -> type: + """ + Computes the corresponding Python type (AKA "type kind") for the + given dtype. + """ + assert isinstance(dtype, torch.dtype) + + if dtype is torch.bool: + return bool + if dtype in _integer_dtypes: + return int + if dtype.is_floating_point: + return float + if dtype in _complex_dtypes: + return complex + + raise ValueError("Invalid dtype!") + + +def dtype_to_type_ctor(dtype: torch.dtype) -> Callable[[NumberType], NumberType]: + """ + Computes the corresponding Python type constructor for the + given dtype. + """ + assert isinstance(dtype, torch.dtype) + + if dtype is torch.bool: + return lambda x: bool(x) + if dtype in _integer_dtypes: + return sym_int + if dtype.is_floating_point: + return sym_float + if dtype in _complex_dtypes: + # TODO: type error here is real, replace with sym_complex + return lambda x: complex(x) # type: ignore[arg-type] + + raise ValueError("Invalid dtype!") + + +def type_to_dtype(typ: type) -> torch.dtype: + """ + Computes the corresponding dtype for a Number type. + """ + + assert isinstance(typ, type) + + if typ is bool: + return torch.bool + if typ in [int, torch.SymInt]: + return torch.long + if typ in [float, torch.SymFloat]: + return torch.get_default_dtype() + # TODO: sym_complex_float? + if typ is complex: + return corresponding_complex_dtype(torch.get_default_dtype()) + + raise ValueError("Invalid type!") + + +def get_dtype(x: Union[torch.Tensor, NumberType]): + if isinstance(x, torch.Tensor): + return x.dtype + else: + return type_to_dtype(type(x)) + + +_ordered_types = (bool, int, float, complex) + + +def check_fp_or_complex( + dtype: torch.dtype, fn_name: str, allow_low_precision_dtypes: bool = True +): + """ + Checks whether the input is floating point or complex. 
+ If allow_low_precision_dtypes is True, it allows having float16, bfloat16, and complex32 + """ + torch._check( + is_float_dtype(dtype) or is_complex_dtype(dtype), + lambda: f"{fn_name}: Expected a floating point or complex tensor as input. Got {dtype}", + ) + torch._check( + allow_low_precision_dtypes or not is_low_precision_dtype(dtype), + lambda: f"{fn_name}: Half precision dtypes not supported. Got {dtype}", + ) + + +def check_is_matrix(A: TensorLikeType, f_name: str, arg_name: str = "A"): + torch._check( + len(A.shape) >= 2, + lambda: f"{f_name}: The input tensor {arg_name} must have at least 2 dimensions.", + ) + + +def get_higher_type(a: type, b: type) -> type: + """ + Returns the higher of the two given Number types. + + The types are ordered bool -> int -> float -> complex. + """ + a, b = _maybe_get_pytype(a), _maybe_get_pytype(b) + # Type checking + if a not in _ordered_types or b not in _ordered_types: + raise RuntimeError(f"Expected builtin numeric types, found {a}, {b}") + + if a is b: + return a + + for typ in _ordered_types: + if a is typ: + return b + if b is typ: + return a + + raise ValueError("Unknown Python scalar type!") + + +# Returns the higher of two torch datatypes a and b or, if the two +# are not ordered relative to each other, the next +# higher datatype +def get_higher_dtype( + a: Optional[Union[torch.dtype, TensorLikeType, NumberType]], + b: Optional[Union[torch.dtype, TensorLikeType, NumberType]], +) -> Optional[torch.dtype]: + """ + Computes the "lowest" datatype that is weakly + "higher" than both a and b. + """ + + # Type checking + assert a is None or isinstance(a, (torch.dtype, TensorLike, Number)) + assert b is None or isinstance(b, (torch.dtype, TensorLike, Number)) + + def _extract_dtype( + x: Optional[Union[torch.dtype, TensorLikeType, NumberType]] + ) -> Optional[torch.dtype]: + if x is None: + return None + if isinstance(x, torch.dtype): + return x + if isinstance(x, TensorLike): + return x.dtype + if isinstance(x, Number): + return type_to_dtype(type(x)) + + raise RuntimeError("Unexpected type given to _extract_dtype!") + + a, b = _extract_dtype(a), _extract_dtype(b) + + if a is b: + return a + + if a is None: + return b + + if b is None: + return a + + ordered_datatypes = ( + (torch.bool,), + (torch.uint8, torch.int8), + (torch.int16,), + (torch.int32,), + (torch.int64,), + (torch.float16, torch.bfloat16), + (torch.float32,), + (torch.float64,), + (torch.complex32,), + (torch.complex64,), + (torch.complex128,), + ) + + for idx, dtypes in enumerate(ordered_datatypes): + if a in dtypes and b in dtypes: + return ordered_datatypes[idx + 1][0] + if a in dtypes: + return b + if b in dtypes: + return a + + raise RuntimeError("Unexpected termination!") + + +def check_pin_memory(pin_memory: bool): + torch._check_not_implemented( + not pin_memory, lambda: "PrimTorch does not support pinned memory" + ) + + +def check_layout(layout: torch.layout): + torch._check_not_implemented( + layout == torch.strided, lambda: f"PrimTorch doesn't support layout={layout}" + ) + + +# TODO: maybe unify with can_cast_to? +def is_weakly_lesser_type(a: type, b: type) -> bool: + """ + Compares two types, a and b, returning True if a is weakly "less" than b. + + The comparison is determined by the following type ordering: bool, int, float, complex. 
+ """ + + a, b = _maybe_get_pytype(a), _maybe_get_pytype(b) + + if a not in _ordered_types or b not in _ordered_types: + raise RuntimeError(f"Expected builtin numeric types, found {a}, {b}") + + for typ in _ordered_types: + if a == typ: + return True + if b == typ: + return False + + raise RuntimeError("Unexpected termination!") + + +def can_safe_cast_to(*, cast_to: torch.dtype, cast_from: torch.dtype) -> bool: + for fn in (is_complex_dtype, is_float_dtype, is_integer_dtype, is_boolean_dtype): + if fn(cast_to): + return True + if fn(cast_from): + return False + + raise ValueError(f"Received unknown dtypes {cast_to}, {cast_from}!") + + +def check_same_dtype(*args): + """ + Checks that all Tensors in args have the same device and that all Numbers have the + same corresponding Python type. + + Raises a RuntimeError when: + - args contains an object whose type is not Tensor or Number + - two Tensors objects in args have different dtypes + - two Number objects in args have different types + - there are Tensors and Numbers in args, and one of those Tensors corresponding + Python types is different from the type of one of those Numbers + """ + full_dtype = None + scalar_type = None + + for arg in args: + if isinstance(arg, Number): + # Scalar type checking is disabled (and may be removed in the future) + continue + # if scalar_type is None: + # scalar_type = type(arg) + + # if scalar_type is not type(arg): + # msg = ( + # "Scalar of type " + # + str(type(arg)) + # + " is not the expected type of " + # + str(scalar_type) + # + "!" + # ) + # raise RuntimeError(msg) + elif isinstance(arg, TensorLike): + if full_dtype is None: + full_dtype = arg.dtype + if scalar_type is None: + scalar_type = dtype_to_type(arg.dtype) + + if full_dtype is not arg.dtype: + msg = ( + "Tensor with dtype " + + str(arg.dtype) + + " is not the expected dtype of " + + str(full_dtype) + + "!" + ) + raise RuntimeError(msg) + + arg_type = dtype_to_type(arg.dtype) + if arg_type is not scalar_type: + msg = ( + "Tensor with corresponding Python type " + + str(arg_type) + + " is not the expected type of " + + str(scalar_type) + + "!" + ) + raise RuntimeError(msg) + else: + msg = ( + "Unexpected type when checking for same dtype, " + str(type(arg)) + "!" 
+ ) + raise RuntimeError(msg) + + +# Maps datatypes to their computation types for elementwise operations +_computation_dtype_map = { + torch.bfloat16: torch.float32, + torch.float16: torch.float32, + torch.complex32: torch.complex64, +} + + +def get_computation_dtype(dtype: torch.dtype) -> torch.dtype: + return _computation_dtype_map.get(dtype, dtype) + + +_cpu_acc_type_map = { + torch.bfloat16: torch.float64, + torch.float16: torch.float64, + torch.float32: torch.float64, + torch.complex32: torch.complex128, + torch.complex64: torch.complex128, +} + + +def get_acc_type(dtype: torch.dtype, device: torch.device) -> torch.dtype: + # Equivalent to at::toAccumulateType, prefer computation_dtype where possible + if device.type == "cpu": + return _cpu_acc_type_map.get(dtype, dtype) + else: + return get_computation_dtype(dtype) + + +class ELEMENTWISE_TYPE_PROMOTION_KIND(Enum): + DEFAULT = (0,) + NO_OPMATH = (1,) + INT_TO_FLOAT = (2,) + ALWAYS_BOOL = (3,) + COMPLEX_TO_FLOAT = (4,) + BOOL_TO_LONG = (5,) + + +class REDUCTION_OUTPUT_TYPE_KIND(Enum): + SAME = (0,) + COMPLEX_TO_FLOAT = (1,) # for complex types outputs corresponding real type + KEEP_PROMOTED_TYPE = (2,) # keep output in opmath type, needed for mean + ALWAYS_BOOL = (3,) + + +# Describes the return type of the primitive: +# +# - NEW, a new tensor is created +# - VIEW, a view of an input tensor is returned +# - INPLACE, one or more input tensors is modified +# +# these descriptors are mututally exclusive and exhaustive. +class RETURN_TYPE(Enum): + NEW = (0,) + VIEW = (1,) + INPLACE = (2,) + + +# TODO: when NumberType contains the sym types, can simplify this +def number_type(x: Union[NumberType, torch.SymInt, torch.SymFloat]) -> Type: + if isinstance(x, torch.SymInt): + return int + elif isinstance(x, torch.SymFloat): + return float + else: + return type(x) + + +def expr_type(x: sympy.Expr) -> Type: + if x.is_integer: # type: ignore[attr-defined] + return int + else: + # NB: Not strictly correct, but we don't support SymPy complex or bool. + return float + + +# TODO: document type promotion kinds +def elementwise_dtypes( + *_args, + type_promotion_kind: ELEMENTWISE_TYPE_PROMOTION_KIND, +) -> Tuple[torch.dtype, torch.dtype]: + """ + Computes the computation and result dtypes for elementwise type promotion + on the given arguments and with the given elementwise type promotion kind. + + Note that not all inputs to an elementwise operation necessarily participate in type promotion. + For example, the "alpha" parameter of torch.add does not participate in type promotion, + although it may be cast to the Python type corresponding to the computation dtype that + the type promotion algorithm determines. + + Default elementwise type promotion, which all other type promotion kinds tweak (see below), + first decides which of four ordered types to use: + + bool -> integer -> floating point -> complex + + The selected type is the "lowest" type in the above list such that all number arguments + have a weakly "lower" type and all tensor arguments have a weakly lower corresponding + type for their dtype. + + Once the type is determined, the particular result dtype is found. 
The dtypes are + partially ordered as follows: + + bool -> uint8, int8 -> int16 -> int32 -> int64 -> + float16, bfloat16 -> float32 -> float64 -> complex32 -> complex64 -> complex128 + + The result dtype is selected by: + - if no tensor's dtype has the same corresponding type as the one selected, + then the result dtype is the (default) dtype corresponding to the selected type + (for example, 1.5 + an integer tensor has a result dtype of the default floating point dtype) + - if the result type is complex then the dtype is: + - the default complex dtype if there are no floating point or complex tensors + - if there are floating point or complex tensors with one or more dimensions, then + the complex dtype corresponding to the highest corresponding complex dtype among those tensors + (for example, double + cfloat -> cdouble) + - if there are only floating point or complex tensors with zero dimensions, then + the complex dtype corresponding to the highest corresponding complex dtype among those tensors + - if the first two cases do not apply, the result dtype is the highest dtype among + all tensors with one or more dimensions of the output type, and if there are no such + tensors then it's the highest dtype among all tensors with zero dimensions of the output type + (for example, long + half -> half, even if the half tensor has zero dimensions) + + The "corresponding complex dtypes" are: + float16 -> complex32 + bfloat16 -> complex64 + float32 -> complex64 + float64 -> complex128 + complex32 -> complex32 + complex64 -> complex64 + complex128 -> complex128 + + The DEFAULT type promotion kind computes per above, and then uses the result dtype to pick a computation + dtype by mapping low precision floating point and complex dtypes as follows: + + float16 -> float32 + bfloat16 -> float32 + complex32 -> complex64 + + This is referred to as "op math", and the NO_OPMATH type promotion kind disables this mapping, making the + computation dtype the same as the result dtype when it's selected. NO_OPMATH is appropriate for kernels + which perform no mathematical operations on their tensors (see below for examples). + + The INT_TO_FLOAT type promotion kind maps boolean and integer result dtypes to the default floating point dtype, + and computation dtypes to the appropriate op math dtype. + + The COMPLEX_TO_FLOAT type promotion kind maps complex result dtypes to the corresponding float dtype, following this + mapping: + + complex32 -> float16 + complex64 -> float32 + complex128 -> float64 + + Note that COMPLEX_TO_FLOAT derives the computation dtype as the DEFAULT setting does. + + The BOOL_TO_LONG type promotion kind maps boolean computation and result dtypes to long. + + The ALWAYS_BOOL type promotion kind always sets the result dtype to bool. + + Example operators for each type promotion option: + DEFAULT : add + NO_OPMATH : where, nextafter, cat + INT_TO_FLOAT : sin + COMPLEX_TO_FLOAT : abs + BOOL_TO_LONG : pow + ALWAYS_BOOL : eq + + """ + + args = tuple(x for x in _args if x is not None) + + highest_type: type = bool + + # Import sympy locally, as importing it eagerly at a module level is too slow + # See https://dev-discuss.pytorch.org/t/delving-into-what-happens-when-you-import-torch/1589 + import sympy + + for x in args: + if not isinstance(x, (Number, TensorLike, sympy.Expr)): + msg = f"Unexpected type {str(type(x))} when computing elementwise type promotion!" 
+ raise ValueError(msg) + + if isinstance(x, Number): + highest_type = get_higher_type(highest_type, number_type(x)) + elif isinstance(x, sympy.Expr): + highest_type = get_higher_type(highest_type, expr_type(x)) + else: + # x is a TensorLike + highest_type = get_higher_type(highest_type, dtype_to_type(x.dtype)) + + result_dtype = None + + def _find_highest_dtype_filtered( + args, filter, *, float_as_complex=False + ) -> Optional[torch.dtype]: + zero_dim_tensor_dtype = None + one_plus_dim_tensor_dtype = None + for x in args: + if isinstance(x, TensorLike) and filter(x.dtype): + _dtype = x.dtype + if float_as_complex and is_float_dtype(_dtype): + _dtype = corresponding_complex_dtype(_dtype) + if x.ndim == 0: + zero_dim_tensor_dtype = get_higher_dtype( + zero_dim_tensor_dtype, _dtype + ) + else: + # x.ndim > 0 + one_plus_dim_tensor_dtype = get_higher_dtype( + one_plus_dim_tensor_dtype, _dtype + ) + + # Prefers dtype of tensors with one or more dimensions + if one_plus_dim_tensor_dtype is not None: + return one_plus_dim_tensor_dtype + + return zero_dim_tensor_dtype + + if highest_type is float: + result_dtype = _find_highest_dtype_filtered(args, is_float_dtype) + result_dtype = ( + torch.get_default_dtype() if result_dtype is None else result_dtype + ) + elif highest_type is complex: + result_dtype = _find_highest_dtype_filtered( + args, + lambda x: is_float_dtype(x) or is_complex_dtype(x), + float_as_complex=True, + ) + if result_dtype is None: + result_dtype = corresponding_complex_dtype(torch.get_default_dtype()) + elif highest_type is int: + result_dtype = _find_highest_dtype_filtered(args, is_integer_dtype) + result_dtype = torch.long if result_dtype is None else result_dtype + else: + # highest_type is bool + result_dtype = torch.bool + + if type_promotion_kind is ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT: + return get_computation_dtype(result_dtype), result_dtype + elif type_promotion_kind is ELEMENTWISE_TYPE_PROMOTION_KIND.NO_OPMATH: + return result_dtype, result_dtype + elif type_promotion_kind is ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT: + if is_integer_dtype(result_dtype) or is_boolean_dtype(result_dtype): + result_dtype = torch.get_default_dtype() + return get_computation_dtype(result_dtype), result_dtype + elif type_promotion_kind is ELEMENTWISE_TYPE_PROMOTION_KIND.COMPLEX_TO_FLOAT: + # NOTE: computation can still occur in a complex dtype + computation_dtype = get_computation_dtype(result_dtype) + if is_complex_dtype(result_dtype): + result_dtype = corresponding_real_dtype(result_dtype) + return computation_dtype, result_dtype + elif type_promotion_kind is ELEMENTWISE_TYPE_PROMOTION_KIND.BOOL_TO_LONG: + if is_boolean_dtype(result_dtype): + return torch.long, torch.long + return get_computation_dtype(result_dtype), result_dtype + elif type_promotion_kind is ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL: + return get_computation_dtype(result_dtype), torch.bool + else: + raise ValueError(f"Unknown type promotion kind {str(type_promotion_kind)}") + + +def reduction_dtypes( + arg, + output_dtype_kind: REDUCTION_OUTPUT_TYPE_KIND, + dtype: Optional[torch.dtype] = None, +) -> Tuple[torch.dtype, Optional[torch.dtype]]: + # even though some reductions, like amin or amax, don't strictly require type promotion, + # all the math ops (including comparisons) are still defined only for a computation type, + # so promotion will still happen. 
We are doing it explicitly here + inp_dtype = dtype if dtype is not None else arg.dtype + computation_dtype = get_computation_dtype(inp_dtype) + if ( + output_dtype_kind == REDUCTION_OUTPUT_TYPE_KIND.SAME + or output_dtype_kind == REDUCTION_OUTPUT_TYPE_KIND.COMPLEX_TO_FLOAT + ): + result_dtype = dtype if dtype else arg.dtype + if ( + output_dtype_kind == REDUCTION_OUTPUT_TYPE_KIND.COMPLEX_TO_FLOAT + and is_complex_dtype(result_dtype) + ): + result_dtype = corresponding_real_dtype(result_dtype) + elif output_dtype_kind == REDUCTION_OUTPUT_TYPE_KIND.KEEP_PROMOTED_TYPE: + result_dtype = None + else: # ALWAYS_BOOL + result_dtype = torch.bool + return computation_dtype, result_dtype + + +# This function's logic is borrowed from the following functions defined in C++: +# batched_matrix_contiguous_strides and contiguous_strides +def make_contiguous_strides_for( + shape: ShapeType, row_major: bool = True +) -> Tuple[int, ...]: + """ + Returns the strides of a contiguous tensor if row_major + If row_major=True, it returns the strides of a contiguous batch of Fortran-contiguous matrices + This is often used when calling external libraries like BLAS/LAPACK/cuSolver... + """ + # contiguous_strides from c10/util/strides.h + validate_shape(shape) + if not shape: + return () + + from torch.fx.experimental.symbolic_shapes import is_nested_int + + multiplier = 1 + strides = [] + for l in reversed(shape): + strides.append(multiplier) + multiplier *= l if is_nested_int(l) else sym_max(l, 1) + + result = tuple(reversed(strides)) + + # batched_matrix_contiguous_strides from aten/src/ATen/native/LinearAlgebraUtils.h + if row_major: + return result + else: + if len(shape) < 2: + return result + return result[:-2] + (1, max(shape[-2], 1)) + + +def make_channels_last_1d_strides_for(shape: ShapeType) -> Tuple[int, ...]: + torch._check( + len(shape) == 3, + lambda: "Only tensors of rank 3 can use the channels_last_1d memory format", + ) + + multiplier = 1 + strides = [0] * 3 + for idx in (1, -1, 0): + # NOTE: intentionally divergence from make_contiguous_strides_for + # This is consistent with eager + strides[idx] = multiplier + multiplier *= shape[idx] + + return tuple(strides) + + +def make_channels_last_2d_strides_for(shape: ShapeType) -> Tuple[int, ...]: + # TODO: maybe inform the user of channels_last_3d if rank of the tensor is 5? 
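+    # Illustrative note (added, assuming a plain NCHW shape): for shape
+    # (N, C, H, W) the loop below yields strides (H*W*C, 1, W*C, C), matching
+    # eager channels_last, e.g.
+    #   torch.empty(2, 3, 4, 5).to(memory_format=torch.channels_last).stride()
+    #   == (60, 1, 15, 3)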
+ torch._check( + len(shape) == 4, + lambda: "Only tensors of rank 4 can use the channels_last memory format", + ) + + multiplier = 1 + strides = [0] * 4 + for idx in (1, -1, -2, 0): + # NOTE: intentionally divergence from make_contiguous_strides_for + # This is consistent with eager + strides[idx] = multiplier + multiplier *= shape[idx] + + return tuple(strides) + + +def make_channels_last_3d_strides_for(shape: ShapeType) -> Tuple[int, ...]: + torch._check( + len(shape) == 5, + lambda: "Only tensors of rank 5 can use the channels_last_3d memory format", + ) + + multiplier = 1 + strides = [0] * 5 + for idx in (1, -1, -2, -3, 0): + # NOTE: intentionally divergence from make_contiguous_strides_for + # This is consistent with eager + strides[idx] = multiplier + multiplier *= shape[idx] + + return tuple(strides) + + +def make_channels_last_strides_for(shape: ShapeType) -> Tuple[int, ...]: + ndim = len(shape) if isinstance(shape, Sequence) else 1 + if ndim == 3: + return make_channels_last_1d_strides_for(shape) + elif ndim == 4: + return make_channels_last_2d_strides_for(shape) + elif ndim == 5: + return make_channels_last_3d_strides_for(shape) + else: + raise RuntimeError( + f"no channels last format strides exist in {ndim} dimensions" + ) + + +def compute_reduction_output_shape( + shape: ShapeType, dimensions: Sequence +) -> Tuple[int, ...]: + for idx in dimensions: + validate_idx(len(shape), idx) + + new_shape = [] + for idx in range(len(shape)): + if idx in dimensions: + continue + + new_shape.append(shape[idx]) + + return tuple(new_shape) + + +def validate_no_repeating_dims(dims: Sequence): + if len(dims) != len(set(dims)): + raise RuntimeError("duplicate value in the list of dims") + + +def reduction_dims(shape: ShapeType, dims: Optional[Sequence]) -> Tuple[int, ...]: + if dims is None: + return tuple(range(len(shape))) + dims = tuple(canonicalize_dim(len(shape), idx) for idx in dims) + validate_no_repeating_dims(dims) + return dims + + +def set_correction( + unbiased: Optional[bool] = None, + correction: Optional[NumberType] = None, +) -> float: + if correction is not None and unbiased is not None: + raise RuntimeError("cannot specify both correction and unbiased arguments") + elif correction is None and unbiased is None: + correction = 1.0 + elif correction is None and unbiased is not None: + correction = 0.0 if unbiased is False else 1.0 + # NB: we don't actually support symint here, but it's harmless to accept + if not isinstance(correction, (IntLike, FloatLike)): + raise ValueError("correction argument should be integer or float") + if correction < 0: + raise ValueError("correction argument should be non-negative") + return sym_float(correction) + + +def compute_required_storage_length( + shape: ShapeType, strides: StrideType, storage_offset: int +) -> int: + """Computes the minimum storage size to hold the given tensor geometry. 
+ + Example + ======= + + This is the size of a newly allocated tensor's storage, in units of elements + + >>> t = torch.empty((10, 20)) + >>> compute_required_storage_length(t.shape, t.stride(), t.storage_offset()) + 200 + + >>> # xdoctest: +SKIP(failing) + >>> t2 = torch.empty_strided((1, 2, 3), (5, 7, 11)) + >>> size = compute_required_storage_length(t2.shape, t2.stride(), t2.storage_offset()) + >>> size == t.storage().size() + True + + A valid tensor may have a larger storage size, but never smaller + + >>> slice = torch.empty(100)[20:40] + >>> slice.storage().size() + 100 + + >>> compute_required_storage_length(slice.shape, slice.stride(), slice.storage_offset()) + 40 + + """ + from torch.fx.experimental.symbolic_shapes import guard_size_oblivious + + # Short-circuits if the shape has no elements + if guard_size_oblivious(reduce(operator.mul, shape, 1) == 0): + return 0 + + max_offset = sum((x - 1) * y for x, y in zip(shape, strides)) + # +1 to account for the first element which offsets are taken from + return 1 + storage_offset + max_offset + + +def check_in_bounds_for_storage( + a: torch.TypedStorage, shape: ShapeType, strides: StrideType, storage_offset: int +): + """ + Determines if the given shape, strides, and offset are valid for the given storage. + """ + + required_length = compute_required_storage_length(shape, strides, storage_offset) + if a.size() < required_length: + msg = ( + "Can't view a storage of size {} with an offset of {}, shape of {}, and strides of {}, " + "which requires a storage of size {}".format( + a.size(), storage_offset, str(shape), str(strides), required_length + ) + ) + raise ValueError(msg) + + +# NOTE: This function should ideally be removed, but some Meta internal models +# packaged with `torch.package` are using it, so it will have to be removed +# at some point in the future when those models no longer use this function. +def check( + b: bool, s: Callable[[], str], exc_type: Type[Exception] = RuntimeError +) -> None: + """ + Helper function for raising an error_type (default: RuntimeError) if a boolean condition fails. + Error message is a callable producing a string (to avoid wasting time + string formatting in non-error case, and also to make it easier for torchdynamo + to trace.) + + .. note:: This function is planned for removal in the future. Please use + `torch._check*` functions instead. + """ + warnings.warn( + DeprecationWarning( + "'torch._prims_common.check' will be removed in the future. 
Please use " + "'torch._check*' functions instead" + ) + ) + torch._check_with(exc_type, b, s) + + +# This combines is_channels_last_strides_2d and is_channels_last_strides_3d in +# c10/core/MemoryFormat.h into one function +def are_strides_like_channels_last( + shape: Sequence[int], strides: Sequence[int] +) -> bool: + ndim = len(shape) + + if ndim == 4: + # Check for channels_last_2d + dim_order = [1, 3, 2, 0] + elif ndim == 5: + # Check for channels_last_3d + dim_order = [1, 4, 3, 2, 0] + else: + return False + + if strides[1] == 0: + return False + + min = 0 + for d in dim_order: + if shape[d] == 0: + return False + if strides[d] < min: + return False + if d == 0 and min == strides[1]: + return False + min = strides[d] + if strides[d] > 1: + min *= shape[d] + return True + + +def suggest_memory_format(x: TensorLikeType) -> torch.memory_format: + if x.layout != torch.strided: + return torch.contiguous_format + + if are_strides_like_channels_last(x.shape, x.stride()): + return torch.channels_last if x.ndim == 4 else torch.channels_last_3d + + return torch.contiguous_format + + +def prod(xs: Sequence[NumberType]) -> NumberType: + """Product of elements in input sequence. Returns 1 for empty sequence""" + return reduce(operator.mul, xs, 1) + + +def is_expandable_to(shape: ShapeType, desired: ShapeType) -> bool: + """Checks if a shape can be expanded to another shape. + This is equivalent to checking if the two shapes are broadcastable. + """ + # This is a Python implementation of + # aten/src/ATen/ExpandUtils.h:is_expandable_to + if len(shape) > len(desired): + return False + for i in range(len(shape)): + if shape[-i - 1] != desired[-i - 1] and shape[-i - 1] != 1: + return False + return True + + +def mask_tensor(mask: TensorLikeType, t: TensorLikeType): + """ + Similar to torch.where(mask, t, 0) but if t is boolean, + result is also boolean and not promoted to int. + """ + # torch.where(mask, t, False) is equivalent + # but feels hacky and might break in the future + if t.dtype is torch.bool: + return mask.logical_and(t) + else: + return torch.where(mask, t, 0) + + +def get_aten_op(fn: Callable, name: str): + """ + Given the __module__ of reference and its name, it returns + (our best guess of) the ATen name of the associated operation + + Note: In ATen, the __name__ of a function within a module often + starts by the module name. E.g. linalg_eigh, or special_zeta + """ + module = fn.__module__ + prefix = "torch._refs" + assert module.startswith(prefix) + module = module[len(prefix) :] + # We want to go from .special / .nn.functional + # to special and special_ / nn_functional_ + if module: + module = module[1:] + module = module.replace(".", "_") + module = module + "_" + return getattr(torch._ops.ops.aten, f"{module}{name}") + + +def dtype_or_default(dtype: Optional[torch.dtype]) -> torch.dtype: + return dtype if dtype is not None else torch.get_default_dtype() + + +def device_or_default(device: Optional[DeviceLikeType]) -> DeviceLikeType: + return device if device is not None else torch.device("cpu") + + +def layout_or_default(layout: Optional[torch.layout]) -> torch.layout: + return layout if layout is not None else torch.strided + + +def clone_preserve_strides(x): + needed_size = compute_required_storage_length( + x.size(), x.stride(), x.storage_offset() + ) + # Our eager implementations for *_scatter ops are all primitives w.r.t autograd, + # so these as_strided() calls are not seen by autograd. + # We need to mimic this behavior in our ref/prim implementations. 
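+    # (Illustrative example of the net effect, reusing the slice from the
+    # compute_required_storage_length docstring: for x = torch.empty(100)[20:40],
+    # with size (20,), stride (1,), storage_offset 20, needed_size is 40, so a
+    # flat 40-element view of the storage is cloned and re-viewed with x's
+    # original size/stride/storage_offset.)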
+ # TODO: a better way to handle this would be with a new op, "_unsafe_as_strided" + # We should revisit this when we add a compositional as_strided op, + # and also as part of https://github.com/pytorch/pytorch/issues/90507 + try: + old = torch._C._dispatch_tls_is_dispatch_key_excluded( + torch._C.DispatchKey.ADInplaceOrView + ) + torch._C._dispatch_tls_set_dispatch_key_excluded( + torch._C.DispatchKey.ADInplaceOrView, True + ) + buffer = torch.as_strided(x, (needed_size,), (1,), 0).clone() + return torch.as_strided(buffer, x.size(), x.stride(), x.storage_offset()) + finally: + torch._C._dispatch_tls_set_dispatch_key_excluded( + torch._C.DispatchKey.ADInplaceOrView, old + ) + + +def alert_not_deterministic(caller: str): + if torch.are_deterministic_algorithms_enabled(): + if torch.is_deterministic_algorithms_warn_only_enabled(): + warnings.warn( + f"{caller} does not have a deterministic implementation, but you set " + f"'torch.use_deterministic_algorithms(True, warn_only=True)'. " + f"You can file an issue at https://github.com/pytorch/pytorch/issues " + f"to help us prioritize adding deterministic support for this operation." + ) + else: + torch._check( + False, + lambda: ( + f"{caller} does not have a deterministic implementation, but you set " + f"'torch.use_deterministic_algorithms(True)'. You can turn off " + f"determinism just for this operation, or you can use the " + f"'warn_only=True' option, if that's acceptable for your application. " + f"You can also file an issue at https://github.com/pytorch/pytorch/issues " + f"to help us prioritize adding deterministic support for this operation." + ), + ) + + +class CUDARngStateHelper: + @staticmethod + def get_torch_state_as_tuple(fake_mode=nullcontext()): + if not torch.cuda.is_available(): + raise RuntimeError("CUDA not available") + + with fake_mode: + seed = torch.tensor(torch.cuda.initial_seed()) + offset = torch.tensor(torch.cuda._get_rng_state_offset()) + return seed, offset + + @staticmethod + def set_torch_state_tensor(seed, offset): + # Rng state is [64-bit seed, 64-bit offset] + seed_portion = seed.reshape([1]).view(torch.uint8) + offset_portion = offset.reshape([1]).view(torch.uint8) + new_state = torch.cat([seed_portion, offset_portion]) + torch.cuda.set_rng_state(new_state) + + @staticmethod + def set_new_offset(relative_offset): + torch.cuda._set_rng_state_offset(relative_offset.item()) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_prims_common/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/_prims_common/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..19ff42f6238a6b492482d9b788e65578e2566a16 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/_prims_common/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_prims_common/__pycache__/wrappers.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/_prims_common/__pycache__/wrappers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a35f4b1454c92dcbf67c3baa4fcc808d49609375 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/_prims_common/__pycache__/wrappers.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_prims_common/wrappers.py b/llmeval-env/lib/python3.10/site-packages/torch/_prims_common/wrappers.py new file mode 100644 index 
0000000000000000000000000000000000000000..1fa575dc78c065326ab70673cac6baf4237cb0c0 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_prims_common/wrappers.py @@ -0,0 +1,401 @@ +import inspect +import warnings +from functools import wraps +from itertools import chain + +from typing import Callable, NamedTuple, Optional, overload, Sequence, Tuple + +import torch +import torch._prims_common as utils +from torch._prims_common import ( + CustomOutParamAnnotation, + ELEMENTWISE_TYPE_PROMOTION_KIND, + Number, + NumberType, + ShapeType, + TensorLike, + TensorLikeType, +) +from torch.utils import _pytree as pytree +from torch.utils._pytree import tree_flatten, tree_unflatten + + +@overload +def _maybe_convert_to_dtype(a: TensorLikeType, dtype: torch.dtype) -> TensorLikeType: + pass + + +@overload +def _maybe_convert_to_dtype(a: NumberType, dtype: torch.dtype) -> NumberType: + pass + + +@overload +def _maybe_convert_to_dtype(a: Sequence, dtype: torch.dtype) -> Sequence: + pass + + +@overload +def _maybe_convert_to_dtype(a: None, dtype: torch.dtype) -> None: + pass + + +# TODO: implement ref.cast with an option to enforce safe casting +def _maybe_convert_to_dtype(a, dtype): + if isinstance(a, TensorLike): + if a.dtype != dtype: + return a.to(dtype) + return a + if isinstance(a, Number): + return utils.dtype_to_type_ctor(dtype)(a) # type: ignore[arg-type] + if isinstance(a, Sequence): + return tuple(_maybe_convert_to_dtype(x, dtype) for x in a) + # Passthrough None because some functions wrapped with type promotion + # wrapper might have optional args + if a is None: + return None + + raise ValueError(f"Received type {type(a)} that is neither a tensor or a number!") + + +def _maybe_convert_to_type(a: NumberType, typ: type) -> NumberType: + if not isinstance(a, Number): + msg = f"Found unknown type {type(a)} when trying to convert scalars!" + raise ValueError(msg) + if not utils.is_weakly_lesser_type(type(a), typ): + msg = f"Scalar {a} of type {type(a)} cannot be safely cast to type {typ}!" + raise ValueError(msg) + + return typ(a) + + +def _annotation_has_type(*, typ, annotation): + if hasattr(annotation, "__args__"): + for a in annotation.__args__: + if _annotation_has_type(typ=typ, annotation=a): + return True + return False + + return typ is annotation + + +class elementwise_type_promotion_wrapper: + """ + Adds elementwise type promotion to a Python reference implementation. + + Takes two kwargs, type_promoting_args and type_promotion_kind. + + type_promoting_args must be a string Sequence specifiying the argument names of all + arguments that participate in type promotion (and should be type promoted). If the + arg specifies a Sequence-type then every element of the Sequence will participate in + type promotion. + + type_promotion_kind must be one of the kinds specified by ELEMENTWISE_TYPE_PROMOTION_KIND. + See its documentation for details. + + The return_dtype will be coerced to the wrapped function's dtype arg if it is available and + not None. + + Other type promotion behavior, like validating the Python type of scalar arguments, must + be handled separately. 
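+
+    A minimal illustrative usage sketch (hypothetical reference implementation,
+    in the style of the decorated ops in torch._refs):
+
+        @elementwise_type_promotion_wrapper(
+            type_promoting_args=("a", "b"),
+            type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
+        )
+        def my_add(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType:
+            return torch.add(a, b)
+
+    Here "a" and "b" are cast to the computation dtype before the call and the
+    result is converted to the result dtype afterwards.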
+ """ + + def __init__( + self, + *, + type_promotion_kind: ELEMENTWISE_TYPE_PROMOTION_KIND, + type_promoting_args: Optional[Sequence[str]] = None, + ): + self.type_promoting_arg_names = type_promoting_args + self.type_promotion_kind = type_promotion_kind + + def __call__(self, fn: Callable) -> Callable: + sig = inspect.signature(fn) + + @wraps(fn) + def _fn(*args, **kwargs): + bound = sig.bind(*args, **kwargs) + type_promoting_args = tuple( + bound.arguments[x] + for x in self.type_promoting_arg_names # type: ignore[union-attr] + if x in bound.arguments.keys() + ) + + flattened_type_promoting_args = pytree.arg_tree_leaves(*type_promoting_args) + compute_dtype, result_dtype = utils.elementwise_dtypes( + *flattened_type_promoting_args, + type_promotion_kind=self.type_promotion_kind, + ) + + promoted_args = { + x: _maybe_convert_to_dtype(bound.arguments[x], compute_dtype) + for x in self.type_promoting_arg_names # type: ignore[union-attr] + if x in bound.arguments.keys() + } + bound.arguments.update(promoted_args) + + result = fn(**bound.arguments) + + # Override the return_dtype if a dtype arg is present and not None + if "dtype" in bound.arguments: + maybe_dtype = bound.arguments["dtype"] + if maybe_dtype: # dtype cannot be None + result_dtype = maybe_dtype + + if isinstance(result, TensorLike): + return _maybe_convert_to_dtype(result, result_dtype) + if isinstance(result, Sequence): + return tuple(_maybe_convert_to_dtype(x, result_dtype) for x in result) + raise AssertionError(f"Unhandled result type: {type(result)}") + + _fn.__signature__ = sig # type: ignore[attr-defined] + return _fn + + +# Returns True if resize is necessary +def _resize_output_check(out: TensorLikeType, shape: ShapeType): + # If the shapes are correct there's nothing to do + if utils.same_shape(out.shape, shape): + return False + if out.numel() != 0: + msg = ( + f"An output with one or more elements was resized since it had shape {str(out.shape)} " + "which does not match the required output shape {str(shape)}. " + "This behavior is deprecated, and in a future PyTorch release outputs will not " + "be resized unless they have zero elements. " + "You can explicitly reuse an out tensor t by resizing it, inplace, to zero elements with t.resize_(0)." 
+ ) + warnings.warn(msg) + return True + + +# TODO: handle tuples of tensors +def _maybe_resize_out(out: TensorLikeType, shape: ShapeType): + if _resize_output_check(out, shape): + return out.resize_(shape) + else: + return out + + +def _safe_copy_out( + *, copy_from: TensorLikeType, copy_to: TensorLikeType, exact_dtype: bool = False +): + # Checks same device + if copy_from.device != copy_to.device: + msg = "Attempting to copy from device {} to device {}, but cross-device copies are not allowed!".format( + copy_from.device, copy_to.device + ) + raise RuntimeError(msg) + + # Checks safe cast + if exact_dtype: + torch._check( + copy_from.dtype == copy_to.dtype, + lambda: f"Expected out tensor to have dtype {copy_from.dtype} " + f"but got {copy_to.dtype} instead", + ) + else: + torch._check( + utils.can_safe_cast_to(cast_from=copy_from.dtype, cast_to=copy_to.dtype), + lambda: f"Attempting to cast from {copy_from.dtype} to out tensor with dtype {copy_to.dtype}, " + "but this can't be cast because it is not safe!", + ) + + return copy_to.copy_(copy_from) + + +def out_wrapper(*out_names: str, exact_dtype: bool = False, pass_is_out: bool = False): + # The wrapped function needs to convert the output parameters to ensure + # compatibility between the Python API (which always uses "out" as the + # parameter name and may be a tuple) and the Aten API (which may have + # multiple output parameters and use different parameter names such as + # "grad_input", "indices" or "values".) + + default_out_names = ("out",) + if len(out_names) == 0: + # Use default in out name + out_names = default_out_names + + is_tensor = len(out_names) == 1 + + def _out_wrapper(fn: Callable) -> Callable: + """ + Adds the out parameter to a Python reference. + """ + out_type = ( + TensorLikeType + if is_tensor + else Tuple[tuple(TensorLikeType for _ in range(len(out_names)))] + ) + return_type = ( + TensorLikeType + if is_tensor + else NamedTuple( + f"return_types_{fn.__name__}", [(o, TensorLikeType) for o in out_names] + ) + ) + + sig = inspect.signature(fn) + factory_kwargs = ("device", "dtype") + is_factory_fn = all(p in sig.parameters for p in factory_kwargs) + + @wraps(fn) + def _fn(*args, out=None, **kwargs): + if is_factory_fn and out is not None: + for k in factory_kwargs: + out_attr = getattr(out, k) + if k not in kwargs: + kwargs[k] = out_attr + if pass_is_out: + result = fn(*args, is_out=(out is not None), **kwargs) + else: + result = fn(*args, **kwargs) + assert ( + isinstance(result, TensorLike) + and is_tensor + or isinstance(result, Tuple) # type: ignore[arg-type] + and len(result) == len(out_names) + ) + if out is not None: + # Naively you might expect this assert to be true, but + # it's not: + # + # assert type(out) == type(result) + # + # The reason is that functions under this wrapper can + # get registered to the Meta dispatch key, and that + # means they can be executed in a context where tensor + # subclasses are disabled (with no_dispatch), which is a + # handy way for an is-a tensor subclass (e.g., + # FakeTensor) to have the normal meta backend create a + # meta tensor, to be wrapped once it gets returned. + # In this situation, you will get a FakeTensor as + # the output tensor, but not the result--which will + # be a normal meta tensor, but this is perfectly + # harmless. 
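+                # Descriptive note (added): in either case the code below only
+                # resizes the user-supplied out tensor(s) when their shape does
+                # not match, then copies the computed result into them in-place.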
+ if is_tensor: + assert isinstance(out, TensorLike) + # These two operations are done in-place + _maybe_resize_out(out, result.shape) + _safe_copy_out(copy_from=result, copy_to=out, exact_dtype=exact_dtype) # type: ignore[arg-type] + else: + assert isinstance(out, Tuple) # type: ignore[arg-type] + torch._check_type( + len(out) == len(result), + lambda: f"expected tuple of {len(result)} elements but got {len(out)}", + ) + for r, o in zip(result, out): + # These two operations are done in-place + _maybe_resize_out(o, r.shape) + _safe_copy_out(copy_from=r, copy_to=o, exact_dtype=exact_dtype) # type: ignore[arg-type] + else: + out = result + # mypy does not see through the definition of out_type given that it's in a different scope + return out if is_tensor else return_type(*out) # type: ignore[operator] + + out_param = inspect.Parameter( + "out", + kind=inspect.Parameter.KEYWORD_ONLY, + default=None, + annotation=out_type, + ) + # Mark that the function now returns a tuple + assert isinstance(sig.return_annotation, str) or sig.return_annotation in ( + sig.empty, + out_type, + ) + params = chain(sig.parameters.values(), (out_param,)) + _fn.__signature__ = inspect.Signature( # type: ignore[attr-defined] + parameters=params, return_annotation=return_type # type: ignore[arg-type] + ) + + _fn.__annotations__ = fn.__annotations__ + _fn.__annotations__["out"] = out_type + _fn.__annotations__["return"] = return_type + + # In the special case of having a single tensor out parameter with a + # name other than out, add a special annotation to name the parameter + if is_tensor and out_names != default_out_names: + _fn.__annotations__[CustomOutParamAnnotation] = out_names[0] + + # Add an indicator attribute that can be used in special cases + # where having a function wrapped by `out_wrapper` is not desirable e.g. + # jit + _fn._torch_decompositions_out_wrapper = f"This function is wrapped by {out_wrapper.__module__}.out_wrapper" # type: ignore[attr-defined] + + return _fn + + return _out_wrapper + + +def _maybe_remove_out_wrapper(fn: Callable): + return inspect.unwrap( + fn, + stop=lambda f: not hasattr(f, "_torch_decompositions_out_wrapper"), + ) + + +def backwards_not_supported(prim): + def redispatch_prim(args, kwargs): + with torch._C._AutoDispatchBelowAutograd(): + old = torch._C._dispatch_tls_is_dispatch_key_excluded( + torch._C.DispatchKey.ADInplaceOrView + ) + return prim(*args, **kwargs) + + class BackwardsNotSupported(torch.autograd.Function): + @staticmethod + def forward(ctx, args_spec, *flat_args): + args, kwargs = tree_unflatten(flat_args, args_spec) # type: ignore[arg-type] + return redispatch_prim(args, kwargs) + + @staticmethod + def backward(ctx, *args): + raise RuntimeError("backwards not supported on prim") + + @wraps(prim) + def _autograd_impl(*args, **kwargs): + flat_args, args_spec = tree_flatten((args, kwargs)) + if torch.is_grad_enabled() and any( + a.requires_grad for a in flat_args if isinstance(a, torch.Tensor) + ): + # TODO: There is a subtle bug here: prims like copy_to + # return their input argument after mutating it; and custom + # autograd function will incorrectly turn the result into + # a view which will fail test_python_ref_executor tests. + # At the moment, we sidestep this by observing that the + # unit tests don't ever try to run the executor with + # autograd, so we don't exercise the buggy case, but if + # you ever want to feed autograd through this, be aware + # of it! 
We need a way of properly implementing autograd + # for mutating operations in Python to do this. + return BackwardsNotSupported.apply(args_spec, *flat_args) + else: + return redispatch_prim(args, kwargs) + + return _autograd_impl + + +# TODO: when tracing this will add torch tensors and not TensorMeta objects +# to the trace -- we should fix this by adding a tracing context and NumberMeta classes +# TODO: this wrapper is currently untested +def elementwise_unary_scalar_wrapper(fn: Callable) -> Callable: + """ + Allows unary operators that accept tensors to work with Python numbers. + """ + sig = inspect.signature(fn) + + @wraps(fn) + def _fn(*args, **kwargs): + if len(args) > 0 and isinstance(args[0], Number): + dtype = utils.type_to_dtype(type(args[0])) + args_ = list(args) + args_[0] = torch.tensor(args[0], dtype=dtype) + result = fn(*args_, **kwargs) + assert isinstance(result, torch.Tensor) + return result.item() + + return fn(*args, **kwargs) + + _fn.__signature__ = sig # type: ignore[attr-defined] + return _fn diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/common_rules.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/common_rules.py new file mode 100644 index 0000000000000000000000000000000000000000..7d581995bd816cd23afa17d65fb919dd8c601782 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/common_rules.py @@ -0,0 +1,289 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates +from typing import cast, Dict, List, Optional, Tuple + +import torch +from torch.distributed._tensor._utils import compute_local_shape +from torch.distributed._tensor.op_schema import ( + _is_inplace_op, + _is_out_variant_op, + OpSchema, + OutputSharding, +) +from torch.distributed._tensor.ops.utils import prod +from torch.distributed._tensor.placement_types import DTensorSpec, TensorMeta + + +def _replace_char_in_str(string: str, new_char: str, idx: int) -> str: + return string[:idx] + new_char + string[idx + 1 :] + + +def _gen_reshard_suggestions( + op_schema: OpSchema, + input_dims: List[str], + input_specs: Tuple[DTensorSpec, ...], + dim_to_sharding: Dict[str, int], + pending_sum: List[int], +) -> OutputSharding: + suggested_arg_specs: List[DTensorSpec] = [] + for input_dim, input_spec in zip(input_dims, input_specs): + dim_map = [dim_to_sharding[dim] for dim in input_dim] + suggested_arg_specs.append( + DTensorSpec.from_dim_map( + mesh=input_spec.mesh, + dim_map=dim_map, + sums=pending_sum, + tensor_meta=input_spec.tensor_meta, + ) + ) + suggested_schema = OpSchema(op_schema.op, tuple(suggested_arg_specs), {}) + suggested_schema._inplace_rewrap_schema_suggestion(op_schema) + return OutputSharding( + None, + schema_suggestions=[suggested_schema], + failed_reason="Input placements op sharding propagation failed, need to reshard!", + ) + + +def einop_rule( + equation: str, + op_schema: OpSchema, + *, + linearity: bool = False, + enforce_sharding: Optional[Dict[str, int]] = None, +) -> OutputSharding: + """ + Propagate the sharding of inputs to output for ops whose data moves according to einsum notation. + + This is mostly borrowed from @zdevito's sharding simulator. Examples: + mk,kn->mn - einsum + ij,ij->ij - addition + ij,j->ij - broadcasted addition + ij->i - reduction + Other ops could use this propagation algorithm when applied, note + that einsum propagation only deal with list of specs (DTensor specs) + as it only works on list of tensors! 
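+
+    Illustrative example (informal): for equation "mk,kn->mn" with the first
+    input sharded along "m" on mesh dim 0 (dim_map [0, -1]) and the second
+    input replicated (dim_map [-1, -1]), the propagated output keeps
+    dim_map [0, -1], i.e. it stays sharded along "m" with no reshard and no
+    pending sum.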
+ + linearity in einop_rule means that the calling op `f` follows this rule: + f(a + b) = f(a) + f(b) + + In this case we can propagate the partial sum, note that linearity in einop + only applies to partial sum, not other operations like min/max (which are + associative but not linear). + """ + # parse einop equation and extract arg specs + inputs, outputs = equation.split("->") + input_dims, output_dims = inputs.split(","), outputs.split(",") + input_specs = op_schema.args_spec + # NOTE: only support single output unless needed in future + output_dim = output_dims[0] + + dim_to_sharding: Dict[str, int] = {} + dim_to_size: Dict[str, int] = {} + # record pending sum, key is mesh dimension, value is pending sum + # counter across input specs + pending_sums_counter: Dict[int, int] = {} + seen_shardings: Dict[int, str] = {} + needs_reshard = False + + def merge_sharding(dim: str, a: int, b: int) -> int: + # merge the sharding of inputs if it's able to merge, i.e. we can merge + # replicate and shard to shard, but this will trigger an reshard operation + if a != b: + if a == -1 or b == -1: + # reshard the replicate to match the sharded one + nonlocal needs_reshard + needs_reshard = True + return a if a != -1 else b + else: + # TODO: further merge the sharding properly (i.e. reshard one input to replicate) + raise RuntimeError( + f"{equation}: dim {dim} sharded two different ways: {a} and {b}" + ) + else: + return a + + for input_dim, input_spec in zip(input_dims, input_specs): + # deal with partial sums + input_sums = input_spec.sums + for sum_dim in input_sums: + if sum_dim not in pending_sums_counter: + seen_shardings[sum_dim] = "+" + # update pending sum counter for pending sum mesh + # dimension with the occurrence from each input + pending_sums_counter[sum_dim] = pending_sums_counter.get(sum_dim, 0) + 1 + + for idx, (dim, mesh_dim) in enumerate(zip(input_dim, input_spec.dim_map)): + if enforce_sharding and dim in enforce_sharding: + if enforce_sharding[dim] != mesh_dim: + needs_reshard = True + dim_to_sharding[dim] = enforce_sharding[dim] + dim_to_size[dim] = input_spec.shape[idx] + elif dim not in dim_to_sharding: + dim_to_sharding[dim] = mesh_dim + dim_to_size[dim] = input_spec.shape[idx] + else: + dim_to_sharding[dim] = merge_sharding( + dim, dim_to_sharding[dim], mesh_dim + ) + assert dim_to_size[dim] == input_spec.shape[idx] + + # after merging sharding, we check if there're multiple + # sharding on the same mesh dim. 
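+            # e.g. (illustrative) "ij,ij->ij" with inputs placed as Shard(0)
+            # and Shard(1) on the same 1-D mesh ends up with both "i" and "j"
+            # mapped to mesh dim 0; the check below detects this and marks the
+            # op for resharding.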
+ merged_sharding_for_dim = dim_to_sharding[dim] + if merged_sharding_for_dim != -1: + if ( + merged_sharding_for_dim in seen_shardings + and dim != seen_shardings[merged_sharding_for_dim] + ): + needs_reshard = True + seen_shardings[merged_sharding_for_dim] += dim + else: + seen_shardings[merged_sharding_for_dim] = dim + + if pending_sums_counter and not linearity: + # return reshard suggestion with no pending sum, because we already properly + # merge the sharding, this reshard suggestion is legit to use + return _gen_reshard_suggestions( + op_schema, input_dims, input_specs, dim_to_sharding, [] + ) + else: + # It's a op that support linearity, but not all input arguments are partial + # we fail the sharding propagation with suggestion to make all inputs be + # partial on the corresponding mesh dim (all inputs should be partial for + # the mesh dims in order to execute locally and delay the sum reduction) + for value in pending_sums_counter.values(): + if value != len(input_specs): + needs_reshard = True + + for mesh_dim, dims in seen_shardings.items(): + if len(dims) > 1: + # we found different input dims are being sharded on the same mesh dim + # in order to perform local op computation, we need to reshard inputs + # base on some simple heuristics, now we simply pick the one with least comm + # volume. (i.e. the input with least size) + # TODO: consider a more advanced heuristic to pick the best sharding + costs = [] + for d in dims: + cost = 0 + for input_dim, input_spec in zip(input_dims, input_specs): + if ( + d in input_dim + and input_spec.dim_map[input_dim.index(d)] == mesh_dim + ): + assert input_spec.tensor_meta is not None + global_shape = input_spec.tensor_meta.shape + local_shape = compute_local_shape( + global_shape, input_spec.mesh, input_spec.placements + ) + cost += prod(local_shape) * input_spec.mesh.size(mesh_dim) + costs.append(cost) + d_to_keep_sharding = dims[costs.index(max(costs))] + for d in dims: + # update dim_to_sharding to keep the sharding of the dim with + # highest comm and make the rest of the dims to replicate + if d != d_to_keep_sharding: + dim_to_sharding[d] = -1 + + pending_sums = list(pending_sums_counter.keys()) + if needs_reshard: + return _gen_reshard_suggestions( + op_schema, input_dims, input_specs, dim_to_sharding, pending_sums + ) + + # generate output pending sum if a dim is sharded, and it appears in input + # but not output + for dim, shard_on_mesh in dim_to_sharding.items(): + if dim not in output_dims[0] and shard_on_mesh != -1: + pending_sums.append(shard_on_mesh) + + # if no need to reshard, we directly generate the output sharding + output_dim_map = [] + output_shape = [] + for dim in output_dim: + if dim == "1": + # find output dim that is a singleton dimension, mark sharding and shape + output_dim_map.append(-1) + output_shape.append(1) + else: + output_dim_map.append(dim_to_sharding[dim]) + output_shape.append(dim_to_size[dim]) + + # XXX: since we still need to have intermediate shape calculation, we need + # to pass in the shape here. 
We should remove this once sharding decomp works + # for ops like addmm + assert input_specs[0].tensor_meta is not None + tensor_meta = TensorMeta( + torch.Size(output_shape), + input_specs[0].tensor_meta.stride, + input_specs[0].tensor_meta.dtype, + ) + return OutputSharding( + DTensorSpec.from_dim_map( + input_specs[0].mesh, + output_dim_map, + pending_sums, + tensor_meta=tensor_meta, + ) + ) + + +def pointwise_rule(op_schema: OpSchema, linearity: bool = False) -> OutputSharding: + """ + Propagate the sharding for pointwise operations. + + Examples: + ij,ij->ij - addition/mul + ij,j->ij - broadcasted addition + """ + alphabet = "abcdefghijklmnopqrstuvwxyz" + # find the max_dim first in case we need to broadcasting + input_specs = op_schema.args_spec + max_dim = max(input.ndim for input in input_specs) + dimchars = [] + singleton_counter: List[int] = [0] * max_dim + for input in input_specs: + start_dim = max_dim - input.ndim + p = alphabet[start_dim:max_dim] + # handle the "broadcasting to a common shape case" + # see https://pytorch.org/docs/stable/notes/broadcasting.html + # If any of the dimensions is singleton dimension (i.e. 1). + # we mark the dim char as a special "1" to distinguish with + # the non-singleton dimension, so that sharding propagation + # should just ignore the singleton dimension. + if len(input_specs) > 1: + for i in range(max_dim): + if i < start_dim: + # treat the leading miss dim chars as singleton + singleton_counter[i] += 1 + elif input.shape[i - start_dim] == 1: + # mark singleton dim char as a special "1" in einop rule + singleton_counter[i] += 1 + p = _replace_char_in_str(p, "1", (i - start_dim)) + + dimchars.append(p) + out_dimchars = alphabet[:max_dim] + # check if we replace the all inputs dim char with singleton dimension, + # if we replace all inputs, we also need to replace the output dimension. + for output_dim_idx in range(len(out_dimchars)): + out_dimchar = out_dimchars[output_dim_idx] + if singleton_counter[output_dim_idx] == len(input_specs): + out_dimchars = _replace_char_in_str(out_dimchars, "1", output_dim_idx) + + fmt = f"{','.join(p for p in dimchars)}->{out_dimchars}" + + enforce_sharding: Dict[str, int] = {} + if _is_inplace_op(op_schema.op): + # inplace op should keep the input sharding it writes to + for out_dimchar, mesh_dim in zip(out_dimchars, input_specs[0].dim_map): + enforce_sharding[out_dimchar] = mesh_dim + elif _is_out_variant_op(op_schema.op): + out_spec = cast(DTensorSpec, op_schema.kwargs_schema["out"]) + for out_dimchar, mesh_dim in zip(out_dimchars, out_spec.dim_map): + enforce_sharding[out_dimchar] = mesh_dim + + return einop_rule( + fmt, + op_schema, + linearity=linearity, + enforce_sharding=enforce_sharding, + ) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/conv_ops.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/conv_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..b45ddb03ec7423ee32cec62bee35185fb765a0bb --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/conv_ops.py @@ -0,0 +1,108 @@ +# Copyright (c) Meta Platforms, Inc. 
and affiliates +# implement matrix related ops for distributed tensor +from typing import List + +import torch +from torch.distributed._tensor.op_schema import OpSchema, OutputSharding +from torch.distributed._tensor.ops.utils import register_prop_rule +from torch.distributed._tensor.placement_types import DTensorSpec, TensorMeta + +aten = torch.ops.aten + + +@register_prop_rule(aten.convolution.default) +def convolution_rules(op_schema: OpSchema) -> OutputSharding: + ( + input_spec, + weight_spec, + bias_spec, + stride, + padding, + dilation, + transposed, + output_padding, + groups, + ) = op_schema.args_schema + + assert isinstance(input_spec, DTensorSpec) + assert isinstance(weight_spec, DTensorSpec) + assert isinstance(bias_spec, DTensorSpec) + assert input_spec.tensor_meta is not None + assert weight_spec.tensor_meta is not None + in_shape = input_spec.tensor_meta.shape + weight_shape = weight_spec.tensor_meta.shape + assert isinstance(stride, List) + assert isinstance(padding, List) + assert isinstance(dilation, List) + assert isinstance(weight_shape, torch.Size) + N, C_in, H_in, W_in = in_shape[0], in_shape[1], in_shape[2], in_shape[3] + C_out = weight_shape[0] + H_out = (H_in + 2 * padding[0] - dilation[0] * (weight_shape[2] - 1) - 1) // stride[ + 0 + ] + 1 + W_out = (W_in + 2 * padding[1] - dilation[1] * (weight_shape[3] - 1) - 1) // stride[ + 1 + ] + 1 + output_shape = [N, C_out, H_out, W_out] + output_stride = (C_out * H_out * W_out, H_out * W_out, W_out, 1) + output_dim_map = input_spec.dim_map + pending_sums = input_spec.sums + + tensor_meta = TensorMeta( + torch.Size(output_shape), + output_stride, + input_spec.tensor_meta.dtype, + ) + return OutputSharding( + DTensorSpec.from_dim_map( + input_spec.mesh, + output_dim_map, + pending_sums, + tensor_meta=tensor_meta, + ) + ) + + +@register_prop_rule(aten.convolution_backward.default) +def convolution_backward_rules(op_schema: OpSchema) -> OutputSharding: + input_spec = op_schema.args_schema[0] + ( + grad_output_spec, + input_spec, + weight_spec, + bias_shape_opt, + stride, + padding, + dilation, + transposed, + output_padding, + groups, + output_mask, + ) = op_schema.args_schema + + assert isinstance(grad_output_spec, DTensorSpec) + assert isinstance(input_spec, DTensorSpec) + assert isinstance(weight_spec, DTensorSpec) + assert isinstance(bias_shape_opt, List) + assert input_spec.tensor_meta is not None + weight_tensor_meta = weight_spec.tensor_meta + bias_tensor_meta = TensorMeta( + torch.Size(bias_shape_opt), + (1,), + input_spec.tensor_meta.dtype, + ) + + grad_input_spec = input_spec + grad_weight_spec = DTensorSpec.from_dim_map( + input_spec.mesh, + [-1, -1, -1, -1], + [0], + tensor_meta=weight_tensor_meta, + ) + grad_bias_spec = DTensorSpec.from_dim_map( + input_spec.mesh, + [-1], + [0], + tensor_meta=bias_tensor_meta, + ) + return OutputSharding([grad_input_spec, grad_weight_spec, grad_bias_spec]) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/embedding_ops.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/embedding_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..bf6ad687387c842ea9536754ad5128ae5ae15aa6 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/embedding_ops.py @@ -0,0 +1,313 @@ +# Copyright (c) Meta Platforms, Inc. 
and affiliates +# implement matrix related ops for distributed tensor +import itertools +from dataclasses import dataclass, field +from typing import cast, List, Optional + +import torch +import torch.distributed._functional_collectives as funcol +from torch.distributed._tensor.op_schema import ( + OpSchema, + OpStrategy, + PlacementStrategy, + StrategyType, +) +from torch.distributed._tensor.ops.utils import ( + generate_redistribute_costs, + is_tensor_shardable, + register_op_strategy, +) + +from torch.distributed._tensor.placement_types import ( + _Partial, + DTensorSpec, + Placement, + Replicate, + Shard, +) + +from torch.distributed.device_mesh import DeviceMesh + +aten = torch.ops.aten + + +@dataclass +class MaskBuffer: + data: Optional[torch.Tensor] = None + + def materialize_mask(self, mask): + if self.data is not None: + raise RuntimeError("MaskBuffer has already been materialized") + self.data = mask + + def release_mask(self): + # TODO: evaluate if we need to release the mask buffer or the buffer + # can just have the same lifetime as the _Partial placement + if self.data is None: + raise RuntimeError("MaskBuffer has not been materialized") + self.data = None + + def apply_mask(self, tensor): + if self.data is None: + raise RuntimeError("MaskBuffer has not been materialized") + + # NOTE: _MaskPartial is being used by the embedding op and the gather op. + # For gather, the mask has the same dimension as the output tensor, whereas + # the output of the embedding op has an additional dimension compare to the input, + # hence the output masking logic below having two different cases. + if tensor.ndim == self.data.ndim: + tensor[self.data] = 0.0 + else: + tensor[self.data, :] = 0.0 + + +@dataclass(frozen=True) +class _MaskPartial(_Partial): + """ + A partial mask placement devised for rowwise sharded embedding op, where we need + to mask and adjust the indices to the local embedding shard, embedding masking + is a special type of the Partial placement + + NOTE: the lifecycle of this MaskPartial placement follows the corresponding DTensor + lifecycle, i.e. the indices_mask would only be alive during the lifetime of the DTensor. 
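+
+    Informal sketch of the mechanics (illustrative numbers): with a rowwise
+    sharded embedding weight of logical row count 10 split across 2 ranks,
+    rank 1 owns rows [5, 10); _partition_value masks indices outside that
+    window to 0 (after shifting by the local offset), and _reduce_value zeroes
+    the corresponding output rows before the cross-rank sum reduction, so each
+    index is contributed by exactly one rank.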
+ """ + + logical_dim_size: int = -1 + mask_buffer: MaskBuffer = field(default_factory=MaskBuffer) + + def _partition_value( + self, tensor: torch.Tensor, mesh: DeviceMesh, mesh_dim: int + ) -> torch.Tensor: + # override parent logic to perform partial mask for embedding + num_chunks = mesh.size(mesh_dim) + # get local shard size and offset on the embedding_dim + local_shard_size, local_offset_on_dim = Shard._local_shard_size_on_dim( + self.logical_dim_size, + num_chunks, + mesh.get_local_rank(mesh_dim), + return_offset=True, + ) + # Build the input mask and save it for the current partial placement + # this is so that the output of embedding op can reuse the same partial + # placement saved mask to perform mask + reduction + mask = (tensor < local_offset_on_dim) | ( + tensor >= local_offset_on_dim + local_shard_size + ) + # mask the input tensor + masked_tensor = tensor.clone() - local_offset_on_dim + masked_tensor[mask] = 0 + # materialize the mask buffer to be used for reduction + self.mask_buffer.materialize_mask(mask) + return masked_tensor + + def _reduce_value( + self, tensor: torch.Tensor, mesh: DeviceMesh, mesh_dim: int + ) -> torch.Tensor: + # by the time we ned reduction, we should have already saved the mask + assert self.mask_buffer.data is not None + + # apply the mask to the tensor that pending reduction + self.mask_buffer.apply_mask(tensor) + + # clear the mask buffer + self.mask_buffer.release_mask() + + # perform sum reduction + return funcol.all_reduce( + tensor, reduceOp=self.reduce_op.name, group=(mesh, mesh_dim) + ) + + def _reduce_shard_value( + self, + tensor: torch.Tensor, + mesh: DeviceMesh, + mesh_dim: int, + shard_spec: Placement, + ) -> torch.Tensor: + # by the time we ned reduction, we should have already saved the mask + assert self.mask_buffer.data is not None + + # apply the mask to the tensor that pending reduction + self.mask_buffer.apply_mask(tensor) + + # clear the mask buffer + self.mask_buffer.release_mask() + + # call reduce_shard_tensor of the shard_spec. + shard_spec = cast(Shard, shard_spec) + return shard_spec._reduce_shard_tensor(tensor, mesh, self.reduce_op, mesh_dim) + + def __eq__(self, other: object) -> bool: + if not isinstance(other, _MaskPartial): + return False + + # if either data is not None, we invalidate the sharding cache, as this indicates + # the current MaskPartial placement is still in use and should not be used for cache hit. + if self.mask_buffer.data is not None or other.mask_buffer.data is not None: + return False + + return ( + self.reduce_op == other.reduce_op + and self.logical_dim_size == other.logical_dim_size + ) + + def __hash__(self) -> int: + return 1 + hash( + (self.logical_dim_size, id(self.mask_buffer.data), self.reduce_op) + ) + + def __repr__(self) -> str: + """ + machine readable representation of the MaskPartial placement + """ + return f"_MaskPartial(logical_dim_size={self.logical_dim_size})" + + def __str__(self) -> str: + """ + human readable representation of the MaskPartial placement + """ + return "MaskP" + + +@register_op_strategy(aten.embedding.default) +def embedding_strategy(mesh: DeviceMesh, op_schema: OpSchema) -> StrategyType: + """ + This strategy handles embedding op. 
We have two possible embedding shardings: + rowwise and colwise + # TODO: implement rowwise sharding + """ + weight_strategy = cast(OpStrategy, op_schema.args_schema[0]) + indices_strategy = cast(OpStrategy, op_schema.args_schema[1]) + + weight_shape = weight_strategy.output_shape + indices_shape = indices_strategy.output_shape + output_emd_dim = len(indices_shape) + + all_mesh_dim_strategies = [] + + for mesh_dim in range(mesh.ndim): + single_mesh_dim_strategies = [] + + # placement list stores placements of [output, weight, input_indices] + # first we always have replicate all for inputs and output + all_replicate: List[Placement] = [Replicate()] * 3 + single_mesh_dim_strategies.append(all_replicate) + + # colwise sharding, output shard on last dim, weight shard on dim 1, input replicate + colwise_sharding = [Shard(output_emd_dim), Shard(1), Replicate()] + single_mesh_dim_strategies.append(colwise_sharding) + + # rowwise sharding, output is embedding partial, weight shard on dim 0, input accepts embedding partial + embedding_partial_placement = _MaskPartial(logical_dim_size=weight_shape[0]) + + # NOTE we want to reuse the same mask partial placement so that we can reuse the same mask that generates + # from the input indices and use it for output reduction + rowwise_sharding = [ + embedding_partial_placement, + Shard(0), + embedding_partial_placement, + ] + single_mesh_dim_strategies.append(rowwise_sharding) + + # batch dim sharding, weight replicated, input can shard on any dim, output follows input + for input_dim in range(len(indices_shape)): + batch_sharding = [Shard(input_dim), Replicate(), Shard(input_dim)] + single_mesh_dim_strategies.append(batch_sharding) + + all_mesh_dim_strategies.append(single_mesh_dim_strategies) + + strategy_combs = itertools.product(*all_mesh_dim_strategies) + + all_strategies = [] + for strategy_comb in strategy_combs: + spec_list = [] + for specs in zip(*strategy_comb): + spec_list.append(DTensorSpec(mesh, tuple(specs))) + + if is_tensor_shardable(weight_shape, spec_list[1]) and is_tensor_shardable( + indices_shape, spec_list[2] + ): + # only add to the strategy list when both weight and indices are shardable + weight_spec, indices_spec = spec_list[1:] + redistribute_cost = [ + generate_redistribute_costs(weight_strategy, weight_spec), + generate_redistribute_costs(indices_strategy, indices_spec), + ] + strat = PlacementStrategy( + output_specs=spec_list[0], + input_specs=spec_list[1:], + redistribute_cost=redistribute_cost, + ) + all_strategies.append(strat) + + return OpStrategy(all_strategies) + + +@register_op_strategy(aten.embedding_dense_backward.default) +def embedding_dense_backward_strategy( + mesh: DeviceMesh, op_schema: OpSchema +) -> StrategyType: + """ + This strategy handles embedding op. 
We have two possible embedding shardings: + rowwise and colwise + # TODO: implement rowwise sharding backward + """ + grad_out_strategy = cast(OpStrategy, op_schema.args_schema[0]) + indices_strategy = cast(OpStrategy, op_schema.args_schema[1]) + + grad_out_shape = grad_out_strategy.output_shape + indices_shape = indices_strategy.output_shape + grad_out_ndim = len(grad_out_shape) + + all_mesh_dim_strategies = [] + + for mesh_dim in range(mesh.ndim): + single_mesh_dim_strategies = [] + + # placement list stores placements of [output, weight, input_indices] + # first we always have replicate all for inputs and output + all_replicate: List[Placement] = [Replicate()] * 3 + single_mesh_dim_strategies.append(all_replicate) + + # colwise sharding backward, grad_out shard on last dim, input replicate, + # weight grad shard colwise + colwise_sharding = [Shard(1), Shard(grad_out_ndim - 1), Replicate()] + single_mesh_dim_strategies.append(colwise_sharding) + + # batch dim sharding, weight replicated, grad_out/input have same sharding + # that can shard on any dim, weight grad partial + for input_dim in range(len(indices_shape)): + batch_sharding = [_Partial(), Shard(input_dim), Shard(input_dim)] + single_mesh_dim_strategies.append(batch_sharding) + + # grad_out partial, input replicate, weight grad keep partial + partial_sharding = [_Partial(), _Partial(), Replicate()] + single_mesh_dim_strategies.append(partial_sharding) + + all_mesh_dim_strategies.append(single_mesh_dim_strategies) + + strategy_combs = itertools.product(*all_mesh_dim_strategies) + + all_strategies = [] + for strategy_comb in strategy_combs: + spec_list = [] + for specs in zip(*strategy_comb): + spec_list.append(DTensorSpec(mesh, tuple(specs))) + + if is_tensor_shardable(grad_out_shape, spec_list[1]) and is_tensor_shardable( + indices_shape, spec_list[2] + ): + # only add to the strategy list when both grad_out and indices are shardable + grad_out_spec, indices_spec = spec_list[1:] + redistribute_cost = [ + generate_redistribute_costs(grad_out_strategy, grad_out_spec), + generate_redistribute_costs(indices_strategy, indices_spec), + ] + strat = PlacementStrategy( + output_specs=spec_list[0], + input_specs=spec_list[1:], + redistribute_cost=redistribute_cost, + ) + all_strategies.append(strat) + + return OpStrategy(all_strategies) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/experimental_ops.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/experimental_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..4f64b6df1b5fb295f40ed9af1daa29086a5a18b6 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/experimental_ops.py @@ -0,0 +1,49 @@ +# Copyright (c) Meta Platforms, Inc. 
and affiliates +# implement matrix related ops for distributed tensor +from typing import List + +try: + import numpy as np +except ModuleNotFoundError: + np = None # type: ignore[assignment] + +import torch +from torch.distributed._tensor.op_schema import OpSchema, OutputSharding +from torch.distributed._tensor.ops.utils import register_prop_rule +from torch.distributed._tensor.placement_types import DTensorSpec, TensorMeta + +aten = torch.ops.aten + + +@register_prop_rule(aten.slice_backward.default) +def slice_backward_rules(op_schema: OpSchema) -> OutputSharding: + grad_output_spec, input_sizes, dim, start, end, step = op_schema.args_schema + assert isinstance(grad_output_spec, DTensorSpec) + assert isinstance(input_sizes, List) + assert grad_output_spec.tensor_meta is not None + grad_input_stride = list(np.cumprod(input_sizes[::-1])[:-1][::-1]) + grad_input_stride.append(1) + dim_map = grad_output_spec.dim_map + sums = grad_output_spec.sums + + grad_input_tensor_meta = TensorMeta( + torch.Size(input_sizes), + tuple(grad_input_stride), + grad_output_spec.tensor_meta.dtype, + ) + grad_input_spec = DTensorSpec.from_dim_map( + grad_output_spec.mesh, + dim_map, + sums, + tensor_meta=grad_input_tensor_meta, + ) + + return OutputSharding(grad_input_spec) + + +@register_prop_rule(aten.bernoulli.default) +@register_prop_rule(aten.bernoulli_.float) +def bernoulli_rules(op_schema: OpSchema) -> OutputSharding: + input_spec = op_schema.args_schema[0] + assert isinstance(input_spec, DTensorSpec) + return OutputSharding(input_spec) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/math_ops.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/math_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..64d5a09bf4028596920e7a65fd9ef3bdd31f59aa --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/math_ops.py @@ -0,0 +1,957 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates +from dataclasses import dataclass +from enum import Enum +from typing import cast, List, Optional, Sequence, Tuple, Union + +import torch + +import torch.distributed.distributed_c10d as c10d +from torch.distributed._tensor.op_schema import ( + OpSchema, + OpStrategy, + PlacementStrategy, + RuntimeSchemaInfo, + TupleStrategy, +) +from torch.distributed._tensor.ops.utils import ( + as_list, + generate_redistribute_costs, + is_tensor_evenly_shardable, + normalize_dim, + normalize_dims, + normalize_to_torch_size, + register_op_strategy, +) +from torch.distributed._tensor.placement_types import ( + _Partial, + DTensorSpec, + Placement, + Replicate, + Shard, +) +from torch.distributed.device_mesh import DeviceMesh + + +aten = torch.ops.aten + + +class Reduction(Enum): + NONE = 0 + MEAN = 1 + SUM = 2 + + +@dataclass(frozen=True) +class NormReduction: + norm_type: Union[int, float, str] + + +ReductionOpType = Union[NormReduction, c10d.ReduceOp.RedOpType] + + +@dataclass(frozen=True) +class _NormPartial(_Partial): + """ + This placement is used for partial vector norm. + + For p-norms (where p not inf or -inf), the p-norm over n elements computes + (sum_i x_i^p)^(1/p) + where the sum is from i=1 to n. The reduction op is the p-norm itself. 
+ For example, consider 2 ranks, a (4,) tensor sharded on dim-0, and 2-norm: + Rank 0: [t1, t2] | Rank 1: [t3, t4] + After computing 2-norm per gradient (partial placement): + Rank 0: [sqrt(t1^2 + t2^2)] | Rank 1: [sqrt(t3^2 + t4^2)] + Converting from partial to replicate wants to ultimately get: + Rank 0/1: [sqrt(t1^2 + t2^2 + t3^2 + t4^2)] + This can be achieved by computing 2-norm on each rank's result. This holds + similarly for inf and -inf norm. For 0-norm, the reduction op is sum. + """ + + norm_type: Union[int, float, str] = 2 + + def __post_init__(self): + """Set the appropriate reduce op based on the norm type.""" + # Use `object.__setattr__` to bypass frozen checks + if self.norm_type in (float("inf"), "inf"): + object.__setattr__(self, "reduce_op", c10d.ReduceOp.MAX) + elif self.norm_type in (float("-inf"), "-inf"): + object.__setattr__(self, "reduce_op", c10d.ReduceOp.MIN) + elif isinstance(self.norm_type, (int, float)): + object.__setattr__(self, "reduce_op", c10d.ReduceOp.SUM) + else: + raise NotImplementedError(f"Unsupported norm type: {self.norm_type}") + + def _partition_value( + self, tensor: torch.Tensor, mesh: DeviceMesh, mesh_dim: int + ) -> torch.Tensor: + if self.reduce_op in (c10d.ReduceOp.MAX, c10d.ReduceOp.MIN): + return tensor + elif self.reduce_op == c10d.ReduceOp.SUM: + return tensor / mesh.size(mesh_dim=mesh_dim) + raise NotImplementedError(self.reduce_op) + + def _reduce_shard_value( + self, + tensor: torch.Tensor, + mesh: DeviceMesh, + mesh_dim: int, + shard_spec: Placement, + ) -> torch.Tensor: + assert isinstance(shard_spec, Shard), f"{shard_spec}" + tensor = self._pre_reduce_transform(tensor) + reduced_tensor = super()._reduce_shard_value(tensor, mesh, mesh_dim, shard_spec) + return self._post_reduce_transform(reduced_tensor) + + def _reduce_value( + self, tensor: torch.Tensor, mesh: DeviceMesh, mesh_dim: int + ) -> torch.Tensor: + tensor = self._pre_reduce_transform(tensor) + reduced_tensor = super()._reduce_value(tensor, mesh, mesh_dim) + return self._post_reduce_transform(reduced_tensor) + + def _pre_reduce_transform(self, tensor: torch.Tensor) -> torch.Tensor: + if self.reduce_op == c10d.ReduceOp.SUM: + assert isinstance(self.norm_type, (int, float)), f"{self.norm_type}" + if self.norm_type != 0 and self.norm_type != 1: + return tensor**self.norm_type + return tensor + + def _post_reduce_transform(self, tensor: torch.Tensor) -> torch.Tensor: + if self.reduce_op == c10d.ReduceOp.SUM: + assert isinstance(self.norm_type, (int, float)), f"{self.norm_type}" + if self.norm_type != 0 and self.norm_type != 1: + return tensor ** (1.0 / self.norm_type) + return tensor + + +def _infer_reduction_dims(dims_arg: object, ndim: int) -> Optional[List[int]]: + if dims_arg is None: + return None + dims = cast(List[int], as_list(dims_arg)) + dims = cast(List[int], normalize_dims(dims, ndim)) + empty_dims = [[0], [-1], []] + if ndim == 0 and dims_arg in empty_dims: + return None + return dims + + +def _infer_reduce_dims_map( + reduction_dims: List[int], input_ndim: int, keep_dim=False +) -> List[int]: + reduction_dims_map = [] + new_dim_count = 0 + for input_dim in range(input_ndim): + if input_dim in reduction_dims and not keep_dim: + # if input dim in reduction dims, mark it as -1 + reduction_dims_map.append(-1) + else: + # otherwise mark it as the new dim + reduction_dims_map.append(new_dim_count) + new_dim_count += 1 + + return reduction_dims_map + + +def replicate_reduction_dims( + placements: Tuple[Placement, ...], reduction_dims: List[int] +) -> Tuple[Placement, 
...]:
+    # replicate the reduction dims if not reduction_linear
+    new_placements: List[Placement] = []
+
+    for p in placements:
+        if p.is_partial():
+            new_placements.append(Replicate())
+        elif isinstance(p, Shard) and p.dim in reduction_dims:
+            new_placements.append(Replicate())
+        else:
+            new_placements.append(p)
+
+    return tuple(new_placements)
+
+
+def map_placements_after_reduction(
+    placements: Tuple[Placement, ...],
+    reduction_dims: List[int],
+    reduction_dims_map: List[int],
+    reduction_op: ReductionOpType,
+) -> Tuple[Placement, ...]:
+    """
+    Map each placement based on the output shape after reduction.
+    """
+    new_placements: List[Placement] = []
+    for placement in placements:
+        if isinstance(placement, (Replicate, _Partial)):
+            new_placements.append(placement)
+        else:
+            assert isinstance(placement, Shard)
+            shard_dim = placement.dim
+            new_shard_dim = reduction_dims_map[shard_dim]
+            if new_shard_dim == -1 or shard_dim in reduction_dims:
+                # if new_shard_dim is collapsed or it is in the reduction dims
+                # (i.e. for the case where keepdims=True), we generate partial
+                new_placements.append(get_placement_from_reduction_op(reduction_op))
+            else:
+                new_placements.append(Shard(new_shard_dim))
+    return tuple(new_placements)
+
+
+def get_placement_from_reduction_op(reduction_op: ReductionOpType) -> Placement:
+    if isinstance(reduction_op, NormReduction):
+        return _NormPartial(norm_type=reduction_op.norm_type)
+    return _Partial(reduction_op)
+
+
+def common_reduction_strategy(
+    mesh: DeviceMesh,
+    input_strategy: OpStrategy,
+    reduce_dims: List[int],
+    keep_dim: bool = False,
+    reduction_linear: bool = True,
+    reduction_op: ReductionOpType = c10d.ReduceOp.SUM,
+) -> OpStrategy:
+    """
+    reduction_linear means that the reduction `f` follows this rule:
+        f([f(a), f(b)]) = f([a, b])
+
+    Reduction linear should be a superset of linearity.
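+
+    For example, sum is reduction linear: summing the per-shard partial sums
+    equals summing the full tensor, i.e. sum([sum([a, b]), sum([c, d])]) =
+    sum([a, b, c, d]), so partial results can simply be reduced again.
+    Variance is not reduction linear, which is why var_reduction_strategy
+    calls this helper with reduction_linear=False, forcing the reduction dims
+    to be replicated first via replicate_reduction_dims.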
+ """ + # by default follow reduction input strategy + reduction_strategy = OpStrategy([]) + + for strtg in input_strategy.strategies: + if not reduction_linear: + # input placements for this strategy should clear out pending sum and sharding + # on the reduction dimension + input_placements = replicate_reduction_dims( + strtg.output_spec.placements, reduce_dims + ) + else: + input_placements = strtg.output_spec.placements + + input_spec = DTensorSpec( + mesh=mesh, + placements=input_placements, + tensor_meta=strtg.output_spec.tensor_meta, + ) + + reduce_dims_map = _infer_reduce_dims_map(reduce_dims, input_spec.ndim, keep_dim) + out_placements = map_placements_after_reduction( + input_spec.placements, reduce_dims, reduce_dims_map, reduction_op + ) + redistribute_cost = [generate_redistribute_costs(input_strategy, input_spec)] + reduction_strategy.strategies.append( + PlacementStrategy( + output_specs=DTensorSpec( + mesh=mesh, + placements=out_placements, + ), + input_specs=(input_spec,), + redistribute_cost=redistribute_cost, + ) + ) + + return reduction_strategy + + +LINEAR_REDUCTION_OP_MAP = { + aten.all.default: c10d.ReduceOp.SUM, + aten.all.dim: c10d.ReduceOp.SUM, + aten.sum.default: c10d.ReduceOp.SUM, + aten.sum.dim_IntList: c10d.ReduceOp.SUM, + aten.prod.default: c10d.ReduceOp.PRODUCT, + aten.prod.dim_int: c10d.ReduceOp.PRODUCT, + aten.prod.int_out: c10d.ReduceOp.PRODUCT, + aten.mean.default: c10d.ReduceOp.AVG, + aten.mean.dim: c10d.ReduceOp.AVG, + aten.mean.out: c10d.ReduceOp.AVG, + aten.max.default: c10d.ReduceOp.MAX, + aten.max.dim: c10d.ReduceOp.MAX, + aten.max.out: c10d.ReduceOp.MAX, + aten.min.default: c10d.ReduceOp.MIN, + aten.min.dim: c10d.ReduceOp.MIN, + aten.min.out: c10d.ReduceOp.MIN, +} + + +@register_op_strategy( + list(LINEAR_REDUCTION_OP_MAP.keys()), schema_info=RuntimeSchemaInfo(1) +) +def linear_reduction_strategy(mesh: DeviceMesh, op_schema: OpSchema) -> OpStrategy: + args_schema = op_schema.args_schema + input_strategy = args_schema[0] + assert isinstance(input_strategy, OpStrategy) + dims = None + if len(op_schema.args_schema) > 1: + dims = _infer_reduction_dims(args_schema[1], input_strategy.output_ndim) + + reduce_dims = list(range(input_strategy.output_ndim)) if dims is None else dims + + keep_dim = len(op_schema.args_schema) > 2 and bool(op_schema.args_schema[2]) + reduction_op = LINEAR_REDUCTION_OP_MAP[op_schema.op] + return common_reduction_strategy( + mesh, + input_strategy, + reduce_dims, + keep_dim=keep_dim, + reduction_linear=True, + reduction_op=reduction_op, + ) + + +@register_op_strategy( + [aten.var.correction, aten.var.correction_out], + schema_info=RuntimeSchemaInfo(1, ["keepdim"]), +) +def var_reduction_strategy(mesh: DeviceMesh, op_schema: OpSchema) -> OpStrategy: + args_schema = op_schema.args_schema + input_strategy = args_schema[0] + assert isinstance(input_strategy, OpStrategy) + dims = None + if len(op_schema.args_schema) > 1: + dims = _infer_reduction_dims(args_schema[1], input_strategy.output_ndim) + + reduce_dims = list(range(input_strategy.output_ndim)) if dims is None else dims + + keep_dim = cast(bool, op_schema.kwargs_schema.get("keepdim", False)) + return common_reduction_strategy( + mesh, input_strategy, reduce_dims, keep_dim=keep_dim, reduction_linear=False + ) + + +@register_op_strategy( + [aten.linalg_vector_norm.default], schema_info=RuntimeSchemaInfo(1) +) +def vector_norm_strategy(mesh: DeviceMesh, op_schema: OpSchema) -> OpStrategy: + args_schema = op_schema.args_schema + input_strategy = args_schema[0] + assert 
isinstance(input_strategy, OpStrategy) + norm_type = args_schema[1] if len(args_schema) > 1 else 2 + assert isinstance(norm_type, (int, float, str)), f"{norm_type}" + dim = args_schema[2] if len(args_schema) > 2 else None + keepdim = args_schema[3] if len(args_schema) > 3 else False + dims = _infer_reduction_dims(dim, input_strategy.output_ndim) + reduce_dims = list(range(input_strategy.output_ndim)) if dims is None else dims + return common_reduction_strategy( + mesh, + input_strategy, + reduce_dims, + keep_dim=cast(bool, keepdim), + reduction_linear=True, + reduction_op=NormReduction(norm_type), + ) + + +@register_op_strategy( + [aten._foreach_norm.Scalar], schema_info=RuntimeSchemaInfo(1, needs_pytree=True) +) +def foreach_norm_strategy(mesh: DeviceMesh, op_schema: OpSchema) -> TupleStrategy: + args_schema = op_schema.args_schema + input_tuple_strategy = args_schema[0] + assert isinstance(input_tuple_strategy, TupleStrategy) + norm_type = args_schema[1] + assert isinstance(norm_type, (int, float, str)), f"{norm_type}" + output_tuple_strategy_childs: List[OpStrategy] = [] + for op_strategy in input_tuple_strategy.childs: + assert isinstance(op_strategy, OpStrategy), f"{op_strategy}" + reduce_dims = list(range(op_strategy.output_ndim)) + output_strategy = common_reduction_strategy( + mesh, + op_strategy, + reduce_dims, + reduction_linear=True, + reduction_op=NormReduction(norm_type), + ) + output_tuple_strategy_childs.append(output_strategy) + return TupleStrategy(output_tuple_strategy_childs) + + +@register_op_strategy( + [aten._log_softmax.default, aten._softmax.default], schema_info=RuntimeSchemaInfo(1) +) +def softmax_strategy(mesh: DeviceMesh, op_schema: OpSchema) -> OpStrategy: + input_strategy, softmax_dim, _ = op_schema.args_schema + input_strategy = cast(OpStrategy, input_strategy) + softmax_dim = cast(int, softmax_dim) + softmax_dim = normalize_dim(softmax_dim, input_strategy.output_ndim) + + output_strategy = OpStrategy([]) + for idx, input_placement_strategy in enumerate(input_strategy.strategies): + redistribute_costs = [] + input_src_spec = input_placement_strategy.output_spec + + # make sure input is replicated along the softmax dim + input_target_spec = DTensorSpec( + mesh=mesh, + placements=replicate_reduction_dims( + input_src_spec.placements, [softmax_dim] + ), + tensor_meta=input_src_spec.tensor_meta, + ) + redistribute_costs.append( + generate_redistribute_costs(input_strategy, input_target_spec) + ) + output_target_spec = input_target_spec + output_strategy.strategies.append( + PlacementStrategy( + output_specs=output_target_spec, + input_specs=[input_target_spec], + redistribute_cost=redistribute_costs, + ) + ) + + return output_strategy + + +@register_op_strategy( + [ + aten._log_softmax_backward_data.default, + aten._softmax_backward_data.default, + ], + schema_info=RuntimeSchemaInfo(2), +) +def softmax_backward_strategy(mesh: DeviceMesh, op_schema: OpSchema) -> OpStrategy: + grad_out_strategy, out_strategy, softmax_dim, _ = op_schema.args_schema + grad_out_strategy = cast(OpStrategy, grad_out_strategy) + out_strategy = cast(OpStrategy, out_strategy) + softmax_dim = cast(int, softmax_dim) + softmax_dim = normalize_dim(softmax_dim, grad_out_strategy.output_ndim) + + grad_in_strategy = OpStrategy([]) + for grad_out_placement_strat, out_placement_strat in zip( + grad_out_strategy.strategies, out_strategy.strategies + ): + # follow the sharding of the grad_out or out depending on which has more shards + grad_out_src_spec = grad_out_placement_strat.output_spec + 
out_src_spec = out_placement_strat.output_spec + src_spec = ( + grad_out_src_spec + if grad_out_src_spec.num_shards >= out_src_spec.num_shards + else out_src_spec + ) + + # make sure inputs are replicated along the softmax dim + tgt_spec = DTensorSpec( + mesh=mesh, + placements=replicate_reduction_dims(src_spec.placements, [softmax_dim]), + ) + redist_grad_out_cost = generate_redistribute_costs(grad_out_strategy, tgt_spec) + redist_out_cost = generate_redistribute_costs(out_strategy, tgt_spec) + grad_in_strategy.strategies.append( + PlacementStrategy( + output_specs=tgt_spec, + redistribute_cost=[redist_grad_out_cost, redist_out_cost], + ) + ) + + return grad_in_strategy + + +@register_op_strategy( + [aten.nll_loss_forward.default, aten.nll_loss2d_forward.default], + schema_info=RuntimeSchemaInfo(3), +) +def nll_loss_forward_strategy(mesh: DeviceMesh, op_schema: OpSchema) -> OpStrategy: + assert len(op_schema.args_schema) == 5 + ( + input_strategy, + target_strategy, + weight_strategy, + reduction, + _, + ) = op_schema.args_schema + input_strategy = cast(OpStrategy, input_strategy) + target_strategy = cast(OpStrategy, target_strategy) + reduction = cast(int, reduction) + + input_shape = input_strategy.output_shape + channel_dim = 1 if len(input_shape) >= 2 else 0 + + output_strategy = OpStrategy([]) + for idx, input_placement_strategy in enumerate(input_strategy.strategies): + op_args_target_specs = [] + redistribute_costs = [] + + # make sure input is replicated along the channel dim + input_src_spec = input_placement_strategy.output_spec + input_expected_spec = DTensorSpec( + mesh=mesh, + placements=replicate_reduction_dims( + input_src_spec.placements, [channel_dim] + ), + tensor_meta=input_src_spec.tensor_meta, + ) + op_args_target_specs.append(input_expected_spec) + redistribute_costs.append( + generate_redistribute_costs(input_strategy, input_expected_spec) + ) + + # target doesn't have channel dim, and it follows input on other dims + target_src_spec = target_strategy.strategies[idx].output_spec + target_expected_spec = DTensorSpec( + mesh=mesh, + placements=_skip_dim(input_expected_spec.placements, channel_dim), + tensor_meta=target_src_spec.tensor_meta, + ) + op_args_target_specs.append(target_expected_spec) + redistribute_costs.append( + generate_redistribute_costs(target_strategy, target_expected_spec) + ) + + # weight tensor, if given, has to be a Tensor of size input_shape[channel_dim] + # make sure it is replicated + if weight_strategy is not None: + assert isinstance(weight_strategy, OpStrategy) + weight_src_spec = weight_strategy.strategies[idx].output_spec + weight_expected_spec = DTensorSpec( + mesh=mesh, + placements=_replicate_dims_start_at(weight_src_spec.placements), + tensor_meta=weight_src_spec.tensor_meta, + ) + op_args_target_specs.append(weight_expected_spec) + redistribute_costs.append( + generate_redistribute_costs(weight_strategy, weight_expected_spec) + ) + + if reduction == Reduction.NONE.value: + output_expected_spec = target_expected_spec + total_weight_expected_spec = DTensorSpec( + mesh=mesh, placements=tuple([Replicate()] * mesh.ndim) + ) + else: + if reduction == Reduction.MEAN.value: + reduction_op = c10d.ReduceOp.AVG + if not is_tensor_evenly_shardable( + target_expected_spec.shape, target_expected_spec + ): + raise ValueError( + "The intermediate results of nll_loss cannot be evenly sharded, \ + resulting in biased mean result." 
+ ) + else: # reduction == Reduction.SUM.value: + reduction_op = c10d.ReduceOp.SUM + reduce_dims = list(range(target_expected_spec.ndim)) + reduce_dims_map = _infer_reduce_dims_map( + reduce_dims, target_expected_spec.ndim, keep_dim=False + ) + out_placements = map_placements_after_reduction( + target_expected_spec.placements, + reduce_dims, + reduce_dims_map, + reduction_op, + ) + output_expected_spec = DTensorSpec( + mesh=mesh, + placements=out_placements, + ) + + # whether reduction is sum or mean, the total weight has to be summed up if not replicated + total_weight_placements = map_placements_after_reduction( + target_expected_spec.placements, + reduce_dims, + reduce_dims_map, + c10d.ReduceOp.SUM, + ) + total_weight_expected_spec = DTensorSpec( + mesh=mesh, + placements=total_weight_placements, + ) + + output_strategy.strategies.append( + PlacementStrategy( + output_specs=(output_expected_spec, total_weight_expected_spec), + input_specs=op_args_target_specs, + redistribute_cost=redistribute_costs, + ) + ) + + return output_strategy + + +@register_op_strategy( + [aten.nll_loss_backward.default, aten.nll_loss2d_backward.default], + schema_info=RuntimeSchemaInfo(4), +) +def nll_loss_backward_strategy(mesh: DeviceMesh, op_schema: OpSchema) -> OpStrategy: + assert len(op_schema.args_schema) == 7 + ( + grad_out_strategy, + input_strategy, + target_strategy, + weight_strategy, + reduction, + _, + total_weight_strategy, + ) = op_schema.args_schema + grad_out_strategy = cast(OpStrategy, grad_out_strategy) + input_strategy = cast(OpStrategy, input_strategy) + target_strategy = cast(OpStrategy, target_strategy) + reduction = cast(int, reduction) + total_weight_strategy = cast(OpStrategy, total_weight_strategy) + + input_shape = input_strategy.output_shape + channel_dim = 1 if len(input_shape) >= 2 else 0 + + grad_in_strategy = OpStrategy([]) + for idx, input_placement_strategy in enumerate(input_strategy.strategies): + op_args_target_specs = [] + redistribute_costs = [] + + # make sure input is replicated along the channel dim + input_src_spec = input_placement_strategy.output_spec + input_expected_spec = DTensorSpec( + mesh=mesh, + placements=replicate_reduction_dims( + input_src_spec.placements, [channel_dim] + ), + tensor_meta=input_src_spec.tensor_meta, + ) + op_args_target_specs.append(input_expected_spec) + redistribute_costs.append( + generate_redistribute_costs(input_strategy, input_expected_spec) + ) + + # target doesn't have channel dim, and it follows input on other dims + target_src_spec = target_strategy.strategies[idx].output_spec + target_expected_spec = DTensorSpec( + mesh=mesh, + placements=_skip_dim(input_expected_spec.placements, channel_dim), + tensor_meta=target_src_spec.tensor_meta, + ) + op_args_target_specs.append(target_expected_spec) + redistribute_costs.append( + generate_redistribute_costs(target_strategy, target_expected_spec) + ) + + # grad_out follows target if there is no reduction; + # otherwise, it should be a replicated scalar. 
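+        # (illustrative note) with reduction='mean' or 'sum' the forward loss is a
+        # 0-dim tensor, so the grad_out coming from loss.backward() is a scalar
+        # (typically 1.0) that every rank needs in full, hence the full
+        # replication below.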
+ grad_out_src_spec = grad_out_strategy.strategies[idx].output_spec + if reduction == Reduction.NONE.value: + grad_out_expected_spec = target_expected_spec + else: + grad_out_expected_spec = DTensorSpec( + mesh=mesh, + placements=_replicate_dims_start_at(grad_out_src_spec.placements), + tensor_meta=grad_out_src_spec.tensor_meta, + ) + op_args_target_specs.insert(0, grad_out_expected_spec) + redistribute_costs.insert( + 0, generate_redistribute_costs(grad_out_strategy, grad_out_expected_spec) + ) + + # weight tensor, if given, has to be a Tensor of size input_shape[channel_dim] + # make sure it is replicated + if weight_strategy is not None: + assert isinstance(weight_strategy, OpStrategy) + weight_src_spec = weight_strategy.strategies[idx].output_spec + weight_expected_spec = DTensorSpec( + mesh=mesh, + placements=_replicate_dims_start_at(weight_src_spec.placements), + tensor_meta=weight_src_spec.tensor_meta, + ) + op_args_target_specs.append(weight_expected_spec) + redistribute_costs.append( + generate_redistribute_costs(weight_strategy, weight_expected_spec) + ) + + # total_weight should always be replicated + total_weight_src_spec = total_weight_strategy.strategies[idx].output_spec + total_weight_expected_spec = DTensorSpec( + mesh=mesh, + placements=_replicate_dims_start_at(total_weight_src_spec.placements), + tensor_meta=total_weight_src_spec.tensor_meta, + ) + op_args_target_specs.append(total_weight_expected_spec) + redistribute_costs.append( + generate_redistribute_costs( + total_weight_strategy, total_weight_expected_spec + ) + ) + + grad_in_expected_spec = input_expected_spec + grad_in_strategy.strategies.append( + PlacementStrategy( + output_specs=grad_in_expected_spec, + input_specs=op_args_target_specs, + redistribute_cost=redistribute_costs, + ) + ) + + return grad_in_strategy + + +@register_op_strategy( + [aten.native_layer_norm.default], + schema_info=RuntimeSchemaInfo(1), +) +def layer_norm_strategy(mesh: DeviceMesh, op_schema: OpSchema) -> OpStrategy: + # args must be: input, normalized_shape, weight, bias, eps + # for None weight and bias, their corresponding objects will + # be None as well. layer_norm_strategy returns one OpStrategy + # for the triple return values (out, mean, rstd). 
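+    # (illustrative note) e.g. for an input of shape (batch, seq, hidden) with
+    # normalized_shape=(hidden,), axis below is 3 - 1 = 2, so any sharding on the
+    # hidden dim gets replicated while batch/seq sharding is preserved.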
+ assert len(op_schema.args_schema) == 5 + ( + input_strategy, + normalized_shape, + weight_strategy, + bias_strategy, + _, + ) = op_schema.args_schema + + # the current layer norm implementation requires that all + # input DTensor's sharding must be in form of OpStrategy + assert isinstance(input_strategy, OpStrategy) + assert isinstance(normalized_shape, (int, Sequence, torch.Size)) + normalized_size = normalize_to_torch_size(normalized_shape) + + input_ndim = input_strategy.output_ndim + axis = input_ndim - len(normalized_size) + + # we use OpStrategy because the output (out, mean, rstd) + # should have the same placements + output_strategy = OpStrategy([]) + for idx, input_placement_strategy in enumerate(input_strategy.strategies): + op_args_target_specs = [] + redistribute_costs = [] + input_src_spec = input_placement_strategy.output_spec + + # for the input tensor, we replicate it on the inner dims if necessary + # TODO: we can avoid forcing the redistribution once we figure out + # how to decompose layer norm + input_target_spec = DTensorSpec( + mesh=mesh, + placements=_replicate_dims_start_at(input_src_spec.placements, axis), + tensor_meta=input_src_spec.tensor_meta, + ) + op_args_target_specs.append(input_target_spec) + redistribute_costs.append( + generate_redistribute_costs(input_strategy, input_target_spec) + ) + + if weight_strategy is not None: + assert isinstance(weight_strategy, OpStrategy) + weight_src_spec = weight_strategy.strategies[idx].output_spec + + # for the weight tensor, we replicate it on all dims if necessary + # TODO: we can avoid forcing the redistribution once we figure out + # how to decompose layer norm + weight_target_spec = DTensorSpec( + mesh=mesh, + placements=_replicate_dims_start_at(weight_src_spec.placements), + tensor_meta=weight_src_spec.tensor_meta, + ) + op_args_target_specs.append(weight_target_spec) + redistribute_costs.append( + generate_redistribute_costs(weight_strategy, weight_target_spec) + ) + + if bias_strategy is not None: + assert isinstance(bias_strategy, OpStrategy) + bias_src_spec = bias_strategy.strategies[idx].output_spec + + # for the bias tensor, we replicate it on all dims if necessary + # TODO: we can avoid forcing the redistribution once we figure out + # how to decompose layer norm + bias_target_spec = DTensorSpec( + mesh=mesh, + placements=_replicate_dims_start_at(bias_src_spec.placements), + tensor_meta=bias_src_spec.tensor_meta, + ) + op_args_target_specs.append(bias_target_spec) + redistribute_costs.append( + generate_redistribute_costs(bias_strategy, bias_target_spec) + ) + + # the output spec is the same as input spec + output_target_spec = input_target_spec + output_strategy.strategies.append( + PlacementStrategy( + output_specs=output_target_spec, + input_specs=op_args_target_specs, + redistribute_cost=redistribute_costs, + ) + ) + + return output_strategy + + +@register_op_strategy( + [aten.native_layer_norm_backward.default], + schema_info=RuntimeSchemaInfo(2), +) +def layer_norm_bwd_strategy(mesh: DeviceMesh, op_schema: OpSchema) -> OpStrategy: + # args must be: grad_out, input, normalized_shape, mean, rstd, + # weight, bias, output_mask. For None weight and bias, their + # corresponding objects will be None as well. 
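+    # (illustrative note) output_mask is a list of three bools indicating which of
+    # (d_input, d_weight, d_bias) autograd actually needs; entries that are False
+    # produce a None output spec below.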
+ assert len(op_schema.args_schema) == 8 + ( + grad_out_strategy, + input_strategy, + normalized_shape, + mean_strategy, + rstd_strategy, + weight_strategy, + bias_strategy, + output_mask, + ) = op_schema.args_schema + + assert isinstance(grad_out_strategy, OpStrategy) + assert isinstance(input_strategy, OpStrategy) + assert isinstance(mean_strategy, OpStrategy) + assert isinstance(rstd_strategy, OpStrategy) + + assert isinstance(normalized_shape, (int, Sequence, torch.Size)) + normalized_size = normalize_to_torch_size(normalized_shape) + input_ndim = input_strategy.output_ndim + axis = input_ndim - len(normalized_size) + outer_dims = list(range(axis)) + + assert isinstance(output_mask, List) and len(output_mask) == 3 + + # output triple: (d_input, d_weight, d_bias) + out_tuple_strategy = OpStrategy([]) + for idx, input_placement_strategy in enumerate(input_strategy.strategies): + # args for PlacementStrategy + output_specs_list: List[Optional[DTensorSpec]] = [] + op_args_target_specs = [] + redistribute_costs = [] + + input_src_spec = input_placement_strategy.output_spec + # arg: grad_out + # TODO: change the strategy to the following rule. + # d_input is basically a product of element-wise mul of + # grad_out, rstd, and normalized input, among which rstd + # and normalized input (x_hat) should have the same sharding + # placements, and grad_out's sharding is determined by the + # pointwise result of x_hat and weight/bias. + if output_mask[0]: + # TODO: now grad_out spec follows input spec. we may need + # to change it to apply a pointwise rule over grad_out, + # input, and weight. + grad_out_target_spec = DTensorSpec( + mesh=mesh, + placements=_replicate_dims_start_at(input_src_spec.placements, axis), + tensor_meta=input_src_spec.tensor_meta, + ) + op_args_target_specs.append(grad_out_target_spec) + redistribute_costs.append( + generate_redistribute_costs(grad_out_strategy, grad_out_target_spec) + ) + output_specs_list.append(grad_out_target_spec) + else: + output_specs_list.append(None) + + # arg: input + input_target_spec = DTensorSpec( + mesh=mesh, + placements=_replicate_dims_start_at(input_src_spec.placements, axis), + tensor_meta=input_src_spec.tensor_meta, + ) + op_args_target_specs.append(input_target_spec) + redistribute_costs.append( + generate_redistribute_costs(input_strategy, input_target_spec) + ) + + # arg: mean, rstd + mean_src_spec = mean_strategy.strategies[idx].output_spec + op_args_target_specs.append(mean_src_spec) + redistribute_costs.append([0.0 for _ in mean_strategy.strategies]) + rstd_src_spec = rstd_strategy.strategies[idx].output_spec + op_args_target_specs.append(rstd_src_spec) + redistribute_costs.append([0.0 for _ in rstd_strategy.strategies]) + + # arg: weight + # d_weight = sum(grad_out * (input - mean) / rstd, outer_dim, keepdim=False) + if output_mask[1]: + assert isinstance(weight_strategy, OpStrategy) + weight_src_spec = weight_strategy.strategies[idx].output_spec + # no need to redistribute weight since they should be replicated + # in forward pass + op_args_target_specs.append(weight_src_spec) + redistribute_costs.append([0.0 for _ in weight_strategy.strategies]) + # TODO: now d_weight spec follows input spec w/ a reduction. + # we may need to change to a pointwise rule over grad_out and + # input, then apply a reduction. 
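+            # (illustrative note) e.g. for a 3-D input normalized over its last dim
+            # (axis == 2) and sharded as Shard(0), outer_dims == [0, 1], so the
+            # reduction below collapses Shard(0) into a partial (sum) placement
+            # for d_weight on this mesh dim.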
+ inp_placements = _replicate_dims_start_at(input_src_spec.placements, axis) + reduce_dims_map = _infer_reduce_dims_map( + outer_dims, input_src_spec.ndim, False + ) + out_placements = map_placements_after_reduction( + inp_placements, outer_dims, reduce_dims_map, c10d.ReduceOp.SUM + ) + output_specs_list.append( + DTensorSpec( + mesh=mesh, + placements=out_placements, + tensor_meta=weight_src_spec.tensor_meta, + ) + ) + else: + output_specs_list.append(None) + + # arg: bias + # d_bias = sum(grad_out, outer_dim, keepdim=False) + if output_mask[2]: + assert isinstance(bias_strategy, OpStrategy) + bias_src_spec = bias_strategy.strategies[idx].output_spec + # no need to redistribute weight since they should be replicated + # in forward pass + op_args_target_specs.append(bias_src_spec) + redistribute_costs.append([0.0 for _ in bias_strategy.strategies]) + # Currently we do not support the case where output_mask[0] is False while + # output_mask[1] is True. But it's easy to support that by accessing + # grad_out_spec via a local variable rather than the list. We just don't + # see the case. + grad_out_spec = output_specs_list[0] + assert isinstance(grad_out_spec, DTensorSpec) + # d_bias spec follows a reduction over grad_out + inp_placements = _replicate_dims_start_at(grad_out_spec.placements, axis) + reduce_dims_map = _infer_reduce_dims_map( + outer_dims, grad_out_spec.ndim, False + ) + out_placements = map_placements_after_reduction( + inp_placements, outer_dims, reduce_dims_map, c10d.ReduceOp.SUM + ) + output_specs_list.append( + DTensorSpec( + mesh=mesh, + placements=out_placements, + tensor_meta=bias_src_spec.tensor_meta, + ) + ) + else: + output_specs_list.append(None) + + out_tuple_strategy.strategies.append( + PlacementStrategy( + output_specs=tuple(output_specs_list), + input_specs=op_args_target_specs, + redistribute_cost=redistribute_costs, + ) + ) + + return out_tuple_strategy + + +def _replicate_dims_start_at( + placements: Sequence[Placement], start_dim: int = 0 +) -> Tuple[Placement, ...]: + new_placements: List[Placement] = [] + for p in placements: + if p.is_partial() or (isinstance(p, Shard) and p.dim >= start_dim): + new_placements.append(Replicate()) # make it replicate + else: + new_placements.append(p) # keep the placement + return tuple(new_placements) + + +# return new_placements which align with placements but skip the skipped_dim +def _skip_dim( + placements: Tuple[Placement, ...], skipped_dim: int +) -> Tuple[Placement, ...]: + new_placements: List[Placement] = [] + for p in placements: + if isinstance(p, Shard) and p.dim >= skipped_dim: + new_placements.append(Shard(p.dim - 1)) + else: + new_placements.append(p) + return tuple(new_placements) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/matrix_ops.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/matrix_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..9ee8b3f2a22b6ce2e899228440eec5503a4c40f9 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/matrix_ops.py @@ -0,0 +1,226 @@ +# Copyright (c) Meta Platforms, Inc. 
and affiliates +# implement matrix related ops for distributed tensor +import itertools +from typing import List, Optional + +import torch +from torch.distributed._tensor.op_schema import ( + OpSchema, + OpStrategy, + OutputSharding, + PlacementStrategy, +) +from torch.distributed._tensor.ops.basic_strategy import gen_einsum_strategies +from torch.distributed._tensor.ops.common_rules import einop_rule +from torch.distributed._tensor.ops.utils import ( + generate_redistribute_costs, + infer_broadcast_dims_map, + is_tensor_shardable, + map_placements_after_broadcast, + register_op_strategy, + register_prop_rule, +) +from torch.distributed._tensor.placement_types import ( + DTensorSpec, + Placement, + Replicate, + Shard, +) + +from torch.distributed.device_mesh import DeviceMesh + +aten = torch.ops.aten + + +@register_prop_rule(aten.t.default) +def transpose_rule(op_schema: OpSchema) -> OutputSharding: + return einop_rule("ij->ji", op_schema, linearity=True) + + +def _mm_like_strategy( + mm_equation: str, mesh: DeviceMesh, op_schema: OpSchema +) -> OpStrategy: + self_strategy, mat2_strategy = op_schema.args_schema + assert isinstance(self_strategy, OpStrategy) + assert isinstance(mat2_strategy, OpStrategy) + # generate all possible strategies for mm + mm_strategy = gen_einsum_strategies(mm_equation, mesh) + # filter out invalid strategies and associate costs + strategies = mm_strategy.strategies + filtered_strategies = [] + for strtg in strategies: + assert strtg.input_specs is not None + self_spec = strtg.input_specs[0] + mat2_spec = strtg.input_specs[1] + if is_tensor_shardable( + self_strategy.output_shape, self_spec + ) and is_tensor_shardable(mat2_strategy.output_shape, mat2_spec): + redistribute_cost = [ + generate_redistribute_costs(self_strategy, self_spec), + generate_redistribute_costs(mat2_strategy, mat2_spec), + ] + strtg.redistribute_cost = redistribute_cost + filtered_strategies.append(strtg) + + mm_strategy.strategies = filtered_strategies + + return mm_strategy + + +def _addmm_like_strategy( + mm_equation: str, mesh: DeviceMesh, op_schema: OpSchema +) -> OpStrategy: + self_strategy, mat1_strategy, mat2_strategy = op_schema.args_schema + assert isinstance(self_strategy, OpStrategy) + assert isinstance(mat1_strategy, OpStrategy) + assert isinstance(mat2_strategy, OpStrategy) + self_shape = self_strategy.output_shape + mm_out_shape = torch.Size( + [ + mat2_strategy.output_shape[-1] + if i == len(mat1_strategy.output_shape) - 1 + else dim_size + for i, dim_size in enumerate(mat1_strategy.output_shape) + ] + ) + # generate all possible strategies for mm + mm_strategy = gen_einsum_strategies(mm_equation, mesh) + # filter out invalid strategies and associate costs + strategies = mm_strategy.strategies + filtered_strategies = [] + for strtg in strategies: + # construct new strategy by consider the self arg + assert strtg.input_specs is not None + mat1_spec = strtg.input_specs[0] + mat2_spec = strtg.input_specs[1] + out_spec = strtg.output_spec + + # self arg's spec should follow the output of mm, but need + # to consider broadcast for the self arg + broadcast_dims_map = infer_broadcast_dims_map(mm_out_shape, self_shape) + self_placements = map_placements_after_broadcast( + out_spec.placements, mm_out_shape, broadcast_dims_map + ) + self_spec = DTensorSpec(mesh=mesh, placements=self_placements) + + if is_tensor_shardable( + mat1_strategy.output_shape, mat1_spec + ) and is_tensor_shardable(mat2_strategy.output_shape, mat2_spec): + # update input specs with new self spec + 
strtg.input_specs = (self_spec, mat1_spec, mat2_spec) + + # associate costs + redistribute_cost = [ + generate_redistribute_costs(self_strategy, self_spec), + generate_redistribute_costs(mat1_strategy, mat1_spec), + generate_redistribute_costs(mat2_strategy, mat2_spec), + ] + strtg.redistribute_cost = redistribute_cost + filtered_strategies.append(strtg) + + mm_strategy.strategies = filtered_strategies + + return mm_strategy + + +@register_op_strategy(aten.mm.default) +def mm_strategy(mesh: DeviceMesh, op_schema: OpSchema) -> OpStrategy: + return _mm_like_strategy("mk,kn->mn", mesh, op_schema) + + +@register_op_strategy(aten.addmm.default) +def addmm_strategy(mesh: DeviceMesh, op_schema: OpSchema) -> OpStrategy: + return _addmm_like_strategy("mk,kn->mn", mesh, op_schema) + + +@register_op_strategy(aten.bmm.default) +def bmm_strategy(mesh: DeviceMesh, op_schema: OpSchema) -> OpStrategy: + return _mm_like_strategy("bmk,bkn->bmn", mesh, op_schema) + + +@register_op_strategy(aten.baddbmm.default) +def baddmm_strategy(mesh: DeviceMesh, op_schema: OpSchema) -> OpStrategy: + return _addmm_like_strategy("bmk,bkn->bmn", mesh, op_schema) + + +@register_op_strategy(aten._scaled_dot_product_flash_attention.default) +def scaled_dot_product_attention_strategy( + mesh: DeviceMesh, op_schema: OpSchema +) -> OpStrategy: + # NOTE: currently we only support some simple strategies to support tensor parallelism + # TODO: sdpa might be a good candidate for us to explore decomposed sharding propagation + # as it involves: matmul, pointwise, reduction ops together. + return_debug_mask = len(op_schema.args_schema) >= 6 and op_schema.args_schema[5] + q_input_strategy = op_schema.args_schema[0] + assert isinstance(q_input_strategy, OpStrategy) + # q/k/v have the same shape + qkv_shape = q_input_strategy.output_shape + + all_mesh_dim_strategies = [] + + for mesh_dim in range(mesh.ndim): + single_mesh_dim_strategies = [] + + # placement list stores placements of [outputs, inputs] + # in the spda case, we have 3 valid tensor outputs and 3 tensor inputs + # first we can always accept full replication for inputs and output + all_replicate: List[Placement] = [Replicate()] * 6 + single_mesh_dim_strategies.append(all_replicate) + + # second we can accept the sharding pattern of tensor parallelism, which + # shard on the num of head dim + qkv_sharding = Shard(1) # num head dim + output_sharding = Shard(1) # num head dim + logsumexp_sharding = Shard(1) # num head dim + if return_debug_mask: + debug_attn_mask_sharding: Placement = Shard(1) # num head dim + else: + # empty debug mask, replicated + debug_attn_mask_sharding = Replicate() + + num_heads_dim_sharding = [ + output_sharding, + logsumexp_sharding, + debug_attn_mask_sharding, + qkv_sharding, + qkv_sharding, + qkv_sharding, + ] + single_mesh_dim_strategies.append(num_heads_dim_sharding) + + all_mesh_dim_strategies.append(single_mesh_dim_strategies) + + strategy_combs = itertools.product(*all_mesh_dim_strategies) + + all_strategies = [] + for strategy_comb in strategy_combs: + spec_list = [] + for specs in zip(*strategy_comb): + spec_list.append(DTensorSpec(mesh, tuple(specs))) + + assert len(spec_list) == 6 + input_expected_specs = spec_list[3:] + output_specs: List[Optional[DTensorSpec]] = list(spec_list[:3]) + # fix up output_specs and fill in None for the int and empty tensor return values + for i in range(2, 8): + output_specs.insert(i, None) + if all(is_tensor_shardable(qkv_shape, spec) for spec in input_expected_specs): + # only add to the strategy list when all 
inputs are shardable + redistribute_cost = [] + for input_idx, spec in enumerate(input_expected_specs): + qkv_strategy = op_schema.args_schema[input_idx] + assert isinstance(qkv_strategy, OpStrategy) + qkv_tensor_meta = qkv_strategy.strategies[0].output_spec.tensor_meta + spec.tensor_meta = qkv_tensor_meta + redistribute_cost.append( + generate_redistribute_costs(qkv_strategy, spec) + ) + + strat = PlacementStrategy( + output_specs=tuple(output_specs), + input_specs=tuple(input_expected_specs), + redistribute_cost=redistribute_cost, + ) + all_strategies.append(strat) + + return OpStrategy(all_strategies) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/random_ops.py b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/random_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..b666d1d8bcd0b26c65f77f85d00e78b8f9d17524 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/distributed/_tensor/ops/random_ops.py @@ -0,0 +1,30 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates +import torch +from torch.distributed._tensor.op_schema import ( + OpSchema, + OpStrategy, + PlacementStrategy, + StrategyType, +) +from torch.distributed._tensor.ops.utils import is_tensor_partial, register_op_strategy +from torch.distributed.device_mesh import DeviceMesh + +aten = torch.ops.aten + + +@register_op_strategy( + [aten.normal_.default, aten.uniform_.default, aten.native_dropout.default] +) +def random_op_strategy(mesh: DeviceMesh, op_schema: OpSchema) -> StrategyType: + self_strategy = op_schema.args_schema[0] + assert isinstance(self_strategy, OpStrategy) + + random_strategy = OpStrategy([]) + for arg_strategy in self_strategy.strategies: + arg_spec = arg_strategy.output_spec + if is_tensor_partial(arg_spec): + # TODO: figure out how inplace random op should behave when it's partial + raise RuntimeError(f"{op_schema.op} with _Partial is not supported yet!") + random_strategy.strategies.append(PlacementStrategy(output_specs=arg_spec)) + + return random_strategy diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/__pycache__/apply_optimizer_in_backward.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/__pycache__/apply_optimizer_in_backward.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6145abb6ddfc8d8b160813451b8f22f8afba1245 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/__pycache__/apply_optimizer_in_backward.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/__pycache__/functional_adamax.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/__pycache__/functional_adamax.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9aa3897013f993a3ec9de3af89fb422eacaf3267 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/__pycache__/functional_adamax.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/__pycache__/functional_rmsprop.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/__pycache__/functional_rmsprop.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2c2f4a01dbd5622162599da27c28c7243723b72d Binary files /dev/null and 
b/llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/__pycache__/functional_rmsprop.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/__pycache__/functional_sgd.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/__pycache__/functional_sgd.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..db9c62b352ca27e48421500db0760b45beaf026d Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/__pycache__/functional_sgd.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/__pycache__/optimizer.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/__pycache__/optimizer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..62f9bc367fa43f5fafbb1e6db8024db450a78e8b Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/__pycache__/optimizer.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/__pycache__/zero_redundancy_optimizer.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/__pycache__/zero_redundancy_optimizer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..742838661c29c54327e6f04c81e869467fc18a3e Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/distributed/optim/__pycache__/zero_redundancy_optimizer.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/fft/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/fft/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3bc5191c7b57de89817e5401d0db24aac1c6df5e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/fft/__init__.py @@ -0,0 +1,1360 @@ +import sys + +import torch +from torch._C import _add_docstr, _fft # type: ignore[attr-defined] +from torch._torch_docs import factory_common_args, common_args + +__all__ = ['fft', 'ifft', 'fft2', 'ifft2', 'fftn', 'ifftn', + 'rfft', 'irfft', 'rfft2', 'irfft2', 'rfftn', 'irfftn', + 'hfft', 'ihfft', 'fftfreq', 'rfftfreq', 'fftshift', 'ifftshift', + 'Tensor'] + +Tensor = torch.Tensor + +# Note: This not only adds the doc strings for the spectral ops, but +# connects the torch.fft Python namespace to the torch._C._fft builtins. + +fft = _add_docstr(_fft.fft_fft, r""" +fft(input, n=None, dim=-1, norm=None, *, out=None) -> Tensor + +Computes the one dimensional discrete Fourier transform of :attr:`input`. + +Note: + The Fourier domain representation of any real signal satisfies the + Hermitian property: `X[i] = conj(X[-i])`. This function always returns both + the positive and negative frequency terms even though, for real inputs, the + negative frequencies are redundant. :func:`~torch.fft.rfft` returns the + more compact one-sided representation where only the positive frequencies + are returned. + +Note: + Supports torch.half and torch.chalf on CUDA with GPU Architecture SM53 or greater. + However it only supports powers of 2 signal length in every transformed dimension. + +Args: + input (Tensor): the input tensor + n (int, optional): Signal length. If given, the input will either be zero-padded + or trimmed to this length before computing the FFT. + dim (int, optional): The dimension along which to take the one dimensional FFT. + norm (str, optional): Normalization mode. 
For the forward transform + (:func:`~torch.fft.fft`), these correspond to: + + * ``"forward"`` - normalize by ``1/n`` + * ``"backward"`` - no normalization + * ``"ortho"`` - normalize by ``1/sqrt(n)`` (making the FFT orthonormal) + + Calling the backward transform (:func:`~torch.fft.ifft`) with the same + normalization mode will apply an overall normalization of ``1/n`` between + the two transforms. This is required to make :func:`~torch.fft.ifft` + the exact inverse. + + Default is ``"backward"`` (no normalization). + +Keyword args: + {out} + +Example: + + >>> t = torch.arange(4) + >>> t + tensor([0, 1, 2, 3]) + >>> torch.fft.fft(t) + tensor([ 6.+0.j, -2.+2.j, -2.+0.j, -2.-2.j]) + + >>> t = torch.tensor([0.+1.j, 2.+3.j, 4.+5.j, 6.+7.j]) + >>> torch.fft.fft(t) + tensor([12.+16.j, -8.+0.j, -4.-4.j, 0.-8.j]) +""".format(**common_args)) + +ifft = _add_docstr(_fft.fft_ifft, r""" +ifft(input, n=None, dim=-1, norm=None, *, out=None) -> Tensor + +Computes the one dimensional inverse discrete Fourier transform of :attr:`input`. + +Note: + Supports torch.half and torch.chalf on CUDA with GPU Architecture SM53 or greater. + However it only supports powers of 2 signal length in every transformed dimension. + +Args: + input (Tensor): the input tensor + n (int, optional): Signal length. If given, the input will either be zero-padded + or trimmed to this length before computing the IFFT. + dim (int, optional): The dimension along which to take the one dimensional IFFT. + norm (str, optional): Normalization mode. For the backward transform + (:func:`~torch.fft.ifft`), these correspond to: + + * ``"forward"`` - no normalization + * ``"backward"`` - normalize by ``1/n`` + * ``"ortho"`` - normalize by ``1/sqrt(n)`` (making the IFFT orthonormal) + + Calling the forward transform (:func:`~torch.fft.fft`) with the same + normalization mode will apply an overall normalization of ``1/n`` between + the two transforms. This is required to make :func:`~torch.fft.ifft` + the exact inverse. + + Default is ``"backward"`` (normalize by ``1/n``). + +Keyword args: + {out} + +Example: + + >>> t = torch.tensor([ 6.+0.j, -2.+2.j, -2.+0.j, -2.-2.j]) + >>> torch.fft.ifft(t) + tensor([0.+0.j, 1.+0.j, 2.+0.j, 3.+0.j]) +""".format(**common_args)) + +fft2 = _add_docstr(_fft.fft_fft2, r""" +fft2(input, s=None, dim=(-2, -1), norm=None, *, out=None) -> Tensor + +Computes the 2 dimensional discrete Fourier transform of :attr:`input`. +Equivalent to :func:`~torch.fft.fftn` but FFTs only the last two dimensions by default. + +Note: + The Fourier domain representation of any real signal satisfies the + Hermitian property: ``X[i, j] = conj(X[-i, -j])``. This + function always returns all positive and negative frequency terms even + though, for real inputs, half of these values are redundant. + :func:`~torch.fft.rfft2` returns the more compact one-sided representation + where only the positive frequencies of the last dimension are returned. + +Note: + Supports torch.half and torch.chalf on CUDA with GPU Architecture SM53 or greater. + However it only supports powers of 2 signal length in every transformed dimensions. + +Args: + input (Tensor): the input tensor + s (Tuple[int], optional): Signal size in the transformed dimensions. + If given, each dimension ``dim[i]`` will either be zero-padded or + trimmed to the length ``s[i]`` before computing the FFT. + If a length ``-1`` is specified, no padding is done in that dimension. + Default: ``s = [input.size(d) for d in dim]`` + dim (Tuple[int], optional): Dimensions to be transformed. 
+ Default: last two dimensions. + norm (str, optional): Normalization mode. For the forward transform + (:func:`~torch.fft.fft2`), these correspond to: + + * ``"forward"`` - normalize by ``1/n`` + * ``"backward"`` - no normalization + * ``"ortho"`` - normalize by ``1/sqrt(n)`` (making the FFT orthonormal) + + Where ``n = prod(s)`` is the logical FFT size. + Calling the backward transform (:func:`~torch.fft.ifft2`) with the same + normalization mode will apply an overall normalization of ``1/n`` + between the two transforms. This is required to make + :func:`~torch.fft.ifft2` the exact inverse. + + Default is ``"backward"`` (no normalization). + +Keyword args: + {out} + +Example: + + >>> x = torch.rand(10, 10, dtype=torch.complex64) + >>> fft2 = torch.fft.fft2(x) + + The discrete Fourier transform is separable, so :func:`~torch.fft.fft2` + here is equivalent to two one-dimensional :func:`~torch.fft.fft` calls: + + >>> two_ffts = torch.fft.fft(torch.fft.fft(x, dim=0), dim=1) + >>> torch.testing.assert_close(fft2, two_ffts, check_stride=False) + +""".format(**common_args)) + +ifft2 = _add_docstr(_fft.fft_ifft2, r""" +ifft2(input, s=None, dim=(-2, -1), norm=None, *, out=None) -> Tensor + +Computes the 2 dimensional inverse discrete Fourier transform of :attr:`input`. +Equivalent to :func:`~torch.fft.ifftn` but IFFTs only the last two dimensions by default. + +Note: + Supports torch.half and torch.chalf on CUDA with GPU Architecture SM53 or greater. + However it only supports powers of 2 signal length in every transformed dimensions. + +Args: + input (Tensor): the input tensor + s (Tuple[int], optional): Signal size in the transformed dimensions. + If given, each dimension ``dim[i]`` will either be zero-padded or + trimmed to the length ``s[i]`` before computing the IFFT. + If a length ``-1`` is specified, no padding is done in that dimension. + Default: ``s = [input.size(d) for d in dim]`` + dim (Tuple[int], optional): Dimensions to be transformed. + Default: last two dimensions. + norm (str, optional): Normalization mode. For the backward transform + (:func:`~torch.fft.ifft2`), these correspond to: + + * ``"forward"`` - no normalization + * ``"backward"`` - normalize by ``1/n`` + * ``"ortho"`` - normalize by ``1/sqrt(n)`` (making the IFFT orthonormal) + + Where ``n = prod(s)`` is the logical IFFT size. + Calling the forward transform (:func:`~torch.fft.fft2`) with the same + normalization mode will apply an overall normalization of ``1/n`` between + the two transforms. This is required to make :func:`~torch.fft.ifft2` + the exact inverse. + + Default is ``"backward"`` (normalize by ``1/n``). + +Keyword args: + {out} + +Example: + + >>> x = torch.rand(10, 10, dtype=torch.complex64) + >>> ifft2 = torch.fft.ifft2(x) + + The discrete Fourier transform is separable, so :func:`~torch.fft.ifft2` + here is equivalent to two one-dimensional :func:`~torch.fft.ifft` calls: + + >>> two_iffts = torch.fft.ifft(torch.fft.ifft(x, dim=0), dim=1) + >>> torch.testing.assert_close(ifft2, two_iffts, check_stride=False) + +""".format(**common_args)) + +fftn = _add_docstr(_fft.fft_fftn, r""" +fftn(input, s=None, dim=None, norm=None, *, out=None) -> Tensor + +Computes the N dimensional discrete Fourier transform of :attr:`input`. + +Note: + The Fourier domain representation of any real signal satisfies the + Hermitian property: ``X[i_1, ..., i_n] = conj(X[-i_1, ..., -i_n])``. This + function always returns all positive and negative frequency terms even + though, for real inputs, half of these values are redundant. 
+ :func:`~torch.fft.rfftn` returns the more compact one-sided representation + where only the positive frequencies of the last dimension are returned. + +Note: + Supports torch.half and torch.chalf on CUDA with GPU Architecture SM53 or greater. + However it only supports powers of 2 signal length in every transformed dimensions. + +Args: + input (Tensor): the input tensor + s (Tuple[int], optional): Signal size in the transformed dimensions. + If given, each dimension ``dim[i]`` will either be zero-padded or + trimmed to the length ``s[i]`` before computing the FFT. + If a length ``-1`` is specified, no padding is done in that dimension. + Default: ``s = [input.size(d) for d in dim]`` + dim (Tuple[int], optional): Dimensions to be transformed. + Default: all dimensions, or the last ``len(s)`` dimensions if :attr:`s` is given. + norm (str, optional): Normalization mode. For the forward transform + (:func:`~torch.fft.fftn`), these correspond to: + + * ``"forward"`` - normalize by ``1/n`` + * ``"backward"`` - no normalization + * ``"ortho"`` - normalize by ``1/sqrt(n)`` (making the FFT orthonormal) + + Where ``n = prod(s)`` is the logical FFT size. + Calling the backward transform (:func:`~torch.fft.ifftn`) with the same + normalization mode will apply an overall normalization of ``1/n`` + between the two transforms. This is required to make + :func:`~torch.fft.ifftn` the exact inverse. + + Default is ``"backward"`` (no normalization). + +Keyword args: + {out} + +Example: + + >>> x = torch.rand(10, 10, dtype=torch.complex64) + >>> fftn = torch.fft.fftn(x) + + The discrete Fourier transform is separable, so :func:`~torch.fft.fftn` + here is equivalent to two one-dimensional :func:`~torch.fft.fft` calls: + + >>> two_ffts = torch.fft.fft(torch.fft.fft(x, dim=0), dim=1) + >>> torch.testing.assert_close(fftn, two_ffts, check_stride=False) + +""".format(**common_args)) + +ifftn = _add_docstr(_fft.fft_ifftn, r""" +ifftn(input, s=None, dim=None, norm=None, *, out=None) -> Tensor + +Computes the N dimensional inverse discrete Fourier transform of :attr:`input`. + +Note: + Supports torch.half and torch.chalf on CUDA with GPU Architecture SM53 or greater. + However it only supports powers of 2 signal length in every transformed dimensions. + +Args: + input (Tensor): the input tensor + s (Tuple[int], optional): Signal size in the transformed dimensions. + If given, each dimension ``dim[i]`` will either be zero-padded or + trimmed to the length ``s[i]`` before computing the IFFT. + If a length ``-1`` is specified, no padding is done in that dimension. + Default: ``s = [input.size(d) for d in dim]`` + dim (Tuple[int], optional): Dimensions to be transformed. + Default: all dimensions, or the last ``len(s)`` dimensions if :attr:`s` is given. + norm (str, optional): Normalization mode. For the backward transform + (:func:`~torch.fft.ifftn`), these correspond to: + + * ``"forward"`` - no normalization + * ``"backward"`` - normalize by ``1/n`` + * ``"ortho"`` - normalize by ``1/sqrt(n)`` (making the IFFT orthonormal) + + Where ``n = prod(s)`` is the logical IFFT size. + Calling the forward transform (:func:`~torch.fft.fftn`) with the same + normalization mode will apply an overall normalization of ``1/n`` between + the two transforms. This is required to make :func:`~torch.fft.ifftn` + the exact inverse. + + Default is ``"backward"`` (normalize by ``1/n``). 
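The normalization contract described above is easiest to see numerically. A minimal sketch, assuming nothing beyond the public ``torch.fft`` API: round-tripping ``fftn``/``ifftn`` with the same ``norm`` recovers the input under every mode, and only ``"ortho"`` preserves the signal's L2 norm.

import torch

x = torch.randn(4, 4, dtype=torch.complex64)

# Round-tripping with the same `norm` recovers the input for every mode.
for norm in ("forward", "backward", "ortho"):
    roundtrip = torch.fft.ifftn(torch.fft.fftn(x, norm=norm), norm=norm)
    torch.testing.assert_close(roundtrip, x)

# Only "ortho" makes the transform unitary, i.e. it preserves the L2 norm.
X = torch.fft.fftn(x, norm="ortho")
torch.testing.assert_close(torch.linalg.vector_norm(X), torch.linalg.vector_norm(x))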
+ +Keyword args: + {out} + +Example: + + >>> x = torch.rand(10, 10, dtype=torch.complex64) + >>> ifftn = torch.fft.ifftn(x) + + The discrete Fourier transform is separable, so :func:`~torch.fft.ifftn` + here is equivalent to two one-dimensional :func:`~torch.fft.ifft` calls: + + >>> two_iffts = torch.fft.ifft(torch.fft.ifft(x, dim=0), dim=1) + >>> torch.testing.assert_close(ifftn, two_iffts, check_stride=False) + +""".format(**common_args)) + +rfft = _add_docstr(_fft.fft_rfft, r""" +rfft(input, n=None, dim=-1, norm=None, *, out=None) -> Tensor + +Computes the one dimensional Fourier transform of real-valued :attr:`input`. + +The FFT of a real signal is Hermitian-symmetric, ``X[i] = conj(X[-i])``, so +the output contains only the positive frequencies below the Nyquist frequency. +To compute the full output, use :func:`~torch.fft.fft`. + +Note: + Supports torch.half on CUDA with GPU Architecture SM53 or greater. + However it only supports powers of 2 signal length in every transformed dimension. + +Args: + input (Tensor): the real input tensor + n (int, optional): Signal length. If given, the input will either be zero-padded + or trimmed to this length before computing the real FFT. + dim (int, optional): The dimension along which to take the one dimensional real FFT. + norm (str, optional): Normalization mode. For the forward transform + (:func:`~torch.fft.rfft`), these correspond to: + + * ``"forward"`` - normalize by ``1/n`` + * ``"backward"`` - no normalization + * ``"ortho"`` - normalize by ``1/sqrt(n)`` (making the FFT orthonormal) + + Calling the backward transform (:func:`~torch.fft.irfft`) with the same + normalization mode will apply an overall normalization of ``1/n`` between + the two transforms. This is required to make :func:`~torch.fft.irfft` + the exact inverse. + + Default is ``"backward"`` (no normalization). + +Keyword args: + {out} + +Example: + + >>> t = torch.arange(4) + >>> t + tensor([0, 1, 2, 3]) + >>> torch.fft.rfft(t) + tensor([ 6.+0.j, -2.+2.j, -2.+0.j]) + + Compare against the full output from :func:`~torch.fft.fft`: + + >>> torch.fft.fft(t) + tensor([ 6.+0.j, -2.+2.j, -2.+0.j, -2.-2.j]) + + Notice that the symmetric element ``T[-1] == T[1].conj()`` of the full + output ``T = torch.fft.fft(t)`` is omitted. At the Nyquist frequency + ``T[-2] == T[2]`` is its own symmetric pair, and therefore must always be + real-valued. +""".format(**common_args)) + +irfft = _add_docstr(_fft.fft_irfft, r""" +irfft(input, n=None, dim=-1, norm=None, *, out=None) -> Tensor + +Computes the inverse of :func:`~torch.fft.rfft`. + +:attr:`input` is interpreted as a one-sided Hermitian signal in the Fourier +domain, as produced by :func:`~torch.fft.rfft`. By the Hermitian property, the +output will be real-valued. + +Note: + Some input frequencies must be real-valued to satisfy the Hermitian + property. In these cases the imaginary component will be ignored. + For example, any imaginary component in the zero-frequency term cannot + be represented in a real output and so will always be ignored. + +Note: + The correct interpretation of the Hermitian input depends on the length of + the original data, as given by :attr:`n`. This is because each input shape + could correspond to either an odd or even length signal. By default, the + signal is assumed to be even length and odd signals will not round-trip + properly. So, it is recommended to always pass the signal length :attr:`n`. + +Note: + Supports torch.half and torch.chalf on CUDA with GPU Architecture SM53 or greater.
+ However it only supports powers of 2 signal length in every transformed dimension. + With default arguments, size of the transformed dimension should be (2^n + 1) as argument + `n` defaults to even output size = 2 * (transformed_dim_size - 1) + +Args: + input (Tensor): the input tensor representing a half-Hermitian signal + n (int, optional): Output signal length. This determines the length of the + output signal. If given, the input will either be zero-padded or trimmed to this + length before computing the real IFFT. + Defaults to even output: ``n=2*(input.size(dim) - 1)``. + dim (int, optional): The dimension along which to take the one dimensional real IFFT. + norm (str, optional): Normalization mode. For the backward transform + (:func:`~torch.fft.irfft`), these correspond to: + + * ``"forward"`` - no normalization + * ``"backward"`` - normalize by ``1/n`` + * ``"ortho"`` - normalize by ``1/sqrt(n)`` (making the real IFFT orthonormal) + + Calling the forward transform (:func:`~torch.fft.rfft`) with the same + normalization mode will apply an overall normalization of ``1/n`` between + the two transforms. This is required to make :func:`~torch.fft.irfft` + the exact inverse. + + Default is ``"backward"`` (normalize by ``1/n``). + +Keyword args: + {out} + +Example: + + >>> t = torch.linspace(0, 1, 5) + >>> t + tensor([0.0000, 0.2500, 0.5000, 0.7500, 1.0000]) + >>> T = torch.fft.rfft(t) + >>> T + tensor([ 2.5000+0.0000j, -0.6250+0.8602j, -0.6250+0.2031j]) + + Without specifying the output length to :func:`~torch.fft.irfft`, the output + will not round-trip properly because the input is odd-length: + + >>> torch.fft.irfft(T) + tensor([0.1562, 0.3511, 0.7812, 1.2114]) + + So, it is recommended to always pass the signal length :attr:`n`: + + >>> roundtrip = torch.fft.irfft(T, t.numel()) + >>> torch.testing.assert_close(roundtrip, t, check_stride=False) + +""".format(**common_args)) + +rfft2 = _add_docstr(_fft.fft_rfft2, r""" +rfft2(input, s=None, dim=(-2, -1), norm=None, *, out=None) -> Tensor + +Computes the 2-dimensional discrete Fourier transform of real :attr:`input`. +Equivalent to :func:`~torch.fft.rfftn` but FFTs only the last two dimensions by default. + +The FFT of a real signal is Hermitian-symmetric, ``X[i, j] = conj(X[-i, -j])``, +so the full :func:`~torch.fft.fft2` output contains redundant information. +:func:`~torch.fft.rfft2` instead omits the negative frequencies in the last +dimension. + +Note: + Supports torch.half on CUDA with GPU Architecture SM53 or greater. + However it only supports powers of 2 signal length in every transformed dimensions. + +Args: + input (Tensor): the input tensor + s (Tuple[int], optional): Signal size in the transformed dimensions. + If given, each dimension ``dim[i]`` will either be zero-padded or + trimmed to the length ``s[i]`` before computing the real FFT. + If a length ``-1`` is specified, no padding is done in that dimension. + Default: ``s = [input.size(d) for d in dim]`` + dim (Tuple[int], optional): Dimensions to be transformed. + Default: last two dimensions. + norm (str, optional): Normalization mode. For the forward transform + (:func:`~torch.fft.rfft2`), these correspond to: + + * ``"forward"`` - normalize by ``1/n`` + * ``"backward"`` - no normalization + * ``"ortho"`` - normalize by ``1/sqrt(n)`` (making the real FFT orthonormal) + + Where ``n = prod(s)`` is the logical FFT size. 
+ Calling the backward transform (:func:`~torch.fft.irfft2`) with the same + normalization mode will apply an overall normalization of ``1/n`` between + the two transforms. This is required to make :func:`~torch.fft.irfft2` + the exact inverse. + + Default is ``"backward"`` (no normalization). + +Keyword args: + {out} + +Example: + + >>> t = torch.rand(10, 10) + >>> rfft2 = torch.fft.rfft2(t) + >>> rfft2.size() + torch.Size([10, 6]) + + Compared against the full output from :func:`~torch.fft.fft2`, we have all + elements up to the Nyquist frequency. + + >>> fft2 = torch.fft.fft2(t) + >>> torch.testing.assert_close(fft2[..., :6], rfft2, check_stride=False) + + The discrete Fourier transform is separable, so :func:`~torch.fft.rfft2` + here is equivalent to a combination of :func:`~torch.fft.fft` and + :func:`~torch.fft.rfft`: + + >>> two_ffts = torch.fft.fft(torch.fft.rfft(t, dim=1), dim=0) + >>> torch.testing.assert_close(rfft2, two_ffts, check_stride=False) + +""".format(**common_args)) + +irfft2 = _add_docstr(_fft.fft_irfft2, r""" +irfft2(input, s=None, dim=(-2, -1), norm=None, *, out=None) -> Tensor + +Computes the inverse of :func:`~torch.fft.rfft2`. +Equivalent to :func:`~torch.fft.irfftn` but IFFTs only the last two dimensions by default. + +:attr:`input` is interpreted as a one-sided Hermitian signal in the Fourier +domain, as produced by :func:`~torch.fft.rfft2`. By the Hermitian property, the +output will be real-valued. + +Note: + Some input frequencies must be real-valued to satisfy the Hermitian + property. In these cases the imaginary component will be ignored. + For example, any imaginary component in the zero-frequency term cannot + be represented in a real output and so will always be ignored. + +Note: + The correct interpretation of the Hermitian input depends on the length of + the original data, as given by :attr:`s`. This is because each input shape + could correspond to either an odd or even length signal. By default, the + signal is assumed to be even length and odd signals will not round-trip + properly. So, it is recommended to always pass the signal shape :attr:`s`. + +Note: + Supports torch.half and torch.chalf on CUDA with GPU Architecture SM53 or greater. + However it only supports powers of 2 signal length in every transformed dimensions. + With default arguments, the size of last dimension should be (2^n + 1) as argument + `s` defaults to even output size = 2 * (last_dim_size - 1) + +Args: + input (Tensor): the input tensor + s (Tuple[int], optional): Signal size in the transformed dimensions. + If given, each dimension ``dim[i]`` will either be zero-padded or + trimmed to the length ``s[i]`` before computing the real FFT. + If a length ``-1`` is specified, no padding is done in that dimension. + Defaults to even output in the last dimension: + ``s[-1] = 2*(input.size(dim[-1]) - 1)``. + dim (Tuple[int], optional): Dimensions to be transformed. + The last dimension must be the half-Hermitian compressed dimension. + Default: last two dimensions. + norm (str, optional): Normalization mode. For the backward transform + (:func:`~torch.fft.irfft2`), these correspond to: + + * ``"forward"`` - no normalization + * ``"backward"`` - normalize by ``1/n`` + * ``"ortho"`` - normalize by ``1/sqrt(n)`` (making the real IFFT orthonormal) + + Where ``n = prod(s)`` is the logical IFFT size. + Calling the forward transform (:func:`~torch.fft.rfft2`) with the same + normalization mode will apply an overall normalization of ``1/n`` between + the two transforms. 
This is required to make :func:`~torch.fft.irfft2` + the exact inverse. + + Default is ``"backward"`` (normalize by ``1/n``). + +Keyword args: + {out} + +Example: + + >>> t = torch.rand(10, 9) + >>> T = torch.fft.rfft2(t) + + Without specifying the output length to :func:`~torch.fft.irfft2`, the output + will not round-trip properly because the input is odd-length in the last + dimension: + + >>> torch.fft.irfft2(T).size() + torch.Size([10, 8]) + + So, it is recommended to always pass the signal shape :attr:`s`. + + >>> roundtrip = torch.fft.irfft2(T, t.size()) + >>> roundtrip.size() + torch.Size([10, 9]) + >>> torch.testing.assert_close(roundtrip, t, check_stride=False) + +""".format(**common_args)) + +rfftn = _add_docstr(_fft.fft_rfftn, r""" +rfftn(input, s=None, dim=None, norm=None, *, out=None) -> Tensor + +Computes the N-dimensional discrete Fourier transform of real :attr:`input`. + +The FFT of a real signal is Hermitian-symmetric, +``X[i_1, ..., i_n] = conj(X[-i_1, ..., -i_n])`` so the full +:func:`~torch.fft.fftn` output contains redundant information. +:func:`~torch.fft.rfftn` instead omits the negative frequencies in the +last dimension. + +Note: + Supports torch.half on CUDA with GPU Architecture SM53 or greater. + However it only supports powers of 2 signal length in every transformed dimensions. + +Args: + input (Tensor): the input tensor + s (Tuple[int], optional): Signal size in the transformed dimensions. + If given, each dimension ``dim[i]`` will either be zero-padded or + trimmed to the length ``s[i]`` before computing the real FFT. + If a length ``-1`` is specified, no padding is done in that dimension. + Default: ``s = [input.size(d) for d in dim]`` + dim (Tuple[int], optional): Dimensions to be transformed. + Default: all dimensions, or the last ``len(s)`` dimensions if :attr:`s` is given. + norm (str, optional): Normalization mode. For the forward transform + (:func:`~torch.fft.rfftn`), these correspond to: + + * ``"forward"`` - normalize by ``1/n`` + * ``"backward"`` - no normalization + * ``"ortho"`` - normalize by ``1/sqrt(n)`` (making the real FFT orthonormal) + + Where ``n = prod(s)`` is the logical FFT size. + Calling the backward transform (:func:`~torch.fft.irfftn`) with the same + normalization mode will apply an overall normalization of ``1/n`` between + the two transforms. This is required to make :func:`~torch.fft.irfftn` + the exact inverse. + + Default is ``"backward"`` (no normalization). + +Keyword args: + {out} + +Example: + + >>> t = torch.rand(10, 10) + >>> rfftn = torch.fft.rfftn(t) + >>> rfftn.size() + torch.Size([10, 6]) + + Compared against the full output from :func:`~torch.fft.fftn`, we have all + elements up to the Nyquist frequency. + + >>> fftn = torch.fft.fftn(t) + >>> torch.testing.assert_close(fftn[..., :6], rfftn, check_stride=False) + + The discrete Fourier transform is separable, so :func:`~torch.fft.rfftn` + here is equivalent to a combination of :func:`~torch.fft.fft` and + :func:`~torch.fft.rfft`: + + >>> two_ffts = torch.fft.fft(torch.fft.rfft(t, dim=1), dim=0) + >>> torch.testing.assert_close(rfftn, two_ffts, check_stride=False) + +""".format(**common_args)) + +irfftn = _add_docstr(_fft.fft_irfftn, r""" +irfftn(input, s=None, dim=None, norm=None, *, out=None) -> Tensor + +Computes the inverse of :func:`~torch.fft.rfftn`. + +:attr:`input` is interpreted as a one-sided Hermitian signal in the Fourier +domain, as produced by :func:`~torch.fft.rfftn`. By the Hermitian property, the +output will be real-valued. 
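As with :func:`~torch.fft.irfft`, the round trip is only exact when the true signal shape is supplied. A small sketch of the N-dimensional case, assuming only the public ``torch.fft`` API:

import torch

t = torch.rand(10, 9)                      # odd-length last dimension
T = torch.fft.rfftn(t)                     # one-sided output, shape (10, 5)

# The default output assumes an even-length signal and drops a column...
assert torch.fft.irfftn(T).shape == (10, 8)

# ...so pass the original shape to get an exact round trip.
torch.testing.assert_close(torch.fft.irfftn(T, s=t.shape), t)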
+ +Note: + Some input frequencies must be real-valued to satisfy the Hermitian + property. In these cases the imaginary component will be ignored. + For example, any imaginary component in the zero-frequency term cannot + be represented in a real output and so will always be ignored. + +Note: + The correct interpretation of the Hermitian input depends on the length of + the original data, as given by :attr:`s`. This is because each input shape + could correspond to either an odd or even length signal. By default, the + signal is assumed to be even length and odd signals will not round-trip + properly. So, it is recommended to always pass the signal shape :attr:`s`. + +Note: + Supports torch.half and torch.chalf on CUDA with GPU Architecture SM53 or greater. + However it only supports powers of 2 signal length in every transformed dimensions. + With default arguments, the size of last dimension should be (2^n + 1) as argument + `s` defaults to even output size = 2 * (last_dim_size - 1) + +Args: + input (Tensor): the input tensor + s (Tuple[int], optional): Signal size in the transformed dimensions. + If given, each dimension ``dim[i]`` will either be zero-padded or + trimmed to the length ``s[i]`` before computing the real FFT. + If a length ``-1`` is specified, no padding is done in that dimension. + Defaults to even output in the last dimension: + ``s[-1] = 2*(input.size(dim[-1]) - 1)``. + dim (Tuple[int], optional): Dimensions to be transformed. + The last dimension must be the half-Hermitian compressed dimension. + Default: all dimensions, or the last ``len(s)`` dimensions if :attr:`s` is given. + norm (str, optional): Normalization mode. For the backward transform + (:func:`~torch.fft.irfftn`), these correspond to: + + * ``"forward"`` - no normalization + * ``"backward"`` - normalize by ``1/n`` + * ``"ortho"`` - normalize by ``1/sqrt(n)`` (making the real IFFT orthonormal) + + Where ``n = prod(s)`` is the logical IFFT size. + Calling the forward transform (:func:`~torch.fft.rfftn`) with the same + normalization mode will apply an overall normalization of ``1/n`` between + the two transforms. This is required to make :func:`~torch.fft.irfftn` + the exact inverse. + + Default is ``"backward"`` (normalize by ``1/n``). + +Keyword args: + {out} + +Example: + + >>> t = torch.rand(10, 9) + >>> T = torch.fft.rfftn(t) + + Without specifying the output length to :func:`~torch.fft.irfft`, the output + will not round-trip properly because the input is odd-length in the last + dimension: + + >>> torch.fft.irfftn(T).size() + torch.Size([10, 8]) + + So, it is recommended to always pass the signal shape :attr:`s`. + + >>> roundtrip = torch.fft.irfftn(T, t.size()) + >>> roundtrip.size() + torch.Size([10, 9]) + >>> torch.testing.assert_close(roundtrip, t, check_stride=False) + +""".format(**common_args)) + +hfft = _add_docstr(_fft.fft_hfft, r""" +hfft(input, n=None, dim=-1, norm=None, *, out=None) -> Tensor + +Computes the one dimensional discrete Fourier transform of a Hermitian +symmetric :attr:`input` signal. + +Note: + + :func:`~torch.fft.hfft`/:func:`~torch.fft.ihfft` are analogous to + :func:`~torch.fft.rfft`/:func:`~torch.fft.irfft`. The real FFT expects + a real signal in the time-domain and gives a Hermitian symmetry in the + frequency-domain. The Hermitian FFT is the opposite; Hermitian symmetric in + the time-domain and real-valued in the frequency-domain. For this reason, + special care needs to be taken with the length argument :attr:`n`, in the + same way as with :func:`~torch.fft.irfft`. 
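A brief sketch of that pairing, using only the public API: a real frequency-domain signal passes through :func:`~torch.fft.ihfft` to a one-sided Hermitian signal and back through :func:`~torch.fft.hfft`, with the original length passed explicitly because it is odd.

import torch

freq = torch.linspace(0, 1, 5)             # real-valued, odd length
half = torch.fft.ihfft(freq)               # one-sided Hermitian signal (3 complex values)

# hfft needs the original length, exactly as irfft does.
torch.testing.assert_close(torch.fft.hfft(half, n=freq.numel()), freq)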
+ +Note: + Because the signal is Hermitian in the time-domain, the result will be + real in the frequency domain. Note that some input frequencies must be + real-valued to satisfy the Hermitian property. In these cases the imaginary + component will be ignored. For example, any imaginary component in + ``input[0]`` would result in one or more complex frequency terms which + cannot be represented in a real output and so will always be ignored. + +Note: + The correct interpretation of the Hermitian input depends on the length of + the original data, as given by :attr:`n`. This is because each input shape + could correspond to either an odd or even length signal. By default, the + signal is assumed to be even length and odd signals will not round-trip + properly. So, it is recommended to always pass the signal length :attr:`n`. + +Note: + Supports torch.half and torch.chalf on CUDA with GPU Architecture SM53 or greater. + However it only supports powers of 2 signal length in every transformed dimension. + With default arguments, size of the transformed dimension should be (2^n + 1) as argument + `n` defaults to even output size = 2 * (transformed_dim_size - 1) + +Args: + input (Tensor): the input tensor representing a half-Hermitian signal + n (int, optional): Output signal length. This determines the length of the + real output. If given, the input will either be zero-padded or trimmed to this + length before computing the Hermitian FFT. + Defaults to even output: ``n=2*(input.size(dim) - 1)``. + dim (int, optional): The dimension along which to take the one dimensional Hermitian FFT. + norm (str, optional): Normalization mode. For the forward transform + (:func:`~torch.fft.hfft`), these correspond to: + + * ``"forward"`` - normalize by ``1/n`` + * ``"backward"`` - no normalization + * ``"ortho"`` - normalize by ``1/sqrt(n)`` (making the Hermitian FFT orthonormal) + + Calling the backward transform (:func:`~torch.fft.ihfft`) with the same + normalization mode will apply an overall normalization of ``1/n`` between + the two transforms. This is required to make :func:`~torch.fft.ihfft` + the exact inverse. + + Default is ``"backward"`` (no normalization). + +Keyword args: + {out} + +Example: + + Taking a real-valued frequency signal and bringing it into the time domain + gives Hermitian symmetric output: + + >>> t = torch.linspace(0, 1, 5) + >>> t + tensor([0.0000, 0.2500, 0.5000, 0.7500, 1.0000]) + >>> T = torch.fft.ifft(t) + >>> T + tensor([ 0.5000-0.0000j, -0.1250-0.1720j, -0.1250-0.0406j, -0.1250+0.0406j, + -0.1250+0.1720j]) + + Note that ``T[1] == T[-1].conj()`` and ``T[2] == T[-2].conj()`` is + redundant. We can thus compute the forward transform without considering + negative frequencies: + + >>> torch.fft.hfft(T[:3], n=5) + tensor([0.0000, 0.2500, 0.5000, 0.7500, 1.0000]) + + Like with :func:`~torch.fft.irfft`, the output length must be given in order + to recover an even length output: + + >>> torch.fft.hfft(T[:3]) + tensor([0.1250, 0.2809, 0.6250, 0.9691]) +""".format(**common_args)) + +ihfft = _add_docstr(_fft.fft_ihfft, r""" +ihfft(input, n=None, dim=-1, norm=None, *, out=None) -> Tensor + +Computes the inverse of :func:`~torch.fft.hfft`. + +:attr:`input` must be a real-valued signal, interpreted in the Fourier domain. +The IFFT of a real signal is Hermitian-symmetric, ``X[i] = conj(X[-i])``. +:func:`~torch.fft.ihfft` represents this in the one-sided form where only the +positive frequencies below the Nyquist frequency are included. 
To compute the +full output, use :func:`~torch.fft.ifft`. + +Note: + Supports torch.half on CUDA with GPU Architecture SM53 or greater. + However it only supports powers of 2 signal length in every transformed dimension. + +Args: + input (Tensor): the real input tensor + n (int, optional): Signal length. If given, the input will either be zero-padded + or trimmed to this length before computing the Hermitian IFFT. + dim (int, optional): The dimension along which to take the one dimensional Hermitian IFFT. + norm (str, optional): Normalization mode. For the backward transform + (:func:`~torch.fft.ihfft`), these correspond to: + + * ``"forward"`` - no normalization + * ``"backward"`` - normalize by ``1/n`` + * ``"ortho"`` - normalize by ``1/sqrt(n)`` (making the IFFT orthonormal) + + Calling the forward transform (:func:`~torch.fft.hfft`) with the same + normalization mode will apply an overall normalization of ``1/n`` between + the two transforms. This is required to make :func:`~torch.fft.ihfft` + the exact inverse. + + Default is ``"backward"`` (normalize by ``1/n``). + +Keyword args: + {out} + +Example: + + >>> t = torch.arange(5) + >>> t + tensor([0, 1, 2, 3, 4]) + >>> torch.fft.ihfft(t) + tensor([ 2.0000-0.0000j, -0.5000-0.6882j, -0.5000-0.1625j]) + + Compare against the full output from :func:`~torch.fft.ifft`: + + >>> torch.fft.ifft(t) + tensor([ 2.0000-0.0000j, -0.5000-0.6882j, -0.5000-0.1625j, -0.5000+0.1625j, + -0.5000+0.6882j]) +""".format(**common_args)) + +hfft2 = _add_docstr(_fft.fft_hfft2, r""" +hfft2(input, s=None, dim=(-2, -1), norm=None, *, out=None) -> Tensor + +Computes the 2-dimensional discrete Fourier transform of a Hermitian symmetric +:attr:`input` signal. Equivalent to :func:`~torch.fft.hfftn` but only +transforms the last two dimensions by default. + +:attr:`input` is interpreted as a one-sided Hermitian signal in the time +domain. By the Hermitian property, the Fourier transform will be real-valued. + +Note: + Supports torch.half and torch.chalf on CUDA with GPU Architecture SM53 or greater. + However it only supports powers of 2 signal length in every transformed dimensions. + With default arguments, the size of last dimension should be (2^n + 1) as argument + `s` defaults to even output size = 2 * (last_dim_size - 1) + +Args: + input (Tensor): the input tensor + s (Tuple[int], optional): Signal size in the transformed dimensions. + If given, each dimension ``dim[i]`` will either be zero-padded or + trimmed to the length ``s[i]`` before computing the Hermitian FFT. + If a length ``-1`` is specified, no padding is done in that dimension. + Defaults to even output in the last dimension: + ``s[-1] = 2*(input.size(dim[-1]) - 1)``. + dim (Tuple[int], optional): Dimensions to be transformed. + The last dimension must be the half-Hermitian compressed dimension. + Default: last two dimensions. + norm (str, optional): Normalization mode. For the forward transform + (:func:`~torch.fft.hfft2`), these correspond to: + + * ``"forward"`` - normalize by ``1/n`` + * ``"backward"`` - no normalization + * ``"ortho"`` - normalize by ``1/sqrt(n)`` (making the Hermitian FFT orthonormal) + + Where ``n = prod(s)`` is the logical FFT size. + Calling the backward transform (:func:`~torch.fft.ihfft2`) with the same + normalization mode will apply an overall normalization of ``1/n`` between + the two transforms. This is required to make :func:`~torch.fft.ihfft2` + the exact inverse. + + Default is ``"backward"`` (no normalization). 
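For the shape bookkeeping involved, a tiny sketch (the sizes are arbitrary): with default arguments the compressed last dimension expands to ``2 * (input.size(-1) - 1)`` and the result is real-valued.

import torch

x = torch.randn(8, 5, dtype=torch.complex64)   # one-sided Hermitian input
out = torch.fft.hfft2(x)

assert out.dtype == torch.float32              # Hermitian time-domain signal -> real frequencies
assert out.shape == (8, 8)                     # last dim: 2 * (5 - 1)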
+ +Keyword args: + {out} + +Example: + + Starting from a real frequency-space signal, we can generate a + Hermitian-symmetric time-domain signal: + + >>> T = torch.rand(10, 9) + >>> t = torch.fft.ihfft2(T) + + Without specifying the output length to :func:`~torch.fft.hfft2`, the + output will not round-trip properly because the input is odd-length in the + last dimension: + + >>> torch.fft.hfft2(t).size() + torch.Size([10, 10]) + + So, it is recommended to always pass the signal shape :attr:`s`. + + >>> roundtrip = torch.fft.hfft2(t, T.size()) + >>> roundtrip.size() + torch.Size([10, 9]) + >>> torch.allclose(roundtrip, T) + True + +""".format(**common_args)) + +ihfft2 = _add_docstr(_fft.fft_ihfft2, r""" +ihfft2(input, s=None, dim=(-2, -1), norm=None, *, out=None) -> Tensor + +Computes the 2-dimensional inverse discrete Fourier transform of real +:attr:`input`. Equivalent to :func:`~torch.fft.ihfftn` but transforms only the +last two dimensions by default. + +Note: + Supports torch.half on CUDA with GPU Architecture SM53 or greater. + However it only supports powers of 2 signal length in every transformed dimension. + +Args: + input (Tensor): the input tensor + s (Tuple[int], optional): Signal size in the transformed dimensions. + If given, each dimension ``dim[i]`` will either be zero-padded or + trimmed to the length ``s[i]`` before computing the Hermitian IFFT. + If a length ``-1`` is specified, no padding is done in that dimension. + Default: ``s = [input.size(d) for d in dim]`` + dim (Tuple[int], optional): Dimensions to be transformed. + Default: last two dimensions. + norm (str, optional): Normalization mode. For the backward transform + (:func:`~torch.fft.ihfft2`), these correspond to: + + * ``"forward"`` - no normalization + * ``"backward"`` - normalize by ``1/n`` + * ``"ortho"`` - normalize by ``1/sqrt(n)`` (making the Hermitian IFFT orthonormal) + + Where ``n = prod(s)`` is the logical IFFT size. + Calling the forward transform (:func:`~torch.fft.hfft2`) with the same + normalization mode will apply an overall normalization of ``1/n`` between + the two transforms. This is required to make :func:`~torch.fft.ihfft2` + the exact inverse. + + Default is ``"backward"`` (normalize by ``1/n``). + +Keyword args: + {out} + +Example: + + >>> T = torch.rand(10, 10) + >>> t = torch.fft.ihfft2(T) + >>> t.size() + torch.Size([10, 6]) + + Compared against the full output from :func:`~torch.fft.ifft2`, the + one-sided Hermitian output takes up only half the space. + + >>> ifft2 = torch.fft.ifft2(T) + >>> torch.allclose(ifft2[..., :6], t) + True + + The discrete Fourier transform is separable, so :func:`~torch.fft.ihfft2` + here is equivalent to a combination of :func:`~torch.fft.ifft` and + :func:`~torch.fft.ihfft`: + + >>> two_ffts = torch.fft.ifft(torch.fft.ihfft(T, dim=1), dim=0) + >>> torch.allclose(t, two_ffts) + True + +""".format(**common_args)) + +hfftn = _add_docstr(_fft.fft_hfftn, r""" +hfftn(input, s=None, dim=None, norm=None, *, out=None) -> Tensor + +Computes the n-dimensional discrete Fourier transform of a Hermitian symmetric +:attr:`input` signal. + +:attr:`input` is interpreted as a one-sided Hermitian signal in the time +domain. By the Hermitian property, the Fourier transform will be real-valued. + +Note: + :func:`~torch.fft.hfftn`/:func:`~torch.fft.ihfftn` are analogous to + :func:`~torch.fft.rfftn`/:func:`~torch.fft.irfftn`. The real FFT expects + a real signal in the time-domain and gives Hermitian symmetry in the + frequency-domain.
The Hermitian FFT is the opposite; Hermitian symmetric in + the time-domain and real-valued in the frequency-domain. For this reason, + special care needs to be taken with the shape argument :attr:`s`, in the + same way as with :func:`~torch.fft.irfftn`. + +Note: + Some input frequencies must be real-valued to satisfy the Hermitian + property. In these cases the imaginary component will be ignored. + For example, any imaginary component in the zero-frequency term cannot + be represented in a real output and so will always be ignored. + +Note: + The correct interpretation of the Hermitian input depends on the length of + the original data, as given by :attr:`s`. This is because each input shape + could correspond to either an odd or even length signal. By default, the + signal is assumed to be even length and odd signals will not round-trip + properly. It is recommended to always pass the signal shape :attr:`s`. + +Note: + Supports torch.half and torch.chalf on CUDA with GPU Architecture SM53 or greater. + However it only supports powers of 2 signal length in every transformed dimensions. + With default arguments, the size of last dimension should be (2^n + 1) as argument + `s` defaults to even output size = 2 * (last_dim_size - 1) + +Args: + input (Tensor): the input tensor + s (Tuple[int], optional): Signal size in the transformed dimensions. + If given, each dimension ``dim[i]`` will either be zero-padded or + trimmed to the length ``s[i]`` before computing the real FFT. + If a length ``-1`` is specified, no padding is done in that dimension. + Defaults to even output in the last dimension: + ``s[-1] = 2*(input.size(dim[-1]) - 1)``. + dim (Tuple[int], optional): Dimensions to be transformed. + The last dimension must be the half-Hermitian compressed dimension. + Default: all dimensions, or the last ``len(s)`` dimensions if :attr:`s` is given. + norm (str, optional): Normalization mode. For the forward transform + (:func:`~torch.fft.hfftn`), these correspond to: + + * ``"forward"`` - normalize by ``1/n`` + * ``"backward"`` - no normalization + * ``"ortho"`` - normalize by ``1/sqrt(n)`` (making the Hermitian FFT orthonormal) + + Where ``n = prod(s)`` is the logical FFT size. + Calling the backward transform (:func:`~torch.fft.ihfftn`) with the same + normalization mode will apply an overall normalization of ``1/n`` between + the two transforms. This is required to make :func:`~torch.fft.ihfftn` + the exact inverse. + + Default is ``"backward"`` (no normalization). + +Keyword args: + {out} + +Example: + + Starting from a real frequency-space signal, we can generate a + Hermitian-symmetric time-domain signal: + >>> T = torch.rand(10, 9) + >>> t = torch.fft.ihfftn(T) + + Without specifying the output length to :func:`~torch.fft.hfftn`, the + output will not round-trip properly because the input is odd-length in the + last dimension: + + >>> torch.fft.hfftn(t).size() + torch.Size([10, 10]) + + So, it is recommended to always pass the signal shape :attr:`s`. + + >>> roundtrip = torch.fft.hfftn(t, T.size()) + >>> roundtrip.size() + torch.Size([10, 9]) + >>> torch.allclose(roundtrip, T) + True + +""".format(**common_args)) + +ihfftn = _add_docstr(_fft.fft_ihfftn, r""" +ihfftn(input, s=None, dim=None, norm=None, *, out=None) -> Tensor + +Computes the N-dimensional inverse discrete Fourier transform of real :attr:`input`. + +:attr:`input` must be a real-valued signal, interpreted in the Fourier domain. +The n-dimensional IFFT of a real signal is Hermitian-symmetric, +``X[i, j, ...] 
= conj(X[-i, -j, ...])``. :func:`~torch.fft.ihfftn` represents +this in the one-sided form where only the positive frequencies below the +Nyquist frequency are included in the last signal dimension. To compute the +full output, use :func:`~torch.fft.ifftn`. + +Note: + Supports torch.half on CUDA with GPU Architecture SM53 or greater. + However it only supports powers of 2 signal length in every transformed dimension. + +Args: + input (Tensor): the input tensor + s (Tuple[int], optional): Signal size in the transformed dimensions. + If given, each dimension ``dim[i]`` will either be zero-padded or + trimmed to the length ``s[i]`` before computing the Hermitian IFFT. + If a length ``-1`` is specified, no padding is done in that dimension. + Default: ``s = [input.size(d) for d in dim]`` + dim (Tuple[int], optional): Dimensions to be transformed. + Default: all dimensions, or the last ``len(s)`` dimensions if :attr:`s` is given. + norm (str, optional): Normalization mode. For the backward transform + (:func:`~torch.fft.ihfftn`), these correspond to: + + * ``"forward"`` - no normalization + * ``"backward"`` - normalize by ``1/n`` + * ``"ortho"`` - normalize by ``1/sqrt(n)`` (making the Hermitian IFFT orthonormal) + + Where ``n = prod(s)`` is the logical IFFT size. + Calling the forward transform (:func:`~torch.fft.hfftn`) with the same + normalization mode will apply an overall normalization of ``1/n`` between + the two transforms. This is required to make :func:`~torch.fft.ihfftn` + the exact inverse. + + Default is ``"backward"`` (normalize by ``1/n``). + +Keyword args: + {out} + +Example: + + >>> T = torch.rand(10, 10) + >>> ihfftn = torch.fft.ihfftn(T) + >>> ihfftn.size() + torch.Size([10, 6]) + + Compared against the full output from :func:`~torch.fft.ifftn`, we have all + elements up to the Nyquist frequency. + + >>> ifftn = torch.fft.ifftn(T) + >>> torch.allclose(ifftn[..., :6], ihfftn) + True + + The discrete Fourier transform is separable, so :func:`~torch.fft.ihfftn` + here is equivalent to a combination of :func:`~torch.fft.ihfft` and + :func:`~torch.fft.ifft`: + + >>> two_iffts = torch.fft.ifft(torch.fft.ihfft(T, dim=1), dim=0) + >>> torch.allclose(ihfftn, two_iffts) + True + +""".format(**common_args)) + +fftfreq = _add_docstr(_fft.fft_fftfreq, r""" +fftfreq(n, d=1.0, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + +Computes the discrete Fourier Transform sample frequencies for a signal of size :attr:`n`. + +Note: + By convention, :func:`~torch.fft.fft` returns positive frequency terms + first, followed by the negative frequencies in reverse order, so that + ``f[-i]`` for all :math:`0 < i \leq n/2` in Python gives the negative + frequency terms. For an FFT of length :attr:`n` and with inputs spaced in + length unit :attr:`d`, the frequencies are:: + + f = [0, 1, ..., (n - 1) // 2, -(n // 2), ..., -1] / (d * n) + +Note: + For even lengths, the Nyquist frequency at ``f[n/2]`` can be thought of as + either negative or positive. :func:`~torch.fft.fftfreq` follows NumPy's + convention of taking it to be negative. + +Args: + n (int): the FFT length + d (float, optional): The sampling length scale. + The spacing between individual samples of the FFT input. + The default assumes unit spacing; dividing that result by the actual + spacing gives the result in physical frequency units.
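The ``d`` argument is what converts bin indices into physical units. A short sketch (the sampling rate and tone frequency are arbitrary choices): with sample spacing ``d = 1/fs`` the bins come out in Hz, so a pure tone peaks at its own frequency.

import math
import torch

fs, n = 128.0, 128
t = torch.arange(n) / fs
x = torch.sin(2 * math.pi * 16.0 * t)          # a 16 Hz tone, exactly on a bin

freqs = torch.fft.fftfreq(n, d=1 / fs)         # bin frequencies in Hz
peak = freqs[torch.fft.fft(x).abs().argmax()]
assert abs(peak.item()) == 16.0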
+ +Keyword Args: + {out} + {dtype} + {layout} + {device} + {requires_grad} + +Example: + + >>> torch.fft.fftfreq(5) + tensor([ 0.0000, 0.2000, 0.4000, -0.4000, -0.2000]) + + For even input, we can see the Nyquist frequency at ``f[2]`` is given as + negative: + + >>> torch.fft.fftfreq(4) + tensor([ 0.0000, 0.2500, -0.5000, -0.2500]) + +""".format(**factory_common_args)) + +rfftfreq = _add_docstr(_fft.fft_rfftfreq, r""" +rfftfreq(n, d=1.0, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + +Computes the sample frequencies for :func:`~torch.fft.rfft` with a signal of size :attr:`n`. + +Note: + :func:`~torch.fft.rfft` returns Hermitian one-sided output, so only the + positive frequency terms are returned. For a real FFT of length :attr:`n` + and with inputs spaced in length unit :attr:`d`, the frequencies are:: + + f = torch.arange((n + 1) // 2) / (d * n) + +Note: + For even lengths, the Nyquist frequency at ``f[n/2]`` can be thought of as + either negative or positive. Unlike :func:`~torch.fft.fftfreq`, + :func:`~torch.fft.rfftfreq` always returns it as positive. + +Args: + n (int): the real FFT length + d (float, optional): The sampling length scale. + The spacing between individual samples of the FFT input. + The default assumes unit spacing, dividing that result by the actual + spacing gives the result in physical frequency units. + +Keyword Args: + {out} + {dtype} + {layout} + {device} + {requires_grad} + +Example: + + >>> torch.fft.rfftfreq(5) + tensor([0.0000, 0.2000, 0.4000]) + + >>> torch.fft.rfftfreq(4) + tensor([0.0000, 0.2500, 0.5000]) + + Compared to the output from :func:`~torch.fft.fftfreq`, we see that the + Nyquist frequency at ``f[2]`` has changed sign: + >>> torch.fft.fftfreq(4) + tensor([ 0.0000, 0.2500, -0.5000, -0.2500]) + +""".format(**factory_common_args)) + +fftshift = _add_docstr(_fft.fft_fftshift, r""" +fftshift(input, dim=None) -> Tensor + +Reorders n-dimensional FFT data, as provided by :func:`~torch.fft.fftn`, to have +negative frequency terms first. + +This performs a periodic shift of n-dimensional data such that the origin +``(0, ..., 0)`` is moved to the center of the tensor. Specifically, to +``input.shape[dim] // 2`` in each selected dimension. + +Note: + By convention, the FFT returns positive frequency terms first, followed by + the negative frequencies in reverse order, so that ``f[-i]`` for all + :math:`0 < i \leq n/2` in Python gives the negative frequency terms. + :func:`~torch.fft.fftshift` rearranges all frequencies into ascending order + from negative to positive with the zero-frequency term in the center. + +Note: + For even lengths, the Nyquist frequency at ``f[n/2]`` can be thought of as + either negative or positive. :func:`~torch.fft.fftshift` always puts the + Nyquist term at the 0-index. This is the same convention used by + :func:`~torch.fft.fftfreq`. + +Args: + input (Tensor): the tensor in FFT order + dim (int, Tuple[int], optional): The dimensions to rearrange. + Only dimensions specified here will be rearranged, any other dimensions + will be left in their original order. + Default: All dimensions of :attr:`input`. + +Example: + + >>> f = torch.fft.fftfreq(4) + >>> f + tensor([ 0.0000, 0.2500, -0.5000, -0.2500]) + + >>> torch.fft.fftshift(f) + tensor([-0.5000, -0.2500, 0.0000, 0.2500]) + + Also notice that the Nyquist frequency term at ``f[2]`` was moved to the + beginning of the tensor. 
+ + This also works for multi-dimensional transforms: + + >>> x = torch.fft.fftfreq(5, d=1/5) + 0.1 * torch.fft.fftfreq(5, d=1/5).unsqueeze(1) + >>> x + tensor([[ 0.0000, 1.0000, 2.0000, -2.0000, -1.0000], + [ 0.1000, 1.1000, 2.1000, -1.9000, -0.9000], + [ 0.2000, 1.2000, 2.2000, -1.8000, -0.8000], + [-0.2000, 0.8000, 1.8000, -2.2000, -1.2000], + [-0.1000, 0.9000, 1.9000, -2.1000, -1.1000]]) + + >>> torch.fft.fftshift(x) + tensor([[-2.2000, -1.2000, -0.2000, 0.8000, 1.8000], + [-2.1000, -1.1000, -0.1000, 0.9000, 1.9000], + [-2.0000, -1.0000, 0.0000, 1.0000, 2.0000], + [-1.9000, -0.9000, 0.1000, 1.1000, 2.1000], + [-1.8000, -0.8000, 0.2000, 1.2000, 2.2000]]) + + :func:`~torch.fft.fftshift` can also be useful for spatial data. If our + data is defined on a centered grid (``[-(N//2), (N-1)//2]``) then we can + use the standard FFT defined on an uncentered grid (``[0, N)``) by first + applying an :func:`~torch.fft.ifftshift`. + + >>> x_centered = torch.arange(-5, 5) + >>> x_uncentered = torch.fft.ifftshift(x_centered) + >>> fft_uncentered = torch.fft.fft(x_uncentered) + + Similarly, we can convert the frequency domain components to centered + convention by applying :func:`~torch.fft.fftshift`. + + >>> fft_centered = torch.fft.fftshift(fft_uncentered) + + The inverse transform, from centered Fourier space back to centered spatial + data, can be performed by applying the inverse shifts in reverse order: + + >>> x_centered_2 = torch.fft.fftshift(torch.fft.ifft(torch.fft.ifftshift(fft_centered))) + >>> torch.testing.assert_close(x_centered.to(torch.complex64), x_centered_2, check_stride=False) + + +""") + +ifftshift = _add_docstr(_fft.fft_ifftshift, r""" +ifftshift(input, dim=None) -> Tensor + +Inverse of :func:`~torch.fft.fftshift`. + +Args: + input (Tensor): the tensor in FFT order + dim (int, Tuple[int], optional): The dimensions to rearrange. + Only dimensions specified here will be rearranged, any other dimensions + will be left in their original order. + Default: All dimensions of :attr:`input`. 
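As a compact sketch of the shift conventions above (nothing assumed beyond the public API), the usual pattern is to shift the frequency bins and the spectrum together so the frequencies run in ascending order, and :func:`~torch.fft.ifftshift` undoes the shift exactly.

import torch

x = torch.randn(8)
spectrum = torch.fft.fft(x)

freqs = torch.fft.fftshift(torch.fft.fftfreq(x.numel()))
centered = torch.fft.fftshift(spectrum)

assert torch.all(freqs[:-1] < freqs[1:])                             # ascending bins
torch.testing.assert_close(torch.fft.ifftshift(centered), spectrum)  # exact undo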
+ +Example: + + >>> f = torch.fft.fftfreq(5) + >>> f + tensor([ 0.0000, 0.2000, 0.4000, -0.4000, -0.2000]) + + A round-trip through :func:`~torch.fft.fftshift` and + :func:`~torch.fft.ifftshift` gives the same result: + + >>> shifted = torch.fft.fftshift(f) + >>> torch.fft.ifftshift(shifted) + tensor([ 0.0000, 0.2000, 0.4000, -0.4000, -0.2000]) + +""") diff --git a/llmeval-env/lib/python3.10/site-packages/torch/fft/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/fft/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b3f099684a2ac7eccf7ca2a05f1b744dd42fab0f Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/fft/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/futures/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/futures/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..6a398bebb599cb4b005ae76ea318e752dcd99a52 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/futures/__init__.py @@ -0,0 +1,318 @@ +from __future__ import annotations + +from typing import cast, Callable, Generic, List, Optional, Type, TypeVar, Union + +import torch + +__all__ = ['Future', 'collect_all', 'wait_all'] + +T = TypeVar("T") +S = TypeVar("S") + + +class _PyFutureMeta(type(torch._C.Future), type(Generic)): # type: ignore[misc, no-redef] + pass + + +class Future(torch._C.Future, Generic[T], metaclass=_PyFutureMeta): + r""" + Wrapper around a ``torch._C.Future`` which encapsulates an asynchronous + execution of a callable, e.g. :meth:`~torch.distributed.rpc.rpc_async`. It + also exposes a set of APIs to add callback functions and set results. + + .. warning:: GPU support is a beta feature, subject to changes. + """ + + def __init__(self, *, devices: Optional[List[Union[int, str, torch.device]]] = None): + r""" + Create an empty unset ``Future``. If the future is intended to hold + values containing CUDA tensors, (a superset of) their CUDA devices must + be specified at construction. (This is only supported if + ``torch.cuda.is_available()`` returns ``True``). This is needed to + ensure proper CUDA stream synchronization. The child futures, returned + by the ``then`` method, will inherit these devices. + + Args: + devices(``List[Union[int, str, torch.device]]``, optional): the set + of devices on which tensors contained in this future's value are + allowed to reside and on which callbacks are allowed to operate. + """ + if devices is None: + devices = [] + super().__init__([torch.device(d) for d in devices]) + + def done(self) -> bool: + r""" + Return ``True`` if this ``Future`` is done. A ``Future`` is done if it + has a result or an exception. + + If the value contains tensors that reside on GPUs, ``Future.done()`` + will return ``True`` even if the asynchronous kernels that are + populating those tensors haven't yet completed running on the device, + because at such stage the result is already usable, provided one + performs the appropriate synchronizations (see :meth:`wait`). + """ + return super().done() + + def wait(self) -> T: + r""" + Block until the value of this ``Future`` is ready. + + If the value contains tensors that reside on GPUs, then an additional + synchronization is performed with the kernels (executing on the device) + which may be asynchronously populating those tensors. 
Such sync is + non-blocking, which means that ``wait()`` will insert the necessary + instructions in the current streams to ensure that further operations + enqueued on those streams will be properly scheduled after the async + kernels but, once that is done, ``wait()`` will return, even if those + kernels are still running. No further synchronization is required when + accessing and using the values, as long as one doesn't change streams. + + Returns: + The value held by this ``Future``. If the function (callback or RPC) + creating the value has thrown an error, this ``wait`` method will + also throw an error. + """ + return super().wait() + + def value(self) -> T: + r""" + Obtain the value of an already-completed future. + + This method should only be called after a call to :meth:`wait` has + completed, or inside a callback function passed to :meth:`then`. In + other cases this ``Future`` may not yet hold a value and calling + ``value()`` could fail. + + If the value contains tensors that reside on GPUs, then this method will + *not* perform any additional synchronization. This should be done + beforehand, separately, through a call to :meth:`wait` (except within + callbacks, for which it's already being taken care of by :meth:`then`). + + Returns: + The value held by this ``Future``. If the function (callback or RPC) + creating the value has thrown an error, this ``value()`` method will + also throw an error. + """ + return super().value() + + def then(self, callback: Callable[[Future[T]], S]) -> Future[S]: + r""" + Append the given callback function to this ``Future``, which will be run + when the ``Future`` is completed. Multiple callbacks can be added to + the same ``Future``, but the order in which they will be executed cannot + be guaranteed (to enforce a certain order consider chaining: + ``fut.then(cb1).then(cb2)``). The callback must take one argument, which + is the reference to this ``Future``. The callback function can use the + :meth:`value` method to get the value. Note that if this ``Future`` is + already completed, the given callback will be run immediately inline. + + If the ``Future``'s value contains tensors that reside on GPUs, the + callback might be invoked while the async kernels that are populating + those tensors haven't yet finished executing on the device. However, the + callback will be invoked with some dedicated streams set as current + (fetched from a global pool) which will be synchronized with those + kernels. Hence any operation performed by the callback on these tensors + will be scheduled on the device after the kernels complete. In other + words, as long as the callback doesn't switch streams, it can safely + manipulate the result without any additional synchronization. This is + similar to the non-blocking behavior of :meth:`wait`. + + Similarly, if the callback returns a value that contains tensors that + reside on a GPU, it can do so even if the kernels that are producing + these tensors are still running on the device, as long as the callback + didn't change streams during its execution. If one wants to change + streams, one must be careful to re-synchronize them with the original + streams, that is, those that were current when the callback was invoked. + + Args: + callback(``Callable``): a ``Callable`` that takes this ``Future`` as + the only argument. + + Returns: + A new ``Future`` object that holds the return value of the + ``callback`` and will be marked as completed when the given + ``callback`` finishes. + + .. 
note:: Note that if the callback function throws, either + through the original future being completed with an exception and + calling ``fut.wait()``, or through other code in the callback, the + future returned by ``then`` will be marked appropriately with the + encountered error. However, if this callback later completes + additional futures, those futures are not marked as completed with + an error and the user is responsible for handling completion/waiting + on those futures independently. + + Example:: + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_FUTURES) + >>> def callback(fut): + ... print(f"RPC return value is {fut.wait()}.") + >>> fut = torch.futures.Future() + >>> # The inserted callback will print the return value when + >>> # receiving the response from "worker1" + >>> cb_fut = fut.then(callback) + >>> chain_cb_fut = cb_fut.then( + ... lambda x : print(f"Chained cb done. {x.wait()}") + ... ) + >>> fut.set_result(5) + RPC return value is 5. + Chained cb done. None + """ + return cast(Future[S], super().then(callback)) + + def add_done_callback(self, callback: Callable[[Future[T]], None]) -> None: + r""" + Append the given callback function to this ``Future``, which will be run + when the ``Future`` is completed. Multiple callbacks can be added to + the same ``Future``, but the order in which they will be executed cannot + be guaranteed. The callback must take one argument, which is the + reference to this ``Future``. The callback function can use the + :meth:`value` method to get the value. Note that if this ``Future`` is + already completed, the given callback will be run inline. + + We recommend that you use the :meth:`then` method as it provides a way + to synchronize after your callback has completed. ``add_done_callback`` + can be cheaper if your callback does not return anything. But both + :meth:`then` and ``add_done_callback`` use the same callback + registration API under the hood. + + With respect to GPU tensors, this method behaves in the same way as + :meth:`then`. + + Args: + callback(``Future``): a ``Callable`` that takes in one argument, + which is the reference to this ``Future``. + + .. note:: Note that if the callback function throws, either + through the original future being completed with an exception and + calling ``fut.wait()``, or through other code in the callback, + error handling must be carefully taken care of. For example, if + this callback later completes additional futures, those futures are + not marked as completed with an error and the user is responsible + for handling completion/waiting on those futures independently. + + Example:: + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_FUTURES) + >>> def callback(fut): + ... print("This will run after the future has finished.") + ... print(fut.wait()) + >>> fut = torch.futures.Future() + >>> fut.add_done_callback(callback) + >>> fut.set_result(5) + This will run after the future has finished. + 5 + """ + super().add_done_callback(callback) + + def set_result(self, result: T) -> None: + r""" + Set the result for this ``Future``, which will mark this ``Future`` as + completed and trigger all attached callbacks. Note that a ``Future`` + cannot be marked completed twice. + + If the result contains tensors that reside on GPUs, this method can be + called even if the asynchronous kernels that are populating those + tensors haven't yet completed running on the device, provided that the + streams on which those kernels were enqueued are set as the current ones + when this method is called. 
Put simply, it's safe to call this method + immediately after launching those kernels, without any additional + synchronization, as long as one doesn't change streams in between. This + method will record events on all the relevant current streams and will + use them to ensure proper scheduling for all the consumers of this + ``Future``. + + Args: + result (object): the result object of this ``Future``. + + Example:: + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_FUTURES) + >>> import threading + >>> import time + >>> def slow_set_future(fut, value): + ... time.sleep(0.5) + ... fut.set_result(value) + >>> fut = torch.futures.Future() + >>> t = threading.Thread( + ... target=slow_set_future, + ... args=(fut, torch.ones(2) * 3) + ... ) + >>> t.start() + >>> print(fut.wait()) + tensor([3., 3.]) + >>> t.join() + """ + super().set_result(result) + + def set_exception(self, result: T) -> None: + r""" + Set an exception for this ``Future``, which will mark this ``Future`` as + completed with an error and trigger all attached callbacks. Note that + when calling wait()/value() on this ``Future``, the exception set here + will be raised inline. + + Args: + result (BaseException): the exception for this ``Future``. + + Example:: + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_FUTURES) + >>> fut = torch.futures.Future() + >>> fut.set_exception(ValueError("foo")) + >>> fut.wait() + Traceback (most recent call last): + ... + ValueError: foo + """ + assert isinstance(result, Exception), f"{result} is of type {type(result)}, not an Exception." + + def raise_error(fut_result): + raise fut_result + + super()._set_unwrap_func(raise_error) + self.set_result(result) # type: ignore[arg-type] + + +def collect_all(futures: List[Future]) -> Future[List[Future]]: + r""" + Collects the provided :class:`~torch.futures.Future` objects into a single + combined :class:`~torch.futures.Future` that is completed when all of the + sub-futures are completed. + + Args: + futures (list): a list of :class:`~torch.futures.Future` objects. + + Returns: + Returns a :class:`~torch.futures.Future` object to a list of the passed + in Futures. + + Example:: + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_FUTURES) + >>> fut0 = torch.futures.Future() + >>> fut1 = torch.futures.Future() + >>> fut = torch.futures.collect_all([fut0, fut1]) + >>> fut0.set_result(0) + >>> fut1.set_result(1) + >>> fut_list = fut.wait() + >>> print(f"fut0 result = {fut_list[0].wait()}") + fut0 result = 0 + >>> print(f"fut1 result = {fut_list[1].wait()}") + fut1 result = 1 + """ + return cast(Future[List[Future]], torch._C._collect_all(cast(List[torch._C.Future], futures))) + + +def wait_all(futures: List[Future]) -> List: + r""" + Waits for all provided futures to be complete, and returns + the list of completed values. If any of the futures encounters an error, + the method will exit early and report the error not waiting for other + futures to complete. + + Args: + futures (list): a list of :class:`~torch.futures.Future` object. + + Returns: + A list of the completed :class:`~torch.futures.Future` results. This + method will throw an error if ``wait`` on any + :class:`~torch.futures.Future` throws. 
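A small usage sketch of the two helpers (the values and names here are purely illustrative): ``wait_all`` blocks and returns the unwrapped results, while ``collect_all`` returns a future wrapping the completed sub-futures.

import torch

futs = [torch.futures.Future() for _ in range(3)]
for i, fut in enumerate(futs):
    fut.set_result(i * 10)

# wait_all returns the values directly.
assert torch.futures.wait_all(futs) == [0, 10, 20]

# collect_all returns a Future over the (completed) sub-futures.
collected = torch.futures.collect_all(futs).wait()
assert [f.value() for f in collected] == [0, 10, 20]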
+ """ + return [fut.wait() for fut in torch._C._collect_all(cast(List[torch._C.Future], futures)).wait()] diff --git a/llmeval-env/lib/python3.10/site-packages/torch/futures/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/futures/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..caee502880ec4a5f19906c02ce0b99d8bbbfbd23 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/futures/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3d317b7c09f20a40ac26acf0c4474c43d4cafda9 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/__init__.py @@ -0,0 +1,53 @@ +from .modules import * # noqa: F403 +from .parameter import ( + Parameter as Parameter, + UninitializedParameter as UninitializedParameter, + UninitializedBuffer as UninitializedBuffer, +) +from .parallel import DataParallel as DataParallel +from . import init +from . import functional +from . import utils +from . import attention + + +def factory_kwargs(kwargs): + r"""Return a canonicalized dict of factory kwargs. + + Given kwargs, returns a canonicalized dict of factory kwargs that can be directly passed + to factory functions like torch.empty, or errors if unrecognized kwargs are present. + + This function makes it simple to write code like this:: + + class MyModule(nn.Module): + def __init__(self, **kwargs): + factory_kwargs = torch.nn.factory_kwargs(kwargs) + self.weight = Parameter(torch.empty(10, **factory_kwargs)) + + Why should you use this function instead of just passing `kwargs` along directly? + + 1. This function does error validation, so if there are unexpected kwargs we will + immediately report an error, instead of deferring it to the factory call + 2. This function supports a special `factory_kwargs` argument, which can be used to + explicitly specify a kwarg to be used for factory functions, in the event one of the + factory kwargs conflicts with an already existing argument in the signature (e.g. 
+ in the signature ``def f(dtype, **kwargs)``, you can specify ``dtype`` for factory + functions, as distinct from the dtype argument, by saying + ``f(dtype1, factory_kwargs={"dtype": dtype2})``) + """ + if kwargs is None: + return {} + simple_keys = {"device", "dtype", "memory_format"} + expected_keys = simple_keys | {"factory_kwargs"} + if not kwargs.keys() <= expected_keys: + raise TypeError(f"unexpected kwargs {kwargs.keys() - expected_keys}") + + # guarantee no input kwargs is untouched + r = dict(kwargs.get("factory_kwargs", {})) + for k in simple_keys: + if k in kwargs: + if k in r: + raise TypeError(f"{k} specified twice, in **kwargs and in factory_kwargs") + r[k] = kwargs[k] + + return r diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/__pycache__/_reduction.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/nn/__pycache__/_reduction.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..46027444525c188d81c3e266121a9d82820ab350 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/nn/__pycache__/_reduction.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/__pycache__/cpp.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/nn/__pycache__/cpp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a0c839841c0a2bb8ece1a6aefafe0fd941b8e6e1 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/nn/__pycache__/cpp.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/__pycache__/grad.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/nn/__pycache__/grad.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9ea9352bdbd0981d5383e7944a8086c218c3ce03 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/nn/__pycache__/grad.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/__pycache__/init.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/nn/__pycache__/init.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7351298ca2c5d2cc8527af0e64b391336e675341 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/nn/__pycache__/init.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/__pycache__/parameter.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/nn/__pycache__/parameter.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b5b25968037c3f6465fed5e4be8243f5ef2c9314 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/nn/__pycache__/parameter.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/_reduction.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/_reduction.py new file mode 100644 index 0000000000000000000000000000000000000000..ac2a8bb0a0e9eda779073176bcc209f326011600 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/_reduction.py @@ -0,0 +1,47 @@ +from typing import Optional +import warnings + +# NB: Keep this file in sync with enums in aten/src/ATen/core/Reduction.h + + +def get_enum(reduction: str) -> int: + if reduction == 'none': + ret = 0 + elif reduction == 'mean': + ret = 1 + elif reduction == 'elementwise_mean': + warnings.warn("reduction='elementwise_mean' is deprecated, please use reduction='mean' instead.") + ret = 1 + elif reduction == 
'sum': + ret = 2 + else: + ret = -1 # TODO: remove once JIT exceptions support control flow + raise ValueError(f"{reduction} is not a valid value for reduction") + return ret + +# In order to support previous versions, accept boolean size_average and reduce +# and convert them into the new constants for now + + +# We use these functions in torch/legacy as well, in which case we'll silence the warning +def legacy_get_string(size_average: Optional[bool], reduce: Optional[bool], emit_warning: bool = True) -> str: + warning = "size_average and reduce args will be deprecated, please use reduction='{}' instead." + + if size_average is None: + size_average = True + if reduce is None: + reduce = True + + if size_average and reduce: + ret = 'mean' + elif reduce: + ret = 'sum' + else: + ret = 'none' + if emit_warning: + warnings.warn(warning.format(ret)) + return ret + + +def legacy_get_enum(size_average: Optional[bool], reduce: Optional[bool], emit_warning: bool = True) -> int: + return get_enum(legacy_get_string(size_average, reduce, emit_warning)) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/attention/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/attention/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..fca8055ad253e48bed216dfc43a34a8f11a99913 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/attention/__init__.py @@ -0,0 +1,117 @@ +""" This module contains functions and classes that alter the behavior of torch.nn.functional.scaled_dot_product_attention """ +import contextlib +from typing import List, Union +from warnings import warn + +from torch.backends.cuda import ( + can_use_efficient_attention, + can_use_flash_attention, + enable_flash_sdp, + enable_math_sdp, + enable_mem_efficient_sdp, + flash_sdp_enabled, + math_sdp_enabled, + mem_efficient_sdp_enabled, + SDPAParams, +) + +__all__: List[str] = ["SDPBackend", "sdpa_kernel", "WARN_FOR_UNFUSED_KERNELS"] + +# Note: [SDPA warnings] +# TODO: Consider using this for sdpa regardless of subclasses +# This only effects users of bias subclasses +# If this is set to True, we will warn the user if they are not using the fused kernels +# As well, it will raise warnings for all the reasons why the fused kernels can't be run. +# To set this to True, run +# torch.nn.attention.WARN_FOR_UNFUSED_KERNELS = True +WARN_FOR_UNFUSED_KERNELS = False + + +from torch._C import _SDPBackend as SDPBackend + +# Hacks for Sphinx documentation: +# https://stackoverflow.com/questions/38765577/overriding-sphinx-autodoc-alias-of-for-import-of-private-class +SDPBackend = SDPBackend +r"""An enum-like class that contains the different backends for scaled dot product attention. + This backend class is designed to be used with the sdpa_kernel context manager. + + The following Enums are available: + - ERROR: An error occurred when trying to determine the backend. + - MATH: The math backend for scaled dot product attention. + - FLASH_ATTENTION: The flash attention backend for scaled dot product attention. + - EFFICIENT_ATTENTION: The efficient attention backend for scaled dot product attention. + - CUDNN_ATTENTION: The cuDNN backend for scaled dot product attention. + + See :func:`torch.nn.attention.sdpa_kernel` for more details. + + .. warning:: This class is in beta and subject to change. 
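The ``_reduction`` helpers above map user-facing ``reduction`` strings (and the legacy ``size_average``/``reduce`` flags) onto the integer enum shared with ATen; a small sketch of that mapping::

    from torch.nn import _reduction as _Reduction

    assert _Reduction.get_enum("none") == 0
    assert _Reduction.get_enum("mean") == 1
    assert _Reduction.get_enum("sum") == 2
    # Legacy boolean flags are first translated to the new strings.
    assert _Reduction.legacy_get_string(True, True, emit_warning=False) == "mean"
    assert _Reduction.legacy_get_enum(None, False, emit_warning=False) == 0  # 'none'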
+""" +SDPBackend.__module__ = __name__ +SDPBackend.__name__ = "SDPBackend" + + +def _raise_kernel_warnings(params: SDPAParams) -> None: + """ + If WARN_FOR_UNFUSED_KERNELS is set to True, this will raise warnings + for all the reasons why the fused kernels can't be run. If using subclasses + """ + if WARN_FOR_UNFUSED_KERNELS: + if not can_use_efficient_attention(params): + warn("Efficient attention can't be used because:") + can_use_efficient_attention(params, True) + if not can_use_flash_attention(params): + warn("Flash attention can't be used because:") + can_use_flash_attention(params, True) + + +@contextlib.contextmanager +def sdpa_kernel(backends: Union[List[SDPBackend], SDPBackend]): + r""" + Context manager to select which backend to use for scaled dot product attention. + + .. warning:: This function is beta and subject to change. + + Args: + backend (Union[List[SDPBackend], SDPBackend]): A backend or list of backends for scaled dot product attention. + + Example: + + .. code-block:: python + + from torch.nn.functional import scaled_dot_product_attention + from torch.nn.attention import SDPBackend, sdpa_kernel + # Only enable flash attention backend + with sdpa_kernel(SDPBackend.FLASH_ATTENTION): + scaled_dot_product_attention(...) + + # Enable the Math or Efficient attention backends + with sdpa_kernel([SDPBackend.MATH, SDPBackend.EFFICIENT_ATTENTION]): + scaled_dot_product_attention(...) + + This context manager can be used to select which backend to use for scaled dot product attention. + Upon exiting the context manager, the previous state of the flags will be restored, enabling all backends. + """ + assert isinstance( + backends, (list, SDPBackend) + ), "Backend must be an instance of SDPBackend or a list of SDPBackend instances" + + if isinstance(backends, SDPBackend): + backends = [backends] + + backends = set(backends) + previous_flash: bool = flash_sdp_enabled() + previous_mem_efficient: bool = mem_efficient_sdp_enabled() + previous_math: bool = math_sdp_enabled() + try: + enable_flash = SDPBackend.FLASH_ATTENTION in backends + enable_mem_efficient = SDPBackend.EFFICIENT_ATTENTION in backends + enable_math = SDPBackend.MATH in backends + + enable_flash_sdp(enable_flash) + enable_mem_efficient_sdp(enable_mem_efficient) + enable_math_sdp(enable_math) + yield {} + finally: + enable_flash_sdp(previous_flash) + enable_mem_efficient_sdp(previous_mem_efficient) + enable_math_sdp(previous_math) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/common_types.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/common_types.py new file mode 100644 index 0000000000000000000000000000000000000000..884f739e27813a38e8364469bb9659c09ea410b3 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/common_types.py @@ -0,0 +1,42 @@ +from typing import TypeVar, Union, Tuple, Optional +from .. import Tensor + +# Create some useful type aliases + +# Template for arguments which can be supplied as a tuple, or which can be a scalar which PyTorch will internally +# broadcast to a tuple. +# Comes in several variants: A tuple of unknown size, and a fixed-size tuple for 1d, 2d, or 3d operations. 
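A standalone illustration of the scalar-or-tuple convention described in the comment above (``Size2`` and ``to_pair`` are illustrative names, not part of the module)::

    from typing import Tuple, Union

    Size2 = Union[int, Tuple[int, int]]  # what _scalar_or_tuple_2_t[int] expands to

    def to_pair(kernel_size: Size2) -> Tuple[int, int]:
        # Broadcast a scalar to a pair, mirroring torch.nn.modules.utils._pair.
        return (kernel_size, kernel_size) if isinstance(kernel_size, int) else kernel_size

    print(to_pair(3))       # (3, 3)
    print(to_pair((3, 5)))  # (3, 5)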
+T = TypeVar('T') +_scalar_or_tuple_any_t = Union[T, Tuple[T, ...]] +_scalar_or_tuple_1_t = Union[T, Tuple[T]] +_scalar_or_tuple_2_t = Union[T, Tuple[T, T]] +_scalar_or_tuple_3_t = Union[T, Tuple[T, T, T]] +_scalar_or_tuple_4_t = Union[T, Tuple[T, T, T, T]] +_scalar_or_tuple_5_t = Union[T, Tuple[T, T, T, T, T]] +_scalar_or_tuple_6_t = Union[T, Tuple[T, T, T, T, T, T]] + +# For arguments which represent size parameters (eg, kernel size, padding) +_size_any_t = _scalar_or_tuple_any_t[int] +_size_1_t = _scalar_or_tuple_1_t[int] +_size_2_t = _scalar_or_tuple_2_t[int] +_size_3_t = _scalar_or_tuple_3_t[int] +_size_4_t = _scalar_or_tuple_4_t[int] +_size_5_t = _scalar_or_tuple_5_t[int] +_size_6_t = _scalar_or_tuple_6_t[int] + +# For arguments which represent optional size parameters (eg, adaptive pool parameters) +_size_any_opt_t = _scalar_or_tuple_any_t[Optional[int]] +_size_2_opt_t = _scalar_or_tuple_2_t[Optional[int]] +_size_3_opt_t = _scalar_or_tuple_3_t[Optional[int]] + +# For arguments that represent a ratio to adjust each dimension of an input with (eg, upsampling parameters) +_ratio_2_t = _scalar_or_tuple_2_t[float] +_ratio_3_t = _scalar_or_tuple_3_t[float] +_ratio_any_t = _scalar_or_tuple_any_t[float] + +_tensor_list_t = _scalar_or_tuple_any_t[Tensor] + +# For the return value of max pooling operations that may or may not return indices. +# With the proposed 'Literal' feature to Python typing, it might be possible to +# eventually eliminate this. +_maybe_indices_t = _scalar_or_tuple_2_t[Tensor] diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/cpp.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/cpp.py new file mode 100644 index 0000000000000000000000000000000000000000..a08c7b3141001613c1aa22851dde97f4dd48275c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/cpp.py @@ -0,0 +1,88 @@ +"""Functionality for Python <-> C++ frontend inter-op.""" + +from torch import nn + + +class OrderedDictWrapper: + """A wrapper around a C++ OrderedDict. + + It dynamically evaluates the OrderedDict getter on a bound C++ module, such + that new changes on the C++ side are picked up. Otherwise accessing e.g. + ``cpp_module._parameters`` just once would get a frozen copy of the parameters + at the time of access. ``torch.nn.Module`` accesses ``_parameters`` et al. via ``self.__dict__`` + so using properties does not work. + """ + + def __init__(self, cpp_module, attr): + self.cpp_module = cpp_module + self.attr = attr + + @property + def cpp_dict(self): + return getattr(self.cpp_module, self.attr) + + # Magic methods cannot be assigned dynamically and bypass ``getattr``, so we + # must manually override them. + + def items(self): + return self.cpp_dict.items() + + def keys(self): + return self.cpp_dict.keys() + + def values(self): + return self.cpp_dict.values() + + def __iter__(self): + return self.cpp_dict.__iter__() + + def __len__(self): + return self.cpp_dict.__len__() + + def __contains__(self, key): + return self.cpp_dict.__contains__(key) + + def __getitem__(self, key): + return self.cpp_dict.__getitem__(key) + + +class ModuleWrapper(nn.Module): + """A subclass of ``torch.nn.Module`` that wraps a C++ frontend module and delegates all access.""" + + def __init__(self, cpp_module): + # Assign before the super class constructor so ``self.training`` can be + # assigned to in the super class constructor. 
+ self.cpp_module = cpp_module + super().__init__() + self._parameters = OrderedDictWrapper(cpp_module, "_parameters") # type: ignore[assignment] + self._buffers: OrderedDictWrapper = OrderedDictWrapper(cpp_module, "_buffers") # type: ignore[assignment] + self._modules: OrderedDictWrapper = OrderedDictWrapper(cpp_module, "_modules") # type: ignore[assignment] + for attr in dir(cpp_module): + # Skip magic methods and the three attributes above. + if not attr.startswith("_"): + setattr(self, attr, getattr(self.cpp_module, attr)) + + def _apply(self, fn, recurse=True): + for param in self.parameters(): + # Tensors stored in modules are graph leaves, and we don't + # want to create copy nodes, so we have to unpack the data. + param.data = fn(param.data) + if param._grad is not None: + param._grad.data = fn(param._grad.data) + + for buf in self.buffers(): + buf.data = fn(buf.data) + + return self + + # nn.Module defines training as a boolean + @property # type: ignore[override] + def training(self): + return self.cpp_module.training + + @training.setter + def training(self, mode): + self.cpp_module.train(mode) + + def __repr__(self): + return self.cpp_module.__repr__() diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/functional.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/functional.py new file mode 100644 index 0000000000000000000000000000000000000000..a2107a00d678a7b79f3e1eb0b0abb87d3c0a268c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/functional.py @@ -0,0 +1,5512 @@ +"""Functional interface.""" +from typing import Callable, List, Optional, Tuple, Union +import math +import warnings +import importlib + +try: + import numpy as np +except ModuleNotFoundError: + np = None + +import torch +from torch import _VF +from torch import sym_int as _sym_int +from torch._C import _infer_size, _add_docstr +from torch._torch_docs import reproducibility_notes, tf32_notes, sparse_support_notes +# A workaround to support both TorchScript and MyPy: +from typing import TYPE_CHECKING +if TYPE_CHECKING: + from torch.types import _dtype as DType +else: + # The JIT doesn't understand Union, nor torch.dtype here + DType = int + +from .._jit_internal import boolean_dispatch, _overload, BroadcastingList1, BroadcastingList2, BroadcastingList3 +from ..overrides import ( + has_torch_function, has_torch_function_unary, has_torch_function_variadic, + handle_torch_function) +from . import _reduction as _Reduction +from . import grad # noqa: F401 +from .modules import utils +from .modules.utils import _single, _pair, _triple, _list_with_default + +Tensor = torch.Tensor + +conv1d = _add_docstr( + torch.conv1d, + r""" +conv1d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1) -> Tensor + +Applies a 1D convolution over an input signal composed of several input +planes. + +{tf32_note} + +See :class:`~torch.nn.Conv1d` for details and output shape. + +Note: + {cudnn_reproducibility_note} + +Note: + This operator supports complex data types i.e. ``complex32, complex64, complex128``. +""".format( + **reproducibility_notes, **tf32_notes + ) + + r""" + +Args: + input: input tensor of shape :math:`(\text{minibatch} , \text{in\_channels} , iW)` + weight: filters of shape :math:`(\text{out\_channels} , \frac{\text{in\_channels}}{\text{groups}} , kW)` + bias: optional bias of shape :math:`(\text{out\_channels})`. Default: ``None`` + stride: the stride of the convolving kernel. Can be a single number or + a one-element tuple `(sW,)`. 
Default: 1 + padding: implicit paddings on both sides of the input. Can be a string {'valid', 'same'}, + single number or a one-element tuple `(padW,)`. Default: 0 + ``padding='valid'`` is the same as no padding. ``padding='same'`` pads + the input so the output has the same shape as the input. However, this mode + doesn't support any stride values other than 1. + + .. warning:: + For ``padding='same'``, if the ``weight`` is even-length and + ``dilation`` is odd in any dimension, a full :func:`pad` operation + may be needed internally. Lowering performance. + dilation: the spacing between kernel elements. Can be a single number or + a one-element tuple `(dW,)`. Default: 1 + groups: split input into groups, :math:`\text{in\_channels}` should be divisible by + the number of groups. Default: 1 + +Examples:: + + >>> inputs = torch.randn(33, 16, 30) + >>> filters = torch.randn(20, 16, 5) + >>> F.conv1d(inputs, filters) +""", +) + +conv2d = _add_docstr( + torch.conv2d, + r""" +conv2d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1) -> Tensor + +Applies a 2D convolution over an input image composed of several input +planes. + +{tf32_note} + +See :class:`~torch.nn.Conv2d` for details and output shape. + +Note: + {cudnn_reproducibility_note} + +Note: + This operator supports complex data types i.e. ``complex32, complex64, complex128``. +""".format( + **reproducibility_notes, **tf32_notes + ) + + r""" + +Args: + input: input tensor of shape :math:`(\text{minibatch} , \text{in\_channels} , iH , iW)` + weight: filters of shape :math:`(\text{out\_channels} , \frac{\text{in\_channels}}{\text{groups}} , kH , kW)` + bias: optional bias tensor of shape :math:`(\text{out\_channels})`. Default: ``None`` + stride: the stride of the convolving kernel. Can be a single number or a + tuple `(sH, sW)`. Default: 1 + padding: implicit paddings on both sides of the input. Can be a string {'valid', 'same'}, + single number or a tuple `(padH, padW)`. Default: 0 + ``padding='valid'`` is the same as no padding. ``padding='same'`` pads + the input so the output has the same shape as the input. However, this mode + doesn't support any stride values other than 1. + + .. warning:: + For ``padding='same'``, if the ``weight`` is even-length and + ``dilation`` is odd in any dimension, a full :func:`pad` operation + may be needed internally. Lowering performance. + + dilation: the spacing between kernel elements. Can be a single number or + a tuple `(dH, dW)`. Default: 1 + groups: split input into groups, both :math:`\text{in\_channels}` and :math:`\text{out\_channels}` + should be divisible by the number of groups. Default: 1 + +Examples:: + + >>> # With square kernels and equal stride + >>> filters = torch.randn(8, 4, 3, 3) + >>> inputs = torch.randn(1, 4, 5, 5) + >>> F.conv2d(inputs, filters, padding=1) +""", +) # noqa: E501 + +conv3d = _add_docstr( + torch.conv3d, + r""" +conv3d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1) -> Tensor + +Applies a 3D convolution over an input image composed of several input +planes. + +{tf32_note} + +See :class:`~torch.nn.Conv3d` for details and output shape. + +Note: + {cudnn_reproducibility_note} + +Note: + This operator supports complex data types i.e. ``complex32, complex64, complex128``. 
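A small sketch of the ``padding='same'`` behaviour described above: the output keeps the input's spatial size (only ``stride=1`` is supported in this mode)::

    import torch
    import torch.nn.functional as F

    x = torch.randn(1, 4, 5, 5)
    w = torch.randn(8, 4, 3, 3)
    out = F.conv2d(x, w, padding="same")
    print(out.shape)  # torch.Size([1, 8, 5, 5])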
+""".format( + **reproducibility_notes, **tf32_notes + ) + + r""" + +Args: + input: input tensor of shape :math:`(\text{minibatch} , \text{in\_channels} , iT , iH , iW)` + weight: filters of shape :math:`(\text{out\_channels} , \frac{\text{in\_channels}}{\text{groups}} , kT , kH , kW)` + bias: optional bias tensor of shape :math:`(\text{out\_channels})`. Default: None + stride: the stride of the convolving kernel. Can be a single number or a + tuple `(sT, sH, sW)`. Default: 1 + padding: implicit paddings on both sides of the input. Can be a string {'valid', 'same'}, + single number or a tuple `(padT, padH, padW)`. Default: 0 + ``padding='valid'`` is the same as no padding. ``padding='same'`` pads + the input so the output has the same shape as the input. However, this mode + doesn't support any stride values other than 1. + + .. warning:: + For ``padding='same'``, if the ``weight`` is even-length and + ``dilation`` is odd in any dimension, a full :func:`pad` operation + may be needed internally. Lowering performance. + + dilation: the spacing between kernel elements. Can be a single number or + a tuple `(dT, dH, dW)`. Default: 1 + groups: split input into groups, :math:`\text{in\_channels}` should be divisible by + the number of groups. Default: 1 + +Examples:: + + >>> filters = torch.randn(33, 16, 3, 3, 3) + >>> inputs = torch.randn(20, 16, 50, 10, 20) + >>> F.conv3d(inputs, filters) +""", +) # noqa: E501 + +conv_transpose1d = _add_docstr( + torch.conv_transpose1d, + r""" +conv_transpose1d(input, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1) -> Tensor + +Applies a 1D transposed convolution operator over an input signal +composed of several input planes, sometimes also called "deconvolution". + +{tf32_note} + +See :class:`~torch.nn.ConvTranspose1d` for details and output shape. + +Note: + {cudnn_reproducibility_note} +""".format( + **reproducibility_notes, **tf32_notes + ) + + r""" + +Args: + input: input tensor of shape :math:`(\text{minibatch} , \text{in\_channels} , iW)` + weight: filters of shape :math:`(\text{in\_channels} , \frac{\text{out\_channels}}{\text{groups}} , kW)` + bias: optional bias of shape :math:`(\text{out\_channels})`. Default: None + stride: the stride of the convolving kernel. Can be a single number or a + tuple ``(sW,)``. Default: 1 + padding: ``dilation * (kernel_size - 1) - padding`` zero-padding will be added to both + sides of each dimension in the input. Can be a single number or a tuple + ``(padW,)``. Default: 0 + output_padding: additional size added to one side of each dimension in the + output shape. Can be a single number or a tuple ``(out_padW)``. Default: 0 + groups: split input into groups, :math:`\text{in\_channels}` should be divisible by the + number of groups. Default: 1 + dilation: the spacing between kernel elements. Can be a single number or + a tuple ``(dW,)``. Default: 1 + +Examples:: + + >>> inputs = torch.randn(20, 16, 50) + >>> weights = torch.randn(16, 33, 5) + >>> F.conv_transpose1d(inputs, weights) +""", +) + +conv_transpose2d = _add_docstr( + torch.conv_transpose2d, + r""" +conv_transpose2d(input, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1) -> Tensor + +Applies a 2D transposed convolution operator over an input image +composed of several input planes, sometimes also called "deconvolution". + +{tf32_note} + +See :class:`~torch.nn.ConvTranspose2d` for details and output shape. 
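A shape check for the transposed-convolution padding relation described above, using the output-length formula from :class:`~torch.nn.ConvTranspose1d`::

    import torch
    import torch.nn.functional as F

    x = torch.randn(1, 16, 50)
    w = torch.randn(16, 33, 5)  # (in_channels, out_channels // groups, kW)
    out = F.conv_transpose1d(x, w, stride=2, padding=1, output_padding=1)
    # L_out = (L_in - 1)*stride - 2*padding + dilation*(kernel_size - 1) + output_padding + 1
    #       = (50 - 1)*2 - 2*1 + 1*(5 - 1) + 1 + 1 = 102
    print(out.shape)  # torch.Size([1, 33, 102])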
+ +Note: + {cudnn_reproducibility_note} +""".format( + **reproducibility_notes, **tf32_notes + ) + + r""" + +Args: + input: input tensor of shape :math:`(\text{minibatch} , \text{in\_channels} , iH , iW)` + weight: filters of shape :math:`(\text{in\_channels} , \frac{\text{out\_channels}}{\text{groups}} , kH , kW)` + bias: optional bias of shape :math:`(\text{out\_channels})`. Default: None + stride: the stride of the convolving kernel. Can be a single number or a + tuple ``(sH, sW)``. Default: 1 + padding: ``dilation * (kernel_size - 1) - padding`` zero-padding will be added to both + sides of each dimension in the input. Can be a single number or a tuple + ``(padH, padW)``. Default: 0 + output_padding: additional size added to one side of each dimension in the + output shape. Can be a single number or a tuple ``(out_padH, out_padW)``. + Default: 0 + groups: split input into groups, :math:`\text{in\_channels}` should be divisible by the + number of groups. Default: 1 + dilation: the spacing between kernel elements. Can be a single number or + a tuple ``(dH, dW)``. Default: 1 + +Examples:: + + >>> # With square kernels and equal stride + >>> inputs = torch.randn(1, 4, 5, 5) + >>> weights = torch.randn(4, 8, 3, 3) + >>> F.conv_transpose2d(inputs, weights, padding=1) +""", +) # noqa: E501 + +conv_transpose3d = _add_docstr( + torch.conv_transpose3d, + r""" +conv_transpose3d(input, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1) -> Tensor + +Applies a 3D transposed convolution operator over an input image +composed of several input planes, sometimes also called "deconvolution" + +{tf32_note} + +See :class:`~torch.nn.ConvTranspose3d` for details and output shape. + +Note: + {cudnn_reproducibility_note} +""".format( + **reproducibility_notes, **tf32_notes + ) + + r""" + +Args: + input: input tensor of shape :math:`(\text{minibatch} , \text{in\_channels} , iT , iH , iW)` + weight: filters of shape :math:`(\text{in\_channels} , \frac{\text{out\_channels}}{\text{groups}} , kT , kH , kW)` + bias: optional bias of shape :math:`(\text{out\_channels})`. Default: None + stride: the stride of the convolving kernel. Can be a single number or a + tuple ``(sT, sH, sW)``. Default: 1 + padding: ``dilation * (kernel_size - 1) - padding`` zero-padding will be added to both + sides of each dimension in the input. Can be a single number or a tuple + ``(padT, padH, padW)``. Default: 0 + output_padding: additional size added to one side of each dimension in the + output shape. Can be a single number or a tuple + ``(out_padT, out_padH, out_padW)``. Default: 0 + groups: split input into groups, :math:`\text{in\_channels}` should be divisible by the + number of groups. Default: 1 + dilation: the spacing between kernel elements. Can be a single number or + a tuple `(dT, dH, dW)`. Default: 1 + +Examples:: + + >>> inputs = torch.randn(20, 16, 50, 10, 20) + >>> weights = torch.randn(16, 33, 3, 3, 3) + >>> F.conv_transpose3d(inputs, weights) +""", +) # noqa: E501 + +conv_tbc = _add_docstr( + torch.conv_tbc, + r""" +Applies a 1-dimensional sequence convolution over an input sequence. +Input and output dimensions are (Time, Batch, Channels) - hence TBC. + +Args: + input: input tensor of shape :math:`(\text{sequence length} \times batch \times \text{in\_channels})` + weight: filter of shape (:math:`\text{kernel width} \times \text{in\_channels} \times \text{out\_channels}`) + bias: bias of shape (:math:`\text{out\_channels}`) + pad: number of timesteps to pad. 
Default: 0 +""", +) + + +# Pooling +avg_pool1d = _add_docstr( + torch.avg_pool1d, + r""" +avg_pool1d(input, kernel_size, stride=None, padding=0, ceil_mode=False, count_include_pad=True) -> Tensor + +Applies a 1D average pooling over an input signal composed of several +input planes. + +See :class:`~torch.nn.AvgPool1d` for details and output shape. + +Args: + input: input tensor of shape :math:`(\text{minibatch} , \text{in\_channels} , iW)` + kernel_size: the size of the window. Can be a single number or a + tuple `(kW,)` + stride: the stride of the window. Can be a single number or a tuple + `(sW,)`. Default: :attr:`kernel_size` + padding: implicit zero paddings on both sides of the input. Can be a + single number or a tuple `(padW,)`. Default: 0 + ceil_mode: when True, will use `ceil` instead of `floor` to compute the + output shape. Default: ``False`` + count_include_pad: when True, will include the zero-padding in the + averaging calculation. Default: ``True`` + +Examples:: + + >>> # pool of square window of size=3, stride=2 + >>> input = torch.tensor([[[1, 2, 3, 4, 5, 6, 7]]], dtype=torch.float32) + >>> F.avg_pool1d(input, kernel_size=3, stride=2) + tensor([[[ 2., 4., 6.]]]) + +""", +) + + +avg_pool2d = _add_docstr( + torch._C._nn.avg_pool2d, + r""" +avg_pool2d(input, kernel_size, stride=None, padding=0, ceil_mode=False, count_include_pad=True, divisor_override=None) -> Tensor + +Applies 2D average-pooling operation in :math:`kH \times kW` regions by step size +:math:`sH \times sW` steps. The number of output features is equal to the number of +input planes. + +See :class:`~torch.nn.AvgPool2d` for details and output shape. + +Args: + input: input tensor :math:`(\text{minibatch} , \text{in\_channels} , iH , iW)` + kernel_size: size of the pooling region. Can be a single number or a + tuple `(kH, kW)` + stride: stride of the pooling operation. Can be a single number or a + tuple `(sH, sW)`. Default: :attr:`kernel_size` + padding: implicit zero paddings on both sides of the input. Can be a + single number or a tuple `(padH, padW)`. Default: 0 + ceil_mode: when True, will use `ceil` instead of `floor` in the formula + to compute the output shape. Default: ``False`` + count_include_pad: when True, will include the zero-padding in the + averaging calculation. Default: ``True`` + divisor_override: if specified, it will be used as divisor, otherwise + size of the pooling region will be used. Default: None +""", +) + +avg_pool3d = _add_docstr( + torch._C._nn.avg_pool3d, + r""" +avg_pool3d(input, kernel_size, stride=None, padding=0, ceil_mode=False, count_include_pad=True, divisor_override=None) -> Tensor + +Applies 3D average-pooling operation in :math:`kT \times kH \times kW` regions by step +size :math:`sT \times sH \times sW` steps. The number of output features is equal to +:math:`\lfloor\frac{\text{input planes}}{sT}\rfloor`. + +See :class:`~torch.nn.AvgPool3d` for details and output shape. + +Args: + input: input tensor :math:`(\text{minibatch} , \text{in\_channels} , iT \times iH , iW)` + kernel_size: size of the pooling region. Can be a single number or a + tuple `(kT, kH, kW)` + stride: stride of the pooling operation. Can be a single number or a + tuple `(sT, sH, sW)`. Default: :attr:`kernel_size` + padding: implicit zero paddings on both sides of the input. 
Can be a + single number or a tuple `(padT, padH, padW)`, Default: 0 + ceil_mode: when True, will use `ceil` instead of `floor` in the formula + to compute the output shape + count_include_pad: when True, will include the zero-padding in the + averaging calculation + divisor_override: if specified, it will be used as divisor, otherwise + size of the pooling region will be used. Default: None +""", +) + + +def fractional_max_pool2d_with_indices( + input: Tensor, kernel_size: BroadcastingList2[int], + output_size: Optional[BroadcastingList2[int]] = None, + output_ratio: Optional[BroadcastingList2[float]] = None, + return_indices: bool = False, + _random_samples: Optional[Tensor] = None +) -> Tuple[Tensor, Tensor]: # noqa: D400 + r""" + fractional_max_pool2d(input, kernel_size, output_size=None, output_ratio=None, return_indices=False, _random_samples=None) + + Applies 2D fractional max pooling over an input signal composed of several input planes. + + Fractional MaxPooling is described in detail in the paper `Fractional MaxPooling`_ by Ben Graham + + The max-pooling operation is applied in :math:`kH \times kW` regions by a stochastic + step size determined by the target output size. + The number of output features is equal to the number of input planes. + + Args: + kernel_size: the size of the window to take a max over. + Can be a single number :math:`k` (for a square kernel of :math:`k \times k`) + or a tuple `(kH, kW)` + output_size: the target output size of the image of the form :math:`oH \times oW`. + Can be a tuple `(oH, oW)` or a single number :math:`oH` for a square image :math:`oH \times oH` + output_ratio: If one wants to have an output size as a ratio of the input size, this option can be given. + This has to be a number or tuple in the range (0, 1) + return_indices: if ``True``, will return the indices along with the outputs. + Useful to pass to :func:`~torch.nn.functional.max_unpool2d`. + + Examples:: + >>> input = torch.randn(20, 16, 50, 32) + >>> # pool of square window of size=3, and target output size 13x12 + >>> F.fractional_max_pool2d(input, 3, output_size=(13, 12)) + >>> # pool of square window and target output size being half of input image size + >>> F.fractional_max_pool2d(input, 3, output_ratio=(0.5, 0.5)) + + .. 
_Fractional MaxPooling: + http://arxiv.org/abs/1412.6071 + """ + if has_torch_function_variadic(input, _random_samples): + return handle_torch_function( + fractional_max_pool2d_with_indices, + (input, _random_samples), + input, + kernel_size, + output_size=output_size, + output_ratio=output_ratio, + return_indices=return_indices, + _random_samples=_random_samples, + ) + if output_size is None and output_ratio is None: + raise ValueError("fractional_max_pool2d requires specifying either an output_size or an output_ratio") + if output_size is None: + assert output_ratio is not None + if len(output_ratio) > 2: + raise ValueError("fractional_max_pool2d requires output_ratio to either be a single Int or tuple of Ints.") + _output_ratio = _pair(output_ratio) + output_size = [int(input.size(-2) * _output_ratio[0]), int(input.size(-1) * _output_ratio[1])] + + if _random_samples is None: + n_batch = 1 if input.dim() == 3 else input.size(0) + _random_samples = torch.rand(n_batch, input.size(-3), 2, dtype=input.dtype, device=input.device) + return torch._C._nn.fractional_max_pool2d(input, kernel_size, output_size, _random_samples) + + +def _fractional_max_pool2d( + input: Tensor, kernel_size: BroadcastingList2[int], + output_size: Optional[BroadcastingList2[int]] = None, + output_ratio: Optional[BroadcastingList2[float]] = None, + return_indices: bool = False, + _random_samples: Optional[Tensor] = None +) -> Tensor: + if has_torch_function_variadic(input, _random_samples): + return handle_torch_function( + fractional_max_pool2d, + (input, _random_samples), + input, + kernel_size, + output_size=output_size, + output_ratio=output_ratio, + return_indices=return_indices, + _random_samples=_random_samples, + ) + return fractional_max_pool2d_with_indices( + input, kernel_size, output_size, output_ratio, return_indices, _random_samples + )[0] + + +fractional_max_pool2d = boolean_dispatch( + arg_name="return_indices", + arg_index=4, + default=False, + if_true=fractional_max_pool2d_with_indices, + if_false=_fractional_max_pool2d, + module_name=__name__, + func_name="fractional_max_pool2d", +) + + +def fractional_max_pool3d_with_indices( + input: Tensor, kernel_size: BroadcastingList3[int], + output_size: Optional[BroadcastingList3[int]] = None, + output_ratio: Optional[BroadcastingList3[float]] = None, + return_indices: bool = False, + _random_samples: Optional[Tensor] = None +) -> Tuple[Tensor, Tensor]: # noqa: D400 + r""" + fractional_max_pool3d(input, kernel_size, output_size=None, output_ratio=None, return_indices=False, _random_samples=None) + + Applies 3D fractional max pooling over an input signal composed of several input planes. + + Fractional MaxPooling is described in detail in the paper `Fractional MaxPooling`_ by Ben Graham + + The max-pooling operation is applied in :math:`kT \times kH \times kW` regions by a stochastic + step size determined by the target output size. + The number of output features is equal to the number of input planes. + + Args: + kernel_size: the size of the window to take a max over. + Can be a single number :math:`k` (for a square kernel of :math:`k \times k \times k`) + or a tuple `(kT, kH, kW)` + output_size: the target output size of the form :math:`oT \times oH \times oW`. + Can be a tuple `(oT, oH, oW)` or a single number :math:`oH` for a cubic output + :math:`oH \times oH \times oH` + output_ratio: If one wants to have an output size as a ratio of the input size, this option can be given. 
+ This has to be a number or tuple in the range (0, 1) + return_indices: if ``True``, will return the indices along with the outputs. + Useful to pass to :func:`~torch.nn.functional.max_unpool3d`. + + Shape: + - Input: :math:`(N, C, T_{in}, H_{in}, W_{in})` or :math:`(C, T_{in}, H_{in}, W_{in})`. + - Output: :math:`(N, C, T_{out}, H_{out}, W_{out})` or :math:`(C, T_{out}, H_{out}, W_{out})`, where + :math:`(T_{out}, H_{out}, W_{out})=\text{output\_size}` or + :math:`(T_{out}, H_{out}, W_{out})=\text{output\_ratio} \times (T_{in}, H_{in}, W_{in})` + + Examples:: + >>> input = torch.randn(20, 16, 50, 32, 16) + >>> # pool of cubic window of size=3, and target output size 13x12x11 + >>> F.fractional_max_pool3d(input, 3, output_size=(13, 12, 11)) + >>> # pool of cubic window and target output size being half of input size + >>> F.fractional_max_pool3d(input, 3, output_ratio=(0.5, 0.5, 0.5)) + + .. _Fractional MaxPooling: + http://arxiv.org/abs/1412.6071 + """ + if has_torch_function_variadic(input, _random_samples): + return handle_torch_function( + fractional_max_pool3d_with_indices, + (input, _random_samples), + input, + kernel_size, + output_size=output_size, + output_ratio=output_ratio, + return_indices=return_indices, + _random_samples=_random_samples, + ) + if output_size is None and output_ratio is None: + raise ValueError("fractional_max_pool3d requires specifying either an output_size or an output_ratio") + if output_size is None: + assert output_ratio is not None + _output_ratio = _triple(output_ratio) + output_size = [ + int(input.size(-3) * _output_ratio[0]), + int(input.size(-2) * _output_ratio[1]), + int(input.size(-1) * _output_ratio[2]), + ] + + if _random_samples is None: + n_batch = 1 if input.dim() == 4 else input.size(0) + _random_samples = torch.rand(n_batch, input.size(-4), 3, dtype=input.dtype, device=input.device) + return torch._C._nn.fractional_max_pool3d(input, kernel_size, output_size, _random_samples) + + +def _fractional_max_pool3d( + input: Tensor, kernel_size: BroadcastingList3[int], + output_size: Optional[BroadcastingList3[int]] = None, + output_ratio: Optional[BroadcastingList3[float]] = None, + return_indices: bool = False, + _random_samples: Optional[Tensor] = None +) -> Tensor: + if has_torch_function_variadic(input, _random_samples): + return handle_torch_function( + fractional_max_pool3d, + (input, _random_samples), + input, + kernel_size, + output_size=output_size, + output_ratio=output_ratio, + return_indices=return_indices, + _random_samples=_random_samples, + ) + return fractional_max_pool3d_with_indices( + input, kernel_size, output_size, output_ratio, return_indices, _random_samples + )[0] + + +fractional_max_pool3d = boolean_dispatch( + arg_name="return_indices", + arg_index=4, + default=False, + if_true=fractional_max_pool3d_with_indices, + if_false=_fractional_max_pool3d, + module_name=__name__, + func_name="fractional_max_pool3d", +) + + +def max_pool1d_with_indices( + input: Tensor, kernel_size: BroadcastingList1[int], + stride: Optional[BroadcastingList1[int]] = None, + padding: BroadcastingList1[int] = 0, + dilation: BroadcastingList1[int] = 1, + ceil_mode: bool = False, + return_indices: bool = False +) -> Tuple[Tensor, Tensor]: # noqa: D400 + r""" + max_pool1d(input, kernel_size, stride=None, padding=0, dilation=1, ceil_mode=False, return_indices=False) + + Applies a 1D max pooling over an input signal composed of several input + planes. + + .. 
note:: + The order of :attr:`ceil_mode` and :attr:`return_indices` is different from + what seen in :class:`~torch.nn.MaxPool1d`, and will change in a future release. + + See :class:`~torch.nn.MaxPool1d` for details. + + Args: + input: input tensor of shape :math:`(\text{minibatch} , \text{in\_channels} , iW)`, minibatch dim optional. + kernel_size: the size of the window. Can be a single number or a + tuple `(kW,)` + stride: the stride of the window. Can be a single number or a tuple + `(sW,)`. Default: :attr:`kernel_size` + padding: Implicit negative infinity padding to be added on both sides, must be >= 0 and <= kernel_size / 2. + dilation: The stride between elements within a sliding window, must be > 0. + ceil_mode: If ``True``, will use `ceil` instead of `floor` to compute the output shape. This + ensures that every element in the input tensor is covered by a sliding window. + return_indices: If ``True``, will return the argmax along with the max values. + Useful for :class:`torch.nn.functional.max_unpool1d` later + """ + if has_torch_function_unary(input): + return handle_torch_function( + max_pool1d_with_indices, + (input,), + input, + kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + ceil_mode=ceil_mode, + return_indices=return_indices, + ) + if stride is None: + stride = torch.jit.annotate(List[int], []) + return torch.max_pool1d_with_indices(input, kernel_size, stride, padding, dilation, ceil_mode) + + +def _max_pool1d( + input: Tensor, kernel_size: BroadcastingList1[int], + stride: Optional[BroadcastingList1[int]] = None, + padding: BroadcastingList1[int] = 0, + dilation: BroadcastingList1[int] = 1, + ceil_mode: bool = False, + return_indices: bool = False +) -> Tensor: + if has_torch_function_unary(input): + return handle_torch_function( + max_pool1d, + (input,), + input, + kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + ceil_mode=ceil_mode, + return_indices=return_indices, + ) + if stride is None: + stride = torch.jit.annotate(List[int], []) + return torch.max_pool1d(input, kernel_size, stride, padding, dilation, ceil_mode) + + +max_pool1d = boolean_dispatch( + arg_name="return_indices", + arg_index=6, + default=False, + if_true=max_pool1d_with_indices, + if_false=_max_pool1d, + module_name=__name__, + func_name="max_pool1d", +) + + +def max_pool2d_with_indices( + input: Tensor, kernel_size: BroadcastingList2[int], + stride: Optional[BroadcastingList2[int]] = None, + padding: BroadcastingList2[int] = 0, + dilation: BroadcastingList2[int] = 1, + ceil_mode: bool = False, + return_indices: bool = False +) -> Tuple[Tensor, Tensor]: # noqa: D400 + r""" + max_pool2d(input, kernel_size, stride=None, padding=0, dilation=1, ceil_mode=False, return_indices=False) + + Applies a 2D max pooling over an input signal composed of several input + planes. + + .. note:: + The order of :attr:`ceil_mode` and :attr:`return_indices` is different from + what seen in :class:`~torch.nn.MaxPool2d`, and will change in a future release. + + See :class:`~torch.nn.MaxPool2d` for details. + + Args: + input: input tensor :math:`(\text{minibatch} , \text{in\_channels} , iH , iW)`, minibatch dim optional. + kernel_size: size of the pooling region. Can be a single number or a + tuple `(kH, kW)` + stride: stride of the pooling operation. Can be a single number or a + tuple `(sH, sW)`. Default: :attr:`kernel_size` + padding: Implicit negative infinity padding to be added on both sides, must be >= 0 and <= kernel_size / 2. 
+ dilation: The stride between elements within a sliding window, must be > 0. + ceil_mode: If ``True``, will use `ceil` instead of `floor` to compute the output shape. This + ensures that every element in the input tensor is covered by a sliding window. + return_indices: If ``True``, will return the argmax along with the max values. + Useful for :class:`torch.nn.functional.max_unpool2d` later + """ + if has_torch_function_unary(input): + return handle_torch_function( + max_pool2d_with_indices, + (input,), + input, + kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + ceil_mode=ceil_mode, + return_indices=return_indices, + ) + if stride is None: + stride = torch.jit.annotate(List[int], []) + return torch._C._nn.max_pool2d_with_indices(input, kernel_size, stride, padding, dilation, ceil_mode) + + +def _max_pool2d( + input: Tensor, kernel_size: BroadcastingList2[int], + stride: Optional[BroadcastingList2[int]] = None, + padding: BroadcastingList2[int] = 0, + dilation: BroadcastingList2[int] = 1, + ceil_mode: bool = False, + return_indices: bool = False +) -> Tensor: + if has_torch_function_unary(input): + return handle_torch_function( + max_pool2d, + (input,), + input, + kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + ceil_mode=ceil_mode, + return_indices=return_indices, + ) + if stride is None: + stride = torch.jit.annotate(List[int], []) + return torch.max_pool2d(input, kernel_size, stride, padding, dilation, ceil_mode) + + +max_pool2d = boolean_dispatch( + arg_name="return_indices", + arg_index=6, + default=False, + if_true=max_pool2d_with_indices, + if_false=_max_pool2d, + module_name=__name__, + func_name="max_pool2d", +) + + +def max_pool3d_with_indices( + input: Tensor, kernel_size: BroadcastingList3[int], + stride: Optional[BroadcastingList3[int]] = None, + padding: BroadcastingList3[int] = 0, + dilation: BroadcastingList3[int] = 1, + ceil_mode: bool = False, + return_indices: bool = False +) -> Tuple[Tensor, Tensor]: # noqa: D400 + r""" + max_pool3d(input, kernel_size, stride=None, padding=0, dilation=1, ceil_mode=False, return_indices=False) + + Applies a 3D max pooling over an input signal composed of several input + planes. + + .. note:: + The order of :attr:`ceil_mode` and :attr:`return_indices` is different from + what seen in :class:`~torch.nn.MaxPool3d`, and will change in a future release. + + See :class:`~torch.nn.MaxPool3d` for details. + + Args: + input: input tensor :math:`(\text{minibatch} , \text{in\_channels} , iD, iH , iW)`, minibatch dim optional. + kernel_size: size of the pooling region. Can be a single number or a + tuple `(kT, kH, kW)` + stride: stride of the pooling operation. Can be a single number or a + tuple `(sT, sH, sW)`. Default: :attr:`kernel_size` + padding: Implicit negative infinity padding to be added on both sides, must be >= 0 and <= kernel_size / 2. + dilation: The stride between elements within a sliding window, must be > 0. + ceil_mode: If ``True``, will use `ceil` instead of `floor` to compute the output shape. This + ensures that every element in the input tensor is covered by a sliding window. + return_indices: If ``True``, will return the argmax along with the max values. 
+ Useful for :class:`torch.nn.functional.max_unpool3d` later + """ + if has_torch_function_unary(input): + return handle_torch_function( + max_pool3d_with_indices, + (input,), + input, + kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + ceil_mode=ceil_mode, + return_indices=return_indices, + ) + if stride is None: + stride = torch.jit.annotate(List[int], []) + return torch._C._nn.max_pool3d_with_indices(input, kernel_size, stride, padding, dilation, ceil_mode) + + +def _max_pool3d( + input: Tensor, kernel_size: BroadcastingList3[int], + stride: Optional[BroadcastingList3[int]] = None, + padding: BroadcastingList3[int] = 0, + dilation: BroadcastingList3[int] = 1, + ceil_mode: bool = False, + return_indices: bool = False +) -> Tensor: + if has_torch_function_unary(input): + return handle_torch_function( + max_pool3d, + (input,), + input, + kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + ceil_mode=ceil_mode, + return_indices=return_indices, + ) + if stride is None: + stride = torch.jit.annotate(List[int], []) + return torch.max_pool3d(input, kernel_size, stride, padding, dilation, ceil_mode) + + +max_pool3d = boolean_dispatch( + arg_name="return_indices", + arg_index=6, + default=False, + if_true=max_pool3d_with_indices, + if_false=_max_pool3d, + module_name=__name__, + func_name="max_pool3d", +) + + +def _unpool_output_size( + input: Tensor, kernel_size: List[int], stride: List[int], padding: List[int], output_size: Optional[List[int]] +) -> List[int]: + input_size = input.size() + default_size = torch.jit.annotate(List[int], []) + for d in range(len(kernel_size)): + default_size.append((input_size[-len(kernel_size) + d] - 1) * stride[d] + kernel_size[d] - 2 * padding[d]) + if output_size is None: + ret = default_size + else: + if len(output_size) == len(kernel_size) + 2: + output_size = output_size[2:] + if len(output_size) != len(kernel_size): + raise ValueError( + "output_size should be a sequence containing " + f"{len(kernel_size)} or {len(kernel_size) + 2} elements, but it has a length of '{len(output_size)}'" + ) + for d in range(len(kernel_size)): + min_size = default_size[d] - stride[d] + max_size = default_size[d] + stride[d] + if not (min_size < output_size[d] < max_size): + raise ValueError( + f'invalid output_size "{output_size}" (dim {d} must be between {min_size} and {max_size})' + ) + + ret = output_size + return ret + + +def max_unpool1d( + input: Tensor, indices: Tensor, + kernel_size: BroadcastingList1[int], + stride: Optional[BroadcastingList1[int]] = None, + padding: BroadcastingList1[int] = 0, + output_size: Optional[BroadcastingList1[int]] = None +) -> Tensor: + r"""Compute a partial inverse of :class:`MaxPool1d`. + + See :class:`~torch.nn.MaxUnpool1d` for details. 
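A round-trip sketch of the partial inverse described above: non-maximal positions come back as zeros::

    import torch
    import torch.nn.functional as F

    x = torch.tensor([[[1., 3., 2., 8., 5., 6.]]])
    pooled, idx = F.max_pool1d(x, kernel_size=2, return_indices=True)
    restored = F.max_unpool1d(pooled, idx, kernel_size=2)
    print(pooled)    # tensor([[[3., 8., 6.]]])
    print(restored)  # tensor([[[0., 3., 0., 8., 0., 6.]]])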
+ """ + if has_torch_function_unary(input): + return handle_torch_function( + max_unpool1d, + (input,), + input, + indices, + kernel_size, + stride=stride, + padding=padding, + output_size=output_size, + ) + kernel_size = _single(kernel_size) + if stride is not None: + _stride = _single(stride) + else: + _stride = kernel_size + padding = _single(padding) + output_size = _unpool_output_size(input, kernel_size, _stride, padding, output_size) + if isinstance(output_size, list): + output_size = output_size + [1] + else: + output_size = output_size + (1,) + return torch._C._nn.max_unpool2d(input.unsqueeze(-1), indices.unsqueeze(-1), output_size).squeeze(-1) + + +def max_unpool2d( + input: Tensor, indices: Tensor, + kernel_size: BroadcastingList2[int], + stride: Optional[BroadcastingList2[int]] = None, + padding: BroadcastingList2[int] = 0, + output_size: Optional[BroadcastingList2[int]] = None +) -> Tensor: + r"""Compute a partial inverse of :class:`MaxPool2d`. + + See :class:`~torch.nn.MaxUnpool2d` for details. + """ + if has_torch_function_unary(input): + return handle_torch_function( + max_unpool2d, + (input,), + input, + indices, + kernel_size, + stride=stride, + padding=padding, + output_size=output_size, + ) + kernel_size = _pair(kernel_size) + if stride is not None: + _stride = _pair(stride) + else: + _stride = kernel_size + padding = _pair(padding) + output_size = _unpool_output_size(input, kernel_size, _stride, padding, output_size) + return torch._C._nn.max_unpool2d(input, indices, output_size) + + +def max_unpool3d( + input: Tensor, indices: Tensor, + kernel_size: BroadcastingList3[int], + stride: Optional[BroadcastingList3[int]] = None, + padding: BroadcastingList3[int] = 0, + output_size: Optional[BroadcastingList3[int]] = None +) -> Tensor: + r"""Compute a partial inverse of :class:`MaxPool3d`. + + See :class:`~torch.nn.MaxUnpool3d` for details. + """ + if has_torch_function_unary(input): + return handle_torch_function( + max_unpool3d, + (input,), + input, + indices, + kernel_size, + stride=stride, + padding=padding, + output_size=output_size, + ) + kernel_size = _triple(kernel_size) + if stride is not None: + _stride = _triple(stride) + else: + _stride = kernel_size + padding = _triple(padding) + output_size = _unpool_output_size(input, kernel_size, _stride, padding, output_size) + return torch._C._nn.max_unpool3d(input, indices, output_size, _stride, padding) + + +def lp_pool3d( + input: Tensor, norm_type: Union[int, float], + kernel_size: BroadcastingList3[int], + stride: Optional[BroadcastingList3[int]] = None, + ceil_mode: bool = False +) -> Tensor: + r""" + Apply a 3D power-average pooling over an input signal composed of several input planes. + + If the sum of all inputs to the power of `p` is + zero, the gradient is set to zero as well. + + See :class:`~torch.nn.LPPool3d` for details. 
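A numeric sketch of the power-average pooling described above: with ``norm_type=2`` each window yields the Euclidean norm of its elements::

    import torch
    import torch.nn.functional as F

    x = torch.zeros(1, 1, 2, 2, 2)     # a single 2x2x2 window
    x[0, 0, 0, 0, 0] = 3.0
    x[0, 0, 0, 0, 1] = 4.0
    out = F.lp_pool3d(x, norm_type=2, kernel_size=2)
    print(out)  # tensor([[[[[5.]]]]]), i.e. sqrt(3**2 + 4**2)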
+ """ + if has_torch_function_unary(input): + return handle_torch_function( + lp_pool3d, (input,), input, norm_type, kernel_size, stride=stride, ceil_mode=ceil_mode + ) + kd, kw, kh = utils._triple(kernel_size) + if stride is not None: + out = avg_pool3d(input.pow(norm_type), kernel_size, stride, 0, ceil_mode) + else: + out = avg_pool3d(input.pow(norm_type), kernel_size, padding=0, ceil_mode=ceil_mode) + + return (torch.sign(out) * relu(torch.abs(out))).mul(kd * kw * kh).pow(1.0 / norm_type) + + +def lp_pool2d( + input: Tensor, norm_type: Union[int, float], + kernel_size: BroadcastingList2[int], + stride: Optional[BroadcastingList2[int]] = None, + ceil_mode: bool = False +) -> Tensor: + r""" + Apply a 2D power-average pooling over an input signal composed of several input planes. + + If the sum of all inputs to the power of `p` is + zero, the gradient is set to zero as well. + + See :class:`~torch.nn.LPPool2d` for details. + """ + if has_torch_function_unary(input): + return handle_torch_function( + lp_pool2d, (input,), input, norm_type, kernel_size, stride=stride, ceil_mode=ceil_mode + ) + kw, kh = utils._pair(kernel_size) + if stride is not None: + out = avg_pool2d(input.pow(norm_type), kernel_size, stride, 0, ceil_mode) + else: + out = avg_pool2d(input.pow(norm_type), kernel_size, padding=0, ceil_mode=ceil_mode) + + return (torch.sign(out) * relu(torch.abs(out))).mul(kw * kh).pow(1.0 / norm_type) + + +def lp_pool1d( + input: Tensor, norm_type: Union[int, float], + kernel_size: int, + stride: Optional[BroadcastingList1[int]] = None, + ceil_mode: bool = False +) -> Tensor: + r"""Apply a 1D power-average pooling over an input signal composed of several input planes. + + If the sum of all inputs to the power of `p` is + zero, the gradient is set to zero as well. + + See :class:`~torch.nn.LPPool1d` for details. + """ + if has_torch_function_unary(input): + return handle_torch_function( + lp_pool1d, (input,), input, norm_type, kernel_size, stride=stride, ceil_mode=ceil_mode + ) + if stride is not None: + out = avg_pool1d(input.pow(norm_type), kernel_size, stride, 0, ceil_mode) + else: + out = avg_pool1d(input.pow(norm_type), kernel_size, padding=0, ceil_mode=ceil_mode) + + return (torch.sign(out) * relu(torch.abs(out))).mul(kernel_size).pow(1.0 / norm_type) + + +def adaptive_max_pool1d_with_indices( + input: Tensor, output_size: BroadcastingList1[int], return_indices: bool = False +) -> Tuple[Tensor, Tensor]: # noqa: D400 + r""" + adaptive_max_pool1d(input, output_size, return_indices=False) + + Applies a 1D adaptive max pooling over an input signal composed of + several input planes. + + See :class:`~torch.nn.AdaptiveMaxPool1d` for details and output shape. + + Args: + output_size: the target output size (single integer) + return_indices: whether to return pooling indices. 
Default: ``False`` + """ + if has_torch_function_unary(input): + return handle_torch_function( + adaptive_max_pool1d_with_indices, (input,), input, output_size, return_indices=return_indices + ) + return torch.adaptive_max_pool1d(input, output_size) + + +def _adaptive_max_pool1d(input: Tensor, output_size: BroadcastingList1[int], return_indices: bool = False) -> Tensor: + if has_torch_function_unary(input): + return handle_torch_function( + adaptive_max_pool1d, (input,), input, output_size, return_indices=return_indices + ) + return adaptive_max_pool1d_with_indices(input, output_size)[0] + + +adaptive_max_pool1d = boolean_dispatch( + arg_name="return_indices", + arg_index=2, + default=False, + if_true=adaptive_max_pool1d_with_indices, + if_false=_adaptive_max_pool1d, + module_name=__name__, + func_name="adaptive_max_pool1d", +) + + +def adaptive_max_pool2d_with_indices( + input: Tensor, output_size: BroadcastingList2[int], + return_indices: bool = False +) -> Tuple[Tensor, Tensor]: # noqa: D400 + r"""adaptive_max_pool2d(input, output_size, return_indices=False) + + Applies a 2D adaptive max pooling over an input signal composed of + several input planes. + + See :class:`~torch.nn.AdaptiveMaxPool2d` for details and output shape. + + Args: + output_size: the target output size (single integer or + double-integer tuple) + return_indices: whether to return pooling indices. Default: ``False`` + """ + if has_torch_function_unary(input): + return handle_torch_function( + adaptive_max_pool2d_with_indices, (input,), input, output_size, return_indices=return_indices + ) + output_size = _list_with_default(output_size, input.size()) + return torch._C._nn.adaptive_max_pool2d(input, output_size) + + +def _adaptive_max_pool2d(input: Tensor, output_size: BroadcastingList2[int], return_indices: bool = False) -> Tensor: + if has_torch_function_unary(input): + return handle_torch_function( + adaptive_max_pool2d, (input,), input, output_size, return_indices=return_indices + ) + return adaptive_max_pool2d_with_indices(input, output_size)[0] + + +adaptive_max_pool2d = boolean_dispatch( + arg_name="return_indices", + arg_index=2, + default=False, + if_true=adaptive_max_pool2d_with_indices, + if_false=_adaptive_max_pool2d, + module_name=__name__, + func_name="adaptive_max_pool2d", +) + + +def adaptive_max_pool3d_with_indices( + input: Tensor, output_size: BroadcastingList3[int], + return_indices: bool = False +) -> Tuple[Tensor, Tensor]: # noqa: D400 + r""" + adaptive_max_pool3d(input, output_size, return_indices=False) + + Applies a 3D adaptive max pooling over an input signal composed of + several input planes. + + See :class:`~torch.nn.AdaptiveMaxPool3d` for details and output shape. + + Args: + output_size: the target output size (single integer or + triple-integer tuple) + return_indices: whether to return pooling indices. 
Default: ``False`` + """ + if has_torch_function_unary(input): + return handle_torch_function( + adaptive_max_pool3d_with_indices, (input,), input, output_size, return_indices=return_indices + ) + output_size = _list_with_default(output_size, input.size()) + return torch._C._nn.adaptive_max_pool3d(input, output_size) + + +def _adaptive_max_pool3d(input: Tensor, output_size: BroadcastingList3[int], return_indices: bool = False) -> Tensor: + if has_torch_function_unary(input): + return handle_torch_function( + adaptive_max_pool3d, (input,), input, output_size, return_indices=return_indices + ) + return adaptive_max_pool3d_with_indices(input, output_size)[0] + + +adaptive_max_pool3d = boolean_dispatch( + arg_name="return_indices", + arg_index=2, + default=False, + if_true=adaptive_max_pool3d_with_indices, + if_false=_adaptive_max_pool3d, + module_name=__name__, + func_name="adaptive_max_pool3d", +) + + +adaptive_avg_pool1d = _add_docstr( + torch.adaptive_avg_pool1d, + r""" +adaptive_avg_pool1d(input, output_size) -> Tensor + +Applies a 1D adaptive average pooling over an input signal composed of +several input planes. + +See :class:`~torch.nn.AdaptiveAvgPool1d` for details and output shape. + +Args: + output_size: the target output size (single integer) +""", +) + + +def adaptive_avg_pool2d(input: Tensor, output_size: BroadcastingList2[int]) -> Tensor: + r"""Apply a 2D adaptive average pooling over an input signal composed of several input planes. + + See :class:`~torch.nn.AdaptiveAvgPool2d` for details and output shape. + + Args: + output_size: the target output size (single integer or + double-integer tuple) + """ + if has_torch_function_unary(input): + return handle_torch_function(adaptive_avg_pool2d, (input,), input, output_size) + _output_size = _list_with_default(output_size, input.size()) + return torch._C._nn.adaptive_avg_pool2d(input, _output_size) + + +def adaptive_avg_pool3d(input: Tensor, output_size: BroadcastingList3[int]) -> Tensor: + r"""Apply a 3D adaptive average pooling over an input signal composed of several input planes. + + See :class:`~torch.nn.AdaptiveAvgPool3d` for details and output shape. + + Args: + output_size: the target output size (single integer or + triple-integer tuple) + """ + if has_torch_function_unary(input): + return handle_torch_function(adaptive_avg_pool3d, (input,), input, output_size) + _output_size = _list_with_default(output_size, input.size()) + return torch._C._nn.adaptive_avg_pool3d(input, _output_size) + + +# Activation functions +def dropout(input: Tensor, p: float = 0.5, training: bool = True, inplace: bool = False) -> Tensor: + r"""During training, randomly zeroes some elements of the input tensor with probability :attr:`p`. + + Uses samples from a Bernoulli distribution. + + See :class:`~torch.nn.Dropout` for details. + + Args: + p: probability of an element to be zeroed. Default: 0.5 + training: apply dropout if is ``True``. Default: ``True`` + inplace: If set to ``True``, will do this operation in-place. Default: ``False`` + """ + if has_torch_function_unary(input): + return handle_torch_function(dropout, (input,), input, p=p, training=training, inplace=inplace) + if p < 0.0 or p > 1.0: + raise ValueError(f"dropout probability has to be between 0 and 1, but got {p}") + return _VF.dropout_(input, p, training) if inplace else _VF.dropout(input, p, training) + + +def alpha_dropout(input: Tensor, p: float = 0.5, training: bool = False, inplace: bool = False) -> Tensor: + r"""Apply alpha dropout to the input. 
+ + See :class:`~torch.nn.AlphaDropout` for details. + """ + if has_torch_function_unary(input): + return handle_torch_function(alpha_dropout, (input,), input, p=p, training=training, inplace=inplace) + if p < 0.0 or p > 1.0: + raise ValueError(f"dropout probability has to be between 0 and 1, but got {p}") + return _VF.alpha_dropout_(input, p, training) if inplace else _VF.alpha_dropout(input, p, training) + + +def dropout1d(input: Tensor, p: float = 0.5, training: bool = True, inplace: bool = False) -> Tensor: + r"""Randomly zero out entire channels (a channel is a 1D feature map). + + For example, the :math:`j`-th channel of the :math:`i`-th sample in the + batched input is a 1D tensor :math:`\text{input}[i, j]` of the input tensor. + Each channel will be zeroed out independently on every forward call with + probability :attr:`p` using samples from a Bernoulli distribution. + + See :class:`~torch.nn.Dropout1d` for details. + + Args: + p: probability of a channel to be zeroed. Default: 0.5 + training: apply dropout if is ``True``. Default: ``True`` + inplace: If set to ``True``, will do this operation in-place. Default: ``False`` + """ + if has_torch_function_unary(input): + return handle_torch_function(dropout1d, (input,), input, p=p, training=training, inplace=inplace) + if p < 0.0 or p > 1.0: + raise ValueError(f"dropout probability has to be between 0 and 1, but got {p}") + inp_dim = input.dim() + if inp_dim not in (2, 3): + raise RuntimeError(f"dropout1d: Expected 2D or 3D input, but received a {inp_dim}D input. " + "Note that dropout1d exists to provide channel-wise dropout on inputs with 1 " + "spatial dimension, a channel dimension, and an optional batch dimension " + "(i.e. 2D or 3D inputs).") + + is_batched = inp_dim == 3 + if not is_batched: + input = input.unsqueeze_(0) if inplace else input.unsqueeze(0) + + result = _VF.feature_dropout_(input, p, training) if inplace else _VF.feature_dropout(input, p, training) + + if not is_batched: + result = result.squeeze_(0) if inplace else result.squeeze(0) + + return result + + +def dropout2d(input: Tensor, p: float = 0.5, training: bool = True, inplace: bool = False) -> Tensor: + r"""Randomly zero out entire channels (a channel is a 2D feature map). + + For example, the :math:`j`-th channel of the :math:`i`-th sample in the + batched input is a 2D tensor :math:`\text{input}[i, j]` of the input tensor. + Each channel will be zeroed out independently on every forward call with + probability :attr:`p` using samples from a Bernoulli distribution. + + See :class:`~torch.nn.Dropout2d` for details. + + Args: + p: probability of a channel to be zeroed. Default: 0.5 + training: apply dropout if is ``True``. Default: ``True`` + inplace: If set to ``True``, will do this operation in-place. Default: ``False`` + """ + if has_torch_function_unary(input): + return handle_torch_function(dropout2d, (input,), input, p=p, training=training, inplace=inplace) + if p < 0.0 or p > 1.0: + raise ValueError(f"dropout probability has to be between 0 and 1, but got {p}") + inp_dim = input.dim() + if inp_dim not in (3, 4): + warn_msg = (f"dropout2d: Received a {inp_dim}-D input to dropout2d, which is deprecated " + "and will result in an error in a future release. To retain the behavior " + "and silence this warning, please use dropout instead. Note that dropout2d " + "exists to provide channel-wise dropout on inputs with 2 spatial dimensions, " + "a channel dimension, and an optional batch dimension (i.e. 
3D or 4D inputs).") + warnings.warn(warn_msg) + + # TODO: Properly support no-batch-dim inputs. For now, these are NOT supported; passing + # a 3D input will perform dropout1d behavior instead. This was done historically and the + # behavior is maintained here for now. + # See https://github.com/pytorch/pytorch/issues/77081 + if inp_dim == 3: + warnings.warn("dropout2d: Received a 3D input to dropout2d and assuming that channel-wise " + "1D dropout behavior is desired - input is interpreted as shape (N, C, L), where C " + "is the channel dim. This behavior will change in a future release to interpret the " + "input as one without a batch dimension, i.e. shape (C, H, W). To maintain the 1D " + "channel-wise dropout behavior, please switch to using dropout1d instead.") + + result = _VF.feature_dropout_(input, p, training) if inplace else _VF.feature_dropout(input, p, training) + + return result + + +def dropout3d(input: Tensor, p: float = 0.5, training: bool = True, inplace: bool = False) -> Tensor: + r"""Randomly zero out entire channels (a channel is a 3D feature map). + + For example, the :math:`j`-th channel of the :math:`i`-th sample in the + batched input is a 3D tensor :math:`\text{input}[i, j]` of the input tensor. + Each channel will be zeroed out independently on every forward call with + probability :attr:`p` using samples from a Bernoulli distribution. + + See :class:`~torch.nn.Dropout3d` for details. + + Args: + p: probability of a channel to be zeroed. Default: 0.5 + training: apply dropout if is ``True``. Default: ``True`` + inplace: If set to ``True``, will do this operation in-place. Default: ``False`` + """ + if has_torch_function_unary(input): + return handle_torch_function(dropout3d, (input,), input, p=p, training=training, inplace=inplace) + if p < 0.0 or p > 1.0: + raise ValueError(f"dropout probability has to be between 0 and 1, but got {p}") + inp_dim = input.dim() + if inp_dim not in (4, 5): + warn_msg = (f"dropout3d: Received a {inp_dim}-D input to dropout3d, which is deprecated " + "and will result in an error in a future release. To retain the behavior " + "and silence this warning, please use dropout instead. Note that dropout3d " + "exists to provide channel-wise dropout on inputs with 3 spatial dimensions, " + "a channel dimension, and an optional batch dimension (i.e. 4D or 5D inputs).") + warnings.warn(warn_msg) + + is_batched = inp_dim == 5 + if not is_batched: + input = input.unsqueeze_(0) if inplace else input.unsqueeze(0) + + result = _VF.feature_dropout_(input, p, training) if inplace else _VF.feature_dropout(input, p, training) + + if not is_batched: + result = result.squeeze_(0) if inplace else result.squeeze(0) + return result + + +def feature_alpha_dropout(input: Tensor, p: float = 0.5, training: bool = False, inplace: bool = False) -> Tensor: + r"""Randomly masks out entire channels (a channel is a feature map). + + For example, the :math:`j`-th channel of the :math:`i`-th sample in the batch input + is a tensor :math:`\text{input}[i, j]` of the input tensor. Instead of + setting activations to zero, as in regular Dropout, the activations are set + to the negative saturation value of the SELU activation function. + + Each element will be masked independently on every forward call with + probability :attr:`p` using samples from a Bernoulli distribution. + The elements to be masked are randomized on every forward call, and scaled + and shifted to maintain zero mean and unit variance. + + See :class:`~torch.nn.FeatureAlphaDropout` for details. 
+ + Args: + p: dropout probability of a channel to be zeroed. Default: 0.5 + training: apply dropout if is ``True``. Default: ``True`` + inplace: If set to ``True``, will do this operation in-place. Default: ``False`` + """ + if has_torch_function_unary(input): + return handle_torch_function( + feature_alpha_dropout, (input,), input, p=p, training=training, inplace=inplace + ) + if p < 0.0 or p > 1.0: + raise ValueError(f"dropout probability has to be between 0 and 1, but got {p}") + return _VF.feature_alpha_dropout_(input, p, training) if inplace else _VF.feature_alpha_dropout(input, p, training) + + +def _threshold(input: Tensor, threshold: float, value: float, inplace: bool = False) -> Tensor: + r"""Apply a threshold to each element of the input Tensor. + + See :class:`~torch.nn.Threshold` for more details. + """ + if has_torch_function_unary(input): + return handle_torch_function(_threshold, (input,), input, threshold, value, inplace=inplace) + if inplace: + result = _VF.threshold_(input, threshold, value) + else: + result = _VF.threshold(input, threshold, value) + return result + + +# We define this function as _threshold because it takes an argument +# named threshold, which clobbers the recursive reference to the +# function needed for __torch_function__ support +threshold = _threshold + +threshold_ = _add_docstr( + _VF.threshold_, + r""" +threshold_(input, threshold, value) -> Tensor + +In-place version of :func:`~threshold`. +""", +) + + +def relu(input: Tensor, inplace: bool = False) -> Tensor: # noqa: D400,D402 + r"""relu(input, inplace=False) -> Tensor + + Applies the rectified linear unit function element-wise. See + :class:`~torch.nn.ReLU` for more details. + """ + if has_torch_function_unary(input): + return handle_torch_function(relu, (input,), input, inplace=inplace) + if inplace: + result = torch.relu_(input) + else: + result = torch.relu(input) + return result + + +relu_ = _add_docstr( + torch.relu_, + r""" +relu_(input) -> Tensor + +In-place version of :func:`~relu`. +""", +) + + +def glu(input: Tensor, dim: int = -1) -> Tensor: # noqa: D400,D402 + r""" + glu(input, dim=-1) -> Tensor + + The gated linear unit. Computes: + + .. math :: + \text{GLU}(a, b) = a \otimes \sigma(b) + + where `input` is split in half along `dim` to form `a` and `b`, :math:`\sigma` + is the sigmoid function and :math:`\otimes` is the element-wise product between matrices. + + See `Language Modeling with Gated Convolutional Networks `_. + + Args: + input (Tensor): input tensor + dim (int): dimension on which to split the input. Default: -1 + """ + if has_torch_function_unary(input): + return handle_torch_function(glu, (input,), input, dim=dim) + if input.dim() == 0: + raise RuntimeError("glu does not support scalars because halving size must be even") + return torch._C._nn.glu(input, dim) + + +def hardtanh(input: Tensor, min_val: float = -1., max_val: float = 1., inplace: bool = False) -> Tensor: # noqa: D400,D402 + r""" + hardtanh(input, min_val=-1., max_val=1., inplace=False) -> Tensor + + Applies the HardTanh function element-wise. See :class:`~torch.nn.Hardtanh` for more + details. + """ + if has_torch_function_unary(input): + return handle_torch_function(hardtanh, (input,), input, min_val=min_val, max_val=max_val, inplace=inplace) + if inplace: + result = torch._C._nn.hardtanh_(input, min_val, max_val) + else: + result = torch._C._nn.hardtanh(input, min_val, max_val) + return result + + +hardtanh_ = _add_docstr( + torch._C._nn.hardtanh_, + r""" +hardtanh_(input, min_val=-1., max_val=1.) 
-> Tensor + +In-place version of :func:`~hardtanh`. +""", +) + + +def relu6(input: Tensor, inplace: bool = False) -> Tensor: # noqa: D400,D402 + r"""relu6(input, inplace=False) -> Tensor + + Applies the element-wise function :math:`\text{ReLU6}(x) = \min(\max(0,x), 6)`. + + See :class:`~torch.nn.ReLU6` for more details. + """ + if has_torch_function_unary(input): + return handle_torch_function(relu6, (input,), input, inplace=inplace) + if inplace: + result = torch._C._nn.relu6_(input) + else: + result = torch._C._nn.relu6(input) + return result + + +def elu(input: Tensor, alpha: float = 1.0, inplace: bool = False) -> Tensor: + r"""Apply the Exponential Linear Unit (ELU) function element-wise. + + See :class:`~torch.nn.ELU` for more details. + """ + if has_torch_function_unary(input): + return handle_torch_function(elu, (input,), input, alpha=alpha, inplace=inplace) + if inplace: + result = torch._C._nn.elu_(input, alpha) + else: + result = torch._C._nn.elu(input, alpha) + return result + + +elu_ = _add_docstr( + torch._C._nn.elu_, + r""" +elu_(input, alpha=1.) -> Tensor + +In-place version of :func:`~elu`. +""", +) + + +def selu(input: Tensor, inplace: bool = False) -> Tensor: # noqa: D400,D402 + r"""selu(input, inplace=False) -> Tensor + + Applies element-wise, + :math:`\text{SELU}(x) = scale * (\max(0,x) + \min(0, \alpha * (\exp(x) - 1)))`, + with :math:`\alpha=1.6732632423543772848170429916717` and + :math:`scale=1.0507009873554804934193349852946`. + + See :class:`~torch.nn.SELU` for more details. + """ + if has_torch_function_unary(input): + return handle_torch_function(selu, (input,), input, inplace=inplace) + if inplace: + result = torch.selu_(input) + else: + result = torch.selu(input) + return result + + +selu_ = _add_docstr( + torch.selu_, + r""" +selu_(input) -> Tensor + +In-place version of :func:`~selu`. +""", +) + + +def celu(input: Tensor, alpha: float = 1.0, inplace: bool = False) -> Tensor: # noqa: D400,D402 + r"""celu(input, alpha=1., inplace=False) -> Tensor + + Applies element-wise, + :math:`\text{CELU}(x) = \max(0,x) + \min(0, \alpha * (\exp(x/\alpha) - 1))`. + + See :class:`~torch.nn.CELU` for more details. + """ + if has_torch_function_unary(input): + return handle_torch_function(celu, (input,), input, alpha=alpha, inplace=inplace) + if inplace: + result = torch.celu_(input, alpha) + else: + result = torch.celu(input, alpha) + return result + + +celu_ = _add_docstr( + torch.celu_, + r""" +celu_(input, alpha=1.) -> Tensor + +In-place version of :func:`~celu`. +""", +) + + +def leaky_relu(input: Tensor, negative_slope: float = 0.01, inplace: bool = False) -> Tensor: # noqa: D400,D402 + r""" + leaky_relu(input, negative_slope=0.01, inplace=False) -> Tensor + + Applies element-wise, + :math:`\text{LeakyReLU}(x) = \max(0, x) + \text{negative\_slope} * \min(0, x)` + + See :class:`~torch.nn.LeakyReLU` for more details. + """ + if has_torch_function_unary(input): + return handle_torch_function(leaky_relu, (input,), input, negative_slope=negative_slope, inplace=inplace) + if inplace: + result = torch._C._nn.leaky_relu_(input, negative_slope) + else: + result = torch._C._nn.leaky_relu(input, negative_slope) + return result + + +leaky_relu_ = _add_docstr( + torch._C._nn.leaky_relu_, + r""" +leaky_relu_(input, negative_slope=0.01) -> Tensor + +In-place version of :func:`~leaky_relu`. 
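+
+Example (illustrative; note that ``x`` itself is modified)::
+
+    >>> x = torch.randn(4)
+    >>> _ = F.leaky_relu_(x)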
+""", +) + + +prelu = _add_docstr( + torch.prelu, + r"""prelu(input, weight) -> Tensor + +Applies element-wise the function +:math:`\text{PReLU}(x) = \max(0,x) + \text{weight} * \min(0,x)` where weight is a +learnable parameter. + +.. note:: + `weight` is expected to be a scalar or 1-D tensor. If `weight` is 1-D, + its size must match the number of input channels, determined by + `input.size(1)` when `input.dim() >= 2`, otherwise 1. + In the 1-D case, note that when `input` has dim > 2, `weight` can be expanded + to the shape of `input` in a way that is not possible using normal + :ref:`broadcasting semantics`. + +See :class:`~torch.nn.PReLU` for more details. +""") + + +def rrelu( + input: Tensor, lower: float = 1.0 / 8, upper: float = 1.0 / 3, training: bool = False, inplace: bool = False +) -> Tensor: # noqa: D400,D402 + r"""rrelu(input, lower=1./8, upper=1./3, training=False, inplace=False) -> Tensor + + Randomized leaky ReLU. + + See :class:`~torch.nn.RReLU` for more details. + """ + if has_torch_function_unary(input): + return handle_torch_function( + rrelu, (input,), input, lower=lower, upper=upper, training=training, inplace=inplace + ) + if inplace: + result = torch.rrelu_(input, lower, upper, training) + else: + result = torch.rrelu(input, lower, upper, training) + return result + + +rrelu_ = _add_docstr( + torch.rrelu_, + r""" +rrelu_(input, lower=1./8, upper=1./3, training=False) -> Tensor + +In-place version of :func:`~rrelu`. +""", +) + +logsigmoid = _add_docstr( + torch._C._nn.log_sigmoid, + r""" +logsigmoid(input) -> Tensor + +Applies element-wise :math:`\text{LogSigmoid}(x_i) = \log \left(\frac{1}{1 + \exp(-x_i)}\right)` + +See :class:`~torch.nn.LogSigmoid` for more details. +""", +) + +gelu = _add_docstr( + torch._C._nn.gelu, + r""" +gelu(input, approximate = 'none') -> Tensor + +When the approximate argument is 'none', it applies element-wise the function +:math:`\text{GELU}(x) = x * \Phi(x)` + +where :math:`\Phi(x)` is the Cumulative Distribution Function for Gaussian Distribution. + +When the approximate argument is 'tanh', Gelu is estimated with + +.. math:: + \text{GELU}(x) = 0.5 * x * (1 + \text{Tanh}(\sqrt{2 / \pi} * (x + 0.044715 * x^3))) + +See `Gaussian Error Linear Units (GELUs) `_. +""") + +hardshrink = _add_docstr( + torch.hardshrink, + r""" +hardshrink(input, lambd=0.5) -> Tensor + +Applies the hard shrinkage function element-wise + +See :class:`~torch.nn.Hardshrink` for more details. +""") + + +def tanhshrink(input): # noqa: D400,D402 + r"""tanhshrink(input) -> Tensor + + Applies element-wise, :math:`\text{Tanhshrink}(x) = x - \text{Tanh}(x)` + + See :class:`~torch.nn.Tanhshrink` for more details. + """ + if has_torch_function_unary(input): + return handle_torch_function(tanhshrink, (input,), input) + return input - input.tanh() + + +def softsign(input): # noqa: D400,D402 + r"""softsign(input) -> Tensor + + Applies element-wise, the function :math:`\text{SoftSign}(x) = \frac{x}{1 + |x|}` + + See :class:`~torch.nn.Softsign` for more details. + """ + if has_torch_function_unary(input): + return handle_torch_function(softsign, (input,), input) + return input / (input.abs() + 1) + + +softplus = _add_docstr( + torch._C._nn.softplus, + r""" +softplus(input, beta=1, threshold=20) -> Tensor + +Applies element-wise, the function :math:`\text{Softplus}(x) = \frac{1}{\beta} * \log(1 + \exp(\beta * x))`. + +For numerical stability the implementation reverts to the linear function +when :math:`input \times \beta > threshold`. 
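+
+Example (illustrative; in the linear regime ``beta * x > threshold`` the result is simply ``x``)::
+
+    >>> x = torch.tensor([-1.0, 0.0, 25.0])
+    >>> out = F.softplus(x, beta=1, threshold=20)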
+ +See :class:`~torch.nn.Softplus` for more details. +""", +) + + +def _get_softmax_dim(name: str, ndim: int, stacklevel: int) -> int: + warnings.warn( + f"Implicit dimension choice for {name} has been deprecated. Change the call to include dim=X as an argument.", + stacklevel=stacklevel, + ) + if ndim == 0 or ndim == 1 or ndim == 3: + ret = 0 + else: + ret = 1 + return ret + + +def softmin(input: Tensor, dim: Optional[int] = None, _stacklevel: int = 3, dtype: Optional[DType] = None) -> Tensor: + r"""Apply a softmin function. + + Note that :math:`\text{Softmin}(x) = \text{Softmax}(-x)`. See softmax definition for mathematical formula. + + See :class:`~torch.nn.Softmin` for more details. + + Args: + input (Tensor): input + dim (int): A dimension along which softmin will be computed (so every slice + along dim will sum to 1). + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + If specified, the input tensor is casted to :attr:`dtype` before the operation + is performed. This is useful for preventing data type overflows. Default: None. + """ + if has_torch_function_unary(input): + return handle_torch_function(softmin, (input,), input, dim=dim, _stacklevel=_stacklevel, dtype=dtype) + if dim is None: + dim = _get_softmax_dim("softmin", input.dim(), _stacklevel) + if dtype is None: + ret = (-input).softmax(dim) + else: + ret = (-input).softmax(dim, dtype=dtype) + return ret + + +def softmax(input: Tensor, dim: Optional[int] = None, _stacklevel: int = 3, dtype: Optional[DType] = None) -> Tensor: + r"""Apply a softmax function. + + Softmax is defined as: + + :math:`\text{Softmax}(x_{i}) = \frac{\exp(x_i)}{\sum_j \exp(x_j)}` + + It is applied to all slices along dim, and will re-scale them so that the elements + lie in the range `[0, 1]` and sum to 1. + + See :class:`~torch.nn.Softmax` for more details. + + Args: + input (Tensor): input + dim (int): A dimension along which softmax will be computed. + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + If specified, the input tensor is casted to :attr:`dtype` before the operation + is performed. This is useful for preventing data type overflows. Default: None. + + .. note:: + This function doesn't work directly with NLLLoss, + which expects the Log to be computed between the Softmax and itself. + Use log_softmax instead (it's faster and has better numerical properties). + + """ + if has_torch_function_unary(input): + return handle_torch_function(softmax, (input,), input, dim=dim, _stacklevel=_stacklevel, dtype=dtype) + if dim is None: + dim = _get_softmax_dim("softmax", input.dim(), _stacklevel) + if dtype is None: + ret = input.softmax(dim) + else: + ret = input.softmax(dim, dtype=dtype) + return ret + + +def gumbel_softmax(logits: Tensor, tau: float = 1, hard: bool = False, eps: float = 1e-10, dim: int = -1) -> Tensor: + r""" + Sample from the Gumbel-Softmax distribution (`Link 1`_ `Link 2`_) and optionally discretize. + + Args: + logits: `[..., num_features]` unnormalized log probabilities + tau: non-negative scalar temperature + hard: if ``True``, the returned samples will be discretized as one-hot vectors, + but will be differentiated as if it is the soft sample in autograd + dim (int): A dimension along which softmax will be computed. Default: -1. + + Returns: + Sampled tensor of same shape as `logits` from the Gumbel-Softmax distribution. + If ``hard=True``, the returned samples will be one-hot, otherwise they will + be probability distributions that sum to 1 across `dim`. 
+ + .. note:: + This function is here for legacy reasons, may be removed from nn.Functional in the future. + + .. note:: + The main trick for `hard` is to do `y_hard - y_soft.detach() + y_soft` + + It achieves two things: + - makes the output value exactly one-hot + (since we add then subtract y_soft value) + - makes the gradient equal to y_soft gradient + (since we strip all other gradients) + + Examples:: + >>> logits = torch.randn(20, 32) + >>> # Sample soft categorical using reparametrization trick: + >>> F.gumbel_softmax(logits, tau=1, hard=False) + >>> # Sample hard categorical using "Straight-through" trick: + >>> F.gumbel_softmax(logits, tau=1, hard=True) + + .. _Link 1: + https://arxiv.org/abs/1611.00712 + .. _Link 2: + https://arxiv.org/abs/1611.01144 + """ + if has_torch_function_unary(logits): + return handle_torch_function(gumbel_softmax, (logits,), logits, tau=tau, hard=hard, eps=eps, dim=dim) + if eps != 1e-10: + warnings.warn("`eps` parameter is deprecated and has no effect.") + + gumbels = ( + -torch.empty_like(logits, memory_format=torch.legacy_contiguous_format).exponential_().log() + ) # ~Gumbel(0,1) + gumbels = (logits + gumbels) / tau # ~Gumbel(logits,tau) + y_soft = gumbels.softmax(dim) + + if hard: + # Straight through. + index = y_soft.max(dim, keepdim=True)[1] + y_hard = torch.zeros_like(logits, memory_format=torch.legacy_contiguous_format).scatter_(dim, index, 1.0) + ret = y_hard - y_soft.detach() + y_soft + else: + # Reparametrization trick. + ret = y_soft + return ret + + +def log_softmax(input: Tensor, dim: Optional[int] = None, _stacklevel: int = 3, dtype: Optional[DType] = None) -> Tensor: + r"""Apply a softmax followed by a logarithm. + + While mathematically equivalent to log(softmax(x)), doing these two + operations separately is slower and numerically unstable. This function + uses an alternative formulation to compute the output and gradient correctly. + + See :class:`~torch.nn.LogSoftmax` for more details. + + Args: + input (Tensor): input + dim (int): A dimension along which log_softmax will be computed. + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + If specified, the input tensor is cast to :attr:`dtype` before the operation + is performed. This is useful for preventing data type overflows. Default: None. + """ + if has_torch_function_unary(input): + return handle_torch_function(log_softmax, (input,), input, dim=dim, _stacklevel=_stacklevel, dtype=dtype) + if dim is None: + dim = _get_softmax_dim("log_softmax", input.dim(), _stacklevel) + if dtype is None: + ret = input.log_softmax(dim) + else: + ret = input.log_softmax(dim, dtype=dtype) + return ret + + +softshrink = _add_docstr( + torch._C._nn.softshrink, + r""" +softshrink(input, lambd=0.5) -> Tensor + +Applies the soft shrinkage function elementwise + +See :class:`~torch.nn.Softshrink` for more details. +""", +) + + +def tanh(input): # noqa: D400,D402 + r"""tanh(input) -> Tensor + + Applies element-wise, + :math:`\text{Tanh}(x) = \tanh(x) = \frac{\exp(x) - \exp(-x)}{\exp(x) + \exp(-x)}` + + See :class:`~torch.nn.Tanh` for more details. + """ + return input.tanh() + + +def sigmoid(input): # noqa: D400,D402 + r"""sigmoid(input) -> Tensor + + Applies the element-wise function :math:`\text{Sigmoid}(x) = \frac{1}{1 + \exp(-x)}` + + See :class:`~torch.nn.Sigmoid` for more details. + """ + return input.sigmoid() + + +def hardsigmoid(input: Tensor, inplace: bool = False) -> Tensor: + r"""Apply the Hardsigmoid function element-wise. + + .. 
math:: + \text{Hardsigmoid}(x) = \begin{cases} + 0 & \text{if~} x \le -3, \\ + 1 & \text{if~} x \ge +3, \\ + x / 6 + 1 / 2 & \text{otherwise} + \end{cases} + + Args: + inplace: If set to ``True``, will do this operation in-place. Default: ``False`` + + See :class:`~torch.nn.Hardsigmoid` for more details. + """ + if has_torch_function_unary(input): + return handle_torch_function(hardsigmoid, (input,), input, inplace=inplace) + if inplace: + return torch._C._nn.hardsigmoid_(input) + return torch._C._nn.hardsigmoid(input) + + +linear = _add_docstr( + torch._C._nn.linear, + r""" +linear(input, weight, bias=None) -> Tensor + +Applies a linear transformation to the incoming data: :math:`y = xA^T + b`. + +This operation supports 2-D :attr:`weight` with :ref:`sparse layout` + +{sparse_beta_warning} + +This operator supports :ref:`TensorFloat32`. + +Shape: + + - Input: :math:`(*, in\_features)` where `*` means any number of + additional dimensions, including none + - Weight: :math:`(out\_features, in\_features)` or :math:`(in\_features)` + - Bias: :math:`(out\_features)` or :math:`()` + - Output: :math:`(*, out\_features)` or :math:`(*)`, based on the shape of the weight +""".format(**sparse_support_notes)) + + +bilinear = _add_docstr( + torch.bilinear, + r""" +bilinear(input1, input2, weight, bias=None) -> Tensor + +Applies a bilinear transformation to the incoming data: +:math:`y = x_1^T A x_2 + b` + +Shape: + + - input1: :math:`(N, *, H_{in1})` where :math:`H_{in1}=\text{in1\_features}` + and :math:`*` means any number of additional dimensions. + All but the last dimension of the inputs should be the same. + - input2: :math:`(N, *, H_{in2})` where :math:`H_{in2}=\text{in2\_features}` + - weight: :math:`(\text{out\_features}, \text{in1\_features}, + \text{in2\_features})` + - bias: :math:`(\text{out\_features})` + - output: :math:`(N, *, H_{out})` where :math:`H_{out}=\text{out\_features}` + and all but the last dimension are the same shape as the input. +""") + + +def silu(input: Tensor, inplace: bool = False) -> Tensor: + r"""Apply the Sigmoid Linear Unit (SiLU) function, element-wise. + + The SiLU function is also known as the swish function. + + .. math:: + \text{silu}(x) = x * \sigma(x), \text{where } \sigma(x) \text{ is the logistic sigmoid.} + + .. note:: + See `Gaussian Error Linear Units (GELUs) `_ + where the SiLU (Sigmoid Linear Unit) was originally coined, and see + `Sigmoid-Weighted Linear Units for Neural Network Function Approximation + in Reinforcement Learning `_ and `Swish: + a Self-Gated Activation Function `_ + where the SiLU was experimented with later. + + See :class:`~torch.nn.SiLU` for more details. + """ + if has_torch_function_unary(input): + return handle_torch_function(silu, (input,), input, inplace=inplace) + if inplace: + return torch._C._nn.silu_(input) + return torch._C._nn.silu(input) + + +def mish(input: Tensor, inplace: bool = False) -> Tensor: + r"""Apply the Mish function, element-wise. + + Mish: A Self Regularized Non-Monotonic Neural Activation Function. + + .. math:: + \text{Mish}(x) = x * \text{Tanh}(\text{Softplus}(x)) + + .. note:: + See `Mish: A Self Regularized Non-Monotonic Neural Activation Function `_ + + See :class:`~torch.nn.Mish` for more details. 
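+
+    Example (a minimal, illustrative call)::
+
+        >>> x = torch.randn(3)
+        >>> out = F.mish(x)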
+ """ + if has_torch_function_unary(input): + return handle_torch_function(mish, (input,), input, inplace=inplace) + if inplace: + return torch._C._nn.mish_(input) + return torch._C._nn.mish(input) + + +def hardswish(input: Tensor, inplace: bool = False) -> Tensor: + r"""Apply hardswish function, element-wise. + + Follows implementation as described in the paper: + `Searching for MobileNetV3`_. + + .. math:: + \text{Hardswish}(x) = \begin{cases} + 0 & \text{if~} x \le -3, \\ + x & \text{if~} x \ge +3, \\ + x \cdot (x + 3) /6 & \text{otherwise} + \end{cases} + + See :class:`~torch.nn.Hardswish` for more details. + + .. _`Searching for MobileNetV3`: + https://arxiv.org/abs/1905.02244 + """ + if has_torch_function_unary(input): + return handle_torch_function(hardswish, (input,), input, inplace=inplace) + if inplace: + return torch._C._nn.hardswish_(input) + return torch._C._nn.hardswish(input) + + +def _no_grad_embedding_renorm_(weight: Tensor, input: Tensor, max_norm: float, norm_type: float) -> Tuple[Tensor, Tensor]: + torch.embedding_renorm_(weight.detach(), input, max_norm, norm_type) + + +def embedding( + input: Tensor, + weight: Tensor, + padding_idx: Optional[int] = None, + max_norm: Optional[float] = None, + norm_type: float = 2.0, + scale_grad_by_freq: bool = False, + sparse: bool = False, +) -> Tensor: + r"""Generate a simple lookup table that looks up embeddings in a fixed dictionary and size. + + This module is often used to retrieve word embeddings using indices. + The input to the module is a list of indices, and the embedding matrix, + and the output is the corresponding word embeddings. + + See :class:`torch.nn.Embedding` for more details. + + .. note:: + Note that the analytical gradients of this function with respect to + entries in :attr:`weight` at the row specified by :attr:`padding_idx` + are expected to differ from the numerical ones. + + .. note:: + Note that `:class:`torch.nn.Embedding` differs from this function in + that it initializes the row of :attr:`weight` specified by + :attr:`padding_idx` to all zeros on construction. + + Args: + input (LongTensor): Tensor containing indices into the embedding matrix + weight (Tensor): The embedding matrix with number of rows equal to the maximum possible index + 1, + and number of columns equal to the embedding size + padding_idx (int, optional): If specified, the entries at :attr:`padding_idx` do not contribute to the gradient; + therefore, the embedding vector at :attr:`padding_idx` is not updated during training, + i.e. it remains as a fixed "pad". + max_norm (float, optional): If given, each embedding vector with norm larger than :attr:`max_norm` + is renormalized to have norm :attr:`max_norm`. + Note: this will modify :attr:`weight` in-place. + norm_type (float, optional): The p of the p-norm to compute for the :attr:`max_norm` option. Default ``2``. + scale_grad_by_freq (bool, optional): If given, this will scale gradients by the inverse of frequency of + the words in the mini-batch. Default ``False``. + sparse (bool, optional): If ``True``, gradient w.r.t. :attr:`weight` will be a sparse tensor. See Notes under + :class:`torch.nn.Embedding` for more details regarding sparse gradients. 
+ + Shape: + - Input: LongTensor of arbitrary shape containing the indices to extract + - Weight: Embedding matrix of floating point type with shape `(V, embedding_dim)`, + where V = maximum index + 1 and embedding_dim = the embedding size + - Output: `(*, embedding_dim)`, where `*` is the input shape + + Examples:: + + >>> # a batch of 2 samples of 4 indices each + >>> input = torch.tensor([[1, 2, 4, 5], [4, 3, 2, 9]]) + >>> # an embedding matrix containing 10 tensors of size 3 + >>> embedding_matrix = torch.rand(10, 3) + >>> # xdoctest: +IGNORE_WANT("non-deterministic") + >>> F.embedding(input, embedding_matrix) + tensor([[[ 0.8490, 0.9625, 0.6753], + [ 0.9666, 0.7761, 0.6108], + [ 0.6246, 0.9751, 0.3618], + [ 0.4161, 0.2419, 0.7383]], + + [[ 0.6246, 0.9751, 0.3618], + [ 0.0237, 0.7794, 0.0528], + [ 0.9666, 0.7761, 0.6108], + [ 0.3385, 0.8612, 0.1867]]]) + + >>> # example with padding_idx + >>> weights = torch.rand(10, 3) + >>> weights[0, :].zero_() + >>> embedding_matrix = weights + >>> input = torch.tensor([[0, 2, 0, 5]]) + >>> F.embedding(input, embedding_matrix, padding_idx=0) + tensor([[[ 0.0000, 0.0000, 0.0000], + [ 0.5609, 0.5384, 0.8720], + [ 0.0000, 0.0000, 0.0000], + [ 0.6262, 0.2438, 0.7471]]]) + """ + if has_torch_function_variadic(input, weight): + return handle_torch_function( + embedding, + (input, weight), + input, + weight, + padding_idx=padding_idx, + max_norm=max_norm, + norm_type=norm_type, + scale_grad_by_freq=scale_grad_by_freq, + sparse=sparse, + ) + if padding_idx is not None: + if padding_idx > 0: + assert padding_idx < weight.size(0), "Padding_idx must be within num_embeddings" + elif padding_idx < 0: + assert padding_idx >= -weight.size(0), "Padding_idx must be within num_embeddings" + padding_idx = weight.size(0) + padding_idx + else: + padding_idx = -1 + if max_norm is not None: + # Note [embedding_renorm contiguous] + # `embedding_renorm_` will call .contiguous() on input anyways, so we + # call it here and take advantage of the improved locality in the + # `embedding` call below too. + input = input.contiguous() + # Note [embedding_renorm set_grad_enabled] + # XXX: equivalent to + # with torch.no_grad(): + # torch.embedding_renorm_ + # remove once script supports set_grad_enabled + _no_grad_embedding_renorm_(weight, input, max_norm, norm_type) + return torch.embedding(weight, input, padding_idx, scale_grad_by_freq, sparse) + + +def embedding_bag( + input: Tensor, + weight: Tensor, + offsets: Optional[Tensor] = None, + max_norm: Optional[float] = None, + norm_type: float = 2, + scale_grad_by_freq: bool = False, + mode: str = "mean", + sparse: bool = False, + per_sample_weights: Optional[Tensor] = None, + include_last_offset: bool = False, + padding_idx: Optional[int] = None, +) -> Tensor: + r"""Compute sums, means or maxes of `bags` of embeddings. + + Calculation is done without instantiating the intermediate embeddings. + See :class:`torch.nn.EmbeddingBag` for more details. + + Note: + {backward_reproducibility_note} + + Args: + input (LongTensor): Tensor containing bags of indices into the embedding matrix + weight (Tensor): The embedding matrix with number of rows equal to the maximum possible index + 1, + and number of columns equal to the embedding size + offsets (LongTensor, optional): Only used when :attr:`input` is 1D. :attr:`offsets` determines + the starting index position of each bag (sequence) in :attr:`input`. 
+ max_norm (float, optional): If given, each embedding vector with norm larger than :attr:`max_norm` + is renormalized to have norm :attr:`max_norm`. + Note: this will modify :attr:`weight` in-place. + norm_type (float, optional): The ``p`` in the ``p``-norm to compute for the :attr:`max_norm` option. + Default ``2``. + scale_grad_by_freq (bool, optional): if given, this will scale gradients by the inverse of frequency of + the words in the mini-batch. Default ``False``. + Note: this option is not supported when ``mode="max"``. + mode (str, optional): ``"sum"``, ``"mean"`` or ``"max"``. Specifies the way to reduce the bag. + Default: ``"mean"`` + sparse (bool, optional): if ``True``, gradient w.r.t. :attr:`weight` will be a sparse tensor. See Notes under + :class:`torch.nn.Embedding` for more details regarding sparse gradients. + Note: this option is not supported when ``mode="max"``. + per_sample_weights (Tensor, optional): a tensor of float / double weights, or None + to indicate all weights should be taken to be 1. If specified, :attr:`per_sample_weights` + must have exactly the same shape as input and is treated as having the same + :attr:`offsets`, if those are not None. + + include_last_offset (bool, optional): if ``True``, the size of offsets is equal to the number of bags + 1. + The last element is the size of the input, or the ending index position of the last bag (sequence). + + padding_idx (int, optional): If specified, the entries at :attr:`padding_idx` do not contribute to the + gradient; therefore, the embedding vector at :attr:`padding_idx` is not updated + during training, i.e. it remains as a fixed "pad". Note that the embedding + vector at :attr:`padding_idx` is excluded from the reduction. + + Shape: + - :attr:`input` (LongTensor) and :attr:`offsets` (LongTensor, optional) + + - If :attr:`input` is 2D of shape `(B, N)`, it will be treated as ``B`` bags (sequences) + each of fixed length ``N``, and this will return ``B`` values aggregated in a way + depending on the :attr:`mode`. :attr:`offsets` is ignored and required to be ``None`` in this case. + + - If :attr:`input` is 1D of shape `(N)`, it will be treated as a concatenation of + multiple bags (sequences). :attr:`offsets` is required to be a 1D tensor containing + the starting index positions of each bag in :attr:`input`. Therefore, for :attr:`offsets` + of shape `(B)`, :attr:`input` will be viewed as having ``B`` bags. + Empty bags (i.e., having 0-length) will have returned vectors filled by zeros. + + - :attr:`weight` (Tensor): the learnable weights of the module of shape `(num_embeddings, embedding_dim)` + + - :attr:`per_sample_weights` (Tensor, optional). Has the same shape as :attr:`input`. 
+ + - :attr:`output`: aggregated embedding values of shape `(B, embedding_dim)` + + Examples:: + + >>> # an Embedding module containing 10 tensors of size 3 + >>> embedding_matrix = torch.rand(10, 3) + >>> # a batch of 2 samples of 4 indices each + >>> input = torch.tensor([1, 2, 4, 5, 4, 3, 2, 9]) + >>> offsets = torch.tensor([0, 4]) + >>> # xdoctest: +IGNORE_WANT("non-deterministic") + >>> F.embedding_bag(input, embedding_matrix, offsets) + tensor([[ 0.3397, 0.3552, 0.5545], + [ 0.5893, 0.4386, 0.5882]]) + + >>> # example with padding_idx + >>> embedding_matrix = torch.rand(10, 3) + >>> input = torch.tensor([2, 2, 2, 2, 4, 3, 2, 9]) + >>> offsets = torch.tensor([0, 4]) + >>> F.embedding_bag(input, embedding_matrix, offsets, padding_idx=2, mode='sum') + tensor([[ 0.0000, 0.0000, 0.0000], + [-0.7082, 3.2145, -2.6251]]) + """ + if has_torch_function_variadic(input, weight, offsets, per_sample_weights): + return handle_torch_function( + embedding_bag, + (input, weight, offsets, per_sample_weights), + input, + weight, + offsets=offsets, + max_norm=max_norm, + norm_type=norm_type, + scale_grad_by_freq=scale_grad_by_freq, + mode=mode, + sparse=sparse, + per_sample_weights=per_sample_weights, + include_last_offset=include_last_offset, + padding_idx=padding_idx, + ) + # Check for backward compatibility. + # Used to be embedding_bag(weight, input, ...) + # Now is embedding_bag(input, weight, ...) + if weight.dtype == torch.long and input.is_floating_point(): + warnings.warn( + "Argument order of nn.functional.embedding_bag was changed. " + "Usage `embedding_bag(weight, input, ...)` is deprecated, " + "and should now be `embedding_bag(input, weight, ...)`." + ) + weight, input = input, weight + + if per_sample_weights is not None and input.size() != per_sample_weights.size(): + raise ValueError( + f"embedding_bag: If per_sample_weights ({per_sample_weights.shape}) is not None, " + f"then it must have the same shape as the input ({input.shape})" + ) + + if not weight.dim() == 2: + raise ValueError( + f"weight has to be a 2D Tensor, but got Tensor of dimension {weight.dim()}" + ) + + if input.dim() == 2: + if offsets is not None: + type_str = "" + # TODO: Remove this once script supports type() calls + if not torch.jit.is_scripting(): + type_str = str(type(offsets)) + raise ValueError( + "if input is 2D, then offsets has to be None" + ", as input is treated is a mini-batch of" + " fixed length sequences. 
However, found " + f"offsets of type {type_str}" + ) + offsets = torch.arange(0, input.numel(), input.size(1), dtype=input.dtype, device=input.device) + + input = input.reshape(-1) + if per_sample_weights is not None: + per_sample_weights = per_sample_weights.reshape(-1) + elif input.dim() == 1: + if offsets is None: + raise ValueError("offsets has to be a 1D Tensor but got None") + if offsets.dim() != 1: + raise ValueError("offsets has to be a 1D Tensor") + else: + raise ValueError(f"input has to be 1D or 2D Tensor, but got Tensor of dimension {input.dim()}") + if mode == "sum": + mode_enum = 0 + elif mode == "mean": + mode_enum = 1 + elif mode == "max": + mode_enum = 2 + + if scale_grad_by_freq: + raise ValueError("max mode does not support scaling the gradient by the frequency") + + if sparse: + raise ValueError("max mode does not support sparse weights") + + else: + raise ValueError("mode has to be one of sum, mean or max") + + if max_norm is not None: + # XXX: equivalent to + # with torch.no_grad(): + # torch.nembedding_renorm_ + # remove once script supports set_grad_enabled + _no_grad_embedding_renorm_(weight, input, max_norm, norm_type) + + if per_sample_weights is not None and mode != "sum": + raise NotImplementedError( + "embedding_bag: per_sample_weights was not None. " + "per_sample_weights is only supported for mode='sum' " + f"(got mode='{mode}'). Please open a feature request on GitHub." + ) + + ret, _, _, _ = torch.embedding_bag( + weight, input, offsets, scale_grad_by_freq, mode_enum, sparse, per_sample_weights, include_last_offset, padding_idx + ) + return ret + + +if embedding_bag.__doc__: + embedding_bag.__doc__ = embedding_bag.__doc__.format(**reproducibility_notes) + + +def _verify_batch_size(size: List[int]) -> None: + # XXX: JIT script does not support the reduce from functools, and mul op is a + # builtin, which cannot be used as a value to a func yet, so rewrite this size + # check to a simple equivalent for loop + # + # TODO: make use of reduce like below when JIT is ready with the missing features: + # from operator import mul + # from functools import reduce + # + # if reduce(mul, size[2:], size[0]) == 1 + size_prods = size[0] + for i in range(len(size) - 2): + size_prods *= size[i + 2] + if size_prods == 1: + raise ValueError(f"Expected more than 1 value per channel when training, got input size {size}") + + +def batch_norm( + input: Tensor, + running_mean: Optional[Tensor], + running_var: Optional[Tensor], + weight: Optional[Tensor] = None, + bias: Optional[Tensor] = None, + training: bool = False, + momentum: float = 0.1, + eps: float = 1e-5, +) -> Tensor: + r"""Apply Batch Normalization for each channel across a batch of data. + + See :class:`~torch.nn.BatchNorm1d`, :class:`~torch.nn.BatchNorm2d`, + :class:`~torch.nn.BatchNorm3d` for details. + """ + if has_torch_function_variadic(input, running_mean, running_var, weight, bias): + return handle_torch_function( + batch_norm, + (input, running_mean, running_var, weight, bias), + input, + running_mean, + running_var, + weight=weight, + bias=bias, + training=training, + momentum=momentum, + eps=eps, + ) + if training: + _verify_batch_size(input.size()) + + return torch.batch_norm( + input, weight, bias, running_mean, running_var, training, momentum, eps, torch.backends.cudnn.enabled + ) + + +def _verify_spatial_size(size: List[int]) -> None: + # Verify that there is > 1 spatial element for instance norm calculation. 
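+    # e.g. an input of size (N, C, 1, 1) has a single spatial element per channel,
+    # so per-channel instance statistics could not be estimated from it.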
+ size_prods = 1 + for i in range(2, len(size)): + size_prods *= size[i] + if size_prods == 1: + raise ValueError(f"Expected more than 1 spatial element when training, got input size {size}") + + +def instance_norm( + input: Tensor, + running_mean: Optional[Tensor] = None, + running_var: Optional[Tensor] = None, + weight: Optional[Tensor] = None, + bias: Optional[Tensor] = None, + use_input_stats: bool = True, + momentum: float = 0.1, + eps: float = 1e-5, +) -> Tensor: + r"""Apply Instance Normalization independently for each channel in every data sample within a batch. + + See :class:`~torch.nn.InstanceNorm1d`, :class:`~torch.nn.InstanceNorm2d`, + :class:`~torch.nn.InstanceNorm3d` for details. + """ + if has_torch_function_variadic(input, running_mean, running_var, weight, bias): + return handle_torch_function( + instance_norm, + (input, running_mean, running_var, weight, bias), + input, + running_mean=running_mean, + running_var=running_var, + weight=weight, + bias=bias, + use_input_stats=use_input_stats, + momentum=momentum, + eps=eps, + ) + if use_input_stats: + _verify_spatial_size(input.size()) + return torch.instance_norm( + input, weight, bias, running_mean, running_var, use_input_stats, momentum, eps, torch.backends.cudnn.enabled + ) + + +def layer_norm( + input: Tensor, + normalized_shape: List[int], + weight: Optional[Tensor] = None, + bias: Optional[Tensor] = None, + eps: float = 1e-5, +) -> Tensor: + r"""Apply Layer Normalization for last certain number of dimensions. + + See :class:`~torch.nn.LayerNorm` for details. + """ + if has_torch_function_variadic(input, weight, bias): + return handle_torch_function( + layer_norm, (input, weight, bias), input, normalized_shape, weight=weight, bias=bias, eps=eps + ) + return torch.layer_norm(input, normalized_shape, weight, bias, eps, torch.backends.cudnn.enabled) + + +def group_norm( + input: Tensor, num_groups: int, weight: Optional[Tensor] = None, bias: Optional[Tensor] = None, eps: float = 1e-5 +) -> Tensor: + r"""Apply Group Normalization for last certain number of dimensions. + + See :class:`~torch.nn.GroupNorm` for details. + """ + if has_torch_function_variadic(input, weight, bias): + return handle_torch_function(group_norm, (input, weight, bias,), input, num_groups, weight=weight, bias=bias, eps=eps) + if input.dim() < 2: + raise RuntimeError(f"Expected at least 2 dimensions for input tensor but received {input.dim()}") + _verify_batch_size([input.size(0) * input.size(1) // num_groups, num_groups] + list(input.size()[2:])) + return torch.group_norm(input, num_groups, weight, bias, eps, torch.backends.cudnn.enabled) + + +def local_response_norm(input: Tensor, size: int, alpha: float = 1e-4, beta: float = 0.75, k: float = 1.0) -> Tensor: + r"""Apply local response normalization over an input signal. + + The input signal is composed of several input planes, where channels occupy the second dimension. + Normalization is applied across channels. + + See :class:`~torch.nn.LocalResponseNorm` for details. 
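+
+    Example (an illustrative call on a 4D ``(N, C, H, W)`` input)::
+
+        >>> signal = torch.randn(4, 8, 16, 16)
+        >>> out = F.local_response_norm(signal, size=3)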
+ """ + if has_torch_function_unary(input): + return handle_torch_function(local_response_norm, (input,), input, size, alpha=alpha, beta=beta, k=k) + dim = input.dim() + if dim < 3: + raise ValueError( + f"Expected 3D or higher dimensionality input (got {dim} dimensions)" + ) + + if input.numel() == 0: + return input + + div = input.mul(input) + if dim == 3: + div = div.unsqueeze(1) + div = pad(div, (0, 0, size // 2, (size - 1) // 2)) + div = avg_pool2d(div, (size, 1), stride=1).squeeze(1) + else: + sizes = input.size() + div = div.view(sizes[0], 1, sizes[1], sizes[2], -1) + div = pad(div, (0, 0, 0, 0, size // 2, (size - 1) // 2)) + div = avg_pool3d(div, (size, 1, 1), stride=1).squeeze(1) + div = div.view(sizes) + div = div.mul(alpha).add(k).pow(beta) + return input / div + + +# loss + + +def ctc_loss( + log_probs: Tensor, + targets: Tensor, + input_lengths: Tensor, + target_lengths: Tensor, + blank: int = 0, + reduction: str = "mean", + zero_infinity: bool = False, +) -> Tensor: + r"""Apply the Connectionist Temporal Classification loss. + + See :class:`~torch.nn.CTCLoss` for details. + + Note: + {cudnn_reproducibility_note} + + Note: + {backward_reproducibility_note} + + Args: + log_probs: :math:`(T, N, C)` or :math:`(T, C)` where `C = number of characters in alphabet including blank`, + `T = input length`, and `N = batch size`. + The logarithmized probabilities of the outputs + (e.g. obtained with :func:`torch.nn.functional.log_softmax`). + targets: :math:`(N, S)` or `(sum(target_lengths))`. + Targets cannot be blank. In the second form, the targets are assumed to be concatenated. + input_lengths: :math:`(N)` or :math:`()`. + Lengths of the inputs (must each be :math:`\leq T`) + target_lengths: :math:`(N)` or :math:`()`. + Lengths of the targets + blank (int, optional): + Blank label. Default :math:`0`. + reduction (str, optional): Specifies the reduction to apply to the output: + ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied, + ``'mean'``: the output losses will be divided by the target lengths and + then the mean over the batch is taken, ``'sum'``: the output will be + summed. Default: ``'mean'`` + zero_infinity (bool, optional): + Whether to zero infinite losses and the associated gradients. + Default: ``False`` + Infinite losses mainly occur when the inputs are too short + to be aligned to the targets. 
+ + Example:: + + >>> log_probs = torch.randn(50, 16, 20).log_softmax(2).detach().requires_grad_() + >>> targets = torch.randint(1, 20, (16, 30), dtype=torch.long) + >>> input_lengths = torch.full((16,), 50, dtype=torch.long) + >>> target_lengths = torch.randint(10, 30, (16,), dtype=torch.long) + >>> loss = F.ctc_loss(log_probs, targets, input_lengths, target_lengths) + >>> loss.backward() + """ + if has_torch_function_variadic(log_probs, targets, input_lengths, target_lengths): + return handle_torch_function( + ctc_loss, + (log_probs, targets, input_lengths, target_lengths), + log_probs, targets, input_lengths, target_lengths, + blank=blank, reduction=reduction, zero_infinity=zero_infinity + ) + return torch.ctc_loss( + log_probs, targets, input_lengths, target_lengths, blank, _Reduction.get_enum(reduction), zero_infinity + ) + + +if ctc_loss.__doc__: + ctc_loss.__doc__ = ctc_loss.__doc__.format(**reproducibility_notes) + + +def nll_loss( + input: Tensor, + target: Tensor, + weight: Optional[Tensor] = None, + size_average: Optional[bool] = None, + ignore_index: int = -100, + reduce: Optional[bool] = None, + reduction: str = "mean", +) -> Tensor: + r"""Compute the negative log likelihood loss. + + See :class:`~torch.nn.NLLLoss` for details. + + Args: + input: :math:`(N, C)` where `C = number of classes` or :math:`(N, C, H, W)` + in case of 2D Loss, or :math:`(N, C, d_1, d_2, ..., d_K)` where :math:`K \geq 1` + in the case of K-dimensional loss. `input` is expected to be log-probabilities. + target: :math:`(N)` where each value is :math:`0 \leq \text{targets}[i] \leq C-1`, + or :math:`(N, d_1, d_2, ..., d_K)` where :math:`K \geq 1` for + K-dimensional loss. + weight (Tensor, optional): a manual rescaling weight given to each + class. If given, has to be a Tensor of size `C` + size_average (bool, optional): Deprecated (see :attr:`reduction`). By default, + the losses are averaged over each loss element in the batch. Note that for + some losses, there multiple elements per sample. If the field :attr:`size_average` + is set to ``False``, the losses are instead summed for each minibatch. Ignored + when reduce is ``False``. Default: ``True`` + ignore_index (int, optional): Specifies a target value that is ignored + and does not contribute to the input gradient. When :attr:`size_average` is + ``True``, the loss is averaged over non-ignored targets. Default: -100 + reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the + losses are averaged or summed over observations for each minibatch depending + on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per + batch element instead and ignores :attr:`size_average`. Default: ``True`` + reduction (str, optional): Specifies the reduction to apply to the output: + ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied, + ``'mean'``: the sum of the output will be divided by the number of + elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average` + and :attr:`reduce` are in the process of being deprecated, and in the meantime, + specifying either of those two args will override :attr:`reduction`. 
Default: ``'mean'`` + + Example:: + + >>> # input is of size N x C = 3 x 5 + >>> input = torch.randn(3, 5, requires_grad=True) + >>> # each element in target has to have 0 <= value < C + >>> target = torch.tensor([1, 0, 4]) + >>> output = F.nll_loss(F.log_softmax(input, dim=1), target) + >>> output.backward() + """ + if has_torch_function_variadic(input, target, weight): + return handle_torch_function( + nll_loss, + (input, target, weight), + input, + target, + weight=weight, + size_average=size_average, + ignore_index=ignore_index, + reduce=reduce, + reduction=reduction, + ) + if size_average is not None or reduce is not None: + reduction = _Reduction.legacy_get_string(size_average, reduce) + return torch._C._nn.nll_loss_nd(input, target, weight, _Reduction.get_enum(reduction), ignore_index) + + +def poisson_nll_loss( + input: Tensor, + target: Tensor, + log_input: bool = True, + full: bool = False, + size_average: Optional[bool] = None, + eps: float = 1e-8, + reduce: Optional[bool] = None, + reduction: str = "mean", +) -> Tensor: + r"""Poisson negative log likelihood loss. + + See :class:`~torch.nn.PoissonNLLLoss` for details. + + Args: + input: expectation of underlying Poisson distribution. + target: random sample :math:`target \sim \text{Poisson}(input)`. + log_input: if ``True`` the loss is computed as + :math:`\exp(\text{input}) - \text{target} * \text{input}`, if ``False`` then loss is + :math:`\text{input} - \text{target} * \log(\text{input}+\text{eps})`. Default: ``True`` + full: whether to compute full loss, i. e. to add the Stirling + approximation term. Default: ``False`` + :math:`\text{target} * \log(\text{target}) - \text{target} + 0.5 * \log(2 * \pi * \text{target})`. + size_average (bool, optional): Deprecated (see :attr:`reduction`). By default, + the losses are averaged over each loss element in the batch. Note that for + some losses, there multiple elements per sample. If the field :attr:`size_average` + is set to ``False``, the losses are instead summed for each minibatch. Ignored + when reduce is ``False``. Default: ``True`` + eps (float, optional): Small value to avoid evaluation of :math:`\log(0)` when + :attr:`log_input`\ =\ ``False``. Default: 1e-8 + reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the + losses are averaged or summed over observations for each minibatch depending + on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per + batch element instead and ignores :attr:`size_average`. Default: ``True`` + reduction (str, optional): Specifies the reduction to apply to the output: + ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied, + ``'mean'``: the sum of the output will be divided by the number of + elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average` + and :attr:`reduce` are in the process of being deprecated, and in the meantime, + specifying either of those two args will override :attr:`reduction`. 
Default: ``'mean'`` + + """ + if has_torch_function_variadic(input, target): + return handle_torch_function( + poisson_nll_loss, + (input, target), + input, + target, + log_input=log_input, + full=full, + size_average=size_average, + eps=eps, + reduce=reduce, + reduction=reduction, + ) + if size_average is not None or reduce is not None: + reduction = _Reduction.legacy_get_string(size_average, reduce) + if reduction != "none" and reduction != "mean" and reduction != "sum": + ret = input + raise ValueError(reduction + " is not a valid value for reduction") + + ret = torch.poisson_nll_loss(input, target, log_input, full, eps, _Reduction.get_enum(reduction)) + return ret + + +def gaussian_nll_loss( + input: Tensor, + target: Tensor, + var: Tensor, + full: bool = False, + eps: float = 1e-6, + reduction: str = "mean", +) -> Tensor: + r"""Gaussian negative log likelihood loss. + + See :class:`~torch.nn.GaussianNLLLoss` for details. + + Args: + input: expectation of the Gaussian distribution. + target: sample from the Gaussian distribution. + var: tensor of positive variance(s), one for each of the expectations + in the input (heteroscedastic), or a single one (homoscedastic). + full (bool, optional): include the constant term in the loss calculation. Default: ``False``. + eps (float, optional): value added to var, for stability. Default: 1e-6. + reduction (str, optional): specifies the reduction to apply to the output: + ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied, + ``'mean'``: the output is the average of all batch member losses, + ``'sum'``: the output is the sum of all batch member losses. + Default: ``'mean'``. + """ + if has_torch_function_variadic(input, target, var): + return handle_torch_function( + gaussian_nll_loss, + (input, target, var), + input, + target, + var, + full=full, + eps=eps, + reduction=reduction, + ) + + # Check var size + # If var.size == input.size, the case is heteroscedastic and no further checks are needed. + # Otherwise: + if var.size() != input.size(): + + # If var is one dimension short of input, but the sizes match otherwise, then this is a homoscedastic case. + # e.g. input.size = (10, 2, 3), var.size = (10, 2) + # -> unsqueeze var so that var.shape = (10, 2, 1) + # this is done so that broadcasting can happen in the loss calculation + if input.size()[:-1] == var.size(): + var = torch.unsqueeze(var, -1) + + # This checks if the sizes match up to the final dimension, and the final dimension of var is of size 1. + # This is also a homoscedastic case. + # e.g. input.size = (10, 2, 3), var.size = (10, 2, 1) + elif input.size()[:-1] == var.size()[:-1] and var.size(-1) == 1: # Heteroscedastic case + pass + + # If none of the above pass, then the size of var is incorrect. 
+ else: + raise ValueError("var is of incorrect size") + + # Check validity of reduction mode + if reduction != 'none' and reduction != 'mean' and reduction != 'sum': + raise ValueError(reduction + " is not valid") + + # Entries of var must be non-negative + if torch.any(var < 0): + raise ValueError("var has negative entry/entries") + + # Clamp for stability + var = var.clone() + with torch.no_grad(): + var.clamp_(min=eps) + + # Calculate the loss + loss = 0.5 * (torch.log(var) + (input - target)**2 / var) + if full: + loss += 0.5 * math.log(2 * math.pi) + + if reduction == 'mean': + return loss.mean() + elif reduction == 'sum': + return loss.sum() + else: + return loss + + +def kl_div( + input: Tensor, + target: Tensor, + size_average: Optional[bool] = None, + reduce: Optional[bool] = None, + reduction: str = "mean", + log_target: bool = False, +) -> Tensor: + r"""Compute the KL Divergence loss. + + Refer - The `Kullback-Leibler divergence Loss + `__ + + See :class:`~torch.nn.KLDivLoss` for details. + + Args: + input: Tensor of arbitrary shape in log-probabilities. + target: Tensor of the same shape as input. See :attr:`log_target` for + the target's interpretation. + size_average (bool, optional): Deprecated (see :attr:`reduction`). By default, + the losses are averaged over each loss element in the batch. Note that for + some losses, there multiple elements per sample. If the field :attr:`size_average` + is set to ``False``, the losses are instead summed for each minibatch. Ignored + when reduce is ``False``. Default: ``True`` + reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the + losses are averaged or summed over observations for each minibatch depending + on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per + batch element instead and ignores :attr:`size_average`. Default: ``True`` + reduction (str, optional): Specifies the reduction to apply to the output: + ``'none'`` | ``'batchmean'`` | ``'sum'`` | ``'mean'``. + ``'none'``: no reduction will be applied + ``'batchmean'``: the sum of the output will be divided by the batchsize + ``'sum'``: the output will be summed + ``'mean'``: the output will be divided by the number of elements in the output + Default: ``'mean'`` + log_target (bool): A flag indicating whether ``target`` is passed in the log space. + It is recommended to pass certain distributions (like ``softmax``) + in the log space to avoid numerical issues caused by explicit ``log``. + Default: ``False`` + + .. note:: + :attr:`size_average` and :attr:`reduce` are in the process of being deprecated, + and in the meantime, specifying either of those two args will override :attr:`reduction`. + + .. warning:: + :attr:`reduction` = ``'mean'`` doesn't return the true kl divergence value, please use + :attr:`reduction` = ``'batchmean'`` which aligns with KL math definition. + """ + if has_torch_function_variadic(input, target): + return handle_torch_function( + kl_div, + (input, target), + input, + target, + size_average=size_average, + reduce=reduce, + reduction=reduction, + log_target=log_target, + ) + if size_average is not None or reduce is not None: + reduction_enum = _Reduction.legacy_get_enum(size_average, reduce) + else: + if reduction == "mean": + warnings.warn( + "reduction: 'mean' divides the total loss by both the batch size and the support size." + "'batchmean' divides only by the batch size, and aligns with the KL div math definition." 
+ "'mean' will be changed to behave the same as 'batchmean' in the next major release." + ) + + # special case for batchmean + if reduction == "batchmean": + reduction_enum = _Reduction.get_enum("sum") + else: + reduction_enum = _Reduction.get_enum(reduction) + + reduced = torch.kl_div(input, target, reduction_enum, log_target=log_target) + + if reduction == "batchmean" and input.dim() != 0: + reduced = reduced / input.size()[0] + + return reduced + + +def cross_entropy( + input: Tensor, + target: Tensor, + weight: Optional[Tensor] = None, + size_average: Optional[bool] = None, + ignore_index: int = -100, + reduce: Optional[bool] = None, + reduction: str = "mean", + label_smoothing: float = 0.0, +) -> Tensor: + r"""Compute the cross entropy loss between input logits and target. + + See :class:`~torch.nn.CrossEntropyLoss` for details. + + Args: + input (Tensor) : Predicted unnormalized logits; + see Shape section below for supported shapes. + target (Tensor) : Ground truth class indices or class probabilities; + see Shape section below for supported shapes. + weight (Tensor, optional): a manual rescaling weight given to each + class. If given, has to be a Tensor of size `C` + size_average (bool, optional): Deprecated (see :attr:`reduction`). By default, + the losses are averaged over each loss element in the batch. Note that for + some losses, there multiple elements per sample. If the field :attr:`size_average` + is set to ``False``, the losses are instead summed for each minibatch. Ignored + when reduce is ``False``. Default: ``True`` + ignore_index (int, optional): Specifies a target value that is ignored + and does not contribute to the input gradient. When :attr:`size_average` is + ``True``, the loss is averaged over non-ignored targets. Note that + :attr:`ignore_index` is only applicable when the target contains class indices. + Default: -100 + reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the + losses are averaged or summed over observations for each minibatch depending + on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per + batch element instead and ignores :attr:`size_average`. Default: ``True`` + reduction (str, optional): Specifies the reduction to apply to the output: + ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied, + ``'mean'``: the sum of the output will be divided by the number of + elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average` + and :attr:`reduce` are in the process of being deprecated, and in the meantime, + specifying either of those two args will override :attr:`reduction`. Default: ``'mean'`` + label_smoothing (float, optional): A float in [0.0, 1.0]. Specifies the amount + of smoothing when computing the loss, where 0.0 means no smoothing. The targets + become a mixture of the original ground truth and a uniform distribution as described in + `Rethinking the Inception Architecture for Computer Vision `__. Default: :math:`0.0`. + + Shape: + - Input: Shape :math:`(C)`, :math:`(N, C)` or :math:`(N, C, d_1, d_2, ..., d_K)` with :math:`K \geq 1` + in the case of `K`-dimensional loss. + - Target: If containing class indices, shape :math:`()`, :math:`(N)` or :math:`(N, d_1, d_2, ..., d_K)` with + :math:`K \geq 1` in the case of K-dimensional loss where each value should be between :math:`[0, C)`. + If containing class probabilities, same shape as the input and each value should be between :math:`[0, 1]`. + + where: + + .. 
math:: + \begin{aligned} + C ={} & \text{number of classes} \\ + N ={} & \text{batch size} \\ + \end{aligned} + + Examples:: + + >>> # Example of target with class indices + >>> input = torch.randn(3, 5, requires_grad=True) + >>> target = torch.randint(5, (3,), dtype=torch.int64) + >>> loss = F.cross_entropy(input, target) + >>> loss.backward() + >>> + >>> # Example of target with class probabilities + >>> input = torch.randn(3, 5, requires_grad=True) + >>> target = torch.randn(3, 5).softmax(dim=1) + >>> loss = F.cross_entropy(input, target) + >>> loss.backward() + """ + if has_torch_function_variadic(input, target, weight): + return handle_torch_function( + cross_entropy, + (input, target, weight), + input, + target, + weight=weight, + size_average=size_average, + ignore_index=ignore_index, + reduce=reduce, + reduction=reduction, + label_smoothing=label_smoothing, + ) + if size_average is not None or reduce is not None: + reduction = _Reduction.legacy_get_string(size_average, reduce) + return torch._C._nn.cross_entropy_loss(input, target, weight, _Reduction.get_enum(reduction), ignore_index, label_smoothing) + + +def binary_cross_entropy( + input: Tensor, + target: Tensor, + weight: Optional[Tensor] = None, + size_average: Optional[bool] = None, + reduce: Optional[bool] = None, + reduction: str = "mean", +) -> Tensor: + r"""Measure Binary Cross Entropy between the target and input probabilities. + + See :class:`~torch.nn.BCELoss` for details. + + Args: + input: Tensor of arbitrary shape as probabilities. + target: Tensor of the same shape as input with values between 0 and 1. + weight (Tensor, optional): a manual rescaling weight + if provided it's repeated to match input tensor shape + size_average (bool, optional): Deprecated (see :attr:`reduction`). By default, + the losses are averaged over each loss element in the batch. Note that for + some losses, there multiple elements per sample. If the field :attr:`size_average` + is set to ``False``, the losses are instead summed for each minibatch. Ignored + when reduce is ``False``. Default: ``True`` + reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the + losses are averaged or summed over observations for each minibatch depending + on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per + batch element instead and ignores :attr:`size_average`. Default: ``True`` + reduction (str, optional): Specifies the reduction to apply to the output: + ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied, + ``'mean'``: the sum of the output will be divided by the number of + elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average` + and :attr:`reduce` are in the process of being deprecated, and in the meantime, + specifying either of those two args will override :attr:`reduction`. 
Default: ``'mean'`` + + Examples:: + + >>> input = torch.randn(3, 2, requires_grad=True) + >>> target = torch.rand(3, 2, requires_grad=False) + >>> loss = F.binary_cross_entropy(torch.sigmoid(input), target) + >>> loss.backward() + """ + if has_torch_function_variadic(input, target, weight): + return handle_torch_function( + binary_cross_entropy, + (input, target, weight), + input, + target, + weight=weight, + size_average=size_average, + reduce=reduce, + reduction=reduction, + ) + if size_average is not None or reduce is not None: + reduction_enum = _Reduction.legacy_get_enum(size_average, reduce) + else: + reduction_enum = _Reduction.get_enum(reduction) + if target.size() != input.size(): + raise ValueError( + "Using a target size ({}) that is different to the input size ({}) is deprecated. " + "Please ensure they have the same size.".format(target.size(), input.size()) + ) + + if weight is not None: + new_size = _infer_size(target.size(), weight.size()) + weight = weight.expand(new_size) + + return torch._C._nn.binary_cross_entropy(input, target, weight, reduction_enum) + + +def binary_cross_entropy_with_logits( + input: Tensor, + target: Tensor, + weight: Optional[Tensor] = None, + size_average: Optional[bool] = None, + reduce: Optional[bool] = None, + reduction: str = "mean", + pos_weight: Optional[Tensor] = None, +) -> Tensor: + r"""Calculate Binary Cross Entropy between target and input logits. + + See :class:`~torch.nn.BCEWithLogitsLoss` for details. + + Args: + input: Tensor of arbitrary shape as unnormalized scores (often referred to as logits). + target: Tensor of the same shape as input with values between 0 and 1 + weight (Tensor, optional): a manual rescaling weight + if provided it's repeated to match input tensor shape + size_average (bool, optional): Deprecated (see :attr:`reduction`). By default, + the losses are averaged over each loss element in the batch. Note that for + some losses, there multiple elements per sample. If the field :attr:`size_average` + is set to ``False``, the losses are instead summed for each minibatch. Ignored + when reduce is ``False``. Default: ``True`` + reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the + losses are averaged or summed over observations for each minibatch depending + on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per + batch element instead and ignores :attr:`size_average`. Default: ``True`` + reduction (str, optional): Specifies the reduction to apply to the output: + ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied, + ``'mean'``: the sum of the output will be divided by the number of + elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average` + and :attr:`reduce` are in the process of being deprecated, and in the meantime, + specifying either of those two args will override :attr:`reduction`. Default: ``'mean'`` + pos_weight (Tensor, optional): a weight of positive examples to be broadcasted with target. + Must be a tensor with equal size along the class dimension to the number of classes. + Pay close attention to PyTorch's broadcasting semantics in order to achieve the desired + operations. For a target of size [B, C, H, W] (where B is batch size) pos_weight of + size [B, C, H, W] will apply different pos_weights to each element of the batch or + [C, H, W] the same pos_weights across the batch. To apply the same positive weight + along all spacial dimensions for a 2D multi-class target [C, H, W] use: [C, 1, 1]. 
+ Default: ``None`` + + Examples:: + + >>> input = torch.randn(3, requires_grad=True) + >>> target = torch.empty(3).random_(2) + >>> loss = F.binary_cross_entropy_with_logits(input, target) + >>> loss.backward() + """ + if has_torch_function_variadic(input, target, weight, pos_weight): + return handle_torch_function( + binary_cross_entropy_with_logits, + (input, target, weight, pos_weight), + input, + target, + weight=weight, + size_average=size_average, + reduce=reduce, + reduction=reduction, + pos_weight=pos_weight, + ) + if size_average is not None or reduce is not None: + reduction_enum = _Reduction.legacy_get_enum(size_average, reduce) + else: + reduction_enum = _Reduction.get_enum(reduction) + + if not (target.size() == input.size()): + raise ValueError(f"Target size ({target.size()}) must be the same as input size ({input.size()})") + + return torch.binary_cross_entropy_with_logits(input, target, weight, pos_weight, reduction_enum) + + +def smooth_l1_loss( + input: Tensor, + target: Tensor, + size_average: Optional[bool] = None, + reduce: Optional[bool] = None, + reduction: str = "mean", + beta: float = 1.0, +) -> Tensor: + r"""Compute the Smooth L1 loss. + + Function uses a squared term if the absolute + element-wise error falls below beta and an L1 term otherwise. + + See :class:`~torch.nn.SmoothL1Loss` for details. + """ + if has_torch_function_variadic(input, target): + return handle_torch_function( + smooth_l1_loss, + (input, target), + input, + target, + size_average=size_average, + reduce=reduce, + reduction=reduction, + beta=beta, + ) + if not (target.size() == input.size()): + warnings.warn( + f"Using a target size ({target.size()}) that is different to the input size ({input.size()}). " + "This will likely lead to incorrect results due to broadcasting. " + "Please ensure they have the same size.", + stacklevel=2, + ) + if size_average is not None or reduce is not None: + reduction = _Reduction.legacy_get_string(size_average, reduce) + + expanded_input, expanded_target = torch.broadcast_tensors(input, target) + + if beta == 0.0: + return torch._C._nn.l1_loss(expanded_input, expanded_target, _Reduction.get_enum(reduction)) + else: + return torch._C._nn.smooth_l1_loss(expanded_input, expanded_target, _Reduction.get_enum(reduction), beta) + + +def huber_loss( + input: Tensor, + target: Tensor, + reduction: str = 'mean', + delta: float = 1.0, +) -> Tensor: + r"""Compute the Huber loss. + + Function uses a squared term if the absolute + element-wise error falls below delta and a delta-scaled L1 term otherwise. + + When delta equals 1, this loss is equivalent to SmoothL1Loss. + In general, Huber loss differs from SmoothL1Loss by a factor of delta (AKA beta in Smooth L1). + + See :class:`~torch.nn.HuberLoss` for details. + """ + if has_torch_function_variadic(input, target): + return handle_torch_function( + huber_loss, + (input, target), + input, + target, + reduction=reduction, + delta=delta, + ) + if not (target.size() == input.size()): + warnings.warn(f"Using a target size ({target.size()}) that is different to the input size ({input.size()}). " + "This will likely lead to incorrect results due to broadcasting. 
" + "Please ensure they have the same size.", + stacklevel=2) + + expanded_input, expanded_target = torch.broadcast_tensors(input, target) + return torch._C._nn.huber_loss(expanded_input, expanded_target, _Reduction.get_enum(reduction), delta) + + +def l1_loss( + input: Tensor, + target: Tensor, + size_average: Optional[bool] = None, + reduce: Optional[bool] = None, + reduction: str = "mean", +) -> Tensor: # noqa: D400,D402 + r"""l1_loss(input, target, size_average=None, reduce=None, reduction='mean') -> Tensor + + Function that takes the mean element-wise absolute value difference. + + See :class:`~torch.nn.L1Loss` for details. + """ + if has_torch_function_variadic(input, target): + return handle_torch_function( + l1_loss, (input, target), input, target, size_average=size_average, reduce=reduce, reduction=reduction + ) + if not (target.size() == input.size()): + warnings.warn( + f"Using a target size ({target.size()}) that is different to the input size ({input.size()}). " + "This will likely lead to incorrect results due to broadcasting. " + "Please ensure they have the same size.", + stacklevel=2, + ) + if size_average is not None or reduce is not None: + reduction = _Reduction.legacy_get_string(size_average, reduce) + + expanded_input, expanded_target = torch.broadcast_tensors(input, target) + return torch._C._nn.l1_loss(expanded_input, expanded_target, _Reduction.get_enum(reduction)) + + +def mse_loss( + input: Tensor, + target: Tensor, + size_average: Optional[bool] = None, + reduce: Optional[bool] = None, + reduction: str = "mean", +) -> Tensor: # noqa: D400,D402 + r"""mse_loss(input, target, size_average=None, reduce=None, reduction='mean') -> Tensor + + Measures the element-wise mean squared error. + See :class:`~torch.nn.MSELoss` for details. + """ + if has_torch_function_variadic(input, target): + return handle_torch_function( + mse_loss, (input, target), input, target, size_average=size_average, reduce=reduce, reduction=reduction + ) + if not (target.size() == input.size()): + warnings.warn( + f"Using a target size ({target.size()}) that is different to the input size ({input.size()}). " + "This will likely lead to incorrect results due to broadcasting. " + "Please ensure they have the same size.", + stacklevel=2, + ) + if size_average is not None or reduce is not None: + reduction = _Reduction.legacy_get_string(size_average, reduce) + + expanded_input, expanded_target = torch.broadcast_tensors(input, target) + return torch._C._nn.mse_loss(expanded_input, expanded_target, _Reduction.get_enum(reduction)) + + +def margin_ranking_loss( + input1: Tensor, + input2: Tensor, + target: Tensor, + margin: float = 0, + size_average: Optional[bool] = None, + reduce: Optional[bool] = None, + reduction: str = "mean", +) -> Tensor: # noqa: D400,D402 + r"""margin_ranking_loss(input1, input2, target, margin=0, size_average=None, reduce=None, reduction='mean') -> Tensor + + See :class:`~torch.nn.MarginRankingLoss` for details. 
+ """ + if has_torch_function_variadic(input1, input2, target): + return handle_torch_function( + margin_ranking_loss, + (input1, input2, target), + input1, + input2, + target, + margin=margin, + size_average=size_average, + reduce=reduce, + reduction=reduction, + ) + if size_average is not None or reduce is not None: + reduction_enum = _Reduction.legacy_get_enum(size_average, reduce) + else: + reduction_enum = _Reduction.get_enum(reduction) + if (input1.dim() != input2.dim() or input1.dim() != target.dim()): + raise RuntimeError( + f"margin_ranking_loss : All input tensors should have same dimension but got sizes: " + f"input1: {input1.size()}, input2: {input2.size()}, target: {target.size()} " + ) + return torch.margin_ranking_loss(input1, input2, target, margin, reduction_enum) + + +def hinge_embedding_loss( + input: Tensor, + target: Tensor, + margin: float = 1.0, + size_average: Optional[bool] = None, + reduce: Optional[bool] = None, + reduction: str = "mean", +) -> Tensor: # noqa: D400,D402 + r"""hinge_embedding_loss(input, target, margin=1.0, size_average=None, reduce=None, reduction='mean') -> Tensor + + See :class:`~torch.nn.HingeEmbeddingLoss` for details. + """ + if has_torch_function_variadic(input, target): + return handle_torch_function( + hinge_embedding_loss, + (input, target), + input, + target, + margin=margin, + size_average=size_average, + reduce=reduce, + reduction=reduction, + ) + if size_average is not None or reduce is not None: + reduction_enum = _Reduction.legacy_get_enum(size_average, reduce) + else: + reduction_enum = _Reduction.get_enum(reduction) + return torch.hinge_embedding_loss(input, target, margin, reduction_enum) + + +def multilabel_margin_loss( + input: Tensor, + target: Tensor, + size_average: Optional[bool] = None, + reduce: Optional[bool] = None, + reduction: str = "mean", +) -> Tensor: # noqa: D400,D402 + r"""multilabel_margin_loss(input, target, size_average=None, reduce=None, reduction='mean') -> Tensor + + See :class:`~torch.nn.MultiLabelMarginLoss` for details. + """ + if has_torch_function_variadic(input, target): + return handle_torch_function( + multilabel_margin_loss, + (input, target), + input, + target, + size_average=size_average, + reduce=reduce, + reduction=reduction, + ) + if size_average is not None or reduce is not None: + reduction_enum = _Reduction.legacy_get_enum(size_average, reduce) + else: + reduction_enum = _Reduction.get_enum(reduction) + return torch._C._nn.multilabel_margin_loss(input, target, reduction_enum) + + +def soft_margin_loss( + input: Tensor, + target: Tensor, + size_average: Optional[bool] = None, + reduce: Optional[bool] = None, + reduction: str = "mean", +) -> Tensor: # noqa: D400,D402 + r""" + soft_margin_loss(input, target, size_average=None, reduce=None, reduction='mean') -> Tensor + + See :class:`~torch.nn.SoftMarginLoss` for details. 
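+
+    A minimal usage sketch (shapes and values are illustrative only)::
+
+        >>> input = torch.randn(3, 5, requires_grad=True)
+        >>> target = torch.randint(0, 2, (3, 5)).float() * 2 - 1  # labels in {-1, 1}
+        >>> loss = F.soft_margin_loss(input, target)
+        >>> loss.backward()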
+ """ + if has_torch_function_variadic(input, target): + return handle_torch_function( + soft_margin_loss, (input, target), input, target, size_average=size_average, reduce=reduce, reduction=reduction + ) + if size_average is not None or reduce is not None: + reduction_enum = _Reduction.legacy_get_enum(size_average, reduce) + else: + reduction_enum = _Reduction.get_enum(reduction) + return torch._C._nn.soft_margin_loss(input, target, reduction_enum) + + +def multilabel_soft_margin_loss( + input: Tensor, + target: Tensor, + weight: Optional[Tensor] = None, + size_average: Optional[bool] = None, + reduce: Optional[bool] = None, + reduction: str = "mean", +) -> Tensor: # noqa: D400,D402 + r"""multilabel_soft_margin_loss(input, target, weight=None, size_average=None, reduce=None, reduction='mean') -> Tensor + + See :class:`~torch.nn.MultiLabelSoftMarginLoss` for details. + """ + if has_torch_function_variadic(input, target, weight): + return handle_torch_function( + multilabel_soft_margin_loss, + (input, target, weight), + input, + target, + weight=weight, + size_average=size_average, + reduce=reduce, + reduction=reduction, + ) + if size_average is not None or reduce is not None: + reduction = _Reduction.legacy_get_string(size_average, reduce) + + loss = -(target * logsigmoid(input) + (1 - target) * logsigmoid(-input)) + + if weight is not None: + loss = loss * weight + + class_dim = input.dim() - 1 + C = input.size(class_dim) + loss = loss.sum(dim=class_dim) / C # only return N loss values + + if reduction == "none": + ret = loss + elif reduction == "mean": + ret = loss.mean() + elif reduction == "sum": + ret = loss.sum() + else: + ret = input + raise ValueError(reduction + " is not valid") + return ret + + +def cosine_embedding_loss( + input1: Tensor, + input2: Tensor, + target: Tensor, + margin: float = 0, + size_average: Optional[bool] = None, + reduce: Optional[bool] = None, + reduction: str = "mean", +) -> Tensor: # noqa: D400,D402 + r"""cosine_embedding_loss(input1, input2, target, margin=0, size_average=None, reduce=None, reduction='mean') -> Tensor + + See :class:`~torch.nn.CosineEmbeddingLoss` for details. + """ + if has_torch_function_variadic(input1, input2, target): + return handle_torch_function( + cosine_embedding_loss, + (input1, input2, target), + input1, + input2, + target, + margin=margin, + size_average=size_average, + reduce=reduce, + reduction=reduction, + ) + if size_average is not None or reduce is not None: + reduction_enum = _Reduction.legacy_get_enum(size_average, reduce) + else: + reduction_enum = _Reduction.get_enum(reduction) + return torch.cosine_embedding_loss(input1, input2, target, margin, reduction_enum) + + +def multi_margin_loss( + input: Tensor, + target: Tensor, + p: int = 1, + margin: float = 1.0, + weight: Optional[Tensor] = None, + size_average: Optional[bool] = None, + reduce: Optional[bool] = None, + reduction: str = "mean", +) -> Tensor: # noqa: D400,D402 + r"""multi_margin_loss(input, target, p=1, margin=1, weight=None, size_average=None, reduce=None, reduction='mean') -> Tensor + + See :class:`~torch.nn.MultiMarginLoss` for details. 
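+
+    A minimal usage sketch (shapes and values are illustrative only)::
+
+        >>> input = torch.randn(3, 5, requires_grad=True)  # N = 3 samples, C = 5 classes
+        >>> target = torch.tensor([1, 0, 4])  # one class index per sample
+        >>> loss = F.multi_margin_loss(input, target)
+        >>> loss.backward()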
+    """
+    if has_torch_function_variadic(input, target, weight):
+        return handle_torch_function(
+            multi_margin_loss,
+            (input, target, weight),
+            input,
+            target,
+            p=p,
+            margin=margin,
+            weight=weight,
+            size_average=size_average,
+            reduce=reduce,
+            reduction=reduction,
+        )
+    if size_average is not None or reduce is not None:
+        reduction_enum = _Reduction.legacy_get_enum(size_average, reduce)
+    else:
+        reduction_enum = _Reduction.get_enum(reduction)
+    if p != 1 and p != 2:
+        raise ValueError("only p == 1 and p == 2 supported")
+    if weight is not None:
+        if weight.dim() != 1:
+            raise ValueError("weight must be one-dimensional")
+
+    return torch._C._nn.multi_margin_loss(input, target, p, margin, weight, reduction_enum)
+
+
+pixel_shuffle = _add_docstr(
+    torch.pixel_shuffle,
+    r"""
+pixel_shuffle(input, upscale_factor) -> Tensor
+
+Rearranges elements in a tensor of shape :math:`(*, C \times r^2, H, W)` to a
+tensor of shape :math:`(*, C, H \times r, W \times r)`, where r is the :attr:`upscale_factor`.
+
+See :class:`~torch.nn.PixelShuffle` for details.
+
+Args:
+    input (Tensor): the input tensor
+    upscale_factor (int): factor to increase spatial resolution by
+
+Examples::
+
+    >>> input = torch.randn(1, 9, 4, 4)
+    >>> output = torch.nn.functional.pixel_shuffle(input, 3)
+    >>> print(output.size())
+    torch.Size([1, 1, 12, 12])
+""",
+)
+
+pixel_unshuffle = _add_docstr(
+    torch.pixel_unshuffle,
+    r"""
+pixel_unshuffle(input, downscale_factor) -> Tensor
+
+Reverses the :class:`~torch.nn.PixelShuffle` operation by rearranging elements in a
+tensor of shape :math:`(*, C, H \times r, W \times r)` to a tensor of shape
+:math:`(*, C \times r^2, H, W)`, where r is the :attr:`downscale_factor`.
+
+See :class:`~torch.nn.PixelUnshuffle` for details.
+
+Args:
+    input (Tensor): the input tensor
+    downscale_factor (int): factor to decrease spatial resolution by
+
+Examples::
+
+    >>> input = torch.randn(1, 1, 12, 12)
+    >>> output = torch.nn.functional.pixel_unshuffle(input, 3)
+    >>> print(output.size())
+    torch.Size([1, 9, 4, 4])
+""",
+)
+
+channel_shuffle = _add_docstr(
+    torch.channel_shuffle,
+    r"""
+channel_shuffle(input, groups) -> Tensor
+
+Divide the channels in a tensor of shape :math:`(*, C , H, W)`
+into g groups and rearrange them as :math:`(*, \frac{C}{g}, g, H, W)`,
+while keeping the original tensor shape.
+
+See :class:`~torch.nn.ChannelShuffle` for details.
+
+Args:
+    input (Tensor): the input tensor
+    groups (int): number of groups to divide channels in and rearrange.
+
+Examples::
+
+    >>> input = torch.randn(1, 4, 2, 2)
+    >>> print(input)
+    [[[[1, 2],
+       [3, 4]],
+      [[5, 6],
+       [7, 8]],
+      [[9, 10],
+       [11, 12]],
+      [[13, 14],
+       [15, 16]],
+     ]]
+    >>> output = torch.nn.functional.channel_shuffle(input, 2)
+    >>> print(output)
+    [[[[1, 2],
+       [3, 4]],
+      [[9, 10],
+       [11, 12]],
+      [[5, 6],
+       [7, 8]],
+      [[13, 14],
+       [15, 16]],
+     ]]
+""",
+)
+
+native_channel_shuffle = _add_docstr(
+    torch.native_channel_shuffle,
+    r"""
+native_channel_shuffle(input, groups) -> Tensor
+
+Native kernel level implementation of the `channel_shuffle`.
+This function might become private in future releases, use with caution.
+
+Divide the channels in a tensor of shape :math:`(*, C , H, W)`
+into g groups and rearrange them as :math:`(*, \frac{C}{g}, g, H, W)`,
+while keeping the original tensor shape.
+
+See :class:`~torch.nn.ChannelShuffle` for details.
+
+Args:
+    input (Tensor): the input tensor
+    groups (int): number of groups to divide channels in and rearrange.
+ +Examples:: + + >>> input = torch.randn(1, 4, 2, 2) + >>> print(input) + [[[[1, 2], + [3, 4]], + [[5, 6], + [7, 8]], + [[9, 10], + [11, 12]], + [[13, 14], + [15, 16]], + ]] + >>> output = torch.nn.functional.native_channel_shuffle(input, 2) + >>> print(output) + [[[[1, 2], + [3, 4]], + [[9, 10], + [11, 12]], + [[5, 6], + [7, 8]], + [[13, 14], + [15, 16]], + ]] +""", +) + +@_overload # noqa: F811 +def upsample(input: Tensor, size: Optional[int] = None, scale_factor: Optional[float] = None, mode: str = "nearest", align_corners: Optional[bool] = None) -> Tensor: # noqa: F811,B950 + pass + + +@_overload # noqa: F811 +def upsample(input: Tensor, size: Optional[List[int]] = None, scale_factor: Optional[float] = None, mode: str = "nearest", align_corners: Optional[bool] = None) -> Tensor: # noqa: F811,B950 + pass + + +def upsample(input, size=None, scale_factor=None, mode="nearest", align_corners=None): # noqa: F811 + r"""Upsample input. + + Provided tensor is upsampled to either the given :attr:`size` or the given + :attr:`scale_factor` + + .. warning:: + This function is deprecated in favor of :func:`torch.nn.functional.interpolate`. + This is equivalent with ``nn.functional.interpolate(...)``. + + Note: + {backward_reproducibility_note} + + The algorithm used for upsampling is determined by :attr:`mode`. + + Currently temporal, spatial and volumetric upsampling are supported, i.e. + expected inputs are 3-D, 4-D or 5-D in shape. + + The input dimensions are interpreted in the form: + `mini-batch x channels x [optional depth] x [optional height] x width`. + + The modes available for upsampling are: `nearest`, `linear` (3D-only), + `bilinear`, `bicubic` (4D-only), `trilinear` (5D-only) + + Args: + input (Tensor): the input tensor + size (int or Tuple[int] or Tuple[int, int] or Tuple[int, int, int]): + output spatial size. + scale_factor (float or Tuple[float]): multiplier for spatial size. Has to match input size if it is a tuple. + mode (str): algorithm used for upsampling: + ``'nearest'`` | ``'linear'`` | ``'bilinear'`` | ``'bicubic'`` | + ``'trilinear'``. Default: ``'nearest'`` + align_corners (bool, optional): Geometrically, we consider the pixels of the + input and output as squares rather than points. + If set to ``True``, the input and output tensors are aligned by the + center points of their corner pixels, preserving the values at the corner pixels. + If set to ``False``, the input and output tensors are aligned by the corner + points of their corner pixels, and the interpolation uses edge value padding + for out-of-boundary values, making this operation *independent* of input size + when :attr:`scale_factor` is kept the same. This only has an effect when :attr:`mode` + is ``'linear'``, ``'bilinear'``, ``'bicubic'`` or ``'trilinear'``. + Default: ``False`` + + .. note:: + With ``mode='bicubic'``, it's possible to cause overshoot, in other words it can produce + negative values or values greater than 255 for images. + Explicitly call ``result.clamp(min=0, max=255)`` if you want to reduce the overshoot + when displaying the image. + + .. warning:: + With ``align_corners = True``, the linearly interpolating modes + (`linear`, `bilinear`, and `trilinear`) don't proportionally align the + output and input pixels, and thus the output values can depend on the + input size. This was the default behavior for these modes up to version + 0.3.1. Since then, the default behavior is ``align_corners = False``. + See :class:`~torch.nn.Upsample` for concrete examples on how this + affects the outputs. 
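+
+    A minimal migration sketch (illustrative only); the deprecated call simply forwards
+    to :func:`torch.nn.functional.interpolate`::
+
+        >>> x = torch.randn(1, 3, 8, 8)
+        >>> old = F.upsample(x, scale_factor=2, mode="nearest")
+        >>> new = F.interpolate(x, scale_factor=2, mode="nearest")
+        >>> torch.equal(old, new)
+        True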
+ + """ + warnings.warn("nn.functional.upsample is deprecated. Use nn.functional.interpolate instead.") + return interpolate(input, size, scale_factor, mode, align_corners) + + +if upsample.__doc__: + upsample.__doc__ = upsample.__doc__.format(**reproducibility_notes) + + +def _is_integer(x) -> bool: + r"""Type check the input number is an integer. + + Will return True for int, SymInt, Numpy integers and Tensors with integer elements. + """ + if isinstance(x, (int, torch.SymInt)): + return True + if np is not None and isinstance(x, np.integer): + return True + return isinstance(x, Tensor) and not x.is_floating_point() + + +@_overload # noqa: F811 +def interpolate(input: Tensor, size: Optional[int] = None, scale_factor: Optional[List[float]] = None, mode: str = 'nearest', align_corners: Optional[bool] = None, recompute_scale_factor: Optional[bool] = None, antialias: bool = False) -> Tensor: # noqa: F811,B950 + pass + + +@_overload # noqa: F811 +def interpolate(input: Tensor, size: Optional[List[int]] = None, scale_factor: Optional[List[float]] = None, mode: str = 'nearest', align_corners: Optional[bool] = None, recompute_scale_factor: Optional[bool] = None, antialias: bool = False) -> Tensor: # noqa: F811,B950 + pass + + +@_overload # noqa: F811 +def interpolate(input: Tensor, size: Optional[int] = None, scale_factor: Optional[float] = None, mode: str = 'nearest', align_corners: Optional[bool] = None, recompute_scale_factor: Optional[bool] = None, antialias: bool = False) -> Tensor: # noqa: F811,B950 + pass + + +@_overload # noqa: F811 +def interpolate( # noqa: F811 + input: Tensor, + size: Optional[List[int]] = None, + scale_factor: Optional[float] = None, + mode: str = "nearest", + align_corners: Optional[bool] = None, + recompute_scale_factor: Optional[bool] = None, + antialias: bool = False, +) -> Tensor: # noqa: F811 + pass + +def interpolate(input: Tensor, size: Optional[int] = None, scale_factor: Optional[List[float]] = None, mode: str = 'nearest', align_corners: Optional[bool] = None, recompute_scale_factor: Optional[bool] = None, antialias: bool = False) -> Tensor: # noqa: F811,B950 + r"""Down/up samples the input. + + Tensor interpolated to either the given :attr:`size` or the given + :attr:`scale_factor` + + The algorithm used for interpolation is determined by :attr:`mode`. + + Currently temporal, spatial and volumetric sampling are supported, i.e. + expected inputs are 3-D, 4-D or 5-D in shape. + + The input dimensions are interpreted in the form: + `mini-batch x channels x [optional depth] x [optional height] x width`. + + The modes available for resizing are: `nearest`, `linear` (3D-only), + `bilinear`, `bicubic` (4D-only), `trilinear` (5D-only), `area`, `nearest-exact` + + Args: + input (Tensor): the input tensor + size (int or Tuple[int] or Tuple[int, int] or Tuple[int, int, int]): + output spatial size. + scale_factor (float or Tuple[float]): multiplier for spatial size. If `scale_factor` is a tuple, + its length has to match the number of spatial dimensions; `input.dim() - 2`. + mode (str): algorithm used for upsampling: + ``'nearest'`` | ``'linear'`` | ``'bilinear'`` | ``'bicubic'`` | + ``'trilinear'`` | ``'area'`` | ``'nearest-exact'``. Default: ``'nearest'`` + align_corners (bool, optional): Geometrically, we consider the pixels of the + input and output as squares rather than points. + If set to ``True``, the input and output tensors are aligned by the + center points of their corner pixels, preserving the values at the corner pixels. 
+ If set to ``False``, the input and output tensors are aligned by the corner + points of their corner pixels, and the interpolation uses edge value padding + for out-of-boundary values, making this operation *independent* of input size + when :attr:`scale_factor` is kept the same. This only has an effect when :attr:`mode` + is ``'linear'``, ``'bilinear'``, ``'bicubic'`` or ``'trilinear'``. + Default: ``False`` + recompute_scale_factor (bool, optional): recompute the scale_factor for use in the + interpolation calculation. If `recompute_scale_factor` is ``True``, then + `scale_factor` must be passed in and `scale_factor` is used to compute the + output `size`. The computed output `size` will be used to infer new scales for + the interpolation. Note that when `scale_factor` is floating-point, it may differ + from the recomputed `scale_factor` due to rounding and precision issues. + If `recompute_scale_factor` is ``False``, then `size` or `scale_factor` will + be used directly for interpolation. Default: ``None``. + antialias (bool, optional): flag to apply anti-aliasing. Default: ``False``. Using anti-alias + option together with ``align_corners=False``, interpolation result would match Pillow + result for downsampling operation. Supported modes: ``'bilinear'``, ``'bicubic'``. + + .. note:: + With ``mode='bicubic'``, it's possible to cause overshoot, in other words it can produce + negative values or values greater than 255 for images. + Explicitly call ``result.clamp(min=0, max=255)`` if you want to reduce the overshoot + when displaying the image. + + .. note:: + Mode ``mode='nearest-exact'`` matches Scikit-Image and PIL nearest neighbours interpolation + algorithms and fixes known issues with ``mode='nearest'``. This mode is introduced to keep + backward compatibility. + Mode ``mode='nearest'`` matches buggy OpenCV's ``INTER_NEAREST`` interpolation algorithm. + + .. note:: + The gradients for the dtype ``float16`` on CUDA may be inaccurate in the upsample operation + when using modes ``['linear', 'bilinear', 'bicubic', 'trilinear', 'area']``. + For more details, please refer to the discussion in + `issue#104157 `_. + + Note: + {backward_reproducibility_note} + """ + if has_torch_function_unary(input): + return handle_torch_function( + interpolate, + (input,), + input, + size=size, + scale_factor=scale_factor, + mode=mode, + align_corners=align_corners, + recompute_scale_factor=recompute_scale_factor, + antialias=antialias + ) + + if mode in ("nearest", "area", "nearest-exact"): + if align_corners is not None: + raise ValueError( + "align_corners option can only be set with the " + "interpolating modes: linear | bilinear | bicubic | trilinear" + ) + else: + if align_corners is None: + align_corners = False + + dim = input.dim() - 2 # Number of spatial dimensions. + + # Process size and scale_factor. Validate that exactly one is set. + # Validate its length if it is a list, or expand it if it is a scalar. + # After this block, exactly one of output_size and scale_factors will + # be non-None, and it will be a list (or tuple). + if size is not None and scale_factor is not None: + raise ValueError("only one of size or scale_factor should be defined") + elif size is not None: + assert scale_factor is None + scale_factors = None + if isinstance(size, (list, tuple)): + if len(size) != dim: + raise ValueError( + "Input and output must have the same number of spatial dimensions, but got " + f"input with spatial dimensions of {list(input.shape[2:])} and output size of {size}. 
" + "Please provide input tensor in (N, C, d1, d2, ...,dK) format and " + "output size in (o1, o2, ...,oK) format." + ) + if not torch.jit.is_scripting(): + if not all(_is_integer(x) for x in size): + raise TypeError( + "expected size to be one of int or Tuple[int] or Tuple[int, int] or " + f"Tuple[int, int, int], but got size with types {[type(x) for x in size]}" + ) + output_size = size + else: + output_size = [size for _ in range(dim)] + elif scale_factor is not None: + assert size is None + output_size = None + if isinstance(scale_factor, (list, tuple)): + if len(scale_factor) != dim: + raise ValueError( + "Input and scale_factor must have the same number of spatial dimensions, but " + f"got input with spatial dimensions of {list(input.shape[2:])} and " + f"scale_factor of shape {scale_factor}. " + "Please provide input tensor in (N, C, d1, d2, ...,dK) format and " + "scale_factor in (s1, s2, ...,sK) format." + ) + scale_factors = scale_factor + else: + scale_factors = [scale_factor for _ in range(dim)] + else: + raise ValueError("either size or scale_factor should be defined") + + if recompute_scale_factor is not None and recompute_scale_factor and size is not None: + raise ValueError("recompute_scale_factor is not meaningful with an explicit size.") + + # "area" mode always requires an explicit size rather than scale factor. + # Re-use the recompute_scale_factor code path. + if mode == "area" and output_size is None: + recompute_scale_factor = True + + if recompute_scale_factor is not None and recompute_scale_factor: + # We compute output_size here, then un-set scale_factors. + # The C++ code will recompute it based on the (integer) output size. + assert scale_factors is not None + if not torch.jit.is_scripting() and torch._C._get_tracing_state(): + # make scale_factor a tensor in tracing so constant doesn't get baked in + output_size = [ + (torch.floor((input.size(i + 2).float() * torch.tensor(scale_factors[i], dtype=torch.float32)).float())) + for i in range(dim) + ] + elif torch.jit.is_scripting(): + output_size = [int(math.floor(float(input.size(i + 2)) * scale_factors[i])) + for i in range(dim)] + else: + output_size = [ + _sym_int(input.size(i + 2) * scale_factors[i]) + for i in range(dim) + ] + scale_factors = None + + if antialias and not (mode in ("bilinear", "bicubic") and input.ndim == 4): + raise ValueError("Anti-alias option is restricted to bilinear and bicubic modes and requires a 4-D tensor as input") + + if input.dim() == 3 and mode == "nearest": + return torch._C._nn.upsample_nearest1d(input, output_size, scale_factors) + if input.dim() == 4 and mode == "nearest": + return torch._C._nn.upsample_nearest2d(input, output_size, scale_factors) + if input.dim() == 5 and mode == "nearest": + return torch._C._nn.upsample_nearest3d(input, output_size, scale_factors) + + if input.dim() == 3 and mode == "nearest-exact": + return torch._C._nn._upsample_nearest_exact1d(input, output_size, scale_factors) + if input.dim() == 4 and mode == "nearest-exact": + return torch._C._nn._upsample_nearest_exact2d(input, output_size, scale_factors) + if input.dim() == 5 and mode == "nearest-exact": + return torch._C._nn._upsample_nearest_exact3d(input, output_size, scale_factors) + + if input.dim() == 3 and mode == "area": + assert output_size is not None + return adaptive_avg_pool1d(input, output_size) + if input.dim() == 4 and mode == "area": + assert output_size is not None + return adaptive_avg_pool2d(input, output_size) + if input.dim() == 5 and mode == "area": + assert output_size is 
not None + return adaptive_avg_pool3d(input, output_size) + + if input.dim() == 3 and mode == "linear": + assert align_corners is not None + return torch._C._nn.upsample_linear1d(input, output_size, align_corners, scale_factors) + if input.dim() == 4 and mode == "bilinear": + assert align_corners is not None + if antialias: + return torch._C._nn._upsample_bilinear2d_aa(input, output_size, align_corners, scale_factors) + # Two levels are necessary to prevent TorchScript from touching + # are_deterministic_algorithms_enabled. + if not torch.jit.is_scripting(): + if torch.are_deterministic_algorithms_enabled() and input.is_cuda: + # Use slow decomp whose backward will be in terms of index_put + # importlib is required because the import cannot be top level + # (cycle) and cannot be nested (TS doesn't support) + return importlib.import_module('torch._decomp.decompositions')._upsample_linear_vec( + input, output_size, align_corners, scale_factors) + return torch._C._nn.upsample_bilinear2d(input, output_size, align_corners, scale_factors) + if input.dim() == 5 and mode == "trilinear": + assert align_corners is not None + return torch._C._nn.upsample_trilinear3d(input, output_size, align_corners, scale_factors) + if input.dim() == 4 and mode == "bicubic": + assert align_corners is not None + if antialias: + return torch._C._nn._upsample_bicubic2d_aa(input, output_size, align_corners, scale_factors) + return torch._C._nn.upsample_bicubic2d(input, output_size, align_corners, scale_factors) + + if input.dim() == 3 and mode == "bilinear": + raise NotImplementedError("Got 3D input, but bilinear mode needs 4D input") + if input.dim() == 3 and mode == "trilinear": + raise NotImplementedError("Got 3D input, but trilinear mode needs 5D input") + if input.dim() == 4 and mode == "linear": + raise NotImplementedError("Got 4D input, but linear mode needs 3D input") + if input.dim() == 4 and mode == "trilinear": + raise NotImplementedError("Got 4D input, but trilinear mode needs 5D input") + if input.dim() == 5 and mode == "linear": + raise NotImplementedError("Got 5D input, but linear mode needs 3D input") + if input.dim() == 5 and mode == "bilinear": + raise NotImplementedError("Got 5D input, but bilinear mode needs 4D input") + + raise NotImplementedError( + "Input Error: Only 3D, 4D and 5D input Tensors supported" + f" (got {input.dim()}D) for the modes: nearest | linear | bilinear | bicubic | trilinear | area | nearest-exact" + f" (got {mode})" + ) + + +if interpolate.__doc__: + interpolate.__doc__ = interpolate.__doc__.format(**reproducibility_notes) + + +@_overload # noqa: F811 +def upsample_nearest(input: Tensor, size: Optional[int] = None, scale_factor: Optional[float] = None) -> Tensor: # noqa: F811 + pass + + +@_overload # noqa: F811 +def upsample_nearest(input: Tensor, size: Optional[List[int]] = None, scale_factor: Optional[float] = None) -> Tensor: # noqa: F811 + pass + + +def upsample_nearest(input, size=None, scale_factor=None): # noqa: F811 + r"""Upsamples the input, using nearest neighbours' pixel values. + + .. warning:: + This function is deprecated in favor of :func:`torch.nn.functional.interpolate`. + This is equivalent with ``nn.functional.interpolate(..., mode='nearest')``. + + Currently spatial and volumetric upsampling are supported (i.e. expected + inputs are 4 or 5 dimensional). + + Args: + input (Tensor): input + size (int or Tuple[int, int] or Tuple[int, int, int]): output spatia + size. + scale_factor (int): multiplier for spatial size. Has to be an integer. 
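+
+    A minimal usage sketch (illustrative only; prefer calling :func:`interpolate` directly,
+    see the Note below)::
+
+        >>> x = torch.randn(1, 3, 8, 8)
+        >>> F.upsample_nearest(x, scale_factor=2).shape
+        torch.Size([1, 3, 16, 16])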
+ + Note: + {backward_reproducibility_note} + """ + # DeprecationWarning is ignored by default + warnings.warn("nn.functional.upsample_nearest is deprecated. Use nn.functional.interpolate instead.") + return interpolate(input, size, scale_factor, mode="nearest") + + +if upsample_nearest.__doc__: + upsample_nearest.__doc__ = upsample_nearest.__doc__.format(**reproducibility_notes) + + +@_overload # noqa: F811 +def upsample_bilinear( + input: Tensor, size: Optional[int] = None, scale_factor: Optional[float] = None +) -> Tensor: # noqa: F811 + pass + + +@_overload # noqa: F811 +def upsample_bilinear( # noqa: F811 + input: Tensor, size: Optional[List[int]] = None, scale_factor: Optional[float] = None +) -> Tensor: # noqa: F811 + pass + + +@_overload # noqa: F811 +def upsample_bilinear( # noqa: F811 + input: Tensor, size: Optional[int] = None, scale_factor: Optional[List[float]] = None +) -> Tensor: # noqa: F811 + pass + + +@_overload # noqa: F811 +def upsample_bilinear( # noqa: F811 + input: Tensor, size: Optional[List[int]] = None, scale_factor: Optional[List[float]] = None +) -> Tensor: # noqa: F811 + pass + + +def upsample_bilinear(input, size=None, scale_factor=None): # noqa: F811 + r"""Upsamples the input, using bilinear upsampling. + + .. warning:: + This function is deprecated in favor of :func:`torch.nn.functional.interpolate`. + This is equivalent with + ``nn.functional.interpolate(..., mode='bilinear', align_corners=True)``. + + Expected inputs are spatial (4 dimensional). Use `upsample_trilinear` fo + volumetric (5 dimensional) inputs. + + Args: + input (Tensor): input + size (int or Tuple[int, int]): output spatial size. + scale_factor (int or Tuple[int, int]): multiplier for spatial size + + Note: + {backward_reproducibility_note} + """ + # DeprecationWarning is ignored by default + warnings.warn("nn.functional.upsample_bilinear is deprecated. Use nn.functional.interpolate instead.") + return interpolate(input, size, scale_factor, mode="bilinear", align_corners=True) + + +if upsample_bilinear.__doc__: + upsample_bilinear.__doc__ = upsample_bilinear.__doc__.format(**reproducibility_notes) + +GRID_SAMPLE_INTERPOLATION_MODES = { + "bilinear": 0, + "nearest": 1, + "bicubic": 2, +} + +GRID_SAMPLE_PADDING_MODES = { + "zeros": 0, + "border": 1, + "reflection": 2, +} + + +def grid_sample( + input: Tensor, + grid: Tensor, + mode: str = "bilinear", + padding_mode: str = "zeros", + align_corners: Optional[bool] = None, +) -> Tensor: + r"""Compute grid sample. + + Given an :attr:`input` and a flow-field :attr:`grid`, computes the + ``output`` using :attr:`input` values and pixel locations from :attr:`grid`. + + Currently, only spatial (4-D) and volumetric (5-D) :attr:`input` are + supported. + + In the spatial (4-D) case, for :attr:`input` with shape + :math:`(N, C, H_\text{in}, W_\text{in})` and :attr:`grid` with shape + :math:`(N, H_\text{out}, W_\text{out}, 2)`, the output will have shape + :math:`(N, C, H_\text{out}, W_\text{out})`. + + For each output location ``output[n, :, h, w]``, the size-2 vector + ``grid[n, h, w]`` specifies :attr:`input` pixel locations ``x`` and ``y``, + which are used to interpolate the output value ``output[n, :, h, w]``. + In the case of 5D inputs, ``grid[n, d, h, w]`` specifies the + ``x``, ``y``, ``z`` pixel locations for interpolating + ``output[n, :, d, h, w]``. :attr:`mode` argument specifies ``nearest`` or + ``bilinear`` interpolation method to sample the input pixels. 
+ + :attr:`grid` specifies the sampling pixel locations normalized by the + :attr:`input` spatial dimensions. Therefore, it should have most values in + the range of ``[-1, 1]``. For example, values ``x = -1, y = -1`` is the + left-top pixel of :attr:`input`, and values ``x = 1, y = 1`` is the + right-bottom pixel of :attr:`input`. + + If :attr:`grid` has values outside the range of ``[-1, 1]``, the corresponding + outputs are handled as defined by :attr:`padding_mode`. Options are + + * ``padding_mode="zeros"``: use ``0`` for out-of-bound grid locations, + * ``padding_mode="border"``: use border values for out-of-bound grid locations, + * ``padding_mode="reflection"``: use values at locations reflected by + the border for out-of-bound grid locations. For location far away + from the border, it will keep being reflected until becoming in bound, + e.g., (normalized) pixel location ``x = -3.5`` reflects by border ``-1`` + and becomes ``x' = 1.5``, then reflects by border ``1`` and becomes + ``x'' = -0.5``. + + Note: + This function is often used in conjunction with :func:`affine_grid` + to build `Spatial Transformer Networks`_ . + + Note: + When using the CUDA backend, this operation may induce nondeterministic + behaviour in its backward pass that is not easily switched off. + Please see the notes on :doc:`/notes/randomness` for background. + + Note: + NaN values in :attr:`grid` would be interpreted as ``-1``. + + Args: + input (Tensor): input of shape :math:`(N, C, H_\text{in}, W_\text{in})` (4-D case) + or :math:`(N, C, D_\text{in}, H_\text{in}, W_\text{in})` (5-D case) + grid (Tensor): flow-field of shape :math:`(N, H_\text{out}, W_\text{out}, 2)` (4-D case) + or :math:`(N, D_\text{out}, H_\text{out}, W_\text{out}, 3)` (5-D case) + mode (str): interpolation mode to calculate output values + ``'bilinear'`` | ``'nearest'`` | ``'bicubic'``. Default: ``'bilinear'`` + Note: ``mode='bicubic'`` supports only 4-D input. + When ``mode='bilinear'`` and the input is 5-D, the interpolation mode + used internally will actually be trilinear. However, when the input is 4-D, + the interpolation mode will legitimately be bilinear. + padding_mode (str): padding mode for outside grid values + ``'zeros'`` | ``'border'`` | ``'reflection'``. Default: ``'zeros'`` + align_corners (bool, optional): Geometrically, we consider the pixels of the + input as squares rather than points. + If set to ``True``, the extrema (``-1`` and ``1``) are considered as referring + to the center points of the input's corner pixels. If set to ``False``, they + are instead considered as referring to the corner points of the input's corner + pixels, making the sampling more resolution agnostic. + This option parallels the ``align_corners`` option in + :func:`interpolate`, and so whichever option is used here + should also be used there to resize the input image before grid sampling. + Default: ``False`` + + Returns: + output (Tensor): output Tensor + + .. _`Spatial Transformer Networks`: + https://arxiv.org/abs/1506.02025 + + .. warning:: + When ``align_corners = True``, the grid positions depend on the pixel + size relative to the input image size, and so the locations sampled by + :func:`grid_sample` will differ for the same input given at different + resolutions (that is, after being upsampled or downsampled). + The default behavior up to version 1.2.0 was ``align_corners = True``. 
+ Since then, the default behavior has been changed to ``align_corners = False``, + in order to bring it in line with the default for :func:`interpolate`. + + .. note:: + ``mode='bicubic'`` is implemented using the `cubic convolution algorithm`_ with :math:`\alpha=-0.75`. + The constant :math:`\alpha` might be different from packages to packages. + For example, `PIL`_ and `OpenCV`_ use -0.5 and -0.75 respectively. + This algorithm may "overshoot" the range of values it's interpolating. + For example, it may produce negative values or values greater than 255 when interpolating input in [0, 255]. + Clamp the results with :func:`torch.clamp` to ensure they are within the valid range. + .. _`cubic convolution algorithm`: https://en.wikipedia.org/wiki/Bicubic_interpolation + .. _`PIL`: https://github.com/python-pillow/Pillow/blob/4634eafe3c695a014267eefdce830b4a825beed7/src/libImaging/Resample.c#L51 + .. _`OpenCV`: https://github.com/opencv/opencv/blob/f345ed564a06178670750bad59526cfa4033be55/modules/imgproc/src/resize.cpp#L908 + """ + if has_torch_function_variadic(input, grid): + return handle_torch_function( + grid_sample, (input, grid), input, grid, mode=mode, padding_mode=padding_mode, align_corners=align_corners + ) + if mode != "bilinear" and mode != "nearest" and mode != "bicubic": + raise ValueError( + f"nn.functional.grid_sample(): expected mode to be 'bilinear', 'nearest' or 'bicubic', but got: '{mode}'" + ) + if padding_mode != "zeros" and padding_mode != "border" and padding_mode != "reflection": + raise ValueError( + "nn.functional.grid_sample(): expected padding_mode " + "to be 'zeros', 'border', or 'reflection', " + f"but got: '{padding_mode}'" + ) + + if mode == "bilinear": + mode_enum = 0 + elif mode == "nearest": + mode_enum = 1 + else: # mode == 'bicubic' + mode_enum = 2 + + if padding_mode == "zeros": + padding_mode_enum = 0 + elif padding_mode == "border": + padding_mode_enum = 1 + else: # padding_mode == 'reflection' + padding_mode_enum = 2 + + if align_corners is None: + warnings.warn( + "Default grid_sample and affine_grid behavior has changed " + "to align_corners=False since 1.3.0. Please specify " + "align_corners=True if the old behavior is desired. " + "See the documentation of grid_sample for details." + ) + align_corners = False + + return torch.grid_sampler(input, grid, mode_enum, padding_mode_enum, align_corners) + + +def affine_grid(theta: Tensor, size: List[int], align_corners: Optional[bool] = None) -> Tensor: + r"""Generate 2D or 3D flow field (sampling grid), given a batch of affine matrices :attr:`theta`. + + .. note:: + This function is often used in conjunction with :func:`grid_sample` + to build `Spatial Transformer Networks`_ . + + Args: + theta (Tensor): input batch of affine matrices with shape + (:math:`N \times 2 \times 3`) for 2D or + (:math:`N \times 3 \times 4`) for 3D + size (torch.Size): the target output image size. + (:math:`N \times C \times H \times W` for 2D or + :math:`N \times C \times D \times H \times W` for 3D) + Example: torch.Size((32, 3, 24, 24)) + align_corners (bool, optional): if ``True``, consider ``-1`` and ``1`` + to refer to the centers of the corner pixels rather than the image corners. + Refer to :func:`grid_sample` for a more complete description. + A grid generated by :func:`affine_grid` should be passed to :func:`grid_sample` + with the same setting for this option. + Default: ``False`` + + Returns: + output (Tensor): output Tensor of size (:math:`N \times H \times W \times 2`) + + .. 
_`Spatial Transformer Networks`: + https://arxiv.org/abs/1506.02025 + + .. warning:: + When ``align_corners = True``, the grid positions depend on the pixel + size relative to the input image size, and so the locations sampled by + :func:`grid_sample` will differ for the same input given at different + resolutions (that is, after being upsampled or downsampled). + The default behavior up to version 1.2.0 was ``align_corners = True``. + Since then, the default behavior has been changed to ``align_corners = False``, + in order to bring it in line with the default for :func:`interpolate`. + .. warning:: + When ``align_corners = True``, 2D affine transforms on 1D data and + 3D affine transforms on 2D data (that is, when one of the spatial + dimensions has unit size) are ill-defined, and not an intended use case. + This is not a problem when ``align_corners = False``. + Up to version 1.2.0, all grid points along a unit dimension were + considered arbitrarily to be at ``-1``. + From version 1.3.0, under ``align_corners = True`` all grid points + along a unit dimension are considered to be at ``0`` + (the center of the input image). + """ + if has_torch_function_unary(theta): + return handle_torch_function(affine_grid, (theta,), theta, size, align_corners=align_corners) + if align_corners is None: + warnings.warn( + "Default grid_sample and affine_grid behavior has changed " + "to align_corners=False since 1.3.0. Please specify " + "align_corners=True if the old behavior is desired. " + "See the documentation of grid_sample for details." + ) + align_corners = False + + # enforce floating point dtype on theta + if not theta.is_floating_point(): + raise ValueError(f"Expected theta to have floating point type, but got {theta.dtype}") + # check that shapes and sizes match + if len(size) == 4: + if theta.dim() != 3 or theta.shape[-2] != 2 or theta.shape[-1] != 3: + raise ValueError( + f"Expected a batch of 2D affine matrices of shape Nx2x3 for size {size}. Got {theta.shape}." + ) + spatial_size = size[-2:] # spatial dimension sizes + elif len(size) == 5: + if theta.dim() != 3 or theta.shape[-2] != 3 or theta.shape[-1] != 4: + raise ValueError( + f"Expected a batch of 3D affine matrices of shape Nx3x4 for size {size}. Got {theta.shape}." + ) + spatial_size = size[-3:] # spatial dimension sizes + else: + raise NotImplementedError( + "affine_grid only supports 4D and 5D sizes, " + "for 2D and 3D affine transforms, respectively. " + f"Got size {size}." + ) + # check for empty span + if align_corners and min(spatial_size) == 1: + warnings.warn( + "Since version 1.3.0, affine_grid behavior has changed " + "for unit-size grids when align_corners=True. " + "This is not an intended use case of affine_grid. " + "See the documentation of affine_grid for details." + ) + elif min(size) <= 0: + raise ValueError(f"Expected non-zero, positive output size. Got {size}") + + return torch.affine_grid_generator(theta, size, align_corners) + + +def pad(input: Tensor, pad: List[int], mode: str = "constant", value: Optional[float] = None) -> Tensor: + r""" +pad(input, pad, mode="constant", value=None) -> Tensor + +Pads tensor. + +Padding size: + The padding size by which to pad some dimensions of :attr:`input` + are described starting from the last dimension and moving forward. + :math:`\left\lfloor\frac{\text{len(pad)}}{2}\right\rfloor` dimensions + of ``input`` will be padded. 
+ For example, to pad only the last dimension of the input tensor, then + :attr:`pad` has the form + :math:`(\text{padding\_left}, \text{padding\_right})`; + to pad the last 2 dimensions of the input tensor, then use + :math:`(\text{padding\_left}, \text{padding\_right},` + :math:`\text{padding\_top}, \text{padding\_bottom})`; + to pad the last 3 dimensions, use + :math:`(\text{padding\_left}, \text{padding\_right},` + :math:`\text{padding\_top}, \text{padding\_bottom}` + :math:`\text{padding\_front}, \text{padding\_back})`. + +Padding mode: + See :class:`torch.nn.CircularPad2d`, :class:`torch.nn.ConstantPad2d`, + :class:`torch.nn.ReflectionPad2d`, and :class:`torch.nn.ReplicationPad2d` + for concrete examples on how each of the padding modes works. Constant + padding is implemented for arbitrary dimensions. Circular, replicate and + reflection padding are implemented for padding the last 3 dimensions of a + 4D or 5D input tensor, the last 2 dimensions of a 3D or 4D input tensor, + or the last dimension of a 2D or 3D input tensor. + +Note: + When using the CUDA backend, this operation may induce nondeterministic + behaviour in its backward pass that is not easily switched off. + Please see the notes on :doc:`/notes/randomness` for background. + +Args: + input (Tensor): N-dimensional tensor + pad (tuple): m-elements tuple, where + :math:`\frac{m}{2} \leq` input dimensions and :math:`m` is even. + mode: ``'constant'``, ``'reflect'``, ``'replicate'`` or ``'circular'``. + Default: ``'constant'`` + value: fill value for ``'constant'`` padding. Default: ``0`` + +Examples:: + + >>> t4d = torch.empty(3, 3, 4, 2) + >>> p1d = (1, 1) # pad last dim by 1 on each side + >>> out = F.pad(t4d, p1d, "constant", 0) # effectively zero padding + >>> print(out.size()) + torch.Size([3, 3, 4, 4]) + >>> p2d = (1, 1, 2, 2) # pad last dim by (1, 1) and 2nd to last by (2, 2) + >>> out = F.pad(t4d, p2d, "constant", 0) + >>> print(out.size()) + torch.Size([3, 3, 8, 4]) + >>> t4d = torch.empty(3, 3, 4, 2) + >>> p3d = (0, 1, 2, 1, 3, 3) # pad by (0, 1), (2, 1), and (3, 3) + >>> out = F.pad(t4d, p3d, "constant", 0) + >>> print(out.size()) + torch.Size([3, 9, 7, 3]) + +""" + if has_torch_function_unary(input): + return handle_torch_function( + torch.nn.functional.pad, (input,), input, pad, mode=mode, value=value) + if not torch.jit.is_scripting(): + if torch.are_deterministic_algorithms_enabled() and input.is_cuda: + if mode == 'replicate': + # Use slow decomp whose backward will be in terms of index_put. + # importlib is required because the import cannot be top level + # (cycle) and cannot be nested (TS doesn't support) + return importlib.import_module('torch._decomp.decompositions')._replication_pad( + input, pad + ) + return torch._C._nn.pad(input, pad, mode, value) + +# TODO: Fix via https://github.com/pytorch/pytorch/issues/75798 +pad.__module__ = "torch.nn.functional" + +# distance + + +pairwise_distance = _add_docstr( + torch.pairwise_distance, + r""" +pairwise_distance(x1, x2, p=2.0, eps=1e-6, keepdim=False) -> Tensor + +See :class:`torch.nn.PairwiseDistance` for details +""") + + +pdist = _add_docstr( + torch.pdist, + r""" +pdist(input, p=2) -> Tensor + +Computes the p-norm distance between every pair of row vectors in the input. +This is identical to the upper triangular portion, excluding the diagonal, of +`torch.norm(input[:, None] - input, dim=2, p=p)`. This function will be faster +if the rows are contiguous. 
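+
+.. note::
+    Editorial addition (illustrative sketch, not part of the upstream docstring): the
+    equivalence stated above, checked against a dense :func:`torch.cdist` matrix,
+    assuming ``import torch`` and ``import torch.nn.functional as F``.
+
+    .. code-block:: python
+
+        x = torch.randn(4, 3)
+        d = F.pdist(x, p=2)                      # condensed distances, shape (6,)
+        full = torch.cdist(x, x, p=2)            # dense (4, 4) pairwise matrix
+        iu = torch.triu_indices(4, 4, offset=1)  # strict upper triangle, row-major
+        assert torch.allclose(d, full[iu[0], iu[1]], atol=1e-6)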
+ +If input has shape :math:`N \times M` then the output will have shape +:math:`\frac{1}{2} N (N - 1)`. + +This function is equivalent to ``scipy.spatial.distance.pdist(input, +'minkowski', p=p)`` if :math:`p \in (0, \infty)`. When :math:`p = 0` it is +equivalent to ``scipy.spatial.distance.pdist(input, 'hamming') * M``. +When :math:`p = \infty`, the closest scipy function is +``scipy.spatial.distance.pdist(xn, lambda x, y: np.abs(x - y).max())``. + +Args: + input: input tensor of shape :math:`N \times M`. + p: p value for the p-norm distance to calculate between each vector pair + :math:`\in [0, \infty]`. +""", +) + + +cosine_similarity = _add_docstr( + torch.cosine_similarity, + r""" +cosine_similarity(x1, x2, dim=1, eps=1e-8) -> Tensor + +Returns cosine similarity between ``x1`` and ``x2``, computed along dim. ``x1`` and ``x2`` must be broadcastable +to a common shape. ``dim`` refers to the dimension in this common shape. Dimension ``dim`` of the output is +squeezed (see :func:`torch.squeeze`), resulting in the +output tensor having 1 fewer dimension. + +.. math :: + \text{similarity} = \dfrac{x_1 \cdot x_2}{\max(\Vert x_1 \Vert _2, \epsilon) \cdot \max(\Vert x_2 \Vert _2, \epsilon)} + +Supports :ref:`type promotion `. + +Args: + x1 (Tensor): First input. + x2 (Tensor): Second input. + dim (int, optional): Dimension along which cosine similarity is computed. Default: 1 + eps (float, optional): Small value to avoid division by zero. + Default: 1e-8 + +Example:: + + >>> input1 = torch.randn(100, 128) + >>> input2 = torch.randn(100, 128) + >>> output = F.cosine_similarity(input1, input2) + >>> print(output) +""", +) + + +one_hot = _add_docstr( + torch._C._nn.one_hot, + r""" +one_hot(tensor, num_classes=-1) -> LongTensor + +Takes LongTensor with index values of shape ``(*)`` and returns a tensor +of shape ``(*, num_classes)`` that have zeros everywhere except where the +index of last dimension matches the corresponding value of the input tensor, +in which case it will be 1. + +See also `One-hot on Wikipedia`_ . + +.. _One-hot on Wikipedia: + https://en.wikipedia.org/wiki/One-hot + +Arguments: + tensor (LongTensor): class values of any shape. + num_classes (int): Total number of classes. If set to -1, the number + of classes will be inferred as one greater than the largest class + value in the input tensor. + +Returns: + LongTensor that has one more dimension with 1 values at the + index of last dimension indicated by the input, and 0 everywhere + else. + +Examples: + >>> F.one_hot(torch.arange(0, 5) % 3) + tensor([[1, 0, 0], + [0, 1, 0], + [0, 0, 1], + [1, 0, 0], + [0, 1, 0]]) + >>> F.one_hot(torch.arange(0, 5) % 3, num_classes=5) + tensor([[1, 0, 0, 0, 0], + [0, 1, 0, 0, 0], + [0, 0, 1, 0, 0], + [1, 0, 0, 0, 0], + [0, 1, 0, 0, 0]]) + >>> F.one_hot(torch.arange(0, 6).view(3,2) % 3) + tensor([[[1, 0, 0], + [0, 1, 0]], + [[0, 0, 1], + [1, 0, 0]], + [[0, 1, 0], + [0, 0, 1]]]) +""", +) + + +def triplet_margin_loss( + anchor: Tensor, + positive: Tensor, + negative: Tensor, + margin: float = 1.0, + p: float = 2, + eps: float = 1e-6, + swap: bool = False, + size_average: Optional[bool] = None, + reduce: Optional[bool] = None, + reduction: str = "mean", +) -> Tensor: + r"""Compute the triplet loss between given input tensors and a margin greater than 0. + + See :class:`~torch.nn.TripletMarginLoss` for details. 
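+
+    .. note::
+        Editorial addition (illustrative sketch, not part of the upstream docstring),
+        assuming ``import torch`` and ``import torch.nn.functional as F``.
+
+        .. code-block:: python
+
+            anchor = torch.randn(8, 128, requires_grad=True)
+            positive = torch.randn(8, 128)
+            negative = torch.randn(8, 128)
+            loss = F.triplet_margin_loss(anchor, positive, negative, margin=1.0, p=2)
+            loss.backward()     # scalar under the default reduction="mean"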
+ """ + if has_torch_function_variadic(anchor, positive, negative): + return handle_torch_function( + triplet_margin_loss, + (anchor, positive, negative), + anchor, + positive, + negative, + margin=margin, + p=p, + eps=eps, + swap=swap, + size_average=size_average, + reduce=reduce, + reduction=reduction, + ) + if size_average is not None or reduce is not None: + reduction_enum = _Reduction.legacy_get_enum(size_average, reduce) + else: + reduction_enum = _Reduction.get_enum(reduction) + return torch.triplet_margin_loss(anchor, positive, negative, margin, p, eps, swap, reduction_enum) + + +def triplet_margin_with_distance_loss( + anchor: Tensor, + positive: Tensor, + negative: Tensor, + *, + distance_function: Optional[Callable[[Tensor, Tensor], Tensor]] = None, + margin: float = 1.0, + swap: bool = False, + reduction: str = "mean" +) -> Tensor: + r"""Compute the triplet margin loss for input tensors using a custom distance function. + + See :class:`~torch.nn.TripletMarginWithDistanceLoss` for details. + """ + if torch.jit.is_scripting(): + raise NotImplementedError( + "F.triplet_margin_with_distance_loss does not support JIT scripting: " + "functions requiring Callables cannot be scripted." + ) + + if has_torch_function_variadic(anchor, positive, negative): + return handle_torch_function( + triplet_margin_with_distance_loss, + (anchor, positive, negative), + anchor, + positive, + negative, + distance_function=distance_function, + margin=margin, + swap=swap, + reduction=reduction, + ) + + # Check validity of reduction mode + if reduction not in ("mean", "sum", "none"): + raise ValueError(f"{reduction} is not a valid value for reduction") + + # Check dimensions + a_dim = anchor.ndim + p_dim = positive.ndim + n_dim = negative.ndim + if not (a_dim == p_dim and p_dim == n_dim): + raise RuntimeError( + f"The anchor, positive, and negative tensors are expected to have " + f"the same number of dimensions, but got: anchor {a_dim}D, " + f"positive {p_dim}D, and negative {n_dim}D inputs") + + # Calculate loss + if distance_function is None: + distance_function = torch.pairwise_distance + + dist_pos = distance_function(anchor, positive) + dist_neg = distance_function(anchor, negative) + # The distance swap is described in the paper "Learning shallow + # convolutional feature descriptors with triplet losses" by V. Balntas, E. + # Riba et al. If True, and if the positive example is closer to the + # negative example than the anchor is, swaps the positive example and the + # anchor in the loss computation. + if swap: + dist_swap = distance_function(positive, negative) + dist_neg = torch.minimum(dist_neg, dist_swap) + loss = torch.clamp_min(margin + dist_pos - dist_neg, 0) + + # Apply reduction + if reduction == "sum": + return torch.sum(loss) + elif reduction == "mean": + return torch.mean(loss) + else: # reduction == "none" + return loss + + +def normalize(input: Tensor, p: float = 2.0, dim: int = 1, eps: float = 1e-12, out: Optional[Tensor] = None) -> Tensor: + r"""Perform :math:`L_p` normalization of inputs over specified dimension. + + For a tensor :attr:`input` of sizes :math:`(n_0, ..., n_{dim}, ..., n_k)`, each + :math:`n_{dim}` -element vector :math:`v` along dimension :attr:`dim` is transformed as + + .. math:: + v = \frac{v}{\max(\lVert v \rVert_p, \epsilon)}. + + With the default arguments it uses the Euclidean norm over vectors along dimension :math:`1` for normalization. + + Args: + input: input tensor of any shape + p (float): the exponent value in the norm formulation. 
Default: 2 + dim (int or tuple of ints): the dimension to reduce. Default: 1 + eps (float): small value to avoid division by zero. Default: 1e-12 + out (Tensor, optional): the output tensor. If :attr:`out` is used, this + operation won't be differentiable. + """ + if has_torch_function_variadic(input, out): + return handle_torch_function(normalize, (input, out), input, p=p, dim=dim, eps=eps, out=out) + if out is None: + denom = input.norm(p, dim, keepdim=True).clamp_min(eps).expand_as(input) + return input / denom + else: + denom = input.norm(p, dim, keepdim=True).clamp_min_(eps).expand_as(input) + return torch.div(input, denom, out=out) + + +def assert_int_or_pair(arg: List[int], arg_name: str, message: str) -> None: + assert isinstance(arg, int) or len(arg) == 2, message.format(arg_name) + + +def unfold( + input: Tensor, kernel_size: BroadcastingList2[int], + dilation: BroadcastingList2[int] = 1, + padding: BroadcastingList2[int] = 0, + stride: BroadcastingList2[int] = 1 +) -> Tensor: + r"""Extract sliding local blocks from a batched input tensor. + + .. warning:: + Currently, only 4-D input tensors (batched image-like tensors) are + supported. + + .. warning:: + + More than one element of the unfolded tensor may refer to a single + memory location. As a result, in-place operations (especially ones that + are vectorized) may result in incorrect behavior. If you need to write + to the tensor, please clone it first. + + + See :class:`torch.nn.Unfold` for details + """ + if has_torch_function_unary(input): + return handle_torch_function( + unfold, (input,), input, kernel_size, dilation=dilation, padding=padding, stride=stride + ) + return torch._C._nn.im2col(input, _pair(kernel_size), _pair(dilation), _pair(padding), _pair(stride)) + + +def fold( + input: Tensor, output_size: BroadcastingList2[int], + kernel_size: BroadcastingList2[int], + dilation: BroadcastingList2[int] = 1, + padding: BroadcastingList2[int] = 0, + stride: BroadcastingList2[int] = 1 +) -> Tensor: + r"""Combine an array of sliding local blocks into a large containing tensor. + + .. warning:: + Currently, only unbatched (3D) or batched (4D) image-like output tensors are supported. + + See :class:`torch.nn.Fold` for details + """ + if has_torch_function_unary(input): + return handle_torch_function( + fold, (input,), input, output_size, kernel_size, dilation=dilation, padding=padding, stride=stride + ) + return torch._C._nn.col2im( + input, _pair(output_size), _pair(kernel_size), _pair(dilation), _pair(padding), _pair(stride) + ) + +# +# multihead attention +# + +def _in_projection_packed( + q: Tensor, + k: Tensor, + v: Tensor, + w: Tensor, + b: Optional[Tensor] = None, +) -> List[Tensor]: + r"""Perform the in-projection step of the attention operation, using packed weights. + + Output is a triple containing projection tensors for query, key and value. + + Args: + q, k, v: query, key and value tensors to be projected. For self-attention, + these are typically the same tensor; for encoder-decoder attention, + k and v are typically the same tensor. (We take advantage of these + identities for performance if they are present.) Regardless, q, k and v + must share a common embedding dimension; otherwise their shapes may vary. + w: projection weights for q, k and v, packed into a single tensor. Weights + are packed along dimension 0, in q, k, v order. + b: optional projection biases for q, k and v, packed into a single tensor + in q, k, v order. 
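+
+    .. note::
+        Editorial addition: a minimal sketch of the packing convention described above.
+        ``_in_projection_packed`` is a private helper, so its interface may change;
+        assumes ``import torch`` and ``import torch.nn.functional as F``.
+
+        .. code-block:: python
+
+            E = 16                                   # hypothetical embedding dim
+            q = torch.randn(2, 5, E)
+            w = torch.randn(3 * E, E)                # packed q/k/v weights
+            b = torch.randn(3 * E)                   # packed q/k/v biases
+            q_p, k_p, v_p = F._in_projection_packed(q, q, q, w, b)   # self-attention case
+            # equivalent to one packed linear followed by a 3-way split of the last dim
+            ref_q, ref_k, ref_v = F.linear(q, w, b).chunk(3, dim=-1)
+            assert torch.allclose(q_p, ref_q) and torch.allclose(v_p, ref_v)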
+ + Shape: + Inputs: + - q: :math:`(..., E)` where E is the embedding dimension + - k: :math:`(..., E)` where E is the embedding dimension + - v: :math:`(..., E)` where E is the embedding dimension + - w: :math:`(E * 3, E)` where E is the embedding dimension + - b: :math:`E * 3` where E is the embedding dimension + + Output: + - in output list :math:`[q', k', v']`, each output tensor will have the + same shape as the corresponding input tensor. + """ + E = q.size(-1) + if k is v: + if q is k: + # self-attention + proj = linear(q, w, b) + # reshape to 3, E and not E, 3 is deliberate for better memory coalescing and keeping same order as chunk() + proj = proj.unflatten(-1, (3, E)).unsqueeze(0).transpose(0, -2).squeeze(-2).contiguous() + return proj[0], proj[1], proj[2] + else: + # encoder-decoder attention + w_q, w_kv = w.split([E, E * 2]) + if b is None: + b_q = b_kv = None + else: + b_q, b_kv = b.split([E, E * 2]) + q_proj = linear(q, w_q, b_q) + kv_proj = linear(k, w_kv, b_kv) + # reshape to 2, E and not E, 2 is deliberate for better memory coalescing and keeping same order as chunk() + kv_proj = kv_proj.unflatten(-1, (2, E)).unsqueeze(0).transpose(0, -2).squeeze(-2).contiguous() + return (q_proj, kv_proj[0], kv_proj[1]) + else: + w_q, w_k, w_v = w.chunk(3) + if b is None: + b_q = b_k = b_v = None + else: + b_q, b_k, b_v = b.chunk(3) + return linear(q, w_q, b_q), linear(k, w_k, b_k), linear(v, w_v, b_v) + + +def _in_projection( + q: Tensor, + k: Tensor, + v: Tensor, + w_q: Tensor, + w_k: Tensor, + w_v: Tensor, + b_q: Optional[Tensor] = None, + b_k: Optional[Tensor] = None, + b_v: Optional[Tensor] = None, +) -> Tuple[Tensor, Tensor, Tensor]: + r"""Perform the in-projection step of the attention operation. + + This is simply a triple of linear projections, + with shape constraints on the weights which + ensure embedding dimension uniformity in the projected outputs. + Output is a triple containing projection tensors for query, key and value. + + Args: + q, k, v: query, key and value tensors to be projected. + w_q, w_k, w_v: weights for q, k and v, respectively. + b_q, b_k, b_v: optional biases for q, k and v, respectively. + + Shape: + Inputs: + - q: :math:`(Qdims..., Eq)` where Eq is the query embedding dimension and Qdims are any + number of leading dimensions. + - k: :math:`(Kdims..., Ek)` where Ek is the key embedding dimension and Kdims are any + number of leading dimensions. + - v: :math:`(Vdims..., Ev)` where Ev is the value embedding dimension and Vdims are any + number of leading dimensions. 
+ - w_q: :math:`(Eq, Eq)` + - w_k: :math:`(Eq, Ek)` + - w_v: :math:`(Eq, Ev)` + - b_q: :math:`(Eq)` + - b_k: :math:`(Eq)` + - b_v: :math:`(Eq)` + + Output: in output triple :math:`(q', k', v')`, + - q': :math:`[Qdims..., Eq]` + - k': :math:`[Kdims..., Eq]` + - v': :math:`[Vdims..., Eq]` + + """ + Eq, Ek, Ev = q.size(-1), k.size(-1), v.size(-1) + assert w_q.shape == (Eq, Eq), f"expecting query weights shape of {(Eq, Eq)}, but got {w_q.shape}" + assert w_k.shape == (Eq, Ek), f"expecting key weights shape of {(Eq, Ek)}, but got {w_k.shape}" + assert w_v.shape == (Eq, Ev), f"expecting value weights shape of {(Eq, Ev)}, but got {w_v.shape}" + assert b_q is None or b_q.shape == (Eq,), f"expecting query bias shape of {(Eq,)}, but got {b_q.shape}" + assert b_k is None or b_k.shape == (Eq,), f"expecting key bias shape of {(Eq,)}, but got {b_k.shape}" + assert b_v is None or b_v.shape == (Eq,), f"expecting value bias shape of {(Eq,)}, but got {b_v.shape}" + return linear(q, w_q, b_q), linear(k, w_k, b_k), linear(v, w_v, b_v) + +scaled_dot_product_attention = _add_docstr( + torch._C._nn.scaled_dot_product_attention, r""" +scaled_dot_product_attention(query, key, value, attn_mask=None, dropout_p=0.0, is_causal=False, scale=None) -> Tensor: + +Computes scaled dot product attention on query, key and value tensors, using +an optional attention mask if passed, and applying dropout if a probability +greater than 0.0 is specified. The optional scale argument can only be specified as a keyword argument. + +.. code-block:: python + + # Efficient implementation equivalent to the following: + def scaled_dot_product_attention(query, key, value, attn_mask=None, dropout_p=0.0, is_causal=False, scale=None) -> torch.Tensor: + L, S = query.size(-2), key.size(-2) + scale_factor = 1 / math.sqrt(query.size(-1)) if scale is None else scale + attn_bias = torch.zeros(L, S, dtype=query.dtype) + if is_causal: + assert attn_mask is None + temp_mask = torch.ones(L, S, dtype=torch.bool).tril(diagonal=0) + attn_bias.masked_fill_(temp_mask.logical_not(), float("-inf")) + attn_bias.to(query.dtype) + + if attn_mask is not None: + if attn_mask.dtype == torch.bool: + attn_bias.masked_fill_(attn_mask.logical_not(), float("-inf")) + else: + attn_bias += attn_mask + attn_weight = query @ key.transpose(-2, -1) * scale_factor + attn_weight += attn_bias + attn_weight = torch.softmax(attn_weight, dim=-1) + attn_weight = torch.dropout(attn_weight, dropout_p, train=True) + return attn_weight @ value + +.. warning:: This function is beta and subject to change. + +Note: + + There are currently three supported implementations of scaled dot product attention: + + - `FlashAttention-2: Faster Attention with Better Parallelism and Work Partitioning`_ + - `Memory-Efficient Attention`_ + - A PyTorch implementation defined in C++ matching the above formulation + + The function may call optimized kernels for improved performance when using the CUDA backend. + For all other backends, the PyTorch implementation will be used. + + All implementations are enabled by default. Scaled dot product attention attempts to automatically select the + most optimal implementation based on the inputs. In order to provide more fine-grained control over what implementation + is used, the following functions are provided for enabling and disabling implementations. + The context manager is the preferred mechanism: + + - :func:`torch.nn.attention.sdpa_kernel`: A context manager used to enable or disable any of the implementations. 
+ - :func:`torch.backends.cuda.enable_flash_sdp`: Globally enables or disables FlashAttention. + - :func:`torch.backends.cuda.enable_mem_efficient_sdp`: Globally enables or disables Memory-Efficient Attention. + - :func:`torch.backends.cuda.enable_math_sdp`: Globally enables or disables the PyTorch C++ implementation. + + Each of the fused kernels has specific input limitations. If the user requires the use of a specific fused implementation, + disable the PyTorch C++ implementation using :func:`torch.nn.attention.sdpa_kernel`. + In the event that a fused implementation is not available, a warning will be raised with the + reasons why the fused implementation cannot run. + + Due to the nature of fusing floating point operations, the output of this function may be different + depending on what backend kernel is chosen. + The c++ implementation supports torch.float64 and can be used when higher precision is required. + For more information please see :doc:`/notes/numerical_accuracy` + +Note: + {cudnn_reproducibility_note} +""".format(**reproducibility_notes) + + r""" +Args: + query (Tensor): Query tensor; shape :math:`(N, ..., L, E)`. + key (Tensor): Key tensor; shape :math:`(N, ..., S, E)`. + value (Tensor): Value tensor; shape :math:`(N, ..., S, Ev)`. + attn_mask (optional Tensor): Attention mask; shape must be broadcastable to the shape of attention weights, + which is :math:`(N,..., L, S)`. Two types of masks are supported. + A boolean mask where a value of True indicates that the element *should* take part in attention. + A float mask of the same type as query, key, value that is added to the attention score. + dropout_p (float): Dropout probability; if greater than 0.0, dropout is applied + is_causal (bool): If true, assumes upper left causal attention masking and errors if both attn_mask and is_causal + are set. + scale (optional float, keyword-only): Scaling factor applied prior to softmax. If None, the default value is set + to :math:`\frac{1}{\sqrt{E}}`. + + +Returns: + output (Tensor): Attention output; shape :math:`(N, ..., L, Ev)`. + +Shape legend: + - :math:`N: \text{Batch size} ... : \text{Any number of other batch dimensions (optional)}` + - :math:`S: \text{Source sequence length}` + - :math:`L: \text{Target sequence length}` + - :math:`E: \text{Embedding dimension of the query and key}` + - :math:`Ev: \text{Embedding dimension of the value}` + +Examples: + + >>> # Optionally use the context manager to ensure one of the fused kernels is run + >>> query = torch.rand(32, 8, 128, 64, dtype=torch.float16, device="cuda") + >>> key = torch.rand(32, 8, 128, 64, dtype=torch.float16, device="cuda") + >>> value = torch.rand(32, 8, 128, 64, dtype=torch.float16, device="cuda") + >>> with torch.backends.cuda.sdp_kernel(enable_math=False): + >>> F.scaled_dot_product_attention(query,key,value) + + +.. _FlashAttention-2\: Faster Attention with Better Parallelism and Work Partitioning: + https://arxiv.org/abs/2307.08691 +.. _Memory-Efficient Attention: + https://github.com/facebookresearch/xformers + +""") + +def _mha_shape_check(query: Tensor, key: Tensor, value: Tensor, + key_padding_mask: Optional[Tensor], attn_mask: Optional[Tensor], num_heads: int): + # Verifies the expected shape for `query, `key`, `value`, `key_padding_mask` and `attn_mask` + # and returns if the input is batched or not. + # Raises an error if `query` is not 2-D (unbatched) or 3-D (batched) tensor. + + # Shape check. 
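+    # The branches below distinguish the two supported layouts:
+    #   * 3-D query (batched): `key`/`value` must be 3-D, `key_padding_mask` (if any)
+    #     must be 2-D, and `attn_mask` (if any) must be 2-D or 3-D.
+    #   * 2-D query (unbatched): `key`/`value` must be 2-D, `key_padding_mask` (if any)
+    #     must be 1-D, and a 3-D `attn_mask` must have shape (num_heads, L, S).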
+ if query.dim() == 3: + # Batched Inputs + is_batched = True + assert key.dim() == 3 and value.dim() == 3, \ + ("For batched (3-D) `query`, expected `key` and `value` to be 3-D" + f" but found {key.dim()}-D and {value.dim()}-D tensors respectively") + if key_padding_mask is not None: + assert key_padding_mask.dim() == 2, \ + ("For batched (3-D) `query`, expected `key_padding_mask` to be `None` or 2-D" + f" but found {key_padding_mask.dim()}-D tensor instead") + if attn_mask is not None: + assert attn_mask.dim() in (2, 3), \ + ("For batched (3-D) `query`, expected `attn_mask` to be `None`, 2-D or 3-D" + f" but found {attn_mask.dim()}-D tensor instead") + elif query.dim() == 2: + # Unbatched Inputs + is_batched = False + assert key.dim() == 2 and value.dim() == 2, \ + ("For unbatched (2-D) `query`, expected `key` and `value` to be 2-D" + f" but found {key.dim()}-D and {value.dim()}-D tensors respectively") + + if key_padding_mask is not None: + assert key_padding_mask.dim() == 1, \ + ("For unbatched (2-D) `query`, expected `key_padding_mask` to be `None` or 1-D" + f" but found {key_padding_mask.dim()}-D tensor instead") + + if attn_mask is not None: + assert attn_mask.dim() in (2, 3), \ + ("For unbatched (2-D) `query`, expected `attn_mask` to be `None`, 2-D or 3-D" + f" but found {attn_mask.dim()}-D tensor instead") + if attn_mask.dim() == 3: + expected_shape = (num_heads, query.shape[0], key.shape[0]) + assert attn_mask.shape == expected_shape, \ + (f"Expected `attn_mask` shape to be {expected_shape} but got {attn_mask.shape}") + else: + raise AssertionError( + f"query should be unbatched 2D or batched 3D tensor but received {query.dim()}-D query tensor") + + return is_batched + +def _canonical_mask( + mask: Optional[Tensor], + mask_name: str, + other_type: Optional[DType], + other_name: str, + target_type: DType, + check_other: bool = True, +) -> Optional[Tensor]: + + if mask is not None: + _mask_dtype = mask.dtype + _mask_is_float = torch.is_floating_point(mask) + if _mask_dtype != torch.bool and not _mask_is_float: + raise AssertionError( + f"only bool and floating types of {mask_name} are supported") + if check_other and other_type is not None: + if _mask_dtype != other_type: + warnings.warn( + f"Support for mismatched {mask_name} and {other_name} " + "is deprecated. Use same type for both instead." 
+ ) + if not _mask_is_float: + mask = ( + torch.zeros_like(mask, dtype=target_type) + .masked_fill_(mask, float("-inf")) + ) + return mask + +def _none_or_dtype(input: Optional[Tensor]) -> Optional[DType]: + if input is None: + return None + elif isinstance(input, torch.Tensor): + return input.dtype + raise RuntimeError("input to _none_or_dtype() must be None or torch.Tensor") + +def multi_head_attention_forward( + query: Tensor, + key: Tensor, + value: Tensor, + embed_dim_to_check: int, + num_heads: int, + in_proj_weight: Optional[Tensor], + in_proj_bias: Optional[Tensor], + bias_k: Optional[Tensor], + bias_v: Optional[Tensor], + add_zero_attn: bool, + dropout_p: float, + out_proj_weight: Tensor, + out_proj_bias: Optional[Tensor], + training: bool = True, + key_padding_mask: Optional[Tensor] = None, + need_weights: bool = True, + attn_mask: Optional[Tensor] = None, + use_separate_proj_weight: bool = False, + q_proj_weight: Optional[Tensor] = None, + k_proj_weight: Optional[Tensor] = None, + v_proj_weight: Optional[Tensor] = None, + static_k: Optional[Tensor] = None, + static_v: Optional[Tensor] = None, + average_attn_weights: bool = True, + is_causal: bool = False, +) -> Tuple[Tensor, Optional[Tensor]]: + r"""Forward method for MultiHeadAttention. + + See :class:`torch.nn.MultiheadAttention` for details. + + Args: + query, key, value: map a query and a set of key-value pairs to an output. + See "Attention Is All You Need" for more details. + embed_dim_to_check: total dimension of the model. + num_heads: parallel attention heads. + in_proj_weight, in_proj_bias: input projection weight and bias. + bias_k, bias_v: bias of the key and value sequences to be added at dim=0. + add_zero_attn: add a new batch of zeros to the key and + value sequences at dim=1. + dropout_p: probability of an element to be zeroed. + out_proj_weight, out_proj_bias: the output projection weight and bias. + training: apply dropout if is ``True``. + key_padding_mask: if provided, specified padding elements in the key will + be ignored by the attention. This is an binary mask. When the value is True, + the corresponding value on the attention layer will be filled with -inf. + need_weights: output attn_output_weights. + Default: `True` + Note: `needs_weight` defaults to `True`, but should be set to `False` + For best performance when attention weights are not needed. + *Setting needs_weights to `True` + leads to a significant performance degradation.* + attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all + the batches while a 3D mask allows to specify a different mask for the entries of each batch. + is_causal: If specified, applies a causal mask as attention mask, and ignores + attn_mask for computing scaled dot product attention. + Default: ``False``. + .. warning:: + is_causal is provides a hint that the attn_mask is the + causal mask.Providing incorrect hints can result in + incorrect execution, including forward and backward + compatibility. + use_separate_proj_weight: the function accept the proj. weights for query, key, + and value in different forms. If false, in_proj_weight will be used, which is + a combination of q_proj_weight, k_proj_weight, v_proj_weight. + q_proj_weight, k_proj_weight, v_proj_weight, in_proj_bias: input projection weight and bias. + static_k, static_v: static key and value used for attention operators. + average_attn_weights: If true, indicates that the returned ``attn_weights`` should be averaged across heads. 
+ Otherwise, ``attn_weights`` are provided separately per head. Note that this flag only has an effect
+ when ``need_weights=True``. Default: True
+
+ Shape:
+ Inputs:
+ - query: :math:`(L, E)` or :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is
+ the embedding dimension.
+ - key: :math:`(S, E)` or :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is
+ the embedding dimension.
+ - value: :math:`(S, E)` or :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is
+ the embedding dimension.
+ - key_padding_mask: :math:`(S)` or :math:`(N, S)` where N is the batch size, S is the source sequence length.
+ If a FloatTensor is provided, it will be directly added to the value.
+ If a BoolTensor is provided, the positions with the
+ value of ``True`` will be ignored while the positions with the value of ``False`` will be unchanged.
+ - attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length.
+ 3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length,
+ S is the source sequence length. attn_mask ensures that position i is allowed to attend the unmasked
+ positions. If a BoolTensor is provided, positions with ``True``
+ are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor
+ is provided, it will be added to the attention weight.
+ - static_k: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
+ N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
+ - static_v: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
+ N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
+
+ Outputs:
+ - attn_output: :math:`(L, E)` or :math:`(L, N, E)` where L is the target sequence length, N is the batch size,
+ E is the embedding dimension.
+ - attn_output_weights: Only returned when ``need_weights=True``. If ``average_attn_weights=True``, returns
+ attention weights averaged across heads of shape :math:`(L, S)` when input is unbatched or
+ :math:`(N, L, S)`, where :math:`N` is the batch size, :math:`L` is the target sequence length, and
+ :math:`S` is the source sequence length. If ``average_attn_weights=False``, returns attention weights per
+ head of shape :math:`(num_heads, L, S)` when input is unbatched or :math:`(N, num_heads, L, S)`.
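+
+    .. note::
+        Editorial addition (illustrative sketch, not part of the upstream docstring):
+        a minimal call with random packed projection weights, assuming
+        ``import torch`` and ``import torch.nn.functional as F``.
+
+        .. code-block:: python
+
+            L, S, N, E, H = 4, 6, 2, 16, 4    # target len, source len, batch, embed dim, heads
+            query = torch.randn(L, N, E)
+            key = torch.randn(S, N, E)
+            value = torch.randn(S, N, E)
+            attn_out, attn_w = F.multi_head_attention_forward(
+                query, key, value, E, H,
+                torch.randn(3 * E, E), torch.zeros(3 * E),   # in_proj_weight, in_proj_bias
+                None, None, False, 0.0,                      # bias_k, bias_v, add_zero_attn, dropout_p
+                torch.randn(E, E), torch.zeros(E),           # out_proj_weight, out_proj_bias
+                training=False, need_weights=True,
+            )
+            # attn_out: (L, N, E); attn_w: (N, L, S), averaged over heads by default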
+ """ + tens_ops = (query, key, value, in_proj_weight, in_proj_bias, bias_k, bias_v, out_proj_weight, out_proj_bias) + if has_torch_function(tens_ops): + return handle_torch_function( + multi_head_attention_forward, + tens_ops, + query, + key, + value, + embed_dim_to_check, + num_heads, + in_proj_weight, + in_proj_bias, + bias_k, + bias_v, + add_zero_attn, + dropout_p, + out_proj_weight, + out_proj_bias, + training=training, + key_padding_mask=key_padding_mask, + need_weights=need_weights, + attn_mask=attn_mask, + is_causal=is_causal, + use_separate_proj_weight=use_separate_proj_weight, + q_proj_weight=q_proj_weight, + k_proj_weight=k_proj_weight, + v_proj_weight=v_proj_weight, + static_k=static_k, + static_v=static_v, + average_attn_weights=average_attn_weights, + ) + + is_batched = _mha_shape_check(query, key, value, key_padding_mask, attn_mask, num_heads) + + # For unbatched input, we unsqueeze at the expected batch-dim to pretend that the input + # is batched, run the computation and before returning squeeze the + # batch dimension so that the output doesn't carry this temporary batch dimension. + if not is_batched: + # unsqueeze if the input is unbatched + query = query.unsqueeze(1) + key = key.unsqueeze(1) + value = value.unsqueeze(1) + if key_padding_mask is not None: + key_padding_mask = key_padding_mask.unsqueeze(0) + + # set up shape vars + tgt_len, bsz, embed_dim = query.shape + src_len, _, _ = key.shape + + key_padding_mask = _canonical_mask( + mask=key_padding_mask, + mask_name="key_padding_mask", + other_type=_none_or_dtype(attn_mask), + other_name="attn_mask", + target_type=query.dtype + ) + + if is_causal and attn_mask is None: + raise RuntimeError( + "Need attn_mask if specifying the is_causal hint. " + "You may use the Transformer module method " + "`generate_square_subsequent_mask` to create this mask." + ) + + if is_causal and key_padding_mask is None and not need_weights: + # when we have a kpm or need weights, we need attn_mask + # Otherwise, we use the is_causal hint go as is_causal + # indicator to SDPA. + attn_mask = None + else: + attn_mask = _canonical_mask( + mask=attn_mask, + mask_name="attn_mask", + other_type=None, + other_name="", + target_type=query.dtype, + check_other=False, + ) + + if key_padding_mask is not None: + # We have the attn_mask, and use that to merge kpm into it. + # Turn off use of is_causal hint, as the merged mask is no + # longer causal. 
+ is_causal = False + + assert embed_dim == embed_dim_to_check, \ + f"was expecting embedding dimension of {embed_dim_to_check}, but got {embed_dim}" + if isinstance(embed_dim, torch.Tensor): + # embed_dim can be a tensor when JIT tracing + head_dim = embed_dim.div(num_heads, rounding_mode='trunc') + else: + head_dim = embed_dim // num_heads + assert head_dim * num_heads == embed_dim, f"embed_dim {embed_dim} not divisible by num_heads {num_heads}" + if use_separate_proj_weight: + # allow MHA to have different embedding dimensions when separate projection weights are used + assert key.shape[:2] == value.shape[:2], \ + f"key's sequence and batch dims {key.shape[:2]} do not match value's {value.shape[:2]}" + else: + assert key.shape == value.shape, f"key shape {key.shape} does not match value shape {value.shape}" + + # + # compute in-projection + # + if not use_separate_proj_weight: + assert in_proj_weight is not None, "use_separate_proj_weight is False but in_proj_weight is None" + q, k, v = _in_projection_packed(query, key, value, in_proj_weight, in_proj_bias) + else: + assert q_proj_weight is not None, "use_separate_proj_weight is True but q_proj_weight is None" + assert k_proj_weight is not None, "use_separate_proj_weight is True but k_proj_weight is None" + assert v_proj_weight is not None, "use_separate_proj_weight is True but v_proj_weight is None" + if in_proj_bias is None: + b_q = b_k = b_v = None + else: + b_q, b_k, b_v = in_proj_bias.chunk(3) + q, k, v = _in_projection(query, key, value, q_proj_weight, k_proj_weight, v_proj_weight, b_q, b_k, b_v) + + # prep attention mask + + if attn_mask is not None: + # ensure attn_mask's dim is 3 + if attn_mask.dim() == 2: + correct_2d_size = (tgt_len, src_len) + if attn_mask.shape != correct_2d_size: + raise RuntimeError(f"The shape of the 2D attn_mask is {attn_mask.shape}, but should be {correct_2d_size}.") + attn_mask = attn_mask.unsqueeze(0) + elif attn_mask.dim() == 3: + correct_3d_size = (bsz * num_heads, tgt_len, src_len) + if attn_mask.shape != correct_3d_size: + raise RuntimeError(f"The shape of the 3D attn_mask is {attn_mask.shape}, but should be {correct_3d_size}.") + else: + raise RuntimeError(f"attn_mask's dimension {attn_mask.dim()} is not supported") + + # add bias along batch dimension (currently second) + if bias_k is not None and bias_v is not None: + assert static_k is None, "bias cannot be added to static key." + assert static_v is None, "bias cannot be added to static value." 
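+        # bias_k / bias_v are (1, 1, E) tensors (as created by nn.MultiheadAttention):
+        # repeat them across the batch dimension and append them along dim 0 as one
+        # extra key/value position, then pad attn_mask / key_padding_mask with one
+        # trailing column so the masks cover that added position.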
+ k = torch.cat([k, bias_k.repeat(1, bsz, 1)]) + v = torch.cat([v, bias_v.repeat(1, bsz, 1)]) + if attn_mask is not None: + attn_mask = pad(attn_mask, (0, 1)) + if key_padding_mask is not None: + key_padding_mask = pad(key_padding_mask, (0, 1)) + else: + assert bias_k is None + assert bias_v is None + + # + # reshape q, k, v for multihead attention and make em batch first + # + q = q.view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1) + if static_k is None: + k = k.view(k.shape[0], bsz * num_heads, head_dim).transpose(0, 1) + else: + # TODO finish disentangling control flow so we don't do in-projections when statics are passed + assert static_k.size(0) == bsz * num_heads, \ + f"expecting static_k.size(0) of {bsz * num_heads}, but got {static_k.size(0)}" + assert static_k.size(2) == head_dim, \ + f"expecting static_k.size(2) of {head_dim}, but got {static_k.size(2)}" + k = static_k + if static_v is None: + v = v.view(v.shape[0], bsz * num_heads, head_dim).transpose(0, 1) + else: + # TODO finish disentangling control flow so we don't do in-projections when statics are passed + assert static_v.size(0) == bsz * num_heads, \ + f"expecting static_v.size(0) of {bsz * num_heads}, but got {static_v.size(0)}" + assert static_v.size(2) == head_dim, \ + f"expecting static_v.size(2) of {head_dim}, but got {static_v.size(2)}" + v = static_v + + # add zero attention along batch dimension (now first) + if add_zero_attn: + zero_attn_shape = (bsz * num_heads, 1, head_dim) + k = torch.cat([k, torch.zeros(zero_attn_shape, dtype=k.dtype, device=k.device)], dim=1) + v = torch.cat([v, torch.zeros(zero_attn_shape, dtype=v.dtype, device=v.device)], dim=1) + if attn_mask is not None: + attn_mask = pad(attn_mask, (0, 1)) + if key_padding_mask is not None: + key_padding_mask = pad(key_padding_mask, (0, 1)) + + # update source sequence length after adjustments + src_len = k.size(1) + + # merge key padding and attention masks + if key_padding_mask is not None: + assert key_padding_mask.shape == (bsz, src_len), \ + f"expecting key_padding_mask shape of {(bsz, src_len)}, but got {key_padding_mask.shape}" + key_padding_mask = key_padding_mask.view(bsz, 1, 1, src_len). 
\ + expand(-1, num_heads, -1, -1).reshape(bsz * num_heads, 1, src_len) + if attn_mask is None: + attn_mask = key_padding_mask + else: + attn_mask = attn_mask + key_padding_mask + + # adjust dropout probability + if not training: + dropout_p = 0.0 + + # + # (deep breath) calculate attention and out projection + # + + if need_weights: + B, Nt, E = q.shape + q_scaled = q * math.sqrt(1.0 / float(E)) + + assert not (is_causal and attn_mask is None), "FIXME: is_causal not implemented for need_weights" + + if attn_mask is not None: + attn_output_weights = torch.baddbmm(attn_mask, q_scaled, k.transpose(-2, -1)) + else: + attn_output_weights = torch.bmm(q_scaled, k.transpose(-2, -1)) + attn_output_weights = softmax(attn_output_weights, dim=-1) + if dropout_p > 0.0: + attn_output_weights = dropout(attn_output_weights, p=dropout_p) + + attn_output = torch.bmm(attn_output_weights, v) + + attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len * bsz, embed_dim) + attn_output = linear(attn_output, out_proj_weight, out_proj_bias) + attn_output = attn_output.view(tgt_len, bsz, attn_output.size(1)) + + # optionally average attention weights over heads + attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len) + if average_attn_weights: + attn_output_weights = attn_output_weights.mean(dim=1) + + if not is_batched: + # squeeze the output if input was unbatched + attn_output = attn_output.squeeze(1) + attn_output_weights = attn_output_weights.squeeze(0) + return attn_output, attn_output_weights + else: + # attn_mask can be either (L,S) or (N*num_heads, L, S) + # if attn_mask's shape is (1, L, S) we need to unsqueeze to (1, 1, L, S) + # in order to match the input for SDPA of (N, num_heads, L, S) + if attn_mask is not None: + if attn_mask.size(0) == 1 and attn_mask.dim() == 3: + attn_mask = attn_mask.unsqueeze(0) + else: + attn_mask = attn_mask.view(bsz, num_heads, -1, src_len) + + q = q.view(bsz, num_heads, tgt_len, head_dim) + k = k.view(bsz, num_heads, src_len, head_dim) + v = v.view(bsz, num_heads, src_len, head_dim) + + attn_output = scaled_dot_product_attention(q, k, v, attn_mask, dropout_p, is_causal) + attn_output = attn_output.permute(2, 0, 1, 3).contiguous().view(bsz * tgt_len, embed_dim) + + attn_output = linear(attn_output, out_proj_weight, out_proj_bias) + attn_output = attn_output.view(tgt_len, bsz, attn_output.size(1)) + if not is_batched: + # squeeze the output if input was unbatched + attn_output = attn_output.squeeze(1) + return attn_output, None diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/functional.pyi b/llmeval-env/lib/python3.10/site-packages/torch/nn/functional.pyi new file mode 100644 index 0000000000000000000000000000000000000000..766ee58d70ead2b0bc143656f83feba293460c36 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/functional.pyi @@ -0,0 +1,682 @@ +from typing import ( + Any, + Callable, + Dict, + List, + Literal, + Optional, + overload, + Sequence, + Tuple, + Union, +) + +from torch import Tensor +from torch.types import _dtype, _int, _size + +from .common_types import ( + _ratio_any_t, + _size_1_t, + _size_2_opt_t, + _size_2_t, + _size_3_opt_t, + _size_3_t, + _size_any_t, +) + +# 'TypedDict' is a new accepted type that represents a dictionary with a fixed set of allowed keys. +# It is standards-track but not in `typing` yet. We leave this hear to be uncommented once the feature +# is wide-spread. 
+ +# from mypy_extensions import TypedDict + +# GRID_SAMPLE_INTERPOLATION_MODES = TypedDict('GRID_SAMPLE_INTERPOLATION_MODES', {'bilinear': int, 'nearest': int}) +# GRID_SAMPLE_PADDING_MODES = TypedDict('GRID_SAMPLE_PADDING_MODES', {'zeros': int, 'border': int, 'reflection': int}) + +GRID_SAMPLE_INTERPOLATION_MODES = Dict[str, int] +GRID_SAMPLE_PADDING_MODES = Dict[str, int] + +# These stubs were generated by running stubgen (`stubgen --parse-only functional.py`), followed by manual cleaning. +# +# The 'BroadcastingList{1,2,3}' types were replaced by `_size` or _output_ratio, as appropriate. +# This was necessary since the JIT uses BroadcastingList* types but static checking with mypy etc requires a `Sequence` +# type. There is no way to express the expected lengths of these lists in the current Python typing system. +# +# Functions created via `_add_docstr` in `functional.py` where merely typed as `Any` by `stubgen`, so those were +# deleted from the stub and replaced by generated declarations. See `gen_pyi` for the implementation of the code +# generation logic for those functions. In the future, it might be worth looking into using the mypy plugin system +# to encode the type semantics of `_add_docstr`, should that system ever become widespread. +def fractional_max_pool2d_with_indices( + input: Tensor, + kernel_size: _size, + output_size: Optional[_size] = ..., + output_ratio: Optional[_ratio_any_t] = ..., + return_indices: bool = ..., + _random_samples: Optional[Tensor] = ..., +) -> Tuple[Tensor, Tensor]: ... +def fractional_max_pool3d_with_indices( + input: Tensor, + kernel_size: _size, + output_size: Optional[_size] = ..., + output_ratio: Optional[_ratio_any_t] = ..., + return_indices: bool = ..., + _random_samples: Optional[Tensor] = ..., +) -> Tuple[Tensor, Tensor]: ... +def max_pool1d_with_indices( + input: Tensor, + kernel_size: _size, + stride: Optional[_size] = ..., + padding: _size = ..., + dilation: _size = ..., + ceil_mode: bool = ..., + return_indices: bool = ..., +) -> Tuple[Tensor, Tensor]: ... +def max_pool2d_with_indices( + input: Tensor, + kernel_size: _size, + stride: Optional[_size] = ..., + padding: _size = ..., + dilation: _size = ..., + ceil_mode: bool = ..., + return_indices: bool = ..., +) -> Tuple[Tensor, Tensor]: ... +def max_pool3d_with_indices( + input: Tensor, + kernel_size: _size, + stride: Optional[_size] = ..., + padding: _size = ..., + dilation: _size = ..., + ceil_mode: bool = ..., + return_indices: bool = ..., +) -> Tuple[Tensor, Tensor]: ... +def max_unpool1d( + input: Tensor, + indices: Tensor, + kernel_size: _size, + stride: Optional[_size] = ..., + padding: _size = ..., + output_size: Optional[_size] = ..., +) -> Tensor: ... +def max_unpool2d( + input: Tensor, + indices: Tensor, + kernel_size: _size, + stride: Optional[_size] = ..., + padding: _size = ..., + output_size: Optional[_size] = ..., +) -> Tensor: ... +def max_unpool3d( + input: Tensor, + indices: Tensor, + kernel_size: _size, + stride: Optional[_size] = ..., + padding: _size = ..., + output_size: Optional[_size] = ..., +) -> Tensor: ... +def lp_pool1d( + input: Tensor, + norm_type: float, + kernel_size: _size_1_t, + stride: Union[Optional[_size], Optional[int]] = ..., + ceil_mode: bool = ..., +) -> Tensor: ... +def lp_pool2d( + input: Tensor, + norm_type: float, + kernel_size: _size_2_t, + stride: Union[Optional[_size], Optional[int]] = ..., + ceil_mode: bool = ..., +) -> Tensor: ... 
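+# Editorial usage sketch for the pooling stubs above (illustrative, not part of the
+# generated stub; assumes `import torch` and `import torch.nn.functional as F`):
+#     x = torch.randn(1, 3, 8, 8)
+#     y, idx = F.max_pool2d_with_indices(x, kernel_size=2)   # y and idx: (1, 3, 4, 4)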
+def lp_pool3d( + input: Tensor, + norm_type: float, + kernel_size: _size_3_t, + stride: Union[Optional[_size], Optional[int]] = ..., + ceil_mode: bool = ..., +) -> Tensor: ... +def adaptive_max_pool1d_with_indices( + input: Tensor, + output_size: _size, + return_indices: bool = ..., +) -> Tuple[Tensor, Tensor]: ... +def adaptive_max_pool2d_with_indices( + input: Tensor, + output_size: _size_2_opt_t, + return_indices: bool = ..., +) -> Tuple[Tensor, Tensor]: ... +def adaptive_max_pool3d_with_indices( + input: Tensor, + output_size: _size_3_opt_t, + return_indices: bool = ..., +) -> Tuple[Tensor, Tensor]: ... +def adaptive_avg_pool2d(input: Tensor, output_size: _size_2_opt_t) -> Tensor: ... +def adaptive_avg_pool3d(input: Tensor, output_size: _size_3_opt_t) -> Tensor: ... +def dropout( + input: Tensor, + p: float = ..., + training: bool = ..., + inplace: bool = ..., +) -> Tensor: ... +def alpha_dropout( + input: Tensor, + p: float = ..., + training: bool = ..., + inplace: bool = ..., +) -> Tensor: ... +def dropout1d( + input: Tensor, + p: float = ..., + training: bool = ..., + inplace: bool = ..., +) -> Tensor: ... +def dropout2d( + input: Tensor, + p: float = ..., + training: bool = ..., + inplace: bool = ..., +) -> Tensor: ... +def dropout3d( + input: Tensor, + p: float = ..., + training: bool = ..., + inplace: bool = ..., +) -> Tensor: ... +def feature_alpha_dropout( + input: Tensor, + p: float = ..., + training: bool = ..., + inplace: bool = ..., +) -> Tensor: ... +def threshold( + input: Tensor, + threshold: float, + value: float, + inplace: bool = ..., +) -> Tensor: ... +def relu(input: Tensor, inplace: bool = ...) -> Tensor: ... +def glu(input: Tensor, dim: int = ...) -> Tensor: ... +def hardtanh( + input: Tensor, + min_val: float = ..., + max_val: float = ..., + inplace: bool = ..., +) -> Tensor: ... +def relu6(input: Tensor, inplace: bool = ...) -> Tensor: ... +def elu(input: Tensor, alpha: float = ..., inplace: bool = ...) -> Tensor: ... +def selu(input: Tensor, inplace: bool = ...) -> Tensor: ... +def celu(input: Tensor, alpha: float = ..., inplace: bool = ...) -> Tensor: ... +def leaky_relu( + input: Tensor, + negative_slope: float = ..., + inplace: bool = ..., +) -> Tensor: ... +def rrelu( + input: Tensor, + lower: float = ..., + upper: float = ..., + training: bool = ..., + inplace: bool = ..., +) -> Tensor: ... +def tanhshrink(input: Any): ... +def softsign(input: Any): ... +def softmin( + input: Tensor, + dim: Optional[int] = ..., + _stacklevel: int = ..., + dtype: Optional[_dtype] = ..., +) -> Tensor: ... +def softmax( + input: Tensor, + dim: Optional[int] = ..., + _stacklevel: int = ..., + dtype: Optional[_dtype] = ..., +) -> Tensor: ... +def gumbel_softmax( + logits: Tensor, + tau: float = ..., + hard: bool = ..., + eps: float = ..., + dim: int = ..., +) -> Tensor: ... +def log_softmax( + input: Tensor, + dim: Optional[int] = ..., + _stacklevel: int = ..., + dtype: Optional[_dtype] = ..., +) -> Tensor: ... +def tanh(input: Any): ... +def sigmoid(input: Any) -> Tensor: ... +def hardsigmoid(input: Tensor, inplace: bool = False) -> Tensor: ... +def silu(input: Tensor, inplace: bool = False) -> Tensor: ... +def mish(input: Tensor, inplace: bool = False) -> Tensor: ... +def hardswish(input: Tensor, inplace: bool = False) -> Tensor: ... +def embedding( + input: Tensor, + weight: Tensor, + padding_idx: Optional[int] = ..., + max_norm: Optional[float] = ..., + norm_type: float = ..., + scale_grad_by_freq: bool = ..., + sparse: bool = ..., +) -> Tensor: ... 
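+# Editorial usage sketch for `embedding` above (illustrative, not part of the
+# generated stub):
+#     weight = torch.randn(10, 3)                                # (num_embeddings, embedding_dim)
+#     out = F.embedding(torch.tensor([[1, 2], [4, 5]]), weight)  # -> (2, 2, 3)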
+def embedding_bag( + input: Tensor, + weight: Tensor, + offsets: Optional[Tensor] = ..., + max_norm: Optional[float] = ..., + norm_type: float = ..., + scale_grad_by_freq: bool = ..., + mode: str = ..., + sparse: bool = ..., + per_sample_weights: Optional[Tensor] = ..., + include_last_offset: bool = ..., + padding_idx: Optional[int] = ..., +) -> Tensor: ... +def batch_norm( + input: Tensor, + running_mean: Optional[Tensor], + running_var: Optional[Tensor], + weight: Optional[Tensor] = ..., + bias: Optional[Tensor] = ..., + training: bool = ..., + momentum: float = ..., + eps: float = ..., +) -> Tensor: ... +def instance_norm( + input: Tensor, + running_mean: Optional[Tensor] = ..., + running_var: Optional[Tensor] = ..., + weight: Optional[Tensor] = ..., + bias: Optional[Tensor] = ..., + use_input_stats: bool = ..., + momentum: float = ..., + eps: float = ..., +) -> Tensor: ... +def layer_norm( + input: Tensor, + normalized_shape: Sequence[int], + weight: Optional[Tensor] = ..., + bias: Optional[Tensor] = ..., + eps: float = ..., +) -> Tensor: ... +def group_norm( + input: Tensor, + num_groups: int, + weight: Optional[Tensor] = ..., + bias: Optional[Tensor] = ..., + eps: float = ..., +) -> Tensor: ... +def local_response_norm( + input: Tensor, + size: int, + alpha: float = ..., + beta: float = ..., + k: float = ..., +) -> Tensor: ... +def ctc_loss( + log_probs: Tensor, + targets: Tensor, + input_lengths: Tensor, + target_lengths: Tensor, + blank: int = ..., + reduction: str = ..., + zero_infinity: bool = ..., +) -> Tensor: ... +def nll_loss( + input: Tensor, + target: Tensor, + weight: Optional[Tensor] = ..., + size_average: Optional[bool] = ..., + ignore_index: int = ..., + reduce: Optional[bool] = ..., + reduction: str = ..., +) -> Tensor: ... +def poisson_nll_loss( + input: Tensor, + target: Tensor, + log_input: bool = ..., + full: bool = ..., + size_average: Optional[bool] = ..., + eps: float = ..., + reduce: Optional[bool] = ..., + reduction: str = ..., +) -> Tensor: ... +def gaussian_nll_loss( + input: Tensor, + target: Tensor, + var: Tensor, + full: Optional[bool] = ..., + eps: Optional[float] = ..., + reduction: Optional[str] = ..., +) -> Tensor: ... +def kl_div( + input: Tensor, + target: Tensor, + size_average: Optional[bool] = ..., + reduce: Optional[bool] = ..., + reduction: str = ..., + log_target: bool = ..., +) -> Tensor: ... +def cross_entropy( + input: Tensor, + target: Tensor, + weight: Optional[Tensor] = ..., + size_average: Optional[bool] = ..., + ignore_index: int = ..., + reduce: Optional[bool] = ..., + reduction: str = ..., + label_smoothing: float = ..., +) -> Tensor: ... +def binary_cross_entropy( + input: Tensor, + target: Tensor, + weight: Optional[Tensor] = ..., + size_average: Optional[bool] = ..., + reduce: Optional[bool] = ..., + reduction: str = ..., +) -> Tensor: ... +def binary_cross_entropy_with_logits( + input: Tensor, + target: Tensor, + weight: Optional[Tensor] = ..., + size_average: Optional[bool] = ..., + reduce: Optional[bool] = ..., + reduction: str = ..., + pos_weight: Optional[Tensor] = ..., +) -> Tensor: ... +def smooth_l1_loss( + input: Tensor, + target: Tensor, + size_average: Optional[bool] = ..., + reduce: Optional[bool] = ..., + reduction: str = ..., + beta: float = ..., +) -> Tensor: ... +def huber_loss( + input: Tensor, + target: Tensor, + reduction: str = ..., + delta: float = ..., +) -> Tensor: ... 
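+# Editorial usage sketch for `cross_entropy` above (illustrative, not part of the
+# generated stub):
+#     logits = torch.randn(3, 5)               # (batch, num_classes)
+#     target = torch.tensor([1, 0, 4])         # class indices
+#     loss = F.cross_entropy(logits, target)   # scalar; reduction="mean" by default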
+def l1_loss( + input: Tensor, + target: Tensor, + size_average: Optional[bool] = ..., + reduce: Optional[bool] = ..., + reduction: str = ..., +) -> Tensor: ... +def mse_loss( + input: Tensor, + target: Tensor, + size_average: Optional[bool] = ..., + reduce: Optional[bool] = ..., + reduction: str = ..., +) -> Tensor: ... +def margin_ranking_loss( + input1: Tensor, + input2: Tensor, + target: Tensor, + margin: float = ..., + size_average: Optional[bool] = ..., + reduce: Optional[bool] = ..., + reduction: str = ..., +) -> Tensor: ... +def hinge_embedding_loss( + input: Tensor, + target: Tensor, + margin: float = ..., + size_average: Optional[bool] = ..., + reduce: Optional[bool] = ..., + reduction: str = ..., +) -> Tensor: ... +def multilabel_margin_loss( + input: Tensor, + target: Tensor, + size_average: Optional[bool] = ..., + reduce: Optional[bool] = ..., + reduction: str = ..., +) -> Tensor: ... +def soft_margin_loss( + input: Tensor, + target: Tensor, + size_average: Optional[bool] = ..., + reduce: Optional[bool] = ..., + reduction: str = ..., +) -> Tensor: ... +def multilabel_soft_margin_loss( + input: Tensor, + target: Tensor, + weight: Optional[Tensor] = ..., + size_average: Optional[bool] = ..., + reduce: Optional[bool] = ..., + reduction: str = ..., +) -> Tensor: ... +def cosine_embedding_loss( + input1: Tensor, + input2: Tensor, + target: Tensor, + margin: float = ..., + size_average: Optional[bool] = ..., + reduce: Optional[bool] = ..., + reduction: str = ..., +) -> Tensor: ... +def multi_margin_loss( + input: Tensor, + target: Tensor, + p: int = ..., + margin: float = ..., + weight: Optional[Tensor] = ..., + size_average: Optional[bool] = ..., + reduce: Optional[bool] = ..., + reduction: str = ..., +) -> Tensor: ... +def upsample( + input: Any, + size: Optional[Any] = ..., + scale_factor: Optional[Any] = ..., + mode: str = ..., + align_corners: Optional[Any] = ..., +): ... +def interpolate( + input: Any, + size: Optional[Any] = ..., + scale_factor: Optional[Any] = ..., + mode: str = ..., + align_corners: Optional[Any] = ..., + recompute_scale_factor: Optional[Any] = ..., + antialias: bool = ..., +): ... +def upsample_nearest( + input: Any, + size: Optional[Any] = ..., + scale_factor: Optional[Any] = ..., +): ... +def upsample_bilinear( + input: Any, + size: Optional[Any] = ..., + scale_factor: Optional[Any] = ..., +): ... +def grid_sample( + input: Tensor, + grid: Tensor, + mode: str = ..., + padding_mode: str = ..., + align_corners: Optional[Any] = ..., +) -> Tensor: ... +def affine_grid( + theta: Tensor, + size: List[int], + align_corners: Optional[Any] = ..., +) -> Tensor: ... +def triplet_margin_loss( + anchor: Tensor, + positive: Tensor, + negative: Tensor, + margin: float = ..., + p: float = ..., + eps: float = ..., + swap: bool = ..., + size_average: Optional[bool] = ..., + reduce: Optional[bool] = ..., + reduction: str = ..., +) -> Tensor: ... +def triplet_margin_with_distance_loss( + anchor: Tensor, + positive: Tensor, + negative: Tensor, + *, + distance_function: Optional[Callable[[Tensor, Tensor], Tensor]] = ..., + margin: float = ..., + swap: bool = ..., + reduction: str = ..., +) -> Tensor: ... +def normalize( + input: Tensor, + p: float = ..., + dim: int = ..., + eps: float = ..., + out: Optional[Tensor] = ..., +) -> Tensor: ... +def assert_int_or_pair( + arg: Any, + arg_name: Any, + message: Any, +) -> None: ... 
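The resampling entries above (interpolate, affine_grid, grid_sample) are easiest to read together; a minimal sketch, assuming NCHW inputs and arbitrary sizes chosen for illustration:

import torch
import torch.nn.functional as F

img = torch.randn(1, 3, 32, 32)

# interpolate takes either size or scale_factor, not both.
up = F.interpolate(img, scale_factor=2, mode="bilinear", align_corners=False)  # (1, 3, 64, 64)

# affine_grid + grid_sample form a differentiable warp: theta is a batch of
# 2x3 affine matrices (identity here) and grid holds sample coordinates in [-1, 1].
theta = torch.tensor([[[1.0, 0.0, 0.0],
                       [0.0, 1.0, 0.0]]])
grid = F.affine_grid(theta, size=[1, 3, 32, 32], align_corners=False)
warped = F.grid_sample(img, grid, mode="bilinear", align_corners=False)  # ~= img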
+def unfold( + input: Tensor, + kernel_size: _size_any_t, + dilation: _size_any_t = ..., + padding: _size_any_t = ..., + stride: _size_any_t = ..., +) -> Tensor: ... +def fold( + input: Tensor, + output_size: _size_any_t, + kernel_size: _size_any_t, + dilation: _size_any_t = ..., + padding: _size_any_t = ..., + stride: _size_any_t = ..., +) -> Tensor: ... +def _canonical_mask( + mask: Optional[Tensor], + mask_name: str, + other_type: Optional[_dtype], + other_name: str, + target_type: _dtype, + check_other: bool = True, +) -> Optional[Tensor]: ... +def _none_or_dtype(input: Optional[Tensor]) -> Optional[_dtype]: ... +def multi_head_attention_forward( + query: Tensor, + key: Tensor, + value: Tensor, + embed_dim_to_check: int, + num_heads: int, + in_proj_weight: Optional[Tensor], + in_proj_bias: Optional[Tensor], + bias_k: Optional[Tensor], + bias_v: Optional[Tensor], + add_zero_attn: bool, + dropout_p: float, + out_proj_weight: Tensor, + out_proj_bias: Optional[Tensor], + training: bool = True, + key_padding_mask: Optional[Tensor] = None, + need_weights: bool = True, + attn_mask: Optional[Tensor] = None, + use_separate_proj_weight: bool = False, + q_proj_weight: Optional[Tensor] = None, + k_proj_weight: Optional[Tensor] = None, + v_proj_weight: Optional[Tensor] = None, + static_k: Optional[Tensor] = None, + static_v: Optional[Tensor] = None, + average_attn_weights: bool = True, + is_causal: bool = False, +) -> Tuple[Tensor, Optional[Tensor]]: ... + +from .. import conv1d as conv1d +from .. import conv2d as conv2d +from .. import conv3d as conv3d +from .. import conv_transpose1d as conv_transpose1d +from .. import conv_transpose2d as conv_transpose2d +from .. import conv_transpose3d as conv_transpose3d +from .. import conv_tbc as conv_tbc +from .. import avg_pool1d as avg_pool1d +from .. import adaptive_avg_pool1d as adaptive_avg_pool1d +from .. import relu_ as relu_ +from .. import selu_ as selu_ +from .. import celu_ as celu_ +from .. import prelu as prelu +from .. import rrelu_ as rrelu_ +from .. import hardshrink as hardshrink +from .. import bilinear as bilinear +from .. import pixel_shuffle as pixel_shuffle +from .. import pixel_unshuffle as pixel_unshuffle +from .. import channel_shuffle as channel_shuffle +from .. import native_channel_shuffle as native_channel_shuffle +from .. import pairwise_distance as pairwise_distance +from .. import pdist as pdist +from .. import cosine_similarity as cosine_similarity +from .._C._nn import avg_pool2d as avg_pool2d +from .._C._nn import avg_pool3d as avg_pool3d +from .._C._nn import hardtanh_ as hardtanh_ +from .._C._nn import elu_ as elu_ +from .._C._nn import leaky_relu_ as leaky_relu_ +from .._C._nn import gelu as gelu +from .._C._nn import softplus as softplus +from .._C._nn import softshrink as softshrink +from .._C._nn import linear as linear +from .._C._nn import pad as pad +from .._C._nn import one_hot as one_hot +from .._C._nn import scaled_dot_product_attention as scaled_dot_product_attention +from .._C._nn import log_sigmoid +logsigmoid = log_sigmoid + +@overload +def adaptive_max_pool1d(input: Tensor, output_size: Union[_int, _size], return_indices: Literal[False] = False) -> Tensor: ... +@overload +def adaptive_max_pool1d(input: Tensor, output_size: Union[_int, _size], return_indices: Literal[True], /) -> Tuple[Tensor, Tensor]: ... +@overload +def adaptive_max_pool1d(input: Tensor, output_size: Union[_int, _size], *, return_indices: Literal[True]) -> Tuple[Tensor, Tensor]: ... 
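The paired Literal[False]/Literal[True] overloads above exist so static type checkers can tell whether a call returns a single Tensor or a (values, indices) tuple. A small runtime sketch (editor's illustration; shapes are arbitrary assumptions):

import torch
import torch.nn.functional as F

x = torch.randn(2, 4, 16)                                        # (N, C, L)
pooled = F.adaptive_max_pool1d(x, 8)                             # Tensor, shape (2, 4, 8)
pooled, idx = F.adaptive_max_pool1d(x, 8, return_indices=True)   # Tuple[Tensor, Tensor]

# unfold/fold from the same file: unfold extracts sliding patches, fold sums them
# back; with non-overlapping patches the round trip reproduces the input exactly.
img = torch.randn(1, 3, 8, 8)
patches = F.unfold(img, kernel_size=2, stride=2)                       # (1, 12, 16)
recon = F.fold(patches, output_size=(8, 8), kernel_size=2, stride=2)   # equals img here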
+@overload +def adaptive_max_pool2d(input: Tensor, output_size: Union[_int, _size], return_indices: Literal[False] = False) -> Tensor: ... +@overload +def adaptive_max_pool2d(input: Tensor, output_size: Union[_int, _size], return_indices: Literal[True], /) -> Tuple[Tensor, Tensor]: ... +@overload +def adaptive_max_pool2d(input: Tensor, output_size: Union[_int, _size], *, return_indices: Literal[True]) -> Tuple[Tensor, Tensor]: ... +@overload +def adaptive_max_pool3d(input: Tensor, output_size: Union[_int, _size], return_indices: Literal[False] = False) -> Tensor: ... +@overload +def adaptive_max_pool3d(input: Tensor, output_size: Union[_int, _size], return_indices: Literal[True], /) -> Tuple[Tensor, Tensor]: ... +@overload +def adaptive_max_pool3d(input: Tensor, output_size: Union[_int, _size], *, return_indices: Literal[True]) -> Tuple[Tensor, Tensor]: ... +@overload +def fractional_max_pool2d(input: Tensor, kernel_size: Union[_int, _size], output_size: Optional[Union[_int, _size]] = None, output_ratio: Optional[_ratio_any_t] = None, return_indices: Literal[False] = False, _random_samples: Optional[Tensor] = None) -> Tensor: ... +@overload +def fractional_max_pool2d(input: Tensor, kernel_size: Union[_int, _size], output_size: Optional[Union[_int, _size]], output_ratio: Optional[_ratio_any_t], return_indices: Literal[True], /, _random_samples: Optional[Tensor] = None) -> Tuple[Tensor, Tensor]: ... +@overload +def fractional_max_pool2d(input: Tensor, kernel_size: Union[_int, _size], output_size: Optional[Union[_int, _size]] = None, output_ratio: Optional[_ratio_any_t] = None, *, return_indices: Literal[True], _random_samples: Optional[Tensor] = None) -> Tuple[Tensor, Tensor]: ... +@overload +def fractional_max_pool3d(input: Tensor, kernel_size: Union[_int, _size], output_size: Optional[Union[_int, _size]] = None, output_ratio: Optional[_ratio_any_t] = None, return_indices: Literal[False] = False, _random_samples: Optional[Tensor] = None) -> Tensor: ... +@overload +def fractional_max_pool3d(input: Tensor, kernel_size: Union[_int, _size], output_size: Optional[Union[_int, _size]], output_ratio: Optional[_ratio_any_t], return_indices: Literal[True], /, _random_samples: Optional[Tensor] = None) -> Tuple[Tensor, Tensor]: ... +@overload +def fractional_max_pool3d(input: Tensor, kernel_size: Union[_int, _size], output_size: Optional[Union[_int, _size]] = None, output_ratio: Optional[_ratio_any_t] = None, *, return_indices: Literal[True], _random_samples: Optional[Tensor] = None) -> Tuple[Tensor, Tensor]: ... +@overload +def max_pool1d(input: Tensor, kernel_size: Union[_int, _size], stride: Optional[Union[_int, _size]] = None, padding: Union[_int, _size] = 0, dilation: Union[_int, _size] = 1, ceil_mode: bool = False, return_indices: Literal[False] = False) -> Tensor: ... +@overload +def max_pool1d(input: Tensor, kernel_size: Union[_int, _size], stride: Optional[Union[_int, _size]], padding: Union[_int, _size], dilation: Union[_int, _size], ceil_mode: bool, return_indices: Literal[True], /) -> Tuple[Tensor, Tensor]: ... +@overload +def max_pool1d(input: Tensor, kernel_size: Union[_int, _size], stride: Optional[Union[_int, _size]] = None, padding: Union[_int, _size] = 0, dilation: Union[_int, _size] = 1, ceil_mode: bool = False, *, return_indices: Literal[True]) -> Tuple[Tensor, Tensor]: ... 
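The fractional pooling overloads above accept either an absolute output_size or an output_ratio relative to the input, and the max_pool* overloads follow the same return_indices pattern. A brief sketch (editor's illustration; kernel sizes and shapes are assumptions):

import torch
import torch.nn.functional as F

x = torch.randn(1, 3, 32, 32)
a = F.fractional_max_pool2d(x, kernel_size=3, output_size=(16, 16))
b, idx = F.fractional_max_pool2d(x, kernel_size=3, output_ratio=(0.5, 0.5),
                                 return_indices=True)          # also (1, 3, 16, 16)

# ceil_mode on max_pool1d keeps the trailing partial window.
seq = torch.randn(1, 4, 10)
y = F.max_pool1d(seq, kernel_size=3, stride=3)                  # output length 3
z = F.max_pool1d(seq, kernel_size=3, stride=3, ceil_mode=True)  # output length 4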
+@overload +def max_pool2d(input: Tensor, kernel_size: Union[_int, _size], stride: Optional[Union[_int, _size]] = None, padding: Union[_int, _size] = 0, dilation: Union[_int, _size] = 1, ceil_mode: bool = False, return_indices: Literal[False] = False) -> Tensor: ... +@overload +def max_pool2d(input: Tensor, kernel_size: Union[_int, _size], stride: Optional[Union[_int, _size]], padding: Union[_int, _size], dilation: Union[_int, _size], ceil_mode: bool, return_indices: Literal[True], /) -> Tuple[Tensor, Tensor]: ... +@overload +def max_pool2d(input: Tensor, kernel_size: Union[_int, _size], stride: Optional[Union[_int, _size]] = None, padding: Union[_int, _size] = 0, dilation: Union[_int, _size] = 1, ceil_mode: bool = False, *, return_indices: Literal[True]) -> Tuple[Tensor, Tensor]: ... +@overload +def max_pool3d(input: Tensor, kernel_size: Union[_int, _size], stride: Optional[Union[_int, _size]] = None, padding: Union[_int, _size] = 0, dilation: Union[_int, _size] = 1, ceil_mode: bool = False, return_indices: Literal[False] = False) -> Tensor: ... +@overload +def max_pool3d(input: Tensor, kernel_size: Union[_int, _size], stride: Optional[Union[_int, _size]], padding: Union[_int, _size], dilation: Union[_int, _size], ceil_mode: bool, return_indices: Literal[True], /) -> Tuple[Tensor, Tensor]: ... +@overload +def max_pool3d(input: Tensor, kernel_size: Union[_int, _size], stride: Optional[Union[_int, _size]] = None, padding: Union[_int, _size] = 0, dilation: Union[_int, _size] = 1, ceil_mode: bool = False, *, return_indices: Literal[True]) -> Tuple[Tensor, Tensor]: ... diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/grad.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/grad.py new file mode 100644 index 0000000000000000000000000000000000000000..660c87fb4133c4bffc9fb711767306a1ccf7f5f1 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/grad.py @@ -0,0 +1,189 @@ +"""Gradient interface.""" + +import torch +from .modules.utils import _single, _pair, _triple + + +def conv1d_input(input_size, weight, grad_output, stride=1, padding=0, dilation=1, groups=1): + r"""Compute the gradient of conv1d with respect to the input of the convolution. + + This is same as the 1D transposed convolution operator under the hood but requires + the shape of the gradient w.r.t. input to be specified explicitly. + + Args: + input_size : Shape of the input gradient tensor + weight: weight tensor (out_channels x in_channels/groups x kW) + grad_output : output gradient tensor (minibatch x out_channels x oW) + stride (int or tuple, optional): Stride of the convolution. Default: 1 + padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0 + dilation (int or tuple, optional): Spacing between kernel elements. Default: 1 + groups (int, optional): Number of blocked connections from input channels to output channels. 
Default: 1 + + Examples:: + + >>> input = torch.randn(1, 1, 3, requires_grad=True) + >>> weight = torch.randn(1, 1, 1, requires_grad=True) + >>> output = F.conv1d(input, weight) + >>> grad_output = torch.randn(output.shape) + >>> grad_input = torch.autograd.grad(output, input, grad_output) + >>> F.grad.conv1d_input(input.shape, weight, grad_output) + + """ + input = grad_output.new_empty(1).expand(input_size) + + return torch.ops.aten.convolution_backward(grad_output, input, weight, None, + _single(stride), _single(padding), _single(dilation), + False, [0], groups, (True, False, False))[0] + + +def conv1d_weight(input, weight_size, grad_output, stride=1, padding=0, dilation=1, groups=1): + r"""Compute the gradient of conv1d with respect to the weight of the convolution. + + Args: + input: input tensor of shape (minibatch x in_channels x iW) + weight_size : Shape of the weight gradient tensor + grad_output : output gradient tensor (minibatch x out_channels x oW) + stride (int or tuple, optional): Stride of the convolution. Default: 1 + padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0 + dilation (int or tuple, optional): Spacing between kernel elements. Default: 1 + groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1 + + Examples:: + + >>> input = torch.randn(1, 1, 3, requires_grad=True) + >>> weight = torch.randn(1, 1, 1, requires_grad=True) + >>> output = F.conv1d(input, weight) + >>> grad_output = torch.randn(output.shape) + >>> # xdoctest: +SKIP + >>> grad_weight = torch.autograd.grad(output, filter, grad_output) + >>> F.grad.conv1d_weight(input, weight.shape, grad_output) + + """ + weight = grad_output.new_empty(1).expand(weight_size) + + return torch.ops.aten.convolution_backward(grad_output, input, weight, None, + _single(stride), _single(padding), _single(dilation), + False, [0], groups, (False, True, False))[1] + + +def conv2d_input(input_size, weight, grad_output, stride=1, padding=0, dilation=1, groups=1): + r"""Compute the gradient of conv2d with respect to the input of the convolution. + + This is same as the 2D transposed convolution operator under the hood but requires + the shape of the gradient w.r.t. input to be specified explicitly. + + Args: + input_size : Shape of the input gradient tensor + weight: weight tensor (out_channels x in_channels/groups x kH x kW) + grad_output : output gradient tensor (minibatch x out_channels x oH x oW) + stride (int or tuple, optional): Stride of the convolution. Default: 1 + padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0 + dilation (int or tuple, optional): Spacing between kernel elements. Default: 1 + groups (int, optional): Number of blocked connections from input channels to output channels. 
Default: 1 + + Examples:: + + >>> input = torch.randn(1, 1, 3, 3, requires_grad=True) + >>> weight = torch.randn(1, 1, 1, 2, requires_grad=True) + >>> output = F.conv2d(input, weight) + >>> grad_output = torch.randn(output.shape) + >>> grad_input = torch.autograd.grad(output, input, grad_output) + >>> F.grad.conv2d_input(input.shape, weight, grad_output) + + """ + input = grad_output.new_empty(1).expand(input_size) + + return torch.ops.aten.convolution_backward(grad_output, input, weight, None, + _pair(stride), _pair(padding), _pair(dilation), + False, [0], groups, (True, False, False))[0] + + +def conv2d_weight(input, weight_size, grad_output, stride=1, padding=0, dilation=1, groups=1): + r"""Compute the gradient of conv2d with respect to the weight of the convolution. + + Args: + input: input tensor of shape (minibatch x in_channels x iH x iW) + weight_size : Shape of the weight gradient tensor + grad_output : output gradient tensor (minibatch x out_channels x oH x oW) + stride (int or tuple, optional): Stride of the convolution. Default: 1 + padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0 + dilation (int or tuple, optional): Spacing between kernel elements. Default: 1 + groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1 + + Examples:: + + >>> input = torch.randn(1, 1, 3, 3, requires_grad=True) + >>> weight = torch.randn(1, 1, 1, 2, requires_grad=True) + >>> output = F.conv2d(input, weight) + >>> grad_output = torch.randn(output.shape) + >>> # xdoctest: +SKIP + >>> grad_weight = torch.autograd.grad(output, filter, grad_output) + >>> F.grad.conv2d_weight(input, weight.shape, grad_output) + + """ + weight = grad_output.new_empty(1).expand(weight_size) + + return torch.ops.aten.convolution_backward(grad_output, input, weight, None, + _pair(stride), _pair(padding), _pair(dilation), + False, [0], groups, (False, True, False))[1] + + +def conv3d_input(input_size, weight, grad_output, stride=1, padding=0, dilation=1, groups=1): + r"""Compute the gradient of conv3d with respect to the input of the convolution. + + This is same as the 3D transposed convolution operator under the hood but requires + the shape of the gradient w.r.t. input to be specified explicitly. + + Args: + input_size : Shape of the input gradient tensor + weight: weights tensor (out_channels x in_channels/groups x kT x kH x kW) + grad_output : output gradient tensor (minibatch x out_channels x oT x oH x oW) + stride (int or tuple, optional): Stride of the convolution. Default: 1 + padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0 + dilation (int or tuple, optional): Spacing between kernel elements. Default: 1 + groups (int, optional): Number of blocked connections from input channels to output channels. 
Default: 1 + + Examples:: + + >>> input = torch.randn(2, 8, 10, 10, 20, requires_grad=True) + >>> weight = torch.randn(4, 8, 2, 3, 3, requires_grad=True) + >>> output = F.conv3d(input, weight) + >>> grad_output = torch.randn(output.shape) + >>> grad_input = torch.autograd.grad(output, input, grad_output) + >>> F.grad.conv3d_input(input.shape, weight, grad_output) + + """ + input = grad_output.new_empty(1).expand(input_size) + + return torch.ops.aten.convolution_backward(grad_output, input, weight, None, + _triple(stride), _triple(padding), _triple(dilation), + False, [0], groups, (True, False, False))[0] + + +def conv3d_weight(input, weight_size, grad_output, stride=1, padding=0, dilation=1, groups=1): + r"""Compute the gradient of conv3d with respect to the weight of the convolution. + + Args: + input: input tensor of shape (minibatch x in_channels x iT x iH x iW) + weight_size : Shape of the weight gradient tensor + grad_output : output gradient tensor (minibatch x out_channels x oT x oH x oW) + stride (int or tuple, optional): Stride of the convolution. Default: 1 + padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0 + dilation (int or tuple, optional): Spacing between kernel elements. Default: 1 + groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1 + + Examples:: + + >>> input = torch.randn(2, 8, 10, 10, 20, requires_grad=True) + >>> weight = torch.randn(4, 8, 2, 3, 3, requires_grad=True) + >>> output = F.conv3d(input, weight) + >>> grad_output = torch.randn(output.shape) + >>> grad_weight = torch.autograd.grad(output, weight, grad_output) + >>> F.grad.conv3d_weight(input, weight.shape, grad_output) + + """ + weight = grad_output.new_empty(1).expand(weight_size) + + return torch.ops.aten.convolution_backward(grad_output, input, weight, None, + _triple(stride), _triple(padding), _triple(dilation), + False, [0], groups, (False, True, False))[1] diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/init.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/init.py new file mode 100644 index 0000000000000000000000000000000000000000..426069d780c0b350c60a3bf92331fbfc52b5bf23 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/init.py @@ -0,0 +1,626 @@ +"""This file contains utilities for initializing neural network parameters.""" +import math +import warnings + +from torch import Tensor +import torch +from typing import Optional as _Optional + +# These no_grad_* functions are necessary as wrappers around the parts of these +# functions that use `with torch.no_grad()`. The JIT doesn't support context +# managers, so these need to be implemented as builtins. Using these wrappers +# lets us keep those builtins small and re-usable. +def _no_grad_uniform_(tensor, a, b, generator=None): + with torch.no_grad(): + return tensor.uniform_(a, b, generator=generator) + + +def _no_grad_normal_(tensor, mean, std, generator=None): + with torch.no_grad(): + return tensor.normal_(mean, std, generator=generator) + + +def _no_grad_trunc_normal_(tensor, mean, std, a, b, generator=None): + # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf + def norm_cdf(x): + # Computes standard normal cumulative distribution function + return (1. + math.erf(x / math.sqrt(2.))) / 2. + + if (mean < a - 2 * std) or (mean > b + 2 * std): + warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. 
" + "The distribution of values may be incorrect.", + stacklevel=2) + + with torch.no_grad(): + # Values are generated by using a truncated uniform distribution and + # then using the inverse CDF for the normal distribution. + # Get upper and lower cdf values + l = norm_cdf((a - mean) / std) + u = norm_cdf((b - mean) / std) + + # Uniformly fill tensor with values from [l, u], then translate to + # [2l-1, 2u-1]. + tensor.uniform_(2 * l - 1, 2 * u - 1, generator=generator) + + # Use inverse cdf transform for normal distribution to get truncated + # standard normal + tensor.erfinv_() + + # Transform to proper mean, std + tensor.mul_(std * math.sqrt(2.)) + tensor.add_(mean) + + # Clamp to ensure it's in the proper range + tensor.clamp_(min=a, max=b) + return tensor + + +def _no_grad_fill_(tensor, val): + with torch.no_grad(): + return tensor.fill_(val) + + +def _no_grad_zero_(tensor): + with torch.no_grad(): + return tensor.zero_() + + +def calculate_gain(nonlinearity, param=None): + r"""Return the recommended gain value for the given nonlinearity function. + + The values are as follows: + + ================= ==================================================== + nonlinearity gain + ================= ==================================================== + Linear / Identity :math:`1` + Conv{1,2,3}D :math:`1` + Sigmoid :math:`1` + Tanh :math:`\frac{5}{3}` + ReLU :math:`\sqrt{2}` + Leaky Relu :math:`\sqrt{\frac{2}{1 + \text{negative\_slope}^2}}` + SELU :math:`\frac{3}{4}` + ================= ==================================================== + + .. warning:: + In order to implement `Self-Normalizing Neural Networks`_ , + you should use ``nonlinearity='linear'`` instead of ``nonlinearity='selu'``. + This gives the initial weights a variance of ``1 / N``, + which is necessary to induce a stable fixed point in the forward pass. + In contrast, the default gain for ``SELU`` sacrifices the normalization + effect for more stable gradient flow in rectangular layers. + + Args: + nonlinearity: the non-linear function (`nn.functional` name) + param: optional parameter for the non-linear function + + Examples: + >>> gain = nn.init.calculate_gain('leaky_relu', 0.2) # leaky_relu with negative_slope=0.2 + + .. _Self-Normalizing Neural Networks: https://papers.nips.cc/paper/2017/hash/5d44ee6f2c3f71b73125876103c8f6c4-Abstract.html + """ + linear_fns = ['linear', 'conv1d', 'conv2d', 'conv3d', 'conv_transpose1d', 'conv_transpose2d', 'conv_transpose3d'] + if nonlinearity in linear_fns or nonlinearity == 'sigmoid': + return 1 + elif nonlinearity == 'tanh': + return 5.0 / 3 + elif nonlinearity == 'relu': + return math.sqrt(2.0) + elif nonlinearity == 'leaky_relu': + if param is None: + negative_slope = 0.01 + elif not isinstance(param, bool) and isinstance(param, int) or isinstance(param, float): + # True/False are instances of int, hence check above + negative_slope = param + else: + raise ValueError(f"negative_slope {param} not a valid number") + return math.sqrt(2.0 / (1 + negative_slope ** 2)) + elif nonlinearity == 'selu': + return 3.0 / 4 # Value found empirically (https://github.com/pytorch/pytorch/pull/50664) + else: + raise ValueError(f"Unsupported nonlinearity {nonlinearity}") + + +def uniform_( + tensor: Tensor, + a: float = 0.0, + b: float = 1.0, + generator: _Optional[torch.Generator] = None, +) -> Tensor: + r"""Fill the input Tensor with values drawn from the uniform distribution. + + :math:`\mathcal{U}(a, b)`. 
+ + Args: + tensor: an n-dimensional `torch.Tensor` + a: the lower bound of the uniform distribution + b: the upper bound of the uniform distribution + generator: the torch Generator to sample from (default: None) + + Examples: + >>> w = torch.empty(3, 5) + >>> nn.init.uniform_(w) + """ + if torch.overrides.has_torch_function_variadic(tensor): + return torch.overrides.handle_torch_function( + uniform_, (tensor,), tensor=tensor, a=a, b=b, generator=generator + ) + return _no_grad_uniform_(tensor, a, b, generator) + + +def normal_( + tensor: Tensor, + mean: float = 0.0, + std: float = 1.0, + generator: _Optional[torch.Generator] = None, +) -> Tensor: + r"""Fill the input Tensor with values drawn from the normal distribution. + + :math:`\mathcal{N}(\text{mean}, \text{std}^2)`. + + Args: + tensor: an n-dimensional `torch.Tensor` + mean: the mean of the normal distribution + std: the standard deviation of the normal distribution + generator: the torch Generator to sample from (default: None) + + Examples: + >>> w = torch.empty(3, 5) + >>> nn.init.normal_(w) + """ + if torch.overrides.has_torch_function_variadic(tensor): + return torch.overrides.handle_torch_function( + normal_, (tensor,), tensor=tensor, mean=mean, std=std, generator=generator + ) + return _no_grad_normal_(tensor, mean, std, generator) + +def trunc_normal_( + tensor: Tensor, + mean: float = 0., + std: float = 1., + a: float = -2., + b: float = 2., + generator: _Optional[torch.Generator] = None +) -> Tensor: + r"""Fill the input Tensor with values drawn from a truncated normal distribution. + + The values are effectively drawn from the + normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)` + with values outside :math:`[a, b]` redrawn until they are within + the bounds. The method used for generating the random values works + best when :math:`a \leq \text{mean} \leq b`. + + Args: + tensor: an n-dimensional `torch.Tensor` + mean: the mean of the normal distribution + std: the standard deviation of the normal distribution + a: the minimum cutoff value + b: the maximum cutoff value + generator: the torch Generator to sample from (default: None) + + Examples: + >>> w = torch.empty(3, 5) + >>> nn.init.trunc_normal_(w) + """ + return _no_grad_trunc_normal_(tensor, mean, std, a, b, generator=generator) + + +def constant_(tensor: Tensor, val: float) -> Tensor: + r"""Fill the input Tensor with the value :math:`\text{val}`. + + Args: + tensor: an n-dimensional `torch.Tensor` + val: the value to fill the tensor with + + Examples: + >>> w = torch.empty(3, 5) + >>> nn.init.constant_(w, 0.3) + """ + if torch.overrides.has_torch_function_variadic(tensor): + return torch.overrides.handle_torch_function(constant_, (tensor,), tensor=tensor, val=val) + return _no_grad_fill_(tensor, val) + + +def ones_(tensor: Tensor) -> Tensor: + r"""Fill the input Tensor with the scalar value `1`. + + Args: + tensor: an n-dimensional `torch.Tensor` + + Examples: + >>> w = torch.empty(3, 5) + >>> nn.init.ones_(w) + """ + return _no_grad_fill_(tensor, 1.) + + +def zeros_(tensor: Tensor) -> Tensor: + r"""Fill the input Tensor with the scalar value `0`. + + Args: + tensor: an n-dimensional `torch.Tensor` + + Examples: + >>> w = torch.empty(3, 5) + >>> nn.init.zeros_(w) + """ + return _no_grad_zero_(tensor) + + +def eye_(tensor): + r"""Fill the 2-dimensional input `Tensor` with the identity matrix. + + Preserves the identity of the inputs in `Linear` layers, where as + many inputs are preserved as possible. 
+ + Args: + tensor: a 2-dimensional `torch.Tensor` + + Examples: + >>> w = torch.empty(3, 5) + >>> nn.init.eye_(w) + """ + if tensor.ndimension() != 2: + raise ValueError("Only tensors with 2 dimensions are supported") + + with torch.no_grad(): + torch.eye(*tensor.shape, out=tensor, requires_grad=tensor.requires_grad) + return tensor + + +def dirac_(tensor, groups=1): + r"""Fill the {3, 4, 5}-dimensional input `Tensor` with the Dirac delta function. + + Preserves the identity of the inputs in `Convolutional` + layers, where as many input channels are preserved as possible. In case + of groups>1, each group of channels preserves identity + + Args: + tensor: a {3, 4, 5}-dimensional `torch.Tensor` + groups (int, optional): number of groups in the conv layer (default: 1) + Examples: + >>> w = torch.empty(3, 16, 5, 5) + >>> nn.init.dirac_(w) + >>> w = torch.empty(3, 24, 5, 5) + >>> nn.init.dirac_(w, 3) + """ + dimensions = tensor.ndimension() + if dimensions not in [3, 4, 5]: + raise ValueError("Only tensors with 3, 4, or 5 dimensions are supported") + + sizes = tensor.size() + + if sizes[0] % groups != 0: + raise ValueError('dim 0 must be divisible by groups') + + out_chans_per_grp = sizes[0] // groups + min_dim = min(out_chans_per_grp, sizes[1]) + + with torch.no_grad(): + tensor.zero_() + + for g in range(groups): + for d in range(min_dim): + if dimensions == 3: # Temporal convolution + tensor[g * out_chans_per_grp + d, d, tensor.size(2) // 2] = 1 + elif dimensions == 4: # Spatial convolution + tensor[g * out_chans_per_grp + d, d, tensor.size(2) // 2, + tensor.size(3) // 2] = 1 + else: # Volumetric convolution + tensor[g * out_chans_per_grp + d, d, tensor.size(2) // 2, + tensor.size(3) // 2, tensor.size(4) // 2] = 1 + return tensor + + +def _calculate_fan_in_and_fan_out(tensor): + dimensions = tensor.dim() + if dimensions < 2: + raise ValueError("Fan in and fan out can not be computed for tensor with fewer than 2 dimensions") + + num_input_fmaps = tensor.size(1) + num_output_fmaps = tensor.size(0) + receptive_field_size = 1 + if tensor.dim() > 2: + # math.prod is not always available, accumulate the product manually + # we could use functools.reduce but that is not supported by TorchScript + for s in tensor.shape[2:]: + receptive_field_size *= s + fan_in = num_input_fmaps * receptive_field_size + fan_out = num_output_fmaps * receptive_field_size + + return fan_in, fan_out + + +def xavier_uniform_( + tensor: Tensor, gain: float = 1.0, generator: _Optional[torch.Generator] = None +) -> Tensor: + r"""Fill the input `Tensor` with values using a Xavier uniform distribution. + + The method is described in `Understanding the difficulty of training + deep feedforward neural networks` - Glorot, X. & Bengio, Y. (2010). + The resulting tensor will have values sampled from + :math:`\mathcal{U}(-a, a)` where + + .. math:: + a = \text{gain} \times \sqrt{\frac{6}{\text{fan\_in} + \text{fan\_out}}} + + Also known as Glorot initialization. 
+ + Args: + tensor: an n-dimensional `torch.Tensor` + gain: an optional scaling factor + generator: the torch Generator to sample from (default: None) + + Examples: + >>> w = torch.empty(3, 5) + >>> nn.init.xavier_uniform_(w, gain=nn.init.calculate_gain('relu')) + """ + fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor) + std = gain * math.sqrt(2.0 / float(fan_in + fan_out)) + a = math.sqrt(3.0) * std # Calculate uniform bounds from standard deviation + + return _no_grad_uniform_(tensor, -a, a, generator) + + +def xavier_normal_( + tensor: Tensor, + gain: float = 1.0, + generator: _Optional[torch.Generator] = None, +) -> Tensor: + r"""Fill the input `Tensor` with values using a Xavier normal distribution. + + The method is described in `Understanding the difficulty of training deep feedforward + neural networks` - Glorot, X. & Bengio, Y. (2010). The resulting tensor + will have values sampled from :math:`\mathcal{N}(0, \text{std}^2)` where + + .. math:: + \text{std} = \text{gain} \times \sqrt{\frac{2}{\text{fan\_in} + \text{fan\_out}}} + + Also known as Glorot initialization. + + Args: + tensor: an n-dimensional `torch.Tensor` + gain: an optional scaling factor + generator: the torch Generator to sample from (default: None) + + Examples: + >>> w = torch.empty(3, 5) + >>> nn.init.xavier_normal_(w) + """ + fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor) + std = gain * math.sqrt(2.0 / float(fan_in + fan_out)) + + return _no_grad_normal_(tensor, 0., std, generator) + + +def _calculate_correct_fan(tensor, mode): + mode = mode.lower() + valid_modes = ['fan_in', 'fan_out'] + if mode not in valid_modes: + raise ValueError(f"Mode {mode} not supported, please use one of {valid_modes}") + + fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor) + return fan_in if mode == 'fan_in' else fan_out + + +def kaiming_uniform_( + tensor: Tensor, + a: float = 0, + mode: str = "fan_in", + nonlinearity: str = "leaky_relu", + generator: _Optional[torch.Generator] = None, +): + r"""Fill the input `Tensor` with values using a Kaiming uniform distribution. + + The method is described in `Delving deep into rectifiers: Surpassing + human-level performance on ImageNet classification` - He, K. et al. (2015). + The resulting tensor will have values sampled from + :math:`\mathcal{U}(-\text{bound}, \text{bound})` where + + .. math:: + \text{bound} = \text{gain} \times \sqrt{\frac{3}{\text{fan\_mode}}} + + Also known as He initialization. + + Args: + tensor: an n-dimensional `torch.Tensor` + a: the negative slope of the rectifier used after this layer (only + used with ``'leaky_relu'``) + mode: either ``'fan_in'`` (default) or ``'fan_out'``. Choosing ``'fan_in'`` + preserves the magnitude of the variance of the weights in the + forward pass. Choosing ``'fan_out'`` preserves the magnitudes in the + backwards pass. + nonlinearity: the non-linear function (`nn.functional` name), + recommended to use only with ``'relu'`` or ``'leaky_relu'`` (default). 
+ generator: the torch Generator to sample from (default: None) + + Examples: + >>> w = torch.empty(3, 5) + >>> nn.init.kaiming_uniform_(w, mode='fan_in', nonlinearity='relu') + """ + if torch.overrides.has_torch_function_variadic(tensor): + return torch.overrides.handle_torch_function( + kaiming_uniform_, + (tensor,), + tensor=tensor, + a=a, + mode=mode, + nonlinearity=nonlinearity, + generator=generator) + + if 0 in tensor.shape: + warnings.warn("Initializing zero-element tensors is a no-op") + return tensor + fan = _calculate_correct_fan(tensor, mode) + gain = calculate_gain(nonlinearity, a) + std = gain / math.sqrt(fan) + bound = math.sqrt(3.0) * std # Calculate uniform bounds from standard deviation + with torch.no_grad(): + return tensor.uniform_(-bound, bound, generator=generator) + + +def kaiming_normal_( + tensor: Tensor, + a: float = 0, + mode: str = "fan_in", + nonlinearity: str = "leaky_relu", + generator: _Optional[torch.Generator] = None, +): + r"""Fill the input `Tensor` with values using a Kaiming normal distribution. + + The method is described in `Delving deep into rectifiers: Surpassing + human-level performance on ImageNet classification` - He, K. et al. (2015). + The resulting tensor will have values sampled from + :math:`\mathcal{N}(0, \text{std}^2)` where + + .. math:: + \text{std} = \frac{\text{gain}}{\sqrt{\text{fan\_mode}}} + + Also known as He initialization. + + Args: + tensor: an n-dimensional `torch.Tensor` + a: the negative slope of the rectifier used after this layer (only + used with ``'leaky_relu'``) + mode: either ``'fan_in'`` (default) or ``'fan_out'``. Choosing ``'fan_in'`` + preserves the magnitude of the variance of the weights in the + forward pass. Choosing ``'fan_out'`` preserves the magnitudes in the + backwards pass. + nonlinearity: the non-linear function (`nn.functional` name), + recommended to use only with ``'relu'`` or ``'leaky_relu'`` (default). + generator: the torch Generator to sample from (default: None) + + Examples: + >>> w = torch.empty(3, 5) + >>> nn.init.kaiming_normal_(w, mode='fan_out', nonlinearity='relu') + """ + if 0 in tensor.shape: + warnings.warn("Initializing zero-element tensors is a no-op") + return tensor + fan = _calculate_correct_fan(tensor, mode) + gain = calculate_gain(nonlinearity, a) + std = gain / math.sqrt(fan) + with torch.no_grad(): + return tensor.normal_(0, std, generator=generator) + + +def orthogonal_( + tensor, + gain=1, + generator: _Optional[torch.Generator] = None, +): + r"""Fill the input `Tensor` with a (semi) orthogonal matrix. + + Described in `Exact solutions to the nonlinear dynamics of learning in deep + linear neural networks` - Saxe, A. et al. (2013). The input tensor must have + at least 2 dimensions, and for tensors with more than 2 dimensions the + trailing dimensions are flattened. 
+ + Args: + tensor: an n-dimensional `torch.Tensor`, where :math:`n \geq 2` + gain: optional scaling factor + generator: the torch Generator to sample from (default: None) + + Examples: + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_LAPACK) + >>> w = torch.empty(3, 5) + >>> nn.init.orthogonal_(w) + """ + if tensor.ndimension() < 2: + raise ValueError("Only tensors with 2 or more dimensions are supported") + + if tensor.numel() == 0: + # no-op + return tensor + rows = tensor.size(0) + cols = tensor.numel() // rows + flattened = tensor.new(rows, cols).normal_(0, 1, generator=generator) + + if rows < cols: + flattened.t_() + + # Compute the qr factorization + q, r = torch.linalg.qr(flattened) + # Make Q uniform according to https://arxiv.org/pdf/math-ph/0609050.pdf + d = torch.diag(r, 0) + ph = d.sign() + q *= ph + + if rows < cols: + q.t_() + + with torch.no_grad(): + tensor.view_as(q).copy_(q) + tensor.mul_(gain) + return tensor + + +def sparse_( + tensor, + sparsity, + std=0.01, + generator: _Optional[torch.Generator] = None, +): + r"""Fill the 2D input `Tensor` as a sparse matrix. + + The non-zero elements will be drawn from the normal distribution + :math:`\mathcal{N}(0, 0.01)`, as described in `Deep learning via + Hessian-free optimization` - Martens, J. (2010). + + Args: + tensor: an n-dimensional `torch.Tensor` + sparsity: The fraction of elements in each column to be set to zero + std: the standard deviation of the normal distribution used to generate + the non-zero values + generator: the torch Generator to sample from (default: None) + + Examples: + >>> w = torch.empty(3, 5) + >>> nn.init.sparse_(w, sparsity=0.1) + """ + if tensor.ndimension() != 2: + raise ValueError("Only tensors with 2 dimensions are supported") + + rows, cols = tensor.shape + num_zeros = int(math.ceil(sparsity * rows)) + + with torch.no_grad(): + tensor.normal_(0, std, generator=generator) + for col_idx in range(cols): + row_indices = torch.randperm(rows) + zero_indices = row_indices[:num_zeros] + tensor[zero_indices, col_idx] = 0 + return tensor + + +# for backward compatibility +def _make_deprecate(meth): + new_name = meth.__name__ + old_name = new_name[:-1] + + def deprecated_init(*args, **kwargs): + warnings.warn(f"nn.init.{old_name} is now deprecated in favor of nn.init.{new_name}.", stacklevel=2) + return meth(*args, **kwargs) + + deprecated_init.__doc__ = fr""" + {old_name}(...) + + .. warning:: + This method is now deprecated in favor of :func:`torch.nn.init.{new_name}`. 
+ + See :func:`~torch.nn.init.{new_name}` for details.""" + deprecated_init.__name__ = old_name + return deprecated_init + + +uniform = _make_deprecate(uniform_) +normal = _make_deprecate(normal_) +constant = _make_deprecate(constant_) +eye = _make_deprecate(eye_) +dirac = _make_deprecate(dirac_) +xavier_uniform = _make_deprecate(xavier_uniform_) +xavier_normal = _make_deprecate(xavier_normal_) +kaiming_uniform = _make_deprecate(kaiming_uniform_) +kaiming_normal = _make_deprecate(kaiming_normal_) +orthogonal = _make_deprecate(orthogonal_) +sparse = _make_deprecate(sparse_) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/parallel/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/parallel/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d0708296d47a49f47f6eae0acc89bb556d9fe4ca --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/parallel/__init__.py @@ -0,0 +1,14 @@ +from .parallel_apply import parallel_apply +from .replicate import replicate +from .data_parallel import DataParallel, data_parallel +from .scatter_gather import gather, scatter +from .distributed import DistributedDataParallel + +__all__ = ['replicate', 'scatter', 'parallel_apply', 'gather', 'data_parallel', + 'DataParallel', 'DistributedDataParallel'] + +def DistributedDataParallelCPU(*args, **kwargs): + import warnings + warnings.warn("torch.nn.parallel.DistributedDataParallelCPU is deprecated, " + "please use torch.nn.parallel.DistributedDataParallel instead.") + return DistributedDataParallel(*args, **kwargs) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2287d8a5b2266f288e2a33afcd73d9d729e97fd6 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/_functions.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/_functions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9d1608321a2019a04776095e754a560b333ff3e8 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/_functions.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/comm.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/comm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d2250832f074b3d09dbdc50825d063146164c163 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/comm.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/data_parallel.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/data_parallel.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..04328830aea14d9dc86a20eabc4fee90d4c08c26 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/data_parallel.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/distributed.cpython-310.pyc 
b/llmeval-env/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/distributed.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..77ea1999c01b8b61c8e06355d73435c4187982c5 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/distributed.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/parallel_apply.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/parallel_apply.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f94041510b450ed12282040501d435634aaadca4 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/parallel_apply.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/replicate.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/replicate.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7f64dfe2f0575d94a587787ff8a97a582a7e08e0 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/replicate.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/scatter_gather.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/scatter_gather.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..569f0fd72bcdec062bdd9ec4d842b9dbe071ec80 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/nn/parallel/__pycache__/scatter_gather.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/parameter.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/parameter.py new file mode 100644 index 0000000000000000000000000000000000000000..43d4f1cf40008b0dd77e6fe6055753b57f75c041 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/parameter.py @@ -0,0 +1,223 @@ +import torch +from torch._C import _disabled_torch_function_impl +from collections import OrderedDict + +# Metaclass to combine _TensorMeta and the instance check override for Parameter. +class _ParameterMeta(torch._C._TensorMeta): + # Make `isinstance(t, Parameter)` return True for custom tensor instances that have the _is_param flag. + def __instancecheck__(self, instance): + return super().__instancecheck__(instance) or ( + isinstance(instance, torch.Tensor) and getattr(instance, '_is_param', False)) + + +class Parameter(torch.Tensor, metaclass=_ParameterMeta): + r"""A kind of Tensor that is to be considered a module parameter. + + Parameters are :class:`~torch.Tensor` subclasses, that have a + very special property when used with :class:`Module` s - when they're + assigned as Module attributes they are automatically added to the list of + its parameters, and will appear e.g. in :meth:`~Module.parameters` iterator. + Assigning a Tensor doesn't have such effect. This is because one might + want to cache some temporary state, like last hidden state of the RNN, in + the model. If there was no such class as :class:`Parameter`, these + temporaries would get registered too. + + Args: + data (Tensor): parameter tensor. + requires_grad (bool, optional): if the parameter requires gradient. 
Note that + the torch.no_grad() context does NOT affect the default behavior of + Parameter creation--the Parameter will still have `requires_grad=True` in + :class:`~no_grad` mode. See :ref:`locally-disable-grad-doc` for more + details. Default: `True` + """ + + def __new__(cls, data=None, requires_grad=True): + if data is None: + data = torch.empty(0) + if type(data) is torch.Tensor or type(data) is Parameter: + # For ease of BC maintenance, keep this path for standard Tensor. + # Eventually (tm), we should change the behavior for standard Tensor to match. + return torch.Tensor._make_subclass(cls, data, requires_grad) + + # Path for custom tensors: set a flag on the instance to indicate parameter-ness. + t = data.detach().requires_grad_(requires_grad) + if type(t) is not type(data): + raise RuntimeError(f"Creating a Parameter from an instance of type {type(data).__name__} " + "requires that detach() returns an instance of the same type, but return " + f"type {type(t).__name__} was found instead. To use the type as a " + "Parameter, please correct the detach() semantics defined by " + "its __torch_dispatch__() implementation.") + t._is_param = True + return t + + # Note: the 3 methods below only apply to standard Tensor. Parameters of custom tensor types + # are still considered that custom tensor type and these methods will not be called for them. + def __deepcopy__(self, memo): + if id(self) in memo: + return memo[id(self)] + else: + result = type(self)(self.data.clone(memory_format=torch.preserve_format), self.requires_grad) + memo[id(self)] = result + return result + + def __repr__(self): + return 'Parameter containing:\n' + super().__repr__() + + def __reduce_ex__(self, proto): + state = torch._utils._get_obj_state(self) + + # See Note [Don't serialize hooks] + hooks = OrderedDict() + if not state: + return ( + torch._utils._rebuild_parameter, + (self.data, self.requires_grad, hooks) + ) + + return ( + torch._utils._rebuild_parameter_with_state, + (self.data, self.requires_grad, hooks, state) + ) + + __torch_function__ = _disabled_torch_function_impl + + +class UninitializedTensorMixin: + _allowed_methods = [ + torch.Tensor.__hash__, + torch.Tensor.size, + torch.Tensor.copy_, + torch.Tensor.is_complex, + torch.Tensor.is_floating_point, + torch.Tensor.half, + torch.Tensor.float, + torch.Tensor.double, + torch.Tensor.char, + torch.Tensor.short, + torch.Tensor.int, + torch.Tensor.long, + torch.Tensor.cuda, + torch.Tensor.cpu, + torch.Tensor.to, + torch.Tensor.get_device, + torch._has_compatible_shallow_copy_type, + ] + + def materialize(self, shape, device=None, dtype=None): + r"""Create a Parameter or Tensor with the same properties of the uninitialized one. + + Given a shape, it materializes a parameter in the same device + and with the same `dtype` as the current one or the specified ones in the + arguments. + + Args: + shape : (tuple): the shape for the materialized tensor. + device (:class:`torch.device`): the desired device of the parameters + and buffers in this module. Optional. + dtype (:class:`torch.dtype`): the desired floating point type of + the floating point parameters and buffers in this module. Optional. + """ + if device is None: + device = self.data.device + if dtype is None: + dtype = self.data.dtype + self.data = torch.empty(shape, device=device, dtype=dtype) + self.__class__ = self.cls_to_become + + @property + def shape(self): + raise RuntimeError( + 'Can\'t access the shape of an uninitialized parameter or buffer. 
' + 'This error usually happens in `load_state_dict` when trying to load ' + 'an uninitialized parameter into an initialized one. ' + 'Call `forward` to initialize the parameters before accessing their attributes.') + + def share_memory_(self): + raise RuntimeError( + 'Can\'t share memory on an uninitialized parameter or buffer. ' + 'Call `forward` to initialize the parameters before calling ' + '`module.share_memory()`.') + + def __repr__(self): + return f'<{self.__class__.__name__}>' + + def __reduce_ex__(self, proto): + # See Note [Don't serialize hooks] + return ( + self.__class__, + (self.requires_grad,) + ) + + @classmethod + def __torch_function__(cls, func, types, args=(), kwargs=None): + # method-wrapper is to detect access to Tensor properties that are + # wrapped in descriptors + if func in cls._allowed_methods or func.__class__.__name__ == 'method-wrapper': + if kwargs is None: + kwargs = {} + return super().__torch_function__(func, types, args, kwargs) + raise ValueError( + f'Attempted to use an uninitialized parameter in {func}. ' + 'This error happens when you are using a `LazyModule` or ' + f'explicitly manipulating `torch.nn.parameter.{cls.__name__}` ' + 'objects. When using LazyModules Call `forward` with a dummy batch ' + 'to initialize the parameters before calling torch functions') + + +def is_lazy(param): + return isinstance(param, UninitializedTensorMixin) + + +class UninitializedParameter(UninitializedTensorMixin, Parameter): + r"""A parameter that is not initialized. + + Uninitialized Parameters are a a special case of :class:`torch.nn.Parameter` + where the shape of the data is still unknown. + + Unlike a :class:`torch.nn.Parameter`, uninitialized parameters + hold no data and attempting to access some properties, like their shape, + will throw a runtime error. The only operations that can be performed on a uninitialized + parameter are changing its datatype, moving it to a different device and + converting it to a regular :class:`torch.nn.Parameter`. + + The default device or dtype to use when the parameter is materialized can be set + during construction using e.g. ``device='cuda'``. + """ + + cls_to_become = Parameter + + def __new__(cls, requires_grad=True, device=None, dtype=None) -> None: + factory_kwargs = {'device': device, 'dtype': dtype} + data = torch.empty(0, **factory_kwargs) + return torch.Tensor._make_subclass(cls, data, requires_grad) + + def __deepcopy__(self, memo): + if id(self) in memo: + return memo[id(self)] + else: + result = type(self)(self.requires_grad, self.data.device, self.data.dtype) + memo[id(self)] = result + return result + +class UninitializedBuffer(UninitializedTensorMixin, torch.Tensor): + r"""A buffer that is not initialized. + + Uninitialized Buffer is a a special case of :class:`torch.Tensor` + where the shape of the data is still unknown. + + Unlike a :class:`torch.Tensor`, uninitialized parameters + hold no data and attempting to access some properties, like their shape, + will throw a runtime error. The only operations that can be performed on a uninitialized + parameter are changing its datatype, moving it to a different device and + converting it to a regular :class:`torch.Tensor`. + + The default device or dtype to use when the buffer is materialized can be set + during construction using e.g. ``device='cuda'``. 
+ """ + + cls_to_become = torch.Tensor + + def __new__(cls, requires_grad=False, device=None, dtype=None) -> None: + factory_kwargs = {'device': device, 'dtype': dtype} + data = torch.empty(0, **factory_kwargs) + return torch.Tensor._make_subclass(cls, data, requires_grad) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/parameter.pyi b/llmeval-env/lib/python3.10/site-packages/torch/nn/parameter.pyi new file mode 100644 index 0000000000000000000000000000000000000000..219bb6d4efa2a0bb1843ca3b38a4183a346a995c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/parameter.pyi @@ -0,0 +1,40 @@ +import builtins +from typing import Optional, Tuple + +import torch +from torch import Tensor + +class Parameter(Tensor): + def __init__( + self, + data: Tensor = ..., + requires_grad: builtins.bool = ..., + ): ... + +def is_lazy(param: Tensor): ... + +class UninitializedParameter(Tensor): + def __init__( + self, + data: Tensor = ..., + requires_grad: builtins.bool = ..., + ): ... + def materialize( + self, + shape: Tuple[int, ...], + device: Optional[torch.device] = None, + dtype: Optional[torch.dtype] = None, + ): ... + +class UninitializedBuffer(Tensor): + def __init__( + self, + data: Tensor = ..., + requires_grad: builtins.bool = ..., + ): ... + def materialize( + self, + shape: Tuple[int, ...], + device: Optional[torch.device] = None, + dtype: Optional[torch.dtype] = None, + ): ... diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/dynamic/modules/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/dynamic/modules/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..1d7de4e239e50ee263536bef64b3ef30a776a8ad --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/dynamic/modules/__init__.py @@ -0,0 +1,32 @@ +# flake8: noqa: F401 +r"""Quantized Dynamic Modules. + +This file is in the process of migration to `torch/ao/nn/quantized/dynamic`, +and is kept here for compatibility while the migration process is ongoing. +If you are adding a new entry/functionality, please, add it to the +appropriate file under the `torch/ao/nn/quantized/dynamic`, +while adding an import statement here. 
+""" + +from torch.ao.nn.quantized.dynamic.modules import conv +from torch.ao.nn.quantized.dynamic.modules import linear +from torch.ao.nn.quantized.dynamic.modules import rnn + +from torch.ao.nn.quantized.dynamic.modules.conv import Conv1d, Conv2d, Conv3d, ConvTranspose1d, ConvTranspose2d, ConvTranspose3d +from torch.ao.nn.quantized.dynamic.modules.linear import Linear +from torch.ao.nn.quantized.dynamic.modules.rnn import LSTM, GRU, LSTMCell, RNNCell, GRUCell + +__all__ = [ + 'Linear', + 'LSTM', + 'GRU', + 'LSTMCell', + 'RNNCell', + 'GRUCell', + 'Conv1d', + 'Conv2d', + 'Conv3d', + 'ConvTranspose1d', + 'ConvTranspose2d', + 'ConvTranspose3d', +] diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/dynamic/modules/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/dynamic/modules/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8a06ad64faec7e8261374632b51b2f47fc0937f8 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/dynamic/modules/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/dynamic/modules/__pycache__/conv.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/dynamic/modules/__pycache__/conv.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f590e448fc38b00554aea29c9414f1384559496a Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/dynamic/modules/__pycache__/conv.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/dynamic/modules/__pycache__/linear.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/dynamic/modules/__pycache__/linear.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..15bcee80b0360b615084bb1a205505d545479378 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/dynamic/modules/__pycache__/linear.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/dynamic/modules/__pycache__/rnn.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/dynamic/modules/__pycache__/rnn.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b4ba75c858cff9194c7ee1f37a977db55c6a0f49 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/dynamic/modules/__pycache__/rnn.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/dynamic/modules/conv.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/dynamic/modules/conv.py new file mode 100644 index 0000000000000000000000000000000000000000..4dc5ef66147c5a7d3495ea62c48cb35e38cfe1a5 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/dynamic/modules/conv.py @@ -0,0 +1,18 @@ +# flake8: noqa: F401 +r"""Quantized Dynamic Modules. + +This file is in the process of migration to `torch/ao/nn/quantized/dynamic`, +and is kept here for compatibility while the migration process is ongoing. +If you are adding a new entry/functionality, please, add it to the +appropriate file under the `torch/ao/nn/quantized/dynamic/modules`, +while adding an import statement here. 
+""" + +__all__ = ['Conv1d', 'Conv2d', 'Conv3d', 'ConvTranspose1d', 'ConvTranspose2d', 'ConvTranspose3d'] + +from torch.ao.nn.quantized.dynamic.modules.conv import Conv1d +from torch.ao.nn.quantized.dynamic.modules.conv import Conv2d +from torch.ao.nn.quantized.dynamic.modules.conv import Conv3d +from torch.ao.nn.quantized.dynamic.modules.conv import ConvTranspose1d +from torch.ao.nn.quantized.dynamic.modules.conv import ConvTranspose2d +from torch.ao.nn.quantized.dynamic.modules.conv import ConvTranspose3d diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/dynamic/modules/rnn.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/dynamic/modules/rnn.py new file mode 100644 index 0000000000000000000000000000000000000000..4abef6573bed0033e6d6f5ed4438c2475b08ab43 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/dynamic/modules/rnn.py @@ -0,0 +1,22 @@ +# flake8: noqa: F401 +r"""Quantized Dynamic Modules. + +This file is in the process of migration to `torch/ao/nn/quantized/dynamic`, +and is kept here for compatibility while the migration process is ongoing. +If you are adding a new entry/functionality, please, add it to the +appropriate file under the `torch/ao/nn/quantized/dynamic/modules`, +while adding an import statement here. +""" + +__all__ = ['pack_weight_bias', 'PackedParameter', 'RNNBase', 'LSTM', 'GRU', 'RNNCellBase', 'RNNCell', 'LSTMCell', + 'GRUCell'] + +from torch.ao.nn.quantized.dynamic.modules.rnn import pack_weight_bias +from torch.ao.nn.quantized.dynamic.modules.rnn import PackedParameter +from torch.ao.nn.quantized.dynamic.modules.rnn import RNNBase +from torch.ao.nn.quantized.dynamic.modules.rnn import LSTM +from torch.ao.nn.quantized.dynamic.modules.rnn import GRU +from torch.ao.nn.quantized.dynamic.modules.rnn import RNNCellBase +from torch.ao.nn.quantized.dynamic.modules.rnn import RNNCell +from torch.ao.nn.quantized.dynamic.modules.rnn import LSTMCell +from torch.ao.nn.quantized.dynamic.modules.rnn import GRUCell diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/modules/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/modules/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d0ee0cc168aea662232d59da636d03a558d3ebd2 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/modules/__init__.py @@ -0,0 +1,70 @@ +r"""Quantized Modules. + +Note:: + The `torch.nn.quantized` namespace is in the process of being deprecated. + Please, use `torch.ao.nn.quantized` instead. 
+""" + +from torch.ao.nn.quantized.modules.activation import ReLU6, Hardswish, ELU, LeakyReLU, Sigmoid, Softmax, MultiheadAttention, PReLU +from torch.ao.nn.quantized.modules.batchnorm import BatchNorm2d, BatchNorm3d +from torch.ao.nn.quantized.modules.conv import Conv1d, Conv2d, Conv3d +from torch.ao.nn.quantized.modules.conv import ConvTranspose1d, ConvTranspose2d, ConvTranspose3d +from torch.ao.nn.quantized.modules.dropout import Dropout +from torch.ao.nn.quantized.modules.embedding_ops import Embedding, EmbeddingBag +from torch.ao.nn.quantized.modules.functional_modules import FloatFunctional, FXFloatFunctional, QFunctional +from torch.ao.nn.quantized.modules.linear import Linear +from torch.ao.nn.quantized.modules.normalization import LayerNorm, GroupNorm, InstanceNorm1d, InstanceNorm2d, InstanceNorm3d +from torch.ao.nn.quantized.modules.rnn import LSTM + +from torch.ao.nn.quantized.modules import MaxPool2d +from torch.ao.nn.quantized.modules import Quantize, DeQuantize + +# The following imports are needed in case the user decides +# to import the files directly, +# s.a. `from torch.nn.quantized.modules.conv import ...`. +# No need to add them to the `__all__`. +from torch.ao.nn.quantized.modules import activation +from torch.ao.nn.quantized.modules import batchnorm +from torch.ao.nn.quantized.modules import conv +from torch.ao.nn.quantized.modules import dropout +from torch.ao.nn.quantized.modules import embedding_ops +from torch.ao.nn.quantized.modules import functional_modules +from torch.ao.nn.quantized.modules import linear +from torch.ao.nn.quantized.modules import normalization +from torch.ao.nn.quantized.modules import rnn +from torch.ao.nn.quantized.modules import utils + +__all__ = [ + 'BatchNorm2d', + 'BatchNorm3d', + 'Conv1d', + 'Conv2d', + 'Conv3d', + 'ConvTranspose1d', + 'ConvTranspose2d', + 'ConvTranspose3d', + 'DeQuantize', + 'ELU', + 'Embedding', + 'EmbeddingBag', + 'GroupNorm', + 'Hardswish', + 'InstanceNorm1d', + 'InstanceNorm2d', + 'InstanceNorm3d', + 'LayerNorm', + 'LeakyReLU', + 'Linear', + 'LSTM', + 'MultiheadAttention', + 'Quantize', + 'ReLU6', + 'Sigmoid', + 'Softmax', + 'Dropout', + 'PReLU', + # Wrapper modules + 'FloatFunctional', + 'FXFloatFunctional', + 'QFunctional', +] diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/modules/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/modules/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3a0c3f8859df415daa17972f421e9d69081165d2 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/modules/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/modules/__pycache__/batchnorm.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/modules/__pycache__/batchnorm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bcefb74c0bec4e4167910061c84dc4b2ecc7d163 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/modules/__pycache__/batchnorm.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/modules/__pycache__/conv.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/modules/__pycache__/conv.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9d2d60cbedfe3fedb83f3544753465d8bd41ce84 Binary files /dev/null and 
b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/modules/__pycache__/conv.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/modules/__pycache__/dropout.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/modules/__pycache__/dropout.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..65c9cbdbcd51f9df8bbed6b215b426813f2c4280 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/modules/__pycache__/dropout.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/modules/__pycache__/embedding_ops.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/modules/__pycache__/embedding_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4141c10ba15372f779fd7d39c04d2ae28da22c66 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/modules/__pycache__/embedding_ops.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/modules/__pycache__/linear.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/modules/__pycache__/linear.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f637b6f618ffdff32ad58d3c396c87cd64c8d4b4 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/modules/__pycache__/linear.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/modules/__pycache__/normalization.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/modules/__pycache__/normalization.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..675dca9513841cb327aa423d4758874e82af2454 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/modules/__pycache__/normalization.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/modules/__pycache__/rnn.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/modules/__pycache__/rnn.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..544e3bb0128b13b253e5d76c8d1875ddcf9437b0 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/modules/__pycache__/rnn.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/modules/batchnorm.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/modules/batchnorm.py new file mode 100644 index 0000000000000000000000000000000000000000..29cb184fbece72de56055f8f00e471e881c72c12 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/modules/batchnorm.py @@ -0,0 +1,12 @@ +# flake8: noqa: F401 +r"""Quantized Modules. + +This file is in the process of migration to `torch/ao/nn/quantized`, and +is kept here for compatibility while the migration process is ongoing. +If you are adding a new entry/functionality, please, add it to the +appropriate file under the `torch/ao/nn/quantized/modules`, +while adding an import statement here. 
+""" + +from torch.ao.nn.quantized.modules.batchnorm import BatchNorm2d +from torch.ao.nn.quantized.modules.batchnorm import BatchNorm3d diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/modules/rnn.py b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/modules/rnn.py new file mode 100644 index 0000000000000000000000000000000000000000..a1a0076d13bc4e3ee29e9b3e410171d20e8e9a65 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/nn/quantized/modules/rnn.py @@ -0,0 +1,11 @@ +# flake8: noqa: F401 +r"""Quantized Modules. + +This file is in the process of migration to `torch/ao/nn/quantized`, and +is kept here for compatibility while the migration process is ongoing. +If you are adding a new entry/functionality, please, add it to the +appropriate file under the `torch/ao/nn/quantized/modules`, +while adding an import statement here. +""" + +from torch.ao.nn.quantized.modules.rnn import LSTM