====================================================================================================================== SOURCE CODE FILE: utils.py LINES: 1 SIZE: 0.73 KB PATH: scripts\freecad_env\Lib\site-packages\torch\quantization\fx\utils.py ENCODING: utf-8 ```py # flake8: noqa: F401 r""" This file is in the process of migration to `torch/ao/quantization`, and is kept here for compatibility while the migration process is ongoing. If you are adding a new entry/functionality, please, add it to the appropriate files under `torch/ao/quantization/fx/`, while adding an import statement here. """ from torch.ao.quantization.fx.utils import ( all_node_args_have_no_tensors, assert_and_get_unique_device, create_getattr_from_value, get_custom_module_class_keys, get_linear_prepack_op_for_dtype, get_new_attr_name_with_prefix, get_non_observable_arg_indexes_and_types, get_qconv_prepack_op, graph_module_from_producer_nodes, maybe_get_next_module, ) ```
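This file is typical of the compatibility shims under `torch/quantization/`: every public name is simply re-imported from its new home under `torch/ao/quantization/`. A minimal sketch of what that guarantees for callers (assuming a PyTorch build that still ships the legacy namespace):

```py
# The legacy path and the torch.ao path resolve to the very same objects,
# because the old module only re-imports them.
from torch.quantization.fx.utils import get_new_attr_name_with_prefix as legacy_fn
from torch.ao.quantization.fx.utils import get_new_attr_name_with_prefix as ao_fn

assert legacy_fn is ao_fn  # identical function object, not a copy
```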
====================================================================================================================== SOURCE CODE FILE: observer.py LINES: 1 SIZE: 1.09 KB PATH: scripts\freecad_env\Lib\site-packages\torch\quantization\observer.py ENCODING: utf-8 ```py # flake8: noqa: F401 r""" This file is in the process of migration to `torch/ao/quantization`, and is kept here for compatibility while the migration process is ongoing. If you are adding a new entry/functionality, please, add it to the `torch/ao/quantization/observer.py`, while adding an import statement here. """ from torch.ao.quantization.observer import ( _is_activation_post_process, _is_per_channel_script_obs_instance, _ObserverBase, _PartialWrapper, _with_args, _with_callable_args, ABC, default_debug_observer, default_dynamic_quant_observer, default_float_qparams_observer, default_histogram_observer, default_observer, default_per_channel_weight_observer, default_placeholder_observer, default_weight_observer, get_observer_state_dict, HistogramObserver, load_observer_state_dict, MinMaxObserver, MovingAverageMinMaxObserver, MovingAveragePerChannelMinMaxObserver, NoopObserver, ObserverBase, PerChannelMinMaxObserver, PlaceholderObserver, RecordingObserver, ) ```
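As a quick illustration of the observer API re-exported above, a small sketch (the input tensors and shapes are made up; the printed values depend on the random data):

```py
import torch
from torch.quantization.observer import MinMaxObserver

# Observers record running statistics on the tensors passed through them.
obs = MinMaxObserver(dtype=torch.quint8)
obs(torch.randn(4, 8))
obs(torch.randn(4, 8) * 3)

# The observed min/max range is turned into a (scale, zero_point) pair.
scale, zero_point = obs.calculate_qparams()
print(scale, zero_point)
```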
===================================================================================================================== SOURCE CODE FILE: qconfig.py LINES: 1 SIZE: 0.92 KB PATH: scripts\freecad_env\Lib\site-packages\torch\quantization\qconfig.py ENCODING: utf-8 ```py # flake8: noqa: F401 r""" This file is in the process of migration to `torch/ao/quantization`, and is kept here for compatibility while the migration process is ongoing. If you are adding a new entry/functionality, please, add it to the `torch/ao/quantization/qconfig.py`, while adding an import statement here. """ from torch.ao.quantization.qconfig import ( _add_module_to_qconfig_obs_ctr, _assert_valid_qconfig, default_activation_only_qconfig, default_debug_qconfig, default_dynamic_qconfig, default_per_channel_qconfig, default_qat_qconfig, default_qat_qconfig_v2, default_qconfig, default_weight_only_qconfig, float16_dynamic_qconfig, float16_static_qconfig, float_qparams_weight_only_qconfig, get_default_qat_qconfig, get_default_qconfig, per_channel_dynamic_qconfig, QConfig, qconfig_equals, QConfigAny, QConfigDynamic, ) ```
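A `QConfig` pairs an activation observer factory with a weight observer factory. A minimal sketch of building one by hand and of using a preset (the `"fbgemm"` backend string assumes an x86 build):

```py
import torch
from torch.quantization.observer import (
    MinMaxObserver,
    default_per_channel_weight_observer,
)
from torch.quantization.qconfig import QConfig, get_default_qconfig

# Observer *factories* (classes or .with_args partials) go into QConfig,
# not observer instances; they are instantiated per module during prepare().
custom_qconfig = QConfig(
    activation=MinMaxObserver.with_args(dtype=torch.quint8),
    weight=default_per_channel_weight_observer,
)

# Preset qconfig for the fbgemm (x86) backend.
fbgemm_qconfig = get_default_qconfig("fbgemm")
print(custom_qconfig)
print(fbgemm_qconfig)
```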
======================================================================================================================== SOURCE CODE FILE: quant_type.py LINES: 1 SIZE: 0.40 KB PATH: scripts\freecad_env\Lib\site-packages\torch\quantization\quant_type.py ENCODING: utf-8 ```py # flake8: noqa: F401 r""" This file is in the process of migration to `torch/ao/quantization`, and is kept here for compatibility while the migration process is ongoing. If you are adding a new entry/functionality, please, add it to the `torch/ao/quantization/quant_type.py`, while adding an import statement here. """ from torch.ao.quantization.quant_type import _get_quant_type_to_str, QuantType ```
=================================================================================================================================== SOURCE CODE FILE: quantization_mappings.py LINES: 1 SIZE: 1.15 KB PATH: scripts\freecad_env\Lib\site-packages\torch\quantization\quantization_mappings.py ENCODING: utf-8 ```py # flake8: noqa: F401 r""" This file is in the process of migration to `torch/ao/quantization`, and is kept here for compatibility while the migration process is ongoing. If you are adding a new entry/functionality, please, add it to the `torch/ao/quantization/quantization_mappings.py`, while adding an import statement here. """ from torch.ao.quantization.quantization_mappings import ( _get_special_act_post_process, _has_special_act_post_process, _INCLUDE_QCONFIG_PROPAGATE_LIST, DEFAULT_DYNAMIC_QUANT_MODULE_MAPPINGS, DEFAULT_FLOAT_TO_QUANTIZED_OPERATOR_MAPPINGS, DEFAULT_MODULE_TO_ACT_POST_PROCESS, DEFAULT_QAT_MODULE_MAPPINGS, DEFAULT_REFERENCE_STATIC_QUANT_MODULE_MAPPINGS, DEFAULT_STATIC_QUANT_MODULE_MAPPINGS, get_default_compare_output_module_list, get_default_dynamic_quant_module_mappings, get_default_float_to_quantized_operator_mappings, get_default_qat_module_mappings, get_default_qconfig_propagation_list, get_default_static_quant_module_mappings, get_dynamic_quant_module_class, get_quantized_operator, get_static_quant_module_class, no_observer_set, ) ```
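The mapping helpers re-exported above return plain dictionaries from float module classes to their quantized counterparts. A small sketch (the exact quantized class path differs between older `torch.nn.quantized` and newer `torch.ao.nn.quantized` releases):

```py
import torch.nn as nn
from torch.quantization.quantization_mappings import (
    get_default_static_quant_module_mappings,
)

# Maps float modules (e.g. nn.Linear) to the class they are swapped to
# during convert(); the module path of the value varies by release.
mappings = get_default_static_quant_module_mappings()
print(mappings[nn.Linear])
```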
====================================================================================================================== SOURCE CODE FILE: quantize.py LINES: 1 SIZE: 0.81 KB PATH: scripts\freecad_env\Lib\site-packages\torch\quantization\quantize.py ENCODING: utf-8 ```py # flake8: noqa: F401 r""" This file is in the process of migration to `torch/ao/quantization`, and is kept here for compatibility while the migration process is ongoing. If you are adding a new entry/functionality, please, add it to the `torch/ao/quantization/quantize.py`, while adding an import statement here. """ from torch.ao.quantization.quantize import ( _add_observer_, _convert, _get_observer_dict, _get_unique_devices_, _is_activation_post_process, _observer_forward_hook, _propagate_qconfig_helper, _register_activation_post_process_hook, _remove_activation_post_process, _remove_qconfig, add_quant_dequant, convert, prepare, prepare_qat, propagate_qconfig_, quantize, quantize_dynamic, quantize_qat, swap_module, ) ```
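The eager-mode entry points re-exported here remain callable through the legacy path. A minimal dynamic-quantization sketch (the toy model and shapes are made up for illustration):

```py
import torch
import torch.nn as nn
from torch.quantization import quantize_dynamic

# Toy float model; only the nn.Linear layers are targeted for dynamic quantization.
model = nn.Sequential(nn.Linear(16, 32), nn.ReLU(), nn.Linear(32, 4))
qmodel = quantize_dynamic(model, {nn.Linear}, dtype=torch.qint8)

# Weights are stored as int8; activations are quantized on the fly per batch.
out = qmodel(torch.randn(2, 16))
print(out.shape)  # torch.Size([2, 4])
```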
========================================================================================================================= SOURCE CODE FILE: quantize_fx.py LINES: 1 SIZE: 0.74 KB PATH: scripts\freecad_env\Lib\site-packages\torch\quantization\quantize_fx.py ENCODING: utf-8 ```py # flake8: noqa: F401 r""" This file is in the process of migration to `torch/ao/quantization`, and is kept here for compatibility while the migration process is ongoing. If you are adding a new entry/functionality, please, add it to the `torch/ao/quantization/quantize_fx.py`, while adding an import statement here. """ from torch.ao.quantization.fx.graph_module import ObservedGraphModule from torch.ao.quantization.quantize_fx import ( _check_is_graph_module, _convert_fx, _convert_standalone_module_fx, _fuse_fx, _prepare_fx, _prepare_standalone_module_fx, _swap_ff_with_fxff, convert_fx, fuse_fx, prepare_fx, prepare_qat_fx, QuantizationTracer, Scope, ScopeContextManager, ) ```
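FX graph mode quantization drives the same observers through a traced graph. A rough sketch of the prepare/calibrate/convert flow, assuming a PyTorch release (1.13 or later) where `prepare_fx` takes a `QConfigMapping` and `example_inputs`, and an x86 build where the `"fbgemm"` backend is available:

```py
import torch
import torch.nn as nn
from torch.ao.quantization import get_default_qconfig_mapping
from torch.quantization.quantize_fx import convert_fx, prepare_fx

model = nn.Sequential(nn.Linear(16, 16), nn.ReLU()).eval()
example_inputs = (torch.randn(1, 16),)

qconfig_mapping = get_default_qconfig_mapping("fbgemm")
prepared = prepare_fx(model, qconfig_mapping, example_inputs)  # trace + insert observers
prepared(*example_inputs)                                      # calibration pass
quantized = convert_fx(prepared)                               # swap to quantized ops
print(quantized(*example_inputs).shape)
```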
========================================================================================================================== SOURCE CODE FILE: quantize_jit.py LINES: 1 SIZE: 0.72 KB PATH: scripts\freecad_env\Lib\site-packages\torch\quantization\quantize_jit.py ENCODING: utf-8 ```py # flake8: noqa: F401 r""" This file is in the process of migration to `torch/ao/quantization`, and is kept here for compatibility while the migration process is ongoing. If you are adding a new entry/functionality, please, add it to the `torch/ao/quantization/quantize_jit.py`, while adding an import statement here. """ from torch.ao.quantization.quantize_jit import ( _check_forward_method, _check_is_script_module, _convert_jit, _prepare_jit, _prepare_ondevice_dynamic_jit, _quantize_jit, convert_dynamic_jit, convert_jit, fuse_conv_bn_jit, prepare_dynamic_jit, prepare_jit, quantize_dynamic_jit, quantize_jit, script_qconfig, script_qconfig_dict, ) ```
=================================================================================================================== SOURCE CODE FILE: stubs.py LINES: 1 SIZE: 0.39 KB PATH: scripts\freecad_env\Lib\site-packages\torch\quantization\stubs.py ENCODING: utf-8 ```py # flake8: noqa: F401 r""" This file is in the process of migration to `torch/ao/quantization`, and is kept here for compatibility while the migration process is ongoing. If you are adding a new entry/functionality, please, add it to the `torch/ao/quantization/stubs.py`, while adding an import statement here. """ from torch.ao.quantization.stubs import DeQuantStub, QuantStub, QuantWrapper ```
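`QuantStub` and `DeQuantStub` mark where tensors enter and leave the quantized region of an eager-mode model (`QuantWrapper` adds the same pair automatically). A minimal sketch; before `prepare`/`convert` are run, both stubs act as identity:

```py
import torch
import torch.nn as nn
from torch.quantization.stubs import DeQuantStub, QuantStub

class SmallModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.quant = QuantStub()      # float -> quantized boundary after convert()
        self.fc = nn.Linear(8, 8)
        self.dequant = DeQuantStub()  # quantized -> float boundary after convert()

    def forward(self, x):
        x = self.quant(x)
        x = self.fc(x)
        return self.dequant(x)

m = SmallModel()
print(m(torch.randn(1, 8)).shape)  # torch.Size([1, 8])
```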
=================================================================================================================== SOURCE CODE FILE: utils.py LINES: 1 SIZE: 0.84 KB PATH: scripts\freecad_env\Lib\site-packages\torch\quantization\utils.py ENCODING: utf-8 ```py # flake8: noqa: F401 r""" Utils shared by different modes of quantization (eager/graph) This file is in the process of migration to `torch/ao/quantization`, and is kept here for compatibility while the migration process is ongoing. If you are adding a new entry/functionality, please, add it to the `torch/ao/quantization/utils.py`, while adding an import statement here. """ from torch.ao.quantization.utils import ( activation_dtype, activation_is_int8_quantized, activation_is_statically_quantized, calculate_qmin_qmax, check_min_max_valid, get_combined_dict, get_qconfig_dtypes, get_qparam_dict, get_quant_type, get_swapped_custom_module_class, getattr_from_fqn, is_per_channel, is_per_tensor, weight_dtype, weight_is_quantized, weight_is_statically_quantized, ) ```
============================================================================================================ SOURCE CODE FILE: quasirandom.py LINES: 1 SIZE: 7.97 KB PATH: scripts\freecad_env\Lib\site-packages\torch\quasirandom.py ENCODING: utf-8 ```py # mypy: allow-untyped-defs from typing import Optional import torch class SobolEngine: r""" The :class:`torch.quasirandom.SobolEngine` is an engine for generating (scrambled) Sobol sequences. Sobol sequences are an example of low discrepancy quasi-random sequences. This implementation of an engine for Sobol sequences is capable of sampling sequences up to a maximum dimension of 21201. It uses direction numbers from https://web.maths.unsw.edu.au/~fkuo/sobol/ obtained using the search criterion D(6) up to the dimension 21201. This is the recommended choice by the authors. References: - Art B. Owen. Scrambling Sobol and Niederreiter-Xing points. Journal of Complexity, 14(4):466-489, December 1998. - I. M. Sobol. The distribution of points in a cube and the accurate evaluation of integrals. Zh. Vychisl. Mat. i Mat. Phys., 7:784-802, 1967. Args: dimension (Int): The dimensionality of the sequence to be drawn scramble (bool, optional): Setting this to ``True`` will produce scrambled Sobol sequences. Scrambling is capable of producing better Sobol sequences. Default: ``False``. seed (Int, optional): This is the seed for the scrambling. The seed of the random number generator is set to this, if specified. Otherwise, it uses a random seed. Default: ``None`` Examples:: >>> # xdoctest: +SKIP("unseeded random state") >>> soboleng = torch.quasirandom.SobolEngine(dimension=5) >>> soboleng.draw(3) tensor([[0.0000, 0.0000, 0.0000, 0.0000, 0.0000], [0.5000, 0.5000, 0.5000, 0.5000, 0.5000], [0.7500, 0.2500, 0.2500, 0.2500, 0.7500]]) """ MAXBIT = 30 MAXDIM = 21201 def __init__(self, dimension, scramble=False, seed=None): if dimension > self.MAXDIM or dimension < 1: raise ValueError( "Supported range of dimensionality " f"for SobolEngine is [1, {self.MAXDIM}]" ) self.seed = seed self.scramble = scramble self.dimension = dimension cpu = torch.device("cpu") self.sobolstate = torch.zeros( dimension, self.MAXBIT, device=cpu, dtype=torch.long ) torch._sobol_engine_initialize_state_(self.sobolstate, self.dimension) if not self.scramble: self.shift = torch.zeros(self.dimension, device=cpu, dtype=torch.long) else: self._scramble() self.quasi = self.shift.clone(memory_format=torch.contiguous_format) self._first_point = (self.quasi / 2**self.MAXBIT).reshape(1, -1) self.num_generated = 0 def draw( self, n: int = 1, out: Optional[torch.Tensor] = None, dtype: Optional[torch.dtype] = None, ) -> torch.Tensor: r""" Function to draw a sequence of :attr:`n` points from a Sobol sequence. Note that the samples are dependent on the previous samples. The size of the result is :math:`(n, dimension)`. Args: n (Int, optional): The length of sequence of points to draw. Default: 1 out (Tensor, optional): The output tensor dtype (:class:`torch.dtype`, optional): the desired data type of the returned tensor. 
Default: ``None`` """ if dtype is None: dtype = torch.get_default_dtype() if self.num_generated == 0: if n == 1: result = self._first_point.to(dtype) else: result, self.quasi = torch._sobol_engine_draw( self.quasi, n - 1, self.sobolstate, self.dimension, self.num_generated, dtype=dtype, ) result = torch.cat((self._first_point.to(dtype), result), dim=-2) else: result, self.quasi = torch._sobol_engine_draw( self.quasi, n, self.sobolstate, self.dimension, self.num_generated - 1, dtype=dtype, ) self.num_generated += n if out is not None: out.resize_as_(result).copy_(result) return out return result def draw_base2( self, m: int, out: Optional[torch.Tensor] = None, dtype: Optional[torch.dtype] = None, ) -> torch.Tensor: r""" Function to draw a sequence of :attr:`2**m` points from a Sobol sequence. Note that the samples are dependent on the previous samples. The size of the result is :math:`(2**m, dimension)`. Args: m (Int): The (base2) exponent of the number of points to draw. out (Tensor, optional): The output tensor dtype (:class:`torch.dtype`, optional): the desired data type of the returned tensor. Default: ``None`` """ n = 2**m total_n = self.num_generated + n if not (total_n & (total_n - 1) == 0): raise ValueError( "The balance properties of Sobol' points require " f"n to be a power of 2. {self.num_generated} points have been " f"previously generated, then: n={self.num_generated}+2**{m}={total_n}. " "If you still want to do this, please use " "'SobolEngine.draw()' instead." ) return self.draw(n=n, out=out, dtype=dtype) def reset(self): r""" Function to reset the ``SobolEngine`` to base state. """ self.quasi.copy_(self.shift) self.num_generated = 0 return self def fast_forward(self, n): r""" Function to fast-forward the state of the ``SobolEngine`` by :attr:`n` steps. This is equivalent to drawing :attr:`n` samples without using the samples. Args: n (Int): The number of steps to fast-forward by. """ if self.num_generated == 0: torch._sobol_engine_ff_( self.quasi, n - 1, self.sobolstate, self.dimension, self.num_generated ) else: torch._sobol_engine_ff_( self.quasi, n, self.sobolstate, self.dimension, self.num_generated - 1 ) self.num_generated += n return self def _scramble(self): g: Optional[torch.Generator] = None if self.seed is not None: g = torch.Generator() g.manual_seed(self.seed) cpu = torch.device("cpu") # Generate shift vector shift_ints = torch.randint( 2, (self.dimension, self.MAXBIT), device=cpu, generator=g ) self.shift = torch.mv( shift_ints, torch.pow(2, torch.arange(0, self.MAXBIT, device=cpu)) ) # Generate lower triangular matrices (stacked across dimensions) ltm_dims = (self.dimension, self.MAXBIT, self.MAXBIT) ltm = torch.randint(2, ltm_dims, device=cpu, generator=g).tril() torch._sobol_engine_scramble_(self.sobolstate, ltm, self.dimension) def __repr__(self): fmt_string = [f"dimension={self.dimension}"] if self.scramble: fmt_string += ["scramble=True"] if self.seed is not None: fmt_string += [f"seed={self.seed}"] return self.__class__.__name__ + "(" + ", ".join(fmt_string) + ")" ```
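Beyond the `draw` example in the docstring above, a short sketch of scrambled sampling plus the power-of-two helpers (the dimension and seed are arbitrary; the seed makes the scrambling reproducible):

```py
import torch

engine = torch.quasirandom.SobolEngine(dimension=3, scramble=True, seed=1234)

# draw_base2(4) returns 2**4 = 16 points; keeping the running total a power
# of two preserves the balance properties of the Sobol sequence.
points = engine.draw_base2(4)
print(points.shape)  # torch.Size([16, 3])

# Reset the engine, then skip the first 8 points without materializing them.
engine.reset()
engine.fast_forward(8)
print(engine.draw(1).shape)  # torch.Size([1, 3])
```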
======================================================================================================= SOURCE CODE FILE: random.py LINES: 1 SIZE: 7.24 KB PATH: scripts\freecad_env\Lib\site-packages\torch\random.py ENCODING: utf-8 ```py # mypy: allow-untyped-defs import contextlib import warnings from collections.abc import Generator import torch from torch._C import default_generator def set_rng_state(new_state: torch.Tensor) -> None: r"""Sets the random number generator state. .. note:: This function only works for CPU. For CUDA, please use :func:`torch.manual_seed`, which works for both CPU and CUDA. Args: new_state (torch.ByteTensor): The desired state """ default_generator.set_state(new_state) def get_rng_state() -> torch.Tensor: r"""Returns the random number generator state as a `torch.ByteTensor`. .. note:: The returned state is for the default generator on CPU only. See also: :func:`torch.random.fork_rng`. """ return default_generator.get_state() def manual_seed(seed) -> torch._C.Generator: r"""Sets the seed for generating random numbers on all devices. Returns a `torch.Generator` object. Args: seed (int): The desired seed. Value must be within the inclusive range `[-0x8000_0000_0000_0000, 0xffff_ffff_ffff_ffff]`. Otherwise, a RuntimeError is raised. Negative inputs are remapped to positive values with the formula `0xffff_ffff_ffff_ffff + seed`. """ seed = int(seed) import torch.cuda if not torch.cuda._is_in_bad_fork(): torch.cuda.manual_seed_all(seed) import torch.mps if not torch.mps._is_in_bad_fork(): torch.mps.manual_seed(seed) import torch.xpu if not torch.xpu._is_in_bad_fork(): torch.xpu.manual_seed_all(seed) _seed_custom_device(seed) return default_generator.manual_seed(seed) def seed() -> int: r"""Sets the seed for generating random numbers to a non-deterministic random number on all devices. Returns a 64 bit number used to seed the RNG. """ seed = default_generator.seed() import torch.cuda if not torch.cuda._is_in_bad_fork(): torch.cuda.manual_seed_all(seed) import torch.mps if not torch.mps._is_in_bad_fork(): torch.mps.manual_seed(seed) import torch.xpu if not torch.xpu._is_in_bad_fork(): torch.xpu.manual_seed_all(seed) _seed_custom_device(seed) return seed def _seed_custom_device(seed) -> None: r"""Sets the seed to generate random numbers for custom device. Args: seed (int): The desired seed. See [Note: support the custom device with privateuse1] """ seed = int(seed) custom_backend_name = torch._C._get_privateuse1_backend_name() if hasattr(torch, custom_backend_name): custom_device_mod = getattr(torch, custom_backend_name) _bad_fork_name = "_is_in_bad_fork" _seed_all_name = "manual_seed_all" if hasattr(custom_device_mod, _bad_fork_name) and hasattr( custom_device_mod, _seed_all_name ): if not getattr(custom_device_mod, _bad_fork_name)(): getattr(custom_device_mod, _seed_all_name)(seed) else: message = f"Set seed for `{custom_backend_name}` device does not take effect, please add API's " message += f"`{_bad_fork_name}` and `{_seed_all_name}` to `{custom_backend_name}` device module." warnings.warn(message, UserWarning, stacklevel=3) def initial_seed() -> int: r"""Returns the initial seed for generating random numbers as a Python `long`. .. note:: The returned seed is for the default generator on CPU only. 
""" return default_generator.initial_seed() _fork_rng_warned_already = False @contextlib.contextmanager def fork_rng( devices=None, enabled=True, _caller="fork_rng", _devices_kw="devices", device_type="cuda", ) -> Generator: """ Forks the RNG, so that when you return, the RNG is reset to the state that it was previously in. Args: devices (iterable of Device IDs): devices for which to fork the RNG. CPU RNG state is always forked. By default, :meth:`fork_rng` operates on all devices, but will emit a warning if your machine has a lot of devices, since this function will run very slowly in that case. If you explicitly specify devices, this warning will be suppressed enabled (bool): if ``False``, the RNG is not forked. This is a convenience argument for easily disabling the context manager without having to delete it and unindent your Python code under it. device_type (str): device type str, default is `cuda`. As for custom device, see details in [Note: support the custom device with privateuse1] """ if device_type == "meta": yield return device_type = torch.device(device_type).type device_mod = getattr(torch, device_type, None) if device_mod is None: raise RuntimeError( f"torch has no module of `{device_type}`, you should register " + "a module by `torch._register_device_module`." ) global _fork_rng_warned_already # Internal arguments: # _caller: the function which called fork_rng, which the user used # _devices_kw: the devices keyword of _caller if not enabled: yield return if devices is None: num_devices = device_mod.device_count() if num_devices > 1 and not _fork_rng_warned_already: message = ( f"{device_type.upper()} reports that you have {num_devices} available devices, and " f"you have used {_caller} without explicitly specifying which devices are being used. " f"For safety, we initialize *every* {device_type.upper()} device by default, which can " f"be quite slow if you have a lot of {device_type.upper()}s. If you know that you are only" f" making use of a few {device_type.upper()} devices, set the environment variable " f"{device_type.upper()}_VISIBLE_DEVICES or the '{_devices_kw}' keyword argument of {_caller} " "with the set of devices you are actually using. For example, if you are using CPU only, " "set device.upper()_VISIBLE_DEVICES= or devices=[]; if you are using device 0 only, " f"set {device_type.upper()}_VISIBLE_DEVICES=0 or devices=[0]. To initialize all devices " f"and suppress this warning, set the '{_devices_kw}' keyword argument to " f"`range(torch.{device_type}.device_count())`." ) warnings.warn(message) _fork_rng_warned_already = True devices = list(range(num_devices)) else: # Protect against user passing us a generator; we need to traverse this # multiple times but a generator will be exhausted upon first traversal devices = list(devices) cpu_rng_state = torch.get_rng_state() device_rng_states = [device_mod.get_rng_state(device) for device in devices] try: yield finally: torch.set_rng_state(cpu_rng_state) for device, device_rng_state in zip(devices, device_rng_states): device_mod.set_rng_state(device_rng_state, device) ```
============================================================================================================= SOURCE CODE FILE: return_types.py LINES: 1 SIZE: 1.50 KB PATH: scripts\freecad_env\Lib\site-packages\torch\return_types.py ENCODING: utf-8 ```py import inspect import torch from torch.utils._pytree import register_pytree_node, SequenceKey __all__ = ["pytree_register_structseq", "all_return_types"] all_return_types = [] # error: Module has no attribute "_return_types" return_types = torch._C._return_types # type: ignore[attr-defined] def pytree_register_structseq(cls): def structseq_flatten(structseq): return list(structseq), None def structseq_flatten_with_keys(structseq): values, context = structseq_flatten(structseq) return [(SequenceKey(i), v) for i, v in enumerate(values)], context def structseq_unflatten(values, context): return cls(values) register_pytree_node( cls, structseq_flatten, structseq_unflatten, flatten_with_keys_fn=structseq_flatten_with_keys, ) for name in dir(return_types): if name.startswith("__"): continue _attr = getattr(return_types, name) globals()[name] = _attr if not name.startswith("_"): __all__.append(name) all_return_types.append(_attr) # Today everything in torch.return_types is a structseq, aka a "namedtuple"-like # thing defined by the Python C-API. We're going to need to modify this when that # is no longer the case. # NB: I don't know how to check that something is a "structseq" so we do a fuzzy # check for tuple if inspect.isclass(_attr) and issubclass(_attr, tuple): pytree_register_structseq(_attr) ```
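The structseq return types registered above behave like named tuples, which is exactly what the pytree registration relies on. A small sketch:

```py
import torch

x = torch.tensor([[1.0, 5.0], [3.0, 2.0]])

# Reductions along a dim return a structseq from torch.return_types.
result = torch.max(x, dim=1)
print(type(result))    # <class 'torch.return_types.max'>
print(result.values)   # tensor([5., 3.])
print(result.indices)  # tensor([1, 0])

# Like a tuple, it also unpacks positionally.
values, indices = result
print(torch.equal(values, result.values))  # True
```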
============================================================================================================== SOURCE CODE FILE: serialization.py LINES: 12 SIZE: 84.69 KB PATH: scripts\freecad_env\Lib\site-packages\torch\serialization.py ENCODING: utf-8 ```py # mypy: allow-untyped-defs import copyreg import difflib import functools import io import os import pickle import re import shutil import struct import sys import tarfile import tempfile import threading import warnings from contextlib import closing, contextmanager from enum import Enum from typing import Any, Callable, cast, Generic, IO, Optional, TypeVar, Union from typing_extensions import TypeAlias, TypeIs import torch import torch._weights_only_unpickler as _weights_only_unpickler from torch._sources import get_source_lines_and_file from torch._utils import _import_dotted_name from torch.storage import _get_dtype_from_pickle_storage_type from torch.types import FileLike, Storage __all__ = [ "SourceChangeWarning", "mkdtemp", "register_package", "check_module_version_greater_or_equal", "validate_cuda_device", "validate_hpu_device", "location_tag", "default_restore_location", "normalize_storage_type", "storage_to_tensor_type", "save", "load", "StorageType", "LoadEndianness", "get_crc32_options", "set_crc32_options", "get_default_load_endianness", "set_default_load_endianness", "get_default_mmap_options", "set_default_mmap_options", "clear_safe_globals", "get_safe_globals", "add_safe_globals", "safe_globals", "get_unsafe_globals_in_checkpoint", "skip_data", ] DEFAULT_PROTOCOL = 2 LONG_SIZE = struct.Struct("=l").size INT_SIZE = struct.Struct("=i").size SHORT_SIZE = struct.Struct("=h").size MAGIC_NUMBER = 0x1950A86A20F9469CFC6C PROTOCOL_VERSION = 1001 STORAGE_KEY_SEPARATOR = "," MAP_LOCATION: TypeAlias = Optional[ Union[Callable[[Storage, str], Storage], torch.device, str, dict[str, str]] ] STORAGE: TypeAlias = Union[Storage, torch.storage.TypedStorage, torch.UntypedStorage] IS_WINDOWS = sys.platform == "win32" UNSAFE_MESSAGE = ( "In PyTorch 2.6, we changed the default value of the `weights_only` argument in `torch.load` " "from `False` to `True`. Re-running `torch.load` with `weights_only` set to `False` will likely succeed, " "but it can result in arbitrary code execution. Do it only if you got the file from a " "trusted source." 
) if not IS_WINDOWS: from mmap import MAP_PRIVATE, MAP_SHARED else: MAP_SHARED, MAP_PRIVATE = None, None # type: ignore[assignment] def _default_to_weights_only(pickle_module): is_fbcode = not hasattr(torch.version, "git_version") return pickle_module is None and not is_fbcode # _serialization_tls is used to store thread local state specific to serialization # that needs to be propagated to other files, in particular we use this for # (1) map_location (needed for wrapper subclasses/third party devices to torch._utils) # (2) skip_data (needed for torch.Tensor.__reduce_ex__ for skip_data ctx) # (3) materialize_fake_tensors (needed for torch.Tensor.__reduce_ex__ for skip_data ctx) class _SerializationLocal(threading.local): def __init__(self): super().__init__() self.map_location: Optional[MAP_LOCATION] = None self.skip_data: bool = False self.materialize_fake_tensors: bool = False _serialization_tls = _SerializationLocal() class SourceChangeWarning(Warning): pass @contextmanager def mkdtemp(): path = tempfile.mkdtemp() try: yield path finally: shutil.rmtree(path) _package_registry: list[ tuple[ int, Callable[[STORAGE], Optional[str]], Callable[[STORAGE, str], Optional[STORAGE]], ] ] = [] class LoadEndianness(Enum): NATIVE = 1 LITTLE = 2 BIG = 3 def get_default_load_endianness() -> Optional[LoadEndianness]: """ Get fallback byte order for loading files If byteorder mark is not present in saved checkpoint, this byte order is used as fallback. By default, it's "native" byte order. Returns: default_load_endian: Optional[LoadEndianness] """ from torch.utils.serialization import config return config.load.endianness def set_default_load_endianness(endianness): """ Set fallback byte order for loading files If byteorder mark is not present in saved checkpoint, this byte order is used as fallback. By default, it's "native" byte order. Args: endianness: the new fallback byte order """ if not isinstance(endianness, LoadEndianness) and endianness is not None: raise TypeError("Invalid argument type in function set_default_load_endianness") from torch.utils.serialization import config config.load.endianness = endianness def get_crc32_options() -> bool: """ Get whether :func:`torch.save` computes and writes crc32 for each record. Defaults to ``True``. """ from torch.utils.serialization import config return config.save.compute_crc32 def set_crc32_options(compute_crc32: bool): """ Set whether :func:`torch.save` computes and writes crc32 for each record. .. note:: Setting this to ``False`` may make unzipping of the ``torch.save`` output fail or warn due to corrupted CRC32. However ``torch.load`` will be able to load the file. Args: compute_crc32 (bool): set crc32 compuation flag """ from torch.utils.serialization import config config.save.compute_crc32 = compute_crc32 def get_default_mmap_options() -> Optional[int]: """ Get default mmap options for :func:`torch.load` with ``mmap=True``. Defaults to ``mmap.MAP_PRIVATE``. Returns: default_mmap_options: int """ from torch.utils.serialization import config return config.load.mmap_flags def _get_storage_alignment() -> int: """ Gets alignment for storages in torch.save files/ Defaults to 64. Returns: storage_alginment: int """ from torch.utils.serialization import config return config.save.storage_alignment class set_default_mmap_options: """ Context manager or function to set default mmap options for :func:`torch.load` with ``mmap=True`` to flags. For now, only either ``mmap.MAP_PRIVATE`` or ``mmap.MAP_SHARED`` are supported. 
Please open an issue if you need any other option to be added here. .. note:: This feature is currently not supported for Windows. Args: flags: ``mmap.MAP_PRIVATE`` or ``mmap.MAP_SHARED`` """ def __init__(self, flags: int) -> None: if IS_WINDOWS: raise RuntimeError( "Changing the default mmap options is currently not supported for Windows" ) if flags != MAP_PRIVATE and flags != MAP_SHARED: raise ValueError( "Invalid argument in function set_default_mmap_options, " f"expected mmap.MAP_PRIVATE or mmap.MAP_SHARED, but got {flags}" ) # global config from torch.utils.serialization import config self.prev = config.load.mmap_flags config.load.mmap_flags = flags def __enter__(self) -> None: pass def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None: from torch.utils.serialization import config config.load.mmap_flags = self.prev def clear_safe_globals() -> None: """ Clears the list of globals that are safe for ``weights_only`` load. """ _weights_only_unpickler._clear_safe_globals() def get_safe_globals() -> list[Union[Callable, tuple[Callable, str]]]: """ Returns the list of user-added globals that are safe for ``weights_only`` load. """ return _weights_only_unpickler._get_safe_globals() def add_safe_globals(safe_globals: list[Union[Callable, tuple[Callable, str]]]) -> None: """ Marks the given globals as safe for ``weights_only`` load. For example, functions added to this list can be called during unpickling, classes could be instantiated and have state set. Each item in the list can either be a function/class or a tuple of the form (function/class, string) where string is the full path of the function/class. Within the serialized format, each function is identified with its full path as ``{__module__}.{__qualname__}``. When calling this API, you can provide this full path that should match the one in the checkpoint otherwise the default ``{fn.__module__}.{fn.__qualname__}`` will be used. Args: safe_globals (List[Union[Callable, Tuple[Callable, str]]]): list of globals to mark as safe Example: >>> # xdoctest: +SKIP("Can't torch.save(t, ...) as doctest thinks MyTensor is defined on torch.serialization") >>> import tempfile >>> class MyTensor(torch.Tensor): ... pass >>> t = MyTensor(torch.randn(2, 3)) >>> with tempfile.NamedTemporaryFile() as f: ... torch.save(t, f.name) # Running `torch.load(f.name, weights_only=True)` will fail with # Unsupported global: GLOBAL __main__.MyTensor was not an allowed global by default. # Check the code and make sure MyTensor is safe to be used when loaded from an arbitrary checkpoint. ... torch.serialization.add_safe_globals([MyTensor]) ... torch.load(f.name, weights_only=True) # MyTensor([[-0.5024, -1.8152, -0.5455], # [-0.8234, 2.0500, -0.3657]]) """ _weights_only_unpickler._add_safe_globals(safe_globals) class safe_globals(_weights_only_unpickler._safe_globals): r"""Context-manager that adds certain globals as safe for ``weights_only`` load. Args: safe_globals: List of globals for weights_only load. Example: >>> # xdoctest: +SKIP("Can't torch.save(t, ...) as doctest thinks MyTensor is defined on torch.serialization") >>> import tempfile >>> class MyTensor(torch.Tensor): ... pass >>> t = MyTensor(torch.randn(2, 3)) >>> with tempfile.NamedTemporaryFile() as f: ... torch.save(t, f.name) # Running `torch.load(f.name, weights_only=True)` will fail with # Unsupported global: GLOBAL __main__.MyTensor was not an allowed global by default. # Check the code and make sure MyTensor is safe to be used when loaded from an arbitrary checkpoint. ... 
with torch.serialization.safe_globals([MyTensor]): ... torch.load(f.name, weights_only=True) # MyTensor([[-0.5024, -1.8152, -0.5455], # [-0.8234, 2.0500, -0.3657]]) >>> assert torch.serialization.get_safe_globals() == [] """ def get_unsafe_globals_in_checkpoint(f: FileLike) -> list[str]: """Returns a list of strings of functions/classes in a ``torch.save`` object that are not safe for ``weights_only``. For a given function or class ``f``, the corresponding string will be of the form ``{f.__module__}.{f.__name__}``. This function will return any GLOBALs in the checkpoint that are not in the set marked safe for ``weights_only`` (either via :func:`add_safe_globals` or :class:`safe_globals` context or allowlisted by ``torch`` by default). .. note:: This function will statically disassemble the pickle file in the checkpoint. The implication is any classes dynamically pushed onto the stack during unpickling will not be included in the output. Args: f: File-like object or string containing the checkpoint object saved via ``torch.save`` Returns: A list of strings of pickle GLOBALs in the checkpoint that are not allowlisted for ``weights_only``. """ default_safe_globals_strings = set( _weights_only_unpickler._get_allowed_globals().keys() ) user_safe_global_strings = set( _weights_only_unpickler._get_user_allowed_globals().keys() ) safe_global_strings = default_safe_globals_strings.union(user_safe_global_strings) with _open_file_like(f, "rb") as opened_file: if not _is_zipfile(opened_file): raise ValueError("Expected input to be a checkpoint returned by torch.save") with _open_zipfile_reader(opened_file) as zip_file: if _is_torchscript_zip(zip_file): raise ValueError( "Expected input to be a checkpoint returned by torch.save but got a torchscript checkpoint" ) data_file = io.BytesIO(zip_file.get_record("data.pkl")) all_globals = _weights_only_unpickler.get_globals_in_pkl(data_file) return list(all_globals.difference(safe_global_strings)) class skip_data: """ Context-manager that skips writing/reading storage bytes for ``torch.save`` / ``torch.load`` calls. For the save path, storages will still be saved, but the space that their bytes would usually be written to will be empty space. The storage bytes can then be populated in a separate pass. For the load path, tensors will be loaded per the checkpoint but their storages will not be populated with data. .. warning:: The ``skip_data`` context manager is an early prototype and is subject to change. Args: materialize_fake_tensors: Whether to materialize FakeTensors during save. This is a no-op for the load path. Example: >>> # xdoctest: +SKIP("NamedTemporaryFile on Windows") >>> import tempfile >>> t = torch.randn(2, 3) >>> with tempfile.NamedTemporaryFile() as f: ... with torch.serialization.skip_data(): ... torch.save(t, f.name) ... 
torch.load(f.name, weights_only=True) tensor([[0., 0., 0.], [0., 0., 0.]]) """ def __init__(self, materialize_fake_tensors: bool = False): self.materialize_fake_tensors = materialize_fake_tensors def __enter__(self): global _serialization_tls self._old_skip_data = _serialization_tls.skip_data self._old_materialize_fake_tensors = _serialization_tls.materialize_fake_tensors _serialization_tls.skip_data = True _serialization_tls.materialize_fake_tensors = self.materialize_fake_tensors def __exit__(self, type, value, tb): global _serialization_tls _serialization_tls.skip_data = self._old_skip_data _serialization_tls.materialize_fake_tensors = self._old_materialize_fake_tensors def _is_zipfile(f) -> bool: # This is a stricter implementation than zipfile.is_zipfile(). # zipfile.is_zipfile() is True if the magic number appears anywhere in the # binary. Since we expect the files here to be generated by torch.save or # torch.jit.save, it's safe to only check the start bytes and avoid # collisions and assume the zip has only 1 file. # See bugs.python.org/issue28494. start = f.tell() # Read the first few bytes and match against the ZIP file signature local_header_magic_number = b"PK\x03\x04" read_bytes = f.read(len(local_header_magic_number)) f.seek(start) return read_bytes == local_header_magic_number def register_package( priority: int, tagger: Callable[[STORAGE], Optional[str]], deserializer: Callable[[STORAGE, str], Optional[STORAGE]], ): """ Registers callables for tagging and deserializing storage objects with an associated priority. Tagging associates a device with a storage object at save time while deserializing moves a storage object to an appropriate device at load time. :attr:`tagger` and :attr:`deserializer` are run in the order given by their :attr:`priority` until a tagger/deserializer returns a value that is not `None`. To override the deserialization behavior for a device in the global registry, one can register a tagger with a higher priority than the existing tagger. This function can also be used to register a tagger and deserializer for new devices. Args: priority: Indicates the priority associated with the tagger and deserializer, where a lower value indicates higher priority. tagger: Callable that takes in a storage object and returns its tagged device as a string or None. deserializer: Callable that takes in storage object and a device string and returns a storage object on the appropriate device or None. Returns: `None` Example: >>> def ipu_tag(obj): >>> if obj.device.type == 'ipu': >>> return 'ipu' >>> def ipu_deserialize(obj, location): >>> if location.startswith('ipu'): >>> ipu = getattr(torch, "ipu", None) >>> assert ipu is not None, "IPU device module is not loaded" >>> assert torch.ipu.is_available(), "ipu is not available" >>> return obj.ipu(location) >>> torch.serialization.register_package(11, ipu_tag, ipu_deserialize) """ queue_elem = (priority, tagger, deserializer) _package_registry.append(queue_elem) _package_registry.sort() def check_module_version_greater_or_equal( module, req_version_tuple, error_if_malformed=True, ): """ Check if a module's version satisfies requirements Usually, a module's version string will be like 'x.y.z', which would be represented as a tuple (x, y, z), but sometimes it could be an unexpected format. If the version string does not match the given tuple's format up to the length of the tuple, then error and exit or emit a warning. 
Args: module: the module to check the version of req_version_tuple: tuple (usually of ints) representing the required version error_if_malformed: whether we should exit if module version string is malformed Returns: requirement_is_met: bool """ try: version_strs = module.__version__.split(".") # Cast module version fields to match the types of the required version module_version = tuple( type(req_field)(version_strs[idx]) for idx, req_field in enumerate(req_version_tuple) ) requirement_is_met = module_version >= req_version_tuple except Exception as e: message = ( f"'{module.__name__}' module version string is malformed '{module.__version__}' and cannot be compared" f" with tuple {str(req_version_tuple)}" ) if error_if_malformed: raise RuntimeError(message) from e else: warnings.warn(message + ", but continuing assuming that requirement is met") requirement_is_met = True return requirement_is_met def _cpu_tag(obj): if obj.device.type == "cpu": return "cpu" def _mps_tag(obj): if obj.device.type == "mps": return "mps" def _meta_tag(obj): if obj.device.type == "meta": return "meta" def _backend_tag(backend_name, obj): if backend_name == "privateuse1": backend_name = torch._C._get_privateuse1_backend_name() if obj.device.type == backend_name: if obj.device.index is None: return backend_name else: return backend_name + ":" + str(obj.device.index) def _cpu_deserialize(obj, location): if location == "cpu": return obj def _mps_deserialize(obj, location): if location.startswith("mps"): return obj.mps() def _meta_deserialize(obj, location): if location == "meta": return torch.UntypedStorage(obj.nbytes(), device="meta") def _validate_device(location, backend_name): """ Check whether the device index of specified backend is valid In case of privateuse1 backend, your must first register a device_module for privateuse1 using torch._register_device_module. Implement the following methods in device_module like cuda: device_module._utils._get_device_index(location, True), device_module.device_count(). Args: location: string of device backend_name: the backend name or the name of privateuse1, which can be renamed Returns: device_index: int """ if not hasattr(torch, backend_name): raise RuntimeError( f"The {backend_name.upper()} device module is not registered. " "If you are running on a CPU-only machine, " "please use torch.load with map_location=torch.device('cpu') " "to map your storages to the CPU." ) device_module = getattr(torch, backend_name) if hasattr(device_module, "_utils") and hasattr( device_module._utils, "_get_device_index" ): device_index = device_module._utils._get_device_index(location, True) device = torch.device(backend_name, device_index) else: device = torch.device(location) device_index = device.index if device.index else 0 if hasattr(device_module, "is_available") and not device_module.is_available(): raise RuntimeError( f"Attempting to deserialize object on a {backend_name.upper()} " f"device but torch.{backend_name}.is_available() is False. " "If you are running on a CPU-only machine, " "please use torch.load with map_location=torch.device('cpu') " "to map your storages to the CPU." ) if hasattr(device_module, "device_count"): device_count = device_module.device_count() if device_index >= device_count: raise RuntimeError( f"Attempting to deserialize object on {backend_name.upper()} device " f"{device_index} but torch.{backend_name}.device_count() is {device_count}. " "Please use torch.load with map_location to map your storages " "to an existing device." 
) return device def validate_cuda_device(location): return _validate_device(location, "cuda").index def validate_hpu_device(location): return _validate_device(location, "hpu").index def _deserialize(backend_name, obj, location): if backend_name == "privateuse1": backend_name = torch._C._get_privateuse1_backend_name() if location.startswith(backend_name): device = _validate_device(location, backend_name) return obj.to(device=device) register_package(10, _cpu_tag, _cpu_deserialize) register_package( 20, functools.partial(_backend_tag, "cuda"), functools.partial(_deserialize, "cuda"), ) register_package(21, _mps_tag, _mps_deserialize) register_package(22, _meta_tag, _meta_deserialize) register_package( 23, functools.partial(_backend_tag, "privateuse1"), functools.partial(_deserialize, "privateuse1"), ) register_package( 24, functools.partial(_backend_tag, "hpu"), functools.partial(_deserialize, "hpu"), ) register_package( 25, functools.partial(_backend_tag, "xpu"), functools.partial(_deserialize, "xpu"), ) def location_tag( storage: Union[Storage, torch.storage.TypedStorage, torch.UntypedStorage], ): for _, tagger, _ in _package_registry: location = tagger(storage) if location: return location raise RuntimeError( "don't know how to determine data location of " + torch.typename(storage) ) def default_restore_location(storage, location): """ Restores `storage` using a deserializer function registered for the `location`. This function looks in the registry for deserializer functions that match the `location`. If found, it attempts to use them, in priority order, to restore `storage` until one returns a not `None` result. If no deserializer can be found in the registry, or all found fail to bear a result, it raises a `RuntimeError`. Args: storage (STORAGE): the storage object to restore location (str): the location tag associated with the storage object Returns: storage: Optional[STORAGE] Raises: RuntimeError: If no deserializer matching `location` is found in the registry or if all matching ones return `None`. 
""" for _, _, fn in _package_registry: result = fn(storage, location) if result is not None: return result raise RuntimeError( "don't know how to restore data location of " + torch.typename(storage) + " (tagged with " + location + ")" ) def normalize_storage_type(storage_type): return getattr(torch, storage_type.__name__) def storage_to_tensor_type(storage): storage_type = type(storage) module = _import_dotted_name(storage_type.__module__) return getattr(module, storage_type.__name__.replace("Storage", "Tensor")) def _is_path(name_or_buffer: object) -> TypeIs[Union[str, os.PathLike]]: return isinstance(name_or_buffer, (str, os.PathLike)) T = TypeVar("T") class _opener(Generic[T]): def __init__(self, file_like: T) -> None: self.file_like: T = file_like def __enter__(self): return self.file_like def __exit__(self, *args): pass class _open_file(_opener[IO[bytes]]): def __init__(self, name: Union[str, os.PathLike[str]], mode: str) -> None: super().__init__(open(name, mode)) def __exit__(self, *args): self.file_like.close() class _open_buffer_reader(_opener[IO[bytes]]): def __init__(self, buffer: IO[bytes]) -> None: super().__init__(buffer) _check_seekable(buffer) class _open_buffer_writer(_opener[IO[bytes]]): def __exit__(self, *args): self.file_like.flush() def _open_file_like(name_or_buffer: FileLike, mode: str) -> _opener[IO[bytes]]: if _is_path(name_or_buffer): return _open_file(name_or_buffer, mode) else: if "w" in mode: return _open_buffer_writer(name_or_buffer) elif "r" in mode: return _open_buffer_reader(name_or_buffer) else: raise RuntimeError(f"Expected 'r' or 'w' in mode but got {mode}") class _open_zipfile_reader(_opener[torch._C.PyTorchFileReader]): def __init__(self, name_or_buffer: Union[str, IO[bytes]]) -> None: super().__init__(torch._C.PyTorchFileReader(name_or_buffer)) class _open_zipfile_writer_file(_opener[torch._C.PyTorchFileWriter]): def __init__(self, name: str) -> None: self.file_stream = None self.name = name try: self.name.encode("ascii") except UnicodeEncodeError: # PyTorchFileWriter only supports ascii filename. # For filenames with non-ascii characters, we rely on Python # for writing out the file. 
self.file_stream = io.FileIO(self.name, mode="w") super().__init__( torch._C.PyTorchFileWriter( self.file_stream, get_crc32_options(), _get_storage_alignment() ) ) else: super().__init__( torch._C.PyTorchFileWriter( self.name, get_crc32_options(), _get_storage_alignment() ) ) def __exit__(self, *args) -> None: self.file_like.write_end_of_file() if self.file_stream is not None: self.file_stream.close() class _open_zipfile_writer_buffer(_opener[torch._C.PyTorchFileWriter]): def __init__(self, buffer: IO[bytes]) -> None: if not callable(getattr(buffer, "write", None)): msg = f"Buffer of {str(type(buffer)).strip('<>')} has no callable attribute 'write'" if not hasattr(buffer, "write"): raise AttributeError(msg) raise TypeError(msg) self.buffer = buffer super().__init__( torch._C.PyTorchFileWriter( buffer, get_crc32_options(), _get_storage_alignment() ) ) def __exit__(self, *args) -> None: self.file_like.write_end_of_file() self.buffer.flush() def _open_zipfile_writer(name_or_buffer: Union[str, IO[bytes]]) -> _opener: container: type[_opener] if _is_path(name_or_buffer): container = _open_zipfile_writer_file else: container = _open_zipfile_writer_buffer return container(name_or_buffer) def _is_compressed_file(f) -> bool: compress_modules = ["gzip"] try: return f.__module__ in compress_modules except AttributeError: return False def _should_read_directly(f): """ Checks if f is a file that should be read directly. It should be read directly if it is backed by a real file (has a fileno) and is not a a compressed file (e.g. gzip) """ if _is_compressed_file(f): return False try: return f.fileno() >= 0 except io.UnsupportedOperation: return False except AttributeError: return False def _check_seekable(f) -> bool: def raise_err_msg(patterns, e): for p in patterns: if p in str(e): msg = ( str(e) + ". You can only torch.load from a file that is seekable." + " Please pre-load the data into a buffer like io.BytesIO and" + " try to load from it instead." ) raise type(e)(msg) raise e try: f.seek(f.tell()) return True except (io.UnsupportedOperation, AttributeError) as e: raise_err_msg(["seek", "tell"], e) return False def _check_dill_version(pickle_module) -> None: """Checks if using dill as the pickle module, and if so, checks if it is the correct version. If dill version is lower than 0.3.1, a ValueError is raised. Args: pickle_module: module used for pickling metadata and objects """ if pickle_module is not None and pickle_module.__name__ == "dill": required_dill_version = (0, 3, 1) if not check_module_version_greater_or_equal( pickle_module, required_dill_version, False ): raise ValueError( ( "'torch' supports dill >= {}, but you have dill {}." " Please upgrade dill or switch to 'pickle'" ).format( ".".join([str(num) for num in required_dill_version]), pickle_module.__version__, ) ) def _check_save_filelike(f): if not _is_path(f) and not hasattr(f, "write"): raise AttributeError( "expected 'f' to be string, path, or a file-like object with " "a 'write' attribute" ) def save( obj: object, f: FileLike, pickle_module: Any = pickle, pickle_protocol: int = DEFAULT_PROTOCOL, _use_new_zipfile_serialization: bool = True, _disable_byteorder_record: bool = False, ) -> None: # Reference: https://github.com/pytorch/pytorch/issues/54354 # The first line of this docstring overrides the one Sphinx generates for the # documentation. We need it so that Sphinx doesn't leak `pickle`s path from # the build environment (e.g. `<module 'pickle' from '/leaked/path'). 
"""save(obj, f, pickle_module=pickle, pickle_protocol=2, _use_new_zipfile_serialization=True) Saves an object to a disk file. See also: :ref:`saving-loading-tensors` Args: obj: saved object f: a file-like object (has to implement write and flush) or a string or os.PathLike object containing a file name pickle_module: module used for pickling metadata and objects pickle_protocol: can be specified to override the default protocol .. note:: A common PyTorch convention is to save tensors using .pt file extension. .. note:: PyTorch preserves storage sharing across serialization. See :ref:`preserve-storage-sharing` for more details. .. note:: The 1.6 release of PyTorch switched ``torch.save`` to use a new zipfile-based file format. ``torch.load`` still retains the ability to load files in the old format. If for any reason you want ``torch.save`` to use the old format, pass the kwarg ``_use_new_zipfile_serialization=False``. Example: >>> # xdoctest: +SKIP("makes cwd dirty") >>> # Save to file >>> x = torch.tensor([0, 1, 2, 3, 4]) >>> torch.save(x, "tensor.pt") >>> # Save to io.BytesIO buffer >>> buffer = io.BytesIO() >>> torch.save(x, buffer) """ torch._C._log_api_usage_once("torch.save") _check_dill_version(pickle_module) _check_save_filelike(f) if isinstance(f, (str, os.PathLike)): f = os.fspath(f) if _use_new_zipfile_serialization: with _open_zipfile_writer(f) as opened_zipfile: _save( obj, opened_zipfile, pickle_module, pickle_protocol, _disable_byteorder_record, ) return else: global _serialization_tls if _serialization_tls.skip_data: raise RuntimeError( "Cannot use skip_data=True with _use_new_zipfile_serialization=False" ) with _open_file_like(f, "wb") as opened_file: _legacy_save(obj, opened_file, pickle_module, pickle_protocol) def _legacy_save(obj, f, pickle_module, pickle_protocol) -> None: import torch.nn as nn serialized_container_types = {} serialized_storages: dict[str, tuple[torch.UntypedStorage, torch.dtype]] = {} # Since loading storages that view the same data with different dtypes is # not supported, we need to keep track of the dtype associated with each # storage data_ptr and throw an error if the dtype is ever different. # TODO: This feature could be added in the future storage_dtypes: dict[int, torch.dtype] = {} def persistent_id(obj: Any) -> Optional[tuple]: # FIXME: the docs say that persistent_id should only return a string # but torch store returns tuples. This works only in the binary protocol # see # https://docs.python.org/2/library/pickle.html#pickling-and-unpickling-external-objects # https://github.com/python/cpython/blob/master/Lib/pickle.py#L527-L537 if isinstance(obj, type) and issubclass(obj, nn.Module): if obj in serialized_container_types: return None serialized_container_types[obj] = True source_file = source = None try: source_lines, _, source_file = get_source_lines_and_file(obj) source = "".join(source_lines) except ( Exception ): # saving the source is optional, so we can ignore any errors warnings.warn( "Couldn't retrieve source code for container of " "type " + obj.__name__ + ". It won't be checked " "for correctness upon loading." 
) return ("module", obj, source_file, source) if isinstance(obj, torch.storage.TypedStorage) or torch.is_storage(obj): storage: torch.UntypedStorage if isinstance(obj, torch.storage.TypedStorage): # TODO: Once we decide to break serialization FC, this case # can be deleted storage = obj._untyped_storage storage_dtype = obj.dtype storage_type_str = obj._pickle_storage_type() storage_type = getattr(torch, storage_type_str) dtype = obj.dtype storage_numel = obj._size() elif isinstance(obj, torch.UntypedStorage): storage = obj storage_dtype = torch.uint8 storage_type = normalize_storage_type(type(obj)) dtype = torch.uint8 storage_numel = storage.nbytes() else: raise TypeError(f"type not recognized: {type(obj)}") # If storage is allocated, ensure that any other saved storages # pointing to the same data all have the same dtype. If storage is # not allocated, don't perform this check if storage.data_ptr() != 0: if storage.data_ptr() in storage_dtypes: if storage_dtype != storage_dtypes[storage.data_ptr()]: raise RuntimeError( "Cannot save multiple tensors or storages that " "view the same data as different types" ) else: storage_dtypes[storage.data_ptr()] = storage_dtype view_metadata: Optional[tuple[str, int, int]] # Offset is always 0, but we keep it for backwards compatibility # with the old serialization format (which supported storage views) offset = 0 storage_key = str(storage._cdata) location = location_tag(storage) # TODO: There's an issue here with FC. It might be impossible to # solve, but it's worth noting. Imagine we save a list `[storage, # tensor]`, where `tensor.storage()` is the same as `storage`, and # `tensor.element_size() > 1`. Let's say that `tensor.dtype == # torch.float`. The storage will be serialized with element size # of 1, since we're choosing to serialize the first occurance of # a duplicate storage. Since this legacy serialization format saves # the numel of the storage, rather than nbytes directly, we'll be # effectively saving nbytes in this case. We'll be able to load it # and the tensor back up with no problems in _this_ and future # versions of pytorch, but in older versions, here's the problem: # the storage will be loaded up as a UntypedStorage, and then the # FloatTensor will loaded and the UntypedStorage will be assigned to # it. Since the storage dtype does not match the tensor dtype, this # will cause an error. If we reverse the list, like `[tensor, # storage]`, then we will save the `tensor.storage()` as a faked # `FloatStorage`, and the saved size will be the correct # dtype-specific numel count that old versions expect. `tensor` # will be able to load up properly in old versions, pointing to # a FloatStorage. However, `storage` is still being translated to # a UntypedStorage, and it will try to resolve to the same # FloatStorage that `tensor` contains. This will also cause an # error. It doesn't seem like there's any way around this. # Probably, we just cannot maintain FC for the legacy format if the # saved list contains both a tensor and a storage that point to the # same data. We should still be able to maintain FC for lists of # just tensors, as long as all views share the same dtype as the # tensor they are viewing. 
if storage_key not in serialized_storages: serialized_storages[storage_key] = (storage, dtype) is_view = storage._cdata != storage._cdata if is_view: view_metadata = (str(storage._cdata), offset, storage.nbytes()) else: view_metadata = None res = ( "storage", storage_type, storage_key, location, storage_numel, view_metadata, ) return res return None sys_info = dict( protocol_version=PROTOCOL_VERSION, little_endian=sys.byteorder == "little", type_sizes=dict( short=SHORT_SIZE, int=INT_SIZE, long=LONG_SIZE, ), ) pickle_module.dump(MAGIC_NUMBER, f, protocol=pickle_protocol) pickle_module.dump(PROTOCOL_VERSION, f, protocol=pickle_protocol) pickle_module.dump(sys_info, f, protocol=pickle_protocol) class PyTorchLegacyPickler(pickle_module.Pickler): def persistent_id(self, obj): return persistent_id(obj) pickler = PyTorchLegacyPickler(f, protocol=pickle_protocol) pickler.dump(obj) serialized_storage_keys = sorted(serialized_storages.keys()) pickle_module.dump(serialized_storage_keys, f, protocol=pickle_protocol) f.flush() for key in serialized_storage_keys: storage, dtype = serialized_storages[key] storage._write_file( f, _should_read_directly(f), True, torch._utils._element_size(dtype) ) def _save( obj, zip_file, pickle_module, pickle_protocol, _disable_byteorder_record, ): serialized_storages = {} id_map: dict[int, str] = {} # Since loading storages that view the same data with different dtypes is # not supported, we need to keep track of the dtype associated with each # storage data_ptr and throw an error if the dtype is ever different. # TODO: This feature could be added in the future storage_dtypes: dict[int, torch.dtype] = {} def persistent_id(obj): # FIXME: the docs say that persistent_id should only return a string # but torch store returns tuples. This works only in the binary protocol # see # https://docs.python.org/2/library/pickle.html#pickling-and-unpickling-external-objects # https://github.com/python/cpython/blob/master/Lib/pickle.py#L527-L537 if isinstance(obj, torch.storage.TypedStorage) or torch.is_storage(obj): if isinstance(obj, torch.storage.TypedStorage): # TODO: Once we decide to break serialization FC, this case # can be deleted storage = obj._untyped_storage storage_dtype = obj.dtype storage_type_str = obj._pickle_storage_type() storage_type = getattr(torch, storage_type_str) storage_numel = obj._size() else: storage = obj storage_dtype = torch.uint8 storage_type = normalize_storage_type(type(obj)) storage_numel = storage.nbytes() # If storage is allocated, ensure that any other saved storages # pointing to the same data all have the same dtype. 
If storage is # not allocated, don't perform this check if str(storage.device) != "meta" and storage.data_ptr() != 0: if storage.data_ptr() in storage_dtypes: if storage_dtype != storage_dtypes[storage.data_ptr()]: raise RuntimeError( "Cannot save multiple tensors or storages that " "view the same data as different types" ) else: storage_dtypes[storage.data_ptr()] = storage_dtype storage_key = id_map.setdefault(storage._cdata, str(len(id_map))) if hasattr(obj, "_fake_device") and obj._fake_device is not None: location = str(obj._fake_device) else: location = location_tag(storage) serialized_storages[storage_key] = storage return ("storage", storage_type, storage_key, location, storage_numel) return None # Write the pickle data for `obj` data_buf = io.BytesIO() class PyTorchPickler(pickle_module.Pickler): # type: ignore[name-defined] def persistent_id(self, obj): return persistent_id(obj) pickler = PyTorchPickler(data_buf, protocol=pickle_protocol) pickler.dump(obj) data_value = data_buf.getvalue() zip_file.write_record("data.pkl", data_value, len(data_value)) # .format_version is used to track # 1. version 1 represents the order of storages being changed from # lexicographical based on keys to numerically ordered based on keys # 2. version 2 represents including storage_alignment as a record # within the zipfile zip_file.write_record(".format_version", "1", len("1")) storage_alignment = str(_get_storage_alignment()) zip_file.write_record( ".storage_alignment", storage_alignment, len(storage_alignment) ) # Write byte order marker if not _disable_byteorder_record: if sys.byteorder not in ["little", "big"]: raise ValueError("Unknown endianness type: " + sys.byteorder) zip_file.write_record("byteorder", sys.byteorder, len(sys.byteorder)) # Write each tensor to a file named tensor/the_tensor_key in the zip archive for key in serialized_storages.keys(): name = f"data/{key}" storage = serialized_storages[key] num_bytes = storage.nbytes() global _serialization_tls if _serialization_tls.skip_data: zip_file.write_record_metadata(name, num_bytes) else: # given that we copy things around anyway, we might use storage.cpu() # this means to that to get tensors serialized, you need to implement # .cpu() on the underlying Storage if storage.device.type != "cpu": from torch.utils.serialization import config if ( config.save.use_pinned_memory_for_d2h and ( acc := torch.accelerator.current_accelerator( check_available=True ) ) is not None and acc.type == storage.device.type ): new_storage = torch.empty( num_bytes, dtype=torch.uint8, device="cpu", pin_memory=True ).untyped_storage() new_storage.copy_(storage) torch.accelerator.current_stream(storage.device.index).synchronize() storage = new_storage else: storage = storage.cpu() # Now that it is on the CPU we can directly copy it into the zip file zip_file.write_record(name, storage, num_bytes) def load( f: FileLike, map_location: MAP_LOCATION = None, pickle_module: Any = None, *, weights_only: Optional[bool] = None, mmap: Optional[bool] = None, **pickle_load_args: Any, ) -> Any: # Reference: https://github.com/pytorch/pytorch/issues/54354 # The first line of this docstring overrides the one Sphinx generates for the # documentation. We need it so that Sphinx doesn't leak `pickle`s path from # the build environment (e.g. `<module 'pickle' from '/leaked/path'). """load(f, map_location=None, pickle_module=pickle, *, weights_only=True, mmap=None, **pickle_load_args) Loads an object saved with :func:`torch.save` from a file. 
:func:`torch.load` uses Python's unpickling facilities but treats storages, which underlie tensors, specially. They are first deserialized on the CPU and are then moved to the device they were saved from. If this fails (e.g. because the run time system doesn't have certain devices), an exception is raised. However, storages can be dynamically remapped to an alternative set of devices using the :attr:`map_location` argument. If :attr:`map_location` is a callable, it will be called once for each serialized storage with two arguments: storage and location. The storage argument will be the initial deserialization of the storage, residing on the CPU. Each serialized storage has a location tag associated with it which identifies the device it was saved from, and this tag is the second argument passed to :attr:`map_location`. The builtin location tags are ``'cpu'`` for CPU tensors and ``'cuda:device_id'`` (e.g. ``'cuda:2'``) for CUDA tensors. :attr:`map_location` should return either ``None`` or a storage. If :attr:`map_location` returns a storage, it will be used as the final deserialized object, already moved to the right device. Otherwise, :func:`torch.load` will fall back to the default behavior, as if :attr:`map_location` wasn't specified. If :attr:`map_location` is a :class:`torch.device` object or a string containing a device tag, it indicates the location where all tensors should be loaded. Otherwise, if :attr:`map_location` is a dict, it will be used to remap location tags appearing in the file (keys), to ones that specify where to put the storages (values). User extensions can register their own location tags and tagging and deserialization methods using :func:`torch.serialization.register_package`. Args: f: a file-like object (has to implement :meth:`read`, :meth:`readline`, :meth:`tell`, and :meth:`seek`), or a string or os.PathLike object containing a file name map_location: a function, :class:`torch.device`, string or a dict specifying how to remap storage locations pickle_module: module used for unpickling metadata and objects (has to match the :attr:`pickle_module` used to serialize file) weights_only: Indicates whether unpickler should be restricted to loading only tensors, primitive types, dictionaries and any types added via :func:`torch.serialization.add_safe_globals`. See :ref:`weights-only` for more details. mmap: Indicates whether the file should be mmaped rather than loading all the storages into memory. Typically, tensor storages in the file will first be moved from disk to CPU memory, after which they are moved to the location that they were tagged with when saving, or specified by ``map_location``. This second step is a no-op if the final location is CPU. When the ``mmap`` flag is set, instead of copying the tensor storages from disk to CPU memory in the first step, ``f`` is mmaped. pickle_load_args: (Python 3 only) optional keyword arguments passed over to :func:`pickle_module.load` and :func:`pickle_module.Unpickler`, e.g., :attr:`errors=...`. .. warning:: :func:`torch.load()` unless `weights_only` parameter is set to `True`, uses ``pickle`` module implicitly, which is known to be insecure. It is possible to construct malicious pickle data which will execute arbitrary code during unpickling. Never load data that could have come from an untrusted source in an unsafe mode, or that could have been tampered with. **Only load data you trust**. .. note:: When you call :func:`torch.load()` on a file which contains GPU tensors, those tensors will be loaded to GPU by default. 
You can call ``torch.load(.., map_location='cpu')`` and then :meth:`load_state_dict` to avoid GPU RAM surge when loading a model checkpoint. .. note:: By default, we decode byte strings as ``utf-8``. This is to avoid a common error case ``UnicodeDecodeError: 'ascii' codec can't decode byte 0x...`` when loading files saved by Python 2 in Python 3. If this default is incorrect, you may use an extra :attr:`encoding` keyword argument to specify how these objects should be loaded, e.g., :attr:`encoding='latin1'` decodes them to strings using ``latin1`` encoding, and :attr:`encoding='bytes'` keeps them as byte arrays which can be decoded later with ``byte_array.decode(...)``. Example: >>> # xdoctest: +SKIP("undefined filepaths") >>> torch.load("tensors.pt", weights_only=True) # Load all tensors onto the CPU >>> torch.load( ... "tensors.pt", ... map_location=torch.device("cpu"), ... weights_only=True, ... ) # Load all tensors onto the CPU, using a function >>> torch.load( ... "tensors.pt", ... map_location=lambda storage, loc: storage, ... weights_only=True, ... ) # Load all tensors onto GPU 1 >>> torch.load( ... "tensors.pt", ... map_location=lambda storage, loc: storage.cuda(1), ... weights_only=True, ... ) # type: ignore[attr-defined] # Map tensors from GPU 1 to GPU 0 >>> torch.load( ... "tensors.pt", ... map_location={"cuda:1": "cuda:0"}, ... weights_only=True, ... ) # Load tensor from io.BytesIO object # Loading from a buffer setting weights_only=False, warning this can be unsafe >>> with open("tensor.pt", "rb") as f: ... buffer = io.BytesIO(f.read()) >>> torch.load(buffer, weights_only=False) # Load a module with 'ascii' encoding for unpickling # Loading from a module setting weights_only=False, warning this can be unsafe >>> torch.load("module.pt", encoding="ascii", weights_only=False) """ torch._C._log_api_usage_once("torch.load") DOCS_MESSAGE = ( "\n\nCheck the documentation of torch.load to learn more about types accepted by default with " "weights_only https://pytorch.org/docs/stable/generated/torch.load.html." ) def _get_wo_message(message: str) -> str: unsafe_global_pattern = r"GLOBAL (\S+) was not an allowed global by default." has_unsafe_global = re.search(unsafe_global_pattern, message) is not None blocklist_pattern = r"whose module (\S+) is blocked" has_blocklist = re.search(blocklist_pattern, message) is not None import_pattern = r"(\S+) must be (\S+) to load" has_import = re.search(import_pattern, message) is not None if has_unsafe_global: updated_message = ( "Weights only load failed. This file can still be loaded, to do so you have two options, " "\033[1mdo those steps only if you trust the source of the checkpoint\033[0m. " f"\n\t(1) {UNSAFE_MESSAGE}\n\t(2) Alternatively, to load with `weights_only=True` please check " "the recommended steps in the following error message.\n\tWeightsUnpickler error: " + message ) else: if has_import: return f"Weights only load failed. {message}\n {UNSAFE_MESSAGE}\n" else: updated_message = f"Weights only load failed. 
{UNSAFE_MESSAGE}\n" if not has_blocklist: updated_message += ( "Please file an issue with the following so that we can make " "`weights_only=True` compatible with your use case: WeightsUnpickler error: " ) updated_message += message return updated_message + DOCS_MESSAGE weights_only_not_set = weights_only is None if weights_only_not_set: weights_only = _default_to_weights_only(pickle_module) true_values = ["1", "y", "yes", "true"] # Add ability to force safe only or non-safe weight loads via environment variables force_weights_only_load = ( os.getenv("TORCH_FORCE_WEIGHTS_ONLY_LOAD", "0") in true_values ) force_no_weights_only_load = ( os.getenv("TORCH_FORCE_NO_WEIGHTS_ONLY_LOAD", "0") in true_values ) if force_weights_only_load and force_no_weights_only_load: raise RuntimeError( "Only one of `TORCH_FORCE_WEIGHTS_ONLY_LOAD` or `TORCH_FORCE_NO_WEIGHTS_ONLY_LOAD` " "should be set, but both were set." ) elif force_weights_only_load: weights_only = True elif force_no_weights_only_load: # TORCH_FORCE_NO_WEIGHTS_ONLY_LOAD can only override if callsite did not explicitly set weights_only if weights_only_not_set: warnings.warn( "Environment variable TORCH_FORCE_NO_WEIGHTS_ONLY_LOAD detected, since the" "`weights_only` argument was not explicitly passed to `torch.load`, forcing weights_only=False.", UserWarning, stacklevel=2, ) weights_only = False if weights_only: if pickle_module is not None: raise RuntimeError( "Can not safely load weights when explicit pickle_module is specified" ) else: if pickle_module is None: pickle_module = pickle # make flipping default BC-compatible if mmap is None: from torch.utils.serialization import config mmap = config.load.mmap _check_dill_version(pickle_module) if "encoding" not in pickle_load_args.keys(): pickle_load_args["encoding"] = "utf-8" with _open_file_like(f, "rb") as opened_file: if _is_zipfile(opened_file): # The zipfile reader is going to advance the current file position. # If we want to actually tail call to torch.jit.load, we need to # reset back to the original position. orig_position = opened_file.tell() overall_storage = None with _open_zipfile_reader(opened_file) as opened_zipfile: if _is_torchscript_zip(opened_zipfile): warnings.warn( "'torch.load' received a zip file that looks like a TorchScript archive" " dispatching to 'torch.jit.load' (call 'torch.jit.load' directly to" " silence this warning)", UserWarning, ) if weights_only: raise RuntimeError( "Cannot use ``weights_only=True`` with TorchScript archives passed to " "``torch.load``. 
" + UNSAFE_MESSAGE ) opened_file.seek(orig_position) return torch.jit.load(opened_file, map_location=map_location) if mmap: if not _is_path(f): raise ValueError( "f must be a file path in order to use the mmap argument" ) size = os.path.getsize(f) if not IS_WINDOWS: shared = get_default_mmap_options() == MAP_SHARED else: shared = False overall_storage = torch.UntypedStorage.from_file( os.fspath(f), shared, size ) if weights_only: try: return _load( opened_zipfile, map_location, _weights_only_unpickler, overall_storage=overall_storage, **pickle_load_args, ) except pickle.UnpicklingError as e: raise pickle.UnpicklingError(_get_wo_message(str(e))) from None return _load( opened_zipfile, map_location, pickle_module, overall_storage=overall_storage, **pickle_load_args, ) if mmap: f_name = "" if not isinstance(f, str) else f"{f}, " raise RuntimeError( "mmap can only be used with files saved with " f"`torch.save({f_name}_use_new_zipfile_serialization=True), " "please torch.save your checkpoint with this option in order to use mmap." ) if weights_only: try: return _legacy_load( opened_file, map_location, _weights_only_unpickler, **pickle_load_args, ) except pickle.UnpicklingError as e: raise pickle.UnpicklingError(_get_wo_message(str(e))) from None return _legacy_load( opened_file, map_location, pickle_module, **pickle_load_args ) # Register pickling support for layout instances such as # torch.sparse_coo, etc def _get_layout(name): """Get layout extension object from its string representation.""" cache = _get_layout.cache # type: ignore[attr-defined] if not cache: for v in torch.__dict__.values(): if isinstance(v, torch.layout): cache[str(v)] = v return cache[name] # There are yet not good way to type annotate function attributes https://github.com/python/mypy/issues/2087 _get_layout.cache = {} # type: ignore[attr-defined] copyreg.pickle(torch.layout, lambda obj: (_get_layout, (str(obj),))) def _legacy_load(f, map_location, pickle_module, **pickle_load_args): deserialized_objects: dict[int, Any] = {} restore_location = _get_restore_location(map_location) class UnpicklerWrapper(pickle_module.Unpickler): # type: ignore[name-defined] def find_class(self, mod_name, name): if type(name) is str and "Storage" in name: try: return StorageType(name) except KeyError: pass return super().find_class(mod_name, name) def _check_container_source(container_type, source_file, original_source): try: current_source = "".join(get_source_lines_and_file(container_type)[0]) except Exception: # saving the source is optional, so we can ignore any errors warnings.warn( "Couldn't retrieve source code for container of " "type " + container_type.__name__ + ". It won't be checked " "for correctness upon loading." ) return if original_source != current_source: if container_type.dump_patches: file_name = container_type.__name__ + ".patch" diff = difflib.unified_diff( current_source.split("\n"), original_source.split("\n"), source_file, source_file, lineterm="", ) lines = "\n".join(diff) try: with open(file_name, "a+") as f: file_size = f.seek(0, 2) f.seek(0) if file_size == 0: f.write(lines) elif file_size != len(lines) or f.read() != lines: raise OSError msg = ( "Saved a reverse patch to " + file_name + ". " "Run `patch -p0 < " + file_name + "` to revert your " "changes." ) except OSError: msg = ( "Tried to save a patch, but couldn't create a " "writable file " + file_name + ". Make sure it " "doesn't exist and your working directory is " "writable." 
) else: msg = ( "you can retrieve the original source code by " "accessing the object's source attribute or set " "`torch.nn.Module.dump_patches = True` and use the " "patch tool to revert the changes." ) msg = f"source code of class '{torch.typename(container_type)}' has changed. {msg}" warnings.warn(msg, SourceChangeWarning) def legacy_load(f): deserialized_objects: dict[int, Any] = {} def persistent_load(saved_id): if isinstance(saved_id, tuple): # Ignore containers that don't have any sources saved if all(saved_id[1:]): _check_container_source(*saved_id) return saved_id[0] return deserialized_objects[int(saved_id)] with ( closing( tarfile.open(fileobj=f, mode="r:", format=tarfile.PAX_FORMAT) ) as tar, mkdtemp() as tmpdir, ): if pickle_module is _weights_only_unpickler: raise RuntimeError( "Cannot use ``weights_only=True`` with files saved in the " "legacy .tar format. " + UNSAFE_MESSAGE ) tar.extract("storages", path=tmpdir) with open(os.path.join(tmpdir, "storages"), "rb", 0) as f: num_storages = pickle_module.load(f, **pickle_load_args) for _ in range(num_storages): args = pickle_module.load(f, **pickle_load_args) key, location, storage_type = args dtype = storage_type._dtype obj = cast(Storage, torch.UntypedStorage)._new_with_file( f, torch._utils._element_size(dtype) ) obj = restore_location(obj, location) # TODO: Once we decide to break serialization FC, we can # stop wrapping with TypedStorage deserialized_objects[key] = torch.storage.TypedStorage( wrap_storage=obj, dtype=dtype, _internal=True ) storage_views = pickle_module.load(f, **pickle_load_args) for target_cdata, root_cdata, offset, numel in storage_views: root = deserialized_objects[root_cdata] element_size = torch._utils._element_size(root.dtype) offset_bytes = offset * element_size # TODO: Once we decide to break serialization FC, we can # stop wrapping with TypedStorage deserialized_objects[target_cdata] = torch.storage.TypedStorage( wrap_storage=root._untyped_storage[ offset_bytes : offset_bytes + numel * element_size ], dtype=root.dtype, _internal=True, ) tar.extract("tensors", path=tmpdir) with open(os.path.join(tmpdir, "tensors"), "rb", 0) as f: num_tensors = pickle_module.load(f, **pickle_load_args) for _ in range(num_tensors): args = pickle_module.load(f, **pickle_load_args) key, storage_id, _original_tensor_type = args storage = deserialized_objects[storage_id] (ndim,) = struct.unpack("<i", f.read(4)) # skip next 4 bytes; legacy encoding treated ndim as 8 bytes f.read(4) numel = struct.unpack(f"<{ndim}q", f.read(8 * ndim)) stride = struct.unpack(f"<{ndim}q", f.read(8 * ndim)) (storage_offset,) = struct.unpack("<q", f.read(8)) tensor = torch.empty((0,), dtype=storage.dtype).set_( storage._untyped_storage, storage_offset, numel, stride ) deserialized_objects[key] = tensor pickle_file = tar.extractfile("pickle") unpickler = UnpicklerWrapper(pickle_file, **pickle_load_args) unpickler.persistent_load = persistent_load result = unpickler.load() return result deserialized_objects = {} def persistent_load(saved_id): assert isinstance(saved_id, tuple) typename = _maybe_decode_ascii(saved_id[0]) data = saved_id[1:] if typename == "module": # Ignore containers that don't have any sources saved if all(data[1:]): _check_container_source(*data) return data[0] elif typename == "storage": storage_type, root_key, location, numel, view_metadata = data location = _maybe_decode_ascii(location) dtype = storage_type.dtype nbytes = numel * torch._utils._element_size(dtype) if root_key not in deserialized_objects: if 
torch._guards.active_fake_mode() is not None: obj = cast(Storage, torch.UntypedStorage(nbytes, device="meta")) elif _serialization_tls.skip_data: obj = cast(Storage, torch.UntypedStorage(nbytes)) obj = restore_location(obj, location) else: obj = cast(Storage, torch.UntypedStorage(nbytes)) obj._torch_load_uninitialized = True obj = restore_location(obj, location) # TODO: Once we decide to break serialization FC, we can # stop wrapping with TypedStorage typed_storage = torch.storage.TypedStorage( wrap_storage=obj, dtype=dtype, _internal=True ) deserialized_objects[root_key] = typed_storage else: typed_storage = deserialized_objects[root_key] if typed_storage._data_ptr() == 0: typed_storage = torch.storage.TypedStorage( device=typed_storage._untyped_storage.device, dtype=dtype, _internal=True, ) if view_metadata is not None: view_key, offset, view_size = view_metadata offset_bytes = offset * torch._utils._element_size(dtype) view_size_bytes = view_size * torch._utils._element_size(dtype) if view_key not in deserialized_objects: # TODO: Once we decide to break serialization FC, we can # stop wrapping with TypedStorage deserialized_objects[view_key] = torch.storage.TypedStorage( wrap_storage=typed_storage._untyped_storage[ offset_bytes : offset_bytes + view_size_bytes ], dtype=dtype, _internal=True, ) res = deserialized_objects[view_key] else: res = typed_storage return res else: raise RuntimeError(f"Unknown saved id type: {saved_id[0]}") _check_seekable(f) f_should_read_directly = _should_read_directly(f) if f_should_read_directly and f.tell() == 0: # legacy_load requires that f has fileno() # only if offset is zero we can attempt the legacy tar file loader try: return legacy_load(f) except tarfile.TarError: if _is_zipfile(f): # .zip is used for torch.jit.save and will throw an un-pickling error here raise RuntimeError( f"{f.name} is a zip archive (did you mean to use torch.jit.load()?)" ) from None # if not a tarfile, reset file offset and proceed f.seek(0) magic_number = pickle_module.load(f, **pickle_load_args) if magic_number != MAGIC_NUMBER: raise RuntimeError("Invalid magic number; corrupt file?") protocol_version = pickle_module.load(f, **pickle_load_args) if protocol_version != PROTOCOL_VERSION: raise RuntimeError(f"Invalid protocol version: {protocol_version}") _sys_info = pickle_module.load(f, **pickle_load_args) unpickler = UnpicklerWrapper(f, **pickle_load_args) unpickler.persistent_load = persistent_load result = unpickler.load() deserialized_storage_keys = pickle_module.load(f, **pickle_load_args) if torch._guards.active_fake_mode() is None and not _serialization_tls.skip_data: offset = f.tell() if f_should_read_directly else None for key in deserialized_storage_keys: assert key in deserialized_objects typed_storage = deserialized_objects[key] typed_storage._untyped_storage._set_from_file( f, offset, f_should_read_directly, torch._utils._element_size(typed_storage.dtype), ) if offset is not None: offset = f.tell() torch._utils._validate_loaded_sparse_tensors() return result def _maybe_decode_ascii(bytes_str: Union[bytes, str]) -> str: # When using encoding='bytes' in Py3, some **internal** keys stored as # strings in Py2 are loaded as bytes. This function decodes them with # ascii encoding, one that Py3 uses by default. # # NOTE: This should only be used on internal keys (e.g., `typename` and # `location` in `persistent_load` below! 
if isinstance(bytes_str, bytes): return bytes_str.decode("ascii") return bytes_str def _get_restore_location(map_location): if map_location is None: restore_location = default_restore_location elif isinstance(map_location, dict): def restore_location(storage, location): location = map_location.get(location, location) return default_restore_location(storage, location) elif isinstance(map_location, (str, bytes)): def restore_location(storage, location): return default_restore_location(storage, map_location) elif isinstance(map_location, torch.device): def restore_location(storage, location): return default_restore_location(storage, str(map_location)) else: def restore_location(storage, location): result = map_location(storage, location) if result is None: result = default_restore_location(storage, location) return result return restore_location class StorageType: def __init__(self, name): self._dtype = _get_dtype_from_pickle_storage_type(name) @property def dtype(self): return self._dtype def __str__(self): return f"StorageType(dtype={self.dtype})" def _load( zip_file, map_location, pickle_module, pickle_file="data.pkl", overall_storage=None, **pickle_load_args, ): restore_location = _get_restore_location(map_location) loaded_storages = {} can_calculate_storage_offsets = False if zip_file.has_record(".format_version"): version = zip_file.get_record(".format_version") can_calculate_storage_offsets = version >= b"1" # check if byteswapping is needed byteordername = "byteorder" byteorderdata = None if zip_file.has_record(byteordername): byteorderdata = zip_file.get_record(byteordername) if byteorderdata not in [b"little", b"big"]: raise ValueError("Unknown endianness type: " + byteorderdata.decode()) elif ( get_default_load_endianness() == LoadEndianness.LITTLE or get_default_load_endianness() is None ): byteorderdata = b"little" elif get_default_load_endianness() == LoadEndianness.BIG: byteorderdata = b"big" elif get_default_load_endianness() == LoadEndianness.NATIVE: pass else: raise ValueError("Invalid load endianness type") storage_alignment = 64 if zip_file.has_record(".storage_alignment"): storage_alignment = int(zip_file.get_record(".storage_alignment")) if ( not zip_file.has_record(byteordername) and get_default_load_endianness() is None and sys.byteorder == "big" ): # Default behaviour was changed # See https://github.com/pytorch/pytorch/issues/101688 warnings.warn( "The default load endianness for checkpoints without a byteorder mark " "on big endian machines was changed from 'native' to 'little' endian, " "to avoid this behavior please use " "torch.serialization.set_default_load_endianness to set " "the desired default load endianness", UserWarning, ) from torch.utils.serialization import config calculate_storage_offsets = config.load.calculate_storage_offsets run_debug_asserts = os.environ.get("TORCH_SERIALIZATION_DEBUG", "0") == "1" current_offset = None # constants from miniz.h/miniz.c data_descripter_size64 = 24 data_descripter_size32 = 16 mz_uint32_max = 0xFFFFFFFF offsets: dict[str, int] = dict() def _get_offset(key, name, numel): """ Return the offset of the storage associated with key with record name `name` and size numel. It is expected that the zipfile header of this storage starts at current_offset. WARNING: This function relies on the behavior of the zipwriter in miniz.c. In particular, the behavior of `mz_zip_writer_add_mem_ex_v2`. The behavior of this function must be kept in sync with that of miniz! 
After reading a storage of size numel that starts at storage_offset if it is the first time that storage was read, update nonlocal variable current_offset to the start of the next zipfile header by incrementing it by numel and the data descriptor size. """ nonlocal current_offset, offsets if name in offsets: storage_offset = offsets[name] return storage_offset if current_offset is None: assert key == "0" current_offset = zip_file.get_record_offset(name) local_header_offset = zip_file.get_record_header_offset(name) storage_offset = current_offset else: storage_offset = zip_file.get_record_offset_no_read( current_offset, name, numel, storage_alignment ) local_header_offset = current_offset # This is only actually needed for storages that have typed_storage._data_ptr() == 0 # after being read. Otherwise persistent_load would never "re-call" load_tensor # for a given key. offsets[name] = storage_offset # Increment current_offset of offset where next zipfile header starts current_offset = storage_offset + numel # add size of data descriptor after payload if numel > 0: if local_header_offset >= mz_uint32_max or numel >= mz_uint32_max: current_offset += data_descripter_size64 else: current_offset += data_descripter_size32 return storage_offset def load_tensor(dtype, numel, key, location): name = f"data/{key}" if torch._guards.detect_fake_mode(None) is not None: nbytes = numel * torch._utils._element_size(dtype) storage = torch.UntypedStorage(nbytes, device="meta") storage._checkpoint_offset = zip_file.get_record_offset(name) elif _serialization_tls.skip_data: nbytes = numel * torch._utils._element_size(dtype) storage = torch.UntypedStorage(nbytes) elif overall_storage is not None: if can_calculate_storage_offsets and calculate_storage_offsets: storage_offset = _get_offset(key, name, numel) if run_debug_asserts: if storage_offset != zip_file.get_record_offset(name): raise RuntimeError( "This is a debug assert that was run as the `TORCH_SERIALIZATION_DEBUG` environment " f"variable was set: Incorrect offset for {name}, got {storage_offset} expected " f"{zip_file.get_record_offset(name)}" ) else: storage_offset = zip_file.get_record_offset(name) storage = overall_storage[storage_offset : storage_offset + numel] else: if can_calculate_storage_offsets and run_debug_asserts: # This is debug code that we use to test the validity of # torch.utils.serialization.config.load.calculate_storage_offsets throughout CI storage_offset = _get_offset(key, name, numel) if storage_offset != zip_file.get_record_offset(name): raise RuntimeError( "This is a debug assert that was run as the `TORCH_SERIALIZATION_DEBUG` environment " f"variable was set: Incorrect offset for {name}, got {storage_offset} expected " f"{zip_file.get_record_offset(name)}" ) storage = ( zip_file.get_storage_from_record(name, numel, torch.UntypedStorage) ._typed_storage() ._untyped_storage ) # swap here if byteswapping is needed if byteorderdata is not None: if byteorderdata.decode() != sys.byteorder: storage.byteswap(dtype) # TODO: Once we decide to break serialization FC, we can # stop wrapping with TypedStorage if torch._guards.detect_fake_mode(None) is None: wrap_storage = restore_location(storage, location) else: storage._fake_device = location wrap_storage = storage typed_storage = torch.storage.TypedStorage( wrap_storage=wrap_storage, dtype=dtype, _internal=True, ) if typed_storage._data_ptr() != 0: loaded_storages[key] = typed_storage return typed_storage def persistent_load(saved_id): assert isinstance(saved_id, tuple) typename = 
_maybe_decode_ascii(saved_id[0]) data = saved_id[1:] assert typename == "storage", ( f"Unknown typename for persistent_load, expected 'storage' but got '{typename}'" ) storage_type, key, location, numel = data if storage_type is torch.UntypedStorage: dtype = torch.uint8 else: dtype = storage_type.dtype if key in loaded_storages: typed_storage = loaded_storages[key] else: nbytes = numel * torch._utils._element_size(dtype) typed_storage = load_tensor( dtype, nbytes, key, _maybe_decode_ascii(location) ) return typed_storage load_module_mapping: dict[str, str] = { # See https://github.com/pytorch/pytorch/pull/51633 "torch.tensor": "torch._tensor" } # Need to subclass Unpickler instead of directly monkey-patching the find_class method # because it's marked readonly in pickle. # The type: ignore is because mypy can't statically determine the type of this class. class UnpicklerWrapper(pickle_module.Unpickler): # type: ignore[name-defined] # from https://stackoverflow.com/questions/13398462/unpickling-python-objects-with-a-changed-module-path/13405732 # Lets us override the imports that pickle uses when unpickling an object. # This is useful for maintaining BC if we change a module path that tensor instantiation relies on. def find_class(self, mod_name, name): if type(name) is str and "Storage" in name: try: return StorageType(name) except KeyError: pass mod_name = load_module_mapping.get(mod_name, mod_name) return super().find_class(mod_name, name) # Load the data (which may in turn use `persistent_load` to load tensors) data_file = io.BytesIO(zip_file.get_record(pickle_file)) unpickler = UnpicklerWrapper(data_file, **pickle_load_args) unpickler.persistent_load = persistent_load # Needed for tensors where storage device and rebuild tensor device are # not connected (wrapper subclasses and tensors rebuilt using numpy) global _serialization_tls _serialization_tls.map_location = map_location result = unpickler.load() _serialization_tls.map_location = None torch._utils._validate_loaded_sparse_tensors() torch._C._log_api_usage_metadata( "torch.load.metadata", {"serialization_id": zip_file.serialization_id()} ) return result def _is_torchscript_zip(zip_file): return "constants.pkl" in zip_file.get_all_records() ```
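The serialization entry points above are easiest to follow from the caller's side. Below is a minimal usage sketch of `torch.save` and `torch.load` as documented in their docstrings; the file name `checkpoint.pt` and the tensors are illustrative only.

```py
import io

import torch

# Save a state-dict-like mapping using the default zipfile-based format.
state = {"weight": torch.randn(4, 3), "bias": torch.zeros(4)}
torch.save(state, "checkpoint.pt")

# weights_only=True restricts unpickling to tensors and primitive containers;
# map_location="cpu" keeps all storages on the CPU regardless of the device
# they were saved from.
restored = torch.load("checkpoint.pt", map_location="cpu", weights_only=True)
assert torch.equal(restored["weight"], state["weight"])

# The same round trip works with any file-like object that implements the
# required read/seek methods, e.g. an in-memory buffer.
buffer = io.BytesIO()
torch.save(state, buffer)
buffer.seek(0)
restored_again = torch.load(buffer, weights_only=True)
```

For large checkpoints saved in the default zipfile format, `torch.load(path, mmap=True)` maps the file instead of reading every storage into CPU memory up front, as described in the `load` docstring above; this requires a file path rather than a buffer.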
===============================================================================================================================
SOURCE CODE FILE: ATenConfig.cmake
LINES: 1
SIZE: 0.30 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\share\cmake\ATen\ATenConfig.cmake
ENCODING: utf-8
```cmake
# Find the TH includes and library
#
# ATEN_INCLUDE_DIR -- where to find the includes
# ATEN_LIBRARIES -- list of libraries to link against
# ATEN_FOUND -- set to 1 if found

set(ATEN_FOUND 1)
set(ATEN_INCLUDE_DIR "C:/actions-runner/_work/pytorch/pytorch/pytorch/torch/include")
set(ATEN_LIBRARIES "")
```
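This config is mostly a legacy artifact (most projects go through `find_package(Torch)` instead), but it can be consumed directly via CMake's config mode. The sketch below assumes `CMAKE_PREFIX_PATH` points at the torch install prefix; the project and target names are illustrative and not part of the file above.

```cmake
cmake_minimum_required(VERSION 3.10)
project(aten_consumer CXX)

# Config mode locates share/cmake/ATen/ATenConfig.cmake under CMAKE_PREFIX_PATH.
find_package(ATen CONFIG REQUIRED)

add_executable(demo main.cpp)
# ATEN_INCLUDE_DIR, ATEN_LIBRARIES and ATEN_FOUND are the variables the config
# file defines. ATEN_LIBRARIES is empty in this build, so actual linking is
# expected to come from the Caffe2/Torch imported targets instead.
target_include_directories(demo PRIVATE ${ATEN_INCLUDE_DIR})
if(ATEN_LIBRARIES)
  target_link_libraries(demo PRIVATE ${ATEN_LIBRARIES})
endif()
```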
=================================================================================================================================== SOURCE CODE FILE: Caffe2Config.cmake LINES: 1 SIZE: 5.12 KB PATH: scripts\freecad_env\Lib\site-packages\torch\share\cmake\Caffe2\Caffe2Config.cmake ENCODING: utf-8 ```cmake # - Config file for the Caffe2 package # It defines the following variable(s) # CAFFE2_INCLUDE_DIRS - include directories for FooBar # as well as Caffe2 targets for other cmake libraries to use. # library version information # Utils functions. include("${CMAKE_CURRENT_LIST_DIR}/public/utils.cmake") # Depending on whether Caffe2 uses gflags during compile time or # not, invoke gflags. if(OFF) include("${CMAKE_CURRENT_LIST_DIR}/public/gflags.cmake") if(NOT TARGET gflags) message(FATAL_ERROR "Your installed Caffe2 version uses gflags but the gflags library " "cannot be found. Did you accidentally remove it, or have you set " "the right CMAKE_PREFIX_PATH and/or GFLAGS_ROOT_DIR? If you do not " "have gflags, you will need to install gflags and set the library " "path accordingly.") endif() endif() # Depending on whether Caffe2 uses glog during compile time or # not, invoke glog. if(OFF) include("${CMAKE_CURRENT_LIST_DIR}/public/glog.cmake") if(NOT TARGET glog::glog) message(FATAL_ERROR "Your installed Caffe2 version uses glog but the glog library " "cannot be found. Did you accidentally remove it, or have you set " "the right CMAKE_PREFIX_PATH and/or GFLAGS_ROOT_DIR? If you do not " "have glog, you will need to install glog and set the library " "path accordingly.") endif() endif() # Protobuf if(ON) if(NOT TARGET protobuf::libprotobuf) # Define protobuf::libprotobuf as a dummy target to resolve references to # protobuf::libprotobuf in Caffe2Targets.cmake. add_library(dummy INTERFACE) add_library(protobuf::libprotobuf ALIAS dummy) endif() else() include("${CMAKE_CURRENT_LIST_DIR}/public/protobuf.cmake") if(NOT TARGET protobuf::libprotobuf) message(FATAL_ERROR "Your installed Caffe2 version uses protobuf but the protobuf library " "cannot be found. Did you accidentally remove it, or have you set " "the right CMAKE_PREFIX_PATH? If you do not have protobuf, you will " "need to install protobuf and set the library path accordingly.") endif() message(STATUS "Caffe2: Protobuf version " ${Protobuf_VERSION}) # If during build time we know the protobuf version, we will also do a sanity # check to ensure that the protobuf library that Caffe2 found is consistent # with the compiled version. if(FALSE) if(NOT (${Protobuf_VERSION} VERSION_EQUAL Protobuf_VERSION_NOTFOUND)) message(FATAL_ERROR "Your installed Caffe2 is built with protobuf " "Protobuf_VERSION_NOTFOUND" ", while your current cmake setting discovers protobuf version " ${Protobuf_VERSION} ". Please specify a protobuf version that is the same as the built " "version.") endif() endif() endif() if (OFF) include("${CMAKE_CURRENT_LIST_DIR}/public/LoadHIP.cmake") endif() if(ON) # The file public/cuda.cmake exclusively uses CAFFE2_USE_*. # If Caffe2 was compiled with the libraries below, they must # be found again when including the Caffe2 target. 
set(CAFFE2_USE_CUDA ON) # Add current directory to module path so we pick up FindCUDAToolkit.cmake set(old_CMAKE_MODULE_PATH "${CMAKE_MODULE_PATH}") list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_LIST_DIR}") include("${CMAKE_CURRENT_LIST_DIR}/public/cuda.cmake") set(CMAKE_MODULE_PATH "${old_CMAKE_MODULE_PATH}") if(ON AND NOT CAFFE2_USE_CUDA) message(FATAL_ERROR "Your installed Caffe2 version uses CUDA but I cannot find the CUDA " "libraries. Please set the proper CUDA prefixes and / or install " "CUDA.") endif() endif() if(OFF) # Add current directory to module path so we pick up FindSYCLToolkit.cmake set(old_CMAKE_MODULE_PATH "${CMAKE_MODULE_PATH}") list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_LIST_DIR}") include("${CMAKE_CURRENT_LIST_DIR}/public/xpu.cmake") set(CMAKE_MODULE_PATH "${old_CMAKE_MODULE_PATH}") endif() if(ON) include("${CMAKE_CURRENT_LIST_DIR}/public/mkl.cmake") endif() if(ON) include("${CMAKE_CURRENT_LIST_DIR}/public/mkldnn.cmake") endif() # import targets include ("${CMAKE_CURRENT_LIST_DIR}/Caffe2Targets.cmake") # Interface libraries, that allows one to build proper link flags. # We will also define a helper variable, Caffe2_MAIN_LIBS, that resolves to # the main caffe2 libraries in cases of cuda presence / absence. set(Caffe2_MAIN_LIBS torch_library) # include directory. # # Newer versions of CMake set the INTERFACE_INCLUDE_DIRECTORIES property # of the imported targets. It is hence not necessary to add this path # manually to the include search path for targets which link to gflags. # The following lines are here for backward compatibility, in case one # would like to use the old-style include path. get_filename_component( CMAKE_CURRENT_LIST_DIR "${CMAKE_CURRENT_LIST_FILE}" PATH) # Note: the current list dir is _INSTALL_PREFIX/share/cmake/Gloo. get_filename_component( _INSTALL_PREFIX "${CMAKE_CURRENT_LIST_DIR}/../../../" ABSOLUTE) set(CAFFE2_INCLUDE_DIRS "${_INSTALL_PREFIX}/include") ```
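A sketch of how this package config is typically consumed. Projects more commonly call `find_package(Torch)`, which pulls in the same Caffe2 targets, but using the Caffe2 package directly works as well; the project and target names below are illustrative.

```cmake
cmake_minimum_required(VERSION 3.10)
project(caffe2_consumer CXX)

# Config mode finds share/cmake/Caffe2/Caffe2Config.cmake when CMAKE_PREFIX_PATH
# contains the torch install prefix.
find_package(Caffe2 CONFIG REQUIRED)

add_executable(demo main.cpp)
# Caffe2_MAIN_LIBS resolves to torch_library (see above), whose INTERFACE
# properties carry include directories, compile definitions and link flags.
target_link_libraries(demo PRIVATE ${Caffe2_MAIN_LIBS})
# Old-style include path kept by the config file for backward compatibility.
target_include_directories(demo PRIVATE ${CAFFE2_INCLUDE_DIRS})
```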
============================================================================================================================================
SOURCE CODE FILE: Caffe2Targets-release.cmake
LINES: 1
SIZE: 2.81 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\share\cmake\Caffe2\Caffe2Targets-release.cmake
ENCODING: utf-8
```cmake
#----------------------------------------------------------------
# Generated CMake target import file for configuration "Release".
#----------------------------------------------------------------

# Commands may need to know the format version.
set(CMAKE_IMPORT_FILE_VERSION 1)

# Import target "c10_cuda" for configuration "Release"
set_property(TARGET c10_cuda APPEND PROPERTY IMPORTED_CONFIGURATIONS RELEASE)
set_target_properties(c10_cuda PROPERTIES
  IMPORTED_IMPLIB_RELEASE "${_IMPORT_PREFIX}/lib/c10_cuda.lib"
  IMPORTED_LOCATION_RELEASE "${_IMPORT_PREFIX}/lib/c10_cuda.dll"
  )

list(APPEND _cmake_import_check_targets c10_cuda )
list(APPEND _cmake_import_check_files_for_c10_cuda "${_IMPORT_PREFIX}/lib/c10_cuda.lib" "${_IMPORT_PREFIX}/lib/c10_cuda.dll" )

# Import target "c10" for configuration "Release"
set_property(TARGET c10 APPEND PROPERTY IMPORTED_CONFIGURATIONS RELEASE)
set_target_properties(c10 PROPERTIES
  IMPORTED_IMPLIB_RELEASE "${_IMPORT_PREFIX}/lib/c10.lib"
  IMPORTED_LOCATION_RELEASE "${_IMPORT_PREFIX}/lib/c10.dll"
  )

list(APPEND _cmake_import_check_targets c10 )
list(APPEND _cmake_import_check_files_for_c10 "${_IMPORT_PREFIX}/lib/c10.lib" "${_IMPORT_PREFIX}/lib/c10.dll" )

# Import target "torch_cpu" for configuration "Release"
set_property(TARGET torch_cpu APPEND PROPERTY IMPORTED_CONFIGURATIONS RELEASE)
set_target_properties(torch_cpu PROPERTIES
  IMPORTED_IMPLIB_RELEASE "${_IMPORT_PREFIX}/lib/torch_cpu.lib"
  IMPORTED_LINK_DEPENDENT_LIBRARIES_RELEASE "fbgemm"
  IMPORTED_LOCATION_RELEASE "${_IMPORT_PREFIX}/lib/torch_cpu.dll"
  )

list(APPEND _cmake_import_check_targets torch_cpu )
list(APPEND _cmake_import_check_files_for_torch_cpu "${_IMPORT_PREFIX}/lib/torch_cpu.lib" "${_IMPORT_PREFIX}/lib/torch_cpu.dll" )

# Import target "torch_cuda" for configuration "Release"
set_property(TARGET torch_cuda APPEND PROPERTY IMPORTED_CONFIGURATIONS RELEASE)
set_target_properties(torch_cuda PROPERTIES
  IMPORTED_IMPLIB_RELEASE "${_IMPORT_PREFIX}/lib/torch_cuda.lib"
  IMPORTED_LOCATION_RELEASE "${_IMPORT_PREFIX}/lib/torch_cuda.dll"
  )

list(APPEND _cmake_import_check_targets torch_cuda )
list(APPEND _cmake_import_check_files_for_torch_cuda "${_IMPORT_PREFIX}/lib/torch_cuda.lib" "${_IMPORT_PREFIX}/lib/torch_cuda.dll" )

# Import target "torch" for configuration "Release"
set_property(TARGET torch APPEND PROPERTY IMPORTED_CONFIGURATIONS RELEASE)
set_target_properties(torch PROPERTIES
  IMPORTED_IMPLIB_RELEASE "${_IMPORT_PREFIX}/lib/torch.lib"
  IMPORTED_LOCATION_RELEASE "${_IMPORT_PREFIX}/lib/torch.dll"
  )

list(APPEND _cmake_import_check_targets torch )
list(APPEND _cmake_import_check_files_for_torch "${_IMPORT_PREFIX}/lib/torch.lib" "${_IMPORT_PREFIX}/lib/torch.dll" )

# Commands beyond this point should not need to know the version.
set(CMAKE_IMPORT_FILE_VERSION)
```
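The release import file above only records where the `.lib` import libraries and `.dll` runtime files live; consumers rarely reference these paths directly. One place they do surface is when staging runtime DLLs next to a Windows executable: `$<TARGET_FILE:...>` on an imported shared library resolves to the `IMPORTED_LOCATION_<CONFIG>` values set here. A sketch, assuming an illustrative `demo` executable target defined elsewhere:

```cmake
# Copy the torch runtime DLLs recorded by Caffe2Targets-release.cmake next to
# the consumer executable after each build (Windows-style deployment).
add_custom_command(TARGET demo POST_BUILD
  COMMAND ${CMAKE_COMMAND} -E copy_if_different
          $<TARGET_FILE:c10>
          $<TARGET_FILE:torch_cpu>
          $<TARGET_FILE:torch>
          $<TARGET_FILE_DIR:demo>
  COMMENT "Staging torch DLLs alongside demo"
)
```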
==================================================================================================================================== SOURCE CODE FILE: Caffe2Targets.cmake LINES: 4 SIZE: 8.06 KB PATH: scripts\freecad_env\Lib\site-packages\torch\share\cmake\Caffe2\Caffe2Targets.cmake ENCODING: utf-8 ```cmake # Generated by CMake if("${CMAKE_MAJOR_VERSION}.${CMAKE_MINOR_VERSION}" LESS 2.8) message(FATAL_ERROR "CMake >= 3.0.0 required") endif() if(CMAKE_VERSION VERSION_LESS "3.0.0") message(FATAL_ERROR "CMake >= 3.0.0 required") endif() cmake_policy(PUSH) cmake_policy(VERSION 3.0.0...3.30) #---------------------------------------------------------------- # Generated CMake target import file. #---------------------------------------------------------------- # Commands may need to know the format version. set(CMAKE_IMPORT_FILE_VERSION 1) # Protect against multiple inclusion, which would fail when already imported targets are added once more. set(_cmake_targets_defined "") set(_cmake_targets_not_defined "") set(_cmake_expected_targets "") foreach(_cmake_expected_target IN ITEMS c10_cuda c10 torch_cpu torch_cpu_library torch_cuda torch_cuda_library torch torch_library) list(APPEND _cmake_expected_targets "${_cmake_expected_target}") if(TARGET "${_cmake_expected_target}") list(APPEND _cmake_targets_defined "${_cmake_expected_target}") else() list(APPEND _cmake_targets_not_defined "${_cmake_expected_target}") endif() endforeach() unset(_cmake_expected_target) if(_cmake_targets_defined STREQUAL _cmake_expected_targets) unset(_cmake_targets_defined) unset(_cmake_targets_not_defined) unset(_cmake_expected_targets) unset(CMAKE_IMPORT_FILE_VERSION) cmake_policy(POP) return() endif() if(NOT _cmake_targets_defined STREQUAL "") string(REPLACE ";" ", " _cmake_targets_defined_text "${_cmake_targets_defined}") string(REPLACE ";" ", " _cmake_targets_not_defined_text "${_cmake_targets_not_defined}") message(FATAL_ERROR "Some (but not all) targets in this export set were already defined.\nTargets Defined: ${_cmake_targets_defined_text}\nTargets not yet defined: ${_cmake_targets_not_defined_text}\n") endif() unset(_cmake_targets_defined) unset(_cmake_targets_not_defined) unset(_cmake_expected_targets) # Compute the installation prefix relative to this file. 
get_filename_component(_IMPORT_PREFIX "${CMAKE_CURRENT_LIST_FILE}" PATH) get_filename_component(_IMPORT_PREFIX "${_IMPORT_PREFIX}" PATH) get_filename_component(_IMPORT_PREFIX "${_IMPORT_PREFIX}" PATH) get_filename_component(_IMPORT_PREFIX "${_IMPORT_PREFIX}" PATH) if(_IMPORT_PREFIX STREQUAL "/") set(_IMPORT_PREFIX "") endif() # Create imported target c10_cuda add_library(c10_cuda SHARED IMPORTED) set_target_properties(c10_cuda PROPERTIES INTERFACE_INCLUDE_DIRECTORIES "${_IMPORT_PREFIX}/include" INTERFACE_LINK_LIBRARIES "c10;torch::cudart" ) # Create imported target c10 add_library(c10 SHARED IMPORTED) set_target_properties(c10 PROPERTIES INTERFACE_INCLUDE_DIRECTORIES "${_IMPORT_PREFIX}/include" ) # Create imported target torch_cpu add_library(torch_cpu SHARED IMPORTED) set_target_properties(torch_cpu PROPERTIES INTERFACE_COMPILE_DEFINITIONS "USE_DISTRIBUTED;USE_C10D_GLOO" INTERFACE_COMPILE_OPTIONS "\$<\$<COMPILE_LANGUAGE:CXX>:;\$<\$<OR:\$<CONFIG:Debug>,\$<CONFIG:RelWithDebInfo>>:/Z7>;/EHsc;/bigobj>" INTERFACE_INCLUDE_DIRECTORIES "${_IMPORT_PREFIX}/include" INTERFACE_LINK_LIBRARIES "protobuf::libprotobuf;c10;caffe2::mkl" ) # Create imported target torch_cpu_library add_library(torch_cpu_library INTERFACE IMPORTED) set_target_properties(torch_cpu_library PROPERTIES INTERFACE_COMPILE_DEFINITIONS "\$<TARGET_PROPERTY:torch_cpu,INTERFACE_COMPILE_DEFINITIONS>" INTERFACE_COMPILE_OPTIONS "\$<TARGET_PROPERTY:torch_cpu,INTERFACE_COMPILE_OPTIONS>" INTERFACE_INCLUDE_DIRECTORIES "\$<TARGET_PROPERTY:torch_cpu,INTERFACE_INCLUDE_DIRECTORIES>" INTERFACE_LINK_LIBRARIES "torch_cpu;\$<TARGET_PROPERTY:torch_cpu,INTERFACE_LINK_LIBRARIES>" INTERFACE_SYSTEM_INCLUDE_DIRECTORIES "\$<TARGET_PROPERTY:torch_cpu,INTERFACE_SYSTEM_INCLUDE_DIRECTORIES>" ) # Create imported target torch_cuda add_library(torch_cuda SHARED IMPORTED) set_target_properties(torch_cuda PROPERTIES INTERFACE_COMPILE_OPTIONS "\$<\$<COMPILE_LANGUAGE:CXX>:;\$<\$<OR:\$<CONFIG:Debug>,\$<CONFIG:RelWithDebInfo>>:/Z7>;/EHsc;/bigobj>" INTERFACE_INCLUDE_DIRECTORIES "${_IMPORT_PREFIX}/include" INTERFACE_LINK_LIBRARIES "-INCLUDE:?warp_size@cuda@at@@YAHXZ;torch::cudart;c10_cuda;torch_cpu_library" ) # Create imported target torch_cuda_library add_library(torch_cuda_library INTERFACE IMPORTED) set_target_properties(torch_cuda_library PROPERTIES INTERFACE_COMPILE_DEFINITIONS "\$<TARGET_PROPERTY:torch_cuda,INTERFACE_COMPILE_DEFINITIONS>" INTERFACE_COMPILE_OPTIONS "\$<TARGET_PROPERTY:torch_cuda,INTERFACE_COMPILE_OPTIONS>" INTERFACE_INCLUDE_DIRECTORIES "\$<TARGET_PROPERTY:torch_cuda,INTERFACE_INCLUDE_DIRECTORIES>" INTERFACE_LINK_LIBRARIES "torch_cuda;\$<TARGET_PROPERTY:torch_cuda,INTERFACE_LINK_LIBRARIES>" INTERFACE_SYSTEM_INCLUDE_DIRECTORIES "\$<TARGET_PROPERTY:torch_cuda,INTERFACE_SYSTEM_INCLUDE_DIRECTORIES>" ) # Create imported target torch add_library(torch SHARED IMPORTED) set_target_properties(torch PROPERTIES INTERFACE_LINK_LIBRARIES "torch_cpu_library;torch_cuda_library" ) # Create imported target torch_library add_library(torch_library INTERFACE IMPORTED) set_target_properties(torch_library PROPERTIES INTERFACE_COMPILE_DEFINITIONS "\$<TARGET_PROPERTY:torch,INTERFACE_COMPILE_DEFINITIONS>" INTERFACE_COMPILE_OPTIONS "\$<TARGET_PROPERTY:torch,INTERFACE_COMPILE_OPTIONS>" INTERFACE_INCLUDE_DIRECTORIES "\$<TARGET_PROPERTY:torch,INTERFACE_INCLUDE_DIRECTORIES>" INTERFACE_LINK_LIBRARIES "torch;\$<TARGET_PROPERTY:torch,INTERFACE_LINK_LIBRARIES>" INTERFACE_SYSTEM_INCLUDE_DIRECTORIES "\$<TARGET_PROPERTY:torch,INTERFACE_SYSTEM_INCLUDE_DIRECTORIES>" ) # Load information 
for each installed configuration. file(GLOB _cmake_config_files "${CMAKE_CURRENT_LIST_DIR}/Caffe2Targets-*.cmake") foreach(_cmake_config_file IN LISTS _cmake_config_files) include("${_cmake_config_file}") endforeach() unset(_cmake_config_file) unset(_cmake_config_files) # Cleanup temporary variables. set(_IMPORT_PREFIX) # Loop over all imported files and verify that they actually exist foreach(_cmake_target IN LISTS _cmake_import_check_targets) if(CMAKE_VERSION VERSION_LESS "3.28" OR NOT DEFINED _cmake_import_check_xcframework_for_${_cmake_target} OR NOT IS_DIRECTORY "${_cmake_import_check_xcframework_for_${_cmake_target}}") foreach(_cmake_file IN LISTS "_cmake_import_check_files_for_${_cmake_target}") if(NOT EXISTS "${_cmake_file}") message(FATAL_ERROR "The imported target \"${_cmake_target}\" references the file \"${_cmake_file}\" but this file does not exist. Possible reasons include: * The file was deleted, renamed, or moved to another location. * An install or uninstall procedure did not complete successfully. * The installation package was faulty and contained \"${CMAKE_CURRENT_LIST_FILE}\" but not all the files it references. ") endif() endforeach() endif() unset(_cmake_file) unset("_cmake_import_check_files_for_${_cmake_target}") endforeach() unset(_cmake_target) unset(_cmake_import_check_targets) # Make sure the targets which have been exported in some other # export set exist. unset(${CMAKE_FIND_PACKAGE_NAME}_NOT_FOUND_MESSAGE_targets) foreach(_target "protobuf::libprotobuf" ) if(NOT TARGET "${_target}" ) set(${CMAKE_FIND_PACKAGE_NAME}_NOT_FOUND_MESSAGE_targets "${${CMAKE_FIND_PACKAGE_NAME}_NOT_FOUND_MESSAGE_targets} ${_target}") endif() endforeach() if(DEFINED ${CMAKE_FIND_PACKAGE_NAME}_NOT_FOUND_MESSAGE_targets) if(CMAKE_FIND_PACKAGE_NAME) set( ${CMAKE_FIND_PACKAGE_NAME}_FOUND FALSE) set( ${CMAKE_FIND_PACKAGE_NAME}_NOT_FOUND_MESSAGE "The following imported targets are referenced, but are missing: ${${CMAKE_FIND_PACKAGE_NAME}_NOT_FOUND_MESSAGE_targets}") else() message(FATAL_ERROR "The following imported targets are referenced, but are missing: ${${CMAKE_FIND_PACKAGE_NAME}_NOT_FOUND_MESSAGE_targets}") endif() endif() unset(${CMAKE_FIND_PACKAGE_NAME}_NOT_FOUND_MESSAGE_targets) # Commands beyond this point should not need to know the version. set(CMAKE_IMPORT_FILE_VERSION) cmake_policy(POP) ```
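The export set above defines both the imported shared libraries (`c10`, `torch_cpu`, `torch_cuda`, `torch`) and the `*_library` interface wrappers that forward their `INTERFACE_*` properties. Linking the wrapper is what propagates include paths, compile definitions such as `USE_DISTRIBUTED`, and link flags like the `-INCLUDE:?warp_size@cuda@at@@YAHXZ` entry to consumers. A small sketch with an illustrative `my_op` target:

```cmake
# Link against the interface wrapper rather than the raw imported target so
# that usage requirements (includes, definitions, link flags) are inherited.
add_library(my_op SHARED my_op.cpp)
target_link_libraries(my_op PRIVATE torch_library)

# When debugging link or include problems it can help to dump what the
# imported targets propagate.
get_target_property(_torch_cpu_defs torch_cpu INTERFACE_COMPILE_DEFINITIONS)
message(STATUS "torch_cpu interface definitions: ${_torch_cpu_defs}")
```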
====================================================================================================================================== SOURCE CODE FILE: FindCUDAToolkit.cmake LINES: 2 SIZE: 38.92 KB PATH: scripts\freecad_env\Lib\site-packages\torch\share\cmake\Caffe2\FindCUDAToolkit.cmake ENCODING: utf-8 ```cmake # This module is back-ported from CMake 3.17 and above to work with CMake 3.10 # Distributed under the OSI-approved BSD 3-Clause License. See accompanying # file Copyright.txt or https://cmake.org/licensing for details. #[=======================================================================[.rst: FindCUDAToolkit --------------- .. versionadded:: 3.17 This script locates the NVIDIA CUDA toolkit and the associated libraries, but does not require the ``CUDA`` language be enabled for a given project. This module does not search for the NVIDIA CUDA Samples. .. versionadded:: 3.19 QNX support. Search Behavior ^^^^^^^^^^^^^^^ The CUDA Toolkit search behavior uses the following order: 1. If the ``CUDA`` language has been enabled we will use the directory containing the compiler as the first search location for ``nvcc``. 2. If the ``CUDAToolkit_ROOT`` cmake configuration variable (e.g., ``-DCUDAToolkit_ROOT=/some/path``) *or* environment variable is defined, it will be searched. If both an environment variable **and** a configuration variable are specified, the *configuration* variable takes precedence. The directory specified here must be such that the executable ``nvcc`` or the appropriate ``version.txt`` file can be found underneath the specified directory. 3. If the CUDA_PATH environment variable is defined, it will be searched for ``nvcc``. 4. The user's path is searched for ``nvcc`` using :command:`find_program`. If this is found, no subsequent search attempts are performed. Users are responsible for ensuring that the first ``nvcc`` to show up in the path is the desired path in the event that multiple CUDA Toolkits are installed. 5. On Unix systems, if the symbolic link ``/usr/local/cuda`` exists, this is used. No subsequent search attempts are performed. No default symbolic link location exists for the Windows platform. 6. The platform specific default install locations are searched. If exactly one candidate is found, this is used. The default CUDA Toolkit install locations searched are: +-------------+-------------------------------------------------------------+ | Platform | Search Pattern | +=============+=============================================================+ | macOS | ``/Developer/NVIDIA/CUDA-X.Y`` | +-------------+-------------------------------------------------------------+ | Other Unix | ``/usr/local/cuda-X.Y`` | +-------------+-------------------------------------------------------------+ | Windows | ``C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\vX.Y`` | +-------------+-------------------------------------------------------------+ Where ``X.Y`` would be a specific version of the CUDA Toolkit, such as ``/usr/local/cuda-9.0`` or ``C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v9.0`` .. note:: When multiple CUDA Toolkits are installed in the default location of a system(e.g., both ``/usr/local/cuda-9.0`` and ``/usr/local/cuda-10.0`` exist but the ``/usr/local/cuda`` symbolic link does **not** exist), this package is marked as **not** found. There are too many factors involved in making an automatic decision in the presence of multiple CUDA Toolkits being installed. 
In this situation, users are encouraged to either (1) set ``CUDAToolkit_ROOT`` or (2) ensure that the correct ``nvcc`` executable shows up in ``$PATH`` for :command:`find_program` to find. Arguments ^^^^^^^^^ ``[<version>]`` The ``[<version>]`` argument requests a version with which the package found should be compatible. See :ref:`find_package version format <FIND_PACKAGE_VERSION_FORMAT>` for more details. Options ^^^^^^^ ``REQUIRED`` If specified, configuration will error if a suitable CUDA Toolkit is not found. ``QUIET`` If specified, the search for a suitable CUDA Toolkit will not produce any messages. ``EXACT`` If specified, the CUDA Toolkit is considered found only if the exact ``VERSION`` specified is recovered. Imported targets ^^^^^^^^^^^^^^^^ An :ref:`imported target <Imported targets>` named ``CUDA::toolkit`` is provided. This module defines :prop_tgt:`IMPORTED` targets for each of the following libraries that are part of the CUDAToolkit: - :ref:`CUDA Runtime Library<cuda_toolkit_rt_lib>` - :ref:`CUDA Driver Library<cuda_toolkit_driver_lib>` - :ref:`cuBLAS<cuda_toolkit_cuBLAS>` - :ref:`cuFFT<cuda_toolkit_cuFFT>` - :ref:`cuRAND<cuda_toolkit_cuRAND>` - :ref:`cuSOLVER<cuda_toolkit_cuSOLVER>` - :ref:`cuSPARSE<cuda_toolkit_cuSPARSE>` - :ref:`cuPTI<cuda_toolkit_cupti>` - :ref:`NPP<cuda_toolkit_NPP>` - :ref:`nvBLAS<cuda_toolkit_nvBLAS>` - :ref:`nvGRAPH<cuda_toolkit_nvGRAPH>` - :ref:`nvJPEG<cuda_toolkit_nvJPEG>` - :ref:`nvidia-ML<cuda_toolkit_nvML>` - :ref:`nvRTC<cuda_toolkit_nvRTC>` - :ref:`nvToolsExt<cuda_toolkit_nvToolsExt>` - :ref:`OpenCL<cuda_toolkit_opencl>` - :ref:`cuLIBOS<cuda_toolkit_cuLIBOS>` .. _`cuda_toolkit_rt_lib`: CUDA Runtime Library """""""""""""""""""" The CUDA Runtime library (cudart) are what most applications will typically need to link against to make any calls such as `cudaMalloc`, and `cudaFree`. Targets Created: - ``CUDA::cudart`` - ``CUDA::cudart_static`` .. _`cuda_toolkit_driver_lib`: CUDA Driver Library """""""""""""""""""" The CUDA Driver library (cuda) are used by applications that use calls such as `cuMemAlloc`, and `cuMemFree`. Targets Created: - ``CUDA::cuda_driver`` .. _`cuda_toolkit_cuBLAS`: cuBLAS """""" The `cuBLAS <https://docs.nvidia.com/cuda/cublas/index.html>`_ library. Targets Created: - ``CUDA::cublas`` - ``CUDA::cublas_static`` - ``CUDA::cublasLt`` starting in CUDA 10.1 - ``CUDA::cublasLt_static`` starting in CUDA 10.1 .. _`cuda_toolkit_cuFFT`: cuFFT """"" The `cuFFT <https://docs.nvidia.com/cuda/cufft/index.html>`_ library. Targets Created: - ``CUDA::cufft`` - ``CUDA::cufftw`` - ``CUDA::cufft_static`` - ``CUDA::cufft_static_nocallback`` starting in CUDA 9.2, requires CMake 3.23+ - ``CUDA::cufftw_static`` cuRAND """""" The `cuRAND <https://docs.nvidia.com/cuda/curand/index.html>`_ library. Targets Created: - ``CUDA::curand`` - ``CUDA::curand_static`` .. _`cuda_toolkit_cuSOLVER`: cuSOLVER """""""" The `cuSOLVER <https://docs.nvidia.com/cuda/cusolver/index.html>`_ library. Targets Created: - ``CUDA::cusolver`` - ``CUDA::cusolver_static`` .. _`cuda_toolkit_cuSPARSE`: cuSPARSE """""""" The `cuSPARSE <https://docs.nvidia.com/cuda/cusparse/index.html>`_ library. Targets Created: - ``CUDA::cusparse`` - ``CUDA::cusparse_static`` .. _`cuda_toolkit_cupti`: cupti """"" The `NVIDIA CUDA Profiling Tools Interface <https://developer.nvidia.com/CUPTI>`_. Targets Created: - ``CUDA::cupti`` - ``CUDA::cupti_static`` .. _`cuda_toolkit_NPP`: NPP """ The `NPP <https://docs.nvidia.com/cuda/npp/index.html>`_ libraries. 
Targets Created: - `nppc`: - ``CUDA::nppc`` - ``CUDA::nppc_static`` - `nppial`: Arithmetic and logical operation functions in `nppi_arithmetic_and_logical_operations.h` - ``CUDA::nppial`` - ``CUDA::nppial_static`` - `nppicc`: Color conversion and sampling functions in `nppi_color_conversion.h` - ``CUDA::nppicc`` - ``CUDA::nppicc_static`` - `nppicom`: JPEG compression and decompression functions in `nppi_compression_functions.h` Removed starting in CUDA 11.0, use :ref:`nvJPEG<cuda_toolkit_nvJPEG>` instead. - ``CUDA::nppicom`` - ``CUDA::nppicom_static`` - `nppidei`: Data exchange and initialization functions in `nppi_data_exchange_and_initialization.h` - ``CUDA::nppidei`` - ``CUDA::nppidei_static`` - `nppif`: Filtering and computer vision functions in `nppi_filter_functions.h` - ``CUDA::nppif`` - ``CUDA::nppif_static`` - `nppig`: Geometry transformation functions found in `nppi_geometry_transforms.h` - ``CUDA::nppig`` - ``CUDA::nppig_static`` - `nppim`: Morphological operation functions found in `nppi_morphological_operations.h` - ``CUDA::nppim`` - ``CUDA::nppim_static`` - `nppist`: Statistics and linear transform in `nppi_statistics_functions.h` and `nppi_linear_transforms.h` - ``CUDA::nppist`` - ``CUDA::nppist_static`` - `nppisu`: Memory support functions in `nppi_support_functions.h` - ``CUDA::nppisu`` - ``CUDA::nppisu_static`` - `nppitc`: Threshold and compare operation functions in `nppi_threshold_and_compare_operations.h` - ``CUDA::nppitc`` - ``CUDA::nppitc_static`` - `npps`: - ``CUDA::npps`` - ``CUDA::npps_static`` .. _`cuda_toolkit_nvBLAS`: nvBLAS """""" The `nvBLAS <https://docs.nvidia.com/cuda/nvblas/index.html>`_ libraries. This is a shared library only. Targets Created: - ``CUDA::nvblas`` .. _`cuda_toolkit_nvGRAPH`: nvGRAPH """"""" The `nvGRAPH <https://docs.nvidia.com/cuda/nvgraph/index.html>`_ library. Removed starting in CUDA 11.0 Targets Created: - ``CUDA::nvgraph`` - ``CUDA::nvgraph_static`` .. _`cuda_toolkit_nvJPEG`: nvJPEG """""" The `nvJPEG <https://docs.nvidia.com/cuda/nvjpeg/index.html>`_ library. Introduced in CUDA 10. Targets Created: - ``CUDA::nvjpeg`` - ``CUDA::nvjpeg_static`` .. _`cuda_toolkit_nvRTC`: nvRTC """"" The `nvRTC <https://docs.nvidia.com/cuda/nvrtc/index.html>`_ (Runtime Compilation) library. This is a shared library only. Targets Created: - ``CUDA::nvrtc`` .. _`cuda_toolkit_nvml`: nvidia-ML """"""""" The `NVIDIA Management Library <https://developer.nvidia.com/nvidia-management-library-nvml>`_. This is a shared library only. Targets Created: - ``CUDA::nvml`` .. _`cuda_toolkit_nvToolsExt`: nvToolsExt """""""""" The `NVIDIA Tools Extension <https://docs.nvidia.com/gameworks/content/gameworkslibrary/nvtx/nvidia_tools_extension_library_nvtx.htm>`_. This is a shared library only. Targets Created: - ``CUDA::nvToolsExt`` .. _`cuda_toolkit_opencl`: OpenCL """""" The `NVIDIA OpenCL Library <https://developer.nvidia.com/opencl>`_. This is a shared library only. Targets Created: - ``CUDA::OpenCL`` .. _`cuda_toolkit_cuLIBOS`: cuLIBOS """"""" The cuLIBOS library is a backend thread abstraction layer library which is static only. The ``CUDA::cublas_static``, ``CUDA::cusparse_static``, ``CUDA::cufft_static``, ``CUDA::curand_static``, and (when implemented) NPP libraries all automatically have this dependency linked. Target Created: - ``CUDA::culibos`` **Note**: direct usage of this target by consumers should not be necessary. .. 
_`cuda_toolkit_cuRAND`: Result variables ^^^^^^^^^^^^^^^^ ``CUDAToolkit_FOUND`` A boolean specifying whether or not the CUDA Toolkit was found. ``CUDAToolkit_VERSION`` The exact version of the CUDA Toolkit found (as reported by ``nvcc --version`` or ``version.txt``). ``CUDAToolkit_VERSION_MAJOR`` The major version of the CUDA Toolkit. ``CUDAToolkit_VERSION_MINOR`` The minor version of the CUDA Toolkit. ``CUDAToolkit_VERSION_PATCH`` The patch version of the CUDA Toolkit. ``CUDAToolkit_BIN_DIR`` The path to the CUDA Toolkit library directory that contains the CUDA executable ``nvcc``. ``CUDAToolkit_INCLUDE_DIRS`` The path to the CUDA Toolkit ``include`` folder containing the header files required to compile a project linking against CUDA. ``CUDAToolkit_LIBRARY_DIR`` The path to the CUDA Toolkit library directory that contains the CUDA Runtime library ``cudart``. ``CUDAToolkit_LIBRARY_ROOT`` .. versionadded:: 3.18 The path to the CUDA Toolkit directory containing the nvvm directory and version.txt. ``CUDAToolkit_TARGET_DIR`` The path to the CUDA Toolkit directory including the target architecture when cross-compiling. When not cross-compiling this will be equivalent to the parent directory of ``CUDAToolkit_BIN_DIR``. ``CUDAToolkit_NVCC_EXECUTABLE`` The path to the NVIDIA CUDA compiler ``nvcc``. Note that this path may **not** be the same as :variable:`CMAKE_CUDA_COMPILER <CMAKE_<LANG>_COMPILER>`. ``nvcc`` must be found to determine the CUDA Toolkit version as well as determining other features of the Toolkit. This variable is set for the convenience of modules that depend on this one. #]=======================================================================] # NOTE: much of this was simply extracted from FindCUDA.cmake. # James Bigler, NVIDIA Corp (nvidia.com - jbigler) # Abe Stephens, SCI Institute -- http://www.sci.utah.edu/~abe/FindCuda.html # # Copyright (c) 2008 - 2009 NVIDIA Corporation. All rights reserved. # # Copyright (c) 2007-2009 # Scientific Computing and Imaging Institute, University of Utah # # This code is licensed under the MIT License. See the FindCUDA.cmake script # for the text of the license. # The MIT License # # License for the specific language governing rights and limitations under # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. # ############################################################################### # The toolkit is located during compiler detection for CUDA and stored in CMakeCUDACompiler.cmake as # CMAKE_CUDA_COMPILER_TOOLKIT_ROOT and CMAKE_CUDA_COMPILER_LIBRARY_ROOT. 
# We compute the rest based on those here to avoid re-searching and to avoid finding a possibly # different installation. if(CMAKE_CUDA_COMPILER_TOOLKIT_ROOT) set(CUDAToolkit_ROOT_DIR "${CMAKE_CUDA_COMPILER_TOOLKIT_ROOT}") set(CUDAToolkit_LIBRARY_ROOT "${CMAKE_CUDA_COMPILER_LIBRARY_ROOT}") set(CUDAToolkit_VERSION "${CMAKE_CUDA_COMPILER_TOOLKIT_VERSION}") if(CUDAToolkit_VERSION MATCHES [=[([0-9]+)\.([0-9]+)\.([0-9]+)]=]) set(CUDAToolkit_VERSION_MAJOR "${CMAKE_MATCH_1}") set(CUDAToolkit_VERSION_MINOR "${CMAKE_MATCH_2}") set(CUDAToolkit_VERSION_PATCH "${CMAKE_MATCH_3}") endif() else() function(_CUDAToolkit_find_root_dir ) cmake_parse_arguments(arg "" "" "SEARCH_PATHS;FIND_FLAGS" ${ARGN}) if(NOT CUDAToolkit_BIN_DIR) if(NOT CUDAToolkit_SENTINEL_FILE) find_program(CUDAToolkit_NVCC_EXECUTABLE NAMES nvcc nvcc.exe PATHS ${arg_SEARCH_PATHS} ${arg_FIND_FLAGS} ) endif() if(NOT CUDAToolkit_NVCC_EXECUTABLE) find_file(CUDAToolkit_SENTINEL_FILE NAMES version.txt PATHS ${arg_SEARCH_PATHS} NO_DEFAULT_PATH ) endif() if(EXISTS "${CUDAToolkit_NVCC_EXECUTABLE}") # If NVCC exists then invoke it to find the toolkit location. # This allows us to support wrapper scripts (e.g. ccache or colornvcc), CUDA Toolkit, # NVIDIA HPC SDK, and distro's splayed layouts execute_process(COMMAND ${CUDAToolkit_NVCC_EXECUTABLE} "-v" "__cmake_determine_cuda" OUTPUT_VARIABLE _CUDA_NVCC_OUT ERROR_VARIABLE _CUDA_NVCC_OUT) if(_CUDA_NVCC_OUT MATCHES "\\#\\$ TOP=([^\r\n]*)") get_filename_component(CUDAToolkit_BIN_DIR "${CMAKE_MATCH_1}/bin" ABSOLUTE) else() get_filename_component(CUDAToolkit_BIN_DIR "${CUDAToolkit_NVCC_EXECUTABLE}" DIRECTORY) endif() unset(_CUDA_NVCC_OUT) mark_as_advanced(CUDAToolkit_BIN_DIR) set(CUDAToolkit_BIN_DIR "${CUDAToolkit_BIN_DIR}" CACHE PATH "" FORCE) endif() if(CUDAToolkit_SENTINEL_FILE) get_filename_component(CUDAToolkit_BIN_DIR ${CUDAToolkit_SENTINEL_FILE} DIRECTORY ABSOLUTE) set(CUDAToolkit_BIN_DIR "${CUDAToolkit_BIN_DIR}/bin") set(CUDAToolkit_BIN_DIR "${CUDAToolkit_BIN_DIR}" CACHE PATH "" FORCE) mark_as_advanced(CUDAToolkit_BIN_DIR) endif() endif() if(CUDAToolkit_BIN_DIR) get_filename_component(CUDAToolkit_ROOT_DIR ${CUDAToolkit_BIN_DIR} DIRECTORY ABSOLUTE) set(CUDAToolkit_ROOT_DIR "${CUDAToolkit_ROOT_DIR}" PARENT_SCOPE) endif() endfunction() # For NVCC we can easily deduce the SDK binary directory from the compiler path. if(CMAKE_CUDA_COMPILER_LOADED AND NOT CUDAToolkit_BIN_DIR AND CMAKE_CUDA_COMPILER_ID STREQUAL "NVIDIA") get_filename_component(CUDAToolkit_BIN_DIR "${CMAKE_CUDA_COMPILER}" DIRECTORY) set(CUDAToolkit_BIN_DIR "${CUDAToolkit_BIN_DIR}" CACHE PATH "") # Try language provided path first. _CUDAToolkit_find_root_dir(SEARCH_PATHS "${CUDAToolkit_BIN_DIR}" FIND_FLAGS NO_DEFAULT_PATH) mark_as_advanced(CUDAToolkit_BIN_DIR) endif() # Try user provided path if(NOT CUDAToolkit_ROOT_DIR AND CUDAToolkit_ROOT) _CUDAToolkit_find_root_dir(SEARCH_PATHS "${CUDAToolkit_ROOT}" FIND_FLAGS PATH_SUFFIXES bin NO_DEFAULT_PATH) endif() if(NOT CUDAToolkit_ROOT_DIR) _CUDAToolkit_find_root_dir(FIND_FLAGS PATHS ENV CUDA_PATH PATH_SUFFIXES bin) endif() # If the user specified CUDAToolkit_ROOT but the toolkit could not be found, this is an error. if(NOT CUDAToolkit_ROOT_DIR AND (DEFINED CUDAToolkit_ROOT OR DEFINED ENV{CUDAToolkit_ROOT})) # Declare error messages now, print later depending on find_package args. 
set(fail_base "Could not find nvcc executable in path specified by") set(cuda_root_fail "${fail_base} CUDAToolkit_ROOT=${CUDAToolkit_ROOT}") set(env_cuda_root_fail "${fail_base} environment variable CUDAToolkit_ROOT=$ENV{CUDAToolkit_ROOT}") if(CUDAToolkit_FIND_REQUIRED) if(DEFINED CUDAToolkit_ROOT) message(FATAL_ERROR ${cuda_root_fail}) elseif(DEFINED ENV{CUDAToolkit_ROOT}) message(FATAL_ERROR ${env_cuda_root_fail}) endif() else() if(NOT CUDAToolkit_FIND_QUIETLY) if(DEFINED CUDAToolkit_ROOT) message(STATUS ${cuda_root_fail}) elseif(DEFINED ENV{CUDAToolkit_ROOT}) message(STATUS ${env_cuda_root_fail}) endif() endif() set(CUDAToolkit_FOUND FALSE) unset(fail_base) unset(cuda_root_fail) unset(env_cuda_root_fail) return() endif() endif() # CUDAToolkit_ROOT cmake / env variable not specified, try platform defaults. # # - Linux: /usr/local/cuda-X.Y # - macOS: /Developer/NVIDIA/CUDA-X.Y # - Windows: C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\vX.Y # # We will also search the default symlink location /usr/local/cuda first since # if CUDAToolkit_ROOT is not specified, it is assumed that the symlinked # directory is the desired location. if(NOT CUDAToolkit_ROOT_DIR) if(UNIX) if(NOT APPLE) set(platform_base "/usr/local/cuda-") else() set(platform_base "/Developer/NVIDIA/CUDA-") endif() else() set(platform_base "C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v") endif() # Build out a descending list of possible cuda installations, e.g. file(GLOB possible_paths "${platform_base}*") # Iterate the glob results and create a descending list. set(versions) foreach(p ${possible_paths}) # Extract version number from end of string string(REGEX MATCH "[0-9][0-9]?\\.[0-9]$" p_version ${p}) if(IS_DIRECTORY ${p} AND p_version) list(APPEND versions ${p_version}) endif() endforeach() # Sort numerically in descending order, so we try the newest versions first. if(CMAKE_VERSION VERSION_GREATER_EQUAL 3.18) list(SORT versions COMPARE NATURAL ORDER DESCENDING) elseif(versions) # Alphabetical sort here is not ideal but better than nothing list(SORT versions) list(REVERSE versions) endif() # With a descending list of versions, populate possible paths to search. set(search_paths) foreach(v ${versions}) list(APPEND search_paths "${platform_base}${v}") endforeach() # Force the global default /usr/local/cuda to the front on Unix. if(UNIX) list(INSERT search_paths 0 "/usr/local/cuda") endif() # Now search for the toolkit again using the platform default search paths. _CUDAToolkit_find_root_dir(SEARCH_PATHS "${search_paths}" FIND_FLAGS PATH_SUFFIXES bin) # We are done with these variables now, cleanup for caller. unset(platform_base) unset(possible_paths) unset(versions) unset(search_paths) if(NOT CUDAToolkit_ROOT_DIR) if(CUDAToolkit_FIND_REQUIRED) message(FATAL_ERROR "Could not find nvcc, please set CUDAToolkit_ROOT.") elseif(NOT CUDAToolkit_FIND_QUIETLY) message(STATUS "Could not find nvcc, please set CUDAToolkit_ROOT.") endif() set(CUDAToolkit_FOUND FALSE) return() endif() endif() endif() if(NOT CUDAToolkit_BIN_DIR) set(CUDAToolkit_BIN_DIR "${CUDAToolkit_ROOT_DIR}/bin") endif() if(NOT CUDAToolkit_NVCC_EXECUTABLE) set(CUDAToolkit_NVCC_EXECUTABLE "${CUDAToolkit_BIN_DIR}/nvcc${CMAKE_EXECUTABLE_SUFFIX}") endif() if(CMAKE_CUDA_COMPILER_TOOLKIT_VERSION) set(CUDAToolkit_VERSION "${CMAKE_CUDA_COMPILER_TOOLKIT_VERSION}") else() function(_CUDAToolkit_find_version_file result_variable) # We first check for a non-scattered installation to prefer it over a scattered installation. 
if(CUDAToolkit_ROOT AND EXISTS "${CUDAToolkit_ROOT}/version.txt") set(${result_variable} "${CUDAToolkit_ROOT}/version.txt" PARENT_SCOPE) elseif(CUDAToolkit_ROOT_DIR AND EXISTS "${CUDAToolkit_ROOT_DIR}/version.txt") set(${result_variable} "${CUDAToolkit_ROOT_DIR}/version.txt" PARENT_SCOPE) elseif(CMAKE_SYSROOT_LINK AND EXISTS "${CMAKE_SYSROOT_LINK}/usr/lib/cuda/version.txt") set(${result_variable} "${CMAKE_SYSROOT_LINK}/usr/lib/cuda/version.txt" PARENT_SCOPE) elseif(EXISTS "${CMAKE_SYSROOT}/usr/lib/cuda/version.txt") set(${result_variable} "${CMAKE_SYSROOT}/usr/lib/cuda/version.txt" PARENT_SCOPE) endif() endfunction() _CUDAToolkit_find_version_file( _CUDAToolkit_version_file ) if(_CUDAToolkit_version_file) # CUDAToolkit_LIBRARY_ROOT contains the device library and version file. get_filename_component(CUDAToolkit_LIBRARY_ROOT "${_CUDAToolkit_version_file}" DIRECTORY ABSOLUTE) endif() unset(_CUDAToolkit_version_file) if(CUDAToolkit_NVCC_EXECUTABLE AND CMAKE_CUDA_COMPILER_VERSION AND CUDAToolkit_NVCC_EXECUTABLE STREQUAL CMAKE_CUDA_COMPILER) # Need to set these based off the already computed CMAKE_CUDA_COMPILER_VERSION value # This if statement will always match, but is used to provide variables for MATCH 1,2,3... if(CMAKE_CUDA_COMPILER_VERSION MATCHES [=[([0-9]+)\.([0-9]+)\.([0-9]+)]=]) set(CUDAToolkit_VERSION_MAJOR "${CMAKE_MATCH_1}") set(CUDAToolkit_VERSION_MINOR "${CMAKE_MATCH_2}") set(CUDAToolkit_VERSION_PATCH "${CMAKE_MATCH_3}") set(CUDAToolkit_VERSION "${CMAKE_CUDA_COMPILER_VERSION}") endif() elseif(CUDAToolkit_NVCC_EXECUTABLE) # Compute the version by invoking nvcc execute_process(COMMAND ${CUDAToolkit_NVCC_EXECUTABLE} "--version" OUTPUT_VARIABLE NVCC_OUT) if(NVCC_OUT MATCHES [=[ V([0-9]+)\.([0-9]+)\.([0-9]+)]=]) set(CUDAToolkit_VERSION_MAJOR "${CMAKE_MATCH_1}") set(CUDAToolkit_VERSION_MINOR "${CMAKE_MATCH_2}") set(CUDAToolkit_VERSION_PATCH "${CMAKE_MATCH_3}") set(CUDAToolkit_VERSION "${CMAKE_MATCH_1}.${CMAKE_MATCH_2}.${CMAKE_MATCH_3}") endif() unset(NVCC_OUT) else() _CUDAToolkit_find_version_file(version_file) if(version_file) file(READ "${version_file}" VERSION_INFO) if(VERSION_INFO MATCHES [=[CUDA Version ([0-9]+)\.([0-9]+)\.([0-9]+)]=]) set(CUDAToolkit_VERSION_MAJOR "${CMAKE_MATCH_1}") set(CUDAToolkit_VERSION_MINOR "${CMAKE_MATCH_2}") set(CUDAToolkit_VERSION_PATCH "${CMAKE_MATCH_3}") set(CUDAToolkit_VERSION "${CMAKE_MATCH_1}.${CMAKE_MATCH_2}.${CMAKE_MATCH_3}") endif() endif() endif() endif() # Find target directory when crosscompiling. 
if(CMAKE_CROSSCOMPILING) if(CMAKE_SYSTEM_PROCESSOR STREQUAL "armv7-a") # Support for NVPACK set(CUDAToolkit_TARGET_NAME "armv7-linux-androideabi") elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "arm") set(CUDAToolkit_TARGET_NAME "armv7-linux-gnueabihf") elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64") if(ANDROID_ARCH_NAME STREQUAL "arm64") set(CUDAToolkit_TARGET_NAME "aarch64-linux-androideabi") elseif(CMAKE_SYSTEM_NAME STREQUAL "QNX") set(CUDAToolkit_TARGET_NAME "aarch64-qnx") else() set(CUDAToolkit_TARGET_NAME "aarch64-linux") endif(ANDROID_ARCH_NAME STREQUAL "arm64") elseif(CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64") set(CUDAToolkit_TARGET_NAME "x86_64-linux") endif() if(EXISTS "${CUDAToolkit_ROOT_DIR}/targets/${CUDAToolkit_TARGET_NAME}") set(CUDAToolkit_TARGET_DIR "${CUDAToolkit_ROOT_DIR}/targets/${CUDAToolkit_TARGET_NAME}") # add known CUDA target root path to the set of directories we search for programs, libraries and headers list(PREPEND CMAKE_FIND_ROOT_PATH "${CUDAToolkit_TARGET_DIR}") # Mark that we need to pop the root search path changes after we have # found all cuda libraries so that searches for our cross-compilation # libraries work when another cuda sdk is in CMAKE_PREFIX_PATH or # PATh set(_CUDAToolkit_Pop_ROOT_PATH True) endif() endif() # If not already set we can simply use the toolkit root or it's a scattered installation. if(NOT CUDAToolkit_TARGET_DIR) # Not cross compiling set(CUDAToolkit_TARGET_DIR "${CUDAToolkit_ROOT_DIR}") # Now that we have the real ROOT_DIR, find components inside it. list(APPEND CMAKE_PREFIX_PATH ${CUDAToolkit_ROOT_DIR}) # Mark that we need to pop the prefix path changes after we have # found the cudart library. set(_CUDAToolkit_Pop_Prefix True) endif() # CUDAToolkit_TARGET_DIR always points to the directory containing the include directory. # On a scattered installation /usr, on a non-scattered something like /usr/local/cuda or /usr/local/cuda-10.2/targets/aarch64-linux. if(EXISTS "${CUDAToolkit_TARGET_DIR}/include/cuda_runtime.h") set(CUDAToolkit_INCLUDE_DIR "${CUDAToolkit_TARGET_DIR}/include") elseif(NOT CUDAToolkit_FIND_QUIETLY) message(STATUS "Unable to find cuda_runtime.h in \"${CUDAToolkit_TARGET_DIR}/include\" for CUDAToolkit_INCLUDE_DIR.") endif() # The NVHPC layout moves math library headers and libraries to a sibling directory. # Create a separate variable so this directory can be selectively added to math targets. if(NOT EXISTS "${CUDAToolkit_INCLUDE_DIR}/cublas_v2.h") set(CUDAToolkit_MATH_INCLUDE_DIR "${CUDAToolkit_TARGET_DIR}/../../math_libs/include") get_filename_component(CUDAToolkit_MATH_INCLUDE_DIR "${CUDAToolkit_MATH_INCLUDE_DIR}" ABSOLUTE) if(NOT EXISTS "${CUDAToolkit_MATH_INCLUDE_DIR}/cublas_v2.h") if(NOT CUDAToolkit_FIND_QUIETLY) message(STATUS "Unable to find cublas_v2.h in either \"${CUDAToolkit_INCLUDE_DIR}\" or \"${CUDAToolkit_MATH_INCLUDE_DIR}\"") endif() unset(CUDAToolkit_MATH_INCLUDE_DIR) endif() endif() # Find the CUDA Runtime Library libcudart find_library(CUDA_CUDART NAMES cudart PATH_SUFFIXES lib64 lib/x64 ) find_library(CUDA_CUDART NAMES cudart PATH_SUFFIXES lib64/stubs lib/x64/stubs ) if(NOT CUDA_CUDART AND NOT CUDAToolkit_FIND_QUIETLY) message(STATUS "Unable to find cudart library.") endif() if(_CUDAToolkit_Pop_Prefix) list(REMOVE_AT CMAKE_PREFIX_PATH -1) unset(_CUDAToolkit_Pop_Prefix) endif() #----------------------------------------------------------------------------- # Perform version comparison and validate all required variables are set. 
include(FindPackageHandleStandardArgs) find_package_handle_standard_args(CUDAToolkit REQUIRED_VARS CUDAToolkit_INCLUDE_DIR CUDAToolkit_VERSION CUDA_CUDART CUDAToolkit_BIN_DIR VERSION_VAR CUDAToolkit_VERSION ) mark_as_advanced(CUDA_CUDART CUDAToolkit_INCLUDE_DIR CUDAToolkit_NVCC_EXECUTABLE CUDAToolkit_SENTINEL_FILE ) #----------------------------------------------------------------------------- # Construct result variables if(CUDAToolkit_FOUND) set(CUDAToolkit_INCLUDE_DIRS ${CUDAToolkit_INCLUDE_DIR}) get_filename_component(CUDAToolkit_LIBRARY_DIR ${CUDA_CUDART} DIRECTORY ABSOLUTE) endif() #----------------------------------------------------------------------------- # Construct import targets if(CUDAToolkit_FOUND) function(_CUDAToolkit_find_and_add_import_lib lib_name) cmake_parse_arguments(arg "" "" "ALT;DEPS;EXTRA_HINTS;EXTRA_PATH_SUFFIXES;EXTRA_INCLUDE_DIRS" ${ARGN}) set(search_names ${lib_name} ${arg_ALT}) find_library(CUDA_${lib_name}_LIBRARY NAMES ${search_names} HINTS ${CUDAToolkit_LIBRARY_DIR} ENV CUDA_PATH ${arg_EXTRA_HINTS} PATH_SUFFIXES nvidia/current lib64 lib/x64 lib ${arg_EXTRA_PATH_SUFFIXES} ) # Don't try any stub directories until we have exhausted all other # search locations. find_library(CUDA_${lib_name}_LIBRARY NAMES ${search_names} HINTS ${CUDAToolkit_LIBRARY_DIR} ENV CUDA_PATH ${arg_EXTRA_HINTS} PATH_SUFFIXES lib64/stubs lib/x64/stubs lib/stubs stubs # Support NVHPC splayed math library layout ../../math_libs/${CUDAToolkit_VERSION_MAJOR}.${CUDAToolkit_VERSION_MINOR}/lib64 ../../math_libs/lib64 ) mark_as_advanced(CUDA_${lib_name}_LIBRARY) if(NOT TARGET CUDA::${lib_name} AND CUDA_${lib_name}_LIBRARY) add_library(CUDA::${lib_name} UNKNOWN IMPORTED) set_property(TARGET CUDA::${lib_name} APPEND PROPERTY INTERFACE_INCLUDE_DIRECTORIES "${CUDAToolkit_INCLUDE_DIRS}") set_property(TARGET CUDA::${lib_name} APPEND PROPERTY INTERFACE_SYSTEM_INCLUDE_DIRECTORIES "${CUDAToolkit_INCLUDE_DIRS}") if(DEFINED CUDAToolkit_MATH_INCLUDE_DIR) string(FIND ${CUDA_${lib_name}_LIBRARY} "math_libs" math_libs) if(NOT ${math_libs} EQUAL -1) set_property(TARGET CUDA::${lib_name} APPEND PROPERTY INTERFACE_INCLUDE_DIRECTORIES "${CUDAToolkit_MATH_INCLUDE_DIRS}") set_property(TARGET CUDA::${lib_name} APPEND PROPERTY INTERFACE_SYSTEM_INCLUDE_DIRECTORIES "${CUDAToolkit_MATH_INCLUDE_DIRS}") endif() endif() set_property(TARGET CUDA::${lib_name} PROPERTY IMPORTED_LOCATION "${CUDA_${lib_name}_LIBRARY}") foreach(dep ${arg_DEPS}) if(TARGET CUDA::${dep}) set_property(TARGET CUDA::${lib_name} APPEND PROPERTY INTERFACE_LINK_LIBRARIES CUDA::${dep}) endif() endforeach() if(arg_EXTRA_INCLUDE_DIRS) set_property(TARGET CUDA::${lib_name} APPEND PROPERTY INTERFACE_INCLUDE_DIRECTORIES "${arg_EXTRA_INCLUDE_DIRS}") set_property(TARGET CUDA::${lib_name} APPEND PROPERTY INTERFACE_SYSTEM_INCLUDE_DIRECTORIES "${arg_EXTRA_INCLUDE_DIRS}") endif() endif() endfunction() if(NOT TARGET CUDA::toolkit) add_library(CUDA::toolkit IMPORTED INTERFACE) set_property(TARGET CUDA::toolkit APPEND PROPERTY INTERFACE_INCLUDE_DIRECTORIES "${CUDAToolkit_INCLUDE_DIRS}") set_property(TARGET CUDA::toolkit APPEND PROPERTY INTERFACE_SYSTEM_INCLUDE_DIRECTORIES "${CUDAToolkit_INCLUDE_DIRS}") endif() _CUDAToolkit_find_and_add_import_lib(cuda_driver ALT cuda) _CUDAToolkit_find_and_add_import_lib(cudart) _CUDAToolkit_find_and_add_import_lib(cudart_static) # setup dependencies that are required for cudart_static when building # on linux. 
These are generally only required when using the CUDA toolkit # when CUDA language is disabled if(NOT TARGET CUDA::cudart_static_deps AND TARGET CUDA::cudart_static) add_library(CUDA::cudart_static_deps IMPORTED INTERFACE) set_property(TARGET CUDA::cudart_static APPEND PROPERTY INTERFACE_LINK_LIBRARIES CUDA::cudart_static_deps) if(UNIX AND (CMAKE_C_COMPILER OR CMAKE_CXX_COMPILER)) find_package(Threads REQUIRED) set_property(TARGET CUDA::cudart_static_deps APPEND PROPERTY INTERFACE_LINK_LIBRARIES Threads::Threads ${CMAKE_DL_LIBS}) endif() if(UNIX AND NOT APPLE AND NOT (CMAKE_SYSTEM_NAME STREQUAL "QNX")) # On Linux, you must link against librt when using the static cuda runtime. find_library(CUDAToolkit_rt_LIBRARY rt) mark_as_advanced(CUDAToolkit_rt_LIBRARY) if(NOT CUDAToolkit_rt_LIBRARY) message(WARNING "Could not find librt library, needed by CUDA::cudart_static") else() set_property(TARGET CUDA::cudart_static_deps APPEND PROPERTY INTERFACE_LINK_LIBRARIES ${CUDAToolkit_rt_LIBRARY}) endif() endif() endif() _CUDAToolkit_find_and_add_import_lib(culibos) # it's a static library foreach(cuda_lib cublasLt cufft curand cusparse nppc nvjpeg) _CUDAToolkit_find_and_add_import_lib(${cuda_lib}) _CUDAToolkit_find_and_add_import_lib(${cuda_lib}_static DEPS culibos) endforeach() if(CUDAToolkit_VERSION VERSION_GREATER_EQUAL 11.0.0) # cublas depends on cublasLt # https://docs.nvidia.com/cuda/archive/11.0/cublas/index.html#static-library _CUDAToolkit_find_and_add_import_lib(cublas DEPS cublasLt) _CUDAToolkit_find_and_add_import_lib(cublas_static DEPS cublasLt_static) else() _CUDAToolkit_find_and_add_import_lib(cublas) _CUDAToolkit_find_and_add_import_lib(cublas_static DEPS culibos) endif() if(CUDAToolkit_VERSION VERSION_GREATER_EQUAL 11.4) _CUDAToolkit_find_and_add_import_lib(cuFile ALT cufile DEPS culibos) _CUDAToolkit_find_and_add_import_lib(cuFile_static ALT cufile_static DEPS culibos) _CUDAToolkit_find_and_add_import_lib(cuFile_rdma ALT cufile_rdma DEPS cuFile culibos) _CUDAToolkit_find_and_add_import_lib(cuFile_rdma_static ALT cufile_rdma_static DEPS cuFile_static culibos) endif() # cuFFTW depends on cuFFT _CUDAToolkit_find_and_add_import_lib(cufftw DEPS cufft) _CUDAToolkit_find_and_add_import_lib(cufftw_static DEPS cufft_static) if(CUDAToolkit_VERSION VERSION_GREATER_EQUAL 9.2) _CUDAToolkit_find_and_add_import_lib(cufft_static_nocallback DEPS culibos) endif() # cuSOLVER depends on cuBLAS, and cuSPARSE _CUDAToolkit_find_and_add_import_lib(cusolver DEPS cublas cusparse) _CUDAToolkit_find_and_add_import_lib(cusolver_static DEPS cublas_static cusparse_static culibos) if(CUDAToolkit_VERSION VERSION_GREATER_EQUAL 10.1.2) # cusolver depends on liblapack_static.a starting with CUDA 10.1 update 2, # https://docs.nvidia.com/cuda/archive/11.5.0/cusolver/index.html#static-link-lapack _CUDAToolkit_find_and_add_import_lib(cusolver_lapack_static ALT lapack_static) # implementation detail static lib _CUDAToolkit_find_and_add_import_lib(cusolver_static DEPS cusolver_lapack_static) endif() if(CUDAToolkit_VERSION VERSION_GREATER 11.2.1) # cusolver depends on libcusolver_metis and cublasLt # https://docs.nvidia.com/cuda/archive/11.2.2/cusolver/index.html#link-dependency _CUDAToolkit_find_and_add_import_lib(cusolver DEPS cublasLt) _CUDAToolkit_find_and_add_import_lib(cusolver_metis_static ALT metis_static) # implementation detail static lib _CUDAToolkit_find_and_add_import_lib(cusolver_static DEPS cusolver_metis_static cublasLt_static) endif() # nvGRAPH depends on cuRAND, and cuSOLVER. 
_CUDAToolkit_find_and_add_import_lib(nvgraph DEPS curand cusolver) _CUDAToolkit_find_and_add_import_lib(nvgraph_static DEPS curand_static cusolver_static) # Process the majority of the NPP libraries. foreach(cuda_lib nppial nppicc nppidei nppif nppig nppim nppist nppitc npps nppicom nppisu) _CUDAToolkit_find_and_add_import_lib(${cuda_lib} DEPS nppc) _CUDAToolkit_find_and_add_import_lib(${cuda_lib}_static DEPS nppc_static) endforeach() find_path(CUDAToolkit_CUPTI_INCLUDE_DIR cupti.h PATHS "${CUDAToolkit_ROOT_DIR}/extras/CUPTI/include" "${CUDAToolkit_INCLUDE_DIR}/../extras/CUPTI/include" "${CUDAToolkit_INCLUDE_DIR}" NO_DEFAULT_PATH) mark_as_advanced(CUDAToolkit_CUPTI_INCLUDE_DIR) if(CUDAToolkit_CUPTI_INCLUDE_DIR) _CUDAToolkit_find_and_add_import_lib(cupti EXTRA_PATH_SUFFIXES ../extras/CUPTI/lib64/ ../extras/CUPTI/lib/ EXTRA_INCLUDE_DIRS "${CUDAToolkit_CUPTI_INCLUDE_DIR}") _CUDAToolkit_find_and_add_import_lib(cupti_static EXTRA_PATH_SUFFIXES ../extras/CUPTI/lib64/ ../extras/CUPTI/lib/ EXTRA_INCLUDE_DIRS "${CUDAToolkit_CUPTI_INCLUDE_DIR}") endif() _CUDAToolkit_find_and_add_import_lib(nvrtc DEPS cuda_driver) _CUDAToolkit_find_and_add_import_lib(nvml ALT nvidia-ml nvml) # nvtools can be installed outside the CUDA toolkit directory, # so search the NVTOOLSEXT_PATH windows only environment variable set(nvToolsExt_EXTRA_PATH) if(WIN32) set(nvToolsExt_EXTRA_PATH "C:\\Program Files\\NVIDIA Corporation\\NvToolsExt") endif() find_path(CUDAToolkit_nvToolsExt_INCLUDE_DIR nvToolsExt.h PATHS "${CUDAToolkit_INCLUDE_DIR}" "${CUDAToolkit_ROOT_DIR}" ENV NVTOOLSEXT_PATH "${nvToolsExt_EXTRA_PATH}" PATH_SUFFIXES include NO_DEFAULT_PATH) mark_as_advanced(CUDAToolkit_nvToolsExt_INCLUDE_DIR) if(CUDAToolkit_nvToolsExt_INCLUDE_DIR) _CUDAToolkit_find_and_add_import_lib(nvToolsExt ALT nvToolsExt64 nvToolsExt64_1 EXTRA_HINTS ENV NVTOOLSEXT_PATH "${nvToolsExt_EXTRA_PATH}" EXTRA_INCLUDE_DIRS "${CUDAToolkit_nvToolsExt_INCLUDE_DIR}") endif() _CUDAToolkit_find_and_add_import_lib(OpenCL) endif() unset(CUDAToolkit_ROOT_DIR) if(_CUDAToolkit_Pop_ROOT_PATH) list(REMOVE_AT CMAKE_FIND_ROOT_PATH 0) unset(_CUDAToolkit_Pop_ROOT_PATH) endif() ```
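A consuming project picks this module up with `find_package(CUDAToolkit)` and links the `CUDA::*` imported targets it defines. The sketch below is illustrative only: the project name, the source file, and the choice of linked targets are assumptions, not something the module prescribes.

```cmake
# Minimal hypothetical consumer of FindCUDAToolkit.cmake.
cmake_minimum_required(VERSION 3.10)
project(cuda_consumer LANGUAGES CXX)

find_package(CUDAToolkit REQUIRED)

add_executable(saxpy main.cpp)  # main.cpp is an assumed source file
target_link_libraries(saxpy PRIVATE CUDA::cudart CUDA::cublas)

message(STATUS "CUDA Toolkit ${CUDAToolkit_VERSION} (nvcc: ${CUDAToolkit_NVCC_EXECUTABLE})")
```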
================================================================================================================================ SOURCE CODE FILE: FindCUDSS.cmake LINES: 1 SIZE: 2.70 KB PATH: scripts\freecad_env\Lib\site-packages\torch\share\cmake\Caffe2\FindCUDSS.cmake ENCODING: utf-8
```cmake
# Find the CUDSS library
#
# The following variables are optionally searched for defaults
#  CUDSS_ROOT: Base directory where CUDSS is found
#  CUDSS_INCLUDE_DIR: Directory where CUDSS header is searched for
#  CUDSS_LIBRARY: Directory where CUDSS library is searched for
#
# The following are set after configuration is done:
#  CUDSS_FOUND
#  CUDSS_INCLUDE_PATH
#  CUDSS_LIBRARY_PATH

include(FindPackageHandleStandardArgs)

set(CUDSS_ROOT $ENV{CUDSS_ROOT_DIR} CACHE PATH "Folder containing NVIDIA CUDSS")
if (DEFINED ENV{CUDSS_ROOT_DIR})
  message(WARNING "CUDSS_ROOT_DIR is deprecated. Please set CUDSS_ROOT instead.")
endif()
list(APPEND CUDSS_ROOT $ENV{CUDSS_ROOT_DIR} ${CUDA_TOOLKIT_ROOT_DIR})

# Compatibility layer for CMake <3.12. For CMake >=3.12, CUDSS_ROOT is taken
# into account when searching for paths and libraries.
list(APPEND CMAKE_PREFIX_PATH ${CUDSS_ROOT})

set(CUDSS_INCLUDE_DIR $ENV{CUDSS_INCLUDE_DIR} CACHE PATH "Folder containing NVIDIA CUDSS header files")

find_path(CUDSS_INCLUDE_PATH cudss.h
  HINTS ${CUDSS_INCLUDE_DIR}
  PATH_SUFFIXES cuda/include cuda include)

set(CUDSS_LIBRARY $ENV{CUDSS_LIBRARY} CACHE PATH "Path to the CUDSS library file (e.g., libcudss.so)")

set(CUDSS_LIBRARY_NAME "libcudss.so")
if(MSVC)
  set(CUDSS_LIBRARY_NAME "cudss.lib")
endif()

find_library(CUDSS_LIBRARY_PATH ${CUDSS_LIBRARY_NAME}
  PATHS ${CUDSS_LIBRARY}
  PATH_SUFFIXES lib lib64 cuda/lib cuda/lib64 lib/x64)

find_package_handle_standard_args(CUDSS DEFAULT_MSG CUDSS_LIBRARY_PATH CUDSS_INCLUDE_PATH)

if(CUDSS_FOUND)
  # Get CUDSS version
  file(READ ${CUDSS_INCLUDE_PATH}/cudss.h CUDSS_HEADER_CONTENTS)
  string(REGEX MATCH "define CUDSS_VER_MAJOR * +([0-9]+)" CUDSS_VERSION_MAJOR "${CUDSS_HEADER_CONTENTS}")
  string(REGEX REPLACE "define CUDSS_VER_MAJOR * +([0-9]+)" "\\1" CUDSS_VERSION_MAJOR "${CUDSS_VERSION_MAJOR}")
  string(REGEX MATCH "define CUDSS_VER_MINOR * +([0-9]+)" CUDSS_VERSION_MINOR "${CUDSS_HEADER_CONTENTS}")
  string(REGEX REPLACE "define CUDSS_VER_MINOR * +([0-9]+)" "\\1" CUDSS_VERSION_MINOR "${CUDSS_VERSION_MINOR}")
  string(REGEX MATCH "define CUDSS_VER_PATCH * +([0-9]+)" CUDSS_VERSION_PATCH "${CUDSS_HEADER_CONTENTS}")
  string(REGEX REPLACE "define CUDSS_VER_PATCH * +([0-9]+)" "\\1" CUDSS_VERSION_PATCH "${CUDSS_VERSION_PATCH}")
  # Assemble CUDSS version. Use minor version since current major version is 0.
  if(NOT CUDSS_VERSION_MINOR)
    set(CUDSS_VERSION "?")
  else()
    set(CUDSS_VERSION "${CUDSS_VERSION_MAJOR}.${CUDSS_VERSION_MINOR}.${CUDSS_VERSION_PATCH}")
  endif()
endif()

mark_as_advanced(CUDSS_ROOT CUDSS_INCLUDE_DIR CUDSS_LIBRARY CUDSS_VERSION)
```
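FindCUDSS.cmake reports plain result variables rather than imported targets, so a consumer wires the include and library paths up by hand. A minimal sketch, assuming the containing directory is on `CMAKE_MODULE_PATH` and `my_solver` is an existing target (both are assumptions):

```cmake
# Hypothetical usage of FindCUDSS.cmake; the module path and target name are placeholders.
list(APPEND CMAKE_MODULE_PATH "/path/to/torch/share/cmake/Caffe2")
find_package(CUDSS)
if(CUDSS_FOUND)
  message(STATUS "Found cuDSS ${CUDSS_VERSION}")
  target_include_directories(my_solver PRIVATE ${CUDSS_INCLUDE_PATH})
  target_link_libraries(my_solver PRIVATE ${CUDSS_LIBRARY_PATH})
endif()
```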
===================================================================================================================================== SOURCE CODE FILE: FindCUSPARSELT.cmake LINES: 1 SIZE: 3.06 KB PATH: scripts\freecad_env\Lib\site-packages\torch\share\cmake\Caffe2\FindCUSPARSELT.cmake ENCODING: utf-8
```cmake
# Find the CUSPARSELT library
#
# The following variables are optionally searched for defaults
#  CUSPARSELT_ROOT: Base directory where CUSPARSELT is found
#  CUSPARSELT_INCLUDE_DIR: Directory where CUSPARSELT header is searched for
#  CUSPARSELT_LIBRARY: Directory where CUSPARSELT library is searched for
#
# The following are set after configuration is done:
#  CUSPARSELT_FOUND
#  CUSPARSELT_INCLUDE_PATH
#  CUSPARSELT_LIBRARY_PATH

include(FindPackageHandleStandardArgs)

set(CUSPARSELT_ROOT $ENV{CUSPARSELT_ROOT_DIR} CACHE PATH "Folder containing NVIDIA cuSPARSELt")
if (DEFINED ENV{CUSPARSELT_ROOT_DIR})
  message(WARNING "CUSPARSELT_ROOT_DIR is deprecated. Please set CUSPARSELT_ROOT instead.")
endif()
list(APPEND CUSPARSELT_ROOT $ENV{CUSPARSELT_ROOT_DIR} ${CUDA_TOOLKIT_ROOT_DIR})

# Compatibility layer for CMake <3.12. For CMake >=3.12, CUSPARSELT_ROOT is taken
# into account when searching for paths and libraries.
list(APPEND CMAKE_PREFIX_PATH ${CUSPARSELT_ROOT})

set(CUSPARSELT_INCLUDE_DIR $ENV{CUSPARSELT_INCLUDE_DIR} CACHE PATH "Folder containing NVIDIA cuSPARSELt header files")

find_path(CUSPARSELT_INCLUDE_PATH cusparseLt.h
  HINTS ${CUSPARSELT_INCLUDE_DIR}
  PATH_SUFFIXES cuda/include cuda include)

set(CUSPARSELT_LIBRARY $ENV{CUSPARSELT_LIBRARY} CACHE PATH "Path to the cusparselt library file (e.g., libcusparseLt.so)")

set(CUSPARSELT_LIBRARY_NAME "libcusparseLt.so")
if(MSVC)
  set(CUSPARSELT_LIBRARY_NAME "cusparseLt.lib")
endif()

find_library(CUSPARSELT_LIBRARY_PATH ${CUSPARSELT_LIBRARY_NAME}
  PATHS ${CUSPARSELT_LIBRARY}
  PATH_SUFFIXES lib lib64 cuda/lib cuda/lib64 lib/x64)

find_package_handle_standard_args(CUSPARSELT DEFAULT_MSG CUSPARSELT_LIBRARY_PATH CUSPARSELT_INCLUDE_PATH)

if(CUSPARSELT_FOUND)
  # Get cuSPARSELt version
  file(READ ${CUSPARSELT_INCLUDE_PATH}/cusparseLt.h CUSPARSELT_HEADER_CONTENTS)
  string(REGEX MATCH "define CUSPARSELT_VER_MAJOR * +([0-9]+)" CUSPARSELT_VERSION_MAJOR "${CUSPARSELT_HEADER_CONTENTS}")
  string(REGEX REPLACE "define CUSPARSELT_VER_MAJOR * +([0-9]+)" "\\1" CUSPARSELT_VERSION_MAJOR "${CUSPARSELT_VERSION_MAJOR}")
  string(REGEX MATCH "define CUSPARSELT_VER_MINOR * +([0-9]+)" CUSPARSELT_VERSION_MINOR "${CUSPARSELT_HEADER_CONTENTS}")
  string(REGEX REPLACE "define CUSPARSELT_VER_MINOR * +([0-9]+)" "\\1" CUSPARSELT_VERSION_MINOR "${CUSPARSELT_VERSION_MINOR}")
  string(REGEX MATCH "define CUSPARSELT_VER_PATCH * +([0-9]+)" CUSPARSELT_VERSION_PATCH "${CUSPARSELT_HEADER_CONTENTS}")
  string(REGEX REPLACE "define CUSPARSELT_VER_PATCH * +([0-9]+)" "\\1" CUSPARSELT_VERSION_PATCH "${CUSPARSELT_VERSION_PATCH}")
  # Assemble cuSPARSELt version. Use minor version since current major version is 0.
  if(NOT CUSPARSELT_VERSION_MINOR)
    set(CUSPARSELT_VERSION "?")
  else()
    set(CUSPARSELT_VERSION "${CUSPARSELT_VERSION_MAJOR}.${CUSPARSELT_VERSION_MINOR}.${CUSPARSELT_VERSION_PATCH}")
  endif()
endif()

mark_as_advanced(CUSPARSELT_ROOT CUSPARSELT_INCLUDE_DIR CUSPARSELT_LIBRARY CUSPARSELT_VERSION)
```
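The search above can be steered through the `CUSPARSELT_ROOT`, `CUSPARSELT_INCLUDE_DIR`, and `CUSPARSELT_LIBRARY` hints before the module runs. A sketch with placeholder paths, assuming the module directory is already on `CMAKE_MODULE_PATH`:

```cmake
# Illustrative only: point FindCUSPARSELT.cmake at a non-default cuSPARSELt install.
set(CUSPARSELT_INCLUDE_DIR "/opt/cusparselt/include" CACHE PATH "" FORCE)  # placeholder path
set(CUSPARSELT_LIBRARY "/opt/cusparselt/lib" CACHE PATH "" FORCE)          # placeholder path
find_package(CUSPARSELT)  # assumes the Caffe2 cmake dir is on CMAKE_MODULE_PATH
if(CUSPARSELT_FOUND)
  message(STATUS "cuSPARSELt ${CUSPARSELT_VERSION}: ${CUSPARSELT_LIBRARY_PATH}")
endif()
```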
====================================================================================================================================== SOURCE CODE FILE: FindSYCLToolkit.cmake LINES: 1 SIZE: 4.23 KB PATH: scripts\freecad_env\Lib\site-packages\torch\share\cmake\Caffe2\FindSYCLToolkit.cmake ENCODING: utf-8 ```cmake # This will define the following variables: # SYCL_FOUND : True if the system has the SYCL library. # SYCL_INCLUDE_DIR : Include directories needed to use SYCL. # SYCL_LIBRARY_DIR :The path to the SYCL library. # SYCL_LIBRARY : SYCL library fullname. # SYCL_COMPILER_VERSION : SYCL compiler version. include(FindPackageHandleStandardArgs) set(SYCL_ROOT "") if(DEFINED ENV{SYCL_ROOT}) set(SYCL_ROOT $ENV{SYCL_ROOT}) elseif(DEFINED ENV{CMPLR_ROOT}) set(SYCL_ROOT $ENV{CMPLR_ROOT}) endif() string(COMPARE EQUAL "${SYCL_ROOT}" "" nosyclfound) if(nosyclfound) set(SYCL_FOUND False) set(SYCL_REASON_FAILURE "SYCL library not set!!") set(SYCL_NOT_FOUND_MESSAGE "${SYCL_REASON_FAILURE}") return() endif() # Find SYCL compiler executable. find_program( SYCL_COMPILER NAMES icx PATHS "${SYCL_ROOT}" PATH_SUFFIXES bin bin64 NO_DEFAULT_PATH ) function(parse_sycl_compiler_version version_number) # Execute the SYCL compiler with the --version flag to match the version string. execute_process(COMMAND ${SYCL_COMPILER} --version OUTPUT_VARIABLE SYCL_VERSION_STRING) string(REGEX REPLACE "Intel\\(R\\) (.*) Compiler ([0-9]+\\.[0-9]+\\.[0-9]+) (.*)" "\\2" SYCL_VERSION_STRING_MATCH ${SYCL_VERSION_STRING}) string(REPLACE "." ";" SYCL_VERSION_LIST ${SYCL_VERSION_STRING_MATCH}) # Split the version number list into major, minor, and patch components. list(GET SYCL_VERSION_LIST 0 VERSION_MAJOR) list(GET SYCL_VERSION_LIST 1 VERSION_MINOR) list(GET SYCL_VERSION_LIST 2 VERSION_PATCH) # Calculate the version number in the format XXXXYYZZ, using the formula (major * 10000 + minor * 100 + patch). math(EXPR VERSION_NUMBER_MATCH "${VERSION_MAJOR} * 10000 + ${VERSION_MINOR} * 100 + ${VERSION_PATCH}") set(${version_number} "${VERSION_NUMBER_MATCH}" PARENT_SCOPE) endfunction() parse_sycl_compiler_version(SYCL_COMPILER_VERSION) if(NOT SYCL_COMPILER_VERSION) set(SYCL_FOUND False) set(SYCL_REASON_FAILURE "Cannot parse sycl compiler version to get SYCL_COMPILER_VERSION!") set(SYCL_NOT_FOUND_MESSAGE "${SYCL_REASON_FAILURE}") return() endif() # Find include path from binary. find_file( SYCL_INCLUDE_DIR NAMES include HINTS ${SYCL_ROOT} NO_DEFAULT_PATH ) # Find include/sycl path from include path. find_file( SYCL_INCLUDE_SYCL_DIR NAMES sycl HINTS ${SYCL_ROOT}/include/ NO_DEFAULT_PATH ) # Due to the unrecognized compilation option `-fsycl` in other compiler. list(APPEND SYCL_INCLUDE_DIR ${SYCL_INCLUDE_SYCL_DIR}) # Find library directory from binary. find_file( SYCL_LIBRARY_DIR NAMES lib lib64 HINTS ${SYCL_ROOT} NO_DEFAULT_PATH ) # Define the old version of SYCL toolkit that is compatible with the current version of PyTorch. set(PYTORCH_2_5_SYCL_TOOLKIT_VERSION 20249999) # By default, we use libsycl.so on Linux and sycl.lib on Windows as the SYCL library name. if (SYCL_COMPILER_VERSION VERSION_LESS_EQUAL PYTORCH_2_5_SYCL_TOOLKIT_VERSION) # Don't use if(LINUX) here since this requires cmake>=3.25 and file is installed # and used by other projects. # See: https://cmake.org/cmake/help/v3.25/variable/LINUX.html if(CMAKE_SYSTEM_NAME MATCHES "Linux") set(sycl_lib_suffix "-preview") elseif(CMAKE_SYSTEM_NAME MATCHES "Windows") # On Windows, the SYCL library is named sycl7.lib until PYTORCH_2_5_SYCL_TOOLKIT_VERSION. 
# sycl.lib is supported in the later version. set(sycl_lib_suffix "7") endif() endif() # Find SYCL library fullname. find_library( SYCL_LIBRARY NAMES "sycl${sycl_lib_suffix}" HINTS ${SYCL_LIBRARY_DIR} NO_DEFAULT_PATH ) # Find OpenCL library fullname, which is a dependency of oneDNN. find_library( OCL_LIBRARY NAMES OpenCL HINTS ${SYCL_LIBRARY_DIR} NO_DEFAULT_PATH ) if((NOT SYCL_LIBRARY) OR (NOT OCL_LIBRARY)) set(SYCL_FOUND False) set(SYCL_REASON_FAILURE "SYCL library is incomplete!!") set(SYCL_NOT_FOUND_MESSAGE "${SYCL_REASON_FAILURE}") return() endif() find_package_handle_standard_args( SYCL FOUND_VAR SYCL_FOUND REQUIRED_VARS SYCL_INCLUDE_DIR SYCL_LIBRARY_DIR SYCL_LIBRARY REASON_FAILURE_MESSAGE "${SYCL_REASON_FAILURE}" VERSION_VAR SYCL_COMPILER_VERSION ) ```
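A consumer would normally export `SYCL_ROOT` (or rely on `CMPLR_ROOT` from a oneAPI environment) and then use the variables this module reports. The sketch below assumes the file is reachable as `FindSYCLToolkit.cmake` on `CMAKE_MODULE_PATH`; the `xpu_kernels` target is illustrative.

```cmake
# Hypothetical consumer of FindSYCLToolkit.cmake.
find_package(SYCLToolkit)  # sets SYCL_FOUND, SYCL_INCLUDE_DIR, SYCL_LIBRARY, ...
if(SYCL_FOUND)
  message(STATUS "SYCL compiler version (major*10000 + minor*100 + patch): ${SYCL_COMPILER_VERSION}")
  target_include_directories(xpu_kernels PRIVATE ${SYCL_INCLUDE_DIR})
  target_link_libraries(xpu_kernels PRIVATE ${SYCL_LIBRARY} ${OCL_LIBRARY})
endif()
```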
================================================================================================================================================ SOURCE CODE FILE: FindCUDA.cmake LINES: 1 SIZE: 0.52 KB PATH: scripts\freecad_env\Lib\site-packages\torch\share\cmake\Caffe2\Modules_CUDA_fix\FindCUDA.cmake ENCODING: utf-8
```cmake
# This is a wrapper of the upstream `./upstream/FindCUDA.cmake` that
# automatically includes `./upstream/CMakeInitializeConfigs.cmake` before
# `./upstream/FindCUDA.cmake`. The `CMakeInitializeConfigs.cmake`, which is
# absent in old CMake versions, creates some necessary variables for the latter
# to run.
# See ./README.md for details.

set(UPSTREAM_FIND_CUDA_DIR "${CMAKE_CURRENT_LIST_DIR}/upstream/")
include("${UPSTREAM_FIND_CUDA_DIR}/CMakeInitializeConfigs.cmake")
include("${UPSTREAM_FIND_CUDA_DIR}/FindCUDA.cmake")
```
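The wrapper only has an effect when it shadows CMake's builtin FindCUDA, i.e. when its directory comes first on `CMAKE_MODULE_PATH`. A sketch follows; the variable holding the torch install prefix is hypothetical.

```cmake
# Illustrative: let this wrapper (and the ./upstream copies) take precedence
# over CMake's builtin FindCUDA.cmake, then search as usual.
list(INSERT CMAKE_MODULE_PATH 0
     "${_torch_install_prefix}/share/cmake/Caffe2/Modules_CUDA_fix")  # prefix variable is hypothetical
find_package(CUDA REQUIRED)
```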
================================================================================================================================================= SOURCE CODE FILE: FindCUDNN.cmake LINES: 1 SIZE: 3.09 KB PATH: scripts\freecad_env\Lib\site-packages\torch\share\cmake\Caffe2\Modules_CUDA_fix\FindCUDNN.cmake ENCODING: utf-8
```cmake
# Find the CUDNN libraries
#
# The following variables are optionally searched for defaults
#  CUDNN_ROOT: Base directory where CUDNN is found
#  CUDNN_INCLUDE_DIR: Directory where CUDNN header is searched for
#  CUDNN_LIBRARY: Directory where CUDNN library is searched for
#  CUDNN_STATIC: Are we looking for a static library? (default: no)
#
# The following are set after configuration is done:
#  CUDNN_FOUND
#  CUDNN_INCLUDE_PATH
#  CUDNN_LIBRARY_PATH
#

include(FindPackageHandleStandardArgs)

set(CUDNN_ROOT $ENV{CUDNN_ROOT_DIR} CACHE PATH "Folder containing NVIDIA cuDNN")
if (DEFINED ENV{CUDNN_ROOT_DIR})
  message(WARNING "CUDNN_ROOT_DIR is deprecated. Please set CUDNN_ROOT instead.")
endif()
list(APPEND CUDNN_ROOT $ENV{CUDNN_ROOT_DIR} ${CUDA_TOOLKIT_ROOT_DIR})

# Compatibility layer for CMake <3.12. For CMake >=3.12, CUDNN_ROOT is taken
# into account when searching for paths and libraries.
list(APPEND CMAKE_PREFIX_PATH ${CUDNN_ROOT})

set(CUDNN_INCLUDE_DIR $ENV{CUDNN_INCLUDE_DIR} CACHE PATH "Folder containing NVIDIA cuDNN header files")

find_path(CUDNN_INCLUDE_PATH cudnn.h
  HINTS ${CUDNN_INCLUDE_DIR}
  PATH_SUFFIXES cuda/include cuda include)

option(CUDNN_STATIC "Look for static CUDNN" OFF)
if (CUDNN_STATIC)
  set(CUDNN_LIBNAME "libcudnn_static.a")
else()
  set(CUDNN_LIBNAME "cudnn")
endif()

set(CUDNN_LIBRARY $ENV{CUDNN_LIBRARY} CACHE PATH "Path to the cudnn library file (e.g., libcudnn.so)")
if (CUDNN_LIBRARY MATCHES ".*cudnn_static.a" AND NOT CUDNN_STATIC)
  message(WARNING "CUDNN_LIBRARY points to a static library (${CUDNN_LIBRARY}) but CUDNN_STATIC is OFF.")
endif()

find_library(CUDNN_LIBRARY_PATH ${CUDNN_LIBNAME}
  PATHS ${CUDNN_LIBRARY}
  PATH_SUFFIXES lib lib64 cuda/lib cuda/lib64 lib/x64)

find_package_handle_standard_args(CUDNN DEFAULT_MSG CUDNN_LIBRARY_PATH CUDNN_INCLUDE_PATH)

if(CUDNN_FOUND)
  # Get cuDNN version
  if(EXISTS ${CUDNN_INCLUDE_PATH}/cudnn_version.h)
    file(READ ${CUDNN_INCLUDE_PATH}/cudnn_version.h CUDNN_HEADER_CONTENTS)
  else()
    file(READ ${CUDNN_INCLUDE_PATH}/cudnn.h CUDNN_HEADER_CONTENTS)
  endif()
  string(REGEX MATCH "define CUDNN_MAJOR * +([0-9]+)" CUDNN_VERSION_MAJOR "${CUDNN_HEADER_CONTENTS}")
  string(REGEX REPLACE "define CUDNN_MAJOR * +([0-9]+)" "\\1" CUDNN_VERSION_MAJOR "${CUDNN_VERSION_MAJOR}")
  string(REGEX MATCH "define CUDNN_MINOR * +([0-9]+)" CUDNN_VERSION_MINOR "${CUDNN_HEADER_CONTENTS}")
  string(REGEX REPLACE "define CUDNN_MINOR * +([0-9]+)" "\\1" CUDNN_VERSION_MINOR "${CUDNN_VERSION_MINOR}")
  string(REGEX MATCH "define CUDNN_PATCHLEVEL * +([0-9]+)" CUDNN_VERSION_PATCH "${CUDNN_HEADER_CONTENTS}")
  string(REGEX REPLACE "define CUDNN_PATCHLEVEL * +([0-9]+)" "\\1" CUDNN_VERSION_PATCH "${CUDNN_VERSION_PATCH}")
  # Assemble cuDNN version
  if(NOT CUDNN_VERSION_MAJOR)
    set(CUDNN_VERSION "?")
  else()
    set(CUDNN_VERSION "${CUDNN_VERSION_MAJOR}.${CUDNN_VERSION_MINOR}.${CUDNN_VERSION_PATCH}")
  endif()
endif()

mark_as_advanced(CUDNN_ROOT CUDNN_INCLUDE_DIR CUDNN_LIBRARY CUDNN_VERSION)
```
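Usage mirrors the sibling modules; in addition, `CUDNN_STATIC` can be switched on before the module runs to search for the static archive. Sketch below; `my_net` is an illustrative target name and the module directory is assumed to be on `CMAKE_MODULE_PATH`.

```cmake
# Hypothetical usage of FindCUDNN.cmake.
set(CUDNN_STATIC ON CACHE BOOL "Look for static CUDNN" FORCE)  # optional; defaults to OFF
find_package(CUDNN)
if(CUDNN_FOUND)
  message(STATUS "cuDNN ${CUDNN_VERSION} headers in ${CUDNN_INCLUDE_PATH}")
  target_include_directories(my_net PRIVATE ${CUDNN_INCLUDE_PATH})
  target_link_libraries(my_net PRIVATE ${CUDNN_LIBRARY_PATH})
endif()
```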
======================================================================================================================================================= SOURCE CODE FILE: CMakeInitializeConfigs.cmake LINES: 1 SIZE: 1.66 KB PATH: scripts\freecad_env\Lib\site-packages\torch\share\cmake\Caffe2\Modules_CUDA_fix\upstream\CMakeInitializeConfigs.cmake ENCODING: utf-8
```cmake
# Distributed under the OSI-approved BSD 3-Clause License. See accompanying
# file Copyright.txt or https://cmake.org/licensing for details.

# Present in upstream, but not supported on versions of cmake we need to support
# include_guard(GLOBAL)

# Initializes `<_PREFIX>_<CONFIG>` variables from the corresponding
# `<_PREFIX>_<CONFIG>_INIT`, for the configurations currently used.
function(cmake_initialize_per_config_variable _PREFIX _DOCSTRING)
  string(STRIP "${${_PREFIX}_INIT}" _INIT)
  set("${_PREFIX}" "${_INIT}"
    CACHE STRING "${_DOCSTRING} during all build types.")
  mark_as_advanced("${_PREFIX}")

  if (NOT CMAKE_NOT_USING_CONFIG_FLAGS)
    set(_CONFIGS Debug Release MinSizeRel RelWithDebInfo)

    get_property(_GENERATOR_IS_MULTI_CONFIG GLOBAL PROPERTY GENERATOR_IS_MULTI_CONFIG)
    if (_GENERATOR_IS_MULTI_CONFIG)
      list(APPEND _CONFIGS ${CMAKE_CONFIGURATION_TYPES})
    else()
      if (NOT CMAKE_NO_BUILD_TYPE)
        set(CMAKE_BUILD_TYPE "${CMAKE_BUILD_TYPE_INIT}" CACHE STRING
          "Choose the type of build, options are: None Debug Release RelWithDebInfo MinSizeRel ...")
      endif()
      list(APPEND _CONFIGS ${CMAKE_BUILD_TYPE})
    endif()

    list(REMOVE_DUPLICATES _CONFIGS)
    foreach(_BUILD_TYPE IN LISTS _CONFIGS)
      if (NOT "${_BUILD_TYPE}" STREQUAL "")
        string(TOUPPER "${_BUILD_TYPE}" _BUILD_TYPE)
        string(STRIP "${${_PREFIX}_${_BUILD_TYPE}_INIT}" _INIT)
        set("${_PREFIX}_${_BUILD_TYPE}" "${_INIT}"
          CACHE STRING "${_DOCSTRING} during ${_BUILD_TYPE} builds.")
        mark_as_advanced("${_PREFIX}_${_BUILD_TYPE}")
      endif()
    endforeach()
  endif()
endfunction()
```
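The helper above seeds a flag variable and its per-configuration variants from their `*_INIT` counterparts. A minimal sketch of a call; the flag values are illustrative only:

```cmake
# After this call, CMAKE_CUDA_FLAGS and CMAKE_CUDA_FLAGS_<CONFIG> exist as
# (advanced) cache entries seeded from the *_INIT values below.
set(CMAKE_CUDA_FLAGS_INIT "-Xcompiler -Wall")
set(CMAKE_CUDA_FLAGS_RELEASE_INIT "-O3")
cmake_initialize_per_config_variable(CMAKE_CUDA_FLAGS "Flags used by the CUDA compiler")
```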
========================================================================================================================================================= SOURCE CODE FILE: FindCUDA.cmake LINES: 5 SIZE: 86.56 KB PATH: scripts\freecad_env\Lib\site-packages\torch\share\cmake\Caffe2\Modules_CUDA_fix\upstream\FindCUDA.cmake ENCODING: utf-8 ```cmake #.rst: # FindCUDA # -------- # # .. note:: # # The FindCUDA module has been superseded by first-class support # for the CUDA language in CMake. It is no longer necessary to # use this module or call ``find_package(CUDA)``. This module # now exists only for compatibility with projects that have not # been ported. # # Instead, list ``CUDA`` among the languages named in the top-level # call to the :command:`project` command, or call the # :command:`enable_language` command with ``CUDA``. # Then one can add CUDA (``.cu``) sources to programs directly # in calls to :command:`add_library` and :command:`add_executable`. # # Tools for building CUDA C files: libraries and build dependencies. # # This script locates the NVIDIA CUDA C tools. It should work on Linux, # Windows, and macOS and should be reasonably up to date with CUDA C # releases. # # This script makes use of the standard :command:`find_package` arguments of # ``<VERSION>``, ``REQUIRED`` and ``QUIET``. ``CUDA_FOUND`` will report if an # acceptable version of CUDA was found. # # The script will prompt the user to specify ``CUDA_TOOLKIT_ROOT_DIR`` if # the prefix cannot be determined by the location of nvcc in the system # path and ``REQUIRED`` is specified to :command:`find_package`. To use # a different installed version of the toolkit set the environment variable # ``CUDA_BIN_PATH`` before running cmake (e.g. # ``CUDA_BIN_PATH=/usr/local/cuda1.0`` instead of the default # ``/usr/local/cuda``) or set ``CUDA_TOOLKIT_ROOT_DIR`` after configuring. If # you change the value of ``CUDA_TOOLKIT_ROOT_DIR``, various components that # depend on the path will be relocated. # # It might be necessary to set ``CUDA_TOOLKIT_ROOT_DIR`` manually on certain # platforms, or to use a CUDA runtime not installed in the default # location. In newer versions of the toolkit the CUDA library is # included with the graphics driver -- be sure that the driver version # matches what is needed by the CUDA runtime version. # # The following variables affect the behavior of the macros in the # script (in alphebetical order). Note that any of these flags can be # changed multiple times in the same directory before calling # ``CUDA_ADD_EXECUTABLE``, ``CUDA_ADD_LIBRARY``, ``CUDA_COMPILE``, # ``CUDA_COMPILE_PTX``, ``CUDA_COMPILE_FATBIN``, ``CUDA_COMPILE_CUBIN`` # or ``CUDA_WRAP_SRCS``:: # # CUDA_64_BIT_DEVICE_CODE (Default matches host bit size) # -- Set to ON to compile for 64 bit device code, OFF for 32 bit device code. # Note that making this different from the host code when generating object # or C files from CUDA code just won't work, because size_t gets defined by # nvcc in the generated source. If you compile to PTX and then load the # file yourself, you can mix bit sizes between device and host. # # CUDA_ATTACH_VS_BUILD_RULE_TO_CUDA_FILE (Default ON) # -- Set to ON if you want the custom build rule to be attached to the source # file in Visual Studio. Turn OFF if you add the same cuda file to multiple # targets. # # This allows the user to build the target from the CUDA file; however, bad # things can happen if the CUDA source file is added to multiple targets. 
# When performing parallel builds it is possible for the custom build # command to be run more than once and in parallel causing cryptic build # errors. VS runs the rules for every source file in the target, and a # source can have only one rule no matter how many projects it is added to. # When the rule is run from multiple targets race conditions can occur on # the generated file. Eventually everything will get built, but if the user # is unaware of this behavior, there may be confusion. It would be nice if # this script could detect the reuse of source files across multiple targets # and turn the option off for the user, but no good solution could be found. # # CUDA_BUILD_CUBIN (Default OFF) # -- Set to ON to enable and extra compilation pass with the -cubin option in # Device mode. The output is parsed and register, shared memory usage is # printed during build. # # CUDA_BUILD_EMULATION (Default OFF for device mode) # -- Set to ON for Emulation mode. -D_DEVICEEMU is defined for CUDA C files # when CUDA_BUILD_EMULATION is TRUE. # # CUDA_LINK_LIBRARIES_KEYWORD (Default "") # -- The <PRIVATE|PUBLIC|INTERFACE> keyword to use for internal # target_link_libraries calls. The default is to use no keyword which # uses the old "plain" form of target_link_libraries. Note that is matters # because whatever is used inside the FindCUDA module must also be used # outside - the two forms of target_link_libraries cannot be mixed. # # CUDA_GENERATED_OUTPUT_DIR (Default CMAKE_CURRENT_BINARY_DIR) # -- Set to the path you wish to have the generated files placed. If it is # blank output files will be placed in CMAKE_CURRENT_BINARY_DIR. # Intermediate files will always be placed in # CMAKE_CURRENT_BINARY_DIR/CMakeFiles. # # CUDA_HOST_COMPILATION_CPP (Default ON) # -- Set to OFF for C compilation of host code. # # CUDA_HOST_COMPILER (Default CMAKE_C_COMPILER) # -- Set the host compiler to be used by nvcc. Ignored if -ccbin or # --compiler-bindir is already present in the CUDA_NVCC_FLAGS or # CUDA_NVCC_FLAGS_<CONFIG> variables. For Visual Studio targets, # the host compiler is constructed with one or more visual studio macros # such as $(VCInstallDir), that expands out to the path when # the command is run from within VS. # If the CUDAHOSTCXX environment variable is set it will # be used as the default. # # CUDA_NVCC_FLAGS # CUDA_NVCC_FLAGS_<CONFIG> # -- Additional NVCC command line arguments. NOTE: multiple arguments must be # semi-colon delimited (e.g. --compiler-options;-Wall) # # CUDA_PROPAGATE_HOST_FLAGS (Default ON) # -- Set to ON to propagate CMAKE_{C,CXX}_FLAGS and their configuration # dependent counterparts (e.g. CMAKE_C_FLAGS_DEBUG) automatically to the # host compiler through nvcc's -Xcompiler flag. This helps make the # generated host code match the rest of the system better. Sometimes # certain flags give nvcc problems, and this will help you turn the flag # propagation off. This does not affect the flags supplied directly to nvcc # via CUDA_NVCC_FLAGS or through the OPTION flags specified through # CUDA_ADD_LIBRARY, CUDA_ADD_EXECUTABLE, or CUDA_WRAP_SRCS. Flags used for # shared library compilation are not affected by this flag. # # CUDA_PROPAGATE_HOST_FLAGS_BLACKLIST (Default "") # -- A list containing the host flags that should not be propagated when # CUDA_PROPAGATE_HOST_FLAGS is ON. # # CUDA_SEPARABLE_COMPILATION (Default OFF) # -- If set this will enable separable compilation for all CUDA runtime object # files. If used outside of CUDA_ADD_EXECUTABLE and CUDA_ADD_LIBRARY # (e.g. 
calling CUDA_WRAP_SRCS directly), # CUDA_COMPUTE_SEPARABLE_COMPILATION_OBJECT_FILE_NAME and # CUDA_LINK_SEPARABLE_COMPILATION_OBJECTS should be called. # # CUDA_SOURCE_PROPERTY_FORMAT # -- If this source file property is set, it can override the format specified # to CUDA_WRAP_SRCS (OBJ, PTX, CUBIN, or FATBIN). If an input source file # is not a .cu file, setting this file will cause it to be treated as a .cu # file. See documentation for set_source_files_properties on how to set # this property. # # CUDA_USE_STATIC_CUDA_RUNTIME (Default ON) # -- When enabled the static version of the CUDA runtime library will be used # in CUDA_LIBRARIES. If the version of CUDA configured doesn't support # this option, then it will be silently disabled. # # CUDA_VERBOSE_BUILD (Default OFF) # -- Set to ON to see all the commands used when building the CUDA file. When # using a Makefile generator the value defaults to VERBOSE (run make # VERBOSE=1 to see output), although setting CUDA_VERBOSE_BUILD to ON will # always print the output. # # The script creates the following macros (in alphebetical order):: # # CUDA_ADD_CUFFT_TO_TARGET( cuda_target ) # -- Adds the cufft library to the target (can be any target). Handles whether # you are in emulation mode or not. # # CUDA_ADD_CUBLAS_TO_TARGET( cuda_target ) # -- Adds the cublas library to the target (can be any target). Handles # whether you are in emulation mode or not. # # CUDA_ADD_EXECUTABLE( cuda_target file0 file1 ... # [WIN32] [MACOSX_BUNDLE] [EXCLUDE_FROM_ALL] [OPTIONS ...] ) # -- Creates an executable "cuda_target" which is made up of the files # specified. All of the non CUDA C files are compiled using the standard # build rules specified by CMAKE and the cuda files are compiled to object # files using nvcc and the host compiler. In addition CUDA_INCLUDE_DIRS is # added automatically to include_directories(). Some standard CMake target # calls can be used on the target after calling this macro # (e.g. set_target_properties and target_link_libraries), but setting # properties that adjust compilation flags will not affect code compiled by # nvcc. Such flags should be modified before calling CUDA_ADD_EXECUTABLE, # CUDA_ADD_LIBRARY or CUDA_WRAP_SRCS. # # CUDA_ADD_LIBRARY( cuda_target file0 file1 ... # [STATIC | SHARED | MODULE] [EXCLUDE_FROM_ALL] [OPTIONS ...] ) # -- Same as CUDA_ADD_EXECUTABLE except that a library is created. # # CUDA_BUILD_CLEAN_TARGET() # -- Creates a convenience target that deletes all the dependency files # generated. You should make clean after running this target to ensure the # dependency files get regenerated. # # CUDA_COMPILE( generated_files file0 file1 ... [STATIC | SHARED | MODULE] # [OPTIONS ...] ) # -- Returns a list of generated files from the input source files to be used # with ADD_LIBRARY or ADD_EXECUTABLE. # # CUDA_COMPILE_PTX( generated_files file0 file1 ... [OPTIONS ...] ) # -- Returns a list of PTX files generated from the input source files. # # CUDA_COMPILE_FATBIN( generated_files file0 file1 ... [OPTIONS ...] ) # -- Returns a list of FATBIN files generated from the input source files. # # CUDA_COMPILE_CUBIN( generated_files file0 file1 ... [OPTIONS ...] ) # -- Returns a list of CUBIN files generated from the input source files. # # CUDA_COMPUTE_SEPARABLE_COMPILATION_OBJECT_FILE_NAME( output_file_var # cuda_target # object_files ) # -- Compute the name of the intermediate link file used for separable # compilation. This file name is typically passed into # CUDA_LINK_SEPARABLE_COMPILATION_OBJECTS. 
output_file_var is produced # based on cuda_target the list of objects files that need separable # compilation as specified by object_files. If the object_files list is # empty, then output_file_var will be empty. This function is called # automatically for CUDA_ADD_LIBRARY and CUDA_ADD_EXECUTABLE. Note that # this is a function and not a macro. # # CUDA_INCLUDE_DIRECTORIES( path0 path1 ... ) # -- Sets the directories that should be passed to nvcc # (e.g. nvcc -Ipath0 -Ipath1 ... ). These paths usually contain other .cu # files. # # # CUDA_LINK_SEPARABLE_COMPILATION_OBJECTS( output_file_var cuda_target # nvcc_flags object_files) # -- Generates the link object required by separable compilation from the given # object files. This is called automatically for CUDA_ADD_EXECUTABLE and # CUDA_ADD_LIBRARY, but can be called manually when using CUDA_WRAP_SRCS # directly. When called from CUDA_ADD_LIBRARY or CUDA_ADD_EXECUTABLE the # nvcc_flags passed in are the same as the flags passed in via the OPTIONS # argument. The only nvcc flag added automatically is the bitness flag as # specified by CUDA_64_BIT_DEVICE_CODE. Note that this is a function # instead of a macro. # # CUDA_SELECT_NVCC_ARCH_FLAGS(out_variable [target_CUDA_architectures]) # -- Selects GPU arch flags for nvcc based on target_CUDA_architectures # target_CUDA_architectures : Auto | Common | All | LIST(ARCH_AND_PTX ...) # - "Auto" detects local machine GPU compute arch at runtime. # - "Common" and "All" cover common and entire subsets of architectures # ARCH_AND_PTX : NAME | NUM.NUM | NUM.NUM(NUM.NUM) | NUM.NUM+PTX # NAME: Kepler Maxwell Kepler+Tesla Maxwell+Tegra Pascal Volta Turing # NUM: Any number. Only those pairs are currently accepted by NVCC though: # 3.5 3.7 5.0 5.2 5.3 6.0 6.1 6.2 7.0 7.2 7.5 # Returns LIST of flags to be added to CUDA_NVCC_FLAGS in ${out_variable} # Additionally, sets ${out_variable}_readable to the resulting numeric list # Example: # CUDA_SELECT_NVCC_ARCH_FLAGS(ARCH_FLAGS 3.0 3.5+PTX 5.2(5.0) Maxwell) # LIST(APPEND CUDA_NVCC_FLAGS ${ARCH_FLAGS}) # # More info on CUDA architectures: https://en.wikipedia.org/wiki/CUDA # Note that this is a function instead of a macro. # # CUDA_WRAP_SRCS ( cuda_target format generated_files file0 file1 ... # [STATIC | SHARED | MODULE] [OPTIONS ...] ) # -- This is where all the magic happens. CUDA_ADD_EXECUTABLE, # CUDA_ADD_LIBRARY, CUDA_COMPILE, and CUDA_COMPILE_PTX all call this # function under the hood. # # Given the list of files (file0 file1 ... fileN) this macro generates # custom commands that generate either PTX or linkable objects (use "PTX" or # "OBJ" for the format argument to switch). Files that don't end with .cu # or have the HEADER_FILE_ONLY property are ignored. # # The arguments passed in after OPTIONS are extra command line options to # give to nvcc. You can also specify per configuration options by # specifying the name of the configuration followed by the options. General # options must precede configuration specific options. Not all # configurations need to be specified, only the ones provided will be used. # # OPTIONS -DFLAG=2 "-DFLAG_OTHER=space in flag" # DEBUG -g # RELEASE --use_fast_math # RELWITHDEBINFO --use_fast_math;-g # MINSIZEREL --use_fast_math # # For certain configurations (namely VS generating object files with # CUDA_ATTACH_VS_BUILD_RULE_TO_CUDA_FILE set to ON), no generated file will # be produced for the given cuda file. 
#      This is because when you add the cuda file to Visual Studio it knows that
#      this file produces an object file and will link in the resulting object
#      file automatically.
#
#      This script will also generate a separate cmake script that is used at
#      build time to invoke nvcc. This is for several reasons.
#
#        1. nvcc can return negative numbers as return values which confuses
#           Visual Studio into thinking that the command succeeded. The script
#           now checks the error codes and produces errors when there was a
#           problem.
#
#        2. nvcc has been known to not delete incomplete results when it
#           encounters problems. This confuses build systems into thinking the
#           target was generated when in fact an unusable file exists. The
#           script now deletes the output files if there was an error.
#
#        3. By putting all the options that affect the build into a file and
#           then making the build rule dependent on the file, the output files
#           will be regenerated when the options change.
#
#      This script also looks at optional arguments STATIC, SHARED, or MODULE to
#      determine when to target the object compilation for a shared library.
#      BUILD_SHARED_LIBS is ignored in CUDA_WRAP_SRCS, but it is respected in
#      CUDA_ADD_LIBRARY. On some systems special flags are added for building
#      objects intended for shared libraries. A preprocessor macro,
#      <target_name>_EXPORTS is defined when a shared library compilation is
#      detected.
#
#      Flags passed into add_definitions with -D or /D are passed along to nvcc.
#
#
#
# The script defines the following variables::
#
#   CUDA_VERSION_MAJOR    -- The major version of cuda as reported by nvcc.
#   CUDA_VERSION_MINOR    -- The minor version.
#   CUDA_VERSION
#   CUDA_VERSION_STRING   -- CUDA_VERSION_MAJOR.CUDA_VERSION_MINOR
#   CUDA_HAS_FP16         -- Whether a short float (float16,fp16) is supported.
#
#   CUDA_TOOLKIT_ROOT_DIR -- Path to the CUDA Toolkit (defined if not set).
#   CUDA_SDK_ROOT_DIR     -- Path to the CUDA SDK. Use this to find files in the
#                            SDK. This script will not directly support finding
#                            specific libraries or headers, as that isn't
#                            supported by NVIDIA. If you want to change
#                            libraries when the path changes see the
#                            FindCUDA.cmake script for an example of how to clear
#                            these variables. There are also examples of how to
#                            use the CUDA_SDK_ROOT_DIR to locate headers or
#                            libraries, if you so choose (at your own risk).
#   CUDA_INCLUDE_DIRS     -- Include directory for cuda headers. Added
#                            automatically for CUDA_ADD_EXECUTABLE and
#                            CUDA_ADD_LIBRARY.
#   CUDA_LIBRARIES        -- Cuda RT library.
#   CUDA_CUFFT_LIBRARIES  -- Device or emulation library for the Cuda FFT
#                            implementation (alternative to:
#                            CUDA_ADD_CUFFT_TO_TARGET macro)
#   CUDA_CUBLAS_LIBRARIES -- Device or emulation library for the Cuda BLAS
#                            implementation (alternative to:
#                            CUDA_ADD_CUBLAS_TO_TARGET macro).
#   CUDA_cudart_static_LIBRARY -- Statically linkable cuda runtime library.
#                                 Only available for CUDA version 5.5+
#   CUDA_cudadevrt_LIBRARY -- Device runtime library.
#                             Required for separable compilation.
#   CUDA_cupti_LIBRARY    -- CUDA Profiling Tools Interface library.
#                            Only available for CUDA version 4.0+.
#   CUDA_curand_LIBRARY   -- CUDA Random Number Generation library.
#                            Only available for CUDA version 3.2+.
#   CUDA_cusolver_LIBRARY -- CUDA Direct Solver library.
#                            Only available for CUDA version 7.0+.
#   CUDA_cusparse_LIBRARY -- CUDA Sparse Matrix library.
#                            Only available for CUDA version 3.2+.
#   CUDA_npp_LIBRARY      -- NVIDIA Performance Primitives lib.
#                            Only available for CUDA version 4.0+.
#   CUDA_nppc_LIBRARY     -- NVIDIA Performance Primitives lib (core).
#                            Only available for CUDA version 5.5+.
#   CUDA_nppi_LIBRARY     -- NVIDIA Performance Primitives lib (image processing).
#                            Only available for CUDA version 5.5 - 8.0.
#   CUDA_nppial_LIBRARY   -- NVIDIA Performance Primitives lib (image processing).
#                            Only available for CUDA version 9.0.
#   CUDA_nppicc_LIBRARY   -- NVIDIA Performance Primitives lib (image processing).
#                            Only available for CUDA version 9.0.
#   CUDA_nppicom_LIBRARY  -- NVIDIA Performance Primitives lib (image processing).
#                            Only available for CUDA version 9.0.
#   CUDA_nppidei_LIBRARY  -- NVIDIA Performance Primitives lib (image processing).
#                            Only available for CUDA version 9.0.
#   CUDA_nppif_LIBRARY    -- NVIDIA Performance Primitives lib (image processing).
#                            Only available for CUDA version 9.0.
#   CUDA_nppig_LIBRARY    -- NVIDIA Performance Primitives lib (image processing).
#                            Only available for CUDA version 9.0.
#   CUDA_nppim_LIBRARY    -- NVIDIA Performance Primitives lib (image processing).
#                            Only available for CUDA version 9.0.
#   CUDA_nppist_LIBRARY   -- NVIDIA Performance Primitives lib (image processing).
#                            Only available for CUDA version 9.0.
#   CUDA_nppisu_LIBRARY   -- NVIDIA Performance Primitives lib (image processing).
#                            Only available for CUDA version 9.0.
#   CUDA_nppitc_LIBRARY   -- NVIDIA Performance Primitives lib (image processing).
#                            Only available for CUDA version 9.0.
#   CUDA_npps_LIBRARY     -- NVIDIA Performance Primitives lib (signal processing).
#                            Only available for CUDA version 5.5+.
#   CUDA_nvcuvenc_LIBRARY -- CUDA Video Encoder library.
#                            Only available for CUDA version 3.2+.
#                            Windows only.
#   CUDA_nvcuvid_LIBRARY  -- CUDA Video Decoder library.
#                            Only available for CUDA version 3.2+.
#                            Windows only.
#
#   James Bigler, NVIDIA Corp (nvidia.com - jbigler)
#   Abe Stephens, SCI Institute -- http://www.sci.utah.edu/~abe/FindCuda.html
#
#   Copyright (c) 2008 - 2009 NVIDIA Corporation. All rights reserved.
#
#   Copyright (c) 2007-2009
#   Scientific Computing and Imaging Institute, University of Utah
#
#   This code is licensed under the MIT License. See the FindCUDA.cmake script
#   for the text of the license.
#
# The MIT License
#
# License for the specific language governing rights and limitations under
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
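#
# Example usage (an illustrative sketch only; the target and source file names
# below are hypothetical and not part of this module). It strings together the
# macros documented above:
#
#   find_package(CUDA REQUIRED)
#   cuda_select_nvcc_arch_flags(ARCH_FLAGS Auto)
#   list(APPEND CUDA_NVCC_FLAGS ${ARCH_FLAGS})
#   cuda_include_directories(${CMAKE_CURRENT_SOURCE_DIR}/include)
#   cuda_add_library(my_kernels STATIC kernels.cu OPTIONS -DNDEBUG DEBUG -g)
#   cuda_add_executable(my_app main.cpp app.cu)
#   target_link_libraries(my_app my_kernels)
#   cuda_add_cublas_to_target(my_app)
#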
# ############################################################################### # FindCUDA.cmake # This macro helps us find the location of helper files we will need the full path to macro(CUDA_FIND_HELPER_FILE _name _extension) set(_full_name "${_name}.${_extension}") # CMAKE_CURRENT_LIST_FILE contains the full path to the file currently being # processed. Using this variable, we can pull out the current path, and # provide a way to get access to the other files we need local to here. get_filename_component(CMAKE_CURRENT_LIST_DIR "${CMAKE_CURRENT_LIST_FILE}" PATH) set(CUDA_${_name} "${CMAKE_CURRENT_LIST_DIR}/FindCUDA/${_full_name}") if(NOT EXISTS "${CUDA_${_name}}") set(error_message "${_full_name} not found in ${CMAKE_CURRENT_LIST_DIR}/FindCUDA") if(CUDA_FIND_REQUIRED) message(FATAL_ERROR "${error_message}") else() if(NOT CUDA_FIND_QUIETLY) message(STATUS "${error_message}") endif() endif() endif() # Set this variable as internal, so the user isn't bugged with it. set(CUDA_${_name} ${CUDA_${_name}} CACHE INTERNAL "Location of ${_full_name}" FORCE) endmacro() ##################################################################### ## CUDA_INCLUDE_NVCC_DEPENDENCIES ## # So we want to try and include the dependency file if it exists. If # it doesn't exist then we need to create an empty one, so we can # include it. # If it does exist, then we need to check to see if all the files it # depends on exist. If they don't then we should clear the dependency # file and regenerate it later. This covers the case where a header # file has disappeared or moved. macro(CUDA_INCLUDE_NVCC_DEPENDENCIES dependency_file) set(CUDA_NVCC_DEPEND) set(CUDA_NVCC_DEPEND_REGENERATE FALSE) # Include the dependency file. Create it first if it doesn't exist . The # INCLUDE puts a dependency that will force CMake to rerun and bring in the # new info when it changes. DO NOT REMOVE THIS (as I did and spent a few # hours figuring out why it didn't work. if(NOT EXISTS ${dependency_file}) file(WRITE ${dependency_file} "#FindCUDA.cmake generated file. Do not edit.\n") endif() # Always include this file to force CMake to run again next # invocation and rebuild the dependencies. #message("including dependency_file = ${dependency_file}") include(${dependency_file}) # Now we need to verify the existence of all the included files # here. If they aren't there we need to just blank this variable and # make the file regenerate again. # if(DEFINED CUDA_NVCC_DEPEND) # message("CUDA_NVCC_DEPEND set") # else() # message("CUDA_NVCC_DEPEND NOT set") # endif() if(CUDA_NVCC_DEPEND) #message("CUDA_NVCC_DEPEND found") foreach(f ${CUDA_NVCC_DEPEND}) # message("searching for ${f}") if(NOT EXISTS ${f}) #message("file ${f} not found") set(CUDA_NVCC_DEPEND_REGENERATE TRUE) endif() endforeach() else() #message("CUDA_NVCC_DEPEND false") # No dependencies, so regenerate the file. set(CUDA_NVCC_DEPEND_REGENERATE TRUE) endif() #message("CUDA_NVCC_DEPEND_REGENERATE = ${CUDA_NVCC_DEPEND_REGENERATE}") # No incoming dependencies, so we need to generate them. Make the # output depend on the dependency file itself, which should cause the # rule to re-run. if(CUDA_NVCC_DEPEND_REGENERATE) set(CUDA_NVCC_DEPEND ${dependency_file}) #message("Generating an empty dependency_file: ${dependency_file}") file(WRITE ${dependency_file} "#FindCUDA.cmake generated file. 
Do not edit.\n") endif() endmacro() ############################################################################### ############################################################################### # Setup variables' defaults ############################################################################### ############################################################################### # Allow the user to specify if the device code is supposed to be 32 or 64 bit. if(CMAKE_SIZEOF_VOID_P EQUAL 8) set(CUDA_64_BIT_DEVICE_CODE_DEFAULT ON) else() set(CUDA_64_BIT_DEVICE_CODE_DEFAULT OFF) endif() option(CUDA_64_BIT_DEVICE_CODE "Compile device code in 64 bit mode" ${CUDA_64_BIT_DEVICE_CODE_DEFAULT}) # Attach the build rule to the source file in VS. This option option(CUDA_ATTACH_VS_BUILD_RULE_TO_CUDA_FILE "Attach the build rule to the CUDA source file. Enable only when the CUDA source file is added to at most one target." ON) # Prints out extra information about the cuda file during compilation option(CUDA_BUILD_CUBIN "Generate and parse .cubin files in Device mode." OFF) # Set whether we are using emulation or device mode. option(CUDA_BUILD_EMULATION "Build in Emulation mode" OFF) # Where to put the generated output. set(CUDA_GENERATED_OUTPUT_DIR "" CACHE PATH "Directory to put all the output files. If blank it will default to the CMAKE_CURRENT_BINARY_DIR") # Parse HOST_COMPILATION mode. option(CUDA_HOST_COMPILATION_CPP "Generated file extension" ON) # Extra user settable flags cmake_initialize_per_config_variable(CUDA_NVCC_FLAGS "Semi-colon delimit multiple arguments.") if(DEFINED ENV{CUDAHOSTCXX}) set(CUDA_HOST_COMPILER "$ENV{CUDAHOSTCXX}" CACHE FILEPATH "Host side compiler used by NVCC") elseif(CMAKE_GENERATOR MATCHES "Visual Studio") set(_CUDA_MSVC_HOST_COMPILER "$(VCInstallDir)Tools/MSVC/$(VCToolsVersion)/bin/Host$(Platform)/$(PlatformTarget)") if(MSVC_VERSION LESS 1910) set(_CUDA_MSVC_HOST_COMPILER "$(VCInstallDir)bin") endif() set(CUDA_HOST_COMPILER "${_CUDA_MSVC_HOST_COMPILER}" CACHE FILEPATH "Host side compiler used by NVCC") else() if(APPLE AND "${CMAKE_C_COMPILER_ID}" MATCHES "Clang" AND "${CMAKE_C_COMPILER}" MATCHES "/cc$") # Using cc which is symlink to clang may let NVCC think it is GCC and issue # unhandled -dumpspecs option to clang. Also in case neither # CMAKE_C_COMPILER is defined (project does not use C language) nor # CUDA_HOST_COMPILER is specified manually we should skip -ccbin and let # nvcc use its own default C compiler. # Only care about this on APPLE with clang to avoid # following symlinks to things like ccache if(DEFINED CMAKE_C_COMPILER AND NOT DEFINED CUDA_HOST_COMPILER) get_filename_component(c_compiler_realpath "${CMAKE_C_COMPILER}" REALPATH) # if the real path does not end up being clang then # go back to using CMAKE_C_COMPILER if(NOT "${c_compiler_realpath}" MATCHES "/clang$") set(c_compiler_realpath "${CMAKE_C_COMPILER}") endif() else() set(c_compiler_realpath "") endif() set(CUDA_HOST_COMPILER "${c_compiler_realpath}" CACHE FILEPATH "Host side compiler used by NVCC") elseif(MSVC AND "${CMAKE_C_COMPILER}" MATCHES "clcache|sccache") # NVCC does not think it will work if it is passed clcache.exe or sccache.exe # as the host compiler, which means that builds with CC=cl.exe won't work. # Best to just feed it whatever the actual cl.exe is as the host compiler. 
set(CUDA_HOST_COMPILER "cl.exe" CACHE FILEPATH "Host side compiler used by NVCC") else() set(CUDA_HOST_COMPILER "${CMAKE_C_COMPILER}" CACHE FILEPATH "Host side compiler used by NVCC") endif() endif() # Propagate the host flags to the host compiler via -Xcompiler option(CUDA_PROPAGATE_HOST_FLAGS "Propagate C/CXX_FLAGS and friends to the host compiler via -Xcompile" ON) # Blacklisted flags to prevent propagation set(CUDA_PROPAGATE_HOST_FLAGS_BLACKLIST "" CACHE STRING "Blacklisted flags to prevent propagation") # Enable CUDA_SEPARABLE_COMPILATION option(CUDA_SEPARABLE_COMPILATION "Compile CUDA objects with separable compilation enabled. Requires CUDA 5.0+" OFF) # Specifies whether the commands used when compiling the .cu file will be printed out. option(CUDA_VERBOSE_BUILD "Print out the commands run while compiling the CUDA source file. With the Makefile generator this defaults to VERBOSE variable specified on the command line, but can be forced on with this option." OFF) mark_as_advanced( CUDA_64_BIT_DEVICE_CODE CUDA_ATTACH_VS_BUILD_RULE_TO_CUDA_FILE CUDA_GENERATED_OUTPUT_DIR CUDA_HOST_COMPILATION_CPP CUDA_NVCC_FLAGS CUDA_PROPAGATE_HOST_FLAGS CUDA_PROPAGATE_HOST_FLAGS_BLACKLIST CUDA_BUILD_CUBIN CUDA_BUILD_EMULATION CUDA_VERBOSE_BUILD CUDA_SEPARABLE_COMPILATION ) # Single config generators like Makefiles or Ninja don't usually have # CMAKE_CONFIGURATION_TYPES defined (but note that it can be defined if set by # projects or developers). Even CMAKE_BUILD_TYPE might not be defined for # single config generators (and should not be defined for multi-config # generators). To ensure we get a complete superset of all possible # configurations, we combine CMAKE_CONFIGURATION_TYPES, CMAKE_BUILD_TYPE and # all of the standard configurations, then weed out duplicates with # list(REMOVE_DUPLICATES). Looping over the unique set then ensures we have # each configuration-specific set of nvcc flags defined and marked as advanced. set(CUDA_configuration_types ${CMAKE_CONFIGURATION_TYPES} ${CMAKE_BUILD_TYPE} Debug MinSizeRel Release RelWithDebInfo) list(REMOVE_DUPLICATES CUDA_configuration_types) ############################################################################### ############################################################################### # Locate CUDA, Set Build Type, etc. ############################################################################### ############################################################################### macro(cuda_unset_include_and_libraries) unset(CUDA_TOOLKIT_INCLUDE CACHE) unset(CUDA_CUDART_LIBRARY CACHE) unset(CUDA_CUDA_LIBRARY CACHE) # Make sure you run this before you unset CUDA_VERSION. unset(CUDA_cudart_static_LIBRARY CACHE) unset(CUDA_cudadevrt_LIBRARY CACHE) unset(CUDA_cublas_LIBRARY CACHE) unset(CUDA_cublas_device_LIBRARY CACHE) unset(CUDA_cublasemu_LIBRARY CACHE) unset(CUDA_cublasLt_LIBRARY CACHE) unset(CUDA_cufft_LIBRARY CACHE) unset(CUDA_cufftemu_LIBRARY CACHE) unset(CUDA_cupti_LIBRARY CACHE) unset(CUDA_curand_LIBRARY CACHE) unset(CUDA_cusolver_LIBRARY CACHE) unset(CUDA_cusparse_LIBRARY CACHE) unset(CUDA_npp_LIBRARY CACHE) unset(CUDA_nppc_LIBRARY CACHE) unset(CUDA_nppi_LIBRARY CACHE) unset(CUDA_npps_LIBRARY CACHE) unset(CUDA_nvcuvenc_LIBRARY CACHE) unset(CUDA_nvcuvid_LIBRARY CACHE) unset(CUDA_GPU_DETECT_OUTPUT CACHE) endmacro() # Check to see if the CUDA_TOOLKIT_ROOT_DIR and CUDA_SDK_ROOT_DIR have changed, # if they have then clear the cache variables, so that will be detected again. 
if(NOT "${CUDA_TOOLKIT_ROOT_DIR}" STREQUAL "${CUDA_TOOLKIT_ROOT_DIR_INTERNAL}") unset(CUDA_TOOLKIT_TARGET_DIR CACHE) unset(CUDA_NVCC_EXECUTABLE CACHE) cuda_unset_include_and_libraries() unset(CUDA_VERSION CACHE) endif() if(NOT "${CUDA_TOOLKIT_TARGET_DIR}" STREQUAL "${CUDA_TOOLKIT_TARGET_DIR_INTERNAL}") cuda_unset_include_and_libraries() endif() # # End of unset() # # # Start looking for things # # Search for the cuda distribution. if(NOT CUDA_TOOLKIT_ROOT_DIR AND NOT CMAKE_CROSSCOMPILING) # Search in the CUDA_BIN_PATH first. find_program(CUDA_TOOLKIT_ROOT_DIR_NVCC NAMES nvcc nvcc.exe PATHS ENV CUDA_TOOLKIT_ROOT ENV CUDA_PATH ENV CUDA_BIN_PATH PATH_SUFFIXES bin bin64 DOC "Toolkit location." NO_DEFAULT_PATH ) # Now search default paths find_program(CUDA_TOOLKIT_ROOT_DIR_NVCC NAMES nvcc nvcc.exe PATHS /opt/cuda/bin PATH_SUFFIXES cuda/bin DOC "Toolkit location." ) if (CUDA_TOOLKIT_ROOT_DIR_NVCC) get_filename_component(CUDA_TOOLKIT_ROOT_DIR_NVCC_PAR "${CUDA_TOOLKIT_ROOT_DIR_NVCC}" DIRECTORY) get_filename_component(CUDA_TOOLKIT_ROOT_DIR "${CUDA_TOOLKIT_ROOT_DIR_NVCC_PAR}" DIRECTORY CACHE) string(REGEX REPLACE "[/\\\\]?bin[64]*[/\\\\]?$" "" CUDA_TOOLKIT_ROOT_DIR ${CUDA_TOOLKIT_ROOT_DIR}) # We need to force this back into the cache. set(CUDA_TOOLKIT_ROOT_DIR ${CUDA_TOOLKIT_ROOT_DIR} CACHE PATH "Toolkit location." FORCE) set(CUDA_TOOLKIT_TARGET_DIR ${CUDA_TOOLKIT_ROOT_DIR}) endif() unset(CUDA_TOOLKIT_ROOT_DIR_NVCC CACHE) if (NOT EXISTS ${CUDA_TOOLKIT_ROOT_DIR}) if(CUDA_FIND_REQUIRED) message(FATAL_ERROR "Specify CUDA_TOOLKIT_ROOT_DIR") elseif(NOT CUDA_FIND_QUIETLY) message("CUDA_TOOLKIT_ROOT_DIR not found or specified") endif() endif () endif () if(CMAKE_CROSSCOMPILING) SET (CUDA_TOOLKIT_ROOT $ENV{CUDA_TOOLKIT_ROOT}) if(CMAKE_SYSTEM_PROCESSOR STREQUAL "armv7-a") # Support for NVPACK set (CUDA_TOOLKIT_TARGET_NAMES "armv7-linux-androideabi") elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "arm") # Support for arm cross compilation set(CUDA_TOOLKIT_TARGET_NAMES "armv7-linux-gnueabihf") elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64") # Support for aarch64 cross compilation if (ANDROID_ARCH_NAME STREQUAL "arm64") set(CUDA_TOOLKIT_TARGET_NAMES "aarch64-linux-androideabi") else() set(CUDA_TOOLKIT_TARGET_NAMES "aarch64-linux" "sbsa-linux") endif (ANDROID_ARCH_NAME STREQUAL "arm64") endif() foreach(CUDA_TOOLKIT_TARGET_NAME IN LISTS CUDA_TOOLKIT_TARGET_NAMES) if (EXISTS "${CUDA_TOOLKIT_ROOT}/targets/${CUDA_TOOLKIT_TARGET_NAME}") set(CUDA_TOOLKIT_TARGET_DIR "${CUDA_TOOLKIT_ROOT}/targets/${CUDA_TOOLKIT_TARGET_NAME}" CACHE PATH "CUDA Toolkit target location.") SET (CUDA_TOOLKIT_ROOT_DIR ${CUDA_TOOLKIT_ROOT} CACHE PATH "Toolkit location." 
FORCE) mark_as_advanced(CUDA_TOOLKIT_TARGET_DIR) break() endif() endforeach() # add known CUDA targetr root path to the set of directories we search for programs, libraries and headers set( CMAKE_FIND_ROOT_PATH "${CUDA_TOOLKIT_TARGET_DIR};${CMAKE_FIND_ROOT_PATH}") macro( cuda_find_host_program ) if (COMMAND find_host_program) find_host_program( ${ARGN} ) else() find_program( ${ARGN} ) endif() endmacro() else() # for non-cross-compile, find_host_program == find_program and CUDA_TOOLKIT_TARGET_DIR == CUDA_TOOLKIT_ROOT_DIR macro( cuda_find_host_program ) find_program( ${ARGN} ) endmacro() SET (CUDA_TOOLKIT_TARGET_DIR ${CUDA_TOOLKIT_ROOT_DIR}) endif() # CUDA_NVCC_EXECUTABLE if(DEFINED ENV{CUDA_NVCC_EXECUTABLE}) set(CUDA_NVCC_EXECUTABLE "$ENV{CUDA_NVCC_EXECUTABLE}" CACHE FILEPATH "The CUDA compiler") else() cuda_find_host_program(CUDA_NVCC_EXECUTABLE NAMES nvcc PATHS "${CUDA_TOOLKIT_ROOT_DIR}" ENV CUDA_PATH ENV CUDA_BIN_PATH PATH_SUFFIXES bin bin64 NO_DEFAULT_PATH ) # Search default search paths, after we search our own set of paths. cuda_find_host_program(CUDA_NVCC_EXECUTABLE nvcc) endif() if(CUDA_NVCC_EXECUTABLE AND NOT CUDA_VERSION) # Compute the version. execute_process(COMMAND ${CUDA_NVCC_EXECUTABLE} "--version" OUTPUT_VARIABLE NVCC_OUT RESULT_VARIABLE NVCC_RC) if(NOT (${NVCC_RC} EQUAL 0)) message(WARNING "Failed to execute '${CUDA_NVCC_EXECUTABLE} --version'") set(CUDA_FOUND FALSE) return() endif() string(REGEX REPLACE ".*release ([0-9]+)\\.([0-9]+).*" "\\1" CUDA_VERSION_MAJOR ${NVCC_OUT}) string(REGEX REPLACE ".*release ([0-9]+)\\.([0-9]+).*" "\\2" CUDA_VERSION_MINOR ${NVCC_OUT}) set(CUDA_VERSION "${CUDA_VERSION_MAJOR}.${CUDA_VERSION_MINOR}" CACHE STRING "Version of CUDA as computed from nvcc.") mark_as_advanced(CUDA_VERSION) else() # Need to set these based off of the cached value string(REGEX REPLACE "([0-9]+)\\.([0-9]+).*" "\\1" CUDA_VERSION_MAJOR "${CUDA_VERSION}") string(REGEX REPLACE "([0-9]+)\\.([0-9]+).*" "\\2" CUDA_VERSION_MINOR "${CUDA_VERSION}") endif() # Always set this convenience variable set(CUDA_VERSION_STRING "${CUDA_VERSION}") # CUDA_TOOLKIT_INCLUDE find_path(CUDA_TOOLKIT_INCLUDE device_functions.h # Header included in toolkit PATHS ${CUDA_TOOLKIT_TARGET_DIR} ENV CUDA_PATH ENV CUDA_INC_PATH PATH_SUFFIXES include NO_DEFAULT_PATH ) # Search default search paths, after we search our own set of paths. find_path(CUDA_TOOLKIT_INCLUDE device_functions.h) mark_as_advanced(CUDA_TOOLKIT_INCLUDE) set(CUDA_HAS_FP16 TRUE) # Set the user list of include dir to nothing to initialize it. set (CUDA_NVCC_INCLUDE_DIRS_USER "") set (CUDA_INCLUDE_DIRS ${CUDA_TOOLKIT_INCLUDE}) macro(cuda_find_library_local_first_with_path_ext _var _names _doc _path_ext ) if(CMAKE_SIZEOF_VOID_P EQUAL 8) # CUDA 3.2+ on Windows moved the library directories, so we need the new # and old paths. set(_cuda_64bit_lib_dir "${_path_ext}lib/x64" "${_path_ext}lib64" "${_path_ext}libx64" ) endif() # CUDA 3.2+ on Windows moved the library directories, so we need to new # (lib/Win32) and the old path (lib). find_library(${_var} NAMES ${_names} PATHS "${CUDA_TOOLKIT_TARGET_DIR}" ENV CUDA_PATH ENV CUDA_LIB_PATH PATH_SUFFIXES ${_cuda_64bit_lib_dir} "${_path_ext}lib/Win32" "${_path_ext}lib" "${_path_ext}libWin32" DOC ${_doc} NO_DEFAULT_PATH ) if (NOT CMAKE_CROSSCOMPILING) # Search default search paths, after we search our own set of paths. 
find_library(${_var} NAMES ${_names} PATHS "/usr/lib/nvidia-current" DOC ${_doc} ) endif() endmacro() macro(cuda_find_library_local_first _var _names _doc) cuda_find_library_local_first_with_path_ext( "${_var}" "${_names}" "${_doc}" "" ) endmacro() macro(find_library_local_first _var _names _doc ) cuda_find_library_local_first( "${_var}" "${_names}" "${_doc}" "" ) endmacro() # CUDA_LIBRARIES cuda_find_library_local_first(CUDA_CUDART_LIBRARY cudart "\"cudart\" library") cuda_find_library_local_first(CUDA_cudart_static_LIBRARY cudart_static "static CUDA runtime library") mark_as_advanced(CUDA_cudart_static_LIBRARY) if(CUDA_cudart_static_LIBRARY) # If static cudart available, use it by default, but provide a user-visible option to disable it. option(CUDA_USE_STATIC_CUDA_RUNTIME "Use the static version of the CUDA runtime library if available" ON) else() # If not available, silently disable the option. set(CUDA_USE_STATIC_CUDA_RUNTIME OFF CACHE INTERNAL "") endif() if(CUDA_USE_STATIC_CUDA_RUNTIME) set(CUDA_CUDART_LIBRARY_VAR CUDA_cudart_static_LIBRARY) else() set(CUDA_CUDART_LIBRARY_VAR CUDA_CUDART_LIBRARY) endif() cuda_find_library_local_first(CUDA_cudadevrt_LIBRARY cudadevrt "\"cudadevrt\" library") mark_as_advanced(CUDA_cudadevrt_LIBRARY) if(CUDA_USE_STATIC_CUDA_RUNTIME) if(UNIX) # Check for the dependent libraries. Here we look for pthreads. if (DEFINED CMAKE_THREAD_PREFER_PTHREAD) set(_cuda_cmake_thread_prefer_pthread ${CMAKE_THREAD_PREFER_PTHREAD}) endif() set(CMAKE_THREAD_PREFER_PTHREAD 1) # Many of the FindXYZ CMake comes with makes use of try_compile with int main(){return 0;} # as the source file. Unfortunately this causes a warning with -Wstrict-prototypes and # -Werror causes the try_compile to fail. We will just temporarily disable other flags # when doing the find_package command here. set(_cuda_cmake_c_flags ${CMAKE_C_FLAGS}) set(CMAKE_C_FLAGS "-fPIC") find_package(Threads REQUIRED) set(CMAKE_C_FLAGS ${_cuda_cmake_c_flags}) if (DEFINED _cuda_cmake_thread_prefer_pthread) set(CMAKE_THREAD_PREFER_PTHREAD ${_cuda_cmake_thread_prefer_pthread}) unset(_cuda_cmake_thread_prefer_pthread) else() unset(CMAKE_THREAD_PREFER_PTHREAD) endif() if(NOT APPLE) #On Linux, you must link against librt when using the static cuda runtime. find_library(CUDA_rt_LIBRARY rt) if (NOT CUDA_rt_LIBRARY) message(WARNING "Expecting to find librt for libcudart_static, but didn't find it.") endif() endif() endif() endif() cuda_find_library_local_first_with_path_ext(CUDA_cupti_LIBRARY cupti "\"cupti\" library" "extras/CUPTI/") mark_as_advanced(CUDA_cupti_LIBRARY) # Set the CUDA_LIBRARIES variable. This is the set of stuff to link against if you are # using the CUDA runtime. For the dynamic version of the runtime, most of the # dependencies are brough in, but for the static version there are additional libraries # and linker commands needed. # Initialize to empty set(CUDA_LIBRARIES) # If we are using emulation mode and we found the cudartemu library then use # that one instead of cudart. if(CUDA_BUILD_EMULATION AND CUDA_CUDARTEMU_LIBRARY) list(APPEND CUDA_LIBRARIES ${CUDA_CUDARTEMU_LIBRARY}) elseif(CUDA_USE_STATIC_CUDA_RUNTIME AND CUDA_cudart_static_LIBRARY) list(APPEND CUDA_LIBRARIES ${CUDA_cudart_static_LIBRARY} ${CMAKE_THREAD_LIBS_INIT} ${CMAKE_DL_LIBS}) if (CUDA_rt_LIBRARY) list(APPEND CUDA_LIBRARIES ${CUDA_rt_LIBRARY}) endif() if(APPLE) # We need to add the default path to the driver (libcuda.dylib) as an rpath, so that # the static cuda runtime can find it at runtime. 
list(APPEND CUDA_LIBRARIES -Wl,-rpath,/usr/local/cuda/lib) endif() else() list(APPEND CUDA_LIBRARIES ${CUDA_CUDART_LIBRARY}) endif() # 1.1 toolkit on linux doesn't appear to have a separate library on # some platforms. cuda_find_library_local_first(CUDA_CUDA_LIBRARY cuda "\"cuda\" library (older versions only).") mark_as_advanced( CUDA_CUDA_LIBRARY CUDA_CUDART_LIBRARY ) ####################### # Look for some of the toolkit helper libraries macro(FIND_CUDA_HELPER_LIBS _name) cuda_find_library_local_first(CUDA_${_name}_LIBRARY ${_name} "\"${_name}\" library") mark_as_advanced(CUDA_${_name}_LIBRARY) endmacro() if(CUDA_BUILD_EMULATION) message(FATAL_ERROR "CUDA_BUILD_EMULATION is not supported in version 3.1 and onwards. You must disable it to proceed. You have version ${CUDA_VERSION}.") endif() find_cuda_helper_libs(cufft) find_cuda_helper_libs(cublas) find_cuda_helper_libs(cublasLt) # cusparse showed up in version 3.2 find_cuda_helper_libs(cusparse) find_cuda_helper_libs(curand) if (WIN32) find_cuda_helper_libs(nvcuvenc) find_cuda_helper_libs(nvcuvid) endif() # In CUDA 9.0 NPP was nppi was removed find_cuda_helper_libs(nppc) find_cuda_helper_libs(nppial) find_cuda_helper_libs(nppicc) find_cuda_helper_libs(nppicom) find_cuda_helper_libs(nppidei) find_cuda_helper_libs(nppif) find_cuda_helper_libs(nppig) find_cuda_helper_libs(nppim) find_cuda_helper_libs(nppist) find_cuda_helper_libs(nppisu) find_cuda_helper_libs(nppitc) find_cuda_helper_libs(npps) set(CUDA_npp_LIBRARY "${CUDA_nppc_LIBRARY};${CUDA_nppial_LIBRARY};${CUDA_nppicc_LIBRARY};${CUDA_nppicom_LIBRARY};${CUDA_nppidei_LIBRARY};${CUDA_nppif_LIBRARY};${CUDA_nppig_LIBRARY};${CUDA_nppim_LIBRARY};${CUDA_nppist_LIBRARY};${CUDA_nppisu_LIBRARY};${CUDA_nppitc_LIBRARY};${CUDA_npps_LIBRARY}") # cusolver showed up in version 7.0 find_cuda_helper_libs(cusolver) if (CUDA_BUILD_EMULATION) set(CUDA_CUFFT_LIBRARIES ${CUDA_cufftemu_LIBRARY}) set(CUDA_CUBLAS_LIBRARIES ${CUDA_cublasemu_LIBRARY}) else() set(CUDA_CUFFT_LIBRARIES ${CUDA_cufft_LIBRARY}) set(CUDA_CUBLAS_LIBRARIES ${CUDA_cublas_LIBRARY} ${CUDA_cublas_device_LIBRARY} ${CUDA_cublasLt_LIBRARY}) endif() ######################## # Look for the SDK stuff. As of CUDA 3.0 NVSDKCUDA_ROOT has been replaced with # NVSDKCOMPUTE_ROOT with the old CUDA C contents moved into the C subdirectory find_path(CUDA_SDK_ROOT_DIR common/inc/cutil.h HINTS "$ENV{NVSDKCOMPUTE_ROOT}/C" ENV NVSDKCUDA_ROOT "[HKEY_LOCAL_MACHINE\\SOFTWARE\\NVIDIA Corporation\\Installed Products\\NVIDIA SDK 10\\Compute;InstallDir]" PATHS "/Developer/GPU\ Computing/C" ) # Keep the CUDA_SDK_ROOT_DIR first in order to be able to override the # environment variables. set(CUDA_SDK_SEARCH_PATH "${CUDA_SDK_ROOT_DIR}" "${CUDA_TOOLKIT_ROOT_DIR}/local/NVSDK0.2" "${CUDA_TOOLKIT_ROOT_DIR}/NVSDK0.2" "${CUDA_TOOLKIT_ROOT_DIR}/NV_CUDA_SDK" "$ENV{HOME}/NVIDIA_CUDA_SDK" "$ENV{HOME}/NVIDIA_CUDA_SDK_MACOSX" "/Developer/CUDA" ) # Example of how to find an include file from the CUDA_SDK_ROOT_DIR # find_path(CUDA_CUT_INCLUDE_DIR # cutil.h # PATHS ${CUDA_SDK_SEARCH_PATH} # PATH_SUFFIXES "common/inc" # DOC "Location of cutil.h" # NO_DEFAULT_PATH # ) # # Now search system paths # find_path(CUDA_CUT_INCLUDE_DIR cutil.h DOC "Location of cutil.h") # mark_as_advanced(CUDA_CUT_INCLUDE_DIR) # Example of how to find a library in the CUDA_SDK_ROOT_DIR # # cutil library is called cutil64 for 64 bit builds on windows. We don't want # # to get these confused, so we are setting the name based on the word size of # # the build. 
# if(CMAKE_SIZEOF_VOID_P EQUAL 8) # set(cuda_cutil_name cutil64) # else() # set(cuda_cutil_name cutil32) # endif() # find_library(CUDA_CUT_LIBRARY # NAMES cutil ${cuda_cutil_name} # PATHS ${CUDA_SDK_SEARCH_PATH} # # The new version of the sdk shows up in common/lib, but the old one is in lib # PATH_SUFFIXES "common/lib" "lib" # DOC "Location of cutil library" # NO_DEFAULT_PATH # ) # # Now search system paths # find_library(CUDA_CUT_LIBRARY NAMES cutil ${cuda_cutil_name} DOC "Location of cutil library") # mark_as_advanced(CUDA_CUT_LIBRARY) # set(CUDA_CUT_LIBRARIES ${CUDA_CUT_LIBRARY}) ############################# # Check for required components set(CUDA_FOUND TRUE) set(CUDA_TOOLKIT_ROOT_DIR_INTERNAL "${CUDA_TOOLKIT_ROOT_DIR}" CACHE INTERNAL "This is the value of the last time CUDA_TOOLKIT_ROOT_DIR was set successfully." FORCE) set(CUDA_TOOLKIT_TARGET_DIR_INTERNAL "${CUDA_TOOLKIT_TARGET_DIR}" CACHE INTERNAL "This is the value of the last time CUDA_TOOLKIT_TARGET_DIR was set successfully." FORCE) set(CUDA_SDK_ROOT_DIR_INTERNAL "${CUDA_SDK_ROOT_DIR}" CACHE INTERNAL "This is the value of the last time CUDA_SDK_ROOT_DIR was set successfully." FORCE) include(${CMAKE_CURRENT_LIST_DIR}/FindPackageHandleStandardArgs.cmake) find_package_handle_standard_args(CUDA REQUIRED_VARS CUDA_TOOLKIT_ROOT_DIR CUDA_NVCC_EXECUTABLE CUDA_INCLUDE_DIRS ${CUDA_CUDART_LIBRARY_VAR} VERSION_VAR CUDA_VERSION ) ############################################################################### ############################################################################### # Macros ############################################################################### ############################################################################### ############################################################################### # Add include directories to pass to the nvcc command. 
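# For example (illustrative only; the path below is hypothetical):
#   cuda_include_directories(${CMAKE_CURRENT_SOURCE_DIR}/cuda_headers)
# Directories registered this way are combined with CUDA_INCLUDE_DIRS and the
# target's own include directories when CUDA_WRAP_SRCS builds the nvcc command
# line.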
macro(CUDA_INCLUDE_DIRECTORIES) foreach(dir ${ARGN}) list(APPEND CUDA_NVCC_INCLUDE_DIRS_USER ${dir}) endforeach() endmacro() ############################################################################## cuda_find_helper_file(parse_cubin cmake) cuda_find_helper_file(make2cmake cmake) cuda_find_helper_file(run_nvcc cmake) include("${CMAKE_CURRENT_LIST_DIR}/FindCUDA/select_compute_arch.cmake") ############################################################################## # Separate the OPTIONS out from the sources # macro(CUDA_GET_SOURCES_AND_OPTIONS _sources _cmake_options _options) set( ${_sources} ) set( ${_cmake_options} ) set( ${_options} ) set( _found_options FALSE ) foreach(arg ${ARGN}) if("x${arg}" STREQUAL "xOPTIONS") set( _found_options TRUE ) elseif( "x${arg}" STREQUAL "xWIN32" OR "x${arg}" STREQUAL "xMACOSX_BUNDLE" OR "x${arg}" STREQUAL "xEXCLUDE_FROM_ALL" OR "x${arg}" STREQUAL "xSTATIC" OR "x${arg}" STREQUAL "xSHARED" OR "x${arg}" STREQUAL "xMODULE" ) list(APPEND ${_cmake_options} ${arg}) else() if ( _found_options ) list(APPEND ${_options} ${arg}) else() # Assume this is a file list(APPEND ${_sources} ${arg}) endif() endif() endforeach() endmacro() ############################################################################## # Parse the OPTIONS from ARGN and set the variables prefixed by _option_prefix # macro(CUDA_PARSE_NVCC_OPTIONS _option_prefix) set( _found_config ) foreach(arg ${ARGN}) # Determine if we are dealing with a perconfiguration flag foreach(config ${CUDA_configuration_types}) string(TOUPPER ${config} config_upper) if (arg STREQUAL "${config_upper}") set( _found_config _${arg}) # Set arg to nothing to keep it from being processed further set( arg ) endif() endforeach() if ( arg ) list(APPEND ${_option_prefix}${_found_config} "${arg}") endif() endforeach() endmacro() ############################################################################## # Helper to add the include directory for CUDA only once function(CUDA_ADD_CUDA_INCLUDE_ONCE) get_directory_property(_include_directories INCLUDE_DIRECTORIES) set(_add TRUE) if(_include_directories) foreach(dir ${_include_directories}) if("${dir}" STREQUAL "${CUDA_INCLUDE_DIRS}") set(_add FALSE) endif() endforeach() endif() if(_add) include_directories(${CUDA_INCLUDE_DIRS}) endif() endfunction() function(CUDA_BUILD_SHARED_LIBRARY shared_flag) set(cmake_args ${ARGN}) # If SHARED, MODULE, or STATIC aren't already in the list of arguments, then # add SHARED or STATIC based on the value of BUILD_SHARED_LIBS. list(FIND cmake_args SHARED _cuda_found_SHARED) list(FIND cmake_args MODULE _cuda_found_MODULE) list(FIND cmake_args STATIC _cuda_found_STATIC) if( _cuda_found_SHARED GREATER -1 OR _cuda_found_MODULE GREATER -1 OR _cuda_found_STATIC GREATER -1) set(_cuda_build_shared_libs) else() if (BUILD_SHARED_LIBS) set(_cuda_build_shared_libs SHARED) else() set(_cuda_build_shared_libs STATIC) endif() endif() set(${shared_flag} ${_cuda_build_shared_libs} PARENT_SCOPE) endfunction() ############################################################################## # Helper to avoid clashes of files with the same basename but different paths. # This doesn't attempt to do exactly what CMake internals do, which is to only # add this path when there is a conflict, since by the time a second collision # in names is detected it's already too late to fix the first one. For # consistency sake the relative path will be added to all files. 
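# For example (illustrative only; the file name is hypothetical):
#   cuda_compute_build_path("src/kernels/foo.cu" _build_path)
#   # _build_path is now "src/kernels", so the generated object lands under
#   # CMakeFiles/<target>.dir/src/kernels/ and will not collide with an object
#   # generated from a foo.cu in a different directory.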
function(CUDA_COMPUTE_BUILD_PATH path build_path) #message("CUDA_COMPUTE_BUILD_PATH([${path}] ${build_path})") # Only deal with CMake style paths from here on out file(TO_CMAKE_PATH "${path}" bpath) if (IS_ABSOLUTE "${bpath}") # Absolute paths are generally unnessary, especially if something like # file(GLOB_RECURSE) is used to pick up the files. string(FIND "${bpath}" "${CMAKE_CURRENT_BINARY_DIR}" _binary_dir_pos) if (_binary_dir_pos EQUAL 0) file(RELATIVE_PATH bpath "${CMAKE_CURRENT_BINARY_DIR}" "${bpath}") else() file(RELATIVE_PATH bpath "${CMAKE_CURRENT_SOURCE_DIR}" "${bpath}") endif() endif() # This recipe is from cmLocalGenerator::CreateSafeUniqueObjectFileName in the # CMake source. # Remove leading / string(REGEX REPLACE "^[/]+" "" bpath "${bpath}") # Avoid absolute paths by removing ':' string(REPLACE ":" "_" bpath "${bpath}") # Avoid relative paths that go up the tree string(REPLACE "../" "__/" bpath "${bpath}") # Avoid spaces string(REPLACE " " "_" bpath "${bpath}") # Strip off the filename. I wait until here to do it, since removin the # basename can make a path that looked like path/../basename turn into # path/.. (notice the trailing slash). get_filename_component(bpath "${bpath}" PATH) set(${build_path} "${bpath}" PARENT_SCOPE) #message("${build_path} = ${bpath}") endfunction() ############################################################################## # This helper macro populates the following variables and setups up custom # commands and targets to invoke the nvcc compiler to generate C or PTX source # dependent upon the format parameter. The compiler is invoked once with -M # to generate a dependency file and a second time with -cuda or -ptx to generate # a .cpp or .ptx file. # INPUT: # cuda_target - Target name # format - PTX, CUBIN, FATBIN or OBJ # FILE1 .. FILEN - The remaining arguments are the sources to be wrapped. # OPTIONS - Extra options to NVCC # OUTPUT: # generated_files - List of generated files ############################################################################## ############################################################################## macro(CUDA_WRAP_SRCS cuda_target format generated_files) # Put optional arguments in list. set(_argn_list "${ARGN}") # If one of the given optional arguments is "PHONY", make a note of it, then # remove it from the list. list(FIND _argn_list "PHONY" _phony_idx) if("${_phony_idx}" GREATER "-1") set(_target_is_phony true) list(REMOVE_AT _argn_list ${_phony_idx}) else() set(_target_is_phony false) endif() # If CMake doesn't support separable compilation, complain if(CUDA_SEPARABLE_COMPILATION AND CMAKE_VERSION VERSION_LESS "2.8.10.1") message(SEND_ERROR "CUDA_SEPARABLE_COMPILATION isn't supported for CMake versions less than 2.8.10.1") endif() # Set up all the command line flags here, so that they can be overridden on a per target basis. set(nvcc_flags "") # Emulation if the card isn't present. if (CUDA_BUILD_EMULATION) # Emulation. set(nvcc_flags ${nvcc_flags} --device-emulation -D_DEVICEEMU -g) else() # Device mode. No flags necessary. endif() if(CUDA_HOST_COMPILATION_CPP) set(CUDA_C_OR_CXX CXX) else() message(WARNING "--host-compilation flag is deprecated in CUDA version >= 3.0. 
Removing --host-compilation C flag" ) set(CUDA_C_OR_CXX C) endif() set(generated_extension ${CMAKE_${CUDA_C_OR_CXX}_OUTPUT_EXTENSION}) if(CUDA_64_BIT_DEVICE_CODE) set(nvcc_flags ${nvcc_flags} -m64) else() set(nvcc_flags ${nvcc_flags} -m32) endif() if(CUDA_TARGET_CPU_ARCH) set(nvcc_flags ${nvcc_flags} "--target-cpu-architecture=${CUDA_TARGET_CPU_ARCH}") endif() # This needs to be passed in at this stage, because VS needs to fill out the # various macros from within VS. Note that CCBIN is only used if # -ccbin or --compiler-bindir isn't used and CUDA_HOST_COMPILER matches # _CUDA_MSVC_HOST_COMPILER if(CMAKE_GENERATOR MATCHES "Visual Studio") set(ccbin_flags -D "\"CCBIN:PATH=${_CUDA_MSVC_HOST_COMPILER}\"" ) else() set(ccbin_flags) endif() # Figure out which configure we will use and pass that in as an argument to # the script. We need to defer the decision until compilation time, because # for VS projects we won't know if we are making a debug or release build # until build time. if(CMAKE_GENERATOR MATCHES "Visual Studio") set( CUDA_build_configuration "$(ConfigurationName)" ) else() set( CUDA_build_configuration "${CMAKE_BUILD_TYPE}") endif() # Initialize our list of includes with the user ones followed by the CUDA system ones. set(CUDA_NVCC_INCLUDE_DIRS ${CUDA_NVCC_INCLUDE_DIRS_USER} "${CUDA_INCLUDE_DIRS}") if(_target_is_phony) # If the passed in target name isn't a real target (i.e., this is from a call to one of the # cuda_compile_* functions), need to query directory properties to get include directories # and compile definitions. get_directory_property(_dir_include_dirs INCLUDE_DIRECTORIES) get_directory_property(_dir_compile_defs COMPILE_DEFINITIONS) list(APPEND CUDA_NVCC_INCLUDE_DIRS "${_dir_include_dirs}") set(CUDA_NVCC_COMPILE_DEFINITIONS "${_dir_compile_defs}") else() # Append the include directories for this target via generator expression, which is # expanded by the FILE(GENERATE) call below. This generator expression captures all # include dirs set by the user, whether via directory properties or target properties list(APPEND CUDA_NVCC_INCLUDE_DIRS "$<TARGET_PROPERTY:${cuda_target},INCLUDE_DIRECTORIES>") # Do the same thing with compile definitions set(CUDA_NVCC_COMPILE_DEFINITIONS "$<TARGET_PROPERTY:${cuda_target},COMPILE_DEFINITIONS>") endif() # Reset these variables set(CUDA_WRAP_OPTION_NVCC_FLAGS) foreach(config ${CUDA_configuration_types}) string(TOUPPER ${config} config_upper) set(CUDA_WRAP_OPTION_NVCC_FLAGS_${config_upper}) endforeach() CUDA_GET_SOURCES_AND_OPTIONS(_cuda_wrap_sources _cuda_wrap_cmake_options _cuda_wrap_options ${_argn_list}) CUDA_PARSE_NVCC_OPTIONS(CUDA_WRAP_OPTION_NVCC_FLAGS ${_cuda_wrap_options}) # Figure out if we are building a shared library. BUILD_SHARED_LIBS is # respected in CUDA_ADD_LIBRARY. set(_cuda_build_shared_libs FALSE) # SHARED, MODULE list(FIND _cuda_wrap_cmake_options SHARED _cuda_found_SHARED) list(FIND _cuda_wrap_cmake_options MODULE _cuda_found_MODULE) if(_cuda_found_SHARED GREATER -1 OR _cuda_found_MODULE GREATER -1) set(_cuda_build_shared_libs TRUE) endif() # STATIC list(FIND _cuda_wrap_cmake_options STATIC _cuda_found_STATIC) if(_cuda_found_STATIC GREATER -1) set(_cuda_build_shared_libs FALSE) endif() # CUDA_HOST_FLAGS if(_cuda_build_shared_libs) # If we are setting up code for a shared library, then we need to add extra flags for # compiling objects for shared libraries. 
set(CUDA_HOST_SHARED_FLAGS ${CMAKE_SHARED_LIBRARY_${CUDA_C_OR_CXX}_FLAGS}) else() set(CUDA_HOST_SHARED_FLAGS) endif() macro(_filter_blocklisted_host_flags CUDA_FLAGS) string(REGEX REPLACE "[ \t]+" ";" ${CUDA_FLAGS} "${${CUDA_FLAGS}}") foreach(_blacklisted ${CUDA_PROPAGATE_HOST_FLAGS_BLACKLIST}) list(REMOVE_ITEM ${CUDA_FLAGS} "${_blacklisted}") endforeach() string(REPLACE ";" " " ${CUDA_FLAGS} "${${CUDA_FLAGS}}") endmacro() # Only add the CMAKE_{C,CXX}_FLAGS if we are propagating host flags. We # always need to set the SHARED_FLAGS, though. if(CUDA_PROPAGATE_HOST_FLAGS) set(_cuda_C_FLAGS "${CMAKE_${CUDA_C_OR_CXX}_FLAGS}") _filter_blocklisted_host_flags(_cuda_C_FLAGS) set(_cuda_host_flags "set(CMAKE_HOST_FLAGS ${_cuda_C_FLAGS} ${CUDA_HOST_SHARED_FLAGS})") else() set(_cuda_host_flags "set(CMAKE_HOST_FLAGS ${CUDA_HOST_SHARED_FLAGS})") endif() set(_cuda_nvcc_flags_config "# Build specific configuration flags") # Loop over all the configuration types to generate appropriate flags for run_nvcc.cmake foreach(config ${CUDA_configuration_types}) string(TOUPPER ${config} config_upper) # CMAKE_FLAGS are strings and not lists. By not putting quotes around CMAKE_FLAGS # we convert the strings to lists (like we want). if(CUDA_PROPAGATE_HOST_FLAGS) # nvcc chokes on -g3 in versions previous to 3.0, so replace it with -g set(_cuda_fix_g3 FALSE) set(_cuda_C_FLAGS "${CMAKE_${CUDA_C_OR_CXX}_FLAGS_${config_upper}}") _filter_blocklisted_host_flags(_cuda_C_FLAGS) if(_cuda_fix_g3) string(REPLACE "-g3" "-g" _cuda_C_FLAGS "${_cuda_C_FLAGS}") endif() string(APPEND _cuda_host_flags "\nset(CMAKE_HOST_FLAGS_${config_upper} ${_cuda_C_FLAGS})") endif() # Note that if we ever want CUDA_NVCC_FLAGS_<CONFIG> to be string (instead of a list # like it is currently), we can remove the quotes around the # ${CUDA_NVCC_FLAGS_${config_upper}} variable like the CMAKE_HOST_FLAGS_<CONFIG> variable. string(APPEND _cuda_nvcc_flags_config "\nset(CUDA_NVCC_FLAGS_${config_upper} ${CUDA_NVCC_FLAGS_${config_upper}} ;; ${CUDA_WRAP_OPTION_NVCC_FLAGS_${config_upper}})") endforeach() # Process the C++14 flag. If the host sets the flag, we need to add it to nvcc and # remove it from the host. This is because -Xcompile -std=c++ will choke nvcc (it uses # the C preprocessor). In order to get this to work correctly, we need to use nvcc's # specific c++14 flag. if( "${_cuda_host_flags}" MATCHES "-std=c\\+\\+11") # Add the c++14 flag to nvcc if it isn't already present. Note that we only look at # the main flag instead of the configuration specific flags. if( NOT "${CUDA_NVCC_FLAGS}" MATCHES "-std=c\\+\\+14" ) list(APPEND nvcc_flags --std c++14) endif() string(REGEX REPLACE "[-]+std=c\\+\\+14" "" _cuda_host_flags "${_cuda_host_flags}") endif() if(_cuda_build_shared_libs) list(APPEND nvcc_flags "-D${cuda_target}_EXPORTS") endif() # Reset the output variable set(_cuda_wrap_generated_files "") # Iterate over the macro arguments and create custom # commands for all the .cu files. foreach(file ${_argn_list}) # Ignore any file marked as a HEADER_FILE_ONLY get_source_file_property(_is_header ${file} HEADER_FILE_ONLY) # Allow per source file overrides of the format. Also allows compiling non-.cu files. get_source_file_property(_cuda_source_format ${file} CUDA_SOURCE_PROPERTY_FORMAT) if((${file} MATCHES "\\.cu$" OR _cuda_source_format) AND NOT _is_header) if(NOT _cuda_source_format) set(_cuda_source_format ${format}) endif() # If file isn't a .cu file, we need to tell nvcc to treat it as such. 
if(NOT file MATCHES "\\.cu$") set(cuda_language_flag -x=cu) else() set(cuda_language_flag) endif() if( ${_cuda_source_format} MATCHES "OBJ") set( cuda_compile_to_external_module OFF ) else() set( cuda_compile_to_external_module ON ) if( ${_cuda_source_format} MATCHES "PTX" ) set( cuda_compile_to_external_module_type "ptx" ) elseif( ${_cuda_source_format} MATCHES "CUBIN") set( cuda_compile_to_external_module_type "cubin" ) elseif( ${_cuda_source_format} MATCHES "FATBIN") set( cuda_compile_to_external_module_type "fatbin" ) else() message( FATAL_ERROR "Invalid format flag passed to CUDA_WRAP_SRCS or set with CUDA_SOURCE_PROPERTY_FORMAT file property for file '${file}': '${_cuda_source_format}'. Use OBJ, PTX, CUBIN or FATBIN.") endif() endif() if(cuda_compile_to_external_module) # Don't use any of the host compilation flags for PTX targets. set(CUDA_HOST_FLAGS) set(CUDA_NVCC_FLAGS_CONFIG) else() set(CUDA_HOST_FLAGS ${_cuda_host_flags}) set(CUDA_NVCC_FLAGS_CONFIG ${_cuda_nvcc_flags_config}) endif() # Determine output directory cuda_compute_build_path("${file}" cuda_build_path) set(cuda_compile_intermediate_directory "${CMAKE_CURRENT_BINARY_DIR}/CMakeFiles/${cuda_target}.dir/${cuda_build_path}") if(CUDA_GENERATED_OUTPUT_DIR) set(cuda_compile_output_dir "${CUDA_GENERATED_OUTPUT_DIR}") else() if ( cuda_compile_to_external_module ) set(cuda_compile_output_dir "${CMAKE_CURRENT_BINARY_DIR}") else() set(cuda_compile_output_dir "${cuda_compile_intermediate_directory}") endif() endif() # Add a custom target to generate a c or ptx file. ###################### get_filename_component( basename ${file} NAME ) if( cuda_compile_to_external_module ) set(generated_file_path "${cuda_compile_output_dir}") set(generated_file_basename "${cuda_target}_generated_${basename}.${cuda_compile_to_external_module_type}") set(format_flag "-${cuda_compile_to_external_module_type}") file(MAKE_DIRECTORY "${cuda_compile_output_dir}") else() set(generated_file_path "${cuda_compile_output_dir}/${CMAKE_CFG_INTDIR}") set(generated_file_basename "${cuda_target}_generated_${basename}${generated_extension}") if(CUDA_SEPARABLE_COMPILATION) set(format_flag "-dc") else() set(format_flag "-c") endif() endif() # Set all of our file names. Make sure that whatever filenames that have # generated_file_path in them get passed in through as a command line # argument, so that the ${CMAKE_CFG_INTDIR} gets expanded at run time # instead of configure time. set(generated_file "${generated_file_path}/${generated_file_basename}") set(cmake_dependency_file "${cuda_compile_intermediate_directory}/${generated_file_basename}.depend") set(NVCC_generated_dependency_file "${cuda_compile_intermediate_directory}/${generated_file_basename}.NVCC-depend") set(generated_cubin_file "${generated_file_path}/${generated_file_basename}.cubin.txt") set(custom_target_script_pregen "${cuda_compile_intermediate_directory}/${generated_file_basename}.cmake.pre-gen") set(custom_target_script "${cuda_compile_intermediate_directory}/${generated_file_basename}$<$<BOOL:$<CONFIG>>:.$<CONFIG>>.cmake") # Setup properties for obj files: if( NOT cuda_compile_to_external_module ) set_source_files_properties("${generated_file}" PROPERTIES EXTERNAL_OBJECT true # This is an object file not to be compiled, but only be linked. ) endif() # Don't add CMAKE_CURRENT_SOURCE_DIR if the path is already an absolute path. 
get_filename_component(file_path "${file}" PATH) if(IS_ABSOLUTE "${file_path}") set(source_file "${file}") else() set(source_file "${CMAKE_CURRENT_SOURCE_DIR}/${file}") endif() if( NOT cuda_compile_to_external_module AND CUDA_SEPARABLE_COMPILATION) list(APPEND ${cuda_target}_SEPARABLE_COMPILATION_OBJECTS "${generated_file}") endif() # Bring in the dependencies. Creates a variable CUDA_NVCC_DEPEND ####### cuda_include_nvcc_dependencies(${cmake_dependency_file}) # Convenience string for output ######################################### if(CUDA_BUILD_EMULATION) set(cuda_build_type "Emulation") else() set(cuda_build_type "Device") endif() # Build the NVCC made dependency file ################################### set(build_cubin OFF) if ( NOT CUDA_BUILD_EMULATION AND CUDA_BUILD_CUBIN ) if ( NOT cuda_compile_to_external_module ) set ( build_cubin ON ) endif() endif() # Configure the build script configure_file("${CUDA_run_nvcc}" "${custom_target_script_pregen}" @ONLY) file(GENERATE OUTPUT "${custom_target_script}" INPUT "${custom_target_script_pregen}" ) # So if a user specifies the same cuda file as input more than once, you # can have bad things happen with dependencies. Here we check an option # to see if this is the behavior they want. if(CUDA_ATTACH_VS_BUILD_RULE_TO_CUDA_FILE) set(main_dep MAIN_DEPENDENCY ${source_file}) else() set(main_dep DEPENDS ${source_file}) endif() if(CUDA_VERBOSE_BUILD) set(verbose_output ON) elseif(CMAKE_GENERATOR MATCHES "Makefiles") set(verbose_output "$(VERBOSE)") # This condition lets us also turn on verbose output when someone # specifies CMAKE_VERBOSE_MAKEFILE, even if the generator isn't # the Makefiles generator (this is important for us, Ninja users.) elseif(CMAKE_VERBOSE_MAKEFILE) set(verbose_output ON) else() set(verbose_output OFF) endif() # Create up the comment string file(RELATIVE_PATH generated_file_relative_path "${CMAKE_BINARY_DIR}" "${generated_file}") if(cuda_compile_to_external_module) set(cuda_build_comment_string "Building NVCC ${cuda_compile_to_external_module_type} file ${generated_file_relative_path}") else() set(cuda_build_comment_string "Building NVCC (${cuda_build_type}) object ${generated_file_relative_path}") endif() set(_verbatim VERBATIM) if(ccbin_flags MATCHES "\\$\\(VCInstallDir\\)") set(_verbatim "") endif() # Build the generated file and dependency file ########################## add_custom_command( OUTPUT ${generated_file} # These output files depend on the source_file and the contents of cmake_dependency_file ${main_dep} DEPENDS ${CUDA_NVCC_DEPEND} DEPENDS ${custom_target_script} # Make sure the output directory exists before trying to write to it. COMMAND ${CMAKE_COMMAND} -E make_directory "${generated_file_path}" COMMAND ${CMAKE_COMMAND} ARGS -D verbose:BOOL=${verbose_output} ${ccbin_flags} -D build_configuration:STRING=${CUDA_build_configuration} -D "generated_file:STRING=${generated_file}" -D "generated_cubin_file:STRING=${generated_cubin_file}" -P "${custom_target_script}" WORKING_DIRECTORY "${cuda_compile_intermediate_directory}" COMMENT "${cuda_build_comment_string}" ${_verbatim} ) # Make sure the build system knows the file is generated. 
set_source_files_properties(${generated_file} PROPERTIES GENERATED TRUE) list(APPEND _cuda_wrap_generated_files ${generated_file}) # Add the other files that we want cmake to clean on a cleanup ########## list(APPEND CUDA_ADDITIONAL_CLEAN_FILES "${cmake_dependency_file}") list(REMOVE_DUPLICATES CUDA_ADDITIONAL_CLEAN_FILES) set(CUDA_ADDITIONAL_CLEAN_FILES ${CUDA_ADDITIONAL_CLEAN_FILES} CACHE INTERNAL "List of intermediate files that are part of the cuda dependency scanning.") endif() endforeach() # Set the return parameter set(${generated_files} ${_cuda_wrap_generated_files}) endmacro() function(_cuda_get_important_host_flags important_flags flag_string) if(CMAKE_GENERATOR MATCHES "Visual Studio") string(REGEX MATCHALL "/M[DT][d]?" flags "${flag_string}") list(APPEND ${important_flags} ${flags}) else() string(REGEX MATCHALL "-fPIC" flags "${flag_string}") list(APPEND ${important_flags} ${flags}) endif() set(${important_flags} ${${important_flags}} PARENT_SCOPE) endfunction() ############################################################################### ############################################################################### # Separable Compilation Link ############################################################################### ############################################################################### # Compute the filename to be used by CUDA_LINK_SEPARABLE_COMPILATION_OBJECTS function(CUDA_COMPUTE_SEPARABLE_COMPILATION_OBJECT_FILE_NAME output_file_var cuda_target object_files) if (object_files) set(generated_extension ${CMAKE_${CUDA_C_OR_CXX}_OUTPUT_EXTENSION}) set(output_file "${CMAKE_CURRENT_BINARY_DIR}/CMakeFiles/${cuda_target}.dir/${CMAKE_CFG_INTDIR}/${cuda_target}_intermediate_link${generated_extension}") else() set(output_file) endif() set(${output_file_var} "${output_file}" PARENT_SCOPE) endfunction() # Setup the build rule for the separable compilation intermediate link file. function(CUDA_LINK_SEPARABLE_COMPILATION_OBJECTS output_file cuda_target options object_files) if (object_files) set_source_files_properties("${output_file}" PROPERTIES EXTERNAL_OBJECT TRUE # This is an object file not to be compiled, but only # be linked. GENERATED TRUE # This file is generated during the build ) # For now we are ignoring all the configuration specific flags. set(nvcc_flags) CUDA_PARSE_NVCC_OPTIONS(nvcc_flags ${options}) if(CUDA_64_BIT_DEVICE_CODE) list(APPEND nvcc_flags -m64) else() list(APPEND nvcc_flags -m32) endif() # If -ccbin, --compiler-bindir has been specified, don't do anything. Otherwise add it here. list( FIND nvcc_flags "-ccbin" ccbin_found0 ) list( FIND nvcc_flags "--compiler-bindir" ccbin_found1 ) if( ccbin_found0 LESS 0 AND ccbin_found1 LESS 0 AND CUDA_HOST_COMPILER ) # Match VERBATIM check below. 
if(CUDA_HOST_COMPILER MATCHES "\\$\\(VCInstallDir\\)") list(APPEND nvcc_flags -ccbin "\"${CUDA_HOST_COMPILER}\"") else() list(APPEND nvcc_flags -ccbin "${CUDA_HOST_COMPILER}") endif() endif() # Create a list of flags specified by CUDA_NVCC_FLAGS_${CONFIG} and CMAKE_${CUDA_C_OR_CXX}_FLAGS* set(config_specific_flags) set(flags) foreach(config ${CUDA_configuration_types}) string(TOUPPER ${config} config_upper) # Add config specific flags foreach(f ${CUDA_NVCC_FLAGS_${config_upper}}) list(APPEND config_specific_flags $<$<CONFIG:${config}>:${f}>) endforeach() set(important_host_flags) _cuda_get_important_host_flags(important_host_flags "${CMAKE_${CUDA_C_OR_CXX}_FLAGS_${config_upper}}") foreach(f ${important_host_flags}) list(APPEND flags $<$<CONFIG:${config}>:-Xcompiler> $<$<CONFIG:${config}>:${f}>) endforeach() endforeach() # Add CMAKE_${CUDA_C_OR_CXX}_FLAGS set(important_host_flags) _cuda_get_important_host_flags(important_host_flags "${CMAKE_${CUDA_C_OR_CXX}_FLAGS}") foreach(f ${important_host_flags}) list(APPEND flags -Xcompiler ${f}) endforeach() # Add our general CUDA_NVCC_FLAGS with the configuration specifig flags set(nvcc_flags ${CUDA_NVCC_FLAGS} ${config_specific_flags} ${nvcc_flags}) file(RELATIVE_PATH output_file_relative_path "${CMAKE_BINARY_DIR}" "${output_file}") # Some generators don't handle the multiple levels of custom command # dependencies correctly (obj1 depends on file1, obj2 depends on obj1), so # we work around that issue by compiling the intermediate link object as a # pre-link custom command in that situation. set(do_obj_build_rule TRUE) if (MSVC_VERSION GREATER 1599 AND MSVC_VERSION LESS 1800) # VS 2010 and 2012 have this problem. set(do_obj_build_rule FALSE) endif() set(_verbatim VERBATIM) if(nvcc_flags MATCHES "\\$\\(VCInstallDir\\)") set(_verbatim "") endif() if (do_obj_build_rule) add_custom_command( OUTPUT ${output_file} DEPENDS ${object_files} COMMAND ${CUDA_NVCC_EXECUTABLE} ${nvcc_flags} -dlink ${object_files} -o ${output_file} ${flags} COMMENT "Building NVCC intermediate link file ${output_file_relative_path}" COMMAND_EXPAND_LISTS ${_verbatim} ) else() get_filename_component(output_file_dir "${output_file}" DIRECTORY) add_custom_command( TARGET ${cuda_target} PRE_LINK COMMAND ${CMAKE_COMMAND} -E echo "Building NVCC intermediate link file ${output_file_relative_path}" COMMAND ${CMAKE_COMMAND} -E make_directory "${output_file_dir}" COMMAND ${CUDA_NVCC_EXECUTABLE} ${nvcc_flags} ${flags} -dlink ${object_files} -o "${output_file}" COMMAND_EXPAND_LISTS ${_verbatim} ) endif() endif() endfunction() ############################################################################### ############################################################################### # ADD LIBRARY ############################################################################### ############################################################################### macro(CUDA_ADD_LIBRARY cuda_target) CUDA_ADD_CUDA_INCLUDE_ONCE() # Separate the sources from the options CUDA_GET_SOURCES_AND_OPTIONS(_sources _cmake_options _options ${ARGN}) CUDA_BUILD_SHARED_LIBRARY(_cuda_shared_flag ${ARGN}) # Create custom commands and targets for each file. CUDA_WRAP_SRCS( ${cuda_target} OBJ _generated_files ${_sources} ${_cmake_options} ${_cuda_shared_flag} OPTIONS ${_options} ) # Compute the file name of the intermedate link file used for separable # compilation. CUDA_COMPUTE_SEPARABLE_COMPILATION_OBJECT_FILE_NAME(link_file ${cuda_target} "${${cuda_target}_SEPARABLE_COMPILATION_OBJECTS}") # Add the library. 
add_library(${cuda_target} ${_cmake_options} ${_generated_files} ${_sources} ${link_file} ) # Add a link phase for the separable compilation if it has been enabled. If # it has been enabled then the ${cuda_target}_SEPARABLE_COMPILATION_OBJECTS # variable will have been defined. CUDA_LINK_SEPARABLE_COMPILATION_OBJECTS("${link_file}" ${cuda_target} "${_options}" "${${cuda_target}_SEPARABLE_COMPILATION_OBJECTS}") target_link_libraries(${cuda_target} ${CUDA_LINK_LIBRARIES_KEYWORD} ${CUDA_LIBRARIES} ) if(CUDA_SEPARABLE_COMPILATION) target_link_libraries(${cuda_target} ${CUDA_LINK_LIBRARIES_KEYWORD} ${CUDA_cudadevrt_LIBRARY} ) endif() # We need to set the linker language based on what the expected generated file # would be. CUDA_C_OR_CXX is computed based on CUDA_HOST_COMPILATION_CPP. set_target_properties(${cuda_target} PROPERTIES LINKER_LANGUAGE ${CUDA_C_OR_CXX} ) endmacro() ############################################################################### ############################################################################### # ADD EXECUTABLE ############################################################################### ############################################################################### macro(CUDA_ADD_EXECUTABLE cuda_target) CUDA_ADD_CUDA_INCLUDE_ONCE() # Separate the sources from the options CUDA_GET_SOURCES_AND_OPTIONS(_sources _cmake_options _options ${ARGN}) # Create custom commands and targets for each file. CUDA_WRAP_SRCS( ${cuda_target} OBJ _generated_files ${_sources} OPTIONS ${_options} ) # Compute the file name of the intermedate link file used for separable # compilation. CUDA_COMPUTE_SEPARABLE_COMPILATION_OBJECT_FILE_NAME(link_file ${cuda_target} "${${cuda_target}_SEPARABLE_COMPILATION_OBJECTS}") # Add the library. add_executable(${cuda_target} ${_cmake_options} ${_generated_files} ${_sources} ${link_file} ) # Add a link phase for the separable compilation if it has been enabled. If # it has been enabled then the ${cuda_target}_SEPARABLE_COMPILATION_OBJECTS # variable will have been defined. CUDA_LINK_SEPARABLE_COMPILATION_OBJECTS("${link_file}" ${cuda_target} "${_options}" "${${cuda_target}_SEPARABLE_COMPILATION_OBJECTS}") target_link_libraries(${cuda_target} ${CUDA_LINK_LIBRARIES_KEYWORD} ${CUDA_LIBRARIES} ) # We need to set the linker language based on what the expected generated file # would be. CUDA_C_OR_CXX is computed based on CUDA_HOST_COMPILATION_CPP. set_target_properties(${cuda_target} PROPERTIES LINKER_LANGUAGE ${CUDA_C_OR_CXX} ) endmacro() ############################################################################### ############################################################################### # (Internal) helper for manually added cuda source files with specific targets ############################################################################### ############################################################################### macro(cuda_compile_base cuda_target format generated_files) # Update a counter in this directory, to keep phony target names unique. set(_cuda_target "${cuda_target}") get_property(_counter DIRECTORY PROPERTY _cuda_internal_phony_counter) if(_counter) math(EXPR _counter "${_counter} + 1") else() set(_counter 1) endif() string(APPEND _cuda_target "_${_counter}") set_property(DIRECTORY PROPERTY _cuda_internal_phony_counter ${_counter}) # Separate the sources from the options CUDA_GET_SOURCES_AND_OPTIONS(_sources _cmake_options _options ${ARGN}) # Create custom commands and targets for each file. 
CUDA_WRAP_SRCS( ${_cuda_target} ${format} _generated_files ${_sources} ${_cmake_options} OPTIONS ${_options} PHONY) set( ${generated_files} ${_generated_files}) endmacro() ############################################################################### ############################################################################### # CUDA COMPILE ############################################################################### ############################################################################### macro(CUDA_COMPILE generated_files) cuda_compile_base(cuda_compile OBJ ${generated_files} ${ARGN}) endmacro() ############################################################################### ############################################################################### # CUDA COMPILE PTX ############################################################################### ############################################################################### macro(CUDA_COMPILE_PTX generated_files) cuda_compile_base(cuda_compile_ptx PTX ${generated_files} ${ARGN}) endmacro() ############################################################################### ############################################################################### # CUDA COMPILE FATBIN ############################################################################### ############################################################################### macro(CUDA_COMPILE_FATBIN generated_files) cuda_compile_base(cuda_compile_fatbin FATBIN ${generated_files} ${ARGN}) endmacro() ############################################################################### ############################################################################### # CUDA COMPILE CUBIN ############################################################################### ############################################################################### macro(CUDA_COMPILE_CUBIN generated_files) cuda_compile_base(cuda_compile_cubin CUBIN ${generated_files} ${ARGN}) endmacro() ############################################################################### ############################################################################### # CUDA ADD CUFFT TO TARGET ############################################################################### ############################################################################### macro(CUDA_ADD_CUFFT_TO_TARGET target) if (CUDA_BUILD_EMULATION) target_link_libraries(${target} ${CUDA_LINK_LIBRARIES_KEYWORD} ${CUDA_cufftemu_LIBRARY}) else() target_link_libraries(${target} ${CUDA_LINK_LIBRARIES_KEYWORD} ${CUDA_cufft_LIBRARY}) endif() endmacro() ############################################################################### ############################################################################### # CUDA ADD CUBLAS TO TARGET ############################################################################### ############################################################################### macro(CUDA_ADD_CUBLAS_TO_TARGET target) if (CUDA_BUILD_EMULATION) target_link_libraries(${target} ${CUDA_LINK_LIBRARIES_KEYWORD} ${CUDA_cublasemu_LIBRARY}) else() target_link_libraries(${target} ${CUDA_LINK_LIBRARIES_KEYWORD} ${CUDA_cublas_LIBRARY} ${CUDA_cublas_device_LIBRARY} ${CUDA_cublasLt_LIBRARY}) endif() endmacro() ############################################################################### ############################################################################### # CUDA BUILD CLEAN TARGET 
############################################################################### ############################################################################### macro(CUDA_BUILD_CLEAN_TARGET) # Call this after you add all your CUDA targets, and you will get a # convenience target. You should also make clean after running this target # to get the build system to generate all the code again. set(cuda_clean_target_name clean_cuda_depends) if (CMAKE_GENERATOR MATCHES "Visual Studio") string(TOUPPER ${cuda_clean_target_name} cuda_clean_target_name) endif() add_custom_target(${cuda_clean_target_name} COMMAND ${CMAKE_COMMAND} -E remove ${CUDA_ADDITIONAL_CLEAN_FILES}) # Clear out the variable, so the next time we configure it will be empty. # This is useful so that the files won't persist in the list after targets # have been removed. set(CUDA_ADDITIONAL_CLEAN_FILES "" CACHE INTERNAL "List of intermediate files that are part of the cuda dependency scanning.") endmacro() ```
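The macros above cover the whole classic FindCUDA workflow (wrapping sources, separable-compilation linking, and the cuBLAS/cuFFT helpers). As a rough usage sketch only, with an invented project, target, and source names, a consumer CMakeLists.txt might drive them like this:

```cmake
# Hypothetical consumer CMakeLists.txt -- a minimal sketch, not taken from this
# repository. Project, target, and source names are invented; only the macro
# and variable names come from the FindCUDA module above.
cmake_minimum_required(VERSION 3.5)
project(demo_cuda_project CXX)

find_package(CUDA REQUIRED)

# Optional knobs honoured by CUDA_WRAP_SRCS and the separable-compilation link step.
set(CUDA_SEPARABLE_COMPILATION ON)
list(APPEND CUDA_NVCC_FLAGS --use_fast_math)

# Build a library and an executable from mixed .cu / .cpp sources.
cuda_add_library(demo_kernels kernels.cu OPTIONS -DDEMO_DEVICE_DEFINE)
cuda_add_executable(demo_app main.cu host_code.cpp)
target_link_libraries(demo_app demo_kernels)

# Helpers defined above for the common CUDA math libraries.
cuda_add_cublas_to_target(demo_app)
cuda_add_cufft_to_target(demo_app)

# Convenience target that deletes the generated dependency-scan files.
cuda_build_clean_target()
```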
==================================================================================================================================================================== SOURCE CODE FILE: make2cmake.cmake LINES: 7 SIZE: 3.94 KB PATH: scripts\freecad_env\Lib\site-packages\torch\share\cmake\Caffe2\Modules_CUDA_fix\upstream\FindCUDA\make2cmake.cmake ENCODING: utf-8 ```cmake # James Bigler, NVIDIA Corp (nvidia.com - jbigler) # Abe Stephens, SCI Institute -- http://www.sci.utah.edu/~abe/FindCuda.html # # Copyright (c) 2008 - 2009 NVIDIA Corporation. All rights reserved. # # Copyright (c) 2007-2009 # Scientific Computing and Imaging Institute, University of Utah # # This code is licensed under the MIT License. See the FindCUDA.cmake script # for the text of the license. # The MIT License # # License for the specific language governing rights and limitations under # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. # ####################################################################### # This converts a file written in makefile syntax into one that can be included # by CMake. # Input variables # # verbose:BOOL=<> OFF: Be as quiet as possible (default) # ON : Extra output # # input_file:FILEPATH=<> Path to dependency file in makefile format # # output_file:FILEPATH=<> Path to file with dependencies in CMake readable variable # file(READ ${input_file} depend_text) if (NOT "${depend_text}" STREQUAL "") # message("FOUND DEPENDS") string(REPLACE "\\ " " " depend_text ${depend_text}) # This works for the nvcc -M generated dependency files. string(REGEX REPLACE "^.* : " "" depend_text ${depend_text}) string(REGEX REPLACE "[ \\\\]*\n" ";" depend_text ${depend_text}) set(dependency_list "") foreach(file ${depend_text}) string(REGEX REPLACE "^ +" "" file ${file}) # OK, now if we had a UNC path, nvcc has a tendency to only output the first '/' # instead of '//'. Here we will test to see if the file exists, if it doesn't then # try to prepend another '/' to the path and test again. If it still fails remove the # path. if(NOT EXISTS "${file}") if (EXISTS "/${file}") set(file "/${file}") else() if(verbose) message(WARNING " Removing non-existent dependency file: ${file}") endif() set(file "") endif() endif() # Make sure we check to see if we have a file, before asking if it is not a directory. # if(NOT IS_DIRECTORY "") will return TRUE. if(file AND NOT IS_DIRECTORY "${file}") # If softlinks start to matter, we should change this to REALPATH. 
For now we need # to flatten paths, because nvcc can generate stuff like /bin/../include instead of # just /include. get_filename_component(file_absolute "${file}" ABSOLUTE) list(APPEND dependency_list "${file_absolute}") endif() endforeach() else() # message("FOUND NO DEPENDS") endif() # Remove the duplicate entries and sort them. list(REMOVE_DUPLICATES dependency_list) list(SORT dependency_list) foreach(file ${dependency_list}) string(APPEND cuda_nvcc_depend " \"${file}\"\n") endforeach() file(WRITE ${output_file} "# Generated by: make2cmake.cmake\nSET(CUDA_NVCC_DEPEND\n ${cuda_nvcc_depend})\n\n") ```
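make2cmake.cmake runs in CMake script mode: it converts the makefile-style dependency list produced by `nvcc -M` into a `CUDA_NVCC_DEPEND` variable that CMake can `include()`. A minimal sketch of the invocation, with placeholder file names (run_nvcc.cmake further below drives it the same way):

```cmake
# Minimal script-mode call; kernel.cu.NVCC-depend and kernel.cu.depend.tmp are
# placeholder names for the nvcc -M output and the converted result.
execute_process(
  COMMAND "${CMAKE_COMMAND}"
          -D "input_file:FILEPATH=kernel.cu.NVCC-depend"
          -D "output_file:FILEPATH=kernel.cu.depend.tmp"
          -D "verbose=ON"
          -P "make2cmake.cmake")
```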
===================================================================================================================================================================== SOURCE CODE FILE: parse_cubin.cmake LINES: 3 SIZE: 3.46 KB PATH: scripts\freecad_env\Lib\site-packages\torch\share\cmake\Caffe2\Modules_CUDA_fix\upstream\FindCUDA\parse_cubin.cmake ENCODING: utf-8 ```cmake # James Bigler, NVIDIA Corp (nvidia.com - jbigler) # Abe Stephens, SCI Institute -- http://www.sci.utah.edu/~abe/FindCuda.html # # Copyright (c) 2008 - 2009 NVIDIA Corporation. All rights reserved. # # Copyright (c) 2007-2009 # Scientific Computing and Imaging Institute, University of Utah # # This code is licensed under the MIT License. See the FindCUDA.cmake script # for the text of the license. # The MIT License # # License for the specific language governing rights and limitations under # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. # ####################################################################### # Parses a .cubin file produced by nvcc and reports statistics about the file. file(READ ${input_file} file_text) if (NOT "${file_text}" STREQUAL "") string(REPLACE ";" "\\;" file_text ${file_text}) string(REPLACE "\ncode" ";code" file_text ${file_text}) list(LENGTH file_text len) foreach(line ${file_text}) # Only look at "code { }" blocks. if(line MATCHES "^code") # Break into individual lines. string(REGEX REPLACE "\n" ";" line ${line}) foreach(entry ${line}) # Extract kernel names. if (${entry} MATCHES "[^g]name = ([^ ]+)") set(entry "${CMAKE_MATCH_1}") # Check to see if the kernel name starts with "_" set(skip FALSE) # if (${entry} MATCHES "^_") # Skip the rest of this block. # message("Skipping ${entry}") # set(skip TRUE) # else () message("Kernel: ${entry}") # endif () endif() # Skip the rest of the block if necessary if(NOT skip) # Registers if (${entry} MATCHES "reg([ ]+)=([ ]+)([^ ]+)") set(entry "${CMAKE_MATCH_3}") message("Registers: ${entry}") endif() # Local memory if (${entry} MATCHES "lmem([ ]+)=([ ]+)([^ ]+)") set(entry "${CMAKE_MATCH_3}") message("Local: ${entry}") endif() # Shared memory if (${entry} MATCHES "smem([ ]+)=([ ]+)([^ ]+)") set(entry "${CMAKE_MATCH_3}") message("Shared: ${entry}") endif() if (${entry} MATCHES "^}") message("") endif() endif() endforeach() endif() endforeach() else() # message("FOUND NO DEPENDS") endif() ```
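parse_cubin.cmake is another script-mode helper: run_nvcc.cmake feeds it the `-cubin` output when `build_cubin` is enabled, and it prints each kernel's name together with the register, local-memory, and shared-memory figures found in the `code { }` blocks. A minimal sketch of the call, with a placeholder input path:

```cmake
# Placeholder path; run_nvcc.cmake passes the .cubin file it just generated.
execute_process(
  COMMAND "${CMAKE_COMMAND}"
          -D "input_file:STRING=kernel.cu.cubin.txt"
          -P "parse_cubin.cmake")
```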
================================================================================================================================================================== SOURCE CODE FILE: run_nvcc.cmake LINES: 1 SIZE: 11.83 KB PATH: scripts\freecad_env\Lib\site-packages\torch\share\cmake\Caffe2\Modules_CUDA_fix\upstream\FindCUDA\run_nvcc.cmake ENCODING: utf-8 ```cmake # James Bigler, NVIDIA Corp (nvidia.com - jbigler) # # Copyright (c) 2008 - 2009 NVIDIA Corporation. All rights reserved. # # This code is licensed under the MIT License. See the FindCUDA.cmake script # for the text of the license. # The MIT License # # License for the specific language governing rights and limitations under # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. ########################################################################## # This file runs the nvcc commands to produce the desired output file along with # the dependency file needed by CMake to compute dependencies. In addition the # file checks the output of each command and if the command fails it deletes the # output files. # Input variables # # verbose:BOOL=<> OFF: Be as quiet as possible (default) # ON : Describe each step # # build_configuration:STRING=<> Typically one of Debug, MinSizeRel, Release, or # RelWithDebInfo, but it should match one of the # entries in CUDA_HOST_FLAGS. This is the build # configuration used when compiling the code. If # blank or unspecified Debug is assumed as this is # what CMake does. # # generated_file:STRING=<> File to generate. This argument must be passed in. # # generated_cubin_file:STRING=<> File to generate. This argument must be passed # in if build_cubin is true. cmake_policy(PUSH) cmake_policy(SET CMP0007 NEW) cmake_policy(SET CMP0010 NEW) if(NOT generated_file) message(FATAL_ERROR "You must specify generated_file on the command line") endif() # Set these up as variables to make reading the generated file easier set(CMAKE_COMMAND "@CMAKE_COMMAND@") # path set(source_file "@source_file@") # path set(NVCC_generated_dependency_file "@NVCC_generated_dependency_file@") # path set(cmake_dependency_file "@cmake_dependency_file@") # path set(CUDA_make2cmake "@CUDA_make2cmake@") # path set(CUDA_parse_cubin "@CUDA_parse_cubin@") # path set(build_cubin @build_cubin@) # bool set(CUDA_HOST_COMPILER "@CUDA_HOST_COMPILER@") # path # We won't actually use these variables for now, but we need to set this, in # order to force this file to be run again if it changes. 
set(generated_file_path "@generated_file_path@") # path set(generated_file_internal "@generated_file@") # path set(generated_cubin_file_internal "@generated_cubin_file@") # path set(CUDA_NVCC_EXECUTABLE "@CUDA_NVCC_EXECUTABLE@") # path set(CUDA_NVCC_FLAGS @CUDA_NVCC_FLAGS@ ;; @CUDA_WRAP_OPTION_NVCC_FLAGS@) # list @CUDA_NVCC_FLAGS_CONFIG@ set(nvcc_flags @nvcc_flags@) # list set(CUDA_NVCC_INCLUDE_DIRS [==[@CUDA_NVCC_INCLUDE_DIRS@]==]) # list (needs to be in lua quotes to address backslashes) string(REPLACE "\\" "/" CUDA_NVCC_INCLUDE_DIRS "${CUDA_NVCC_INCLUDE_DIRS}") set(CUDA_NVCC_COMPILE_DEFINITIONS [==[@CUDA_NVCC_COMPILE_DEFINITIONS@]==]) # list (needs to be in lua quotes see #16510 ). set(format_flag "@format_flag@") # string set(cuda_language_flag @cuda_language_flag@) # list # Clean up list of include directories and add -I flags list(REMOVE_DUPLICATES CUDA_NVCC_INCLUDE_DIRS) set(CUDA_NVCC_INCLUDE_ARGS) foreach(dir ${CUDA_NVCC_INCLUDE_DIRS}) # Extra quotes are added around each flag to help nvcc parse out flags with spaces. list(APPEND CUDA_NVCC_INCLUDE_ARGS "-I${dir}") endforeach() # Clean up list of compile definitions, add -D flags, and append to nvcc_flags list(REMOVE_DUPLICATES CUDA_NVCC_COMPILE_DEFINITIONS) foreach(def ${CUDA_NVCC_COMPILE_DEFINITIONS}) list(APPEND nvcc_flags "-D${def}") endforeach() if(build_cubin AND NOT generated_cubin_file) message(FATAL_ERROR "You must specify generated_cubin_file on the command line") endif() # This is the list of host compilation flags. It C or CXX should already have # been chosen by FindCUDA.cmake. @CUDA_HOST_FLAGS@ # Take the compiler flags and package them up to be sent to the compiler via -Xcompiler set(nvcc_host_compiler_flags "") # If we weren't given a build_configuration, use Debug. if(NOT build_configuration) set(build_configuration Debug) endif() string(TOUPPER "${build_configuration}" build_configuration) #message("CUDA_NVCC_HOST_COMPILER_FLAGS = ${CUDA_NVCC_HOST_COMPILER_FLAGS}") foreach(flag ${CMAKE_HOST_FLAGS} ${CMAKE_HOST_FLAGS_${build_configuration}}) # Extra quotes are added around each flag to help nvcc parse out flags with spaces. string(APPEND nvcc_host_compiler_flags ",\"${flag}\"") endforeach() if (nvcc_host_compiler_flags) set(nvcc_host_compiler_flags "-Xcompiler" ${nvcc_host_compiler_flags}) endif() #message("nvcc_host_compiler_flags = \"${nvcc_host_compiler_flags}\"") # Add the build specific configuration flags list(APPEND CUDA_NVCC_FLAGS ${CUDA_NVCC_FLAGS_${build_configuration}}) # Any -ccbin existing in CUDA_NVCC_FLAGS gets highest priority list( FIND CUDA_NVCC_FLAGS "-ccbin" ccbin_found0 ) list( FIND CUDA_NVCC_FLAGS "--compiler-bindir" ccbin_found1 ) if( ccbin_found0 LESS 0 AND ccbin_found1 LESS 0 AND CUDA_HOST_COMPILER ) if (CUDA_HOST_COMPILER STREQUAL "@_CUDA_MSVC_HOST_COMPILER@" AND DEFINED CCBIN) set(CCBIN -ccbin "${CCBIN}") else() set(CCBIN -ccbin "${CUDA_HOST_COMPILER}") endif() endif() # cuda_execute_process - Executes a command with optional command echo and status message. # # status - Status message to print if verbose is true # command - COMMAND argument from the usual execute_process argument structure # ARGN - Remaining arguments are the command with arguments # # CUDA_result - return value from running the command # # Make this a macro instead of a function, so that things like RESULT_VARIABLE # and other return variables are present after executing the process. 
macro(cuda_execute_process status command) set(_command ${command}) if(NOT "x${_command}" STREQUAL "xCOMMAND") message(FATAL_ERROR "Malformed call to cuda_execute_process. Missing COMMAND as second argument. (command = ${command})") endif() if(verbose) execute_process(COMMAND "${CMAKE_COMMAND}" -E echo -- ${status}) # Now we need to build up our command string. We are accounting for quotes # and spaces, anything else is left up to the user to fix if they want to # copy and paste a runnable command line. set(cuda_execute_process_string) foreach(arg ${ARGN}) # If there are quotes, excape them, so they come through. string(REPLACE "\"" "\\\"" arg ${arg}) # Args with spaces need quotes around them to get them to be parsed as a single argument. if(arg MATCHES " ") list(APPEND cuda_execute_process_string "\"${arg}\"") else() list(APPEND cuda_execute_process_string ${arg}) endif() endforeach() # Echo the command execute_process(COMMAND ${CMAKE_COMMAND} -E echo ${cuda_execute_process_string}) endif() # Run the command execute_process(COMMAND ${ARGN} RESULT_VARIABLE CUDA_result ) endmacro() # Delete the target file cuda_execute_process( "Removing ${generated_file}" COMMAND "${CMAKE_COMMAND}" -E remove "${generated_file}" ) # For CUDA 2.3 and below, -G -M doesn't work, so remove the -G flag # for dependency generation and hope for the best. set(depends_CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS}") set(CUDA_VERSION @CUDA_VERSION@) # nvcc doesn't define __CUDACC__ for some reason when generating dependency files. This # can cause incorrect dependencies when #including files based on this macro which is # defined in the generating passes of nvcc invocation. We will go ahead and manually # define this for now until a future version fixes this bug. set(CUDACC_DEFINE -D__CUDACC__) # Generate the dependency file cuda_execute_process( "Generating dependency file: ${NVCC_generated_dependency_file}" COMMAND "${CUDA_NVCC_EXECUTABLE}" -M ${CUDACC_DEFINE} "${source_file}" -o "${NVCC_generated_dependency_file}" ${CCBIN} ${nvcc_flags} ${nvcc_host_compiler_flags} ${depends_CUDA_NVCC_FLAGS} -DNVCC ${CUDA_NVCC_INCLUDE_ARGS} ) if(CUDA_result) message(FATAL_ERROR "Error generating ${generated_file}") endif() # Generate the cmake readable dependency file to a temp file. Don't put the # quotes just around the filenames for the input_file and output_file variables. # CMake will pass the quotes through and not be able to find the file. 
cuda_execute_process( "Generating temporary cmake readable file: ${cmake_dependency_file}.tmp" COMMAND "${CMAKE_COMMAND}" -D "input_file:FILEPATH=${NVCC_generated_dependency_file}" -D "output_file:FILEPATH=${cmake_dependency_file}.tmp" -D "verbose=${verbose}" -P "${CUDA_make2cmake}" ) if(CUDA_result) message(FATAL_ERROR "Error generating ${generated_file}") endif() # Copy the file if it is different cuda_execute_process( "Copy if different ${cmake_dependency_file}.tmp to ${cmake_dependency_file}" COMMAND "${CMAKE_COMMAND}" -E copy_if_different "${cmake_dependency_file}.tmp" "${cmake_dependency_file}" ) if(CUDA_result) message(FATAL_ERROR "Error generating ${generated_file}") endif() # Delete the temporary file cuda_execute_process( "Removing ${cmake_dependency_file}.tmp and ${NVCC_generated_dependency_file}" COMMAND "${CMAKE_COMMAND}" -E remove "${cmake_dependency_file}.tmp" "${NVCC_generated_dependency_file}" ) if(CUDA_result) message(FATAL_ERROR "Error generating ${generated_file}") endif() # Generate the code cuda_execute_process( "Generating ${generated_file}" COMMAND "${CUDA_NVCC_EXECUTABLE}" "${source_file}" ${cuda_language_flag} ${format_flag} -o "${generated_file}" ${CCBIN} ${nvcc_flags} ${nvcc_host_compiler_flags} ${CUDA_NVCC_FLAGS} -DNVCC ${CUDA_NVCC_INCLUDE_ARGS} ) if(CUDA_result) # Since nvcc can sometimes leave half done files make sure that we delete the output file. cuda_execute_process( "Removing ${generated_file}" COMMAND "${CMAKE_COMMAND}" -E remove "${generated_file}" ) message(FATAL_ERROR "Error generating file ${generated_file}") else() if(verbose) message("Generated ${generated_file} successfully.") endif() endif() # Cubin resource report commands. if( build_cubin ) # Run with -cubin to produce resource usage report. cuda_execute_process( "Generating ${generated_cubin_file}" COMMAND "${CUDA_NVCC_EXECUTABLE}" "${source_file}" ${CUDA_NVCC_FLAGS} ${nvcc_flags} ${CCBIN} ${nvcc_host_compiler_flags} -DNVCC -cubin -o "${generated_cubin_file}" ${CUDA_NVCC_INCLUDE_ARGS} ) # Execute the parser script. cuda_execute_process( "Executing the parser script" COMMAND "${CMAKE_COMMAND}" -D "input_file:STRING=${generated_cubin_file}" -P "${CUDA_parse_cubin}" ) endif() cmake_policy(POP) ```
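run_nvcc.cmake is a template: FindCUDA.cmake substitutes the `@...@` placeholders with `configure_file()` and then executes the configured copy once per CUDA source from a custom command. The sketch below only approximates that driving rule (the real one is generated inside CUDA_WRAP_SRCS, which is not part of this excerpt); every path and variable shown is a placeholder:

```cmake
# Rough sketch only -- FindCUDA.cmake emits the real rule; all names here are
# placeholders standing in for the values it computes per source file.
add_custom_command(
  OUTPUT  "${generated_file}"
  DEPENDS "${source_file}"
  COMMAND ${CMAKE_COMMAND}
          -D "verbose:BOOL=${CUDA_VERBOSE_BUILD}"
          -D "build_configuration:STRING=${CMAKE_BUILD_TYPE}"
          -D "generated_file:STRING=${generated_file}"
          -P "${custom_target_script}"   # the configured copy of run_nvcc.cmake
  COMMENT "Building NVCC object ${generated_file}")
```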
============================================================================================================================================================================= SOURCE CODE FILE: select_compute_arch.cmake LINES: 16 SIZE: 11.59 KB PATH: scripts\freecad_env\Lib\site-packages\torch\share\cmake\Caffe2\Modules_CUDA_fix\upstream\FindCUDA\select_compute_arch.cmake ENCODING: utf-8 ```cmake # Synopsis: # CUDA_SELECT_NVCC_ARCH_FLAGS(out_variable [target_CUDA_architectures]) # -- Selects GPU arch flags for nvcc based on target_CUDA_architectures # target_CUDA_architectures : Auto | Common | All | LIST(ARCH_AND_PTX ...) # - "Auto" detects local machine GPU compute arch at runtime. # - "Common" and "All" cover common and entire subsets of architectures # ARCH_AND_PTX : NAME | NUM.NUM | NUM.NUM(NUM.NUM) | NUM.NUM+PTX # NAME: Kepler Maxwell Kepler+Tegra Kepler+Tesla Maxwell+Tegra Pascal Volta Turing Ampere # NUM: Any number. Only those pairs are currently accepted by NVCC though: # 3.5 3.7 5.0 5.2 5.3 6.0 6.2 7.0 7.2 7.5 8.0 # Returns LIST of flags to be added to CUDA_NVCC_FLAGS in ${out_variable} # Additionally, sets ${out_variable}_readable to the resulting numeric list # Example: # CUDA_SELECT_NVCC_ARCH_FLAGS(ARCH_FLAGS 3.0 3.5+PTX 5.2(5.0) Maxwell) # LIST(APPEND CUDA_NVCC_FLAGS ${ARCH_FLAGS}) # # More info on CUDA architectures: https://en.wikipedia.org/wiki/CUDA # if(CMAKE_CUDA_COMPILER_LOADED) # CUDA as a language if(CMAKE_CUDA_COMPILER_ID STREQUAL "NVIDIA" AND CMAKE_CUDA_COMPILER_VERSION MATCHES "^([0-9]+\\.[0-9]+)") set(CUDA_VERSION "${CMAKE_MATCH_1}") endif() endif() # See: https://docs.nvidia.com/cuda/cuda-compiler-driver-nvcc/index.html#gpu-feature-list # This list will be used for CUDA_ARCH_NAME = All option set(CUDA_KNOWN_GPU_ARCHITECTURES "Kepler" "Maxwell") # This list will be used for CUDA_ARCH_NAME = Common option (enabled by default) set(CUDA_COMMON_GPU_ARCHITECTURES "3.5" "5.0") # This list is used to filter CUDA archs when autodetecting set(CUDA_ALL_GPU_ARCHITECTURES "3.5" "5.0") if(CUDA_VERSION VERSION_GREATER "10.5") list(APPEND CUDA_KNOWN_GPU_ARCHITECTURES "Ampere") list(APPEND CUDA_COMMON_GPU_ARCHITECTURES "8.0") list(APPEND CUDA_ALL_GPU_ARCHITECTURES "8.0") if(CUDA_VERSION VERSION_LESS "11.1") set(CUDA_LIMIT_GPU_ARCHITECTURE "8.0") list(APPEND CUDA_COMMON_GPU_ARCHITECTURES "8.0+PTX") endif() endif() if(NOT CUDA_VERSION VERSION_LESS "11.1") list(APPEND CUDA_COMMON_GPU_ARCHITECTURES "8.6") list(APPEND CUDA_ALL_GPU_ARCHITECTURES "8.6") set(CUDA_LIMIT_GPU_ARCHITECUTRE "8.6") if(CUDA_VERSION VERSION_LESS "11.8") set(CUDA_LIMIT_GPU_ARCHITECTURE "8.9") list(APPEND CUDA_COMMON_GPU_ARCHITECTURES "8.6+PTX") endif() endif() if(NOT CUDA_VERSION VERSION_LESS "11.8") list(APPEND CUDA_KNOWN_GPU_ARCHITECTURES "Ada") list(APPEND CUDA_KNOWN_GPU_ARCHITECTURES "Hopper") list(APPEND CUDA_COMMON_GPU_ARCHITECTURES "8.9") list(APPEND CUDA_COMMON_GPU_ARCHITECTURES "9.0") list(APPEND CUDA_ALL_GPU_ARCHITECTURES "8.9") list(APPEND CUDA_ALL_GPU_ARCHITECTURES "9.0") if(CUDA_VERSION VERSION_LESS "12.0") set(CUDA_LIMIT_GPU_ARCHITECTURE "9.0") list(APPEND CUDA_COMMON_GPU_ARCHITECTURES "8.9+PTX") list(APPEND CUDA_COMMON_GPU_ARCHITECTURES "9.0+PTX") endif() endif() if(NOT CUDA_VERSION VERSION_LESS "12.0") list(APPEND CUDA_COMMON_GPU_ARCHITECTURES "9.0a") list(APPEND CUDA_ALL_GPU_ARCHITECTURES "9.0a") list(REMOVE_ITEM CUDA_COMMON_GPU_ARCHITECTURES "3.5") list(REMOVE_ITEM CUDA_ALL_GPU_ARCHITECTURES "3.5") endif() if(CUDA_VERSION VERSION_GREATER "12.6") list(APPEND CUDA_KNOWN_GPU_ARCHITECTURES 
"Blackwell") list(APPEND CUDA_COMMON_GPU_ARCHITECTURES "10.0") list(APPEND CUDA_COMMON_GPU_ARCHITECTURES "10.0a") list(APPEND CUDA_COMMON_GPU_ARCHITECTURES "10.1a") list(APPEND CUDA_COMMON_GPU_ARCHITECTURES "12.0") list(APPEND CUDA_COMMON_GPU_ARCHITECTURES "12.0a") list(APPEND CUDA_ALL_GPU_ARCHITECTURES "10.0") list(APPEND CUDA_ALL_GPU_ARCHITECTURES "10.0a") list(APPEND CUDA_ALL_GPU_ARCHITECTURES "10.1a") list(APPEND CUDA_ALL_GPU_ARCHITECTURES "12.0") list(APPEND CUDA_ALL_GPU_ARCHITECTURES "12.0a") endif() ################################################################################################ # A function for automatic detection of GPUs installed (if autodetection is enabled) # Usage: # CUDA_DETECT_INSTALLED_GPUS(OUT_VARIABLE) # function(CUDA_DETECT_INSTALLED_GPUS OUT_VARIABLE) if(NOT CUDA_GPU_DETECT_OUTPUT) if(CMAKE_CUDA_COMPILER_LOADED) # CUDA as a language set(file "${PROJECT_BINARY_DIR}/detect_cuda_compute_capabilities.cu") else() set(file "${PROJECT_BINARY_DIR}/detect_cuda_compute_capabilities.cpp") endif() file(WRITE ${file} "" "#include <cuda_runtime.h>\n" "#include <cstdio>\n" "int main()\n" "{\n" " int count = 0;\n" " if (cudaSuccess != cudaGetDeviceCount(&count)) return -1;\n" " if (count == 0) return -1;\n" " for (int device = 0; device < count; ++device)\n" " {\n" " cudaDeviceProp prop;\n" " if (cudaSuccess == cudaGetDeviceProperties(&prop, device))\n" " std::printf(\"%d.%d \", prop.major, prop.minor);\n" " }\n" " return 0;\n" "}\n") if(CMAKE_CUDA_COMPILER_LOADED) # CUDA as a language try_run(run_result compile_result ${PROJECT_BINARY_DIR} ${file} RUN_OUTPUT_VARIABLE compute_capabilities) else() try_run(run_result compile_result ${PROJECT_BINARY_DIR} ${file} CMAKE_FLAGS "-DINCLUDE_DIRECTORIES=${CUDA_INCLUDE_DIRS}" LINK_LIBRARIES ${CUDA_LIBRARIES} RUN_OUTPUT_VARIABLE compute_capabilities) endif() # Filter unrelated content out of the output. string(REGEX MATCHALL "[0-9]+\\.[0-9]+" compute_capabilities "${compute_capabilities}") if(run_result EQUAL 0) string(REPLACE "2.1" "2.1(2.0)" compute_capabilities "${compute_capabilities}") set(CUDA_GPU_DETECT_OUTPUT ${compute_capabilities} CACHE INTERNAL "Returned GPU architectures from detect_gpus tool" FORCE) endif() endif() if(NOT CUDA_GPU_DETECT_OUTPUT) message(STATUS "Automatic GPU detection failed. 
Building for common architectures.") set(${OUT_VARIABLE} ${CUDA_COMMON_GPU_ARCHITECTURES} PARENT_SCOPE) else() # Filter based on CUDA version supported archs set(CUDA_GPU_DETECT_OUTPUT_FILTERED "") separate_arguments(CUDA_GPU_DETECT_OUTPUT) foreach(ITEM IN ITEMS ${CUDA_GPU_DETECT_OUTPUT}) if(CUDA_LIMIT_GPU_ARCHITECTURE AND (ITEM VERSION_GREATER CUDA_LIMIT_GPU_ARCHITECTURE OR ITEM VERSION_EQUAL CUDA_LIMIT_GPU_ARCHITECTURE)) list(GET CUDA_COMMON_GPU_ARCHITECTURES -1 NEWITEM) string(APPEND CUDA_GPU_DETECT_OUTPUT_FILTERED " ${NEWITEM}") else() string(APPEND CUDA_GPU_DETECT_OUTPUT_FILTERED " ${ITEM}") endif() endforeach() set(${OUT_VARIABLE} ${CUDA_GPU_DETECT_OUTPUT_FILTERED} PARENT_SCOPE) endif() endfunction() ################################################################################################ # Function for selecting GPU arch flags for nvcc based on CUDA architectures from parameter list # Usage: # SELECT_NVCC_ARCH_FLAGS(out_variable [list of CUDA compute archs]) function(CUDA_SELECT_NVCC_ARCH_FLAGS out_variable) set(CUDA_ARCH_LIST "${ARGN}") if("X${CUDA_ARCH_LIST}" STREQUAL "X" ) set(CUDA_ARCH_LIST "Auto") endif() set(cuda_arch_bin) set(cuda_arch_ptx) if("${CUDA_ARCH_LIST}" STREQUAL "All") set(CUDA_ARCH_LIST ${CUDA_KNOWN_GPU_ARCHITECTURES}) elseif("${CUDA_ARCH_LIST}" STREQUAL "Common") set(CUDA_ARCH_LIST ${CUDA_COMMON_GPU_ARCHITECTURES}) elseif("${CUDA_ARCH_LIST}" STREQUAL "Auto") CUDA_DETECT_INSTALLED_GPUS(CUDA_ARCH_LIST) message(STATUS "Autodetected CUDA architecture(s): ${CUDA_ARCH_LIST}") endif() # Now process the list and look for names string(REGEX REPLACE "[ \t]+" ";" CUDA_ARCH_LIST "${CUDA_ARCH_LIST}") list(REMOVE_DUPLICATES CUDA_ARCH_LIST) foreach(arch_name ${CUDA_ARCH_LIST}) set(arch_bin) set(arch_ptx) set(add_ptx FALSE) # Check to see if we are compiling PTX if(arch_name MATCHES "(.*)\\+PTX$") set(add_ptx TRUE) set(arch_name ${CMAKE_MATCH_1}) endif() if(arch_name MATCHES "^([0-9]+\\.[0-9]a?(\\([0-9]+\\.[0-9]\\))?)$") set(arch_bin ${CMAKE_MATCH_1}) set(arch_ptx ${arch_bin}) else() # Look for it in our list of known architectures if(${arch_name} STREQUAL "Kepler+Tesla") set(arch_bin 3.7) elseif(${arch_name} STREQUAL "Kepler") set(arch_bin 3.5) set(arch_ptx 3.5) elseif(${arch_name} STREQUAL "Maxwell+Tegra") set(arch_bin 5.3) elseif(${arch_name} STREQUAL "Maxwell") set(arch_bin 5.0 5.2) set(arch_ptx 5.2) elseif(${arch_name} STREQUAL "Pascal") set(arch_bin 6.0 6.1) set(arch_ptx 6.1) elseif(${arch_name} STREQUAL "Volta+Tegra") set(arch_bin 7.2) elseif(${arch_name} STREQUAL "Volta") set(arch_bin 7.0 7.0) set(arch_ptx 7.0) elseif(${arch_name} STREQUAL "Turing") set(arch_bin 7.5) set(arch_ptx 7.5) elseif(${arch_name} STREQUAL "Ampere+Tegra") set(arch_bin 8.7) elseif(${arch_name} STREQUAL "Ampere") set(arch_bin 8.0 8.6) set(arch_ptx 8.0 8.6) elseif(${arch_name} STREQUAL "Ada") set(arch_bin 8.9) set(arch_ptx 8.9) elseif(${arch_name} STREQUAL "Hopper") set(arch_bin 9.0) set(arch_ptx 9.0) elseif(${arch_name} STREQUAL "Blackwell+Tegra") set(arch_bin 10.1) elseif(${arch_name} STREQUAL "Blackwell") set(arch_bin 10.0 12.0) set(arch_ptx 10.0 12.0) else() message(SEND_ERROR "Found Unknown CUDA Architecture Name in CUDA_SELECT_NVCC_ARCH_FLAGS: ${arch_name} ") endif() endif() if(NOT arch_bin) message(SEND_ERROR "arch_bin wasn't set for some reason") endif() list(APPEND cuda_arch_bin ${arch_bin}) if(add_ptx) if (NOT arch_ptx) set(arch_ptx ${arch_bin}) endif() list(APPEND cuda_arch_ptx ${arch_ptx}) endif() endforeach() # remove dots and convert to lists string(REGEX REPLACE "\\." 
"" cuda_arch_bin "${cuda_arch_bin}") string(REGEX REPLACE "\\." "" cuda_arch_ptx "${cuda_arch_ptx}") string(REGEX MATCHALL "[0-9()]+a?" cuda_arch_bin "${cuda_arch_bin}") string(REGEX MATCHALL "[0-9]+a?" cuda_arch_ptx "${cuda_arch_ptx}") if(cuda_arch_bin) list(REMOVE_DUPLICATES cuda_arch_bin) endif() if(cuda_arch_ptx) list(REMOVE_DUPLICATES cuda_arch_ptx) endif() set(nvcc_flags "") set(nvcc_archs_readable "") # Tell NVCC to add binaries for the specified GPUs foreach(arch ${cuda_arch_bin}) if(arch MATCHES "([0-9]+)\\(([0-9]+)\\)") # User explicitly specified ARCH for the concrete CODE list(APPEND nvcc_flags -gencode arch=compute_${CMAKE_MATCH_2},code=sm_${CMAKE_MATCH_1}) list(APPEND nvcc_archs_readable sm_${CMAKE_MATCH_1}) else() # User didn't explicitly specify ARCH for the concrete CODE, we assume ARCH=CODE list(APPEND nvcc_flags -gencode arch=compute_${arch},code=sm_${arch}) list(APPEND nvcc_archs_readable sm_${arch}) endif() endforeach() # Tell NVCC to add PTX intermediate code for the specified architectures foreach(arch ${cuda_arch_ptx}) list(APPEND nvcc_flags -gencode arch=compute_${arch},code=compute_${arch}) list(APPEND nvcc_archs_readable compute_${arch}) endforeach() string(REPLACE ";" " " nvcc_archs_readable "${nvcc_archs_readable}") set(${out_variable} ${nvcc_flags} PARENT_SCOPE) set(${out_variable}_readable ${nvcc_archs_readable} PARENT_SCOPE) endfunction() ```
============================================================================================================================================================================== SOURCE CODE FILE: FindPackageHandleStandardArgs.cmake LINES: 4 SIZE: 14.93 KB PATH: scripts\freecad_env\Lib\site-packages\torch\share\cmake\Caffe2\Modules_CUDA_fix\upstream\FindPackageHandleStandardArgs.cmake ENCODING: utf-8 ```cmake # Distributed under the OSI-approved BSD 3-Clause License. See accompanying # file Copyright.txt or https://cmake.org/licensing for details. #[=======================================================================[.rst: FindPackageHandleStandardArgs ----------------------------- This module provides a function intended to be used in :ref:`Find Modules` implementing :command:`find_package(<PackageName>)` calls. It handles the ``REQUIRED``, ``QUIET`` and version-related arguments of ``find_package``. It also sets the ``<PackageName>_FOUND`` variable. The package is considered found if all variables listed contain valid results, e.g. valid filepaths. .. command:: find_package_handle_standard_args There are two signatures:: find_package_handle_standard_args(<PackageName> (DEFAULT_MSG|<custom-failure-message>) <required-var>... ) find_package_handle_standard_args(<PackageName> [FOUND_VAR <result-var>] [REQUIRED_VARS <required-var>...] [VERSION_VAR <version-var>] [HANDLE_COMPONENTS] [CONFIG_MODE] [FAIL_MESSAGE <custom-failure-message>] ) The ``<PackageName>_FOUND`` variable will be set to ``TRUE`` if all the variables ``<required-var>...`` are valid and any optional constraints are satisfied, and ``FALSE`` otherwise. A success or failure message may be displayed based on the results and on whether the ``REQUIRED`` and/or ``QUIET`` option was given to the :command:`find_package` call. The options are: ``(DEFAULT_MSG|<custom-failure-message>)`` In the simple signature this specifies the failure message. Use ``DEFAULT_MSG`` to ask for a default message to be computed (recommended). Not valid in the full signature. ``FOUND_VAR <result-var>`` Obsolete. Specifies either ``<PackageName>_FOUND`` or ``<PACKAGENAME>_FOUND`` as the result variable. This exists only for compatibility with older versions of CMake and is now ignored. Result variables of both names are always set for compatibility. ``REQUIRED_VARS <required-var>...`` Specify the variables which are required for this package. These may be named in the generated failure message asking the user to set the missing variable values. Therefore these should typically be cache entries such as ``FOO_LIBRARY`` and not output variables like ``FOO_LIBRARIES``. ``VERSION_VAR <version-var>`` Specify the name of a variable that holds the version of the package that has been found. This version will be checked against the (potentially) specified required version given to the :command:`find_package` call, including its ``EXACT`` option. The default messages include information about the required version and the version which has been actually found, both if the version is ok or not. ``HANDLE_COMPONENTS`` Enable handling of package components. In this case, the command will report which components have been found and which are missing, and the ``<PackageName>_FOUND`` variable will be set to ``FALSE`` if any of the required components (i.e. not the ones listed after the ``OPTIONAL_COMPONENTS`` option of :command:`find_package`) are missing. 
``CONFIG_MODE`` Specify that the calling find module is a wrapper around a call to ``find_package(<PackageName> NO_MODULE)``. This implies a ``VERSION_VAR`` value of ``<PackageName>_VERSION``. The command will automatically check whether the package configuration file was found. ``FAIL_MESSAGE <custom-failure-message>`` Specify a custom failure message instead of using the default generated message. Not recommended. Example for the simple signature: .. code-block:: cmake find_package_handle_standard_args(LibXml2 DEFAULT_MSG LIBXML2_LIBRARY LIBXML2_INCLUDE_DIR) The ``LibXml2`` package is considered to be found if both ``LIBXML2_LIBRARY`` and ``LIBXML2_INCLUDE_DIR`` are valid. Then also ``LibXml2_FOUND`` is set to ``TRUE``. If it is not found and ``REQUIRED`` was used, it fails with a :command:`message(FATAL_ERROR)`, independent whether ``QUIET`` was used or not. If it is found, success will be reported, including the content of the first ``<required-var>``. On repeated CMake runs, the same message will not be printed again. Example for the full signature: .. code-block:: cmake find_package_handle_standard_args(LibArchive REQUIRED_VARS LibArchive_LIBRARY LibArchive_INCLUDE_DIR VERSION_VAR LibArchive_VERSION) In this case, the ``LibArchive`` package is considered to be found if both ``LibArchive_LIBRARY`` and ``LibArchive_INCLUDE_DIR`` are valid. Also the version of ``LibArchive`` will be checked by using the version contained in ``LibArchive_VERSION``. Since no ``FAIL_MESSAGE`` is given, the default messages will be printed. Another example for the full signature: .. code-block:: cmake find_package(Automoc4 QUIET NO_MODULE HINTS /opt/automoc4) find_package_handle_standard_args(Automoc4 CONFIG_MODE) In this case, a ``FindAutmoc4.cmake`` module wraps a call to ``find_package(Automoc4 NO_MODULE)`` and adds an additional search directory for ``automoc4``. Then the call to ``find_package_handle_standard_args`` produces a proper success/failure message. #]=======================================================================] include(${CMAKE_CURRENT_LIST_DIR}/FindPackageMessage.cmake) # internal helper macro macro(_FPHSA_FAILURE_MESSAGE _msg) if (${_NAME}_FIND_REQUIRED) message(FATAL_ERROR "${_msg}") else () if (NOT ${_NAME}_FIND_QUIETLY) message(STATUS "${_msg}") endif () endif () endmacro() # internal helper macro to generate the failure message when used in CONFIG_MODE: macro(_FPHSA_HANDLE_FAILURE_CONFIG_MODE) # <name>_CONFIG is set, but FOUND is false, this means that some other of the REQUIRED_VARS was not found: if(${_NAME}_CONFIG) _FPHSA_FAILURE_MESSAGE("${FPHSA_FAIL_MESSAGE}: missing:${MISSING_VARS} (found ${${_NAME}_CONFIG} ${VERSION_MSG})") else() # If _CONSIDERED_CONFIGS is set, the config-file has been found, but no suitable version. 
# List them all in the error message: if(${_NAME}_CONSIDERED_CONFIGS) set(configsText "") list(LENGTH ${_NAME}_CONSIDERED_CONFIGS configsCount) math(EXPR configsCount "${configsCount} - 1") foreach(currentConfigIndex RANGE ${configsCount}) list(GET ${_NAME}_CONSIDERED_CONFIGS ${currentConfigIndex} filename) list(GET ${_NAME}_CONSIDERED_VERSIONS ${currentConfigIndex} version) string(APPEND configsText " ${filename} (version ${version})\n") endforeach() if (${_NAME}_NOT_FOUND_MESSAGE) string(APPEND configsText " Reason given by package: ${${_NAME}_NOT_FOUND_MESSAGE}\n") endif() _FPHSA_FAILURE_MESSAGE("${FPHSA_FAIL_MESSAGE} ${VERSION_MSG}, checked the following files:\n${configsText}") else() # Simple case: No Config-file was found at all: _FPHSA_FAILURE_MESSAGE("${FPHSA_FAIL_MESSAGE}: found neither ${_NAME}Config.cmake nor ${_NAME_LOWER}-config.cmake ${VERSION_MSG}") endif() endif() endmacro() function(FIND_PACKAGE_HANDLE_STANDARD_ARGS _NAME _FIRST_ARG) # Set up the arguments for `cmake_parse_arguments`. set(options CONFIG_MODE HANDLE_COMPONENTS) set(oneValueArgs FAIL_MESSAGE VERSION_VAR FOUND_VAR) set(multiValueArgs REQUIRED_VARS) # Check whether we are in 'simple' or 'extended' mode: set(_KEYWORDS_FOR_EXTENDED_MODE ${options} ${oneValueArgs} ${multiValueArgs} ) list(FIND _KEYWORDS_FOR_EXTENDED_MODE "${_FIRST_ARG}" INDEX) if(${INDEX} EQUAL -1) set(FPHSA_FAIL_MESSAGE ${_FIRST_ARG}) set(FPHSA_REQUIRED_VARS ${ARGN}) set(FPHSA_VERSION_VAR) else() cmake_parse_arguments(FPHSA "${options}" "${oneValueArgs}" "${multiValueArgs}" ${_FIRST_ARG} ${ARGN}) if(FPHSA_UNPARSED_ARGUMENTS) message(FATAL_ERROR "Unknown keywords given to FIND_PACKAGE_HANDLE_STANDARD_ARGS(): \"${FPHSA_UNPARSED_ARGUMENTS}\"") endif() if(NOT FPHSA_FAIL_MESSAGE) set(FPHSA_FAIL_MESSAGE "DEFAULT_MSG") endif() # In config-mode, we rely on the variable <package>_CONFIG, which is set by find_package() # when it successfully found the config-file, including version checking: if(FPHSA_CONFIG_MODE) list(INSERT FPHSA_REQUIRED_VARS 0 ${_NAME}_CONFIG) list(REMOVE_DUPLICATES FPHSA_REQUIRED_VARS) set(FPHSA_VERSION_VAR ${_NAME}_VERSION) endif() if(NOT FPHSA_REQUIRED_VARS) message(FATAL_ERROR "No REQUIRED_VARS specified for FIND_PACKAGE_HANDLE_STANDARD_ARGS()") endif() endif() # now that we collected all arguments, process them if("x${FPHSA_FAIL_MESSAGE}" STREQUAL "xDEFAULT_MSG") set(FPHSA_FAIL_MESSAGE "Could NOT find ${_NAME}") endif() list(GET FPHSA_REQUIRED_VARS 0 _FIRST_REQUIRED_VAR) string(TOUPPER ${_NAME} _NAME_UPPER) string(TOLOWER ${_NAME} _NAME_LOWER) if(FPHSA_FOUND_VAR) if(FPHSA_FOUND_VAR MATCHES "^${_NAME}_FOUND$" OR FPHSA_FOUND_VAR MATCHES "^${_NAME_UPPER}_FOUND$") set(_FOUND_VAR ${FPHSA_FOUND_VAR}) else() message(FATAL_ERROR "The argument for FOUND_VAR is \"${FPHSA_FOUND_VAR}\", but only \"${_NAME}_FOUND\" and \"${_NAME_UPPER}_FOUND\" are valid names.") endif() else() set(_FOUND_VAR ${_NAME_UPPER}_FOUND) endif() # collect all variables which were not found, so they can be printed, so the # user knows better what went wrong (#6375) set(MISSING_VARS "") set(DETAILS "") # check if all passed variables are valid set(FPHSA_FOUND_${_NAME} TRUE) foreach(_CURRENT_VAR ${FPHSA_REQUIRED_VARS}) if(NOT ${_CURRENT_VAR}) set(FPHSA_FOUND_${_NAME} FALSE) string(APPEND MISSING_VARS " ${_CURRENT_VAR}") else() string(APPEND DETAILS "[${${_CURRENT_VAR}}]") endif() endforeach() if(FPHSA_FOUND_${_NAME}) set(${_NAME}_FOUND TRUE) set(${_NAME_UPPER}_FOUND TRUE) else() set(${_NAME}_FOUND FALSE) set(${_NAME_UPPER}_FOUND FALSE) endif() # component handling 
unset(FOUND_COMPONENTS_MSG) unset(MISSING_COMPONENTS_MSG) if(FPHSA_HANDLE_COMPONENTS) foreach(comp ${${_NAME}_FIND_COMPONENTS}) if(${_NAME}_${comp}_FOUND) if(NOT DEFINED FOUND_COMPONENTS_MSG) set(FOUND_COMPONENTS_MSG "found components: ") endif() string(APPEND FOUND_COMPONENTS_MSG " ${comp}") else() if(NOT DEFINED MISSING_COMPONENTS_MSG) set(MISSING_COMPONENTS_MSG "missing components: ") endif() string(APPEND MISSING_COMPONENTS_MSG " ${comp}") if(${_NAME}_FIND_REQUIRED_${comp}) set(${_NAME}_FOUND FALSE) string(APPEND MISSING_VARS " ${comp}") endif() endif() endforeach() set(COMPONENT_MSG "${FOUND_COMPONENTS_MSG} ${MISSING_COMPONENTS_MSG}") string(APPEND DETAILS "[c${COMPONENT_MSG}]") endif() # version handling: set(VERSION_MSG "") set(VERSION_OK TRUE) # check with DEFINED here as the requested or found version may be "0" if (DEFINED ${_NAME}_FIND_VERSION) if(DEFINED ${FPHSA_VERSION_VAR}) set(_FOUND_VERSION ${${FPHSA_VERSION_VAR}}) if(${_NAME}_FIND_VERSION_EXACT) # exact version required # count the dots in the version string string(REGEX REPLACE "[^.]" "" _VERSION_DOTS "${_FOUND_VERSION}") # add one dot because there is one dot more than there are components string(LENGTH "${_VERSION_DOTS}." _VERSION_DOTS) if (_VERSION_DOTS GREATER ${_NAME}_FIND_VERSION_COUNT) # Because of the C++ implementation of find_package() ${_NAME}_FIND_VERSION_COUNT # is at most 4 here. Therefore a simple lookup table is used. if (${_NAME}_FIND_VERSION_COUNT EQUAL 1) set(_VERSION_REGEX "[^.]*") elseif (${_NAME}_FIND_VERSION_COUNT EQUAL 2) set(_VERSION_REGEX "[^.]*\\.[^.]*") elseif (${_NAME}_FIND_VERSION_COUNT EQUAL 3) set(_VERSION_REGEX "[^.]*\\.[^.]*\\.[^.]*") else () set(_VERSION_REGEX "[^.]*\\.[^.]*\\.[^.]*\\.[^.]*") endif () string(REGEX REPLACE "^(${_VERSION_REGEX})\\..*" "\\1" _VERSION_HEAD "${_FOUND_VERSION}") unset(_VERSION_REGEX) if (NOT ${_NAME}_FIND_VERSION VERSION_EQUAL _VERSION_HEAD) set(VERSION_MSG "Found unsuitable version \"${_FOUND_VERSION}\", but required is exact version \"${${_NAME}_FIND_VERSION}\"") set(VERSION_OK FALSE) else () set(VERSION_MSG "(found suitable exact version \"${_FOUND_VERSION}\")") endif () unset(_VERSION_HEAD) else () if (NOT ${_NAME}_FIND_VERSION VERSION_EQUAL _FOUND_VERSION) set(VERSION_MSG "Found unsuitable version \"${_FOUND_VERSION}\", but required is exact version \"${${_NAME}_FIND_VERSION}\"") set(VERSION_OK FALSE) else () set(VERSION_MSG "(found suitable exact version \"${_FOUND_VERSION}\")") endif () endif () unset(_VERSION_DOTS) else() # minimum version specified: if (${_NAME}_FIND_VERSION VERSION_GREATER _FOUND_VERSION) set(VERSION_MSG "Found unsuitable version \"${_FOUND_VERSION}\", but required is at least \"${${_NAME}_FIND_VERSION}\"") set(VERSION_OK FALSE) else () set(VERSION_MSG "(found suitable version \"${_FOUND_VERSION}\", minimum required is \"${${_NAME}_FIND_VERSION}\")") endif () endif() else() # if the package was not found, but a version was given, add that to the output: if(${_NAME}_FIND_VERSION_EXACT) set(VERSION_MSG "(Required is exact version \"${${_NAME}_FIND_VERSION}\")") else() set(VERSION_MSG "(Required is at least version \"${${_NAME}_FIND_VERSION}\")") endif() endif() else () # Check with DEFINED as the found version may be 0. 
if(DEFINED ${FPHSA_VERSION_VAR}) set(VERSION_MSG "(found version \"${${FPHSA_VERSION_VAR}}\")") endif() endif () if(VERSION_OK) string(APPEND DETAILS "[v${${FPHSA_VERSION_VAR}}(${${_NAME}_FIND_VERSION})]") else() set(${_NAME}_FOUND FALSE) endif() # print the result: if (${_NAME}_FOUND) FIND_PACKAGE_MESSAGE(${_NAME} "Found ${_NAME}: ${${_FIRST_REQUIRED_VAR}} ${VERSION_MSG} ${COMPONENT_MSG}" "${DETAILS}") else () if(FPHSA_CONFIG_MODE) _FPHSA_HANDLE_FAILURE_CONFIG_MODE() else() if(NOT VERSION_OK) _FPHSA_FAILURE_MESSAGE("${FPHSA_FAIL_MESSAGE}: ${VERSION_MSG} (found ${${_FIRST_REQUIRED_VAR}})") else() _FPHSA_FAILURE_MESSAGE("${FPHSA_FAIL_MESSAGE} (missing:${MISSING_VARS}) ${VERSION_MSG}") endif() endif() endif () set(${_NAME}_FOUND ${${_NAME}_FOUND} PARENT_SCOPE) set(${_NAME_UPPER}_FOUND ${${_NAME}_FOUND} PARENT_SCOPE) endfunction() ```
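The embedded documentation shows the simple and `CONFIG_MODE` signatures; the sketch below combines `VERSION_VAR` with `HANDLE_COMPONENTS`, which are described above but not demonstrated together. Everything named `Foo` is invented for illustration:

```cmake
# Hypothetical FindFoo.cmake fragment; package, paths, and version are invented.
find_path(Foo_INCLUDE_DIR foo.h)
find_library(Foo_LIBRARY foo)
set(Foo_VERSION "1.2.3")        # normally parsed from a header or pkg-config
set(Foo_bar_FOUND TRUE)         # per-component result consulted by HANDLE_COMPONENTS

include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(Foo
  REQUIRED_VARS Foo_LIBRARY Foo_INCLUDE_DIR
  VERSION_VAR   Foo_VERSION
  HANDLE_COMPONENTS)
```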
=================================================================================================================================================================== SOURCE CODE FILE: FindPackageMessage.cmake LINES: 2 SIZE: 1.57 KB PATH: scripts\freecad_env\Lib\site-packages\torch\share\cmake\Caffe2\Modules_CUDA_fix\upstream\FindPackageMessage.cmake ENCODING: utf-8 ```cmake # Distributed under the OSI-approved BSD 3-Clause License. See accompanying # file Copyright.txt or https://cmake.org/licensing for details. #.rst: # FindPackageMessage # ------------------ # # # # FIND_PACKAGE_MESSAGE(<name> "message for user" "find result details") # # This macro is intended to be used in FindXXX.cmake modules files. It # will print a message once for each unique find result. This is useful # for telling the user where a package was found. The first argument # specifies the name (XXX) of the package. The second argument # specifies the message to display. The third argument lists details # about the find result so that if they change the message will be # displayed again. The macro also obeys the QUIET argument to the # find_package command. # # Example: # # :: # # if(X11_FOUND) # FIND_PACKAGE_MESSAGE(X11 "Found X11: ${X11_X11_LIB}" # "[${X11_X11_LIB}][${X11_INCLUDE_DIR}]") # else() # ... # endif() function(FIND_PACKAGE_MESSAGE pkg msg details) # Avoid printing a message repeatedly for the same find result. if(NOT ${pkg}_FIND_QUIETLY) string(REPLACE "\n" "" details "${details}") set(DETAILS_VAR FIND_PACKAGE_MESSAGE_DETAILS_${pkg}) if(NOT "${details}" STREQUAL "${${DETAILS_VAR}}") # The message has not yet been printed. message(STATUS "${msg}") # Save the find details in the cache to avoid printing the same # message again. set("${DETAILS_VAR}" "${details}" CACHE INTERNAL "Details about finding ${pkg}") endif() endif() endfunction() ```
===================================================================================================================================== SOURCE CODE FILE: LoadHIP.cmake LINES: 14 SIZE: 8.33 KB PATH: scripts\freecad_env\Lib\site-packages\torch\share\cmake\Caffe2\public\LoadHIP.cmake ENCODING: utf-8 ```cmake set(PYTORCH_FOUND_HIP FALSE) # If ROCM_PATH is set, assume intention is to compile with # ROCm support and error out if the ROCM_PATH does not exist. # Else ROCM_PATH does not exist, assume a default of /opt/rocm # In the latter case, if /opt/rocm does not exist emit status # message and return. if(DEFINED ENV{ROCM_PATH}) set(ROCM_PATH $ENV{ROCM_PATH}) if(NOT EXISTS ${ROCM_PATH}) message(FATAL_ERROR "ROCM_PATH environment variable is set to ${ROCM_PATH} but does not exist.\n" "Set a valid ROCM_PATH or unset ROCM_PATH environment variable to fix.") endif() else() if(UNIX) set(ROCM_PATH /opt/rocm) else() # Win32 set(ROCM_PATH C:/opt/rocm) endif() if(NOT EXISTS ${ROCM_PATH}) message(STATUS "ROCM_PATH environment variable is not set and ${ROCM_PATH} does not exist.\n" "Building without ROCm support.") return() endif() endif() if(NOT DEFINED ENV{ROCM_INCLUDE_DIRS}) set(ROCM_INCLUDE_DIRS ${ROCM_PATH}/include) else() set(ROCM_INCLUDE_DIRS $ENV{ROCM_INCLUDE_DIRS}) endif() # MAGMA_HOME if(NOT DEFINED ENV{MAGMA_HOME}) set(MAGMA_HOME ${ROCM_PATH}/magma) set(ENV{MAGMA_HOME} ${ROCM_PATH}/magma) else() set(MAGMA_HOME $ENV{MAGMA_HOME}) endif() # MIOpen isn't a part of HIP-SDK for Windows and hence, may have a different # installation directory. if(WIN32) if(NOT DEFINED ENV{MIOPEN_PATH}) set(miopen_DIR C:/opt/miopen/lib/cmake/miopen) else() set(miopen_DIR $ENV{MIOPEN_PATH}/lib/cmake/miopen) endif() endif() torch_hip_get_arch_list(PYTORCH_ROCM_ARCH) if(PYTORCH_ROCM_ARCH STREQUAL "") message(FATAL_ERROR "No GPU arch specified for ROCm build. 
Please use PYTORCH_ROCM_ARCH environment variable to specify GPU archs to build for.") endif() message("Building PyTorch for GPU arch: ${PYTORCH_ROCM_ARCH}") # Add HIP to the CMAKE Module Path # needed because the find_package call to this module uses the Module mode search # https://cmake.org/cmake/help/latest/command/find_package.html#search-modes if(UNIX) set(CMAKE_MODULE_PATH ${ROCM_PATH}/lib/cmake/hip ${CMAKE_MODULE_PATH}) else() # Win32 set(CMAKE_MODULE_PATH ${ROCM_PATH}/cmake/ ${CMAKE_MODULE_PATH}) endif() # Add ROCM_PATH to CMAKE_PREFIX_PATH, needed because the find_package # call to individual ROCM components uses the Config mode search list(APPEND CMAKE_PREFIX_PATH ${ROCM_PATH}) macro(find_package_and_print_version PACKAGE_NAME) find_package("${PACKAGE_NAME}" ${ARGN}) message("${PACKAGE_NAME} VERSION: ${${PACKAGE_NAME}_VERSION}") endmacro() # Find the HIP Package # MODULE argument is added for clarity that CMake is searching # for FindHIP.cmake in Module mode find_package_and_print_version(HIP 1.0 MODULE) if(HIP_FOUND) set(PYTORCH_FOUND_HIP TRUE) # Find ROCM version for checks if(UNIX) set(ROCM_VERSION_HEADER_PATH ${ROCM_INCLUDE_DIRS}/rocm-core/rocm_version.h) else() set(ROCM_VERSION_HEADER_PATH ${ROCM_INCLUDE_DIRS}/hip/hip_version.h) endif() get_filename_component(ROCM_HEADER_NAME ${ROCM_VERSION_HEADER_PATH} NAME) if(EXISTS ${ROCM_VERSION_HEADER_PATH}) set(ROCM_HEADER_FILE ${ROCM_VERSION_HEADER_PATH}) else() message(FATAL_ERROR "********************* ${ROCM_HEADER_NAME} could not be found ******************\n") endif() # Read the ROCM headerfile into a variable file(READ ${ROCM_HEADER_FILE} ROCM_HEADER_CONTENT) # Since Windows currently supports only a part of ROCm and names it HIP-SDK, # we need to refer to the HIP-SDK equivalents of entities existing in ROCm lib. if(UNIX) set(ROCM_LIB_NAME "ROCM") else() # Win32 set(ROCM_LIB_NAME "HIP") endif() # Below we use a RegEx to find ROCM version numbers. # Note that CMake does not support \s for blank space. That is # why in the regular expressions below we have a blank space in # the square brackets. # There are three steps: # 1. Match regular expression # 2. Strip the non-numerical part of the string # 3. 
Strip leading and trailing spaces string(REGEX MATCH "${ROCM_LIB_NAME}_VERSION_MAJOR[ ]+[0-9]+" TEMP1 ${ROCM_HEADER_CONTENT}) string(REPLACE "${ROCM_LIB_NAME}_VERSION_MAJOR" "" TEMP2 ${TEMP1}) string(STRIP ${TEMP2} ROCM_VERSION_DEV_MAJOR) string(REGEX MATCH "${ROCM_LIB_NAME}_VERSION_MINOR[ ]+[0-9]+" TEMP1 ${ROCM_HEADER_CONTENT}) string(REPLACE "${ROCM_LIB_NAME}_VERSION_MINOR" "" TEMP2 ${TEMP1}) string(STRIP ${TEMP2} ROCM_VERSION_DEV_MINOR) string(REGEX MATCH "${ROCM_LIB_NAME}_VERSION_PATCH[ ]+[0-9]+" TEMP1 ${ROCM_HEADER_CONTENT}) string(REPLACE "${ROCM_LIB_NAME}_VERSION_PATCH" "" TEMP2 ${TEMP1}) string(STRIP ${TEMP2} ROCM_VERSION_DEV_PATCH) # Create ROCM_VERSION_DEV_INT which is later used as a preprocessor macros set(ROCM_VERSION_DEV "${ROCM_VERSION_DEV_MAJOR}.${ROCM_VERSION_DEV_MINOR}.${ROCM_VERSION_DEV_PATCH}") math(EXPR ROCM_VERSION_DEV_INT "(${ROCM_VERSION_DEV_MAJOR}*10000) + (${ROCM_VERSION_DEV_MINOR}*100) + ${ROCM_VERSION_DEV_PATCH}") message("\n***** ROCm version from ${ROCM_HEADER_NAME} ****\n") message("ROCM_VERSION_DEV: ${ROCM_VERSION_DEV}") message("ROCM_VERSION_DEV_MAJOR: ${ROCM_VERSION_DEV_MAJOR}") message("ROCM_VERSION_DEV_MINOR: ${ROCM_VERSION_DEV_MINOR}") message("ROCM_VERSION_DEV_PATCH: ${ROCM_VERSION_DEV_PATCH}") message("ROCM_VERSION_DEV_INT: ${ROCM_VERSION_DEV_INT}") math(EXPR TORCH_HIP_VERSION "(${HIP_VERSION_MAJOR} * 100) + ${HIP_VERSION_MINOR}") message("HIP_VERSION_MAJOR: ${HIP_VERSION_MAJOR}") message("HIP_VERSION_MINOR: ${HIP_VERSION_MINOR}") message("TORCH_HIP_VERSION: ${TORCH_HIP_VERSION}") # Find ROCM components using Config mode # These components will be searced for recursively in ${ROCM_PATH} message("\n***** Library versions from cmake find_package *****\n") find_package_and_print_version(hip REQUIRED CONFIG) find_package_and_print_version(amd_comgr REQUIRED) find_package_and_print_version(rocrand REQUIRED) find_package_and_print_version(hiprand REQUIRED) find_package_and_print_version(rocblas REQUIRED) find_package_and_print_version(hipblas REQUIRED) find_package_and_print_version(miopen REQUIRED) find_package_and_print_version(hipfft REQUIRED) find_package_and_print_version(hipsparse REQUIRED) find_package_and_print_version(rocprim REQUIRED) find_package_and_print_version(hipcub REQUIRED) find_package_and_print_version(rocthrust REQUIRED) find_package_and_print_version(hipsolver REQUIRED) # workaround cmake 4 build issue if(CMAKE_VERSION VERSION_GREATER_EQUAL "4.0.0") message(WARNING "Work around hiprtc cmake failure for cmake >= 4") set(CMAKE_POLICY_VERSION_MINIMUM 3.5) find_package_and_print_version(hiprtc REQUIRED) unset(CMAKE_POLICY_VERSION_MINIMUM) else() find_package_and_print_version(hiprtc REQUIRED) endif() find_package_and_print_version(hipblaslt REQUIRED) if(UNIX) find_package_and_print_version(rccl) find_package_and_print_version(hsa-runtime64 REQUIRED) # roctx is part of roctracer find_library(ROCM_ROCTX_LIB roctx64 HINTS ${ROCM_PATH}/lib) set(PROJECT_RANDOM_BINARY_DIR "${PROJECT_BINARY_DIR}") if(ROCM_VERSION_DEV VERSION_GREATER_EQUAL "5.7.0") # check whether hipblaslt provides HIPBLASLT_MATMUL_DESC_A_SCALE_POINTER_VEC_EXT set(file "${PROJECT_BINARY_DIR}/hipblaslt_test_vec_ext.cc") file(WRITE ${file} "" "#define LEGACY_HIPBLAS_DIRECT\n" "#include <hipblaslt/hipblaslt.h>\n" "int main() {\n" " hipblasLtMatmulDescAttributes_t attr = HIPBLASLT_MATMUL_DESC_A_SCALE_POINTER_VEC_EXT;\n" " return 0;\n" "}\n" ) try_compile(hipblaslt_compile_result_vec_ext ${PROJECT_RANDOM_BINARY_DIR} ${file} CMAKE_FLAGS "-DINCLUDE_DIRECTORIES=${ROCM_INCLUDE_DIRS}" 
COMPILE_DEFINITIONS -D__HIP_PLATFORM_AMD__ -D__HIP_PLATFORM_HCC__ OUTPUT_VARIABLE hipblaslt_compile_output) if(hipblaslt_compile_result_vec_ext) set(HIPBLASLT_VEC_EXT ON) #message("hipblaslt is using scale pointer vec ext: ${hipblaslt_compile_output}") message("hipblaslt is using scale pointer vec ext") else() set(HIPBLASLT_VEC_EXT OFF) message("hipblaslt is NOT using scale pointer vec ext: ${hipblaslt_compile_output}") #message("hipblaslt is NOT using scale pointer vec ext") endif() endif() endif() endif() ```
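The version detection in LoadHIP.cmake deliberately avoids a full parser: it regex-matches `<LIB>_VERSION_MAJOR/MINOR/PATCH` tokens, strips the macro name, and trims whitespace. As a rough sketch (not part of the file above), the same three-step pattern could be reused for any header that exposes such macros; the `MYLIB_*` names and the header path below are hypothetical.

```cmake
# Hypothetical illustration of the match / replace / strip pattern used above.
file(READ "${MYLIB_INCLUDE_DIR}/mylib_version.h" _header_content)  # hypothetical header
string(REGEX MATCH "MYLIB_VERSION_MAJOR[ ]+[0-9]+" _tmp "${_header_content}")
string(REPLACE "MYLIB_VERSION_MAJOR" "" _tmp "${_tmp}")
string(STRIP "${_tmp}" MYLIB_VERSION_MAJOR)
message(STATUS "Parsed MYLIB_VERSION_MAJOR: ${MYLIB_VERSION_MAJOR}")
```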
================================================================================================================================== SOURCE CODE FILE: cuda.cmake LINES: 9 SIZE: 14.19 KB PATH: scripts\freecad_env\Lib\site-packages\torch\share\cmake\Caffe2\public\cuda.cmake ENCODING: utf-8 ```cmake # ---[ cuda # Poor man's include guard if(TARGET torch::cudart) return() endif() # sccache is only supported in CMake master and not in the newest official # release (3.11.3) yet. Hence we need our own Modules_CUDA_fix to enable sccache. list(APPEND CMAKE_MODULE_PATH ${CMAKE_CURRENT_LIST_DIR}/../Modules_CUDA_fix) # We don't want to statically link cudart, because we rely on it's dynamic linkage in # python (follow along torch/cuda/__init__.py and usage of cudaGetErrorName). # Technically, we can link cudart here statically, and link libtorch_python.so # to a dynamic libcudart.so, but that's just wasteful. # However, on Windows, if this one gets switched off, the error "cuda: unknown error" # will be raised when running the following code: # >>> import torch # >>> torch.cuda.is_available() # >>> torch.cuda.current_device() # More details can be found in the following links. # https://github.com/pytorch/pytorch/issues/20635 # https://github.com/pytorch/pytorch/issues/17108 if(NOT MSVC) set(CUDA_USE_STATIC_CUDA_RUNTIME OFF CACHE INTERNAL "") endif() # Find CUDA. find_package(CUDA) if(NOT CUDA_FOUND) message(WARNING "PyTorch: CUDA cannot be found. Depending on whether you are building " "PyTorch or a PyTorch dependent library, the next warning / error will " "give you more info.") set(CAFFE2_USE_CUDA OFF) return() endif() # Enable CUDA language support set(CUDAToolkit_ROOT "${CUDA_TOOLKIT_ROOT_DIR}") # Pass clang as host compiler, which according to the docs # Must be done before CUDA language is enabled, see # https://cmake.org/cmake/help/v3.15/variable/CMAKE_CUDA_HOST_COMPILER.html if("${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang") set(CMAKE_CUDA_HOST_COMPILER "${CMAKE_CXX_COMPILER}") endif() enable_language(CUDA) if("X${CMAKE_CUDA_STANDARD}" STREQUAL "X" ) set(CMAKE_CUDA_STANDARD ${CMAKE_CXX_STANDARD}) endif() set(CMAKE_CUDA_STANDARD_REQUIRED ON) # CMP0074 - find_package will respect <PackageName>_ROOT variables cmake_policy(PUSH) if(CMAKE_VERSION VERSION_GREATER_EQUAL 3.12.0) cmake_policy(SET CMP0074 NEW) endif() find_package(CUDAToolkit REQUIRED) cmake_policy(POP) if(NOT CMAKE_CUDA_COMPILER_VERSION VERSION_EQUAL CUDAToolkit_VERSION) message(FATAL_ERROR "Found two conflicting CUDA versions:\n" "V${CMAKE_CUDA_COMPILER_VERSION} in '${CUDA_INCLUDE_DIRS}' and\n" "V${CUDAToolkit_VERSION} in '${CUDAToolkit_INCLUDE_DIRS}'") endif() message(STATUS "PyTorch: CUDA detected: " ${CUDA_VERSION}) message(STATUS "PyTorch: CUDA nvcc is: " ${CUDA_NVCC_EXECUTABLE}) message(STATUS "PyTorch: CUDA toolkit directory: " ${CUDA_TOOLKIT_ROOT_DIR}) if(CUDA_VERSION VERSION_LESS 11.0) message(FATAL_ERROR "PyTorch requires CUDA 11.0 or above.") endif() if(CUDA_FOUND) # Sometimes, we may mismatch nvcc with the CUDA headers we are # compiling with, e.g., if a ccache nvcc is fed to us by CUDA_NVCC_EXECUTABLE # but the PATH is not consistent with CUDA_HOME. It's better safe # than sorry: make sure everything is consistent. if(MSVC AND CMAKE_GENERATOR MATCHES "Visual Studio") # When using Visual Studio, it attempts to lock the whole binary dir when # `try_run` is called, which will cause the build to fail. 
string(RANDOM BUILD_SUFFIX) set(PROJECT_RANDOM_BINARY_DIR "${PROJECT_BINARY_DIR}/${BUILD_SUFFIX}") else() set(PROJECT_RANDOM_BINARY_DIR "${PROJECT_BINARY_DIR}") endif() set(file "${PROJECT_BINARY_DIR}/detect_cuda_version.cc") file(WRITE ${file} "" "#include <cuda.h>\n" "#include <cstdio>\n" "int main() {\n" " printf(\"%d.%d\", CUDA_VERSION / 1000, (CUDA_VERSION / 10) % 100);\n" " return 0;\n" "}\n" ) if(NOT CMAKE_CROSSCOMPILING) try_run(run_result compile_result ${PROJECT_RANDOM_BINARY_DIR} ${file} CMAKE_FLAGS "-DINCLUDE_DIRECTORIES=${CUDA_INCLUDE_DIRS}" LINK_LIBRARIES ${CUDA_LIBRARIES} RUN_OUTPUT_VARIABLE cuda_version_from_header COMPILE_OUTPUT_VARIABLE output_var ) if(NOT compile_result) message(FATAL_ERROR "PyTorch: Couldn't determine version from header: " ${output_var}) endif() message(STATUS "PyTorch: Header version is: " ${cuda_version_from_header}) if(NOT cuda_version_from_header STREQUAL ${CUDA_VERSION_STRING}) # Force CUDA to be processed for again next time # TODO: I'm not sure if this counts as an implementation detail of # FindCUDA set(${cuda_version_from_findcuda} ${CUDA_VERSION_STRING}) unset(CUDA_TOOLKIT_ROOT_DIR_INTERNAL CACHE) # Not strictly necessary, but for good luck. unset(CUDA_VERSION CACHE) # Error out message(FATAL_ERROR "FindCUDA says CUDA version is ${cuda_version_from_findcuda} (usually determined by nvcc), " "but the CUDA headers say the version is ${cuda_version_from_header}. This often occurs " "when you set both CUDA_HOME and CUDA_NVCC_EXECUTABLE to " "non-standard locations, without also setting PATH to point to the correct nvcc. " "Perhaps, try re-running this command again with PATH=${CUDA_TOOLKIT_ROOT_DIR}/bin:$PATH. " "See above log messages for more diagnostics, and see https://github.com/pytorch/pytorch/issues/8092 for more details.") endif() endif() endif() # ---[ CUDA libraries wrapper # find lbnvrtc.so set(CUDA_NVRTC_LIB "${CUDA_nvrtc_LIBRARY}" CACHE FILEPATH "") if(CUDA_NVRTC_LIB AND NOT CUDA_NVRTC_SHORTHASH) find_package(Python COMPONENTS Interpreter) execute_process( COMMAND Python::Interpreter -c "import hashlib;hash=hashlib.sha256();hash.update(open('${CUDA_NVRTC_LIB}','rb').read());print(hash.hexdigest()[:8])" RESULT_VARIABLE _retval OUTPUT_VARIABLE CUDA_NVRTC_SHORTHASH) if(NOT _retval EQUAL 0) message(WARNING "Failed to compute shorthash for libnvrtc.so") set(CUDA_NVRTC_SHORTHASH "XXXXXXXX") else() string(STRIP "${CUDA_NVRTC_SHORTHASH}" CUDA_NVRTC_SHORTHASH) message(STATUS "${CUDA_NVRTC_LIB} shorthash is ${CUDA_NVRTC_SHORTHASH}") endif() endif() # Create new style imported libraries. # Several of these libraries have a hardcoded path if CAFFE2_STATIC_LINK_CUDA # is set. This path is where sane CUDA installations have their static # libraries installed. This flag should only be used for binary builds, so # end-users should never have this flag set. 
# cuda add_library(caffe2::cuda INTERFACE IMPORTED) set_property( TARGET caffe2::cuda PROPERTY INTERFACE_LINK_LIBRARIES CUDA::cuda_driver) # cudart add_library(torch::cudart INTERFACE IMPORTED) if(CAFFE2_STATIC_LINK_CUDA) set_property( TARGET torch::cudart PROPERTY INTERFACE_LINK_LIBRARIES CUDA::cudart_static) else() set_property( TARGET torch::cudart PROPERTY INTERFACE_LINK_LIBRARIES CUDA::cudart) endif() # nvToolsExt if(USE_SYSTEM_NVTX) find_path(nvtx3_dir NAMES nvtx3 PATHS ${CUDA_INCLUDE_DIRS}) else() find_path(nvtx3_dir NAMES nvtx3 PATHS "${PROJECT_SOURCE_DIR}/third_party/NVTX/c/include" NO_DEFAULT_PATH) endif() find_package_handle_standard_args(nvtx3 DEFAULT_MSG nvtx3_dir) if(nvtx3_FOUND) add_library(torch::nvtx3 INTERFACE IMPORTED) target_include_directories(torch::nvtx3 INTERFACE "${nvtx3_dir}") target_compile_definitions(torch::nvtx3 INTERFACE TORCH_CUDA_USE_NVTX3) else() message(WARNING "Cannot find NVTX3, find old NVTX instead") add_library(torch::nvtoolsext INTERFACE IMPORTED) set_property(TARGET torch::nvtoolsext PROPERTY INTERFACE_LINK_LIBRARIES CUDA::nvToolsExt) endif() # cublas add_library(caffe2::cublas INTERFACE IMPORTED) if(CAFFE2_STATIC_LINK_CUDA AND NOT WIN32) set_property( TARGET caffe2::cublas PROPERTY INTERFACE_LINK_LIBRARIES # NOTE: cublas is always linked dynamically CUDA::cublas CUDA::cublasLt) set_property( TARGET caffe2::cublas APPEND PROPERTY INTERFACE_LINK_LIBRARIES CUDA::cudart_static rt) else() set_property( TARGET caffe2::cublas PROPERTY INTERFACE_LINK_LIBRARIES CUDA::cublas CUDA::cublasLt) endif() # cudnn interface # static linking is handled by USE_STATIC_CUDNN environment variable if(CAFFE2_USE_CUDNN) if(USE_STATIC_CUDNN) set(CUDNN_STATIC ON CACHE BOOL "") else() set(CUDNN_STATIC OFF CACHE BOOL "") endif() find_package(CUDNN) if(NOT CUDNN_FOUND) message(WARNING "Cannot find cuDNN library. Turning the option off") set(CAFFE2_USE_CUDNN OFF) else() if(CUDNN_VERSION VERSION_LESS "8.1.0") message(FATAL_ERROR "PyTorch requires cuDNN 8.1 and above.") endif() endif() add_library(torch::cudnn INTERFACE IMPORTED) target_include_directories(torch::cudnn INTERFACE ${CUDNN_INCLUDE_PATH}) if(CUDNN_STATIC AND NOT WIN32) target_link_options(torch::cudnn INTERFACE "-Wl,--exclude-libs,libcudnn_static.a") else() target_link_libraries(torch::cudnn INTERFACE ${CUDNN_LIBRARY_PATH}) endif() else() message(STATUS "USE_CUDNN is set to 0. Compiling without cuDNN support") endif() if(CAFFE2_USE_CUSPARSELT) find_package(CUSPARSELT) if(NOT CUSPARSELT_FOUND) message(WARNING "Cannot find cuSPARSELt library. Turning the option off") set(CAFFE2_USE_CUSPARSELT OFF) else() add_library(torch::cusparselt INTERFACE IMPORTED) target_include_directories(torch::cusparselt INTERFACE ${CUSPARSELT_INCLUDE_PATH}) target_link_libraries(torch::cusparselt INTERFACE ${CUSPARSELT_LIBRARY_PATH}) endif() else() message(STATUS "USE_CUSPARSELT is set to 0. Compiling without cuSPARSELt support") endif() if(USE_CUDSS) find_package(CUDSS) if(NOT CUDSS_FOUND) message(WARNING "Cannot find CUDSS library. Turning the option off") set(USE_CUDSS OFF) else() add_library(torch::cudss INTERFACE IMPORTED) target_include_directories(torch::cudss INTERFACE ${CUDSS_INCLUDE_PATH}) target_link_libraries(torch::cudss INTERFACE ${CUDSS_LIBRARY_PATH}) endif() else() message(STATUS "USE_CUDSS is set to 0. 
Compiling without cuDSS support") endif() # cufile if(CAFFE2_USE_CUFILE) add_library(torch::cufile INTERFACE IMPORTED) if(CAFFE2_STATIC_LINK_CUDA AND NOT WIN32) set_property( TARGET torch::cufile PROPERTY INTERFACE_LINK_LIBRARIES CUDA::cuFile_static) else() set_property( TARGET torch::cufile PROPERTY INTERFACE_LINK_LIBRARIES CUDA::cuFile) endif() else() message(STATUS "USE_CUFILE is set to 0. Compiling without cuFile support") endif() # curand add_library(caffe2::curand INTERFACE IMPORTED) if(CAFFE2_STATIC_LINK_CUDA AND NOT WIN32) set_property( TARGET caffe2::curand PROPERTY INTERFACE_LINK_LIBRARIES CUDA::curand_static) else() set_property( TARGET caffe2::curand PROPERTY INTERFACE_LINK_LIBRARIES CUDA::curand) endif() # cufft add_library(caffe2::cufft INTERFACE IMPORTED) if(CAFFE2_STATIC_LINK_CUDA AND NOT WIN32) set_property( TARGET caffe2::cufft PROPERTY INTERFACE_LINK_LIBRARIES CUDA::cufft_static_nocallback) else() set_property( TARGET caffe2::cufft PROPERTY INTERFACE_LINK_LIBRARIES CUDA::cufft) endif() # nvrtc add_library(caffe2::nvrtc INTERFACE IMPORTED) set_property( TARGET caffe2::nvrtc PROPERTY INTERFACE_LINK_LIBRARIES CUDA::nvrtc caffe2::cuda) # Add onnx namepsace definition to nvcc if(ONNX_NAMESPACE) list(APPEND CUDA_NVCC_FLAGS "-DONNX_NAMESPACE=${ONNX_NAMESPACE}") else() list(APPEND CUDA_NVCC_FLAGS "-DONNX_NAMESPACE=onnx_c2") endif() # Don't activate VC env again for Ninja generators with MSVC on Windows if CUDAHOSTCXX is not defined # by adding --use-local-env. if(MSVC AND CMAKE_GENERATOR STREQUAL "Ninja" AND NOT DEFINED ENV{CUDAHOSTCXX}) list(APPEND CUDA_NVCC_FLAGS "--use-local-env") endif() # setting nvcc arch flags torch_cuda_get_nvcc_gencode_flag(NVCC_FLAGS_EXTRA) # CMake 3.18 adds integrated support for architecture selection, but we can't rely on it set(CMAKE_CUDA_ARCHITECTURES OFF) list(APPEND CUDA_NVCC_FLAGS ${NVCC_FLAGS_EXTRA}) message(STATUS "Added CUDA NVCC flags for: ${NVCC_FLAGS_EXTRA}") # disable some nvcc diagnostic that appears in boost, glog, glags, opencv, etc. 
foreach(diag cc_clobber_ignored field_without_dll_interface base_class_has_different_dll_interface dll_interface_conflict_none_assumed dll_interface_conflict_dllexport_assumed bad_friend_decl) list(APPEND SUPPRESS_WARNING_FLAGS --diag_suppress=${diag}) endforeach() string(REPLACE ";" "," SUPPRESS_WARNING_FLAGS "${SUPPRESS_WARNING_FLAGS}") list(APPEND CUDA_NVCC_FLAGS -Xcudafe ${SUPPRESS_WARNING_FLAGS}) set(CUDA_PROPAGATE_HOST_FLAGS_BLOCKLIST "-Werror") if(MSVC) list(APPEND CUDA_NVCC_FLAGS "--Werror" "cross-execution-space-call") list(APPEND CUDA_NVCC_FLAGS "--no-host-device-move-forward") endif() # Debug and Release symbol support if(MSVC) if(${CAFFE2_USE_MSVC_STATIC_RUNTIME}) string(APPEND CMAKE_CUDA_FLAGS_DEBUG " -Xcompiler /MTd") string(APPEND CMAKE_CUDA_FLAGS_MINSIZEREL " -Xcompiler /MT") string(APPEND CMAKE_CUDA_FLAGS_RELEASE " -Xcompiler /MT") string(APPEND CMAKE_CUDA_FLAGS_RELWITHDEBINFO " -Xcompiler /MT") else() string(APPEND CMAKE_CUDA_FLAGS_DEBUG " -Xcompiler /MDd") string(APPEND CMAKE_CUDA_FLAGS_MINSIZEREL " -Xcompiler /MD") string(APPEND CMAKE_CUDA_FLAGS_RELEASE " -Xcompiler /MD") string(APPEND CMAKE_CUDA_FLAGS_RELWITHDEBINFO " -Xcompiler /MD") endif() if(CUDA_NVCC_FLAGS MATCHES "Zi") list(APPEND CUDA_NVCC_FLAGS "-Xcompiler" "-FS") endif() elseif(CUDA_DEVICE_DEBUG) list(APPEND CUDA_NVCC_FLAGS "-g" "-G") # -G enables device code debugging symbols endif() # Set expt-relaxed-constexpr to suppress Eigen warnings list(APPEND CUDA_NVCC_FLAGS "--expt-relaxed-constexpr") # Set expt-extended-lambda to support lambda on device list(APPEND CUDA_NVCC_FLAGS "--expt-extended-lambda") foreach(FLAG ${CUDA_NVCC_FLAGS}) string(FIND "${FLAG}" " " flag_space_position) if(NOT flag_space_position EQUAL -1) message(FATAL_ERROR "Found spaces in CUDA_NVCC_FLAGS entry '${FLAG}'") endif() string(APPEND CMAKE_CUDA_FLAGS " ${FLAG}") endforeach() ```
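The final loop in cuda.cmake rejects any `CUDA_NVCC_FLAGS` entry that contains a space before appending it to `CMAKE_CUDA_FLAGS`. A minimal sketch of how a caller could stay within that constraint; the warning flags chosen here are arbitrary placeholders, not flags used by the build above.

```cmake
# Multi-token host-compiler options are packed into one comma-separated
# -Xcompiler entry, so each list element stays free of spaces.
list(APPEND CUDA_NVCC_FLAGS "-Xcompiler=-Wall,-Wno-unused-function")

# A space-delimited entry like the one below would trip the FATAL_ERROR check:
# list(APPEND CUDA_NVCC_FLAGS "-Xcompiler -Wall")
```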
==================================================================================================================================== SOURCE CODE FILE: gflags.cmake LINES: 1 SIZE: 2.64 KB PATH: scripts\freecad_env\Lib\site-packages\torch\share\cmake\Caffe2\public\gflags.cmake ENCODING: utf-8
```cmake
# ---[ gflags

# We will try to use the config mode first, and then manual find.
find_package(gflags CONFIG QUIET)
if(NOT TARGET gflags)
  find_package(gflags MODULE QUIET)
endif()

if(TARGET gflags)
  message(STATUS "Caffe2: Found gflags with new-style gflags target.")
elseif(GFLAGS_FOUND)
  message(STATUS "Caffe2: Found gflags with old-style gflags target.")
  add_library(gflags UNKNOWN IMPORTED)
  set_property(
      TARGET gflags PROPERTY IMPORTED_LOCATION ${GFLAGS_LIBRARY})
  set_property(
      TARGET gflags PROPERTY INTERFACE_INCLUDE_DIRECTORIES
      ${GFLAGS_INCLUDE_DIR})
else()
  message(STATUS "Caffe2: Cannot find gflags automatically. Using legacy find.")

  # - Try to find GFLAGS in the legacy way.
  #
  # The following variables are optionally searched for defaults
  #  GFLAGS_ROOT_DIR: Base directory where all GFLAGS components are found
  #
  # The following are set after configuration is done:
  #  GFLAGS_FOUND
  #  GFLAGS_INCLUDE_DIRS
  #  GFLAGS_LIBRARIES
  #  GFLAGS_LIBRARY_DIRS
  include(FindPackageHandleStandardArgs)
  set(GFLAGS_ROOT_DIR "" CACHE PATH "Folder contains Gflags")

  # We are testing only a couple of files in the include directories
  if(WIN32)
    find_path(GFLAGS_INCLUDE_DIR gflags/gflags.h
        PATHS ${GFLAGS_ROOT_DIR}/src/windows)
  else()
    find_path(GFLAGS_INCLUDE_DIR gflags/gflags.h
        PATHS ${GFLAGS_ROOT_DIR})
  endif()

  if(WIN32)
    find_library(GFLAGS_LIBRARY_RELEASE
        NAMES libgflags
        PATHS ${GFLAGS_ROOT_DIR}
        PATH_SUFFIXES Release)
    find_library(GFLAGS_LIBRARY_DEBUG
        NAMES libgflags-debug
        PATHS ${GFLAGS_ROOT_DIR}
        PATH_SUFFIXES Debug)
    set(GFLAGS_LIBRARY optimized ${GFLAGS_LIBRARY_RELEASE} debug ${GFLAGS_LIBRARY_DEBUG})
  else()
    find_library(GFLAGS_LIBRARY gflags)
  endif()

  find_package_handle_standard_args(
      gflags DEFAULT_MSG GFLAGS_INCLUDE_DIR GFLAGS_LIBRARY)

  if(GFLAGS_FOUND)
    message(
        STATUS
        "Caffe2: Found gflags (include: ${GFLAGS_INCLUDE_DIR}, "
        "library: ${GFLAGS_LIBRARY})")
    add_library(gflags UNKNOWN IMPORTED)
    set_property(
        TARGET gflags PROPERTY IMPORTED_LOCATION ${GFLAGS_LIBRARY})
    set_property(
        TARGET gflags PROPERTY INTERFACE_INCLUDE_DIRECTORIES
        ${GFLAGS_INCLUDE_DIR})
  endif()
endif()

# After above, we should have the gflags target now.
if(NOT TARGET gflags)
  message(WARNING
      "Caffe2: gflags cannot be found. Depending on whether you are building "
      "Caffe2 or a Caffe2 dependent library, the next warning / error will "
      "give you more info.")
endif()
```
================================================================================================================================== SOURCE CODE FILE: glog.cmake LINES: 1 SIZE: 2.33 KB PATH: scripts\freecad_env\Lib\site-packages\torch\share\cmake\Caffe2\public\glog.cmake ENCODING: utf-8
```cmake
# ---[ glog

# We will try to use the config mode first, and then manual find.
find_package(glog CONFIG QUIET)
if(NOT TARGET glog::glog)
  find_package(glog MODULE QUIET)
endif()

if(TARGET glog::glog)
  message(STATUS "Caffe2: Found glog with new-style glog target.")
elseif(GLOG_FOUND)
  message(
      STATUS
      "Caffe2: Found glog with old-style glog target. Glog never shipped "
      "old style glog targets, so somewhere in your cmake path there might "
      "be a custom Findglog.cmake file that got triggered. We will make a "
      "best effort to create the new style glog target for you.")
  add_library(glog::glog UNKNOWN IMPORTED)
  set_property(
      TARGET glog::glog PROPERTY IMPORTED_LOCATION ${GLOG_LIBRARY})
  set_property(
      TARGET glog::glog PROPERTY INTERFACE_INCLUDE_DIRECTORIES
      ${GLOG_INCLUDE_DIR})
else()
  message(STATUS "Caffe2: Cannot find glog automatically. Using legacy find.")

  # - Try to find Glog
  #
  # The following variables are optionally searched for defaults
  #  GLOG_ROOT_DIR: Base directory where all GLOG components are found
  #
  # The following are set after configuration is done:
  #  GLOG_FOUND
  #  GLOG_INCLUDE_DIRS
  #  GLOG_LIBRARIES
  #  GLOG_LIBRARY_DIRS
  include(FindPackageHandleStandardArgs)
  set(GLOG_ROOT_DIR "" CACHE PATH "Folder contains Google glog")

  if(NOT WIN32)
    find_path(GLOG_INCLUDE_DIR glog/logging.h
        PATHS ${GLOG_ROOT_DIR})
  endif()

  find_library(GLOG_LIBRARY glog
      PATHS ${GLOG_ROOT_DIR}
      PATH_SUFFIXES lib lib64)

  find_package_handle_standard_args(glog DEFAULT_MSG GLOG_INCLUDE_DIR GLOG_LIBRARY)

  if(GLOG_FOUND)
    message(STATUS
        "Caffe2: Found glog (include: ${GLOG_INCLUDE_DIR}, "
        "library: ${GLOG_LIBRARY})")
    add_library(glog::glog UNKNOWN IMPORTED)
    set_property(
        TARGET glog::glog PROPERTY IMPORTED_LOCATION ${GLOG_LIBRARY})
    set_property(
        TARGET glog::glog PROPERTY INTERFACE_INCLUDE_DIRECTORIES
        ${GLOG_INCLUDE_DIR})
  endif()
endif()

# After above, we should have the glog::glog target now.
if(NOT TARGET glog::glog)
  message(WARNING
      "Caffe2: glog cannot be found. Depending on whether you are building "
      "Caffe2 or a Caffe2 dependent library, the next warning / error will "
      "give you more info.")
endif()
```
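Both of the scripts above normalize whatever they find into a single imported target (`gflags`, `glog::glog`). A minimal, hypothetical consumer sketch (`my_tool` and `main.cpp` are placeholders, not part of the PyTorch build):

```cmake
add_executable(my_tool main.cpp)
# Works regardless of whether the targets came from an installed config file
# or from the legacy find logic above.
target_link_libraries(my_tool PRIVATE gflags glog::glog)
```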
================================================================================================================================= SOURCE CODE FILE: mkl.cmake LINES: 1 SIZE: 1.33 KB PATH: scripts\freecad_env\Lib\site-packages\torch\share\cmake\Caffe2\public\mkl.cmake ENCODING: utf-8
```cmake
find_package(MKL QUIET)

if(TARGET caffe2::mkl)
  return()
endif()

add_library(caffe2::mkl INTERFACE IMPORTED)
target_include_directories(caffe2::mkl INTERFACE ${MKL_INCLUDE_DIR})
target_link_libraries(caffe2::mkl INTERFACE ${MKL_LIBRARIES})
foreach(MKL_LIB IN LISTS MKL_LIBRARIES)
  if(EXISTS "${MKL_LIB}")
    get_filename_component(MKL_LINK_DIR "${MKL_LIB}" DIRECTORY)
    if(IS_DIRECTORY "${MKL_LINK_DIR}")
      target_link_directories(caffe2::mkl INTERFACE "${MKL_LINK_DIR}")
    endif()
  endif()
endforeach()

# TODO: This is a hack, it will not pick up architecture dependent
# MKL libraries correctly; see https://github.com/pytorch/pytorch/issues/73008
set_property(
  TARGET caffe2::mkl PROPERTY INTERFACE_LINK_DIRECTORIES
  ${MKL_ROOT}/lib ${MKL_ROOT}/lib/intel64 ${MKL_ROOT}/lib/intel64_win ${MKL_ROOT}/lib/win-x64)

if(UNIX)
  if(USE_STATIC_MKL)
    foreach(MKL_LIB_PATH IN LISTS MKL_LIBRARIES)
      if(NOT EXISTS "${MKL_LIB_PATH}")
        continue()
      endif()

      get_filename_component(MKL_LIB_NAME "${MKL_LIB_PATH}" NAME)

      # Match archive libraries starting with "libmkl_"
      if(MKL_LIB_NAME MATCHES "^libmkl_" AND MKL_LIB_NAME MATCHES ".a$")
        target_link_options(caffe2::mkl INTERFACE "-Wl,--exclude-libs,${MKL_LIB_NAME}")
      endif()
    endforeach()
  endif()
endif()
```
==================================================================================================================================== SOURCE CODE FILE: mkldnn.cmake LINES: 1 SIZE: 0.45 KB PATH: scripts\freecad_env\Lib\site-packages\torch\share\cmake\Caffe2\public\mkldnn.cmake ENCODING: utf-8
```cmake
set(MKLDNN_USE_NATIVE_ARCH ${USE_NATIVE_ARCH})

if(CPU_AARCH64)
  include(${CMAKE_CURRENT_LIST_DIR}/ComputeLibrary.cmake)
endif()

find_package(MKLDNN QUIET)

if(NOT TARGET caffe2::mkldnn)
  add_library(caffe2::mkldnn INTERFACE IMPORTED)
endif()

set_property(
  TARGET caffe2::mkldnn PROPERTY INTERFACE_INCLUDE_DIRECTORIES
  ${MKLDNN_INCLUDE_DIR})
set_property(
  TARGET caffe2::mkldnn PROPERTY INTERFACE_LINK_LIBRARIES
  ${MKLDNN_LIBRARIES})
```
====================================================================================================================================== SOURCE CODE FILE: protobuf.cmake LINES: 1 SIZE: 4.00 KB PATH: scripts\freecad_env\Lib\site-packages\torch\share\cmake\Caffe2\public\protobuf.cmake ENCODING: utf-8 ```cmake # ---[ Protobuf # We will try to use the config mode first, and then manual find. find_package(Protobuf CONFIG QUIET) if(NOT Protobuf_FOUND) find_package(Protobuf MODULE QUIET) endif() if((TARGET protobuf::libprotobuf OR TARGET protobuf::libprotobuf-lite) AND TARGET protobuf::protoc) # Hooray. This is the most ideal situation, meaning that you either have a # Protobuf config file installed (like on Windows), or you are using a # modern CMake that ships with a FindProtobuf.cmake file that produces # modern targets. message(STATUS "Caffe2: Found protobuf with new-style protobuf targets.") elseif(Protobuf_FOUND OR PROTOBUF_FOUND) # If the modern targets are not present, we will generate them for you for # backward compatibility. This is backported from CMake's new FindProtobuf.cmake # content. if((NOT PROTOBUF_LIBRARY) AND (NOT PROTOBUF_LITE_LIBRARY)) message(FATAL_ERROR "Caffe2: Found protobuf with old style targets, but could not find targets." " PROTOBUF_LIBRARY: " ${PROTOBUF_LIBRARY} " PROTOBUF_LITE_LIBRARY: " ${PROTOBUF_LITE_LIBRARY} " Protobuf_LIBRARY: " ${Protobuf_LIBRARY} " Protobuf_LITE_LIBRARY: " ${Protobuf_LITE_LIBRARY}) endif() message(STATUS "Caffe2: Found protobuf with old-style protobuf targets.") if(PROTOBUF_LIBRARY) if(NOT TARGET protobuf::libprotobuf) add_library(protobuf::libprotobuf UNKNOWN IMPORTED) set_target_properties(protobuf::libprotobuf PROPERTIES INTERFACE_INCLUDE_DIRECTORIES "${PROTOBUF_INCLUDE_DIRS}") endif() if(EXISTS "${PROTOBUF_LIBRARY}") set_target_properties(protobuf::libprotobuf PROPERTIES IMPORTED_LOCATION "${PROTOBUF_LIBRARY}") endif() if(EXISTS "${PROTOBUF_LIBRARY_RELEASE}") set_property(TARGET protobuf::libprotobuf APPEND PROPERTY IMPORTED_CONFIGURATIONS RELEASE) set_target_properties(protobuf::libprotobuf PROPERTIES IMPORTED_LOCATION_RELEASE "${PROTOBUF_LIBRARY_RELEASE}") endif() if(EXISTS "${PROTOBUF_LIBRARY_DEBUG}") set_property(TARGET protobuf::libprotobuf APPEND PROPERTY IMPORTED_CONFIGURATIONS DEBUG) set_target_properties(protobuf::libprotobuf PROPERTIES IMPORTED_LOCATION_DEBUG "${PROTOBUF_LIBRARY_DEBUG}") endif() endif() if(PROTOBUF_LITE_LIBRARY) if(NOT TARGET protobuf::libprotobuf-lite) add_library(protobuf::libprotobuf-lite UNKNOWN IMPORTED) set_target_properties(protobuf::libprotobuf-lite PROPERTIES INTERFACE_INCLUDE_DIRECTORIES "${PROTOBUF_INCLUDE_DIRS}") endif() if(EXISTS "${PROTOBUF_LITE_LIBRARY}") set_target_properties(protobuf::libprotobuf-lite PROPERTIES IMPORTED_LOCATION "${PROTOBUF_LITE_LIBRARY}") endif() if(EXISTS "${PROTOBUF_LITE_LIBRARY_RELEASE}") set_property(TARGET protobuf::libprotobuf-lite APPEND PROPERTY IMPORTED_CONFIGURATIONS RELEASE) set_target_properties(protobuf::libprotobuf-lite PROPERTIES IMPORTED_LOCATION_RELEASE "${PROTOBUF_LITE_LIBRARY_RELEASE}") endif() if(EXISTS "${PROTOBUF_LITE_LIBRARY_DEBUG}") set_property(TARGET protobuf::libprotobuf-lite APPEND PROPERTY IMPORTED_CONFIGURATIONS DEBUG) set_target_properties(protobuf::libprotobuf-lite PROPERTIES IMPORTED_LOCATION_DEBUG "${PROTOBUF_LITE_LIBRARY_DEBUG}") endif() endif() if(PROTOBUF_PROTOC_EXECUTABLE) if(NOT TARGET protobuf::protoc) add_executable(protobuf::protoc IMPORTED) endif() set_property(TARGET protobuf::protoc PROPERTY IMPORTED_LOCATION 
${PROTOBUF_PROTOC_EXECUTABLE}) endif() endif() # After above, we should have the protobuf related target now. if((NOT TARGET protobuf::libprotobuf) AND (NOT TARGET protobuf::libprotobuf-lite)) message(WARNING "Protobuf cannot be found. Depending on whether you are building Caffe2 " "or a Caffe2 dependent library, the next warning / error will give you " "more info.") endif() ```
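After this script runs, downstream code can rely on the `protobuf::libprotobuf` and `protobuf::protoc` targets regardless of how protobuf was located. A hypothetical consumer sketch (`my_proto_lib` and `addressbook.proto` are placeholders, not part of the PyTorch build):

```cmake
# Generate C++ sources by invoking the imported protoc target, then link the
# result against the imported runtime library target created above.
add_custom_command(
  OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/addressbook.pb.cc
         ${CMAKE_CURRENT_BINARY_DIR}/addressbook.pb.h
  COMMAND protobuf::protoc --cpp_out=${CMAKE_CURRENT_BINARY_DIR}
          -I ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/addressbook.proto
  DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/addressbook.proto)

add_library(my_proto_lib ${CMAKE_CURRENT_BINARY_DIR}/addressbook.pb.cc)
target_link_libraries(my_proto_lib PUBLIC protobuf::libprotobuf)
```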
=================================================================================================================================== SOURCE CODE FILE: utils.cmake LINES: 12 SIZE: 21.17 KB PATH: scripts\freecad_env\Lib\site-packages\torch\share\cmake\Caffe2\public\utils.cmake ENCODING: utf-8 ```cmake ################################################################################################ # Exclude and prepend functionalities function(exclude OUTPUT INPUT) set(EXCLUDES ${ARGN}) foreach(EXCLUDE ${EXCLUDES}) list(REMOVE_ITEM INPUT "${EXCLUDE}") endforeach() set(${OUTPUT} ${INPUT} PARENT_SCOPE) endfunction(exclude) function(prepend OUTPUT PREPEND) set(OUT "") foreach(ITEM ${ARGN}) list(APPEND OUT "${PREPEND}${ITEM}") endforeach() set(${OUTPUT} ${OUT} PARENT_SCOPE) endfunction(prepend) ################################################################################################ # Parses a version string that might have values beyond major, minor, and patch # and set version variables for the library. # Usage: # caffe2_parse_version_str(<library_name> <version_string>) function(caffe2_parse_version_str LIBNAME VERSIONSTR) string(REGEX REPLACE "^([0-9]+).*$" "\\1" ${LIBNAME}_VERSION_MAJOR "${VERSIONSTR}") string(REGEX REPLACE "^[0-9]+\\.([0-9]+).*$" "\\1" ${LIBNAME}_VERSION_MINOR "${VERSIONSTR}") string(REGEX REPLACE "[0-9]+\\.[0-9]+\\.([0-9]+).*$" "\\1" ${LIBNAME}_VERSION_PATCH "${VERSIONSTR}") set(${LIBNAME}_VERSION_MAJOR ${${LIBNAME}_VERSION_MAJOR} ${ARGN} PARENT_SCOPE) set(${LIBNAME}_VERSION_MINOR ${${LIBNAME}_VERSION_MINOR} ${ARGN} PARENT_SCOPE) set(${LIBNAME}_VERSION_PATCH ${${LIBNAME}_VERSION_PATCH} ${ARGN} PARENT_SCOPE) set(${LIBNAME}_VERSION "${${LIBNAME}_VERSION_MAJOR}.${${LIBNAME}_VERSION_MINOR}.${${LIBNAME}_VERSION_PATCH}" PARENT_SCOPE) endfunction() ### # Removes common indentation from a block of text to produce code suitable for # setting to `python -c`, or using with pycmd. This allows multiline code to be # nested nicely in the surrounding code structure. # # This function respsects Python_EXECUTABLE if it defined, otherwise it uses # `python` and hopes for the best. An error will be thrown if it is not found. 
# # Args: # outvar : variable that will hold the stdout of the python command # text : text to remove indentation from # function(dedent outvar text) # Use Python_EXECUTABLE if it is defined, otherwise default to python if("${Python_EXECUTABLE}" STREQUAL "") set(_python_exe "python3") else() set(_python_exe "${Python_EXECUTABLE}") endif() set(_fixup_cmd "import sys; from textwrap import dedent; print(dedent(sys.stdin.read()))") file(WRITE "${CMAKE_BINARY_DIR}/indented.txt" "${text}") execute_process( COMMAND "${_python_exe}" -c "${_fixup_cmd}" INPUT_FILE "${CMAKE_BINARY_DIR}/indented.txt" RESULT_VARIABLE _dedent_exitcode OUTPUT_VARIABLE _dedent_text) if(NOT _dedent_exitcode EQUAL 0) message(ERROR " Failed to remove indentation from: \n\"\"\"\n${text}\n\"\"\" Python dedent failed with error code: ${_dedent_exitcode}") message(FATAL_ERROR " Python dedent failed with error code: ${_dedent_exitcode}") endif() # Remove supurflous newlines (artifacts of print) string(STRIP "${_dedent_text}" _dedent_text) set(${outvar} "${_dedent_text}" PARENT_SCOPE) endfunction() function(pycmd_no_exit outvar exitcode cmd) # Use Python_EXECUTABLE if it is defined, otherwise default to python if("${Python_EXECUTABLE}" STREQUAL "") set(_python_exe "python") else() set(_python_exe "${Python_EXECUTABLE}") endif() # run the actual command execute_process( COMMAND "${_python_exe}" -c "${cmd}" RESULT_VARIABLE _exitcode OUTPUT_VARIABLE _output) # Remove supurflous newlines (artifacts of print) string(STRIP "${_output}" _output) set(${outvar} "${_output}" PARENT_SCOPE) set(${exitcode} "${_exitcode}" PARENT_SCOPE) endfunction() ### # Helper function to run `python -c "<cmd>"` and capture the results of stdout # # Runs a python command and populates an outvar with the result of stdout. # Common indentation in the text of `cmd` is removed before the command is # executed, so the caller does not need to worry about indentation issues. # # This function respsects Python_EXECUTABLE if it defined, otherwise it uses # `python` and hopes for the best. An error will be thrown if it is not found. # # Args: # outvar : variable that will hold the stdout of the python command # cmd : text representing a (possibly multiline) block of python code # function(pycmd outvar cmd) dedent(_dedent_cmd "${cmd}") pycmd_no_exit(_output _exitcode "${_dedent_cmd}") if(NOT _exitcode EQUAL 0) message(ERROR " Failed when running python code: \"\"\"\n${_dedent_cmd}\n\"\"\"") message(FATAL_ERROR " Python command failed with error code: ${_exitcode}") endif() # Remove supurflous newlines (artifacts of print) string(STRIP "${_output}" _output) set(${outvar} "${_output}" PARENT_SCOPE) endfunction() ############################################################################## # Macro to update cached options. macro(caffe2_update_option variable value) if(CAFFE2_CMAKE_BUILDING_WITH_MAIN_REPO) get_property(__help_string CACHE ${variable} PROPERTY HELPSTRING) set(${variable} ${value} CACHE BOOL ${__help_string} FORCE) else() set(${variable} ${value}) endif() endmacro() ############################################################################## # Add an interface library definition that is dependent on the source. # # It's probably easiest to explain why this macro exists, by describing # what things would look like if we didn't have this macro. # # Let's suppose we want to statically link against torch. We've defined # a library in cmake called torch, and we might think that we just # target_link_libraries(my-app PUBLIC torch). 
This will result in a # linker argument 'libtorch.a' getting passed to the linker. # # Unfortunately, this link command is wrong! We have static # initializers in libtorch.a that would get improperly pruned by # the default link settings. What we actually need is for you # to do -Wl,--whole-archive,libtorch.a -Wl,--no-whole-archive to ensure # that we keep all symbols, even if they are (seemingly) not used. # # What caffe2_interface_library does is create an interface library # that indirectly depends on the real library, but sets up the link # arguments so that you get all of the extra link settings you need. # The result is not a "real" library, and so we have to manually # copy over necessary properties from the original target. # # (The discussion above is about static libraries, but a similar # situation occurs for dynamic libraries: if no symbols are used from # a dynamic library, it will be pruned unless you are --no-as-needed) macro(caffe2_interface_library SRC DST) add_library(${DST} INTERFACE) add_dependencies(${DST} ${SRC}) # Depending on the nature of the source library as well as the compiler, # determine the needed compilation flags. get_target_property(__src_target_type ${SRC} TYPE) # Depending on the type of the source library, we will set up the # link command for the specific SRC library. if(${__src_target_type} STREQUAL "STATIC_LIBRARY") # In the case of static library, we will need to add whole-static flags. if(APPLE) target_link_libraries( ${DST} INTERFACE -Wl,-force_load,\"$<TARGET_FILE:${SRC}>\") elseif(MSVC) # In MSVC, we will add whole archive in default. target_link_libraries( ${DST} INTERFACE "$<TARGET_FILE:${SRC}>") target_link_options( ${DST} INTERFACE "-WHOLEARCHIVE:$<TARGET_FILE:${SRC}>") else() # Assume everything else is like gcc target_link_libraries(${DST} INTERFACE "-Wl,--whole-archive,\"$<TARGET_FILE:${SRC}>\" -Wl,--no-whole-archive") endif() # Link all interface link libraries of the src target as well. # For static library, we need to explicitly depend on all the libraries # that are the dependent library of the source library. Note that we cannot # use the populated INTERFACE_LINK_LIBRARIES property, because if one of the # dependent library is not a target, cmake creates a $<LINK_ONLY:src> wrapper # and then one is not able to find target "src". For more discussions, check # https://gitlab.kitware.com/cmake/cmake/issues/15415 # https://cmake.org/pipermail/cmake-developers/2013-May/019019.html # Specifically the following quote # # """ # For STATIC libraries we can define that the PUBLIC/PRIVATE/INTERFACE keys # are ignored for linking and that it always populates both LINK_LIBRARIES # LINK_INTERFACE_LIBRARIES. Note that for STATIC libraries the # LINK_LIBRARIES property will not be used for anything except build-order # dependencies. # """ target_link_libraries(${DST} INTERFACE $<TARGET_PROPERTY:${SRC},LINK_LIBRARIES>) elseif(${__src_target_type} STREQUAL "SHARED_LIBRARY") if("${CMAKE_CXX_COMPILER_ID}" MATCHES "GNU") target_link_libraries(${DST} INTERFACE "-Wl,--no-as-needed,\"$<TARGET_FILE:${SRC}>\" -Wl,--as-needed") else() target_link_libraries(${DST} INTERFACE ${SRC}) endif() # Link all interface link libraries of the src target as well. # For shared libraries, we can simply depend on the INTERFACE_LINK_LIBRARIES # property of the target. 
target_link_libraries(${DST} INTERFACE $<TARGET_PROPERTY:${SRC},INTERFACE_LINK_LIBRARIES>) else() message(FATAL_ERROR "You made a CMake build file error: target " ${SRC} " must be of type either STATIC_LIBRARY or SHARED_LIBRARY. However, " "I got " ${__src_target_type} ".") endif() # For all other interface properties, manually inherit from the source target. set_target_properties(${DST} PROPERTIES INTERFACE_COMPILE_DEFINITIONS $<TARGET_PROPERTY:${SRC},INTERFACE_COMPILE_DEFINITIONS> INTERFACE_COMPILE_OPTIONS $<TARGET_PROPERTY:${SRC},INTERFACE_COMPILE_OPTIONS> INTERFACE_INCLUDE_DIRECTORIES $<TARGET_PROPERTY:${SRC},INTERFACE_INCLUDE_DIRECTORIES> INTERFACE_SYSTEM_INCLUDE_DIRECTORIES $<TARGET_PROPERTY:${SRC},INTERFACE_SYSTEM_INCLUDE_DIRECTORIES>) endmacro() ############################################################################## # Creating a Caffe2 binary target with sources specified with relative path. # Usage: # caffe2_binary_target(target_name_or_src <src1> [<src2>] [<src3>] ...) # If only target_name_or_src is specified, this target is build with one single # source file and the target name is autogen from the filename. Otherwise, the # target name is given by the first argument and the rest are the source files # to build the target. function(caffe2_binary_target target_name_or_src) # https://cmake.org/cmake/help/latest/command/function.html # Checking that ARGC is greater than # is the only way to ensure # that ARGV# was passed to the function as an extra argument. if(ARGC GREATER 1) set(__target ${target_name_or_src}) prepend(__srcs "${CMAKE_CURRENT_SOURCE_DIR}/" "${ARGN}") else() get_filename_component(__target ${target_name_or_src} NAME_WE) prepend(__srcs "${CMAKE_CURRENT_SOURCE_DIR}/" "${target_name_or_src}") endif() add_executable(${__target} ${__srcs}) target_link_libraries(${__target} torch_library) # If we have Caffe2_MODULES defined, we will also link with the modules. if(DEFINED Caffe2_MODULES) target_link_libraries(${__target} ${Caffe2_MODULES}) endif() install(TARGETS ${__target} DESTINATION bin) endfunction() function(caffe2_hip_binary_target target_name_or_src) if(ARGC GREATER 1) set(__target ${target_name_or_src}) prepend(__srcs "${CMAKE_CURRENT_SOURCE_DIR}/" "${ARGN}") else() get_filename_component(__target ${target_name_or_src} NAME_WE) prepend(__srcs "${CMAKE_CURRENT_SOURCE_DIR}/" "${target_name_or_src}") endif() caffe2_binary_target(${target_name_or_src}) target_compile_options(${__target} PRIVATE ${HIP_CXX_FLAGS}) target_include_directories(${__target} PRIVATE ${Caffe2_HIP_INCLUDE}) endfunction() ############################################################################## # Multiplex between adding libraries for CUDA versus HIP (AMD Software Stack). # Usage: # torch_cuda_based_add_library(cuda_target) # macro(torch_cuda_based_add_library cuda_target) if(USE_ROCM) hip_add_library(${cuda_target} ${ARGN}) elseif(USE_CUDA) add_library(${cuda_target} ${ARGN}) else() endif() endmacro() ############################################################################## # Get the HIP arch flags specified by PYTORCH_ROCM_ARCH. 
# Usage: # torch_hip_get_arch_list(variable_to_store_flags) # macro(torch_hip_get_arch_list store_var) if(DEFINED ENV{PYTORCH_ROCM_ARCH}) set(_TMP $ENV{PYTORCH_ROCM_ARCH}) else() # Use arch of installed GPUs as default execute_process(COMMAND "rocm_agent_enumerator" COMMAND bash "-c" "grep -v gfx000 | sort -u | xargs | tr -d '\n'" RESULT_VARIABLE ROCM_AGENT_ENUMERATOR_RESULT OUTPUT_VARIABLE ROCM_ARCH_INSTALLED) if(NOT ROCM_AGENT_ENUMERATOR_RESULT EQUAL 0) message(FATAL_ERROR " Could not detect ROCm arch for GPUs on machine. Result: '${ROCM_AGENT_ENUMERATOR_RESULT}'") endif() set(_TMP ${ROCM_ARCH_INSTALLED}) endif() string(REPLACE " " ";" ${store_var} "${_TMP}") endmacro() ############################################################################## # Get the XPU arch flags specified by TORCH_XPU_ARCH_LIST. # Usage: # torch_xpu_get_arch_list(variable_to_store_flags) # macro(torch_xpu_get_arch_list store_var) if(DEFINED ENV{TORCH_XPU_ARCH_LIST}) set(${store_var} $ENV{TORCH_XPU_ARCH_LIST}) endif() endmacro() ############################################################################## # Get the NVCC arch flags specified by TORCH_CUDA_ARCH_LIST and CUDA_ARCH_NAME. # Usage: # torch_cuda_get_nvcc_gencode_flag(variable_to_store_flags) # macro(torch_cuda_get_nvcc_gencode_flag store_var) # setting nvcc arch flags if((NOT DEFINED TORCH_CUDA_ARCH_LIST) AND (DEFINED ENV{TORCH_CUDA_ARCH_LIST})) message(WARNING "In the future we will require one to explicitly pass " "TORCH_CUDA_ARCH_LIST to cmake instead of implicitly setting it as an " "env variable. This will become a FATAL_ERROR in future version of " "pytorch.") set(TORCH_CUDA_ARCH_LIST $ENV{TORCH_CUDA_ARCH_LIST}) endif() if(DEFINED CUDA_ARCH_NAME) message(WARNING "CUDA_ARCH_NAME is no longer used. Use TORCH_CUDA_ARCH_LIST instead. " "Right now, CUDA_ARCH_NAME is ${CUDA_ARCH_NAME} and " "TORCH_CUDA_ARCH_LIST is ${TORCH_CUDA_ARCH_LIST}.") set(TORCH_CUDA_ARCH_LIST TORCH_CUDA_ARCH_LIST ${CUDA_ARCH_NAME}) endif() # Invoke cuda_select_nvcc_arch_flags from proper cmake FindCUDA. cuda_select_nvcc_arch_flags(${store_var} ${TORCH_CUDA_ARCH_LIST}) endmacro() ############################################################################## # Add standard compile options. # Usage: # torch_compile_options(lib_name) function(torch_compile_options libname) set_property(TARGET ${libname} PROPERTY CXX_STANDARD 17) # until they can be unified, keep these lists synced with setup.py if(MSVC) if(MSVC_Z7_OVERRIDE) set(MSVC_DEBINFO_OPTION "/Z7") else() set(MSVC_DEBINFO_OPTION "/Zi") endif() target_compile_options(${libname} PUBLIC $<$<COMPILE_LANGUAGE:CXX>: ${MSVC_RUNTIME_LIBRARY_OPTION} $<$<OR:$<CONFIG:Debug>,$<CONFIG:RelWithDebInfo>>:${MSVC_DEBINFO_OPTION}> /EHsc /bigobj> ) else() set(private_compile_options -Wall -Wextra -Wdeprecated -Wno-unused-parameter -Wno-missing-field-initializers -Wno-array-bounds -Wno-unknown-pragmas -Wno-strict-overflow -Wno-strict-aliasing ) list(APPEND private_compile_options -Wunused-function) list(APPEND private_compile_options -Wunused-variable) if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU") list(APPEND private_compile_options -Wunused-but-set-variable) endif() if(CMAKE_CXX_COMPILER_ID MATCHES "Clang") list(APPEND private_compile_options -Wunused-private-field -Wextra-semi -Wno-error=extra-semi) else() list(APPEND private_compile_options # Considered to be flaky. 
See the discussion at # https://github.com/pytorch/pytorch/pull/9608 -Wno-maybe-uninitialized) endif() if(WERROR) list(APPEND private_compile_options -Werror -Werror=inconsistent-missing-override -Werror=inconsistent-missing-destructor-override -Werror=unused-function -Werror=unused-variable -Werror=pedantic ) if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU") list(APPEND private_compile_options -Werror=unused-but-set-variable) endif() endif() endif() target_compile_options(${libname} PRIVATE $<$<COMPILE_LANGUAGE:CXX>:${private_compile_options}>) if(USE_CUDA) foreach(option IN LISTS private_compile_options) if(CMAKE_CUDA_HOST_COMPILER_ID STREQUAL "GNU") if("${option}" STREQUAL "-Wextra-semi") continue() endif() if("${option}" STREQUAL "-Wunused-private-field") continue() endif() endif() target_compile_options(${libname} PRIVATE $<$<COMPILE_LANGUAGE:CUDA>:-Xcompiler ${option}>) endforeach() endif() if(NOT WIN32 AND NOT USE_ASAN) # Enable hidden visibility by default to make it easier to debug issues with # TORCH_API annotations. Hidden visibility with selective default visibility # behaves close enough to Windows' dllimport/dllexport. # # Unfortunately, hidden visibility messes up some ubsan warnings because # templated classes crossing library boundary get duplicated (but identical) # definitions. It's easier to just disable it. target_compile_options(${libname} PRIVATE $<$<COMPILE_LANGUAGE:CXX>: -fvisibility=hidden>) endif() # Use -O2 for release builds (-O3 doesn't improve perf, and -Os results in perf regression) target_compile_options(${libname} PRIVATE $<$<AND:$<COMPILE_LANGUAGE:CXX>,$<OR:$<CONFIG:Release>,$<CONFIG:RelWithDebInfo>>>:-O2>) endfunction() ############################################################################## # Set old-style FindCuda.cmake compile flags from modern CMake cuda flags. 
# Usage: # torch_update_find_cuda_flags() function(torch_update_find_cuda_flags) # Convert -O2 -Xcompiler="-O2 -Wall" to "-O2;-Xcompiler=-O2,-Wall" if(USE_CUDA) separate_arguments(FLAGS UNIX_COMMAND "${CMAKE_CUDA_FLAGS}") string(REPLACE " " "," FLAGS "${FLAGS}") set(CUDA_NVCC_FLAGS ${FLAGS} PARENT_SCOPE) separate_arguments(FLAGS_DEBUG UNIX_COMMAND "${CMAKE_CUDA_FLAGS_DEBUG}") string(REPLACE " " "," FLAGS_DEBUG "${FLAGS_DEBUG}") set(CUDA_NVCC_FLAGS_DEBUG "${FLAGS_DEBUG}" PARENT_SCOPE) separate_arguments(FLAGS_RELEASE UNIX_COMMAND "${CMAKE_CUDA_FLAGS_RELEASE}") string(REPLACE " " "," FLAGS_RELEASE "${FLAGS_RELEASE}") set(CUDA_NVCC_FLAGS_RELEASE "${FLAGS_RELEASE}" PARENT_SCOPE) separate_arguments(FLAGS_MINSIZEREL UNIX_COMMAND "${CMAKE_CUDA_FLAGS_MINSIZEREL}") string(REPLACE " " "," FLAGS_MINSIZEREL "${FLAGS_MINSIZEREL}") set(CUDA_NVCC_FLAGS_MINSIZEREL "${FLAGS_MINSIZEREL}" PARENT_SCOPE) separate_arguments(FLAGS_RELWITHDEBINFO UNIX_COMMAND "${CMAKE_CUDA_FLAGS_RELWITHDEBINFO}") string(REPLACE " " "," FLAGS_RELWITHDEBINFO "${FLAGS_RELWITHDEBINFO}") set(CUDA_NVCC_FLAGS_RELWITHDEBINFO "${FLAGS_RELWITHDEBINFO}" PARENT_SCOPE) message(STATUS "Converting CMAKE_CUDA_FLAGS to CUDA_NVCC_FLAGS:\n" " CUDA_NVCC_FLAGS = ${FLAGS}\n" " CUDA_NVCC_FLAGS_DEBUG = ${FLAGS_DEBUG}\n" " CUDA_NVCC_FLAGS_RELEASE = ${FLAGS_RELEASE}\n" " CUDA_NVCC_FLAGS_RELWITHDEBINFO = ${FLAGS_RELWITHDEBINFO}\n" " CUDA_NVCC_FLAGS_MINSIZEREL = ${FLAGS_MINSIZEREL}") endif() endfunction() include(CheckCXXCompilerFlag) ############################################################################## # CHeck if given flag is supported and append it to provided outputvar # Also define HAS_UPPER_CASE_FLAG_NAME variable # Usage: # append_cxx_flag_if_supported("-Werror" CMAKE_CXX_FLAGS) function(append_cxx_flag_if_supported flag outputvar) string(TOUPPER "HAS${flag}" _FLAG_NAME) string(REGEX REPLACE "[=-]" "_" _FLAG_NAME "${_FLAG_NAME}") # GCC silents unknown -Wno-XXX flags, so we detect the corresponding -WXXX. if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU") string(REGEX REPLACE "Wno-" "W" new_flag "${flag}") else() set(new_flag ${flag}) endif() check_cxx_compiler_flag("${new_flag}" ${_FLAG_NAME}) if(${_FLAG_NAME}) string(APPEND ${outputvar} " ${flag}") set(${outputvar} "${${outputvar}}" PARENT_SCOPE) endif() endfunction() function(target_compile_options_if_supported target flag) set(_compile_options "") append_cxx_flag_if_supported("${flag}" _compile_options) if(NOT "${_compile_options}" STREQUAL "") target_compile_options(${target} PRIVATE ${flag}) endif() endfunction() ```
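A short, illustrative sketch of how a few of the helpers defined above might be called. `MYLIB` and `my_target` are placeholder names used only for this example, not part of the PyTorch build.

```cmake
# caffe2_parse_version_str sets MYLIB_VERSION_MAJOR/MINOR/PATCH and MYLIB_VERSION.
caffe2_parse_version_str(MYLIB "3.2.1-rc1")
message(STATUS "Parsed MYLIB version: ${MYLIB_VERSION}")  # -> 3.2.1

# append_cxx_flag_if_supported only appends the flag when the compiler accepts it.
append_cxx_flag_if_supported("-Wno-deprecated-copy" CMAKE_CXX_FLAGS)

# target_compile_options_if_supported does the same check per target:
# target_compile_options_if_supported(my_target "-Wshadow")
```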
================================================================================================================================= SOURCE CODE FILE: xpu.cmake LINES: 1 SIZE: 1.41 KB PATH: scripts\freecad_env\Lib\site-packages\torch\share\cmake\Caffe2\public\xpu.cmake ENCODING: utf-8
```cmake
# ---[ xpu

# Poor man's include guard
if(TARGET torch::xpurt)
  return()
endif()

set(XPU_HOST_CXX_FLAGS)
set(XPU_DEVICE_CXX_FLAGS)

# Find SYCL library.
find_package(SYCLToolkit REQUIRED)
if(NOT SYCL_FOUND)
  set(PYTORCH_FOUND_XPU FALSE)
  return()
endif()
set(PYTORCH_FOUND_XPU TRUE)

# SYCL library interface
add_library(torch::sycl INTERFACE IMPORTED)

set_property(
    TARGET torch::sycl PROPERTY INTERFACE_INCLUDE_DIRECTORIES
    ${SYCL_INCLUDE_DIR})
set_property(
    TARGET torch::sycl PROPERTY INTERFACE_LINK_LIBRARIES
    ${SYCL_LIBRARY})

# xpurt
add_library(torch::xpurt INTERFACE IMPORTED)

set_property(
    TARGET torch::xpurt PROPERTY INTERFACE_LINK_LIBRARIES
    torch::sycl)

# setting xpu arch flags
torch_xpu_get_arch_list(XPU_ARCH_FLAGS)
# propagate to torch-xpu-ops
set(TORCH_XPU_ARCH_LIST ${XPU_ARCH_FLAGS})

if(CMAKE_SYSTEM_NAME MATCHES "Linux" AND SYCL_COMPILER_VERSION VERSION_LESS_EQUAL PYTORCH_2_5_SYCL_TOOLKIT_VERSION)
  # for ABI compatibility on Linux
  string(APPEND XPU_HOST_CXX_FLAGS " -D__INTEL_PREVIEW_BREAKING_CHANGES")
  string(APPEND XPU_DEVICE_CXX_FLAGS " -fpreview-breaking-changes")
endif()

string(APPEND XPU_HOST_CXX_FLAGS " -DSYCL_COMPILER_VERSION=${SYCL_COMPILER_VERSION}")

if(DEFINED ENV{XPU_ENABLE_KINETO})
  set(XPU_ENABLE_KINETO TRUE)
else()
  set(XPU_ENABLE_KINETO FALSE)
endif()

if(NOT WIN32)
  set(XPU_ENABLE_KINETO TRUE)
endif()
```
================================================================================================================================= SOURCE CODE FILE: TorchConfig.cmake LINES: 1 SIZE: 5.40 KB PATH: scripts\freecad_env\Lib\site-packages\torch\share\cmake\Torch\TorchConfig.cmake ENCODING: utf-8 ```cmake # FindTorch # ------- # # Finds the Torch library # # This will define the following variables: # # TORCH_FOUND -- True if the system has the Torch library # TORCH_INCLUDE_DIRS -- The include directories for torch # TORCH_LIBRARIES -- Libraries to link against # TORCH_CXX_FLAGS -- Additional (required) compiler flags # # and the following imported targets: # # torch macro(append_torchlib_if_found) foreach (_arg ${ARGN}) find_library(${_arg}_LIBRARY ${_arg} PATHS "${TORCH_INSTALL_PREFIX}/lib") if(${_arg}_LIBRARY) list(APPEND TORCH_LIBRARIES ${${_arg}_LIBRARY}) else() message(WARNING "static library ${${_arg}_LIBRARY} not found.") endif() endforeach() endmacro() macro(append_wholearchive_lib_if_found) foreach (_arg ${ARGN}) find_library(${_arg}_LIBRARY ${_arg} PATHS "${TORCH_INSTALL_PREFIX}/lib") if(${_arg}_LIBRARY) if(APPLE) list(APPEND TORCH_LIBRARIES "-Wl,-force_load,${${_arg}_LIBRARY}") elseif(MSVC) list(APPEND TORCH_LIBRARIES "-WHOLEARCHIVE:${${_arg}_LIBRARY}") else() # Linux list(APPEND TORCH_LIBRARIES "-Wl,--whole-archive ${${_arg}_LIBRARY} -Wl,--no-whole-archive") endif() else() message(WARNING "static library ${${_arg}_LIBRARY} not found.") endif() endforeach() endmacro() include(FindPackageHandleStandardArgs) if(DEFINED ENV{TORCH_INSTALL_PREFIX}) set(TORCH_INSTALL_PREFIX $ENV{TORCH_INSTALL_PREFIX}) else() # Assume we are in <install-prefix>/share/cmake/Torch/TorchConfig.cmake get_filename_component(CMAKE_CURRENT_LIST_DIR "${CMAKE_CURRENT_LIST_FILE}" PATH) get_filename_component(TORCH_INSTALL_PREFIX "${CMAKE_CURRENT_LIST_DIR}/../../../" ABSOLUTE) endif() # Include directories. if(EXISTS "${TORCH_INSTALL_PREFIX}/include") set(TORCH_INCLUDE_DIRS ${TORCH_INSTALL_PREFIX}/include ${TORCH_INSTALL_PREFIX}/include/torch/csrc/api/include) else() set(TORCH_INCLUDE_DIRS ${TORCH_INSTALL_PREFIX}/include ${TORCH_INSTALL_PREFIX}/include/torch/csrc/api/include) endif() # Library dependencies. if(ON) find_package(Caffe2 REQUIRED PATHS ${CMAKE_CURRENT_LIST_DIR}/../Caffe2) set(TORCH_LIBRARIES torch ${Caffe2_MAIN_LIBS}) append_torchlib_if_found(c10) else() add_library(torch STATIC IMPORTED) # set imported_location at the bottom #library need whole archive append_wholearchive_lib_if_found(torch torch_cpu) if(ON) append_wholearchive_lib_if_found(torch_cuda c10_cuda) endif() if(OFF) append_wholearchive_lib_if_found(torch_xpu c10_xpu) endif() # We need manually add dependent libraries when they are not linked into the # shared library. # TODO: this list might be incomplete. 
append_torchlib_if_found(c10) if(OFF) append_torchlib_if_found(nnpack) endif() if(OFF) append_torchlib_if_found(pytorch_qnnpack) endif() if(ON) append_torchlib_if_found(XNNPACK) append_torchlib_if_found(microkernels-prod) endif() if(OFF) append_torchlib_if_found(kleidiai) endif() append_torchlib_if_found(caffe2_protos protobuf-lite protobuf protoc) append_torchlib_if_found(onnx onnx_proto) append_torchlib_if_found(fmt) append_torchlib_if_found(cpuinfo clog) append_torchlib_if_found(eigen_blas) append_torchlib_if_found(pthreadpool) if(ON) append_torchlib_if_found(fbgemm) endif() if(ON) append_torchlib_if_found(dnnl mkldnn) endif() append_torchlib_if_found(sleef asmjit) endif() if(ON) append_torchlib_if_found(kineto) endif() if(ON) if(MSVC) find_library(CAFFE2_NVRTC_LIBRARY caffe2_nvrtc PATHS "${TORCH_INSTALL_PREFIX}/lib") list(APPEND TORCH_CUDA_LIBRARIES ${CAFFE2_NVRTC_LIBRARY}) else() set(TORCH_CUDA_LIBRARIES ${CUDA_NVRTC_LIB}) endif() if(TARGET torch::nvtoolsext) list(APPEND TORCH_CUDA_LIBRARIES torch::nvtoolsext) endif() if(ON) find_library(C10_CUDA_LIBRARY c10_cuda PATHS "${TORCH_INSTALL_PREFIX}/lib") list(APPEND TORCH_CUDA_LIBRARIES ${C10_CUDA_LIBRARY} ${Caffe2_PUBLIC_CUDA_DEPENDENCY_LIBS}) endif() list(APPEND TORCH_LIBRARIES ${TORCH_CUDA_LIBRARIES}) endif() if(OFF AND ON) append_torchlib_if_found(c10_xpu torch_xpu) endif() # When we build libtorch with the old libstdc++ ABI, dependent libraries must too. if(CMAKE_SYSTEM_NAME STREQUAL "Linux") set(TORCH_CXX_FLAGS "-D_GLIBCXX_USE_CXX11_ABI=") endif() find_library(TORCH_LIBRARY torch PATHS "${TORCH_INSTALL_PREFIX}/lib") # the statements below changes target properties on # - the imported target from Caffe2Targets.cmake in shared library mode (see the find_package above) # - this is untested whether it is the correct (or desired) methodology in CMake # - the imported target created in this file in static library mode if(NOT ON) # do not set this property on the shared library target, as it will cause confusion in some builds # as the configuration specific property is set in the Caffe2Targets.cmake file set_target_properties(torch PROPERTIES IMPORTED_LOCATION "${TORCH_LIBRARY}" ) endif() set_target_properties(torch PROPERTIES INTERFACE_INCLUDE_DIRECTORIES "${TORCH_INCLUDE_DIRS}" CXX_STANDARD 17 ) if(TORCH_CXX_FLAGS) set_property(TARGET torch PROPERTY INTERFACE_COMPILE_OPTIONS "${TORCH_CXX_FLAGS}") endif() find_package_handle_standard_args(Torch DEFAULT_MSG TORCH_LIBRARY TORCH_INCLUDE_DIRS) ```
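TorchConfig.cmake is what a downstream `find_package(Torch)` call resolves to. A minimal consumer sketch, close to the pattern shown in the PyTorch C++ documentation; `example_app` and `main.cpp` are placeholders.

```cmake
cmake_minimum_required(VERSION 3.18)
project(example_app CXX)

# Point CMAKE_PREFIX_PATH at the libtorch / site-packages install if needed.
find_package(Torch REQUIRED)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${TORCH_CXX_FLAGS}")

add_executable(example_app main.cpp)
target_link_libraries(example_app "${TORCH_LIBRARIES}")
set_property(TARGET example_app PROPERTY CXX_STANDARD 17)
```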
======================================================================================================================================== SOURCE CODE FILE: TorchConfigVersion.cmake LINES: 1 SIZE: 0.37 KB PATH: scripts\freecad_env\Lib\site-packages\torch\share\cmake\Torch\TorchConfigVersion.cmake ENCODING: utf-8
```cmake
set(PACKAGE_VERSION "2.7.0")

# Check whether the requested PACKAGE_FIND_VERSION is compatible
if("${PACKAGE_VERSION}" VERSION_LESS "${PACKAGE_FIND_VERSION}")
  set(PACKAGE_VERSION_COMPATIBLE FALSE)
else()
  set(PACKAGE_VERSION_COMPATIBLE TRUE)
  if("${PACKAGE_VERSION}" VERSION_EQUAL "${PACKAGE_FIND_VERSION}")
    set(PACKAGE_VERSION_EXACT TRUE)
  endif()
endif()
```
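The version file above marks any requested version up to 2.7.0 as compatible and flags an exact match separately. Illustrative `find_package` requests against it:

```cmake
find_package(Torch 2.7 REQUIRED)     # compatible: 2.7.0 is not VERSION_LESS 2.7
find_package(Torch 2.7.0 EXACT)      # also sets PACKAGE_VERSION_EXACT
# find_package(Torch 2.8 REQUIRED)   # would fail: 2.7.0 VERSION_LESS 2.8
```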
================================================================================================================ SOURCE CODE FILE: __init__.py LINES: 1 SIZE: 0.05 KB PATH: scripts\freecad_env\Lib\site-packages\torch\signal\__init__.py ENCODING: utf-8
```py
from . import windows

__all__ = ["windows"]
```
======================================================================================================================== SOURCE CODE FILE: __init__.py LINES: 1 SIZE: 0.40 KB PATH: scripts\freecad_env\Lib\site-packages\torch\signal\windows\__init__.py ENCODING: utf-8
```py
from .windows import (
    bartlett,
    blackman,
    cosine,
    exponential,
    gaussian,
    general_cosine,
    general_hamming,
    hamming,
    hann,
    kaiser,
    nuttall,
)

__all__ = [
    "bartlett",
    "blackman",
    "cosine",
    "exponential",
    "gaussian",
    "general_cosine",
    "general_hamming",
    "hamming",
    "hann",
    "kaiser",
    "nuttall",
]
```
======================================================================================================================= SOURCE CODE FILE: windows.py LINES: 1 SIZE: 22.99 KB PATH: scripts\freecad_env\Lib\site-packages\torch\signal\windows\windows.py ENCODING: utf-8 ```py # mypy: allow-untyped-defs from collections.abc import Iterable from math import sqrt from typing import Callable, Optional, TypeVar import torch from torch import Tensor from torch._torch_docs import factory_common_args, merge_dicts, parse_kwargs __all__ = [ "bartlett", "blackman", "cosine", "exponential", "gaussian", "general_cosine", "general_hamming", "hamming", "hann", "kaiser", "nuttall", ] _T = TypeVar("_T") window_common_args = merge_dicts( parse_kwargs( """ M (int): the length of the window. In other words, the number of points of the returned window. sym (bool, optional): If `False`, returns a periodic window suitable for use in spectral analysis. If `True`, returns a symmetric window suitable for use in filter design. Default: `True`. """ ), factory_common_args, { "normalization": "The window is normalized to 1 (maximum value is 1). However, the 1 doesn't appear if " ":attr:`M` is even and :attr:`sym` is `True`.", }, ) def _add_docstr(*args: str) -> Callable[[_T], _T]: r"""Adds docstrings to a given decorated function. Specially useful when then docstrings needs string interpolation, e.g., with str.format(). REMARK: Do not use this function if the docstring doesn't need string interpolation, just write a conventional docstring. Args: args (str): """ def decorator(o: _T) -> _T: o.__doc__ = "".join(args) return o return decorator def _window_function_checks( function_name: str, M: int, dtype: torch.dtype, layout: torch.layout ) -> None: r"""Performs common checks for all the defined windows. This function should be called before computing any window. Args: function_name (str): name of the window function. M (int): length of the window. dtype (:class:`torch.dtype`): the desired data type of returned tensor. layout (:class:`torch.layout`): the desired layout of returned tensor. """ if M < 0: raise ValueError( f"{function_name} requires non-negative window length, got M={M}" ) if layout is not torch.strided: raise ValueError( f"{function_name} is implemented for strided tensors only, got: {layout}" ) if dtype not in [torch.float32, torch.float64]: raise ValueError( f"{function_name} expects float32 or float64 dtypes, got: {dtype}" ) @_add_docstr( r""" Computes a window with an exponential waveform. Also known as Poisson window. The exponential window is defined as follows: .. math:: w_n = \exp{\left(-\frac{|n - c|}{\tau}\right)} where `c` is the ``center`` of the window. """, r""" {normalization} Args: {M} Keyword args: center (float, optional): where the center of the window will be located. Default: `M / 2` if `sym` is `False`, else `(M - 1) / 2`. tau (float, optional): the decay value. Tau is generally associated with a percentage, that means, that the value should vary within the interval (0, 100]. If tau is 100, it is considered the uniform window. Default: 1.0. {sym} {dtype} {layout} {device} {requires_grad} Examples:: >>> # Generates a symmetric exponential window of size 10 and with a decay value of 1.0. >>> # The center will be at (M - 1) / 2, where M is 10. 
>>> torch.signal.windows.exponential(10) tensor([0.0111, 0.0302, 0.0821, 0.2231, 0.6065, 0.6065, 0.2231, 0.0821, 0.0302, 0.0111]) >>> # Generates a periodic exponential window and decay factor equal to .5 >>> torch.signal.windows.exponential(10, sym=False,tau=.5) tensor([4.5400e-05, 3.3546e-04, 2.4788e-03, 1.8316e-02, 1.3534e-01, 1.0000e+00, 1.3534e-01, 1.8316e-02, 2.4788e-03, 3.3546e-04]) """.format( **window_common_args ), ) def exponential( M: int, *, center: Optional[float] = None, tau: float = 1.0, sym: bool = True, dtype: Optional[torch.dtype] = None, layout: torch.layout = torch.strided, device: Optional[torch.device] = None, requires_grad: bool = False, ) -> Tensor: if dtype is None: dtype = torch.get_default_dtype() _window_function_checks("exponential", M, dtype, layout) if tau <= 0: raise ValueError(f"Tau must be positive, got: {tau} instead.") if sym and center is not None: raise ValueError("Center must be None for symmetric windows") if M == 0: return torch.empty( (0,), dtype=dtype, layout=layout, device=device, requires_grad=requires_grad ) if center is None: center = (M if not sym and M > 1 else M - 1) / 2.0 constant = 1 / tau k = torch.linspace( start=-center * constant, end=(-center + (M - 1)) * constant, steps=M, dtype=dtype, layout=layout, device=device, requires_grad=requires_grad, ) return torch.exp(-torch.abs(k)) @_add_docstr( r""" Computes a window with a simple cosine waveform, following the same implementation as SciPy. This window is also known as the sine window. The cosine window is defined as follows: .. math:: w_n = \sin\left(\frac{\pi (n + 0.5)}{M}\right) This formula differs from the typical cosine window formula by incorporating a 0.5 term in the numerator, which shifts the sample positions. This adjustment results in a window that starts and ends with non-zero values. """, r""" {normalization} Args: {M} Keyword args: {sym} {dtype} {layout} {device} {requires_grad} Examples:: >>> # Generates a symmetric cosine window. >>> torch.signal.windows.cosine(10) tensor([0.1564, 0.4540, 0.7071, 0.8910, 0.9877, 0.9877, 0.8910, 0.7071, 0.4540, 0.1564]) >>> # Generates a periodic cosine window. >>> torch.signal.windows.cosine(10, sym=False) tensor([0.1423, 0.4154, 0.6549, 0.8413, 0.9595, 1.0000, 0.9595, 0.8413, 0.6549, 0.4154]) """.format( **window_common_args, ), ) def cosine( M: int, *, sym: bool = True, dtype: Optional[torch.dtype] = None, layout: torch.layout = torch.strided, device: Optional[torch.device] = None, requires_grad: bool = False, ) -> Tensor: if dtype is None: dtype = torch.get_default_dtype() _window_function_checks("cosine", M, dtype, layout) if M == 0: return torch.empty( (0,), dtype=dtype, layout=layout, device=device, requires_grad=requires_grad ) start = 0.5 constant = torch.pi / (M + 1 if not sym and M > 1 else M) k = torch.linspace( start=start * constant, end=(start + (M - 1)) * constant, steps=M, dtype=dtype, layout=layout, device=device, requires_grad=requires_grad, ) return torch.sin(k) @_add_docstr( r""" Computes a window with a gaussian waveform. The gaussian window is defined as follows: .. math:: w_n = \exp{\left(-\left(\frac{n}{2\sigma}\right)^2\right)} """, r""" {normalization} Args: {M} Keyword args: std (float, optional): the standard deviation of the gaussian. It controls how narrow or wide the window is. Default: 1.0. {sym} {dtype} {layout} {device} {requires_grad} Examples:: >>> # Generates a symmetric gaussian window with a standard deviation of 1.0. 
>>> torch.signal.windows.gaussian(10) tensor([4.0065e-05, 2.1875e-03, 4.3937e-02, 3.2465e-01, 8.8250e-01, 8.8250e-01, 3.2465e-01, 4.3937e-02, 2.1875e-03, 4.0065e-05]) >>> # Generates a periodic gaussian window and standard deviation equal to 0.9. >>> torch.signal.windows.gaussian(10, sym=False,std=0.9) tensor([1.9858e-07, 5.1365e-05, 3.8659e-03, 8.4658e-02, 5.3941e-01, 1.0000e+00, 5.3941e-01, 8.4658e-02, 3.8659e-03, 5.1365e-05]) """.format( **window_common_args, ), ) def gaussian( M: int, *, std: float = 1.0, sym: bool = True, dtype: Optional[torch.dtype] = None, layout: torch.layout = torch.strided, device: Optional[torch.device] = None, requires_grad: bool = False, ) -> Tensor: if dtype is None: dtype = torch.get_default_dtype() _window_function_checks("gaussian", M, dtype, layout) if std <= 0: raise ValueError(f"Standard deviation must be positive, got: {std} instead.") if M == 0: return torch.empty( (0,), dtype=dtype, layout=layout, device=device, requires_grad=requires_grad ) start = -(M if not sym and M > 1 else M - 1) / 2.0 constant = 1 / (std * sqrt(2)) k = torch.linspace( start=start * constant, end=(start + (M - 1)) * constant, steps=M, dtype=dtype, layout=layout, device=device, requires_grad=requires_grad, ) return torch.exp(-(k**2)) @_add_docstr( r""" Computes the Kaiser window. The Kaiser window is defined as follows: .. math:: w_n = I_0 \left( \beta \sqrt{1 - \left( {\frac{n - N/2}{N/2}} \right) ^2 } \right) / I_0( \beta ) where ``I_0`` is the zeroth order modified Bessel function of the first kind (see :func:`torch.special.i0`), and ``N = M - 1 if sym else M``. """, r""" {normalization} Args: {M} Keyword args: beta (float, optional): shape parameter for the window. Must be non-negative. Default: 12.0 {sym} {dtype} {layout} {device} {requires_grad} Examples:: >>> # Generates a symmetric gaussian window with a standard deviation of 1.0. >>> torch.signal.windows.kaiser(5) tensor([4.0065e-05, 2.1875e-03, 4.3937e-02, 3.2465e-01, 8.8250e-01, 8.8250e-01, 3.2465e-01, 4.3937e-02, 2.1875e-03, 4.0065e-05]) >>> # Generates a periodic gaussian window and standard deviation equal to 0.9. >>> torch.signal.windows.kaiser(5, sym=False,std=0.9) tensor([1.9858e-07, 5.1365e-05, 3.8659e-03, 8.4658e-02, 5.3941e-01, 1.0000e+00, 5.3941e-01, 8.4658e-02, 3.8659e-03, 5.1365e-05]) """.format( **window_common_args, ), ) def kaiser( M: int, *, beta: float = 12.0, sym: bool = True, dtype: Optional[torch.dtype] = None, layout: torch.layout = torch.strided, device: Optional[torch.device] = None, requires_grad: bool = False, ) -> Tensor: if dtype is None: dtype = torch.get_default_dtype() _window_function_checks("kaiser", M, dtype, layout) if beta < 0: raise ValueError(f"beta must be non-negative, got: {beta} instead.") if M == 0: return torch.empty( (0,), dtype=dtype, layout=layout, device=device, requires_grad=requires_grad ) if M == 1: return torch.ones( (1,), dtype=dtype, layout=layout, device=device, requires_grad=requires_grad ) # Avoid NaNs by casting `beta` to the appropriate dtype. beta = torch.tensor(beta, dtype=dtype, device=device) start = -beta constant = 2.0 * beta / (M if not sym else M - 1) end = torch.minimum(beta, start + (M - 1) * constant) k = torch.linspace( start=start, end=end, steps=M, dtype=dtype, layout=layout, device=device, requires_grad=requires_grad, ) return torch.i0(torch.sqrt(beta * beta - torch.pow(k, 2))) / torch.i0(beta) @_add_docstr( r""" Computes the Hamming window. The Hamming window is defined as follows: .. 
math:: w_n = \alpha - \beta\ \cos \left( \frac{2 \pi n}{M - 1} \right) """, r""" {normalization} Arguments: {M} Keyword args: {sym} alpha (float, optional): The coefficient :math:`\alpha` in the equation above. beta (float, optional): The coefficient :math:`\beta` in the equation above. {dtype} {layout} {device} {requires_grad} Examples:: >>> # Generates a symmetric Hamming window. >>> torch.signal.windows.hamming(10) tensor([0.0800, 0.1876, 0.4601, 0.7700, 0.9723, 0.9723, 0.7700, 0.4601, 0.1876, 0.0800]) >>> # Generates a periodic Hamming window. >>> torch.signal.windows.hamming(10, sym=False) tensor([0.0800, 0.1679, 0.3979, 0.6821, 0.9121, 1.0000, 0.9121, 0.6821, 0.3979, 0.1679]) """.format( **window_common_args ), ) def hamming( M: int, *, sym: bool = True, dtype: Optional[torch.dtype] = None, layout: torch.layout = torch.strided, device: Optional[torch.device] = None, requires_grad: bool = False, ) -> Tensor: return general_hamming( M, sym=sym, dtype=dtype, layout=layout, device=device, requires_grad=requires_grad, ) @_add_docstr( r""" Computes the Hann window. The Hann window is defined as follows: .. math:: w_n = \frac{1}{2}\ \left[1 - \cos \left( \frac{2 \pi n}{M - 1} \right)\right] = \sin^2 \left( \frac{\pi n}{M - 1} \right) """, r""" {normalization} Arguments: {M} Keyword args: {sym} {dtype} {layout} {device} {requires_grad} Examples:: >>> # Generates a symmetric Hann window. >>> torch.signal.windows.hann(10) tensor([0.0000, 0.1170, 0.4132, 0.7500, 0.9698, 0.9698, 0.7500, 0.4132, 0.1170, 0.0000]) >>> # Generates a periodic Hann window. >>> torch.signal.windows.hann(10, sym=False) tensor([0.0000, 0.0955, 0.3455, 0.6545, 0.9045, 1.0000, 0.9045, 0.6545, 0.3455, 0.0955]) """.format( **window_common_args ), ) def hann( M: int, *, sym: bool = True, dtype: Optional[torch.dtype] = None, layout: torch.layout = torch.strided, device: Optional[torch.device] = None, requires_grad: bool = False, ) -> Tensor: return general_hamming( M, alpha=0.5, sym=sym, dtype=dtype, layout=layout, device=device, requires_grad=requires_grad, ) @_add_docstr( r""" Computes the Blackman window. The Blackman window is defined as follows: .. math:: w_n = 0.42 - 0.5 \cos \left( \frac{2 \pi n}{M - 1} \right) + 0.08 \cos \left( \frac{4 \pi n}{M - 1} \right) """, r""" {normalization} Arguments: {M} Keyword args: {sym} {dtype} {layout} {device} {requires_grad} Examples:: >>> # Generates a symmetric Blackman window. >>> torch.signal.windows.blackman(5) tensor([-1.4901e-08, 3.4000e-01, 1.0000e+00, 3.4000e-01, -1.4901e-08]) >>> # Generates a periodic Blackman window. >>> torch.signal.windows.blackman(5, sym=False) tensor([-1.4901e-08, 2.0077e-01, 8.4923e-01, 8.4923e-01, 2.0077e-01]) """.format( **window_common_args ), ) def blackman( M: int, *, sym: bool = True, dtype: Optional[torch.dtype] = None, layout: torch.layout = torch.strided, device: Optional[torch.device] = None, requires_grad: bool = False, ) -> Tensor: if dtype is None: dtype = torch.get_default_dtype() _window_function_checks("blackman", M, dtype, layout) return general_cosine( M, a=[0.42, 0.5, 0.08], sym=sym, dtype=dtype, layout=layout, device=device, requires_grad=requires_grad, ) @_add_docstr( r""" Computes the Bartlett window. The Bartlett window is defined as follows: .. 
math:: w_n = 1 - \left| \frac{2n}{M - 1} - 1 \right| = \begin{cases} \frac{2n}{M - 1} & \text{if } 0 \leq n \leq \frac{M - 1}{2} \\ 2 - \frac{2n}{M - 1} & \text{if } \frac{M - 1}{2} < n < M \\ \end{cases} """, r""" {normalization} Arguments: {M} Keyword args: {sym} {dtype} {layout} {device} {requires_grad} Examples:: >>> # Generates a symmetric Bartlett window. >>> torch.signal.windows.bartlett(10) tensor([0.0000, 0.2222, 0.4444, 0.6667, 0.8889, 0.8889, 0.6667, 0.4444, 0.2222, 0.0000]) >>> # Generates a periodic Bartlett window. >>> torch.signal.windows.bartlett(10, sym=False) tensor([0.0000, 0.2000, 0.4000, 0.6000, 0.8000, 1.0000, 0.8000, 0.6000, 0.4000, 0.2000]) """.format( **window_common_args ), ) def bartlett( M: int, *, sym: bool = True, dtype: Optional[torch.dtype] = None, layout: torch.layout = torch.strided, device: Optional[torch.device] = None, requires_grad: bool = False, ) -> Tensor: if dtype is None: dtype = torch.get_default_dtype() _window_function_checks("bartlett", M, dtype, layout) if M == 0: return torch.empty( (0,), dtype=dtype, layout=layout, device=device, requires_grad=requires_grad ) if M == 1: return torch.ones( (1,), dtype=dtype, layout=layout, device=device, requires_grad=requires_grad ) start = -1 constant = 2 / (M if not sym else M - 1) k = torch.linspace( start=start, end=start + (M - 1) * constant, steps=M, dtype=dtype, layout=layout, device=device, requires_grad=requires_grad, ) return 1 - torch.abs(k) @_add_docstr( r""" Computes the general cosine window. The general cosine window is defined as follows: .. math:: w_n = \sum^{M-1}_{i=0} (-1)^i a_i \cos{ \left( \frac{2 \pi i n}{M - 1}\right)} """, r""" {normalization} Arguments: {M} Keyword args: a (Iterable): the coefficients associated to each of the cosine functions. {sym} {dtype} {layout} {device} {requires_grad} Examples:: >>> # Generates a symmetric general cosine window with 3 coefficients. >>> torch.signal.windows.general_cosine(10, a=[0.46, 0.23, 0.31], sym=True) tensor([0.5400, 0.3376, 0.1288, 0.4200, 0.9136, 0.9136, 0.4200, 0.1288, 0.3376, 0.5400]) >>> # Generates a periodic general cosine window wit 2 coefficients. 
>>> torch.signal.windows.general_cosine(10, a=[0.5, 1 - 0.5], sym=False) tensor([0.0000, 0.0955, 0.3455, 0.6545, 0.9045, 1.0000, 0.9045, 0.6545, 0.3455, 0.0955]) """.format( **window_common_args ), ) def general_cosine( M, *, a: Iterable, sym: bool = True, dtype: Optional[torch.dtype] = None, layout: torch.layout = torch.strided, device: Optional[torch.device] = None, requires_grad: bool = False, ) -> Tensor: if dtype is None: dtype = torch.get_default_dtype() _window_function_checks("general_cosine", M, dtype, layout) if M == 0: return torch.empty( (0,), dtype=dtype, layout=layout, device=device, requires_grad=requires_grad ) if M == 1: return torch.ones( (1,), dtype=dtype, layout=layout, device=device, requires_grad=requires_grad ) if not isinstance(a, Iterable): raise TypeError("Coefficients must be a list/tuple") if not a: raise ValueError("Coefficients cannot be empty") constant = 2 * torch.pi / (M if not sym else M - 1) k = torch.linspace( start=0, end=(M - 1) * constant, steps=M, dtype=dtype, layout=layout, device=device, requires_grad=requires_grad, ) a_i = torch.tensor( [(-1) ** i * w for i, w in enumerate(a)], device=device, dtype=dtype, requires_grad=requires_grad, ) i = torch.arange( a_i.shape[0], dtype=a_i.dtype, device=a_i.device, requires_grad=a_i.requires_grad, ) return (a_i.unsqueeze(-1) * torch.cos(i.unsqueeze(-1) * k)).sum(0) @_add_docstr( r""" Computes the general Hamming window. The general Hamming window is defined as follows: .. math:: w_n = \alpha - (1 - \alpha) \cos{ \left( \frac{2 \pi n}{M-1} \right)} """, r""" {normalization} Arguments: {M} Keyword args: alpha (float, optional): the window coefficient. Default: 0.54. {sym} {dtype} {layout} {device} {requires_grad} Examples:: >>> # Generates a symmetric Hamming window with the general Hamming window. >>> torch.signal.windows.general_hamming(10, sym=True) tensor([0.0800, 0.1876, 0.4601, 0.7700, 0.9723, 0.9723, 0.7700, 0.4601, 0.1876, 0.0800]) >>> # Generates a periodic Hann window with the general Hamming window. >>> torch.signal.windows.general_hamming(10, alpha=0.5, sym=False) tensor([0.0000, 0.0955, 0.3455, 0.6545, 0.9045, 1.0000, 0.9045, 0.6545, 0.3455, 0.0955]) """.format( **window_common_args ), ) def general_hamming( M, *, alpha: float = 0.54, sym: bool = True, dtype: Optional[torch.dtype] = None, layout: torch.layout = torch.strided, device: Optional[torch.device] = None, requires_grad: bool = False, ) -> Tensor: return general_cosine( M, a=[alpha, 1.0 - alpha], sym=sym, dtype=dtype, layout=layout, device=device, requires_grad=requires_grad, ) @_add_docstr( r""" Computes the minimum 4-term Blackman-Harris window according to Nuttall. .. math:: w_n = 1 - 0.36358 \cos{(z_n)} + 0.48917 \cos{(2z_n)} - 0.13659 \cos{(3z_n)} + 0.01064 \cos{(4z_n)} where :math:`z_n = \frac{2 \pi n}{M}`. """, """ {normalization} Arguments: {M} Keyword args: {sym} {dtype} {layout} {device} {requires_grad} References:: - A. Nuttall, "Some windows with very good sidelobe behavior," IEEE Transactions on Acoustics, Speech, and Signal Processing, vol. 29, no. 1, pp. 84-91, Feb 1981. https://doi.org/10.1109/TASSP.1981.1163506 - Heinzel G. et al., "Spectrum and spectral density estimation by the Discrete Fourier transform (DFT), including a comprehensive list of window functions and some new flat-top windows", February 15, 2002 https://holometer.fnal.gov/GH_FFT.pdf Examples:: >>> # Generates a symmetric Nutall window. 
>>> torch.signal.windows.nuttall(5, sym=True) tensor([3.6280e-04, 2.2698e-01, 1.0000e+00, 2.2698e-01, 3.6280e-04]) >>> # Generates a periodic Nuttall window. >>> torch.signal.windows.nuttall(5, sym=False) tensor([3.6280e-04, 1.1052e-01, 7.9826e-01, 7.9826e-01, 1.1052e-01]) """.format( **window_common_args ), ) def nuttall( M: int, *, sym: bool = True, dtype: Optional[torch.dtype] = None, layout: torch.layout = torch.strided, device: Optional[torch.device] = None, requires_grad: bool = False, ) -> Tensor: return general_cosine( M, a=[0.3635819, 0.4891775, 0.1365995, 0.0106411], sym=sym, dtype=dtype, layout=layout, device=device, requires_grad=requires_grad, ) ```
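Several of the named windows defined above are thin wrappers: `hann` and `hamming` both call `general_hamming` (with `alpha` 0.5 and 0.54 respectively), and `blackman` calls `general_cosine` with coefficients `[0.42, 0.5, 0.08]`. A short sanity check of those identities:

```py
# Verify the wrapper relationships stated in the module above.
import torch
from torch.signal import windows

M = 16
assert torch.allclose(windows.hann(M), windows.general_hamming(M, alpha=0.5))
assert torch.allclose(windows.hamming(M), windows.general_hamming(M, alpha=0.54))
assert torch.allclose(windows.blackman(M),
                      windows.general_cosine(M, a=[0.42, 0.5, 0.08]))
print("hann/hamming/blackman match their general_* definitions for M =", M)
```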
================================================================================================================ SOURCE CODE FILE: __init__.py LINES: 1 SIZE: 25.60 KB PATH: scripts\freecad_env\Lib\site-packages\torch\sparse\__init__.py ENCODING: utf-8 ```py # mypy: allow-untyped-defs # The Tensor classes are added to this module by python_tensor.cpp # A workaround to support both TorchScript and MyPy: from typing import Any, Optional, TYPE_CHECKING, Union import torch from torch import Tensor from torch._C import _add_docstr, _sparse # type: ignore[attr-defined] # Semi structured sparsity support from .semi_structured import ( SparseSemiStructuredTensor, SparseSemiStructuredTensorCUSPARSELT, SparseSemiStructuredTensorCUTLASS, to_sparse_semi_structured, ) if TYPE_CHECKING: from torch.types import _dtype as DType DimOrDims = Optional[Union[int, tuple[int, ...], list[int]]] else: # The JIT doesn't understand Union, nor torch.dtype here DType = int DimOrDims = Optional[tuple[int]] __all__ = [ "addmm", "check_sparse_tensor_invariants", "mm", "sum", "softmax", "solve", "log_softmax", "SparseSemiStructuredTensor", "SparseSemiStructuredTensorCUTLASS", "SparseSemiStructuredTensorCUSPARSELT", "to_sparse_semi_structured", "as_sparse_gradcheck", ] addmm = _add_docstr( _sparse._sparse_addmm, r""" sparse.addmm(mat, mat1, mat2, *, beta=1., alpha=1.) -> Tensor This function does exact same thing as :func:`torch.addmm` in the forward, except that it supports backward for sparse COO matrix :attr:`mat1`. When :attr:`mat1` is a COO tensor it must have `sparse_dim = 2`. When inputs are COO tensors, this function also supports backward for both inputs. Supports both CSR and COO storage formats. .. note:: This function doesn't support computing derivaties with respect to CSR matrices. Args: mat (Tensor): a dense matrix to be added mat1 (Tensor): a sparse matrix to be multiplied mat2 (Tensor): a dense matrix to be multiplied beta (Number, optional): multiplier for :attr:`mat` (:math:`\beta`) alpha (Number, optional): multiplier for :math:`mat1 @ mat2` (:math:`\alpha`) """, ) mm = _add_docstr( _sparse._sparse_mm, r""" Performs a matrix multiplication of the sparse matrix :attr:`mat1` and the (sparse or strided) matrix :attr:`mat2`. Similar to :func:`torch.mm`, if :attr:`mat1` is a :math:`(n \times m)` tensor, :attr:`mat2` is a :math:`(m \times p)` tensor, out will be a :math:`(n \times p)` tensor. When :attr:`mat1` is a COO tensor it must have `sparse_dim = 2`. When inputs are COO tensors, this function also supports backward for both inputs. Supports both CSR and COO storage formats. .. note:: This function doesn't support computing derivaties with respect to CSR matrices. This function also additionally accepts an optional :attr:`reduce` argument that allows specification of an optional reduction operation, mathematically performs the following operation: .. math:: z_{ij} = \bigoplus_{k = 0}^{K - 1} x_{ik} y_{kj} where :math:`\bigoplus` defines the reduce operator. :attr:`reduce` is implemented only for CSR storage format on CPU device. Args: mat1 (Tensor): the first sparse matrix to be multiplied mat2 (Tensor): the second matrix to be multiplied, which could be sparse or dense reduce (str, optional): the reduction operation to apply for non-unique indices (:obj:`"sum"`, :obj:`"mean"`, :obj:`"amax"`, :obj:`"amin"`). Default :obj:`"sum"`. 
Shape: The format of the output tensor of this function follows: - sparse x sparse -> sparse - sparse x dense -> dense Example:: >>> a = torch.tensor([[1., 0, 2], [0, 3, 0]]).to_sparse().requires_grad_() >>> a tensor(indices=tensor([[0, 0, 1], [0, 2, 1]]), values=tensor([1., 2., 3.]), size=(2, 3), nnz=3, layout=torch.sparse_coo, requires_grad=True) >>> b = torch.tensor([[0, 1.], [2, 0], [0, 0]], requires_grad=True) >>> b tensor([[0., 1.], [2., 0.], [0., 0.]], requires_grad=True) >>> y = torch.sparse.mm(a, b) >>> y tensor([[0., 1.], [6., 0.]], grad_fn=<SparseAddmmBackward0>) >>> y.sum().backward() >>> a.grad tensor(indices=tensor([[0, 0, 1], [0, 2, 1]]), values=tensor([1., 0., 2.]), size=(2, 3), nnz=3, layout=torch.sparse_coo) >>> c = a.detach().to_sparse_csr() >>> c tensor(crow_indices=tensor([0, 2, 3]), col_indices=tensor([0, 2, 1]), values=tensor([1., 2., 3.]), size=(2, 3), nnz=3, layout=torch.sparse_csr) >>> y1 = torch.sparse.mm(c, b, 'sum') >>> y1 tensor([[0., 1.], [6., 0.]], grad_fn=<SparseMmReduceImplBackward0>) >>> y2 = torch.sparse.mm(c, b, 'max') >>> y2 tensor([[0., 1.], [6., 0.]], grad_fn=<SparseMmReduceImplBackward0>) """, ) sampled_addmm = _add_docstr( _sparse.sparse_sampled_addmm, r""" sparse.sampled_addmm(input, mat1, mat2, *, beta=1., alpha=1., out=None) -> Tensor Performs a matrix multiplication of the dense matrices :attr:`mat1` and :attr:`mat2` at the locations specified by the sparsity pattern of :attr:`input`. The matrix :attr:`input` is added to the final result. Mathematically this performs the following operation: .. math:: \text{out} = \alpha\ (\text{mat1} \mathbin{@} \text{mat2})*\text{spy}(\text{input}) + \beta\ \text{input} where :math:`\text{spy}(\text{input})` is the sparsity pattern matrix of :attr:`input`, :attr:`alpha` and :attr:`beta` are the scaling factors. :math:`\text{spy}(\text{input})` has value 1 at the positions where :attr:`input` has non-zero values, and 0 elsewhere. .. note:: :attr:`input` must be a sparse CSR tensor. :attr:`mat1` and :attr:`mat2` must be dense tensors. Args: input (Tensor): a sparse CSR matrix of shape `(m, n)` to be added and used to compute the sampled matrix multiplication mat1 (Tensor): a dense matrix of shape `(m, k)` to be multiplied mat2 (Tensor): a dense matrix of shape `(k, n)` to be multiplied Keyword args: beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`) alpha (Number, optional): multiplier for :math:`mat1 @ mat2` (:math:`\alpha`) out (Tensor, optional): output tensor. Ignored if `None`. Default: `None`. Examples:: >>> input = torch.eye(3, device='cuda').to_sparse_csr() >>> mat1 = torch.randn(3, 5, device='cuda') >>> mat2 = torch.randn(5, 3, device='cuda') >>> torch.sparse.sampled_addmm(input, mat1, mat2) tensor(crow_indices=tensor([0, 1, 2, 3]), col_indices=tensor([0, 1, 2]), values=tensor([ 0.2847, -0.7805, -0.1900]), device='cuda:0', size=(3, 3), nnz=3, layout=torch.sparse_csr) >>> torch.sparse.sampled_addmm(input, mat1, mat2).to_dense() tensor([[ 0.2847, 0.0000, 0.0000], [ 0.0000, -0.7805, 0.0000], [ 0.0000, 0.0000, -0.1900]], device='cuda:0') >>> torch.sparse.sampled_addmm(input, mat1, mat2, beta=0.5, alpha=0.5) tensor(crow_indices=tensor([0, 1, 2, 3]), col_indices=tensor([0, 1, 2]), values=tensor([ 0.1423, -0.3903, -0.0950]), device='cuda:0', size=(3, 3), nnz=3, layout=torch.sparse_csr) """, ) def sum(input: Tensor, dim: DimOrDims = None, dtype: Optional[DType] = None) -> Tensor: r"""Return the sum of each row of the given sparse tensor. 
Returns the sum of each row of the sparse tensor :attr:`input` in the given dimensions :attr:`dim`. If :attr:`dim` is a list of dimensions, reduce over all of them. When sum over all ``sparse_dim``, this method returns a dense tensor instead of a sparse tensor. All summed :attr:`dim` are squeezed (see :func:`torch.squeeze`), resulting an output tensor having :attr:`dim` fewer dimensions than :attr:`input`. During backward, only gradients at ``nnz`` locations of :attr:`input` will propagate back. Note that the gradients of :attr:`input` is coalesced. Args: input (Tensor): the input sparse tensor dim (int or tuple of ints): a dimension or a list of dimensions to reduce. Default: reduce over all dims. dtype (:class:`torch.dtype`, optional): the desired data type of returned Tensor. Default: dtype of :attr:`input`. Example:: >>> nnz = 3 >>> dims = [5, 5, 2, 3] >>> I = torch.cat([torch.randint(0, dims[0], size=(nnz,)), torch.randint(0, dims[1], size=(nnz,))], 0).reshape(2, nnz) >>> V = torch.randn(nnz, dims[2], dims[3]) >>> size = torch.Size(dims) >>> # xdoctest: +IGNORE_WANT("non-deterministic") >>> S = torch.sparse_coo_tensor(I, V, size) >>> S tensor(indices=tensor([[2, 0, 3], [2, 4, 1]]), values=tensor([[[-0.6438, -1.6467, 1.4004], [ 0.3411, 0.0918, -0.2312]], [[ 0.5348, 0.0634, -2.0494], [-0.7125, -1.0646, 2.1844]], [[ 0.1276, 0.1874, -0.6334], [-1.9682, -0.5340, 0.7483]]]), size=(5, 5, 2, 3), nnz=3, layout=torch.sparse_coo) # when sum over only part of sparse_dims, return a sparse tensor >>> torch.sparse.sum(S, [1, 3]) tensor(indices=tensor([[0, 2, 3]]), values=tensor([[-1.4512, 0.4073], [-0.8901, 0.2017], [-0.3183, -1.7539]]), size=(5, 2), nnz=3, layout=torch.sparse_coo) # when sum over all sparse dim, return a dense tensor # with summed dims squeezed >>> torch.sparse.sum(S, [0, 1, 3]) tensor([-2.6596, -1.1450]) """ if dtype is None: if dim is not None: return torch._sparse_sum(input, dim) else: return torch._sparse_sum(input) else: if dim is not None: return torch._sparse_sum(input, dim, dtype=dtype) else: return torch._sparse_sum(input, dtype=dtype) softmax = _add_docstr( _sparse._sparse_softmax, r""" sparse.softmax(input, dim, *, dtype=None) -> Tensor Applies a softmax function. Softmax is defined as: :math:`\text{Softmax}(x_{i}) = \frac{exp(x_i)}{\sum_j exp(x_j)}` where :math:`i, j` run over sparse tensor indices and unspecified entries are ignores. This is equivalent to defining unspecified entries as negative infinity so that :math:`exp(x_k) = 0` when the entry with index :math:`k` has not specified. It is applied to all slices along `dim`, and will re-scale them so that the elements lie in the range `[0, 1]` and sum to 1. Args: input (Tensor): input dim (int): A dimension along which softmax will be computed. dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. If specified, the input tensor is casted to :attr:`dtype` before the operation is performed. This is useful for preventing data type overflows. Default: None """, ) spsolve = _add_docstr( _sparse._spsolve, r""" sparse.spsolve(input, other, *, left=True) -> Tensor Computes the solution of a square system of linear equations with a unique solution. Its purpose is similar to :func:`torch.linalg.solve`, except that the system is defined by a sparse CSR matrix with layout `sparse_csr`. Args: input (Tensor): a sparse CSR matrix of shape `(n, n)` representing the coefficients of the linear system. other (Tensor): a dense matrix of shape `(n, )` representing the right-hand side of the linear system. 
left (bool, optional): whether to solve the system for `input @ out = other` (default) or `out @ input = other`. Only `left=True` is supported. """, ) log_softmax = _add_docstr( _sparse._sparse_log_softmax, r""" sparse.log_softmax(input, dim, *, dtype=None) -> Tensor Applies a softmax function followed by logarithm. See :class:`~torch.sparse.softmax` for more details. Args: input (Tensor): input dim (int): A dimension along which softmax will be computed. dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. If specified, the input tensor is casted to :attr:`dtype` before the operation is performed. This is useful for preventing data type overflows. Default: None """, ) spdiags = _add_docstr( _sparse._spdiags, r""" sparse.spdiags(diagonals, offsets, shape, layout=None) -> Tensor Creates a sparse 2D tensor by placing the values from rows of :attr:`diagonals` along specified diagonals of the output The :attr:`offsets` tensor controls which diagonals are set. - If :attr:`offsets[i]` = 0, it is the main diagonal - If :attr:`offsets[i]` < 0, it is below the main diagonal - If :attr:`offsets[i]` > 0, it is above the main diagonal The number of rows in :attr:`diagonals` must match the length of :attr:`offsets`, and an offset may not be repeated. Args: diagonals (Tensor): Matrix storing diagonals row-wise offsets (Tensor): The diagonals to be set, stored as a vector shape (2-tuple of ints): The desired shape of the result Keyword args: layout (:class:`torch.layout`, optional): The desired layout of the returned tensor. ``torch.sparse_coo``, ``torch.sparse_csc`` and ``torch.sparse_csr`` are supported. Default: ``torch.sparse_coo`` Examples: Set the main and first two lower diagonals of a matrix:: >>> diags = torch.arange(9).reshape(3, 3) >>> diags tensor([[0, 1, 2], [3, 4, 5], [6, 7, 8]]) >>> s = torch.sparse.spdiags(diags, torch.tensor([0, -1, -2]), (3, 3)) >>> s tensor(indices=tensor([[0, 1, 2, 1, 2, 2], [0, 1, 2, 0, 1, 0]]), values=tensor([0, 1, 2, 3, 4, 6]), size=(3, 3), nnz=6, layout=torch.sparse_coo) >>> s.to_dense() tensor([[0, 0, 0], [3, 1, 0], [6, 4, 2]]) Change the output layout:: >>> diags = torch.arange(9).reshape(3, 3) >>> diags tensor([[0, 1, 2],[3, 4, 5], [6, 7, 8]) >>> s = torch.sparse.spdiags(diags, torch.tensor([0, -1, -2]), (3, 3), layout=torch.sparse_csr) >>> s tensor(crow_indices=tensor([0, 1, 3, 6]), col_indices=tensor([0, 0, 1, 0, 1, 2]), values=tensor([0, 3, 1, 6, 4, 2]), size=(3, 3), nnz=6, layout=torch.sparse_csr) >>> s.to_dense() tensor([[0, 0, 0], [3, 1, 0], [6, 4, 2]]) Set partial diagonals of a large output:: >>> diags = torch.tensor([[1, 2], [3, 4]]) >>> offsets = torch.tensor([0, -1]) >>> torch.sparse.spdiags(diags, offsets, (5, 5)).to_dense() tensor([[1, 0, 0, 0, 0], [3, 2, 0, 0, 0], [0, 4, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]) .. note:: When setting the values along a given diagonal the index into the diagonal and the index into the row of :attr:`diagonals` is taken as the column index in the output. This has the effect that when setting a diagonal with a positive offset `k` the first value along that diagonal will be the value in position `k` of the row of :attr:`diagonals` Specifying a positive offset:: >>> diags = torch.tensor([[1, 2, 3], [1, 2, 3], [1, 2, 3]]) >>> torch.sparse.spdiags(diags, torch.tensor([0, 1, 2]), (5, 5)).to_dense() tensor([[1, 2, 3, 0, 0], [0, 2, 3, 0, 0], [0, 0, 3, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]) """, ) class check_sparse_tensor_invariants: """A tool to control checking sparse tensor invariants. 
The following options exists to manage sparsr tensor invariants checking in sparse tensor construction: 1. Using a context manager: .. code:: python with torch.sparse.check_sparse_tensor_invariants(): run_my_model() 2. Using a procedural approach: .. code:: python prev_checks_enabled = torch.sparse.check_sparse_tensor_invariants.is_enabled() torch.sparse.check_sparse_tensor_invariants.enable() run_my_model() if not prev_checks_enabled: torch.sparse.check_sparse_tensor_invariants.disable() 3. Using function decoration: .. code:: python @torch.sparse.check_sparse_tensor_invariants() def run_my_model(): ... run_my_model() 4. Using ``check_invariants`` keyword argument in sparse tensor constructor call. For example: >>> torch.sparse_csr_tensor([0, 1, 3], [0, 1], [1, 2], check_invariants=True) Traceback (most recent call last): File "<stdin>", line 1, in <module> RuntimeError: `crow_indices[..., -1] == nnz` is not satisfied. """ @staticmethod def is_enabled(): r"""Return True if the sparse tensor invariants checking is enabled. .. note:: Use :func:`torch.sparse.check_sparse_tensor_invariants.enable` or :func:`torch.sparse.check_sparse_tensor_invariants.disable` to manage the state of the sparse tensor invariants checks. """ return torch._C._check_sparse_tensor_invariants() @staticmethod def enable(): r"""Enable sparse tensor invariants checking in sparse tensor constructors. .. note:: By default, the sparse tensor invariants checks are disabled. Use :func:`torch.sparse.check_sparse_tensor_invariants.is_enabled` to retrieve the current state of sparse tensor invariants checking. .. note:: The sparse tensor invariants check flag is effective to all sparse tensor constructors, both in Python and ATen. The flag can be locally overridden by the ``check_invariants`` optional argument of the sparse tensor constructor functions. """ torch._C._set_check_sparse_tensor_invariants(True) @staticmethod def disable(): r"""Disable sparse tensor invariants checking in sparse tensor constructors. See :func:`torch.sparse.check_sparse_tensor_invariants.enable` for more information. """ torch._C._set_check_sparse_tensor_invariants(False) # context manager support def __init__(self, enable=True): self.state = enable self.saved_state: Optional[bool] = None def __enter__(self): if self.saved_state is not None: raise RuntimeError( "This context manager instance is already activated." " Use a different context manager instance for context nesting." ) self.saved_state = self.is_enabled() torch._C._set_check_sparse_tensor_invariants(self.state) def __exit__(self, type, value, traceback): assert self.saved_state is not None torch._C._set_check_sparse_tensor_invariants(self.saved_state) self.saved_state = None # decorator support def __call__(self, mth): def test_mth(*args, **kwargs): with type(self)(self.state): return mth(*args, **kwargs) return test_mth def as_sparse_gradcheck(gradcheck): """Decorate function, to extend gradcheck for sparse tensors. Decorator for torch.autograd.gradcheck or its functools.partial variants that extends the gradcheck function with support to input functions that operate on or/and return sparse tensors. The specified gradcheck function itself is guaranteed to operate on strided tensors only. 
For example: >>> gradcheck = torch.sparse.as_sparse_gradcheck(torch.autograd.gradcheck) >>> x = torch.tensor([[0, 1], [2, 3]], dtype=torch.float64).to_sparse_coo().requires_grad_(True) >>> gradcheck(lambda x: x.to_sparse_csr(), x) True """ def gradcheck_with_sparse_support(func, inputs, **kwargs): """ Create gradcheck with support for sparse tensors. Same as :func:`torch.autograd.gradcheck` but with sparse tensors inputs and outputs support. """ masked = kwargs.pop("masked", False) sparse_layouts = { torch.sparse_coo, torch.sparse_csr, torch.sparse_csc, torch.sparse_bsr, torch.sparse_bsc, } sparse_compressed_layouts = { torch.sparse_csr, torch.sparse_csc, torch.sparse_bsr, torch.sparse_bsc, } sparse_block_layouts = {torch.sparse_bsr, torch.sparse_bsc} STRIDED_REPRESENTATION = "__STRIDED_REPRESENTATION__" def convert_to_strided_representation(args): """Convert differentiable non-strided tensors to a representation containing differentiable strided tensors.""" if not isinstance(args, (list, tuple)): args = (args,) new_args: list[Any] = [] for obj in args: if ( isinstance(obj, torch.Tensor) and obj.requires_grad and obj.layout in sparse_layouts ): d = dict(layout=obj.layout, shape=obj.shape) if not masked: # Materialize unspecified elements with zero values batch_dim = obj.ndim - obj.dense_dim() - obj.sparse_dim() blocksize = ( obj.values().shape[batch_dim + 1 : batch_dim + 3] if obj.layout in sparse_block_layouts else None ) full_mask = torch.ones( obj.shape, device=obj.device, dtype=torch.bool ).to_sparse( layout=obj.layout, blocksize=blocksize, dense_dim=obj.dense_dim(), ) obj = obj.to_dense().sparse_mask(full_mask) if obj.layout is torch.sparse_coo: d.update( indices=obj._indices(), is_coalesced=obj.is_coalesced() ) values = obj._values() elif obj.layout in {torch.sparse_csr, torch.sparse_bsr}: d.update( compressed_indices=obj.crow_indices(), plain_indices=obj.col_indices(), ) values = obj.values() else: d.update( compressed_indices=obj.ccol_indices(), plain_indices=obj.row_indices(), ) values = obj.values() new_args.extend( (STRIDED_REPRESENTATION, d, values.requires_grad_(True)) ) else: new_args.append(obj) return tuple(new_args) def restore_from_strided_representation(args): """Restore non-strided differentiable tensosr from their strided representations.""" new_args = [] args = list(args) while args: a = args.pop(0) if a == STRIDED_REPRESENTATION: d, values = args.pop(0), args.pop(0) if d["layout"] is torch.sparse_coo: a = torch.sparse_coo_tensor( d["indices"], values, size=d["shape"], is_coalesced=d["is_coalesced"], ) elif d["layout"] in sparse_compressed_layouts: a = torch.sparse_compressed_tensor( d["compressed_indices"], d["plain_indices"], values, size=d["shape"], layout=d["layout"], ) else: raise NotImplementedError( f'conversion of {d["layout"]} strided representation to tensor' ) new_args.append(a) return tuple(new_args) def func_wrapper(*args, **kwargs): restored_args = restore_from_strided_representation(args) # convert differentiable output sparse tensors to strided # tensors: outputs = func(*restored_args, **kwargs) strided_outputs = ( tuple(outputs) if isinstance(outputs, (list, tuple)) else (outputs,) ) strided_outputs = tuple( ( o.to_dense(masked_grad=masked) if isinstance(o, torch.Tensor) and o.requires_grad and o.layout in sparse_layouts else o ) for o in strided_outputs ) return ( strided_outputs if isinstance(outputs, (list, tuple)) else strided_outputs[0] ) args = (func_wrapper, convert_to_strided_representation(inputs)) return gradcheck(*args, **kwargs) return 
gradcheck_with_sparse_support ```
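A small, CPU-only illustration of two of the utilities documented above: `torch.sparse.sum`, which returns a dense tensor once every sparse dimension has been reduced (and a sparse one otherwise), and `torch.sparse.spdiags`. The values are arbitrary.

```py
# Demo of torch.sparse.sum and torch.sparse.spdiags on a tiny COO tensor.
import torch

i = torch.tensor([[0, 1, 1], [2, 0, 2]])
v = torch.tensor([3.0, 4.0, 5.0])
s = torch.sparse_coo_tensor(i, v, (2, 3))

print(torch.sparse.sum(s))           # every sparse dim reduced -> dense tensor(12.)
print(torch.sparse.sum(s, dim=[0]))  # partial reduction -> still a sparse tensor

diags = torch.arange(6).reshape(2, 3)
d = torch.sparse.spdiags(diags, torch.tensor([0, -1]), (3, 3))
print(d.to_dense())  # main diagonal from row 0, first subdiagonal from row 1
```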
==================================================================================================================================== SOURCE CODE FILE: _semi_structured_conversions.py LINES: 1 SIZE: 14.04 KB PATH: scripts\freecad_env\Lib\site-packages\torch\sparse\_semi_structured_conversions.py ENCODING: utf-8 ```py # mypy: allow-untyped-defs import torch def _calculate_meta_reordering_scatter_offsets(m, meta_ncols, meta_dtype, device): """ This is PyTorch implementation of main part of reorder_meta() function, from tools/util/include/cutlass/util/host_reorder.h file of CUTLASS source tree. Furthermore, CUTLASS template for sparse GEMM decides upon layout of this matrix, and at the moment for the sparse GEMM executed on tensor cores, this is layout described by ColumnMajorInterleaved<2> data structure, in include/cutlass/layout/matrix.h of CUTLASS source tree. The reordering of meta matrix into meta_reordered matrix calculated according to these segments of CUTLASS code is re-implemented here. Note that this calculation produces offsets for scattering metadata matrix elements into reordered metadata matrix elements (or, equivalently, for gathering reordered metadata matrix element back into metadata matrix elements). """ dst_rows = torch.arange(0, m, device=device)[:, None].repeat(1, meta_ncols) dst_cols = torch.arange(0, meta_ncols, device=device).repeat(m, 1) # Reorder the rows, then swizzle the 2x2 blocks. group = 32 if meta_dtype.itemsize == 2 else 16 interweave = 4 if meta_dtype.itemsize == 2 else 2 dst_rows = ( dst_rows // group * group + (dst_rows % 8) * interweave + (dst_rows % group) // 8 ) topright = ((dst_rows % 2 == 0) & (dst_cols % 2 == 1)).to(torch.int8) bottomleft = ((dst_rows % 2 == 1) & (dst_cols % 2 == 0)).to(torch.int8) dst_rows += topright - bottomleft dst_cols -= topright - bottomleft # Assumed that meta tensor is to be stored in CUTLASS # InterleavedColumnMajor layout, and reverse engineered # corresponding code to store values into this tensor. interleave = 2 cols_maj = dst_cols // interleave cols_min = dst_cols % interleave return (cols_maj * m * interleave + dst_rows * interleave + cols_min).view(-1) def sparse_semi_structured_from_dense_cutlass(dense): """ This function converts dense matrix into sparse semi-structured representation, producing "compressed" matrix, in the layout used by CUTLASS backend, and corresponding metadata matrix. 
""" if dense.dim() != 2: raise RuntimeError( f"Expected 2-dimensional dense tensor, got {dense.dim()}-dimensional tensor" ) m, k = dense.shape device = dense.device meta_dtype = torch.int8 if dense.dtype == torch.int8: meta_dtype = torch.int32 elif dense.dtype in [torch.half, torch.bfloat16, torch.float]: meta_dtype = torch.int16 else: raise RuntimeError(f"Invalid datatype {dense.dtype} of dense matrix") quadbits_per_meta_elem = meta_dtype.itemsize * 8 // 4 if quadbits_per_meta_elem not in (4, 8): raise RuntimeError("Invalid number of elements per meta element calculated") if meta_dtype == torch.int32: if m % 16 != 0: raise RuntimeError( f"Number of rows of dense matrix {m} must be divisible by 16" ) else: if m % 32 != 0: raise RuntimeError( f"Number of rows of dense matrix {m} must be divisible by 32" ) if k % (4 * quadbits_per_meta_elem) != 0: raise RuntimeError( f"Number of columns of dense matrix {k} must be divisible by {4 * quadbits_per_meta_elem}" ) if dense.dtype != torch.float: ksparse = 4 dense_4 = dense.view(-1, k // ksparse, ksparse) m0, m1, _m2, m3 = (dense_4 != 0).unbind(-1) else: ksparse = 2 dense_2 = dense.view(-1, k // ksparse, ksparse) m0, _m2 = m1, m3 = (dense_2 != 0).unbind(-1) meta_ncols = k // (ksparse * quadbits_per_meta_elem) # Encoding quadruples of True/False values as follows: # [True, True, False, False] -> 0b0100 # [True, False, True, False] -> 0b1000 # [False, True, True, False] -> 0b1001 # [True, False, False, True ] -> 0b1100 # [False, True, False, True ] -> 0b1101 # [False, False, True, True ] -> 0b1110 # Thus, lower two bits in the encoding are index of the True value # at the lowest index in the quadruple, and the higher two bits in # the encoding are index of the other True value in the quadruple. # In case there are less than two True values, than False value or # values at some index or indices are considered True for the # encoding. In case there are more than two True values, then the # excess True value(s) at some indices are considered False for # the encoding. The exact encodings used for these cases are as # follows: # [False, False, False, False] -> 0b1110 # [False, False, False, True ] -> 0b1110 # [False, False, True, False] -> 0b1110 # [False, True, False, False] -> 0b1001 # [False, True, True, True ] -> 0b1101 # [True, False, False, False] -> 0b1000 # [True, False, True, True ] -> 0b1100 # [True, True, False, True ] -> 0b0100 # [True, True, True, False] -> 0b0100 # [True, True, True, True ] -> 0b0100 # These particular encodings are chosen, with the help of Espresso # logic minimizer software, for the purpose of minimization of # corresponding Boolean functions, that translate non-zero flags # into encoding bits. Note also possible choices for the first # and last of these encodings were limited only to (0b0100, # 0b1110), in order to produce valid encodings for 1:2 sparsity # case. 
expr0 = m0 & m1 expr1 = ~m0 & m1 expr2 = ~m0 & ~m1 bit0 = expr1 bit1 = expr2 bit2 = expr0 | expr2 | m3 bit3 = expr1 | ~m1 idxs0 = bit0 | (bit1.to(torch.int64) << 1) idxs1 = bit2 | (bit3.to(torch.int64) << 1) if dense.dtype != torch.float: sparse0 = dense_4.gather(-1, idxs0.unsqueeze(-1)) # type: ignore[possibly-undefined] sparse1 = dense_4.gather(-1, idxs1.unsqueeze(-1)) sparse = torch.stack((sparse0, sparse1), dim=-1).view(m, k // 2) else: sparse = dense_2.gather(-1, idxs0.unsqueeze(-1) // 2).view(m, k // 2) # type: ignore[possibly-undefined] meta_4 = idxs0 | (idxs1 << 2) meta_n = meta_4.view((-1, meta_ncols, quadbits_per_meta_elem)).to(meta_dtype) if quadbits_per_meta_elem == 4: meta = ( meta_n[:, :, 0] | (meta_n[:, :, 1] << 4) | (meta_n[:, :, 2] << 8) | (meta_n[:, :, 3] << 12) ) elif quadbits_per_meta_elem == 8: meta = ( meta_n[:, :, 0] | (meta_n[:, :, 1] << 4) | (meta_n[:, :, 2] << 8) | (meta_n[:, :, 3] << 12) | (meta_n[:, :, 4] << 16) | (meta_n[:, :, 5] << 20) | (meta_n[:, :, 6] << 24) | (meta_n[:, :, 7] << 28) ) # Reorder meta tensor elements. meta_reordered = meta.new_empty((m * meta_ncols,)) # type: ignore[possibly-undefined] meta_offsets = _calculate_meta_reordering_scatter_offsets( m, meta_ncols, meta_dtype, device ) meta_reordered.scatter_(0, meta_offsets, meta.view(-1)) return (sparse, meta_reordered.view(m, meta_ncols)) def sparse_semi_structured_to_dense_cutlass(sparse, meta_reordered): """ This function performs reverse of the function above - it reconstructs dense matrix from a pair of "compressed" matrix, given in the layout used by CUTLASS backend, and accompanying metadata matrix. """ if sparse.dim() != 2: raise RuntimeError( f"Expected 2-dimensional sparse tensor, got {sparse.dim()}-dimensional tensor" ) m, k = sparse.shape device = sparse.device if meta_reordered.dim() != 2: raise RuntimeError( f"Expected 2-dimensional meta tensor, got {meta_reordered.dim()}-dimensional tensor" ) if meta_reordered.device != device: raise RuntimeError( f"Expected meta matrix to be on {device} device, got matrix on {meta_reordered.device} device" ) meta_dtype = meta_reordered.dtype if meta_dtype not in (torch.int16, torch.int32): raise RuntimeError(f"Invalid datatype {meta_dtype} of meta matrix") quadbits_per_meta_elem = meta_dtype.itemsize * 8 // 4 if sparse.dtype != torch.float: ksparse = 4 else: ksparse = 2 meta_nrows, meta_ncols = meta_reordered.shape if meta_nrows != m: raise RuntimeError( f"Number of rows of meta matrix {meta_nrows} must be equal to number of columns of spase matrix {m}" ) if meta_ncols * ksparse * quadbits_per_meta_elem != 2 * k: raise RuntimeError( f"Number of columns of sparse matrix {k} different from the {meta_ncols * ksparse * quadbits_per_meta_elem // 2}, " "expected according to the number of columns of meta matrix" ) # Undo meta tensor elements reordering. meta_offsets = _calculate_meta_reordering_scatter_offsets( m, meta_ncols, meta_dtype, device ) meta = torch.gather(meta_reordered.view(-1), 0, meta_offsets).view(m, meta_ncols) # Unpack sparse tensor back to original dense tensor, using # information provided by meta tensor. Note that torch.float # datatype is handled pretty much the same as # torch.half/torch.bfloat16, as metadata for a pair of torch.float # value is encoded as if underlying 8 bytes contain four # torch.half/torch.bfloat16 values, where either first two or last # two are zeros. 
meta_2 = torch.empty( (m, meta_ncols, 2 * quadbits_per_meta_elem), dtype=meta_dtype, device=device, ) if quadbits_per_meta_elem == 4: meta_2[:, :, 0] = meta & 0b11 meta_2[:, :, 1] = (meta >> 2) & 0b11 meta_2[:, :, 2] = (meta >> 4) & 0b11 meta_2[:, :, 3] = (meta >> 6) & 0b11 meta_2[:, :, 4] = (meta >> 8) & 0b11 meta_2[:, :, 5] = (meta >> 10) & 0b11 meta_2[:, :, 6] = (meta >> 12) & 0b11 meta_2[:, :, 7] = (meta >> 14) & 0b11 elif quadbits_per_meta_elem == 8: meta_2[:, :, 0] = meta & 0b11 meta_2[:, :, 1] = (meta >> 2) & 0b11 meta_2[:, :, 2] = (meta >> 4) & 0b11 meta_2[:, :, 3] = (meta >> 6) & 0b11 meta_2[:, :, 4] = (meta >> 8) & 0b11 meta_2[:, :, 5] = (meta >> 10) & 0b11 meta_2[:, :, 6] = (meta >> 12) & 0b11 meta_2[:, :, 7] = (meta >> 14) & 0b11 meta_2[:, :, 8] = (meta >> 16) & 0b11 meta_2[:, :, 9] = (meta >> 18) & 0b11 meta_2[:, :, 10] = (meta >> 20) & 0b11 meta_2[:, :, 11] = (meta >> 22) & 0b11 meta_2[:, :, 12] = (meta >> 24) & 0b11 meta_2[:, :, 13] = (meta >> 26) & 0b11 meta_2[:, :, 14] = (meta >> 28) & 0b11 meta_2[:, :, 15] = (meta >> 30) & 0b11 dense_offsets = meta_2.view(-1) + ( torch.arange(0, 2 * m * k // ksparse, device=device) * 4 ).view(-1, 1).repeat(1, 2).view(-1) dense = torch.zeros((m * 2 * k,), dtype=sparse.dtype, device=device) if sparse.dtype != torch.float: dense.scatter_(0, dense_offsets, sparse.view(-1)) else: dense.view(torch.half).scatter_( 0, dense_offsets, sparse.view(torch.half).view(-1) ) return dense.view(m, 2 * k) def _sparse_semi_structured_tile(dense): """ This function computes a 2:4 sparse tile by greedily taking the largest values. Since we take the largest values greedily, how the sorting algorithm handles duplicates affects the ultimate sparsity pattern. Note that this function does not have the same sorting semantics as our CUDA backend, which is exposed via `torch._sparse_semi_structured_tile` and thus returns a different pattern. """ def greedy_prune_tile(tile): num_kept_row = [0, 0, 0, 0] num_kept_col = [0, 0, 0, 0] for x in tile.flatten().sort(descending=True, stable=True).indices: r, c = x // 4, x % 4 if num_kept_row[r] < 2 and num_kept_col[c] < 2: num_kept_row[r] += 1 num_kept_col[c] += 1 else: tile[r, c] = 0 for batch in dense.unfold(0, 4, 4).unfold(1, 4, 4): for tile in batch: greedy_prune_tile(tile) return dense def _compute_compressed_swizzled_bitmask(dense): """ Calculates the compressed swizzled bitmask from a dense tensor """ # first we need to convert the dense tensor to a bitmask int_bitmask = dense.bool().to(torch.uint8) # Each thread is responsible for an 8x8 tile, which contains 4 4x4 tiles: # A, B, C and D, as displayed in the following schema: # +---+---+ # | A | B | # +---+---+ # | C | D | # +---+---+ # we first need to split into the 8x8 tiles bitmask_8x8_chunks = int_bitmask.unfold(0, 8, 8).unfold(1, 8, 8) # then we unfold again to get our indivdual 4x4 tiles bitmask_4x4_chunks = bitmask_8x8_chunks.unfold(2, 4, 4).unfold(3, 4, 4) # Each 4x4 bitmask defines two 8-bit integers, which encode the sparsity pattern # of that tile. Note that the least siginificant bit is stored first. 
# [1 1 0 0] # [1 1 0 0] -> 0011 0011 -> 51 # [0 0 1 1] 1100 1100 204 # [0 0 1 1] # reshape tensor to expand tiles into 8-bit vectors bitmask_binary_representation = bitmask_4x4_chunks.reshape( *bitmask_4x4_chunks.shape[:2], 4, 2, 8 ) # to convert from binary representation, we can do a matmul with powers of two powers_of_two = 2 ** torch.arange(8, dtype=torch.float, device="cuda") # To run on GPU: cast to float to do matmul and then cast back compressed_swizzled_bitmask = ( bitmask_binary_representation.to(torch.float) @ powers_of_two ).to(torch.uint8) return compressed_swizzled_bitmask ```
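A hedged round-trip sketch for the two CUTLASS conversion helpers above. The shape `(32, 64)` in `float16` satisfies the divisibility checks (`rows % 32 == 0`, `cols % 16 == 0`), and an exactly 2:4-sparse input is reconstructed bit-for-bit. The helpers are plain tensor ops, so the sketch runs on CPU as well as CUDA, even though the packed format targets CUDA kernels.

```py
# Round-trip an exactly 2:4-sparse fp16 matrix through the CUTLASS helpers.
import torch
from torch.sparse._semi_structured_conversions import (
    sparse_semi_structured_from_dense_cutlass,
    sparse_semi_structured_to_dense_cutlass,
)

device = "cuda" if torch.cuda.is_available() else "cpu"
dense = torch.randn(32, 64, dtype=torch.half, device=device)
dense = dense.view(32, 16, 4)
dense[..., 2:] = 0            # keep 2 of every 4 consecutive elements
dense = dense.view(32, 64)

packed, meta = sparse_semi_structured_from_dense_cutlass(dense)
print(packed.shape, meta.shape, meta.dtype)   # (32, 32), (32, 4), torch.int16
restored = sparse_semi_structured_to_dense_cutlass(packed, meta)
print(torch.equal(restored, dense))           # True
```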
============================================================================================================================ SOURCE CODE FILE: _semi_structured_ops.py LINES: 1 SIZE: 6.41 KB PATH: scripts\freecad_env\Lib\site-packages\torch\sparse\_semi_structured_ops.py ENCODING: utf-8 ```py # mypy: allow-untyped-defs import contextlib import torch __all__ = [ "fallback_dispatcher", "semi_sparse_values", "semi_sparse_indices", "semi_sparse_t", "semi_sparse_view", "semi_sparse_detach", "semi_sparse_mm", "semi_sparse_addmm", "semi_sparse_linear", "semi_sparse_scaled_mm", ] @contextlib.contextmanager def no_dispatch(): guard = torch._C._DisableTorchDispatch() try: yield finally: del guard def fallback_dispatcher(func, types, args, kwargs): with no_dispatch(): return func(*args) def semi_sparse_values(func, types, args=(), kwargs=None) -> torch.Tensor: assert len(args) == 1 A = args[0] assert isinstance(A, torch.sparse.SparseSemiStructuredTensor) assert A.packed is not None if A.meta is None: m, k = A.shape num_kept_elements = m * k // 2 return A.packed[:num_kept_elements:].view(m, -1) else: return A.packed.detach() def semi_sparse_indices(func, types, args=(), kwargs=None) -> torch.Tensor: assert len(args) == 1 A = args[0] assert isinstance(A, torch.sparse.SparseSemiStructuredTensor) assert A.packed is not None if A.meta is None: m, k = A.shape num_kept_elements = m * k // 2 metadata = A.packed[num_kept_elements:].view(m, -1) return metadata.view(torch.int32 if A.dtype == torch.int32 else torch.int16) else: return A.meta def semi_sparse_t(func, types, args=(), kwargs=None) -> torch.Tensor: assert len(args) == 1 self = args[0] assert isinstance(self, torch.sparse.SparseSemiStructuredTensor) assert len(self.shape) == 2 # Because we cannot go from the compressed representation back to the dense representation currently, # we just keep track of how many times we have been transposed. Depending on whether the sparse matrix # is the first or second argument, we expect an even / odd number of calls to transpose respectively. 
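# The transposed wrapper constructed below swaps packed/meta with their
# pre-transposed counterparts (packed_t/meta_t) and transposes the swizzled
# bitmask, so applying .t() twice hands back buffers identical to the
# original tensor's.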
return self.__class__( torch.Size([self.shape[-1], self.shape[0]]), packed=self.packed_t, meta=self.meta_t, packed_t=self.packed, meta_t=self.meta, compressed_swizzled_bitmask=( self.compressed_swizzled_bitmask.transpose(0, 1) if self.compressed_swizzled_bitmask is not None else None ), fuse_transpose_cusparselt=args[0].fuse_transpose_cusparselt, alg_id_cusparselt=args[0].alg_id_cusparselt, ) def semi_sparse_view(func, types, args=(), kwargs=None) -> torch.Tensor: assert len(args) == 2 self, shape = args if tuple(shape) != self.shape: raise NotImplementedError( f"`view` is not implemented for SparseSemiStructuredTensor, except for the dummy case (shape={shape})" ) return self def semi_sparse_detach(func, types, args, kwargs) -> torch.Tensor: assert len(args) == 1 self = args[0] return self.__class__( shape=self.shape, packed=self.packed, meta=self.meta, packed_t=self.packed_t, meta_t=self.meta_t, compressed_swizzled_bitmask=self.compressed_swizzled_bitmask, fuse_transpose_cusparselt=self.fuse_transpose_cusparselt, alg_id_cusparselt=self.alg_id_cusparselt, requires_grad=False, ) def semi_sparse_mm(func, types, args=(), kwargs=None) -> torch.Tensor: assert len(args) == 2 A, B = args if A.ndim != 2 or B.ndim != 2: raise NotImplementedError( "`SparseSemiStructuredTensor` matmul: Broadcasting is not implemented" ) if isinstance(A, torch.sparse.SparseSemiStructuredTensor): row, col = B.shape B_padded = A._pad_dense_input(B) res = A._mm(B_padded) return res[:, :col] else: B_t = B.t() assert isinstance(B_t, torch.sparse.SparseSemiStructuredTensor) row, col = A.shape A_padded = B._pad_dense_input(A) res = B_t._mm(A_padded.t()).t() return res[:row, :] def semi_sparse_addmm(func, types, args=(), kwargs=None) -> torch.Tensor: assert len(args) == 3 bias, A, B = args if A.ndim != 2 or B.ndim != 2: raise NotImplementedError( "`SparseSemiStructuredTensor` matmul: Broadcasting is not implemented" ) if bias.ndim != 1: raise NotImplementedError( f"`SparseSemiStructuredTensor` matmul: only bias dim=1 supported. Shape={bias.shape}" ) if isinstance(A, torch.sparse.SparseSemiStructuredTensor): raise NotImplementedError( "`SparseSemiStructuredTensor` matmul: only operand B of `addmm` can be sparse" ) B_t = B.t() assert isinstance(B_t, torch.sparse.SparseSemiStructuredTensor) row, _col = A.shape A_padded = B_t._pad_dense_input(A) result = B_t._mm(A_padded.t(), bias=bias).t() return result[:row, :] def semi_sparse_linear(func, types, args=(), kwargs=None) -> torch.Tensor: assert len(args) in [2, 3] A, B = args[:2] bias = args[2] if len(args) == 3 else None shape = A.shape A_2d = A.view(-1, shape[-1]) if bias is None: res = A_2d @ B.t() else: res = semi_sparse_addmm( func=None, types=None, args=[bias, A_2d, B.t()], ) return res.view(*shape[:-1], -1) def semi_sparse_scaled_mm(func, types, args=(), kwargs=None) -> torch.Tensor: # pull all args, excluding use_fast_accum flag if set. A, B, A_scale, B_scale, bias, scale_result, out_dtype = args[:7] assert A.dtype == torch.float8_e4m3fn assert B.dtype == torch.float8_e4m3fn # only cuSPARSELt supports float8_e4m3fn currentl assert isinstance(A, torch.sparse.SparseSemiStructuredTensorCUSPARSELT) assert A.packed is not None # Currently we only support per-tensor scaling, with float32 scales assert A_scale.numel() == 1 and B_scale.numel() == 1 assert A_scale.dtype == torch.float32 and B_scale.dtype == torch.float32 # cuSPARSELt lacks the A and B operand scaling support, so instead we use alpha to scale the result. # Note that this limits us to per-tensor scalig only. 
    sparse_result = torch._cslt_sparse_mm(
        A.packed,
        B,
        alpha=A_scale * B_scale,
        out_dtype=out_dtype,
    )
    return sparse_result
```
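The handlers above are reached through `SparseSemiStructuredTensor.__torch_dispatch__` rather than called directly. A minimal usage sketch (assuming a CUDA build with the cuSPARSELt or CUTLASS semi-structured kernels available, half precision, and shapes that satisfy the backend's minimum-size constraints):

```py
import torch
import torch.nn.functional as F

# Build a weight whose rows follow the 2:4 pattern: two zeros in every
# group of four elements.
mask = torch.tensor([0, 0, 1, 1], dtype=torch.float16, device="cuda").tile(128, 32)
weight = torch.randn(128, 128, dtype=torch.float16, device="cuda") * mask

# Compress to the packed 2:4 representation.
sparse_weight = torch.sparse.to_sparse_semi_structured(weight)

x = torch.randn(64, 128, dtype=torch.float16, device="cuda")

# Dispatches to `semi_sparse_linear` above, which pads the dense operand
# and calls the fused sparse matmul (`semi_sparse_addmm` / `_mm`).
y = F.linear(x, sparse_weight)
print(y.shape)  # torch.Size([64, 128])
```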
=================================================================================================================== SOURCE CODE FILE: _triton_ops.py LINES: 1 SIZE: 86.59 KB PATH: scripts\freecad_env\Lib\site-packages\torch\sparse\_triton_ops.py ENCODING: utf-8 ```py # mypy: allow-untyped-decorators # mypy: allow-untyped-defs import math import os import weakref from functools import lru_cache from typing import Optional import torch from torch._dynamo.utils import warn_once from torch.utils._triton import has_triton from ._triton_ops_meta import get_meta TORCH_SPARSE_BSR_SCATTER_MM_LRU_CACHE_SIZE = int( os.getenv("TORCH_SPARSE_BSR_SCATTER_MM_LRU_CACHE_SIZE", 2) ) def check(cond, msg): if not cond: raise ValueError(msg) def check_bsr_layout(f_name, t): check( t.layout == torch.sparse_bsr, f"{f_name}(): only BSR sparse format is supported for the sparse argument.", ) def check_device(f_name, t, device): check( t.device == device and t.device.type == "cuda", f"{f_name}(): all inputs are expected to be on the same GPU device.", ) def check_mm_compatible_shapes(f_name, lhs, rhs): check( lhs.dim() >= 2 and rhs.dim() >= 2, f"{f_name}(): all inputs involved in the matrix product are expected to be at least 2D, " f"but got lhs.dim() == {lhs.dim()} and rhs.dim() == {rhs.dim()}.", ) _m, kl = lhs.shape[-2:] kr, _n = rhs.shape[-2:] check( kl == kr, f"{f_name}(): arguments' sizes involved in the matrix product are not compatible for matrix multiplication, " f"got lhs.shape[-1] == {kl} which is not equal to rhs.shape[-2] == {kr}.", ) def check_dtype(f_name, t, dtype, *additional_dtypes): check( t.dtype == dtype and t.dtype in ((torch.half, torch.bfloat16, torch.float) + tuple(*additional_dtypes)), f"{f_name}(): all inputs are expected to be of the same dtype " f"and one of (half, bfloat16, float32) or {additional_dtypes}, " f"but got dtype == {t.dtype}.", ) def check_blocksize(f_name, blocksize): assert len(blocksize) == 2 def is_power_of_two(v): return not (v & (v - 1)) def is_compatible_blocksize(b): res = True for blocksize in b: # Triton loads only blocks which are at least 16 and powers of 2. res = (blocksize >= 16 and is_power_of_two(blocksize)) and res return res check( is_compatible_blocksize(blocksize), f"{f_name}(): sparse inputs' blocksize ({blocksize[0]}, {blocksize[1]}) " "should be at least 16 and a power of 2 in each dimension.", ) def make_triton_contiguous(t): """Return input as a triton-contiguous tensor. A triton-contiguous tensor is defined as a tensor that has strides with minimal value smaller than or equal to 1. While triton kernels support triton-non-contiguous tensors (all strides being greater than 1) arguments, a considerable slow-down occurs because tensor data is copied element-wise rather than chunk-wise. Zero strides is assumed to not have this defect. 
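    For example, a contiguous ``(16, 32)`` tensor (strides ``(32, 1)``)
    and its transpose (strides ``(1, 32)``) both contain a stride equal
    to 1 and are returned as-is, while a column slice ``t[:, ::2]``
    (strides ``(32, 2)``) is copied via ``.contiguous()``.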
""" if min(t.stride()) > 1: # TODO: investigate if contiguity along other axes than the # last one can be beneficial for performance return t.contiguous() else: return t def broadcast_batch_dims(f_name, *tensors): try: return torch.broadcast_shapes(*(t.shape[:-2] for t in tensors)) except Exception: check(False, f"{f_name}(): inputs' batch dimensions are not broadcastable!") def slicer(dim, slice_range, *tensors): for t in tensors: slices = [slice(None)] * t.dim() slices[dim] = slice_range yield t[slices] def multidim_slicer(dims, slices, *tensors): for t in tensors: s = [slice(None)] * t.dim() for d, d_slice in zip(dims, slices): if d is not None: s[d] = d_slice yield t[s] def ptr_stride_extractor(*tensors): for t in tensors: yield t yield from t.stride() def grid_partitioner(full_grid, grid_blocks, tensor_dims_map): assert 0 <= len(full_grid) <= 3 assert 0 <= len(grid_blocks) <= 3 import itertools def generate_grid_points(): for fg, mg in zip(full_grid, grid_blocks): yield range(0, fg, mg) def generate_sliced_tensors(slices): for t, t_dims in tensor_dims_map.items(): yield next(multidim_slicer(t_dims, slices, t)) for grid_point in itertools.product(*generate_grid_points()): grid = [ min(fg - gp, mg) for fg, gp, mg in zip(full_grid, grid_point, grid_blocks) ] slices = [slice(gp, gp + g) for gp, g in zip(grid_point, grid)] # grid_points are iterated in a "contiguous" order, i.e. # left dimensions traversed slower than right dimensions. # This order is reversed for CUDA grids. yield grid[::-1], *generate_sliced_tensors(slices) def launch_kernel(kernel, tensor_dims_map, full_grid, grid_blocks=None): # cuda_max_grid = (2 ** 31 - 1, 2 ** 16 - 1, 2 ** 16 - 1) cuda_max_grid = (2147483647, 65535, 65535)[::-1] if grid_blocks is None: grid_blocks = cuda_max_grid else: def valid_grid_dim(g, mg): if g is None: return mg else: # grid must be at least 1 and no greater than mg return max(1, min(g, mg)) grid_blocks = tuple( valid_grid_dim(g, mg) for g, mg in zip(grid_blocks, cuda_max_grid) ) # type: ignore[assignment] for grid, *sliced_tensors in grid_partitioner( full_grid, grid_blocks, tensor_dims_map ): kernel(grid, *sliced_tensors) def prepare_inputs(bsr, *dense_tensors): # Introduce fake batch dimension if not present for convenience. crow_indices = bsr.crow_indices().unsqueeze(0) col_indices = bsr.col_indices().unsqueeze(0) values = make_triton_contiguous(bsr.values().unsqueeze(0)) tensors = [make_triton_contiguous(t.unsqueeze(0)) for t in dense_tensors] # Compute broadcasted batch dimension batch_dims_broadcasted = torch.broadcast_shapes( values.shape[:-3], *(t.shape[:-2] for t in tensors) ) # Broadcast batch dimensions and squash. # The result can be either a view or a copy. 
def batch_broadcast_and_squash(t, batch_dims, invariant_dims): return t.broadcast_to(batch_dims + invariant_dims).flatten( 0, len(batch_dims) - 1 ) crow_indices = batch_broadcast_and_squash( crow_indices, batch_dims_broadcasted, (-1,) ) col_indices = batch_broadcast_and_squash(col_indices, batch_dims_broadcasted, (-1,)) values = batch_broadcast_and_squash( values, batch_dims_broadcasted, values.shape[-3:] ) tensors = [ batch_broadcast_and_squash(t, batch_dims_broadcasted, t.shape[-2:]) for t in tensors ] return crow_indices, col_indices, values, *tensors def broadcast_batch_dims_bsr(f_name, bsr, *tensors): batch_shape = broadcast_batch_dims(f_name, bsr, *tensors) crow_indices = bsr.crow_indices().broadcast_to(batch_shape + (-1,)) col_indices = bsr.col_indices().broadcast_to(batch_shape + (-1,)) values = bsr.values().broadcast_to(batch_shape + bsr.values().shape[-3:]) size = batch_shape + bsr.shape[-2:] return torch.sparse_compressed_tensor( crow_indices, col_indices, values, size=size, layout=bsr.layout ) # NOTE: this function will ALWAYS create a view def tile_to_blocksize(t, blocksize): *rest, m, n = t.shape new_shape = rest + [ m // blocksize[0], blocksize[0], n // blocksize[1], blocksize[1], ] # using .view instead of .reshape to ensure that the result is # indeed a view: return t.view(new_shape).transpose(-3, -2) def as1Dbatch(tensor): """Return tensor as 3D tensor by either prepending new dimensions to the tensor shape (when ``tensor.ndim < 3``), or by collapsing starting dimensions into the first dimension (when ``tensor.ndim > 3``). """ while tensor.ndim < 3: tensor = tensor.unsqueeze(0) if tensor.ndim > 3: tensor = tensor.flatten(0, tensor.ndim - 3) assert tensor.ndim == 3, tensor.shape return tensor def scatter_mm(blocks, others, indices_data, *, accumulators=None): """Scattered matrix multiplication of tensors. A scattered matrix multiplication is defined as a series of matrix multiplications applied to input tensors according to the input and output mappings specified by indices data. The following indices data formats are supported for defining a scattered matrix multiplication operation (:attr:`indices_data[0]` holds the name of the indices data format as specified below): - ``"scatter_mm"`` - matrix multiplications scattered in batches of tensors. If :attr:`blocks` is a :math:`(* \times M \times K) tensor, :attr:`others` is a :math:`(* \times K \times N)` tensor, :attr:`accumulators` is a :math:`(* \times M \times N)` tensor, and :attr:`indices = indices_data['indices']` is a :math:`(* \times 3)` tensor, then the operation is equivalent to the following code:: c_offsets, pq = indices_data[1:] for r in range(len(c_offsets) - 1): for g in range(c_offsets[r], c_offsets[r + 1]): p, q = pq[g] accumulators[r] += blocks[p] @ others[q] - ``"bsr_strided_mm"`` - matrix multiplications scattered in batches of tensors and a tensor. 
      If :attr:`blocks` is a :math:`(Ms \times Ks)` tensor, :attr:`others` is
      a :math:`(* \times K \times N)` tensor, :attr:`accumulators` is a
      :math:`(* \times M \times N)` tensor, then the operation is equivalent
      to the following code::

        c_indices, r_offsets, p_offsets, q_offsets, meta = indices_data[1:]
        for b in range(nbatches):
            for i, r in enumerate(r_offsets):
                r0, r1 = divmod(r, N)
                acc = accumulators[b, r0:r0 + Ms, r1:r1 + Ns]
                for g in range(c_indices[i], c_indices[i+1]):
                    p = p_offsets[g]
                    q0, q1 = divmod(q_offsets[g], N)
                    acc += blocks[p] @ others[b, q0:q0 + Ks, q1:q1 + Ns]

      where ``Ns = N // meta['SPLIT_N']``, and ``M`` and ``K`` are integer
      multiples of ``Ms`` and ``Ks``, respectively.

    - ``"bsr_strided_mm_compressed"`` - matrix multiplications scattered in
      batches of tensors and a tensor. A memory and processor efficient
      version of the ``"bsr_strided_mm"`` format.
      If :attr:`blocks` is a :math:`(Ms \times Ks)` tensor, :attr:`others`
      is a :math:`(* \times K \times N)` tensor, :attr:`accumulators` is a
      :math:`(* \times M \times N)` tensor, then the operation is equivalent
      to the following code::

        c_indices, r_offsets, q_offsets, meta = indices_data[1:]
        for b in range(nbatches):
            for r in r_offsets:
                m = (r // N) // Ms
                n = (r % N) // Ns
                r0, r1 = divmod(r, N)
                c0, c1 = c_indices[m], c_indices[m + 1]
                acc = accumulators[b, r0:r0 + Ms, r1:r1 + Ns]
                for i, p in enumerate(range(c0, c1)):
                    q = q_offsets[n * c1 + (SPLIT_N - n) * c0 + i]
                    q0, q1 = divmod(q, N)
                    acc += blocks[p] @ others[b, q0:q0 + Ks, q1:q1 + Ns]

      where ``Ns = N // meta['SPLIT_N']``, and ``M`` and ``K`` are integer
      multiples of ``Ms`` and ``Ks``, respectively.

      Notice that the order of ``r_offsets`` items can be arbitrary; this
      property enables defining swizzle operators via rearrangements of
      ``r_offsets`` items.

    Auxiliary functions are provided for pre-computing :attr:`indices_data`.
    For example, :func:`bsr_scatter_mm_indices_data` is used to define
    indices data for matrix multiplication of BSR and strided tensors.

    Parameters
    ----------
    blocks (Tensor): a 3-D tensor of first matrices to be multiplied

    others (Tensor): a tensor of second matrices to be multiplied. If
      ``indices_data[0]=="scatter_mm"``, the tensor is a 1-D batch tensor
      of second input matrices to be multiplied. Otherwise, the second
      input matrices are slices of the :attr:`others` tensor.

    indices_data (tuple): format data that defines the inputs and outputs
      of scattered matrix multiplications.

    Keyword arguments
    -----------------
    accumulators (Tensor, optional): a tensor of matrix product
      accumulators. If ``indices_data[0]=="scatter_mm"``, the tensor is a
      1-D batch tensor of output matrices. Otherwise, output matrices are
      slices of the :attr:`accumulators` tensor.
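    Example
    -------
    An illustrative call in the ``"scatter_mm"`` format (small shapes
    chosen for clarity; per the dispatch below, the Triton kernel is used
    only when ``Ms``, ``Ks`` and ``Ns`` are all multiples of 16, otherwise
    a pure PyTorch fallback loop is taken)::

        blocks = torch.randn(3, 16, 16, dtype=torch.float16, device="cuda")
        others = torch.randn(3, 16, 16, dtype=torch.float16, device="cuda")
        c_offsets = torch.tensor([0, 2, 3], device="cuda")
        pq = torch.tensor([[0, 0], [1, 1], [2, 2]], device="cuda")
        # accumulators[0] = blocks[0] @ others[0] + blocks[1] @ others[1]
        # accumulators[1] = blocks[2] @ others[2]
        accumulators = scatter_mm(blocks, others, ("scatter_mm", c_offsets, pq))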
""" indices_format = indices_data[0] assert blocks.ndim == 3 _P, Ms, Ks = blocks.shape if indices_format == "scatter_mm": c_offsets, pq = indices_data[1:] assert others.ndim == 3 _Q, Ks_, Ns = others.shape assert Ks == Ks_ if accumulators is None: R = c_offsets.shape[0] - 1 accumulators = torch.zeros( (R, Ms, Ns), dtype=blocks.dtype, device=blocks.device ) else: R, Ms_, Ns_ = accumulators.shape assert Ms_ == Ms assert Ns_ == Ns if Ms % 16 or Ks % 16 or Ns % 16 or _scatter_mm2 is None: for r in range(c_offsets.shape[0] - 1): g0 = c_offsets[r] g1 = c_offsets[r + 1] for g in range(g0, g1): p, q = pq[g] accumulators[r] += blocks[p] @ others[q] else: _scatter_mm2(blocks, others, c_offsets, pq, accumulators) return accumulators elif indices_format == "bsr_strided_mm": others_shape = others.shape others = as1Dbatch(others) B, K, N = others.shape assert K % Ks == 0 c_indices, r_offsets, p_offsets, q_offsets, meta = indices_data[1:] SPLIT_N = meta["SPLIT_N"] if accumulators is None: M = Ms + (r_offsets.max().item() + 1) // N accumulators = torch.zeros( (*others_shape[:-2], M, N), dtype=blocks.dtype, device=blocks.device ) else: M, N_ = accumulators.shape[-2:] assert N_ == N accumulators_shape = accumulators.shape accumulators = as1Dbatch(accumulators) Ns = N // SPLIT_N if Ms % 16 or Ks % 16 or Ns % 16 or _scatter_mm6 is None: accumulators.zero_() for b in range(B): for r in range(r_offsets.shape[0]): r_ = r_offsets[r].item() g0 = c_indices[r].item() g1 = c_indices[r + 1].item() r0, r1 = divmod(r_, N) acc = accumulators[b, r0 : r0 + Ms, r1 : r1 + Ns] for g in range(g0, g1): p, q = p_offsets[g], q_offsets[g] q0, q1 = divmod(q.item(), N) acc += blocks[p] @ others[b, q0 : q0 + Ks, q1 : q1 + Ns] else: _scatter_mm6( blocks, others, c_indices, r_offsets, p_offsets, q_offsets, meta, accumulators, ) return accumulators.view(accumulators_shape) elif indices_format == "bsr_strided_mm_compressed": others_shape = others.shape others = as1Dbatch(others) B, K, N = others.shape assert K % Ks == 0 c_indices, r_offsets, q_offsets, meta = indices_data[1:] SPLIT_N = meta["SPLIT_N"] if accumulators is None: M = Ms + (r_offsets.max().item() + 1) // N accumulators = torch.zeros( (*others_shape[:-2], M, N), dtype=blocks.dtype, device=blocks.device ) else: M, N_ = accumulators.shape[-2:] assert N_ == N accumulators_shape = accumulators.shape accumulators = as1Dbatch(accumulators) Ns = N // SPLIT_N if Ms % 16 or Ks % 16 or Ns % 16 or _scatter_mm6 is None: for b in range(B): for j in range(len(r_offsets)): r0, r1 = divmod(r_offsets[j].item(), N) m = r0 // Ms n = r1 // Ns c0 = c_indices[m].item() c1 = c_indices[m + 1].item() acc = accumulators[b, r0 : r0 + Ms, r1 : r1 + Ns] for i, p in enumerate(range(c0, c1)): q = q_offsets[n * c1 + (SPLIT_N - n) * c0 + i].item() q0, q1 = divmod(q, N) acc += blocks[p] @ others[b, q0 : q0 + Ks, q1 : q1 + Ns] else: p_offsets = torch.empty( (0,), dtype=q_offsets.dtype, device=q_offsets.device ) _scatter_mm6( blocks, others, c_indices, r_offsets, p_offsets, q_offsets, meta, accumulators, ) return accumulators.view(accumulators_shape) else: raise NotImplementedError(indices_format) def scatter_mm_meta( M, K, N, Ms, Ks, GROUP_SIZE=None, TILE_M=None, TILE_N=None, SPLIT_N=None, num_warps=None, num_stages=None, **extra, ): if {TILE_M, TILE_N, SPLIT_N, num_warps, num_stages, GROUP_SIZE} == {None}: device_name = torch.cuda.get_device_name() meta = get_meta( "scatter_mm", (M, K, N, Ms, Ks), device_name, version=(0, torch.float16, 0.5), ) if meta is not None: meta.update(**extra) return meta # The 
following parameters are optimized for the performance # equilibrium points of bsr-dense and dense-dense matrix # multiplications when using GPU card NVIDIA GeForce RTX 2060 # SUPER. For points far from the performance equilibrium # points as well as for other GPU cards, the optimal # parameters are likely different from what specified below. if (M, K, N) == (256,) * 3: if (Ms, Ks) == (16, 16): SPLIT_N = 1 TILE_M = 16 TILE_N = 16 GROUP_SIZE = 4 num_stages = 1 num_warps = 4 # noqa: E225,E231,E702 elif (Ms, Ks) == (32, 32): SPLIT_N = 2 TILE_M = 32 TILE_N = 16 GROUP_SIZE = 4 num_stages = 1 num_warps = 4 # noqa: E225,E231,E702 elif (Ms, Ks) == (64, 64): SPLIT_N = 1 TILE_M = 32 TILE_N = 32 GROUP_SIZE = 4 num_stages = 1 num_warps = 4 # noqa: E225,E231,E702 elif (Ms, Ks) == (128, 128): SPLIT_N = 1 TILE_M = 32 TILE_N = 32 GROUP_SIZE = 2 num_stages = 1 num_warps = 4 # noqa: E225,E231,E702 elif (M, K, N) == (512,) * 3: if (Ms, Ks) == (16, 16): SPLIT_N = 8 TILE_M = 16 TILE_N = 64 GROUP_SIZE = 2 num_stages = 1 num_warps = 2 # noqa: E225,E231,E702 elif (Ms, Ks) == (32, 32): SPLIT_N = 8 TILE_M = 32 TILE_N = 64 GROUP_SIZE = 4 num_stages = 1 num_warps = 2 # noqa: E225,E231,E702 elif (Ms, Ks) == (64, 64): SPLIT_N = 4 TILE_M = 32 TILE_N = 128 GROUP_SIZE = 4 num_stages = 1 num_warps = 4 # noqa: E225,E231,E702 elif (Ms, Ks) == (128, 128): SPLIT_N = 8 TILE_M = 64 TILE_N = 64 GROUP_SIZE = 4 num_stages = 1 num_warps = 4 # noqa: E225,E231,E702 elif (M, K, N) == (1024,) * 3: if (Ms, Ks) == (16, 16): SPLIT_N = 4 TILE_M = 16 TILE_N = 128 GROUP_SIZE = 2 num_stages = 1 num_warps = 1 # noqa: E225,E231,E702 elif (Ms, Ks) == (32, 32): SPLIT_N = 8 TILE_M = 32 TILE_N = 64 GROUP_SIZE = 2 num_stages = 1 num_warps = 1 # noqa: E225,E231,E702 elif (Ms, Ks) == (64, 64): SPLIT_N = 16 TILE_M = 64 TILE_N = 64 GROUP_SIZE = 4 num_stages = 1 num_warps = 2 # noqa: E225,E231,E702 elif (Ms, Ks) == (128, 128): SPLIT_N = 16 TILE_M = 64 TILE_N = 64 GROUP_SIZE = 4 num_stages = 1 num_warps = 4 # noqa: E225,E231,E702 elif (Ms, Ks) == (256, 256): SPLIT_N = 16 TILE_M = 64 TILE_N = 64 GROUP_SIZE = 2 num_stages = 1 num_warps = 4 # noqa: E225,E231,E702 elif (M, K, N) == (2048,) * 3: if (Ms, Ks) == (16, 16): SPLIT_N = 4 TILE_M = 16 TILE_N = 128 GROUP_SIZE = 8 num_stages = 1 num_warps = 1 # noqa: E225,E231,E702 elif (Ms, Ks) == (32, 32): SPLIT_N = 4 TILE_M = 32 TILE_N = 64 GROUP_SIZE = 4 num_stages = 1 num_warps = 1 # noqa: E225,E231,E702 elif (Ms, Ks) == (64, 64): SPLIT_N = 4 TILE_M = 64 TILE_N = 128 GROUP_SIZE = 4 num_stages = 1 num_warps = 4 # noqa: E225,E231,E702 elif (Ms, Ks) == (128, 128): SPLIT_N = 8 TILE_M = 64 TILE_N = 64 GROUP_SIZE = 4 num_stages = 1 num_warps = 4 # noqa: E225,E231,E702 elif (Ms, Ks) == (256, 256): SPLIT_N = 4 TILE_M = 64 TILE_N = 64 GROUP_SIZE = 2 num_stages = 1 num_warps = 4 # noqa: E225,E231,E702 elif (M, K, N) == (4096,) * 3: if (Ms, Ks) == (16, 16): SPLIT_N = 2 TILE_M = 16 TILE_N = 256 GROUP_SIZE = 2 num_stages = 1 num_warps = 2 # noqa: E225,E231,E702 elif (Ms, Ks) == (32, 32): SPLIT_N = 2 TILE_M = 32 TILE_N = 64 GROUP_SIZE = 2 num_stages = 1 num_warps = 1 # noqa: E225,E231,E702 elif (Ms, Ks) == (64, 64): SPLIT_N = 2 TILE_M = 64 TILE_N = 128 GROUP_SIZE = 2 num_stages = 1 num_warps = 4 # noqa: E225,E231,E702 if SPLIT_N is None: # Assume NVIDIA GeForce RTX 2060 SUPER: # With the probality of 92% (99.9% when N > 512), the # performance will not be worse more than 2% from the # performance when using an optimal value. Otherwise, when N # <= 512, using the following heuristics may give upto 15% # lower performance. 
SPLIT_N = { 16: 1, 32: 2, 64: 4, 128: 8, 256: 16, 512: 8, 1024: 16, 4096: 32, 8192: 64, }.get(N, 16) if Ms >= 512 and N >= 2048: SPLIT_N = 1 Ns = N // SPLIT_N if TILE_M is None: TILE_M = min(64 if Ns < 512 else 32, Ms) if TILE_N is None: TILE_N = min(64 if Ns < 512 else 32, Ns) num_stages = num_stages or 1 if num_warps is None: if min(M, N) > 1024: num_warps = {16: 1, 32: 1, 64: 2}.get(Ms, 4) elif min(M, N) == 1024: num_warps = {16: 1, 32: 1, 64: 2}.get(Ms, 4) elif min(M, N) == 256: num_warps = {16: 1, 32: 4}.get(Ms, 4) else: num_warps = {16: 1, 32: 2}.get(Ms, 4) GROUP_SIZE = GROUP_SIZE or 4 assert TILE_M <= Ms, dict(TILE_M=TILE_M, Ms=Ms) assert TILE_N <= Ns, dict(TILE_N=TILE_N, Ns=Ns) assert Ms <= M, dict(M=M, Ms=Ms) assert Ns <= N, dict(N=N, Ns=Ns) assert Ks <= K, dict(K=K, Ks=Ks) return dict( TILE_M=TILE_M, TILE_N=TILE_N, GROUP_SIZE=GROUP_SIZE, num_stages=num_stages, num_warps=num_warps, SPLIT_N=SPLIT_N, **extra, ) def bsr_dense_addmm_meta( M, K, N, Ms, Ks, beta, alpha, SPLIT_N=None, GROUP_SIZE_ROW=None, num_warps=None, num_stages=None, sparsity=None, dtype=None, out_dtype=None, _version=0, **extra, ): # Specifying _version is useful for situations when one wants to # discard existing triton kernel tuning results, say, in testing # bsr_dense_addmm_meta functionality. if dtype is None: dtype = torch.float16 if out_dtype is None: out_dtype = dtype if sparsity is None: sparsity = 0.5 if {SPLIT_N, num_warps, num_stages, GROUP_SIZE_ROW} == {None}: device_name = torch.cuda.get_device_name() key = (M, K, N, Ms, Ks, beta == 0, beta == 1, alpha == 1) if dtype is out_dtype: version_dtype = dtype else: version_dtype = dtype, out_dtype meta = get_meta( "bsr_dense_addmm", key, device_name, version=(_version, version_dtype, sparsity), ) if meta is None and sparsity != 0.5: meta = get_meta( "bsr_dense_addmm", key, device_name, version=(_version, version_dtype, 0.5), ) if meta is None and dtype is not out_dtype: meta = get_meta( "bsr_dense_addmm", key, device_name, version=(_version, dtype, 0.5) ) if meta is None: # find approximate meta such that N % SPLIT_N == 0. matching_meta = get_meta( "bsr_dense_addmm", (*key[:2], "*", *key[3:]), device_name, version=(_version, version_dtype, 0.5), ) if matching_meta is None and dtype is not out_dtype: matching_meta = get_meta( "bsr_dense_addmm", (*key[:2], "*", *key[3:]), device_name, version=(_version, dtype, 0.5), ) for mkey in sorted(matching_meta or {}): meta_ = matching_meta[mkey] n = mkey[2] split_n = meta_["SPLIT_N"] c = n // split_n if N % c == 0 and n <= N: meta = dict(meta_) meta["SPLIT_N"] = N // c if meta is not None: meta.update(**extra) return meta else: # see [Computing optimal kernel parameters] in # _triton_ops_meta.py for ways to avoid this warning # message warn_once( "bsr_dense_addmm uses non-optimal triton kernel parameters" f" for {M=} {K=} {N=} {Ms=}, {Ks=} {beta=} {alpha=} {dtype=} {out_dtype=}" ) SPLIT_N = SPLIT_N or max(N // Ms, 1) GROUP_SIZE_ROW = GROUP_SIZE_ROW or 4 num_stages = num_stages or 1 num_warps = num_warps or 4 return dict( SPLIT_N=SPLIT_N, GROUP_SIZE_ROW=GROUP_SIZE_ROW, num_stages=num_stages, num_warps=num_warps, **extra, ) class TensorAsKey: """A light-weight wrapper of a tensor that enables storing tensors as keys with efficient memory reference based comparision as an approximation to data equality based keys. Motivation: the hash value of a torch tensor is tensor instance based that does not use data equality and makes the usage of tensors as keys less useful. 
For instance, the result of ``len({a.crow_indices(), a.crow_indices()})`` is `2`, although, the tensor results from `crow_indices` method call are equal, in fact, these share the same data storage. On the other hand, for efficient caching of tensors we want to avoid calling torch.equal that compares tensors item-wise. TensorAsKey offers a compromise in that it guarantees key equality of tensors that references data in the same storage in the same manner and without accessing underlying data. However, this approach does not always guarantee correctness. For instance, for a complex tensor ``x``, we have ``TensorAsKey(x) == TensorAsKey(x.conj())`` while ``torch.equal(x, x.conj())`` would return False. """ def __init__(self, obj): def get_tensor_key(obj): # Warning: TensorAsKey does not track negative nor # conjugate bits of its input object because in the use # case of wrapping compressed/plain indices of compressed # sparse tensors (that are always integer tensors with # non-negative items) these bits are never set. However, # when extending the use of TensorAsKey to float or # complex tensors, the values of these bits (see is_neg # and is_conj methods) must be included in the key as # well. assert not (obj.dtype.is_floating_point or obj.dtype.is_complex), obj.dtype return ( obj.data_ptr(), obj.storage_offset(), obj.shape, obj.stride(), obj.dtype, ) self._obj_ref = weakref.ref(obj) if obj.layout is torch.strided: self.key = get_tensor_key(obj) elif obj.layout in {torch.sparse_csr, torch.sparse_bsr}: self.key = ( get_tensor_key(obj.crow_indices()), get_tensor_key(obj.col_indices()), ) elif obj.layout in {torch.sparse_csc, torch.sparse_bsc}: self.key = ( get_tensor_key(obj.ccol_indices()), get_tensor_key(obj.row_indices()), ) else: raise NotImplementedError(obj.layout) self._hash = hash(self.key) def __hash__(self): return self._hash def __eq__(self, other): if not isinstance(other, TensorAsKey): return False if self.obj is None or other.obj is None: # dead objects always compare unequal unless these are # same objects return self is other return self.key == other.key @property def obj(self): """Return object if alive, otherwise None.""" return self._obj_ref() @lru_cache(maxsize=TORCH_SPARSE_BSR_SCATTER_MM_LRU_CACHE_SIZE) def _bsr_scatter_mm_indices_data( indices_format, M, K, N, Ms, Ks, nbatches, SPLIT_N, compressed_sparse_tensor_as_key ): bsr = compressed_sparse_tensor_as_key.obj assert bsr is not None crow_indices, col_indices = bsr.crow_indices(), bsr.col_indices() device = crow_indices.device indices_dtype = torch.int32 if indices_format == "bsr_strided_mm_compressed": Ns = N // SPLIT_N q_offsets_lst = [] b = torch.arange(SPLIT_N, dtype=indices_dtype, device=device) * Ns for m in range(M // Ms): r0 = crow_indices[m].item() r1 = crow_indices[m + 1].item() if r1 == r0: continue q_offsets_lst.append( (col_indices[r0:r1] * (Ks * N)).repeat(SPLIT_N) + b.repeat_interleave(r1 - r0) ) q_offsets = torch.cat(q_offsets_lst) crow_indices_diff = crow_indices.diff() non_zero_row_indices = crow_indices_diff.nonzero() a = non_zero_row_indices * (Ms * N) r_offsets = (a + b).view(-1) c_indices = crow_indices # swizzle operation: mm elements with longer sums are computed first: nnz_per_row = crow_indices_diff[non_zero_row_indices].repeat_interleave(SPLIT_N) nnz_per_row, indices = nnz_per_row.sort(descending=True, stable=True) r_offsets = r_offsets[indices] return (indices_format, c_indices, r_offsets, q_offsets) elif indices_format == "bsr_strided_mm": Ns = N // SPLIT_N p_offsets_lst = [] q_offsets_lst = [] 
b = torch.arange(SPLIT_N, dtype=indices_dtype, device=device) * Ns for m in range(M // Ms): r0 = crow_indices[m].item() r1 = crow_indices[m + 1].item() if r1 == r0: continue p_offsets_lst.append( torch.arange(r0, r1, dtype=indices_dtype, device=device).repeat(SPLIT_N) ) q_offsets_lst.append( (col_indices[r0:r1] * (Ks * N)).repeat(SPLIT_N) + b.repeat_interleave(r1 - r0) ) q_offsets = torch.cat(q_offsets_lst) crow_indices_diff = crow_indices.diff() non_zero_row_indices = crow_indices_diff.nonzero() a = non_zero_row_indices * (Ms * N) r_offsets = (a + b).view(-1) c_indices = torch.cat( ( crow_indices[:1], torch.cumsum( crow_indices_diff[non_zero_row_indices].repeat_interleave(SPLIT_N), 0, ), ) ) p_offsets = torch.cat(p_offsets_lst) return (indices_format, c_indices, r_offsets, p_offsets, q_offsets) elif indices_format == "scatter_mm": Ns = Ms c_indices = [0] pq_offsets = [] # todo: eliminate inner for-loops for efficiency for b in range(nbatches): for m in range(M // Ms): r0 = crow_indices[m].item() r1 = crow_indices[m + 1].item() for n in range(N // Ns): c_indices.append(c_indices[-1] + r1 - r0) for t in range(r1 - r0): p = r0 + t q = (col_indices[p].item() + b * (K // Ks)) * (N // Ns) + n pq_offsets.append([p, q]) return ( indices_format, torch.tensor(c_indices, dtype=indices_dtype, device=device), torch.tensor(pq_offsets, dtype=indices_dtype, device=device), ) else: raise ValueError( f"Invalid {indices_format=}. Expected bsr_strided_mm_compressed|bsr_strided_mm|scatter_mm" ) def bsr_scatter_mm_indices_data( bsr, other, indices_format="bsr_strided_mm_compressed", **meta_input ): """Computes indices data for :func:`scatter_mm` used in BSR and strided tensor matrix multiplication. """ assert bsr.dense_dim() == 0 assert bsr.ndim == 2 # no batch dims blocksize = bsr.values().shape[-2:] M, K = bsr.shape Ms, Ks = blocksize K_, N = other.shape[-2:] assert K_ == K nbatches = other.shape[:-2].numel() meta = scatter_mm_meta(M, K, N, Ms, Ks, **meta_input) if "allow_tf32" not in meta_input: meta.update(allow_tf32=bsr.dtype in {torch.float16, torch.bfloat16}) SPLIT_N = meta["SPLIT_N"] indices_data = _bsr_scatter_mm_indices_data( indices_format, M, K, N, Ms, Ks, nbatches, SPLIT_N, TensorAsKey(bsr) ) if indices_format == "bsr_strided_mm_compressed": meta.update(is_compressed=True) return indices_data + (meta,) elif indices_format == "bsr_strided_mm": meta.update(is_compressed=False) return indices_data + (meta,) else: return indices_data def bsr_scatter_mm(bsr, other, indices_data=None, out=None): """BSR @ strided -> strided""" assert bsr.ndim == 2 assert other.ndim >= 2 Ms, Ks, Ns = bsr.shape[-2], bsr.shape[-1], other.shape[-1] blocksize = bsr.values().shape[-2:] if indices_data is None: indices_data = bsr_scatter_mm_indices_data( bsr, other, indices_format="bsr_strided_mm_compressed" ) indices_format = indices_data[0] if out is None: out = torch.empty( (*other.shape[:-2], Ms, Ns), dtype=bsr.dtype, device=bsr.device ) out_shape = out.shape out = as1Dbatch(out) if bsr._nnz() == 0: out.zero_() elif indices_format in {"bsr_strided_mm_compressed", "bsr_strided_mm"}: out.zero_() scatter_mm(bsr.values(), other, indices_data, accumulators=out) elif indices_format == "scatter_mm": nbatches = other.shape[:-2].numel() accumulators = torch.zeros( ( nbatches * Ms // blocksize[0] * Ns // blocksize[0], blocksize[0], blocksize[0], ), dtype=bsr.dtype, device=bsr.device, ) others = ( as1Dbatch(other) .transpose(-2, -1) .view( nbatches, Ns // blocksize[0], blocksize[0], Ks // blocksize[1], blocksize[1], ) .movedim( (3, 1, 
4, 2), (1, 2, 3, 4) ) # equivalent to .transpose(-3, -2).transpose(-2, -1).transpose(-4, -3) .flatten(0, 2) ) scatter_mm(bsr.values(), others, indices_data, accumulators=accumulators) out.copy_( accumulators.unflatten( 0, (nbatches, Ms // blocksize[0], Ns // blocksize[0]) ) .movedim( (1, 2, 3, 4), (3, 1, 4, 2) ) # equivalent to .transpose(-4, -3).transpose(-2, -1).transpose(-3, -2) .reshape(nbatches, Ns, Ms) .transpose(-2, -1) ) else: raise NotImplementedError(indices_format) return out.view(out_shape) def _int_bsr_dense_addmm( input: torch.Tensor, bsr: torch.Tensor, dense: torch.Tensor, *, beta=1, alpha=1, left_alpha: Optional[torch.Tensor] = None, right_alpha: Optional[torch.Tensor] = None, out: Optional[torch.Tensor] = None, skip_checks: bool = False, max_grid: Optional[tuple[Optional[int], Optional[int], Optional[int]]] = None, meta: Optional[dict] = None, ): if out is None and dense.dtype is torch.int8: f_name = "_int_bsr_dense_addmm" crow_indices = bsr.crow_indices() batch_ndim = crow_indices.dim() - 1 M = bsr.shape[batch_ndim] N = dense.shape[-1] original_batch_dims_broadcasted = broadcast_batch_dims(f_name, bsr, dense) out = torch.empty( original_batch_dims_broadcasted + (M, N), dtype=torch.int32, device=dense.device, ) return bsr_dense_addmm( input, bsr, dense, beta=beta, alpha=alpha, left_alpha=left_alpha, right_alpha=right_alpha, out=out, skip_checks=skip_checks, max_grid=max_grid, meta=meta, ) def bsr_dense_addmm( input: torch.Tensor, bsr: torch.Tensor, dense: torch.Tensor, *, beta=1, alpha=1, left_alpha: Optional[torch.Tensor] = None, right_alpha: Optional[torch.Tensor] = None, out: Optional[torch.Tensor] = None, skip_checks: bool = False, max_grid: Optional[tuple[Optional[int], Optional[int], Optional[int]]] = None, meta: Optional[dict] = None, ): """Compute out = beta * input + left_alpha.reshape(-1, 1) * (alpha * (bsr @ dense)) * right_alpha.reshape(1, -1) where left_alpha, right_alpha are (* + 1)-D tensors when specified, otherwise, these are treated as tensors filled with ones. 
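    For a non-batched call this is equivalent to the following dense
    reference computation (illustrative only; the actual kernel never
    densifies ``bsr``)::

        prod = alpha * (bsr.to_dense() @ dense)
        if left_alpha is not None:
            prod = left_alpha.reshape(-1, 1) * prod
        if right_alpha is not None:
            prod = prod * right_alpha.reshape(1, -1)
        out = beta * input + prod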
""" f_name = "bsr_dense_addmm" values = bsr.values() crow_indices = bsr.crow_indices() col_indices = bsr.col_indices() batch_ndim = crow_indices.dim() - 1 M, K = bsr.shape[batch_ndim : batch_ndim + 2] blocksize = values.shape[batch_ndim + 1 : batch_ndim + 3] N = dense.shape[-1] # todo: implement checks original_batch_dims_broadcasted = broadcast_batch_dims(f_name, bsr, dense) if out is None: out = dense.new_empty(original_batch_dims_broadcasted + (M, N)) if bsr._nnz() == 0 or alpha == 0 or N == 0 or M == 0 or K == 0: if beta == 0: out.zero_() else: out.copy_(input) if beta != 1: out.mul_(beta) return out left_alpha_is_one = False right_alpha_is_one = False if left_alpha is None: left_alpha_is_one = True left_alpha = dense.new_empty(()).expand( *original_batch_dims_broadcasted, M, N ) # not referenced else: left_alpha = left_alpha.view(*original_batch_dims_broadcasted, M, 1).expand( *original_batch_dims_broadcasted, M, N ) if right_alpha is None: right_alpha_is_one = True right_alpha = dense.new_empty(()).expand( *original_batch_dims_broadcasted, M, N ) # not referenced else: right_alpha = right_alpha.view(*original_batch_dims_broadcasted, 1, N).expand( *original_batch_dims_broadcasted, M, N ) assert left_alpha.stride()[-1] == 0 assert right_alpha.stride()[-2] == 0 if meta is None: sparsity = round(1 - bsr._nnz() * blocksize[0] * blocksize[1] / (M * K), 2) meta = bsr_dense_addmm_meta( M, K, N, blocksize[0], blocksize[1], beta, alpha, sparsity=sparsity, dtype=dense.dtype, out_dtype=out.dtype, ) out_backup = out ( crow_indices, col_indices, values, input, dense, left_alpha, right_alpha, out, ) = prepare_inputs(bsr, input, dense, left_alpha, right_alpha, out) BM, BK = blocksize SPLIT_N = meta.get("SPLIT_N", N // BM) BN = N // SPLIT_N out_untiled = out out = tile_to_blocksize(out, (BM, BN)) dense = tile_to_blocksize(dense, (BK, BN)) input = tile_to_blocksize(input, (BM, BN)) left_alpha = tile_to_blocksize(left_alpha, (BM, BN)) right_alpha = tile_to_blocksize(right_alpha, (BM, BN)) # tl.dot supports float16, float32, int32 as accumulator types. 
dot_out_dtype = { torch.float16: tl.float32, torch.bfloat16: tl.float32, torch.float32: tl.float64, torch.float64: tl.float64, torch.int8: tl.int32, torch.int32: tl.int32, }[out.dtype] n_batches = dense.size(0) n_block_rows = crow_indices.size(-1) - 1 n_block_cols = dense.size(-3) full_grid = (n_batches, n_block_cols, n_block_rows) if max_grid is not None: grid_blocks = tuple(max_grid[:3][::-1]) + (None,) * (3 - len(max_grid[:3])) else: grid_blocks = None tensor_dims_map = { values: (0, None, None), crow_indices: (0, None, -1), col_indices: (0, None, None), input: (0, -3, -4), dense: (0, -3, None), left_alpha: (0, -3, -4), right_alpha: (0, -3, -4), out: (0, -3, -4), } assert alpha != 0 def kernel(grid, *sliced_tensors): _bsr_strided_addmm_kernel[grid]( *ptr_stride_extractor(*sliced_tensors), beta, alpha, beta_is_one=beta == 1, beta_is_nonzero=beta != 0, alpha_is_one=alpha == 1, left_alpha_is_one=left_alpha_is_one, right_alpha_is_one=right_alpha_is_one, BLOCKSIZE_ROW=BM, BLOCKSIZE_INNER=BK, BLOCKSIZE_COL=BN, allow_tf32=dot_out_dtype == tl.float32, acc_dtype=dot_out_dtype, **meta, ) launch_kernel(kernel, tensor_dims_map, full_grid, grid_blocks) if out.data_ptr() != out_backup.data_ptr(): # prepare_inputs has made a copy of out, copy its content back # to out_backup: out_backup.copy_(out_untiled.view(out_backup.shape)) return out_backup if has_triton(): import triton import triton.language as tl @triton.jit def _sampled_addmm_kernel( alpha, beta, IS_BETA_ZERO: tl.constexpr, BLOCKSIZE_ROW: tl.constexpr, BLOCKSIZE_COL: tl.constexpr, k, TILE_K: tl.constexpr, values_ptr, values_batch_stride, values_nnz_stride, values_row_block_stride, values_col_block_stride, crow_indices_ptr, crow_indices_batch_stride, crow_indices_stride, col_indices_ptr, col_indices_batch_stride, col_indices_stride, mat1_ptr, mat1_batch_stride, mat1_tiled_row_stride, mat1_tiled_col_stride, mat1_row_block_stride, mat1_col_block_stride, mat2_ptr, mat2_batch_stride, mat2_tiled_row_stride, mat2_tiled_col_stride, mat2_row_block_stride, mat2_col_block_stride, acc_dtype: tl.constexpr, allow_tf32: tl.constexpr, ): batch_pid = tl.program_id(axis=1) row_block_pid = tl.program_id(axis=0) crow_indices_offset_ptr = ( crow_indices_ptr + crow_indices_batch_stride * batch_pid + crow_indices_stride * row_block_pid ) nnz_offset = tl.load(crow_indices_offset_ptr) nnz_offset_next = tl.load(crow_indices_offset_ptr + crow_indices_stride) # Compute nnz for the row with number row_block_pid. # If it is zero, skip the row. row_nnz = nnz_offset_next - nnz_offset if row_nnz == 0: return row_block_arange = tl.arange(0, BLOCKSIZE_ROW) col_block_arange = tl.arange(0, BLOCKSIZE_COL) # Pointers are set to the first block of the current row. values_block_ptrs = ( values_ptr + values_batch_stride * batch_pid + values_nnz_stride * nnz_offset + values_row_block_stride * row_block_arange[:, None] + values_col_block_stride * col_block_arange[None, :] ) col_index_nnz_ptr = ( col_indices_ptr + col_indices_batch_stride * batch_pid + col_indices_stride * nnz_offset ) # Advance mat1 to the current tiled row, ignore columns. mat1_block_ptrs = ( mat1_ptr + mat1_batch_stride * batch_pid + mat1_tiled_row_stride * row_block_pid + mat1_row_block_stride * row_block_arange[:, None] ) # Advance mat2 in batch and block col dimension. 
mat2_block_ptrs = ( mat2_ptr + mat2_batch_stride * batch_pid + mat2_col_block_stride * col_block_arange[None, :] ) k_tile_arange = tl.arange(0, TILE_K) for _ in range(row_nnz): acc_block = tl.zeros((BLOCKSIZE_ROW, BLOCKSIZE_COL), dtype=acc_dtype) # find column block index col_block = tl.load(col_index_nnz_ptr) for k_tile in range(0, k, TILE_K): k_offsets = k_tile + k_tile_arange mask_k = k_offsets < k mat1_block = tl.load( mat1_block_ptrs + mat1_col_block_stride * k_offsets[None, :], mask=mask_k[None, :], other=0.0, ) mat2_block = tl.load( mat2_block_ptrs + mat2_tiled_col_stride * col_block + mat2_row_block_stride * k_offsets[:, None], mask=mask_k[:, None], other=0.0, ) acc_block += tl.dot( mat1_block, mat2_block, allow_tf32=allow_tf32, out_dtype=acc_dtype ) if IS_BETA_ZERO: acc_block *= alpha else: acc_block = alpha * acc_block + beta * tl.load(values_block_ptrs) # write result tl.store(values_block_ptrs, acc_block.to(values_ptr.dtype.element_ty)) # advance val/col_index ptrs to the next block in the row. values_block_ptrs += values_nnz_stride col_index_nnz_ptr += col_indices_stride @triton.jit def _bsr_strided_dense_rowspace_kernel( # values prologue values_ptr, values_batch_stride, values_nnz_stride, values_row_block_stride, values_col_block_stride, # values epilogue # crow_indices prologue crow_indices_ptr, crow_indices_batch_stride, crow_indices_stride, # crow_indices epilogue # col_indices prologue col_indices_ptr, col_indices_batch_stride, col_indices_stride, # col_indices epilogue # dense prologue dense_ptr, dense_batch_stride, dense_tiled_row_stride, dense_tiled_col_stride, dense_row_block_stride, dense_col_block_stride, # dense epilogue # output prologue output_ptr, output_batch_stride, output_tiled_row_stride, output_tiled_col_stride, output_row_block_stride, output_col_block_stride, # output epilogue # # gh-113754: Always keep all constexpr arguments at the end of # triton kernel arguments list because with triton 2.1 or # earlier non-contiguous outputs will corrupt CUDA state due # to a triton bug (fixed in openai/triton#2262). BLOCKSIZE_ROW: tl.constexpr, BLOCKSIZE_COL: tl.constexpr, acc_dtype: tl.constexpr, allow_tf32: tl.constexpr, GROUP_SIZE_ROW: tl.constexpr, ): batch_pid = tl.program_id(axis=2) row_block_pid = tl.program_id(axis=0) col_block_pid = tl.program_id(axis=1) n_block_rows = tl.num_programs(axis=0) n_block_cols = tl.num_programs(axis=1) row_block_pid, col_block_pid = tl.swizzle2d( row_block_pid, col_block_pid, n_block_rows, n_block_cols, GROUP_SIZE_ROW ) crow_indices_offset_ptr = ( crow_indices_ptr + crow_indices_batch_stride * batch_pid + crow_indices_stride * row_block_pid ) nnz_offset = tl.load(crow_indices_offset_ptr) nnz_offset_next = tl.load(crow_indices_offset_ptr + crow_indices_stride) # Compute nnz for the row with number row_block_pid. # If it is zero, skip the row. row_nnz = nnz_offset_next - nnz_offset if row_nnz == 0: return row_block_arange = tl.arange(0, BLOCKSIZE_ROW) col_block_arange = tl.arange(0, BLOCKSIZE_COL) # Pointers are set to the first block of the current row. values_block_ptrs = ( values_ptr + values_batch_stride * batch_pid + values_nnz_stride * nnz_offset + values_row_block_stride * row_block_arange[:, None] + values_col_block_stride * col_block_arange[None, :] ) # NOTE: dense is advanced into all dimensions but the tiled row one. # That will be advanced in the loop according to values in col_indices. 
dense_block_ptrs = ( dense_ptr + dense_batch_stride * batch_pid + dense_tiled_col_stride * col_block_pid + dense_row_block_stride * col_block_arange[:, None] + dense_col_block_stride * row_block_arange[None, :] ) # Pointers are set to exact write-to locations output_ptrs = ( output_ptr + output_batch_stride * batch_pid + output_tiled_row_stride * row_block_pid + output_tiled_col_stride * col_block_pid + output_row_block_stride * row_block_arange[:, None] + output_col_block_stride * row_block_arange[None, :] ) # Set pointer to the first nonzero element in the current row col_index_nnz_ptr = ( col_indices_ptr + col_indices_batch_stride * batch_pid + col_indices_stride * nnz_offset ) output_acc_block = tl.zeros((BLOCKSIZE_ROW, BLOCKSIZE_COL), dtype=acc_dtype) for _ in range(row_nnz): values_block = tl.load(values_block_ptrs) # find which row of dense needs to get loaded # for multiplication with values_block. dense_row_idx = tl.load(col_index_nnz_ptr) dense_block = tl.load( dense_block_ptrs + dense_tiled_row_stride * dense_row_idx ) # do block mm output_acc_block += tl.dot( values_block, dense_block, allow_tf32=allow_tf32, out_dtype=acc_dtype ) # move val/col_index ptrs to the next block in the row values_block_ptrs += values_nnz_stride col_index_nnz_ptr += col_indices_stride # write back the result tl.store(output_ptrs, output_acc_block.to(output_ptr.dtype.element_ty)) def _run_sampled_addmm_kernel( alpha, beta, is_beta_zero, blocksize, k, tile_k, values, crow_indices, col_indices, mat1, mat2, max_grid, ): n_batches = values.size(0) n_block_rows = crow_indices.size(-1) - 1 full_grid = (n_batches, n_block_rows) if max_grid is not None: grid_blocks = tuple(max_grid[:2][::-1]) + (None,) * (2 - len(max_grid[:2])) else: grid_blocks = None tensor_dims_map = { values: (0, None), crow_indices: (0, -1), col_indices: (0, None), mat1: (0, -4), mat2: (0, None), } if values.dtype in (torch.half, torch.bfloat16): acc_dtype = tl.float32 allow_tf32 = True else: acc_dtype = tl.float64 allow_tf32 = False def kernel(grid, *sliced_tensors): _sampled_addmm_kernel[grid]( alpha, beta, is_beta_zero, *blocksize, k, tile_k, *ptr_stride_extractor(*sliced_tensors), acc_dtype=acc_dtype, allow_tf32=allow_tf32, num_stages=1, num_warps=4, ) launch_kernel(kernel, tensor_dims_map, full_grid, grid_blocks) def sampled_addmm( input: torch.Tensor, mat1: torch.Tensor, mat2: torch.Tensor, *, beta=1.0, alpha=1.0, out: Optional[torch.Tensor] = None, skip_checks: bool = False, max_grid: Optional[tuple[Optional[int], Optional[int], Optional[int]]] = None, ): f_name = "sampled_addmm" check_bsr_layout(f_name, input) input_broadcasted = broadcast_batch_dims_bsr(f_name, input, mat1, mat2) if not skip_checks: check_device(f_name, mat1, input.device) check_device(f_name, mat2, input.device) if beta != 0.0 and input.dtype is torch.bool: check( False, f"{f_name}(): having beta == {beta} not equal to 0.0 with boolean mask is not allowed.", ) if input.dtype is not torch.bool: check_dtype(f_name, mat1, input.dtype) check_dtype(f_name, mat2, input.dtype) else: check_dtype(f_name, mat1, mat2.dtype) check_mm_compatible_shapes(f_name, mat1, mat2) if out is not None: check_bsr_layout(f_name, out) check_device(f_name, out, mat1.device) check_dtype(f_name, out, input.dtype) check( out.shape == input_broadcasted.shape and out._nnz() == input._nnz(), f"{f_name}(): Expects `out` to be of shape {input_broadcasted.shape} " f"and with nnz equal to {input_broadcasted._nnz()} " f"but got out.shape = {out.shape} and out.nnz = {out._nnz()}", ) if out is None: 
out = input_broadcasted.to(mat1.dtype, copy=True) else: out.copy_(input_broadcasted) if out.numel() == 0 or out._nnz() == 0: return out blocksize = out.values().shape[-2:] k = mat1.size(-1) # NOTE: (m, 0) @ (0, n) == zeros(m, n) if alpha == 0.0 or k == 0: out.values().mul_(beta) return out # prepare inputs by reshaping them to be kernel-compatible out_backup = out crow_indices, col_indices, values, mat1, mat2 = prepare_inputs(out, mat1, mat2) mat1 = tile_to_blocksize(mat1, (blocksize[0], k)) mat2 = tile_to_blocksize(mat2, (k, blocksize[1])) tile_k = max(*blocksize) _run_sampled_addmm_kernel( alpha, beta, beta == 0.0, blocksize, k, tile_k, values, crow_indices, col_indices, mat1, mat2, max_grid, ) # If nnz x block strides are not the same in out_backup.values and values, # it means that out_backup.values and values are not the views of each other, # so we have to copy. if out_backup.values().stride()[-3:] != values.stride()[-3:]: out_backup.values().copy_(values.reshape(out_backup.values().shape)) return out_backup def bsr_dense_mm( bsr: torch.Tensor, dense: torch.Tensor, *, out: Optional[torch.Tensor] = None, skip_checks: bool = False, max_grid: Optional[tuple[Optional[int], Optional[int], Optional[int]]] = None, meta: Optional[dict] = None, ): f_name = "bsr_dense_mm" m, _kl = bsr.shape[-2:] if not skip_checks: check_bsr_layout(f_name, bsr) check_device(f_name, bsr, dense.device) check_dtype(f_name, bsr, dense.dtype, (torch.int8,)) check_mm_compatible_shapes(f_name, bsr, dense) n = dense.size(-1) row_block, col_block = bsr.values().shape[-2:] check_blocksize(f_name, (row_block, col_block)) check( not n % 16, f"{f_name}(): dense.size(-1) == {n} should be divisible by 16", ) else: _kr, n = dense.shape[-2:] original_batch_dims_broadcasted = broadcast_batch_dims(f_name, bsr, dense) if out is not None and not skip_checks: expected_out_shape = original_batch_dims_broadcasted + (m, n) check( out.shape == expected_out_shape, "bsr_dense_mm(): `out` argument has wrong shape, " f"expected {expected_out_shape}, but got {out.shape}.", ) check( out.is_contiguous() or out.transpose(-2, -1).is_contiguous(), "bsr_dense_mm(): only row-major/col-major `out` arguments are supported, " "i.e. (out.is_contiguous() or out.transpose(-2, -1).is_contiguous()) " "should be True.", ) # Allocate out if out is None: out = dense.new_empty(original_batch_dims_broadcasted + (m, n)) # Short circuit if lhs is zero if bsr._nnz() == 0: return out.zero_() # with beta==0, addmm ignores input content, so we can use out # as a placeholder for input because their shapes match: return bsr_dense_addmm(out, bsr, dense, alpha=1, beta=0, out=out) @triton.jit def _bsr_softmax_kernel( crow_indices_ptr, crow_indices_batch_stride, crow_indices_stride, values_ptr, values_batch_stride, values_row_block_stride, values_nnz_col_block_stride, row_block, col_block, MAX_ROW_NNZ: tl.constexpr, TILE: tl.constexpr, ): batch_pid = tl.program_id(axis=2) row_block_offset_pid = tl.program_id(axis=1) row_block_pid = tl.program_id(axis=0) crow_indices_offset_ptr = ( crow_indices_ptr + crow_indices_batch_stride * batch_pid + crow_indices_stride * row_block_pid ) nnz_offset = tl.load(crow_indices_offset_ptr) nnz_offset_next = tl.load(crow_indices_offset_ptr + crow_indices_stride) # Compute nnz for the row with number row_block_pid. # If it is zero, skip the row. 
row_nnz = nnz_offset_next - nnz_offset if row_nnz == 0: return row_arange = tl.arange(0, TILE) mask = row_arange < row_nnz * col_block curr_row_values_ptrs = ( values_ptr + values_batch_stride * batch_pid + values_row_block_stride * row_block_offset_pid + nnz_offset * col_block ) # find max in the row row_tile = tl.load( curr_row_values_ptrs + row_arange, mask=mask, other=-float("inf") ).to(tl.float32) max_row_value = tl.max(row_tile, axis=0) for _ in range(TILE, MAX_ROW_NNZ, TILE): row_arange += TILE mask = row_arange < row_nnz * col_block row_tile = tl.load( curr_row_values_ptrs + row_arange, mask=mask, other=-float("inf") ).to(tl.float32) curr_max_row_value = tl.max(row_tile, axis=0) max_row_value = tl.where( max_row_value > curr_max_row_value, max_row_value, curr_max_row_value ) # find denominator for stable softmax num = tl.exp(row_tile - max_row_value) denom = tl.sum(num, axis=0) for _ in range(TILE, MAX_ROW_NNZ, TILE): row_arange -= TILE mask = row_arange < row_nnz * col_block row_tile = tl.load( curr_row_values_ptrs + row_arange, mask=mask, other=-float("inf") ).to(tl.float32) num = tl.exp(row_tile - max_row_value) denom += tl.sum(num, axis=0) # populate output tl.store( curr_row_values_ptrs + row_arange, (num / denom).to(values_ptr.dtype.element_ty), mask=mask, ) for _ in range(TILE, MAX_ROW_NNZ, TILE): row_arange += TILE mask = row_arange < row_nnz * col_block row_tile = tl.load( curr_row_values_ptrs + row_arange, mask=mask, other=-float("inf") ).to(tl.float32) num = tl.exp(row_tile - max_row_value) tl.store( curr_row_values_ptrs + row_arange, (num / denom).to(values_ptr.dtype.element_ty), mask=mask, ) def bsr_softmax(input, max_row_nnz=None): f_name = "bsr_softmax" check_bsr_layout(f_name, input) check_dtype(f_name, input, input.dtype) if input._nnz() == 0 or input.numel() == 0: return input.clone() m, n = input.shape[-2:] nnz = input._nnz() row_block, col_block = input.values().shape[-2:] if max_row_nnz is None: max_row_nnz = triton.next_power_of_2(n) else: max_row_nnz = triton.next_power_of_2(max_row_nnz) crow_indices = input.crow_indices().unsqueeze(0).flatten(0, -2) # reshape values from # (b1, ..., bn, nnz, row_block, col_block) to # (b1 * ... * bn, row_block, nnz * col_block). # This simplifies batch dim manipulation and unlocks # the possibility to access all nnzs in any given row. if input.values().transpose(-3, -2).is_contiguous(): # Need to clone to avoid `contiguous` returning a view. values = input.values().clone() else: values = input.values() values = ( values.transpose(-3, -2) .contiguous() .unsqueeze(0) .flatten(0, -4) .reshape(-1, row_block, nnz * col_block) ) full_grid = (values.shape[0], row_block, m // row_block) grid_blocks = None tensor_dims_map = { # We span nnz number of blocks, not nnz + 1, # hence crow_indices[..., :-1] crow_indices[..., :-1]: (0, None, -1), values: (0, None, None), } def kernel(grid, *sliced_tensors): _bsr_softmax_kernel[grid]( *ptr_stride_extractor(*sliced_tensors), row_block, col_block, max_row_nnz, # Triton's max numel is bounded by 2 ** 17. 
min(2**17, max_row_nnz), ) launch_kernel(kernel, tensor_dims_map, full_grid, grid_blocks) values = ( values.reshape(-1, row_block, nnz, col_block) .transpose(-3, -2) .reshape(*input.values().shape) ) return torch.sparse_compressed_tensor( input.crow_indices().clone(), input.col_indices().clone(), values, size=input.shape, layout=input.layout, ) def _scaled_dot_product_attention( query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attn_mask: Optional[torch.Tensor], dropout_p: float = 0.0, is_causal: bool = False, scale: Optional[float] = None, ): f_name = "_scaled_dot_product_attention" check(not is_causal, f"{f_name}(): is_causal == True is not supported.") check(attn_mask is not None, f"{f_name}(): attn_mask == None is not supported.") assert attn_mask is not None check( attn_mask.layout == torch.sparse_bsr, f"{f_name}(): " f"attn_mask.layout must be {torch.sparse_bsr}, but got " f"attn_mask.layout == {attn_mask.layout}.", ) check_device(f_name, key, query.device) check_device(f_name, value, query.device) check_device(f_name, attn_mask, query.device) check_dtype(f_name, key, query.dtype) check_dtype(f_name, value, query.dtype) if attn_mask.dtype is not torch.bool: check_dtype(f_name, attn_mask, query.dtype) sdpa = sampled_addmm( attn_mask, query, key.transpose(-2, -1), beta=0.0, skip_checks=False ) if scale is None and query.size(-1) == 0 or scale == 0.0: check( False, f"{f_name}(): current value of scale == {scale} " "results in division by zero.", ) scale_factor = 1 / math.sqrt(query.size(-1)) if scale is None else scale sdpa.values().mul_(scale_factor) sdpa = bsr_softmax(sdpa) torch.nn.functional.dropout(sdpa.values(), p=dropout_p, inplace=True) sdpa = bsr_dense_mm(sdpa, value) return sdpa @triton.jit def _scatter_mm2_kernel( M: tl.constexpr, K: tl.constexpr, N: tl.constexpr, blocks_ptr, blocks_stride_P, blocks_stride_M, blocks_stride_K, others_ptr, others_stride_Q, others_stride_K, others_stride_N, accumulators_ptr, accumulators_stride_R, accumulators_stride_M, accumulators_stride_N, pq_offsets_ptr, pq_offsets_stride, pq_ptr, pq_stride_T, pq_stride_1, dot_out_dtype: tl.constexpr, TILE_M: tl.constexpr, TILE_N: tl.constexpr, allow_tf32: tl.constexpr, ): Ms = M // TILE_M pid_t = tl.program_id(axis=0) pid = tl.program_id(axis=1) pid_m = pid // Ms pid_n = pid % Ms rm = pid_m * TILE_M + tl.arange(0, TILE_M) rn = pid_n * TILE_N + tl.arange(0, TILE_N) rk = tl.arange(0, K) A_ptr = blocks_ptr + ( rm[:, None] * blocks_stride_M + rk[None, :] * blocks_stride_K ) B_ptr = others_ptr + ( rk[:, None] * others_stride_K + rn[None, :] * others_stride_N ) g0 = tl.load(pq_offsets_ptr + pid_t * pq_offsets_stride) g1 = tl.load(pq_offsets_ptr + (pid_t + 1) * pq_offsets_stride) if g0 == g1: return acc_block = tl.zeros((TILE_M, TILE_N), dtype=dot_out_dtype) for i in range(g0, g1): p = tl.load(pq_ptr + i * pq_stride_T) q = tl.load(pq_ptr + i * pq_stride_T + pq_stride_1) A = tl.load(A_ptr + p * blocks_stride_P) B = tl.load(B_ptr + q * others_stride_Q) acc_block += tl.dot(A, B, out_dtype=dot_out_dtype, allow_tf32=allow_tf32) C_ptr = ( accumulators_ptr + pid_t * accumulators_stride_R + ( rm[:, None] * accumulators_stride_M + rn[None, :] * accumulators_stride_N ) ) tl.store(C_ptr, acc_block.to(accumulators_ptr.dtype.element_ty)) def _scatter_mm2( blocks: torch.Tensor, others: torch.Tensor, pq_offsets: torch.Tensor, pq_indices: torch.Tensor, accumulators: torch.Tensor, ): _P, M, K = blocks.shape _Q, _, N = others.shape meta = dict( TILE_M=max(16, M // 4), TILE_N=max(16, N // 4), num_stages=1, num_warps=2 ) 
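        # One kernel instance per (accumulator, output tile) pair: grid
        # axis 0 enumerates the len(pq_offsets) - 1 accumulators, axis 1
        # covers the TILE_M x TILE_N tiling of each (M, N) accumulator.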
def grid(META): return ( pq_offsets.shape[0] - 1, triton.cdiv(M, META["TILE_M"]) * triton.cdiv(N, META["TILE_N"]), 1, ) dot_out_dtype = { torch.float16: tl.float32, torch.bfloat16: tl.float32, torch.float32: tl.float64, torch.float64: tl.float64, }[accumulators.dtype] if "allow_tf32" not in meta: meta.update(allow_tf32=dot_out_dtype == tl.float32) _scatter_mm2_kernel[grid]( M, K, N, blocks, blocks.stride(0), blocks.stride(1), blocks.stride(2), others, others.stride(0), others.stride(1), others.stride(2), accumulators, accumulators.stride(0), accumulators.stride(1), accumulators.stride(2), pq_offsets, pq_offsets.stride(0), pq_indices, pq_indices.stride(0), pq_indices.stride(1), dot_out_dtype=dot_out_dtype, **meta, ) @triton.jit def _scatter_mm6_kernel( nbatches, Ms, Ks: tl.constexpr, N, blocks_ptr, blocks_stride_P, blocks_stride_M, blocks_stride_K, others_ptr, others_stride_B, others_stride_K, others_stride_N, accumulators_ptr, accumulators_stride_B, accumulators_stride_M, accumulators_stride_N, c_indices_ptr, r_offsets_ptr, p_offsets_ptr, q_offsets_ptr, is_compressed: tl.constexpr, dot_out_dtype: tl.constexpr, SPLIT_N: tl.constexpr, TILE_M: tl.constexpr, TILE_N: tl.constexpr, GROUP_SIZE: tl.constexpr, allow_tf32: tl.constexpr, ): Ns = N // SPLIT_N BLOCKS_M = Ms // TILE_M BLOCKS_N = Ns // TILE_N pid_t_ = tl.program_id(axis=0) pid = tl.program_id(axis=1) pid_b = pid_t_ % nbatches pid_t = pid_t_ // nbatches num_pid_in_group = GROUP_SIZE * BLOCKS_N group_id = pid // num_pid_in_group first_pid_m = group_id * GROUP_SIZE group_size_m = min(BLOCKS_M - first_pid_m, GROUP_SIZE) pid_m = first_pid_m + (pid % group_size_m) pid_n = (pid % num_pid_in_group) // group_size_m rm = pid_m * TILE_M + tl.arange(0, TILE_M) rn = pid_n * TILE_N + tl.arange(0, TILE_N) rk = tl.arange(0, Ks) A_ptr = blocks_ptr + ( rm[:, None] * blocks_stride_M + rk[None, :] * blocks_stride_K ) B_ptr = ( others_ptr + pid_b * others_stride_B + (rk[:, None] * others_stride_K + rn[None, :] * others_stride_N) ) # When is_compressed is True, r is the only variable that # depends on pid_t. This property allows sorting r values # before calling the kernel. The sorting of r is equivalent to # defining swizzle operator outside of the kernel. 
r = tl.load(r_offsets_ptr + pid_t) if is_compressed: m = (r // N) // Ms n = (r % N) // Ns r0 = tl.load(c_indices_ptr + m) r1 = tl.load(c_indices_ptr + m + 1) g0 = n * r1 + (SPLIT_N - n) * r0 nnz = r1 - r0 else: g0 = tl.load(c_indices_ptr + pid_t) g1 = tl.load(c_indices_ptr + pid_t + 1) nnz = g1 - g0 q_ptr = q_offsets_ptr + g0 acc_block = tl.zeros((TILE_M, TILE_N), dtype=dot_out_dtype) if is_compressed: A_ptr += r0 * blocks_stride_P # type: ignore[possibly-undefined] for _ in range(nnz): q = tl.load(q_ptr) B = tl.load(B_ptr + q) A = tl.load(A_ptr) acc_block += tl.dot( A, B, out_dtype=dot_out_dtype, allow_tf32=allow_tf32 ) A_ptr += blocks_stride_P q_ptr += 1 else: p_ptr = p_offsets_ptr + g0 for _ in range(nnz): q = tl.load(q_ptr) B = tl.load(B_ptr + q) p = tl.load(p_ptr) A = tl.load(A_ptr + p * blocks_stride_P) p_ptr += 1 q_ptr += 1 acc_block += tl.dot( A, B, out_dtype=dot_out_dtype, allow_tf32=allow_tf32 ) C_ptr = ( accumulators_ptr + r + pid_b * accumulators_stride_B + ( rm[:, None] * accumulators_stride_M + rn[None, :] * accumulators_stride_N ) ) tl.store(C_ptr, acc_block.to(accumulators_ptr.dtype.element_ty)) def _scatter_mm6( blocks: torch.Tensor, others: torch.Tensor, c_indices: torch.Tensor, r_offsets: torch.Tensor, p_offsets: torch.Tensor, q_offsets: torch.Tensor, meta: dict, accumulators: torch.Tensor, force_contiguous: bool = True, ): SPLIT_N = meta["SPLIT_N"] _P, Ms, Ks = blocks.shape B, _K, N = others.shape B_, _M, N_ = accumulators.shape assert N_ == N Ns = N // SPLIT_N assert B_ == B def grid(META): return ( r_offsets.shape[0] * B, triton.cdiv(Ms, META["TILE_M"]) * triton.cdiv(Ns, META["TILE_N"]), ) dot_out_dtype = { torch.float16: tl.float32, torch.bfloat16: tl.float32, torch.float32: tl.float64, torch.float64: tl.float64, }[accumulators.dtype] if "allow_tf32" not in meta: meta.update(allow_tf32=dot_out_dtype == tl.float32) assert c_indices.stride(0) == 1 assert r_offsets.stride(0) == 1 assert p_offsets.stride(0) == 1 assert q_offsets.stride(0) == 1 # Re non-contiguous tensor arguments. Sometimes triton kernel # launches may fail with # # RuntimeError: Triton Error [CUDA]: an illegal memory access was encountered # # that appears to be case when the size of a non-contiguous # tensor argument is larger than a certain threshold. Could # this be related to shared memory or L1 cache size of a GPU # card? In anycase, ensuring that tensor arguments are # contiguous seems to avoid the above exception. So, in the # following we'll always convert tensor arguments to # C-contiguous tensors. 
if force_contiguous: blocks = blocks.contiguous() others = others.contiguous() if not accumulators.is_contiguous(): accumulators_ = accumulators.contiguous() else: accumulators_ = accumulators else: accumulators_ = accumulators _scatter_mm6_kernel[grid]( B, Ms, Ks, N, blocks, blocks.stride(0), blocks.stride(1), blocks.stride(2), others, others.stride(0), others.stride(1), others.stride(2), accumulators_, accumulators_.stride(0), accumulators_.stride(1), accumulators_.stride(2), c_indices, r_offsets, p_offsets, q_offsets, dot_out_dtype=dot_out_dtype, **meta, ) if force_contiguous and not accumulators.is_contiguous(): accumulators.copy_(accumulators_) @triton.jit def _bsr_strided_addmm_kernel( # values prologue values_ptr, values_batch_stride, values_nnz_stride, values_row_block_stride, values_col_block_stride, # values epilogue # crow_indices prologue crow_indices_ptr, crow_indices_batch_stride, crow_indices_stride, # crow_indices epilogue # col_indices prologue col_indices_ptr, col_indices_batch_stride, col_indices_stride, # col_indices epilogue # input prologue input_ptr, input_batch_stride, input_tiled_row_stride, input_tiled_col_stride, input_row_block_stride, input_col_block_stride, # input epilogue # dense prologue dense_ptr, dense_batch_stride, dense_tiled_row_stride, dense_tiled_col_stride, dense_row_block_stride, dense_col_block_stride, # dense epilogue # left_alpha prologue left_alpha_ptr, left_alpha_batch_stride, left_alpha_tiled_row_stride, left_alpha_tiled_col_stride: tl.constexpr, left_alpha_row_block_stride, left_alpha_col_block_stride: tl.constexpr, # left_alpha epilogue # right_alpha prologue right_alpha_ptr, right_alpha_batch_stride, right_alpha_tiled_row_stride: tl.constexpr, right_alpha_tiled_col_stride, right_alpha_row_block_stride: tl.constexpr, right_alpha_col_block_stride, # right_alpha epilogue # output prologue output_ptr, output_batch_stride, output_tiled_row_stride, output_tiled_col_stride, output_row_block_stride, output_col_block_stride, # output epilogue beta, alpha, beta_is_one: tl.constexpr, beta_is_nonzero: tl.constexpr, alpha_is_one: tl.constexpr, left_alpha_is_one: tl.constexpr, right_alpha_is_one: tl.constexpr, BLOCKSIZE_ROW: tl.constexpr, BLOCKSIZE_COL: tl.constexpr, BLOCKSIZE_INNER: tl.constexpr, acc_dtype: tl.constexpr, allow_tf32: tl.constexpr, GROUP_SIZE_ROW: tl.constexpr, SPLIT_N: tl.constexpr, ): # left/right_alpha tensors are originally (* + 1)-dimensional assert left_alpha_tiled_col_stride == 0 assert left_alpha_col_block_stride == 0 assert right_alpha_tiled_row_stride == 0 assert right_alpha_row_block_stride == 0 batch_pid = tl.program_id(axis=2) row_block_pid = tl.program_id(axis=0) col_block_pid = tl.program_id(axis=1) n_block_rows = tl.num_programs(axis=0) n_block_cols = tl.num_programs(axis=1) row_block_pid, col_block_pid = tl.swizzle2d( row_block_pid, col_block_pid, n_block_rows, n_block_cols, GROUP_SIZE_ROW ) crow_indices_offset_ptr = ( crow_indices_ptr + crow_indices_batch_stride * batch_pid + crow_indices_stride * row_block_pid ) nnz_offset = tl.load(crow_indices_offset_ptr) nnz_offset_next = tl.load(crow_indices_offset_ptr + crow_indices_stride) # Compute nnz for the row with number row_block_pid. row_nnz = nnz_offset_next - nnz_offset row_block_arange = tl.arange(0, BLOCKSIZE_ROW) inner_block_arange = tl.arange(0, BLOCKSIZE_INNER) col_block_arange = tl.arange(0, BLOCKSIZE_COL) # Pointers are set to the first block of the current row. 
values_block_ptrs = ( values_ptr + values_batch_stride * batch_pid + values_nnz_stride * nnz_offset + values_row_block_stride * row_block_arange[:, None] + values_col_block_stride * inner_block_arange[None, :] ) # NOTE: dense is advanced into all dimensions but the tiled row one. # That will be advanced in the loop according to values in col_indices. dense_block_ptrs = ( dense_ptr + dense_batch_stride * batch_pid + dense_tiled_col_stride * col_block_pid + dense_row_block_stride * inner_block_arange[:, None] + dense_col_block_stride * col_block_arange[None, :] ) # Pointers are set to exact write-to locations output_ptrs = ( output_ptr + output_batch_stride * batch_pid + output_tiled_row_stride * row_block_pid + output_tiled_col_stride * col_block_pid + output_row_block_stride * row_block_arange[:, None] + output_col_block_stride * col_block_arange[None, :] ) # Set pointer to the first nonzero element in the current row col_index_nnz_ptr = ( col_indices_ptr + col_indices_batch_stride * batch_pid + col_indices_stride * nnz_offset ) output_acc_block = tl.zeros((BLOCKSIZE_ROW, BLOCKSIZE_COL), dtype=acc_dtype) for _ in range(row_nnz): values_block = tl.load(values_block_ptrs) # find which row of dense needs to get loaded # for multiplication with values_block. dense_row_idx = tl.load(col_index_nnz_ptr) dense_block = tl.load( dense_block_ptrs + dense_tiled_row_stride * dense_row_idx ) # do block mm output_acc_block += tl.dot( values_block, dense_block, allow_tf32=allow_tf32, out_dtype=acc_dtype ) # move val/col_index ptrs to the next block in the row values_block_ptrs += values_nnz_stride col_index_nnz_ptr += col_indices_stride if not alpha_is_one: output_acc_block *= alpha if not left_alpha_is_one: left_alpha_ptrs = ( left_alpha_ptr + left_alpha_batch_stride * batch_pid + left_alpha_tiled_row_stride * row_block_pid + left_alpha_tiled_col_stride * col_block_pid + left_alpha_row_block_stride * row_block_arange[:, None] + left_alpha_col_block_stride * col_block_arange[None, :] ) output_acc_block *= tl.load(left_alpha_ptrs) if not right_alpha_is_one: right_alpha_ptrs = ( right_alpha_ptr + right_alpha_batch_stride * batch_pid + right_alpha_tiled_row_stride * row_block_pid + right_alpha_tiled_col_stride * col_block_pid + right_alpha_row_block_stride * row_block_arange[:, None] + right_alpha_col_block_stride * col_block_arange[None, :] ) output_acc_block *= tl.load(right_alpha_ptrs) if beta_is_nonzero: input_ptrs = ( input_ptr + input_batch_stride * batch_pid + input_tiled_row_stride * row_block_pid + input_tiled_col_stride * col_block_pid + input_row_block_stride * row_block_arange[:, None] + input_col_block_stride * col_block_arange[None, :] ) if beta_is_one: output_acc_block += tl.load(input_ptrs) else: output_acc_block += beta * tl.load(input_ptrs) # write back the result tl.store(output_ptrs, output_acc_block.to(output_ptr.dtype.element_ty)) else: bsr_softmax = None # type: ignore[assignment] bsr_dense_mm = None # type: ignore[assignment] sampled_addmm = None # type: ignore[assignment] _scaled_dot_product_attention = None # type: ignore[assignment] _scatter_mm2 = None # type: ignore[assignment] _scatter_mm6 = None # type: ignore[assignment] _bsr_strided_addmm_kernel = None # type: ignore[assignment] ```
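As a minimal sketch of how the kernels above are exercised in practice (assuming a CUDA device with triton available; the shapes, dtype, and variable names below are illustrative only), the addmm-style wrapper can be combined with the tuning helper defined in the module that follows:

```py
import torch
from torch.testing import make_tensor
from torch.sparse._triton_ops import bsr_dense_addmm
from torch.sparse._triton_ops_meta import tune_bsr_dense_addmm

# Illustrative shape configuration: M x K bsr tensor with (BM, BK) blocks,
# K x N dense operand, M x N input/output.
M = K = N = 1024
BM = BK = 32
dtype, device = torch.float16, "cuda"

bsr = make_tensor(M, K, dtype=dtype, device=device).to_sparse_bsr((BM, BK))
dense = make_tensor(K, N, dtype=dtype, device=device)
inp = make_tensor(M, N, dtype=dtype, device=device)

# Search for kernel meta parameters for this shape configuration.
meta = tune_bsr_dense_addmm(inp, bsr, dense, beta=1, alpha=1, verbose=True)

# Reuse the tuned parameters in subsequent calls.
out = bsr_dense_addmm(inp, bsr, dense, beta=1, alpha=1, meta=meta)
```

Passing store=True to the tuning call would additionally register the parameters in the runtime parameter database consulted by get_meta, so that later addmm-based calls pick them up automatically.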
======================================================================================================================== SOURCE CODE FILE: _triton_ops_meta.py LINES: 5 SIZE: 496.62 KB PATH: scripts\freecad_env\Lib\site-packages\torch\sparse\_triton_ops_meta.py ENCODING: utf-8 ```py # mypy: allow-untyped-defs """Provides optimal triton kernel parameters. Aim --- The usage of optimal triton kernel parameters may increase the performance of operations several times. For example, for large tensor shapes, the usage of a bsr tensor as mat1 argument in addmm-based operations typically outperforms the corresponding operation with strided-only inputs when the blocked representation of a tensor provides a better alignment with memory access than what the strided representation would provide. Pre-computed kernel parameters ------------------------------ This script finds and stores the optimal triton kernel parameters for a specific set of shape configurations. For instance, the set of shape configurations of the bsr_dense_addmm kernel is defined as input, out: M x N strided tensor mat1: M x K bsr tensor with blocksize (BM, BK) and given sparsity mat2: M x N strided tensor dtype = float16, bfloat16, float32 sparsity = 0.5 M = 256, 512, ..., 16384 K = M N = 256, 512, ..., 131072 BM = 16, 32, ..., 128 BK = BM alpha = 1 beta = 0, 1 GPUs: NVIDIA A100-SXM4-80GB Approximations -------------- It is practically infeasible to pre-compute optimal kernel parameters for all possible shape configurations as well as for all existing GPUs. Therefore, we'll assume that the pre-computed optimal parameters are good enough approximations when 1) the used GPU is any of NVIDIA A100 Tensor Core GPUs, 2) the actual sparsity of mat1 is different from sparsity value 0.5. If a particular shape configuration does not fall in the set of pre-computed kernel parameters, or it does not match the listed approximations above, or the used GPU device is not a NVIDIA A100 GPU, then a reference set of triton kernel parameters will be used when executing operations. The reference kernel parameters are defined in torch/sparse/_triton_ops.py, see the bsr_dense_addmm_meta function, for instance. Computing optimal kernel parameters ----------------------------------- If the approximations listed above are unacceptable, e.g. when one seeks the maximal possible performance, the optimal kernel parameters for a particular GPU can be computed by simply running this script in the pytorch development tree:: cd /path/to/pytorch python setup.py develop python torch/sparse/_triton_ops_meta.py This will compute the optimal kernel parameters for the GPU device available in the host system for all shape configurations listed in "Pre-computed kernel parameters" above. The results will be stored in the database of kernel parameters. Currently, this database is defined within this module (see "BEGIN GENERATED DATA" comment below) and will be modified when the script is run. Create a pytorch PR with the corresponding modifications in this file to make the computed optimal kernel parameters available for other users as pre-computed kernel parameters. Moreover, one can compute the optimal kernel parameters for a specific set of shape configurations and specific sparsity patterns. For that, use the tuning functions provided by this module: tune_bsr_dense_addmm(input, mat1, mat2, beta=1, alpha=1, out=None, verbose=False, store=False) -> meta The tuning functions return a dictionary of optimal kernel parameters that can be passed to the corresponding operation, e.g.
bsr_dense_addmm(..., meta=meta) Or, when store==True, the optimal kernel parameters will be stored in the database of pre-computed kernel parameters at runtime so that all addmm-based operations such as torch.addmm, torch.mm, torch.nn.functional.linear will benefit from using the computed optimal set of kernel parameters. Note that running tune_bsr_dense_addmm can take several minutes. So, use it wisely, e.g. by implementing persistent storage of optimized kernel parameters. See the source code of get_meta and tune_bsr_dense_addmm to learn how to register a custom set of optimal kernel parameters for addmm-based operations. """ __all__ = ["get_meta", "tune_bsr_dense_addmm", "tune__int_bsr_dense_addmm"] import inspect import itertools import re import warnings from typing import Any import torch from torch.hub import tqdm from torch.testing import make_tensor def get_meta(op, key, device_name=None, version=(0, torch.float16, 0.5), exact=False): """Return triton kernel meta parameters of the specified op and its inputs key. Parameters ---------- op (str): The name of an operation whose implementation uses meta parameters. key (tuple): A tuple of op input parameters, e.g. shapes, etc. device_name (optional, str): The name of a device for which op parameters are provided. version (optional, hashable): Specifies the version of parameters. exact (optional, bool): When True, the returned data (if available) corresponds exactly to the specified device_name and version information. Otherwise, if the corresponding data is not available but there exists a data set that is computed for a similar GPU device, then this data set will be returned. Returns ------- result (dict): The requested mapping of parameter names and values, or None when no data is available. If the input `key` contains `"*"`, the result will be a dictionary of keys and mappings that match the given `key`. """ if device_name is None: device_name = torch.cuda.get_device_name() op_data = _operation_device_version_data.get((op, device_name, version)) if op_data is None and not exact: # A lack of op data could be due to using a (slightly) # different GPU model compared to a model for which optimal # meta parameters have been computed. In the following we'll # assume that there is a set of GPU models that all have # a similar set of optimal meta parameters.
if re.match(r"NVIDIA A100[^\d]", device_name) is not None: device_name = "NVIDIA A100-SXM4-80GB" else: return op_data = _operation_device_version_data.get((op, device_name, version)) if op_data is None: return matching_data = {} if "*" in key: for op_key in op_data: if [None for k1, k2 in zip(op_key, key) if k2 != "*" and k1 != k2]: continue matching_data[op_key] = op_data[op_key] else: values = op_data.get(key) if values is not None: matching_data[key] = values matching_meta = {} for op_key, values in matching_data.items(): if op == "scatter_mm": names = ( "GROUP_SIZE", "SPLIT_N", "TILE_M", "TILE_N", "num_stages", "num_warps", ) meta = dict(zip(names, values)) elif op in {"bsr_dense_addmm", "_int_bsr_dense_addmm"}: meta = dict( zip(("GROUP_SIZE_ROW", "SPLIT_N", "num_stages", "num_warps"), values) ) else: raise NotImplementedError(f"names for {op=}") if "*" not in key: return meta matching_meta[op_key] = meta if "*" in key: return matching_meta def update(op, device_name, version, key, value): """Update the db of op parameters.""" # skip storing possible optimization failures: if not value: warnings.warn( f"skipping empty value for {op}: {device_name=} {version=} {key=}" ) return if (op, device_name, version) in _operation_device_version_data: if _operation_device_version_data[op, device_name, version].get(key) == value: return _operation_device_version_data[op, device_name, version][key] = value else: _operation_device_version_data[op, device_name, version] = {key: value} def dump(): """Store the current runtime db state to the module file.""" current_file = inspect.getfile(dump) f = open(current_file) current_content = f.read() f.close() begin_data_str = "# BEGIN GENERATED DATA\n" begin_data_index = current_content.find(begin_data_str) end_data_index = current_content.find(" # END GENERATED DATA\n") if begin_data_index == -1 or end_data_index == -1: warnings.warn( f"{current_file} cannot be updated:" " BEGIN/END GENERATED DATA comment blocks appear to be corrupted" ) return def sort_key(key): op, device_name, version = key version = tuple( (str(item) if isinstance(item, torch.dtype) else item) for item in version ) return (op, device_name, version) part1 = current_content[: begin_data_index + len(begin_data_str)] part2 = current_content[end_data_index:] data_part = [] for op_key in sorted(_operation_device_version_data, key=sort_key): data_part.append(" " + repr(op_key).replace("'", '"') + ": {") op_data = _operation_device_version_data[op_key] data_part.extend(f" {key}: {op_data[key]}," for key in sorted(op_data)) data_part.append(" },") new_content = part1 + "\n".join(data_part) + "\n" + part2 if current_content != new_content: f = open(current_file, "w") f.write(new_content) f.close() def minimize( target_func, initial_parameters, reference_parameters, step_func, max_step=2, verbose=False, all_values=None, ): """Find a dict of parameters that minimizes the target function using the initial dict of parameters and a step function that progresses a specified parameter in a dict of parameters. Parameters ---------- target_func (callable): a functional with the signature ``target_func(parameters: dict) -> float`` initial_parameters (dict): a set of parameters used as an initial value to the minimization process. reference_parameters (dict): a set of parameters used as an reference value with respect to which the speed up is computed. 
step_func (callable): a functional with the signature ``step_func(parameter_name:str, parameter_value:int, direction:int, parameters:dict) -> int`` that increments or decrements (when ``direction`` is positive or negative, respectively) the parameter with given name and value. When return value is equal to ``parameter_value``, it means that no step along the given direction can be made. Returns ------- parameters (dict): a set of parameters that minimizes the target function. speedup_incr (float): a speedup change given in percentage. timing (float): the value of the target function at the parameters. sensitivity_message (str): a message containing sensitivity. information of parameters around the target function minimizer. """ def to_key(parameters): return tuple(parameters[k] for k in sorted(parameters)) def from_key(key, parameters): return dict(zip(sorted(parameters), key)) if all_values is None: all_values = {} directions = list(range(-max_step, max_step + 1)) names = sorted(initial_parameters) all_directions = [] for d_tuple in itertools.product(*((directions,) * len(names))): dist = sum(map(abs, d_tuple)) if dist > 0 and dist <= max_step: all_directions.append((dist, d_tuple)) all_directions.sort() try: reference_target = target_func(reference_parameters) except Exception as msg: if verbose and "out of resource" not in str(msg): print(f"{reference_parameters=} lead to failure: {msg}.") reference_target = None if reference_target is not None: all_values[to_key(reference_parameters)] = reference_target parameters = initial_parameters try: initial_target = target_func(parameters) except Exception as msg: if reference_target is None: if verbose: print( f"{initial_parameters=} lead to failure: {msg}. Optimization failed!" ) return {}, -1, -1, f"{msg}" if verbose and "out of resource" not in str(msg): print( f"{initial_parameters=} lead to failure: {msg}. Using reference parameters instead of initial parameters." ) parameters = reference_parameters initial_target = reference_target if reference_target is None: if verbose: print("Using initial parameters instead of reference parameters.") reference_target = initial_target initial_key = to_key(parameters) minimal_target = all_values[initial_key] = initial_target pbar = tqdm( total=len(all_directions), desc="Tuning...", disable=not verbose, ncols=75, ) while True: for i, (_, d_tuple) in enumerate(all_directions): pbar.update(1) next_parameters = parameters.copy() for name, direction in zip(names, d_tuple): value = next_parameters[name] if direction == 0: continue next_value = step_func(name, value, direction, parameters) if next_value == value: break next_parameters[name] = next_value else: next_key = to_key(next_parameters) if next_key in all_values: continue try: next_target = target_func(next_parameters) except Exception as msg: all_values[next_key] = str(msg) if verbose and "out of resource" not in str(msg): print(f"{next_parameters=} lead to failure: {msg}. 
Skipping.") continue all_values[next_key] = next_target if next_target < minimal_target: minimal_target = next_target parameters = next_parameters pbar.total += i + 1 break else: # ensure stable minimizer: minimizer_keys = { k for k, v in all_values.items() if isinstance(v, float) and abs(1 - v / minimal_target) < 0.001 } minimizer_key = ( initial_key if initial_key in minimizer_keys else min(minimizer_keys) ) parameters = from_key(minimizer_key, parameters) speedup_incr = (1 - minimal_target / reference_target) * 100 if speedup_incr < 0: if verbose: print( f"{speedup_incr=} is negative. Rerunning minimize with reference parameters as initial parameters." ) return minimize( target_func, reference_parameters, reference_parameters, step_func, max_step=max_step, verbose=verbose, all_values=all_values, ) sensitivity = [] for name in parameters: value = parameters[name] rel_diffs = [] for direction in range(-max_step, max_step + 1): if direction == 0: continue next_value = step_func(name, value, direction, parameters) if next_value == value: rel_diffs.append(0) continue next_parameters = parameters.copy() next_parameters[name] = next_value next_key = to_key(next_parameters) next_target = all_values.get(next_key) if next_target is None or isinstance(next_target, str): rel_diffs.append(0) continue rel_diff = (next_target / minimal_target - 1) * 100 rel_diffs.append(rel_diff) sensitivity.append((max(rel_diffs), rel_diffs, name)) sensitivity_message = [f"timing0={initial_target:.3f}"] for _, rel_diffs, name in sorted(sensitivity, reverse=True): left_diffs = "|".join( [f"{rel_diff:.1f}" for rel_diff in rel_diffs[:max_step]] ) right_diffs = "|".join( [f"{rel_diff:.1f}" for rel_diff in rel_diffs[max_step:]] ) sensitivity_message.append( f"{name}={parameters[name]} ({left_diffs}...{right_diffs} %)" ) sensitivity_message = ", ".join(sensitivity_message) return parameters, speedup_incr, minimal_target, sensitivity_message def create_blocked_tensor(B, M, N, blocksize, sparsity, dtype, device): assert ( sparsity <= 1.0 and sparsity >= 0.0 ), "sparsity should be a value between 0 and 1" assert M % blocksize[0] == 0 assert N % blocksize[1] == 0 shape = (B, M // blocksize[0], N // blocksize[1])[int(B == 0) :] A = torch.bernoulli( torch.full(shape, 1 - sparsity, dtype=torch.float32, device=device) ).to(dtype) expected_nnz = int((1 - sparsity) * M * N / (blocksize[0] * blocksize[1])) nonzero_indices = A.flatten().nonzero() actual_nnz = nonzero_indices.shape[0] if actual_nnz > expected_nnz: selected_nonzeros = torch.randperm(actual_nnz)[: actual_nnz - expected_nnz] A.flatten()[nonzero_indices[selected_nonzeros]] = 0 elif actual_nnz < expected_nnz: zero_indices = (A == 0).flatten().nonzero() selected_zeros = torch.randperm(zero_indices.shape[0])[ : expected_nnz - actual_nnz ] A.flatten()[zero_indices[selected_zeros]] = 1 A = torch.repeat_interleave(A, blocksize[0], dim=-2) A = torch.repeat_interleave(A, blocksize[1], dim=-1) return A def optimize_scatter_mm( m, k, n, bm, bk, dtype=torch.float16, device="cuda", sparsity=0.5, force=False ): import triton from torch.sparse._triton_ops import bsr_scatter_mm, bsr_scatter_mm_indices_data key = (m, k, n, bm, bk) version = (0, dtype, sparsity) device_name = torch.cuda.get_device_name() reference_meta = dict( GROUP_SIZE=1, TILE_M=16, TILE_N=16, SPLIT_N=n // 16, num_stages=1, num_warps=1, ) initial_meta = get_meta( "scatter_mm", key, device_name=device_name, version=version, exact=True ) if initial_meta is None: initial_meta = get_meta( "bsr_dense_addmm", key, 
device_name=device_name, version=(0, dtype, 0.5), exact=True, ) if initial_meta is None: initial_meta = reference_meta elif not force: return torch.manual_seed(0) bsr = create_blocked_tensor( 0, m, k, (bm, bk), sparsity, dtype, device ).to_sparse_bsr((bm, bk)) dense = make_tensor(k, n, dtype=dtype, device=device) def bench(meta, bsr=bsr, dense=dense): indices_data = bsr_scatter_mm_indices_data( bsr, dense, indices_format="bsr_strided_mm_compressed", **meta ) def test_func(): return bsr_scatter_mm(bsr, dense, indices_data=indices_data) ms_min = triton.testing.do_bench(test_func, warmup=500, rep=100) return ms_min def step_meta_parameter(name, value, direction, meta, m=m, n=n, k=k, bm=bm, bk=bk): # return next value in positive or negative direction, or # input value if the step will result an invalid # value. The input value is assumed to be valid. is_log = name in {"SPLIT_N", "TILE_M", "TILE_N", "num_warps"} min_value = dict( SPLIT_N=1, TILE_M=16, TILE_N=16, num_warps=1, num_stages=1, GROUP_SIZE=1 )[name] max_value = dict( SPLIT_N=n // meta["TILE_N"], TILE_M=bm, TILE_N=n // meta["SPLIT_N"] ).get(name) value_step = dict( SPLIT_N=2, TILE_M=2, TILE_N=2, num_warps=2, num_stages=1, GROUP_SIZE=1 )[name] if is_log: next_value = ( value * value_step**direction if direction > 0 else value // (value_step ** abs(direction)) ) else: next_value = value + value_step * direction if min_value is not None: next_value = max(next_value, min_value) if max_value is not None: next_value = min(next_value, max_value) if name == "SPLIT_N" and n % next_value != 0: return value # Hard-skip parameter combinations that break CUDA state for pytorch: if (dtype, name, next_value, m, n, k, bm, bk) in { (torch.float32, "num_warps", 32, 256, 256, 256, 16, 16), (torch.float32, "num_warps", 16, 256, 256, 256, 32, 32), (torch.float32, "num_warps", 16, 256, 256, 256, 64, 64), (torch.float32, "num_warps", 16, 256, 256, 256, 128, 128), (torch.float32, "num_warps", 16, 512, 512, 256, 128, 128), } and re.match(r"NVIDIA A100[^\d]", device_name) is not None: return value return next_value meta, speedup, timing, _sensitivity_message = minimize( bench, initial_meta, reference_meta, step_meta_parameter ) if initial_meta is not reference_meta and initial_meta == meta and not force: return print(f"{meta=} {speedup=:.1f} % {timing=:.3f} ms") if speedup < 0: return device_name = torch.cuda.get_device_name() update( "scatter_mm", device_name, version, key, tuple(meta[k] for k in sorted(meta)) ) def tune__int_bsr_dense_addmm( input, bsr, dense, *, beta=1, alpha=1, out=None, store=False, verbose=False, force=False, ): return tune_bsr_dense_addmm( input, bsr, dense, beta=beta, alpha=alpha, out=out, store=store, verbose=verbose, force=force, opname="_int_bsr_dense_addmm", ) def tune_bsr_dense_addmm( input, bsr, dense, *, beta=1, alpha=1, left_alpha=None, right_alpha=None, out=None, store=False, verbose=False, force=False, opname=None, ): """Tune bsr_dense_addmm kernel parameters against the given inputs. When store is True, the tuning results will be stored in the database of kernel parameters. 
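    A minimal illustrative call (here ``inp``, ``bsr`` and ``dense`` stand for
    suitably shaped CUDA tensors; see optimize_bsr_dense_addmm below for a
    complete construction recipe)::

        meta = tune_bsr_dense_addmm(inp, bsr, dense, beta=1, alpha=1, store=True)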
""" import triton if opname is None: opname = "bsr_dense_addmm" if opname == "_int_bsr_dense_addmm": from torch.sparse._triton_ops import _int_bsr_dense_addmm as bsr_dense_addmm else: from torch.sparse._triton_ops import bsr_dense_addmm N = dense.shape[-1] values = bsr.values() crow_indices = bsr.crow_indices() batch_ndim = crow_indices.dim() - 1 M, K = bsr.shape[batch_ndim : batch_ndim + 2] BM, BK = values.shape[batch_ndim + 1 : batch_ndim + 3] # Reference parameters is a set of parameters that leads to a # successful kernel call and the corresponding timing is used as a # reference for computing speedups. Avoid changing the reference # parameters when possible. reference_meta = dict( GROUP_SIZE_ROW=1, num_stages=1, num_warps=4, SPLIT_N=max(N // BM, 1) ) # Compute the key of parameters: sparsity = round(1 - bsr._nnz() * BM * BK / (M * K), 2) dtype = bsr.dtype if out is None: out_dtype = dtype else: out_dtype = out.dtype if out_dtype is dtype: version_dtype = dtype else: version_dtype = (dtype, out_dtype) version = (0, version_dtype, sparsity) key = (M, K, N, BM, BK, beta == 0, beta == 1, alpha == 1) # For tuning, for an initial state, use parameters from the # database if available, otherwise, use the reference parameters. initial_meta = get_meta(opname, key, version=version, exact=True) if initial_meta is None: may_skip_update = False initial_meta = get_meta(opname, key, version=(0, dtype, 0.5), exact=True) if initial_meta is None: initial_meta = reference_meta elif not force: return initial_meta else: may_skip_update = True # The target function that is minimized in the tuning process: def bench(meta, input=input, bsr=bsr, dense=dense, alpha=alpha, out=out): def test_func(): return bsr_dense_addmm( input, bsr, dense, beta=beta, alpha=alpha, left_alpha=left_alpha, right_alpha=right_alpha, meta=meta, out=out, ) return triton.testing.do_bench(test_func, warmup=500, rep=100) # The step function that increments a specified meta parameter: def step_meta_parameter(name, value, direction, meta, M=M, N=N, K=K, BM=BM, BK=BK): # return next value in positive or negative direction, or # input value if the step will result an invalid # value. The input value is assumed to be valid. 
is_log = name in {"SPLIT_N", "num_warps"} min_value = dict(SPLIT_N=1, num_warps=1, num_stages=1, GROUP_SIZE_ROW=1)[name] max_value = dict(SPLIT_N=max(N // BM, 1)).get(name) value_step = dict(SPLIT_N=2, num_warps=2, num_stages=1, GROUP_SIZE_ROW=1)[name] if is_log: next_value = ( value * value_step**direction if direction > 0 else value // (value_step ** abs(direction)) ) else: next_value = value + value_step * direction if min_value is not None: next_value = max(next_value, min_value) if max_value is not None: next_value = min(next_value, max_value) if name == "SPLIT_N" and N % next_value != 0: return value return next_value # Tune: meta, speedup, timing, sensitivity_message = minimize( bench, initial_meta, reference_meta, step_meta_parameter, max_step=2, verbose=verbose, ) if verbose: print(f"-> {sensitivity_message}, {speedup=:.1f} %, {timing=:.3f} ms") if store and not ( may_skip_update and meta == initial_meta and initial_meta is not reference_meta ): device_name = torch.cuda.get_device_name() update( opname, device_name, version, key, tuple(meta[k] for k in sorted(meta)), ) return meta def optimize_bsr_dense_addmm( m, k, n, bm, bk, beta=1, alpha=1, use_left_alpha=False, use_right_alpha=False, dtype=torch.float16, out_dtype=None, device="cuda", sparsity=0.5, force=False, verbose=False, opname=None, ): torch.manual_seed(0) bsr = create_blocked_tensor( 0, m, k, (bm, bk), sparsity, dtype, device ).to_sparse_bsr((bm, bk)) dense = make_tensor(k, n, dtype=dtype, device=device) input = make_tensor(m, n, dtype=dtype, device=device) left_alpha = make_tensor(m, dtype=dtype, device=device) if use_left_alpha else None right_alpha = ( make_tensor(n, dtype=dtype, device=device) if use_right_alpha else None ) if out_dtype is not None: out = dense.new_empty((m, n), dtype=out_dtype) else: out = None tune_bsr_dense_addmm( input, bsr, dense, beta=beta, alpha=alpha, left_alpha=left_alpha, right_alpha=right_alpha, out=out, store=True, force=force, verbose=verbose, opname=opname, ) def main(op="scatter_mm", force=False, dtype=torch.float16, verbose=True): import itertools sizes_lst = [ 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536, 131072, 50432, 65792, ] sizes3_lst = [3 * sz for sz in [64, 128] + sizes_lst if sz <= 2048] shapes_lst = [(sz, sz) for sz in sizes_lst[:-4] + sizes3_lst] shapes_lst.extend([(3072, 768), (768, 3072)]) shapes_lst.extend([(5120, 1280), (1280, 5120)]) if dtype is torch.int8: # triton does not support smaller blocks than 32 blocksize_lst = [(32, 32), (64, 64), (128, 128), (256, 256)] else: blocksize_lst = [(16, 16), (32, 32), (64, 64), (128, 128)] sparsity_lst = [0.5, 0.7, 0.3][:1] for sparsity in sparsity_lst: print(f"{op, dtype, sparsity=}") try: for (M, K), N, (BM, BK) in itertools.product( shapes_lst, sizes_lst, blocksize_lst ): if not (BM <= M and BK <= K and M % BM == 0 and K % BK == 0): continue if op == "scatter_mm": optimize_scatter_mm( M, K, N, BM, BK, force=force, sparsity=sparsity, dtype=dtype ) elif op in {"bsr_dense_addmm", "_int_bsr_dense_addmm"}: if M == K and N == 50432: continue print(f"{M, K, N, (BM, BK)=}") for alpha, beta in [(1, 1), (1, 0)]: optimize_bsr_dense_addmm( M, K, N, BM, BK, beta=beta, alpha=alpha, force=force, sparsity=sparsity, dtype=dtype, verbose=verbose, opname=op, ) else: raise NotImplementedError(op) except KeyboardInterrupt: break except Exception: dump() raise dump() if 0: # Check performance dependence on sparsity and apply # adjustments when differences are noticable (more than 10%). 
# # When using NVIDIA A100 GPU, the performance dependence on # sparsity is insignificant (0 % ... 10 %) for majority of # shapes/blocksizes combinations. However, for a very few # specific size combinations, the effect of sparsity on # performance can be up to 20 %. for (M, K), N, (BM, BK) in itertools.product( shapes_lst, sizes_lst, blocksize_lst ): meta_lst: list = [] key = (M, K, N, BM, BK) for sparsity1 in sparsity_lst: torch.manual_seed(0) bsr = create_blocked_tensor( 0, M, K, (BM, BK), sparsity1, dtype, device="cuda" ).to_sparse_bsr((BM, BK)) dense = make_tensor(K, N, dtype=dtype, device="cuda") meta_lst = [] for sparsity in sparsity_lst: meta = get_meta(op, key, version=(0, dtype, sparsity), exact=True) if meta is None: continue def bench(meta, bsr=bsr, dense=dense): import triton if op == "scatter_mm": from torch.sparse._triton_ops import ( bsr_scatter_mm, bsr_scatter_mm_indices_data, ) indices_data = bsr_scatter_mm_indices_data( bsr, dense, indices_format="bsr_strided_mm_compressed", **meta, ) def test_func(): return bsr_scatter_mm( bsr, dense, indices_data=indices_data ) else: raise NotImplementedError(op) ms_min = triton.testing.do_bench(test_func, warmup=500, rep=100) return ms_min meta_lst.append( (bench(meta), sparsity, tuple(meta[k] for k in sorted(meta))) ) if not meta_lst: continue meta_lst = sorted(meta_lst) index = next( i for i, item in enumerate(meta_lst) if item[1] == sparsity1 ) if meta_lst[0][2] == meta_lst[index][2]: continue speeddiff = (1 - meta_lst[index][0] / meta_lst[0][0]) * 100 if abs(speeddiff) < 10: continue print(sparsity1, index, key, meta_lst, speeddiff) if index > 0: device_name = torch.cuda.get_device_name() meta = get_meta( op, key, version=(0, dtype, meta_lst[0][1]), exact=True ) update( op, device_name, (0, dtype, sparsity1), key, tuple(meta[k] for k in sorted(meta)), ) print("update") dump() _operation_device_version_data: dict[Any, dict] = { # Warning: the data in between the BEGIN/END DATA comment lines # below is generated. It can be updated either manually or via # calling dump function defined above. 
# # Legend [op: key -> data]: # scatter_mm : M, K, N, Ms, Ks -> GROUP_SIZE, SPLIT_N, TILE_M, TILE_N, num_stages, num_warps # bsr_dense_addmm : M, K, N, Ms, Ks, beta==0, beta==1, alpha==1 -> GROUP_SIZE_ROW, SPLIT_N, num_stages, num_warps # # BEGIN GENERATED DATA ("_int_bsr_dense_addmm", "NVIDIA A100-SXM4-80GB", (0, torch.int8, 0.5)): { (192, 192, 256, 32, 32, False, True, True): (2, 8, 1, 4), (192, 192, 256, 32, 32, True, False, True): (2, 8, 5, 4), (192, 192, 512, 32, 32, False, True, True): (1, 16, 1, 4), (192, 192, 512, 32, 32, True, False, True): (1, 16, 5, 4), (192, 192, 1024, 32, 32, False, True, True): (1, 32, 1, 4), (192, 192, 1024, 32, 32, True, False, True): (4, 32, 4, 4), (192, 192, 2048, 32, 32, False, True, True): (2, 64, 1, 4), (192, 192, 2048, 32, 32, True, False, True): (3, 16, 5, 4), (192, 192, 4096, 32, 32, False, True, True): (1, 128, 1, 4), (192, 192, 4096, 32, 32, True, False, True): (1, 128, 1, 4), (192, 192, 8192, 32, 32, False, True, True): (1, 256, 1, 4), (192, 192, 8192, 32, 32, True, False, True): (1, 64, 3, 4), (192, 192, 16384, 32, 32, False, True, True): (2, 512, 1, 4), (192, 192, 16384, 32, 32, True, False, True): (5, 128, 1, 4), (192, 192, 32768, 32, 32, False, True, True): (1, 1024, 1, 4), (192, 192, 32768, 32, 32, True, False, True): (1, 256, 1, 4), (192, 192, 65536, 32, 32, False, True, True): (1, 1024, 1, 8), (192, 192, 65536, 32, 32, True, False, True): (1, 512, 1, 4), (192, 192, 131072, 32, 32, False, True, True): (1, 2048, 1, 8), (192, 192, 131072, 32, 32, True, False, True): (2, 512, 1, 4), (256, 256, 256, 32, 32, False, True, True): (4, 8, 1, 4), (256, 256, 256, 32, 32, True, False, True): (1, 8, 6, 4), (256, 256, 256, 64, 64, False, True, True): (1, 4, 1, 16), (256, 256, 256, 64, 64, True, False, True): (1, 4, 4, 4), (256, 256, 256, 128, 128, False, True, True): (3, 2, 1, 16), (256, 256, 256, 128, 128, True, False, True): (1, 2, 1, 4), (256, 256, 512, 32, 32, False, True, True): (2, 16, 1, 4), (256, 256, 512, 32, 32, True, False, True): (2, 16, 4, 4), (256, 256, 512, 64, 64, False, True, True): (7, 8, 1, 16), (256, 256, 512, 64, 64, True, False, True): (3, 8, 3, 4), (256, 256, 512, 128, 128, False, True, True): (1, 4, 1, 32), (256, 256, 512, 128, 128, True, False, True): (1, 4, 1, 4), (256, 256, 1024, 32, 32, False, True, True): (1, 32, 1, 4), (256, 256, 1024, 32, 32, True, False, True): (1, 8, 6, 4), (256, 256, 1024, 64, 64, False, True, True): (2, 16, 1, 16), (256, 256, 1024, 64, 64, True, False, True): (1, 16, 5, 4), (256, 256, 1024, 128, 128, False, True, True): (4, 8, 1, 32), (256, 256, 1024, 128, 128, True, False, True): (1, 8, 2, 4), (256, 256, 2048, 32, 32, False, True, True): (1, 64, 1, 4), (256, 256, 2048, 32, 32, True, False, True): (2, 32, 3, 2), (256, 256, 2048, 64, 64, False, True, True): (2, 32, 1, 16), (256, 256, 2048, 64, 64, True, False, True): (1, 16, 3, 4), (256, 256, 2048, 128, 128, False, True, True): (1, 16, 1, 32), (256, 256, 2048, 128, 128, True, False, True): (1, 16, 2, 4), (256, 256, 4096, 32, 32, False, True, True): (2, 128, 1, 4), (256, 256, 4096, 32, 32, True, False, True): (1, 32, 3, 2), (256, 256, 4096, 64, 64, False, True, True): (2, 64, 1, 8), (256, 256, 4096, 64, 64, True, False, True): (1, 64, 3, 2), (256, 256, 4096, 128, 128, False, True, True): (2, 32, 1, 32), (256, 256, 4096, 128, 128, True, False, True): (3, 32, 2, 8), (256, 256, 8192, 32, 32, False, True, True): (1, 256, 1, 4), (256, 256, 8192, 32, 32, True, False, True): (1, 64, 1, 4), (256, 256, 8192, 64, 64, False, True, True): (1, 128, 1, 8), (256, 256, 
8192, 64, 64, True, False, True): (2, 128, 1, 4), (256, 256, 8192, 128, 128, False, True, True): (4, 64, 1, 32), (256, 256, 8192, 128, 128, True, False, True): (3, 64, 1, 4), (256, 256, 16384, 32, 32, False, True, True): (1, 256, 1, 8), (256, 256, 16384, 32, 32, True, False, True): (3, 128, 1, 4), (256, 256, 16384, 64, 64, False, True, True): (2, 256, 1, 8), (256, 256, 16384, 64, 64, True, False, True): (2, 256, 1, 4), (256, 256, 16384, 128, 128, False, True, True): (2, 128, 1, 32), (256, 256, 16384, 128, 128, True, False, True): (4, 128, 2, 4), (256, 256, 32768, 32, 32, False, True, True): (2, 512, 1, 8), (256, 256, 32768, 32, 32, True, False, True): (1, 256, 1, 4), (256, 256, 32768, 64, 64, False, True, True): (1, 512, 1, 8), (256, 256, 32768, 64, 64, True, False, True): (1, 512, 1, 4), (256, 256, 32768, 128, 128, False, True, True): (2, 256, 1, 32), (256, 256, 32768, 128, 128, True, False, True): (1, 256, 2, 4), (256, 256, 65536, 32, 32, False, True, True): (1, 1024, 1, 8), (256, 256, 65536, 32, 32, True, False, True): (1, 512, 1, 4), (256, 256, 65536, 64, 64, False, True, True): (1, 1024, 1, 8), (256, 256, 65536, 64, 64, True, False, True): (1, 512, 1, 4), (256, 256, 65536, 128, 128, False, True, True): (2, 512, 1, 16), (256, 256, 65536, 128, 128, True, False, True): (1, 512, 1, 4), (256, 256, 65792, 32, 32, False, True, True): (1, 1028, 1, 8), (256, 256, 65792, 32, 32, True, False, True): (1, 514, 1, 4), (256, 256, 65792, 64, 64, False, True, True): (1, 1028, 1, 8), (256, 256, 65792, 64, 64, True, False, True): (4, 257, 1, 4), (256, 256, 65792, 128, 128, False, True, True): (2, 514, 1, 16), (256, 256, 65792, 128, 128, True, False, True): (3, 514, 1, 4), (256, 256, 131072, 32, 32, False, True, True): (1, 2048, 1, 8), (256, 256, 131072, 32, 32, True, False, True): (2, 1024, 1, 4), (256, 256, 131072, 64, 64, False, True, True): (1, 2048, 1, 8), (256, 256, 131072, 64, 64, True, False, True): (2, 512, 1, 4), (256, 256, 131072, 128, 128, False, True, True): (2, 1024, 1, 16), (256, 256, 131072, 128, 128, True, False, True): (4, 1024, 1, 4), (384, 384, 256, 32, 32, False, True, True): (1, 8, 1, 4), (384, 384, 256, 32, 32, True, False, True): (5, 8, 5, 4), (384, 384, 256, 64, 64, False, True, True): (2, 4, 1, 16), (384, 384, 256, 64, 64, True, False, True): (1, 4, 5, 4), (384, 384, 512, 32, 32, False, True, True): (2, 16, 1, 4), (384, 384, 512, 32, 32, True, False, True): (1, 16, 4, 4), (384, 384, 512, 64, 64, False, True, True): (3, 8, 1, 16), (384, 384, 512, 64, 64, True, False, True): (3, 8, 3, 4), (384, 384, 1024, 32, 32, False, True, True): (2, 32, 1, 4), (384, 384, 1024, 32, 32, True, False, True): (1, 8, 6, 4), (384, 384, 1024, 64, 64, False, True, True): (2, 16, 1, 16), (384, 384, 1024, 64, 64, True, False, True): (1, 16, 5, 4), (384, 384, 2048, 32, 32, False, True, True): (1, 64, 1, 4), (384, 384, 2048, 32, 32, True, False, True): (3, 16, 4, 4), (384, 384, 2048, 64, 64, False, True, True): (2, 32, 1, 16), (384, 384, 2048, 64, 64, True, False, True): (1, 16, 4, 4), (384, 384, 4096, 32, 32, False, True, True): (4, 64, 1, 8), (384, 384, 4096, 32, 32, True, False, True): (4, 32, 1, 4), (384, 384, 4096, 64, 64, False, True, True): (1, 64, 1, 8), (384, 384, 4096, 64, 64, True, False, True): (1, 64, 1, 4), (384, 384, 8192, 32, 32, False, True, True): (1, 128, 1, 8), (384, 384, 8192, 32, 32, True, False, True): (3, 64, 1, 1), (384, 384, 8192, 64, 64, False, True, True): (2, 128, 1, 8), (384, 384, 8192, 64, 64, True, False, True): (1, 64, 2, 2), (384, 384, 16384, 32, 32, False, True, True): 
(1, 256, 1, 8), (384, 384, 16384, 32, 32, True, False, True): (1, 128, 1, 4), (384, 384, 16384, 64, 64, False, True, True): (2, 256, 1, 8), (384, 384, 16384, 64, 64, True, False, True): (2, 128, 1, 4), (384, 384, 32768, 32, 32, False, True, True): (1, 512, 1, 8), (384, 384, 32768, 32, 32, True, False, True): (1, 256, 1, 4), (384, 384, 32768, 64, 64, False, True, True): (1, 512, 1, 8), (384, 384, 32768, 64, 64, True, False, True): (1, 256, 3, 2), (384, 384, 65536, 32, 32, False, True, True): (1, 1024, 1, 8), (384, 384, 65536, 32, 32, True, False, True): (1, 512, 3, 2), (384, 384, 65536, 64, 64, False, True, True): (2, 1024, 1, 8), (384, 384, 65536, 64, 64, True, False, True): (3, 256, 3, 4), (384, 384, 131072, 32, 32, False, True, True): (1, 2048, 1, 8), (384, 384, 131072, 32, 32, True, False, True): (1, 1024, 3, 2), (384, 384, 131072, 64, 64, False, True, True): (1, 2048, 1, 8), (384, 384, 131072, 64, 64, True, False, True): (2, 512, 3, 4), (512, 512, 256, 32, 32, False, True, True): (1, 8, 1, 4), (512, 512, 256, 32, 32, True, False, True): (4, 8, 4, 4), (512, 512, 256, 64, 64, False, True, True): (3, 4, 1, 16), (512, 512, 256, 64, 64, True, False, True): (2, 4, 5, 4), (512, 512, 256, 128, 128, False, True, True): (4, 2, 1, 16), (512, 512, 256, 128, 128, True, False, True): (1, 2, 3, 4), (512, 512, 256, 256, 256, False, True, True): (1, 1, 1, 32), (512, 512, 256, 256, 256, True, False, True): (2, 1, 1, 32), (512, 512, 512, 32, 32, False, True, True): (3, 16, 1, 4), (512, 512, 512, 32, 32, True, False, True): (1, 8, 4, 2), (512, 512, 512, 64, 64, False, True, True): (2, 8, 1, 16), (512, 512, 512, 64, 64, True, False, True): (2, 8, 5, 4), (512, 512, 512, 128, 128, False, True, True): (3, 4, 1, 16), (512, 512, 512, 128, 128, True, False, True): (1, 4, 3, 4), (512, 512, 512, 256, 256, False, True, True): (1, 2, 1, 32), (512, 512, 512, 256, 256, True, False, True): (2, 2, 1, 32), (512, 512, 1024, 32, 32, False, True, True): (2, 32, 1, 4), (512, 512, 1024, 32, 32, True, False, True): (4, 16, 3, 2), (512, 512, 1024, 64, 64, False, True, True): (4, 16, 1, 16), (512, 512, 1024, 64, 64, True, False, True): (1, 8, 4, 4), (512, 512, 1024, 128, 128, False, True, True): (1, 8, 1, 32), (512, 512, 1024, 128, 128, True, False, True): (1, 8, 3, 4), (512, 512, 1024, 256, 256, False, True, True): (4, 4, 1, 32), (512, 512, 1024, 256, 256, True, False, True): (2, 4, 1, 32), (512, 512, 2048, 32, 32, False, True, True): (3, 32, 1, 8), (512, 512, 2048, 32, 32, True, False, True): (1, 16, 3, 4), (512, 512, 2048, 64, 64, False, True, True): (1, 32, 1, 8), (512, 512, 2048, 64, 64, True, False, True): (1, 32, 3, 2), (512, 512, 2048, 128, 128, False, True, True): (4, 16, 1, 32), (512, 512, 2048, 128, 128, True, False, True): (1, 16, 3, 4), (512, 512, 2048, 256, 256, False, True, True): (1, 8, 1, 32), (512, 512, 2048, 256, 256, True, False, True): (3, 8, 1, 32), (512, 512, 4096, 32, 32, False, True, True): (1, 64, 1, 8), (512, 512, 4096, 32, 32, True, False, True): (5, 32, 1, 4), (512, 512, 4096, 64, 64, False, True, True): (1, 64, 1, 8), (512, 512, 4096, 64, 64, True, False, True): (1, 64, 1, 4), (512, 512, 4096, 128, 128, False, True, True): (5, 32, 1, 32), (512, 512, 4096, 128, 128, True, False, True): (2, 32, 3, 4), (512, 512, 4096, 256, 256, False, True, True): (1, 16, 1, 32), (512, 512, 4096, 256, 256, True, False, True): (3, 16, 1, 32), (512, 512, 8192, 32, 32, False, True, True): (3, 128, 1, 8), (512, 512, 8192, 32, 32, True, False, True): (3, 64, 1, 4), (512, 512, 8192, 64, 64, False, True, True): (4, 128, 1, 
8), (512, 512, 8192, 64, 64, True, False, True): (1, 64, 3, 2), (512, 512, 8192, 128, 128, False, True, True): (5, 64, 1, 32), (512, 512, 8192, 128, 128, True, False, True): (1, 64, 2, 4), (512, 512, 8192, 256, 256, False, True, True): (1, 32, 1, 32), (512, 512, 8192, 256, 256, True, False, True): (1, 32, 1, 32), (512, 512, 16384, 32, 32, False, True, True): (1, 256, 1, 8), (512, 512, 16384, 32, 32, True, False, True): (2, 128, 1, 4), (512, 512, 16384, 64, 64, False, True, True): (2, 256, 1, 8), (512, 512, 16384, 64, 64, True, False, True): (1, 128, 3, 2), (512, 512, 16384, 128, 128, False, True, True): (4, 128, 1, 16), (512, 512, 16384, 128, 128, True, False, True): (2, 128, 1, 4), (512, 512, 16384, 256, 256, False, True, True): (1, 64, 1, 32), (512, 512, 16384, 256, 256, True, False, True): (2, 64, 1, 32), (512, 512, 32768, 32, 32, False, True, True): (1, 512, 1, 8), (512, 512, 32768, 32, 32, True, False, True): (2, 256, 1, 4), (512, 512, 32768, 64, 64, False, True, True): (1, 512, 1, 8), (512, 512, 32768, 64, 64, True, False, True): (1, 256, 3, 2), (512, 512, 32768, 128, 128, False, True, True): (4, 256, 1, 16), (512, 512, 32768, 128, 128, True, False, True): (2, 256, 1, 4), (512, 512, 32768, 256, 256, False, True, True): (1, 128, 1, 32), (512, 512, 32768, 256, 256, True, False, True): (2, 128, 1, 32), (512, 512, 65536, 32, 32, False, True, True): (1, 1024, 1, 8), (512, 512, 65536, 32, 32, True, False, True): (2, 512, 1, 2), (512, 512, 65536, 64, 64, False, True, True): (1, 1024, 1, 8), (512, 512, 65536, 64, 64, True, False, True): (1, 512, 3, 2), (512, 512, 65536, 128, 128, False, True, True): (4, 512, 1, 16), (512, 512, 65536, 128, 128, True, False, True): (1, 512, 1, 4), (512, 512, 65536, 256, 256, False, True, True): (1, 256, 1, 32), (512, 512, 65536, 256, 256, True, False, True): (1, 256, 1, 32), (512, 512, 65792, 32, 32, False, True, True): (1, 1028, 1, 8), (512, 512, 65792, 32, 32, True, False, True): (1, 514, 3, 2), (512, 512, 65792, 64, 64, False, True, True): (1, 1028, 1, 8), (512, 512, 65792, 64, 64, True, False, True): (2, 257, 3, 4), (512, 512, 65792, 128, 128, False, True, True): (4, 514, 1, 16), (512, 512, 65792, 128, 128, True, False, True): (1, 514, 1, 4), (512, 512, 65792, 256, 256, False, True, True): (1, 257, 1, 32), (512, 512, 65792, 256, 256, True, False, True): (2, 257, 1, 32), (512, 512, 131072, 32, 32, False, True, True): (1, 2048, 1, 8), (512, 512, 131072, 32, 32, True, False, True): (1, 1024, 3, 2), (512, 512, 131072, 64, 64, False, True, True): (1, 2048, 1, 8), (512, 512, 131072, 64, 64, True, False, True): (1, 1024, 3, 2), (512, 512, 131072, 128, 128, False, True, True): (4, 1024, 1, 16), (512, 512, 131072, 128, 128, True, False, True): (1, 1024, 1, 4), (512, 512, 131072, 256, 256, False, True, True): (1, 512, 1, 32), (512, 512, 131072, 256, 256, True, False, True): (2, 512, 1, 32), (768, 768, 256, 32, 32, False, True, True): (1, 8, 1, 4), (768, 768, 256, 32, 32, True, False, True): (2, 8, 4, 4), (768, 768, 256, 64, 64, False, True, True): (3, 4, 1, 16), (768, 768, 256, 64, 64, True, False, True): (2, 4, 4, 4), (768, 768, 256, 128, 128, False, True, True): (1, 2, 1, 8), (768, 768, 256, 128, 128, True, False, True): (1, 2, 3, 4), (768, 768, 512, 32, 32, False, True, True): (1, 16, 1, 4), (768, 768, 512, 32, 32, True, False, True): (1, 4, 5, 4), (768, 768, 512, 64, 64, False, True, True): (1, 8, 3, 32), (768, 768, 512, 64, 64, True, False, True): (4, 8, 4, 4), (768, 768, 512, 128, 128, False, True, True): (4, 4, 1, 16), (768, 768, 512, 128, 128, True, False, 
True): (4, 4, 3, 4), (768, 768, 1024, 32, 32, False, True, True): (1, 16, 1, 8), (768, 768, 1024, 32, 32, True, False, True): (1, 8, 3, 4), (768, 768, 1024, 64, 64, False, True, True): (3, 16, 1, 16), (768, 768, 1024, 64, 64, True, False, True): (1, 8, 4, 4), (768, 768, 1024, 128, 128, False, True, True): (3, 8, 1, 32), (768, 768, 1024, 128, 128, True, False, True): (1, 8, 3, 4), (768, 768, 2048, 32, 32, False, True, True): (2, 32, 1, 8), (768, 768, 2048, 32, 32, True, False, True): (3, 16, 1, 4), (768, 768, 2048, 64, 64, False, True, True): (1, 32, 1, 8), (768, 768, 2048, 64, 64, True, False, True): (4, 8, 3, 4), (768, 768, 2048, 128, 128, False, True, True): (1, 16, 1, 32), (768, 768, 2048, 128, 128, True, False, True): (1, 16, 3, 4), (768, 768, 4096, 32, 32, False, True, True): (1, 64, 1, 8), (768, 768, 4096, 32, 32, True, False, True): (1, 32, 1, 1), (768, 768, 4096, 64, 64, False, True, True): (2, 64, 1, 8), (768, 768, 4096, 64, 64, True, False, True): (1, 32, 2, 2), (768, 768, 4096, 128, 128, False, True, True): (1, 32, 1, 32), (768, 768, 4096, 128, 128, True, False, True): (6, 32, 1, 4), (768, 768, 8192, 32, 32, False, True, True): (1, 128, 1, 8), (768, 768, 8192, 32, 32, True, False, True): (1, 64, 1, 4), (768, 768, 8192, 64, 64, False, True, True): (1, 128, 1, 8), (768, 768, 8192, 64, 64, True, False, True): (4, 32, 3, 4), (768, 768, 8192, 128, 128, False, True, True): (2, 64, 1, 16), (768, 768, 8192, 128, 128, True, False, True): (2, 64, 3, 4), (768, 768, 16384, 32, 32, False, True, True): (1, 256, 1, 8), (768, 768, 16384, 32, 32, True, False, True): (1, 128, 1, 4), (768, 768, 16384, 64, 64, False, True, True): (1, 256, 1, 8), (768, 768, 16384, 64, 64, True, False, True): (1, 128, 3, 2), (768, 768, 16384, 128, 128, False, True, True): (2, 128, 1, 16), (768, 768, 16384, 128, 128, True, False, True): (2, 128, 1, 4), (768, 768, 32768, 32, 32, False, True, True): (1, 512, 1, 8), (768, 768, 32768, 32, 32, True, False, True): (1, 256, 3, 2), (768, 768, 32768, 64, 64, False, True, True): (2, 512, 1, 8), (768, 768, 32768, 64, 64, True, False, True): (1, 256, 3, 2), (768, 768, 32768, 128, 128, False, True, True): (2, 256, 1, 16), (768, 768, 32768, 128, 128, True, False, True): (3, 256, 1, 4), (768, 768, 65536, 32, 32, False, True, True): (1, 1024, 1, 8), (768, 768, 65536, 32, 32, True, False, True): (1, 512, 3, 2), (768, 768, 65536, 64, 64, False, True, True): (2, 512, 1, 4), (768, 768, 65536, 64, 64, True, False, True): (1, 512, 3, 2), (768, 768, 65536, 128, 128, False, True, True): (2, 512, 1, 16), (768, 768, 65536, 128, 128, True, False, True): (2, 512, 1, 4), (768, 768, 131072, 32, 32, False, True, True): (1, 2048, 1, 8), (768, 768, 131072, 32, 32, True, False, True): (1, 1024, 3, 2), (768, 768, 131072, 64, 64, False, True, True): (2, 1024, 1, 4), (768, 768, 131072, 64, 64, True, False, True): (2, 1024, 3, 2), (768, 768, 131072, 128, 128, False, True, True): (2, 1024, 1, 16), (768, 768, 131072, 128, 128, True, False, True): (2, 1024, 1, 4), (768, 3072, 256, 32, 32, False, True, True): (3, 8, 4, 8), (768, 3072, 256, 32, 32, True, False, True): (3, 8, 5, 4), (768, 3072, 256, 64, 64, False, True, True): (1, 4, 4, 16), (768, 3072, 256, 64, 64, True, False, True): (1, 4, 4, 4), (768, 3072, 256, 128, 128, False, True, True): (2, 2, 1, 8), (768, 3072, 256, 128, 128, True, False, True): (2, 2, 4, 4), (768, 3072, 256, 256, 256, False, True, True): (1, 1, 1, 32), (768, 3072, 256, 256, 256, True, False, True): (1, 1, 1, 32), (768, 3072, 512, 32, 32, False, True, True): (1, 16, 1, 4), (768, 3072, 
512, 32, 32, True, False, True): (2, 4, 4, 4), (768, 3072, 512, 64, 64, False, True, True): (3, 8, 4, 16), (768, 3072, 512, 64, 64, True, False, True): (1, 8, 4, 4), (768, 3072, 512, 128, 128, False, True, True): (2, 4, 1, 8), (768, 3072, 512, 128, 128, True, False, True): (4, 4, 3, 4), (768, 3072, 512, 256, 256, False, True, True): (1, 2, 1, 32), (768, 3072, 512, 256, 256, True, False, True): (1, 2, 1, 32), (768, 3072, 1024, 32, 32, False, True, True): (1, 16, 1, 8), (768, 3072, 1024, 32, 32, True, False, True): (3, 8, 3, 4), (768, 3072, 1024, 64, 64, False, True, True): (2, 16, 1, 16), (768, 3072, 1024, 64, 64, True, False, True): (1, 8, 3, 4), (768, 3072, 1024, 128, 128, False, True, True): (1, 8, 1, 8), (768, 3072, 1024, 128, 128, True, False, True): (3, 8, 4, 4), (768, 3072, 1024, 256, 256, False, True, True): (1, 4, 1, 32), (768, 3072, 1024, 256, 256, True, False, True): (4, 4, 1, 32), (768, 3072, 2048, 32, 32, False, True, True): (3, 32, 1, 8), (768, 3072, 2048, 32, 32, True, False, True): (4, 8, 3, 4), (768, 3072, 2048, 64, 64, False, True, True): (5, 16, 1, 16), (768, 3072, 2048, 64, 64, True, False, True): (6, 8, 3, 4), (768, 3072, 2048, 128, 128, False, True, True): (2, 16, 1, 16), (768, 3072, 2048, 128, 128, True, False, True): (1, 16, 4, 4), (768, 3072, 2048, 256, 256, False, True, True): (1, 8, 1, 32), (768, 3072, 2048, 256, 256, True, False, True): (1, 8, 1, 32), (768, 3072, 4096, 32, 32, False, True, True): (1, 64, 1, 8), (768, 3072, 4096, 32, 32, True, False, True): (1, 32, 3, 4), (768, 3072, 4096, 64, 64, False, True, True): (1, 64, 1, 8), (768, 3072, 4096, 64, 64, True, False, True): (2, 16, 3, 4), (768, 3072, 4096, 128, 128, False, True, True): (1, 32, 1, 8), (768, 3072, 4096, 128, 128, True, False, True): (2, 32, 2, 4), (768, 3072, 4096, 256, 256, False, True, True): (1, 16, 1, 32), (768, 3072, 4096, 256, 256, True, False, True): (1, 16, 1, 32), (768, 3072, 8192, 32, 32, False, True, True): (1, 128, 1, 8), (768, 3072, 8192, 32, 32, True, False, True): (1, 64, 1, 4), (768, 3072, 8192, 64, 64, False, True, True): (1, 128, 1, 8), (768, 3072, 8192, 64, 64, True, False, True): (2, 32, 3, 4), (768, 3072, 8192, 128, 128, False, True, True): (2, 64, 1, 16), (768, 3072, 8192, 128, 128, True, False, True): (2, 64, 3, 4), (768, 3072, 8192, 256, 256, False, True, True): (1, 32, 1, 32), (768, 3072, 8192, 256, 256, True, False, True): (1, 32, 1, 32), (768, 3072, 16384, 32, 32, False, True, True): (1, 256, 1, 8), (768, 3072, 16384, 32, 32, True, False, True): (1, 128, 1, 4), (768, 3072, 16384, 64, 64, False, True, True): (1, 256, 1, 8), (768, 3072, 16384, 64, 64, True, False, True): (2, 64, 3, 4), (768, 3072, 16384, 128, 128, False, True, True): (2, 128, 1, 16), (768, 3072, 16384, 128, 128, True, False, True): (2, 128, 3, 4), (768, 3072, 16384, 256, 256, False, True, True): (1, 64, 1, 32), (768, 3072, 16384, 256, 256, True, False, True): (1, 64, 1, 32), (768, 3072, 32768, 32, 32, False, True, True): (1, 512, 1, 8), (768, 3072, 32768, 32, 32, True, False, True): (1, 256, 3, 2), (768, 3072, 32768, 64, 64, False, True, True): (1, 512, 1, 8), (768, 3072, 32768, 64, 64, True, False, True): (3, 128, 3, 4), (768, 3072, 32768, 128, 128, False, True, True): (2, 256, 1, 16), (768, 3072, 32768, 128, 128, True, False, True): (2, 256, 3, 4), (768, 3072, 32768, 256, 256, False, True, True): (1, 128, 1, 32), (768, 3072, 32768, 256, 256, True, False, True): (1, 128, 1, 32), (768, 3072, 50432, 32, 32, False, True, True): (1, 788, 1, 8), (768, 3072, 50432, 32, 32, True, False, True): (1, 394, 3, 2), 
(768, 3072, 50432, 64, 64, False, True, True): (1, 788, 1, 8), (768, 3072, 50432, 64, 64, True, False, True): (2, 197, 3, 4), (768, 3072, 50432, 128, 128, False, True, True): (2, 394, 1, 16), (768, 3072, 50432, 128, 128, True, False, True): (2, 394, 3, 4), (768, 3072, 50432, 256, 256, False, True, True): (1, 197, 1, 32), (768, 3072, 50432, 256, 256, True, False, True): (1, 197, 1, 32), (768, 3072, 65536, 32, 32, False, True, True): (1, 1024, 1, 8), (768, 3072, 65536, 32, 32, True, False, True): (1, 512, 3, 2), (768, 3072, 65536, 64, 64, False, True, True): (1, 1024, 1, 8), (768, 3072, 65536, 64, 64, True, False, True): (2, 256, 3, 4), (768, 3072, 65536, 128, 128, False, True, True): (2, 512, 1, 16), (768, 3072, 65536, 128, 128, True, False, True): (2, 512, 3, 4), (768, 3072, 65536, 256, 256, False, True, True): (1, 256, 1, 32), (768, 3072, 65536, 256, 256, True, False, True): (1, 256, 1, 32), (768, 3072, 131072, 32, 32, False, True, True): (1, 2048, 1, 8), (768, 3072, 131072, 32, 32, True, False, True): (1, 1024, 3, 2), (768, 3072, 131072, 64, 64, False, True, True): (1, 2048, 1, 8), (768, 3072, 131072, 64, 64, True, False, True): (2, 512, 3, 4), (768, 3072, 131072, 128, 128, False, True, True): (2, 1024, 1, 16), (768, 3072, 131072, 128, 128, True, False, True): (1, 1024, 3, 4), (768, 3072, 131072, 256, 256, False, True, True): (1, 512, 1, 32), (768, 3072, 131072, 256, 256, True, False, True): (1, 512, 1, 32), (1024, 1024, 256, 32, 32, False, True, True): (1, 8, 1, 4), (1024, 1024, 256, 32, 32, True, False, True): (1, 8, 5, 4), (1024, 1024, 256, 64, 64, False, True, True): (1, 4, 1, 16), (1024, 1024, 256, 64, 64, True, False, True): (4, 4, 4, 4), (1024, 1024, 256, 128, 128, False, True, True): (1, 2, 1, 8), (1024, 1024, 256, 128, 128, True, False, True): (1, 2, 3, 8), (1024, 1024, 256, 256, 256, False, True, True): (1, 1, 1, 32), (1024, 1024, 256, 256, 256, True, False, True): (1, 1, 1, 32), (1024, 1024, 512, 32, 32, False, True, True): (5, 16, 1, 4), (1024, 1024, 512, 32, 32, True, False, True): (2, 8, 4, 2), (1024, 1024, 512, 64, 64, False, True, True): (4, 8, 1, 16), (1024, 1024, 512, 64, 64, True, False, True): (1, 4, 3, 4), (1024, 1024, 512, 128, 128, False, True, True): (3, 4, 1, 16), (1024, 1024, 512, 128, 128, True, False, True): (1, 4, 2, 4), (1024, 1024, 512, 256, 256, False, True, True): (1, 2, 1, 32), (1024, 1024, 512, 256, 256, True, False, True): (1, 2, 1, 32), (1024, 1024, 1024, 32, 32, False, True, True): (1, 16, 1, 8), (1024, 1024, 1024, 32, 32, True, False, True): (1, 8, 3, 4), (1024, 1024, 1024, 64, 64, False, True, True): (3, 16, 1, 8), (1024, 1024, 1024, 64, 64, True, False, True): (1, 16, 3, 2), (1024, 1024, 1024, 128, 128, False, True, True): (1, 8, 1, 16), (1024, 1024, 1024, 128, 128, True, False, True): (2, 8, 3, 8), (1024, 1024, 1024, 256, 256, False, True, True): (1, 4, 1, 32), (1024, 1024, 1024, 256, 256, True, False, True): (2, 4, 1, 32), (1024, 1024, 2048, 32, 32, False, True, True): (2, 32, 1, 8), (1024, 1024, 2048, 32, 32, True, False, True): (3, 16, 1, 4), (1024, 1024, 2048, 64, 64, False, True, True): (1, 32, 1, 8), (1024, 1024, 2048, 64, 64, True, False, True): (3, 32, 1, 4), (1024, 1024, 2048, 128, 128, False, True, True): (4, 16, 1, 16), (1024, 1024, 2048, 128, 128, True, False, True): (1, 16, 3, 4), (1024, 1024, 2048, 256, 256, False, True, True): (1, 8, 1, 32), (1024, 1024, 2048, 256, 256, True, False, True): (1, 8, 1, 32), (1024, 1024, 4096, 32, 32, False, True, True): (4, 64, 1, 8), (1024, 1024, 4096, 32, 32, True, False, True): (3, 32, 1, 4), 
(1024, 1024, 4096, 64, 64, False, True, True): (3, 64, 1, 8), (1024, 1024, 4096, 64, 64, True, False, True): (1, 32, 3, 2), (1024, 1024, 4096, 128, 128, False, True, True): (4, 32, 1, 16), (1024, 1024, 4096, 128, 128, True, False, True): (2, 32, 2, 4), (1024, 1024, 4096, 256, 256, False, True, True): (1, 16, 1, 32), (1024, 1024, 4096, 256, 256, True, False, True): (7, 16, 1, 32), (1024, 1024, 8192, 32, 32, False, True, True): (1, 128, 1, 8), (1024, 1024, 8192, 32, 32, True, False, True): (4, 64, 1, 4), (1024, 1024, 8192, 64, 64, False, True, True): (2, 128, 1, 8), (1024, 1024, 8192, 64, 64, True, False, True): (3, 32, 3, 4), (1024, 1024, 8192, 128, 128, False, True, True): (4, 64, 1, 16), (1024, 1024, 8192, 128, 128, True, False, True): (2, 64, 2, 4), (1024, 1024, 8192, 256, 256, False, True, True): (1, 32, 1, 32), (1024, 1024, 8192, 256, 256, True, False, True): (1, 32, 1, 32), (1024, 1024, 16384, 32, 32, False, True, True): (1, 256, 1, 8), (1024, 1024, 16384, 32, 32, True, False, True): (1, 128, 1, 4), (1024, 1024, 16384, 64, 64, False, True, True): (1, 256, 1, 8), (1024, 1024, 16384, 64, 64, True, False, True): (4, 64, 3, 4), (1024, 1024, 16384, 128, 128, False, True, True): (4, 128, 1, 16), (1024, 1024, 16384, 128, 128, True, False, True): (1, 128, 3, 4), (1024, 1024, 16384, 256, 256, False, True, True): (1, 64, 1, 32), (1024, 1024, 16384, 256, 256, True, False, True): (1, 64, 1, 32), (1024, 1024, 32768, 32, 32, False, True, True): (1, 512, 1, 8), (1024, 1024, 32768, 32, 32, True, False, True): (1, 256, 3, 2), (1024, 1024, 32768, 64, 64, False, True, True): (1, 256, 1, 4), (1024, 1024, 32768, 64, 64, True, False, True): (4, 128, 3, 4), (1024, 1024, 32768, 128, 128, False, True, True): (4, 256, 1, 16), (1024, 1024, 32768, 128, 128, True, False, True): (2, 256, 3, 4), (1024, 1024, 32768, 256, 256, False, True, True): (1, 128, 1, 32), (1024, 1024, 32768, 256, 256, True, False, True): (2, 128, 1, 32), (1024, 1024, 65536, 32, 32, False, True, True): (1, 1024, 1, 8), (1024, 1024, 65536, 32, 32, True, False, True): (1, 512, 3, 2), (1024, 1024, 65536, 64, 64, False, True, True): (1, 512, 1, 4), (1024, 1024, 65536, 64, 64, True, False, True): (2, 256, 3, 4), (1024, 1024, 65536, 128, 128, False, True, True): (4, 512, 1, 16), (1024, 1024, 65536, 128, 128, True, False, True): (4, 512, 3, 4), (1024, 1024, 65536, 256, 256, False, True, True): (1, 256, 1, 32), (1024, 1024, 65536, 256, 256, True, False, True): (1, 256, 1, 32), (1024, 1024, 65792, 32, 32, False, True, True): (1, 1028, 1, 8), (1024, 1024, 65792, 32, 32, True, False, True): (1, 514, 3, 2), (1024, 1024, 65792, 64, 64, False, True, True): (2, 514, 1, 4), (1024, 1024, 65792, 64, 64, True, False, True): (4, 257, 3, 4), (1024, 1024, 65792, 128, 128, False, True, True): (2, 514, 1, 16), (1024, 1024, 65792, 128, 128, True, False, True): (2, 514, 2, 4), (1024, 1024, 65792, 256, 256, False, True, True): (1, 257, 1, 32), (1024, 1024, 65792, 256, 256, True, False, True): (1, 257, 1, 32), (1024, 1024, 131072, 32, 32, False, True, True): (1, 2048, 1, 8), (1024, 1024, 131072, 32, 32, True, False, True): (1, 1024, 3, 2), (1024, 1024, 131072, 64, 64, False, True, True): (2, 1024, 1, 4), (1024, 1024, 131072, 64, 64, True, False, True): (2, 512, 3, 4), (1024, 1024, 131072, 128, 128, False, True, True): (4, 1024, 1, 16), (1024, 1024, 131072, 128, 128, True, False, True): (1, 1024, 3, 4), (1024, 1024, 131072, 256, 256, False, True, True): (1, 512, 1, 32), (1024, 1024, 131072, 256, 256, True, False, True): (1, 512, 1, 32), (1280, 5120, 65792, 32, 32, False, 
True, True): (1, 1028, 1, 8), (1280, 5120, 65792, 32, 32, True, False, True): (1, 514, 3, 2), (1280, 5120, 65792, 64, 64, False, True, True): (1, 1028, 1, 8), (1280, 5120, 65792, 64, 64, True, False, True): (2, 257, 3, 4), (1280, 5120, 65792, 128, 128, False, True, True): (2, 514, 1, 16), (1280, 5120, 65792, 128, 128, True, False, True): (1, 514, 3, 4), (1280, 5120, 65792, 256, 256, False, True, True): (1, 257, 1, 32), (1280, 5120, 65792, 256, 256, True, False, True): (1, 257, 1, 32), (1536, 1536, 256, 32, 32, False, True, True): (1, 8, 1, 4), (1536, 1536, 256, 32, 32, True, False, True): (2, 8, 1, 8), (1536, 1536, 256, 64, 64, False, True, True): (4, 4, 1, 16), (1536, 1536, 256, 64, 64, True, False, True): (1, 4, 4, 4), (1536, 1536, 256, 128, 128, False, True, True): (2, 2, 1, 16), (1536, 1536, 256, 128, 128, True, False, True): (2, 2, 3, 4), (1536, 1536, 256, 256, 256, False, True, True): (1, 1, 1, 32), (1536, 1536, 256, 256, 256, True, False, True): (1, 1, 1, 32), (1536, 1536, 512, 32, 32, False, True, True): (1, 8, 1, 8), (1536, 1536, 512, 32, 32, True, False, True): (3, 4, 4, 4), (1536, 1536, 512, 64, 64, False, True, True): (3, 8, 1, 16), (1536, 1536, 512, 64, 64, True, False, True): (1, 4, 3, 4), (1536, 1536, 512, 128, 128, False, True, True): (1, 4, 1, 16), (1536, 1536, 512, 128, 128, True, False, True): (2, 4, 4, 4), (1536, 1536, 512, 256, 256, False, True, True): (1, 2, 1, 32), (1536, 1536, 512, 256, 256, True, False, True): (1, 2, 1, 32), (1536, 1536, 1024, 32, 32, False, True, True): (4, 16, 1, 8), (1536, 1536, 1024, 32, 32, True, False, True): (2, 8, 1, 4), (1536, 1536, 1024, 64, 64, False, True, True): (2, 16, 1, 16), (1536, 1536, 1024, 64, 64, True, False, True): (2, 4, 3, 4), (1536, 1536, 1024, 128, 128, False, True, True): (3, 8, 1, 32), (1536, 1536, 1024, 128, 128, True, False, True): (4, 8, 3, 4), (1536, 1536, 1024, 256, 256, False, True, True): (1, 4, 1, 32), (1536, 1536, 1024, 256, 256, True, False, True): (1, 4, 1, 32), (1536, 1536, 2048, 32, 32, False, True, True): (1, 32, 1, 8), (1536, 1536, 2048, 32, 32, True, False, True): (1, 16, 1, 4), (1536, 1536, 2048, 64, 64, False, True, True): (1, 32, 1, 8), (1536, 1536, 2048, 64, 64, True, False, True): (1, 16, 2, 2), (1536, 1536, 2048, 128, 128, False, True, True): (2, 16, 1, 16), (1536, 1536, 2048, 128, 128, True, False, True): (4, 16, 2, 4), (1536, 1536, 2048, 256, 256, False, True, True): (1, 8, 1, 32), (1536, 1536, 2048, 256, 256, True, False, True): (1, 8, 1, 32), (1536, 1536, 4096, 32, 32, False, True, True): (1, 64, 1, 8), (1536, 1536, 4096, 32, 32, True, False, True): (1, 32, 1, 4), (1536, 1536, 4096, 64, 64, False, True, True): (3, 64, 1, 8), (1536, 1536, 4096, 64, 64, True, False, True): (1, 32, 3, 2), (1536, 1536, 4096, 128, 128, False, True, True): (1, 32, 1, 8), (1536, 1536, 4096, 128, 128, True, False, True): (2, 32, 2, 4), (1536, 1536, 4096, 256, 256, False, True, True): (1, 16, 1, 32), (1536, 1536, 4096, 256, 256, True, False, True): (2, 16, 1, 32), (1536, 1536, 8192, 32, 32, False, True, True): (1, 128, 1, 8), (1536, 1536, 8192, 32, 32, True, False, True): (1, 64, 1, 4), (1536, 1536, 8192, 64, 64, False, True, True): (3, 128, 1, 8), (1536, 1536, 8192, 64, 64, True, False, True): (1, 64, 3, 2), (1536, 1536, 8192, 128, 128, False, True, True): (1, 64, 1, 8), (1536, 1536, 8192, 128, 128, True, False, True): (1, 64, 2, 4), (1536, 1536, 8192, 256, 256, False, True, True): (1, 32, 1, 32), (1536, 1536, 8192, 256, 256, True, False, True): (2, 32, 1, 32), (1536, 1536, 16384, 32, 32, False, True, True): (1, 256, 1, 
8), (1536, 1536, 16384, 32, 32, True, False, True): (1, 128, 3, 2), (1536, 1536, 16384, 64, 64, False, True, True): (2, 128, 1, 4), (1536, 1536, 16384, 64, 64, True, False, True): (2, 64, 3, 4), (1536, 1536, 16384, 128, 128, False, True, True): (1, 128, 1, 8), (1536, 1536, 16384, 128, 128, True, False, True): (2, 128, 2, 4), (1536, 1536, 16384, 256, 256, False, True, True): (1, 64, 1, 32), (1536, 1536, 16384, 256, 256, True, False, True): (2, 64, 1, 32), (1536, 1536, 32768, 32, 32, False, True, True): (1, 512, 1, 8), (1536, 1536, 32768, 32, 32, True, False, True): (1, 256, 3, 2), (1536, 1536, 32768, 64, 64, False, True, True): (1, 256, 1, 4), (1536, 1536, 32768, 64, 64, True, False, True): (3, 128, 3, 4), (1536, 1536, 32768, 128, 128, False, True, True): (1, 256, 1, 8), (1536, 1536, 32768, 128, 128, True, False, True): (1, 256, 2, 4), (1536, 1536, 32768, 256, 256, False, True, True): (1, 128, 1, 32), (1536, 1536, 32768, 256, 256, True, False, True): (2, 128, 1, 32), (1536, 1536, 65536, 32, 32, False, True, True): (1, 1024, 1, 8), (1536, 1536, 65536, 32, 32, True, False, True): (1, 512, 3, 2), (1536, 1536, 65536, 64, 64, False, True, True): (1, 512, 1, 4), (1536, 1536, 65536, 64, 64, True, False, True): (1, 512, 3, 2), (1536, 1536, 65536, 128, 128, False, True, True): (1, 512, 1, 8), (1536, 1536, 65536, 128, 128, True, False, True): (1, 512, 3, 4), (1536, 1536, 65536, 256, 256, False, True, True): (1, 256, 1, 32), (1536, 1536, 65536, 256, 256, True, False, True): (2, 256, 1, 32), (1536, 1536, 131072, 32, 32, False, True, True): (1, 2048, 1, 8), (1536, 1536, 131072, 32, 32, True, False, True): (1, 1024, 3, 2), (1536, 1536, 131072, 64, 64, False, True, True): (3, 1024, 1, 4), (1536, 1536, 131072, 64, 64, True, False, True): (3, 512, 3, 4), (1536, 1536, 131072, 128, 128, False, True, True): (1, 1024, 1, 8), (1536, 1536, 131072, 128, 128, True, False, True): (1, 1024, 3, 4), (1536, 1536, 131072, 256, 256, False, True, True): (1, 512, 1, 32), (1536, 1536, 131072, 256, 256, True, False, True): (2, 512, 1, 32), (2048, 2048, 256, 32, 32, False, True, True): (3, 8, 1, 4), (2048, 2048, 256, 32, 32, True, False, True): (1, 4, 4, 2), (2048, 2048, 256, 64, 64, False, True, True): (2, 4, 1, 16), (2048, 2048, 256, 64, 64, True, False, True): (1, 2, 3, 4), (2048, 2048, 256, 128, 128, False, True, True): (1, 2, 1, 8), (2048, 2048, 256, 128, 128, True, False, True): (1, 2, 4, 4), (2048, 2048, 256, 256, 256, False, True, True): (1, 1, 1, 32), (2048, 2048, 256, 256, 256, True, False, True): (1, 1, 1, 32), (2048, 2048, 512, 32, 32, False, True, True): (3, 8, 1, 8), (2048, 2048, 512, 32, 32, True, False, True): (4, 4, 3, 2), (2048, 2048, 512, 64, 64, False, True, True): (1, 8, 1, 8), (2048, 2048, 512, 64, 64, True, False, True): (1, 8, 3, 4), (2048, 2048, 512, 128, 128, False, True, True): (1, 4, 1, 8), (2048, 2048, 512, 128, 128, True, False, True): (1, 4, 4, 4), (2048, 2048, 512, 256, 256, False, True, True): (1, 2, 1, 32), (2048, 2048, 512, 256, 256, True, False, True): (2, 2, 1, 32), (2048, 2048, 1024, 32, 32, False, True, True): (1, 16, 1, 8), (2048, 2048, 1024, 32, 32, True, False, True): (3, 8, 1, 4), (2048, 2048, 1024, 64, 64, False, True, True): (4, 16, 1, 8), (2048, 2048, 1024, 64, 64, True, False, True): (1, 8, 3, 2), (2048, 2048, 1024, 128, 128, False, True, True): (4, 8, 1, 16), (2048, 2048, 1024, 128, 128, True, False, True): (2, 8, 2, 4), (2048, 2048, 1024, 256, 256, False, True, True): (1, 4, 1, 32), (2048, 2048, 1024, 256, 256, True, False, True): (3, 4, 1, 32), (2048, 2048, 2048, 32, 32, False, 
True, True): (1, 32, 1, 8), (2048, 2048, 2048, 32, 32, True, False, True): (1, 16, 1, 4), (2048, 2048, 2048, 64, 64, False, True, True): (1, 32, 1, 8), (2048, 2048, 2048, 64, 64, True, False, True): (1, 16, 3, 2), (2048, 2048, 2048, 128, 128, False, True, True): (4, 16, 1, 16), (2048, 2048, 2048, 128, 128, True, False, True): (2, 16, 2, 4), (2048, 2048, 2048, 256, 256, False, True, True): (1, 8, 1, 32), (2048, 2048, 2048, 256, 256, True, False, True): (1, 8, 1, 32), (2048, 2048, 4096, 32, 32, False, True, True): (1, 64, 1, 8), (2048, 2048, 4096, 32, 32, True, False, True): (1, 32, 1, 4), (2048, 2048, 4096, 64, 64, False, True, True): (4, 64, 1, 8), (2048, 2048, 4096, 64, 64, True, False, True): (2, 16, 3, 4), (2048, 2048, 4096, 128, 128, False, True, True): (4, 32, 1, 8), (2048, 2048, 4096, 128, 128, True, False, True): (1, 32, 2, 4), (2048, 2048, 4096, 256, 256, False, True, True): (1, 16, 1, 32), (2048, 2048, 4096, 256, 256, True, False, True): (4, 16, 1, 32), (2048, 2048, 8192, 32, 32, False, True, True): (1, 128, 1, 8), (2048, 2048, 8192, 32, 32, True, False, True): (1, 64, 1, 4), (2048, 2048, 8192, 64, 64, False, True, True): (2, 64, 1, 4), (2048, 2048, 8192, 64, 64, True, False, True): (2, 32, 3, 4), (2048, 2048, 8192, 128, 128, False, True, True): (4, 64, 1, 8), (2048, 2048, 8192, 128, 128, True, False, True): (2, 64, 2, 4), (2048, 2048, 8192, 256, 256, False, True, True): (1, 32, 1, 32), (2048, 2048, 8192, 256, 256, True, False, True): (4, 32, 1, 32), (2048, 2048, 16384, 32, 32, False, True, True): (1, 256, 1, 8), (2048, 2048, 16384, 32, 32, True, False, True): (1, 128, 3, 2), (2048, 2048, 16384, 64, 64, False, True, True): (2, 128, 1, 4), (2048, 2048, 16384, 64, 64, True, False, True): (2, 64, 3, 4), (2048, 2048, 16384, 128, 128, False, True, True): (1, 128, 1, 8), (2048, 2048, 16384, 128, 128, True, False, True): (2, 128, 2, 4), (2048, 2048, 16384, 256, 256, False, True, True): (1, 64, 1, 32), (2048, 2048, 16384, 256, 256, True, False, True): (4, 64, 1, 32), (2048, 2048, 32768, 32, 32, False, True, True): (1, 512, 1, 8), (2048, 2048, 32768, 32, 32, True, False, True): (1, 256, 3, 2), (2048, 2048, 32768, 64, 64, False, True, True): (2, 256, 1, 4), (2048, 2048, 32768, 64, 64, True, False, True): (2, 128, 3, 4), (2048, 2048, 32768, 128, 128, False, True, True): (1, 256, 1, 8), (2048, 2048, 32768, 128, 128, True, False, True): (2, 256, 2, 4), (2048, 2048, 32768, 256, 256, False, True, True): (1, 128, 1, 32), (2048, 2048, 32768, 256, 256, True, False, True): (4, 128, 1, 32), (2048, 2048, 65536, 32, 32, False, True, True): (1, 1024, 1, 8), (2048, 2048, 65536, 32, 32, True, False, True): (1, 512, 3, 2), (2048, 2048, 65536, 64, 64, False, True, True): (1, 512, 1, 4), (2048, 2048, 65536, 64, 64, True, False, True): (2, 256, 3, 4), (2048, 2048, 65536, 128, 128, False, True, True): (1, 512, 1, 8), (2048, 2048, 65536, 128, 128, True, False, True): (1, 512, 2, 4), (2048, 2048, 65536, 256, 256, False, True, True): (1, 256, 1, 32), (2048, 2048, 65536, 256, 256, True, False, True): (4, 256, 1, 32), (2048, 2048, 65792, 32, 32, False, True, True): (1, 1028, 1, 8), (2048, 2048, 65792, 32, 32, True, False, True): (1, 514, 3, 2), (2048, 2048, 65792, 64, 64, False, True, True): (1, 514, 1, 4), (2048, 2048, 65792, 64, 64, True, False, True): (2, 257, 3, 4), (2048, 2048, 65792, 128, 128, False, True, True): (1, 514, 1, 8), (2048, 2048, 65792, 128, 128, True, False, True): (1, 514, 2, 4), (2048, 2048, 65792, 256, 256, False, True, True): (1, 257, 1, 32), (2048, 2048, 65792, 256, 256, True, False, True): 
(1, 257, 1, 32), (2048, 2048, 131072, 32, 32, False, True, True): (1, 2048, 1, 8), (2048, 2048, 131072, 32, 32, True, False, True): (1, 1024, 3, 2), (2048, 2048, 131072, 64, 64, False, True, True): (1, 1024, 1, 4), (2048, 2048, 131072, 64, 64, True, False, True): (2, 512, 3, 4), (2048, 2048, 131072, 128, 128, False, True, True): (1, 1024, 1, 8), (2048, 2048, 131072, 128, 128, True, False, True): (1, 1024, 3, 4), (2048, 2048, 131072, 256, 256, False, True, True): (1, 512, 1, 32), (2048, 2048, 131072, 256, 256, True, False, True): (4, 512, 1, 32), (3072, 768, 256, 32, 32, False, True, True): (5, 4, 1, 8), (3072, 768, 256, 32, 32, True, False, True): (2, 2, 4, 4), (3072, 768, 256, 64, 64, False, True, True): (1, 4, 1, 16), (3072, 768, 256, 64, 64, True, False, True): (2, 2, 3, 4), (3072, 768, 256, 128, 128, False, True, True): (5, 2, 1, 16), (3072, 768, 256, 128, 128, True, False, True): (1, 2, 5, 4), (3072, 768, 256, 256, 256, False, True, True): (1, 1, 1, 32), (3072, 768, 256, 256, 256, True, False, True): (1, 1, 1, 32), (3072, 768, 512, 32, 32, False, True, True): (1, 8, 1, 8), (3072, 768, 512, 32, 32, True, False, True): (5, 4, 1, 4), (3072, 768, 512, 64, 64, False, True, True): (1, 8, 1, 8), (3072, 768, 512, 64, 64, True, False, True): (3, 2, 3, 4), (3072, 768, 512, 128, 128, False, True, True): (3, 4, 1, 32), (3072, 768, 512, 128, 128, True, False, True): (2, 4, 3, 4), (3072, 768, 512, 256, 256, False, True, True): (1, 2, 1, 32), (3072, 768, 512, 256, 256, True, False, True): (2, 2, 1, 32), (3072, 768, 1024, 32, 32, False, True, True): (2, 16, 1, 8), (3072, 768, 1024, 32, 32, True, False, True): (3, 8, 1, 4), (3072, 768, 1024, 64, 64, False, True, True): (4, 16, 1, 8), (3072, 768, 1024, 64, 64, True, False, True): (1, 8, 3, 2), (3072, 768, 1024, 128, 128, False, True, True): (2, 8, 1, 32), (3072, 768, 1024, 128, 128, True, False, True): (3, 8, 2, 4), (3072, 768, 1024, 256, 256, False, True, True): (1, 4, 1, 32), (3072, 768, 1024, 256, 256, True, False, True): (4, 4, 1, 32), (3072, 768, 2048, 32, 32, False, True, True): (1, 32, 1, 8), (3072, 768, 2048, 32, 32, True, False, True): (1, 16, 1, 4), (3072, 768, 2048, 64, 64, False, True, True): (2, 32, 1, 8), (3072, 768, 2048, 64, 64, True, False, True): (2, 8, 3, 4), (3072, 768, 2048, 128, 128, False, True, True): (2, 16, 1, 16), (3072, 768, 2048, 128, 128, True, False, True): (2, 16, 1, 4), (3072, 768, 2048, 256, 256, False, True, True): (1, 8, 1, 32), (3072, 768, 2048, 256, 256, True, False, True): (2, 8, 1, 32), (3072, 768, 4096, 32, 32, False, True, True): (1, 64, 1, 8), (3072, 768, 4096, 32, 32, True, False, True): (1, 32, 1, 2), (3072, 768, 4096, 64, 64, False, True, True): (2, 64, 1, 8), (3072, 768, 4096, 64, 64, True, False, True): (2, 32, 2, 2), (3072, 768, 4096, 128, 128, False, True, True): (1, 32, 1, 8), (3072, 768, 4096, 128, 128, True, False, True): (2, 32, 2, 4), (3072, 768, 4096, 256, 256, False, True, True): (1, 16, 1, 32), (3072, 768, 4096, 256, 256, True, False, True): (4, 16, 1, 32), (3072, 768, 8192, 32, 32, False, True, True): (1, 128, 1, 8), (3072, 768, 8192, 32, 32, True, False, True): (3, 64, 1, 2), (3072, 768, 8192, 64, 64, False, True, True): (1, 128, 1, 8), (3072, 768, 8192, 64, 64, True, False, True): (2, 64, 2, 2), (3072, 768, 8192, 128, 128, False, True, True): (1, 64, 1, 8), (3072, 768, 8192, 128, 128, True, False, True): (2, 64, 2, 4), (3072, 768, 8192, 256, 256, False, True, True): (1, 32, 1, 32), (3072, 768, 8192, 256, 256, True, False, True): (4, 32, 1, 32), (3072, 768, 16384, 32, 32, False, True, True): 
(1, 256, 1, 8), (3072, 768, 16384, 32, 32, True, False, True): (1, 128, 1, 2), (3072, 768, 16384, 64, 64, False, True, True): (2, 128, 1, 4), (3072, 768, 16384, 64, 64, True, False, True): (1, 128, 2, 2), (3072, 768, 16384, 128, 128, False, True, True): (1, 128, 1, 8), (3072, 768, 16384, 128, 128, True, False, True): (1, 128, 1, 4), (3072, 768, 16384, 256, 256, False, True, True): (1, 64, 1, 32), (3072, 768, 16384, 256, 256, True, False, True): (4, 64, 1, 32), (3072, 768, 32768, 32, 32, False, True, True): (1, 512, 1, 8), (3072, 768, 32768, 32, 32, True, False, True): (1, 256, 1, 2), (3072, 768, 32768, 64, 64, False, True, True): (1, 256, 1, 4), (3072, 768, 32768, 64, 64, True, False, True): (2, 256, 2, 2), (3072, 768, 32768, 128, 128, False, True, True): (1, 256, 1, 8), (3072, 768, 32768, 128, 128, True, False, True): (2, 256, 1, 4), (3072, 768, 32768, 256, 256, False, True, True): (1, 128, 1, 32), (3072, 768, 32768, 256, 256, True, False, True): (4, 128, 1, 32), (3072, 768, 50432, 32, 32, False, True, True): (1, 788, 1, 8), (3072, 768, 50432, 32, 32, True, False, True): (1, 394, 1, 2), (3072, 768, 50432, 64, 64, False, True, True): (2, 394, 1, 4), (3072, 768, 50432, 64, 64, True, False, True): (2, 394, 2, 2), (3072, 768, 50432, 128, 128, False, True, True): (1, 394, 1, 8), (3072, 768, 50432, 128, 128, True, False, True): (2, 394, 1, 4), (3072, 768, 50432, 256, 256, False, True, True): (1, 197, 1, 32), (3072, 768, 50432, 256, 256, True, False, True): (1, 197, 1, 32), (3072, 768, 65536, 32, 32, False, True, True): (1, 1024, 1, 8), (3072, 768, 65536, 32, 32, True, False, True): (1, 512, 1, 2), (3072, 768, 65536, 64, 64, False, True, True): (1, 512, 1, 4), (3072, 768, 65536, 64, 64, True, False, True): (2, 512, 2, 2), (3072, 768, 65536, 128, 128, False, True, True): (1, 512, 1, 8), (3072, 768, 65536, 128, 128, True, False, True): (2, 512, 1, 4), (3072, 768, 65536, 256, 256, False, True, True): (1, 256, 1, 32), (3072, 768, 65536, 256, 256, True, False, True): (4, 256, 1, 32), (3072, 768, 131072, 32, 32, False, True, True): (1, 2048, 1, 8), (3072, 768, 131072, 32, 32, True, False, True): (1, 1024, 1, 2), (3072, 768, 131072, 64, 64, False, True, True): (2, 1024, 1, 4), (3072, 768, 131072, 64, 64, True, False, True): (2, 1024, 2, 2), (3072, 768, 131072, 128, 128, False, True, True): (1, 1024, 1, 8), (3072, 768, 131072, 128, 128, True, False, True): (2, 1024, 1, 4), (3072, 768, 131072, 256, 256, False, True, True): (1, 512, 1, 32), (3072, 768, 131072, 256, 256, True, False, True): (4, 512, 1, 32), (3072, 3072, 256, 32, 32, False, True, True): (1, 4, 1, 8), (3072, 3072, 256, 32, 32, True, False, True): (2, 2, 5, 4), (3072, 3072, 256, 64, 64, False, True, True): (2, 4, 1, 16), (3072, 3072, 256, 64, 64, True, False, True): (3, 2, 3, 4), (3072, 3072, 256, 128, 128, False, True, True): (1, 2, 1, 8), (3072, 3072, 256, 128, 128, True, False, True): (1, 2, 5, 4), (3072, 3072, 256, 256, 256, False, True, True): (1, 1, 1, 32), (3072, 3072, 256, 256, 256, True, False, True): (1, 1, 1, 32), (3072, 3072, 512, 32, 32, False, True, True): (1, 8, 1, 8), (3072, 3072, 512, 32, 32, True, False, True): (3, 2, 3, 4), (3072, 3072, 512, 64, 64, False, True, True): (1, 8, 1, 8), (3072, 3072, 512, 64, 64, True, False, True): (3, 2, 3, 4), (3072, 3072, 512, 128, 128, False, True, True): (2, 4, 1, 8), (3072, 3072, 512, 128, 128, True, False, True): (2, 4, 4, 4), (3072, 3072, 512, 256, 256, False, True, True): (1, 2, 1, 32), (3072, 3072, 512, 256, 256, True, False, True): (1, 2, 1, 32), (3072, 3072, 1024, 32, 32, False, 
True, True): (1, 16, 1, 8), (3072, 3072, 1024, 32, 32, True, False, True): (3, 8, 3, 4), (3072, 3072, 1024, 64, 64, False, True, True): (2, 16, 1, 8), (3072, 3072, 1024, 64, 64, True, False, True): (2, 4, 3, 4), (3072, 3072, 1024, 128, 128, False, True, True): (1, 8, 1, 8), (3072, 3072, 1024, 128, 128, True, False, True): (3, 8, 2, 4), (3072, 3072, 1024, 256, 256, False, True, True): (1, 4, 1, 32), (3072, 3072, 1024, 256, 256, True, False, True): (3, 4, 1, 32), (3072, 3072, 2048, 32, 32, False, True, True): (1, 32, 1, 8), (3072, 3072, 2048, 32, 32, True, False, True): (1, 16, 1, 4), (3072, 3072, 2048, 64, 64, False, True, True): (1, 32, 1, 8), (3072, 3072, 2048, 64, 64, True, False, True): (1, 16, 3, 2), (3072, 3072, 2048, 128, 128, False, True, True): (1, 16, 1, 8), (3072, 3072, 2048, 128, 128, True, False, True): (2, 16, 2, 4), (3072, 3072, 2048, 256, 256, False, True, True): (1, 8, 1, 32), (3072, 3072, 2048, 256, 256, True, False, True): (3, 8, 1, 32), (3072, 3072, 4096, 32, 32, False, True, True): (1, 64, 1, 8), (3072, 3072, 4096, 32, 32, True, False, True): (1, 32, 1, 4), (3072, 3072, 4096, 64, 64, False, True, True): (1, 64, 1, 8), (3072, 3072, 4096, 64, 64, True, False, True): (3, 16, 3, 4), (3072, 3072, 4096, 128, 128, False, True, True): (1, 32, 1, 8), (3072, 3072, 4096, 128, 128, True, False, True): (2, 32, 2, 4), (3072, 3072, 4096, 256, 256, False, True, True): (1, 16, 1, 32), (3072, 3072, 4096, 256, 256, True, False, True): (2, 16, 1, 32), (3072, 3072, 8192, 32, 32, False, True, True): (1, 128, 1, 8), (3072, 3072, 8192, 32, 32, True, False, True): (1, 64, 1, 2), (3072, 3072, 8192, 64, 64, False, True, True): (1, 64, 1, 4), (3072, 3072, 8192, 64, 64, True, False, True): (1, 64, 3, 2), (3072, 3072, 8192, 128, 128, False, True, True): (1, 64, 1, 8), (3072, 3072, 8192, 128, 128, True, False, True): (2, 64, 2, 4), (3072, 3072, 8192, 256, 256, False, True, True): (1, 32, 1, 32), (3072, 3072, 8192, 256, 256, True, False, True): (4, 32, 1, 32), (3072, 3072, 16384, 32, 32, False, True, True): (1, 256, 1, 8), (3072, 3072, 16384, 32, 32, True, False, True): (1, 128, 3, 2), (3072, 3072, 16384, 64, 64, False, True, True): (1, 128, 1, 4), (3072, 3072, 16384, 64, 64, True, False, True): (2, 64, 3, 4), (3072, 3072, 16384, 128, 128, False, True, True): (1, 128, 1, 8), (3072, 3072, 16384, 128, 128, True, False, True): (1, 128, 2, 4), (3072, 3072, 16384, 256, 256, False, True, True): (1, 64, 1, 32), (3072, 3072, 16384, 256, 256, True, False, True): (4, 64, 1, 32), (3072, 3072, 32768, 32, 32, False, True, True): (1, 512, 1, 8), (3072, 3072, 32768, 32, 32, True, False, True): (1, 256, 3, 2), (3072, 3072, 32768, 64, 64, False, True, True): (1, 256, 1, 4), (3072, 3072, 32768, 64, 64, True, False, True): (1, 256, 3, 2), (3072, 3072, 32768, 128, 128, False, True, True): (1, 256, 1, 8), (3072, 3072, 32768, 128, 128, True, False, True): (1, 256, 2, 4), (3072, 3072, 32768, 256, 256, False, True, True): (1, 128, 1, 32), (3072, 3072, 32768, 256, 256, True, False, True): (4, 128, 1, 32), (3072, 3072, 65536, 32, 32, False, True, True): (1, 1024, 1, 8), (3072, 3072, 65536, 32, 32, True, False, True): (1, 512, 3, 2), (3072, 3072, 65536, 64, 64, False, True, True): (1, 512, 1, 4), (3072, 3072, 65536, 64, 64, True, False, True): (2, 256, 3, 4), (3072, 3072, 65536, 128, 128, False, True, True): (1, 512, 1, 8), (3072, 3072, 65536, 128, 128, True, False, True): (1, 512, 3, 4), (3072, 3072, 65536, 256, 256, False, True, True): (1, 256, 1, 32), (3072, 3072, 65536, 256, 256, True, False, True): (4, 256, 1, 32), (3072, 
3072, 131072, 32, 32, False, True, True): (1, 2048, 1, 8), (3072, 3072, 131072, 32, 32, True, False, True): (1, 1024, 3, 2), (3072, 3072, 131072, 64, 64, False, True, True): (1, 1024, 1, 4), (3072, 3072, 131072, 64, 64, True, False, True): (1, 1024, 3, 2), (3072, 3072, 131072, 128, 128, False, True, True): (1, 1024, 1, 8), (3072, 3072, 131072, 128, 128, True, False, True): (1, 1024, 3, 4), (3072, 3072, 131072, 256, 256, False, True, True): (1, 512, 1, 32), (3072, 3072, 131072, 256, 256, True, False, True): (4, 512, 1, 32), (4096, 4096, 256, 32, 32, False, True, True): (1, 4, 1, 8), (4096, 4096, 256, 32, 32, True, False, True): (5, 2, 3, 4), (4096, 4096, 256, 64, 64, False, True, True): (3, 4, 1, 8), (4096, 4096, 256, 64, 64, True, False, True): (3, 4, 3, 2), (4096, 4096, 256, 128, 128, False, True, True): (1, 2, 1, 8), (4096, 4096, 256, 128, 128, True, False, True): (2, 2, 4, 4), (4096, 4096, 256, 256, 256, False, True, True): (1, 1, 1, 32), (4096, 4096, 256, 256, 256, True, False, True): (1, 1, 1, 32), (4096, 4096, 512, 32, 32, False, True, True): (1, 8, 1, 8), (4096, 4096, 512, 32, 32, True, False, True): (1, 4, 1, 4), (4096, 4096, 512, 64, 64, False, True, True): (1, 8, 1, 8), (4096, 4096, 512, 64, 64, True, False, True): (3, 4, 2, 2), (4096, 4096, 512, 128, 128, False, True, True): (2, 4, 1, 8), (4096, 4096, 512, 128, 128, True, False, True): (2, 4, 2, 4), (4096, 4096, 512, 256, 256, False, True, True): (2, 2, 1, 32), (4096, 4096, 512, 256, 256, True, False, True): (2, 2, 1, 32), (4096, 4096, 1024, 32, 32, False, True, True): (4, 16, 1, 8), (4096, 4096, 1024, 32, 32, True, False, True): (1, 8, 1, 4), (4096, 4096, 1024, 64, 64, False, True, True): (1, 16, 1, 8), (4096, 4096, 1024, 64, 64, True, False, True): (4, 4, 3, 4), (4096, 4096, 1024, 128, 128, False, True, True): (2, 8, 1, 8), (4096, 4096, 1024, 128, 128, True, False, True): (1, 8, 3, 4), (4096, 4096, 1024, 256, 256, False, True, True): (1, 4, 1, 32), (4096, 4096, 1024, 256, 256, True, False, True): (6, 4, 1, 32), (4096, 4096, 2048, 32, 32, False, True, True): (1, 32, 1, 8), (4096, 4096, 2048, 32, 32, True, False, True): (1, 16, 1, 4), (4096, 4096, 2048, 64, 64, False, True, True): (4, 32, 1, 8), (4096, 4096, 2048, 64, 64, True, False, True): (4, 8, 3, 4), (4096, 4096, 2048, 128, 128, False, True, True): (2, 16, 1, 8), (4096, 4096, 2048, 128, 128, True, False, True): (1, 16, 3, 4), (4096, 4096, 2048, 256, 256, False, True, True): (1, 8, 1, 32), (4096, 4096, 2048, 256, 256, True, False, True): (4, 8, 1, 32), (4096, 4096, 4096, 32, 32, False, True, True): (1, 64, 1, 8), (4096, 4096, 4096, 32, 32, True, False, True): (1, 32, 1, 4), (4096, 4096, 4096, 64, 64, False, True, True): (1, 64, 1, 8), (4096, 4096, 4096, 64, 64, True, False, True): (1, 32, 3, 2), (4096, 4096, 4096, 128, 128, False, True, True): (1, 32, 1, 8), (4096, 4096, 4096, 128, 128, True, False, True): (2, 32, 3, 4), (4096, 4096, 4096, 256, 256, False, True, True): (1, 16, 1, 32), (4096, 4096, 4096, 256, 256, True, False, True): (4, 16, 1, 32), (4096, 4096, 8192, 32, 32, False, True, True): (1, 128, 1, 8), (4096, 4096, 8192, 32, 32, True, False, True): (1, 64, 1, 4), (4096, 4096, 8192, 64, 64, False, True, True): (1, 128, 1, 8), (4096, 4096, 8192, 64, 64, True, False, True): (1, 64, 3, 2), (4096, 4096, 8192, 128, 128, False, True, True): (1, 64, 1, 8), (4096, 4096, 8192, 128, 128, True, False, True): (1, 64, 3, 4), (4096, 4096, 8192, 256, 256, False, True, True): (1, 32, 1, 32), (4096, 4096, 8192, 256, 256, True, False, True): (4, 32, 1, 32), (4096, 4096, 16384, 32, 32, 
False, True, True): (1, 256, 1, 8), (4096, 4096, 16384, 32, 32, True, False, True): (1, 128, 3, 2), (4096, 4096, 16384, 64, 64, False, True, True): (1, 128, 1, 4), (4096, 4096, 16384, 64, 64, True, False, True): (4, 64, 3, 4), (4096, 4096, 16384, 128, 128, False, True, True): (1, 128, 1, 8), (4096, 4096, 16384, 128, 128, True, False, True): (1, 128, 3, 4), (4096, 4096, 16384, 256, 256, False, True, True): (1, 64, 1, 32), (4096, 4096, 16384, 256, 256, True, False, True): (4, 64, 1, 32), (4096, 4096, 32768, 32, 32, False, True, True): (1, 512, 1, 8), (4096, 4096, 32768, 32, 32, True, False, True): (1, 256, 3, 2), (4096, 4096, 32768, 64, 64, False, True, True): (1, 256, 1, 4), (4096, 4096, 32768, 64, 64, True, False, True): (1, 256, 3, 2), (4096, 4096, 32768, 128, 128, False, True, True): (1, 256, 1, 8), (4096, 4096, 32768, 128, 128, True, False, True): (1, 256, 3, 4), (4096, 4096, 32768, 256, 256, False, True, True): (1, 128, 1, 32), (4096, 4096, 32768, 256, 256, True, False, True): (4, 128, 1, 32), (4096, 4096, 65536, 32, 32, False, True, True): (1, 1024, 1, 8), (4096, 4096, 65536, 32, 32, True, False, True): (1, 512, 3, 2), (4096, 4096, 65536, 64, 64, False, True, True): (1, 512, 1, 4), (4096, 4096, 65536, 64, 64, True, False, True): (4, 256, 3, 4), (4096, 4096, 65536, 128, 128, False, True, True): (1, 512, 1, 8), (4096, 4096, 65536, 128, 128, True, False, True): (1, 512, 3, 4), (4096, 4096, 65536, 256, 256, False, True, True): (1, 256, 1, 32), (4096, 4096, 65536, 256, 256, True, False, True): (4, 256, 1, 32), (4096, 4096, 65792, 32, 32, False, True, True): (1, 1028, 1, 8), (4096, 4096, 65792, 32, 32, True, False, True): (1, 514, 3, 2), (4096, 4096, 65792, 64, 64, False, True, True): (1, 1028, 1, 8), (4096, 4096, 65792, 64, 64, True, False, True): (1, 514, 3, 2), (4096, 4096, 65792, 128, 128, False, True, True): (1, 514, 1, 8), (4096, 4096, 65792, 128, 128, True, False, True): (1, 514, 2, 4), (4096, 4096, 65792, 256, 256, False, True, True): (1, 257, 1, 32), (4096, 4096, 65792, 256, 256, True, False, True): (1, 257, 1, 32), (4096, 4096, 131072, 32, 32, False, True, True): (1, 2048, 1, 8), (4096, 4096, 131072, 32, 32, True, False, True): (1, 1024, 3, 2), (4096, 4096, 131072, 64, 64, False, True, True): (1, 2048, 1, 8), (4096, 4096, 131072, 64, 64, True, False, True): (1, 1024, 3, 2), (4096, 4096, 131072, 128, 128, False, True, True): (1, 1024, 1, 8), (4096, 4096, 131072, 128, 128, True, False, True): (1, 1024, 3, 4), (4096, 4096, 131072, 256, 256, False, True, True): (1, 512, 1, 32), (4096, 4096, 131072, 256, 256, True, False, True): (4, 512, 1, 32), (5120, 1280, 65792, 32, 32, False, True, True): (1, 1028, 1, 8), (5120, 1280, 65792, 32, 32, True, False, True): (1, 514, 1, 2), (5120, 1280, 65792, 64, 64, False, True, True): (1, 514, 1, 4), (5120, 1280, 65792, 64, 64, True, False, True): (1, 514, 2, 2), (5120, 1280, 65792, 128, 128, False, True, True): (1, 514, 1, 8), (5120, 1280, 65792, 128, 128, True, False, True): (1, 514, 2, 4), (5120, 1280, 65792, 256, 256, False, True, True): (1, 257, 1, 32), (5120, 1280, 65792, 256, 256, True, False, True): (1, 257, 1, 32), (6144, 6144, 256, 32, 32, False, True, True): (2, 4, 1, 8), (6144, 6144, 256, 32, 32, True, False, True): (2, 1, 4, 4), (6144, 6144, 256, 64, 64, False, True, True): (1, 4, 1, 8), (6144, 6144, 256, 64, 64, True, False, True): (5, 1, 3, 4), (6144, 6144, 256, 128, 128, False, True, True): (1, 2, 1, 8), (6144, 6144, 256, 128, 128, True, False, True): (1, 2, 3, 4), (6144, 6144, 256, 256, 256, False, True, True): (1, 1, 1, 32), (6144, 
6144, 256, 256, 256, True, False, True): (1, 1, 1, 32), (6144, 6144, 512, 32, 32, False, True, True): (1, 8, 1, 8), (6144, 6144, 512, 32, 32, True, False, True): (1, 4, 4, 2), (6144, 6144, 512, 64, 64, False, True, True): (2, 8, 1, 8), (6144, 6144, 512, 64, 64, True, False, True): (2, 2, 3, 4), (6144, 6144, 512, 128, 128, False, True, True): (3, 4, 1, 8), (6144, 6144, 512, 128, 128, True, False, True): (2, 4, 3, 4), (6144, 6144, 512, 256, 256, False, True, True): (1, 2, 1, 32), (6144, 6144, 512, 256, 256, True, False, True): (2, 2, 1, 32), (6144, 6144, 1024, 32, 32, False, True, True): (1, 16, 1, 8), (6144, 6144, 1024, 32, 32, True, False, True): (1, 8, 1, 4), (6144, 6144, 1024, 64, 64, False, True, True): (1, 16, 1, 8), (6144, 6144, 1024, 64, 64, True, False, True): (4, 4, 3, 4), (6144, 6144, 1024, 128, 128, False, True, True): (1, 8, 1, 8), (6144, 6144, 1024, 128, 128, True, False, True): (3, 8, 3, 4), (6144, 6144, 1024, 256, 256, False, True, True): (1, 4, 1, 32), (6144, 6144, 1024, 256, 256, True, False, True): (1, 4, 1, 32), (6144, 6144, 2048, 32, 32, False, True, True): (1, 32, 1, 8), (6144, 6144, 2048, 32, 32, True, False, True): (1, 16, 1, 4), (6144, 6144, 2048, 64, 64, False, True, True): (1, 32, 1, 8), (6144, 6144, 2048, 64, 64, True, False, True): (4, 8, 3, 4), (6144, 6144, 2048, 128, 128, False, True, True): (1, 16, 1, 8), (6144, 6144, 2048, 128, 128, True, False, True): (3, 16, 3, 4), (6144, 6144, 2048, 256, 256, False, True, True): (1, 8, 1, 32), (6144, 6144, 2048, 256, 256, True, False, True): (4, 8, 1, 32), (6144, 6144, 4096, 32, 32, False, True, True): (1, 64, 1, 8), (6144, 6144, 4096, 32, 32, True, False, True): (1, 32, 1, 4), (6144, 6144, 4096, 64, 64, False, True, True): (1, 64, 1, 8), (6144, 6144, 4096, 64, 64, True, False, True): (4, 16, 3, 4), (6144, 6144, 4096, 128, 128, False, True, True): (1, 32, 1, 8), (6144, 6144, 4096, 128, 128, True, False, True): (4, 32, 3, 4), (6144, 6144, 4096, 256, 256, False, True, True): (1, 16, 1, 32), (6144, 6144, 4096, 256, 256, True, False, True): (4, 16, 1, 32), (6144, 6144, 8192, 32, 32, False, True, True): (1, 128, 1, 8), (6144, 6144, 8192, 32, 32, True, False, True): (1, 64, 1, 4), (6144, 6144, 8192, 64, 64, False, True, True): (1, 128, 1, 8), (6144, 6144, 8192, 64, 64, True, False, True): (4, 32, 3, 4), (6144, 6144, 8192, 128, 128, False, True, True): (1, 64, 1, 8), (6144, 6144, 8192, 128, 128, True, False, True): (1, 64, 3, 4), (6144, 6144, 8192, 256, 256, False, True, True): (1, 32, 1, 32), (6144, 6144, 8192, 256, 256, True, False, True): (4, 32, 1, 32), (6144, 6144, 16384, 32, 32, False, True, True): (1, 256, 1, 8), (6144, 6144, 16384, 32, 32, True, False, True): (1, 128, 1, 4), (6144, 6144, 16384, 64, 64, False, True, True): (1, 256, 1, 8), (6144, 6144, 16384, 64, 64, True, False, True): (4, 64, 3, 4), (6144, 6144, 16384, 128, 128, False, True, True): (1, 128, 1, 8), (6144, 6144, 16384, 128, 128, True, False, True): (4, 128, 3, 4), (6144, 6144, 16384, 256, 256, False, True, True): (1, 64, 1, 32), (6144, 6144, 16384, 256, 256, True, False, True): (4, 64, 1, 32), (6144, 6144, 32768, 32, 32, False, True, True): (1, 512, 1, 8), (6144, 6144, 32768, 32, 32, True, False, True): (1, 256, 1, 4), (6144, 6144, 32768, 64, 64, False, True, True): (1, 512, 1, 8), (6144, 6144, 32768, 64, 64, True, False, True): (4, 128, 3, 4), (6144, 6144, 32768, 128, 128, False, True, True): (1, 256, 1, 8), (6144, 6144, 32768, 128, 128, True, False, True): (1, 256, 3, 4), (6144, 6144, 32768, 256, 256, False, True, True): (1, 128, 1, 32), (6144, 6144, 
32768, 256, 256, True, False, True): (4, 128, 1, 32), (6144, 6144, 65536, 32, 32, False, True, True): (1, 1024, 1, 8), (6144, 6144, 65536, 32, 32, True, False, True): (1, 512, 1, 4), (6144, 6144, 65536, 64, 64, False, True, True): (1, 1024, 1, 8), (6144, 6144, 65536, 64, 64, True, False, True): (4, 256, 3, 4), (6144, 6144, 65536, 128, 128, False, True, True): (1, 512, 1, 8), (6144, 6144, 65536, 128, 128, True, False, True): (1, 512, 3, 4), (6144, 6144, 65536, 256, 256, False, True, True): (1, 256, 1, 32), (6144, 6144, 65536, 256, 256, True, False, True): (4, 256, 1, 32), (6144, 6144, 131072, 32, 32, False, True, True): (1, 2048, 1, 8), (6144, 6144, 131072, 32, 32, True, False, True): (1, 1024, 1, 4), (6144, 6144, 131072, 64, 64, False, True, True): (1, 2048, 1, 8), (6144, 6144, 131072, 64, 64, True, False, True): (4, 512, 3, 4), (6144, 6144, 131072, 128, 128, False, True, True): (1, 1024, 1, 8), (6144, 6144, 131072, 128, 128, True, False, True): (1, 1024, 3, 4), (6144, 6144, 131072, 256, 256, False, True, True): (1, 512, 1, 32), (6144, 6144, 131072, 256, 256, True, False, True): (4, 512, 1, 32), (8192, 8192, 256, 32, 32, False, True, True): (1, 4, 1, 8), (8192, 8192, 256, 32, 32, True, False, True): (3, 2, 3, 4), (8192, 8192, 256, 64, 64, False, True, True): (1, 4, 1, 4), (8192, 8192, 256, 64, 64, True, False, True): (1, 4, 1, 4), (8192, 8192, 256, 128, 128, False, True, True): (1, 2, 1, 8), (8192, 8192, 256, 128, 128, True, False, True): (2, 2, 3, 4), (8192, 8192, 256, 256, 256, False, True, True): (1, 1, 1, 32), (8192, 8192, 256, 256, 256, True, False, True): (1, 1, 1, 32), (8192, 8192, 512, 32, 32, False, True, True): (4, 8, 1, 8), (8192, 8192, 512, 32, 32, True, False, True): (2, 4, 4, 2), (8192, 8192, 512, 64, 64, False, True, True): (4, 4, 1, 4), (8192, 8192, 512, 64, 64, True, False, True): (3, 2, 3, 4), (8192, 8192, 512, 128, 128, False, True, True): (1, 4, 1, 8), (8192, 8192, 512, 128, 128, True, False, True): (1, 4, 3, 4), (8192, 8192, 512, 256, 256, False, True, True): (1, 2, 1, 32), (8192, 8192, 512, 256, 256, True, False, True): (1, 2, 1, 32), (8192, 8192, 1024, 32, 32, False, True, True): (4, 16, 1, 8), (8192, 8192, 1024, 32, 32, True, False, True): (1, 8, 3, 2), (8192, 8192, 1024, 64, 64, False, True, True): (4, 8, 1, 4), (8192, 8192, 1024, 64, 64, True, False, True): (4, 4, 3, 4), (8192, 8192, 1024, 128, 128, False, True, True): (1, 8, 1, 8), (8192, 8192, 1024, 128, 128, True, False, True): (1, 8, 3, 4), (8192, 8192, 1024, 256, 256, False, True, True): (1, 4, 1, 32), (8192, 8192, 1024, 256, 256, True, False, True): (4, 4, 1, 32), (8192, 8192, 2048, 32, 32, False, True, True): (4, 32, 1, 8), (8192, 8192, 2048, 32, 32, True, False, True): (1, 16, 3, 2), (8192, 8192, 2048, 64, 64, False, True, True): (4, 32, 1, 8), (8192, 8192, 2048, 64, 64, True, False, True): (4, 8, 3, 4), (8192, 8192, 2048, 128, 128, False, True, True): (4, 16, 1, 8), (8192, 8192, 2048, 128, 128, True, False, True): (4, 16, 3, 4), (8192, 8192, 2048, 256, 256, False, True, True): (1, 8, 1, 32), (8192, 8192, 2048, 256, 256, True, False, True): (4, 8, 1, 32), (8192, 8192, 4096, 32, 32, False, True, True): (4, 64, 1, 8), (8192, 8192, 4096, 32, 32, True, False, True): (2, 32, 3, 2), (8192, 8192, 4096, 64, 64, False, True, True): (4, 64, 1, 8), (8192, 8192, 4096, 64, 64, True, False, True): (4, 16, 3, 4), (8192, 8192, 4096, 128, 128, False, True, True): (4, 32, 1, 8), (8192, 8192, 4096, 128, 128, True, False, True): (4, 32, 3, 4), (8192, 8192, 4096, 256, 256, False, True, True): (1, 16, 1, 32), (8192, 8192, 4096, 
256, 256, True, False, True): (2, 16, 1, 32), (8192, 8192, 8192, 32, 32, False, True, True): (4, 128, 1, 8), (8192, 8192, 8192, 32, 32, True, False, True): (1, 64, 3, 2), (8192, 8192, 8192, 64, 64, False, True, True): (4, 64, 1, 4), (8192, 8192, 8192, 64, 64, True, False, True): (4, 32, 3, 4), (8192, 8192, 8192, 128, 128, False, True, True): (4, 64, 1, 16), (8192, 8192, 8192, 128, 128, True, False, True): (4, 64, 3, 4), (8192, 8192, 8192, 256, 256, False, True, True): (1, 32, 1, 32), (8192, 8192, 8192, 256, 256, True, False, True): (4, 32, 1, 32), (8192, 8192, 16384, 32, 32, False, True, True): (4, 256, 1, 8), (8192, 8192, 16384, 32, 32, True, False, True): (4, 128, 3, 2), (8192, 8192, 16384, 64, 64, False, True, True): (4, 128, 1, 4), (8192, 8192, 16384, 64, 64, True, False, True): (4, 64, 3, 4), (8192, 8192, 16384, 128, 128, False, True, True): (4, 128, 1, 16), (8192, 8192, 16384, 128, 128, True, False, True): (4, 128, 3, 4), (8192, 8192, 16384, 256, 256, False, True, True): (1, 64, 1, 32), (8192, 8192, 16384, 256, 256, True, False, True): (4, 64, 1, 32), (8192, 8192, 32768, 32, 32, False, True, True): (4, 512, 1, 8), (8192, 8192, 32768, 32, 32, True, False, True): (2, 256, 3, 2), (8192, 8192, 32768, 64, 64, False, True, True): (4, 256, 1, 4), (8192, 8192, 32768, 64, 64, True, False, True): (4, 128, 3, 4), (8192, 8192, 32768, 128, 128, False, True, True): (4, 256, 1, 16), (8192, 8192, 32768, 128, 128, True, False, True): (4, 256, 3, 4), (8192, 8192, 32768, 256, 256, False, True, True): (1, 128, 1, 32), (8192, 8192, 32768, 256, 256, True, False, True): (4, 128, 1, 32), (8192, 8192, 65536, 32, 32, False, True, True): (4, 1024, 1, 8), (8192, 8192, 65536, 32, 32, True, False, True): (4, 512, 3, 2), (8192, 8192, 65536, 64, 64, False, True, True): (4, 512, 1, 4), (8192, 8192, 65536, 64, 64, True, False, True): (4, 256, 3, 4), (8192, 8192, 65536, 128, 128, False, True, True): (4, 512, 1, 16), (8192, 8192, 65536, 128, 128, True, False, True): (4, 512, 3, 4), (8192, 8192, 65536, 256, 256, False, True, True): (1, 256, 1, 32), (8192, 8192, 65536, 256, 256, True, False, True): (4, 256, 1, 32), (8192, 8192, 65792, 32, 32, False, True, True): (4, 1028, 1, 8), (8192, 8192, 65792, 32, 32, True, False, True): (1, 514, 3, 2), (8192, 8192, 65792, 64, 64, False, True, True): (4, 1028, 1, 8), (8192, 8192, 65792, 64, 64, True, False, True): (2, 257, 3, 4), (8192, 8192, 65792, 128, 128, False, True, True): (4, 514, 1, 16), (8192, 8192, 65792, 128, 128, True, False, True): (2, 514, 3, 4), (8192, 8192, 65792, 256, 256, False, True, True): (1, 257, 1, 32), (8192, 8192, 65792, 256, 256, True, False, True): (1, 257, 1, 32), (8192, 8192, 131072, 32, 32, False, True, True): (4, 2048, 1, 8), (8192, 8192, 131072, 32, 32, True, False, True): (4, 1024, 3, 2), (8192, 8192, 131072, 64, 64, False, True, True): (4, 1024, 1, 4), (8192, 8192, 131072, 64, 64, True, False, True): (4, 512, 3, 4), (8192, 8192, 131072, 128, 128, False, True, True): (4, 1024, 1, 16), (8192, 8192, 131072, 128, 128, True, False, True): (4, 1024, 3, 4), (8192, 8192, 131072, 256, 256, False, True, True): (1, 512, 1, 32), (8192, 8192, 131072, 256, 256, True, False, True): (4, 512, 1, 32), (16384, 16384, 256, 32, 32, False, True, True): (4, 4, 1, 8), (16384, 16384, 256, 32, 32, True, False, True): (2, 2, 4, 2), (16384, 16384, 256, 64, 64, False, True, True): (2, 2, 1, 4), (16384, 16384, 256, 64, 64, True, False, True): (5, 1, 3, 4), (16384, 16384, 256, 128, 128, False, True, True): (6, 2, 1, 8), (16384, 16384, 256, 128, 128, True, False, True): (6, 2, 3, 
4), (16384, 16384, 256, 256, 256, False, True, True): (1, 1, 1, 32), (16384, 16384, 256, 256, 256, True, False, True): (1, 1, 1, 32), (16384, 16384, 512, 32, 32, False, True, True): (4, 8, 1, 8), (16384, 16384, 512, 32, 32, True, False, True): (1, 4, 4, 2), (16384, 16384, 512, 64, 64, False, True, True): (4, 4, 1, 4), (16384, 16384, 512, 64, 64, True, False, True): (2, 2, 3, 4), (16384, 16384, 512, 128, 128, False, True, True): (4, 4, 1, 8), (16384, 16384, 512, 128, 128, True, False, True): (4, 4, 3, 4), (16384, 16384, 512, 256, 256, False, True, True): (1, 2, 1, 32), (16384, 16384, 512, 256, 256, True, False, True): (2, 2, 1, 32), (16384, 16384, 1024, 32, 32, False, True, True): (4, 16, 1, 8), (16384, 16384, 1024, 32, 32, True, False, True): (1, 8, 3, 2), (16384, 16384, 1024, 64, 64, False, True, True): (4, 8, 1, 4), (16384, 16384, 1024, 64, 64, True, False, True): (4, 4, 3, 4), (16384, 16384, 1024, 128, 128, False, True, True): (4, 4, 1, 8), (16384, 16384, 1024, 128, 128, True, False, True): (4, 8, 3, 4), (16384, 16384, 1024, 256, 256, False, True, True): (1, 4, 1, 32), (16384, 16384, 1024, 256, 256, True, False, True): (4, 4, 1, 32), (16384, 16384, 2048, 32, 32, False, True, True): (4, 32, 1, 8), (16384, 16384, 2048, 32, 32, True, False, True): (2, 16, 3, 2), (16384, 16384, 2048, 64, 64, False, True, True): (4, 16, 1, 4), (16384, 16384, 2048, 64, 64, True, False, True): (4, 8, 3, 4), (16384, 16384, 2048, 128, 128, False, True, True): (4, 16, 1, 8), (16384, 16384, 2048, 128, 128, True, False, True): (4, 16, 3, 4), (16384, 16384, 2048, 256, 256, False, True, True): (1, 8, 1, 32), (16384, 16384, 2048, 256, 256, True, False, True): (4, 8, 1, 32), (16384, 16384, 4096, 32, 32, False, True, True): (4, 64, 1, 8), (16384, 16384, 4096, 32, 32, True, False, True): (2, 32, 3, 2), (16384, 16384, 4096, 64, 64, False, True, True): (2, 32, 1, 4), (16384, 16384, 4096, 64, 64, True, False, True): (4, 16, 3, 4), (16384, 16384, 4096, 128, 128, False, True, True): (4, 32, 1, 8), (16384, 16384, 4096, 128, 128, True, False, True): (4, 32, 3, 4), (16384, 16384, 4096, 256, 256, False, True, True): (1, 16, 1, 32), (16384, 16384, 4096, 256, 256, True, False, True): (4, 16, 1, 32), (16384, 16384, 8192, 32, 32, False, True, True): (4, 128, 1, 8), (16384, 16384, 8192, 32, 32, True, False, True): (2, 64, 3, 2), (16384, 16384, 8192, 64, 64, False, True, True): (4, 64, 1, 4), (16384, 16384, 8192, 64, 64, True, False, True): (4, 32, 3, 4), (16384, 16384, 8192, 128, 128, False, True, True): (4, 64, 1, 16), (16384, 16384, 8192, 128, 128, True, False, True): (4, 64, 3, 4), (16384, 16384, 8192, 256, 256, False, True, True): (1, 32, 1, 32), (16384, 16384, 8192, 256, 256, True, False, True): (4, 32, 1, 32), (16384, 16384, 16384, 32, 32, False, True, True): (4, 256, 1, 8), (16384, 16384, 16384, 32, 32, True, False, True): (2, 128, 3, 2), (16384, 16384, 16384, 64, 64, False, True, True): (4, 128, 1, 4), (16384, 16384, 16384, 64, 64, True, False, True): (4, 64, 3, 4), (16384, 16384, 16384, 128, 128, False, True, True): (1, 64, 1, 8), (16384, 16384, 16384, 128, 128, True, False, True): (4, 128, 3, 4), (16384, 16384, 16384, 256, 256, False, True, True): (1, 64, 1, 32), (16384, 16384, 16384, 256, 256, True, False, True): (4, 64, 1, 32), (16384, 16384, 32768, 32, 32, False, True, True): (4, 512, 1, 8), (16384, 16384, 32768, 32, 32, True, False, True): (1, 256, 3, 2), (16384, 16384, 32768, 64, 64, False, True, True): (4, 256, 1, 4), (16384, 16384, 32768, 64, 64, True, False, True): (4, 128, 3, 4), (16384, 16384, 32768, 128, 128, 
False, True, True): (4, 256, 1, 16), (16384, 16384, 32768, 128, 128, True, False, True): (4, 256, 3, 4), (16384, 16384, 32768, 256, 256, False, True, True): (1, 128, 1, 32), (16384, 16384, 32768, 256, 256, True, False, True): (4, 128, 1, 32), (16384, 16384, 65536, 32, 32, False, True, True): (4, 1024, 1, 8), (16384, 16384, 65536, 32, 32, True, False, True): (1, 512, 3, 2), (16384, 16384, 65536, 64, 64, False, True, True): (2, 512, 1, 4), (16384, 16384, 65536, 64, 64, True, False, True): (4, 256, 3, 4), (16384, 16384, 65536, 128, 128, False, True, True): (4, 512, 1, 16), (16384, 16384, 65536, 128, 128, True, False, True): (4, 512, 3, 4), (16384, 16384, 65536, 256, 256, False, True, True): (1, 256, 1, 32), (16384, 16384, 65536, 256, 256, True, False, True): (4, 256, 1, 32), (16384, 16384, 65792, 32, 32, False, True, True): (4, 1028, 1, 8), (16384, 16384, 65792, 32, 32, True, False, True): (1, 514, 3, 2), (16384, 16384, 65792, 64, 64, False, True, True): (2, 514, 1, 4), (16384, 16384, 65792, 64, 64, True, False, True): (2, 257, 3, 4), (16384, 16384, 65792, 128, 128, False, True, True): (2, 514, 1, 16), (16384, 16384, 65792, 128, 128, True, False, True): (2, 514, 3, 4), (16384, 16384, 65792, 256, 256, False, True, True): (1, 257, 1, 32), (16384, 16384, 65792, 256, 256, True, False, True): (1, 257, 1, 32), (16384, 16384, 131072, 32, 32, False, True, True): (4, 1024, 1, 8), (16384, 16384, 131072, 32, 32, True, False, True): (4, 512, 3, 4), (16384, 16384, 131072, 64, 64, False, True, True): (4, 1024, 1, 4), (16384, 16384, 131072, 64, 64, True, False, True): (4, 1024, 3, 2), (16384, 16384, 131072, 128, 128, False, True, True): (2, 1024, 3, 8), (16384, 16384, 131072, 128, 128, True, False, True): (4, 1024, 3, 4), (16384, 16384, 131072, 256, 256, False, True, True): (4, 512, 1, 32), (16384, 16384, 131072, 256, 256, True, False, True): (4, 512, 1, 32), (32768, 32768, 256, 32, 32, False, True, True): (4, 4, 1, 8), (32768, 32768, 256, 32, 32, True, False, True): (1, 2, 4, 2), (32768, 32768, 256, 64, 64, False, True, True): (2, 2, 1, 4), (32768, 32768, 256, 64, 64, True, False, True): (2, 1, 3, 4), (32768, 32768, 256, 128, 128, False, True, True): (4, 2, 1, 8), (32768, 32768, 256, 128, 128, True, False, True): (4, 2, 3, 4), (32768, 32768, 256, 256, 256, False, True, True): (1, 1, 1, 32), (32768, 32768, 256, 256, 256, True, False, True): (1, 1, 1, 32), (32768, 32768, 512, 32, 32, False, True, True): (4, 8, 1, 8), (32768, 32768, 512, 32, 32, True, False, True): (1, 4, 3, 2), (32768, 32768, 512, 64, 64, False, True, True): (4, 4, 1, 4), (32768, 32768, 512, 64, 64, True, False, True): (4, 2, 3, 4), (32768, 32768, 512, 128, 128, False, True, True): (1, 2, 1, 8), (32768, 32768, 512, 128, 128, True, False, True): (4, 4, 3, 4), (32768, 32768, 512, 256, 256, False, True, True): (1, 2, 1, 32), (32768, 32768, 512, 256, 256, True, False, True): (2, 2, 1, 32), (32768, 32768, 1024, 32, 32, False, True, True): (4, 16, 1, 8), (32768, 32768, 1024, 32, 32, True, False, True): (1, 8, 4, 2), (32768, 32768, 1024, 64, 64, False, True, True): (4, 8, 1, 4), (32768, 32768, 1024, 64, 64, True, False, True): (4, 4, 3, 4), (32768, 32768, 1024, 128, 128, False, True, True): (1, 4, 1, 8), (32768, 32768, 1024, 128, 128, True, False, True): (4, 8, 3, 4), (32768, 32768, 1024, 256, 256, False, True, True): (1, 4, 1, 32), (32768, 32768, 1024, 256, 256, True, False, True): (1, 4, 1, 32), (32768, 32768, 2048, 32, 32, False, True, True): (2, 32, 1, 8), (32768, 32768, 2048, 32, 32, True, False, True): (1, 16, 4, 2), (32768, 32768, 2048, 64, 
64, False, True, True): (2, 16, 1, 4), (32768, 32768, 2048, 64, 64, True, False, True): (4, 8, 3, 4), (32768, 32768, 2048, 128, 128, False, True, True): (1, 8, 1, 8), (32768, 32768, 2048, 128, 128, True, False, True): (4, 16, 3, 4), (32768, 32768, 2048, 256, 256, False, True, True): (1, 8, 1, 32), (32768, 32768, 2048, 256, 256, True, False, True): (4, 8, 1, 32), (32768, 32768, 4096, 32, 32, False, True, True): (2, 64, 1, 8), (32768, 32768, 4096, 32, 32, True, False, True): (2, 32, 3, 2), (32768, 32768, 4096, 64, 64, False, True, True): (2, 32, 1, 4), (32768, 32768, 4096, 64, 64, True, False, True): (2, 16, 3, 4), (32768, 32768, 4096, 128, 128, False, True, True): (1, 16, 1, 8), (32768, 32768, 4096, 128, 128, True, False, True): (2, 32, 3, 4), (32768, 32768, 4096, 256, 256, False, True, True): (1, 16, 1, 32), (32768, 32768, 4096, 256, 256, True, False, True): (4, 16, 1, 32), (32768, 32768, 8192, 32, 32, False, True, True): (2, 128, 1, 8), (32768, 32768, 8192, 32, 32, True, False, True): (2, 64, 3, 2), (32768, 32768, 8192, 64, 64, False, True, True): (2, 64, 1, 4), (32768, 32768, 8192, 64, 64, True, False, True): (2, 32, 3, 4), (32768, 32768, 8192, 128, 128, False, True, True): (1, 32, 1, 8), (32768, 32768, 8192, 128, 128, True, False, True): (4, 64, 3, 4), (32768, 32768, 8192, 256, 256, False, True, True): (1, 32, 1, 32), (32768, 32768, 8192, 256, 256, True, False, True): (4, 32, 1, 32), (32768, 32768, 16384, 32, 32, False, True, True): (2, 256, 1, 8), (32768, 32768, 16384, 32, 32, True, False, True): (2, 128, 4, 2), (32768, 32768, 16384, 64, 64, False, True, True): (2, 128, 1, 4), (32768, 32768, 16384, 64, 64, True, False, True): (4, 64, 3, 4), (32768, 32768, 16384, 128, 128, False, True, True): (1, 64, 1, 8), (32768, 32768, 16384, 128, 128, True, False, True): (4, 128, 3, 4), (32768, 32768, 16384, 256, 256, False, True, True): (1, 64, 1, 32), (32768, 32768, 16384, 256, 256, True, False, True): (2, 64, 1, 32), (32768, 32768, 32768, 32, 32, False, True, True): (2, 512, 1, 8), (32768, 32768, 32768, 32, 32, True, False, True): (4, 256, 3, 2), (32768, 32768, 32768, 64, 64, False, True, True): (1, 256, 1, 4), (32768, 32768, 32768, 64, 64, True, False, True): (2, 128, 3, 4), (32768, 32768, 32768, 128, 128, False, True, True): (1, 128, 1, 8), (32768, 32768, 32768, 128, 128, True, False, True): (2, 256, 3, 4), (32768, 32768, 32768, 256, 256, False, True, True): (1, 128, 1, 32), (32768, 32768, 32768, 256, 256, True, False, True): (1, 128, 1, 32), (32768, 32768, 65536, 32, 32, False, True, True): (2, 512, 1, 8), (32768, 32768, 65536, 32, 32, True, False, True): (3, 512, 4, 2), (32768, 32768, 65536, 64, 64, False, True, True): (1, 512, 1, 4), (32768, 32768, 65536, 64, 64, True, False, True): (2, 512, 3, 2), (32768, 32768, 65536, 128, 128, False, True, True): (1, 256, 1, 8), (32768, 32768, 65536, 128, 128, True, False, True): (2, 512, 3, 4), (32768, 32768, 65536, 256, 256, False, True, True): (1, 256, 1, 32), (32768, 32768, 65536, 256, 256, True, False, True): (1, 256, 1, 32), }, ("_int_bsr_dense_addmm", "NVIDIA A100-SXM4-80GB", (0, torch.int8, 0.56)): { (192, 192, 256, 64, 64, False, True, True): (3, 4, 3, 32), (192, 192, 256, 64, 64, True, False, True): (1, 4, 3, 4), (192, 192, 512, 64, 64, False, True, True): (1, 8, 1, 16), (192, 192, 512, 64, 64, True, False, True): (1, 8, 5, 4), (192, 192, 1024, 64, 64, False, True, True): (4, 16, 1, 16), (192, 192, 1024, 64, 64, True, False, True): (3, 16, 3, 4), (192, 192, 2048, 64, 64, False, True, True): (5, 32, 1, 8), (192, 192, 2048, 64, 64, True, False, 
True): (2, 32, 4, 4), (192, 192, 4096, 64, 64, False, True, True): (4, 64, 1, 16), (192, 192, 4096, 64, 64, True, False, True): (1, 32, 4, 4), (192, 192, 8192, 64, 64, False, True, True): (2, 128, 1, 8), (192, 192, 8192, 64, 64, True, False, True): (3, 64, 1, 4), (192, 192, 16384, 64, 64, False, True, True): (2, 256, 1, 8), (192, 192, 16384, 64, 64, True, False, True): (1, 128, 3, 2), (192, 192, 32768, 64, 64, False, True, True): (2, 512, 1, 8), (192, 192, 32768, 64, 64, True, False, True): (3, 128, 1, 4), (192, 192, 65536, 64, 64, False, True, True): (3, 1024, 1, 8), (192, 192, 65536, 64, 64, True, False, True): (1, 512, 3, 4), (192, 192, 131072, 64, 64, False, True, True): (1, 2048, 1, 8), (192, 192, 131072, 64, 64, True, False, True): (1, 512, 1, 4), (384, 384, 256, 128, 128, False, True, True): (4, 2, 1, 16), (384, 384, 256, 128, 128, True, False, True): (1, 2, 3, 4), (384, 384, 512, 128, 128, False, True, True): (2, 4, 1, 16), (384, 384, 512, 128, 128, True, False, True): (2, 4, 3, 4), (384, 384, 1024, 128, 128, False, True, True): (3, 8, 1, 32), (384, 384, 1024, 128, 128, True, False, True): (3, 8, 3, 4), (384, 384, 2048, 128, 128, False, True, True): (3, 16, 1, 32), (384, 384, 2048, 128, 128, True, False, True): (2, 16, 3, 4), (384, 384, 4096, 128, 128, False, True, True): (3, 32, 1, 32), (384, 384, 4096, 128, 128, True, False, True): (3, 32, 3, 4), (384, 384, 8192, 128, 128, False, True, True): (2, 64, 1, 32), (384, 384, 8192, 128, 128, True, False, True): (4, 64, 1, 4), (384, 384, 16384, 128, 128, False, True, True): (2, 128, 1, 32), (384, 384, 16384, 128, 128, True, False, True): (2, 128, 1, 4), (384, 384, 32768, 128, 128, False, True, True): (3, 256, 1, 16), (384, 384, 32768, 128, 128, True, False, True): (1, 256, 1, 4), (384, 384, 65536, 128, 128, False, True, True): (4, 512, 1, 16), (384, 384, 65536, 128, 128, True, False, True): (1, 512, 1, 4), (384, 384, 131072, 128, 128, False, True, True): (4, 1024, 1, 16), (384, 384, 131072, 128, 128, True, False, True): (1, 1024, 1, 4), (768, 768, 256, 256, 256, False, True, True): (1, 1, 1, 32), (768, 768, 256, 256, 256, True, False, True): (3, 1, 1, 32), (768, 768, 512, 256, 256, False, True, True): (1, 2, 1, 32), (768, 768, 512, 256, 256, True, False, True): (1, 2, 1, 32), (768, 768, 1024, 256, 256, False, True, True): (1, 4, 1, 32), (768, 768, 1024, 256, 256, True, False, True): (2, 4, 1, 32), (768, 768, 2048, 256, 256, False, True, True): (1, 8, 1, 32), (768, 768, 2048, 256, 256, True, False, True): (2, 8, 1, 32), (768, 768, 4096, 256, 256, False, True, True): (1, 16, 1, 32), (768, 768, 4096, 256, 256, True, False, True): (1, 16, 1, 32), (768, 768, 8192, 256, 256, False, True, True): (1, 32, 1, 32), (768, 768, 8192, 256, 256, True, False, True): (2, 32, 1, 32), (768, 768, 16384, 256, 256, False, True, True): (1, 64, 1, 32), (768, 768, 16384, 256, 256, True, False, True): (7, 64, 1, 32), (768, 768, 32768, 256, 256, False, True, True): (1, 128, 1, 32), (768, 768, 32768, 256, 256, True, False, True): (1, 128, 1, 32), (768, 768, 65536, 256, 256, False, True, True): (1, 256, 1, 32), (768, 768, 65536, 256, 256, True, False, True): (1, 256, 1, 32), (768, 768, 131072, 256, 256, False, True, True): (1, 512, 1, 32), (768, 768, 131072, 256, 256, True, False, True): (1, 512, 1, 32), }, ("_int_bsr_dense_addmm", "NVIDIA A100-SXM4-80GB", (0, torch.int8, 1.0)): { (256, 256, 256, 256, 256, False, True, True): (2, 1, 1, 4), (256, 256, 256, 256, 256, True, False, True): (2, 1, 2, 1), (256, 256, 512, 256, 256, False, True, True): (2, 1, 1, 2), (256, 
256, 512, 256, 256, True, False, True): (2, 2, 2, 8), (256, 256, 1024, 256, 256, False, True, True): (1, 4, 1, 4), (256, 256, 1024, 256, 256, True, False, True): (1, 2, 2, 4), (256, 256, 2048, 256, 256, False, True, True): (1, 4, 1, 2), (256, 256, 2048, 256, 256, True, False, True): (1, 8, 1, 2), (256, 256, 4096, 256, 256, False, True, True): (1, 16, 1, 4), (256, 256, 4096, 256, 256, True, False, True): (1, 16, 1, 2), (256, 256, 8192, 256, 256, False, True, True): (1, 16, 3, 4), (256, 256, 8192, 256, 256, True, False, True): (1, 8, 1, 4), (256, 256, 16384, 256, 256, False, True, True): (2, 16, 1, 8), (256, 256, 16384, 256, 256, True, False, True): (1, 32, 1, 2), (256, 256, 32768, 256, 256, False, True, True): (1, 128, 1, 8), (256, 256, 32768, 256, 256, True, False, True): (1, 128, 1, 4), (256, 256, 65536, 256, 256, False, True, True): (1, 4, 1, 1), (256, 256, 65536, 256, 256, True, False, True): (1, 128, 1, 4), (256, 256, 65792, 256, 256, False, True, True): (1, 128, 2, 16), (256, 256, 65792, 256, 256, True, False, True): (1, 16, 3, 4), (256, 256, 131072, 256, 256, False, True, True): (1, 512, 1, 4), (256, 256, 131072, 256, 256, True, False, True): (1, 512, 1, 2), }, ("bsr_dense_addmm", "NVIDIA A100-SXM4-80GB", (0, torch.bfloat16, 0.5)): { (16, 16, 16, 16, 16, False, False, False): (2, 1, 1, 2), (16, 16, 16, 16, 16, False, False, True): (1, 1, 1, 4), (16, 16, 16, 16, 16, False, True, False): (1, 1, 3, 16), (16, 16, 16, 16, 16, False, True, True): (1, 1, 1, 8), (16, 16, 16, 16, 16, True, False, False): (2, 1, 1, 8), (16, 16, 16, 16, 16, True, False, True): (1, 1, 1, 8), (16, 16, 32, 16, 16, False, False, False): (1, 2, 1, 8), (16, 16, 32, 16, 16, False, False, True): (1, 2, 2, 4), (16, 16, 32, 16, 16, False, True, False): (1, 1, 2, 4), (16, 16, 32, 16, 16, False, True, True): (1, 1, 2, 4), (16, 16, 32, 16, 16, True, False, False): (1, 1, 2, 4), (16, 16, 32, 16, 16, True, False, True): (2, 2, 1, 2), (16, 16, 64, 16, 16, False, False, False): (1, 4, 2, 4), (16, 16, 64, 16, 16, False, False, True): (1, 2, 1, 2), (16, 16, 64, 16, 16, False, True, False): (2, 1, 1, 2), (16, 16, 64, 16, 16, False, True, True): (1, 4, 1, 8), (16, 16, 64, 16, 16, True, False, False): (1, 4, 1, 1), (16, 16, 64, 16, 16, True, False, True): (1, 4, 2, 4), (16, 32, 16, 16, 16, False, False, False): (1, 1, 2, 2), (16, 32, 16, 16, 16, False, False, True): (1, 1, 1, 4), (16, 32, 16, 16, 16, False, True, False): (1, 1, 1, 2), (16, 32, 16, 16, 16, False, True, True): (1, 1, 1, 1), (16, 32, 16, 16, 16, True, False, False): (1, 1, 1, 2), (16, 32, 16, 16, 16, True, False, True): (2, 1, 1, 2), (16, 32, 16, 16, 32, False, False, False): (1, 1, 1, 4), (16, 32, 16, 16, 32, False, False, True): (1, 1, 1, 8), (16, 32, 16, 16, 32, False, True, False): (1, 1, 1, 8), (16, 32, 16, 16, 32, False, True, True): (1, 1, 2, 4), (16, 32, 16, 16, 32, True, False, False): (1, 1, 1, 2), (16, 32, 16, 16, 32, True, False, True): (1, 1, 1, 1), (16, 32, 32, 16, 16, False, False, False): (2, 2, 1, 4), (16, 32, 32, 16, 16, False, False, True): (2, 2, 1, 2), (16, 32, 32, 16, 16, False, True, False): (1, 1, 2, 8), (16, 32, 32, 16, 16, False, True, True): (1, 2, 1, 1), (16, 32, 32, 16, 16, True, False, False): (1, 1, 1, 8), (16, 32, 32, 16, 16, True, False, True): (1, 2, 1, 4), (16, 32, 32, 16, 32, False, False, False): (1, 1, 2, 8), (16, 32, 32, 16, 32, False, False, True): (2, 1, 1, 8), (16, 32, 32, 16, 32, False, True, False): (1, 1, 1, 4), (16, 32, 32, 16, 32, False, True, True): (1, 1, 1, 4), (16, 32, 32, 16, 32, True, False, False): (1, 2, 1, 8), 
(16, 32, 32, 16, 32, True, False, True): (1, 1, 1, 4), (16, 32, 64, 16, 16, False, False, False): (1, 4, 3, 8), (16, 32, 64, 16, 16, False, False, True): (1, 4, 1, 4), (16, 32, 64, 16, 16, False, True, False): (1, 4, 1, 4), (16, 32, 64, 16, 16, False, True, True): (2, 4, 1, 4), (16, 32, 64, 16, 16, True, False, False): (1, 2, 1, 4), (16, 32, 64, 16, 16, True, False, True): (1, 2, 1, 4), (16, 32, 64, 16, 32, False, False, False): (1, 4, 1, 8), (16, 32, 64, 16, 32, False, False, True): (1, 4, 1, 4), (16, 32, 64, 16, 32, False, True, False): (1, 4, 1, 2), (16, 32, 64, 16, 32, False, True, True): (1, 2, 1, 4), (16, 32, 64, 16, 32, True, False, False): (1, 2, 1, 4), (16, 32, 64, 16, 32, True, False, True): (1, 2, 1, 2), (16, 64, 16, 16, 32, False, False, False): (1, 1, 1, 2), (16, 64, 16, 16, 32, False, False, True): (1, 1, 2, 2), (16, 64, 16, 16, 32, False, True, False): (1, 1, 2, 8), (16, 64, 16, 16, 32, False, True, True): (1, 1, 1, 4), (16, 64, 16, 16, 32, True, False, False): (1, 1, 1, 8), (16, 64, 16, 16, 32, True, False, True): (1, 1, 1, 4), (16, 64, 32, 16, 32, False, False, False): (1, 2, 1, 2), (16, 64, 32, 16, 32, False, False, True): (1, 2, 1, 4), (16, 64, 32, 16, 32, False, True, False): (1, 2, 1, 4), (16, 64, 32, 16, 32, False, True, True): (2, 2, 1, 4), (16, 64, 32, 16, 32, True, False, False): (1, 2, 1, 4), (16, 64, 32, 16, 32, True, False, True): (1, 2, 1, 8), (16, 64, 64, 16, 32, False, False, False): (1, 2, 1, 4), (16, 64, 64, 16, 32, False, False, True): (1, 4, 2, 2), (16, 64, 64, 16, 32, False, True, False): (1, 1, 1, 4), (16, 64, 64, 16, 32, False, True, True): (1, 4, 1, 2), (16, 64, 64, 16, 32, True, False, False): (1, 2, 1, 4), (16, 64, 64, 16, 32, True, False, True): (1, 4, 1, 4), (32, 16, 16, 16, 16, False, False, False): (1, 1, 1, 8), (32, 16, 16, 16, 16, False, False, True): (1, 1, 2, 4), (32, 16, 16, 16, 16, False, True, False): (1, 1, 1, 4), (32, 16, 16, 16, 16, False, True, True): (1, 1, 2, 4), (32, 16, 16, 16, 16, True, False, False): (1, 1, 1, 2), (32, 16, 16, 16, 16, True, False, True): (1, 1, 1, 4), (32, 16, 32, 16, 16, False, False, False): (1, 1, 1, 4), (32, 16, 32, 16, 16, False, False, True): (2, 2, 1, 4), (32, 16, 32, 16, 16, False, True, False): (1, 2, 2, 2), (32, 16, 32, 16, 16, False, True, True): (2, 2, 1, 4), (32, 16, 32, 16, 16, True, False, False): (1, 2, 2, 8), (32, 16, 32, 16, 16, True, False, True): (1, 2, 1, 2), (32, 16, 64, 16, 16, False, False, False): (1, 4, 1, 4), (32, 16, 64, 16, 16, False, False, True): (1, 4, 2, 4), (32, 16, 64, 16, 16, False, True, False): (1, 2, 2, 2), (32, 16, 64, 16, 16, False, True, True): (3, 4, 1, 4), (32, 16, 64, 16, 16, True, False, False): (1, 2, 1, 2), (32, 16, 64, 16, 16, True, False, True): (1, 2, 1, 4), (32, 32, 16, 16, 16, False, False, False): (1, 1, 3, 4), (32, 32, 16, 16, 16, False, False, True): (1, 1, 1, 4), (32, 32, 16, 16, 16, False, True, False): (1, 1, 1, 2), (32, 32, 16, 16, 16, False, True, True): (1, 1, 1, 4), (32, 32, 16, 16, 16, True, False, False): (1, 1, 1, 4), (32, 32, 16, 16, 16, True, False, True): (1, 1, 2, 2), (32, 32, 16, 16, 32, False, False, False): (2, 1, 1, 4), (32, 32, 16, 16, 32, False, False, True): (1, 1, 1, 4), (32, 32, 16, 16, 32, False, True, False): (1, 1, 1, 4), (32, 32, 16, 16, 32, False, True, True): (3, 1, 2, 4), (32, 32, 16, 16, 32, True, False, False): (1, 1, 1, 4), (32, 32, 16, 16, 32, True, False, True): (1, 1, 1, 4), (32, 32, 16, 32, 32, False, False, False): (1, 1, 1, 8), (32, 32, 16, 32, 32, False, False, True): (1, 1, 1, 4), (32, 32, 16, 32, 32, False, True, 
False): (1, 1, 2, 1), (32, 32, 16, 32, 32, False, True, True): (2, 1, 2, 2), (32, 32, 16, 32, 32, True, False, False): (1, 1, 1, 8), (32, 32, 16, 32, 32, True, False, True): (2, 1, 3, 4), (32, 32, 32, 16, 16, False, False, False): (1, 2, 1, 4), (32, 32, 32, 16, 16, False, False, True): (2, 2, 1, 4), (32, 32, 32, 16, 16, False, True, False): (1, 1, 1, 8), (32, 32, 32, 16, 16, False, True, True): (2, 2, 1, 4), (32, 32, 32, 16, 16, True, False, False): (1, 1, 1, 4), (32, 32, 32, 16, 16, True, False, True): (2, 2, 2, 4), (32, 32, 32, 16, 32, False, False, False): (2, 2, 1, 8), (32, 32, 32, 16, 32, False, False, True): (1, 2, 1, 2), (32, 32, 32, 16, 32, False, True, False): (1, 2, 1, 4), (32, 32, 32, 16, 32, False, True, True): (1, 2, 1, 4), (32, 32, 32, 16, 32, True, False, False): (1, 2, 1, 4), (32, 32, 32, 16, 32, True, False, True): (1, 2, 1, 2), (32, 32, 32, 32, 32, False, False, False): (1, 1, 3, 8), (32, 32, 32, 32, 32, False, False, True): (1, 1, 1, 8), (32, 32, 32, 32, 32, False, True, False): (2, 1, 3, 4), (32, 32, 32, 32, 32, False, True, True): (2, 1, 1, 2), (32, 32, 32, 32, 32, True, False, False): (1, 1, 1, 2), (32, 32, 32, 32, 32, True, False, True): (4, 1, 1, 1), (32, 32, 64, 16, 16, False, False, False): (1, 4, 1, 4), (32, 32, 64, 16, 16, False, False, True): (1, 4, 1, 4), (32, 32, 64, 16, 16, False, True, False): (1, 2, 1, 8), (32, 32, 64, 16, 16, False, True, True): (1, 4, 1, 2), (32, 32, 64, 16, 16, True, False, False): (2, 4, 1, 2), (32, 32, 64, 16, 16, True, False, True): (1, 4, 1, 2), (32, 32, 64, 16, 32, False, False, False): (1, 2, 1, 8), (32, 32, 64, 16, 32, False, False, True): (1, 4, 2, 2), (32, 32, 64, 16, 32, False, True, False): (1, 2, 1, 4), (32, 32, 64, 16, 32, False, True, True): (1, 4, 1, 4), (32, 32, 64, 16, 32, True, False, False): (1, 4, 2, 2), (32, 32, 64, 16, 32, True, False, True): (3, 4, 2, 2), (32, 32, 64, 32, 32, False, False, False): (2, 2, 1, 4), (32, 32, 64, 32, 32, False, False, True): (1, 2, 1, 4), (32, 32, 64, 32, 32, False, True, False): (1, 1, 1, 8), (32, 32, 64, 32, 32, False, True, True): (1, 1, 1, 4), (32, 32, 64, 32, 32, True, False, False): (1, 2, 1, 2), (32, 32, 64, 32, 32, True, False, True): (3, 2, 1, 8), (32, 64, 16, 16, 32, False, False, False): (1, 1, 2, 2), (32, 64, 16, 16, 32, False, False, True): (1, 1, 1, 4), (32, 64, 16, 16, 32, False, True, False): (1, 1, 2, 4), (32, 64, 16, 16, 32, False, True, True): (1, 1, 1, 4), (32, 64, 16, 16, 32, True, False, False): (1, 1, 1, 2), (32, 64, 16, 16, 32, True, False, True): (2, 1, 2, 2), (32, 64, 16, 32, 32, False, False, False): (1, 1, 1, 1), (32, 64, 16, 32, 32, False, False, True): (2, 1, 1, 4), (32, 64, 16, 32, 32, False, True, False): (1, 1, 1, 1), (32, 64, 16, 32, 32, False, True, True): (1, 1, 2, 2), (32, 64, 16, 32, 32, True, False, False): (1, 1, 2, 4), (32, 64, 16, 32, 32, True, False, True): (1, 1, 1, 4), (32, 64, 32, 16, 32, False, False, False): (2, 2, 1, 4), (32, 64, 32, 16, 32, False, False, True): (1, 2, 1, 4), (32, 64, 32, 16, 32, False, True, False): (1, 1, 1, 4), (32, 64, 32, 16, 32, False, True, True): (2, 2, 3, 4), (32, 64, 32, 16, 32, True, False, False): (1, 1, 1, 2), (32, 64, 32, 16, 32, True, False, True): (1, 2, 1, 2), (32, 64, 32, 32, 32, False, False, False): (1, 1, 1, 2), (32, 64, 32, 32, 32, False, False, True): (2, 1, 1, 4), (32, 64, 32, 32, 32, False, True, False): (1, 1, 1, 8), (32, 64, 32, 32, 32, False, True, True): (1, 1, 2, 4), (32, 64, 32, 32, 32, True, False, False): (2, 1, 1, 4), (32, 64, 32, 32, 32, True, False, True): (1, 1, 2, 4), (32, 64, 64, 16, 
32, False, False, False): (1, 4, 1, 4), (32, 64, 64, 16, 32, False, False, True): (1, 4, 2, 4), (32, 64, 64, 16, 32, False, True, False): (1, 4, 2, 2), (32, 64, 64, 16, 32, False, True, True): (1, 4, 1, 4), (32, 64, 64, 16, 32, True, False, False): (1, 4, 1, 8), (32, 64, 64, 16, 32, True, False, True): (1, 4, 2, 1), (32, 64, 64, 32, 32, False, False, False): (1, 1, 1, 4), (32, 64, 64, 32, 32, False, False, True): (2, 2, 1, 4), (32, 64, 64, 32, 32, False, True, False): (1, 1, 1, 4), (32, 64, 64, 32, 32, False, True, True): (2, 2, 1, 4), (32, 64, 64, 32, 32, True, False, False): (1, 2, 2, 4), (32, 64, 64, 32, 32, True, False, True): (2, 2, 3, 4), (64, 32, 16, 32, 32, False, False, False): (1, 1, 1, 4), (64, 32, 16, 32, 32, False, False, True): (1, 1, 1, 4), (64, 32, 16, 32, 32, False, True, False): (1, 1, 1, 8), (64, 32, 16, 32, 32, False, True, True): (1, 1, 1, 4), (64, 32, 16, 32, 32, True, False, False): (1, 1, 1, 16), (64, 32, 16, 32, 32, True, False, True): (2, 1, 1, 4), (64, 32, 32, 32, 32, False, False, False): (1, 1, 3, 4), (64, 32, 32, 32, 32, False, False, True): (2, 1, 1, 4), (64, 32, 32, 32, 32, False, True, False): (1, 1, 2, 4), (64, 32, 32, 32, 32, False, True, True): (2, 1, 1, 4), (64, 32, 32, 32, 32, True, False, False): (2, 1, 1, 16), (64, 32, 32, 32, 32, True, False, True): (2, 1, 1, 4), (64, 32, 64, 32, 32, False, False, False): (1, 2, 1, 4), (64, 32, 64, 32, 32, False, False, True): (2, 2, 1, 4), (64, 32, 64, 32, 32, False, True, False): (1, 1, 1, 4), (64, 32, 64, 32, 32, False, True, True): (2, 2, 1, 4), (64, 32, 64, 32, 32, True, False, False): (1, 2, 1, 8), (64, 32, 64, 32, 32, True, False, True): (2, 2, 3, 4), (64, 64, 16, 32, 32, False, False, False): (1, 1, 2, 16), (64, 64, 16, 32, 32, False, False, True): (1, 1, 3, 4), (64, 64, 16, 32, 32, False, True, False): (1, 1, 1, 2), (64, 64, 16, 32, 32, False, True, True): (2, 1, 1, 4), (64, 64, 16, 32, 32, True, False, False): (2, 1, 3, 2), (64, 64, 16, 32, 32, True, False, True): (1, 1, 2, 4), (64, 64, 32, 32, 32, False, False, False): (1, 1, 1, 8), (64, 64, 32, 32, 32, False, False, True): (2, 1, 2, 4), (64, 64, 32, 32, 32, False, True, False): (2, 1, 1, 4), (64, 64, 32, 32, 32, False, True, True): (1, 1, 2, 4), (64, 64, 32, 32, 32, True, False, False): (2, 1, 1, 4), (64, 64, 32, 32, 32, True, False, True): (1, 1, 2, 4), (64, 64, 64, 32, 32, False, False, False): (1, 2, 2, 4), (64, 64, 64, 32, 32, False, False, True): (1, 2, 2, 2), (64, 64, 64, 32, 32, False, True, False): (1, 2, 1, 2), (64, 64, 64, 32, 32, False, True, True): (1, 2, 1, 4), (64, 64, 64, 32, 32, True, False, False): (1, 2, 1, 4), (64, 64, 64, 32, 32, True, False, True): (1, 2, 1, 4), (192, 192, 256, 16, 16, False, True, True): (1, 8, 5, 4), (192, 192, 256, 16, 16, True, False, True): (2, 8, 5, 2), (192, 192, 256, 32, 32, False, True, True): (1, 8, 6, 4), (192, 192, 256, 32, 32, True, False, True): (3, 8, 5, 2), (192, 192, 512, 16, 16, False, True, True): (1, 16, 5, 2), (192, 192, 512, 16, 16, True, False, True): (1, 8, 4, 2), (192, 192, 512, 32, 32, False, True, True): (2, 16, 5, 4), (192, 192, 512, 32, 32, True, False, True): (2, 8, 5, 2), (192, 192, 1024, 16, 16, False, True, True): (1, 16, 3, 4), (192, 192, 1024, 16, 16, True, False, True): (1, 16, 6, 2), (192, 192, 1024, 32, 32, False, True, True): (1, 32, 3, 4), (192, 192, 1024, 32, 32, True, False, True): (1, 16, 4, 2), (192, 192, 2048, 16, 16, False, True, True): (1, 32, 1, 4), (192, 192, 2048, 16, 16, True, False, True): (4, 32, 4, 2), (192, 192, 2048, 32, 32, False, True, True): (1, 16, 3, 8), 
(192, 192, 2048, 32, 32, True, False, True): (2, 32, 4, 2), (192, 192, 4096, 16, 16, False, True, True): (2, 64, 1, 4), (192, 192, 4096, 16, 16, True, False, True): (1, 32, 3, 2), (192, 192, 4096, 32, 32, False, True, True): (1, 64, 1, 8), (192, 192, 4096, 32, 32, True, False, True): (2, 32, 4, 4), (192, 192, 8192, 16, 16, False, True, True): (1, 64, 1, 4), (192, 192, 8192, 16, 16, True, False, True): (2, 32, 3, 1), (192, 192, 8192, 32, 32, False, True, True): (3, 128, 1, 4), (192, 192, 8192, 32, 32, True, False, True): (1, 64, 3, 4), (192, 192, 16384, 16, 16, False, True, True): (1, 128, 1, 4), (192, 192, 16384, 16, 16, True, False, True): (4, 64, 3, 1), (192, 192, 16384, 32, 32, False, True, True): (1, 128, 1, 4), (192, 192, 16384, 32, 32, True, False, True): (1, 64, 3, 4), (192, 192, 32768, 16, 16, False, True, True): (2, 256, 1, 2), (192, 192, 32768, 16, 16, True, False, True): (1, 128, 3, 4), (192, 192, 32768, 32, 32, False, True, True): (2, 256, 1, 4), (192, 192, 32768, 32, 32, True, False, True): (4, 128, 3, 4), (192, 192, 65536, 16, 16, False, True, True): (2, 512, 1, 2), (192, 192, 65536, 16, 16, True, False, True): (2, 256, 3, 2), (192, 192, 65536, 32, 32, False, True, True): (2, 512, 1, 4), (192, 192, 65536, 32, 32, True, False, True): (1, 256, 3, 4), (192, 192, 131072, 16, 16, False, True, True): (4, 1024, 1, 2), (192, 192, 131072, 16, 16, True, False, True): (3, 512, 3, 2), (192, 192, 131072, 32, 32, False, True, True): (1, 1024, 1, 2), (192, 192, 131072, 32, 32, True, False, True): (1, 512, 3, 4), (256, 256, 256, 16, 16, False, True, True): (4, 8, 5, 1), (256, 256, 256, 16, 16, True, False, True): (2, 8, 4, 2), (256, 256, 256, 32, 32, False, True, True): (2, 8, 5, 2), (256, 256, 256, 32, 32, True, False, True): (1, 8, 5, 4), (256, 256, 256, 64, 64, False, True, True): (2, 4, 4, 4), (256, 256, 256, 64, 64, True, False, True): (1, 4, 3, 4), (256, 256, 256, 128, 128, False, True, True): (4, 2, 2, 8), (256, 256, 256, 128, 128, True, False, True): (1, 2, 2, 8), (256, 256, 512, 16, 16, False, True, True): (1, 16, 5, 1), (256, 256, 512, 16, 16, True, False, True): (3, 16, 3, 2), (256, 256, 512, 32, 32, False, True, True): (2, 8, 5, 2), (256, 256, 512, 32, 32, True, False, True): (1, 16, 4, 4), (256, 256, 512, 64, 64, False, True, True): (1, 8, 4, 4), (256, 256, 512, 64, 64, True, False, True): (3, 8, 3, 4), (256, 256, 512, 128, 128, False, True, True): (1, 4, 2, 8), (256, 256, 512, 128, 128, True, False, True): (1, 4, 2, 8), (256, 256, 1024, 16, 16, False, True, True): (1, 16, 5, 4), (256, 256, 1024, 16, 16, True, False, True): (5, 16, 4, 2), (256, 256, 1024, 32, 32, False, True, True): (1, 32, 5, 2), (256, 256, 1024, 32, 32, True, False, True): (2, 16, 5, 2), (256, 256, 1024, 64, 64, False, True, True): (1, 16, 4, 4), (256, 256, 1024, 64, 64, True, False, True): (1, 16, 4, 4), (256, 256, 1024, 128, 128, False, True, True): (1, 8, 2, 8), (256, 256, 1024, 128, 128, True, False, True): (1, 8, 2, 8), (256, 256, 2048, 16, 16, False, True, True): (1, 16, 4, 4), (256, 256, 2048, 16, 16, True, False, True): (2, 32, 5, 1), (256, 256, 2048, 32, 32, False, True, True): (1, 64, 4, 1), (256, 256, 2048, 32, 32, True, False, True): (2, 32, 4, 2), (256, 256, 2048, 64, 64, False, True, True): (8, 16, 5, 4), (256, 256, 2048, 64, 64, True, False, True): (1, 16, 4, 4), (256, 256, 2048, 128, 128, False, True, True): (2, 16, 2, 8), (256, 256, 2048, 128, 128, True, False, True): (1, 16, 2, 8), (256, 256, 4096, 16, 16, False, True, True): (1, 64, 1, 4), (256, 256, 4096, 16, 16, True, False, True): (1, 16, 
3, 2), (256, 256, 4096, 32, 32, False, True, True): (6, 32, 3, 2), (256, 256, 4096, 32, 32, True, False, True): (4, 32, 4, 2), (256, 256, 4096, 64, 64, False, True, True): (6, 64, 3, 4), (256, 256, 4096, 64, 64, True, False, True): (2, 64, 3, 4), (256, 256, 4096, 128, 128, False, True, True): (1, 32, 2, 8), (256, 256, 4096, 128, 128, True, False, True): (1, 32, 2, 8), (256, 256, 8192, 16, 16, False, True, True): (2, 32, 3, 4), (256, 256, 8192, 16, 16, True, False, True): (4, 64, 3, 2), (256, 256, 8192, 32, 32, False, True, True): (1, 64, 3, 4), (256, 256, 8192, 32, 32, True, False, True): (3, 128, 1, 2), (256, 256, 8192, 64, 64, False, True, True): (9, 128, 1, 4), (256, 256, 8192, 64, 64, True, False, True): (8, 128, 1, 4), (256, 256, 8192, 128, 128, False, True, True): (7, 64, 1, 4), (256, 256, 8192, 128, 128, True, False, True): (1, 32, 1, 16), (256, 256, 16384, 16, 16, False, True, True): (3, 128, 3, 2), (256, 256, 16384, 16, 16, True, False, True): (5, 64, 3, 2), (256, 256, 16384, 32, 32, False, True, True): (3, 128, 3, 2), (256, 256, 16384, 32, 32, True, False, True): (1, 128, 3, 2), (256, 256, 16384, 64, 64, False, True, True): (3, 128, 1, 4), (256, 256, 16384, 64, 64, True, False, True): (2, 128, 1, 4), (256, 256, 16384, 128, 128, False, True, True): (7, 128, 1, 4), (256, 256, 16384, 128, 128, True, False, True): (1, 128, 2, 8), (256, 256, 32768, 16, 16, False, True, True): (2, 128, 3, 2), (256, 256, 32768, 16, 16, True, False, True): (1, 128, 3, 2), (256, 256, 32768, 32, 32, False, True, True): (1, 256, 3, 4), (256, 256, 32768, 32, 32, True, False, True): (3, 256, 3, 2), (256, 256, 32768, 64, 64, False, True, True): (1, 256, 1, 4), (256, 256, 32768, 64, 64, True, False, True): (3, 256, 1, 4), (256, 256, 32768, 128, 128, False, True, True): (9, 256, 1, 4), (256, 256, 32768, 128, 128, True, False, True): (2, 256, 1, 4), (256, 256, 65536, 16, 16, False, True, True): (1, 256, 3, 2), (256, 256, 65536, 16, 16, True, False, True): (1, 256, 3, 2), (256, 256, 65536, 32, 32, False, True, True): (2, 512, 3, 2), (256, 256, 65536, 32, 32, True, False, True): (2, 512, 3, 2), (256, 256, 65536, 64, 64, False, True, True): (2, 512, 1, 4), (256, 256, 65536, 64, 64, True, False, True): (1, 512, 1, 4), (256, 256, 65536, 128, 128, False, True, True): (7, 512, 1, 4), (256, 256, 65536, 128, 128, True, False, True): (2, 512, 1, 4), (256, 256, 131072, 16, 16, False, True, True): (1, 512, 3, 2), (256, 256, 131072, 16, 16, True, False, True): (1, 512, 3, 2), (256, 256, 131072, 32, 32, False, True, True): (1, 1024, 3, 2), (256, 256, 131072, 32, 32, True, False, True): (1, 1024, 3, 2), (256, 256, 131072, 64, 64, False, True, True): (1, 1024, 1, 4), (256, 256, 131072, 64, 64, True, False, True): (1, 1024, 1, 4), (256, 256, 131072, 128, 128, False, True, True): (3, 1024, 1, 4), (256, 256, 131072, 128, 128, True, False, True): (1, 1024, 1, 4), (384, 384, 256, 16, 16, False, True, True): (1, 8, 5, 2), (384, 384, 256, 16, 16, True, False, True): (3, 4, 5, 2), (384, 384, 256, 32, 32, False, True, True): (2, 8, 4, 4), (384, 384, 256, 32, 32, True, False, True): (1, 4, 6, 2), (384, 384, 256, 64, 64, False, True, True): (2, 4, 4, 4), (384, 384, 256, 64, 64, True, False, True): (2, 4, 4, 4), (384, 384, 512, 16, 16, False, True, True): (1, 8, 4, 2), (384, 384, 512, 16, 16, True, False, True): (1, 4, 5, 4), (384, 384, 512, 32, 32, False, True, True): (1, 8, 4, 4), (384, 384, 512, 32, 32, True, False, True): (3, 8, 5, 2), (384, 384, 512, 64, 64, False, True, True): (3, 8, 3, 4), (384, 384, 512, 64, 64, True, False, True): 
(5, 8, 5, 4), (384, 384, 1024, 16, 16, False, True, True): (3, 16, 4, 2), (384, 384, 1024, 16, 16, True, False, True): (1, 8, 4, 4), (384, 384, 1024, 32, 32, False, True, True): (6, 32, 3, 2), (384, 384, 1024, 32, 32, True, False, True): (3, 8, 4, 4), (384, 384, 1024, 64, 64, False, True, True): (3, 16, 3, 4), (384, 384, 1024, 64, 64, True, False, True): (2, 16, 4, 4), (384, 384, 2048, 16, 16, False, True, True): (1, 32, 1, 4), (384, 384, 2048, 16, 16, True, False, True): (1, 16, 5, 2), (384, 384, 2048, 32, 32, False, True, True): (1, 32, 1, 8), (384, 384, 2048, 32, 32, True, False, True): (1, 8, 4, 4), (384, 384, 2048, 64, 64, False, True, True): (4, 16, 3, 4), (384, 384, 2048, 64, 64, True, False, True): (1, 16, 3, 8), (384, 384, 4096, 16, 16, False, True, True): (5, 32, 1, 4), (384, 384, 4096, 16, 16, True, False, True): (6, 32, 3, 2), (384, 384, 4096, 32, 32, False, True, True): (1, 32, 1, 8), (384, 384, 4096, 32, 32, True, False, True): (1, 16, 3, 4), (384, 384, 4096, 64, 64, False, True, True): (1, 64, 1, 4), (384, 384, 4096, 64, 64, True, False, True): (2, 32, 3, 4), (384, 384, 8192, 16, 16, False, True, True): (2, 64, 1, 4), (384, 384, 8192, 16, 16, True, False, True): (3, 32, 3, 2), (384, 384, 8192, 32, 32, False, True, True): (5, 64, 1, 8), (384, 384, 8192, 32, 32, True, False, True): (1, 32, 3, 2), (384, 384, 8192, 64, 64, False, True, True): (1, 128, 1, 4), (384, 384, 8192, 64, 64, True, False, True): (3, 64, 3, 4), (384, 384, 16384, 16, 16, False, True, True): (1, 128, 1, 2), (384, 384, 16384, 16, 16, True, False, True): (4, 128, 3, 2), (384, 384, 16384, 32, 32, False, True, True): (3, 128, 1, 4), (384, 384, 16384, 32, 32, True, False, True): (1, 128, 3, 2), (384, 384, 16384, 64, 64, False, True, True): (3, 256, 1, 4), (384, 384, 16384, 64, 64, True, False, True): (2, 128, 3, 4), (384, 384, 32768, 16, 16, False, True, True): (1, 256, 1, 2), (384, 384, 32768, 16, 16, True, False, True): (1, 128, 3, 4), (384, 384, 32768, 32, 32, False, True, True): (1, 256, 1, 2), (384, 384, 32768, 32, 32, True, False, True): (1, 128, 3, 4), (384, 384, 32768, 64, 64, False, True, True): (2, 256, 1, 4), (384, 384, 32768, 64, 64, True, False, True): (1, 256, 3, 4), (384, 384, 65536, 16, 16, False, True, True): (4, 512, 1, 2), (384, 384, 65536, 16, 16, True, False, True): (1, 256, 3, 4), (384, 384, 65536, 32, 32, False, True, True): (1, 512, 1, 2), (384, 384, 65536, 32, 32, True, False, True): (1, 256, 3, 4), (384, 384, 65536, 64, 64, False, True, True): (3, 512, 1, 4), (384, 384, 65536, 64, 64, True, False, True): (3, 256, 3, 4), (384, 384, 131072, 16, 16, False, True, True): (1, 512, 1, 1), (384, 384, 131072, 16, 16, True, False, True): (1, 512, 3, 4), (384, 384, 131072, 32, 32, False, True, True): (1, 512, 1, 4), (384, 384, 131072, 32, 32, True, False, True): (1, 512, 3, 4), (384, 384, 131072, 64, 64, False, True, True): (3, 1024, 1, 4), (384, 384, 131072, 64, 64, True, False, True): (3, 512, 3, 4), (512, 512, 256, 16, 16, False, True, True): (2, 4, 5, 4), (512, 512, 256, 16, 16, True, False, True): (3, 4, 5, 4), (512, 512, 256, 32, 32, False, True, True): (1, 4, 5, 2), (512, 512, 256, 32, 32, True, False, True): (4, 8, 5, 1), (512, 512, 256, 64, 64, False, True, True): (4, 4, 5, 4), (512, 512, 256, 64, 64, True, False, True): (5, 4, 5, 4), (512, 512, 256, 128, 128, False, True, True): (3, 2, 2, 8), (512, 512, 256, 128, 128, True, False, True): (2, 2, 2, 8), (512, 512, 512, 16, 16, False, True, True): (1, 8, 5, 4), (512, 512, 512, 16, 16, True, False, True): (4, 8, 5, 2), (512, 512, 512, 32, 32, 
False, True, True): (1, 16, 4, 1), (512, 512, 512, 32, 32, True, False, True): (1, 8, 5, 2), (512, 512, 512, 64, 64, False, True, True): (4, 8, 5, 4), (512, 512, 512, 64, 64, True, False, True): (2, 8, 5, 4), (512, 512, 512, 128, 128, False, True, True): (2, 4, 2, 8), (512, 512, 512, 128, 128, True, False, True): (1, 4, 2, 8), (512, 512, 1024, 16, 16, False, True, True): (2, 8, 4, 4), (512, 512, 1024, 16, 16, True, False, True): (1, 8, 4, 4), (512, 512, 1024, 32, 32, False, True, True): (3, 16, 4, 2), (512, 512, 1024, 32, 32, True, False, True): (1, 16, 5, 2), (512, 512, 1024, 64, 64, False, True, True): (2, 8, 3, 4), (512, 512, 1024, 64, 64, True, False, True): (2, 16, 3, 4), (512, 512, 1024, 128, 128, False, True, True): (2, 8, 2, 8), (512, 512, 1024, 128, 128, True, False, True): (3, 8, 2, 8), (512, 512, 2048, 16, 16, False, True, True): (4, 16, 3, 2), (512, 512, 2048, 16, 16, True, False, True): (1, 16, 4, 2), (512, 512, 2048, 32, 32, False, True, True): (3, 32, 3, 2), (512, 512, 2048, 32, 32, True, False, True): (2, 32, 3, 2), (512, 512, 2048, 64, 64, False, True, True): (6, 32, 3, 2), (512, 512, 2048, 64, 64, True, False, True): (1, 32, 3, 2), (512, 512, 2048, 128, 128, False, True, True): (4, 16, 2, 8), (512, 512, 2048, 128, 128, True, False, True): (1, 16, 2, 8), (512, 512, 4096, 16, 16, False, True, True): (1, 16, 3, 2), (512, 512, 4096, 16, 16, True, False, True): (4, 32, 3, 2), (512, 512, 4096, 32, 32, False, True, True): (3, 32, 3, 2), (512, 512, 4096, 32, 32, True, False, True): (2, 32, 3, 2), (512, 512, 4096, 64, 64, False, True, True): (1, 32, 3, 4), (512, 512, 4096, 64, 64, True, False, True): (1, 64, 3, 4), (512, 512, 4096, 128, 128, False, True, True): (4, 32, 1, 4), (512, 512, 4096, 128, 128, True, False, True): (4, 32, 2, 8), (512, 512, 8192, 16, 16, False, True, True): (8, 64, 3, 2), (512, 512, 8192, 16, 16, True, False, True): (4, 64, 3, 2), (512, 512, 8192, 32, 32, False, True, True): (3, 64, 3, 2), (512, 512, 8192, 32, 32, True, False, True): (3, 64, 3, 2), (512, 512, 8192, 64, 64, False, True, True): (1, 64, 3, 4), (512, 512, 8192, 64, 64, True, False, True): (7, 64, 3, 4), (512, 512, 8192, 128, 128, False, True, True): (1, 64, 1, 4), (512, 512, 8192, 128, 128, True, False, True): (4, 64, 2, 8), (512, 512, 16384, 16, 16, False, True, True): (1, 64, 3, 2), (512, 512, 16384, 16, 16, True, False, True): (1, 128, 3, 2), (512, 512, 16384, 32, 32, False, True, True): (3, 128, 3, 2), (512, 512, 16384, 32, 32, True, False, True): (1, 128, 3, 2), (512, 512, 16384, 64, 64, False, True, True): (4, 64, 2, 4), (512, 512, 16384, 64, 64, True, False, True): (2, 64, 2, 4), (512, 512, 16384, 128, 128, False, True, True): (4, 128, 1, 4), (512, 512, 16384, 128, 128, True, False, True): (2, 128, 1, 4), (512, 512, 32768, 16, 16, False, True, True): (1, 128, 3, 2), (512, 512, 32768, 16, 16, True, False, True): (1, 128, 3, 2), (512, 512, 32768, 32, 32, False, True, True): (1, 256, 3, 2), (512, 512, 32768, 32, 32, True, False, True): (1, 256, 3, 2), (512, 512, 32768, 64, 64, False, True, True): (1, 256, 3, 4), (512, 512, 32768, 64, 64, True, False, True): (2, 256, 3, 4), (512, 512, 32768, 128, 128, False, True, True): (5, 256, 1, 4), (512, 512, 32768, 128, 128, True, False, True): (4, 256, 1, 4), (512, 512, 65536, 16, 16, False, True, True): (1, 256, 3, 2), (512, 512, 65536, 16, 16, True, False, True): (1, 256, 3, 1), (512, 512, 65536, 32, 32, False, True, True): (1, 512, 3, 2), (512, 512, 65536, 32, 32, True, False, True): (1, 512, 3, 2), (512, 512, 65536, 64, 64, False, True, True): (4, 
256, 2, 4), (512, 512, 65536, 64, 64, True, False, True): (2, 512, 3, 4), (512, 512, 65536, 128, 128, False, True, True): (6, 512, 1, 4), (512, 512, 65536, 128, 128, True, False, True): (4, 512, 1, 4), (512, 512, 131072, 16, 16, False, True, True): (1, 512, 3, 2), (512, 512, 131072, 16, 16, True, False, True): (1, 512, 3, 1), (512, 512, 131072, 32, 32, False, True, True): (1, 1024, 3, 2), (512, 512, 131072, 32, 32, True, False, True): (1, 1024, 3, 2), (512, 512, 131072, 64, 64, False, True, True): (4, 512, 2, 4), (512, 512, 131072, 64, 64, True, False, True): (4, 1024, 3, 4), (512, 512, 131072, 128, 128, False, True, True): (6, 1024, 1, 4), (512, 512, 131072, 128, 128, True, False, True): (4, 1024, 1, 4), (768, 768, 256, 16, 16, False, True, True): (1, 8, 4, 1), (768, 768, 256, 16, 16, True, False, True): (3, 2, 6, 4), (768, 768, 256, 32, 32, False, True, True): (3, 8, 3, 4), (768, 768, 256, 32, 32, True, False, True): (1, 4, 4, 2), (768, 768, 256, 64, 64, False, True, True): (2, 4, 3, 4), (768, 768, 256, 64, 64, True, False, True): (1, 4, 4, 4), (768, 768, 256, 128, 128, False, True, True): (2, 2, 3, 8), (768, 768, 256, 128, 128, True, False, True): (4, 2, 3, 8), (768, 768, 512, 16, 16, False, True, True): (4, 8, 4, 2), (768, 768, 512, 16, 16, True, False, True): (4, 8, 6, 2), (768, 768, 512, 32, 32, False, True, True): (1, 8, 4, 4), (768, 768, 512, 32, 32, True, False, True): (3, 8, 4, 2), (768, 768, 512, 64, 64, False, True, True): (1, 8, 3, 4), (768, 768, 512, 64, 64, True, False, True): (1, 8, 4, 4), (768, 768, 512, 128, 128, False, True, True): (1, 4, 3, 8), (768, 768, 512, 128, 128, True, False, True): (4, 4, 3, 8), (768, 768, 1024, 16, 16, False, True, True): (3, 16, 1, 4), (768, 768, 1024, 16, 16, True, False, True): (1, 8, 5, 2), (768, 768, 1024, 32, 32, False, True, True): (3, 16, 1, 8), (768, 768, 1024, 32, 32, True, False, True): (1, 16, 3, 2), (768, 768, 1024, 64, 64, False, True, True): (1, 8, 3, 4), (768, 768, 1024, 64, 64, True, False, True): (2, 8, 3, 8), (768, 768, 1024, 128, 128, False, True, True): (1, 8, 3, 8), (768, 768, 1024, 128, 128, True, False, True): (1, 8, 3, 8), (768, 768, 2048, 16, 16, False, True, True): (2, 16, 1, 2), (768, 768, 2048, 16, 16, True, False, True): (1, 16, 3, 2), (768, 768, 2048, 32, 32, False, True, True): (5, 32, 1, 4), (768, 768, 2048, 32, 32, True, False, True): (3, 8, 3, 4), (768, 768, 2048, 64, 64, False, True, True): (1, 16, 1, 8), (768, 768, 2048, 64, 64, True, False, True): (3, 16, 3, 4), (768, 768, 2048, 128, 128, False, True, True): (2, 16, 3, 8), (768, 768, 2048, 128, 128, True, False, True): (1, 16, 3, 8), (768, 768, 4096, 16, 16, False, True, True): (3, 32, 1, 4), (768, 768, 4096, 16, 16, True, False, True): (2, 32, 3, 1), (768, 768, 4096, 32, 32, False, True, True): (2, 64, 1, 4), (768, 768, 4096, 32, 32, True, False, True): (1, 16, 4, 4), (768, 768, 4096, 64, 64, False, True, True): (3, 64, 3, 4), (768, 768, 4096, 64, 64, True, False, True): (2, 16, 3, 4), (768, 768, 4096, 128, 128, False, True, True): (1, 32, 3, 8), (768, 768, 4096, 128, 128, True, False, True): (4, 32, 3, 8), (768, 768, 8192, 16, 16, False, True, True): (1, 64, 1, 2), (768, 768, 8192, 16, 16, True, False, True): (4, 64, 3, 2), (768, 768, 8192, 32, 32, False, True, True): (1, 64, 1, 8), (768, 768, 8192, 32, 32, True, False, True): (2, 32, 3, 4), (768, 768, 8192, 64, 64, False, True, True): (4, 64, 3, 4), (768, 768, 8192, 64, 64, True, False, True): (2, 32, 3, 4), (768, 768, 8192, 128, 128, False, True, True): (2, 64, 3, 8), (768, 768, 8192, 128, 128, True, 
False, True): (1, 64, 3, 8), (768, 768, 16384, 16, 16, False, True, True): (1, 128, 1, 2), (768, 768, 16384, 16, 16, True, False, True): (1, 64, 4, 4), (768, 768, 16384, 32, 32, False, True, True): (1, 128, 1, 8), (768, 768, 16384, 32, 32, True, False, True): (1, 64, 3, 4), (768, 768, 16384, 64, 64, False, True, True): (4, 128, 3, 4), (768, 768, 16384, 64, 64, True, False, True): (1, 64, 3, 4), (768, 768, 16384, 128, 128, False, True, True): (3, 128, 1, 4), (768, 768, 16384, 128, 128, True, False, True): (3, 128, 2, 4), (768, 768, 32768, 16, 16, False, True, True): (1, 256, 1, 2), (768, 768, 32768, 16, 16, True, False, True): (1, 128, 4, 4), (768, 768, 32768, 32, 32, False, True, True): (1, 128, 1, 2), (768, 768, 32768, 32, 32, True, False, True): (1, 128, 3, 4), (768, 768, 32768, 64, 64, False, True, True): (1, 256, 1, 4), (768, 768, 32768, 64, 64, True, False, True): (2, 128, 3, 4), (768, 768, 32768, 128, 128, False, True, True): (3, 256, 1, 4), (768, 768, 32768, 128, 128, True, False, True): (2, 256, 2, 4), (768, 768, 65536, 16, 16, False, True, True): (4, 512, 1, 2), (768, 768, 65536, 16, 16, True, False, True): (1, 256, 4, 4), (768, 768, 65536, 32, 32, False, True, True): (1, 256, 1, 2), (768, 768, 65536, 32, 32, True, False, True): (1, 256, 3, 4), (768, 768, 65536, 64, 64, False, True, True): (3, 512, 1, 4), (768, 768, 65536, 64, 64, True, False, True): (2, 256, 3, 4), (768, 768, 65536, 128, 128, False, True, True): (3, 512, 1, 4), (768, 768, 65536, 128, 128, True, False, True): (2, 512, 2, 4), (768, 768, 131072, 16, 16, False, True, True): (4, 1024, 1, 2), (768, 768, 131072, 16, 16, True, False, True): (1, 512, 4, 1), (768, 768, 131072, 32, 32, False, True, True): (1, 512, 1, 2), (768, 768, 131072, 32, 32, True, False, True): (1, 512, 3, 4), (768, 768, 131072, 64, 64, False, True, True): (1, 1024, 1, 4), (768, 768, 131072, 64, 64, True, False, True): (2, 512, 3, 4), (768, 768, 131072, 128, 128, False, True, True): (3, 1024, 1, 4), (768, 768, 131072, 128, 128, True, False, True): (1, 1024, 2, 4), (768, 3072, 256, 16, 16, False, True, True): (3, 8, 6, 1), (768, 3072, 256, 16, 16, True, False, True): (1, 4, 6, 2), (768, 3072, 256, 32, 32, False, True, True): (1, 8, 4, 4), (768, 3072, 256, 32, 32, True, False, True): (3, 4, 6, 4), (768, 3072, 256, 64, 64, False, True, True): (2, 4, 3, 4), (768, 3072, 256, 64, 64, True, False, True): (1, 4, 4, 4), (768, 3072, 256, 128, 128, False, True, True): (2, 2, 3, 8), (768, 3072, 256, 128, 128, True, False, True): (1, 2, 3, 8), (768, 3072, 512, 16, 16, False, True, True): (1, 8, 4, 2), (768, 3072, 512, 16, 16, True, False, True): (1, 8, 5, 2), (768, 3072, 512, 32, 32, False, True, True): (1, 16, 3, 2), (768, 3072, 512, 32, 32, True, False, True): (1, 8, 5, 2), (768, 3072, 512, 64, 64, False, True, True): (1, 8, 3, 4), (768, 3072, 512, 64, 64, True, False, True): (3, 8, 4, 4), (768, 3072, 512, 128, 128, False, True, True): (1, 4, 3, 8), (768, 3072, 512, 128, 128, True, False, True): (2, 4, 3, 8), (768, 3072, 1024, 16, 16, False, True, True): (1, 16, 1, 4), (768, 3072, 1024, 16, 16, True, False, True): (5, 4, 4, 4), (768, 3072, 1024, 32, 32, False, True, True): (3, 8, 3, 4), (768, 3072, 1024, 32, 32, True, False, True): (1, 8, 4, 4), (768, 3072, 1024, 64, 64, False, True, True): (2, 16, 3, 4), (768, 3072, 1024, 64, 64, True, False, True): (2, 16, 4, 4), (768, 3072, 1024, 128, 128, False, True, True): (1, 8, 3, 8), (768, 3072, 1024, 128, 128, True, False, True): (5, 8, 3, 8), (768, 3072, 2048, 16, 16, False, True, True): (3, 16, 1, 2), (768, 3072, 
2048, 16, 16, True, False, True): (1, 8, 3, 4), (768, 3072, 2048, 32, 32, False, True, True): (4, 16, 1, 8), (768, 3072, 2048, 32, 32, True, False, True): (3, 8, 3, 4), (768, 3072, 2048, 64, 64, False, True, True): (2, 16, 3, 4), (768, 3072, 2048, 64, 64, True, False, True): (2, 16, 3, 4), (768, 3072, 2048, 128, 128, False, True, True): (3, 16, 3, 8), (768, 3072, 2048, 128, 128, True, False, True): (4, 16, 3, 8), (768, 3072, 4096, 16, 16, False, True, True): (1, 32, 1, 4), (768, 3072, 4096, 16, 16, True, False, True): (1, 16, 3, 1), (768, 3072, 4096, 32, 32, False, True, True): (3, 32, 1, 8), (768, 3072, 4096, 32, 32, True, False, True): (3, 16, 4, 4), (768, 3072, 4096, 64, 64, False, True, True): (2, 32, 3, 4), (768, 3072, 4096, 64, 64, True, False, True): (2, 16, 3, 4), (768, 3072, 4096, 128, 128, False, True, True): (5, 32, 1, 4), (768, 3072, 4096, 128, 128, True, False, True): (9, 32, 3, 8), (768, 3072, 8192, 16, 16, False, True, True): (1, 32, 1, 4), (768, 3072, 8192, 16, 16, True, False, True): (4, 64, 4, 2), (768, 3072, 8192, 32, 32, False, True, True): (1, 64, 1, 8), (768, 3072, 8192, 32, 32, True, False, True): (2, 64, 4, 2), (768, 3072, 8192, 64, 64, False, True, True): (1, 64, 3, 4), (768, 3072, 8192, 64, 64, True, False, True): (2, 32, 3, 4), (768, 3072, 8192, 128, 128, False, True, True): (2, 64, 3, 8), (768, 3072, 8192, 128, 128, True, False, True): (2, 64, 3, 8), (768, 3072, 16384, 16, 16, False, True, True): (1, 64, 1, 4), (768, 3072, 16384, 16, 16, True, False, True): (1, 64, 4, 1), (768, 3072, 16384, 32, 32, False, True, True): (1, 128, 1, 8), (768, 3072, 16384, 32, 32, True, False, True): (1, 64, 3, 4), (768, 3072, 16384, 64, 64, False, True, True): (1, 128, 3, 4), (768, 3072, 16384, 64, 64, True, False, True): (4, 64, 3, 4), (768, 3072, 16384, 128, 128, False, True, True): (2, 128, 3, 8), (768, 3072, 16384, 128, 128, True, False, True): (2, 128, 3, 8), (768, 3072, 32768, 16, 16, False, True, True): (1, 128, 1, 4), (768, 3072, 32768, 16, 16, True, False, True): (1, 128, 4, 1), (768, 3072, 32768, 32, 32, False, True, True): (1, 256, 1, 8), (768, 3072, 32768, 32, 32, True, False, True): (1, 128, 3, 4), (768, 3072, 32768, 64, 64, False, True, True): (1, 256, 3, 4), (768, 3072, 32768, 64, 64, True, False, True): (1, 128, 3, 4), (768, 3072, 32768, 128, 128, False, True, True): (3, 256, 1, 4), (768, 3072, 32768, 128, 128, True, False, True): (2, 256, 3, 8), (768, 3072, 50432, 16, 16, False, True, True): (1, 197, 1, 4), (768, 3072, 50432, 16, 16, True, False, True): (4, 197, 4, 4), (768, 3072, 50432, 32, 32, False, True, True): (1, 197, 1, 4), (768, 3072, 50432, 32, 32, True, False, True): (4, 197, 3, 4), (768, 3072, 50432, 64, 64, False, True, True): (1, 394, 3, 4), (768, 3072, 50432, 64, 64, True, False, True): (3, 197, 3, 4), (768, 3072, 50432, 128, 128, False, True, True): (3, 394, 1, 4), (768, 3072, 50432, 128, 128, True, False, True): (1, 394, 3, 8), (768, 3072, 65536, 16, 16, False, True, True): (1, 256, 1, 4), (768, 3072, 65536, 16, 16, True, False, True): (5, 256, 4, 1), (768, 3072, 65536, 32, 32, False, True, True): (1, 256, 1, 4), (768, 3072, 65536, 32, 32, True, False, True): (3, 256, 3, 4), (768, 3072, 65536, 64, 64, False, True, True): (2, 512, 3, 4), (768, 3072, 65536, 64, 64, True, False, True): (3, 256, 3, 4), (768, 3072, 65536, 128, 128, False, True, True): (3, 512, 1, 4), (768, 3072, 65536, 128, 128, True, False, True): (2, 512, 3, 8), (768, 3072, 131072, 16, 16, False, True, True): (1, 512, 1, 4), (768, 3072, 131072, 16, 16, True, False, True): (5, 512, 4, 
1), (768, 3072, 131072, 32, 32, False, True, True): (1, 512, 1, 4), (768, 3072, 131072, 32, 32, True, False, True): (4, 512, 3, 4), (768, 3072, 131072, 64, 64, False, True, True): (1, 1024, 3, 4), (768, 3072, 131072, 64, 64, True, False, True): (1, 512, 3, 4), (768, 3072, 131072, 128, 128, False, True, True): (3, 1024, 1, 4), (768, 3072, 131072, 128, 128, True, False, True): (1, 1024, 3, 8), (1024, 1024, 256, 16, 16, False, True, True): (1, 4, 5, 4), (1024, 1024, 256, 16, 16, True, False, True): (3, 4, 4, 4), (1024, 1024, 256, 32, 32, False, True, True): (4, 4, 5, 2), (1024, 1024, 256, 32, 32, True, False, True): (3, 4, 5, 2), (1024, 1024, 256, 64, 64, False, True, True): (1, 4, 5, 4), (1024, 1024, 256, 64, 64, True, False, True): (1, 4, 5, 4), (1024, 1024, 256, 128, 128, False, True, True): (1, 2, 2, 8), (1024, 1024, 256, 128, 128, True, False, True): (2, 2, 2, 8), (1024, 1024, 512, 16, 16, False, True, True): (3, 4, 4, 4), (1024, 1024, 512, 16, 16, True, False, True): (4, 8, 5, 2), (1024, 1024, 512, 32, 32, False, True, True): (1, 8, 4, 2), (1024, 1024, 512, 32, 32, True, False, True): (1, 8, 4, 2), (1024, 1024, 512, 64, 64, False, True, True): (4, 8, 4, 4), (1024, 1024, 512, 64, 64, True, False, True): (2, 8, 3, 4), (1024, 1024, 512, 128, 128, False, True, True): (2, 4, 2, 8), (1024, 1024, 512, 128, 128, True, False, True): (1, 4, 2, 8), (1024, 1024, 1024, 16, 16, False, True, True): (3, 8, 4, 4), (1024, 1024, 1024, 16, 16, True, False, True): (4, 8, 4, 2), (1024, 1024, 1024, 32, 32, False, True, True): (1, 16, 3, 2), (1024, 1024, 1024, 32, 32, True, False, True): (1, 16, 3, 2), (1024, 1024, 1024, 64, 64, False, True, True): (1, 16, 3, 4), (1024, 1024, 1024, 64, 64, True, False, True): (3, 16, 3, 2), (1024, 1024, 1024, 128, 128, False, True, True): (1, 8, 2, 8), (1024, 1024, 1024, 128, 128, True, False, True): (2, 8, 2, 8), (1024, 1024, 2048, 16, 16, False, True, True): (3, 8, 3, 4), (1024, 1024, 2048, 16, 16, True, False, True): (3, 8, 3, 2), (1024, 1024, 2048, 32, 32, False, True, True): (5, 16, 3, 4), (1024, 1024, 2048, 32, 32, True, False, True): (1, 16, 3, 2), (1024, 1024, 2048, 64, 64, False, True, True): (6, 16, 4, 4), (1024, 1024, 2048, 64, 64, True, False, True): (5, 16, 3, 4), (1024, 1024, 2048, 128, 128, False, True, True): (4, 16, 2, 8), (1024, 1024, 2048, 128, 128, True, False, True): (4, 16, 2, 8), (1024, 1024, 4096, 16, 16, False, True, True): (8, 32, 3, 2), (1024, 1024, 4096, 16, 16, True, False, True): (4, 32, 3, 2), (1024, 1024, 4096, 32, 32, False, True, True): (2, 32, 3, 4), (1024, 1024, 4096, 32, 32, True, False, True): (3, 32, 3, 2), (1024, 1024, 4096, 64, 64, False, True, True): (3, 32, 3, 4), (1024, 1024, 4096, 64, 64, True, False, True): (1, 32, 3, 4), (1024, 1024, 4096, 128, 128, False, True, True): (4, 32, 2, 8), (1024, 1024, 4096, 128, 128, True, False, True): (1, 32, 2, 8), (1024, 1024, 8192, 16, 16, False, True, True): (4, 64, 3, 2), (1024, 1024, 8192, 16, 16, True, False, True): (4, 64, 3, 2), (1024, 1024, 8192, 32, 32, False, True, True): (8, 64, 3, 4), (1024, 1024, 8192, 32, 32, True, False, True): (4, 32, 3, 4), (1024, 1024, 8192, 64, 64, False, True, True): (4, 64, 3, 4), (1024, 1024, 8192, 64, 64, True, False, True): (2, 64, 3, 4), (1024, 1024, 8192, 128, 128, False, True, True): (4, 64, 2, 8), (1024, 1024, 8192, 128, 128, True, False, True): (4, 64, 1, 4), (1024, 1024, 16384, 16, 16, False, True, True): (1, 64, 3, 2), (1024, 1024, 16384, 16, 16, True, False, True): (1, 64, 3, 2), (1024, 1024, 16384, 32, 32, False, True, True): (1, 128, 3, 2), (1024, 
1024, 16384, 32, 32, True, False, True): (1, 64, 3, 4), (1024, 1024, 16384, 64, 64, False, True, True): (1, 128, 3, 4), (1024, 1024, 16384, 64, 64, True, False, True): (1, 128, 3, 4), (1024, 1024, 16384, 128, 128, False, True, True): (2, 128, 1, 4), (1024, 1024, 16384, 128, 128, True, False, True): (4, 128, 1, 4), (1024, 1024, 32768, 16, 16, False, True, True): (1, 128, 3, 2), (1024, 1024, 32768, 16, 16, True, False, True): (1, 128, 3, 2), (1024, 1024, 32768, 32, 32, False, True, True): (1, 256, 3, 2), (1024, 1024, 32768, 32, 32, True, False, True): (1, 128, 3, 4), (1024, 1024, 32768, 64, 64, False, True, True): (2, 128, 2, 4), (1024, 1024, 32768, 64, 64, True, False, True): (1, 256, 3, 4), (1024, 1024, 32768, 128, 128, False, True, True): (2, 256, 1, 4), (1024, 1024, 32768, 128, 128, True, False, True): (4, 256, 1, 4), (1024, 1024, 65536, 16, 16, False, True, True): (1, 256, 3, 4), (1024, 1024, 65536, 16, 16, True, False, True): (1, 256, 3, 4), (1024, 1024, 65536, 32, 32, False, True, True): (9, 256, 3, 4), (1024, 1024, 65536, 32, 32, True, False, True): (7, 256, 3, 4), (1024, 1024, 65536, 64, 64, False, True, True): (2, 256, 2, 4), (1024, 1024, 65536, 64, 64, True, False, True): (2, 512, 3, 4), (1024, 1024, 65536, 128, 128, False, True, True): (2, 512, 1, 4), (1024, 1024, 65536, 128, 128, True, False, True): (4, 512, 1, 4), (1024, 1024, 131072, 16, 16, False, True, True): (11, 512, 3, 2), (1024, 1024, 131072, 16, 16, True, False, True): (11, 512, 3, 2), (1024, 1024, 131072, 32, 32, False, True, True): (4, 512, 3, 4), (1024, 1024, 131072, 32, 32, True, False, True): (6, 512, 3, 4), (1024, 1024, 131072, 64, 64, False, True, True): (2, 512, 2, 4), (1024, 1024, 131072, 64, 64, True, False, True): (2, 1024, 3, 4), (1024, 1024, 131072, 128, 128, False, True, True): (4, 1024, 1, 4), (1024, 1024, 131072, 128, 128, True, False, True): (4, 1024, 1, 4), (1280, 5120, 65792, 16, 16, False, True, True): (1, 257, 1, 4), (1280, 5120, 65792, 16, 16, True, False, True): (5, 257, 4, 1), (1280, 5120, 65792, 32, 32, False, True, True): (1, 514, 1, 8), (1280, 5120, 65792, 32, 32, True, False, True): (2, 257, 3, 4), (1280, 5120, 65792, 64, 64, False, True, True): (1, 514, 3, 4), (1280, 5120, 65792, 64, 64, True, False, True): (1, 257, 3, 4), (1280, 5120, 65792, 128, 128, False, True, True): (1, 514, 3, 8), (1280, 5120, 65792, 128, 128, True, False, True): (2, 514, 3, 8), (1536, 1536, 256, 16, 16, False, True, True): (1, 4, 6, 2), (1536, 1536, 256, 16, 16, True, False, True): (3, 4, 5, 2), (1536, 1536, 256, 32, 32, False, True, True): (2, 4, 3, 4), (1536, 1536, 256, 32, 32, True, False, True): (1, 4, 5, 2), (1536, 1536, 256, 64, 64, False, True, True): (2, 4, 3, 4), (1536, 1536, 256, 64, 64, True, False, True): (1, 4, 4, 4), (1536, 1536, 256, 128, 128, False, True, True): (3, 2, 3, 8), (1536, 1536, 256, 128, 128, True, False, True): (6, 2, 3, 8), (1536, 1536, 512, 16, 16, False, True, True): (1, 8, 1, 4), (1536, 1536, 512, 16, 16, True, False, True): (3, 4, 5, 2), (1536, 1536, 512, 32, 32, False, True, True): (1, 8, 1, 8), (1536, 1536, 512, 32, 32, True, False, True): (1, 4, 4, 4), (1536, 1536, 512, 64, 64, False, True, True): (3, 8, 5, 4), (1536, 1536, 512, 64, 64, True, False, True): (3, 8, 3, 4), (1536, 1536, 512, 128, 128, False, True, True): (2, 4, 3, 8), (1536, 1536, 512, 128, 128, True, False, True): (3, 4, 3, 8), (1536, 1536, 1024, 16, 16, False, True, True): (1, 8, 1, 2), (1536, 1536, 1024, 16, 16, True, False, True): (2, 8, 4, 2), (1536, 1536, 1024, 32, 32, False, True, True): (8, 16, 1, 4), (1536, 
1536, 1024, 32, 32, True, False, True): (3, 8, 4, 2), (1536, 1536, 1024, 64, 64, False, True, True): (1, 16, 3, 4), (1536, 1536, 1024, 64, 64, True, False, True): (3, 8, 3, 4), (1536, 1536, 1024, 128, 128, False, True, True): (3, 8, 3, 8), (1536, 1536, 1024, 128, 128, True, False, True): (3, 8, 3, 8), (1536, 1536, 2048, 16, 16, False, True, True): (1, 16, 1, 4), (1536, 1536, 2048, 16, 16, True, False, True): (1, 8, 3, 1), (1536, 1536, 2048, 32, 32, False, True, True): (3, 16, 1, 8), (1536, 1536, 2048, 32, 32, True, False, True): (3, 8, 4, 4), (1536, 1536, 2048, 64, 64, False, True, True): (1, 16, 3, 4), (1536, 1536, 2048, 64, 64, True, False, True): (3, 8, 3, 4), (1536, 1536, 2048, 128, 128, False, True, True): (4, 16, 1, 4), (1536, 1536, 2048, 128, 128, True, False, True): (6, 16, 3, 8), (1536, 1536, 4096, 16, 16, False, True, True): (1, 32, 1, 2), (1536, 1536, 4096, 16, 16, True, False, True): (4, 32, 4, 2), (1536, 1536, 4096, 32, 32, False, True, True): (1, 32, 1, 8), (1536, 1536, 4096, 32, 32, True, False, True): (5, 32, 4, 2), (1536, 1536, 4096, 64, 64, False, True, True): (2, 32, 3, 4), (1536, 1536, 4096, 64, 64, True, False, True): (2, 16, 3, 4), (1536, 1536, 4096, 128, 128, False, True, True): (4, 32, 3, 8), (1536, 1536, 4096, 128, 128, True, False, True): (4, 32, 3, 8), (1536, 1536, 8192, 16, 16, False, True, True): (1, 64, 1, 2), (1536, 1536, 8192, 16, 16, True, False, True): (4, 64, 4, 2), (1536, 1536, 8192, 32, 32, False, True, True): (2, 64, 1, 8), (1536, 1536, 8192, 32, 32, True, False, True): (2, 32, 3, 4), (1536, 1536, 8192, 64, 64, False, True, True): (1, 64, 3, 4), (1536, 1536, 8192, 64, 64, True, False, True): (2, 32, 3, 4), (1536, 1536, 8192, 128, 128, False, True, True): (4, 64, 3, 8), (1536, 1536, 8192, 128, 128, True, False, True): (1, 64, 3, 8), (1536, 1536, 16384, 16, 16, False, True, True): (1, 128, 1, 2), (1536, 1536, 16384, 16, 16, True, False, True): (1, 64, 4, 4), (1536, 1536, 16384, 32, 32, False, True, True): (1, 64, 1, 2), (1536, 1536, 16384, 32, 32, True, False, True): (1, 64, 3, 4), (1536, 1536, 16384, 64, 64, False, True, True): (1, 128, 3, 4), (1536, 1536, 16384, 64, 64, True, False, True): (1, 64, 3, 4), (1536, 1536, 16384, 128, 128, False, True, True): (1, 128, 1, 4), (1536, 1536, 16384, 128, 128, True, False, True): (1, 128, 2, 4), (1536, 1536, 32768, 16, 16, False, True, True): (1, 256, 1, 2), (1536, 1536, 32768, 16, 16, True, False, True): (1, 128, 3, 2), (1536, 1536, 32768, 32, 32, False, True, True): (1, 128, 1, 2), (1536, 1536, 32768, 32, 32, True, False, True): (1, 128, 3, 4), (1536, 1536, 32768, 64, 64, False, True, True): (1, 256, 3, 4), (1536, 1536, 32768, 64, 64, True, False, True): (1, 128, 3, 4), (1536, 1536, 32768, 128, 128, False, True, True): (1, 256, 1, 4), (1536, 1536, 32768, 128, 128, True, False, True): (2, 256, 2, 4), (1536, 1536, 65536, 16, 16, False, True, True): (2, 512, 1, 2), (1536, 1536, 65536, 16, 16, True, False, True): (1, 256, 4, 4), (1536, 1536, 65536, 32, 32, False, True, True): (1, 256, 1, 2), (1536, 1536, 65536, 32, 32, True, False, True): (1, 256, 3, 4), (1536, 1536, 65536, 64, 64, False, True, True): (1, 512, 3, 4), (1536, 1536, 65536, 64, 64, True, False, True): (3, 256, 3, 4), (1536, 1536, 65536, 128, 128, False, True, True): (3, 512, 1, 4), (1536, 1536, 65536, 128, 128, True, False, True): (4, 512, 2, 4), (1536, 1536, 131072, 16, 16, False, True, True): (2, 1024, 1, 2), (1536, 1536, 131072, 16, 16, True, False, True): (9, 512, 4, 4), (1536, 1536, 131072, 32, 32, False, True, True): (1, 512, 1, 2), (1536, 1536, 
131072, 32, 32, True, False, True): (5, 512, 3, 4), (1536, 1536, 131072, 64, 64, False, True, True): (1, 1024, 3, 4), (1536, 1536, 131072, 64, 64, True, False, True): (2, 512, 3, 4), (1536, 1536, 131072, 128, 128, False, True, True): (3, 1024, 1, 4), (1536, 1536, 131072, 128, 128, True, False, True): (1, 1024, 2, 4), (2048, 2048, 256, 16, 16, False, True, True): (1, 4, 5, 2), (2048, 2048, 256, 16, 16, True, False, True): (4, 4, 5, 2), (2048, 2048, 256, 32, 32, False, True, True): (3, 4, 6, 2), (2048, 2048, 256, 32, 32, True, False, True): (2, 4, 5, 2), (2048, 2048, 256, 64, 64, False, True, True): (2, 4, 4, 4), (2048, 2048, 256, 64, 64, True, False, True): (2, 4, 3, 4), (2048, 2048, 256, 128, 128, False, True, True): (3, 2, 2, 8), (2048, 2048, 256, 128, 128, True, False, True): (3, 2, 2, 8), (2048, 2048, 512, 16, 16, False, True, True): (3, 4, 4, 4), (2048, 2048, 512, 16, 16, True, False, True): (1, 4, 4, 4), (2048, 2048, 512, 32, 32, False, True, True): (1, 4, 3, 4), (2048, 2048, 512, 32, 32, True, False, True): (1, 4, 4, 2), (2048, 2048, 512, 64, 64, False, True, True): (1, 8, 3, 4), (2048, 2048, 512, 64, 64, True, False, True): (1, 8, 3, 4), (2048, 2048, 512, 128, 128, False, True, True): (3, 4, 2, 8), (2048, 2048, 512, 128, 128, True, False, True): (2, 4, 2, 8), (2048, 2048, 1024, 16, 16, False, True, True): (3, 4, 3, 4), (2048, 2048, 1024, 16, 16, True, False, True): (4, 8, 3, 2), (2048, 2048, 1024, 32, 32, False, True, True): (3, 8, 3, 4), (2048, 2048, 1024, 32, 32, True, False, True): (1, 8, 3, 2), (2048, 2048, 1024, 64, 64, False, True, True): (1, 8, 3, 4), (2048, 2048, 1024, 64, 64, True, False, True): (1, 8, 3, 4), (2048, 2048, 1024, 128, 128, False, True, True): (4, 8, 1, 4), (2048, 2048, 1024, 128, 128, True, False, True): (2, 8, 1, 4), (2048, 2048, 2048, 16, 16, False, True, True): (4, 16, 3, 2), (2048, 2048, 2048, 16, 16, True, False, True): (4, 16, 3, 2), (2048, 2048, 2048, 32, 32, False, True, True): (1, 16, 3, 2), (2048, 2048, 2048, 32, 32, True, False, True): (1, 16, 3, 2), (2048, 2048, 2048, 64, 64, False, True, True): (4, 16, 3, 4), (2048, 2048, 2048, 64, 64, True, False, True): (4, 16, 3, 4), (2048, 2048, 2048, 128, 128, False, True, True): (6, 16, 2, 8), (2048, 2048, 2048, 128, 128, True, False, True): (3, 16, 1, 4), (2048, 2048, 4096, 16, 16, False, True, True): (4, 32, 4, 2), (2048, 2048, 4096, 16, 16, True, False, True): (4, 32, 3, 2), (2048, 2048, 4096, 32, 32, False, True, True): (4, 16, 3, 8), (2048, 2048, 4096, 32, 32, True, False, True): (4, 16, 3, 8), (2048, 2048, 4096, 64, 64, False, True, True): (1, 32, 3, 4), (2048, 2048, 4096, 64, 64, True, False, True): (3, 32, 3, 4), (2048, 2048, 4096, 128, 128, False, True, True): (2, 32, 1, 4), (2048, 2048, 4096, 128, 128, True, False, True): (2, 32, 1, 4), (2048, 2048, 8192, 16, 16, False, True, True): (4, 64, 4, 2), (2048, 2048, 8192, 16, 16, True, False, True): (4, 64, 4, 2), (2048, 2048, 8192, 32, 32, False, True, True): (4, 32, 4, 8), (2048, 2048, 8192, 32, 32, True, False, True): (4, 32, 3, 8), (2048, 2048, 8192, 64, 64, False, True, True): (4, 64, 3, 4), (2048, 2048, 8192, 64, 64, True, False, True): (4, 64, 3, 4), (2048, 2048, 8192, 128, 128, False, True, True): (2, 64, 1, 4), (2048, 2048, 8192, 128, 128, True, False, True): (2, 64, 1, 4), (2048, 2048, 16384, 16, 16, False, True, True): (4, 64, 3, 2), (2048, 2048, 16384, 16, 16, True, False, True): (1, 64, 3, 2), (2048, 2048, 16384, 32, 32, False, True, True): (4, 64, 3, 4), (2048, 2048, 16384, 32, 32, True, False, True): (4, 64, 3, 4), (2048, 2048, 16384, 64, 
64, False, True, True): (4, 128, 3, 4), (2048, 2048, 16384, 64, 64, True, False, True): (4, 128, 3, 4), (2048, 2048, 16384, 128, 128, False, True, True): (2, 128, 1, 4), (2048, 2048, 16384, 128, 128, True, False, True): (2, 128, 1, 4), (2048, 2048, 32768, 16, 16, False, True, True): (8, 128, 3, 2), (2048, 2048, 32768, 16, 16, True, False, True): (8, 128, 3, 4), (2048, 2048, 32768, 32, 32, False, True, True): (8, 128, 3, 4), (2048, 2048, 32768, 32, 32, True, False, True): (8, 128, 3, 4), (2048, 2048, 32768, 64, 64, False, True, True): (1, 128, 2, 4), (2048, 2048, 32768, 64, 64, True, False, True): (8, 256, 3, 4), (2048, 2048, 32768, 128, 128, False, True, True): (2, 256, 1, 4), (2048, 2048, 32768, 128, 128, True, False, True): (2, 256, 1, 4), (2048, 2048, 65536, 16, 16, False, True, True): (9, 256, 4, 4), (2048, 2048, 65536, 16, 16, True, False, True): (7, 256, 4, 4), (2048, 2048, 65536, 32, 32, False, True, True): (7, 256, 3, 4), (2048, 2048, 65536, 32, 32, True, False, True): (3, 256, 3, 4), (2048, 2048, 65536, 64, 64, False, True, True): (2, 256, 2, 4), (2048, 2048, 65536, 64, 64, True, False, True): (6, 512, 3, 4), (2048, 2048, 65536, 128, 128, False, True, True): (2, 512, 1, 4), (2048, 2048, 65536, 128, 128, True, False, True): (2, 512, 1, 4), (2048, 2048, 131072, 16, 16, False, True, True): (9, 512, 4, 4), (2048, 2048, 131072, 16, 16, True, False, True): (9, 512, 4, 4), (2048, 2048, 131072, 32, 32, False, True, True): (7, 512, 4, 4), (2048, 2048, 131072, 32, 32, True, False, True): (3, 512, 3, 4), (2048, 2048, 131072, 64, 64, False, True, True): (2, 512, 2, 4), (2048, 2048, 131072, 64, 64, True, False, True): (4, 1024, 3, 4), (2048, 2048, 131072, 128, 128, False, True, True): (1, 1024, 1, 4), (2048, 2048, 131072, 128, 128, True, False, True): (2, 1024, 1, 4), (3072, 768, 256, 16, 16, False, True, True): (6, 4, 1, 4), (3072, 768, 256, 16, 16, True, False, True): (3, 1, 4, 4), (3072, 768, 256, 32, 32, False, True, True): (6, 8, 1, 2), (3072, 768, 256, 32, 32, True, False, True): (1, 2, 4, 4), (3072, 768, 256, 64, 64, False, True, True): (1, 4, 4, 4), (3072, 768, 256, 64, 64, True, False, True): (4, 2, 4, 4), (3072, 768, 256, 128, 128, False, True, True): (1, 2, 3, 8), (3072, 768, 256, 128, 128, True, False, True): (1, 2, 3, 8), (3072, 768, 512, 16, 16, False, True, True): (2, 4, 1, 4), (3072, 768, 512, 16, 16, True, False, True): (1, 4, 4, 1), (3072, 768, 512, 32, 32, False, True, True): (3, 8, 1, 4), (3072, 768, 512, 32, 32, True, False, True): (1, 2, 3, 4), (3072, 768, 512, 64, 64, False, True, True): (1, 8, 1, 4), (3072, 768, 512, 64, 64, True, False, True): (4, 4, 3, 4), (3072, 768, 512, 128, 128, False, True, True): (1, 4, 3, 8), (3072, 768, 512, 128, 128, True, False, True): (1, 4, 3, 8), (3072, 768, 1024, 16, 16, False, True, True): (1, 8, 1, 4), (3072, 768, 1024, 16, 16, True, False, True): (3, 4, 3, 1), (3072, 768, 1024, 32, 32, False, True, True): (1, 8, 1, 8), (3072, 768, 1024, 32, 32, True, False, True): (1, 4, 4, 4), (3072, 768, 1024, 64, 64, False, True, True): (1, 16, 3, 4), (3072, 768, 1024, 64, 64, True, False, True): (1, 4, 3, 4), (3072, 768, 1024, 128, 128, False, True, True): (1, 8, 3, 8), (3072, 768, 1024, 128, 128, True, False, True): (2, 8, 3, 8), (3072, 768, 2048, 16, 16, False, True, True): (3, 8, 1, 4), (3072, 768, 2048, 16, 16, True, False, True): (2, 8, 3, 4), (3072, 768, 2048, 32, 32, False, True, True): (3, 16, 1, 8), (3072, 768, 2048, 32, 32, True, False, True): (3, 8, 3, 4), (3072, 768, 2048, 64, 64, False, True, True): (1, 16, 1, 4), (3072, 768, 2048, 
64, 64, True, False, True): (1, 16, 3, 4), (3072, 768, 2048, 128, 128, False, True, True): (1, 16, 3, 8), (3072, 768, 2048, 128, 128, True, False, True): (2, 16, 2, 4), (3072, 768, 4096, 16, 16, False, True, True): (1, 16, 1, 4), (3072, 768, 4096, 16, 16, True, False, True): (4, 32, 4, 2), (3072, 768, 4096, 32, 32, False, True, True): (2, 32, 1, 8), (3072, 768, 4096, 32, 32, True, False, True): (7, 16, 3, 4), (3072, 768, 4096, 64, 64, False, True, True): (2, 32, 1, 4), (3072, 768, 4096, 64, 64, True, False, True): (2, 16, 2, 4), (3072, 768, 4096, 128, 128, False, True, True): (1, 32, 3, 8), (3072, 768, 4096, 128, 128, True, False, True): (3, 32, 2, 4), (3072, 768, 8192, 16, 16, False, True, True): (2, 32, 1, 4), (3072, 768, 8192, 16, 16, True, False, True): (4, 64, 4, 2), (3072, 768, 8192, 32, 32, False, True, True): (4, 32, 1, 4), (3072, 768, 8192, 32, 32, True, False, True): (4, 32, 3, 4), (3072, 768, 8192, 64, 64, False, True, True): (2, 64, 1, 4), (3072, 768, 8192, 64, 64, True, False, True): (4, 32, 2, 4), (3072, 768, 8192, 128, 128, False, True, True): (3, 64, 1, 4), (3072, 768, 8192, 128, 128, True, False, True): (6, 64, 2, 4), (3072, 768, 16384, 16, 16, False, True, True): (1, 64, 1, 4), (3072, 768, 16384, 16, 16, True, False, True): (1, 64, 1, 1), (3072, 768, 16384, 32, 32, False, True, True): (1, 64, 1, 4), (3072, 768, 16384, 32, 32, True, False, True): (4, 64, 3, 4), (3072, 768, 16384, 64, 64, False, True, True): (4, 128, 1, 4), (3072, 768, 16384, 64, 64, True, False, True): (4, 64, 2, 4), (3072, 768, 16384, 128, 128, False, True, True): (3, 128, 1, 4), (3072, 768, 16384, 128, 128, True, False, True): (4, 128, 2, 4), (3072, 768, 32768, 16, 16, False, True, True): (1, 128, 1, 4), (3072, 768, 32768, 16, 16, True, False, True): (8, 128, 4, 1), (3072, 768, 32768, 32, 32, False, True, True): (1, 128, 1, 4), (3072, 768, 32768, 32, 32, True, False, True): (8, 128, 3, 4), (3072, 768, 32768, 64, 64, False, True, True): (1, 256, 1, 4), (3072, 768, 32768, 64, 64, True, False, True): (1, 128, 2, 4), (3072, 768, 32768, 128, 128, False, True, True): (3, 256, 1, 4), (3072, 768, 32768, 128, 128, True, False, True): (8, 256, 2, 4), (3072, 768, 50432, 16, 16, False, True, True): (1, 197, 1, 4), (3072, 768, 50432, 16, 16, True, False, True): (7, 197, 4, 1), (3072, 768, 50432, 32, 32, False, True, True): (1, 197, 1, 4), (3072, 768, 50432, 32, 32, True, False, True): (4, 197, 3, 4), (3072, 768, 50432, 64, 64, False, True, True): (1, 394, 1, 4), (3072, 768, 50432, 64, 64, True, False, True): (3, 197, 2, 4), (3072, 768, 50432, 128, 128, False, True, True): (3, 394, 1, 4), (3072, 768, 50432, 128, 128, True, False, True): (8, 394, 2, 4), (3072, 768, 65536, 16, 16, False, True, True): (1, 256, 1, 4), (3072, 768, 65536, 16, 16, True, False, True): (15, 256, 4, 1), (3072, 768, 65536, 32, 32, False, True, True): (1, 256, 1, 4), (3072, 768, 65536, 32, 32, True, False, True): (15, 256, 3, 4), (3072, 768, 65536, 64, 64, False, True, True): (1, 512, 1, 4), (3072, 768, 65536, 64, 64, True, False, True): (2, 256, 2, 4), (3072, 768, 65536, 128, 128, False, True, True): (3, 512, 1, 4), (3072, 768, 65536, 128, 128, True, False, True): (3, 512, 2, 4), (3072, 768, 131072, 16, 16, False, True, True): (1, 512, 1, 4), (3072, 768, 131072, 16, 16, True, False, True): (15, 512, 4, 1), (3072, 768, 131072, 32, 32, False, True, True): (1, 512, 1, 4), (3072, 768, 131072, 32, 32, True, False, True): (9, 512, 3, 4), (3072, 768, 131072, 64, 64, False, True, True): (1, 1024, 1, 4), (3072, 768, 131072, 64, 64, True, False, True): 
(3, 512, 2, 4), (3072, 768, 131072, 128, 128, False, True, True): (3, 1024, 1, 4), (3072, 768, 131072, 128, 128, True, False, True): (1, 1024, 2, 4), (3072, 3072, 256, 16, 16, False, True, True): (5, 4, 1, 4), (3072, 3072, 256, 16, 16, True, False, True): (1, 2, 5, 2), (3072, 3072, 256, 32, 32, False, True, True): (5, 4, 1, 8), (3072, 3072, 256, 32, 32, True, False, True): (1, 4, 4, 2), (3072, 3072, 256, 64, 64, False, True, True): (2, 4, 4, 4), (3072, 3072, 256, 64, 64, True, False, True): (2, 4, 4, 4), (3072, 3072, 256, 128, 128, False, True, True): (1, 2, 3, 8), (3072, 3072, 256, 128, 128, True, False, True): (1, 2, 3, 8), (3072, 3072, 512, 16, 16, False, True, True): (5, 4, 1, 2), (3072, 3072, 512, 16, 16, True, False, True): (1, 2, 3, 4), (3072, 3072, 512, 32, 32, False, True, True): (3, 8, 1, 4), (3072, 3072, 512, 32, 32, True, False, True): (1, 4, 4, 2), (3072, 3072, 512, 64, 64, False, True, True): (1, 8, 2, 2), (3072, 3072, 512, 64, 64, True, False, True): (2, 4, 3, 4), (3072, 3072, 512, 128, 128, False, True, True): (2, 4, 3, 8), (3072, 3072, 512, 128, 128, True, False, True): (1, 4, 3, 8), (3072, 3072, 1024, 16, 16, False, True, True): (1, 8, 1, 4), (3072, 3072, 1024, 16, 16, True, False, True): (2, 8, 3, 1), (3072, 3072, 1024, 32, 32, False, True, True): (1, 16, 1, 4), (3072, 3072, 1024, 32, 32, True, False, True): (1, 4, 4, 4), (3072, 3072, 1024, 64, 64, False, True, True): (1, 8, 3, 4), (3072, 3072, 1024, 64, 64, True, False, True): (2, 4, 3, 4), (3072, 3072, 1024, 128, 128, False, True, True): (1, 8, 1, 4), (3072, 3072, 1024, 128, 128, True, False, True): (2, 8, 3, 8), (3072, 3072, 2048, 16, 16, False, True, True): (1, 16, 1, 2), (3072, 3072, 2048, 16, 16, True, False, True): (2, 16, 4, 2), (3072, 3072, 2048, 32, 32, False, True, True): (1, 16, 1, 8), (3072, 3072, 2048, 32, 32, True, False, True): (3, 8, 4, 4), (3072, 3072, 2048, 64, 64, False, True, True): (3, 16, 3, 4), (3072, 3072, 2048, 64, 64, True, False, True): (3, 8, 3, 4), (3072, 3072, 2048, 128, 128, False, True, True): (1, 16, 3, 8), (3072, 3072, 2048, 128, 128, True, False, True): (5, 16, 3, 8), (3072, 3072, 4096, 16, 16, False, True, True): (1, 32, 1, 2), (3072, 3072, 4096, 16, 16, True, False, True): (4, 32, 4, 2), (3072, 3072, 4096, 32, 32, False, True, True): (1, 32, 1, 8), (3072, 3072, 4096, 32, 32, True, False, True): (3, 16, 3, 4), (3072, 3072, 4096, 64, 64, False, True, True): (1, 32, 3, 4), (3072, 3072, 4096, 64, 64, True, False, True): (3, 16, 3, 4), (3072, 3072, 4096, 128, 128, False, True, True): (3, 32, 3, 8), (3072, 3072, 4096, 128, 128, True, False, True): (3, 32, 3, 8), (3072, 3072, 8192, 16, 16, False, True, True): (1, 64, 1, 2), (3072, 3072, 8192, 16, 16, True, False, True): (4, 64, 4, 2), (3072, 3072, 8192, 32, 32, False, True, True): (1, 64, 1, 8), (3072, 3072, 8192, 32, 32, True, False, True): (6, 32, 3, 4), (3072, 3072, 8192, 64, 64, False, True, True): (1, 64, 3, 4), (3072, 3072, 8192, 64, 64, True, False, True): (2, 32, 3, 4), (3072, 3072, 8192, 128, 128, False, True, True): (2, 64, 3, 8), (3072, 3072, 8192, 128, 128, True, False, True): (1, 64, 3, 8), (3072, 3072, 16384, 16, 16, False, True, True): (1, 128, 1, 2), (3072, 3072, 16384, 16, 16, True, False, True): (4, 128, 4, 2), (3072, 3072, 16384, 32, 32, False, True, True): (1, 64, 1, 2), (3072, 3072, 16384, 32, 32, True, False, True): (4, 64, 3, 4), (3072, 3072, 16384, 64, 64, False, True, True): (1, 128, 3, 4), (3072, 3072, 16384, 64, 64, True, False, True): (4, 64, 3, 4), (3072, 3072, 16384, 128, 128, False, True, True): (1, 128, 1, 
4), (3072, 3072, 16384, 128, 128, True, False, True): (1, 128, 3, 8), (3072, 3072, 32768, 16, 16, False, True, True): (1, 256, 1, 2), (3072, 3072, 32768, 16, 16, True, False, True): (8, 128, 4, 4), (3072, 3072, 32768, 32, 32, False, True, True): (1, 256, 1, 8), (3072, 3072, 32768, 32, 32, True, False, True): (5, 128, 3, 4), (3072, 3072, 32768, 64, 64, False, True, True): (1, 256, 3, 4), (3072, 3072, 32768, 64, 64, True, False, True): (3, 128, 3, 4), (3072, 3072, 32768, 128, 128, False, True, True): (1, 256, 1, 4), (3072, 3072, 32768, 128, 128, True, False, True): (3, 256, 2, 4), (3072, 3072, 65536, 16, 16, False, True, True): (1, 512, 1, 2), (3072, 3072, 65536, 16, 16, True, False, True): (7, 256, 4, 4), (3072, 3072, 65536, 32, 32, False, True, True): (1, 256, 1, 2), (3072, 3072, 65536, 32, 32, True, False, True): (5, 256, 3, 4), (3072, 3072, 65536, 64, 64, False, True, True): (1, 512, 3, 4), (3072, 3072, 65536, 64, 64, True, False, True): (3, 256, 3, 4), (3072, 3072, 65536, 128, 128, False, True, True): (1, 512, 1, 4), (3072, 3072, 65536, 128, 128, True, False, True): (3, 512, 2, 4), (3072, 3072, 131072, 16, 16, False, True, True): (1, 1024, 1, 2), (3072, 3072, 131072, 16, 16, True, False, True): (5, 512, 4, 4), (3072, 3072, 131072, 32, 32, False, True, True): (1, 512, 1, 2), (3072, 3072, 131072, 32, 32, True, False, True): (5, 512, 3, 4), (3072, 3072, 131072, 64, 64, False, True, True): (1, 1024, 3, 4), (3072, 3072, 131072, 64, 64, True, False, True): (3, 512, 3, 4), (3072, 3072, 131072, 128, 128, False, True, True): (1, 1024, 1, 4), (3072, 3072, 131072, 128, 128, True, False, True): (6, 1024, 2, 4), (4096, 4096, 256, 16, 16, False, True, True): (2, 2, 5, 4), (4096, 4096, 256, 16, 16, True, False, True): (2, 2, 4, 2), (4096, 4096, 256, 32, 32, False, True, True): (1, 2, 4, 4), (4096, 4096, 256, 32, 32, True, False, True): (3, 2, 4, 2), (4096, 4096, 256, 64, 64, False, True, True): (3, 4, 3, 4), (4096, 4096, 256, 64, 64, True, False, True): (1, 4, 3, 2), (4096, 4096, 256, 128, 128, False, True, True): (1, 2, 2, 8), (4096, 4096, 256, 128, 128, True, False, True): (1, 2, 2, 8), (4096, 4096, 512, 16, 16, False, True, True): (4, 2, 3, 4), (4096, 4096, 512, 16, 16, True, False, True): (1, 2, 3, 4), (4096, 4096, 512, 32, 32, False, True, True): (1, 4, 3, 4), (4096, 4096, 512, 32, 32, True, False, True): (3, 4, 3, 2), (4096, 4096, 512, 64, 64, False, True, True): (4, 4, 4, 4), (4096, 4096, 512, 64, 64, True, False, True): (3, 4, 3, 4), (4096, 4096, 512, 128, 128, False, True, True): (2, 4, 2, 8), (4096, 4096, 512, 128, 128, True, False, True): (2, 4, 1, 4), (4096, 4096, 1024, 16, 16, False, True, True): (2, 8, 3, 2), (4096, 4096, 1024, 16, 16, True, False, True): (2, 8, 3, 2), (4096, 4096, 1024, 32, 32, False, True, True): (1, 8, 3, 4), (4096, 4096, 1024, 32, 32, True, False, True): (1, 8, 3, 2), (4096, 4096, 1024, 64, 64, False, True, True): (1, 8, 3, 4), (4096, 4096, 1024, 64, 64, True, False, True): (1, 8, 3, 4), (4096, 4096, 1024, 128, 128, False, True, True): (4, 8, 1, 4), (4096, 4096, 1024, 128, 128, True, False, True): (2, 8, 2, 8), (4096, 4096, 2048, 16, 16, False, True, True): (2, 8, 4, 4), (4096, 4096, 2048, 16, 16, True, False, True): (2, 8, 4, 4), (4096, 4096, 2048, 32, 32, False, True, True): (4, 8, 3, 8), (4096, 4096, 2048, 32, 32, True, False, True): (4, 8, 4, 8), (4096, 4096, 2048, 64, 64, False, True, True): (4, 16, 3, 4), (4096, 4096, 2048, 64, 64, True, False, True): (4, 16, 3, 4), (4096, 4096, 2048, 128, 128, False, True, True): (1, 16, 1, 4), (4096, 4096, 2048, 128, 128, 
True, False, True): (4, 16, 1, 4), (4096, 4096, 4096, 16, 16, False, True, True): (4, 32, 4, 4), (4096, 4096, 4096, 16, 16, True, False, True): (2, 32, 4, 4), (4096, 4096, 4096, 32, 32, False, True, True): (4, 16, 4, 8), (4096, 4096, 4096, 32, 32, True, False, True): (4, 16, 4, 8), (4096, 4096, 4096, 64, 64, False, True, True): (4, 32, 3, 4), (4096, 4096, 4096, 64, 64, True, False, True): (2, 32, 3, 4), (4096, 4096, 4096, 128, 128, False, True, True): (2, 32, 1, 4), (4096, 4096, 4096, 128, 128, True, False, True): (2, 32, 1, 4), (4096, 4096, 8192, 16, 16, False, True, True): (4, 64, 4, 2), (4096, 4096, 8192, 16, 16, True, False, True): (4, 64, 4, 2), (4096, 4096, 8192, 32, 32, False, True, True): (4, 32, 4, 8), (4096, 4096, 8192, 32, 32, True, False, True): (4, 32, 4, 8), (4096, 4096, 8192, 64, 64, False, True, True): (4, 64, 3, 4), (4096, 4096, 8192, 64, 64, True, False, True): (4, 64, 3, 4), (4096, 4096, 8192, 128, 128, False, True, True): (1, 64, 1, 4), (4096, 4096, 8192, 128, 128, True, False, True): (1, 64, 1, 4), (4096, 4096, 16384, 16, 16, False, True, True): (4, 64, 4, 4), (4096, 4096, 16384, 16, 16, True, False, True): (4, 64, 4, 4), (4096, 4096, 16384, 32, 32, False, True, True): (4, 64, 4, 8), (4096, 4096, 16384, 32, 32, True, False, True): (4, 64, 4, 8), (4096, 4096, 16384, 64, 64, False, True, True): (4, 128, 3, 4), (4096, 4096, 16384, 64, 64, True, False, True): (4, 128, 3, 4), (4096, 4096, 16384, 128, 128, False, True, True): (1, 128, 1, 4), (4096, 4096, 16384, 128, 128, True, False, True): (1, 128, 1, 4), (4096, 4096, 32768, 16, 16, False, True, True): (8, 128, 4, 4), (4096, 4096, 32768, 16, 16, True, False, True): (5, 128, 4, 4), (4096, 4096, 32768, 32, 32, False, True, True): (5, 128, 4, 4), (4096, 4096, 32768, 32, 32, True, False, True): (3, 128, 4, 8), (4096, 4096, 32768, 64, 64, False, True, True): (3, 256, 3, 4), (4096, 4096, 32768, 64, 64, True, False, True): (2, 256, 3, 4), (4096, 4096, 32768, 128, 128, False, True, True): (1, 256, 1, 4), (4096, 4096, 32768, 128, 128, True, False, True): (1, 256, 1, 4), (4096, 4096, 65536, 16, 16, False, True, True): (5, 256, 4, 4), (4096, 4096, 65536, 16, 16, True, False, True): (5, 256, 4, 4), (4096, 4096, 65536, 32, 32, False, True, True): (4, 256, 4, 8), (4096, 4096, 65536, 32, 32, True, False, True): (4, 256, 4, 8), (4096, 4096, 65536, 64, 64, False, True, True): (1, 512, 3, 4), (4096, 4096, 65536, 64, 64, True, False, True): (3, 512, 3, 4), (4096, 4096, 65536, 128, 128, False, True, True): (1, 512, 1, 4), (4096, 4096, 65536, 128, 128, True, False, True): (1, 512, 1, 4), (4096, 4096, 131072, 16, 16, False, True, True): (5, 512, 4, 4), (4096, 4096, 131072, 16, 16, True, False, True): (5, 512, 4, 4), (4096, 4096, 131072, 32, 32, False, True, True): (4, 512, 4, 4), (4096, 4096, 131072, 32, 32, True, False, True): (2, 512, 3, 4), (4096, 4096, 131072, 64, 64, False, True, True): (1, 1024, 3, 4), (4096, 4096, 131072, 64, 64, True, False, True): (3, 1024, 3, 4), (4096, 4096, 131072, 128, 128, False, True, True): (1, 1024, 1, 4), (4096, 4096, 131072, 128, 128, True, False, True): (1, 1024, 1, 4), (5120, 1280, 65792, 16, 16, False, True, True): (1, 257, 1, 4), (5120, 1280, 65792, 16, 16, True, False, True): (11, 257, 4, 1), (5120, 1280, 65792, 32, 32, False, True, True): (1, 257, 1, 4), (5120, 1280, 65792, 32, 32, True, False, True): (5, 257, 3, 4), (5120, 1280, 65792, 64, 64, False, True, True): (1, 514, 1, 4), (5120, 1280, 65792, 64, 64, True, False, True): (5, 257, 2, 4), (5120, 1280, 65792, 128, 128, False, True, True): (3, 514, 1, 
4), (5120, 1280, 65792, 128, 128, True, False, True): (7, 514, 2, 4), (6144, 6144, 256, 16, 16, False, True, True): (1, 2, 1, 4), (6144, 6144, 256, 16, 16, True, False, True): (3, 1, 4, 4), (6144, 6144, 256, 32, 32, False, True, True): (3, 2, 1, 8), (6144, 6144, 256, 32, 32, True, False, True): (1, 1, 4, 4), (6144, 6144, 256, 64, 64, False, True, True): (4, 2, 3, 4), (6144, 6144, 256, 64, 64, True, False, True): (3, 2, 4, 4), (6144, 6144, 256, 128, 128, False, True, True): (2, 2, 3, 8), (6144, 6144, 256, 128, 128, True, False, True): (1, 2, 3, 8), (6144, 6144, 512, 16, 16, False, True, True): (4, 4, 1, 4), (6144, 6144, 512, 16, 16, True, False, True): (3, 2, 3, 1), (6144, 6144, 512, 32, 32, False, True, True): (1, 8, 1, 4), (6144, 6144, 512, 32, 32, True, False, True): (1, 2, 3, 2), (6144, 6144, 512, 64, 64, False, True, True): (2, 4, 3, 4), (6144, 6144, 512, 64, 64, True, False, True): (2, 2, 3, 4), (6144, 6144, 512, 128, 128, False, True, True): (1, 4, 3, 8), (6144, 6144, 512, 128, 128, True, False, True): (1, 4, 3, 8), (6144, 6144, 1024, 16, 16, False, True, True): (1, 8, 1, 2), (6144, 6144, 1024, 16, 16, True, False, True): (4, 8, 4, 4), (6144, 6144, 1024, 32, 32, False, True, True): (1, 8, 4, 2), (6144, 6144, 1024, 32, 32, True, False, True): (1, 8, 4, 2), (6144, 6144, 1024, 64, 64, False, True, True): (4, 8, 3, 4), (6144, 6144, 1024, 64, 64, True, False, True): (1, 4, 3, 4), (6144, 6144, 1024, 128, 128, False, True, True): (2, 8, 3, 8), (6144, 6144, 1024, 128, 128, True, False, True): (1, 8, 3, 8), (6144, 6144, 2048, 16, 16, False, True, True): (4, 4, 1, 4), (6144, 6144, 2048, 16, 16, True, False, True): (2, 8, 4, 4), (6144, 6144, 2048, 32, 32, False, True, True): (1, 16, 4, 2), (6144, 6144, 2048, 32, 32, True, False, True): (4, 8, 4, 8), (6144, 6144, 2048, 64, 64, False, True, True): (4, 16, 3, 4), (6144, 6144, 2048, 64, 64, True, False, True): (2, 8, 3, 4), (6144, 6144, 2048, 128, 128, False, True, True): (1, 16, 3, 8), (6144, 6144, 2048, 128, 128, True, False, True): (4, 16, 3, 8), (6144, 6144, 4096, 16, 16, False, True, True): (4, 8, 1, 4), (6144, 6144, 4096, 16, 16, True, False, True): (4, 32, 4, 2), (6144, 6144, 4096, 32, 32, False, True, True): (4, 16, 1, 2), (6144, 6144, 4096, 32, 32, True, False, True): (2, 8, 3, 8), (6144, 6144, 4096, 64, 64, False, True, True): (4, 32, 3, 4), (6144, 6144, 4096, 64, 64, True, False, True): (4, 16, 3, 4), (6144, 6144, 4096, 128, 128, False, True, True): (4, 32, 3, 8), (6144, 6144, 4096, 128, 128, True, False, True): (4, 32, 3, 8), (6144, 6144, 8192, 16, 16, False, True, True): (2, 16, 1, 2), (6144, 6144, 8192, 16, 16, True, False, True): (4, 64, 4, 2), (6144, 6144, 8192, 32, 32, False, True, True): (4, 32, 1, 2), (6144, 6144, 8192, 32, 32, True, False, True): (4, 32, 4, 8), (6144, 6144, 8192, 64, 64, False, True, True): (4, 64, 3, 4), (6144, 6144, 8192, 64, 64, True, False, True): (4, 32, 3, 4), (6144, 6144, 8192, 128, 128, False, True, True): (4, 64, 3, 8), (6144, 6144, 8192, 128, 128, True, False, True): (4, 64, 3, 8), (6144, 6144, 16384, 16, 16, False, True, True): (2, 32, 1, 2), (6144, 6144, 16384, 16, 16, True, False, True): (4, 64, 4, 4), (6144, 6144, 16384, 32, 32, False, True, True): (4, 64, 1, 2), (6144, 6144, 16384, 32, 32, True, False, True): (4, 64, 3, 2), (6144, 6144, 16384, 64, 64, False, True, True): (4, 128, 3, 4), (6144, 6144, 16384, 64, 64, True, False, True): (2, 32, 3, 8), (6144, 6144, 16384, 128, 128, False, True, True): (4, 128, 3, 8), (6144, 6144, 16384, 128, 128, True, False, True): (4, 128, 3, 8), (6144, 6144, 32768, 
16, 16, False, True, True): (2, 64, 1, 2), (6144, 6144, 32768, 16, 16, True, False, True): (3, 128, 4, 4), (6144, 6144, 32768, 32, 32, False, True, True): (4, 128, 1, 2), (6144, 6144, 32768, 32, 32, True, False, True): (3, 128, 3, 4), (6144, 6144, 32768, 64, 64, False, True, True): (4, 256, 3, 4), (6144, 6144, 32768, 64, 64, True, False, True): (2, 64, 3, 8), (6144, 6144, 32768, 128, 128, False, True, True): (4, 256, 3, 8), (6144, 6144, 32768, 128, 128, True, False, True): (4, 256, 3, 8), (6144, 6144, 65536, 16, 16, False, True, True): (2, 128, 1, 2), (6144, 6144, 65536, 16, 16, True, False, True): (4, 256, 4, 4), (6144, 6144, 65536, 32, 32, False, True, True): (4, 256, 1, 2), (6144, 6144, 65536, 32, 32, True, False, True): (4, 256, 3, 4), (6144, 6144, 65536, 64, 64, False, True, True): (4, 512, 3, 4), (6144, 6144, 65536, 64, 64, True, False, True): (2, 128, 3, 8), (6144, 6144, 65536, 128, 128, False, True, True): (4, 512, 3, 8), (6144, 6144, 65536, 128, 128, True, False, True): (4, 512, 3, 8), (6144, 6144, 131072, 16, 16, False, True, True): (2, 256, 1, 2), (6144, 6144, 131072, 16, 16, True, False, True): (5, 512, 4, 1), (6144, 6144, 131072, 32, 32, False, True, True): (4, 512, 1, 2), (6144, 6144, 131072, 32, 32, True, False, True): (4, 512, 3, 2), (6144, 6144, 131072, 64, 64, False, True, True): (4, 1024, 3, 4), (6144, 6144, 131072, 64, 64, True, False, True): (2, 256, 3, 8), (6144, 6144, 131072, 128, 128, False, True, True): (4, 1024, 3, 8), (6144, 6144, 131072, 128, 128, True, False, True): (4, 1024, 3, 8), (8192, 8192, 256, 16, 16, False, True, True): (1, 1, 3, 4), (8192, 8192, 256, 16, 16, True, False, True): (4, 1, 3, 4), (8192, 8192, 256, 32, 32, False, True, True): (1, 2, 3, 4), (8192, 8192, 256, 32, 32, True, False, True): (1, 2, 3, 4), (8192, 8192, 256, 64, 64, False, True, True): (6, 2, 3, 8), (8192, 8192, 256, 64, 64, True, False, True): (4, 2, 3, 8), (8192, 8192, 256, 128, 128, False, True, True): (1, 2, 1, 4), (8192, 8192, 256, 128, 128, True, False, True): (1, 2, 1, 4), (8192, 8192, 512, 16, 16, False, True, True): (4, 4, 3, 2), (8192, 8192, 512, 16, 16, True, False, True): (4, 4, 3, 4), (8192, 8192, 512, 32, 32, False, True, True): (1, 4, 3, 4), (8192, 8192, 512, 32, 32, True, False, True): (3, 4, 3, 2), (8192, 8192, 512, 64, 64, False, True, True): (1, 4, 3, 4), (8192, 8192, 512, 64, 64, True, False, True): (1, 4, 3, 4), (8192, 8192, 512, 128, 128, False, True, True): (4, 4, 2, 8), (8192, 8192, 512, 128, 128, True, False, True): (4, 4, 2, 8), (8192, 8192, 1024, 16, 16, False, True, True): (4, 8, 4, 4), (8192, 8192, 1024, 16, 16, True, False, True): (2, 8, 4, 4), (8192, 8192, 1024, 32, 32, False, True, True): (2, 4, 4, 8), (8192, 8192, 1024, 32, 32, True, False, True): (1, 4, 3, 4), (8192, 8192, 1024, 64, 64, False, True, True): (4, 8, 3, 4), (8192, 8192, 1024, 64, 64, True, False, True): (2, 8, 3, 4), (8192, 8192, 1024, 128, 128, False, True, True): (4, 8, 1, 4), (8192, 8192, 1024, 128, 128, True, False, True): (4, 8, 1, 4), (8192, 8192, 2048, 16, 16, False, True, True): (2, 8, 4, 4), (8192, 8192, 2048, 16, 16, True, False, True): (2, 8, 4, 4), (8192, 8192, 2048, 32, 32, False, True, True): (2, 8, 4, 8), (8192, 8192, 2048, 32, 32, True, False, True): (2, 8, 4, 8), (8192, 8192, 2048, 64, 64, False, True, True): (4, 8, 2, 4), (8192, 8192, 2048, 64, 64, True, False, True): (4, 16, 3, 4), (8192, 8192, 2048, 128, 128, False, True, True): (4, 16, 1, 4), (8192, 8192, 2048, 128, 128, True, False, True): (4, 16, 1, 4), (8192, 8192, 4096, 16, 16, False, True, True): (4, 16, 4, 4), 
(8192, 8192, 4096, 16, 16, True, False, True): (4, 32, 4, 2), (8192, 8192, 4096, 32, 32, False, True, True): (2, 16, 4, 8), (8192, 8192, 4096, 32, 32, True, False, True): (2, 16, 4, 8), (8192, 8192, 4096, 64, 64, False, True, True): (4, 32, 3, 4), (8192, 8192, 4096, 64, 64, True, False, True): (4, 16, 2, 4), (8192, 8192, 4096, 128, 128, False, True, True): (4, 32, 1, 4), (8192, 8192, 4096, 128, 128, True, False, True): (4, 32, 1, 4), (8192, 8192, 8192, 16, 16, False, True, True): (4, 64, 4, 2), (8192, 8192, 8192, 16, 16, True, False, True): (4, 64, 4, 2), (8192, 8192, 8192, 32, 32, False, True, True): (2, 32, 4, 8), (8192, 8192, 8192, 32, 32, True, False, True): (2, 32, 4, 8), (8192, 8192, 8192, 64, 64, False, True, True): (4, 32, 3, 8), (8192, 8192, 8192, 64, 64, True, False, True): (4, 32, 2, 4), (8192, 8192, 8192, 128, 128, False, True, True): (4, 64, 1, 4), (8192, 8192, 8192, 128, 128, True, False, True): (4, 64, 1, 4), (8192, 8192, 16384, 16, 16, False, True, True): (4, 64, 4, 4), (8192, 8192, 16384, 16, 16, True, False, True): (4, 64, 4, 4), (8192, 8192, 16384, 32, 32, False, True, True): (4, 64, 3, 4), (8192, 8192, 16384, 32, 32, True, False, True): (4, 64, 4, 8), (8192, 8192, 16384, 64, 64, False, True, True): (4, 64, 2, 4), (8192, 8192, 16384, 64, 64, True, False, True): (4, 64, 2, 4), (8192, 8192, 16384, 128, 128, False, True, True): (4, 128, 1, 4), (8192, 8192, 16384, 128, 128, True, False, True): (4, 128, 1, 4), (8192, 8192, 32768, 16, 16, False, True, True): (3, 128, 4, 4), (8192, 8192, 32768, 16, 16, True, False, True): (3, 128, 4, 4), (8192, 8192, 32768, 32, 32, False, True, True): (2, 128, 4, 8), (8192, 8192, 32768, 32, 32, True, False, True): (2, 128, 4, 8), (8192, 8192, 32768, 64, 64, False, True, True): (2, 128, 2, 4), (8192, 8192, 32768, 64, 64, True, False, True): (2, 128, 2, 4), (8192, 8192, 32768, 128, 128, False, True, True): (4, 256, 1, 4), (8192, 8192, 32768, 128, 128, True, False, True): (4, 256, 1, 4), (8192, 8192, 65536, 16, 16, False, True, True): (3, 256, 4, 4), (8192, 8192, 65536, 16, 16, True, False, True): (3, 256, 4, 4), (8192, 8192, 65536, 32, 32, False, True, True): (2, 256, 3, 4), (8192, 8192, 65536, 32, 32, True, False, True): (2, 256, 3, 4), (8192, 8192, 65536, 64, 64, False, True, True): (2, 256, 2, 4), (8192, 8192, 65536, 64, 64, True, False, True): (2, 256, 3, 8), (8192, 8192, 65536, 128, 128, False, True, True): (4, 512, 1, 4), (8192, 8192, 65536, 128, 128, True, False, True): (4, 512, 1, 4), (8192, 8192, 131072, 16, 16, False, True, True): (3, 512, 4, 4), (8192, 8192, 131072, 16, 16, True, False, True): (3, 512, 4, 4), (8192, 8192, 131072, 32, 32, False, True, True): (2, 512, 4, 4), (8192, 8192, 131072, 32, 32, True, False, True): (2, 512, 3, 4), (8192, 8192, 131072, 64, 64, False, True, True): (4, 512, 2, 4), (8192, 8192, 131072, 64, 64, True, False, True): (2, 512, 2, 4), (8192, 8192, 131072, 128, 128, False, True, True): (4, 1024, 1, 4), (8192, 8192, 131072, 128, 128, True, False, True): (4, 1024, 1, 4), (16384, 16384, 256, 16, 16, False, True, True): (2, 2, 6, 4), (16384, 16384, 256, 16, 16, True, False, True): (2, 2, 6, 4), (16384, 16384, 256, 32, 32, False, True, True): (4, 2, 3, 2), (16384, 16384, 256, 32, 32, True, False, True): (4, 2, 3, 2), (16384, 16384, 256, 64, 64, False, True, True): (2, 2, 4, 4), (16384, 16384, 256, 64, 64, True, False, True): (4, 2, 3, 8), (16384, 16384, 256, 128, 128, False, True, True): (4, 2, 2, 8), (16384, 16384, 256, 128, 128, True, False, True): (4, 2, 2, 8), (16384, 16384, 512, 16, 16, False, True, True): 
(1, 2, 4, 4), (16384, 16384, 512, 16, 16, True, False, True): (1, 2, 4, 4), (16384, 16384, 512, 32, 32, False, True, True): (2, 2, 4, 8), (16384, 16384, 512, 32, 32, True, False, True): (2, 2, 4, 8), (16384, 16384, 512, 64, 64, False, True, True): (4, 4, 3, 4), (16384, 16384, 512, 64, 64, True, False, True): (4, 4, 3, 4), (16384, 16384, 512, 128, 128, False, True, True): (4, 4, 2, 8), (16384, 16384, 512, 128, 128, True, False, True): (4, 4, 2, 8), (16384, 16384, 1024, 16, 16, False, True, True): (3, 4, 4, 4), (16384, 16384, 1024, 16, 16, True, False, True): (2, 8, 4, 4), (16384, 16384, 1024, 32, 32, False, True, True): (2, 4, 4, 8), (16384, 16384, 1024, 32, 32, True, False, True): (1, 4, 4, 8), (16384, 16384, 1024, 64, 64, False, True, True): (2, 8, 3, 4), (16384, 16384, 1024, 64, 64, True, False, True): (2, 8, 3, 4), (16384, 16384, 1024, 128, 128, False, True, True): (4, 8, 1, 4), (16384, 16384, 1024, 128, 128, True, False, True): (4, 8, 1, 4), (16384, 16384, 2048, 16, 16, False, True, True): (2, 8, 4, 4), (16384, 16384, 2048, 16, 16, True, False, True): (2, 8, 4, 4), (16384, 16384, 2048, 32, 32, False, True, True): (1, 8, 4, 8), (16384, 16384, 2048, 32, 32, True, False, True): (2, 8, 4, 8), (16384, 16384, 2048, 64, 64, False, True, True): (2, 8, 2, 4), (16384, 16384, 2048, 64, 64, True, False, True): (2, 8, 2, 4), (16384, 16384, 2048, 128, 128, False, True, True): (4, 16, 1, 4), (16384, 16384, 2048, 128, 128, True, False, True): (4, 16, 1, 4), (16384, 16384, 4096, 16, 16, False, True, True): (2, 16, 4, 4), (16384, 16384, 4096, 16, 16, True, False, True): (2, 16, 4, 4), (16384, 16384, 4096, 32, 32, False, True, True): (1, 8, 3, 8), (16384, 16384, 4096, 32, 32, True, False, True): (2, 16, 3, 4), (16384, 16384, 4096, 64, 64, False, True, True): (2, 16, 2, 4), (16384, 16384, 4096, 64, 64, True, False, True): (2, 16, 2, 4), (16384, 16384, 4096, 128, 128, False, True, True): (4, 32, 1, 4), (16384, 16384, 4096, 128, 128, True, False, True): (4, 32, 1, 4), (16384, 16384, 8192, 16, 16, False, True, True): (4, 64, 4, 2), (16384, 16384, 8192, 16, 16, True, False, True): (4, 64, 4, 2), (16384, 16384, 8192, 32, 32, False, True, True): (2, 32, 4, 8), (16384, 16384, 8192, 32, 32, True, False, True): (2, 32, 3, 4), (16384, 16384, 8192, 64, 64, False, True, True): (2, 32, 4, 8), (16384, 16384, 8192, 64, 64, True, False, True): (2, 32, 3, 8), (16384, 16384, 8192, 128, 128, False, True, True): (4, 64, 1, 4), (16384, 16384, 8192, 128, 128, True, False, True): (4, 64, 1, 4), (16384, 16384, 16384, 16, 16, False, True, True): (1, 64, 4, 4), (16384, 16384, 16384, 16, 16, True, False, True): (1, 64, 4, 4), (16384, 16384, 16384, 32, 32, False, True, True): (1, 64, 3, 8), (16384, 16384, 16384, 32, 32, True, False, True): (1, 64, 3, 4), (16384, 16384, 16384, 64, 64, False, True, True): (1, 64, 2, 4), (16384, 16384, 16384, 64, 64, True, False, True): (1, 64, 4, 8), (16384, 16384, 16384, 128, 128, False, True, True): (4, 128, 1, 4), (16384, 16384, 16384, 128, 128, True, False, True): (4, 128, 1, 4), (16384, 16384, 32768, 16, 16, False, True, True): (1, 128, 4, 4), (16384, 16384, 32768, 16, 16, True, False, True): (1, 128, 4, 4), (16384, 16384, 32768, 32, 32, False, True, True): (1, 128, 4, 2), (16384, 16384, 32768, 32, 32, True, False, True): (1, 128, 3, 8), (16384, 16384, 32768, 64, 64, False, True, True): (2, 128, 2, 4), (16384, 16384, 32768, 64, 64, True, False, True): (1, 128, 3, 8), (16384, 16384, 32768, 128, 128, False, True, True): (4, 256, 1, 4), (16384, 16384, 32768, 128, 128, True, False, True): (4, 256, 1, 
4), (16384, 16384, 65536, 16, 16, False, True, True): (1, 256, 4, 4), (16384, 16384, 65536, 16, 16, True, False, True): (1, 256, 4, 4), (16384, 16384, 65536, 32, 32, False, True, True): (1, 256, 3, 4), (16384, 16384, 65536, 32, 32, True, False, True): (1, 256, 3, 4), (16384, 16384, 65536, 64, 64, False, True, True): (1, 256, 2, 4), (16384, 16384, 65536, 64, 64, True, False, True): (2, 256, 2, 4), (16384, 16384, 65536, 128, 128, False, True, True): (4, 512, 1, 4), (16384, 16384, 65536, 128, 128, True, False, True): (4, 512, 1, 4), (16384, 16384, 131072, 16, 16, False, True, True): (2, 512, 4, 4), (16384, 16384, 131072, 16, 16, True, False, True): (1, 512, 4, 4), (16384, 16384, 131072, 32, 32, False, True, True): (1, 512, 4, 8), (16384, 16384, 131072, 32, 32, True, False, True): (1, 512, 3, 4), (16384, 16384, 131072, 64, 64, False, True, True): (2, 512, 2, 4), (16384, 16384, 131072, 64, 64, True, False, True): (1, 512, 2, 4), (16384, 16384, 131072, 128, 128, False, True, True): (4, 1024, 1, 4), (16384, 16384, 131072, 128, 128, True, False, True): (4, 1024, 1, 4), }, ("bsr_dense_addmm", "NVIDIA A100-SXM4-80GB", (0, torch.bfloat16, 0.56)): { (192, 192, 256, 64, 64, False, True, True): (3, 4, 3, 4), (192, 192, 256, 64, 64, True, False, True): (1, 4, 4, 4), (192, 192, 512, 64, 64, False, True, True): (2, 8, 3, 4), (192, 192, 512, 64, 64, True, False, True): (2, 8, 3, 4), (192, 192, 1024, 64, 64, False, True, True): (1, 16, 3, 4), (192, 192, 1024, 64, 64, True, False, True): (1, 16, 5, 4), (192, 192, 2048, 64, 64, False, True, True): (3, 32, 3, 4), (192, 192, 2048, 64, 64, True, False, True): (5, 32, 3, 4), (192, 192, 4096, 64, 64, False, True, True): (1, 64, 4, 4), (192, 192, 4096, 64, 64, True, False, True): (2, 32, 3, 4), (192, 192, 8192, 64, 64, False, True, True): (1, 128, 2, 4), (192, 192, 8192, 64, 64, True, False, True): (1, 64, 3, 4), (192, 192, 16384, 64, 64, False, True, True): (1, 256, 1, 4), (192, 192, 16384, 64, 64, True, False, True): (1, 64, 3, 4), (192, 192, 32768, 64, 64, False, True, True): (2, 512, 1, 2), (192, 192, 32768, 64, 64, True, False, True): (2, 256, 2, 4), (192, 192, 65536, 64, 64, False, True, True): (3, 512, 1, 4), (192, 192, 65536, 64, 64, True, False, True): (1, 512, 2, 4), (192, 192, 131072, 64, 64, False, True, True): (5, 1024, 1, 4), (192, 192, 131072, 64, 64, True, False, True): (4, 512, 2, 4), (384, 384, 256, 128, 128, False, True, True): (3, 2, 3, 8), (384, 384, 256, 128, 128, True, False, True): (1, 2, 3, 8), (384, 384, 512, 128, 128, False, True, True): (4, 4, 3, 8), (384, 384, 512, 128, 128, True, False, True): (3, 4, 3, 8), (384, 384, 1024, 128, 128, False, True, True): (1, 8, 3, 8), (384, 384, 1024, 128, 128, True, False, True): (2, 8, 3, 8), (384, 384, 2048, 128, 128, False, True, True): (5, 16, 3, 8), (384, 384, 2048, 128, 128, True, False, True): (5, 16, 3, 8), (384, 384, 4096, 128, 128, False, True, True): (3, 32, 3, 8), (384, 384, 4096, 128, 128, True, False, True): (6, 32, 3, 8), (384, 384, 8192, 128, 128, False, True, True): (2, 64, 3, 8), (384, 384, 8192, 128, 128, True, False, True): (4, 32, 2, 8), (384, 384, 16384, 128, 128, False, True, True): (2, 128, 3, 8), (384, 384, 16384, 128, 128, True, False, True): (5, 128, 2, 4), (384, 384, 32768, 128, 128, False, True, True): (2, 256, 3, 8), (384, 384, 32768, 128, 128, True, False, True): (3, 256, 2, 4), (384, 384, 65536, 128, 128, False, True, True): (3, 512, 1, 4), (384, 384, 65536, 128, 128, True, False, True): (1, 512, 2, 4), (384, 384, 131072, 128, 128, False, True, True): (3, 1024, 1, 4), 
(384, 384, 131072, 128, 128, True, False, True): (1, 1024, 2, 4), }, ("bsr_dense_addmm", "NVIDIA A100-SXM4-80GB", (0, torch.float16, 0.5)): { (16, 16, 16, 16, 16, False, False, False): (1, 1, 1, 1), (16, 16, 16, 16, 16, False, False, True): (1, 1, 2, 2), (16, 16, 16, 16, 16, False, True, False): (1, 1, 1, 1), (16, 16, 16, 16, 16, False, True, True): (1, 1, 1, 8), (16, 16, 16, 16, 16, True, False, False): (3, 1, 3, 4), (16, 16, 16, 16, 16, True, False, True): (1, 1, 2, 1), (16, 16, 32, 16, 16, False, False, False): (1, 2, 1, 8), (16, 16, 32, 16, 16, False, False, True): (1, 2, 1, 2), (16, 16, 32, 16, 16, False, True, False): (2, 1, 1, 4), (16, 16, 32, 16, 16, False, True, True): (1, 2, 1, 4), (16, 16, 32, 16, 16, True, False, False): (1, 1, 1, 4), (16, 16, 32, 16, 16, True, False, True): (1, 2, 1, 2), (16, 16, 64, 16, 16, False, False, False): (1, 4, 1, 1), (16, 16, 64, 16, 16, False, False, True): (1, 2, 2, 4), (16, 16, 64, 16, 16, False, True, False): (1, 4, 1, 4), (16, 16, 64, 16, 16, False, True, True): (1, 2, 1, 4), (16, 16, 64, 16, 16, True, False, False): (1, 4, 1, 2), (16, 16, 64, 16, 16, True, False, True): (1, 1, 1, 2), (16, 32, 16, 16, 16, False, False, False): (1, 1, 2, 4), (16, 32, 16, 16, 16, False, False, True): (1, 1, 1, 4), (16, 32, 16, 16, 16, False, True, False): (1, 1, 1, 2), (16, 32, 16, 16, 16, False, True, True): (1, 1, 1, 2), (16, 32, 16, 16, 16, True, False, False): (1, 1, 2, 16), (16, 32, 16, 16, 16, True, False, True): (1, 1, 1, 4), (16, 32, 16, 16, 32, False, False, False): (2, 1, 1, 8), (16, 32, 16, 16, 32, False, False, True): (2, 1, 1, 8), (16, 32, 16, 16, 32, False, True, False): (1, 1, 2, 1), (16, 32, 16, 16, 32, False, True, True): (1, 1, 1, 4), (16, 32, 16, 16, 32, True, False, False): (2, 1, 1, 8), (16, 32, 16, 16, 32, True, False, True): (1, 1, 2, 4), (16, 32, 32, 16, 16, False, False, False): (1, 1, 1, 16), (16, 32, 32, 16, 16, False, False, True): (1, 2, 1, 2), (16, 32, 32, 16, 16, False, True, False): (1, 2, 1, 8), (16, 32, 32, 16, 16, False, True, True): (3, 2, 1, 4), (16, 32, 32, 16, 16, True, False, False): (1, 2, 1, 4), (16, 32, 32, 16, 16, True, False, True): (1, 2, 1, 2), (16, 32, 32, 16, 32, False, False, False): (1, 2, 1, 2), (16, 32, 32, 16, 32, False, False, True): (1, 1, 1, 4), (16, 32, 32, 16, 32, False, True, False): (1, 1, 2, 4), (16, 32, 32, 16, 32, False, True, True): (1, 2, 1, 2), (16, 32, 32, 16, 32, True, False, False): (1, 2, 1, 2), (16, 32, 32, 16, 32, True, False, True): (1, 2, 1, 16), (16, 32, 64, 16, 16, False, False, False): (1, 4, 1, 4), (16, 32, 64, 16, 16, False, False, True): (2, 4, 1, 4), (16, 32, 64, 16, 16, False, True, False): (1, 4, 1, 4), (16, 32, 64, 16, 16, False, True, True): (1, 4, 1, 4), (16, 32, 64, 16, 16, True, False, False): (3, 4, 1, 2), (16, 32, 64, 16, 16, True, False, True): (1, 4, 1, 1), (16, 32, 64, 16, 32, False, False, False): (1, 4, 1, 16), (16, 32, 64, 16, 32, False, False, True): (1, 2, 1, 2), (16, 32, 64, 16, 32, False, True, False): (1, 4, 2, 2), (16, 32, 64, 16, 32, False, True, True): (1, 4, 1, 8), (16, 32, 64, 16, 32, True, False, False): (1, 4, 1, 8), (16, 32, 64, 16, 32, True, False, True): (1, 2, 1, 4), (16, 64, 16, 16, 32, False, False, False): (1, 1, 1, 2), (16, 64, 16, 16, 32, False, False, True): (1, 1, 1, 4), (16, 64, 16, 16, 32, False, True, False): (2, 1, 2, 4), (16, 64, 16, 16, 32, False, True, True): (1, 1, 1, 4), (16, 64, 16, 16, 32, True, False, False): (1, 1, 1, 4), (16, 64, 16, 16, 32, True, False, True): (1, 1, 1, 4), (16, 64, 32, 16, 32, False, False, False): (1, 2, 1, 2), 
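# NOTE (assumption, not stated in this data file): each key tuple in these
# auto-generated tables appears to describe a bsr_dense_addmm problem, i.e. the
# operand dimensions, the BSR block sizes, and boolean flags derived from the
# scalar arguments, while each value tuple carries the Triton launch parameters
# tuned for that problem on the GPU, dtype, and sparsity named in the enclosing
# key. The authoritative field order is defined by the module that consumes
# this table, not restated here.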
(16, 64, 32, 16, 32, False, False, True): (1, 1, 1, 4), (16, 64, 32, 16, 32, False, True, False): (1, 1, 1, 4), (16, 64, 32, 16, 32, False, True, True): (1, 2, 3, 2), (16, 64, 32, 16, 32, True, False, False): (1, 1, 1, 4), (16, 64, 32, 16, 32, True, False, True): (1, 1, 2, 4), (16, 64, 64, 16, 32, False, False, False): (1, 4, 1, 8), (16, 64, 64, 16, 32, False, False, True): (1, 4, 1, 4), (16, 64, 64, 16, 32, False, True, False): (1, 4, 1, 1), (16, 64, 64, 16, 32, False, True, True): (2, 4, 1, 4), (16, 64, 64, 16, 32, True, False, False): (1, 4, 1, 4), (16, 64, 64, 16, 32, True, False, True): (1, 4, 1, 4), (32, 16, 16, 16, 16, False, False, False): (2, 1, 2, 4), (32, 16, 16, 16, 16, False, False, True): (2, 1, 1, 2), (32, 16, 16, 16, 16, False, True, False): (1, 1, 2, 4), (32, 16, 16, 16, 16, False, True, True): (1, 1, 1, 2), (32, 16, 16, 16, 16, True, False, False): (1, 1, 1, 4), (32, 16, 16, 16, 16, True, False, True): (2, 1, 1, 2), (32, 16, 32, 16, 16, False, False, False): (1, 1, 1, 4), (32, 16, 32, 16, 16, False, False, True): (1, 1, 1, 4), (32, 16, 32, 16, 16, False, True, False): (1, 2, 1, 4), (32, 16, 32, 16, 16, False, True, True): (2, 2, 1, 4), (32, 16, 32, 16, 16, True, False, False): (2, 1, 1, 4), (32, 16, 32, 16, 16, True, False, True): (2, 2, 1, 2), (32, 16, 64, 16, 16, False, False, False): (1, 4, 1, 2), (32, 16, 64, 16, 16, False, False, True): (1, 4, 1, 4), (32, 16, 64, 16, 16, False, True, False): (1, 2, 1, 4), (32, 16, 64, 16, 16, False, True, True): (1, 4, 1, 2), (32, 16, 64, 16, 16, True, False, False): (1, 4, 2, 8), (32, 16, 64, 16, 16, True, False, True): (1, 4, 1, 1), (32, 32, 16, 16, 16, False, False, False): (1, 1, 1, 4), (32, 32, 16, 16, 16, False, False, True): (2, 1, 1, 4), (32, 32, 16, 16, 16, False, True, False): (1, 1, 2, 4), (32, 32, 16, 16, 16, False, True, True): (1, 1, 2, 2), (32, 32, 16, 16, 16, True, False, False): (1, 1, 1, 8), (32, 32, 16, 16, 16, True, False, True): (1, 1, 1, 4), (32, 32, 16, 16, 32, False, False, False): (1, 1, 3, 2), (32, 32, 16, 16, 32, False, False, True): (2, 1, 1, 4), (32, 32, 16, 16, 32, False, True, False): (3, 1, 1, 4), (32, 32, 16, 16, 32, False, True, True): (1, 1, 1, 4), (32, 32, 16, 16, 32, True, False, False): (2, 1, 1, 8), (32, 32, 16, 16, 32, True, False, True): (1, 1, 3, 2), (32, 32, 16, 32, 32, False, False, False): (1, 1, 1, 2), (32, 32, 16, 32, 32, False, False, True): (2, 1, 1, 8), (32, 32, 16, 32, 32, False, True, False): (1, 1, 1, 2), (32, 32, 16, 32, 32, False, True, True): (1, 1, 1, 8), (32, 32, 16, 32, 32, True, False, False): (1, 1, 2, 4), (32, 32, 16, 32, 32, True, False, True): (1, 1, 1, 2), (32, 32, 32, 16, 16, False, False, False): (1, 1, 1, 4), (32, 32, 32, 16, 16, False, False, True): (1, 2, 1, 4), (32, 32, 32, 16, 16, False, True, False): (1, 2, 1, 4), (32, 32, 32, 16, 16, False, True, True): (1, 2, 1, 2), (32, 32, 32, 16, 16, True, False, False): (1, 2, 1, 4), (32, 32, 32, 16, 16, True, False, True): (1, 2, 1, 4), (32, 32, 32, 16, 32, False, False, False): (1, 2, 1, 4), (32, 32, 32, 16, 32, False, False, True): (1, 2, 1, 2), (32, 32, 32, 16, 32, False, True, False): (1, 2, 1, 4), (32, 32, 32, 16, 32, False, True, True): (1, 2, 1, 2), (32, 32, 32, 16, 32, True, False, False): (1, 2, 1, 1), (32, 32, 32, 16, 32, True, False, True): (1, 2, 1, 2), (32, 32, 32, 32, 32, False, False, False): (1, 1, 1, 4), (32, 32, 32, 32, 32, False, False, True): (2, 1, 1, 4), (32, 32, 32, 32, 32, False, True, False): (1, 1, 1, 8), (32, 32, 32, 32, 32, False, True, True): (1, 1, 1, 8), (32, 32, 32, 32, 32, True, False, 
False): (1, 1, 3, 4), (32, 32, 32, 32, 32, True, False, True): (1, 1, 1, 8), (32, 32, 64, 16, 16, False, False, False): (1, 4, 1, 4), (32, 32, 64, 16, 16, False, False, True): (1, 4, 1, 2), (32, 32, 64, 16, 16, False, True, False): (1, 1, 1, 4), (32, 32, 64, 16, 16, False, True, True): (1, 4, 1, 4), (32, 32, 64, 16, 16, True, False, False): (1, 4, 1, 8), (32, 32, 64, 16, 16, True, False, True): (1, 4, 1, 2), (32, 32, 64, 16, 32, False, False, False): (1, 1, 1, 4), (32, 32, 64, 16, 32, False, False, True): (1, 4, 1, 4), (32, 32, 64, 16, 32, False, True, False): (1, 1, 1, 4), (32, 32, 64, 16, 32, False, True, True): (1, 4, 1, 4), (32, 32, 64, 16, 32, True, False, False): (2, 2, 1, 8), (32, 32, 64, 16, 32, True, False, True): (1, 2, 1, 2), (32, 32, 64, 32, 32, False, False, False): (1, 2, 1, 4), (32, 32, 64, 32, 32, False, False, True): (1, 2, 1, 1), (32, 32, 64, 32, 32, False, True, False): (1, 2, 2, 8), (32, 32, 64, 32, 32, False, True, True): (1, 1, 1, 4), (32, 32, 64, 32, 32, True, False, False): (1, 2, 1, 4), (32, 32, 64, 32, 32, True, False, True): (2, 2, 1, 4), (32, 64, 16, 16, 32, False, False, False): (1, 1, 1, 8), (32, 64, 16, 16, 32, False, False, True): (1, 1, 1, 4), (32, 64, 16, 16, 32, False, True, False): (2, 1, 1, 4), (32, 64, 16, 16, 32, False, True, True): (1, 1, 1, 4), (32, 64, 16, 16, 32, True, False, False): (1, 1, 2, 4), (32, 64, 16, 16, 32, True, False, True): (1, 1, 2, 2), (32, 64, 16, 32, 32, False, False, False): (1, 1, 1, 8), (32, 64, 16, 32, 32, False, False, True): (2, 1, 1, 4), (32, 64, 16, 32, 32, False, True, False): (1, 1, 1, 4), (32, 64, 16, 32, 32, False, True, True): (1, 1, 2, 2), (32, 64, 16, 32, 32, True, False, False): (1, 1, 1, 2), (32, 64, 16, 32, 32, True, False, True): (2, 1, 2, 4), (32, 64, 32, 16, 32, False, False, False): (1, 1, 1, 4), (32, 64, 32, 16, 32, False, False, True): (1, 2, 1, 2), (32, 64, 32, 16, 32, False, True, False): (1, 2, 3, 4), (32, 64, 32, 16, 32, False, True, True): (2, 2, 1, 4), (32, 64, 32, 16, 32, True, False, False): (1, 1, 1, 4), (32, 64, 32, 16, 32, True, False, True): (1, 2, 2, 1), (32, 64, 32, 32, 32, False, False, False): (1, 1, 1, 8), (32, 64, 32, 32, 32, False, False, True): (1, 1, 1, 4), (32, 64, 32, 32, 32, False, True, False): (1, 1, 2, 4), (32, 64, 32, 32, 32, False, True, True): (1, 1, 1, 4), (32, 64, 32, 32, 32, True, False, False): (2, 1, 1, 2), (32, 64, 32, 32, 32, True, False, True): (1, 1, 1, 4), (32, 64, 64, 16, 32, False, False, False): (1, 4, 2, 1), (32, 64, 64, 16, 32, False, False, True): (3, 4, 1, 4), (32, 64, 64, 16, 32, False, True, False): (1, 1, 1, 8), (32, 64, 64, 16, 32, False, True, True): (1, 4, 1, 4), (32, 64, 64, 16, 32, True, False, False): (1, 4, 1, 4), (32, 64, 64, 16, 32, True, False, True): (2, 2, 3, 4), (32, 64, 64, 32, 32, False, False, False): (1, 2, 1, 4), (32, 64, 64, 32, 32, False, False, True): (1, 2, 1, 4), (32, 64, 64, 32, 32, False, True, False): (1, 2, 2, 8), (32, 64, 64, 32, 32, False, True, True): (1, 2, 1, 4), (32, 64, 64, 32, 32, True, False, False): (1, 2, 2, 4), (32, 64, 64, 32, 32, True, False, True): (1, 2, 1, 4), (64, 32, 16, 32, 32, False, False, False): (1, 1, 1, 1), (64, 32, 16, 32, 32, False, False, True): (1, 1, 2, 4), (64, 32, 16, 32, 32, False, True, False): (2, 1, 1, 8), (64, 32, 16, 32, 32, False, True, True): (1, 1, 1, 4), (64, 32, 16, 32, 32, True, False, False): (2, 1, 1, 2), (64, 32, 16, 32, 32, True, False, True): (1, 1, 1, 4), (64, 32, 32, 32, 32, False, False, False): (3, 1, 1, 4), (64, 32, 32, 32, 32, False, False, True): (1, 1, 1, 4), (64, 32, 32, 
32, 32, False, True, False): (1, 1, 1, 8), (64, 32, 32, 32, 32, False, True, True): (1, 1, 1, 2), (64, 32, 32, 32, 32, True, False, False): (1, 1, 1, 2), (64, 32, 32, 32, 32, True, False, True): (1, 1, 1, 4), (64, 32, 64, 32, 32, False, False, False): (1, 2, 1, 2), (64, 32, 64, 32, 32, False, False, True): (3, 2, 1, 4), (64, 32, 64, 32, 32, False, True, False): (1, 1, 1, 1), (64, 32, 64, 32, 32, False, True, True): (1, 2, 1, 4), (64, 32, 64, 32, 32, True, False, False): (1, 1, 3, 4), (64, 32, 64, 32, 32, True, False, True): (1, 2, 2, 4), (64, 64, 16, 32, 32, False, False, False): (1, 1, 2, 2), (64, 64, 16, 32, 32, False, False, True): (1, 1, 3, 2), (64, 64, 16, 32, 32, False, True, False): (1, 1, 1, 8), (64, 64, 16, 32, 32, False, True, True): (1, 1, 2, 4), (64, 64, 16, 32, 32, True, False, False): (1, 1, 2, 4), (64, 64, 16, 32, 32, True, False, True): (2, 1, 2, 4), (64, 64, 32, 32, 32, False, False, False): (1, 1, 2, 8), (64, 64, 32, 32, 32, False, False, True): (1, 1, 2, 4), (64, 64, 32, 32, 32, False, True, False): (1, 1, 1, 4), (64, 64, 32, 32, 32, False, True, True): (1, 1, 1, 4), (64, 64, 32, 32, 32, True, False, False): (1, 1, 1, 4), (64, 64, 32, 32, 32, True, False, True): (2, 1, 2, 4), (64, 64, 64, 32, 32, False, False, False): (1, 2, 1, 4), (64, 64, 64, 32, 32, False, False, True): (1, 2, 1, 4), (64, 64, 64, 32, 32, False, True, False): (1, 2, 1, 4), (64, 64, 64, 32, 32, False, True, True): (3, 2, 1, 4), (64, 64, 64, 32, 32, True, False, False): (1, 2, 1, 8), (64, 64, 64, 32, 32, True, False, True): (1, 2, 3, 4), (192, 192, 256, 16, 16, False, True, True): (1, 8, 4, 2), (192, 192, 256, 16, 16, True, False, True): (1, 4, 4, 4), (192, 192, 256, 32, 32, False, True, True): (2, 8, 5, 4), (192, 192, 256, 32, 32, True, False, True): (2, 8, 5, 1), (192, 192, 512, 16, 16, False, True, True): (3, 8, 4, 4), (192, 192, 512, 16, 16, True, False, True): (5, 8, 5, 4), (192, 192, 512, 32, 32, False, True, True): (1, 16, 5, 4), (192, 192, 512, 32, 32, True, False, True): (1, 8, 6, 2), (192, 192, 1024, 16, 16, False, True, True): (1, 16, 4, 4), (192, 192, 1024, 16, 16, True, False, True): (3, 16, 5, 2), (192, 192, 1024, 32, 32, False, True, True): (3, 16, 4, 4), (192, 192, 1024, 32, 32, True, False, True): (1, 16, 5, 4), (192, 192, 2048, 16, 16, False, True, True): (2, 16, 3, 4), (192, 192, 2048, 16, 16, True, False, True): (1, 16, 4, 4), (192, 192, 2048, 32, 32, False, True, True): (1, 32, 3, 4), (192, 192, 2048, 32, 32, True, False, True): (3, 16, 4, 4), (192, 192, 4096, 16, 16, False, True, True): (1, 64, 1, 4), (192, 192, 4096, 16, 16, True, False, True): (1, 16, 3, 4), (192, 192, 4096, 32, 32, False, True, True): (1, 128, 1, 4), (192, 192, 4096, 32, 32, True, False, True): (2, 32, 4, 2), (192, 192, 8192, 16, 16, False, True, True): (1, 64, 1, 4), (192, 192, 8192, 16, 16, True, False, True): (2, 64, 3, 2), (192, 192, 8192, 32, 32, False, True, True): (1, 128, 1, 4), (192, 192, 8192, 32, 32, True, False, True): (4, 32, 3, 4), (192, 192, 16384, 16, 16, False, True, True): (1, 128, 1, 4), (192, 192, 16384, 16, 16, True, False, True): (1, 64, 3, 2), (192, 192, 16384, 32, 32, False, True, True): (1, 128, 1, 4), (192, 192, 16384, 32, 32, True, False, True): (1, 64, 3, 4), (192, 192, 32768, 16, 16, False, True, True): (2, 256, 1, 2), (192, 192, 32768, 16, 16, True, False, True): (1, 128, 3, 2), (192, 192, 32768, 32, 32, False, True, True): (2, 256, 1, 4), (192, 192, 32768, 32, 32, True, False, True): (1, 256, 3, 2), (192, 192, 65536, 16, 16, False, True, True): (2, 512, 1, 2), (192, 192, 65536, 16, 
16, True, False, True): (1, 256, 3, 2), (192, 192, 65536, 32, 32, False, True, True): (2, 512, 1, 4), (192, 192, 65536, 32, 32, True, False, True): (2, 256, 3, 4), (192, 192, 131072, 16, 16, False, True, True): (4, 1024, 1, 2), (192, 192, 131072, 16, 16, True, False, True): (3, 512, 3, 2), (192, 192, 131072, 32, 32, False, True, True): (1, 1024, 1, 4), (192, 192, 131072, 32, 32, True, False, True): (3, 512, 3, 4), (256, 256, 256, 16, 16, False, True, True): (4, 8, 6, 2), (256, 256, 256, 16, 16, True, False, True): (5, 16, 5, 1), (256, 256, 256, 32, 32, False, True, True): (1, 8, 7, 4), (256, 256, 256, 32, 32, True, False, True): (1, 8, 5, 4), (256, 256, 256, 64, 64, False, True, True): (1, 4, 5, 4), (256, 256, 256, 64, 64, True, False, True): (2, 4, 3, 4), (256, 256, 256, 128, 128, False, True, True): (1, 2, 2, 8), (256, 256, 256, 128, 128, True, False, True): (1, 2, 2, 8), (256, 256, 512, 16, 16, False, True, True): (4, 8, 4, 4), (256, 256, 512, 16, 16, True, False, True): (4, 8, 6, 2), (256, 256, 512, 32, 32, False, True, True): (3, 8, 5, 4), (256, 256, 512, 32, 32, True, False, True): (2, 8, 5, 4), (256, 256, 512, 64, 64, False, True, True): (2, 8, 4, 4), (256, 256, 512, 64, 64, True, False, True): (1, 8, 7, 4), (256, 256, 512, 128, 128, False, True, True): (2, 4, 2, 8), (256, 256, 512, 128, 128, True, False, True): (5, 4, 2, 8), (256, 256, 1024, 16, 16, False, True, True): (1, 8, 4, 4), (256, 256, 1024, 16, 16, True, False, True): (1, 16, 4, 2), (256, 256, 1024, 32, 32, False, True, True): (5, 32, 5, 1), (256, 256, 1024, 32, 32, True, False, True): (1, 16, 4, 2), (256, 256, 1024, 64, 64, False, True, True): (1, 16, 4, 4), (256, 256, 1024, 64, 64, True, False, True): (2, 16, 3, 4), (256, 256, 1024, 128, 128, False, True, True): (9, 8, 2, 8), (256, 256, 1024, 128, 128, True, False, True): (1, 8, 2, 8), (256, 256, 2048, 16, 16, False, True, True): (6, 32, 5, 2), (256, 256, 2048, 16, 16, True, False, True): (2, 32, 4, 2), (256, 256, 2048, 32, 32, False, True, True): (1, 32, 3, 2), (256, 256, 2048, 32, 32, True, False, True): (1, 32, 3, 2), (256, 256, 2048, 64, 64, False, True, True): (2, 32, 4, 4), (256, 256, 2048, 64, 64, True, False, True): (2, 16, 4, 4), (256, 256, 2048, 128, 128, False, True, True): (3, 16, 2, 8), (256, 256, 2048, 128, 128, True, False, True): (4, 16, 2, 8), (256, 256, 4096, 16, 16, False, True, True): (1, 32, 3, 4), (256, 256, 4096, 16, 16, True, False, True): (3, 16, 3, 2), (256, 256, 4096, 32, 32, False, True, True): (3, 32, 3, 2), (256, 256, 4096, 32, 32, True, False, True): (1, 32, 3, 2), (256, 256, 4096, 64, 64, False, True, True): (2, 32, 3, 4), (256, 256, 4096, 64, 64, True, False, True): (2, 32, 3, 4), (256, 256, 4096, 128, 128, False, True, True): (5, 32, 2, 8), (256, 256, 4096, 128, 128, True, False, True): (1, 32, 2, 8), (256, 256, 8192, 16, 16, False, True, True): (8, 32, 3, 4), (256, 256, 8192, 16, 16, True, False, True): (1, 32, 3, 2), (256, 256, 8192, 32, 32, False, True, True): (3, 64, 3, 4), (256, 256, 8192, 32, 32, True, False, True): (2, 128, 1, 2), (256, 256, 8192, 64, 64, False, True, True): (7, 128, 1, 4), (256, 256, 8192, 64, 64, True, False, True): (4, 128, 1, 4), (256, 256, 8192, 128, 128, False, True, True): (2, 64, 1, 4), (256, 256, 8192, 128, 128, True, False, True): (4, 64, 1, 4), (256, 256, 16384, 16, 16, False, True, True): (4, 128, 3, 2), (256, 256, 16384, 16, 16, True, False, True): (5, 64, 3, 2), (256, 256, 16384, 32, 32, False, True, True): (5, 128, 3, 2), (256, 256, 16384, 32, 32, True, False, True): (5, 128, 3, 2), (256, 256, 
16384, 64, 64, False, True, True): (1, 256, 1, 4), (256, 256, 16384, 64, 64, True, False, True): (5, 128, 3, 4), (256, 256, 16384, 128, 128, False, True, True): (11, 128, 2, 8), (256, 256, 16384, 128, 128, True, False, True): (3, 128, 1, 4), (256, 256, 32768, 16, 16, False, True, True): (1, 128, 3, 4), (256, 256, 32768, 16, 16, True, False, True): (2, 128, 3, 2), (256, 256, 32768, 32, 32, False, True, True): (4, 256, 3, 2), (256, 256, 32768, 32, 32, True, False, True): (1, 256, 3, 2), (256, 256, 32768, 64, 64, False, True, True): (2, 256, 1, 4), (256, 256, 32768, 64, 64, True, False, True): (2, 256, 1, 4), (256, 256, 32768, 128, 128, False, True, True): (3, 256, 1, 4), (256, 256, 32768, 128, 128, True, False, True): (2, 256, 1, 4), (256, 256, 50432, 16, 16, False, True, True): (4, 197, 1, 4), (256, 256, 50432, 16, 16, True, False, True): (4, 197, 3, 2), (256, 256, 50432, 32, 32, False, True, True): (1, 394, 1, 2), (256, 256, 50432, 32, 32, True, False, True): (4, 197, 3, 4), (256, 256, 50432, 64, 64, False, True, True): (6, 394, 1, 4), (256, 256, 50432, 64, 64, True, False, True): (4, 394, 2, 4), (256, 256, 50432, 128, 128, False, True, True): (3, 394, 1, 4), (256, 256, 50432, 128, 128, True, False, True): (1, 394, 2, 4), (256, 256, 65536, 16, 16, False, True, True): (1, 256, 3, 2), (256, 256, 65536, 16, 16, True, False, True): (1, 256, 3, 2), (256, 256, 65536, 32, 32, False, True, True): (1, 512, 3, 2), (256, 256, 65536, 32, 32, True, False, True): (4, 512, 3, 2), (256, 256, 65536, 64, 64, False, True, True): (2, 512, 1, 4), (256, 256, 65536, 64, 64, True, False, True): (5, 512, 1, 4), (256, 256, 65536, 128, 128, False, True, True): (3, 512, 1, 4), (256, 256, 65536, 128, 128, True, False, True): (1, 512, 1, 4), (256, 256, 65792, 16, 16, False, True, True): (2, 257, 1, 4), (256, 256, 65792, 16, 16, True, False, True): (1, 257, 3, 2), (256, 256, 65792, 32, 32, False, True, True): (2, 257, 1, 4), (256, 256, 65792, 32, 32, True, False, True): (1, 257, 3, 4), (256, 256, 65792, 64, 64, False, True, True): (2, 514, 1, 4), (256, 256, 65792, 64, 64, True, False, True): (2, 514, 2, 4), (256, 256, 65792, 128, 128, False, True, True): (3, 514, 1, 4), (256, 256, 65792, 128, 128, True, False, True): (1, 514, 2, 4), (256, 256, 131072, 16, 16, False, True, True): (1, 512, 3, 1), (256, 256, 131072, 16, 16, True, False, True): (1, 512, 3, 2), (256, 256, 131072, 32, 32, False, True, True): (2, 1024, 3, 2), (256, 256, 131072, 32, 32, True, False, True): (1, 1024, 3, 2), (256, 256, 131072, 64, 64, False, True, True): (1, 1024, 1, 4), (256, 256, 131072, 64, 64, True, False, True): (1, 1024, 1, 4), (256, 256, 131072, 128, 128, False, True, True): (7, 1024, 1, 4), (256, 256, 131072, 128, 128, True, False, True): (1, 1024, 1, 4), (384, 384, 256, 16, 16, False, True, True): (3, 16, 4, 1), (384, 384, 256, 16, 16, True, False, True): (2, 4, 6, 2), (384, 384, 256, 32, 32, False, True, True): (1, 8, 4, 4), (384, 384, 256, 32, 32, True, False, True): (1, 4, 5, 2), (384, 384, 256, 64, 64, False, True, True): (3, 4, 3, 4), (384, 384, 256, 64, 64, True, False, True): (4, 4, 5, 4), (384, 384, 512, 16, 16, False, True, True): (1, 16, 4, 1), (384, 384, 512, 16, 16, True, False, True): (1, 8, 5, 2), (384, 384, 512, 32, 32, False, True, True): (4, 16, 4, 2), (384, 384, 512, 32, 32, True, False, True): (1, 8, 5, 2), (384, 384, 512, 64, 64, False, True, True): (2, 8, 3, 4), (384, 384, 512, 64, 64, True, False, True): (1, 8, 4, 4), (384, 384, 1024, 16, 16, False, True, True): (1, 16, 4, 2), (384, 384, 1024, 16, 16, True, False, 
True): (7, 8, 5, 2), (384, 384, 1024, 32, 32, False, True, True): (2, 16, 3, 4), (384, 384, 1024, 32, 32, True, False, True): (1, 16, 4, 2), (384, 384, 1024, 64, 64, False, True, True): (6, 16, 3, 4), (384, 384, 1024, 64, 64, True, False, True): (4, 16, 4, 4), (384, 384, 2048, 16, 16, False, True, True): (1, 32, 1, 4), (384, 384, 2048, 16, 16, True, False, True): (1, 16, 3, 2), (384, 384, 2048, 32, 32, False, True, True): (1, 32, 1, 8), (384, 384, 2048, 32, 32, True, False, True): (1, 8, 4, 4), (384, 384, 2048, 64, 64, False, True, True): (2, 32, 1, 8), (384, 384, 2048, 64, 64, True, False, True): (3, 16, 3, 4), (384, 384, 4096, 16, 16, False, True, True): (5, 32, 1, 4), (384, 384, 4096, 16, 16, True, False, True): (1, 32, 3, 2), (384, 384, 4096, 32, 32, False, True, True): (1, 32, 1, 8), (384, 384, 4096, 32, 32, True, False, True): (2, 16, 4, 4), (384, 384, 4096, 64, 64, False, True, True): (1, 64, 1, 4), (384, 384, 4096, 64, 64, True, False, True): (2, 32, 3, 4), (384, 384, 8192, 16, 16, False, True, True): (2, 64, 1, 4), (384, 384, 8192, 16, 16, True, False, True): (3, 32, 3, 2), (384, 384, 8192, 32, 32, False, True, True): (4, 128, 1, 4), (384, 384, 8192, 32, 32, True, False, True): (1, 32, 3, 2), (384, 384, 8192, 64, 64, False, True, True): (1, 128, 1, 4), (384, 384, 8192, 64, 64, True, False, True): (1, 64, 3, 4), (384, 384, 16384, 16, 16, False, True, True): (1, 128, 1, 2), (384, 384, 16384, 16, 16, True, False, True): (1, 64, 3, 2), (384, 384, 16384, 32, 32, False, True, True): (1, 128, 1, 4), (384, 384, 16384, 32, 32, True, False, True): (1, 64, 3, 4), (384, 384, 16384, 64, 64, False, True, True): (5, 128, 3, 4), (384, 384, 16384, 64, 64, True, False, True): (1, 128, 3, 4), (384, 384, 32768, 16, 16, False, True, True): (2, 256, 1, 2), (384, 384, 32768, 16, 16, True, False, True): (1, 128, 3, 4), (384, 384, 32768, 32, 32, False, True, True): (1, 256, 1, 2), (384, 384, 32768, 32, 32, True, False, True): (2, 128, 3, 4), (384, 384, 32768, 64, 64, False, True, True): (3, 256, 1, 4), (384, 384, 32768, 64, 64, True, False, True): (2, 256, 3, 4), (384, 384, 65536, 16, 16, False, True, True): (2, 128, 1, 4), (384, 384, 65536, 16, 16, True, False, True): (1, 256, 3, 4), (384, 384, 65536, 32, 32, False, True, True): (1, 512, 1, 2), (384, 384, 65536, 32, 32, True, False, True): (1, 256, 3, 4), (384, 384, 65536, 64, 64, False, True, True): (3, 512, 1, 4), (384, 384, 65536, 64, 64, True, False, True): (3, 256, 3, 4), (384, 384, 131072, 16, 16, False, True, True): (2, 256, 1, 2), (384, 384, 131072, 16, 16, True, False, True): (1, 512, 3, 4), (384, 384, 131072, 32, 32, False, True, True): (1, 512, 1, 2), (384, 384, 131072, 32, 32, True, False, True): (1, 512, 3, 4), (384, 384, 131072, 64, 64, False, True, True): (3, 1024, 1, 4), (384, 384, 131072, 64, 64, True, False, True): (3, 512, 3, 4), (512, 512, 256, 16, 16, False, True, True): (1, 8, 5, 1), (512, 512, 256, 16, 16, True, False, True): (2, 16, 5, 1), (512, 512, 256, 32, 32, False, True, True): (2, 8, 5, 2), (512, 512, 256, 32, 32, True, False, True): (4, 4, 5, 2), (512, 512, 256, 64, 64, False, True, True): (1, 4, 5, 4), (512, 512, 256, 64, 64, True, False, True): (3, 4, 5, 4), (512, 512, 256, 128, 128, False, True, True): (1, 2, 2, 8), (512, 512, 256, 128, 128, True, False, True): (1, 2, 2, 8), (512, 512, 512, 16, 16, False, True, True): (1, 8, 4, 4), (512, 512, 512, 16, 16, True, False, True): (4, 16, 5, 1), (512, 512, 512, 32, 32, False, True, True): (4, 8, 5, 2), (512, 512, 512, 32, 32, True, False, True): (7, 16, 4, 1), (512, 512, 512, 
64, 64, False, True, True): (3, 8, 5, 4), (512, 512, 512, 64, 64, True, False, True): (1, 8, 4, 4), (512, 512, 512, 128, 128, False, True, True): (4, 4, 2, 8), (512, 512, 512, 128, 128, True, False, True): (4, 4, 2, 8), (512, 512, 1024, 16, 16, False, True, True): (2, 8, 4, 4), (512, 512, 1024, 16, 16, True, False, True): (2, 16, 4, 2), (512, 512, 1024, 32, 32, False, True, True): (3, 16, 4, 2), (512, 512, 1024, 32, 32, True, False, True): (3, 16, 3, 2), (512, 512, 1024, 64, 64, False, True, True): (5, 8, 5, 4), (512, 512, 1024, 64, 64, True, False, True): (4, 16, 3, 4), (512, 512, 1024, 128, 128, False, True, True): (6, 8, 2, 8), (512, 512, 1024, 128, 128, True, False, True): (4, 8, 2, 8), (512, 512, 2048, 16, 16, False, True, True): (2, 16, 3, 4), (512, 512, 2048, 16, 16, True, False, True): (1, 16, 4, 2), (512, 512, 2048, 32, 32, False, True, True): (2, 32, 3, 2), (512, 512, 2048, 32, 32, True, False, True): (2, 32, 3, 2), (512, 512, 2048, 64, 64, False, True, True): (1, 32, 3, 4), (512, 512, 2048, 64, 64, True, False, True): (1, 32, 3, 2), (512, 512, 2048, 128, 128, False, True, True): (3, 16, 2, 8), (512, 512, 2048, 128, 128, True, False, True): (1, 16, 2, 8), (512, 512, 4096, 16, 16, False, True, True): (4, 32, 3, 2), (512, 512, 4096, 16, 16, True, False, True): (1, 32, 3, 2), (512, 512, 4096, 32, 32, False, True, True): (3, 32, 3, 2), (512, 512, 4096, 32, 32, True, False, True): (3, 32, 3, 2), (512, 512, 4096, 64, 64, False, True, True): (1, 32, 3, 4), (512, 512, 4096, 64, 64, True, False, True): (1, 64, 1, 4), (512, 512, 4096, 128, 128, False, True, True): (7, 32, 2, 8), (512, 512, 4096, 128, 128, True, False, True): (1, 32, 2, 8), (512, 512, 8192, 16, 16, False, True, True): (4, 64, 3, 2), (512, 512, 8192, 16, 16, True, False, True): (1, 64, 3, 2), (512, 512, 8192, 32, 32, False, True, True): (3, 64, 3, 2), (512, 512, 8192, 32, 32, True, False, True): (1, 64, 3, 2), (512, 512, 8192, 64, 64, False, True, True): (1, 64, 3, 4), (512, 512, 8192, 64, 64, True, False, True): (1, 64, 3, 4), (512, 512, 8192, 128, 128, False, True, True): (7, 64, 2, 8), (512, 512, 8192, 128, 128, True, False, True): (1, 64, 1, 4), (512, 512, 16384, 16, 16, False, True, True): (1, 128, 3, 2), (512, 512, 16384, 16, 16, True, False, True): (1, 64, 3, 2), (512, 512, 16384, 32, 32, False, True, True): (1, 128, 3, 2), (512, 512, 16384, 32, 32, True, False, True): (1, 128, 3, 2), (512, 512, 16384, 64, 64, False, True, True): (1, 128, 3, 4), (512, 512, 16384, 64, 64, True, False, True): (4, 128, 3, 4), (512, 512, 16384, 128, 128, False, True, True): (5, 128, 2, 8), (512, 512, 16384, 128, 128, True, False, True): (2, 128, 1, 4), (512, 512, 32768, 16, 16, False, True, True): (1, 128, 3, 4), (512, 512, 32768, 16, 16, True, False, True): (1, 128, 3, 2), (512, 512, 32768, 32, 32, False, True, True): (1, 256, 3, 2), (512, 512, 32768, 32, 32, True, False, True): (1, 256, 3, 2), (512, 512, 32768, 64, 64, False, True, True): (1, 256, 3, 4), (512, 512, 32768, 64, 64, True, False, True): (1, 256, 3, 4), (512, 512, 32768, 128, 128, False, True, True): (5, 256, 1, 4), (512, 512, 32768, 128, 128, True, False, True): (1, 256, 1, 4), (512, 512, 50432, 16, 16, False, True, True): (4, 197, 1, 4), (512, 512, 50432, 16, 16, True, False, True): (4, 197, 3, 2), (512, 512, 50432, 32, 32, False, True, True): (2, 197, 1, 4), (512, 512, 50432, 32, 32, True, False, True): (4, 197, 3, 4), (512, 512, 50432, 64, 64, False, True, True): (2, 394, 1, 4), (512, 512, 50432, 64, 64, True, False, True): (4, 197, 2, 4), (512, 512, 50432, 128, 128, 
False, True, True): (5, 394, 1, 4), (512, 512, 50432, 128, 128, True, False, True): (6, 394, 2, 4), (512, 512, 65536, 16, 16, False, True, True): (1, 256, 3, 2), (512, 512, 65536, 16, 16, True, False, True): (1, 256, 3, 1), (512, 512, 65536, 32, 32, False, True, True): (1, 512, 3, 2), (512, 512, 65536, 32, 32, True, False, True): (1, 512, 3, 2), (512, 512, 65536, 64, 64, False, True, True): (2, 256, 2, 4), (512, 512, 65536, 64, 64, True, False, True): (1, 512, 3, 4), (512, 512, 65536, 128, 128, False, True, True): (7, 512, 1, 4), (512, 512, 65536, 128, 128, True, False, True): (5, 512, 1, 4), (512, 512, 65792, 16, 16, False, True, True): (2, 257, 1, 4), (512, 512, 65792, 16, 16, True, False, True): (1, 257, 3, 4), (512, 512, 65792, 32, 32, False, True, True): (2, 257, 1, 4), (512, 512, 65792, 32, 32, True, False, True): (1, 257, 3, 4), (512, 512, 65792, 64, 64, False, True, True): (4, 514, 1, 4), (512, 512, 65792, 64, 64, True, False, True): (4, 257, 2, 4), (512, 512, 65792, 128, 128, False, True, True): (5, 514, 1, 4), (512, 512, 65792, 128, 128, True, False, True): (4, 514, 2, 4), (512, 512, 131072, 16, 16, False, True, True): (1, 512, 3, 1), (512, 512, 131072, 16, 16, True, False, True): (1, 512, 3, 1), (512, 512, 131072, 32, 32, False, True, True): (1, 1024, 3, 2), (512, 512, 131072, 32, 32, True, False, True): (1, 1024, 3, 2), (512, 512, 131072, 64, 64, False, True, True): (4, 512, 2, 4), (512, 512, 131072, 64, 64, True, False, True): (2, 512, 2, 4), (512, 512, 131072, 128, 128, False, True, True): (5, 1024, 1, 4), (512, 512, 131072, 128, 128, True, False, True): (4, 1024, 1, 4), (768, 768, 256, 16, 16, False, True, True): (1, 8, 4, 1), (768, 768, 256, 16, 16, True, False, True): (3, 2, 5, 2), (768, 768, 256, 32, 32, False, True, True): (1, 8, 4, 2), (768, 768, 256, 32, 32, True, False, True): (2, 4, 6, 2), (768, 768, 256, 64, 64, False, True, True): (3, 4, 3, 4), (768, 768, 256, 64, 64, True, False, True): (2, 4, 4, 4), (768, 768, 256, 128, 128, False, True, True): (1, 2, 3, 8), (768, 768, 256, 128, 128, True, False, True): (2, 2, 3, 8), (768, 768, 512, 16, 16, False, True, True): (1, 8, 4, 2), (768, 768, 512, 16, 16, True, False, True): (2, 8, 5, 2), (768, 768, 512, 32, 32, False, True, True): (1, 16, 1, 4), (768, 768, 512, 32, 32, True, False, True): (3, 8, 5, 2), (768, 768, 512, 64, 64, False, True, True): (4, 8, 3, 4), (768, 768, 512, 64, 64, True, False, True): (2, 8, 4, 4), (768, 768, 512, 128, 128, False, True, True): (1, 4, 3, 8), (768, 768, 512, 128, 128, True, False, True): (3, 4, 3, 8), (768, 768, 1024, 16, 16, False, True, True): (1, 16, 1, 4), (768, 768, 1024, 16, 16, True, False, True): (1, 8, 5, 2), (768, 768, 1024, 32, 32, False, True, True): (1, 16, 1, 8), (768, 768, 1024, 32, 32, True, False, True): (1, 4, 4, 4), (768, 768, 1024, 64, 64, False, True, True): (2, 16, 1, 8), (768, 768, 1024, 64, 64, True, False, True): (1, 8, 3, 8), (768, 768, 1024, 128, 128, False, True, True): (1, 8, 3, 8), (768, 768, 1024, 128, 128, True, False, True): (3, 8, 3, 8), (768, 768, 2048, 16, 16, False, True, True): (6, 16, 1, 2), (768, 768, 2048, 16, 16, True, False, True): (2, 16, 4, 2), (768, 768, 2048, 32, 32, False, True, True): (3, 32, 1, 4), (768, 768, 2048, 32, 32, True, False, True): (6, 8, 3, 4), (768, 768, 2048, 64, 64, False, True, True): (2, 32, 2, 2), (768, 768, 2048, 64, 64, True, False, True): (1, 16, 4, 4), (768, 768, 2048, 128, 128, False, True, True): (2, 16, 3, 8), (768, 768, 2048, 128, 128, True, False, True): (4, 16, 3, 8), (768, 768, 4096, 16, 16, False, True, 
True): (1, 32, 1, 4), (768, 768, 4096, 16, 16, True, False, True): (2, 16, 3, 2), (768, 768, 4096, 32, 32, False, True, True): (3, 32, 1, 8), (768, 768, 4096, 32, 32, True, False, True): (1, 16, 4, 4), (768, 768, 4096, 64, 64, False, True, True): (1, 64, 2, 4), (768, 768, 4096, 64, 64, True, False, True): (1, 8, 3, 8), (768, 768, 4096, 128, 128, False, True, True): (1, 32, 3, 8), (768, 768, 4096, 128, 128, True, False, True): (2, 32, 3, 8), (768, 768, 8192, 16, 16, False, True, True): (1, 64, 1, 2), (768, 768, 8192, 16, 16, True, False, True): (2, 64, 3, 2), (768, 768, 8192, 32, 32, False, True, True): (2, 64, 1, 8), (768, 768, 8192, 32, 32, True, False, True): (2, 32, 3, 4), (768, 768, 8192, 64, 64, False, True, True): (4, 64, 3, 4), (768, 768, 8192, 64, 64, True, False, True): (1, 64, 3, 4), (768, 768, 8192, 128, 128, False, True, True): (4, 64, 3, 8), (768, 768, 8192, 128, 128, True, False, True): (2, 64, 3, 8), (768, 768, 16384, 16, 16, False, True, True): (4, 128, 1, 2), (768, 768, 16384, 16, 16, True, False, True): (1, 64, 3, 4), (768, 768, 16384, 32, 32, False, True, True): (1, 128, 1, 8), (768, 768, 16384, 32, 32, True, False, True): (1, 64, 3, 4), (768, 768, 16384, 64, 64, False, True, True): (1, 128, 3, 4), (768, 768, 16384, 64, 64, True, False, True): (1, 128, 3, 4), (768, 768, 16384, 128, 128, False, True, True): (3, 128, 1, 4), (768, 768, 16384, 128, 128, True, False, True): (1, 128, 2, 4), (768, 768, 32768, 16, 16, False, True, True): (2, 256, 1, 2), (768, 768, 32768, 16, 16, True, False, True): (1, 128, 4, 4), (768, 768, 32768, 32, 32, False, True, True): (1, 128, 1, 2), (768, 768, 32768, 32, 32, True, False, True): (1, 128, 3, 4), (768, 768, 32768, 64, 64, False, True, True): (1, 256, 1, 4), (768, 768, 32768, 64, 64, True, False, True): (1, 128, 3, 4), (768, 768, 32768, 128, 128, False, True, True): (3, 256, 1, 4), (768, 768, 32768, 128, 128, True, False, True): (3, 256, 2, 4), (768, 768, 65536, 16, 16, False, True, True): (4, 512, 1, 2), (768, 768, 65536, 16, 16, True, False, True): (1, 256, 4, 4), (768, 768, 65536, 32, 32, False, True, True): (1, 256, 1, 2), (768, 768, 65536, 32, 32, True, False, True): (1, 256, 3, 4), (768, 768, 65536, 64, 64, False, True, True): (1, 512, 1, 4), (768, 768, 65536, 64, 64, True, False, True): (1, 256, 3, 4), (768, 768, 65536, 128, 128, False, True, True): (3, 512, 1, 4), (768, 768, 65536, 128, 128, True, False, True): (2, 512, 2, 4), (768, 768, 131072, 16, 16, False, True, True): (1, 512, 1, 1), (768, 768, 131072, 16, 16, True, False, True): (1, 512, 4, 4), (768, 768, 131072, 32, 32, False, True, True): (1, 512, 1, 2), (768, 768, 131072, 32, 32, True, False, True): (1, 512, 3, 4), (768, 768, 131072, 64, 64, False, True, True): (1, 1024, 1, 4), (768, 768, 131072, 64, 64, True, False, True): (3, 512, 3, 4), (768, 768, 131072, 128, 128, False, True, True): (3, 1024, 1, 4), (768, 768, 131072, 128, 128, True, False, True): (1, 1024, 2, 4), (768, 3072, 256, 16, 16, False, True, True): (1, 8, 5, 2), (768, 3072, 256, 16, 16, True, False, True): (3, 4, 7, 2), (768, 3072, 256, 32, 32, False, True, True): (1, 8, 4, 2), (768, 3072, 256, 32, 32, True, False, True): (1, 4, 5, 4), (768, 3072, 256, 64, 64, False, True, True): (1, 4, 3, 4), (768, 3072, 256, 64, 64, True, False, True): (1, 4, 5, 4), (768, 3072, 256, 128, 128, False, True, True): (2, 2, 3, 8), (768, 3072, 256, 128, 128, True, False, True): (2, 2, 3, 8), (768, 3072, 512, 16, 16, False, True, True): (1, 8, 5, 2), (768, 3072, 512, 16, 16, True, False, True): (1, 8, 5, 2), (768, 3072, 512, 32, 
32, False, True, True): (3, 8, 3, 4), (768, 3072, 512, 32, 32, True, False, True): (1, 8, 7, 4), (768, 3072, 512, 64, 64, False, True, True): (3, 8, 3, 4), (768, 3072, 512, 64, 64, True, False, True): (3, 8, 5, 4), (768, 3072, 512, 128, 128, False, True, True): (1, 4, 3, 8), (768, 3072, 512, 128, 128, True, False, True): (1, 4, 3, 8), (768, 3072, 1024, 16, 16, False, True, True): (4, 16, 1, 4), (768, 3072, 1024, 16, 16, True, False, True): (2, 8, 5, 2), (768, 3072, 1024, 32, 32, False, True, True): (1, 16, 6, 2), (768, 3072, 1024, 32, 32, True, False, True): (1, 8, 4, 4), (768, 3072, 1024, 64, 64, False, True, True): (2, 16, 4, 4), (768, 3072, 1024, 64, 64, True, False, True): (2, 16, 4, 4), (768, 3072, 1024, 128, 128, False, True, True): (1, 8, 3, 8), (768, 3072, 1024, 128, 128, True, False, True): (3, 8, 3, 8), (768, 3072, 2048, 16, 16, False, True, True): (1, 16, 1, 2), (768, 3072, 2048, 16, 16, True, False, True): (1, 16, 5, 2), (768, 3072, 2048, 32, 32, False, True, True): (4, 16, 1, 8), (768, 3072, 2048, 32, 32, True, False, True): (2, 8, 3, 4), (768, 3072, 2048, 64, 64, False, True, True): (2, 16, 3, 4), (768, 3072, 2048, 64, 64, True, False, True): (2, 16, 3, 4), (768, 3072, 2048, 128, 128, False, True, True): (3, 16, 3, 8), (768, 3072, 2048, 128, 128, True, False, True): (1, 16, 3, 8), (768, 3072, 4096, 16, 16, False, True, True): (1, 32, 1, 4), (768, 3072, 4096, 16, 16, True, False, True): (1, 16, 3, 1), (768, 3072, 4096, 32, 32, False, True, True): (3, 32, 1, 8), (768, 3072, 4096, 32, 32, True, False, True): (2, 16, 3, 8), (768, 3072, 4096, 64, 64, False, True, True): (2, 32, 3, 4), (768, 3072, 4096, 64, 64, True, False, True): (2, 16, 3, 4), (768, 3072, 4096, 128, 128, False, True, True): (5, 32, 1, 4), (768, 3072, 4096, 128, 128, True, False, True): (4, 32, 3, 8), (768, 3072, 8192, 16, 16, False, True, True): (1, 32, 1, 4), (768, 3072, 8192, 16, 16, True, False, True): (4, 64, 4, 2), (768, 3072, 8192, 32, 32, False, True, True): (1, 64, 1, 8), (768, 3072, 8192, 32, 32, True, False, True): (2, 32, 3, 8), (768, 3072, 8192, 64, 64, False, True, True): (2, 64, 3, 4), (768, 3072, 8192, 64, 64, True, False, True): (2, 32, 3, 4), (768, 3072, 8192, 128, 128, False, True, True): (1, 64, 3, 8), (768, 3072, 8192, 128, 128, True, False, True): (2, 64, 3, 8), (768, 3072, 16384, 16, 16, False, True, True): (1, 64, 1, 4), (768, 3072, 16384, 16, 16, True, False, True): (1, 64, 4, 1), (768, 3072, 16384, 32, 32, False, True, True): (1, 128, 1, 8), (768, 3072, 16384, 32, 32, True, False, True): (1, 64, 3, 4), (768, 3072, 16384, 64, 64, False, True, True): (1, 128, 3, 4), (768, 3072, 16384, 64, 64, True, False, True): (1, 64, 3, 4), (768, 3072, 16384, 128, 128, False, True, True): (2, 128, 3, 8), (768, 3072, 16384, 128, 128, True, False, True): (1, 128, 3, 8), (768, 3072, 32768, 16, 16, False, True, True): (1, 128, 1, 4), (768, 3072, 32768, 16, 16, True, False, True): (1, 128, 4, 1), (768, 3072, 32768, 32, 32, False, True, True): (1, 256, 1, 8), (768, 3072, 32768, 32, 32, True, False, True): (1, 128, 3, 4), (768, 3072, 32768, 64, 64, False, True, True): (1, 256, 3, 4), (768, 3072, 32768, 64, 64, True, False, True): (1, 128, 3, 4), (768, 3072, 32768, 128, 128, False, True, True): (3, 256, 1, 4), (768, 3072, 32768, 128, 128, True, False, True): (5, 256, 3, 8), (768, 3072, 50432, 16, 16, False, True, True): (1, 197, 1, 4), (768, 3072, 50432, 16, 16, True, False, True): (4, 197, 4, 1), (768, 3072, 50432, 32, 32, False, True, True): (2, 197, 1, 4), (768, 3072, 50432, 32, 32, True, False, True): (4, 
197, 3, 4), (768, 3072, 50432, 64, 64, False, True, True): (1, 394, 3, 4), (768, 3072, 50432, 64, 64, True, False, True): (1, 197, 3, 4), (768, 3072, 50432, 128, 128, False, True, True): (3, 394, 1, 4), (768, 3072, 50432, 128, 128, True, False, True): (3, 394, 2, 4), (768, 3072, 65536, 16, 16, False, True, True): (1, 256, 1, 4), (768, 3072, 65536, 16, 16, True, False, True): (5, 256, 4, 1), (768, 3072, 65536, 32, 32, False, True, True): (2, 256, 1, 4), (768, 3072, 65536, 32, 32, True, False, True): (3, 256, 3, 4), (768, 3072, 65536, 64, 64, False, True, True): (1, 512, 3, 4), (768, 3072, 65536, 64, 64, True, False, True): (1, 256, 3, 4), (768, 3072, 65536, 128, 128, False, True, True): (3, 512, 1, 4), (768, 3072, 65536, 128, 128, True, False, True): (2, 512, 3, 8), (768, 3072, 131072, 16, 16, False, True, True): (1, 512, 1, 4), (768, 3072, 131072, 16, 16, True, False, True): (5, 512, 4, 1), (768, 3072, 131072, 32, 32, False, True, True): (2, 512, 1, 4), (768, 3072, 131072, 32, 32, True, False, True): (2, 512, 3, 4), (768, 3072, 131072, 64, 64, False, True, True): (1, 1024, 3, 4), (768, 3072, 131072, 64, 64, True, False, True): (2, 512, 3, 4), (768, 3072, 131072, 128, 128, False, True, True): (3, 1024, 1, 4), (768, 3072, 131072, 128, 128, True, False, True): (2, 1024, 3, 8), (1024, 1024, 256, 16, 16, False, True, True): (3, 4, 5, 4), (1024, 1024, 256, 16, 16, True, False, True): (3, 4, 5, 4), (1024, 1024, 256, 32, 32, False, True, True): (2, 4, 6, 2), (1024, 1024, 256, 32, 32, True, False, True): (2, 4, 6, 2), (1024, 1024, 256, 64, 64, False, True, True): (1, 4, 4, 4), (1024, 1024, 256, 64, 64, True, False, True): (2, 4, 6, 4), (1024, 1024, 256, 128, 128, False, True, True): (1, 2, 2, 8), (1024, 1024, 256, 128, 128, True, False, True): (1, 2, 2, 8), (1024, 1024, 512, 16, 16, False, True, True): (3, 4, 5, 4), (1024, 1024, 512, 16, 16, True, False, True): (3, 8, 4, 2), (1024, 1024, 512, 32, 32, False, True, True): (1, 8, 4, 2), (1024, 1024, 512, 32, 32, True, False, True): (1, 8, 4, 2), (1024, 1024, 512, 64, 64, False, True, True): (2, 8, 3, 4), (1024, 1024, 512, 64, 64, True, False, True): (1, 4, 4, 4), (1024, 1024, 512, 128, 128, False, True, True): (7, 4, 2, 8), (1024, 1024, 512, 128, 128, True, False, True): (1, 4, 2, 8), (1024, 1024, 1024, 16, 16, False, True, True): (4, 8, 4, 2), (1024, 1024, 1024, 16, 16, True, False, True): (3, 8, 5, 2), (1024, 1024, 1024, 32, 32, False, True, True): (1, 8, 4, 4), (1024, 1024, 1024, 32, 32, True, False, True): (1, 8, 4, 2), (1024, 1024, 1024, 64, 64, False, True, True): (1, 16, 3, 4), (1024, 1024, 1024, 64, 64, True, False, True): (3, 16, 3, 4), (1024, 1024, 1024, 128, 128, False, True, True): (6, 8, 2, 8), (1024, 1024, 1024, 128, 128, True, False, True): (4, 8, 2, 8), (1024, 1024, 2048, 16, 16, False, True, True): (3, 8, 3, 4), (1024, 1024, 2048, 16, 16, True, False, True): (3, 8, 3, 4), (1024, 1024, 2048, 32, 32, False, True, True): (1, 16, 3, 4), (1024, 1024, 2048, 32, 32, True, False, True): (1, 16, 3, 2), (1024, 1024, 2048, 64, 64, False, True, True): (5, 16, 3, 4), (1024, 1024, 2048, 64, 64, True, False, True): (5, 16, 3, 4), (1024, 1024, 2048, 128, 128, False, True, True): (3, 16, 2, 8), (1024, 1024, 2048, 128, 128, True, False, True): (4, 16, 2, 16), (1024, 1024, 4096, 16, 16, False, True, True): (4, 32, 3, 2), (1024, 1024, 4096, 16, 16, True, False, True): (8, 32, 3, 2), (1024, 1024, 4096, 32, 32, False, True, True): (9, 32, 3, 2), (1024, 1024, 4096, 32, 32, True, False, True): (1, 32, 3, 2), (1024, 1024, 4096, 64, 64, False, True, True): (6, 
32, 3, 4), (1024, 1024, 4096, 64, 64, True, False, True): (1, 32, 3, 4), (1024, 1024, 4096, 128, 128, False, True, True): (4, 32, 2, 8), (1024, 1024, 4096, 128, 128, True, False, True): (4, 32, 1, 4), (1024, 1024, 8192, 16, 16, False, True, True): (4, 64, 3, 2), (1024, 1024, 8192, 16, 16, True, False, True): (4, 64, 3, 2), (1024, 1024, 8192, 32, 32, False, True, True): (8, 64, 3, 2), (1024, 1024, 8192, 32, 32, True, False, True): (6, 64, 3, 2), (1024, 1024, 8192, 64, 64, False, True, True): (2, 64, 3, 4), (1024, 1024, 8192, 64, 64, True, False, True): (2, 64, 3, 4), (1024, 1024, 8192, 128, 128, False, True, True): (3, 64, 1, 4), (1024, 1024, 8192, 128, 128, True, False, True): (2, 64, 1, 4), (1024, 1024, 16384, 16, 16, False, True, True): (1, 64, 3, 4), (1024, 1024, 16384, 16, 16, True, False, True): (1, 64, 3, 2), (1024, 1024, 16384, 32, 32, False, True, True): (1, 128, 3, 4), (1024, 1024, 16384, 32, 32, True, False, True): (1, 64, 3, 4), (1024, 1024, 16384, 64, 64, False, True, True): (1, 128, 3, 4), (1024, 1024, 16384, 64, 64, True, False, True): (1, 128, 3, 4), (1024, 1024, 16384, 128, 128, False, True, True): (11, 128, 1, 4), (1024, 1024, 16384, 128, 128, True, False, True): (4, 128, 1, 4), (1024, 1024, 32768, 16, 16, False, True, True): (1, 128, 3, 4), (1024, 1024, 32768, 16, 16, True, False, True): (1, 128, 3, 1), (1024, 1024, 32768, 32, 32, False, True, True): (1, 256, 3, 2), (1024, 1024, 32768, 32, 32, True, False, True): (1, 128, 3, 4), (1024, 1024, 32768, 64, 64, False, True, True): (2, 128, 2, 4), (1024, 1024, 32768, 64, 64, True, False, True): (1, 256, 3, 4), (1024, 1024, 32768, 128, 128, False, True, True): (7, 256, 1, 4), (1024, 1024, 32768, 128, 128, True, False, True): (4, 256, 1, 4), (1024, 1024, 50432, 16, 16, False, True, True): (1, 197, 1, 4), (1024, 1024, 50432, 16, 16, True, False, True): (4, 197, 3, 4), (1024, 1024, 50432, 32, 32, False, True, True): (2, 197, 1, 4), (1024, 1024, 50432, 32, 32, True, False, True): (1, 197, 3, 4), (1024, 1024, 50432, 64, 64, False, True, True): (2, 394, 1, 4), (1024, 1024, 50432, 64, 64, True, False, True): (1, 197, 2, 4), (1024, 1024, 50432, 128, 128, False, True, True): (3, 394, 1, 4), (1024, 1024, 50432, 128, 128, True, False, True): (2, 394, 2, 4), (1024, 1024, 65536, 16, 16, False, True, True): (1, 256, 3, 4), (1024, 1024, 65536, 16, 16, True, False, True): (1, 256, 3, 1), (1024, 1024, 65536, 32, 32, False, True, True): (1, 512, 3, 2), (1024, 1024, 65536, 32, 32, True, False, True): (1, 256, 3, 4), (1024, 1024, 65536, 64, 64, False, True, True): (2, 256, 2, 4), (1024, 1024, 65536, 64, 64, True, False, True): (1, 512, 3, 4), (1024, 1024, 65536, 128, 128, False, True, True): (10, 512, 1, 4), (1024, 1024, 65536, 128, 128, True, False, True): (4, 512, 1, 4), (1024, 1024, 65792, 16, 16, False, True, True): (1, 257, 1, 4), (1024, 1024, 65792, 16, 16, True, False, True): (10, 257, 4, 1), (1024, 1024, 65792, 32, 32, False, True, True): (2, 257, 1, 4), (1024, 1024, 65792, 32, 32, True, False, True): (1, 257, 3, 4), (1024, 1024, 65792, 64, 64, False, True, True): (2, 514, 1, 4), (1024, 1024, 65792, 64, 64, True, False, True): (2, 257, 2, 4), (1024, 1024, 65792, 128, 128, False, True, True): (6, 514, 1, 4), (1024, 1024, 65792, 128, 128, True, False, True): (2, 514, 2, 4), (1024, 1024, 131072, 16, 16, False, True, True): (11, 512, 3, 2), (1024, 1024, 131072, 16, 16, True, False, True): (11, 512, 3, 2), (1024, 1024, 131072, 32, 32, False, True, True): (7, 1024, 3, 2), (1024, 1024, 131072, 32, 32, True, False, True): (6, 512, 3, 4), (1024, 
1024, 131072, 64, 64, False, True, True): (1, 512, 2, 4), (1024, 1024, 131072, 64, 64, True, False, True): (4, 1024, 3, 4), (1024, 1024, 131072, 128, 128, False, True, True): (12, 1024, 1, 4), (1024, 1024, 131072, 128, 128, True, False, True): (4, 1024, 1, 4), (1280, 5120, 65792, 16, 16, False, True, True): (1, 257, 1, 4), (1280, 5120, 65792, 16, 16, True, False, True): (5, 257, 4, 1), (1280, 5120, 65792, 32, 32, False, True, True): (2, 257, 1, 4), (1280, 5120, 65792, 32, 32, True, False, True): (2, 257, 3, 4), (1280, 5120, 65792, 64, 64, False, True, True): (1, 514, 3, 4), (1280, 5120, 65792, 64, 64, True, False, True): (2, 257, 3, 4), (1280, 5120, 65792, 128, 128, False, True, True): (1, 514, 3, 8), (1280, 5120, 65792, 128, 128, True, False, True): (1, 514, 3, 8), (1536, 1536, 256, 16, 16, False, True, True): (5, 4, 4, 2), (1536, 1536, 256, 16, 16, True, False, True): (3, 4, 5, 2), (1536, 1536, 256, 32, 32, False, True, True): (2, 4, 4, 4), (1536, 1536, 256, 32, 32, True, False, True): (1, 4, 6, 2), (1536, 1536, 256, 64, 64, False, True, True): (5, 4, 4, 4), (1536, 1536, 256, 64, 64, True, False, True): (2, 4, 4, 4), (1536, 1536, 256, 128, 128, False, True, True): (1, 2, 3, 8), (1536, 1536, 256, 128, 128, True, False, True): (2, 2, 3, 8), (1536, 1536, 512, 16, 16, False, True, True): (1, 8, 1, 4), (1536, 1536, 512, 16, 16, True, False, True): (3, 4, 4, 2), (1536, 1536, 512, 32, 32, False, True, True): (1, 8, 1, 8), (1536, 1536, 512, 32, 32, True, False, True): (1, 4, 4, 4), (1536, 1536, 512, 64, 64, False, True, True): (3, 8, 3, 4), (1536, 1536, 512, 64, 64, True, False, True): (5, 8, 3, 4), (1536, 1536, 512, 128, 128, False, True, True): (3, 4, 3, 8), (1536, 1536, 512, 128, 128, True, False, True): (1, 4, 3, 8), (1536, 1536, 1024, 16, 16, False, True, True): (6, 8, 1, 2), (1536, 1536, 1024, 16, 16, True, False, True): (2, 8, 5, 2), (1536, 1536, 1024, 32, 32, False, True, True): (6, 8, 1, 8), (1536, 1536, 1024, 32, 32, True, False, True): (2, 4, 3, 4), (1536, 1536, 1024, 64, 64, False, True, True): (1, 16, 3, 4), (1536, 1536, 1024, 64, 64, True, False, True): (3, 8, 3, 4), (1536, 1536, 1024, 128, 128, False, True, True): (3, 8, 3, 8), (1536, 1536, 1024, 128, 128, True, False, True): (3, 8, 3, 8), (1536, 1536, 2048, 16, 16, False, True, True): (1, 16, 1, 4), (1536, 1536, 2048, 16, 16, True, False, True): (1, 8, 3, 1), (1536, 1536, 2048, 32, 32, False, True, True): (1, 16, 1, 8), (1536, 1536, 2048, 32, 32, True, False, True): (4, 8, 3, 2), (1536, 1536, 2048, 64, 64, False, True, True): (1, 16, 3, 4), (1536, 1536, 2048, 64, 64, True, False, True): (3, 8, 3, 4), (1536, 1536, 2048, 128, 128, False, True, True): (6, 16, 1, 4), (1536, 1536, 2048, 128, 128, True, False, True): (4, 16, 3, 8), (1536, 1536, 4096, 16, 16, False, True, True): (1, 32, 1, 2), (1536, 1536, 4096, 16, 16, True, False, True): (4, 32, 4, 2), (1536, 1536, 4096, 32, 32, False, True, True): (1, 32, 1, 8), (1536, 1536, 4096, 32, 32, True, False, True): (3, 16, 3, 4), (1536, 1536, 4096, 64, 64, False, True, True): (1, 32, 3, 4), (1536, 1536, 4096, 64, 64, True, False, True): (1, 16, 3, 4), (1536, 1536, 4096, 128, 128, False, True, True): (4, 32, 3, 8), (1536, 1536, 4096, 128, 128, True, False, True): (2, 32, 3, 8), (1536, 1536, 8192, 16, 16, False, True, True): (2, 64, 1, 2), (1536, 1536, 8192, 16, 16, True, False, True): (4, 64, 4, 2), (1536, 1536, 8192, 32, 32, False, True, True): (1, 64, 1, 8), (1536, 1536, 8192, 32, 32, True, False, True): (12, 32, 3, 4), (1536, 1536, 8192, 64, 64, False, True, True): (2, 64, 3, 4), (1536, 
1536, 8192, 64, 64, True, False, True): (2, 32, 3, 4), (1536, 1536, 8192, 128, 128, False, True, True): (3, 64, 1, 4), (1536, 1536, 8192, 128, 128, True, False, True): (4, 64, 3, 8), (1536, 1536, 16384, 16, 16, False, True, True): (1, 128, 1, 2), (1536, 1536, 16384, 16, 16, True, False, True): (1, 64, 4, 4), (1536, 1536, 16384, 32, 32, False, True, True): (1, 64, 1, 2), (1536, 1536, 16384, 32, 32, True, False, True): (1, 64, 3, 4), (1536, 1536, 16384, 64, 64, False, True, True): (1, 128, 3, 4), (1536, 1536, 16384, 64, 64, True, False, True): (1, 64, 3, 4), (1536, 1536, 16384, 128, 128, False, True, True): (3, 128, 1, 4), (1536, 1536, 16384, 128, 128, True, False, True): (1, 128, 2, 4), (1536, 1536, 32768, 16, 16, False, True, True): (1, 256, 1, 2), (1536, 1536, 32768, 16, 16, True, False, True): (1, 128, 3, 2), (1536, 1536, 32768, 32, 32, False, True, True): (1, 128, 1, 2), (1536, 1536, 32768, 32, 32, True, False, True): (1, 128, 3, 4), (1536, 1536, 32768, 64, 64, False, True, True): (3, 256, 3, 4), (1536, 1536, 32768, 64, 64, True, False, True): (1, 128, 3, 4), (1536, 1536, 32768, 128, 128, False, True, True): (3, 256, 1, 4), (1536, 1536, 32768, 128, 128, True, False, True): (1, 256, 2, 4), (1536, 1536, 65536, 16, 16, False, True, True): (4, 512, 1, 2), (1536, 1536, 65536, 16, 16, True, False, True): (1, 256, 4, 4), (1536, 1536, 65536, 32, 32, False, True, True): (1, 256, 1, 2), (1536, 1536, 65536, 32, 32, True, False, True): (1, 256, 3, 4), (1536, 1536, 65536, 64, 64, False, True, True): (2, 512, 3, 4), (1536, 1536, 65536, 64, 64, True, False, True): (1, 256, 3, 4), (1536, 1536, 65536, 128, 128, False, True, True): (3, 512, 1, 4), (1536, 1536, 65536, 128, 128, True, False, True): (2, 512, 2, 4), (1536, 1536, 131072, 16, 16, False, True, True): (2, 1024, 1, 2), (1536, 1536, 131072, 16, 16, True, False, True): (9, 512, 4, 4), (1536, 1536, 131072, 32, 32, False, True, True): (1, 512, 1, 2), (1536, 1536, 131072, 32, 32, True, False, True): (9, 512, 3, 4), (1536, 1536, 131072, 64, 64, False, True, True): (1, 1024, 3, 4), (1536, 1536, 131072, 64, 64, True, False, True): (1, 512, 3, 4), (1536, 1536, 131072, 128, 128, False, True, True): (3, 1024, 1, 4), (1536, 1536, 131072, 128, 128, True, False, True): (1, 1024, 2, 4), (2048, 2048, 256, 16, 16, False, True, True): (4, 4, 6, 2), (2048, 2048, 256, 16, 16, True, False, True): (2, 8, 4, 1), (2048, 2048, 256, 32, 32, False, True, True): (3, 4, 4, 2), (2048, 2048, 256, 32, 32, True, False, True): (1, 4, 5, 2), (2048, 2048, 256, 64, 64, False, True, True): (2, 4, 4, 4), (2048, 2048, 256, 64, 64, True, False, True): (2, 4, 4, 4), (2048, 2048, 256, 128, 128, False, True, True): (3, 2, 2, 8), (2048, 2048, 256, 128, 128, True, False, True): (5, 2, 2, 8), (2048, 2048, 512, 16, 16, False, True, True): (5, 4, 4, 4), (2048, 2048, 512, 16, 16, True, False, True): (2, 4, 4, 2), (2048, 2048, 512, 32, 32, False, True, True): (1, 4, 3, 4), (2048, 2048, 512, 32, 32, True, False, True): (3, 4, 4, 2), (2048, 2048, 512, 64, 64, False, True, True): (1, 8, 3, 4), (2048, 2048, 512, 64, 64, True, False, True): (1, 8, 3, 2), (2048, 2048, 512, 128, 128, False, True, True): (3, 4, 2, 8), (2048, 2048, 512, 128, 128, True, False, True): (2, 4, 2, 8), (2048, 2048, 1024, 16, 16, False, True, True): (3, 4, 3, 4), (2048, 2048, 1024, 16, 16, True, False, True): (2, 8, 3, 2), (2048, 2048, 1024, 32, 32, False, True, True): (3, 8, 3, 4), (2048, 2048, 1024, 32, 32, True, False, True): (1, 8, 3, 2), (2048, 2048, 1024, 64, 64, False, True, True): (1, 8, 3, 4), (2048, 2048, 1024, 64, 64, 
True, False, True): (1, 8, 3, 4), (2048, 2048, 1024, 128, 128, False, True, True): (4, 8, 2, 8), (2048, 2048, 1024, 128, 128, True, False, True): (4, 8, 1, 4), (2048, 2048, 2048, 16, 16, False, True, True): (4, 16, 3, 2), (2048, 2048, 2048, 16, 16, True, False, True): (2, 16, 3, 2), (2048, 2048, 2048, 32, 32, False, True, True): (1, 16, 3, 4), (2048, 2048, 2048, 32, 32, True, False, True): (1, 16, 3, 2), (2048, 2048, 2048, 64, 64, False, True, True): (1, 16, 3, 4), (2048, 2048, 2048, 64, 64, True, False, True): (1, 16, 3, 4), (2048, 2048, 2048, 128, 128, False, True, True): (6, 16, 2, 8), (2048, 2048, 2048, 128, 128, True, False, True): (5, 16, 1, 4), (2048, 2048, 4096, 16, 16, False, True, True): (4, 32, 4, 2), (2048, 2048, 4096, 16, 16, True, False, True): (4, 32, 3, 2), (2048, 2048, 4096, 32, 32, False, True, True): (4, 16, 3, 8), (2048, 2048, 4096, 32, 32, True, False, True): (4, 16, 3, 4), (2048, 2048, 4096, 64, 64, False, True, True): (4, 32, 3, 4), (2048, 2048, 4096, 64, 64, True, False, True): (4, 32, 3, 4), (2048, 2048, 4096, 128, 128, False, True, True): (4, 32, 2, 8), (2048, 2048, 4096, 128, 128, True, False, True): (2, 32, 1, 4), (2048, 2048, 8192, 16, 16, False, True, True): (4, 64, 4, 2), (2048, 2048, 8192, 16, 16, True, False, True): (4, 64, 4, 2), (2048, 2048, 8192, 32, 32, False, True, True): (4, 32, 3, 8), (2048, 2048, 8192, 32, 32, True, False, True): (4, 32, 4, 8), (2048, 2048, 8192, 64, 64, False, True, True): (2, 64, 3, 4), (2048, 2048, 8192, 64, 64, True, False, True): (4, 64, 3, 4), (2048, 2048, 8192, 128, 128, False, True, True): (3, 64, 1, 4), (2048, 2048, 8192, 128, 128, True, False, True): (2, 64, 1, 4), (2048, 2048, 16384, 16, 16, False, True, True): (4, 64, 3, 4), (2048, 2048, 16384, 16, 16, True, False, True): (1, 64, 3, 4), (2048, 2048, 16384, 32, 32, False, True, True): (4, 64, 3, 4), (2048, 2048, 16384, 32, 32, True, False, True): (4, 64, 3, 4), (2048, 2048, 16384, 64, 64, False, True, True): (4, 128, 3, 4), (2048, 2048, 16384, 64, 64, True, False, True): (4, 128, 3, 4), (2048, 2048, 16384, 128, 128, False, True, True): (3, 128, 1, 4), (2048, 2048, 16384, 128, 128, True, False, True): (2, 128, 1, 4), (2048, 2048, 32768, 16, 16, False, True, True): (8, 128, 3, 2), (2048, 2048, 32768, 16, 16, True, False, True): (8, 128, 3, 4), (2048, 2048, 32768, 32, 32, False, True, True): (8, 128, 3, 4), (2048, 2048, 32768, 32, 32, True, False, True): (8, 128, 3, 4), (2048, 2048, 32768, 64, 64, False, True, True): (8, 256, 3, 4), (2048, 2048, 32768, 64, 64, True, False, True): (8, 256, 3, 4), (2048, 2048, 32768, 128, 128, False, True, True): (3, 256, 1, 4), (2048, 2048, 32768, 128, 128, True, False, True): (1, 256, 1, 4), (2048, 2048, 50432, 16, 16, False, True, True): (1, 197, 1, 4), (2048, 2048, 50432, 16, 16, True, False, True): (4, 197, 4, 1), (2048, 2048, 50432, 32, 32, False, True, True): (2, 197, 1, 4), (2048, 2048, 50432, 32, 32, True, False, True): (4, 197, 3, 4), (2048, 2048, 50432, 64, 64, False, True, True): (2, 394, 3, 4), (2048, 2048, 50432, 64, 64, True, False, True): (4, 197, 2, 4), (2048, 2048, 50432, 128, 128, False, True, True): (3, 394, 1, 4), (2048, 2048, 50432, 128, 128, True, False, True): (4, 394, 2, 4), (2048, 2048, 65536, 16, 16, False, True, True): (9, 256, 3, 2), (2048, 2048, 65536, 16, 16, True, False, True): (9, 256, 4, 4), (2048, 2048, 65536, 32, 32, False, True, True): (7, 256, 3, 4), (2048, 2048, 65536, 32, 32, True, False, True): (7, 256, 3, 4), (2048, 2048, 65536, 64, 64, False, True, True): (2, 256, 2, 4), (2048, 2048, 65536, 64, 64, 
True, False, True): (9, 512, 3, 4), (2048, 2048, 65536, 128, 128, False, True, True): (5, 512, 1, 4), (2048, 2048, 65536, 128, 128, True, False, True): (1, 512, 1, 4), (2048, 2048, 65792, 16, 16, False, True, True): (1, 257, 1, 4), (2048, 2048, 65792, 16, 16, True, False, True): (7, 257, 4, 1), (2048, 2048, 65792, 32, 32, False, True, True): (2, 257, 1, 4), (2048, 2048, 65792, 32, 32, True, False, True): (7, 257, 3, 4), (2048, 2048, 65792, 64, 64, False, True, True): (1, 514, 3, 4), (2048, 2048, 65792, 64, 64, True, False, True): (1, 257, 2, 4), (2048, 2048, 65792, 128, 128, False, True, True): (3, 514, 1, 4), (2048, 2048, 65792, 128, 128, True, False, True): (1, 514, 2, 4), (2048, 2048, 131072, 16, 16, False, True, True): (9, 512, 3, 2), (2048, 2048, 131072, 16, 16, True, False, True): (9, 512, 4, 4), (2048, 2048, 131072, 32, 32, False, True, True): (7, 512, 3, 4), (2048, 2048, 131072, 32, 32, True, False, True): (3, 512, 3, 4), (2048, 2048, 131072, 64, 64, False, True, True): (1, 512, 2, 4), (2048, 2048, 131072, 64, 64, True, False, True): (2, 1024, 3, 4), (2048, 2048, 131072, 128, 128, False, True, True): (3, 1024, 1, 4), (2048, 2048, 131072, 128, 128, True, False, True): (1, 1024, 1, 4), (3072, 768, 256, 16, 16, False, True, True): (6, 4, 1, 4), (3072, 768, 256, 16, 16, True, False, True): (2, 1, 5, 2), (3072, 768, 256, 32, 32, False, True, True): (1, 4, 1, 8), (3072, 768, 256, 32, 32, True, False, True): (4, 2, 4, 4), (3072, 768, 256, 64, 64, False, True, True): (1, 2, 3, 4), (3072, 768, 256, 64, 64, True, False, True): (3, 4, 3, 4), (3072, 768, 256, 128, 128, False, True, True): (1, 2, 3, 8), (3072, 768, 256, 128, 128, True, False, True): (3, 2, 3, 8), (3072, 768, 512, 16, 16, False, True, True): (1, 4, 1, 4), (3072, 768, 512, 16, 16, True, False, True): (3, 4, 4, 1), (3072, 768, 512, 32, 32, False, True, True): (5, 8, 1, 4), (3072, 768, 512, 32, 32, True, False, True): (3, 4, 4, 2), (3072, 768, 512, 64, 64, False, True, True): (1, 8, 1, 4), (3072, 768, 512, 64, 64, True, False, True): (1, 4, 3, 4), (3072, 768, 512, 128, 128, False, True, True): (3, 4, 3, 8), (3072, 768, 512, 128, 128, True, False, True): (1, 4, 3, 8), (3072, 768, 1024, 16, 16, False, True, True): (1, 8, 1, 4), (3072, 768, 1024, 16, 16, True, False, True): (3, 4, 3, 1), (3072, 768, 1024, 32, 32, False, True, True): (1, 16, 1, 4), (3072, 768, 1024, 32, 32, True, False, True): (1, 4, 3, 8), (3072, 768, 1024, 64, 64, False, True, True): (8, 16, 3, 2), (3072, 768, 1024, 64, 64, True, False, True): (1, 4, 3, 4), (3072, 768, 1024, 128, 128, False, True, True): (2, 8, 3, 8), (3072, 768, 1024, 128, 128, True, False, True): (3, 8, 2, 4), (3072, 768, 2048, 16, 16, False, True, True): (1, 8, 1, 4), (3072, 768, 2048, 16, 16, True, False, True): (6, 8, 4, 4), (3072, 768, 2048, 32, 32, False, True, True): (1, 16, 1, 8), (3072, 768, 2048, 32, 32, True, False, True): (6, 8, 3, 4), (3072, 768, 2048, 64, 64, False, True, True): (8, 16, 3, 4), (3072, 768, 2048, 64, 64, True, False, True): (3, 16, 3, 4), (3072, 768, 2048, 128, 128, False, True, True): (1, 16, 3, 8), (3072, 768, 2048, 128, 128, True, False, True): (2, 16, 2, 4), (3072, 768, 4096, 16, 16, False, True, True): (1, 16, 1, 4), (3072, 768, 4096, 16, 16, True, False, True): (4, 32, 4, 2), (3072, 768, 4096, 32, 32, False, True, True): (1, 32, 1, 8), (3072, 768, 4096, 32, 32, True, False, True): (4, 16, 3, 4), (3072, 768, 4096, 64, 64, False, True, True): (2, 32, 1, 4), (3072, 768, 4096, 64, 64, True, False, True): (2, 16, 2, 4), (3072, 768, 4096, 128, 128, False, True, True): 
(2, 32, 1, 16), (3072, 768, 4096, 128, 128, True, False, True): (3, 32, 2, 4), (3072, 768, 8192, 16, 16, False, True, True): (2, 32, 1, 4), (3072, 768, 8192, 16, 16, True, False, True): (4, 64, 4, 2), (3072, 768, 8192, 32, 32, False, True, True): (2, 32, 1, 4), (3072, 768, 8192, 32, 32, True, False, True): (6, 32, 3, 4), (3072, 768, 8192, 64, 64, False, True, True): (2, 64, 1, 4), (3072, 768, 8192, 64, 64, True, False, True): (2, 32, 2, 4), (3072, 768, 8192, 128, 128, False, True, True): (3, 64, 1, 4), (3072, 768, 8192, 128, 128, True, False, True): (2, 64, 2, 4), (3072, 768, 16384, 16, 16, False, True, True): (1, 64, 1, 4), (3072, 768, 16384, 16, 16, True, False, True): (1, 64, 1, 1), (3072, 768, 16384, 32, 32, False, True, True): (2, 64, 1, 4), (3072, 768, 16384, 32, 32, True, False, True): (4, 64, 3, 4), (3072, 768, 16384, 64, 64, False, True, True): (2, 128, 1, 4), (3072, 768, 16384, 64, 64, True, False, True): (4, 64, 2, 4), (3072, 768, 16384, 128, 128, False, True, True): (3, 128, 1, 4), (3072, 768, 16384, 128, 128, True, False, True): (1, 128, 2, 4), (3072, 768, 32768, 16, 16, False, True, True): (1, 128, 1, 4), (3072, 768, 32768, 16, 16, True, False, True): (8, 256, 3, 2), (3072, 768, 32768, 32, 32, False, True, True): (2, 128, 1, 4), (3072, 768, 32768, 32, 32, True, False, True): (8, 128, 3, 4), (3072, 768, 32768, 64, 64, False, True, True): (1, 256, 1, 4), (3072, 768, 32768, 64, 64, True, False, True): (8, 128, 2, 4), (3072, 768, 32768, 128, 128, False, True, True): (3, 256, 1, 4), (3072, 768, 32768, 128, 128, True, False, True): (3, 256, 2, 4), (3072, 768, 50432, 16, 16, False, True, True): (1, 197, 1, 4), (3072, 768, 50432, 16, 16, True, False, True): (7, 197, 4, 1), (3072, 768, 50432, 32, 32, False, True, True): (2, 197, 1, 4), (3072, 768, 50432, 32, 32, True, False, True): (10, 197, 3, 4), (3072, 768, 50432, 64, 64, False, True, True): (1, 394, 1, 4), (3072, 768, 50432, 64, 64, True, False, True): (3, 197, 2, 4), (3072, 768, 50432, 128, 128, False, True, True): (3, 394, 1, 4), (3072, 768, 50432, 128, 128, True, False, True): (2, 394, 2, 4), (3072, 768, 65536, 16, 16, False, True, True): (1, 256, 1, 4), (3072, 768, 65536, 16, 16, True, False, True): (15, 256, 4, 1), (3072, 768, 65536, 32, 32, False, True, True): (2, 256, 1, 4), (3072, 768, 65536, 32, 32, True, False, True): (10, 256, 3, 4), (3072, 768, 65536, 64, 64, False, True, True): (1, 512, 1, 4), (3072, 768, 65536, 64, 64, True, False, True): (3, 256, 2, 4), (3072, 768, 65536, 128, 128, False, True, True): (3, 512, 1, 4), (3072, 768, 65536, 128, 128, True, False, True): (3, 512, 2, 4), (3072, 768, 131072, 16, 16, False, True, True): (1, 512, 1, 4), (3072, 768, 131072, 16, 16, True, False, True): (15, 512, 4, 1), (3072, 768, 131072, 32, 32, False, True, True): (2, 512, 1, 4), (3072, 768, 131072, 32, 32, True, False, True): (9, 512, 3, 4), (3072, 768, 131072, 64, 64, False, True, True): (1, 1024, 1, 4), (3072, 768, 131072, 64, 64, True, False, True): (3, 512, 2, 4), (3072, 768, 131072, 128, 128, False, True, True): (3, 1024, 1, 4), (3072, 768, 131072, 128, 128, True, False, True): (3, 1024, 2, 4), (3072, 3072, 256, 16, 16, False, True, True): (5, 4, 1, 4), (3072, 3072, 256, 16, 16, True, False, True): (1, 2, 5, 2), (3072, 3072, 256, 32, 32, False, True, True): (1, 4, 1, 8), (3072, 3072, 256, 32, 32, True, False, True): (3, 4, 4, 2), (3072, 3072, 256, 64, 64, False, True, True): (2, 4, 3, 4), (3072, 3072, 256, 64, 64, True, False, True): (3, 4, 4, 4), (3072, 3072, 256, 128, 128, False, True, True): (1, 2, 3, 8), (3072, 
3072, 256, 128, 128, True, False, True): (1, 2, 3, 8), (3072, 3072, 512, 16, 16, False, True, True): (5, 4, 1, 2), (3072, 3072, 512, 16, 16, True, False, True): (1, 2, 4, 4), (3072, 3072, 512, 32, 32, False, True, True): (3, 8, 1, 4), (3072, 3072, 512, 32, 32, True, False, True): (4, 2, 3, 4), (3072, 3072, 512, 64, 64, False, True, True): (1, 8, 2, 2), (3072, 3072, 512, 64, 64, True, False, True): (2, 4, 3, 4), (3072, 3072, 512, 128, 128, False, True, True): (1, 4, 3, 8), (3072, 3072, 512, 128, 128, True, False, True): (4, 4, 3, 8), (3072, 3072, 1024, 16, 16, False, True, True): (1, 8, 1, 4), (3072, 3072, 1024, 16, 16, True, False, True): (4, 8, 5, 2), (3072, 3072, 1024, 32, 32, False, True, True): (1, 8, 1, 8), (3072, 3072, 1024, 32, 32, True, False, True): (1, 4, 4, 4), (3072, 3072, 1024, 64, 64, False, True, True): (3, 8, 3, 4), (3072, 3072, 1024, 64, 64, True, False, True): (2, 4, 3, 4), (3072, 3072, 1024, 128, 128, False, True, True): (3, 8, 1, 4), (3072, 3072, 1024, 128, 128, True, False, True): (1, 8, 3, 8), (3072, 3072, 2048, 16, 16, False, True, True): (1, 16, 1, 2), (3072, 3072, 2048, 16, 16, True, False, True): (4, 16, 4, 2), (3072, 3072, 2048, 32, 32, False, True, True): (1, 16, 1, 8), (3072, 3072, 2048, 32, 32, True, False, True): (3, 8, 4, 4), (3072, 3072, 2048, 64, 64, False, True, True): (3, 16, 3, 4), (3072, 3072, 2048, 64, 64, True, False, True): (3, 8, 3, 4), (3072, 3072, 2048, 128, 128, False, True, True): (4, 16, 3, 8), (3072, 3072, 2048, 128, 128, True, False, True): (3, 16, 3, 8), (3072, 3072, 4096, 16, 16, False, True, True): (1, 32, 1, 2), (3072, 3072, 4096, 16, 16, True, False, True): (4, 32, 4, 2), (3072, 3072, 4096, 32, 32, False, True, True): (1, 32, 1, 8), (3072, 3072, 4096, 32, 32, True, False, True): (3, 16, 3, 4), (3072, 3072, 4096, 64, 64, False, True, True): (1, 32, 3, 4), (3072, 3072, 4096, 64, 64, True, False, True): (3, 16, 3, 4), (3072, 3072, 4096, 128, 128, False, True, True): (1, 32, 3, 8), (3072, 3072, 4096, 128, 128, True, False, True): (3, 32, 3, 8), (3072, 3072, 8192, 16, 16, False, True, True): (1, 64, 1, 2), (3072, 3072, 8192, 16, 16, True, False, True): (4, 64, 4, 2), (3072, 3072, 8192, 32, 32, False, True, True): (1, 64, 1, 8), (3072, 3072, 8192, 32, 32, True, False, True): (8, 32, 3, 4), (3072, 3072, 8192, 64, 64, False, True, True): (3, 64, 3, 4), (3072, 3072, 8192, 64, 64, True, False, True): (2, 32, 3, 4), (3072, 3072, 8192, 128, 128, False, True, True): (2, 64, 3, 8), (3072, 3072, 8192, 128, 128, True, False, True): (1, 64, 3, 8), (3072, 3072, 16384, 16, 16, False, True, True): (1, 128, 1, 2), (3072, 3072, 16384, 16, 16, True, False, True): (4, 128, 4, 2), (3072, 3072, 16384, 32, 32, False, True, True): (1, 64, 1, 2), (3072, 3072, 16384, 32, 32, True, False, True): (4, 64, 3, 4), (3072, 3072, 16384, 64, 64, False, True, True): (1, 128, 3, 4), (3072, 3072, 16384, 64, 64, True, False, True): (4, 64, 3, 4), (3072, 3072, 16384, 128, 128, False, True, True): (3, 128, 1, 4), (3072, 3072, 16384, 128, 128, True, False, True): (1, 128, 3, 8), (3072, 3072, 32768, 16, 16, False, True, True): (1, 256, 1, 2), (3072, 3072, 32768, 16, 16, True, False, True): (8, 128, 4, 4), (3072, 3072, 32768, 32, 32, False, True, True): (1, 256, 1, 8), (3072, 3072, 32768, 32, 32, True, False, True): (5, 128, 3, 4), (3072, 3072, 32768, 64, 64, False, True, True): (1, 256, 3, 4), (3072, 3072, 32768, 64, 64, True, False, True): (1, 128, 3, 4), (3072, 3072, 32768, 128, 128, False, True, True): (3, 256, 1, 4), (3072, 3072, 32768, 128, 128, True, False, True): (3, 256, 2, 
4), (3072, 3072, 65536, 16, 16, False, True, True): (1, 512, 1, 2), (3072, 3072, 65536, 16, 16, True, False, True): (7, 256, 4, 4), (3072, 3072, 65536, 32, 32, False, True, True): (1, 256, 1, 2), (3072, 3072, 65536, 32, 32, True, False, True): (5, 256, 3, 4), (3072, 3072, 65536, 64, 64, False, True, True): (1, 512, 3, 4), (3072, 3072, 65536, 64, 64, True, False, True): (3, 256, 3, 4), (3072, 3072, 65536, 128, 128, False, True, True): (3, 512, 1, 4), (3072, 3072, 65536, 128, 128, True, False, True): (3, 512, 2, 4), (3072, 3072, 131072, 16, 16, False, True, True): (1, 1024, 1, 2), (3072, 3072, 131072, 16, 16, True, False, True): (5, 512, 4, 4), (3072, 3072, 131072, 32, 32, False, True, True): (1, 512, 1, 2), (3072, 3072, 131072, 32, 32, True, False, True): (3, 512, 3, 4), (3072, 3072, 131072, 64, 64, False, True, True): (1, 1024, 3, 4), (3072, 3072, 131072, 64, 64, True, False, True): (3, 512, 3, 4), (3072, 3072, 131072, 128, 128, False, True, True): (3, 1024, 1, 4), (3072, 3072, 131072, 128, 128, True, False, True): (1, 1024, 2, 4), (4096, 4096, 256, 16, 16, False, True, True): (2, 2, 6, 4), (4096, 4096, 256, 16, 16, True, False, True): (2, 2, 5, 4), (4096, 4096, 256, 32, 32, False, True, True): (7, 2, 4, 4), (4096, 4096, 256, 32, 32, True, False, True): (1, 2, 4, 4), (4096, 4096, 256, 64, 64, False, True, True): (3, 4, 3, 4), (4096, 4096, 256, 64, 64, True, False, True): (3, 4, 3, 4), (4096, 4096, 256, 128, 128, False, True, True): (1, 2, 2, 8), (4096, 4096, 256, 128, 128, True, False, True): (1, 2, 2, 8), (4096, 4096, 512, 16, 16, False, True, True): (4, 2, 3, 4), (4096, 4096, 512, 16, 16, True, False, True): (2, 4, 3, 2), (4096, 4096, 512, 32, 32, False, True, True): (3, 4, 3, 4), (4096, 4096, 512, 32, 32, True, False, True): (3, 4, 3, 2), (4096, 4096, 512, 64, 64, False, True, True): (3, 4, 3, 4), (4096, 4096, 512, 64, 64, True, False, True): (3, 4, 3, 4), (4096, 4096, 512, 128, 128, False, True, True): (2, 4, 2, 8), (4096, 4096, 512, 128, 128, True, False, True): (2, 4, 1, 4), (4096, 4096, 1024, 16, 16, False, True, True): (2, 8, 3, 2), (4096, 4096, 1024, 16, 16, True, False, True): (2, 8, 3, 2), (4096, 4096, 1024, 32, 32, False, True, True): (3, 8, 3, 4), (4096, 4096, 1024, 32, 32, True, False, True): (1, 8, 3, 2), (4096, 4096, 1024, 64, 64, False, True, True): (1, 8, 3, 4), (4096, 4096, 1024, 64, 64, True, False, True): (1, 8, 3, 4), (4096, 4096, 1024, 128, 128, False, True, True): (2, 8, 2, 8), (4096, 4096, 1024, 128, 128, True, False, True): (2, 8, 2, 8), (4096, 4096, 2048, 16, 16, False, True, True): (2, 8, 4, 4), (4096, 4096, 2048, 16, 16, True, False, True): (2, 8, 4, 4), (4096, 4096, 2048, 32, 32, False, True, True): (4, 8, 4, 8), (4096, 4096, 2048, 32, 32, True, False, True): (4, 8, 4, 8), (4096, 4096, 2048, 64, 64, False, True, True): (1, 16, 3, 4), (4096, 4096, 2048, 64, 64, True, False, True): (4, 16, 3, 4), (4096, 4096, 2048, 128, 128, False, True, True): (2, 16, 2, 8), (4096, 4096, 2048, 128, 128, True, False, True): (4, 16, 1, 4), (4096, 4096, 4096, 16, 16, False, True, True): (4, 32, 4, 4), (4096, 4096, 4096, 16, 16, True, False, True): (4, 32, 4, 2), (4096, 4096, 4096, 32, 32, False, True, True): (4, 16, 4, 8), (4096, 4096, 4096, 32, 32, True, False, True): (4, 16, 3, 8), (4096, 4096, 4096, 64, 64, False, True, True): (1, 32, 3, 4), (4096, 4096, 4096, 64, 64, True, False, True): (1, 32, 3, 4), (4096, 4096, 4096, 128, 128, False, True, True): (3, 32, 1, 4), (4096, 4096, 4096, 128, 128, True, False, True): (2, 32, 1, 4), (4096, 4096, 8192, 16, 16, False, True, True): (4, 
64, 4, 2), (4096, 4096, 8192, 16, 16, True, False, True): (4, 64, 4, 2), (4096, 4096, 8192, 32, 32, False, True, True): (4, 32, 4, 8), (4096, 4096, 8192, 32, 32, True, False, True): (4, 32, 4, 8), (4096, 4096, 8192, 64, 64, False, True, True): (2, 64, 3, 4), (4096, 4096, 8192, 64, 64, True, False, True): (2, 64, 3, 4), (4096, 4096, 8192, 128, 128, False, True, True): (3, 64, 1, 4), (4096, 4096, 8192, 128, 128, True, False, True): (1, 64, 1, 4), (4096, 4096, 16384, 16, 16, False, True, True): (4, 64, 3, 4), (4096, 4096, 16384, 16, 16, True, False, True): (4, 64, 4, 4), (4096, 4096, 16384, 32, 32, False, True, True): (4, 64, 4, 8), (4096, 4096, 16384, 32, 32, True, False, True): (4, 64, 4, 8), (4096, 4096, 16384, 64, 64, False, True, True): (1, 64, 2, 4), (4096, 4096, 16384, 64, 64, True, False, True): (1, 64, 3, 8), (4096, 4096, 16384, 128, 128, False, True, True): (3, 128, 1, 4), (4096, 4096, 16384, 128, 128, True, False, True): (1, 128, 1, 4), (4096, 4096, 32768, 16, 16, False, True, True): (8, 128, 3, 2), (4096, 4096, 32768, 16, 16, True, False, True): (5, 128, 4, 4), (4096, 4096, 32768, 32, 32, False, True, True): (3, 128, 4, 4), (4096, 4096, 32768, 32, 32, True, False, True): (3, 128, 4, 8), (4096, 4096, 32768, 64, 64, False, True, True): (1, 128, 2, 4), (4096, 4096, 32768, 64, 64, True, False, True): (3, 256, 3, 4), (4096, 4096, 32768, 128, 128, False, True, True): (3, 256, 1, 4), (4096, 4096, 32768, 128, 128, True, False, True): (1, 256, 1, 4), (4096, 4096, 50432, 16, 16, False, True, True): (1, 197, 1, 4), (4096, 4096, 50432, 16, 16, True, False, True): (4, 197, 4, 1), (4096, 4096, 50432, 32, 32, False, True, True): (1, 197, 1, 4), (4096, 4096, 50432, 32, 32, True, False, True): (2, 197, 3, 4), (4096, 4096, 50432, 64, 64, False, True, True): (1, 394, 3, 4), (4096, 4096, 50432, 64, 64, True, False, True): (1, 197, 2, 4), (4096, 4096, 50432, 128, 128, False, True, True): (3, 394, 1, 4), (4096, 4096, 50432, 128, 128, True, False, True): (1, 394, 2, 4), (4096, 4096, 65536, 16, 16, False, True, True): (5, 256, 4, 4), (4096, 4096, 65536, 16, 16, True, False, True): (5, 256, 4, 4), (4096, 4096, 65536, 32, 32, False, True, True): (4, 256, 4, 8), (4096, 4096, 65536, 32, 32, True, False, True): (4, 256, 3, 8), (4096, 4096, 65536, 64, 64, False, True, True): (1, 256, 2, 4), (4096, 4096, 65536, 64, 64, True, False, True): (1, 512, 3, 4), (4096, 4096, 65536, 128, 128, False, True, True): (3, 512, 1, 4), (4096, 4096, 65536, 128, 128, True, False, True): (1, 512, 1, 4), (4096, 4096, 65792, 16, 16, False, True, True): (1, 257, 1, 4), (4096, 4096, 65792, 16, 16, True, False, True): (5, 257, 4, 1), (4096, 4096, 65792, 32, 32, False, True, True): (1, 257, 1, 4), (4096, 4096, 65792, 32, 32, True, False, True): (1, 257, 3, 4), (4096, 4096, 65792, 64, 64, False, True, True): (1, 514, 3, 4), (4096, 4096, 65792, 64, 64, True, False, True): (1, 257, 2, 4), (4096, 4096, 65792, 128, 128, False, True, True): (3, 514, 1, 4), (4096, 4096, 65792, 128, 128, True, False, True): (1, 514, 2, 4), (4096, 4096, 131072, 16, 16, False, True, True): (4, 512, 3, 4), (4096, 4096, 131072, 16, 16, True, False, True): (5, 512, 4, 4), (4096, 4096, 131072, 32, 32, False, True, True): (1, 512, 4, 8), (4096, 4096, 131072, 32, 32, True, False, True): (4, 512, 4, 8), (4096, 4096, 131072, 64, 64, False, True, True): (1, 512, 2, 4), (4096, 4096, 131072, 64, 64, True, False, True): (1, 512, 2, 4), (4096, 4096, 131072, 128, 128, False, True, True): (3, 1024, 1, 4), (4096, 4096, 131072, 128, 128, True, False, True): (1, 1024, 1, 4), 
(5120, 1280, 65792, 16, 16, False, True, True): (1, 257, 1, 4), (5120, 1280, 65792, 16, 16, True, False, True): (7, 257, 4, 1), (5120, 1280, 65792, 32, 32, False, True, True): (2, 257, 1, 4), (5120, 1280, 65792, 32, 32, True, False, True): (5, 257, 3, 4), (5120, 1280, 65792, 64, 64, False, True, True): (1, 514, 1, 4), (5120, 1280, 65792, 64, 64, True, False, True): (5, 257, 2, 4), (5120, 1280, 65792, 128, 128, False, True, True): (3, 514, 1, 4), (5120, 1280, 65792, 128, 128, True, False, True): (4, 514, 2, 4), (6144, 6144, 256, 16, 16, False, True, True): (1, 2, 1, 4), (6144, 6144, 256, 16, 16, True, False, True): (1, 1, 4, 4), (6144, 6144, 256, 32, 32, False, True, True): (3, 2, 1, 8), (6144, 6144, 256, 32, 32, True, False, True): (2, 1, 3, 4), (6144, 6144, 256, 64, 64, False, True, True): (2, 2, 3, 4), (6144, 6144, 256, 64, 64, True, False, True): (6, 2, 4, 4), (6144, 6144, 256, 128, 128, False, True, True): (2, 2, 3, 8), (6144, 6144, 256, 128, 128, True, False, True): (1, 2, 3, 8), (6144, 6144, 512, 16, 16, False, True, True): (4, 4, 1, 4), (6144, 6144, 512, 16, 16, True, False, True): (3, 2, 3, 1), (6144, 6144, 512, 32, 32, False, True, True): (1, 8, 1, 4), (6144, 6144, 512, 32, 32, True, False, True): (2, 2, 3, 8), (6144, 6144, 512, 64, 64, False, True, True): (4, 4, 3, 4), (6144, 6144, 512, 64, 64, True, False, True): (6, 2, 3, 4), (6144, 6144, 512, 128, 128, False, True, True): (3, 4, 1, 4), (6144, 6144, 512, 128, 128, True, False, True): (4, 4, 3, 8), (6144, 6144, 1024, 16, 16, False, True, True): (1, 8, 1, 2), (6144, 6144, 1024, 16, 16, True, False, True): (4, 8, 4, 2), (6144, 6144, 1024, 32, 32, False, True, True): (1, 8, 4, 2), (6144, 6144, 1024, 32, 32, True, False, True): (1, 8, 4, 2), (6144, 6144, 1024, 64, 64, False, True, True): (4, 8, 3, 4), (6144, 6144, 1024, 64, 64, True, False, True): (1, 4, 3, 4), (6144, 6144, 1024, 128, 128, False, True, True): (3, 8, 1, 4), (6144, 6144, 1024, 128, 128, True, False, True): (1, 8, 3, 8), (6144, 6144, 2048, 16, 16, False, True, True): (4, 4, 1, 4), (6144, 6144, 2048, 16, 16, True, False, True): (2, 8, 4, 4), (6144, 6144, 2048, 32, 32, False, True, True): (4, 8, 3, 4), (6144, 6144, 2048, 32, 32, True, False, True): (2, 8, 3, 4), (6144, 6144, 2048, 64, 64, False, True, True): (4, 16, 3, 4), (6144, 6144, 2048, 64, 64, True, False, True): (2, 8, 3, 4), (6144, 6144, 2048, 128, 128, False, True, True): (3, 16, 1, 4), (6144, 6144, 2048, 128, 128, True, False, True): (4, 16, 3, 8), (6144, 6144, 4096, 16, 16, False, True, True): (4, 8, 1, 4), (6144, 6144, 4096, 16, 16, True, False, True): (4, 32, 4, 2), (6144, 6144, 4096, 32, 32, False, True, True): (4, 16, 1, 2), (6144, 6144, 4096, 32, 32, True, False, True): (2, 8, 3, 8), (6144, 6144, 4096, 64, 64, False, True, True): (4, 32, 3, 4), (6144, 6144, 4096, 64, 64, True, False, True): (4, 16, 3, 4), (6144, 6144, 4096, 128, 128, False, True, True): (6, 32, 1, 4), (6144, 6144, 4096, 128, 128, True, False, True): (4, 32, 3, 8), (6144, 6144, 8192, 16, 16, False, True, True): (2, 16, 1, 2), (6144, 6144, 8192, 16, 16, True, False, True): (4, 64, 4, 2), (6144, 6144, 8192, 32, 32, False, True, True): (4, 32, 1, 2), (6144, 6144, 8192, 32, 32, True, False, True): (4, 32, 3, 4), (6144, 6144, 8192, 64, 64, False, True, True): (4, 64, 3, 4), (6144, 6144, 8192, 64, 64, True, False, True): (4, 32, 3, 4), (6144, 6144, 8192, 128, 128, False, True, True): (6, 64, 1, 4), (6144, 6144, 8192, 128, 128, True, False, True): (4, 64, 3, 8), (6144, 6144, 16384, 16, 16, False, True, True): (2, 32, 1, 2), (6144, 6144, 16384, 
16, 16, True, False, True): (4, 64, 4, 4), (6144, 6144, 16384, 32, 32, False, True, True): (4, 64, 1, 2), (6144, 6144, 16384, 32, 32, True, False, True): (4, 64, 3, 4), (6144, 6144, 16384, 64, 64, False, True, True): (4, 128, 3, 4), (6144, 6144, 16384, 64, 64, True, False, True): (1, 32, 3, 8), (6144, 6144, 16384, 128, 128, False, True, True): (4, 128, 1, 4), (6144, 6144, 16384, 128, 128, True, False, True): (4, 128, 3, 8), (6144, 6144, 32768, 16, 16, False, True, True): (2, 64, 1, 2), (6144, 6144, 32768, 16, 16, True, False, True): (5, 128, 4, 1), (6144, 6144, 32768, 32, 32, False, True, True): (4, 128, 1, 2), (6144, 6144, 32768, 32, 32, True, False, True): (3, 128, 3, 4), (6144, 6144, 32768, 64, 64, False, True, True): (4, 256, 3, 4), (6144, 6144, 32768, 64, 64, True, False, True): (2, 64, 3, 8), (6144, 6144, 32768, 128, 128, False, True, True): (8, 256, 1, 4), (6144, 6144, 32768, 128, 128, True, False, True): (4, 256, 3, 8), (6144, 6144, 65536, 16, 16, False, True, True): (2, 128, 1, 2), (6144, 6144, 65536, 16, 16, True, False, True): (5, 256, 4, 1), (6144, 6144, 65536, 32, 32, False, True, True): (4, 256, 1, 2), (6144, 6144, 65536, 32, 32, True, False, True): (2, 256, 3, 4), (6144, 6144, 65536, 64, 64, False, True, True): (4, 512, 3, 4), (6144, 6144, 65536, 64, 64, True, False, True): (1, 128, 3, 8), (6144, 6144, 65536, 128, 128, False, True, True): (4, 512, 1, 4), (6144, 6144, 65536, 128, 128, True, False, True): (4, 512, 3, 8), (6144, 6144, 131072, 16, 16, False, True, True): (2, 256, 1, 2), (6144, 6144, 131072, 16, 16, True, False, True): (3, 512, 4, 4), (6144, 6144, 131072, 32, 32, False, True, True): (4, 512, 1, 2), (6144, 6144, 131072, 32, 32, True, False, True): (4, 512, 3, 4), (6144, 6144, 131072, 64, 64, False, True, True): (4, 1024, 3, 4), (6144, 6144, 131072, 64, 64, True, False, True): (2, 256, 3, 8), (6144, 6144, 131072, 128, 128, False, True, True): (4, 1024, 1, 4), (6144, 6144, 131072, 128, 128, True, False, True): (4, 1024, 3, 8), (8192, 8192, 256, 16, 16, False, True, True): (2, 2, 6, 4), (8192, 8192, 256, 16, 16, True, False, True): (2, 4, 2, 2), (8192, 8192, 256, 32, 32, False, True, True): (4, 2, 3, 4), (8192, 8192, 256, 32, 32, True, False, True): (4, 2, 3, 4), (8192, 8192, 256, 64, 64, False, True, True): (2, 2, 3, 8), (8192, 8192, 256, 64, 64, True, False, True): (6, 2, 3, 8), (8192, 8192, 256, 128, 128, False, True, True): (3, 2, 1, 4), (8192, 8192, 256, 128, 128, True, False, True): (1, 2, 1, 4), (8192, 8192, 512, 16, 16, False, True, True): (4, 4, 3, 2), (8192, 8192, 512, 16, 16, True, False, True): (4, 4, 3, 4), (8192, 8192, 512, 32, 32, False, True, True): (1, 4, 3, 4), (8192, 8192, 512, 32, 32, True, False, True): (5, 4, 3, 2), (8192, 8192, 512, 64, 64, False, True, True): (1, 4, 3, 4), (8192, 8192, 512, 64, 64, True, False, True): (2, 2, 3, 8), (8192, 8192, 512, 128, 128, False, True, True): (4, 4, 2, 8), (8192, 8192, 512, 128, 128, True, False, True): (4, 4, 2, 8), (8192, 8192, 1024, 16, 16, False, True, True): (4, 8, 4, 4), (8192, 8192, 1024, 16, 16, True, False, True): (4, 8, 4, 4), (8192, 8192, 1024, 32, 32, False, True, True): (2, 4, 4, 8), (8192, 8192, 1024, 32, 32, True, False, True): (1, 4, 3, 4), (8192, 8192, 1024, 64, 64, False, True, True): (4, 8, 3, 4), (8192, 8192, 1024, 64, 64, True, False, True): (2, 8, 3, 4), (8192, 8192, 1024, 128, 128, False, True, True): (4, 8, 2, 8), (8192, 8192, 1024, 128, 128, True, False, True): (4, 8, 1, 4), (8192, 8192, 2048, 16, 16, False, True, True): (2, 8, 4, 4), (8192, 8192, 2048, 16, 16, True, False, True): 
(2, 8, 4, 4), (8192, 8192, 2048, 32, 32, False, True, True): (2, 8, 4, 8), (8192, 8192, 2048, 32, 32, True, False, True): (2, 8, 4, 8), (8192, 8192, 2048, 64, 64, False, True, True): (4, 8, 2, 4), (8192, 8192, 2048, 64, 64, True, False, True): (4, 16, 3, 4), (8192, 8192, 2048, 128, 128, False, True, True): (6, 16, 1, 4), (8192, 8192, 2048, 128, 128, True, False, True): (4, 16, 1, 4), (8192, 8192, 4096, 16, 16, False, True, True): (4, 32, 4, 2), (8192, 8192, 4096, 16, 16, True, False, True): (4, 32, 4, 2), (8192, 8192, 4096, 32, 32, False, True, True): (2, 16, 4, 8), (8192, 8192, 4096, 32, 32, True, False, True): (4, 16, 4, 8), (8192, 8192, 4096, 64, 64, False, True, True): (4, 16, 2, 4), (8192, 8192, 4096, 64, 64, True, False, True): (4, 16, 2, 4), (8192, 8192, 4096, 128, 128, False, True, True): (6, 32, 1, 4), (8192, 8192, 4096, 128, 128, True, False, True): (4, 32, 1, 4), (8192, 8192, 8192, 16, 16, False, True, True): (4, 64, 4, 2), (8192, 8192, 8192, 16, 16, True, False, True): (4, 64, 4, 2), (8192, 8192, 8192, 32, 32, False, True, True): (2, 32, 4, 8), (8192, 8192, 8192, 32, 32, True, False, True): (2, 32, 4, 8), (8192, 8192, 8192, 64, 64, False, True, True): (2, 32, 2, 4), (8192, 8192, 8192, 64, 64, True, False, True): (4, 32, 2, 4), (8192, 8192, 8192, 128, 128, False, True, True): (6, 64, 1, 4), (8192, 8192, 8192, 128, 128, True, False, True): (4, 64, 1, 4), (8192, 8192, 16384, 16, 16, False, True, True): (4, 64, 3, 4), (8192, 8192, 16384, 16, 16, True, False, True): (4, 64, 4, 4), (8192, 8192, 16384, 32, 32, False, True, True): (4, 64, 4, 8), (8192, 8192, 16384, 32, 32, True, False, True): (4, 64, 4, 8), (8192, 8192, 16384, 64, 64, False, True, True): (4, 64, 2, 4), (8192, 8192, 16384, 64, 64, True, False, True): (4, 64, 3, 8), (8192, 8192, 16384, 128, 128, False, True, True): (6, 128, 1, 4), (8192, 8192, 16384, 128, 128, True, False, True): (4, 128, 1, 4), (8192, 8192, 32768, 16, 16, False, True, True): (3, 128, 4, 4), (8192, 8192, 32768, 16, 16, True, False, True): (3, 128, 4, 4), (8192, 8192, 32768, 32, 32, False, True, True): (2, 128, 4, 8), (8192, 8192, 32768, 32, 32, True, False, True): (2, 128, 4, 8), (8192, 8192, 32768, 64, 64, False, True, True): (2, 128, 2, 4), (8192, 8192, 32768, 64, 64, True, False, True): (2, 128, 3, 8), (8192, 8192, 32768, 128, 128, False, True, True): (6, 256, 1, 4), (8192, 8192, 32768, 128, 128, True, False, True): (4, 256, 1, 4), (8192, 8192, 50432, 16, 16, False, True, True): (1, 197, 1, 1), (8192, 8192, 50432, 16, 16, True, False, True): (3, 197, 4, 1), (8192, 8192, 50432, 32, 32, False, True, True): (2, 197, 1, 4), (8192, 8192, 50432, 32, 32, True, False, True): (2, 197, 3, 4), (8192, 8192, 50432, 64, 64, False, True, True): (2, 394, 3, 4), (8192, 8192, 65536, 16, 16, False, True, True): (3, 256, 4, 4), (8192, 8192, 65536, 16, 16, True, False, True): (4, 256, 4, 4), (8192, 8192, 65536, 32, 32, False, True, True): (2, 256, 4, 8), (8192, 8192, 65536, 32, 32, True, False, True): (2, 256, 3, 8), (8192, 8192, 65536, 64, 64, False, True, True): (2, 256, 2, 4), (8192, 8192, 65536, 64, 64, True, False, True): (4, 256, 3, 8), (8192, 8192, 65536, 128, 128, False, True, True): (6, 512, 1, 4), (8192, 8192, 65536, 128, 128, True, False, True): (4, 512, 1, 4), (8192, 8192, 65792, 16, 16, False, True, True): (1, 257, 1, 1), (8192, 8192, 65792, 16, 16, True, False, True): (3, 257, 4, 1), (8192, 8192, 65792, 32, 32, False, True, True): (2, 257, 1, 4), (8192, 8192, 65792, 32, 32, True, False, True): (1, 257, 3, 4), (8192, 8192, 65792, 64, 64, False, True, True): 
(2, 514, 3, 4), (8192, 8192, 65792, 64, 64, True, False, True): (1, 257, 3, 4), (8192, 8192, 65792, 128, 128, False, True, True): (2, 514, 1, 4), (8192, 8192, 65792, 128, 128, True, False, True): (2, 514, 3, 8), (8192, 8192, 131072, 16, 16, False, True, True): (4, 512, 4, 4), (8192, 8192, 131072, 16, 16, True, False, True): (3, 512, 4, 4), (8192, 8192, 131072, 32, 32, False, True, True): (2, 512, 4, 8), (8192, 8192, 131072, 32, 32, True, False, True): (2, 512, 4, 8), (8192, 8192, 131072, 64, 64, False, True, True): (2, 512, 2, 4), (8192, 8192, 131072, 64, 64, True, False, True): (2, 512, 2, 4), (8192, 8192, 131072, 128, 128, False, True, True): (4, 1024, 1, 4), (8192, 8192, 131072, 128, 128, True, False, True): (4, 1024, 1, 4), (12288, 12288, 256, 16, 16, False, True, True): (4, 2, 1, 4), (12288, 12288, 256, 16, 16, True, False, True): (1, 1, 3, 1), (12288, 12288, 256, 32, 32, False, True, True): (4, 4, 1, 4), (12288, 12288, 256, 32, 32, True, False, True): (2, 1, 3, 2), (12288, 12288, 256, 64, 64, False, True, True): (4, 2, 3, 4), (12288, 12288, 256, 64, 64, True, False, True): (3, 1, 3, 4), (12288, 12288, 256, 128, 128, False, True, True): (6, 2, 1, 4), (12288, 12288, 256, 128, 128, True, False, True): (4, 2, 3, 8), (12288, 12288, 512, 16, 16, False, True, True): (4, 4, 1, 2), (12288, 12288, 512, 16, 16, True, False, True): (4, 4, 4, 2), (12288, 12288, 512, 32, 32, False, True, True): (4, 4, 4, 2), (12288, 12288, 512, 32, 32, True, False, True): (2, 2, 3, 8), (12288, 12288, 512, 64, 64, False, True, True): (4, 4, 3, 4), (12288, 12288, 512, 64, 64, True, False, True): (8, 2, 3, 4), (12288, 12288, 512, 128, 128, False, True, True): (4, 4, 3, 8), (12288, 12288, 512, 128, 128, True, False, True): (4, 4, 3, 8), (12288, 12288, 1024, 16, 16, False, True, True): (4, 8, 1, 2), (12288, 12288, 1024, 16, 16, True, False, True): (2, 4, 4, 4), (12288, 12288, 1024, 32, 32, False, True, True): (4, 4, 3, 4), (12288, 12288, 1024, 32, 32, True, False, True): (1, 4, 3, 4), (12288, 12288, 1024, 64, 64, False, True, True): (4, 8, 3, 4), (12288, 12288, 1024, 64, 64, True, False, True): (2, 4, 3, 4), (12288, 12288, 1024, 128, 128, False, True, True): (4, 8, 3, 8), (12288, 12288, 1024, 128, 128, True, False, True): (4, 8, 3, 8), (12288, 12288, 2048, 16, 16, False, True, True): (2, 4, 1, 4), (12288, 12288, 2048, 16, 16, True, False, True): (2, 8, 4, 4), (12288, 12288, 2048, 32, 32, False, True, True): (4, 8, 1, 2), (12288, 12288, 2048, 32, 32, True, False, True): (2, 8, 4, 8), (12288, 12288, 2048, 64, 64, False, True, True): (4, 16, 3, 4), (12288, 12288, 2048, 64, 64, True, False, True): (2, 8, 3, 4), (12288, 12288, 2048, 128, 128, False, True, True): (4, 16, 3, 8), (12288, 12288, 2048, 128, 128, True, False, True): (4, 16, 3, 8), (12288, 12288, 4096, 16, 16, False, True, True): (2, 8, 1, 4), (12288, 12288, 4096, 16, 16, True, False, True): (2, 16, 4, 4), (12288, 12288, 4096, 32, 32, False, True, True): (2, 16, 1, 2), (12288, 12288, 4096, 32, 32, True, False, True): (2, 16, 3, 4), (12288, 12288, 4096, 64, 64, False, True, True): (4, 32, 3, 4), (12288, 12288, 4096, 64, 64, True, False, True): (2, 16, 3, 4), (12288, 12288, 4096, 128, 128, False, True, True): (4, 32, 1, 4), (12288, 12288, 4096, 128, 128, True, False, True): (4, 32, 3, 8), (12288, 12288, 8192, 16, 16, False, True, True): (2, 32, 1, 1), (12288, 12288, 8192, 16, 16, True, False, True): (4, 64, 4, 2), (12288, 12288, 8192, 32, 32, False, True, True): (2, 32, 1, 2), (12288, 12288, 8192, 32, 32, True, False, True): (2, 32, 3, 2), (12288, 12288, 8192, 64, 
64, False, True, True): (4, 64, 3, 4), (12288, 12288, 8192, 64, 64, True, False, True): (2, 32, 3, 4), (12288, 12288, 8192, 128, 128, False, True, True): (4, 64, 3, 8), (12288, 12288, 8192, 128, 128, True, False, True): (2, 64, 3, 8), (12288, 12288, 16384, 16, 16, False, True, True): (4, 128, 1, 2), (12288, 12288, 16384, 16, 16, True, False, True): (4, 128, 4, 2), (12288, 12288, 16384, 32, 32, False, True, True): (2, 64, 1, 2), (12288, 12288, 16384, 32, 32, True, False, True): (2, 64, 3, 4), (12288, 12288, 16384, 64, 64, False, True, True): (4, 128, 3, 4), (12288, 12288, 16384, 64, 64, True, False, True): (2, 64, 3, 4), (12288, 12288, 16384, 128, 128, False, True, True): (4, 128, 1, 4), (12288, 12288, 16384, 128, 128, True, False, True): (4, 128, 3, 8), (12288, 12288, 32768, 16, 16, False, True, True): (2, 128, 1, 1), (12288, 12288, 32768, 16, 16, True, False, True): (3, 128, 4, 1), (12288, 12288, 32768, 32, 32, False, True, True): (2, 128, 1, 2), (12288, 12288, 32768, 32, 32, True, False, True): (2, 128, 3, 2), (12288, 12288, 32768, 64, 64, False, True, True): (4, 256, 3, 4), (12288, 12288, 32768, 64, 64, True, False, True): (1, 64, 3, 8), (12288, 12288, 32768, 128, 128, False, True, True): (4, 256, 3, 8), (12288, 12288, 32768, 128, 128, True, False, True): (4, 256, 3, 8), (12288, 12288, 65536, 16, 16, False, True, True): (4, 512, 1, 2), (12288, 12288, 65536, 16, 16, True, False, True): (3, 256, 4, 1), (12288, 12288, 65536, 32, 32, False, True, True): (2, 256, 1, 2), (12288, 12288, 65536, 32, 32, True, False, True): (2, 256, 3, 2), (12288, 12288, 65536, 64, 64, False, True, True): (4, 512, 3, 4), (12288, 12288, 65536, 64, 64, True, False, True): (2, 256, 3, 4), (12288, 12288, 65536, 128, 128, False, True, True): (4, 512, 1, 4), (12288, 12288, 65536, 128, 128, True, False, True): (4, 512, 3, 8), (12288, 12288, 131072, 16, 16, False, True, True): (2, 512, 1, 1), (12288, 12288, 131072, 16, 16, True, False, True): (2, 512, 4, 4), (12288, 12288, 131072, 32, 32, False, True, True): (2, 512, 1, 2), (12288, 12288, 131072, 32, 32, True, False, True): (2, 512, 3, 4), (12288, 12288, 131072, 64, 64, False, True, True): (4, 1024, 3, 4), (12288, 12288, 131072, 64, 64, True, False, True): (2, 512, 3, 4), (12288, 12288, 131072, 128, 128, False, True, True): (4, 1024, 3, 8), (12288, 12288, 131072, 128, 128, True, False, True): (4, 1024, 3, 8), (16384, 16384, 256, 16, 16, False, True, True): (2, 2, 3, 2), (16384, 16384, 256, 16, 16, True, False, True): (2, 2, 6, 4), (16384, 16384, 256, 32, 32, False, True, True): (4, 2, 3, 4), (16384, 16384, 256, 32, 32, True, False, True): (4, 2, 3, 2), (16384, 16384, 256, 64, 64, False, True, True): (2, 2, 5, 4), (16384, 16384, 256, 64, 64, True, False, True): (2, 2, 3, 8), (16384, 16384, 256, 128, 128, False, True, True): (4, 2, 2, 8), (16384, 16384, 256, 128, 128, True, False, True): (2, 2, 1, 4), (16384, 16384, 512, 16, 16, False, True, True): (1, 2, 4, 4), (16384, 16384, 512, 16, 16, True, False, True): (1, 2, 4, 4), (16384, 16384, 512, 32, 32, False, True, True): (2, 2, 3, 8), (16384, 16384, 512, 32, 32, True, False, True): (2, 2, 4, 8), (16384, 16384, 512, 64, 64, False, True, True): (4, 4, 3, 4), (16384, 16384, 512, 64, 64, True, False, True): (2, 4, 3, 4), (16384, 16384, 512, 128, 128, False, True, True): (4, 4, 2, 8), (16384, 16384, 512, 128, 128, True, False, True): (4, 4, 2, 8), (16384, 16384, 1024, 16, 16, False, True, True): (4, 8, 4, 4), (16384, 16384, 1024, 16, 16, True, False, True): (2, 4, 4, 4), (16384, 16384, 1024, 32, 32, False, True, True): (2, 4, 4, 
8), (16384, 16384, 1024, 32, 32, True, False, True): (2, 4, 4, 8), (16384, 16384, 1024, 64, 64, False, True, True): (4, 4, 2, 4), (16384, 16384, 1024, 64, 64, True, False, True): (2, 4, 2, 4), (16384, 16384, 1024, 128, 128, False, True, True): (6, 8, 1, 4), (16384, 16384, 1024, 128, 128, True, False, True): (4, 8, 1, 4), (16384, 16384, 2048, 16, 16, False, True, True): (2, 8, 4, 4), (16384, 16384, 2048, 16, 16, True, False, True): (2, 8, 4, 4), (16384, 16384, 2048, 32, 32, False, True, True): (2, 8, 4, 8), (16384, 16384, 2048, 32, 32, True, False, True): (2, 8, 4, 8), (16384, 16384, 2048, 64, 64, False, True, True): (2, 8, 2, 4), (16384, 16384, 2048, 64, 64, True, False, True): (2, 8, 2, 4), (16384, 16384, 2048, 128, 128, False, True, True): (4, 16, 2, 8), (16384, 16384, 2048, 128, 128, True, False, True): (4, 16, 1, 4), (16384, 16384, 4096, 16, 16, False, True, True): (2, 16, 4, 4), (16384, 16384, 4096, 16, 16, True, False, True): (2, 16, 4, 4), (16384, 16384, 4096, 32, 32, False, True, True): (1, 16, 4, 8), (16384, 16384, 4096, 32, 32, True, False, True): (2, 16, 3, 4), (16384, 16384, 4096, 64, 64, False, True, True): (1, 16, 2, 4), (16384, 16384, 4096, 64, 64, True, False, True): (2, 16, 2, 4), (16384, 16384, 4096, 128, 128, False, True, True): (4, 32, 2, 8), (16384, 16384, 4096, 128, 128, True, False, True): (4, 32, 1, 4), (16384, 16384, 8192, 16, 16, False, True, True): (2, 64, 4, 2), (16384, 16384, 8192, 16, 16, True, False, True): (2, 64, 4, 2), (16384, 16384, 8192, 32, 32, False, True, True): (2, 32, 4, 8), (16384, 16384, 8192, 32, 32, True, False, True): (2, 32, 4, 8), (16384, 16384, 8192, 64, 64, False, True, True): (2, 32, 2, 4), (16384, 16384, 8192, 64, 64, True, False, True): (2, 32, 4, 8), (16384, 16384, 8192, 128, 128, False, True, True): (4, 64, 2, 8), (16384, 16384, 8192, 128, 128, True, False, True): (4, 64, 1, 4), (16384, 16384, 16384, 16, 16, False, True, True): (1, 64, 4, 4), (16384, 16384, 16384, 16, 16, True, False, True): (1, 64, 4, 4), (16384, 16384, 16384, 32, 32, False, True, True): (1, 64, 4, 8), (16384, 16384, 16384, 32, 32, True, False, True): (1, 64, 4, 8), (16384, 16384, 16384, 64, 64, False, True, True): (1, 64, 2, 4), (16384, 16384, 16384, 64, 64, True, False, True): (1, 64, 3, 8), (16384, 16384, 16384, 128, 128, False, True, True): (4, 128, 1, 4), (16384, 16384, 16384, 128, 128, True, False, True): (4, 128, 1, 4), (16384, 16384, 32768, 16, 16, False, True, True): (1, 128, 4, 4), (16384, 16384, 32768, 16, 16, True, False, True): (1, 128, 4, 4), (16384, 16384, 32768, 32, 32, False, True, True): (1, 128, 3, 4), (16384, 16384, 32768, 32, 32, True, False, True): (1, 128, 3, 8), (16384, 16384, 32768, 64, 64, False, True, True): (2, 128, 2, 4), (16384, 16384, 32768, 64, 64, True, False, True): (1, 128, 4, 8), (16384, 16384, 32768, 128, 128, False, True, True): (4, 256, 2, 8), (16384, 16384, 32768, 128, 128, True, False, True): (4, 256, 1, 4), (16384, 16384, 65536, 16, 16, False, True, True): (1, 256, 3, 4), (16384, 16384, 65536, 16, 16, True, False, True): (1, 256, 4, 4), (16384, 16384, 65536, 32, 32, False, True, True): (1, 256, 4, 8), (16384, 16384, 65536, 32, 32, True, False, True): (1, 256, 3, 4), (16384, 16384, 65536, 64, 64, False, True, True): (2, 256, 2, 4), (16384, 16384, 65536, 64, 64, True, False, True): (1, 256, 3, 8), (16384, 16384, 65536, 128, 128, False, True, True): (4, 512, 2, 8), (16384, 16384, 65536, 128, 128, True, False, True): (4, 512, 1, 4), (16384, 16384, 65792, 16, 16, False, True, True): (1, 257, 1, 1), (16384, 16384, 65792, 16, 16, 
True, False, True): (1, 257, 4, 1), (16384, 16384, 65792, 32, 32, False, True, True): (1, 257, 1, 4), (16384, 16384, 65792, 32, 32, True, False, True): (1, 257, 3, 4), (16384, 16384, 65792, 64, 64, False, True, True): (2, 514, 3, 4), (16384, 16384, 65792, 64, 64, True, False, True): (1, 257, 3, 4), (16384, 16384, 65792, 128, 128, False, True, True): (2, 514, 3, 8), (16384, 16384, 65792, 128, 128, True, False, True): (2, 514, 3, 8), (16384, 16384, 131072, 16, 16, False, True, True): (1, 512, 4, 4), (16384, 16384, 131072, 16, 16, True, False, True): (1, 512, 3, 2), (16384, 16384, 131072, 32, 32, False, True, True): (1, 512, 4, 8), (16384, 16384, 131072, 32, 32, True, False, True): (1, 512, 3, 2), (16384, 16384, 131072, 64, 64, False, True, True): (1, 512, 2, 4), (16384, 16384, 131072, 64, 64, True, False, True): (1, 512, 2, 4), (16384, 16384, 131072, 128, 128, False, True, True): (4, 1024, 1, 4), (16384, 16384, 131072, 128, 128, True, False, True): (4, 1024, 1, 4), (24576, 24576, 256, 16, 16, False, True, True): (6, 2, 1, 2), (24576, 24576, 256, 16, 16, True, False, True): (2, 2, 5, 4), (24576, 24576, 256, 32, 32, False, True, True): (4, 4, 1, 4), (24576, 24576, 256, 32, 32, True, False, True): (2, 2, 4, 2), (24576, 24576, 256, 64, 64, False, True, True): (2, 2, 3, 4), (24576, 24576, 256, 64, 64, True, False, True): (1, 1, 3, 4), (24576, 24576, 256, 128, 128, False, True, True): (6, 2, 1, 4), (24576, 24576, 256, 128, 128, True, False, True): (2, 2, 3, 8), (24576, 24576, 512, 16, 16, False, True, True): (4, 4, 1, 2), (24576, 24576, 512, 16, 16, True, False, True): (2, 2, 4, 4), (24576, 24576, 512, 32, 32, False, True, True): (1, 2, 3, 4), (24576, 24576, 512, 32, 32, True, False, True): (1, 2, 3, 4), (24576, 24576, 512, 64, 64, False, True, True): (4, 4, 3, 4), (24576, 24576, 512, 64, 64, True, False, True): (1, 2, 3, 4), (24576, 24576, 512, 128, 128, False, True, True): (4, 4, 3, 8), (24576, 24576, 512, 128, 128, True, False, True): (4, 4, 3, 8), (24576, 24576, 1024, 16, 16, False, True, True): (2, 8, 1, 2), (24576, 24576, 1024, 16, 16, True, False, True): (2, 4, 4, 4), (24576, 24576, 1024, 32, 32, False, True, True): (2, 4, 1, 2), (24576, 24576, 1024, 32, 32, True, False, True): (1, 4, 3, 4), (24576, 24576, 1024, 64, 64, False, True, True): (4, 8, 3, 4), (24576, 24576, 1024, 64, 64, True, False, True): (1, 4, 3, 4), (24576, 24576, 1024, 128, 128, False, True, True): (4, 8, 3, 8), (24576, 24576, 1024, 128, 128, True, False, True): (4, 8, 3, 8), (24576, 24576, 2048, 16, 16, False, True, True): (1, 4, 1, 4), (24576, 24576, 2048, 16, 16, True, False, True): (1, 8, 4, 4), (24576, 24576, 2048, 32, 32, False, True, True): (2, 8, 1, 2), (24576, 24576, 2048, 32, 32, True, False, True): (1, 8, 3, 4), (24576, 24576, 2048, 64, 64, False, True, True): (4, 16, 3, 4), (24576, 24576, 2048, 64, 64, True, False, True): (1, 4, 3, 8), (24576, 24576, 2048, 128, 128, False, True, True): (4, 16, 3, 8), (24576, 24576, 2048, 128, 128, True, False, True): (2, 16, 3, 8), (24576, 24576, 4096, 16, 16, False, True, True): (2, 32, 1, 2), (24576, 24576, 4096, 16, 16, True, False, True): (1, 16, 4, 4), (24576, 24576, 4096, 32, 32, False, True, True): (1, 16, 1, 2), (24576, 24576, 4096, 32, 32, True, False, True): (1, 16, 3, 4), (24576, 24576, 4096, 64, 64, False, True, True): (4, 32, 3, 4), (24576, 24576, 4096, 64, 64, True, False, True): (1, 8, 3, 8), (24576, 24576, 4096, 128, 128, False, True, True): (4, 32, 3, 8), (24576, 24576, 4096, 128, 128, True, False, True): (2, 32, 3, 8), (24576, 24576, 8192, 16, 16, False, True, 
True): (1, 32, 1, 1), (24576, 24576, 8192, 16, 16, True, False, True): (2, 64, 4, 2), (24576, 24576, 8192, 32, 32, False, True, True): (1, 32, 1, 2), (24576, 24576, 8192, 32, 32, True, False, True): (1, 32, 3, 4), (24576, 24576, 8192, 64, 64, False, True, True): (4, 64, 3, 4), (24576, 24576, 8192, 64, 64, True, False, True): (1, 32, 3, 4), (24576, 24576, 8192, 128, 128, False, True, True): (4, 64, 3, 8), (24576, 24576, 8192, 128, 128, True, False, True): (4, 64, 3, 8), (24576, 24576, 16384, 16, 16, False, True, True): (2, 128, 1, 2), (24576, 24576, 16384, 16, 16, True, False, True): (1, 64, 4, 4), (24576, 24576, 16384, 32, 32, False, True, True): (1, 64, 1, 2), (24576, 24576, 16384, 32, 32, True, False, True): (1, 64, 3, 2), (24576, 24576, 16384, 64, 64, False, True, True): (2, 128, 3, 4), (24576, 24576, 16384, 64, 64, True, False, True): (1, 32, 3, 8), (24576, 24576, 16384, 128, 128, False, True, True): (4, 128, 3, 8), (24576, 24576, 16384, 128, 128, True, False, True): (4, 128, 3, 8), (24576, 24576, 32768, 16, 16, False, True, True): (1, 128, 1, 1), (24576, 24576, 32768, 16, 16, True, False, True): (1, 128, 4, 4), (24576, 24576, 32768, 32, 32, False, True, True): (1, 128, 1, 2), (24576, 24576, 32768, 32, 32, True, False, True): (1, 128, 3, 4), (24576, 24576, 32768, 64, 64, False, True, True): (2, 256, 3, 4), (24576, 24576, 32768, 64, 64, True, False, True): (1, 128, 3, 4), (24576, 24576, 32768, 128, 128, False, True, True): (4, 256, 3, 8), (24576, 24576, 32768, 128, 128, True, False, True): (2, 256, 3, 8), (24576, 24576, 65536, 16, 16, False, True, True): (2, 512, 1, 2), (24576, 24576, 65536, 16, 16, True, False, True): (1, 256, 4, 4), (32768, 32768, 256, 16, 16, False, True, True): (4, 2, 1, 2), (32768, 32768, 256, 16, 16, True, False, True): (2, 2, 5, 4), (32768, 32768, 256, 32, 32, False, True, True): (4, 2, 4, 2), (32768, 32768, 256, 32, 32, True, False, True): (1, 1, 4, 8), (32768, 32768, 256, 64, 64, False, True, True): (2, 2, 3, 4), (32768, 32768, 256, 64, 64, True, False, True): (1, 1, 3, 8), (32768, 32768, 256, 128, 128, False, True, True): (2, 2, 3, 8), (32768, 32768, 256, 128, 128, True, False, True): (2, 2, 3, 8), (32768, 32768, 512, 16, 16, False, True, True): (2, 2, 1, 4), (32768, 32768, 512, 16, 16, True, False, True): (2, 2, 4, 2), (32768, 32768, 512, 32, 32, False, True, True): (1, 2, 3, 4), (32768, 32768, 512, 32, 32, True, False, True): (1, 2, 4, 8), (32768, 32768, 512, 64, 64, False, True, True): (4, 4, 3, 4), (32768, 32768, 512, 64, 64, True, False, True): (1, 2, 3, 4), (32768, 32768, 512, 128, 128, False, True, True): (4, 4, 3, 8), (32768, 32768, 512, 128, 128, True, False, True): (4, 4, 3, 8), (32768, 32768, 1024, 16, 16, False, True, True): (2, 4, 1, 1), (32768, 32768, 1024, 16, 16, True, False, True): (1, 4, 4, 2), (32768, 32768, 1024, 32, 32, False, True, True): (2, 4, 1, 4), (32768, 32768, 1024, 32, 32, True, False, True): (1, 4, 3, 4), (32768, 32768, 1024, 64, 64, False, True, True): (4, 8, 3, 4), (32768, 32768, 1024, 64, 64, True, False, True): (1, 4, 3, 4), (32768, 32768, 1024, 128, 128, False, True, True): (4, 8, 3, 8), (32768, 32768, 1024, 128, 128, True, False, True): (4, 8, 3, 8), (32768, 32768, 2048, 16, 16, False, True, True): (1, 8, 1, 4), (32768, 32768, 2048, 16, 16, True, False, True): (1, 8, 4, 4), (32768, 32768, 2048, 32, 32, False, True, True): (2, 8, 1, 4), (32768, 32768, 2048, 32, 32, True, False, True): (1, 8, 3, 4), (32768, 32768, 2048, 64, 64, False, True, True): (4, 16, 3, 4), (32768, 32768, 2048, 64, 64, True, False, True): (1, 8, 3, 4), 
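    # NOTE (editorial comment, not part of the generated data): this table appears to be
    # auto-generated autotuning output for the Triton-based bsr_dense_addmm kernel.
    # Under that assumption, each outer key looks like
    #     (op_name, device_name, (version_tag, dtype, sparsity)),
    # each inner key like
    #     (M, K, N, BM, BK, beta == 0, beta == 1, alpha == 1)
    # where BM x BK is the BSR block size, and each 4-tuple value presumably holds the
    # tuned meta-parameters (e.g. GROUP_SIZE_ROW, SPLIT_N, num_stages, num_warps).
    # The exact field names are an assumption; the numeric entries themselves are
    # reproduced verbatim and should not be edited by hand.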
(32768, 32768, 2048, 128, 128, False, True, True): (4, 16, 3, 8), (32768, 32768, 2048, 128, 128, True, False, True): (2, 16, 3, 8), (32768, 32768, 4096, 16, 16, False, True, True): (1, 16, 1, 4), (32768, 32768, 4096, 16, 16, True, False, True): (1, 16, 4, 4), (32768, 32768, 4096, 32, 32, False, True, True): (2, 16, 1, 4), (32768, 32768, 4096, 32, 32, True, False, True): (1, 16, 3, 4), (32768, 32768, 4096, 64, 64, False, True, True): (2, 32, 3, 4), (32768, 32768, 4096, 64, 64, True, False, True): (1, 16, 3, 4), (32768, 32768, 4096, 128, 128, False, True, True): (4, 32, 3, 8), (32768, 32768, 4096, 128, 128, True, False, True): (4, 32, 3, 8), (32768, 32768, 8192, 16, 16, False, True, True): (1, 32, 1, 4), (32768, 32768, 8192, 16, 16, True, False, True): (2, 64, 4, 1), (32768, 32768, 8192, 32, 32, False, True, True): (2, 32, 1, 4), (32768, 32768, 8192, 32, 32, True, False, True): (1, 32, 3, 4), (32768, 32768, 8192, 64, 64, False, True, True): (2, 64, 3, 4), (32768, 32768, 8192, 64, 64, True, False, True): (1, 32, 3, 4), (32768, 32768, 8192, 128, 128, False, True, True): (4, 64, 3, 8), (32768, 32768, 8192, 128, 128, True, False, True): (2, 64, 3, 8), (32768, 32768, 16384, 16, 16, False, True, True): (1, 64, 1, 4), (32768, 32768, 16384, 16, 16, True, False, True): (1, 64, 4, 1), (32768, 32768, 16384, 32, 32, False, True, True): (2, 64, 1, 4), (32768, 32768, 16384, 32, 32, True, False, True): (1, 64, 3, 4), (32768, 32768, 16384, 64, 64, False, True, True): (2, 128, 3, 4), (32768, 32768, 16384, 64, 64, True, False, True): (1, 64, 3, 4), (32768, 32768, 16384, 128, 128, False, True, True): (4, 128, 3, 8), (32768, 32768, 16384, 128, 128, True, False, True): (2, 128, 3, 8), (32768, 32768, 32768, 16, 16, False, True, True): (1, 128, 1, 4), (32768, 32768, 32768, 16, 16, True, False, True): (1, 128, 4, 1), (32768, 32768, 32768, 32, 32, False, True, True): (2, 128, 1, 4), (32768, 32768, 32768, 32, 32, True, False, True): (1, 128, 3, 4), (32768, 32768, 32768, 64, 64, False, True, True): (2, 256, 3, 4), (32768, 32768, 32768, 64, 64, True, False, True): (1, 128, 3, 4), (32768, 32768, 32768, 128, 128, False, True, True): (2, 256, 3, 8), (32768, 32768, 32768, 128, 128, True, False, True): (4, 256, 3, 8), (32768, 32768, 65536, 16, 16, False, True, True): (1, 256, 1, 4), (32768, 32768, 65536, 16, 16, True, False, True): (1, 256, 4, 1), (32768, 32768, 65536, 32, 32, False, True, True): (1, 256, 3, 4), (32768, 32768, 65536, 32, 32, True, False, True): (1, 256, 3, 4), (32768, 32768, 65536, 64, 64, False, True, True): (1, 512, 3, 4), (32768, 32768, 65536, 64, 64, True, False, True): (1, 256, 3, 4), (32768, 32768, 65536, 128, 128, False, True, True): (4, 512, 1, 4), (32768, 32768, 65536, 128, 128, True, False, True): (2, 512, 3, 8), }, ("bsr_dense_addmm", "NVIDIA A100-SXM4-80GB", (0, torch.float16, 0.56)): { (192, 192, 256, 64, 64, False, True, True): (1, 4, 3, 4), (192, 192, 256, 64, 64, True, False, True): (1, 4, 3, 4), (192, 192, 512, 64, 64, False, True, True): (1, 8, 5, 4), (192, 192, 512, 64, 64, True, False, True): (1, 8, 3, 4), (192, 192, 1024, 64, 64, False, True, True): (1, 16, 3, 2), (192, 192, 1024, 64, 64, True, False, True): (1, 16, 3, 4), (192, 192, 2048, 64, 64, False, True, True): (1, 32, 5, 4), (192, 192, 2048, 64, 64, True, False, True): (4, 32, 5, 4), (192, 192, 4096, 64, 64, False, True, True): (1, 64, 1, 8), (192, 192, 4096, 64, 64, True, False, True): (1, 32, 3, 4), (192, 192, 8192, 64, 64, False, True, True): (4, 128, 1, 4), (192, 192, 8192, 64, 64, True, False, True): (3, 64, 3, 4), (192, 
192, 16384, 64, 64, False, True, True): (1, 256, 1, 4), (192, 192, 16384, 64, 64, True, False, True): (3, 64, 2, 4), (192, 192, 32768, 64, 64, False, True, True): (1, 512, 1, 2), (192, 192, 32768, 64, 64, True, False, True): (2, 256, 2, 4), (192, 192, 65536, 64, 64, False, True, True): (1, 512, 1, 4), (192, 192, 65536, 64, 64, True, False, True): (2, 512, 2, 4), (192, 192, 131072, 64, 64, False, True, True): (1, 1024, 1, 4), (192, 192, 131072, 64, 64, True, False, True): (1, 512, 3, 4), (384, 384, 256, 128, 128, False, True, True): (3, 2, 3, 8), (384, 384, 256, 128, 128, True, False, True): (5, 2, 3, 8), (384, 384, 512, 128, 128, False, True, True): (4, 4, 3, 8), (384, 384, 512, 128, 128, True, False, True): (1, 4, 3, 8), (384, 384, 1024, 128, 128, False, True, True): (1, 8, 3, 8), (384, 384, 1024, 128, 128, True, False, True): (1, 8, 2, 8), (384, 384, 2048, 128, 128, False, True, True): (3, 16, 3, 8), (384, 384, 2048, 128, 128, True, False, True): (1, 16, 3, 8), (384, 384, 4096, 128, 128, False, True, True): (3, 32, 3, 8), (384, 384, 4096, 128, 128, True, False, True): (3, 32, 3, 8), (384, 384, 8192, 128, 128, False, True, True): (2, 64, 3, 8), (384, 384, 8192, 128, 128, True, False, True): (2, 64, 2, 4), (384, 384, 16384, 128, 128, False, True, True): (1, 128, 2, 8), (384, 384, 16384, 128, 128, True, False, True): (3, 128, 2, 4), (384, 384, 32768, 128, 128, False, True, True): (2, 256, 3, 8), (384, 384, 32768, 128, 128, True, False, True): (1, 256, 2, 4), (384, 384, 65536, 128, 128, False, True, True): (7, 512, 1, 4), (384, 384, 65536, 128, 128, True, False, True): (3, 512, 2, 4), (384, 384, 131072, 128, 128, False, True, True): (5, 1024, 1, 4), (384, 384, 131072, 128, 128, True, False, True): (1, 1024, 2, 4), }, ("bsr_dense_addmm", "NVIDIA A100-SXM4-80GB", (0, torch.float32, 0.5)): { (16, 16, 16, 16, 16, False, False, False): (2, 1, 1, 16), (16, 16, 16, 16, 16, False, False, True): (1, 1, 2, 4), (16, 16, 16, 16, 16, False, True, False): (1, 1, 2, 16), (16, 16, 16, 16, 16, False, True, True): (2, 1, 2, 8), (16, 16, 16, 16, 16, True, False, False): (1, 1, 1, 2), (16, 16, 16, 16, 16, True, False, True): (2, 1, 1, 4), (16, 16, 32, 16, 16, False, False, False): (1, 1, 1, 2), (16, 16, 32, 16, 16, False, False, True): (1, 1, 2, 8), (16, 16, 32, 16, 16, False, True, False): (1, 2, 1, 4), (16, 16, 32, 16, 16, False, True, True): (1, 2, 2, 4), (16, 16, 32, 16, 16, True, False, False): (1, 1, 2, 4), (16, 16, 32, 16, 16, True, False, True): (1, 2, 2, 4), (16, 16, 64, 16, 16, False, False, False): (1, 4, 1, 4), (16, 16, 64, 16, 16, False, False, True): (2, 2, 1, 4), (16, 16, 64, 16, 16, False, True, False): (1, 4, 1, 4), (16, 16, 64, 16, 16, False, True, True): (1, 4, 1, 8), (16, 16, 64, 16, 16, True, False, False): (1, 2, 1, 4), (16, 16, 64, 16, 16, True, False, True): (1, 4, 2, 8), (16, 32, 16, 16, 16, False, False, False): (1, 1, 2, 8), (16, 32, 16, 16, 16, False, False, True): (2, 1, 1, 4), (16, 32, 16, 16, 16, False, True, False): (1, 1, 1, 4), (16, 32, 16, 16, 16, False, True, True): (1, 1, 1, 4), (16, 32, 16, 16, 16, True, False, False): (1, 1, 1, 4), (16, 32, 16, 16, 16, True, False, True): (1, 1, 2, 8), (16, 32, 16, 16, 32, False, False, False): (1, 1, 2, 4), (16, 32, 16, 16, 32, False, False, True): (2, 1, 2, 2), (16, 32, 16, 16, 32, False, True, False): (1, 1, 1, 8), (16, 32, 16, 16, 32, False, True, True): (1, 1, 1, 2), (16, 32, 16, 16, 32, True, False, False): (3, 1, 1, 4), (16, 32, 16, 16, 32, True, False, True): (1, 1, 1, 4), (16, 32, 32, 16, 16, False, False, False): (1, 2, 1, 4), 
(16, 32, 32, 16, 16, False, False, True): (2, 2, 1, 4), (16, 32, 32, 16, 16, False, True, False): (1, 2, 1, 2), (16, 32, 32, 16, 16, False, True, True): (1, 2, 1, 4), (16, 32, 32, 16, 16, True, False, False): (1, 2, 1, 4), (16, 32, 32, 16, 16, True, False, True): (1, 2, 1, 4), (16, 32, 32, 16, 32, False, False, False): (1, 1, 2, 4), (16, 32, 32, 16, 32, False, False, True): (1, 2, 1, 4), (16, 32, 32, 16, 32, False, True, False): (1, 2, 2, 8), (16, 32, 32, 16, 32, False, True, True): (1, 2, 1, 1), (16, 32, 32, 16, 32, True, False, False): (1, 2, 1, 2), (16, 32, 32, 16, 32, True, False, True): (1, 2, 1, 4), (16, 32, 64, 16, 16, False, False, False): (1, 2, 1, 4), (16, 32, 64, 16, 16, False, False, True): (2, 4, 1, 4), (16, 32, 64, 16, 16, False, True, False): (1, 4, 2, 4), (16, 32, 64, 16, 16, False, True, True): (1, 4, 1, 4), (16, 32, 64, 16, 16, True, False, False): (1, 2, 2, 8), (16, 32, 64, 16, 16, True, False, True): (1, 4, 1, 2), (16, 32, 64, 16, 32, False, False, False): (1, 4, 1, 4), (16, 32, 64, 16, 32, False, False, True): (1, 4, 3, 4), (16, 32, 64, 16, 32, False, True, False): (1, 2, 1, 4), (16, 32, 64, 16, 32, False, True, True): (1, 4, 1, 4), (16, 32, 64, 16, 32, True, False, False): (1, 2, 1, 8), (16, 32, 64, 16, 32, True, False, True): (1, 2, 1, 4), (16, 64, 16, 16, 32, False, False, False): (1, 1, 1, 2), (16, 64, 16, 16, 32, False, False, True): (1, 1, 1, 8), (16, 64, 16, 16, 32, False, True, False): (1, 1, 1, 8), (16, 64, 16, 16, 32, False, True, True): (1, 1, 1, 4), (16, 64, 16, 16, 32, True, False, False): (1, 1, 1, 8), (16, 64, 16, 16, 32, True, False, True): (1, 1, 1, 4), (16, 64, 32, 16, 32, False, False, False): (1, 2, 1, 4), (16, 64, 32, 16, 32, False, False, True): (1, 1, 1, 4), (16, 64, 32, 16, 32, False, True, False): (1, 2, 1, 1), (16, 64, 32, 16, 32, False, True, True): (1, 2, 1, 8), (16, 64, 32, 16, 32, True, False, False): (2, 2, 1, 4), (16, 64, 32, 16, 32, True, False, True): (2, 2, 1, 4), (16, 64, 64, 16, 32, False, False, False): (1, 2, 1, 4), (16, 64, 64, 16, 32, False, False, True): (1, 4, 1, 4), (16, 64, 64, 16, 32, False, True, False): (1, 4, 1, 4), (16, 64, 64, 16, 32, False, True, True): (1, 4, 1, 4), (16, 64, 64, 16, 32, True, False, False): (1, 4, 1, 2), (16, 64, 64, 16, 32, True, False, True): (3, 4, 1, 4), (32, 16, 16, 16, 16, False, False, False): (1, 1, 2, 4), (32, 16, 16, 16, 16, False, False, True): (1, 1, 1, 2), (32, 16, 16, 16, 16, False, True, False): (1, 1, 2, 4), (32, 16, 16, 16, 16, False, True, True): (1, 1, 2, 4), (32, 16, 16, 16, 16, True, False, False): (1, 1, 3, 8), (32, 16, 16, 16, 16, True, False, True): (1, 1, 2, 4), (32, 16, 32, 16, 16, False, False, False): (1, 2, 1, 4), (32, 16, 32, 16, 16, False, False, True): (1, 2, 3, 4), (32, 16, 32, 16, 16, False, True, False): (1, 1, 1, 8), (32, 16, 32, 16, 16, False, True, True): (1, 2, 1, 4), (32, 16, 32, 16, 16, True, False, False): (1, 1, 1, 2), (32, 16, 32, 16, 16, True, False, True): (1, 1, 1, 4), (32, 16, 64, 16, 16, False, False, False): (1, 4, 1, 4), (32, 16, 64, 16, 16, False, False, True): (3, 4, 1, 4), (32, 16, 64, 16, 16, False, True, False): (1, 4, 1, 1), (32, 16, 64, 16, 16, False, True, True): (1, 4, 1, 4), (32, 16, 64, 16, 16, True, False, False): (1, 4, 1, 4), (32, 16, 64, 16, 16, True, False, True): (1, 4, 1, 4), (32, 32, 16, 16, 16, False, False, False): (1, 1, 1, 2), (32, 32, 16, 16, 16, False, False, True): (2, 1, 1, 4), (32, 32, 16, 16, 16, False, True, False): (1, 1, 1, 2), (32, 32, 16, 16, 16, False, True, True): (2, 1, 1, 4), (32, 32, 16, 16, 16, True, False, 
False): (3, 1, 2, 4), (32, 32, 16, 16, 16, True, False, True): (1, 1, 2, 4), (32, 32, 16, 16, 32, False, False, False): (2, 1, 1, 2), (32, 32, 16, 16, 32, False, False, True): (1, 1, 1, 4), (32, 32, 16, 16, 32, False, True, False): (1, 1, 1, 4), (32, 32, 16, 16, 32, False, True, True): (1, 1, 1, 8), (32, 32, 16, 16, 32, True, False, False): (1, 1, 1, 8), (32, 32, 16, 16, 32, True, False, True): (1, 1, 1, 4), (32, 32, 16, 32, 32, False, False, False): (2, 1, 1, 4), (32, 32, 16, 32, 32, False, False, True): (1, 1, 2, 4), (32, 32, 16, 32, 32, False, True, False): (2, 1, 1, 1), (32, 32, 16, 32, 32, False, True, True): (2, 1, 2, 4), (32, 32, 16, 32, 32, True, False, False): (1, 1, 1, 8), (32, 32, 16, 32, 32, True, False, True): (1, 1, 1, 4), (32, 32, 32, 16, 16, False, False, False): (1, 1, 1, 4), (32, 32, 32, 16, 16, False, False, True): (1, 2, 1, 2), (32, 32, 32, 16, 16, False, True, False): (2, 2, 1, 4), (32, 32, 32, 16, 16, False, True, True): (1, 2, 2, 4), (32, 32, 32, 16, 16, True, False, False): (1, 2, 1, 4), (32, 32, 32, 16, 16, True, False, True): (2, 2, 1, 4), (32, 32, 32, 16, 32, False, False, False): (1, 2, 1, 4), (32, 32, 32, 16, 32, False, False, True): (1, 2, 1, 4), (32, 32, 32, 16, 32, False, True, False): (1, 2, 1, 4), (32, 32, 32, 16, 32, False, True, True): (1, 2, 1, 4), (32, 32, 32, 16, 32, True, False, False): (2, 1, 1, 2), (32, 32, 32, 16, 32, True, False, True): (2, 2, 2, 4), (32, 32, 32, 32, 32, False, False, False): (1, 1, 1, 4), (32, 32, 32, 32, 32, False, False, True): (1, 1, 1, 2), (32, 32, 32, 32, 32, False, True, False): (1, 1, 1, 4), (32, 32, 32, 32, 32, False, True, True): (1, 1, 2, 2), (32, 32, 32, 32, 32, True, False, False): (1, 1, 1, 2), (32, 32, 32, 32, 32, True, False, True): (1, 1, 2, 1), (32, 32, 64, 16, 16, False, False, False): (2, 4, 1, 4), (32, 32, 64, 16, 16, False, False, True): (1, 4, 2, 4), (32, 32, 64, 16, 16, False, True, False): (1, 4, 1, 4), (32, 32, 64, 16, 16, False, True, True): (1, 4, 1, 4), (32, 32, 64, 16, 16, True, False, False): (1, 2, 1, 4), (32, 32, 64, 16, 16, True, False, True): (2, 4, 1, 4), (32, 32, 64, 16, 32, False, False, False): (1, 4, 1, 8), (32, 32, 64, 16, 32, False, False, True): (1, 4, 1, 4), (32, 32, 64, 16, 32, False, True, False): (1, 4, 1, 4), (32, 32, 64, 16, 32, False, True, True): (2, 4, 1, 4), (32, 32, 64, 16, 32, True, False, False): (1, 2, 2, 4), (32, 32, 64, 16, 32, True, False, True): (2, 4, 1, 4), (32, 32, 64, 32, 32, False, False, False): (2, 2, 1, 4), (32, 32, 64, 32, 32, False, False, True): (1, 1, 1, 4), (32, 32, 64, 32, 32, False, True, False): (1, 1, 1, 8), (32, 32, 64, 32, 32, False, True, True): (2, 1, 1, 4), (32, 32, 64, 32, 32, True, False, False): (1, 1, 1, 4), (32, 32, 64, 32, 32, True, False, True): (1, 2, 1, 1), (32, 64, 16, 16, 32, False, False, False): (1, 1, 2, 2), (32, 64, 16, 16, 32, False, False, True): (2, 1, 1, 4), (32, 64, 16, 16, 32, False, True, False): (1, 1, 1, 8), (32, 64, 16, 16, 32, False, True, True): (1, 1, 3, 4), (32, 64, 16, 16, 32, True, False, False): (1, 1, 1, 2), (32, 64, 16, 16, 32, True, False, True): (1, 1, 2, 4), (32, 64, 16, 32, 32, False, False, False): (1, 1, 1, 2), (32, 64, 16, 32, 32, False, False, True): (1, 1, 3, 4), (32, 64, 16, 32, 32, False, True, False): (1, 1, 2, 4), (32, 64, 16, 32, 32, False, True, True): (1, 1, 1, 8), (32, 64, 16, 32, 32, True, False, False): (1, 1, 2, 4), (32, 64, 16, 32, 32, True, False, True): (1, 1, 1, 8), (32, 64, 32, 16, 32, False, False, False): (1, 2, 1, 4), (32, 64, 32, 16, 32, False, False, True): (1, 2, 3, 4), (32, 64, 32, 
16, 32, False, True, False): (1, 2, 1, 8), (32, 64, 32, 16, 32, False, True, True): (3, 2, 1, 4), (32, 64, 32, 16, 32, True, False, False): (1, 1, 1, 8), (32, 64, 32, 16, 32, True, False, True): (1, 2, 1, 4), (32, 64, 32, 32, 32, False, False, False): (1, 1, 1, 1), (32, 64, 32, 32, 32, False, False, True): (1, 1, 1, 4), (32, 64, 32, 32, 32, False, True, False): (1, 1, 1, 4), (32, 64, 32, 32, 32, False, True, True): (1, 1, 1, 4), (32, 64, 32, 32, 32, True, False, False): (1, 1, 1, 4), (32, 64, 32, 32, 32, True, False, True): (1, 1, 2, 8), (32, 64, 64, 16, 32, False, False, False): (2, 4, 1, 4), (32, 64, 64, 16, 32, False, False, True): (1, 4, 1, 4), (32, 64, 64, 16, 32, False, True, False): (1, 4, 1, 4), (32, 64, 64, 16, 32, False, True, True): (2, 4, 1, 4), (32, 64, 64, 16, 32, True, False, False): (1, 4, 1, 4), (32, 64, 64, 16, 32, True, False, True): (1, 4, 1, 4), (32, 64, 64, 32, 32, False, False, False): (2, 2, 1, 4), (32, 64, 64, 32, 32, False, False, True): (1, 2, 1, 8), (32, 64, 64, 32, 32, False, True, False): (1, 2, 1, 4), (32, 64, 64, 32, 32, False, True, True): (1, 2, 1, 4), (32, 64, 64, 32, 32, True, False, False): (2, 2, 1, 4), (32, 64, 64, 32, 32, True, False, True): (1, 2, 3, 8), (64, 32, 16, 32, 32, False, False, False): (1, 1, 1, 4), (64, 32, 16, 32, 32, False, False, True): (3, 1, 2, 4), (64, 32, 16, 32, 32, False, True, False): (2, 1, 1, 2), (64, 32, 16, 32, 32, False, True, True): (1, 1, 1, 8), (64, 32, 16, 32, 32, True, False, False): (1, 1, 1, 2), (64, 32, 16, 32, 32, True, False, True): (1, 1, 1, 4), (64, 32, 32, 32, 32, False, False, False): (1, 1, 1, 4), (64, 32, 32, 32, 32, False, False, True): (1, 1, 2, 8), (64, 32, 32, 32, 32, False, True, False): (1, 1, 1, 8), (64, 32, 32, 32, 32, False, True, True): (1, 1, 1, 4), (64, 32, 32, 32, 32, True, False, False): (1, 1, 2, 4), (64, 32, 32, 32, 32, True, False, True): (1, 1, 3, 8), (64, 32, 64, 32, 32, False, False, False): (1, 2, 1, 4), (64, 32, 64, 32, 32, False, False, True): (2, 2, 1, 4), (64, 32, 64, 32, 32, False, True, False): (1, 1, 1, 4), (64, 32, 64, 32, 32, False, True, True): (1, 2, 1, 8), (64, 32, 64, 32, 32, True, False, False): (2, 2, 1, 4), (64, 32, 64, 32, 32, True, False, True): (1, 2, 1, 8), (64, 64, 16, 32, 32, False, False, False): (1, 1, 2, 8), (64, 64, 16, 32, 32, False, False, True): (2, 1, 2, 4), (64, 64, 16, 32, 32, False, True, False): (1, 1, 1, 2), (64, 64, 16, 32, 32, False, True, True): (1, 1, 2, 4), (64, 64, 16, 32, 32, True, False, False): (1, 1, 1, 2), (64, 64, 16, 32, 32, True, False, True): (1, 1, 2, 4), (64, 64, 32, 32, 32, False, False, False): (1, 1, 1, 4), (64, 64, 32, 32, 32, False, False, True): (2, 1, 1, 4), (64, 64, 32, 32, 32, False, True, False): (1, 1, 1, 8), (64, 64, 32, 32, 32, False, True, True): (2, 1, 1, 4), (64, 64, 32, 32, 32, True, False, False): (1, 1, 1, 4), (64, 64, 32, 32, 32, True, False, True): (1, 1, 1, 8), (64, 64, 64, 32, 32, False, False, False): (2, 2, 1, 4), (64, 64, 64, 32, 32, False, False, True): (1, 2, 1, 4), (64, 64, 64, 32, 32, False, True, False): (1, 2, 1, 4), (64, 64, 64, 32, 32, False, True, True): (2, 2, 1, 4), (64, 64, 64, 32, 32, True, False, False): (1, 1, 1, 8), (64, 64, 64, 32, 32, True, False, True): (1, 2, 2, 4), (192, 192, 256, 16, 16, False, True, True): (1, 16, 3, 2), (192, 192, 256, 16, 16, True, False, True): (1, 8, 5, 4), (192, 192, 256, 32, 32, False, True, True): (2, 8, 4, 4), (192, 192, 256, 32, 32, True, False, True): (1, 8, 5, 4), (192, 192, 512, 16, 16, False, True, True): (2, 16, 3, 4), (192, 192, 512, 16, 16, True, False, 
True): (1, 16, 5, 4), (192, 192, 512, 32, 32, False, True, True): (1, 16, 3, 4), (192, 192, 512, 32, 32, True, False, True): (2, 16, 3, 4), (192, 192, 1024, 16, 16, False, True, True): (3, 16, 3, 4), (192, 192, 1024, 16, 16, True, False, True): (2, 8, 3, 4), (192, 192, 1024, 32, 32, False, True, True): (3, 32, 1, 4), (192, 192, 1024, 32, 32, True, False, True): (3, 16, 3, 4), (192, 192, 2048, 16, 16, False, True, True): (1, 32, 3, 4), (192, 192, 2048, 16, 16, True, False, True): (2, 16, 3, 4), (192, 192, 2048, 32, 32, False, True, True): (1, 64, 1, 4), (192, 192, 2048, 32, 32, True, False, True): (1, 64, 2, 4), (192, 192, 4096, 16, 16, False, True, True): (1, 64, 2, 4), (192, 192, 4096, 16, 16, True, False, True): (1, 32, 3, 4), (192, 192, 4096, 32, 32, False, True, True): (3, 128, 2, 4), (192, 192, 4096, 32, 32, True, False, True): (1, 128, 2, 4), (192, 192, 8192, 16, 16, False, True, True): (2, 64, 3, 4), (192, 192, 8192, 16, 16, True, False, True): (1, 64, 3, 4), (192, 192, 8192, 32, 32, False, True, True): (3, 128, 3, 4), (192, 192, 8192, 32, 32, True, False, True): (1, 128, 2, 4), (192, 192, 16384, 16, 16, False, True, True): (1, 256, 3, 2), (192, 192, 16384, 16, 16, True, False, True): (1, 256, 3, 2), (192, 192, 16384, 32, 32, False, True, True): (2, 256, 3, 4), (192, 192, 16384, 32, 32, True, False, True): (2, 256, 3, 4), (192, 192, 32768, 16, 16, False, True, True): (2, 512, 3, 2), (192, 192, 32768, 16, 16, True, False, True): (1, 128, 3, 4), (192, 192, 32768, 32, 32, False, True, True): (2, 512, 3, 4), (192, 192, 32768, 32, 32, True, False, True): (2, 512, 3, 4), (192, 192, 65536, 16, 16, False, True, True): (2, 1024, 3, 2), (192, 192, 65536, 16, 16, True, False, True): (1, 256, 3, 4), (192, 192, 65536, 32, 32, False, True, True): (2, 1024, 3, 4), (192, 192, 65536, 32, 32, True, False, True): (2, 1024, 3, 4), (192, 192, 131072, 16, 16, False, True, True): (2, 512, 3, 4), (192, 192, 131072, 16, 16, True, False, True): (1, 512, 3, 4), (192, 192, 131072, 32, 32, False, True, True): (2, 1024, 3, 4), (192, 192, 131072, 32, 32, True, False, True): (2, 1024, 3, 4), (256, 256, 256, 16, 16, False, True, True): (1, 16, 3, 4), (256, 256, 256, 16, 16, True, False, True): (2, 16, 1, 4), (256, 256, 256, 32, 32, False, True, True): (1, 8, 4, 8), (256, 256, 256, 32, 32, True, False, True): (4, 8, 4, 4), (256, 256, 256, 64, 64, False, True, True): (1, 4, 4, 8), (256, 256, 256, 64, 64, True, False, True): (1, 4, 3, 8), (256, 256, 256, 128, 128, False, True, True): (7, 2, 1, 32), (256, 256, 256, 128, 128, True, False, True): (3, 2, 1, 32), (256, 256, 512, 16, 16, False, True, True): (1, 16, 5, 4), (256, 256, 512, 16, 16, True, False, True): (1, 16, 3, 2), (256, 256, 512, 32, 32, False, True, True): (4, 16, 4, 4), (256, 256, 512, 32, 32, True, False, True): (4, 16, 3, 4), (256, 256, 512, 64, 64, False, True, True): (1, 8, 3, 8), (256, 256, 512, 64, 64, True, False, True): (1, 8, 3, 8), (256, 256, 512, 128, 128, False, True, True): (1, 4, 1, 32), (256, 256, 512, 128, 128, True, False, True): (3, 4, 1, 32), (256, 256, 1024, 16, 16, False, True, True): (3, 32, 5, 2), (256, 256, 1024, 16, 16, True, False, True): (2, 32, 5, 2), (256, 256, 1024, 32, 32, False, True, True): (1, 32, 4, 4), (256, 256, 1024, 32, 32, True, False, True): (1, 32, 5, 4), (256, 256, 1024, 64, 64, False, True, True): (4, 16, 3, 8), (256, 256, 1024, 64, 64, True, False, True): (1, 16, 3, 8), (256, 256, 1024, 128, 128, False, True, True): (1, 8, 1, 32), (256, 256, 1024, 128, 128, True, False, True): (3, 8, 1, 32), (256, 256, 2048, 16, 
16, False, True, True): (3, 32, 3, 4), (256, 256, 2048, 16, 16, True, False, True): (1, 64, 3, 2), (256, 256, 2048, 32, 32, False, True, True): (1, 64, 3, 4), (256, 256, 2048, 32, 32, True, False, True): (1, 64, 3, 4), (256, 256, 2048, 64, 64, False, True, True): (2, 32, 1, 8), (256, 256, 2048, 64, 64, True, False, True): (2, 32, 1, 8), (256, 256, 2048, 128, 128, False, True, True): (4, 16, 1, 32), (256, 256, 2048, 128, 128, True, False, True): (4, 16, 1, 32), (256, 256, 4096, 16, 16, False, True, True): (1, 32, 2, 4), (256, 256, 4096, 16, 16, True, False, True): (1, 32, 3, 4), (256, 256, 4096, 32, 32, False, True, True): (1, 128, 2, 4), (256, 256, 4096, 32, 32, True, False, True): (1, 128, 2, 4), (256, 256, 4096, 64, 64, False, True, True): (2, 64, 4, 8), (256, 256, 4096, 64, 64, True, False, True): (3, 64, 2, 8), (256, 256, 4096, 128, 128, False, True, True): (3, 32, 1, 32), (256, 256, 4096, 128, 128, True, False, True): (2, 32, 1, 32), (256, 256, 8192, 16, 16, False, True, True): (1, 64, 3, 4), (256, 256, 8192, 16, 16, True, False, True): (2, 128, 3, 2), (256, 256, 8192, 32, 32, False, True, True): (3, 128, 3, 4), (256, 256, 8192, 32, 32, True, False, True): (1, 128, 3, 4), (256, 256, 8192, 64, 64, False, True, True): (3, 128, 1, 4), (256, 256, 8192, 64, 64, True, False, True): (4, 128, 2, 8), (256, 256, 8192, 128, 128, False, True, True): (6, 64, 1, 32), (256, 256, 8192, 128, 128, True, False, True): (2, 64, 1, 32), (256, 256, 16384, 16, 16, False, True, True): (4, 128, 3, 4), (256, 256, 16384, 16, 16, True, False, True): (3, 128, 3, 4), (256, 256, 16384, 32, 32, False, True, True): (4, 256, 3, 4), (256, 256, 16384, 32, 32, True, False, True): (2, 256, 3, 4), (256, 256, 16384, 64, 64, False, True, True): (3, 256, 1, 4), (256, 256, 16384, 64, 64, True, False, True): (2, 256, 2, 4), (256, 256, 16384, 128, 128, False, True, True): (1, 128, 1, 32), (256, 256, 16384, 128, 128, True, False, True): (3, 128, 1, 32), (256, 256, 32768, 16, 16, False, True, True): (1, 256, 3, 4), (256, 256, 32768, 16, 16, True, False, True): (2, 128, 3, 4), (256, 256, 32768, 32, 32, False, True, True): (2, 512, 3, 4), (256, 256, 32768, 32, 32, True, False, True): (4, 512, 3, 4), (256, 256, 32768, 64, 64, False, True, True): (1, 512, 1, 8), (256, 256, 32768, 64, 64, True, False, True): (1, 512, 2, 4), (256, 256, 32768, 128, 128, False, True, True): (1, 256, 1, 32), (256, 256, 32768, 128, 128, True, False, True): (1, 256, 1, 32), (256, 256, 65536, 16, 16, False, True, True): (2, 512, 3, 4), (256, 256, 65536, 16, 16, True, False, True): (1, 256, 3, 4), (256, 256, 65536, 32, 32, False, True, True): (1, 1024, 3, 4), (256, 256, 65536, 32, 32, True, False, True): (2, 1024, 3, 4), (256, 256, 65536, 64, 64, False, True, True): (1, 1024, 2, 4), (256, 256, 65536, 64, 64, True, False, True): (1, 1024, 2, 4), (256, 256, 65536, 128, 128, False, True, True): (1, 512, 1, 32), (256, 256, 65536, 128, 128, True, False, True): (2, 512, 1, 32), (256, 256, 131072, 16, 16, False, True, True): (1, 1024, 3, 4), (256, 256, 131072, 16, 16, True, False, True): (1, 512, 3, 4), (256, 256, 131072, 32, 32, False, True, True): (1, 2048, 3, 4), (256, 256, 131072, 32, 32, True, False, True): (1, 2048, 3, 4), (256, 256, 131072, 64, 64, False, True, True): (1, 2048, 1, 8), (256, 256, 131072, 64, 64, True, False, True): (1, 2048, 2, 4), (256, 256, 131072, 128, 128, False, True, True): (1, 1024, 1, 32), (256, 256, 131072, 128, 128, True, False, True): (4, 1024, 1, 32), (384, 384, 256, 16, 16, False, True, True): (1, 8, 3, 4), (384, 384, 256, 16, 16, 
True, False, True): (1, 8, 3, 4), (384, 384, 256, 32, 32, False, True, True): (2, 8, 3, 8), (384, 384, 256, 32, 32, True, False, True): (1, 8, 3, 4), (384, 384, 256, 64, 64, False, True, True): (1, 4, 4, 8), (384, 384, 256, 64, 64, True, False, True): (2, 4, 3, 8), (384, 384, 512, 16, 16, False, True, True): (3, 16, 3, 2), (384, 384, 512, 16, 16, True, False, True): (3, 16, 3, 2), (384, 384, 512, 32, 32, False, True, True): (2, 8, 3, 4), (384, 384, 512, 32, 32, True, False, True): (1, 8, 3, 4), (384, 384, 512, 64, 64, False, True, True): (2, 8, 3, 8), (384, 384, 512, 64, 64, True, False, True): (2, 8, 4, 8), (384, 384, 1024, 16, 16, False, True, True): (3, 16, 3, 2), (384, 384, 1024, 16, 16, True, False, True): (4, 32, 3, 2), (384, 384, 1024, 32, 32, False, True, True): (1, 32, 3, 4), (384, 384, 1024, 32, 32, True, False, True): (2, 16, 3, 4), (384, 384, 1024, 64, 64, False, True, True): (2, 16, 3, 8), (384, 384, 1024, 64, 64, True, False, True): (4, 16, 4, 8), (384, 384, 2048, 16, 16, False, True, True): (3, 16, 3, 4), (384, 384, 2048, 16, 16, True, False, True): (1, 32, 3, 4), (384, 384, 2048, 32, 32, False, True, True): (3, 64, 2, 4), (384, 384, 2048, 32, 32, True, False, True): (1, 64, 3, 4), (384, 384, 2048, 64, 64, False, True, True): (4, 32, 4, 8), (384, 384, 2048, 64, 64, True, False, True): (5, 32, 4, 8), (384, 384, 4096, 16, 16, False, True, True): (1, 32, 3, 4), (384, 384, 4096, 16, 16, True, False, True): (3, 32, 3, 4), (384, 384, 4096, 32, 32, False, True, True): (2, 64, 3, 4), (384, 384, 4096, 32, 32, True, False, True): (2, 64, 3, 4), (384, 384, 4096, 64, 64, False, True, True): (2, 64, 3, 8), (384, 384, 4096, 64, 64, True, False, True): (2, 64, 3, 8), (384, 384, 8192, 16, 16, False, True, True): (1, 128, 3, 2), (384, 384, 8192, 16, 16, True, False, True): (1, 128, 3, 2), (384, 384, 8192, 32, 32, False, True, True): (1, 128, 3, 4), (384, 384, 8192, 32, 32, True, False, True): (1, 128, 3, 4), (384, 384, 8192, 64, 64, False, True, True): (3, 128, 3, 4), (384, 384, 8192, 64, 64, True, False, True): (2, 128, 3, 4), (384, 384, 16384, 16, 16, False, True, True): (1, 256, 3, 2), (384, 384, 16384, 16, 16, True, False, True): (1, 64, 3, 4), (384, 384, 16384, 32, 32, False, True, True): (2, 256, 3, 4), (384, 384, 16384, 32, 32, True, False, True): (4, 256, 3, 4), (384, 384, 16384, 64, 64, False, True, True): (2, 256, 3, 4), (384, 384, 16384, 64, 64, True, False, True): (1, 256, 3, 4), (384, 384, 32768, 16, 16, False, True, True): (1, 128, 3, 4), (384, 384, 32768, 16, 16, True, False, True): (1, 128, 3, 4), (384, 384, 32768, 32, 32, False, True, True): (1, 512, 3, 4), (384, 384, 32768, 32, 32, True, False, True): (1, 512, 2, 4), (384, 384, 32768, 64, 64, False, True, True): (1, 512, 3, 4), (384, 384, 32768, 64, 64, True, False, True): (1, 512, 3, 4), (384, 384, 65536, 16, 16, False, True, True): (1, 256, 3, 4), (384, 384, 65536, 16, 16, True, False, True): (1, 256, 3, 4), (384, 384, 65536, 32, 32, False, True, True): (1, 1024, 3, 4), (384, 384, 65536, 32, 32, True, False, True): (1, 512, 3, 4), (384, 384, 65536, 64, 64, False, True, True): (1, 1024, 3, 4), (384, 384, 65536, 64, 64, True, False, True): (1, 1024, 3, 4), (384, 384, 131072, 16, 16, False, True, True): (1, 512, 3, 4), (384, 384, 131072, 16, 16, True, False, True): (1, 512, 3, 4), (384, 384, 131072, 32, 32, False, True, True): (1, 1024, 3, 4), (384, 384, 131072, 32, 32, True, False, True): (1, 1024, 3, 4), (384, 384, 131072, 64, 64, False, True, True): (1, 2048, 3, 4), (384, 384, 131072, 64, 64, True, False, True): (1, 2048, 
3, 4), (512, 512, 256, 16, 16, False, True, True): (1, 8, 4, 4), (512, 512, 256, 16, 16, True, False, True): (1, 8, 3, 2), (512, 512, 256, 32, 32, False, True, True): (4, 8, 3, 4), (512, 512, 256, 32, 32, True, False, True): (4, 8, 3, 4), (512, 512, 256, 64, 64, False, True, True): (3, 4, 3, 8), (512, 512, 256, 64, 64, True, False, True): (5, 4, 3, 8), (512, 512, 256, 128, 128, False, True, True): (1, 2, 1, 32), (512, 512, 256, 128, 128, True, False, True): (3, 2, 1, 32), (512, 512, 512, 16, 16, False, True, True): (2, 16, 3, 2), (512, 512, 512, 16, 16, True, False, True): (1, 8, 4, 4), (512, 512, 512, 32, 32, False, True, True): (3, 16, 3, 4), (512, 512, 512, 32, 32, True, False, True): (5, 16, 2, 4), (512, 512, 512, 64, 64, False, True, True): (1, 8, 3, 8), (512, 512, 512, 64, 64, True, False, True): (3, 8, 3, 8), (512, 512, 512, 128, 128, False, True, True): (1, 4, 1, 32), (512, 512, 512, 128, 128, True, False, True): (3, 4, 1, 16), (512, 512, 1024, 16, 16, False, True, True): (1, 16, 3, 4), (512, 512, 1024, 16, 16, True, False, True): (3, 16, 3, 4), (512, 512, 1024, 32, 32, False, True, True): (3, 32, 3, 4), (512, 512, 1024, 32, 32, True, False, True): (3, 32, 2, 4), (512, 512, 1024, 64, 64, False, True, True): (1, 16, 3, 8), (512, 512, 1024, 64, 64, True, False, True): (4, 16, 3, 8), (512, 512, 1024, 128, 128, False, True, True): (4, 8, 1, 32), (512, 512, 1024, 128, 128, True, False, True): (4, 8, 1, 32), (512, 512, 2048, 16, 16, False, True, True): (5, 16, 3, 4), (512, 512, 2048, 16, 16, True, False, True): (5, 16, 3, 4), (512, 512, 2048, 32, 32, False, True, True): (1, 32, 3, 4), (512, 512, 2048, 32, 32, True, False, True): (1, 32, 4, 4), (512, 512, 2048, 64, 64, False, True, True): (4, 32, 3, 8), (512, 512, 2048, 64, 64, True, False, True): (4, 32, 3, 8), (512, 512, 2048, 128, 128, False, True, True): (3, 16, 1, 32), (512, 512, 2048, 128, 128, True, False, True): (3, 16, 1, 32), (512, 512, 4096, 16, 16, False, True, True): (4, 32, 3, 4), (512, 512, 4096, 16, 16, True, False, True): (4, 64, 3, 2), (512, 512, 4096, 32, 32, False, True, True): (3, 64, 3, 4), (512, 512, 4096, 32, 32, True, False, True): (3, 64, 3, 4), (512, 512, 4096, 64, 64, False, True, True): (4, 64, 2, 4), (512, 512, 4096, 64, 64, True, False, True): (1, 64, 2, 4), (512, 512, 4096, 128, 128, False, True, True): (1, 32, 1, 32), (512, 512, 4096, 128, 128, True, False, True): (1, 32, 1, 32), (512, 512, 8192, 16, 16, False, True, True): (1, 64, 3, 4), (512, 512, 8192, 16, 16, True, False, True): (4, 64, 3, 4), (512, 512, 8192, 32, 32, False, True, True): (2, 128, 3, 4), (512, 512, 8192, 32, 32, True, False, True): (3, 128, 3, 4), (512, 512, 8192, 64, 64, False, True, True): (1, 128, 2, 4), (512, 512, 8192, 64, 64, True, False, True): (1, 128, 2, 4), (512, 512, 8192, 128, 128, False, True, True): (6, 64, 1, 32), (512, 512, 8192, 128, 128, True, False, True): (4, 64, 1, 32), (512, 512, 16384, 16, 16, False, True, True): (1, 128, 3, 4), (512, 512, 16384, 16, 16, True, False, True): (1, 64, 3, 4), (512, 512, 16384, 32, 32, False, True, True): (1, 256, 3, 4), (512, 512, 16384, 32, 32, True, False, True): (4, 256, 3, 4), (512, 512, 16384, 64, 64, False, True, True): (1, 256, 2, 4), (512, 512, 16384, 64, 64, True, False, True): (1, 256, 2, 4), (512, 512, 16384, 128, 128, False, True, True): (1, 128, 1, 32), (512, 512, 16384, 128, 128, True, False, True): (2, 128, 1, 32), (512, 512, 32768, 16, 16, False, True, True): (1, 256, 3, 4), (512, 512, 32768, 16, 16, True, False, True): (1, 128, 3, 4), (512, 512, 32768, 32, 32, False, 
True, True): (1, 512, 3, 4), (512, 512, 32768, 32, 32, True, False, True): (1, 512, 3, 4), (512, 512, 32768, 64, 64, False, True, True): (1, 512, 2, 4), (512, 512, 32768, 64, 64, True, False, True): (2, 512, 2, 4), (512, 512, 32768, 128, 128, False, True, True): (1, 256, 1, 32), (512, 512, 32768, 128, 128, True, False, True): (2, 256, 1, 32), (512, 512, 65536, 16, 16, False, True, True): (1, 512, 3, 4), (512, 512, 65536, 16, 16, True, False, True): (1, 256, 3, 4), (512, 512, 65536, 32, 32, False, True, True): (1, 1024, 3, 4), (512, 512, 65536, 32, 32, True, False, True): (1, 1024, 3, 4), (512, 512, 65536, 64, 64, False, True, True): (1, 1024, 2, 4), (512, 512, 65536, 64, 64, True, False, True): (1, 1024, 2, 4), (512, 512, 65536, 128, 128, False, True, True): (1, 512, 1, 32), (512, 512, 65536, 128, 128, True, False, True): (4, 512, 1, 32), (512, 512, 131072, 16, 16, False, True, True): (1, 512, 3, 4), (512, 512, 131072, 16, 16, True, False, True): (1, 512, 3, 4), (512, 512, 131072, 32, 32, False, True, True): (1, 2048, 3, 4), (512, 512, 131072, 32, 32, True, False, True): (1, 2048, 3, 4), (512, 512, 131072, 64, 64, False, True, True): (1, 2048, 2, 4), (512, 512, 131072, 64, 64, True, False, True): (1, 2048, 2, 4), (512, 512, 131072, 128, 128, False, True, True): (1, 1024, 1, 32), (512, 512, 131072, 128, 128, True, False, True): (2, 1024, 1, 32), (768, 768, 256, 16, 16, False, True, True): (1, 4, 5, 4), (768, 768, 256, 16, 16, True, False, True): (3, 8, 3, 2), (768, 768, 256, 32, 32, False, True, True): (2, 4, 3, 4), (768, 768, 256, 32, 32, True, False, True): (3, 8, 4, 4), (768, 768, 256, 64, 64, False, True, True): (1, 4, 4, 8), (768, 768, 256, 64, 64, True, False, True): (3, 4, 3, 8), (768, 768, 256, 128, 128, False, True, True): (3, 2, 1, 32), (768, 768, 256, 128, 128, True, False, True): (2, 2, 2, 32), (768, 768, 512, 16, 16, False, True, True): (2, 4, 5, 4), (768, 768, 512, 16, 16, True, False, True): (2, 4, 4, 4), (768, 768, 512, 32, 32, False, True, True): (1, 8, 3, 4), (768, 768, 512, 32, 32, True, False, True): (3, 8, 4, 4), (768, 768, 512, 64, 64, False, True, True): (2, 8, 3, 8), (768, 768, 512, 64, 64, True, False, True): (5, 8, 3, 8), (768, 768, 512, 128, 128, False, True, True): (2, 4, 1, 32), (768, 768, 512, 128, 128, True, False, True): (2, 4, 2, 32), (768, 768, 1024, 16, 16, False, True, True): (2, 16, 4, 2), (768, 768, 1024, 16, 16, True, False, True): (4, 32, 3, 1), (768, 768, 1024, 32, 32, False, True, True): (1, 32, 2, 4), (768, 768, 1024, 32, 32, True, False, True): (1, 16, 5, 4), (768, 768, 1024, 64, 64, False, True, True): (2, 16, 3, 8), (768, 768, 1024, 64, 64, True, False, True): (2, 16, 3, 8), (768, 768, 1024, 128, 128, False, True, True): (1, 8, 2, 32), (768, 768, 1024, 128, 128, True, False, True): (1, 8, 1, 32), (768, 768, 2048, 16, 16, False, True, True): (1, 16, 3, 4), (768, 768, 2048, 16, 16, True, False, True): (1, 16, 3, 4), (768, 768, 2048, 32, 32, False, True, True): (1, 32, 3, 4), (768, 768, 2048, 32, 32, True, False, True): (5, 32, 3, 4), (768, 768, 2048, 64, 64, False, True, True): (1, 32, 3, 8), (768, 768, 2048, 64, 64, True, False, True): (1, 32, 3, 4), (768, 768, 2048, 128, 128, False, True, True): (3, 16, 1, 32), (768, 768, 2048, 128, 128, True, False, True): (4, 16, 1, 32), (768, 768, 4096, 16, 16, False, True, True): (1, 64, 3, 2), (768, 768, 4096, 16, 16, True, False, True): (3, 64, 3, 2), (768, 768, 4096, 32, 32, False, True, True): (1, 64, 3, 4), (768, 768, 4096, 32, 32, True, False, True): (1, 64, 3, 4), (768, 768, 4096, 64, 64, False, True, 
True): (4, 64, 3, 4), (768, 768, 4096, 64, 64, True, False, True): (4, 64, 3, 4), (768, 768, 4096, 128, 128, False, True, True): (1, 32, 1, 32), (768, 768, 4096, 128, 128, True, False, True): (1, 32, 2, 32), (768, 768, 8192, 16, 16, False, True, True): (1, 128, 3, 2), (768, 768, 8192, 16, 16, True, False, True): (2, 32, 3, 4), (768, 768, 8192, 32, 32, False, True, True): (2, 128, 3, 4), (768, 768, 8192, 32, 32, True, False, True): (1, 128, 2, 4), (768, 768, 8192, 64, 64, False, True, True): (1, 128, 3, 4), (768, 768, 8192, 64, 64, True, False, True): (2, 128, 3, 4), (768, 768, 8192, 128, 128, False, True, True): (1, 64, 1, 32), (768, 768, 8192, 128, 128, True, False, True): (2, 64, 1, 32), (768, 768, 16384, 16, 16, False, True, True): (3, 64, 3, 4), (768, 768, 16384, 16, 16, True, False, True): (1, 64, 3, 4), (768, 768, 16384, 32, 32, False, True, True): (2, 256, 3, 4), (768, 768, 16384, 32, 32, True, False, True): (4, 256, 2, 4), (768, 768, 16384, 64, 64, False, True, True): (1, 256, 3, 4), (768, 768, 16384, 64, 64, True, False, True): (1, 256, 3, 4), (768, 768, 16384, 128, 128, False, True, True): (1, 128, 1, 32), (768, 768, 16384, 128, 128, True, False, True): (2, 128, 1, 32), (768, 768, 32768, 16, 16, False, True, True): (1, 128, 3, 4), (768, 768, 32768, 16, 16, True, False, True): (2, 128, 3, 4), (768, 768, 32768, 32, 32, False, True, True): (2, 256, 3, 4), (768, 768, 32768, 32, 32, True, False, True): (1, 256, 3, 4), (768, 768, 32768, 64, 64, False, True, True): (1, 512, 3, 4), (768, 768, 32768, 64, 64, True, False, True): (1, 512, 3, 4), (768, 768, 32768, 128, 128, False, True, True): (1, 256, 1, 32), (768, 768, 32768, 128, 128, True, False, True): (1, 256, 1, 32), (768, 768, 50432, 16, 16, False, True, True): (1, 197, 3, 4), (768, 768, 50432, 32, 32, False, True, True): (1, 394, 3, 4), (768, 768, 50432, 64, 64, False, True, True): (1, 788, 3, 4), (768, 768, 50432, 128, 128, False, True, True): (3, 394, 1, 32), (768, 768, 65536, 16, 16, False, True, True): (1, 256, 3, 4), (768, 768, 65536, 16, 16, True, False, True): (1, 256, 3, 4), (768, 768, 65536, 32, 32, False, True, True): (1, 512, 3, 4), (768, 768, 65536, 32, 32, True, False, True): (1, 512, 3, 4), (768, 768, 65536, 64, 64, False, True, True): (1, 1024, 3, 4), (768, 768, 65536, 64, 64, True, False, True): (1, 1024, 3, 4), (768, 768, 65536, 128, 128, False, True, True): (1, 512, 1, 32), (768, 768, 65536, 128, 128, True, False, True): (1, 512, 1, 32), (768, 768, 131072, 16, 16, False, True, True): (1, 512, 3, 4), (768, 768, 131072, 16, 16, True, False, True): (1, 512, 3, 4), (768, 768, 131072, 32, 32, False, True, True): (1, 1024, 3, 4), (768, 768, 131072, 32, 32, True, False, True): (1, 1024, 3, 4), (768, 768, 131072, 64, 64, False, True, True): (1, 2048, 3, 4), (768, 768, 131072, 64, 64, True, False, True): (1, 2048, 3, 4), (768, 768, 131072, 128, 128, False, True, True): (1, 1024, 1, 32), (768, 768, 131072, 128, 128, True, False, True): (1, 1024, 1, 32), (768, 3072, 256, 16, 16, False, True, True): (1, 2, 4, 4), (768, 3072, 256, 16, 16, True, False, True): (1, 4, 3, 4), (768, 3072, 256, 32, 32, False, True, True): (1, 4, 3, 4), (768, 3072, 256, 32, 32, True, False, True): (3, 4, 3, 4), (768, 3072, 256, 64, 64, False, True, True): (1, 4, 3, 8), (768, 3072, 256, 64, 64, True, False, True): (1, 4, 3, 8), (768, 3072, 256, 128, 128, False, True, True): (2, 2, 2, 32), (768, 3072, 256, 128, 128, True, False, True): (2, 2, 1, 32), (768, 3072, 512, 16, 16, False, True, True): (2, 4, 3, 4), (768, 3072, 512, 16, 16, True, False, True): 
(1, 8, 3, 2), (768, 3072, 512, 32, 32, False, True, True): (3, 8, 4, 4), (768, 3072, 512, 32, 32, True, False, True): (3, 8, 3, 4), (768, 3072, 512, 64, 64, False, True, True): (1, 8, 4, 8), (768, 3072, 512, 64, 64, True, False, True): (1, 8, 3, 8), (768, 3072, 512, 128, 128, False, True, True): (1, 4, 2, 32), (768, 3072, 512, 128, 128, True, False, True): (1, 4, 1, 32), (768, 3072, 1024, 16, 16, False, True, True): (4, 16, 3, 2), (768, 3072, 1024, 16, 16, True, False, True): (4, 16, 3, 2), (768, 3072, 1024, 32, 32, False, True, True): (4, 16, 5, 4), (768, 3072, 1024, 32, 32, True, False, True): (4, 16, 5, 4), (768, 3072, 1024, 64, 64, False, True, True): (2, 16, 3, 8), (768, 3072, 1024, 64, 64, True, False, True): (2, 16, 3, 8), (768, 3072, 1024, 128, 128, False, True, True): (1, 8, 1, 32), (768, 3072, 1024, 128, 128, True, False, True): (1, 8, 1, 32), (768, 3072, 2048, 16, 16, False, True, True): (2, 16, 3, 4), (768, 3072, 2048, 16, 16, True, False, True): (2, 16, 3, 4), (768, 3072, 2048, 32, 32, False, True, True): (4, 32, 5, 4), (768, 3072, 2048, 32, 32, True, False, True): (2, 32, 3, 4), (768, 3072, 2048, 64, 64, False, True, True): (2, 32, 3, 8), (768, 3072, 2048, 64, 64, True, False, True): (2, 32, 3, 8), (768, 3072, 2048, 128, 128, False, True, True): (1, 16, 1, 32), (768, 3072, 2048, 128, 128, True, False, True): (2, 16, 1, 32), (768, 3072, 4096, 16, 16, False, True, True): (1, 32, 5, 4), (768, 3072, 4096, 16, 16, True, False, True): (3, 64, 3, 2), (768, 3072, 4096, 32, 32, False, True, True): (5, 64, 3, 4), (768, 3072, 4096, 32, 32, True, False, True): (5, 64, 3, 4), (768, 3072, 4096, 64, 64, False, True, True): (1, 64, 3, 8), (768, 3072, 4096, 64, 64, True, False, True): (5, 64, 3, 4), (768, 3072, 4096, 128, 128, False, True, True): (1, 32, 1, 32), (768, 3072, 4096, 128, 128, True, False, True): (1, 32, 1, 32), (768, 3072, 8192, 16, 16, False, True, True): (1, 128, 3, 2), (768, 3072, 8192, 16, 16, True, False, True): (1, 128, 3, 2), (768, 3072, 8192, 32, 32, False, True, True): (1, 128, 3, 4), (768, 3072, 8192, 32, 32, True, False, True): (1, 64, 3, 4), (768, 3072, 8192, 64, 64, False, True, True): (3, 128, 3, 4), (768, 3072, 8192, 64, 64, True, False, True): (3, 128, 3, 4), (768, 3072, 8192, 128, 128, False, True, True): (4, 64, 2, 32), (768, 3072, 8192, 128, 128, True, False, True): (2, 64, 1, 32), (768, 3072, 16384, 16, 16, False, True, True): (1, 256, 2, 2), (768, 3072, 16384, 16, 16, True, False, True): (1, 64, 3, 4), (768, 3072, 16384, 32, 32, False, True, True): (8, 128, 3, 4), (768, 3072, 16384, 32, 32, True, False, True): (1, 128, 3, 4), (768, 3072, 16384, 64, 64, False, True, True): (1, 256, 3, 4), (768, 3072, 16384, 64, 64, True, False, True): (3, 256, 3, 4), (768, 3072, 16384, 128, 128, False, True, True): (3, 128, 1, 32), (768, 3072, 16384, 128, 128, True, False, True): (2, 128, 2, 32), (768, 3072, 32768, 16, 16, False, True, True): (1, 512, 3, 1), (768, 3072, 32768, 16, 16, True, False, True): (1, 128, 3, 4), (768, 3072, 32768, 32, 32, False, True, True): (1, 256, 3, 4), (768, 3072, 32768, 32, 32, True, False, True): (1, 256, 3, 4), (768, 3072, 32768, 64, 64, False, True, True): (2, 512, 3, 4), (768, 3072, 32768, 64, 64, True, False, True): (1, 512, 3, 4), (768, 3072, 32768, 128, 128, False, True, True): (1, 256, 1, 32), (768, 3072, 32768, 128, 128, True, False, True): (2, 256, 2, 32), (768, 3072, 50432, 16, 16, False, True, True): (1, 197, 3, 4), (768, 3072, 50432, 16, 16, True, False, True): (1, 197, 3, 4), (768, 3072, 50432, 32, 32, False, True, True): (1, 788, 
2, 4), (768, 3072, 50432, 32, 32, True, False, True): (1, 394, 3, 4), (768, 3072, 50432, 64, 64, False, True, True): (1, 788, 3, 4), (768, 3072, 50432, 64, 64, True, False, True): (2, 788, 3, 4), (768, 3072, 50432, 128, 128, False, True, True): (1, 394, 1, 32), (768, 3072, 50432, 128, 128, True, False, True): (2, 394, 2, 32), (768, 3072, 65536, 16, 16, False, True, True): (1, 1024, 3, 1), (768, 3072, 65536, 16, 16, True, False, True): (1, 256, 3, 4), (768, 3072, 65536, 32, 32, False, True, True): (1, 512, 3, 4), (768, 3072, 65536, 32, 32, True, False, True): (1, 512, 3, 4), (768, 3072, 65536, 64, 64, False, True, True): (2, 1024, 3, 4), (768, 3072, 65536, 64, 64, True, False, True): (5, 1024, 3, 4), (768, 3072, 65536, 128, 128, False, True, True): (1, 512, 1, 32), (768, 3072, 65536, 128, 128, True, False, True): (2, 512, 2, 32), (768, 3072, 131072, 16, 16, False, True, True): (1, 2048, 3, 1), (768, 3072, 131072, 16, 16, True, False, True): (1, 512, 3, 4), (768, 3072, 131072, 32, 32, False, True, True): (1, 1024, 3, 4), (768, 3072, 131072, 32, 32, True, False, True): (1, 1024, 3, 4), (768, 3072, 131072, 64, 64, False, True, True): (1, 2048, 3, 4), (768, 3072, 131072, 64, 64, True, False, True): (2, 2048, 3, 4), (768, 3072, 131072, 128, 128, False, True, True): (1, 1024, 1, 32), (768, 3072, 131072, 128, 128, True, False, True): (1, 1024, 2, 32), (1024, 1024, 256, 16, 16, False, True, True): (4, 8, 3, 2), (1024, 1024, 256, 16, 16, True, False, True): (2, 8, 3, 2), (1024, 1024, 256, 32, 32, False, True, True): (1, 8, 3, 4), (1024, 1024, 256, 32, 32, True, False, True): (1, 8, 3, 4), (1024, 1024, 256, 64, 64, False, True, True): (1, 4, 3, 8), (1024, 1024, 256, 64, 64, True, False, True): (2, 4, 3, 8), (1024, 1024, 256, 128, 128, False, True, True): (3, 2, 1, 32), (1024, 1024, 256, 128, 128, True, False, True): (5, 2, 1, 32), (1024, 1024, 512, 16, 16, False, True, True): (3, 8, 3, 4), (1024, 1024, 512, 16, 16, True, False, True): (3, 8, 3, 4), (1024, 1024, 512, 32, 32, False, True, True): (1, 16, 3, 4), (1024, 1024, 512, 32, 32, True, False, True): (3, 16, 3, 4), (1024, 1024, 512, 64, 64, False, True, True): (6, 8, 3, 8), (1024, 1024, 512, 64, 64, True, False, True): (8, 8, 3, 8), (1024, 1024, 512, 128, 128, False, True, True): (1, 4, 1, 32), (1024, 1024, 512, 128, 128, True, False, True): (1, 4, 1, 32), (1024, 1024, 1024, 16, 16, False, True, True): (4, 8, 3, 4), (1024, 1024, 1024, 16, 16, True, False, True): (1, 8, 3, 4), (1024, 1024, 1024, 32, 32, False, True, True): (4, 16, 4, 4), (1024, 1024, 1024, 32, 32, True, False, True): (5, 16, 3, 4), (1024, 1024, 1024, 64, 64, False, True, True): (6, 16, 3, 8), (1024, 1024, 1024, 64, 64, True, False, True): (3, 16, 2, 4), (1024, 1024, 1024, 128, 128, False, True, True): (1, 8, 1, 32), (1024, 1024, 1024, 128, 128, True, False, True): (2, 8, 1, 32), (1024, 1024, 2048, 16, 16, False, True, True): (4, 16, 3, 4), (1024, 1024, 2048, 16, 16, True, False, True): (1, 16, 3, 4), (1024, 1024, 2048, 32, 32, False, True, True): (1, 32, 3, 4), (1024, 1024, 2048, 32, 32, True, False, True): (2, 32, 3, 4), (1024, 1024, 2048, 64, 64, False, True, True): (4, 32, 2, 4), (1024, 1024, 2048, 64, 64, True, False, True): (8, 32, 2, 4), (1024, 1024, 2048, 128, 128, False, True, True): (1, 16, 1, 32), (1024, 1024, 2048, 128, 128, True, False, True): (1, 16, 1, 32), (1024, 1024, 4096, 16, 16, False, True, True): (4, 32, 3, 4), (1024, 1024, 4096, 16, 16, True, False, True): (1, 64, 3, 2), (1024, 1024, 4096, 32, 32, False, True, True): (1, 64, 3, 4), (1024, 1024, 4096, 32, 32, 
True, False, True): (1, 64, 3, 4), (1024, 1024, 4096, 64, 64, False, True, True): (2, 64, 2, 4), (1024, 1024, 4096, 64, 64, True, False, True): (2, 64, 2, 4), (1024, 1024, 4096, 128, 128, False, True, True): (1, 32, 1, 32), (1024, 1024, 4096, 128, 128, True, False, True): (4, 32, 1, 32), (1024, 1024, 8192, 16, 16, False, True, True): (1, 128, 3, 1), (1024, 1024, 8192, 16, 16, True, False, True): (1, 128, 3, 1), (1024, 1024, 8192, 32, 32, False, True, True): (1, 128, 3, 4), (1024, 1024, 8192, 32, 32, True, False, True): (1, 128, 3, 4), (1024, 1024, 8192, 64, 64, False, True, True): (2, 128, 2, 4), (1024, 1024, 8192, 64, 64, True, False, True): (2, 128, 2, 4), (1024, 1024, 8192, 128, 128, False, True, True): (1, 64, 1, 32), (1024, 1024, 8192, 128, 128, True, False, True): (4, 64, 1, 32), (1024, 1024, 16384, 16, 16, False, True, True): (1, 128, 2, 4), (1024, 1024, 16384, 16, 16, True, False, True): (4, 256, 3, 1), (1024, 1024, 16384, 32, 32, False, True, True): (1, 256, 3, 4), (1024, 1024, 16384, 32, 32, True, False, True): (1, 256, 3, 4), (1024, 1024, 16384, 64, 64, False, True, True): (1, 256, 2, 4), (1024, 1024, 16384, 64, 64, True, False, True): (1, 256, 2, 4), (1024, 1024, 16384, 128, 128, False, True, True): (1, 128, 1, 32), (1024, 1024, 16384, 128, 128, True, False, True): (4, 128, 1, 32), (1024, 1024, 32768, 16, 16, False, True, True): (1, 256, 2, 4), (1024, 1024, 32768, 16, 16, True, False, True): (4, 512, 3, 1), (1024, 1024, 32768, 32, 32, False, True, True): (1, 512, 3, 4), (1024, 1024, 32768, 32, 32, True, False, True): (1, 512, 3, 4), (1024, 1024, 32768, 64, 64, False, True, True): (1, 512, 2, 4), (1024, 1024, 32768, 64, 64, True, False, True): (1, 512, 2, 4), (1024, 1024, 32768, 128, 128, False, True, True): (1, 256, 1, 32), (1024, 1024, 32768, 128, 128, True, False, True): (1, 256, 1, 32), (1024, 1024, 65536, 16, 16, False, True, True): (1, 512, 2, 4), (1024, 1024, 65536, 16, 16, True, False, True): (1, 1024, 3, 1), (1024, 1024, 65536, 32, 32, False, True, True): (1, 1024, 3, 4), (1024, 1024, 65536, 32, 32, True, False, True): (1, 512, 3, 4), (1024, 1024, 65536, 64, 64, False, True, True): (1, 1024, 2, 4), (1024, 1024, 65536, 64, 64, True, False, True): (1, 1024, 2, 4), (1024, 1024, 65536, 128, 128, False, True, True): (1, 512, 1, 32), (1024, 1024, 65536, 128, 128, True, False, True): (1, 512, 1, 32), (1024, 1024, 131072, 16, 16, False, True, True): (4, 2048, 3, 1), (1024, 1024, 131072, 16, 16, True, False, True): (4, 2048, 3, 1), (1024, 1024, 131072, 32, 32, False, True, True): (1, 2048, 3, 4), (1024, 1024, 131072, 32, 32, True, False, True): (1, 1024, 3, 4), (1024, 1024, 131072, 64, 64, False, True, True): (1, 2048, 2, 4), (1024, 1024, 131072, 64, 64, True, False, True): (1, 2048, 2, 4), (1024, 1024, 131072, 128, 128, False, True, True): (1, 1024, 1, 32), (1024, 1024, 131072, 128, 128, True, False, True): (1, 1024, 1, 32), (1280, 5120, 65792, 16, 16, False, True, True): (1, 1028, 3, 1), (1280, 5120, 65792, 16, 16, True, False, True): (1, 257, 3, 4), (1280, 5120, 65792, 32, 32, False, True, True): (1, 514, 3, 4), (1280, 5120, 65792, 32, 32, True, False, True): (1, 514, 3, 4), (1280, 5120, 65792, 64, 64, False, True, True): (2, 1028, 3, 4), (1280, 5120, 65792, 64, 64, True, False, True): (1, 1028, 3, 4), (1280, 5120, 65792, 128, 128, False, True, True): (2, 514, 2, 32), (1280, 5120, 65792, 128, 128, True, False, True): (1, 514, 2, 32), (1536, 1536, 256, 16, 16, False, True, True): (5, 4, 3, 2), (1536, 1536, 256, 16, 16, True, False, True): (2, 2, 3, 4), (1536, 1536, 256, 32, 
32, False, True, True): (1, 8, 2, 4), (1536, 1536, 256, 32, 32, True, False, True): (2, 4, 3, 4), (1536, 1536, 256, 64, 64, False, True, True): (1, 4, 3, 8), (1536, 1536, 256, 64, 64, True, False, True): (2, 4, 3, 8), (1536, 1536, 256, 128, 128, False, True, True): (1, 2, 1, 32), (1536, 1536, 256, 128, 128, True, False, True): (2, 2, 2, 32), (1536, 1536, 512, 16, 16, False, True, True): (1, 8, 3, 2), (1536, 1536, 512, 16, 16, True, False, True): (1, 8, 3, 2), (1536, 1536, 512, 32, 32, False, True, True): (1, 16, 3, 4), (1536, 1536, 512, 32, 32, True, False, True): (1, 16, 3, 4), (1536, 1536, 512, 64, 64, False, True, True): (3, 8, 3, 8), (1536, 1536, 512, 64, 64, True, False, True): (3, 8, 3, 8), (1536, 1536, 512, 128, 128, False, True, True): (1, 4, 1, 32), (1536, 1536, 512, 128, 128, True, False, True): (2, 4, 2, 32), (1536, 1536, 1024, 16, 16, False, True, True): (2, 8, 3, 4), (1536, 1536, 1024, 16, 16, True, False, True): (2, 8, 3, 4), (1536, 1536, 1024, 32, 32, False, True, True): (1, 16, 3, 4), (1536, 1536, 1024, 32, 32, True, False, True): (1, 16, 3, 4), (1536, 1536, 1024, 64, 64, False, True, True): (2, 16, 3, 8), (1536, 1536, 1024, 64, 64, True, False, True): (2, 16, 3, 8), (1536, 1536, 1024, 128, 128, False, True, True): (3, 8, 1, 32), (1536, 1536, 1024, 128, 128, True, False, True): (1, 8, 2, 32), (1536, 1536, 2048, 16, 16, False, True, True): (1, 32, 3, 2), (1536, 1536, 2048, 16, 16, True, False, True): (1, 32, 3, 2), (1536, 1536, 2048, 32, 32, False, True, True): (3, 32, 2, 4), (1536, 1536, 2048, 32, 32, True, False, True): (4, 32, 3, 4), (1536, 1536, 2048, 64, 64, False, True, True): (1, 32, 3, 4), (1536, 1536, 2048, 64, 64, True, False, True): (1, 32, 3, 4), (1536, 1536, 2048, 128, 128, False, True, True): (1, 16, 1, 32), (1536, 1536, 2048, 128, 128, True, False, True): (2, 16, 1, 32), (1536, 1536, 4096, 16, 16, False, True, True): (1, 64, 3, 2), (1536, 1536, 4096, 16, 16, True, False, True): (1, 16, 3, 4), (1536, 1536, 4096, 32, 32, False, True, True): (1, 64, 2, 4), (1536, 1536, 4096, 32, 32, True, False, True): (1, 64, 2, 4), (1536, 1536, 4096, 64, 64, False, True, True): (1, 64, 3, 4), (1536, 1536, 4096, 64, 64, True, False, True): (1, 64, 3, 4), (1536, 1536, 4096, 128, 128, False, True, True): (1, 32, 1, 32), (1536, 1536, 4096, 128, 128, True, False, True): (4, 32, 2, 32), (1536, 1536, 8192, 16, 16, False, True, True): (1, 32, 3, 4), (1536, 1536, 8192, 16, 16, True, False, True): (5, 32, 3, 4), (1536, 1536, 8192, 32, 32, False, True, True): (1, 128, 2, 4), (1536, 1536, 8192, 32, 32, True, False, True): (1, 128, 2, 4), (1536, 1536, 8192, 64, 64, False, True, True): (1, 128, 3, 4), (1536, 1536, 8192, 64, 64, True, False, True): (1, 128, 3, 4), (1536, 1536, 8192, 128, 128, False, True, True): (1, 64, 1, 32), (1536, 1536, 8192, 128, 128, True, False, True): (4, 64, 2, 32), (1536, 1536, 16384, 16, 16, False, True, True): (1, 64, 3, 4), (1536, 1536, 16384, 16, 16, True, False, True): (1, 64, 3, 4), (1536, 1536, 16384, 32, 32, False, True, True): (1, 256, 2, 4), (1536, 1536, 16384, 32, 32, True, False, True): (1, 128, 3, 4), (1536, 1536, 16384, 64, 64, False, True, True): (1, 256, 3, 4), (1536, 1536, 16384, 64, 64, True, False, True): (3, 256, 3, 4), (1536, 1536, 16384, 128, 128, False, True, True): (1, 128, 1, 32), (1536, 1536, 16384, 128, 128, True, False, True): (4, 128, 2, 32), (1536, 1536, 32768, 16, 16, False, True, True): (1, 128, 3, 4), (1536, 1536, 32768, 16, 16, True, False, True): (1, 128, 3, 4), (1536, 1536, 32768, 32, 32, False, True, True): (1, 256, 3, 4), (1536, 
1536, 32768, 32, 32, True, False, True): (1, 256, 3, 4), (1536, 1536, 32768, 64, 64, False, True, True): (1, 512, 3, 4), (1536, 1536, 32768, 64, 64, True, False, True): (1, 512, 3, 4), (1536, 1536, 32768, 128, 128, False, True, True): (1, 256, 1, 32), (1536, 1536, 32768, 128, 128, True, False, True): (4, 256, 2, 32), (1536, 1536, 65536, 16, 16, False, True, True): (5, 256, 3, 4), (1536, 1536, 65536, 16, 16, True, False, True): (1, 256, 3, 4), (1536, 1536, 65536, 32, 32, False, True, True): (1, 512, 3, 4), (1536, 1536, 65536, 32, 32, True, False, True): (1, 512, 3, 4), (1536, 1536, 65536, 64, 64, False, True, True): (1, 1024, 3, 4), (1536, 1536, 65536, 64, 64, True, False, True): (1, 1024, 3, 4), (1536, 1536, 65536, 128, 128, False, True, True): (1, 512, 1, 32), (1536, 1536, 65536, 128, 128, True, False, True): (4, 512, 2, 32), (1536, 1536, 131072, 16, 16, False, True, True): (3, 512, 3, 4), (1536, 1536, 131072, 16, 16, True, False, True): (1, 512, 3, 4), (1536, 1536, 131072, 32, 32, False, True, True): (1, 1024, 3, 4), (1536, 1536, 131072, 32, 32, True, False, True): (1, 1024, 3, 4), (1536, 1536, 131072, 64, 64, False, True, True): (1, 2048, 3, 4), (1536, 1536, 131072, 64, 64, True, False, True): (1, 2048, 3, 4), (1536, 1536, 131072, 128, 128, False, True, True): (1, 1024, 1, 32), (1536, 1536, 131072, 128, 128, True, False, True): (4, 1024, 2, 32), (2048, 2048, 256, 16, 16, False, True, True): (1, 4, 3, 4), (2048, 2048, 256, 16, 16, True, False, True): (1, 4, 3, 4), (2048, 2048, 256, 32, 32, False, True, True): (3, 8, 3, 4), (2048, 2048, 256, 32, 32, True, False, True): (3, 8, 3, 4), (2048, 2048, 256, 64, 64, False, True, True): (4, 4, 4, 8), (2048, 2048, 256, 64, 64, True, False, True): (8, 4, 4, 8), (2048, 2048, 256, 128, 128, False, True, True): (3, 2, 1, 32), (2048, 2048, 256, 128, 128, True, False, True): (3, 2, 1, 32), (2048, 2048, 512, 16, 16, False, True, True): (4, 8, 3, 2), (2048, 2048, 512, 16, 16, True, False, True): (4, 8, 3, 2), (2048, 2048, 512, 32, 32, False, True, True): (3, 8, 3, 4), (2048, 2048, 512, 32, 32, True, False, True): (1, 16, 2, 4), (2048, 2048, 512, 64, 64, False, True, True): (4, 8, 2, 4), (2048, 2048, 512, 64, 64, True, False, True): (4, 8, 2, 4), (2048, 2048, 512, 128, 128, False, True, True): (1, 4, 1, 32), (2048, 2048, 512, 128, 128, True, False, True): (4, 4, 1, 32), (2048, 2048, 1024, 16, 16, False, True, True): (4, 8, 3, 4), (2048, 2048, 1024, 16, 16, True, False, True): (4, 8, 3, 4), (2048, 2048, 1024, 32, 32, False, True, True): (4, 16, 3, 4), (2048, 2048, 1024, 32, 32, True, False, True): (1, 16, 3, 4), (2048, 2048, 1024, 64, 64, False, True, True): (2, 16, 2, 4), (2048, 2048, 1024, 64, 64, True, False, True): (2, 16, 2, 4), (2048, 2048, 1024, 128, 128, False, True, True): (8, 8, 1, 32), (2048, 2048, 1024, 128, 128, True, False, True): (4, 8, 1, 32), (2048, 2048, 2048, 16, 16, False, True, True): (4, 32, 3, 1), (2048, 2048, 2048, 16, 16, True, False, True): (3, 32, 3, 2), (2048, 2048, 2048, 32, 32, False, True, True): (1, 32, 3, 4), (2048, 2048, 2048, 32, 32, True, False, True): (1, 32, 3, 4), (2048, 2048, 2048, 64, 64, False, True, True): (2, 32, 2, 4), (2048, 2048, 2048, 64, 64, True, False, True): (2, 32, 2, 4), (2048, 2048, 2048, 128, 128, False, True, True): (6, 16, 1, 32), (2048, 2048, 2048, 128, 128, True, False, True): (4, 16, 1, 32), (2048, 2048, 4096, 16, 16, False, True, True): (4, 64, 3, 1), (2048, 2048, 4096, 16, 16, True, False, True): (1, 64, 3, 1), (2048, 2048, 4096, 32, 32, False, True, True): (1, 64, 3, 4), (2048, 2048, 4096, 32, 
32, True, False, True): (4, 64, 3, 4), (2048, 2048, 4096, 64, 64, False, True, True): (2, 64, 2, 4), (2048, 2048, 4096, 64, 64, True, False, True): (2, 64, 2, 4), (2048, 2048, 4096, 128, 128, False, True, True): (4, 32, 1, 32), (2048, 2048, 4096, 128, 128, True, False, True): (4, 32, 1, 32), (2048, 2048, 8192, 16, 16, False, True, True): (4, 128, 3, 1), (2048, 2048, 8192, 16, 16, True, False, True): (1, 128, 3, 1), (2048, 2048, 8192, 32, 32, False, True, True): (4, 128, 3, 4), (2048, 2048, 8192, 32, 32, True, False, True): (4, 64, 3, 4), (2048, 2048, 8192, 64, 64, False, True, True): (1, 128, 2, 4), (2048, 2048, 8192, 64, 64, True, False, True): (2, 128, 2, 4), (2048, 2048, 8192, 128, 128, False, True, True): (1, 64, 1, 32), (2048, 2048, 8192, 128, 128, True, False, True): (4, 64, 1, 32), (2048, 2048, 16384, 16, 16, False, True, True): (4, 256, 3, 1), (2048, 2048, 16384, 16, 16, True, False, True): (1, 256, 3, 1), (2048, 2048, 16384, 32, 32, False, True, True): (1, 256, 3, 4), (2048, 2048, 16384, 32, 32, True, False, True): (1, 128, 3, 4), (2048, 2048, 16384, 64, 64, False, True, True): (1, 256, 2, 4), (2048, 2048, 16384, 64, 64, True, False, True): (1, 256, 2, 4), (2048, 2048, 16384, 128, 128, False, True, True): (1, 128, 1, 32), (2048, 2048, 16384, 128, 128, True, False, True): (4, 128, 1, 32), (2048, 2048, 32768, 16, 16, False, True, True): (8, 512, 3, 1), (2048, 2048, 32768, 16, 16, True, False, True): (1, 512, 3, 1), (2048, 2048, 32768, 32, 32, False, True, True): (1, 512, 3, 4), (2048, 2048, 32768, 32, 32, True, False, True): (1, 256, 3, 4), (2048, 2048, 32768, 64, 64, False, True, True): (1, 512, 2, 4), (2048, 2048, 32768, 64, 64, True, False, True): (1, 512, 2, 4), (2048, 2048, 32768, 128, 128, False, True, True): (1, 256, 1, 32), (2048, 2048, 32768, 128, 128, True, False, True): (4, 256, 1, 32), (2048, 2048, 65536, 16, 16, False, True, True): (4, 1024, 3, 1), (2048, 2048, 65536, 16, 16, True, False, True): (1, 1024, 3, 1), (2048, 2048, 65536, 32, 32, False, True, True): (1, 1024, 3, 4), (2048, 2048, 65536, 32, 32, True, False, True): (1, 512, 3, 4), (2048, 2048, 65536, 64, 64, False, True, True): (1, 1024, 2, 4), (2048, 2048, 65536, 64, 64, True, False, True): (1, 1024, 2, 4), (2048, 2048, 65536, 128, 128, False, True, True): (1, 512, 1, 32), (2048, 2048, 65536, 128, 128, True, False, True): (4, 512, 1, 32), (2048, 2048, 131072, 16, 16, False, True, True): (4, 2048, 3, 1), (2048, 2048, 131072, 16, 16, True, False, True): (1, 2048, 3, 1), (2048, 2048, 131072, 32, 32, False, True, True): (1, 2048, 3, 4), (2048, 2048, 131072, 32, 32, True, False, True): (1, 1024, 3, 4), (2048, 2048, 131072, 64, 64, False, True, True): (1, 2048, 2, 4), (2048, 2048, 131072, 64, 64, True, False, True): (1, 2048, 2, 4), (2048, 2048, 131072, 128, 128, False, True, True): (1, 1024, 1, 32), (2048, 2048, 131072, 128, 128, True, False, True): (4, 1024, 1, 32), (3072, 768, 256, 16, 16, False, True, True): (4, 4, 3, 2), (3072, 768, 256, 16, 16, True, False, True): (1, 2, 6, 4), (3072, 768, 256, 32, 32, False, True, True): (1, 4, 6, 4), (3072, 768, 256, 32, 32, True, False, True): (5, 4, 3, 4), (3072, 768, 256, 64, 64, False, True, True): (4, 4, 3, 8), (3072, 768, 256, 64, 64, True, False, True): (4, 4, 3, 8), (3072, 768, 256, 128, 128, False, True, True): (1, 2, 1, 32), (3072, 768, 256, 128, 128, True, False, True): (5, 2, 1, 32), (3072, 768, 512, 16, 16, False, True, True): (4, 4, 3, 4), (3072, 768, 512, 16, 16, True, False, True): (1, 4, 3, 4), (3072, 768, 512, 32, 32, False, True, True): (3, 8, 3, 4), (3072, 
768, 512, 32, 32, True, False, True): (3, 8, 3, 4), (3072, 768, 512, 64, 64, False, True, True): (2, 8, 3, 8), (3072, 768, 512, 64, 64, True, False, True): (2, 8, 3, 8), (3072, 768, 512, 128, 128, False, True, True): (1, 4, 2, 32), (3072, 768, 512, 128, 128, True, False, True): (1, 4, 1, 32), (3072, 768, 1024, 16, 16, False, True, True): (1, 16, 3, 2), (3072, 768, 1024, 16, 16, True, False, True): (3, 16, 3, 2), (3072, 768, 1024, 32, 32, False, True, True): (1, 16, 3, 4), (3072, 768, 1024, 32, 32, True, False, True): (3, 16, 3, 4), (3072, 768, 1024, 64, 64, False, True, True): (4, 16, 3, 8), (3072, 768, 1024, 64, 64, True, False, True): (4, 16, 3, 4), (3072, 768, 1024, 128, 128, False, True, True): (5, 8, 1, 32), (3072, 768, 1024, 128, 128, True, False, True): (5, 8, 1, 32), (3072, 768, 2048, 16, 16, False, True, True): (4, 32, 3, 2), (3072, 768, 2048, 16, 16, True, False, True): (1, 32, 3, 2), (3072, 768, 2048, 32, 32, False, True, True): (1, 32, 3, 4), (3072, 768, 2048, 32, 32, True, False, True): (1, 32, 2, 4), (3072, 768, 2048, 64, 64, False, True, True): (2, 32, 3, 4), (3072, 768, 2048, 64, 64, True, False, True): (4, 32, 3, 4), (3072, 768, 2048, 128, 128, False, True, True): (1, 16, 1, 32), (3072, 768, 2048, 128, 128, True, False, True): (1, 16, 1, 32), (3072, 768, 4096, 16, 16, False, True, True): (3, 64, 3, 2), (3072, 768, 4096, 16, 16, True, False, True): (1, 64, 3, 2), (3072, 768, 4096, 32, 32, False, True, True): (1, 64, 3, 4), (3072, 768, 4096, 32, 32, True, False, True): (1, 32, 3, 4), (3072, 768, 4096, 64, 64, False, True, True): (2, 64, 3, 4), (3072, 768, 4096, 64, 64, True, False, True): (2, 64, 3, 4), (3072, 768, 4096, 128, 128, False, True, True): (1, 32, 1, 32), (3072, 768, 4096, 128, 128, True, False, True): (1, 32, 1, 32), (3072, 768, 8192, 16, 16, False, True, True): (4, 128, 3, 1), (3072, 768, 8192, 16, 16, True, False, True): (1, 32, 3, 4), (3072, 768, 8192, 32, 32, False, True, True): (1, 64, 3, 4), (3072, 768, 8192, 32, 32, True, False, True): (1, 64, 3, 4), (3072, 768, 8192, 64, 64, False, True, True): (2, 128, 3, 4), (3072, 768, 8192, 64, 64, True, False, True): (2, 128, 3, 4), (3072, 768, 8192, 128, 128, False, True, True): (1, 64, 1, 32), (3072, 768, 8192, 128, 128, True, False, True): (1, 64, 1, 32), (3072, 768, 16384, 16, 16, False, True, True): (4, 256, 3, 1), (3072, 768, 16384, 16, 16, True, False, True): (1, 64, 3, 4), (3072, 768, 16384, 32, 32, False, True, True): (1, 128, 3, 4), (3072, 768, 16384, 32, 32, True, False, True): (1, 128, 3, 4), (3072, 768, 16384, 64, 64, False, True, True): (2, 256, 3, 4), (3072, 768, 16384, 64, 64, True, False, True): (2, 256, 3, 4), (3072, 768, 16384, 128, 128, False, True, True): (1, 128, 1, 32), (3072, 768, 16384, 128, 128, True, False, True): (1, 128, 1, 32), (3072, 768, 32768, 16, 16, False, True, True): (4, 512, 3, 1), (3072, 768, 32768, 16, 16, True, False, True): (1, 128, 3, 4), (3072, 768, 32768, 32, 32, False, True, True): (1, 256, 3, 4), (3072, 768, 32768, 32, 32, True, False, True): (1, 256, 3, 4), (3072, 768, 32768, 64, 64, False, True, True): (2, 512, 3, 4), (3072, 768, 32768, 64, 64, True, False, True): (2, 512, 3, 4), (3072, 768, 32768, 128, 128, False, True, True): (1, 256, 1, 32), (3072, 768, 32768, 128, 128, True, False, True): (1, 256, 1, 32), (3072, 768, 50432, 16, 16, False, True, True): (4, 788, 3, 1), (3072, 768, 50432, 16, 16, True, False, True): (1, 197, 3, 4), (3072, 768, 50432, 32, 32, False, True, True): (1, 394, 3, 4), (3072, 768, 50432, 32, 32, True, False, True): (1, 394, 3, 4), (3072, 768, 
50432, 64, 64, False, True, True): (1, 788, 3, 4), (3072, 768, 50432, 64, 64, True, False, True): (2, 788, 3, 4), (3072, 768, 50432, 128, 128, False, True, True): (1, 394, 1, 32), (3072, 768, 50432, 128, 128, True, False, True): (1, 394, 1, 32), (3072, 768, 65536, 16, 16, False, True, True): (4, 1024, 3, 1), (3072, 768, 65536, 16, 16, True, False, True): (1, 256, 3, 4), (3072, 768, 65536, 32, 32, False, True, True): (1, 512, 3, 4), (3072, 768, 65536, 32, 32, True, False, True): (1, 512, 3, 4), (3072, 768, 65536, 64, 64, False, True, True): (2, 1024, 3, 4), (3072, 768, 65536, 64, 64, True, False, True): (2, 1024, 3, 4), (3072, 768, 65536, 128, 128, False, True, True): (1, 512, 1, 32), (3072, 768, 65536, 128, 128, True, False, True): (1, 512, 1, 32), (3072, 768, 131072, 16, 16, False, True, True): (4, 2048, 3, 1), (3072, 768, 131072, 16, 16, True, False, True): (1, 512, 3, 4), (3072, 768, 131072, 32, 32, False, True, True): (1, 1024, 3, 4), (3072, 768, 131072, 32, 32, True, False, True): (1, 1024, 3, 4), (3072, 768, 131072, 64, 64, False, True, True): (2, 2048, 3, 4), (3072, 768, 131072, 64, 64, True, False, True): (2, 2048, 3, 4), (3072, 768, 131072, 128, 128, False, True, True): (1, 1024, 1, 32), (3072, 768, 131072, 128, 128, True, False, True): (1, 1024, 1, 32), (3072, 3072, 256, 16, 16, False, True, True): (1, 4, 5, 2), (3072, 3072, 256, 16, 16, True, False, True): (1, 4, 3, 2), (3072, 3072, 256, 32, 32, False, True, True): (1, 4, 4, 4), (3072, 3072, 256, 32, 32, True, False, True): (1, 4, 3, 4), (3072, 3072, 256, 64, 64, False, True, True): (2, 4, 3, 8), (3072, 3072, 256, 64, 64, True, False, True): (2, 4, 3, 8), (3072, 3072, 256, 128, 128, False, True, True): (6, 2, 1, 32), (3072, 3072, 256, 128, 128, True, False, True): (8, 2, 2, 32), (3072, 3072, 512, 16, 16, False, True, True): (2, 4, 3, 4), (3072, 3072, 512, 16, 16, True, False, True): (2, 4, 3, 4), (3072, 3072, 512, 32, 32, False, True, True): (2, 8, 3, 4), (3072, 3072, 512, 32, 32, True, False, True): (2, 8, 3, 4), (3072, 3072, 512, 64, 64, False, True, True): (2, 8, 3, 8), (3072, 3072, 512, 64, 64, True, False, True): (2, 8, 3, 8), (3072, 3072, 512, 128, 128, False, True, True): (5, 4, 1, 32), (3072, 3072, 512, 128, 128, True, False, True): (5, 4, 2, 32), (3072, 3072, 1024, 16, 16, False, True, True): (1, 16, 3, 2), (3072, 3072, 1024, 16, 16, True, False, True): (1, 16, 3, 2), (3072, 3072, 1024, 32, 32, False, True, True): (2, 16, 3, 4), (3072, 3072, 1024, 32, 32, True, False, True): (1, 16, 3, 4), (3072, 3072, 1024, 64, 64, False, True, True): (1, 16, 3, 4), (3072, 3072, 1024, 64, 64, True, False, True): (1, 16, 3, 4), (3072, 3072, 1024, 128, 128, False, True, True): (1, 8, 1, 32), (3072, 3072, 1024, 128, 128, True, False, True): (3, 8, 2, 32), (3072, 3072, 2048, 16, 16, False, True, True): (1, 32, 3, 2), (3072, 3072, 2048, 16, 16, True, False, True): (1, 16, 2, 4), (3072, 3072, 2048, 32, 32, False, True, True): (1, 32, 2, 4), (3072, 3072, 2048, 32, 32, True, False, True): (1, 32, 3, 4), (3072, 3072, 2048, 64, 64, False, True, True): (1, 32, 3, 4), (3072, 3072, 2048, 64, 64, True, False, True): (1, 32, 3, 4), (3072, 3072, 2048, 128, 128, False, True, True): (1, 16, 1, 32), (3072, 3072, 2048, 128, 128, True, False, True): (4, 16, 2, 32), (3072, 3072, 4096, 16, 16, False, True, True): (2, 16, 3, 4), (3072, 3072, 4096, 16, 16, True, False, True): (2, 16, 3, 4), (3072, 3072, 4096, 32, 32, False, True, True): (1, 64, 2, 4), (3072, 3072, 4096, 32, 32, True, False, True): (1, 32, 3, 4), (3072, 3072, 4096, 64, 64, False, True, True): 
(1, 64, 3, 4), (3072, 3072, 4096, 64, 64, True, False, True): (1, 64, 3, 4), (3072, 3072, 4096, 128, 128, False, True, True): (1, 32, 1, 32), (3072, 3072, 4096, 128, 128, True, False, True): (2, 32, 2, 32), (3072, 3072, 8192, 16, 16, False, True, True): (2, 32, 3, 4), (3072, 3072, 8192, 16, 16, True, False, True): (2, 32, 3, 4), (3072, 3072, 8192, 32, 32, False, True, True): (1, 64, 3, 4), (3072, 3072, 8192, 32, 32, True, False, True): (1, 64, 3, 4), (3072, 3072, 8192, 64, 64, False, True, True): (1, 128, 3, 4), (3072, 3072, 8192, 64, 64, True, False, True): (1, 128, 3, 4), (3072, 3072, 8192, 128, 128, False, True, True): (1, 64, 1, 32), (3072, 3072, 8192, 128, 128, True, False, True): (4, 64, 2, 32), (3072, 3072, 16384, 16, 16, False, True, True): (2, 64, 3, 4), (3072, 3072, 16384, 16, 16, True, False, True): (1, 64, 3, 4), (3072, 3072, 16384, 32, 32, False, True, True): (1, 128, 3, 4), (3072, 3072, 16384, 32, 32, True, False, True): (1, 128, 3, 4), (3072, 3072, 16384, 64, 64, False, True, True): (1, 256, 3, 4), (3072, 3072, 16384, 64, 64, True, False, True): (1, 256, 3, 4), (3072, 3072, 16384, 128, 128, False, True, True): (1, 128, 1, 32), (3072, 3072, 16384, 128, 128, True, False, True): (4, 128, 2, 32), (3072, 3072, 32768, 16, 16, False, True, True): (3, 128, 3, 4), (3072, 3072, 32768, 16, 16, True, False, True): (1, 128, 3, 4), (3072, 3072, 32768, 32, 32, False, True, True): (1, 256, 3, 4), (3072, 3072, 32768, 32, 32, True, False, True): (1, 256, 3, 4), (3072, 3072, 32768, 64, 64, False, True, True): (1, 512, 3, 4), (3072, 3072, 32768, 64, 64, True, False, True): (1, 512, 3, 4), (3072, 3072, 32768, 128, 128, False, True, True): (1, 256, 1, 32), (3072, 3072, 32768, 128, 128, True, False, True): (4, 256, 2, 32), (3072, 3072, 65536, 16, 16, False, True, True): (5, 256, 3, 4), (3072, 3072, 65536, 16, 16, True, False, True): (2, 256, 3, 4), (3072, 3072, 65536, 32, 32, False, True, True): (1, 512, 3, 4), (3072, 3072, 65536, 32, 32, True, False, True): (1, 512, 3, 4), (3072, 3072, 65536, 64, 64, False, True, True): (1, 1024, 3, 4), (3072, 3072, 65536, 64, 64, True, False, True): (1, 1024, 3, 4), (3072, 3072, 65536, 128, 128, False, True, True): (1, 512, 1, 32), (3072, 3072, 65536, 128, 128, True, False, True): (4, 512, 2, 32), (3072, 3072, 131072, 16, 16, False, True, True): (5, 512, 3, 4), (3072, 3072, 131072, 16, 16, True, False, True): (1, 512, 3, 4), (3072, 3072, 131072, 32, 32, False, True, True): (1, 1024, 3, 4), (3072, 3072, 131072, 32, 32, True, False, True): (1, 1024, 3, 4), (3072, 3072, 131072, 64, 64, False, True, True): (1, 2048, 3, 4), (3072, 3072, 131072, 64, 64, True, False, True): (1, 2048, 3, 4), (3072, 3072, 131072, 128, 128, False, True, True): (1, 1024, 1, 32), (3072, 3072, 131072, 128, 128, True, False, True): (4, 1024, 2, 32), (4096, 4096, 256, 16, 16, False, True, True): (1, 4, 3, 2), (4096, 4096, 256, 16, 16, True, False, True): (1, 2, 3, 4), (4096, 4096, 256, 32, 32, False, True, True): (4, 4, 4, 4), (4096, 4096, 256, 32, 32, True, False, True): (4, 4, 4, 4), (4096, 4096, 256, 64, 64, False, True, True): (1, 4, 3, 8), (4096, 4096, 256, 64, 64, True, False, True): (4, 4, 2, 4), (4096, 4096, 256, 128, 128, False, True, True): (1, 2, 1, 32), (4096, 4096, 256, 128, 128, True, False, True): (3, 2, 1, 32), (4096, 4096, 512, 16, 16, False, True, True): (1, 4, 3, 4), (4096, 4096, 512, 16, 16, True, False, True): (5, 8, 3, 2), (4096, 4096, 512, 32, 32, False, True, True): (4, 8, 3, 4), (4096, 4096, 512, 32, 32, True, False, True): (4, 8, 3, 4), (4096, 4096, 512, 64, 64, 
False, True, True): (1, 8, 2, 4), (4096, 4096, 512, 64, 64, True, False, True): (1, 8, 2, 4), (4096, 4096, 512, 128, 128, False, True, True): (4, 4, 1, 32), (4096, 4096, 512, 128, 128, True, False, True): (4, 4, 1, 32), (4096, 4096, 1024, 16, 16, False, True, True): (1, 8, 3, 4), (4096, 4096, 1024, 16, 16, True, False, True): (1, 8, 3, 4), (4096, 4096, 1024, 32, 32, False, True, True): (1, 16, 3, 4), (4096, 4096, 1024, 32, 32, True, False, True): (1, 16, 3, 4), (4096, 4096, 1024, 64, 64, False, True, True): (4, 16, 2, 4), (4096, 4096, 1024, 64, 64, True, False, True): (4, 16, 2, 4), (4096, 4096, 1024, 128, 128, False, True, True): (4, 8, 1, 32), (4096, 4096, 1024, 128, 128, True, False, True): (4, 8, 1, 32), (4096, 4096, 2048, 16, 16, False, True, True): (1, 32, 3, 1), (4096, 4096, 2048, 16, 16, True, False, True): (6, 8, 3, 4), (4096, 4096, 2048, 32, 32, False, True, True): (1, 32, 3, 4), (4096, 4096, 2048, 32, 32, True, False, True): (1, 32, 3, 4), (4096, 4096, 2048, 64, 64, False, True, True): (4, 32, 2, 4), (4096, 4096, 2048, 64, 64, True, False, True): (4, 32, 2, 4), (4096, 4096, 2048, 128, 128, False, True, True): (4, 16, 1, 32), (4096, 4096, 2048, 128, 128, True, False, True): (4, 16, 1, 32), (4096, 4096, 4096, 16, 16, False, True, True): (1, 16, 3, 4), (4096, 4096, 4096, 16, 16, True, False, True): (1, 64, 3, 1), (4096, 4096, 4096, 32, 32, False, True, True): (1, 64, 3, 4), (4096, 4096, 4096, 32, 32, True, False, True): (1, 32, 3, 4), (4096, 4096, 4096, 64, 64, False, True, True): (4, 64, 2, 4), (4096, 4096, 4096, 64, 64, True, False, True): (4, 64, 2, 4), (4096, 4096, 4096, 128, 128, False, True, True): (4, 32, 1, 32), (4096, 4096, 4096, 128, 128, True, False, True): (4, 32, 1, 32), (4096, 4096, 8192, 16, 16, False, True, True): (4, 128, 3, 1), (4096, 4096, 8192, 16, 16, True, False, True): (1, 128, 3, 1), (4096, 4096, 8192, 32, 32, False, True, True): (1, 128, 3, 4), (4096, 4096, 8192, 32, 32, True, False, True): (1, 64, 3, 4), (4096, 4096, 8192, 64, 64, False, True, True): (4, 128, 2, 4), (4096, 4096, 8192, 64, 64, True, False, True): (4, 128, 2, 4), (4096, 4096, 8192, 128, 128, False, True, True): (4, 64, 1, 32), (4096, 4096, 8192, 128, 128, True, False, True): (4, 64, 1, 32), (4096, 4096, 16384, 16, 16, False, True, True): (1, 64, 3, 4), (4096, 4096, 16384, 16, 16, True, False, True): (1, 256, 3, 1), (4096, 4096, 16384, 32, 32, False, True, True): (1, 256, 3, 4), (4096, 4096, 16384, 32, 32, True, False, True): (1, 128, 3, 4), (4096, 4096, 16384, 64, 64, False, True, True): (4, 256, 2, 4), (4096, 4096, 16384, 64, 64, True, False, True): (4, 256, 2, 4), (4096, 4096, 16384, 128, 128, False, True, True): (4, 128, 1, 32), (4096, 4096, 16384, 128, 128, True, False, True): (4, 128, 1, 32), (4096, 4096, 32768, 16, 16, False, True, True): (1, 128, 3, 4), (4096, 4096, 32768, 16, 16, True, False, True): (1, 512, 3, 1), (4096, 4096, 32768, 32, 32, False, True, True): (1, 512, 3, 4), (4096, 4096, 32768, 32, 32, True, False, True): (1, 256, 3, 4), (4096, 4096, 32768, 64, 64, False, True, True): (4, 512, 2, 4), (4096, 4096, 32768, 64, 64, True, False, True): (4, 512, 2, 4), (4096, 4096, 32768, 128, 128, False, True, True): (4, 256, 1, 32), (4096, 4096, 32768, 128, 128, True, False, True): (4, 256, 1, 32), (4096, 4096, 65536, 16, 16, False, True, True): (1, 256, 3, 4), (4096, 4096, 65536, 16, 16, True, False, True): (1, 1024, 3, 1), (4096, 4096, 65536, 32, 32, False, True, True): (1, 1024, 3, 4), (4096, 4096, 65536, 32, 32, True, False, True): (1, 512, 3, 4), (4096, 4096, 65536, 64, 64, 
False, True, True): (4, 1024, 2, 4), (4096, 4096, 65536, 64, 64, True, False, True): (2, 1024, 2, 4), (4096, 4096, 65536, 128, 128, False, True, True): (4, 512, 1, 32), (4096, 4096, 65536, 128, 128, True, False, True): (4, 512, 1, 32), (4096, 4096, 131072, 16, 16, False, True, True): (2, 2048, 3, 1), (4096, 4096, 131072, 16, 16, True, False, True): (1, 2048, 3, 1), (4096, 4096, 131072, 32, 32, False, True, True): (2, 2048, 3, 4), (4096, 4096, 131072, 32, 32, True, False, True): (1, 1024, 3, 4), (4096, 4096, 131072, 64, 64, False, True, True): (2, 2048, 2, 4), (4096, 4096, 131072, 64, 64, True, False, True): (2, 2048, 2, 4), (4096, 4096, 131072, 128, 128, False, True, True): (4, 1024, 1, 32), (4096, 4096, 131072, 128, 128, True, False, True): (4, 1024, 1, 32), (5120, 1280, 65792, 16, 16, False, True, True): (2, 1028, 3, 1), (5120, 1280, 65792, 16, 16, True, False, True): (1, 257, 3, 4), (5120, 1280, 65792, 32, 32, False, True, True): (1, 514, 3, 4), (5120, 1280, 65792, 32, 32, True, False, True): (1, 514, 3, 4), (5120, 1280, 65792, 64, 64, False, True, True): (1, 1028, 3, 4), (5120, 1280, 65792, 64, 64, True, False, True): (5, 1028, 3, 4), (5120, 1280, 65792, 128, 128, False, True, True): (1, 514, 1, 32), (5120, 1280, 65792, 128, 128, True, False, True): (4, 514, 2, 32), (6144, 6144, 256, 16, 16, False, True, True): (2, 2, 3, 4), (6144, 6144, 256, 16, 16, True, False, True): (2, 2, 3, 4), (6144, 6144, 256, 32, 32, False, True, True): (2, 4, 3, 4), (6144, 6144, 256, 32, 32, True, False, True): (2, 4, 3, 4), (6144, 6144, 256, 64, 64, False, True, True): (1, 4, 3, 4), (6144, 6144, 256, 64, 64, True, False, True): (1, 4, 3, 4), (6144, 6144, 256, 128, 128, False, True, True): (1, 2, 1, 32), (6144, 6144, 256, 128, 128, True, False, True): (5, 2, 2, 32), (6144, 6144, 512, 16, 16, False, True, True): (4, 8, 3, 2), (6144, 6144, 512, 16, 16, True, False, True): (4, 8, 3, 2), (6144, 6144, 512, 32, 32, False, True, True): (2, 8, 3, 4), (6144, 6144, 512, 32, 32, True, False, True): (2, 8, 3, 4), (6144, 6144, 512, 64, 64, False, True, True): (1, 8, 3, 4), (6144, 6144, 512, 64, 64, True, False, True): (1, 8, 3, 4), (6144, 6144, 512, 128, 128, False, True, True): (1, 4, 1, 32), (6144, 6144, 512, 128, 128, True, False, True): (4, 4, 2, 32), (6144, 6144, 1024, 16, 16, False, True, True): (4, 16, 3, 2), (6144, 6144, 1024, 16, 16, True, False, True): (4, 4, 3, 4), (6144, 6144, 1024, 32, 32, False, True, True): (1, 16, 3, 4), (6144, 6144, 1024, 32, 32, True, False, True): (1, 16, 3, 4), (6144, 6144, 1024, 64, 64, False, True, True): (1, 16, 3, 4), (6144, 6144, 1024, 64, 64, True, False, True): (1, 16, 3, 4), (6144, 6144, 1024, 128, 128, False, True, True): (1, 8, 1, 32), (6144, 6144, 1024, 128, 128, True, False, True): (4, 8, 2, 32), (6144, 6144, 2048, 16, 16, False, True, True): (1, 8, 3, 4), (6144, 6144, 2048, 16, 16, True, False, True): (4, 8, 3, 4), (6144, 6144, 2048, 32, 32, False, True, True): (1, 16, 3, 4), (6144, 6144, 2048, 32, 32, True, False, True): (1, 16, 3, 4), (6144, 6144, 2048, 64, 64, False, True, True): (1, 32, 3, 4), (6144, 6144, 2048, 64, 64, True, False, True): (3, 32, 3, 4), (6144, 6144, 2048, 128, 128, False, True, True): (1, 16, 1, 32), (6144, 6144, 2048, 128, 128, True, False, True): (1, 16, 2, 32), (6144, 6144, 4096, 16, 16, False, True, True): (3, 16, 3, 4), (6144, 6144, 4096, 16, 16, True, False, True): (4, 16, 3, 4), (6144, 6144, 4096, 32, 32, False, True, True): (1, 32, 3, 4), (6144, 6144, 4096, 32, 32, True, False, True): (1, 32, 3, 4), (6144, 6144, 4096, 64, 64, False, True, 
True): (1, 64, 3, 4), (6144, 6144, 4096, 64, 64, True, False, True): (1, 64, 3, 4), (6144, 6144, 4096, 128, 128, False, True, True): (1, 32, 1, 32), (6144, 6144, 4096, 128, 128, True, False, True): (4, 32, 2, 32), (6144, 6144, 8192, 16, 16, False, True, True): (1, 32, 3, 4), (6144, 6144, 8192, 16, 16, True, False, True): (4, 32, 3, 4), (6144, 6144, 8192, 32, 32, False, True, True): (1, 64, 3, 4), (6144, 6144, 8192, 32, 32, True, False, True): (1, 64, 3, 4), (6144, 6144, 8192, 64, 64, False, True, True): (1, 128, 3, 4), (6144, 6144, 8192, 64, 64, True, False, True): (1, 128, 3, 4), (6144, 6144, 8192, 128, 128, False, True, True): (1, 64, 1, 32), (6144, 6144, 8192, 128, 128, True, False, True): (4, 64, 2, 32), (6144, 6144, 16384, 16, 16, False, True, True): (1, 64, 3, 4), (6144, 6144, 16384, 16, 16, True, False, True): (4, 64, 3, 4), (6144, 6144, 16384, 32, 32, False, True, True): (1, 128, 3, 4), (6144, 6144, 16384, 32, 32, True, False, True): (1, 128, 3, 4), (6144, 6144, 16384, 64, 64, False, True, True): (1, 256, 3, 4), (6144, 6144, 16384, 64, 64, True, False, True): (1, 256, 3, 4), (6144, 6144, 16384, 128, 128, False, True, True): (1, 128, 1, 32), (6144, 6144, 16384, 128, 128, True, False, True): (4, 128, 2, 32), (6144, 6144, 32768, 16, 16, False, True, True): (1, 128, 3, 4), (6144, 6144, 32768, 16, 16, True, False, True): (4, 128, 3, 4), (6144, 6144, 32768, 32, 32, False, True, True): (1, 256, 3, 4), (6144, 6144, 32768, 32, 32, True, False, True): (1, 256, 3, 4), (6144, 6144, 32768, 64, 64, False, True, True): (1, 512, 3, 4), (6144, 6144, 32768, 64, 64, True, False, True): (1, 512, 3, 4), (6144, 6144, 32768, 128, 128, False, True, True): (1, 256, 1, 32), (6144, 6144, 32768, 128, 128, True, False, True): (4, 256, 2, 32), (6144, 6144, 65536, 16, 16, False, True, True): (1, 256, 3, 4), (6144, 6144, 65536, 16, 16, True, False, True): (2, 256, 3, 4), (6144, 6144, 65536, 32, 32, False, True, True): (1, 512, 3, 4), (6144, 6144, 65536, 32, 32, True, False, True): (1, 512, 3, 4), (6144, 6144, 65536, 64, 64, False, True, True): (1, 1024, 3, 4), (6144, 6144, 65536, 64, 64, True, False, True): (1, 1024, 3, 4), (6144, 6144, 65536, 128, 128, False, True, True): (1, 512, 1, 32), (6144, 6144, 65536, 128, 128, True, False, True): (4, 512, 2, 32), (6144, 6144, 131072, 16, 16, False, True, True): (1, 512, 3, 4), (6144, 6144, 131072, 16, 16, True, False, True): (2, 512, 3, 4), (6144, 6144, 131072, 32, 32, False, True, True): (1, 1024, 3, 4), (6144, 6144, 131072, 32, 32, True, False, True): (1, 1024, 3, 4), (6144, 6144, 131072, 64, 64, False, True, True): (1, 2048, 3, 4), (6144, 6144, 131072, 64, 64, True, False, True): (1, 2048, 3, 4), (6144, 6144, 131072, 128, 128, False, True, True): (1, 1024, 1, 32), (6144, 6144, 131072, 128, 128, True, False, True): (4, 1024, 2, 32), (8192, 8192, 256, 16, 16, False, True, True): (2, 2, 4, 4), (8192, 8192, 256, 16, 16, True, False, True): (1, 1, 3, 4), (8192, 8192, 256, 32, 32, False, True, True): (2, 4, 3, 4), (8192, 8192, 256, 32, 32, True, False, True): (2, 4, 3, 4), (8192, 8192, 256, 64, 64, False, True, True): (4, 4, 2, 4), (8192, 8192, 256, 64, 64, True, False, True): (4, 4, 2, 4), (8192, 8192, 256, 128, 128, False, True, True): (1, 2, 1, 32), (8192, 8192, 256, 128, 128, True, False, True): (4, 2, 1, 32), (8192, 8192, 512, 16, 16, False, True, True): (1, 4, 3, 4), (8192, 8192, 512, 16, 16, True, False, True): (3, 4, 3, 4), (8192, 8192, 512, 32, 32, False, True, True): (1, 8, 3, 4), (8192, 8192, 512, 32, 32, True, False, True): (6, 8, 3, 4), (8192, 8192, 512, 64, 
64, False, True, True): (4, 8, 2, 4), (8192, 8192, 512, 64, 64, True, False, True): (4, 8, 2, 4), (8192, 8192, 512, 128, 128, False, True, True): (4, 4, 1, 32), (8192, 8192, 512, 128, 128, True, False, True): (4, 4, 1, 32), (8192, 8192, 1024, 16, 16, False, True, True): (1, 4, 3, 4), (8192, 8192, 1024, 16, 16, True, False, True): (1, 32, 3, 1), (8192, 8192, 1024, 32, 32, False, True, True): (1, 16, 3, 4), (8192, 8192, 1024, 32, 32, True, False, True): (1, 16, 3, 4), (8192, 8192, 1024, 64, 64, False, True, True): (4, 16, 2, 4), (8192, 8192, 1024, 64, 64, True, False, True): (4, 16, 2, 4), (8192, 8192, 1024, 128, 128, False, True, True): (4, 8, 1, 32), (8192, 8192, 1024, 128, 128, True, False, True): (4, 8, 1, 32), (8192, 8192, 2048, 16, 16, False, True, True): (4, 8, 3, 4), (8192, 8192, 2048, 16, 16, True, False, True): (1, 32, 3, 1), (8192, 8192, 2048, 32, 32, False, True, True): (1, 32, 3, 4), (8192, 8192, 2048, 32, 32, True, False, True): (1, 16, 4, 4), (8192, 8192, 2048, 64, 64, False, True, True): (4, 32, 2, 4), (8192, 8192, 2048, 64, 64, True, False, True): (4, 32, 2, 4), (8192, 8192, 2048, 128, 128, False, True, True): (4, 16, 1, 32), (8192, 8192, 2048, 128, 128, True, False, True): (4, 16, 1, 32), (8192, 8192, 4096, 16, 16, False, True, True): (3, 16, 3, 4), (8192, 8192, 4096, 16, 16, True, False, True): (2, 64, 3, 1), (8192, 8192, 4096, 32, 32, False, True, True): (1, 64, 3, 4), (8192, 8192, 4096, 32, 32, True, False, True): (1, 32, 3, 4), (8192, 8192, 4096, 64, 64, False, True, True): (4, 64, 2, 4), (8192, 8192, 4096, 64, 64, True, False, True): (2, 64, 2, 4), (8192, 8192, 4096, 128, 128, False, True, True): (4, 32, 1, 32), (8192, 8192, 4096, 128, 128, True, False, True): (4, 32, 1, 32), (8192, 8192, 8192, 16, 16, False, True, True): (2, 128, 3, 1), (8192, 8192, 8192, 16, 16, True, False, True): (2, 128, 3, 1), (8192, 8192, 8192, 32, 32, False, True, True): (1, 128, 3, 4), (8192, 8192, 8192, 32, 32, True, False, True): (1, 64, 3, 4), (8192, 8192, 8192, 64, 64, False, True, True): (4, 128, 2, 4), (8192, 8192, 8192, 64, 64, True, False, True): (2, 128, 2, 4), (8192, 8192, 8192, 128, 128, False, True, True): (4, 64, 1, 32), (8192, 8192, 8192, 128, 128, True, False, True): (4, 64, 1, 32), (8192, 8192, 16384, 16, 16, False, True, True): (1, 64, 3, 4), (8192, 8192, 16384, 16, 16, True, False, True): (1, 256, 3, 1), (8192, 8192, 16384, 32, 32, False, True, True): (1, 256, 3, 4), (8192, 8192, 16384, 32, 32, True, False, True): (1, 128, 3, 4), (8192, 8192, 16384, 64, 64, False, True, True): (2, 256, 2, 4), (8192, 8192, 16384, 64, 64, True, False, True): (2, 256, 2, 4), (8192, 8192, 16384, 128, 128, False, True, True): (4, 128, 1, 32), (8192, 8192, 16384, 128, 128, True, False, True): (4, 128, 1, 32), (8192, 8192, 32768, 16, 16, False, True, True): (1, 512, 3, 1), (8192, 8192, 32768, 16, 16, True, False, True): (1, 512, 3, 1), (8192, 8192, 32768, 32, 32, False, True, True): (1, 512, 3, 4), (8192, 8192, 32768, 32, 32, True, False, True): (1, 256, 3, 4), (8192, 8192, 32768, 64, 64, False, True, True): (2, 512, 2, 4), (8192, 8192, 32768, 64, 64, True, False, True): (2, 512, 2, 4), (8192, 8192, 32768, 128, 128, False, True, True): (4, 256, 1, 32), (8192, 8192, 32768, 128, 128, True, False, True): (4, 256, 1, 32), (8192, 8192, 65536, 16, 16, False, True, True): (1, 256, 3, 4), (8192, 8192, 65536, 16, 16, True, False, True): (1, 1024, 3, 1), (8192, 8192, 65536, 32, 32, False, True, True): (1, 1024, 3, 4), (8192, 8192, 65536, 32, 32, True, False, True): (1, 512, 3, 4), (8192, 8192, 65536, 64, 64, 
False, True, True): (4, 1024, 2, 4), (8192, 8192, 65536, 64, 64, True, False, True): (2, 1024, 2, 4), (8192, 8192, 65536, 128, 128, False, True, True): (4, 512, 1, 32), (8192, 8192, 65536, 128, 128, True, False, True): (4, 512, 1, 32), (8192, 8192, 131072, 16, 16, False, True, True): (1, 2048, 3, 1), (8192, 8192, 131072, 16, 16, True, False, True): (2, 2048, 3, 1), (8192, 8192, 131072, 32, 32, False, True, True): (4, 2048, 3, 4), (8192, 8192, 131072, 32, 32, True, False, True): (1, 1024, 3, 4), (8192, 8192, 131072, 64, 64, False, True, True): (2, 2048, 2, 4), (8192, 8192, 131072, 64, 64, True, False, True): (2, 2048, 2, 4), (8192, 8192, 131072, 128, 128, False, True, True): (4, 1024, 1, 32), (8192, 8192, 131072, 128, 128, True, False, True): (4, 1024, 1, 32), (16384, 16384, 256, 16, 16, False, True, True): (1, 2, 3, 4), (16384, 16384, 256, 16, 16, True, False, True): (1, 2, 3, 4), (16384, 16384, 256, 32, 32, False, True, True): (1, 4, 3, 4), (16384, 16384, 256, 32, 32, True, False, True): (1, 4, 3, 4), (16384, 16384, 256, 64, 64, False, True, True): (2, 4, 2, 4), (16384, 16384, 256, 64, 64, True, False, True): (2, 4, 2, 4), (16384, 16384, 256, 128, 128, False, True, True): (2, 2, 1, 32), (16384, 16384, 256, 128, 128, True, False, True): (2, 2, 1, 32), (16384, 16384, 512, 16, 16, False, True, True): (1, 2, 3, 4), (16384, 16384, 512, 16, 16, True, False, True): (5, 2, 3, 4), (16384, 16384, 512, 32, 32, False, True, True): (1, 8, 3, 4), (16384, 16384, 512, 32, 32, True, False, True): (1, 4, 3, 4), (16384, 16384, 512, 64, 64, False, True, True): (4, 8, 2, 4), (16384, 16384, 512, 64, 64, True, False, True): (4, 8, 2, 4), (16384, 16384, 512, 128, 128, False, True, True): (4, 4, 1, 32), (16384, 16384, 512, 128, 128, True, False, True): (4, 4, 1, 32), (16384, 16384, 1024, 16, 16, False, True, True): (1, 4, 3, 4), (16384, 16384, 1024, 16, 16, True, False, True): (2, 16, 3, 1), (16384, 16384, 1024, 32, 32, False, True, True): (1, 16, 3, 4), (16384, 16384, 1024, 32, 32, True, False, True): (1, 8, 3, 4), (16384, 16384, 1024, 64, 64, False, True, True): (4, 16, 2, 4), (16384, 16384, 1024, 64, 64, True, False, True): (4, 16, 2, 4), (16384, 16384, 1024, 128, 128, False, True, True): (4, 8, 1, 32), (16384, 16384, 1024, 128, 128, True, False, True): (4, 8, 1, 32), (16384, 16384, 2048, 16, 16, False, True, True): (1, 8, 3, 4), (16384, 16384, 2048, 16, 16, True, False, True): (2, 32, 3, 1), (16384, 16384, 2048, 32, 32, False, True, True): (1, 32, 3, 4), (16384, 16384, 2048, 32, 32, True, False, True): (1, 16, 3, 4), (16384, 16384, 2048, 64, 64, False, True, True): (4, 32, 2, 4), (16384, 16384, 2048, 64, 64, True, False, True): (2, 32, 2, 4), (16384, 16384, 2048, 128, 128, False, True, True): (4, 16, 1, 32), (16384, 16384, 2048, 128, 128, True, False, True): (4, 16, 1, 32), (16384, 16384, 4096, 16, 16, False, True, True): (1, 16, 3, 4), (16384, 16384, 4096, 16, 16, True, False, True): (2, 64, 3, 1), (16384, 16384, 4096, 32, 32, False, True, True): (1, 64, 3, 4), (16384, 16384, 4096, 32, 32, True, False, True): (1, 32, 3, 4), (16384, 16384, 4096, 64, 64, False, True, True): (4, 64, 2, 4), (16384, 16384, 4096, 64, 64, True, False, True): (2, 64, 2, 4), (16384, 16384, 4096, 128, 128, False, True, True): (4, 32, 1, 32), (16384, 16384, 4096, 128, 128, True, False, True): (4, 32, 1, 32), (16384, 16384, 8192, 16, 16, False, True, True): (1, 128, 3, 1), (16384, 16384, 8192, 16, 16, True, False, True): (2, 128, 3, 1), (16384, 16384, 8192, 32, 32, False, True, True): (1, 128, 3, 4), (16384, 16384, 8192, 32, 32, True, 
False, True): (1, 64, 3, 4), (16384, 16384, 8192, 64, 64, False, True, True): (2, 128, 2, 4), (16384, 16384, 8192, 64, 64, True, False, True): (2, 128, 2, 4), (16384, 16384, 8192, 128, 128, False, True, True): (4, 64, 1, 32), (16384, 16384, 8192, 128, 128, True, False, True): (4, 64, 1, 32), (16384, 16384, 16384, 16, 16, False, True, True): (1, 64, 3, 4), (16384, 16384, 16384, 16, 16, True, False, True): (2, 256, 3, 1), (16384, 16384, 16384, 32, 32, False, True, True): (1, 256, 3, 4), (16384, 16384, 16384, 32, 32, True, False, True): (1, 128, 3, 4), (16384, 16384, 16384, 64, 64, False, True, True): (2, 256, 2, 4), (16384, 16384, 16384, 64, 64, True, False, True): (2, 256, 2, 4), (16384, 16384, 16384, 128, 128, False, True, True): (4, 128, 1, 32), (16384, 16384, 16384, 128, 128, True, False, True): (4, 128, 1, 32), (16384, 16384, 32768, 16, 16, False, True, True): (1, 512, 3, 1), (16384, 16384, 32768, 16, 16, True, False, True): (1, 128, 3, 4), (16384, 16384, 32768, 32, 32, False, True, True): (2, 512, 3, 4), (16384, 16384, 32768, 32, 32, True, False, True): (1, 256, 4, 4), (16384, 16384, 32768, 64, 64, False, True, True): (2, 512, 2, 4), (16384, 16384, 32768, 64, 64, True, False, True): (2, 512, 2, 4), (16384, 16384, 32768, 128, 128, False, True, True): (4, 256, 1, 32), (16384, 16384, 32768, 128, 128, True, False, True): (4, 256, 1, 32), (16384, 16384, 65536, 16, 16, False, True, True): (1, 256, 3, 4), (16384, 16384, 65536, 16, 16, True, False, True): (1, 1024, 3, 1), (16384, 16384, 65536, 32, 32, False, True, True): (1, 1024, 3, 4), (16384, 16384, 65536, 32, 32, True, False, True): (1, 512, 4, 4), (16384, 16384, 65536, 64, 64, False, True, True): (2, 1024, 2, 4), (16384, 16384, 65536, 64, 64, True, False, True): (2, 1024, 2, 4), (16384, 16384, 65536, 128, 128, False, True, True): (4, 512, 1, 32), (16384, 16384, 65536, 128, 128, True, False, True): (4, 512, 1, 32), (16384, 16384, 131072, 16, 16, False, True, True): (1, 1024, 4, 4), (16384, 16384, 131072, 16, 16, True, False, True): (2, 2048, 3, 1), (16384, 16384, 131072, 32, 32, False, True, True): (1, 1024, 2, 4), (16384, 16384, 131072, 32, 32, True, False, True): (1, 1024, 2, 4), (16384, 16384, 131072, 64, 64, False, True, True): (4, 2048, 2, 4), (16384, 16384, 131072, 64, 64, True, False, True): (2, 2048, 2, 4), (16384, 16384, 131072, 128, 128, False, True, True): (4, 1024, 1, 32), (16384, 16384, 131072, 128, 128, True, False, True): (4, 1024, 1, 32), }, ("bsr_dense_addmm", "NVIDIA A100-SXM4-80GB", (0, torch.float32, 0.56)): { (192, 192, 256, 64, 64, False, True, True): (1, 4, 3, 8), (192, 192, 256, 64, 64, True, False, True): (1, 4, 3, 8), (192, 192, 512, 64, 64, False, True, True): (2, 8, 3, 8), (192, 192, 512, 64, 64, True, False, True): (5, 8, 3, 8), (192, 192, 1024, 64, 64, False, True, True): (2, 16, 4, 8), (192, 192, 1024, 64, 64, True, False, True): (1, 16, 3, 8), (192, 192, 2048, 64, 64, False, True, True): (3, 32, 3, 8), (192, 192, 2048, 64, 64, True, False, True): (5, 32, 5, 8), (192, 192, 4096, 64, 64, False, True, True): (3, 64, 2, 8), (192, 192, 4096, 64, 64, True, False, True): (1, 64, 3, 8), (192, 192, 8192, 64, 64, False, True, True): (3, 128, 3, 8), (192, 192, 8192, 64, 64, True, False, True): (6, 128, 3, 4), (192, 192, 16384, 64, 64, False, True, True): (1, 256, 1, 8), (192, 192, 16384, 64, 64, True, False, True): (1, 256, 3, 4), (192, 192, 32768, 64, 64, False, True, True): (1, 512, 1, 8), (192, 192, 32768, 64, 64, True, False, True): (1, 512, 3, 4), (192, 192, 65536, 64, 64, False, True, True): (1, 1024, 1, 8), 
(192, 192, 65536, 64, 64, True, False, True): (1, 1024, 3, 4), (192, 192, 131072, 64, 64, False, True, True): (1, 2048, 1, 8), (192, 192, 131072, 64, 64, True, False, True): (3, 2048, 1, 4), (384, 384, 256, 128, 128, False, True, True): (1, 2, 1, 32), (384, 384, 256, 128, 128, True, False, True): (1, 2, 1, 32), (384, 384, 512, 128, 128, False, True, True): (1, 4, 1, 32), (384, 384, 512, 128, 128, True, False, True): (2, 4, 1, 32), (384, 384, 1024, 128, 128, False, True, True): (1, 8, 1, 32), (384, 384, 1024, 128, 128, True, False, True): (4, 8, 1, 32), (384, 384, 2048, 128, 128, False, True, True): (1, 16, 1, 32), (384, 384, 2048, 128, 128, True, False, True): (1, 16, 1, 32), (384, 384, 4096, 128, 128, False, True, True): (1, 32, 1, 32), (384, 384, 4096, 128, 128, True, False, True): (2, 32, 2, 32), (384, 384, 8192, 128, 128, False, True, True): (1, 64, 1, 32), (384, 384, 8192, 128, 128, True, False, True): (1, 64, 2, 32), (384, 384, 16384, 128, 128, False, True, True): (1, 128, 1, 32), (384, 384, 16384, 128, 128, True, False, True): (4, 128, 1, 32), (384, 384, 32768, 128, 128, False, True, True): (3, 256, 1, 32), (384, 384, 32768, 128, 128, True, False, True): (3, 256, 1, 32), (384, 384, 65536, 128, 128, False, True, True): (3, 512, 1, 32), (384, 384, 65536, 128, 128, True, False, True): (3, 512, 1, 32), (384, 384, 131072, 128, 128, False, True, True): (1, 1024, 1, 32), (384, 384, 131072, 128, 128, True, False, True): (3, 1024, 1, 32), }, ("bsr_dense_addmm", "NVIDIA A100-SXM4-80GB", (0, torch.int8, 0.5)): { (1280, 5120, 65792, 32, 32, False, True, True): (1, 1028, 1, 8), (1280, 5120, 65792, 32, 32, True, False, True): (1, 514, 3, 2), (1280, 5120, 65792, 64, 64, False, True, True): (2, 514, 1, 4), (1280, 5120, 65792, 64, 64, True, False, True): (1, 514, 3, 2), (1280, 5120, 65792, 128, 128, False, True, True): (2, 514, 1, 8), (1280, 5120, 65792, 128, 128, True, False, True): (1, 514, 2, 4), (1280, 5120, 65792, 256, 256, False, True, True): (1, 257, 1, 32), (1280, 5120, 65792, 256, 256, True, False, True): (1, 257, 1, 32), (5120, 1280, 65792, 32, 32, False, True, True): (3, 1028, 1, 8), (5120, 1280, 65792, 32, 32, True, False, True): (1, 514, 1, 2), (5120, 1280, 65792, 64, 64, False, True, True): (1, 514, 1, 4), (5120, 1280, 65792, 64, 64, True, False, True): (2, 514, 2, 2), (5120, 1280, 65792, 128, 128, False, True, True): (2, 514, 1, 8), (5120, 1280, 65792, 128, 128, True, False, True): (2, 514, 2, 4), (5120, 1280, 65792, 256, 256, False, True, True): (1, 257, 1, 32), (5120, 1280, 65792, 256, 256, True, False, True): (1, 257, 1, 32), }, ("scatter_mm", "NVIDIA A100-SXM4-80GB", (0, torch.bfloat16, 0.5)): { (256, 256, 256, 16, 16): (1, 1, 16, 16, 1, 2), (256, 256, 256, 32, 32): (1, 1, 16, 16, 1, 4), (256, 256, 256, 64, 64): (1, 1, 16, 16, 1, 1), (256, 256, 256, 128, 128): (2, 4, 16, 64, 1, 4), (256, 256, 512, 16, 16): (1, 1, 16, 16, 1, 4), (256, 256, 512, 32, 32): (1, 1, 16, 32, 1, 4), (256, 256, 512, 64, 64): (1, 1, 16, 32, 1, 1), (256, 256, 512, 128, 128): (1, 1, 32, 32, 1, 4), (256, 256, 1024, 16, 16): (1, 1, 16, 16, 1, 4), (256, 256, 1024, 32, 32): (1, 2, 16, 32, 1, 1), (256, 256, 1024, 64, 64): (1, 1, 32, 32, 1, 2), (256, 256, 1024, 128, 128): (1, 1, 32, 64, 1, 4), (256, 256, 2048, 16, 16): (1, 1, 16, 64, 1, 8), (256, 256, 2048, 32, 32): (2, 1, 32, 64, 1, 2), (256, 256, 2048, 64, 64): (1, 1, 32, 32, 1, 1), (256, 256, 2048, 128, 128): (1, 1, 64, 64, 1, 4), (256, 256, 4096, 16, 16): (1, 1, 16, 64, 1, 1), (256, 256, 4096, 32, 32): (2, 2, 32, 64, 1, 2), (256, 256, 4096, 64, 64): (1, 1, 32, 
128, 1, 4), (256, 256, 4096, 128, 128): (1, 1, 64, 64, 1, 4), (256, 256, 8192, 16, 16): (1, 2, 16, 64, 1, 2), (256, 256, 8192, 32, 32): (1, 1, 32, 64, 1, 2), (256, 256, 8192, 64, 64): (1, 1, 32, 64, 1, 2), (256, 256, 8192, 128, 128): (1, 1, 64, 64, 1, 4), (256, 256, 16384, 16, 16): (1, 1, 16, 64, 1, 2), (256, 256, 16384, 32, 32): (1, 1, 32, 64, 1, 2), (256, 256, 16384, 64, 64): (1, 1, 64, 64, 1, 2), (256, 256, 16384, 128, 128): (2, 16, 64, 64, 1, 4), (256, 256, 32768, 16, 16): (1, 1, 16, 128, 1, 2), (256, 256, 32768, 32, 32): (1, 1, 32, 64, 1, 2), (256, 256, 32768, 64, 64): (1, 1, 64, 64, 1, 2), (256, 256, 32768, 128, 128): (2, 32, 64, 64, 1, 4), (256, 256, 65536, 16, 16): (1, 1, 16, 64, 1, 1), (256, 256, 65536, 32, 32): (1, 1, 32, 64, 1, 2), (256, 256, 65536, 64, 64): (1, 1, 64, 32, 1, 1), (256, 256, 65536, 128, 128): (2, 32, 64, 64, 1, 4), (256, 256, 131072, 16, 16): (1, 1, 16, 64, 1, 1), (256, 256, 131072, 32, 32): (1, 1, 32, 64, 1, 2), (256, 256, 131072, 64, 64): (4, 1, 64, 32, 1, 1), (256, 256, 131072, 128, 128): (2, 64, 64, 64, 1, 4), (512, 512, 256, 16, 16): (1, 1, 16, 16, 1, 2), (512, 512, 256, 32, 32): (1, 1, 16, 32, 1, 1), (512, 512, 256, 64, 64): (1, 2, 16, 32, 1, 1), (512, 512, 256, 128, 128): (2, 16, 64, 16, 2, 4), (512, 512, 512, 16, 16): (1, 1, 16, 16, 1, 4), (512, 512, 512, 32, 32): (1, 1, 16, 32, 1, 1), (512, 512, 512, 64, 64): (1, 1, 32, 32, 1, 2), (512, 512, 512, 128, 128): (2, 8, 32, 64, 1, 4), (512, 512, 1024, 16, 16): (1, 1, 16, 64, 1, 8), (512, 512, 1024, 32, 32): (1, 1, 32, 32, 3, 1), (512, 512, 1024, 64, 64): (1, 4, 32, 64, 1, 2), (512, 512, 1024, 128, 128): (1, 4, 64, 64, 1, 4), (512, 512, 2048, 16, 16): (1, 1, 16, 64, 1, 2), (512, 512, 2048, 32, 32): (1, 1, 32, 64, 1, 2), (512, 512, 2048, 64, 64): (1, 1, 64, 64, 3, 4), (512, 512, 2048, 128, 128): (1, 1, 64, 64, 1, 4), (512, 512, 4096, 16, 16): (1, 1, 16, 64, 1, 2), (512, 512, 4096, 32, 32): (2, 64, 32, 64, 1, 2), (512, 512, 4096, 64, 64): (1, 1, 64, 64, 3, 4), (512, 512, 4096, 128, 128): (1, 1, 64, 64, 1, 4), (512, 512, 8192, 16, 16): (1, 2, 16, 128, 1, 2), (512, 512, 8192, 32, 32): (1, 1, 32, 64, 1, 2), (512, 512, 8192, 64, 64): (1, 1, 64, 64, 1, 2), (512, 512, 8192, 128, 128): (1, 1, 64, 64, 1, 4), (512, 512, 16384, 16, 16): (1, 2, 16, 128, 1, 2), (512, 512, 16384, 32, 32): (1, 1, 32, 64, 1, 2), (512, 512, 16384, 64, 64): (1, 1, 64, 64, 3, 2), (512, 512, 16384, 128, 128): (2, 1, 64, 64, 1, 4), (512, 512, 32768, 16, 16): (1, 2, 16, 128, 1, 2), (512, 512, 32768, 32, 32): (1, 1, 32, 64, 1, 2), (512, 512, 32768, 64, 64): (1, 1, 64, 64, 3, 4), (512, 512, 32768, 128, 128): (2, 1, 64, 64, 1, 4), (512, 512, 65536, 16, 16): (1, 2, 16, 128, 1, 2), (512, 512, 65536, 32, 32): (1, 1, 32, 64, 1, 2), (512, 512, 65536, 64, 64): (1, 1, 64, 64, 3, 4), (512, 512, 65536, 128, 128): (2, 1, 64, 64, 1, 4), (512, 512, 131072, 16, 16): (1, 1, 16, 64, 1, 1), (512, 512, 131072, 32, 32): (1, 1, 32, 64, 1, 2), (512, 512, 131072, 64, 64): (1, 1, 64, 64, 3, 4), (512, 512, 131072, 128, 128): (2, 4, 64, 64, 1, 4), (1024, 1024, 256, 16, 16): (1, 1, 16, 16, 1, 4), (1024, 1024, 256, 32, 32): (2, 16, 32, 16, 3, 4), (1024, 1024, 256, 64, 64): (1, 4, 32, 32, 1, 2), (1024, 1024, 256, 128, 128): (1, 4, 128, 16, 3, 16), (1024, 1024, 512, 16, 16): (1, 1, 16, 64, 1, 2), (1024, 1024, 512, 32, 32): (2, 2, 32, 64, 1, 2), (1024, 1024, 512, 64, 64): (2, 8, 64, 64, 3, 4), (1024, 1024, 512, 128, 128): (1, 4, 64, 64, 1, 8), (1024, 1024, 1024, 16, 16): (1, 1, 16, 64, 1, 2), (1024, 1024, 1024, 32, 32): (1, 1, 32, 64, 1, 2), (1024, 1024, 1024, 64, 64): (1, 8, 64, 
64, 3, 4), (1024, 1024, 1024, 128, 128): (1, 8, 64, 64, 1, 4), (1024, 1024, 2048, 16, 16): (1, 2, 16, 64, 1, 2), (1024, 1024, 2048, 32, 32): (1, 1, 32, 64, 1, 2), (1024, 1024, 2048, 64, 64): (2, 16, 64, 64, 2, 2), (1024, 1024, 2048, 128, 128): (2, 32, 64, 64, 1, 4), (1024, 1024, 4096, 16, 16): (2, 16, 16, 128, 1, 2), (1024, 1024, 4096, 32, 32): (1, 16, 32, 64, 3, 2), (1024, 1024, 4096, 64, 64): (1, 1, 64, 64, 3, 4), (1024, 1024, 4096, 128, 128): (2, 64, 128, 64, 1, 4), (1024, 1024, 8192, 16, 16): (2, 16, 16, 128, 1, 2), (1024, 1024, 8192, 32, 32): (1, 16, 32, 64, 3, 2), (1024, 1024, 8192, 64, 64): (1, 1, 64, 64, 3, 4), (1024, 1024, 8192, 128, 128): (2, 1, 64, 64, 1, 4), (1024, 1024, 16384, 16, 16): (1, 2, 16, 128, 1, 2), (1024, 1024, 16384, 32, 32): (1, 16, 32, 64, 3, 2), (1024, 1024, 16384, 64, 64): (1, 1, 64, 64, 3, 4), (1024, 1024, 16384, 128, 128): (2, 16, 128, 64, 1, 4), (1024, 1024, 32768, 16, 16): (1, 1, 16, 128, 1, 2), (1024, 1024, 32768, 32, 32): (1, 1, 32, 128, 1, 2), (1024, 1024, 32768, 64, 64): (1, 32, 64, 32, 2, 1), (1024, 1024, 32768, 128, 128): (2, 8, 128, 64, 1, 4), (1024, 1024, 65536, 16, 16): (3, 2, 16, 128, 1, 2), (1024, 1024, 65536, 32, 32): (1, 1, 32, 128, 1, 2), (1024, 1024, 65536, 64, 64): (2, 4, 64, 32, 2, 1), (1024, 1024, 65536, 128, 128): (2, 8, 128, 64, 1, 4), (1024, 1024, 131072, 16, 16): (2, 1, 16, 128, 1, 2), (1024, 1024, 131072, 32, 32): (1, 1, 32, 128, 1, 2), (1024, 1024, 131072, 64, 64): (1, 4, 64, 32, 2, 1), (1024, 1024, 131072, 128, 128): (4, 1, 128, 64, 1, 4), (2048, 2048, 256, 16, 16): (1, 1, 16, 64, 1, 8), (2048, 2048, 256, 32, 32): (1, 1, 32, 32, 3, 1), (2048, 2048, 256, 64, 64): (1, 1, 32, 32, 2, 1), (2048, 2048, 256, 128, 128): (1, 4, 64, 64, 1, 8), (2048, 2048, 512, 16, 16): (1, 2, 16, 64, 1, 2), (2048, 2048, 512, 32, 32): (1, 2, 32, 64, 1, 4), (2048, 2048, 512, 64, 64): (1, 4, 64, 64, 1, 8), (2048, 2048, 512, 128, 128): (1, 4, 64, 64, 1, 4), (2048, 2048, 1024, 16, 16): (1, 2, 16, 128, 1, 2), (2048, 2048, 1024, 32, 32): (1, 1, 32, 64, 1, 2), (2048, 2048, 1024, 64, 64): (1, 8, 64, 64, 1, 4), (2048, 2048, 1024, 128, 128): (1, 8, 128, 64, 1, 4), (2048, 2048, 2048, 16, 16): (3, 4, 16, 128, 1, 2), (2048, 2048, 2048, 32, 32): (1, 16, 32, 64, 5, 2), (2048, 2048, 2048, 64, 64): (1, 1, 64, 64, 3, 4), (2048, 2048, 2048, 128, 128): (1, 8, 128, 64, 1, 4), (2048, 2048, 4096, 16, 16): (1, 2, 16, 128, 1, 2), (2048, 2048, 4096, 32, 32): (1, 8, 32, 64, 3, 2), (2048, 2048, 4096, 64, 64): (1, 1, 64, 64, 3, 4), (2048, 2048, 4096, 128, 128): (1, 8, 128, 64, 1, 4), (2048, 2048, 8192, 16, 16): (2, 4, 16, 128, 1, 2), (2048, 2048, 8192, 32, 32): (1, 4, 32, 128, 3, 2), (2048, 2048, 8192, 64, 64): (1, 8, 64, 64, 3, 2), (2048, 2048, 8192, 128, 128): (1, 8, 128, 64, 1, 4), (2048, 2048, 16384, 16, 16): (1, 2, 16, 128, 1, 2), (2048, 2048, 16384, 32, 32): (1, 4, 32, 128, 3, 2), (2048, 2048, 16384, 64, 64): (1, 8, 64, 64, 3, 2), (2048, 2048, 16384, 128, 128): (1, 4, 128, 64, 1, 4), (2048, 2048, 32768, 16, 16): (3, 2, 16, 128, 1, 2), (2048, 2048, 32768, 32, 32): (1, 1, 32, 128, 3, 2), (2048, 2048, 32768, 64, 64): (1, 1, 64, 64, 3, 2), (2048, 2048, 32768, 128, 128): (1, 4, 128, 64, 1, 4), (2048, 2048, 65536, 16, 16): (1, 2, 16, 128, 1, 2), (2048, 2048, 65536, 32, 32): (1, 4, 32, 128, 1, 2), (2048, 2048, 65536, 64, 64): (1, 1, 64, 64, 3, 2), (2048, 2048, 65536, 128, 128): (1, 2, 128, 64, 1, 4), (2048, 2048, 131072, 16, 16): (4, 2, 16, 128, 1, 2), (2048, 2048, 131072, 32, 32): (1, 1, 32, 128, 3, 2), (2048, 2048, 131072, 64, 64): (1, 1, 64, 64, 3, 2), (2048, 2048, 131072, 128, 128): 
(1, 2, 128, 64, 1, 4), (4096, 4096, 256, 16, 16): (1, 1, 16, 64, 1, 2), (4096, 4096, 256, 32, 32): (1, 1, 32, 64, 3, 4), (4096, 4096, 256, 64, 64): (1, 1, 64, 64, 3, 4), (4096, 4096, 256, 128, 128): (3, 4, 128, 32, 1, 4), (4096, 4096, 512, 16, 16): (1, 2, 16, 128, 1, 2), (4096, 4096, 512, 32, 32): (1, 2, 32, 64, 3, 2), (4096, 4096, 512, 64, 64): (1, 4, 64, 64, 1, 4), (4096, 4096, 512, 128, 128): (1, 4, 128, 64, 1, 4), (4096, 4096, 1024, 16, 16): (1, 2, 16, 128, 1, 2), (4096, 4096, 1024, 32, 32): (1, 8, 32, 64, 3, 2), (4096, 4096, 1024, 64, 64): (1, 4, 64, 64, 1, 4), (4096, 4096, 1024, 128, 128): (2, 4, 128, 64, 1, 4), (4096, 4096, 2048, 16, 16): (1, 1, 16, 128, 1, 2), (4096, 4096, 2048, 32, 32): (1, 4, 32, 128, 1, 4), (4096, 4096, 2048, 64, 64): (1, 1, 64, 64, 3, 4), (4096, 4096, 2048, 128, 128): (1, 16, 128, 64, 1, 4), (4096, 4096, 4096, 16, 16): (1, 1, 16, 64, 3, 1), (4096, 4096, 4096, 32, 32): (1, 4, 32, 64, 3, 2), (4096, 4096, 4096, 64, 64): (1, 1, 64, 64, 3, 4), (4096, 4096, 4096, 128, 128): (5, 1, 128, 64, 1, 4), (4096, 4096, 8192, 16, 16): (1, 1, 16, 128, 1, 2), (4096, 4096, 8192, 32, 32): (1, 1, 32, 128, 3, 2), (4096, 4096, 8192, 64, 64): (1, 1, 64, 64, 3, 4), (4096, 4096, 8192, 128, 128): (2, 1, 128, 64, 1, 4), (4096, 4096, 16384, 16, 16): (1, 1, 16, 128, 1, 2), (4096, 4096, 16384, 32, 32): (1, 1, 32, 128, 3, 2), (4096, 4096, 16384, 64, 64): (1, 1, 64, 64, 4, 4), (4096, 4096, 16384, 128, 128): (2, 1, 128, 64, 1, 4), (4096, 4096, 32768, 16, 16): (3, 1, 16, 128, 1, 2), (4096, 4096, 32768, 32, 32): (1, 1, 32, 128, 3, 2), (4096, 4096, 32768, 64, 64): (1, 1, 64, 64, 3, 4), (4096, 4096, 32768, 128, 128): (2, 1, 128, 64, 1, 4), (4096, 4096, 65536, 16, 16): (2, 2, 16, 128, 1, 2), (4096, 4096, 65536, 32, 32): (1, 1, 32, 128, 4, 2), (4096, 4096, 65536, 64, 64): (1, 1, 64, 64, 4, 4), (4096, 4096, 65536, 128, 128): (2, 1, 128, 64, 1, 4), (4096, 4096, 131072, 16, 16): (2, 1, 16, 128, 1, 2), (4096, 4096, 131072, 32, 32): (1, 1, 32, 128, 3, 2), (4096, 4096, 131072, 64, 64): (1, 1, 64, 64, 3, 4), (4096, 4096, 131072, 128, 128): (2, 1, 128, 64, 1, 4), (8192, 8192, 256, 16, 16): (1, 2, 16, 64, 1, 2), (8192, 8192, 256, 32, 32): (1, 1, 32, 64, 1, 2), (8192, 8192, 256, 64, 64): (1, 2, 64, 64, 1, 4), (8192, 8192, 256, 128, 128): (3, 16, 128, 16, 1, 2), (8192, 8192, 512, 16, 16): (1, 2, 16, 128, 1, 2), (8192, 8192, 512, 32, 32): (1, 4, 32, 64, 3, 2), (8192, 8192, 512, 64, 64): (2, 8, 64, 64, 4, 4), (8192, 8192, 512, 128, 128): (1, 8, 128, 64, 1, 4), (8192, 8192, 1024, 16, 16): (4, 2, 16, 128, 1, 2), (8192, 8192, 1024, 32, 32): (1, 8, 32, 128, 1, 2), (8192, 8192, 1024, 64, 64): (1, 16, 64, 64, 3, 2), (8192, 8192, 1024, 128, 128): (2, 16, 128, 64, 2, 4), (8192, 8192, 2048, 16, 16): (2, 1, 16, 64, 4, 1), (8192, 8192, 2048, 32, 32): (1, 16, 32, 64, 5, 2), (8192, 8192, 2048, 64, 64): (1, 16, 64, 64, 3, 2), (8192, 8192, 2048, 128, 128): (2, 16, 128, 64, 2, 4), (8192, 8192, 4096, 16, 16): (1, 1, 16, 64, 4, 1), (8192, 8192, 4096, 32, 32): (1, 16, 32, 64, 5, 2), (8192, 8192, 4096, 64, 64): (1, 16, 64, 64, 3, 2), (8192, 8192, 4096, 128, 128): (2, 64, 128, 64, 2, 4), (8192, 8192, 8192, 16, 16): (1, 1, 16, 64, 4, 1), (8192, 8192, 8192, 32, 32): (1, 8, 32, 128, 5, 4), (8192, 8192, 8192, 64, 64): (1, 8, 64, 64, 3, 2), (8192, 8192, 8192, 128, 128): (2, 8, 128, 64, 1, 4), (8192, 8192, 16384, 16, 16): (1, 1, 16, 64, 4, 1), (8192, 8192, 16384, 32, 32): (1, 8, 32, 64, 5, 2), (8192, 8192, 16384, 64, 64): (1, 8, 64, 64, 3, 2), (8192, 8192, 16384, 128, 128): (1, 8, 128, 64, 1, 4), (8192, 8192, 32768, 16, 16): (1, 1, 16, 64, 
4, 1), (8192, 8192, 32768, 32, 32): (1, 8, 32, 64, 5, 2), (8192, 8192, 32768, 64, 64): (3, 8, 64, 64, 3, 2), (8192, 8192, 32768, 128, 128): (2, 8, 128, 64, 1, 4), (8192, 8192, 65536, 16, 16): (1, 1, 16, 64, 4, 1), (8192, 8192, 65536, 32, 32): (5, 4, 32, 64, 3, 2), (8192, 8192, 65536, 64, 64): (1, 8, 64, 64, 3, 2), (8192, 8192, 65536, 128, 128): (2, 8, 128, 64, 1, 4), (8192, 8192, 131072, 16, 16): (2, 1, 16, 64, 4, 1), (8192, 8192, 131072, 32, 32): (1, 4, 32, 64, 5, 2), (8192, 8192, 131072, 64, 64): (1, 4, 64, 128, 3, 4), (8192, 8192, 131072, 128, 128): (2, 8, 128, 64, 1, 4), (16384, 16384, 256, 16, 16): (1, 2, 16, 128, 1, 2), (16384, 16384, 256, 32, 32): (1, 4, 32, 64, 3, 2), (16384, 16384, 256, 64, 64): (2, 4, 64, 64, 4, 4), (16384, 16384, 256, 128, 128): (1, 4, 128, 64, 1, 16), (16384, 16384, 512, 16, 16): (1, 2, 16, 128, 3, 2), (16384, 16384, 512, 32, 32): (1, 4, 32, 128, 5, 4), (16384, 16384, 512, 64, 64): (1, 8, 64, 64, 3, 2), (16384, 16384, 512, 128, 128): (2, 8, 128, 64, 1, 4), (16384, 16384, 1024, 16, 16): (1, 2, 16, 128, 1, 2), (16384, 16384, 1024, 32, 32): (1, 8, 32, 64, 5, 2), (16384, 16384, 1024, 64, 64): (1, 16, 64, 64, 3, 2), (16384, 16384, 1024, 128, 128): (5, 16, 128, 64, 2, 4), (16384, 16384, 2048, 16, 16): (1, 2, 16, 128, 1, 2), (16384, 16384, 2048, 32, 32): (1, 8, 32, 64, 5, 2), (16384, 16384, 2048, 64, 64): (1, 16, 64, 64, 3, 2), (16384, 16384, 2048, 128, 128): (4, 32, 128, 64, 2, 4), (16384, 16384, 4096, 16, 16): (3, 2, 16, 128, 1, 2), (16384, 16384, 4096, 32, 32): (1, 4, 32, 64, 5, 2), (16384, 16384, 4096, 64, 64): (2, 16, 64, 64, 3, 2), (16384, 16384, 4096, 128, 128): (3, 32, 128, 64, 2, 4), (16384, 16384, 8192, 16, 16): (1, 2, 16, 128, 1, 2), (16384, 16384, 8192, 32, 32): (1, 4, 32, 64, 5, 2), (16384, 16384, 8192, 64, 64): (4, 8, 64, 64, 3, 2), (16384, 16384, 8192, 128, 128): (5, 8, 128, 64, 1, 4), (16384, 16384, 16384, 16, 16): (1, 2, 16, 128, 1, 2), (16384, 16384, 16384, 32, 32): (1, 4, 32, 64, 5, 2), (16384, 16384, 16384, 64, 64): (2, 4, 64, 128, 3, 4), (16384, 16384, 16384, 128, 128): (4, 8, 128, 64, 1, 4), (16384, 16384, 32768, 16, 16): (4, 2, 16, 128, 1, 2), (16384, 16384, 32768, 32, 32): (1, 4, 32, 64, 5, 2), (16384, 16384, 32768, 64, 64): (1, 8, 64, 64, 3, 2), (16384, 16384, 32768, 128, 128): (2, 512, 128, 64, 2, 4), (16384, 16384, 65536, 16, 16): (3, 2, 16, 128, 1, 2), (16384, 16384, 65536, 32, 32): (1, 4, 32, 64, 5, 2), (16384, 16384, 65536, 64, 64): (1, 4, 64, 128, 3, 4), (16384, 16384, 65536, 128, 128): (2, 1024, 128, 64, 2, 4), (16384, 16384, 131072, 16, 16): (1, 2, 16, 128, 1, 2), (16384, 16384, 131072, 32, 32): (1, 4, 32, 64, 5, 2), (16384, 16384, 131072, 64, 64): (3, 4, 64, 128, 3, 4), (16384, 16384, 131072, 128, 128): (4, 2048, 128, 64, 2, 4), }, ("scatter_mm", "NVIDIA A100-SXM4-80GB", (0, torch.float16, 0.5)): { (256, 256, 256, 16, 16): (5, 4, 16, 16, 1, 4), (256, 256, 256, 32, 32): (5, 2, 32, 16, 1, 4), (256, 256, 256, 64, 64): (4, 1, 32, 32, 1, 8), (256, 256, 256, 128, 128): (2, 1, 32, 32, 1, 4), (256, 256, 512, 16, 16): (2, 2, 16, 32, 1, 4), (256, 256, 512, 32, 32): (4, 8, 32, 32, 1, 8), (256, 256, 512, 64, 64): (4, 8, 32, 64, 1, 4), (256, 256, 512, 128, 128): (4, 8, 32, 64, 1, 4), (256, 256, 1024, 16, 16): (4, 2, 16, 64, 1, 2), (256, 256, 1024, 32, 32): (4, 16, 32, 64, 1, 2), (256, 256, 1024, 64, 64): (4, 16, 32, 64, 1, 4), (256, 256, 1024, 128, 128): (4, 16, 64, 64, 1, 8), (256, 256, 2048, 16, 16): (2, 16, 16, 64, 1, 8), (256, 256, 2048, 32, 32): (4, 16, 32, 64, 1, 2), (256, 256, 2048, 64, 64): (4, 16, 32, 64, 1, 4), (256, 256, 2048, 128, 
128): (4, 16, 64, 64, 1, 4), (256, 256, 4096, 16, 16): (4, 32, 16, 64, 1, 1), (256, 256, 4096, 32, 32): (2, 64, 32, 64, 1, 2), (256, 256, 4096, 64, 64): (4, 64, 64, 64, 1, 4), (256, 256, 4096, 128, 128): (4, 32, 64, 64, 1, 4), (256, 256, 8192, 16, 16): (4, 64, 16, 64, 1, 1), (256, 256, 8192, 32, 32): (4, 128, 32, 64, 1, 2), (256, 256, 8192, 64, 64): (4, 64, 64, 64, 1, 4), (256, 256, 8192, 128, 128): (4, 64, 64, 64, 1, 4), (256, 256, 16384, 16, 16): (4, 128, 16, 64, 1, 1), (256, 256, 16384, 32, 32): (2, 128, 32, 64, 1, 2), (256, 256, 16384, 64, 64): (4, 32, 32, 128, 1, 4), (256, 256, 16384, 128, 128): (4, 16, 64, 64, 1, 4), (256, 256, 32768, 16, 16): (4, 64, 16, 64, 1, 1), (256, 256, 32768, 32, 32): (2, 256, 32, 64, 1, 2), (256, 256, 32768, 64, 64): (4, 32, 32, 128, 1, 4), (256, 256, 32768, 128, 128): (4, 32, 64, 64, 1, 4), (256, 256, 65536, 16, 16): (4, 128, 16, 64, 1, 1), (256, 256, 65536, 32, 32): (4, 1, 32, 64, 1, 2), (256, 256, 65536, 64, 64): (2, 1, 64, 64, 1, 2), (256, 256, 65536, 128, 128): (4, 32, 64, 64, 1, 4), (256, 256, 131072, 16, 16): (4, 64, 16, 64, 1, 1), (256, 256, 131072, 32, 32): (2, 1, 32, 64, 1, 2), (256, 256, 131072, 64, 64): (4, 32, 32, 128, 1, 4), (256, 256, 131072, 128, 128): (4, 32, 64, 64, 1, 4), (512, 512, 256, 16, 16): (4, 16, 16, 16, 1, 4), (512, 512, 256, 32, 32): (2, 4, 32, 16, 1, 4), (512, 512, 256, 64, 64): (2, 16, 64, 16, 3, 8), (512, 512, 256, 128, 128): (4, 16, 64, 16, 1, 4), (512, 512, 512, 16, 16): (1, 1, 16, 64, 1, 8), (512, 512, 512, 32, 32): (2, 4, 16, 32, 1, 1), (512, 512, 512, 64, 64): (2, 1, 32, 32, 1, 2), (512, 512, 512, 128, 128): (4, 8, 32, 64, 1, 4), (512, 512, 1024, 16, 16): (2, 8, 16, 64, 1, 8), (512, 512, 1024, 32, 32): (4, 16, 32, 64, 1, 2), (512, 512, 1024, 64, 64): (4, 16, 64, 64, 1, 4), (512, 512, 1024, 128, 128): (2, 8, 64, 64, 1, 4), (512, 512, 2048, 16, 16): (4, 16, 16, 64, 1, 4), (512, 512, 2048, 32, 32): (4, 16, 32, 64, 1, 2), (512, 512, 2048, 64, 64): (4, 16, 64, 64, 1, 8), (512, 512, 2048, 128, 128): (4, 16, 64, 64, 1, 4), (512, 512, 4096, 16, 16): (4, 32, 16, 128, 1, 2), (512, 512, 4096, 32, 32): (4, 32, 32, 64, 1, 2), (512, 512, 4096, 64, 64): (4, 32, 64, 64, 1, 4), (512, 512, 4096, 128, 128): (4, 32, 64, 64, 1, 4), (512, 512, 8192, 16, 16): (2, 32, 16, 128, 1, 2), (512, 512, 8192, 32, 32): (4, 64, 32, 64, 1, 2), (512, 512, 8192, 64, 64): (4, 128, 64, 64, 1, 2), (512, 512, 8192, 128, 128): (4, 64, 64, 64, 1, 4), (512, 512, 16384, 16, 16): (4, 32, 16, 64, 1, 1), (512, 512, 16384, 32, 32): (4, 64, 32, 64, 1, 2), (512, 512, 16384, 64, 64): (4, 16, 64, 64, 1, 4), (512, 512, 16384, 128, 128): (4, 32, 64, 64, 1, 4), (512, 512, 32768, 16, 16): (7, 16, 16, 128, 1, 2), (512, 512, 32768, 32, 32): (4, 64, 32, 64, 1, 2), (512, 512, 32768, 64, 64): (2, 32, 64, 64, 3, 2), (512, 512, 32768, 128, 128): (2, 32, 64, 64, 1, 4), (512, 512, 65536, 16, 16): (2, 32, 16, 64, 1, 1), (512, 512, 65536, 32, 32): (4, 64, 32, 64, 1, 2), (512, 512, 65536, 64, 64): (3, 32, 64, 64, 3, 2), (512, 512, 65536, 128, 128): (4, 16, 64, 64, 1, 4), (512, 512, 131072, 16, 16): (3, 32, 16, 128, 1, 2), (512, 512, 131072, 32, 32): (4, 64, 32, 64, 1, 2), (512, 512, 131072, 64, 64): (2, 32, 64, 64, 3, 2), (512, 512, 131072, 128, 128): (3, 1, 64, 64, 1, 4), (1024, 1024, 256, 16, 16): (4, 16, 16, 16, 1, 4), (1024, 1024, 256, 32, 32): (4, 16, 32, 16, 1, 4), (1024, 1024, 256, 64, 64): (4, 4, 64, 32, 1, 16), (1024, 1024, 256, 128, 128): (4, 16, 64, 16, 1, 8), (1024, 1024, 512, 16, 16): (2, 8, 16, 64, 1, 8), (1024, 1024, 512, 32, 32): (3, 2, 32, 64, 1, 2), (1024, 1024, 512, 64, 
64): (4, 8, 32, 64, 1, 8), (1024, 1024, 512, 128, 128): (4, 8, 64, 64, 1, 8), (1024, 1024, 1024, 16, 16): (2, 2, 16, 64, 1, 2), (1024, 1024, 1024, 32, 32): (2, 8, 32, 64, 1, 2), (1024, 1024, 1024, 64, 64): (2, 8, 32, 128, 1, 4), (1024, 1024, 1024, 128, 128): (2, 8, 64, 64, 1, 4), (1024, 1024, 2048, 16, 16): (2, 16, 16, 128, 3, 2), (1024, 1024, 2048, 32, 32): (4, 32, 32, 64, 1, 2), (1024, 1024, 2048, 64, 64): (4, 16, 64, 64, 1, 4), (1024, 1024, 2048, 128, 128): (4, 32, 64, 64, 1, 4), (1024, 1024, 4096, 16, 16): (4, 16, 16, 128, 1, 2), (1024, 1024, 4096, 32, 32): (3, 32, 32, 64, 1, 2), (1024, 1024, 4096, 64, 64): (4, 32, 64, 64, 1, 4), (1024, 1024, 4096, 128, 128): (4, 32, 64, 64, 1, 4), (1024, 1024, 8192, 16, 16): (5, 16, 16, 128, 1, 2), (1024, 1024, 8192, 32, 32): (2, 32, 32, 64, 3, 2), (1024, 1024, 8192, 64, 64): (1, 16, 64, 64, 3, 2), (1024, 1024, 8192, 128, 128): (4, 32, 64, 64, 1, 4), (1024, 1024, 16384, 16, 16): (4, 16, 16, 128, 1, 2), (1024, 1024, 16384, 32, 32): (1, 32, 32, 64, 3, 2), (1024, 1024, 16384, 64, 64): (4, 16, 64, 64, 3, 2), (1024, 1024, 16384, 128, 128): (4, 32, 128, 64, 1, 4), (1024, 1024, 32768, 16, 16): (3, 16, 16, 128, 1, 2), (1024, 1024, 32768, 32, 32): (1, 8, 32, 64, 3, 2), (1024, 1024, 32768, 64, 64): (4, 16, 64, 64, 3, 2), (1024, 1024, 32768, 128, 128): (4, 8, 128, 64, 2, 4), (1024, 1024, 65536, 16, 16): (1, 2, 16, 128, 1, 2), (1024, 1024, 65536, 32, 32): (2, 4, 32, 64, 3, 2), (1024, 1024, 65536, 64, 64): (5, 16, 64, 64, 3, 2), (1024, 1024, 65536, 128, 128): (5, 8, 128, 64, 2, 4), (1024, 1024, 131072, 16, 16): (5, 2, 16, 128, 1, 2), (1024, 1024, 131072, 32, 32): (1, 2, 32, 64, 3, 2), (1024, 1024, 131072, 64, 64): (5, 16, 64, 64, 3, 2), (1024, 1024, 131072, 128, 128): (2, 1, 128, 64, 2, 4), (2048, 2048, 256, 16, 16): (4, 4, 16, 64, 1, 8), (2048, 2048, 256, 32, 32): (4, 8, 32, 32, 1, 8), (2048, 2048, 256, 64, 64): (4, 16, 64, 16, 1, 8), (2048, 2048, 256, 128, 128): (4, 4, 128, 32, 3, 8), (2048, 2048, 512, 16, 16): (2, 2, 16, 64, 1, 2), (2048, 2048, 512, 32, 32): (2, 4, 32, 64, 3, 2), (2048, 2048, 512, 64, 64): (4, 4, 64, 64, 1, 8), (2048, 2048, 512, 128, 128): (4, 8, 64, 64, 1, 4), (2048, 2048, 1024, 16, 16): (1, 8, 16, 64, 1, 2), (2048, 2048, 1024, 32, 32): (2, 16, 32, 64, 3, 2), (2048, 2048, 1024, 64, 64): (4, 8, 64, 64, 1, 4), (2048, 2048, 1024, 128, 128): (4, 8, 128, 64, 1, 4), (2048, 2048, 2048, 16, 16): (5, 4, 16, 128, 1, 2), (2048, 2048, 2048, 32, 32): (1, 16, 32, 64, 3, 2), (2048, 2048, 2048, 64, 64): (2, 8, 64, 64, 1, 4), (2048, 2048, 2048, 128, 128): (2, 8, 128, 64, 1, 4), (2048, 2048, 4096, 16, 16): (4, 2, 16, 128, 1, 2), (2048, 2048, 4096, 32, 32): (2, 16, 32, 64, 3, 2), (2048, 2048, 4096, 64, 64): (2, 8, 64, 64, 3, 2), (2048, 2048, 4096, 128, 128): (4, 8, 128, 64, 1, 4), (2048, 2048, 8192, 16, 16): (5, 4, 16, 128, 1, 2), (2048, 2048, 8192, 32, 32): (2, 8, 32, 64, 3, 2), (2048, 2048, 8192, 64, 64): (4, 8, 64, 64, 3, 2), (2048, 2048, 8192, 128, 128): (4, 8, 128, 64, 1, 4), (2048, 2048, 16384, 16, 16): (3, 2, 16, 128, 1, 2), (2048, 2048, 16384, 32, 32): (2, 4, 32, 128, 3, 2), (2048, 2048, 16384, 64, 64): (4, 8, 64, 64, 3, 2), (2048, 2048, 16384, 128, 128): (4, 4, 128, 64, 1, 4), (2048, 2048, 32768, 16, 16): (3, 2, 16, 128, 1, 2), (2048, 2048, 32768, 32, 32): (3, 4, 32, 128, 3, 2), (2048, 2048, 32768, 64, 64): (6, 4, 64, 64, 3, 2), (2048, 2048, 32768, 128, 128): (3, 4, 128, 64, 1, 4), (2048, 2048, 65536, 16, 16): (6, 2, 16, 128, 1, 2), (2048, 2048, 65536, 32, 32): (1, 2, 32, 128, 1, 2), (2048, 2048, 65536, 64, 64): (5, 4, 64, 64, 3, 2), (2048, 2048, 65536, 
128, 128): (5, 1, 128, 64, 2, 4), (2048, 2048, 131072, 16, 16): (3, 2, 16, 128, 1, 2), (2048, 2048, 131072, 32, 32): (2, 1, 32, 128, 3, 2), (2048, 2048, 131072, 64, 64): (4, 1, 64, 64, 3, 2), (2048, 2048, 131072, 128, 128): (3, 1, 128, 64, 2, 4), (4096, 4096, 256, 16, 16): (5, 8, 16, 32, 1, 4), (4096, 4096, 256, 32, 32): (4, 16, 32, 16, 2, 4), (4096, 4096, 256, 64, 64): (2, 1, 64, 64, 3, 4), (4096, 4096, 256, 128, 128): (4, 4, 128, 32, 1, 4), (4096, 4096, 512, 16, 16): (4, 2, 16, 128, 1, 2), (4096, 4096, 512, 32, 32): (4, 8, 32, 64, 1, 2), (4096, 4096, 512, 64, 64): (4, 4, 64, 64, 1, 4), (4096, 4096, 512, 128, 128): (4, 8, 128, 64, 2, 4), (4096, 4096, 1024, 16, 16): (1, 2, 16, 128, 1, 2), (4096, 4096, 1024, 32, 32): (6, 8, 32, 64, 3, 2), (4096, 4096, 1024, 64, 64): (2, 16, 64, 64, 4, 4), (4096, 4096, 1024, 128, 128): (2, 4, 128, 64, 2, 4), (4096, 4096, 2048, 16, 16): (3, 1, 16, 128, 1, 2), (4096, 4096, 2048, 32, 32): (1, 4, 32, 64, 5, 2), (4096, 4096, 2048, 64, 64): (3, 16, 64, 64, 3, 2), (4096, 4096, 2048, 128, 128): (4, 32, 128, 64, 2, 4), (4096, 4096, 4096, 16, 16): (1, 2, 16, 128, 1, 2), (4096, 4096, 4096, 32, 32): (1, 4, 32, 64, 3, 2), (4096, 4096, 4096, 64, 64): (1, 1, 64, 64, 4, 4), (4096, 4096, 4096, 128, 128): (2, 1, 128, 128, 1, 8), (4096, 4096, 8192, 16, 16): (3, 1, 16, 128, 1, 2), (4096, 4096, 8192, 32, 32): (2, 2, 32, 64, 5, 2), (4096, 4096, 8192, 64, 64): (4, 16, 64, 64, 3, 2), (4096, 4096, 8192, 128, 128): (4, 16, 128, 64, 2, 4), (4096, 4096, 16384, 16, 16): (1, 2, 16, 128, 1, 2), (4096, 4096, 16384, 32, 32): (4, 2, 32, 64, 5, 2), (4096, 4096, 16384, 64, 64): (4, 16, 64, 64, 3, 2), (4096, 4096, 16384, 128, 128): (4, 16, 128, 64, 2, 4), (4096, 4096, 32768, 16, 16): (3, 1, 16, 128, 1, 2), (4096, 4096, 32768, 32, 32): (3, 1, 32, 128, 1, 4), (4096, 4096, 32768, 64, 64): (3, 1, 64, 64, 3, 4), (4096, 4096, 32768, 128, 128): (5, 16, 128, 64, 2, 4), (4096, 4096, 65536, 16, 16): (5, 1, 16, 128, 1, 2), (4096, 4096, 65536, 32, 32): (5, 1, 32, 128, 1, 4), (4096, 4096, 65536, 64, 64): (1, 1, 64, 64, 3, 4), (4096, 4096, 65536, 128, 128): (3, 16, 128, 64, 2, 4), (4096, 4096, 131072, 16, 16): (3, 1, 16, 128, 1, 2), (4096, 4096, 131072, 32, 32): (3, 1, 32, 128, 3, 2), (4096, 4096, 131072, 64, 64): (2, 1, 64, 64, 3, 4), (4096, 4096, 131072, 128, 128): (1, 1, 128, 64, 1, 4), (8192, 8192, 256, 16, 16): (4, 16, 16, 16, 1, 4), (8192, 8192, 256, 32, 32): (1, 16, 32, 16, 4, 4), (8192, 8192, 256, 64, 64): (4, 16, 64, 16, 3, 8), (8192, 8192, 256, 128, 128): (4, 16, 128, 16, 1, 2), (8192, 8192, 512, 16, 16): (2, 8, 16, 64, 1, 4), (8192, 8192, 512, 32, 32): (4, 8, 32, 64, 3, 2), (8192, 8192, 512, 64, 64): (2, 8, 64, 64, 4, 4), (8192, 8192, 512, 128, 128): (4, 8, 128, 64, 2, 4), (8192, 8192, 1024, 16, 16): (4, 16, 16, 64, 1, 8), (8192, 8192, 1024, 32, 32): (2, 8, 32, 64, 5, 2), (8192, 8192, 1024, 64, 64): (1, 16, 64, 64, 3, 2), (8192, 8192, 1024, 128, 128): (5, 16, 128, 64, 2, 4), (8192, 8192, 2048, 16, 16): (7, 2, 16, 128, 1, 2), (8192, 8192, 2048, 32, 32): (1, 16, 32, 64, 5, 2), (8192, 8192, 2048, 64, 64): (4, 16, 64, 64, 3, 2), (8192, 8192, 2048, 128, 128): (6, 16, 128, 64, 2, 4), (8192, 8192, 4096, 16, 16): (4, 2, 16, 128, 1, 2), (8192, 8192, 4096, 32, 32): (2, 8, 32, 64, 5, 2), (8192, 8192, 4096, 64, 64): (3, 16, 64, 64, 3, 2), (8192, 8192, 4096, 128, 128): (3, 64, 128, 64, 2, 4), (8192, 8192, 8192, 16, 16): (4, 2, 16, 128, 1, 2), (8192, 8192, 8192, 32, 32): (1, 4, 32, 128, 5, 4), (8192, 8192, 8192, 64, 64): (4, 4, 64, 64, 1, 4), (8192, 8192, 8192, 128, 128): (2, 2, 128, 128, 3, 8), (8192, 8192, 
16384, 16, 16): (1, 2, 16, 128, 1, 2), (8192, 8192, 16384, 32, 32): (4, 8, 32, 64, 5, 2), (8192, 8192, 16384, 64, 64): (5, 8, 64, 64, 3, 2), (8192, 8192, 16384, 128, 128): (3, 16, 128, 64, 2, 4), (8192, 8192, 32768, 16, 16): (7, 2, 16, 128, 1, 2), (8192, 8192, 32768, 32, 32): (3, 4, 32, 64, 3, 2), (8192, 8192, 32768, 64, 64): (2, 8, 64, 64, 3, 2), (8192, 8192, 32768, 128, 128): (6, 16, 128, 64, 2, 4), (8192, 8192, 65536, 16, 16): (9, 2, 16, 128, 1, 2), (8192, 8192, 65536, 32, 32): (7, 4, 32, 64, 5, 2), (8192, 8192, 65536, 64, 64): (4, 8, 64, 64, 3, 2), (8192, 8192, 65536, 128, 128): (3, 16, 128, 64, 2, 4), (8192, 8192, 131072, 16, 16): (9, 2, 16, 128, 1, 2), (8192, 8192, 131072, 32, 32): (1, 8, 32, 64, 5, 2), (8192, 8192, 131072, 64, 64): (1, 8, 64, 64, 3, 2), (8192, 8192, 131072, 128, 128): (4, 16, 128, 64, 2, 4), (16384, 16384, 256, 16, 16): (5, 16, 16, 16, 1, 4), (16384, 16384, 256, 32, 32): (4, 16, 32, 16, 4, 4), (16384, 16384, 256, 64, 64): (4, 16, 64, 16, 3, 8), (16384, 16384, 256, 128, 128): (4, 16, 128, 16, 1, 2), (16384, 16384, 512, 16, 16): (2, 8, 16, 64, 1, 4), (16384, 16384, 512, 32, 32): (1, 4, 32, 64, 5, 2), (16384, 16384, 512, 64, 64): (4, 8, 64, 64, 1, 4), (16384, 16384, 512, 128, 128): (3, 8, 128, 64, 2, 4), (16384, 16384, 1024, 16, 16): (4, 2, 16, 128, 1, 2), (16384, 16384, 1024, 32, 32): (4, 8, 32, 64, 5, 2), (16384, 16384, 1024, 64, 64): (6, 16, 64, 64, 3, 2), (16384, 16384, 1024, 128, 128): (3, 16, 128, 64, 2, 4), (16384, 16384, 2048, 16, 16): (3, 2, 16, 128, 1, 2), (16384, 16384, 2048, 32, 32): (1, 8, 32, 64, 5, 2), (16384, 16384, 2048, 64, 64): (5, 16, 64, 64, 3, 2), (16384, 16384, 2048, 128, 128): (2, 32, 128, 64, 2, 4), (16384, 16384, 4096, 16, 16): (2, 2, 16, 128, 1, 2), (16384, 16384, 4096, 32, 32): (1, 4, 32, 64, 3, 2), (16384, 16384, 4096, 64, 64): (2, 8, 64, 64, 3, 2), (16384, 16384, 4096, 128, 128): (3, 16, 128, 64, 2, 4), (16384, 16384, 8192, 16, 16): (3, 2, 16, 128, 1, 2), (16384, 16384, 8192, 32, 32): (2, 4, 32, 64, 5, 2), (16384, 16384, 8192, 64, 64): (4, 8, 64, 64, 3, 2), (16384, 16384, 8192, 128, 128): (8, 32, 128, 64, 2, 4), (16384, 16384, 16384, 16, 16): (1, 2, 16, 256, 1, 4), (16384, 16384, 16384, 32, 32): (1, 4, 32, 128, 3, 4), (16384, 16384, 16384, 64, 64): (5, 4, 64, 64, 1, 4), (16384, 16384, 16384, 128, 128): (4, 8, 128, 64, 2, 4), (16384, 16384, 32768, 16, 16): (2, 2, 16, 128, 1, 2), (16384, 16384, 32768, 32, 32): (1, 4, 32, 64, 3, 2), (16384, 16384, 32768, 64, 64): (5, 4, 64, 64, 1, 4), (16384, 16384, 32768, 128, 128): (5, 8, 128, 64, 2, 4), (16384, 16384, 65536, 16, 16): (8, 2, 16, 128, 1, 2), (16384, 16384, 65536, 32, 32): (6, 4, 32, 64, 5, 2), (16384, 16384, 65536, 64, 64): (2, 4, 64, 64, 1, 4), (16384, 16384, 65536, 128, 128): (4, 8, 128, 64, 2, 4), (16384, 16384, 131072, 16, 16): (3, 1, 16, 128, 1, 2), (16384, 16384, 131072, 32, 32): (1, 4, 32, 64, 3, 2), (16384, 16384, 131072, 64, 64): (4, 4, 64, 64, 1, 4), (16384, 16384, 131072, 128, 128): (1, 8, 128, 64, 2, 4), (32768, 32768, 256, 16, 16): (4, 16, 16, 16, 1, 4), (32768, 32768, 512, 16, 16): (4, 2, 16, 128, 1, 2), (32768, 32768, 1024, 16, 16): (3, 2, 16, 128, 1, 2), (32768, 32768, 2048, 16, 16): (4, 2, 16, 128, 1, 2), (32768, 32768, 4096, 16, 16): (5, 4, 16, 64, 1, 1), (32768, 32768, 8192, 16, 16): (4, 4, 16, 64, 1, 1), (32768, 32768, 16384, 16, 16): (4, 4, 16, 64, 1, 1), (32768, 32768, 32768, 16, 16): (5, 4, 16, 64, 1, 1), }, ("scatter_mm", "NVIDIA A100-SXM4-80GB", (0, torch.float32, 0.5)): { (256, 256, 256, 16, 16): (1, 1, 16, 16, 1, 8), (256, 256, 256, 32, 32): (1, 1, 16, 16, 1, 4), 
(256, 256, 256, 64, 64): (1, 1, 16, 16, 1, 4), (256, 256, 256, 128, 128): (1, 1, 16, 16, 1, 1), (256, 256, 512, 16, 16): (1, 1, 16, 16, 1, 4), (256, 256, 512, 32, 32): (1, 16, 16, 16, 1, 1), (256, 256, 512, 64, 64): (1, 1, 16, 16, 1, 1), (256, 256, 512, 128, 128): (1, 1, 32, 32, 1, 4), (256, 256, 1024, 16, 16): (1, 1, 16, 32, 1, 2), (256, 256, 1024, 32, 32): (1, 4, 16, 16, 1, 1), (256, 256, 1024, 64, 64): (1, 1, 32, 32, 1, 4), (256, 256, 1024, 128, 128): (1, 1, 32, 32, 1, 4), (256, 256, 2048, 16, 16): (1, 2, 16, 32, 1, 2), (256, 256, 2048, 32, 32): (1, 1, 16, 32, 1, 2), (256, 256, 2048, 64, 64): (2, 1, 16, 32, 1, 2), (256, 256, 2048, 128, 128): (1, 1, 16, 16, 1, 1), (256, 256, 4096, 16, 16): (1, 1, 16, 32, 1, 2), (256, 256, 4096, 32, 32): (1, 1, 16, 32, 1, 2), (256, 256, 4096, 64, 64): (1, 1, 32, 32, 1, 4), (256, 256, 4096, 128, 128): (3, 1, 32, 64, 1, 4), (256, 256, 8192, 16, 16): (1, 32, 16, 64, 1, 2), (256, 256, 8192, 32, 32): (1, 1, 32, 64, 1, 4), (256, 256, 8192, 64, 64): (1, 1, 32, 64, 1, 4), (256, 256, 8192, 128, 128): (2, 1, 64, 32, 1, 4), (256, 256, 16384, 16, 16): (1, 1, 16, 64, 1, 2), (256, 256, 16384, 32, 32): (1, 1, 32, 64, 1, 4), (256, 256, 16384, 64, 64): (1, 128, 64, 64, 1, 4), (256, 256, 16384, 128, 128): (2, 1, 64, 32, 1, 4), (256, 256, 32768, 16, 16): (2, 128, 16, 64, 1, 1), (256, 256, 32768, 32, 32): (1, 1, 32, 64, 1, 4), (256, 256, 32768, 64, 64): (1, 128, 64, 64, 1, 4), (256, 256, 32768, 128, 128): (2, 1, 64, 64, 1, 4), (256, 256, 65536, 16, 16): (1, 1, 16, 64, 1, 2), (256, 256, 65536, 32, 32): (1, 1, 32, 64, 1, 4), (256, 256, 65536, 64, 64): (2, 1, 64, 64, 1, 4), (256, 256, 65536, 128, 128): (1, 1, 128, 32, 1, 4), (256, 256, 131072, 16, 16): (3, 128, 16, 64, 1, 1), (256, 256, 131072, 32, 32): (1, 1, 32, 64, 1, 4), (256, 256, 131072, 64, 64): (2, 1, 64, 64, 1, 4), (256, 256, 131072, 128, 128): (1, 8192, 64, 16, 1, 4), (512, 512, 256, 16, 16): (1, 2, 16, 16, 1, 1), (512, 512, 256, 32, 32): (1, 4, 16, 16, 1, 1), (512, 512, 256, 64, 64): (1, 16, 16, 16, 1, 1), (512, 512, 256, 128, 128): (1, 1, 16, 32, 1, 4), (512, 512, 512, 16, 16): (1, 8, 16, 32, 1, 2), (512, 512, 512, 32, 32): (1, 8, 16, 32, 1, 2), (512, 512, 512, 64, 64): (1, 2, 16, 32, 1, 2), (512, 512, 512, 128, 128): (1, 1, 32, 32, 1, 4), (512, 512, 1024, 16, 16): (1, 1, 16, 32, 1, 2), (512, 512, 1024, 32, 32): (1, 1, 16, 32, 1, 2), (512, 512, 1024, 64, 64): (1, 1, 16, 32, 1, 2), (512, 512, 1024, 128, 128): (1, 1, 64, 32, 1, 4), (512, 512, 2048, 16, 16): (1, 16, 16, 64, 1, 2), (512, 512, 2048, 32, 32): (1, 1, 32, 32, 1, 4), (512, 512, 2048, 64, 64): (1, 1, 32, 32, 1, 4), (512, 512, 2048, 128, 128): (2, 1, 32, 32, 1, 4), (512, 512, 4096, 16, 16): (2, 64, 16, 64, 1, 1), (512, 512, 4096, 32, 32): (1, 64, 32, 64, 1, 4), (512, 512, 4096, 64, 64): (1, 1, 32, 32, 1, 4), (512, 512, 4096, 128, 128): (1, 1, 64, 32, 1, 4), (512, 512, 8192, 16, 16): (2, 64, 16, 64, 1, 1), (512, 512, 8192, 32, 32): (1, 256, 32, 32, 1, 1), (512, 512, 8192, 64, 64): (1, 64, 64, 64, 1, 4), (512, 512, 8192, 128, 128): (2, 1, 64, 32, 1, 8), (512, 512, 16384, 16, 16): (2, 64, 16, 64, 1, 1), (512, 512, 16384, 32, 32): (1, 128, 32, 32, 1, 1), (512, 512, 16384, 64, 64): (1, 64, 64, 64, 1, 4), (512, 512, 16384, 128, 128): (3, 1, 64, 32, 1, 8), (512, 512, 32768, 16, 16): (2, 64, 16, 64, 1, 1), (512, 512, 32768, 32, 32): (1, 128, 32, 32, 1, 1), (512, 512, 32768, 64, 64): (1, 64, 64, 64, 1, 4), (512, 512, 32768, 128, 128): (2, 1, 64, 32, 1, 8), (512, 512, 65536, 16, 16): (2, 32, 16, 64, 1, 1), (512, 512, 65536, 32, 32): (1, 128, 32, 32, 1, 1), (512, 512, 
65536, 64, 64): (1, 64, 64, 64, 1, 4), (512, 512, 65536, 128, 128): (2, 1, 64, 32, 1, 8), (512, 512, 131072, 16, 16): (2, 32, 16, 64, 1, 1), (512, 512, 131072, 32, 32): (1, 128, 32, 32, 1, 1), (512, 512, 131072, 64, 64): (3, 64, 64, 64, 1, 4), (512, 512, 131072, 128, 128): (1, 8192, 64, 16, 1, 4), (1024, 1024, 256, 16, 16): (1, 4, 16, 32, 1, 2), (1024, 1024, 256, 32, 32): (1, 4, 16, 32, 1, 2), (1024, 1024, 256, 64, 64): (1, 1, 16, 32, 1, 2), (1024, 1024, 256, 128, 128): (1, 1, 16, 16, 1, 1), (1024, 1024, 512, 16, 16): (1, 8, 16, 32, 1, 2), (1024, 1024, 512, 32, 32): (1, 8, 16, 32, 1, 1), (1024, 1024, 512, 64, 64): (1, 8, 32, 32, 1, 4), (1024, 1024, 512, 128, 128): (2, 1, 32, 32, 1, 4), (1024, 1024, 1024, 16, 16): (1, 16, 16, 32, 1, 2), (1024, 1024, 1024, 32, 32): (1, 16, 32, 64, 1, 4), (1024, 1024, 1024, 64, 64): (1, 16, 32, 64, 1, 4), (1024, 1024, 1024, 128, 128): (1, 1, 32, 32, 1, 4), (1024, 1024, 2048, 16, 16): (2, 32, 16, 64, 1, 1), (1024, 1024, 2048, 32, 32): (1, 32, 32, 64, 1, 4), (1024, 1024, 2048, 64, 64): (1, 32, 64, 64, 1, 4), (1024, 1024, 2048, 128, 128): (1, 1, 32, 64, 1, 4), (1024, 1024, 4096, 16, 16): (2, 16, 16, 64, 1, 1), (1024, 1024, 4096, 32, 32): (1, 64, 32, 32, 1, 1), (1024, 1024, 4096, 64, 64): (1, 64, 64, 64, 1, 4), (1024, 1024, 4096, 128, 128): (2, 64, 64, 32, 1, 8), (1024, 1024, 8192, 16, 16): (2, 16, 16, 64, 1, 1), (1024, 1024, 8192, 32, 32): (1, 64, 32, 32, 1, 1), (1024, 1024, 8192, 64, 64): (1, 64, 64, 64, 1, 4), (1024, 1024, 8192, 128, 128): (4, 1, 32, 64, 1, 4), (1024, 1024, 16384, 16, 16): (2, 16, 16, 64, 1, 1), (1024, 1024, 16384, 32, 32): (1, 64, 32, 32, 1, 1), (1024, 1024, 16384, 64, 64): (1, 32, 64, 64, 1, 4), (1024, 1024, 16384, 128, 128): (2, 64, 64, 32, 1, 4), (1024, 1024, 32768, 16, 16): (2, 16, 16, 64, 1, 1), (1024, 1024, 32768, 32, 32): (1, 64, 32, 32, 1, 1), (1024, 1024, 32768, 64, 64): (1, 32, 64, 64, 1, 4), (1024, 1024, 32768, 128, 128): (4, 1, 32, 64, 1, 4), (1024, 1024, 65536, 16, 16): (2, 16, 16, 64, 1, 1), (1024, 1024, 65536, 32, 32): (1, 32, 32, 32, 1, 1), (1024, 1024, 65536, 64, 64): (2, 32, 64, 64, 1, 4), (1024, 1024, 65536, 128, 128): (4, 1, 64, 32, 1, 4), (1024, 1024, 131072, 16, 16): (2, 16, 16, 64, 1, 1), (1024, 1024, 131072, 32, 32): (1, 32, 32, 32, 1, 1), (1024, 1024, 131072, 64, 64): (1, 16, 64, 64, 1, 4), (1024, 1024, 131072, 128, 128): (1, 8192, 64, 16, 1, 4), (2048, 2048, 256, 16, 16): (1, 4, 16, 32, 1, 2), (2048, 2048, 256, 32, 32): (1, 8, 16, 32, 1, 1), (2048, 2048, 256, 64, 64): (1, 8, 32, 32, 1, 4), (2048, 2048, 256, 128, 128): (1, 4, 64, 64, 1, 8), (2048, 2048, 512, 16, 16): (2, 8, 16, 32, 1, 2), (2048, 2048, 512, 32, 32): (2, 8, 32, 64, 1, 4), (2048, 2048, 512, 64, 64): (2, 4, 64, 64, 1, 4), (2048, 2048, 512, 128, 128): (1, 8, 32, 64, 1, 4), (2048, 2048, 1024, 16, 16): (2, 16, 16, 64, 3, 1), (2048, 2048, 1024, 32, 32): (1, 32, 32, 32, 1, 1), (2048, 2048, 1024, 64, 64): (1, 16, 64, 64, 1, 4), (2048, 2048, 1024, 128, 128): (2, 4, 64, 64, 1, 8), (2048, 2048, 2048, 16, 16): (2, 16, 16, 64, 1, 1), (2048, 2048, 2048, 32, 32): (1, 32, 32, 32, 1, 1), (2048, 2048, 2048, 64, 64): (1, 16, 64, 64, 1, 4), (2048, 2048, 2048, 128, 128): (2, 32, 32, 64, 1, 4), (2048, 2048, 4096, 16, 16): (3, 2, 16, 64, 1, 1), (2048, 2048, 4096, 32, 32): (3, 4, 32, 32, 1, 1), (2048, 2048, 4096, 64, 64): (1, 16, 64, 64, 1, 4), (2048, 2048, 4096, 128, 128): (2, 32, 64, 32, 1, 4), (2048, 2048, 8192, 16, 16): (3, 4, 16, 64, 1, 1), (2048, 2048, 8192, 32, 32): (2, 4, 32, 32, 1, 1), (2048, 2048, 8192, 64, 64): (2, 32, 64, 32, 1, 2), (2048, 2048, 8192, 128, 128): 
(4, 1, 32, 64, 1, 4), (2048, 2048, 16384, 16, 16): (3, 4, 16, 64, 1, 1), (2048, 2048, 16384, 32, 32): (1, 4, 32, 32, 1, 1), (2048, 2048, 16384, 64, 64): (2, 8, 64, 32, 1, 2), (2048, 2048, 16384, 128, 128): (2, 8, 64, 32, 1, 4), (2048, 2048, 32768, 16, 16): (2, 4, 16, 64, 1, 1), (2048, 2048, 32768, 32, 32): (2, 8, 32, 32, 1, 1), (2048, 2048, 32768, 64, 64): (1, 16, 64, 32, 1, 2), (2048, 2048, 32768, 128, 128): (4, 1, 32, 64, 1, 4), (2048, 2048, 65536, 16, 16): (3, 4, 16, 64, 1, 1), (2048, 2048, 65536, 32, 32): (1, 8, 32, 32, 1, 1), (2048, 2048, 65536, 64, 64): (1, 8, 64, 32, 1, 2), (2048, 2048, 65536, 128, 128): (4, 1, 64, 32, 1, 4), (2048, 2048, 131072, 16, 16): (2, 4, 16, 64, 1, 1), (2048, 2048, 131072, 32, 32): (1, 8, 32, 32, 1, 1), (2048, 2048, 131072, 64, 64): (3, 1, 64, 32, 1, 2), (2048, 2048, 131072, 128, 128): (1, 8192, 128, 16, 1, 8), (4096, 4096, 256, 16, 16): (2, 4, 16, 32, 1, 2), (4096, 4096, 256, 32, 32): (1, 4, 32, 64, 1, 4), (4096, 4096, 256, 64, 64): (1, 4, 64, 64, 1, 4), (4096, 4096, 256, 128, 128): (1, 4, 32, 64, 1, 4), (4096, 4096, 512, 16, 16): (2, 8, 16, 64, 3, 1), (4096, 4096, 512, 32, 32): (2, 16, 32, 32, 1, 1), (4096, 4096, 512, 64, 64): (1, 8, 64, 64, 1, 4), (4096, 4096, 512, 128, 128): (1, 8, 32, 64, 1, 4), (4096, 4096, 1024, 16, 16): (1, 8, 16, 64, 3, 1), (4096, 4096, 1024, 32, 32): (1, 16, 32, 32, 1, 1), (4096, 4096, 1024, 64, 64): (1, 16, 64, 32, 1, 2), (4096, 4096, 1024, 128, 128): (1, 16, 32, 64, 1, 4), (4096, 4096, 2048, 16, 16): (1, 16, 16, 64, 3, 1), (4096, 4096, 2048, 32, 32): (1, 16, 32, 32, 1, 1), (4096, 4096, 2048, 64, 64): (3, 16, 64, 32, 1, 2), (4096, 4096, 2048, 128, 128): (4, 8, 32, 64, 1, 4), (4096, 4096, 4096, 16, 16): (1, 8, 16, 64, 3, 1), (4096, 4096, 4096, 32, 32): (1, 1, 32, 32, 1, 1), (4096, 4096, 4096, 64, 64): (2, 16, 64, 32, 1, 2), (4096, 4096, 4096, 128, 128): (4, 8, 32, 64, 1, 4), (4096, 4096, 8192, 16, 16): (1, 8, 16, 64, 3, 1), (4096, 4096, 8192, 32, 32): (2, 1, 32, 32, 1, 1), (4096, 4096, 8192, 64, 64): (1, 16, 64, 32, 1, 2), (4096, 4096, 8192, 128, 128): (2, 1, 32, 64, 1, 4), (4096, 4096, 16384, 16, 16): (1, 8, 16, 64, 3, 1), (4096, 4096, 16384, 32, 32): (1, 1, 32, 32, 1, 1), (4096, 4096, 16384, 64, 64): (2, 8, 64, 32, 1, 2), (4096, 4096, 16384, 128, 128): (2, 1, 32, 64, 1, 4), (4096, 4096, 32768, 16, 16): (1, 8, 16, 64, 3, 1), (4096, 4096, 32768, 32, 32): (1, 1, 32, 32, 1, 1), (4096, 4096, 32768, 64, 64): (1, 8, 64, 32, 1, 2), (4096, 4096, 32768, 128, 128): (2, 1, 32, 64, 1, 4), (4096, 4096, 65536, 16, 16): (1, 8, 16, 64, 3, 1), (4096, 4096, 65536, 32, 32): (3, 1, 32, 32, 1, 1), (4096, 4096, 65536, 64, 64): (3, 4, 64, 32, 1, 2), (4096, 4096, 65536, 128, 128): (2, 1, 32, 64, 1, 4), (4096, 4096, 131072, 16, 16): (1, 8, 16, 64, 3, 1), (4096, 4096, 131072, 32, 32): (1, 1, 32, 32, 1, 1), (4096, 4096, 131072, 64, 64): (2, 8, 64, 32, 1, 2), (4096, 4096, 131072, 128, 128): (1, 8192, 128, 16, 1, 8), (8192, 8192, 256, 16, 16): (2, 4, 16, 64, 3, 1), (8192, 8192, 256, 32, 32): (1, 8, 32, 32, 1, 1), (8192, 8192, 256, 64, 64): (1, 4, 64, 64, 1, 4), (8192, 8192, 256, 128, 128): (1, 4, 32, 64, 1, 4), (8192, 8192, 512, 16, 16): (1, 4, 16, 64, 3, 1), (8192, 8192, 512, 32, 32): (1, 16, 32, 32, 1, 1), (8192, 8192, 512, 64, 64): (2, 4, 64, 64, 1, 4), (8192, 8192, 512, 128, 128): (2, 1, 32, 64, 1, 4), (8192, 8192, 1024, 16, 16): (3, 8, 16, 64, 3, 1), (8192, 8192, 1024, 32, 32): (1, 16, 32, 32, 1, 1), (8192, 8192, 1024, 64, 64): (1, 8, 64, 32, 1, 2), (8192, 8192, 1024, 128, 128): (2, 4, 32, 64, 1, 4), (8192, 8192, 2048, 16, 16): (1, 8, 16, 64, 3, 1), 
(8192, 8192, 2048, 32, 32): (1, 16, 32, 32, 1, 1), (8192, 8192, 2048, 64, 64): (2, 8, 64, 32, 1, 2), (8192, 8192, 2048, 128, 128): (4, 1, 32, 64, 1, 4), (8192, 8192, 4096, 16, 16): (1, 8, 16, 64, 3, 1), (8192, 8192, 4096, 32, 32): (1, 16, 32, 32, 1, 1), (8192, 8192, 4096, 64, 64): (1, 4, 64, 32, 1, 2), (8192, 8192, 4096, 128, 128): (3, 1, 32, 64, 1, 4), (8192, 8192, 8192, 16, 16): (1, 8, 16, 64, 3, 1), (8192, 8192, 8192, 32, 32): (1, 8, 32, 32, 1, 1), (8192, 8192, 8192, 64, 64): (1, 8, 64, 32, 1, 2), (8192, 8192, 8192, 128, 128): (4, 1, 32, 64, 1, 4), (8192, 8192, 16384, 16, 16): (3, 4, 16, 64, 3, 1), (8192, 8192, 16384, 32, 32): (1, 8, 32, 32, 1, 1), (8192, 8192, 16384, 64, 64): (2, 2, 64, 32, 1, 2), (8192, 8192, 16384, 128, 128): (7, 1, 32, 64, 1, 4), (8192, 8192, 32768, 16, 16): (1, 4, 16, 64, 3, 1), (8192, 8192, 32768, 32, 32): (1, 8, 32, 32, 1, 1), (8192, 8192, 32768, 64, 64): (3, 2, 64, 32, 1, 2), (8192, 8192, 32768, 128, 128): (6, 1, 32, 64, 1, 4), (8192, 8192, 65536, 16, 16): (1, 4, 16, 64, 3, 1), (8192, 8192, 65536, 32, 32): (4, 8, 32, 32, 1, 1), (8192, 8192, 65536, 64, 64): (1, 2, 64, 32, 1, 2), (8192, 8192, 65536, 128, 128): (4, 1, 32, 64, 1, 4), (8192, 8192, 131072, 16, 16): (1, 4, 16, 64, 3, 1), (8192, 8192, 131072, 32, 32): (1, 8, 32, 32, 1, 1), (8192, 8192, 131072, 64, 64): (5, 4, 64, 32, 1, 2), (8192, 8192, 131072, 128, 128): (1, 4096, 128, 16, 1, 8), (16384, 16384, 256, 16, 16): (1, 4, 16, 64, 3, 1), (16384, 16384, 256, 32, 32): (1, 8, 32, 32, 1, 1), (16384, 16384, 256, 64, 64): (1, 4, 64, 32, 1, 2), (16384, 16384, 256, 128, 128): (1, 4, 32, 64, 1, 4), (16384, 16384, 512, 16, 16): (1, 8, 16, 64, 3, 1), (16384, 16384, 512, 32, 32): (1, 16, 32, 32, 1, 1), (16384, 16384, 512, 64, 64): (1, 4, 64, 32, 1, 2), (16384, 16384, 512, 128, 128): (3, 1, 32, 64, 1, 4), (16384, 16384, 1024, 16, 16): (1, 8, 16, 64, 3, 1), (16384, 16384, 1024, 32, 32): (1, 16, 32, 32, 1, 1), (16384, 16384, 1024, 64, 64): (2, 4, 64, 32, 1, 2), (16384, 16384, 1024, 128, 128): (1, 2, 32, 64, 1, 4), (16384, 16384, 2048, 16, 16): (1, 4, 16, 64, 3, 1), (16384, 16384, 2048, 32, 32): (1, 16, 32, 32, 1, 1), (16384, 16384, 2048, 64, 64): (3, 4, 64, 32, 1, 2), (16384, 16384, 2048, 128, 128): (2, 1, 32, 64, 1, 4), (16384, 16384, 4096, 16, 16): (4, 8, 16, 64, 3, 1), (16384, 16384, 4096, 32, 32): (5, 16, 32, 32, 1, 1), (16384, 16384, 4096, 64, 64): (3, 2, 64, 32, 1, 2), (16384, 16384, 4096, 128, 128): (2, 1, 32, 64, 1, 4), (16384, 16384, 8192, 16, 16): (1, 4, 16, 64, 3, 1), (16384, 16384, 8192, 32, 32): (1, 4, 32, 32, 1, 1), (16384, 16384, 8192, 64, 64): (1, 2, 64, 32, 1, 2), (16384, 16384, 8192, 128, 128): (2, 1, 32, 64, 1, 4), (16384, 16384, 16384, 16, 16): (1, 8, 16, 64, 3, 1), (16384, 16384, 16384, 32, 32): (1, 4, 32, 32, 1, 1), (16384, 16384, 16384, 64, 64): (1, 2, 64, 32, 1, 2), (16384, 16384, 16384, 128, 128): (3, 1, 32, 64, 1, 4), (16384, 16384, 32768, 16, 16): (1, 4, 16, 64, 3, 1), (16384, 16384, 32768, 32, 32): (1, 2, 32, 32, 1, 1), (16384, 16384, 32768, 64, 64): (3, 2, 64, 32, 1, 2), (16384, 16384, 32768, 128, 128): (3, 1, 32, 64, 1, 4), (16384, 16384, 65536, 16, 16): (1, 8, 16, 64, 3, 1), (16384, 16384, 65536, 32, 32): (1, 4, 32, 32, 1, 1), (16384, 16384, 65536, 64, 64): (4, 4, 64, 32, 1, 2), (16384, 16384, 65536, 128, 128): (5, 1, 32, 64, 1, 4), (16384, 16384, 131072, 16, 16): (1, 2, 16, 64, 3, 1), (16384, 16384, 131072, 32, 32): (1, 4, 32, 32, 1, 1), (16384, 16384, 131072, 64, 64): (1, 2, 64, 32, 1, 2), (16384, 16384, 131072, 128, 128): (1, 4096, 128, 16, 1, 8), }, # END GENERATED DATA } if __name__ == 
"__main__": for dtype in [torch.int8]: for op in ["_int_bsr_dense_addmm"]: main(op=op, force=False, dtype=dtype) for dtype in [torch.float16, torch.bfloat16, torch.float32, torch.int8]: for op in ["bsr_dense_addmm"]: main(op=op, force=False, dtype=dtype) ```
======================================================================================================================= SOURCE CODE FILE: semi_structured.py LINES: 1 SIZE: 28.02 KB PATH: scripts\freecad_env\Lib\site-packages\torch\sparse\semi_structured.py ENCODING: utf-8 ```py # mypy: allow-untyped-defs import warnings from collections import namedtuple from typing import Any, Callable, Optional import torch from torch.sparse._semi_structured_conversions import ( sparse_semi_structured_from_dense_cutlass, sparse_semi_structured_to_dense_cutlass, ) from torch.sparse._semi_structured_ops import ( fallback_dispatcher, semi_sparse_addmm, semi_sparse_detach, semi_sparse_indices, semi_sparse_linear, semi_sparse_mm, semi_sparse_scaled_mm, semi_sparse_t, semi_sparse_values, semi_sparse_view, ) __all__ = [ "SparseSemiStructuredTensor", "SparseSemiStructuredTensorCUTLASS", "SparseSemiStructuredTensorCUSPARSELT", "to_sparse_semi_structured", ] _SEMI_STRUCTURED_SPARSE_CONFIG = namedtuple( "_SEMI_STRUCTURED_SPARSE_CONFIG", "sparse_min_rows sparse_min_cols dense_min_rows dense_min_cols", ) class SparseSemiStructuredTensor(torch.Tensor): """ This class implementes semi-structured sparsity as a Tensor subclass. Semi-structured sparsity describes a sparsity pattern where n in every 2n elements are sparse, depending on the datatype. It is also referred to as 2:4 sparsity or fine-grained structured sparsity. There are two backends available for semi_structred sparsity, either cuSPARSELt or CUTLASS. This class is meant to serve as a base class for both implementations. SparseSemiStructuredCUTLASS and SparseSemiStructuredCUSPARSELT both inherit from this class and define three backend-specific items. Note that as such, this class cannot be insantiated directly. -`_DTYPE_SHAPE_CONSTRAINTS` - A dictionary holding backend specific dense/sparse min shape constraints - `def from_dense()` - backend specific compression routines - `def _mm()` - backend specifc mm op (either torch._cslt_sparse_mm or torch._sparse_semi_structured_(mm|addmm)) """ _DEFAULT_ALG_ID: int = 0 _DTYPE_SHAPE_CONSTRAINTS: dict[torch.dtype, _SEMI_STRUCTURED_SPARSE_CONFIG] _FORCE_CUTLASS: bool = False _FUSE_TRANSPOSE: bool = False _PROTOTYPE_WARNING_SHOWN: bool = False BACKEND: str SPARSE_DISPATCH: dict[Callable, Callable] packed: Optional[torch.Tensor] meta: Optional[torch.Tensor] packed_t: Optional[torch.Tensor] meta_t: Optional[torch.Tensor] compressed_swizzled_bitmask: Optional[torch.Tensor] fuse_transpose_cusparselt: bool alg_id_cusparselt: int __slots__ = ["packed", "meta", "packed_t", "meta_t", "compressed_swizzled_bitmask"] @staticmethod def __new__( # noqa: PYI034 cls, shape: torch.Size, packed: Optional[torch.Tensor], meta: Optional[torch.Tensor], packed_t: Optional[torch.Tensor], meta_t: Optional[torch.Tensor], compressed_swizzled_bitmask: Optional[torch.Tensor], fuse_transpose_cusparselt: bool = False, alg_id_cusparselt: int = 0, requires_grad: bool = False, ): """ Create a new instance of the tensor subclass from the compressed sparse representation. We have the option to create the subclass with the compressed representations of both X and X', for training. For inference, we only need a single representation (either X or X'), while the corresponding other set will be None. Depending on the backend selected, certain fields will be set to None. 
(CUSPARSELT vs CUTLASS) Args: shape: The shape of the original dense tensor packed: The compressed representation of the original dense tensor meta: The metadata of the original dense tensor, if it is stored separately packed_t: The compressed representation of the transposed original dense tensor meta_t: The metadata of the transposed original dense tensor, if it is stored separately compressed_swizzled_bitmask: The masks used by the CUTLASS backend to determine which threads should participate in the computation. Used for pointwise ops. fuse_transpose_cusparselt: When running with cuSPARSELt, we have the option to fuse a transposition with a matmul, which is useful in the case of 2:4 sparse training. alg_id_cusparselt: The algorithm id to use when using cuSPARSELT, will have effect on performance Returns: torch.Tensor: A torch.Tensor wrapper subclass. Raises: ValueError: If all of the tensor arguments are None. """ if not cls._PROTOTYPE_WARNING_SHOWN: warnings.warn( ( "The PyTorch API of SparseSemiStructuredTensor is in prototype stage " "and will change in the near future. Please open a Github issue " "for features requests and see our documentation on the torch.sparse " "module for further information about the project." ), UserWarning, ) cls._PROTOTYPE_WARNING_SHOWN = True # Because this only runs onces, we also load the dispatch table here as well. # We can't define the dispatch table explicitly because of torch.ops import errors, so we do this instead # But this is useful since it allows users to overload the dispatch table for debugging / testing. cls._load_dispatch_table() # we can also register the classes with dynamo when the warning is shown. torch._dynamo.allow_in_graph(cls) if packed is not None: previous_tensor = packed elif packed_t is not None: previous_tensor = packed_t else: raise ValueError("At least one of packed or packed_t must be provided") kwargs = { "device": previous_tensor.device, "dtype": previous_tensor.dtype, "layout": previous_tensor.layout, "requires_grad": requires_grad, } tensor = torch.Tensor._make_wrapper_subclass(cls, shape, **kwargs) # type: ignore[attr-defined] tensor.packed = packed tensor.meta = meta tensor.packed_t = packed_t tensor.meta_t = meta_t tensor.compressed_swizzled_bitmask = compressed_swizzled_bitmask tensor.fuse_transpose_cusparselt = fuse_transpose_cusparselt tensor.alg_id_cusparselt = alg_id_cusparselt return tensor def __repr__(self) -> str: # type: ignore[override] assert hasattr(self, "shape") return f"{self.__class__.__name__}(shape={self.shape})" def __tensor_flatten__( self, ) -> tuple[list[str], tuple[torch.Size, bool, int, bool]]: inner_tensors = list( filter(lambda x: getattr(self, x) is not None, self.__slots__) ) tensor_meta = ( self.shape, self.fuse_transpose_cusparselt, self.alg_id_cusparselt, self.requires_grad, ) return inner_tensors, tensor_meta @classmethod def __tensor_unflatten__( cls, inner_tensors, tensor_meta: tuple[torch.Size, bool, int, bool], outer_size, outer_stride, ) -> torch.Tensor: shape, fuse_transpose_cusparselt, alg_id_cusparselt, requires_grad = tensor_meta return cls( shape=shape, packed=inner_tensors.get("packed", None), meta=inner_tensors.get("meta", None), packed_t=inner_tensors.get("packed_t", None), meta_t=inner_tensors.get("meta_t", None), compressed_swizzled_bitmask=inner_tensors.get( "compressed_swizzled_bitmask", None ), fuse_transpose_cusparselt=fuse_transpose_cusparselt, alg_id_cusparselt=alg_id_cusparselt, requires_grad=requires_grad, ) __torch_function__ = 
torch._C._disabled_torch_function_impl @classmethod def __torch_dispatch__(cls, func, types, args, kwargs) -> Any: if func._overloadpacket not in cls.SPARSE_DISPATCH: raise NotImplementedError( f"{cls.__name__} only supports a specific set of operations, " f"can't perform requested op ({func.__name__})" ) return cls.SPARSE_DISPATCH[func._overloadpacket](func, types, args, kwargs) @classmethod def _load_dispatch_table(cls, custom_dispatch_table=None) -> None: """ Loads the op overload sparse dispatch table for the current class. """ if getattr(cls, "SPARSE_DISPATCH", None) is None: cls.SPARSE_DISPATCH = { torch.ops.aten.values: semi_sparse_values, torch.ops.aten.indices: semi_sparse_indices, torch.ops.aten.is_same_size: fallback_dispatcher, torch.ops.aten.detach_: fallback_dispatcher, torch.ops.aten.detach: semi_sparse_detach, torch.ops.aten.t: semi_sparse_t, torch.ops.aten.view: semi_sparse_view, torch.ops.aten.mm: semi_sparse_mm, torch.ops.aten.matmul: semi_sparse_mm, torch.ops.aten.addmm: semi_sparse_addmm, torch.ops.aten.linear: semi_sparse_linear, torch.ops.aten._to_copy: fallback_dispatcher, torch.ops.aten._scaled_mm: semi_sparse_scaled_mm, } if custom_dispatch_table is not None: cls.SPARSE_DISPATCH.update(custom_dispatch_table) @classmethod def _validate_device_dim_dtype_shape(cls, original_tensor: torch.Tensor) -> None: """ Assert that the given tensor is valid for semi-structured sparse compression. """ # check device if not original_tensor.is_cuda: raise RuntimeError( f"Error original_tensor.device= {original_tensor.device} is not supported! " "Only CUDA tensors are currently supported." ) # check dim if original_tensor.dim() != 2: raise RuntimeError( f"Error original_tensor.dim = {original_tensor.dim()} is not supported! " "Only 2d tensors are currently supported." ) # check contiguous if not original_tensor.is_contiguous(): raise RuntimeError( "Error original_tensor is not contiguous!" "Only contiguous tensors are currently supported." ) # check dtype if original_tensor.dtype not in cls._DTYPE_SHAPE_CONSTRAINTS: raise RuntimeError( f"Error original_tensor.dtype {original_tensor.dtype} is not a supported dtype for {cls}!" ) # check shape m, n = original_tensor.shape min_rows = cls._DTYPE_SHAPE_CONSTRAINTS[original_tensor.dtype].sparse_min_rows min_cols = cls._DTYPE_SHAPE_CONSTRAINTS[original_tensor.dtype].sparse_min_cols if m < min_rows or m % min_rows or n < min_cols or n % min_cols: # TODO in the future we can add in padding to support sparse dimensions that aren't perfect multiples raise RuntimeError( f"Error original_tensor.shape {original_tensor.shape} is not supported! " f"Both dimensions must be larger or equal than and a multiple of ({min_rows}, {min_cols})" ) @classmethod def _pad_dense_input(cls, dense_input: torch.Tensor) -> torch.Tensor: """ Calculates padding for dense tensor and pads tensor if necessary. If padding is not required, this function returns the original tensor. 
""" # only 2d matmul assert dense_input.dim() == 2 # check shape m, n = dense_input.shape min_rows = cls._DTYPE_SHAPE_CONSTRAINTS[dense_input.dtype].dense_min_rows min_cols = cls._DTYPE_SHAPE_CONSTRAINTS[dense_input.dtype].dense_min_cols # calculate padding to_pad_m = -m % min_rows if m < min_rows or m % min_rows else 0 to_pad_n = -n % min_cols if n < min_cols or n % min_rows else 0 if to_pad_m or to_pad_n: return torch.nn.functional.pad(dense_input, (0, to_pad_n, 0, to_pad_m)) else: return dense_input def to_dense(self): # type:ignore[override] col = self.shape[-1] return torch.mm(self, torch.eye(col, dtype=self.dtype, device=self.device)) @classmethod def from_dense(cls, original_tensor: torch.Tensor) -> "SparseSemiStructuredTensor": raise NotImplementedError def _mm( self, B: torch.Tensor, *, bias: Optional[torch.Tensor] = None, **kwargs, ) -> torch.Tensor: raise NotImplementedError def to_sparse_semi_structured( original_tensor: torch.Tensor, transposed: bool = False, ) -> SparseSemiStructuredTensor: """ This function converts a dense tensor into a sparse semi-structured tensor. It will return a SparseSemiStructuredTensor, a subclass of torch.Tensor. This function will check to ensure the dense tensor has the right dtype, size, dims, and device. We currently only support semi-structured sparse tensors for 2d CUDA tensors. Additionally, your tensor must be a positive multiple of the mininum sparse block size, given in `_DTYPE_TO_SHAPE_CONSTRAINTS` for each dtype (float32, float16, bfloat16, int8). Args: original_tensor (Tensor): the dense tensor to convert transposed (bool, optional): deprecated arg to be removed in another release. Do not use. Returns: SparseSemiStructuredTensor: A sparse semi-structured tensor created from the given original_tensor Raises: None Example: >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA) >>> A = torch.Tensor([0, 0, 1, 1]).tile((128, 32)).half().cuda() tensor([[0., 0., 1., ..., 0., 1., 1.], [0., 0., 1., ..., 0., 1., 1.], [0., 0., 1., ..., 0., 1., 1.], ..., [0., 0., 1., ..., 0., 1., 1.], [0., 0., 1., ..., 0., 1., 1.], [0., 0., 1., ..., 0., 1., 1.]], device='cuda:0', dtype=torch.float16) >>> A_sparse = to_sparse_semi_structured(A) SparseSemiStructuredTensor(shape=torch.Size([128, 128])) >>> A_sparse.values() tensor([[1., 1., 1., ..., 1., 1., 1.], [1., 1., 1., ..., 1., 1., 1.], [1., 1., 1., ..., 1., 1., 1.], ..., [1., 1., 1., ..., 1., 1., 1.], [1., 1., 1., ..., 1., 1., 1.], [1., 1., 1., ..., 1., 1., 1.]], device='cuda:0', dtype=torch.float16), >>> A_sparse.indices() tensor([[-4370, -4370, -4370, ..., -4370, -4370, -4370], [-4370, -4370, -4370, ..., -4370, -4370, -4370], [-4370, -4370, -4370, ..., -4370, -4370, -4370], ..., [-4370, -4370, -4370, ..., -4370, -4370, -4370], [-4370, -4370, -4370, ..., -4370, -4370, -4370], [-4370, -4370, -4370, ..., -4370, -4370, -4370]], device='cuda:0', dtype=torch.int16)) """ if transposed: warnings.warn( "Setting transpose from `to_sparse_semi_structured` is deprecated " "and will be removed in a future release. " "`SparseSemiStructuredTensor` only support contiguous input tensors.", FutureWarning, stacklevel=2, ) # set from _FORCE_CUTLASS flag SPARSE_SUBCLASS = ( torch.sparse.SparseSemiStructuredTensorCUTLASS if SparseSemiStructuredTensor._FORCE_CUTLASS else torch.sparse.SparseSemiStructuredTensorCUSPARSELT ) return SPARSE_SUBCLASS.from_dense(original_tensor) class SparseSemiStructuredTensorCUTLASS(SparseSemiStructuredTensor): """ This class implements semi-structured sparsity for the CUTLASS backend. 
In this implementation, the specified elements and metadata are stored seprately, in packed and meta respectively. When _FORCE_CUTLASS is set, or when cuSPARSELt is not available, this subclass calls into _sparse_semi_structured_(mm|addmm) and sparse_semi_structured_from_dense for conversion to the compressed format. """ BACKEND = "cutlass" _DTYPE_SHAPE_CONSTRAINTS = { torch.int8: _SEMI_STRUCTURED_SPARSE_CONFIG(16, 128, 16, 16), torch.float16: _SEMI_STRUCTURED_SPARSE_CONFIG(32, 64, 8, 8), torch.bfloat16: _SEMI_STRUCTURED_SPARSE_CONFIG(32, 64, 8, 8), torch.float32: _SEMI_STRUCTURED_SPARSE_CONFIG(32, 32, 4, 4), } @classmethod def from_dense( cls, original_tensor: torch.Tensor ) -> "SparseSemiStructuredTensorCUTLASS": cls._validate_device_dim_dtype_shape(original_tensor) ( sparse_tensor_cutlass, meta_tensor_cutlass, ) = sparse_semi_structured_from_dense_cutlass(original_tensor) return cls( original_tensor.shape, packed=sparse_tensor_cutlass, meta=meta_tensor_cutlass, packed_t=None, meta_t=None, compressed_swizzled_bitmask=None, requires_grad=original_tensor.requires_grad, ) def to_dense(self): # type: ignore[override] assert self.meta is not None and self.packed is not None return ( sparse_semi_structured_to_dense_cutlass( self.packed, self.meta, ) if self.meta.ndim == 2 else super().to_dense() ) @classmethod def prune_dense_static_sort( cls, original_tensor: torch.Tensor, algorithm="" ) -> "SparseSemiStructuredTensor": """ This function takes in a unpruned dense tensor and runs a (branchless) static sort across a 4x4 tile. It greedily picks the largest values in the tile, upholding the 2:4 sparsity constraint across both rows and columns. The algorithm used to prune the matrix is implemented in `_sparse_semi_structured_tile`. Then it creates the packed and meta tensors for the compressed sparse representation of the pruned dense tensor. It also calculates the packed_t and meta_t tensors for the compressed sparse representation of the transposed pruned dense tensor. Since we cannot transpose the compressed representations, we store both for the fw/bw pass respectively. Finally, this function also computes a compressed swizzled bitmask that encodes the sparsity pattern This can be used in the backward pass to mask the gradients. [9 1 7 4] [9 0 7 0] [1 2 3 0] [0 2 0 0] [8 3 5 4] -> prune 4x4 tile -> [8 0 0 4] -> pack to CUTLASS semi-structured -> packed [1 2 6 2] [0 0 6 2] -> metadata -> pack to transposed CUTLASS -> packed_t semi-structured representation -> metadata_t -> compute swizzled bitmask -> compressed_swizzled_bitmask The equivalent PyTorch code to create the same five outputs from the dense tensor can be found below: ``` from torch.sparse import SparseSemiStructuredTensorCUTLASS from torch.sparse._semi_structured_conversions import _sparse_semi_structured_tile, _compute_compressed_swizzled_bitmask pruned = _sparse_semi_structured_tile(dense) packed_cutlass, meta_cutlass = sparse_semi_structured_from_dense_cutlass(pruned) packed_t_cutlass, meta_t_cutlass = sparse_semi_structured_from_dense_cutlass(pruned.t().contiguous()) bitmask = _compute_compressed_swizzled_bitmask(pruned) SparseSemiStructuredTensorCUTLASS(dense.shape, packed_cutlass, meta_cutlass, packed_t_cutlass, meta_t_cutlass, bitmask) ``` """ # We can either pack to the CUTLASS or cuSPARSELt representation, depending on the use_cutlass flag. 
( packed, meta, packed_t, meta_t, compressed_swizzled_bitmask, ) = torch._sparse_semi_structured_tile( original_tensor, algorithm=algorithm, use_cutlass=True ) return cls( original_tensor.shape, packed=packed, meta=meta, packed_t=packed_t, meta_t=meta_t, compressed_swizzled_bitmask=compressed_swizzled_bitmask, requires_grad=False, ) def _mm( self, B: torch.Tensor, *, bias: Optional[torch.Tensor] = None, **kwargs ) -> torch.Tensor: if isinstance(B, SparseSemiStructuredTensor): raise ValueError( "`SparseSemiStructuredTensor @ SparseSemiStructuredTensor` is not supported by the hardware" ) cls_name = self.__class__.__name__ if self.ndim != 2 or B.ndim != 2: raise NotImplementedError( f"`{cls_name}` matmul: Broadcasting is not implemented" ) if self.packed is None or self.meta is None: raise NotImplementedError( f"`{cls_name}` matmul: operation is not supported" ) else: if bias is None: res = torch._sparse_semi_structured_mm(self.packed, self.meta, B) else: res = torch._sparse_semi_structured_addmm( bias, self.packed, self.meta, B ) return res[: self.shape[0]] class SparseSemiStructuredTensorCUSPARSELT(SparseSemiStructuredTensor): """ The cuSPARSELt backend expects the specified elements and the metadata to be stored in a single tensor: packed = [ specified elements of original tensor | metadata ] For an original tensor of size (m, k) we expect the first m * k // 2 elements to be the kept elements The rest of the tensor is metadata. Since there is only one tensor, we only use the packed and packed_t attributes respectively. cuSPARSELt also supports transposition fusion, which is necessary for performant 2:4 sparse training, as well as specifying alg_id, a config that affects the performance of the matmul depending on matmul sizes. """ BACKEND = "cusparselt" _DTYPE_SHAPE_CONSTRAINTS = { torch.float8_e4m3fn: _SEMI_STRUCTURED_SPARSE_CONFIG(32, 32, 16, 16), torch.int8: _SEMI_STRUCTURED_SPARSE_CONFIG(32, 32, 16, 16), torch.float16: _SEMI_STRUCTURED_SPARSE_CONFIG(16, 16, 8, 8), torch.bfloat16: _SEMI_STRUCTURED_SPARSE_CONFIG(16, 16, 8, 8), } @classmethod def from_dense( cls, original_tensor: torch.Tensor ) -> "SparseSemiStructuredTensorCUSPARSELT": cls._validate_device_dim_dtype_shape(original_tensor) return cls( shape=original_tensor.shape, packed=torch._cslt_compress(original_tensor), meta=None, packed_t=None, meta_t=None, compressed_swizzled_bitmask=None, fuse_transpose_cusparselt=SparseSemiStructuredTensor._FUSE_TRANSPOSE, alg_id_cusparselt=SparseSemiStructuredTensor._DEFAULT_ALG_ID, requires_grad=original_tensor.requires_grad, ) @classmethod def prune_dense_static_sort( cls, original_tensor: torch.Tensor, algorithm="" ) -> "SparseSemiStructuredTensor": """ This function does the same thing as described in SparseSemiStructuredCUTLASS, but uses the cuSPASRELt metadata layout and sparse matmul. The only functional difference is that cuSPARSELt stores `metadata` and `packed` together into a single tensor. 
[9 1 7 4] [9 0 7 0] [1 2 3 0] [0 2 0 0] [8 3 5 4] -> prune 4x4 tile -> [8 0 0 4] -> pack to cuSPARSELT semi-structured -> packed [1 2 6 2] [0 0 6 2] -> pack to transposed cuSPARSELt -> packed_t semi-structured representation -> compute swizzled bitmask -> compressed_swizzled_bitmask The equivalent PyTorch code to create the same three outputs from the dense tensor can be found below: ``` from torch.sparse import SparseSemiStructuredTensorCUSPARSELT from torch.sparse._semi_structured_conversions import _sparse_semi_structured_tile, _compute_compressed_swizzled_bitmask pruned = _sparse_semi_structured_tile(dense) packed_cusparselt = torch._cslt_compress(pruned) packed_t_cusparselt = torch._cslt_compress(pruned.t().contiguous()) bitmask = _compute_compressed_swizzled_bitmask(pruned) SparseSemiStructuredTensorCUSPARSELT(dense.shape, packed_cutlass, None, packed_t_cutlass, None, bitmask) ``` """ ( packed, meta, packed_t, meta_t, compressed_swizzled_bitmask, ) = torch._sparse_semi_structured_tile( original_tensor, algorithm=algorithm, use_cutlass=False ) return cls( original_tensor.shape, packed=packed, meta=meta, packed_t=packed_t, meta_t=meta_t, compressed_swizzled_bitmask=compressed_swizzled_bitmask, requires_grad=False, ) def _mm( self, B: torch.Tensor, *, bias: Optional[torch.Tensor] = None, **kwargs ) -> torch.Tensor: if isinstance(B, SparseSemiStructuredTensor): raise ValueError( "`SparseSemiStructuredTensor @ SparseSemiStructuredTensor` is not supported by the hardware" ) if self.ndim != 2 or B.ndim != 2: raise NotImplementedError( f"`{self.__class__.__name__}` matmul: Broadcasting is not implemented" ) if B.dtype != self.dtype: raise NotImplementedError( f"`{self.__class__.__name__}` matmul: trying to do `A={tuple(self.shape)} @ B={tuple(B.shape)}`, " f"with A.dtype={self.dtype} and B.dtype={B.dtype}. " "This operation is only supported when A and B have the same data type." ) if bias is not None and bias.dtype != self.dtype: raise NotImplementedError( f"`{self.__class__.__name__}` matmul: trying to do `A={tuple(self.shape)} @ B={tuple(B.shape)} + C`, " f"with A.dtype=B.dtype={self.dtype} and C.dtype={B.dtype}. " "This operation is only supported when A, B and C have the same data type." ) # Force fp8 mm to error to be consistent with torch if self.dtype == torch.float8_e4m3fn: raise NotImplementedError( f"`{self.__class__.__name__}` matmul: trying to do `A={tuple(self.shape)} @ B={tuple(B.shape)}`, " f"with A.dtype=B.dtype={self.dtype}. " "mm is not supported for float8_e4m3fn, please use `torch._scaled_mm` instead." ) if self.packed is None: raise NotImplementedError( f"`{self.__class__.__name__}` matmul: operation is not supported" ) else: res = torch._cslt_sparse_mm( self.packed, B, bias=bias, transpose_result=self.fuse_transpose_cusparselt, alg_id=self.alg_id_cusparselt, ) return res.t() if self.fuse_transpose_cusparselt else res ```
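A minimal end-to-end sketch following the docstring example of `to_sparse_semi_structured` above. It assumes a CUDA GPU with a supported backend (cuSPARSELt by default, or CUTLASS when `_FORCE_CUTLASS` is set) and an input that already satisfies the 2:4 pattern and the shape constraints in `_DTYPE_SHAPE_CONSTRAINTS`:

```py
import torch
from torch.sparse import SparseSemiStructuredTensor, to_sparse_semi_structured

# Optional: route the conversion through the CUTLASS subclass instead of cuSPARSELt.
# SparseSemiStructuredTensor._FORCE_CUTLASS = True

# Dense fp16 matrix that already follows the 2:4 pattern (2 non-zeros per group of 4).
A = torch.tensor([0, 0, 1, 1]).tile((128, 32)).half().cuda()
A_sparse = to_sparse_semi_structured(A)  # SparseSemiStructuredTensor(shape=torch.Size([128, 128]))

# mm / matmul / addmm / linear on the subclass are routed through SPARSE_DISPATCH.
B = torch.rand(128, 128).half().cuda()
out = torch.mm(A_sparse, B)
print(out.shape)  # torch.Size([128, 128])
```

For 2:4 sparse training, `prune_dense_static_sort` additionally builds `packed_t` / `meta_t` (and, for CUTLASS, the compressed swizzled bitmask) so that both the forward and backward matmuls can use the compressed representation.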
================================================================================================================= SOURCE CODE FILE: __init__.py LINES: 1 SIZE: 33.47 KB PATH: scripts\freecad_env\Lib\site-packages\torch\special\__init__.py ENCODING: utf-8 ```py import torch from torch._C import _add_docstr, _special # type: ignore[attr-defined] from torch._torch_docs import common_args, multi_dim_common __all__ = [ "airy_ai", "bessel_j0", "bessel_j1", "bessel_y0", "bessel_y1", "chebyshev_polynomial_t", "chebyshev_polynomial_u", "chebyshev_polynomial_v", "chebyshev_polynomial_w", "digamma", "entr", "erf", "erfc", "erfcx", "erfinv", "exp2", "expit", "expm1", "gammainc", "gammaincc", "gammaln", "hermite_polynomial_h", "hermite_polynomial_he", "i0", "i0e", "i1", "i1e", "laguerre_polynomial_l", "legendre_polynomial_p", "log1p", "log_ndtr", "log_softmax", "logit", "logsumexp", "modified_bessel_i0", "modified_bessel_i1", "modified_bessel_k0", "modified_bessel_k1", "multigammaln", "ndtr", "ndtri", "polygamma", "psi", "round", "shifted_chebyshev_polynomial_t", "shifted_chebyshev_polynomial_u", "shifted_chebyshev_polynomial_v", "shifted_chebyshev_polynomial_w", "scaled_modified_bessel_k0", "scaled_modified_bessel_k1", "sinc", "softmax", "spherical_bessel_j0", "xlog1py", "xlogy", "zeta", ] Tensor = torch.Tensor entr = _add_docstr( _special.special_entr, r""" entr(input, *, out=None) -> Tensor Computes the entropy on :attr:`input` (as defined below), elementwise. .. math:: \begin{align} \text{entr(x)} = \begin{cases} -x * \ln(x) & x > 0 \\ 0 & x = 0.0 \\ -\infty & x < 0 \end{cases} \end{align} """ + """ Args: input (Tensor): the input tensor. Keyword args: out (Tensor, optional): the output tensor. Example:: >>> a = torch.arange(-0.5, 1, 0.5) >>> a tensor([-0.5000, 0.0000, 0.5000]) >>> torch.special.entr(a) tensor([ -inf, 0.0000, 0.3466]) """, ) psi = _add_docstr( _special.special_psi, r""" psi(input, *, out=None) -> Tensor Alias for :func:`torch.special.digamma`. """, ) digamma = _add_docstr( _special.special_digamma, r""" digamma(input, *, out=None) -> Tensor Computes the logarithmic derivative of the gamma function on `input`. .. math:: \digamma(x) = \frac{d}{dx} \ln\left(\Gamma\left(x\right)\right) = \frac{\Gamma'(x)}{\Gamma(x)} """ + r""" Args: input (Tensor): the tensor to compute the digamma function on Keyword args: {out} .. note:: This function is similar to SciPy's `scipy.special.digamma`. .. note:: From PyTorch 1.8 onwards, the digamma function returns `-Inf` for `0`. Previously it returned `NaN` for `0`. Example:: >>> a = torch.tensor([1, 0.5]) >>> torch.special.digamma(a) tensor([-0.5772, -1.9635]) """.format( **common_args ), ) gammaln = _add_docstr( _special.special_gammaln, r""" gammaln(input, *, out=None) -> Tensor Computes the natural logarithm of the absolute value of the gamma function on :attr:`input`. .. math:: \text{out}_{i} = \ln \Gamma(|\text{input}_{i}|) """ + """ Args: {input} Keyword args: {out} Example:: >>> a = torch.arange(0.5, 2, 0.5) >>> torch.special.gammaln(a) tensor([ 0.5724, 0.0000, -0.1208]) """.format( **common_args ), ) polygamma = _add_docstr( _special.special_polygamma, r""" polygamma(n, input, *, out=None) -> Tensor Computes the :math:`n^{th}` derivative of the digamma function on :attr:`input`. :math:`n \geq 0` is called the order of the polygamma function. .. math:: \psi^{(n)}(x) = \frac{d^{(n)}}{dx^{(n)}} \psi(x) .. note:: This function is implemented only for nonnegative integers :math:`n \geq 0`. 
""" + """ Args: n (int): the order of the polygamma function {input} Keyword args: {out} Example:: >>> a = torch.tensor([1, 0.5]) >>> torch.special.polygamma(1, a) tensor([1.64493, 4.9348]) >>> torch.special.polygamma(2, a) tensor([ -2.4041, -16.8288]) >>> torch.special.polygamma(3, a) tensor([ 6.4939, 97.4091]) >>> torch.special.polygamma(4, a) tensor([ -24.8863, -771.4742]) """.format( **common_args ), ) erf = _add_docstr( _special.special_erf, r""" erf(input, *, out=None) -> Tensor Computes the error function of :attr:`input`. The error function is defined as follows: .. math:: \mathrm{erf}(x) = \frac{2}{\sqrt{\pi}} \int_{0}^{x} e^{-t^2} dt """ + r""" Args: {input} Keyword args: {out} Example:: >>> torch.special.erf(torch.tensor([0, -1., 10.])) tensor([ 0.0000, -0.8427, 1.0000]) """.format( **common_args ), ) erfc = _add_docstr( _special.special_erfc, r""" erfc(input, *, out=None) -> Tensor Computes the complementary error function of :attr:`input`. The complementary error function is defined as follows: .. math:: \mathrm{erfc}(x) = 1 - \frac{2}{\sqrt{\pi}} \int_{0}^{x} e^{-t^2} dt """ + r""" Args: {input} Keyword args: {out} Example:: >>> torch.special.erfc(torch.tensor([0, -1., 10.])) tensor([ 1.0000, 1.8427, 0.0000]) """.format( **common_args ), ) erfcx = _add_docstr( _special.special_erfcx, r""" erfcx(input, *, out=None) -> Tensor Computes the scaled complementary error function for each element of :attr:`input`. The scaled complementary error function is defined as follows: .. math:: \mathrm{erfcx}(x) = e^{x^2} \mathrm{erfc}(x) """ + r""" """ + r""" Args: {input} Keyword args: {out} Example:: >>> torch.special.erfcx(torch.tensor([0, -1., 10.])) tensor([ 1.0000, 5.0090, 0.0561]) """.format( **common_args ), ) erfinv = _add_docstr( _special.special_erfinv, r""" erfinv(input, *, out=None) -> Tensor Computes the inverse error function of :attr:`input`. The inverse error function is defined in the range :math:`(-1, 1)` as: .. math:: \mathrm{erfinv}(\mathrm{erf}(x)) = x """ + r""" Args: {input} Keyword args: {out} Example:: >>> torch.special.erfinv(torch.tensor([0, 0.5, -1.])) tensor([ 0.0000, 0.4769, -inf]) """.format( **common_args ), ) logit = _add_docstr( _special.special_logit, r""" logit(input, eps=None, *, out=None) -> Tensor Returns a new tensor with the logit of the elements of :attr:`input`. :attr:`input` is clamped to [eps, 1 - eps] when eps is not None. When eps is None and :attr:`input` < 0 or :attr:`input` > 1, the function will yields NaN. .. math:: \begin{align} y_{i} &= \ln(\frac{z_{i}}{1 - z_{i}}) \\ z_{i} &= \begin{cases} x_{i} & \text{if eps is None} \\ \text{eps} & \text{if } x_{i} < \text{eps} \\ x_{i} & \text{if } \text{eps} \leq x_{i} \leq 1 - \text{eps} \\ 1 - \text{eps} & \text{if } x_{i} > 1 - \text{eps} \end{cases} \end{align} """ + r""" Args: {input} eps (float, optional): the epsilon for input clamp bound. Default: ``None`` Keyword args: {out} Example:: >>> a = torch.rand(5) >>> a tensor([0.2796, 0.9331, 0.6486, 0.1523, 0.6516]) >>> torch.special.logit(a, eps=1e-6) tensor([-0.9466, 2.6352, 0.6131, -1.7169, 0.6261]) """.format( **common_args ), ) logsumexp = _add_docstr( _special.special_logsumexp, r""" logsumexp(input, dim, keepdim=False, *, out=None) Alias for :func:`torch.logsumexp`. """.format( **multi_dim_common ), ) expit = _add_docstr( _special.special_expit, r""" expit(input, *, out=None) -> Tensor Computes the expit (also known as the logistic sigmoid function) of the elements of :attr:`input`. .. 
math:: \text{out}_{i} = \frac{1}{1 + e^{-\text{input}_{i}}} """ + r""" Args: {input} Keyword args: {out} Example:: >>> t = torch.randn(4) >>> t tensor([ 0.9213, 1.0887, -0.8858, -1.7683]) >>> torch.special.expit(t) tensor([ 0.7153, 0.7481, 0.2920, 0.1458]) """.format( **common_args ), ) exp2 = _add_docstr( _special.special_exp2, r""" exp2(input, *, out=None) -> Tensor Computes the base two exponential function of :attr:`input`. .. math:: y_{i} = 2^{x_{i}} """ + r""" Args: {input} Keyword args: {out} Example:: >>> torch.special.exp2(torch.tensor([0, math.log2(2.), 3, 4])) tensor([ 1., 2., 8., 16.]) """.format( **common_args ), ) expm1 = _add_docstr( _special.special_expm1, r""" expm1(input, *, out=None) -> Tensor Computes the exponential of the elements minus 1 of :attr:`input`. .. math:: y_{i} = e^{x_{i}} - 1 .. note:: This function provides greater precision than exp(x) - 1 for small values of x. """ + r""" Args: {input} Keyword args: {out} Example:: >>> torch.special.expm1(torch.tensor([0, math.log(2.)])) tensor([ 0., 1.]) """.format( **common_args ), ) xlog1py = _add_docstr( _special.special_xlog1py, r""" xlog1py(input, other, *, out=None) -> Tensor Computes ``input * log1p(other)`` with the following cases. .. math:: \text{out}_{i} = \begin{cases} \text{NaN} & \text{if } \text{other}_{i} = \text{NaN} \\ 0 & \text{if } \text{input}_{i} = 0.0 \text{ and } \text{other}_{i} != \text{NaN} \\ \text{input}_{i} * \text{log1p}(\text{other}_{i})& \text{otherwise} \end{cases} Similar to SciPy's `scipy.special.xlog1py`. """ + r""" Args: input (Number or Tensor) : Multiplier other (Number or Tensor) : Argument .. note:: At least one of :attr:`input` or :attr:`other` must be a tensor. Keyword args: {out} Example:: >>> x = torch.zeros(5,) >>> y = torch.tensor([-1, 0, 1, float('inf'), float('nan')]) >>> torch.special.xlog1py(x, y) tensor([0., 0., 0., 0., nan]) >>> x = torch.tensor([1, 2, 3]) >>> y = torch.tensor([3, 2, 1]) >>> torch.special.xlog1py(x, y) tensor([1.3863, 2.1972, 2.0794]) >>> torch.special.xlog1py(x, 4) tensor([1.6094, 3.2189, 4.8283]) >>> torch.special.xlog1py(2, y) tensor([2.7726, 2.1972, 1.3863]) """.format( **common_args ), ) xlogy = _add_docstr( _special.special_xlogy, r""" xlogy(input, other, *, out=None) -> Tensor Computes ``input * log(other)`` with the following cases. .. math:: \text{out}_{i} = \begin{cases} \text{NaN} & \text{if } \text{other}_{i} = \text{NaN} \\ 0 & \text{if } \text{input}_{i} = 0.0 \\ \text{input}_{i} * \log{(\text{other}_{i})} & \text{otherwise} \end{cases} Similar to SciPy's `scipy.special.xlogy`. """ + r""" Args: input (Number or Tensor) : Multiplier other (Number or Tensor) : Argument .. note:: At least one of :attr:`input` or :attr:`other` must be a tensor. Keyword args: {out} Example:: >>> x = torch.zeros(5,) >>> y = torch.tensor([-1, 0, 1, float('inf'), float('nan')]) >>> torch.special.xlogy(x, y) tensor([0., 0., 0., 0., nan]) >>> x = torch.tensor([1, 2, 3]) >>> y = torch.tensor([3, 2, 1]) >>> torch.special.xlogy(x, y) tensor([1.0986, 1.3863, 0.0000]) >>> torch.special.xlogy(x, 4) tensor([1.3863, 2.7726, 4.1589]) >>> torch.special.xlogy(2, y) tensor([2.1972, 1.3863, 0.0000]) """.format( **common_args ), ) i0 = _add_docstr( _special.special_i0, r""" i0(input, *, out=None) -> Tensor Computes the zeroth order modified Bessel function of the first kind for each element of :attr:`input`. .. 
math:: \text{out}_{i} = I_0(\text{input}_{i}) = \sum_{k=0}^{\infty} \frac{(\text{input}_{i}^2/4)^k}{(k!)^2} """ + r""" Args: input (Tensor): the input tensor Keyword args: {out} Example:: >>> torch.i0(torch.arange(5, dtype=torch.float32)) tensor([ 1.0000, 1.2661, 2.2796, 4.8808, 11.3019]) """.format( **common_args ), ) i0e = _add_docstr( _special.special_i0e, r""" i0e(input, *, out=None) -> Tensor Computes the exponentially scaled zeroth order modified Bessel function of the first kind (as defined below) for each element of :attr:`input`. .. math:: \text{out}_{i} = \exp(-|x|) * i0(x) = \exp(-|x|) * \sum_{k=0}^{\infty} \frac{(\text{input}_{i}^2/4)^k}{(k!)^2} """ + r""" Args: {input} Keyword args: {out} Example:: >>> torch.special.i0e(torch.arange(5, dtype=torch.float32)) tensor([1.0000, 0.4658, 0.3085, 0.2430, 0.2070]) """.format( **common_args ), ) i1 = _add_docstr( _special.special_i1, r""" i1(input, *, out=None) -> Tensor Computes the first order modified Bessel function of the first kind (as defined below) for each element of :attr:`input`. .. math:: \text{out}_{i} = \frac{(\text{input}_{i})}{2} * \sum_{k=0}^{\infty} \frac{(\text{input}_{i}^2/4)^k}{(k!) * (k+1)!} """ + r""" Args: {input} Keyword args: {out} Example:: >>> torch.special.i1(torch.arange(5, dtype=torch.float32)) tensor([0.0000, 0.5652, 1.5906, 3.9534, 9.7595]) """.format( **common_args ), ) i1e = _add_docstr( _special.special_i1e, r""" i1e(input, *, out=None) -> Tensor Computes the exponentially scaled first order modified Bessel function of the first kind (as defined below) for each element of :attr:`input`. .. math:: \text{out}_{i} = \exp(-|x|) * i1(x) = \exp(-|x|) * \frac{(\text{input}_{i})}{2} * \sum_{k=0}^{\infty} \frac{(\text{input}_{i}^2/4)^k}{(k!) * (k+1)!} """ + r""" Args: {input} Keyword args: {out} Example:: >>> torch.special.i1e(torch.arange(5, dtype=torch.float32)) tensor([0.0000, 0.2079, 0.2153, 0.1968, 0.1788]) """.format( **common_args ), ) ndtr = _add_docstr( _special.special_ndtr, r""" ndtr(input, *, out=None) -> Tensor Computes the area under the standard Gaussian probability density function, integrated from minus infinity to :attr:`input`, elementwise. .. math:: \text{ndtr}(x) = \frac{1}{\sqrt{2 \pi}}\int_{-\infty}^{x} e^{-\frac{1}{2}t^2} dt """ + r""" Args: {input} Keyword args: {out} Example:: >>> torch.special.ndtr(torch.tensor([-3., -2, -1, 0, 1, 2, 3])) tensor([0.0013, 0.0228, 0.1587, 0.5000, 0.8413, 0.9772, 0.9987]) """.format( **common_args ), ) ndtri = _add_docstr( _special.special_ndtri, r""" ndtri(input, *, out=None) -> Tensor Computes the argument, x, for which the area under the Gaussian probability density function (integrated from minus infinity to x) is equal to :attr:`input`, elementwise. .. math:: \text{ndtri}(p) = \sqrt{2}\text{erf}^{-1}(2p - 1) .. note:: Also known as quantile function for Normal Distribution. """ + r""" Args: {input} Keyword args: {out} Example:: >>> torch.special.ndtri(torch.tensor([0, 0.25, 0.5, 0.75, 1])) tensor([ -inf, -0.6745, 0.0000, 0.6745, inf]) """.format( **common_args ), ) log_ndtr = _add_docstr( _special.special_log_ndtr, r""" log_ndtr(input, *, out=None) -> Tensor Computes the log of the area under the standard Gaussian probability density function, integrated from minus infinity to :attr:`input`, elementwise. .. 
math:: \text{log\_ndtr}(x) = \log\left(\frac{1}{\sqrt{2 \pi}}\int_{-\infty}^{x} e^{-\frac{1}{2}t^2} dt \right) """ + r""" Args: {input} Keyword args: {out} Example:: >>> torch.special.log_ndtr(torch.tensor([-3., -2, -1, 0, 1, 2, 3])) tensor([-6.6077 -3.7832 -1.841 -0.6931 -0.1728 -0.023 -0.0014]) """.format( **common_args ), ) log1p = _add_docstr( _special.special_log1p, r""" log1p(input, *, out=None) -> Tensor Alias for :func:`torch.log1p`. """, ) sinc = _add_docstr( _special.special_sinc, r""" sinc(input, *, out=None) -> Tensor Computes the normalized sinc of :attr:`input.` .. math:: \text{out}_{i} = \begin{cases} 1, & \text{if}\ \text{input}_{i}=0 \\ \sin(\pi \text{input}_{i}) / (\pi \text{input}_{i}), & \text{otherwise} \end{cases} """ + r""" Args: {input} Keyword args: {out} Example:: >>> t = torch.randn(4) >>> t tensor([ 0.2252, -0.2948, 1.0267, -1.1566]) >>> torch.special.sinc(t) tensor([ 0.9186, 0.8631, -0.0259, -0.1300]) """.format( **common_args ), ) round = _add_docstr( _special.special_round, r""" round(input, *, out=None) -> Tensor Alias for :func:`torch.round`. """, ) softmax = _add_docstr( _special.special_softmax, r""" softmax(input, dim, *, dtype=None) -> Tensor Computes the softmax function. Softmax is defined as: :math:`\text{Softmax}(x_{i}) = \frac{\exp(x_i)}{\sum_j \exp(x_j)}` It is applied to all slices along dim, and will re-scale them so that the elements lie in the range `[0, 1]` and sum to 1. Args: input (Tensor): input dim (int): A dimension along which softmax will be computed. dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. If specified, the input tensor is cast to :attr:`dtype` before the operation is performed. This is useful for preventing data type overflows. Default: None. Examples:: >>> t = torch.ones(2, 2) >>> torch.special.softmax(t, 0) tensor([[0.5000, 0.5000], [0.5000, 0.5000]]) """, ) log_softmax = _add_docstr( _special.special_log_softmax, r""" log_softmax(input, dim, *, dtype=None) -> Tensor Computes softmax followed by a logarithm. While mathematically equivalent to log(softmax(x)), doing these two operations separately is slower and numerically unstable. This function is computed as: .. math:: \text{log\_softmax}(x_{i}) = \log\left(\frac{\exp(x_i) }{ \sum_j \exp(x_j)} \right) """ + r""" Args: input (Tensor): input dim (int): A dimension along which log_softmax will be computed. dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. If specified, the input tensor is cast to :attr:`dtype` before the operation is performed. This is useful for preventing data type overflows. Default: None. Example:: >>> t = torch.ones(2, 2) >>> torch.special.log_softmax(t, 0) tensor([[-0.6931, -0.6931], [-0.6931, -0.6931]]) """, ) zeta = _add_docstr( _special.special_zeta, r""" zeta(input, other, *, out=None) -> Tensor Computes the Hurwitz zeta function, elementwise. .. math:: \zeta(x, q) = \sum_{k=0}^{\infty} \frac{1}{(k + q)^x} """ + r""" Args: input (Tensor): the input tensor corresponding to `x`. other (Tensor): the input tensor corresponding to `q`. .. 
note:: The Riemann zeta function corresponds to the case when `q = 1` Keyword args: {out} Example:: >>> x = torch.tensor([2., 4.]) >>> torch.special.zeta(x, 1) tensor([1.6449, 1.0823]) >>> torch.special.zeta(x, torch.tensor([1., 2.])) tensor([1.6449, 0.0823]) >>> torch.special.zeta(2, torch.tensor([1., 2.])) tensor([1.6449, 0.6449]) """.format( **common_args ), ) multigammaln = _add_docstr( _special.special_multigammaln, r""" multigammaln(input, p, *, out=None) -> Tensor Computes the `multivariate log-gamma function <https://en.wikipedia.org/wiki/Multivariate_gamma_function>`_ with dimension :math:`p` element-wise, given by .. math:: \log(\Gamma_{p}(a)) = C + \displaystyle \sum_{i=1}^{p} \log\left(\Gamma\left(a - \frac{i - 1}{2}\right)\right) where :math:`C = \log(\pi) \cdot \frac{p (p - 1)}{4}` and :math:`\Gamma(-)` is the Gamma function. All elements must be greater than :math:`\frac{p - 1}{2}`, otherwise the behavior is undefined. """ + """ Args: input (Tensor): the tensor to compute the multivariate log-gamma function p (int): the number of dimensions Keyword args: {out} Example:: >>> a = torch.empty(2, 3).uniform_(1, 2) >>> a tensor([[1.6835, 1.8474, 1.1929], [1.0475, 1.7162, 1.4180]]) >>> torch.special.multigammaln(a, 2) tensor([[0.3928, 0.4007, 0.7586], [1.0311, 0.3901, 0.5049]]) """.format( **common_args ), ) gammainc = _add_docstr( _special.special_gammainc, r""" gammainc(input, other, *, out=None) -> Tensor Computes the regularized lower incomplete gamma function: .. math:: \text{out}_{i} = \frac{1}{\Gamma(\text{input}_i)} \int_0^{\text{other}_i} t^{\text{input}_i-1} e^{-t} dt where both :math:`\text{input}_i` and :math:`\text{other}_i` are weakly positive and at least one is strictly positive. If both are zero or either is negative then :math:`\text{out}_i=\text{nan}`. :math:`\Gamma(\cdot)` in the equation above is the gamma function, .. math:: \Gamma(\text{input}_i) = \int_0^\infty t^{(\text{input}_i-1)} e^{-t} dt. See :func:`torch.special.gammaincc` and :func:`torch.special.gammaln` for related functions. Supports :ref:`broadcasting to a common shape <broadcasting-semantics>` and float inputs. .. note:: The backward pass with respect to :attr:`input` is not yet supported. Please open an issue on PyTorch's Github to request it. """ + r""" Args: input (Tensor): the first non-negative input tensor other (Tensor): the second non-negative input tensor Keyword args: {out} Example:: >>> a1 = torch.tensor([4.0]) >>> a2 = torch.tensor([3.0, 4.0, 5.0]) >>> a = torch.special.gammainc(a1, a2) tensor([0.3528, 0.5665, 0.7350]) >>> b = torch.special.gammainc(a1, a2) + torch.special.gammaincc(a1, a2) tensor([1., 1., 1.]) """.format( **common_args ), ) gammaincc = _add_docstr( _special.special_gammaincc, r""" gammaincc(input, other, *, out=None) -> Tensor Computes the regularized upper incomplete gamma function: .. math:: \text{out}_{i} = \frac{1}{\Gamma(\text{input}_i)} \int_{\text{other}_i}^{\infty} t^{\text{input}_i-1} e^{-t} dt where both :math:`\text{input}_i` and :math:`\text{other}_i` are weakly positive and at least one is strictly positive. If both are zero or either is negative then :math:`\text{out}_i=\text{nan}`. :math:`\Gamma(\cdot)` in the equation above is the gamma function, .. math:: \Gamma(\text{input}_i) = \int_0^\infty t^{(\text{input}_i-1)} e^{-t} dt. See :func:`torch.special.gammainc` and :func:`torch.special.gammaln` for related functions. Supports :ref:`broadcasting to a common shape <broadcasting-semantics>` and float inputs. ..
note:: The backward pass with respect to :attr:`input` is not yet supported. Please open an issue on PyTorch's Github to request it. """ + r""" Args: input (Tensor): the first non-negative input tensor other (Tensor): the second non-negative input tensor Keyword args: {out} Example:: >>> a1 = torch.tensor([4.0]) >>> a2 = torch.tensor([3.0, 4.0, 5.0]) >>> a = torch.special.gammaincc(a1, a2) tensor([0.6472, 0.4335, 0.2650]) >>> b = torch.special.gammainc(a1, a2) + torch.special.gammaincc(a1, a2) tensor([1., 1., 1.]) """.format( **common_args ), ) airy_ai = _add_docstr( _special.special_airy_ai, r""" airy_ai(input, *, out=None) -> Tensor Airy function :math:`\text{Ai}\left(\text{input}\right)`. """ + r""" Args: {input} Keyword args: {out} """.format( **common_args ), ) bessel_j0 = _add_docstr( _special.special_bessel_j0, r""" bessel_j0(input, *, out=None) -> Tensor Bessel function of the first kind of order :math:`0`. """ + r""" Args: {input} Keyword args: {out} """.format( **common_args ), ) bessel_j1 = _add_docstr( _special.special_bessel_j1, r""" bessel_j1(input, *, out=None) -> Tensor Bessel function of the first kind of order :math:`1`. """ + r""" Args: {input} Keyword args: {out} """.format( **common_args ), ) bessel_y0 = _add_docstr( _special.special_bessel_y0, r""" bessel_y0(input, *, out=None) -> Tensor Bessel function of the second kind of order :math:`0`. """ + r""" Args: {input} Keyword args: {out} """.format( **common_args ), ) bessel_y1 = _add_docstr( _special.special_bessel_y1, r""" bessel_y1(input, *, out=None) -> Tensor Bessel function of the second kind of order :math:`1`. """ + r""" Args: {input} Keyword args: {out} """.format( **common_args ), ) chebyshev_polynomial_t = _add_docstr( _special.special_chebyshev_polynomial_t, r""" chebyshev_polynomial_t(input, n, *, out=None) -> Tensor Chebyshev polynomial of the first kind :math:`T_{n}(\text{input})`. If :math:`n = 0`, :math:`1` is returned. If :math:`n = 1`, :math:`\text{input}` is returned. If :math:`n < 6` or :math:`|\text{input}| > 1` the recursion: .. math:: T_{n + 1}(\text{input}) = 2 \times \text{input} \times T_{n}(\text{input}) - T_{n - 1}(\text{input}) is evaluated. Otherwise, the explicit trigonometric formula: .. math:: T_{n}(\text{input}) = \text{cos}(n \times \text{arccos}(x)) is evaluated. """ + r""" Args: {input} n (Tensor): Degree of the polynomial. Keyword args: {out} """.format( **common_args ), ) chebyshev_polynomial_u = _add_docstr( _special.special_chebyshev_polynomial_u, r""" chebyshev_polynomial_t(input, n, *, out=None) -> Tensor Chebyshev polynomial of the second kind :math:`U_{n}(\text{input})`. If :math:`n = 0`, :math:`1` is returned. If :math:`n = 1`, :math:`2 \times \text{input}` is returned. If :math:`n < 6` or :math:`|\text{input}| > 1`, the recursion: .. math:: T_{n + 1}(\text{input}) = 2 \times \text{input} \times T_{n}(\text{input}) - T_{n - 1}(\text{input}) is evaluated. Otherwise, the explicit trigonometric formula: .. math:: \frac{\text{sin}((n + 1) \times \text{arccos}(\text{input}))}{\text{sin}(\text{arccos}(\text{input}))} is evaluated. """ + r""" Args: {input} n (Tensor): Degree of the polynomial. Keyword args: {out} """.format( **common_args ), ) chebyshev_polynomial_v = _add_docstr( _special.special_chebyshev_polynomial_v, r""" chebyshev_polynomial_v(input, n, *, out=None) -> Tensor Chebyshev polynomial of the third kind :math:`V_{n}^{\ast}(\text{input})`. """ + r""" Args: {input} n (Tensor): Degree of the polynomial. 
Keyword args: {out} """.format( **common_args ), ) chebyshev_polynomial_w = _add_docstr( _special.special_chebyshev_polynomial_w, r""" chebyshev_polynomial_w(input, n, *, out=None) -> Tensor Chebyshev polynomial of the fourth kind :math:`W_{n}^{\ast}(\text{input})`. """ + r""" Args: {input} n (Tensor): Degree of the polynomial. Keyword args: {out} """.format( **common_args ), ) hermite_polynomial_h = _add_docstr( _special.special_hermite_polynomial_h, r""" hermite_polynomial_h(input, n, *, out=None) -> Tensor Physicist's Hermite polynomial :math:`H_{n}(\text{input})`. If :math:`n = 0`, :math:`1` is returned. If :math:`n = 1`, :math:`\text{input}` is returned. Otherwise, the recursion: .. math:: H_{n + 1}(\text{input}) = 2 \times \text{input} \times H_{n}(\text{input}) - H_{n - 1}(\text{input}) is evaluated. """ + r""" Args: {input} n (Tensor): Degree of the polynomial. Keyword args: {out} """.format( **common_args ), ) hermite_polynomial_he = _add_docstr( _special.special_hermite_polynomial_he, r""" hermite_polynomial_he(input, n, *, out=None) -> Tensor Probabilist's Hermite polynomial :math:`He_{n}(\text{input})`. If :math:`n = 0`, :math:`1` is returned. If :math:`n = 1`, :math:`\text{input}` is returned. Otherwise, the recursion: .. math:: He_{n + 1}(\text{input}) = 2 \times \text{input} \times He_{n}(\text{input}) - He_{n - 1}(\text{input}) is evaluated. """ + r""" Args: {input} n (Tensor): Degree of the polynomial. Keyword args: {out} """.format( **common_args ), ) laguerre_polynomial_l = _add_docstr( _special.special_laguerre_polynomial_l, r""" laguerre_polynomial_l(input, n, *, out=None) -> Tensor Laguerre polynomial :math:`L_{n}(\text{input})`. If :math:`n = 0`, :math:`1` is returned. If :math:`n = 1`, :math:`\text{input}` is returned. Otherwise, the recursion: .. math:: L_{n + 1}(\text{input}) = 2 \times \text{input} \times L_{n}(\text{input}) - L_{n - 1}(\text{input}) is evaluated. """ + r""" Args: {input} n (Tensor): Degree of the polynomial. Keyword args: {out} """.format( **common_args ), ) legendre_polynomial_p = _add_docstr( _special.special_legendre_polynomial_p, r""" legendre_polynomial_p(input, n, *, out=None) -> Tensor Legendre polynomial :math:`P_{n}(\text{input})`. If :math:`n = 0`, :math:`1` is returned. If :math:`n = 1`, :math:`\text{input}` is returned. Otherwise, the recursion: .. math:: P_{n + 1}(\text{input}) = 2 \times \text{input} \times P_{n}(\text{input}) - P_{n - 1}(\text{input}) is evaluated. """ + r""" Args: {input} n (Tensor): Degree of the polynomial. Keyword args: {out} """.format( **common_args ), ) modified_bessel_i0 = _add_docstr( _special.special_modified_bessel_i0, r""" modified_bessel_i0(input, *, out=None) -> Tensor Modified Bessel function of the first kind of order :math:`0`. """ + r""" Args: {input} Keyword args: {out} """.format( **common_args ), ) modified_bessel_i1 = _add_docstr( _special.special_modified_bessel_i1, r""" modified_bessel_i1(input, *, out=None) -> Tensor Modified Bessel function of the first kind of order :math:`1`. """ + r""" Args: {input} Keyword args: {out} """.format( **common_args ), ) modified_bessel_k0 = _add_docstr( _special.special_modified_bessel_k0, r""" modified_bessel_k0(input, *, out=None) -> Tensor Modified Bessel function of the second kind of order :math:`0`. 
""" + r""" Args: {input} Keyword args: {out} """.format( **common_args ), ) modified_bessel_k1 = _add_docstr( _special.special_modified_bessel_k1, r""" modified_bessel_k1(input, *, out=None) -> Tensor Modified Bessel function of the second kind of order :math:`1`. """ + r""" Args: {input} Keyword args: {out} """.format( **common_args ), ) scaled_modified_bessel_k0 = _add_docstr( _special.special_scaled_modified_bessel_k0, r""" scaled_modified_bessel_k0(input, *, out=None) -> Tensor Scaled modified Bessel function of the second kind of order :math:`0`. """ + r""" Args: {input} Keyword args: {out} """.format( **common_args ), ) scaled_modified_bessel_k1 = _add_docstr( _special.special_scaled_modified_bessel_k1, r""" scaled_modified_bessel_k1(input, *, out=None) -> Tensor Scaled modified Bessel function of the second kind of order :math:`1`. """ + r""" Args: {input} Keyword args: {out} """.format( **common_args ), ) shifted_chebyshev_polynomial_t = _add_docstr( _special.special_shifted_chebyshev_polynomial_t, r""" shifted_chebyshev_polynomial_t(input, n, *, out=None) -> Tensor Chebyshev polynomial of the first kind :math:`T_{n}^{\ast}(\text{input})`. """ + r""" Args: {input} n (Tensor): Degree of the polynomial. Keyword args: {out} """.format( **common_args ), ) shifted_chebyshev_polynomial_u = _add_docstr( _special.special_shifted_chebyshev_polynomial_u, r""" shifted_chebyshev_polynomial_u(input, n, *, out=None) -> Tensor Chebyshev polynomial of the second kind :math:`U_{n}^{\ast}(\text{input})`. """ + r""" Args: {input} n (Tensor): Degree of the polynomial. Keyword args: {out} """.format( **common_args ), ) shifted_chebyshev_polynomial_v = _add_docstr( _special.special_shifted_chebyshev_polynomial_v, r""" shifted_chebyshev_polynomial_v(input, n, *, out=None) -> Tensor Chebyshev polynomial of the third kind :math:`V_{n}^{\ast}(\text{input})`. """ + r""" Args: {input} n (Tensor): Degree of the polynomial. Keyword args: {out} """.format( **common_args ), ) shifted_chebyshev_polynomial_w = _add_docstr( _special.special_shifted_chebyshev_polynomial_w, r""" shifted_chebyshev_polynomial_w(input, n, *, out=None) -> Tensor Chebyshev polynomial of the fourth kind :math:`W_{n}^{\ast}(\text{input})`. """ + r""" Args: {input} n (Tensor): Degree of the polynomial. Keyword args: {out} """.format( **common_args ), ) spherical_bessel_j0 = _add_docstr( _special.special_spherical_bessel_j0, r""" spherical_bessel_j0(input, *, out=None) -> Tensor Spherical Bessel function of the first kind of order :math:`0`. """ + r""" Args: {input} Keyword args: {out} """.format( **common_args ), ) ```
======================================================================================================== SOURCE CODE FILE: storage.py LINES: 29 SIZE: 52.44 KB PATH: scripts\freecad_env\Lib\site-packages\torch\storage.py ENCODING: utf-8 ```py # mypy: allow-untyped-defs from __future__ import annotations import collections import copy import functools import io import threading import warnings from typing import Any, cast, Optional as _Optional, TYPE_CHECKING, TypeVar, Union from typing_extensions import Self import torch from torch._utils import _to, _type from torch.types import _bool, _int, Storage if TYPE_CHECKING: from torch._prims_common import DeviceLikeType __all__ = ["TypedStorage", "UntypedStorage"] try: import numpy as np HAS_NUMPY = True except ModuleNotFoundError: HAS_NUMPY = False np = None # type: ignore[assignment] _share_memory_lock = threading.Lock() _share_memory_map: dict[int, threading.RLock] = {} T = TypeVar("T", bound="Union[_StorageBase, TypedStorage]") class _StorageBase: _cdata: Any is_sparse: _bool = False is_sparse_csr: _bool = False device: torch.device # Used when # (1) stashing FakeTensor device onto storage in torch.serialization.skip_data # (2) stashing device onto storage to propagate to FakeTensor when torch.load under FakeTensorMode _fake_device: _Optional[torch.device] = None # Used when loading with FakeTensorMode to give information about offset of storage in torch.saved-file _checkpoint_offset: _Optional[int] = None def __init__(self, *args, **kwargs): pass def __len__(self) -> _int: raise NotImplementedError def __getitem__(self, idx): raise NotImplementedError def __setitem__(self, *args, **kwargs): raise NotImplementedError def copy_(self, source: T, non_blocking: _Optional[_bool] = None) -> T: raise NotImplementedError def new(self) -> Union[_StorageBase, TypedStorage]: raise NotImplementedError def nbytes(self) -> _int: raise NotImplementedError def size(self) -> _int: return self.nbytes() def type( self, dtype: _Optional[str] = None, non_blocking: _bool = False ) -> Union[_StorageBase, TypedStorage]: return _type(self, dtype, non_blocking) def cuda( self, device=None, non_blocking=False ) -> Union[_StorageBase, TypedStorage]: """Returns a copy of this object in CUDA memory. If this object is already in CUDA memory and on the correct device, then no copy is performed and the original object is returned. Args: device (int): The destination GPU id. Defaults to the current device. non_blocking (bool): If ``True`` and the source is in pinned memory, the copy will be asynchronous with respect to the host. Otherwise, the argument has no effect. """ device2 = torch.device("cuda", device) if device else torch.device("cuda") return self.to(device=device2, non_blocking=non_blocking) def hpu(self, device=None, non_blocking=False) -> Union[_StorageBase, TypedStorage]: """Returns a copy of this object in HPU memory. If this object is already in HPU memory and on the correct device, then no copy is performed and the original object is returned. Args: device (int): The destination HPU id. Defaults to the current device. non_blocking (bool): If ``True`` and the source is in pinned memory, the copy will be asynchronous with respect to the host. Otherwise, the argument has no effect. 
""" device2 = torch.device("hpu", device) if device else torch.device("hpu") return self.to(device=device2, non_blocking=non_blocking) def element_size(self) -> _int: raise NotImplementedError def get_device(self) -> _int: return self.device.index def data_ptr(self) -> _int: raise NotImplementedError def resizable(self) -> _bool: raise NotImplementedError # Defined in torch/csrc/generic/StorageSharing.cpp def _share_filename_cpu_(self, *args, **kwargs): raise NotImplementedError def _share_fd_cpu_(self, *args, **kwargs): raise NotImplementedError @classmethod def _new_using_filename_cpu(cls, size: _int) -> Self: raise NotImplementedError @classmethod def _new_using_fd_cpu(cls, size: _int) -> Self: raise NotImplementedError @classmethod def from_buffer(cls, *args, **kwargs) -> Self: raise NotImplementedError @classmethod def _new_shared_filename_cpu( cls, manager, obj, size, *, device=None, dtype=None, ) -> Self: raise NotImplementedError @classmethod def _release_ipc_counter_cuda(cls, *args, **kwargs) -> Self: raise NotImplementedError @classmethod def _new_with_weak_ptr(cls, *args, **kwargs) -> Self: raise NotImplementedError def _shared_decref(self) -> Union[_StorageBase, TypedStorage]: raise NotImplementedError def _write_file(self, *args, **kwargs): raise NotImplementedError def resize_(self, size: _int): raise NotImplementedError def _weak_ref(self, *args, **kwargs) -> Union[_StorageBase, TypedStorage]: raise NotImplementedError def _set_from_file(self, *args, **kwargs): raise NotImplementedError def _set_cdata(self, *args, **kwargs): raise NotImplementedError def _share_cuda_(self, *args, **kwargs): raise NotImplementedError def is_shared(self) -> _bool: raise NotImplementedError @classmethod def _new_shared_cuda(cls, *args, **kwargs) -> Self: raise NotImplementedError def _shared_incref(self, *args, **kwargs): raise NotImplementedError @classmethod def _free_weak_ref(cls, *args, **kwargs): raise NotImplementedError @property def is_cuda(self): raise NotImplementedError @property def is_hpu(self): raise NotImplementedError @classmethod def from_file(cls, filename, shared, nbytes) -> Union[_StorageBase, TypedStorage]: raise NotImplementedError @classmethod def _expired(cls, *args, **kwargs) -> Union[_StorageBase, TypedStorage]: raise NotImplementedError def _byteswap(self, *args, **kwargs): raise NotImplementedError def _get_filename(self, *args, **kwargs) -> _Optional[str]: raise NotImplementedError def __repr__(self): info_str = f"[{torch.typename(self)}(device={self.device}) of size {len(self)}]" if self.device.type == "meta": return "...\n" + info_str data_str = " " + "\n ".join(str(self[i]) for i in range(self.size())) return data_str + "\n" + info_str def __iter__(self): return iter(self[i] for i in range(self.size())) def __copy__(self): return self.clone() def __deepcopy__(self, memo): memo = memo.setdefault("torch", {}) if self._cdata in memo: return memo[self._cdata] new_storage = self.clone() memo[self._cdata] = new_storage return new_storage def __reduce__(self): b = io.BytesIO() torch.save(self, b, _use_new_zipfile_serialization=False) return (_load_from_bytes, (b.getvalue(),)) def __sizeof__(self): return super().__sizeof__() + self.size() def clone(self): """Return a copy of this storage.""" return type(self)(self.nbytes(), device=self.device).copy_(self) def tolist(self): """Return a list containing the elements of this storage.""" return list(self) def cpu(self): """Return a CPU copy of this storage if it's not already on the CPU.""" if self.device.type != "cpu": 
return torch.UntypedStorage(self.size()).copy_(self, False) return self def mps(self): """Return a MPS copy of this storage if it's not already on the MPS.""" if self.device.type != "mps": return torch.UntypedStorage(self.size(), device="mps").copy_(self, False) return self def _to(self, dtype): if not isinstance(dtype, torch.dtype): raise TypeError(f"Argument 'dtype' must be torch.dtype, not {type(dtype)}") storage = ( torch.tensor([], dtype=torch.uint8, device=self.device) .set_(cast(Storage, self)) .to(dtype) ._typed_storage() ) if storage.data_ptr() == self.data_ptr(): storage = storage.clone() return storage def to(self, *, device: DeviceLikeType, non_blocking: _bool = False): if not isinstance(device, torch.device): device = torch.device(device) return _to(self, device, non_blocking) def double(self): """Casts this storage to double type.""" return self._to(torch.double) def float(self): """Casts this storage to float type.""" return self._to(torch.float) def half(self): """Casts this storage to half type.""" return self._to(torch.half) def long(self): """Casts this storage to long type.""" return self._to(torch.long) def int(self): """Casts this storage to int type.""" return self._to(torch.int) def short(self): """Casts this storage to short type.""" return self._to(torch.short) def char(self): """Casts this storage to char type.""" return self._to(torch.int8) def byte(self): """Casts this storage to byte type.""" return self._to(torch.uint8) def bool(self): """Casts this storage to bool type.""" return self._to(torch.bool) def bfloat16(self): """Casts this storage to bfloat16 type.""" return self._to(torch.bfloat16) def complex_double(self): """Casts this storage to complex double type.""" return self._to(torch.cdouble) def complex_float(self): """Casts this storage to complex float type.""" return self._to(torch.cfloat) def float8_e5m2(self): """Casts this storage to float8_e5m2 type""" return self._to(torch.float8_e5m2) def float8_e4m3fn(self): """Casts this storage to float8_e4m3fn type""" return self._to(torch.float8_e4m3fn) def float8_e5m2fnuz(self): """Casts this storage to float8_e5m2fnuz type""" return self._to(torch.float8_e5m2fnuz) def float8_e4m3fnuz(self): """Casts this storage to float8_e4m3fnuz type""" return self._to(torch.float8_e4m3fnuz) def is_pinned(self, device: Union[str, torch.device] = "cuda"): r"""Determine whether the CPU storage is already pinned on device. Args: device (str or torch.device): The device to pin memory on (default: ``'cuda'``). This argument is discouraged and subject to deprecated. Returns: A boolean variable. """ return ( torch.tensor([], dtype=torch.uint8, device=self.device) .set_(cast(Storage, self)) .is_pinned(device) ) def pin_memory(self, device: Union[str, torch.device] = "cuda"): r"""Copy the CPU storage to pinned memory, if it's not already pinned. Args: device (str or torch.device): The device to pin memory on (default: ``'cuda'``). This argument is discouraged and subject to deprecated. Returns: A pinned CPU storage. 
""" if self.device.type != "cpu": raise TypeError(f"cannot pin '{self.type()}' only CPU memory can be pinned") pinned_tensor = ( torch.tensor([], dtype=torch.uint8, device=self.device) .set_(cast(Storage, self)) .pin_memory(device) ) return pinned_tensor.untyped_storage() def share_memory_(self): """See :meth:`torch.UntypedStorage.share_memory_`""" from torch.multiprocessing import get_sharing_strategy if self.device.type in ["cuda", torch._C._get_privateuse1_backend_name()]: pass # CUDA or PrivateUse1 doesn't use POSIX shared memory elif get_sharing_strategy() == "file_system": self._share_filename_cpu_() else: self._share_fd_cpu_() return self @classmethod def _new_shared(cls, size, *, device="cpu"): """Create a new storage in shared memory with the same data type.""" from torch.multiprocessing import get_sharing_strategy device = torch.device(device) if device.type in ["cuda", torch._C._get_privateuse1_backend_name(), "hpu"]: return cls(size, device=device) elif get_sharing_strategy() == "file_system": return cls._new_using_filename_cpu(size) else: return cls._new_using_fd_cpu(size) def untyped(self): return self def byteswap(self, dtype): """Swap bytes in underlying data.""" elem_size = torch._utils._element_size(dtype) # for complex types, don't swap first and second numbers if dtype.is_complex: elem_size = max(int(elem_size / 2), 1) self._byteswap(elem_size) def _share_memory_lock_protected(fn): @functools.wraps(fn) def wrapper(self, *args, **kwargs): to_free = None to_wait = None with _share_memory_lock: key = self._cdata if key in _share_memory_map: to_wait = _share_memory_map[key] else: _share_memory_map[key] = threading.RLock() _share_memory_map[key].acquire() to_free = key # If we're already in the process of sharing the storage, wait # for it to be done. if to_wait is not None: with to_wait: pass try: return fn(self, *args, **kwargs) finally: # If we acquired the storage lock here and we're done working on it # we can now release it and free the entry. if to_free is not None: # Ensure that the cdata from the storage didn't change and only # the data_ptr did. assert self._cdata == to_free with _share_memory_lock: _share_memory_map[to_free].release() del _share_memory_map[to_free] return wrapper class UntypedStorage(torch._C.StorageBase, _StorageBase): def __getitem__(self, *args, **kwargs): if self.device.type == "meta": raise NotImplementedError("Not available for 'meta' device type") return super().__getitem__(*args, **kwargs) @property def is_cuda(self): return self.device.type == "cuda" @property def is_hpu(self): return self.device.type == "hpu" @property def filename(self) -> _Optional[str]: """Returns the file name associated with this storage. The file name will be a string if the storage is on CPU and was created via :meth:`~torch.from_file()` with ``shared`` as ``True``. This attribute is ``None`` otherwise. """ return self._get_filename() @_share_memory_lock_protected def share_memory_(self, *args, **kwargs): """ Moves the storage to shared memory. This is a no-op for storages already in shared memory and for CUDA storages, which do not need to be moved for sharing across processes. Storages in shared memory cannot be resized. Note that to mitigate issues like `this <https://github.com/pytorch/pytorch/issues/95606>`_ it is thread safe to call this function from multiple threads on the same object. It is NOT thread safe though to call any other function on self without proper synchronization. Please see :doc:`/notes/multiprocessing` for more details. .. 
note:: When all references to a storage in shared memory are deleted, the associated shared memory object will also be deleted. PyTorch has a special cleanup process to ensure that this happens even if the current process exits unexpectedly. It is worth noting the difference between :meth:`share_memory_` and :meth:`from_file` with ``shared = True`` #. ``share_memory_`` uses `shm_open(3) <https://man7.org/linux/man-pages/man3/shm_open.3.html>`_ to create a POSIX shared memory object while :meth:`from_file` uses `open(2) <https://man7.org/linux/man-pages/man2/open.2.html>`_ to open the filename passed by the user. #. Both use an `mmap(2) call <https://man7.org/linux/man-pages/man2/mmap.2.html>`_ with ``MAP_SHARED`` to map the file/object into the current virtual address space #. ``share_memory_`` will call ``shm_unlink(3)`` on the object after mapping it to make sure the shared memory object is freed when no process has the object open. ``torch.from_file(shared=True)`` does not unlink the file. This file is persistent and will remain until it is deleted by the user. Returns: ``self`` """ return super().share_memory_(*args, **kwargs) @_share_memory_lock_protected def _share_fd_cpu_(self, *args, **kwargs): return super()._share_fd_cpu_(*args, **kwargs) @_share_memory_lock_protected def _share_filename_cpu_(self, *args, **kwargs): return super()._share_filename_cpu_(*args, **kwargs) def _load_from_bytes(b): return torch.load(io.BytesIO(b), weights_only=False) @functools.cache def _new_dtypes(): # These are dtypes serialized as UntypedStorage unlike those in # _dtype_to_storage_type_map return { torch.float8_e5m2, torch.float8_e4m3fn, torch.float8_e5m2fnuz, torch.float8_e4m3fnuz, torch.float8_e8m0fnu, torch.bits8, torch.bits16, torch.bits1x8, torch.bits2x4, torch.bits4x2, torch.complex32, torch.uint16, torch.uint32, torch.uint64, } @functools.cache def _dtype_to_storage_type_map(): # NOTE: We should no longer add dtypes to this map. This map # is only used for BC/FC with older PyTorch versions. Going forward, # new dtypes of TypedStorage should not translate to a legacy # <type>Storage class. 
Instead, new dtypes of TypedStorage should # be serialized as an UntypedStorage paired with a torch.dtype return { torch.double: "DoubleStorage", torch.float: "FloatStorage", torch.half: "HalfStorage", torch.long: "LongStorage", torch.int: "IntStorage", torch.int16: "ShortStorage", torch.int8: "CharStorage", torch.uint8: "ByteStorage", torch.bool: "BoolStorage", torch.bfloat16: "BFloat16Storage", torch.cdouble: "ComplexDoubleStorage", torch.cfloat: "ComplexFloatStorage", torch.qint8: "QInt8Storage", torch.qint32: "QInt32Storage", torch.quint8: "QUInt8Storage", torch.quint4x2: "QUInt4x2Storage", torch.quint2x4: "QUInt2x4Storage", } @functools.cache def _storage_type_to_dtype_map(): dtype_map = {val: key for key, val in _dtype_to_storage_type_map().items()} return dtype_map def _get_storage_from_sequence(sequence, dtype, device): if dtype in [ torch.quint8, torch.quint4x2, torch.quint2x4, torch.qint32, torch.qint8, ]: interpret_dtypes = { torch.quint8: torch.uint8, torch.quint4x2: torch.uint8, torch.quint2x4: torch.uint8, torch.qint32: torch.int32, torch.qint8: torch.int8, } tmp_tensor = torch.tensor( sequence, dtype=interpret_dtypes[dtype], device=device ) else: tmp_tensor = torch.tensor(sequence, dtype=dtype, device=device) return tmp_tensor._typed_storage()._untyped_storage def _isint(x): if HAS_NUMPY: return isinstance(x, (int, np.integer)) else: return isinstance(x, int) _always_warn_typed_storage_removal = False def _get_always_warn_typed_storage_removal(): return _always_warn_typed_storage_removal def _set_always_warn_typed_storage_removal(always_warn): global _always_warn_typed_storage_removal assert isinstance(always_warn, bool) _always_warn_typed_storage_removal = always_warn def _warn_typed_storage_removal(stacklevel=2): global _always_warn_typed_storage_removal def is_first_time(): if not hasattr(_warn_typed_storage_removal, "has_warned"): return True else: return not _warn_typed_storage_removal.__dict__["has_warned"] if _get_always_warn_typed_storage_removal() or is_first_time(): message = ( "TypedStorage is deprecated. It will be removed in the future and " "UntypedStorage will be the only storage class. This should only matter " "to you if you are using storages directly. To access UntypedStorage " "directly, use tensor.untyped_storage() instead of tensor.storage()" ) warnings.warn(message, UserWarning, stacklevel=stacklevel + 1) _warn_typed_storage_removal.__dict__["has_warned"] = True def _reset_warn_typed_storage_removal(): _warn_typed_storage_removal.__dict__["has_warned"] = False def _get_device_from_module(module: str): last_part = module.rsplit(".", 1)[-1] if last_part in ["cuda", torch._C._get_privateuse1_backend_name(), "hpu"]: return last_part else: return "cpu" class TypedStorage: is_sparse: _bool = False # Used when stashing FakeTensor device onto storage in torch.save(metadata_only=True) _fake_device: _Optional[torch.device] = None dtype: torch.dtype @property def _dtype(self): return self.dtype @property def filename(self) -> _Optional[str]: """Returns the file name associated with this storage if the storage was memory mapped from a file. 
or ``None`` if the storage was not created by memory mapping a file.""" return self._untyped_storage.filename def fill_(self, value): _warn_typed_storage_removal() self._setitem(slice(0, self._size()), value) return self def __new__( cls, *args, wrap_storage=None, dtype=None, device=None, _internal=False, ): if not _internal: _warn_typed_storage_removal() if cls == torch.storage._LegacyStorage: raise RuntimeError( "Only child classes of _LegacyStorage can be instantiated" ) if cls == TypedStorage: return super().__new__(cls) else: arg_error_msg = ( f"{cls}.__new__ received an invalid combination " f"of arguments. Expected one of:\n" " * no arguments\n" " * (int size)\n" " * (Sequence data)\n" " * (*, UntypedStorage wrap_storage)" ) if device is not None: raise RuntimeError( arg_error_msg + "\nKeyword argument 'device' cannot be specified" ) if dtype is not None: raise RuntimeError( arg_error_msg + "\nKeyword argument 'dtype' cannot be specified" ) if wrap_storage is None: if len(args) > 1: raise RuntimeError( arg_error_msg + "\nToo many positional arguments" ) if ( len(args) == 1 and not _isint(args[0]) and not isinstance(args[0], collections.abc.Sequence) ): raise TypeError( arg_error_msg + f"\nArgument type not recognized: {type(args[0])}" ) return TypedStorage( *args, dtype=cls._dtype, device=_get_device_from_module(cls.__module__), _internal=True, ) else: if len(args) != 0: raise RuntimeError( arg_error_msg + "\nNo positional arguments should be given when using " "'wrap_storage'" ) if not isinstance(wrap_storage, torch.UntypedStorage): raise TypeError( arg_error_msg + f"\nArgument 'wrap_storage' must be UntypedStorage, but got {type(wrap_storage)}" ) cls_device = _get_device_from_module(cls.__module__) if wrap_storage.device.type != cls_device: raise RuntimeError( arg_error_msg + f"\nDevice of 'wrap_storage' must be {cls_device}" f", but got {wrap_storage.device.type}" ) return TypedStorage( *args, wrap_storage=wrap_storage, dtype=cls.dtype, _internal=True, ) def __init__( self, *args, device=None, dtype=None, wrap_storage=None, _internal=False, ): if not _internal: _warn_typed_storage_removal() arg_error_msg = ( "TypedStorage.__init__ received an invalid combination " "of arguments. 
Expected one of:\n" " * (*, torch.device device, torch.dtype dtype)\n" " * (int size, *, torch.device device, torch.dtype dtype)\n" " * (Sequence data, *, torch.device device, torch.dtype dtype)\n" " * (*, UntypedStorage wrap_storage, torch.dtype dtype)" ) if wrap_storage is not None: if len(args) != 0: raise RuntimeError( arg_error_msg + "\nNo positional arguments should be given when using " "'wrap_storage'" ) if dtype is None: raise RuntimeError( arg_error_msg + "\nArgument 'dtype' must be specified" ) if not isinstance(dtype, torch.dtype): raise TypeError( arg_error_msg + f"\nArgument 'dtype' must be torch.dtype, not {type(dtype)}" ) if device is not None: raise RuntimeError( arg_error_msg + "\nArgument 'device' should not be specified when 'wrap_storage' is given" ) self.dtype = dtype if not isinstance(wrap_storage, torch.UntypedStorage): raise TypeError( arg_error_msg + f"\nArgument 'wrap_storage' must be UntypedStorage, but got {type(wrap_storage)}" ) self._untyped_storage = wrap_storage else: self.dtype = torch.get_default_dtype() if dtype is None else dtype device = torch.device("cpu" if device is None else device) if self.dtype in [ torch.quint8, torch.quint4x2, torch.quint2x4, torch.qint32, torch.qint8, ]: if device.type == "cuda": raise RuntimeError( "Cannot create CUDA storage with quantized dtype" ) if len(args) == 0: self._untyped_storage = torch.UntypedStorage(device=device) elif len(args) == 1: if _isint(args[0]): self._untyped_storage = torch.UntypedStorage( int(args[0]) * self._element_size(), device=device ) elif isinstance(args[0], collections.abc.Sequence): self._untyped_storage = _get_storage_from_sequence( args[0], self.dtype, device ) else: raise TypeError( arg_error_msg + f"\nArgument type not recognized: {type(args[0])}" ) else: raise RuntimeError(arg_error_msg + "\nToo many positional arguments") @property def is_cuda(self): _warn_typed_storage_removal() return self._untyped_storage.device.type == "cuda" @property def is_hpu(self): _warn_typed_storage_removal() return self._untyped_storage.device.type == "hpu" def untyped(self): """Return the internal :class:`torch.UntypedStorage`.""" _warn_typed_storage_removal() return self._untyped_storage def _new_wrapped_storage(self, untyped_storage) -> Self: assert type(untyped_storage) == torch.UntypedStorage if type(self) == TypedStorage: return cast( Self, TypedStorage( wrap_storage=untyped_storage, dtype=self.dtype, _internal=True ), ) else: return type(self)(wrap_storage=untyped_storage) def __len__(self): _warn_typed_storage_removal() return self._size() def _maybe_wrap_index(self, idx, is_stop=False): if idx is None: if is_stop: return self._size() else: return 0 else: if type(idx) != int: raise TypeError(f"can't index a {type(self)} with {type(idx)}") if is_stop: if (idx > self._size()) or (idx < -self._size()): raise IndexError( f"index {idx} out of range for storage of size {self.size()}" ) if idx > 0: return idx else: return idx % self._size() else: if (idx >= self._size()) or (idx < -self._size()): raise IndexError( f"index {idx} out of range for storage of size {self.size()}" ) return idx % self._size() def __setitem__(self, idx, value): _warn_typed_storage_removal() return self._setitem(idx, value) def _setitem(self, idx, value): if not isinstance(idx, (int, slice)): raise RuntimeError(f"can't index a {type(self)} with {type(idx)}") if torch.is_storage(value): raise RuntimeError(f"cannot set item with value type {type(value)}") if self.dtype in [ torch.quint8, torch.quint4x2, torch.quint2x4, torch.qint32, 
torch.qint8, ]: interpret_dtypes = { torch.quint8: torch.uint8, torch.quint4x2: torch.uint8, torch.quint2x4: torch.uint8, torch.qint32: torch.int32, torch.qint8: torch.int8, } tmp_dtype = interpret_dtypes[self.dtype] tmp_tensor = torch.tensor( [], dtype=tmp_dtype, device=self._untyped_storage.device ) tmp_tensor.set_( TypedStorage( wrap_storage=self._untyped_storage, dtype=tmp_dtype, _internal=True ) ) else: tmp_tensor = torch.tensor( [], dtype=self.dtype, device=self._untyped_storage.device ).set_(self) tmp_tensor[idx] = value def __getitem__(self, idx): _warn_typed_storage_removal() return self._getitem(idx) def _getitem(self, idx): if self._untyped_storage.device.type == "meta": raise NotImplementedError("Not available for 'meta' device type") # NOTE: Before TypedStorage existed, indexing with a slice used to be # possible for <type>Storage objects. However, it would return # a storage view, which would be a hassle to implement in TypedStorage, # so it was disabled if isinstance(idx, slice): raise RuntimeError( "slices are only supported in UntypedStorage.__getitem__" ) elif not isinstance(idx, int): raise RuntimeError(f"can't index a {type(self)} with {type(idx)}") if self.dtype in [ torch.quint8, torch.quint4x2, torch.quint2x4, torch.qint32, torch.qint8, ]: interpret_dtypes = { torch.quint8: torch.uint8, torch.quint4x2: torch.uint8, torch.quint2x4: torch.uint8, torch.qint32: torch.int32, torch.qint8: torch.int8, } return TypedStorage( wrap_storage=self._untyped_storage, dtype=interpret_dtypes[self.dtype], _internal=True, )._getitem(idx) idx_wrapped = self._maybe_wrap_index(idx) from torch._subclasses.fake_tensor import unset_fake_temporarily with unset_fake_temporarily(): tmp_tensor = torch.tensor( [], dtype=self.dtype, device=self._untyped_storage.device ).set_(self) return tmp_tensor[idx_wrapped].item() def copy_(self, source: T, non_blocking: _Optional[bool] = None): _warn_typed_storage_removal() if isinstance(source, TypedStorage): self._untyped_storage.copy_(source._untyped_storage, non_blocking) else: self._untyped_storage.copy_(source, non_blocking) return self def nbytes(self): _warn_typed_storage_removal() return self._nbytes() # For internal use only, to avoid deprecation warning def _nbytes(self): return self._untyped_storage.nbytes() def type( self, dtype: _Optional[str] = None, non_blocking: bool = False, ) -> Union[_StorageBase, TypedStorage, str]: _warn_typed_storage_removal() if dtype is None: legacy_class = self._get_legacy_storage_class() if legacy_class is not None: return legacy_class.__module__ + "." 
+ legacy_class.__name__ return ".".join([self.__module__, type(self).__name__]) else: return self._untyped_storage.type(dtype, non_blocking) def cuda(self, device=None, non_blocking=False) -> Self: _warn_typed_storage_removal() if self.dtype in [ torch.quint8, torch.quint4x2, torch.quint2x4, torch.qint32, torch.qint8, ]: raise RuntimeError("Cannot create CUDA storage with quantized dtype") cuda_storage = self._untyped_storage.cuda(device, non_blocking) return self._new_wrapped_storage(cuda_storage) def hpu(self, device=None, non_blocking=False) -> Self: _warn_typed_storage_removal() if self.dtype in [ torch.quint8, torch.quint4x2, torch.quint2x4, torch.qint32, torch.qint8, ]: raise RuntimeError("Cannot create HPU storage with quantized dtype") hpu_storage = self._untyped_storage.hpu(device, non_blocking) return self._new_wrapped_storage(hpu_storage) def to(self, *, device: DeviceLikeType, non_blocking: bool = False) -> Self: _warn_typed_storage_removal() if not isinstance(device, torch.device): device = torch.device(device) if self.dtype in [ torch.quint8, torch.quint4x2, torch.quint2x4, torch.qint32, torch.qint8, ]: raise RuntimeError( f"Cannot create {device.type.upper()} storage with quantized dtype" ) to_storage = self._untyped_storage.to(device=device, non_blocking=non_blocking) return self._new_wrapped_storage(to_storage) def element_size(self): _warn_typed_storage_removal() return self._element_size() # For internal use only, to avoid deprecation warning def _element_size(self): return torch._utils._element_size(self.dtype) def get_device(self) -> _int: _warn_typed_storage_removal() return self._untyped_storage.get_device() def __str__(self): _warn_typed_storage_removal() info_str = ( f"[{torch.typename(self)}(dtype={self.dtype}, " f"device={self.device}) of size {len(self)}]" ) if self.device.type == "meta": return "...\n" + info_str else: data_str = " " + "\n ".join(str(self[i]) for i in range(self.size())) return data_str + "\n" + info_str def __repr__(self): _warn_typed_storage_removal() return str(self) def __iter__(self): _warn_typed_storage_removal() return iter(self[i] for i in range(self.size())) def __copy__(self): _warn_typed_storage_removal() return self._new_wrapped_storage(copy.copy(self._untyped_storage)) def __deepcopy__(self, memo): _warn_typed_storage_removal() return self._deepcopy(memo) # For internal use only, to avoid deprecation warning def _deepcopy(self, memo): return self._new_wrapped_storage(copy.deepcopy(self._untyped_storage, memo)) def __sizeof__(self): _warn_typed_storage_removal() return super().__sizeof__() + self.nbytes() def clone(self): """Return a copy of this storage.""" _warn_typed_storage_removal() return self._new_wrapped_storage(self._untyped_storage.clone()) def tolist(self): """Return a list containing the elements of this storage.""" _warn_typed_storage_removal() return list(self) def cpu(self): """Return a CPU copy of this storage if it's not already on the CPU.""" _warn_typed_storage_removal() return self._new_wrapped_storage(self._untyped_storage.cpu()) def is_pinned(self, device: Union[str, torch.device] = "cuda"): r"""Determine whether the CPU TypedStorage is already pinned on device. Args: device (str or torch.device): The device to pin memory on (default: ``'cuda'``). This argument is discouraged and subject to deprecated. Returns: A boolean variable. 
""" _warn_typed_storage_removal() return self._untyped_storage.is_pinned(device) def pin_memory(self, device: Union[str, torch.device] = "cuda"): r"""Copy the CPU TypedStorage to pinned memory, if it's not already pinned. Args: device (str or torch.device): The device to pin memory on (default: ``'cuda'``). This argument is discouraged and subject to deprecated. Returns: A pinned CPU storage. """ _warn_typed_storage_removal() return self._new_wrapped_storage( self._untyped_storage.pin_memory(device=device) ) def share_memory_(self): """See :meth:`torch.UntypedStorage.share_memory_`""" _warn_typed_storage_removal() return self._share_memory_() # For internal use only, to avoid deprecation warning def _share_memory_(self): self._untyped_storage.share_memory_() return self def _new_shared(self, size, *, device=None): """Create a new storage in shared memory with the same data type.""" if device is None: device = "cpu" device = torch.device(device) untyped_storage = torch.UntypedStorage._new_shared( size * self._element_size(), device=device ) return TypedStorage( wrap_storage=untyped_storage, dtype=self.dtype, _internal=True ) @property def _cdata(self): return self._untyped_storage._cdata @property def device(self): _warn_typed_storage_removal() return self._untyped_storage.device def size(self): _warn_typed_storage_removal() return self._size() # For internal use only, to avoid deprecation warning def _size(self): # NB: don't indirect through __len__, as that requires # an int to be returned return self._untyped_storage.nbytes() // self._element_size() def pickle_storage_type(self): _warn_typed_storage_removal() return self._pickle_storage_type() # For internal use only, to avoid deprecation warning def _pickle_storage_type(self): try: return _dtype_to_storage_type_map()[self.dtype] except KeyError as e: raise KeyError(f"dtype {self.dtype} is not recognized") from e def __reduce__(self): b = io.BytesIO() torch.save(self, b, _use_new_zipfile_serialization=False) return (_load_from_bytes, (b.getvalue(),)) def data_ptr(self): _warn_typed_storage_removal() return self._data_ptr() # For internal use only, to avoid deprecation warning def _data_ptr(self): return self._untyped_storage.data_ptr() def resizable(self): _warn_typed_storage_removal() return self._untyped_storage.resizable() def resize_(self, size): _warn_typed_storage_removal() self._resize_(size) # For internal use only, to avoid deprecation warning def _resize_(self, size): self._untyped_storage.resize_(size * self._element_size()) @classmethod def _free_weak_ref(cls, *args, **kwargs): return UntypedStorage._free_weak_ref(*args, **kwargs) def _weak_ref(self, *args, **kwargs): return self._untyped_storage._weak_ref(*args, **kwargs) @classmethod def from_buffer(cls, *args, **kwargs): _warn_typed_storage_removal() return cls._from_buffer(*args, **kwargs) @classmethod def _from_buffer(cls, *args, dtype=None, device=None, **kwargs): if cls == TypedStorage: dtype = torch.get_default_dtype() if dtype is None else dtype device = torch.device("cpu" if device is None else device) if device.type != "cpu": raise RuntimeError( f"TypedStorage.from_buffer: Not available for device {device.type}" ) untyped_storage: torch.UntypedStorage = torch.UntypedStorage.from_buffer( *args, dtype=dtype, **kwargs ) else: if dtype is not None or len(args) == 5: raise RuntimeError( "from_buffer: 'dtype' can only be specified in " "UntypedStorage.from_buffer and TypedStorage.from_buffer" ) if device is not None: raise RuntimeError( "from_buffer: 'device' can only be 
specified in " "UntypedStorage.from_buffer and TypedStorage.from_buffer" ) dtype = cls._dtype untyped_storage = torch.UntypedStorage.from_buffer( *args, dtype=dtype, **kwargs ) return TypedStorage(wrap_storage=untyped_storage, dtype=dtype, _internal=True) def _to(self, dtype): if not isinstance(dtype, torch.dtype): raise TypeError(f"Argument 'dtype' must be torch.dtype, not {type(dtype)}") storage = ( torch.tensor([], dtype=self.dtype, device=self.device) .set_(self) .to(dtype) ._typed_storage() ) if storage.data_ptr() == self.data_ptr(): storage = storage.clone() return storage def double(self): """Casts this storage to double type.""" _warn_typed_storage_removal() return self._to(torch.double) def float(self): """Casts this storage to float type.""" _warn_typed_storage_removal() return self._to(torch.float) def half(self): """Casts this storage to half type.""" _warn_typed_storage_removal() return self._to(torch.half) def long(self): """Casts this storage to long type.""" _warn_typed_storage_removal() return self._to(torch.long) def int(self): """Casts this storage to int type.""" _warn_typed_storage_removal() return self._to(torch.int) def short(self): """Casts this storage to short type.""" _warn_typed_storage_removal() return self._to(torch.short) def char(self): """Casts this storage to char type.""" _warn_typed_storage_removal() return self._to(torch.int8) def byte(self): """Casts this storage to byte type.""" _warn_typed_storage_removal() return self._to(torch.uint8) def bool(self): """Casts this storage to bool type.""" _warn_typed_storage_removal() return self._to(torch.bool) def bfloat16(self): """Casts this storage to bfloat16 type.""" _warn_typed_storage_removal() return self._to(torch.bfloat16) def complex_double(self): """Casts this storage to complex double type.""" _warn_typed_storage_removal() return self._to(torch.cdouble) def complex_float(self): """Casts this storage to complex float type.""" _warn_typed_storage_removal() return self._to(torch.cfloat) def float8_e5m2(self): """Casts this storage to float8_e5m2 type""" _warn_typed_storage_removal() return self._to(torch.float8_e5m2) def float8_e4m3fn(self): """Casts this storage to float8_e4m3fn type""" _warn_typed_storage_removal() return self._to(torch.float8_e4m3fn) def float8_e5m2fnuz(self): """Casts this storage to float8_e5m2fnuz type""" _warn_typed_storage_removal() return self._to(torch.float8_e5m2fnuz) def float8_e4m3fnuz(self): """Casts this storage to float8_e4m3fnuz type""" _warn_typed_storage_removal() return self._to(torch.float8_e4m3fnuz) @classmethod def from_file(cls, filename, shared, size): """from_file(filename, shared=False, size=0) -> Storage Creates a CPU storage backed by a memory-mapped file. If ``shared`` is ``True``, then memory is shared between all processes. All changes are written to the file. If ``shared`` is ``False``, then the changes on the storage do not affect the file. ``size`` is the number of elements in the storage. If ``shared`` is ``False``, then the file must contain at least ``size * sizeof(Type)`` bytes (``Type`` is the type of storage). If ``shared`` is ``True`` the file will be created if needed. 
Args: filename (str): file name to map shared (bool): whether to share memory (whether ``MAP_SHARED`` or ``MAP_PRIVATE`` is passed to the underlying `mmap(2) call <https://man7.org/linux/man-pages/man2/mmap.2.html>`_) size (int): number of elements in the storage """ _warn_typed_storage_removal() if cls == TypedStorage: raise RuntimeError("from_file can only be called on derived classes") untyped_storage = UntypedStorage.from_file( filename, shared, size * torch._utils._element_size(cls.dtype) ) storage = cls(wrap_storage=untyped_storage) return storage @classmethod def _expired(cls, *args, **kwargs): return UntypedStorage._expired(*args, **kwargs) def _write_file(self, *args, **kwargs): return self._untyped_storage._write_file(*args, **kwargs) def _set_from_file(self, *args, **kwargs): return self._untyped_storage._set_from_file(*args, **kwargs) def _set_cdata(self, *args, **kwargs): return self._untyped_storage._set_cdata(*args, **kwargs) def _share_cuda_(self, *args, **kwargs): return self._untyped_storage._share_cuda_(*args, **kwargs) def is_shared(self): _warn_typed_storage_removal() return self._is_shared() # For internal use only, to avoid deprecation warning def _is_shared(self): return self._untyped_storage.is_shared() @classmethod def _new_shared_cuda(cls, *args, **kwargs): return torch.UntypedStorage._new_shared_cuda(*args, **kwargs) def _share_filename_cpu_(self, *args, **kwargs): ( manager_handle, storage_handle, size, ) = self._untyped_storage._share_filename_cpu_(*args, **kwargs) return manager_handle, storage_handle, size // self._element_size() def _shared_decref(self): self._untyped_storage._shared_decref() return self @classmethod def _release_ipc_counter(cls, *args, device=None, **kwargs): return torch.UntypedStorage._release_ipc_counter_cuda(*args, **kwargs) def _shared_incref(self, *args, **kwargs): return self._untyped_storage._shared_incref(*args, **kwargs) def _share_fd_cpu_(self, *args, **kwargs): fd, size = self._untyped_storage._share_fd_cpu_(*args, **kwargs) return fd, size // self._element_size() def _get_legacy_storage_class(self): if self.dtype not in _dtype_to_storage_type_map(): return None storage_name = _dtype_to_storage_type_map()[self.dtype] if self.device.type not in [ "cpu", "cuda", "hpu", torch._C._get_privateuse1_backend_name(), ]: return None module = ( torch if self.device.type == "cpu" else getattr(torch, self.device.type) ) try: return getattr(module, storage_name) except AttributeError: return None TypedStorage.type.__doc__ = _type.__doc__ TypedStorage.cuda.__doc__ = _StorageBase.cuda.__doc__ TypedStorage.hpu.__doc__ = _StorageBase.hpu.__doc__ TypedStorage.to.__doc__ = _to.__doc__ class _LegacyStorageMeta(type): dtype: torch.dtype def __instancecheck__(cls, instance): if type(instance) == TypedStorage: cls_device = _get_device_from_module(cls.__module__) return (cls_device == instance.device.type) and ( cls.dtype == instance.dtype ) return False class _LegacyStorage(TypedStorage, metaclass=_LegacyStorageMeta): @classmethod def _new_shared(cls, size): """Create a new storage in shared memory with the same data type.""" untyped_storage = torch.UntypedStorage._new_shared(size * cls()._element_size()) return cls(wrap_storage=untyped_storage) @classmethod def _release_ipc_counter(cls, *args, **kwargs): return torch.UntypedStorage._release_ipc_counter_cuda(*args, **kwargs) @classmethod def _new_shared_filename(cls, manager, obj, size): bytes_size = size * torch._utils._element_size(cls.dtype) return cls( 
            wrap_storage=torch.UntypedStorage._new_shared_filename_cpu(
                manager, obj, bytes_size
            )
        )


def _get_dtype_from_pickle_storage_type(pickle_storage_type: str):
    try:
        return _storage_type_to_dtype_map()[pickle_storage_type]
    except KeyError as e:
        raise KeyError(
            f'pickle storage type "{pickle_storage_type}" is not recognized'
        ) from e
```
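A minimal usage sketch for the storage helpers above (not part of the dumped file): it exercises the dtype-cast methods that route through `TypedStorage._to` and the memory-mapped `from_file` constructor. The file name `example_storage.bin` is an illustrative assumption, and `TypedStorage` is deprecated, so these calls may emit deprecation warnings.

```py
import warnings

import torch

with warnings.catch_warnings():
    warnings.simplefilter("ignore")  # TypedStorage is deprecated and warns on use

    # A typed storage wraps an UntypedStorage together with a dtype.
    t = torch.arange(4, dtype=torch.int32)
    storage = t.storage()                    # TypedStorage with dtype torch.int32
    as_float = storage.float()               # dispatches to TypedStorage._to(torch.float)
    print(as_float.dtype, as_float.tolist())  # torch.float32 [0.0, 1.0, 2.0, 3.0]

    # from_file maps a file into CPU memory; with shared=True the file is
    # created if needed and writes to the storage are written back to the file.
    mapped = torch.FloatStorage.from_file("example_storage.bin", shared=True, size=4)
    print(mapped.size())                     # 4 elements backed by the mapped file
```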
=================================================================================================================
SOURCE CODE FILE: __init__.py
LINES: 1
SIZE: 0.19 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\testing\__init__.py
ENCODING: utf-8
```py
from torch._C import FileCheck as FileCheck

from . import _utils
from ._comparison import assert_allclose, assert_close as assert_close
from ._creation import make_tensor as make_tensor
```
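A short sketch (not from the dump) of the two helpers this `__init__.py` re-exports: `make_tensor` builds a test input and `assert_close` compares against it using the dtype-aware default tolerances documented in `_comparison.py`.

```py
import torch
from torch.testing import assert_close, make_tensor

# Random float32 input in [-1, 1); dtype and device are keyword-only.
x = make_tensor((2, 3), dtype=torch.float32, device="cpu", low=-1.0, high=1.0)

# A perturbation well inside the float32 defaults (rtol=1.3e-6, atol=1e-5).
y = x + 1e-7
assert_close(y, x)  # passes

# A larger perturbation fails with a "Tensor-likes are not close!" report.
try:
    assert_close(x + 1.0, x)
except AssertionError as e:
    print(e)
```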
==================================================================================================================== SOURCE CODE FILE: _comparison.py LINES: 27 SIZE: 63.34 KB PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_comparison.py ENCODING: utf-8 ```py # mypy: allow-untyped-defs import abc import cmath import collections.abc import contextlib from collections.abc import Collection, Sequence from typing import Any, Callable, NoReturn, Optional, Union from typing_extensions import deprecated import torch try: import numpy as np HAS_NUMPY = True except ModuleNotFoundError: HAS_NUMPY = False np = None # type: ignore[assignment] class ErrorMeta(Exception): """Internal testing exception that makes that carries error metadata.""" def __init__( self, type: type[Exception], msg: str, *, id: tuple[Any, ...] = () ) -> None: super().__init__( "If you are a user and see this message during normal operation " "please file an issue at https://github.com/pytorch/pytorch/issues. " "If you are a developer and working on the comparison functions, please `raise ErrorMeta.to_error()` " "for user facing errors." ) self.type = type self.msg = msg self.id = id def to_error( self, msg: Optional[Union[str, Callable[[str], str]]] = None ) -> Exception: if not isinstance(msg, str): generated_msg = self.msg if self.id: generated_msg += f"\n\nThe failure occurred for item {''.join(str([item]) for item in self.id)}" msg = msg(generated_msg) if callable(msg) else generated_msg return self.type(msg) # Some analysis of tolerance by logging tests from test_torch.py can be found in # https://github.com/pytorch/pytorch/pull/32538. # {dtype: (rtol, atol)} _DTYPE_PRECISIONS = { torch.float16: (0.001, 1e-5), torch.bfloat16: (0.016, 1e-5), torch.float32: (1.3e-6, 1e-5), torch.float64: (1e-7, 1e-7), torch.complex32: (0.001, 1e-5), torch.complex64: (1.3e-6, 1e-5), torch.complex128: (1e-7, 1e-7), } # The default tolerances of torch.float32 are used for quantized dtypes, because quantized tensors are compared in # their dequantized and floating point representation. For more details see `TensorLikePair._compare_quantized_values` _DTYPE_PRECISIONS.update( dict.fromkeys( (torch.quint8, torch.quint2x4, torch.quint4x2, torch.qint8, torch.qint32), _DTYPE_PRECISIONS[torch.float32], ) ) def default_tolerances( *inputs: Union[torch.Tensor, torch.dtype], dtype_precisions: Optional[dict[torch.dtype, tuple[float, float]]] = None, ) -> tuple[float, float]: """Returns the default absolute and relative testing tolerances for a set of inputs based on the dtype. See :func:`assert_close` for a table of the default tolerance for each dtype. Returns: (Tuple[float, float]): Loosest tolerances of all input dtypes. """ dtypes = [] for input in inputs: if isinstance(input, torch.Tensor): dtypes.append(input.dtype) elif isinstance(input, torch.dtype): dtypes.append(input) else: raise TypeError( f"Expected a torch.Tensor or a torch.dtype, but got {type(input)} instead." ) dtype_precisions = dtype_precisions or _DTYPE_PRECISIONS rtols, atols = zip(*[dtype_precisions.get(dtype, (0.0, 0.0)) for dtype in dtypes]) return max(rtols), max(atols) def get_tolerances( *inputs: Union[torch.Tensor, torch.dtype], rtol: Optional[float], atol: Optional[float], id: tuple[Any, ...] = (), ) -> tuple[float, float]: """Gets absolute and relative to be used for numeric comparisons. If both ``rtol`` and ``atol`` are specified, this is a no-op. If both are not specified, the return value of :func:`default_tolerances` is used. 
Raises: ErrorMeta: With :class:`ValueError`, if only ``rtol`` or ``atol`` is specified. Returns: (Tuple[float, float]): Valid absolute and relative tolerances. """ if (rtol is None) ^ (atol is None): # We require both tolerance to be omitted or specified, because specifying only one might lead to surprising # results. Imagine setting atol=0.0 and the tensors still match because rtol>0.0. raise ErrorMeta( ValueError, f"Both 'rtol' and 'atol' must be either specified or omitted, " f"but got no {'rtol' if rtol is None else 'atol'}.", id=id, ) elif rtol is not None and atol is not None: return rtol, atol else: return default_tolerances(*inputs) def _make_mismatch_msg( *, default_identifier: str, identifier: Optional[Union[str, Callable[[str], str]]] = None, extra: Optional[str] = None, abs_diff: float, abs_diff_idx: Optional[Union[int, tuple[int, ...]]] = None, atol: float, rel_diff: float, rel_diff_idx: Optional[Union[int, tuple[int, ...]]] = None, rtol: float, ) -> str: """Makes a mismatch error message for numeric values. Args: default_identifier (str): Default description of the compared values, e.g. "Tensor-likes". identifier (Optional[Union[str, Callable[[str], str]]]): Optional identifier that overrides ``default_identifier``. Can be passed as callable in which case it will be called with ``default_identifier`` to create the description at runtime. extra (Optional[str]): Extra information to be placed after the message header and the mismatch statistics. abs_diff (float): Absolute difference. abs_diff_idx (Optional[Union[int, Tuple[int, ...]]]): Optional index of the absolute difference. atol (float): Allowed absolute tolerance. Will only be added to mismatch statistics if it or ``rtol`` are ``> 0``. rel_diff (float): Relative difference. rel_diff_idx (Optional[Union[int, Tuple[int, ...]]]): Optional index of the relative difference. rtol (float): Allowed relative tolerance. Will only be added to mismatch statistics if it or ``atol`` are ``> 0``. """ equality = rtol == 0 and atol == 0 def make_diff_msg( *, type: str, diff: float, idx: Optional[Union[int, tuple[int, ...]]], tol: float, ) -> str: if idx is None: msg = f"{type.title()} difference: {diff}" else: msg = f"Greatest {type} difference: {diff} at index {idx}" if not equality: msg += f" (up to {tol} allowed)" return msg + "\n" if identifier is None: identifier = default_identifier elif callable(identifier): identifier = identifier(default_identifier) msg = f"{identifier} are not {'equal' if equality else 'close'}!\n\n" if extra: msg += f"{extra.strip()}\n" msg += make_diff_msg(type="absolute", diff=abs_diff, idx=abs_diff_idx, tol=atol) msg += make_diff_msg(type="relative", diff=rel_diff, idx=rel_diff_idx, tol=rtol) return msg.strip() def make_scalar_mismatch_msg( actual: Union[bool, int, float, complex], expected: Union[bool, int, float, complex], *, rtol: float, atol: float, identifier: Optional[Union[str, Callable[[str], str]]] = None, ) -> str: """Makes a mismatch error message for scalars. Args: actual (Union[bool, int, float, complex]): Actual scalar. expected (Union[bool, int, float, complex]): Expected scalar. rtol (float): Relative tolerance. atol (float): Absolute tolerance. identifier (Optional[Union[str, Callable[[str], str]]]): Optional description for the scalars. Can be passed as callable in which case it will be called by the default value to create the description at runtime. Defaults to "Scalars". 
""" abs_diff = abs(actual - expected) rel_diff = float("inf") if expected == 0 else abs_diff / abs(expected) return _make_mismatch_msg( default_identifier="Scalars", identifier=identifier, extra=f"Expected {expected} but got {actual}.", abs_diff=abs_diff, atol=atol, rel_diff=rel_diff, rtol=rtol, ) def make_tensor_mismatch_msg( actual: torch.Tensor, expected: torch.Tensor, matches: torch.Tensor, *, rtol: float, atol: float, identifier: Optional[Union[str, Callable[[str], str]]] = None, ): """Makes a mismatch error message for tensors. Args: actual (torch.Tensor): Actual tensor. expected (torch.Tensor): Expected tensor. matches (torch.Tensor): Boolean mask of the same shape as ``actual`` and ``expected`` that indicates the location of matches. rtol (float): Relative tolerance. atol (float): Absolute tolerance. identifier (Optional[Union[str, Callable[[str], str]]]): Optional description for the tensors. Can be passed as callable in which case it will be called by the default value to create the description at runtime. Defaults to "Tensor-likes". """ def unravel_flat_index(flat_index: int) -> tuple[int, ...]: if not matches.shape: return () inverse_index = [] for size in matches.shape[::-1]: div, mod = divmod(flat_index, size) flat_index = div inverse_index.append(mod) return tuple(inverse_index[::-1]) number_of_elements = matches.numel() total_mismatches = number_of_elements - int(torch.sum(matches)) extra = ( f"Mismatched elements: {total_mismatches} / {number_of_elements} " f"({total_mismatches / number_of_elements:.1%})" ) actual_flat = actual.flatten() expected_flat = expected.flatten() matches_flat = matches.flatten() if not actual.dtype.is_floating_point and not actual.dtype.is_complex: # TODO: Instead of always upcasting to int64, it would be sufficient to cast to the next higher dtype to avoid # overflow actual_flat = actual_flat.to(torch.int64) expected_flat = expected_flat.to(torch.int64) abs_diff = torch.abs(actual_flat - expected_flat) # Ensure that only mismatches are used for the max_abs_diff computation abs_diff[matches_flat] = 0 max_abs_diff, max_abs_diff_flat_idx = torch.max(abs_diff, 0) rel_diff = abs_diff / torch.abs(expected_flat) # Ensure that only mismatches are used for the max_rel_diff computation rel_diff[matches_flat] = 0 max_rel_diff, max_rel_diff_flat_idx = torch.max(rel_diff, 0) return _make_mismatch_msg( default_identifier="Tensor-likes", identifier=identifier, extra=extra, abs_diff=max_abs_diff.item(), abs_diff_idx=unravel_flat_index(int(max_abs_diff_flat_idx)), atol=atol, rel_diff=max_rel_diff.item(), rel_diff_idx=unravel_flat_index(int(max_rel_diff_flat_idx)), rtol=rtol, ) class UnsupportedInputs(Exception): # noqa: B903 """Exception to be raised during the construction of a :class:`Pair` in case it doesn't support the inputs.""" class Pair(abc.ABC): """ABC for all comparison pairs to be used in conjunction with :func:`assert_equal`. Each subclass needs to overwrite :meth:`Pair.compare` that performs the actual comparison. Each pair receives **all** options, so select the ones applicable for the subclass and forward the rest to the super class. Raising an :class:`UnsupportedInputs` during constructions indicates that the pair is not able to handle the inputs and the next pair type will be tried. All other errors should be raised as :class:`ErrorMeta`. After the instantiation, :meth:`Pair._make_error_meta` can be used to automatically handle overwriting the message with a user supplied one and id handling. 
""" def __init__( self, actual: Any, expected: Any, *, id: tuple[Any, ...] = (), **unknown_parameters: Any, ) -> None: self.actual = actual self.expected = expected self.id = id self._unknown_parameters = unknown_parameters @staticmethod def _inputs_not_supported() -> NoReturn: raise UnsupportedInputs @staticmethod def _check_inputs_isinstance(*inputs: Any, cls: Union[type, tuple[type, ...]]): """Checks if all inputs are instances of a given class and raise :class:`UnsupportedInputs` otherwise.""" if not all(isinstance(input, cls) for input in inputs): Pair._inputs_not_supported() def _fail( self, type: type[Exception], msg: str, *, id: tuple[Any, ...] = () ) -> NoReturn: """Raises an :class:`ErrorMeta` from a given exception type and message and the stored id. .. warning:: If you use this before the ``super().__init__(...)`` call in the constructor, you have to pass the ``id`` explicitly. """ raise ErrorMeta(type, msg, id=self.id if not id and hasattr(self, "id") else id) @abc.abstractmethod def compare(self) -> None: """Compares the inputs and raises an :class`ErrorMeta` in case they mismatch.""" def extra_repr(self) -> Sequence[Union[str, tuple[str, Any]]]: """Returns extra information that will be included in the representation. Should be overwritten by all subclasses that use additional options. The representation of the object will only be surfaced in case we encounter an unexpected error and thus should help debug the issue. Can be a sequence of key-value-pairs or attribute names. """ return [] def __repr__(self) -> str: head = f"{type(self).__name__}(" tail = ")" body = [ f" {name}={value!s}," for name, value in [ ("id", self.id), ("actual", self.actual), ("expected", self.expected), *[ (extra, getattr(self, extra)) if isinstance(extra, str) else extra for extra in self.extra_repr() ], ] ] return "\n".join((head, *body, *tail)) class ObjectPair(Pair): """Pair for any type of inputs that will be compared with the `==` operator. .. note:: Since this will instantiate for any kind of inputs, it should only be used as fallback after all other pairs couldn't handle the inputs. """ def compare(self) -> None: try: equal = self.actual == self.expected except Exception as error: # We are not using `self._raise_error_meta` here since we need the exception chaining raise ErrorMeta( ValueError, f"{self.actual} == {self.expected} failed with:\n{error}.", id=self.id, ) from error if not equal: self._fail(AssertionError, f"{self.actual} != {self.expected}") class NonePair(Pair): """Pair for ``None`` inputs.""" def __init__(self, actual: Any, expected: Any, **other_parameters: Any) -> None: if not (actual is None or expected is None): self._inputs_not_supported() super().__init__(actual, expected, **other_parameters) def compare(self) -> None: if not (self.actual is None and self.expected is None): self._fail( AssertionError, f"None mismatch: {self.actual} is not {self.expected}" ) class BooleanPair(Pair): """Pair for :class:`bool` inputs. .. note:: If ``numpy`` is available, also handles :class:`numpy.bool_` inputs. """ def __init__( self, actual: Any, expected: Any, *, id: tuple[Any, ...], **other_parameters: Any, ) -> None: actual, expected = self._process_inputs(actual, expected, id=id) super().__init__(actual, expected, **other_parameters) @property def _supported_types(self) -> tuple[type, ...]: cls: list[type] = [bool] if HAS_NUMPY: cls.append(np.bool_) return tuple(cls) def _process_inputs( self, actual: Any, expected: Any, *, id: tuple[Any, ...] 
) -> tuple[bool, bool]: self._check_inputs_isinstance(actual, expected, cls=self._supported_types) actual, expected = ( self._to_bool(bool_like, id=id) for bool_like in (actual, expected) ) return actual, expected def _to_bool(self, bool_like: Any, *, id: tuple[Any, ...]) -> bool: if isinstance(bool_like, bool): return bool_like elif isinstance(bool_like, np.bool_): return bool_like.item() else: raise ErrorMeta( TypeError, f"Unknown boolean type {type(bool_like)}.", id=id ) def compare(self) -> None: if self.actual is not self.expected: self._fail( AssertionError, f"Booleans mismatch: {self.actual} is not {self.expected}", ) class NumberPair(Pair): """Pair for Python number (:class:`int`, :class:`float`, and :class:`complex`) inputs. .. note:: If ``numpy`` is available, also handles :class:`numpy.number` inputs. Kwargs: rtol (Optional[float]): Relative tolerance. If specified ``atol`` must also be specified. If omitted, default values based on the type are selected with the below table. atol (Optional[float]): Absolute tolerance. If specified ``rtol`` must also be specified. If omitted, default values based on the type are selected with the below table. equal_nan (bool): If ``True``, two ``NaN`` values are considered equal. Defaults to ``False``. check_dtype (bool): If ``True``, the type of the inputs will be checked for equality. Defaults to ``False``. The following table displays correspondence between Python number type and the ``torch.dtype``'s. See :func:`assert_close` for the corresponding tolerances. +------------------+-------------------------------+ | ``type`` | corresponding ``torch.dtype`` | +==================+===============================+ | :class:`int` | :attr:`~torch.int64` | +------------------+-------------------------------+ | :class:`float` | :attr:`~torch.float64` | +------------------+-------------------------------+ | :class:`complex` | :attr:`~torch.complex64` | +------------------+-------------------------------+ """ _TYPE_TO_DTYPE = { int: torch.int64, float: torch.float64, complex: torch.complex128, } _NUMBER_TYPES = tuple(_TYPE_TO_DTYPE.keys()) def __init__( self, actual: Any, expected: Any, *, id: tuple[Any, ...] = (), rtol: Optional[float] = None, atol: Optional[float] = None, equal_nan: bool = False, check_dtype: bool = False, **other_parameters: Any, ) -> None: actual, expected = self._process_inputs(actual, expected, id=id) super().__init__(actual, expected, id=id, **other_parameters) self.rtol, self.atol = get_tolerances( *[self._TYPE_TO_DTYPE[type(input)] for input in (actual, expected)], rtol=rtol, atol=atol, id=id, ) self.equal_nan = equal_nan self.check_dtype = check_dtype @property def _supported_types(self) -> tuple[type, ...]: cls = list(self._NUMBER_TYPES) if HAS_NUMPY: cls.append(np.number) return tuple(cls) def _process_inputs( self, actual: Any, expected: Any, *, id: tuple[Any, ...] ) -> tuple[Union[int, float, complex], Union[int, float, complex]]: self._check_inputs_isinstance(actual, expected, cls=self._supported_types) actual, expected = ( self._to_number(number_like, id=id) for number_like in (actual, expected) ) return actual, expected def _to_number( self, number_like: Any, *, id: tuple[Any, ...] 
) -> Union[int, float, complex]: if HAS_NUMPY and isinstance(number_like, np.number): return number_like.item() elif isinstance(number_like, self._NUMBER_TYPES): return number_like # type: ignore[return-value] else: raise ErrorMeta( TypeError, f"Unknown number type {type(number_like)}.", id=id ) def compare(self) -> None: if self.check_dtype and type(self.actual) is not type(self.expected): self._fail( AssertionError, f"The (d)types do not match: {type(self.actual)} != {type(self.expected)}.", ) if self.actual == self.expected: return if self.equal_nan and cmath.isnan(self.actual) and cmath.isnan(self.expected): return abs_diff = abs(self.actual - self.expected) tolerance = self.atol + self.rtol * abs(self.expected) if cmath.isfinite(abs_diff) and abs_diff <= tolerance: return self._fail( AssertionError, make_scalar_mismatch_msg( self.actual, self.expected, rtol=self.rtol, atol=self.atol ), ) def extra_repr(self) -> Sequence[str]: return ( "rtol", "atol", "equal_nan", "check_dtype", ) class TensorLikePair(Pair): """Pair for :class:`torch.Tensor`-like inputs. Kwargs: allow_subclasses (bool): rtol (Optional[float]): Relative tolerance. If specified ``atol`` must also be specified. If omitted, default values based on the type are selected. See :func:assert_close: for details. atol (Optional[float]): Absolute tolerance. If specified ``rtol`` must also be specified. If omitted, default values based on the type are selected. See :func:assert_close: for details. equal_nan (bool): If ``True``, two ``NaN`` values are considered equal. Defaults to ``False``. check_device (bool): If ``True`` (default), asserts that corresponding tensors are on the same :attr:`~torch.Tensor.device`. If this check is disabled, tensors on different :attr:`~torch.Tensor.device`'s are moved to the CPU before being compared. check_dtype (bool): If ``True`` (default), asserts that corresponding tensors have the same ``dtype``. If this check is disabled, tensors with different ``dtype``'s are promoted to a common ``dtype`` (according to :func:`torch.promote_types`) before being compared. check_layout (bool): If ``True`` (default), asserts that corresponding tensors have the same ``layout``. If this check is disabled, tensors with different ``layout``'s are converted to strided tensors before being compared. check_stride (bool): If ``True`` and corresponding tensors are strided, asserts that they have the same stride. """ def __init__( self, actual: Any, expected: Any, *, id: tuple[Any, ...] 
= (), allow_subclasses: bool = True, rtol: Optional[float] = None, atol: Optional[float] = None, equal_nan: bool = False, check_device: bool = True, check_dtype: bool = True, check_layout: bool = True, check_stride: bool = False, **other_parameters: Any, ): actual, expected = self._process_inputs( actual, expected, id=id, allow_subclasses=allow_subclasses ) super().__init__(actual, expected, id=id, **other_parameters) self.rtol, self.atol = get_tolerances( actual, expected, rtol=rtol, atol=atol, id=self.id ) self.equal_nan = equal_nan self.check_device = check_device self.check_dtype = check_dtype self.check_layout = check_layout self.check_stride = check_stride def _process_inputs( self, actual: Any, expected: Any, *, id: tuple[Any, ...], allow_subclasses: bool ) -> tuple[torch.Tensor, torch.Tensor]: directly_related = isinstance(actual, type(expected)) or isinstance( expected, type(actual) ) if not directly_related: self._inputs_not_supported() if not allow_subclasses and type(actual) is not type(expected): self._inputs_not_supported() actual, expected = (self._to_tensor(input) for input in (actual, expected)) for tensor in (actual, expected): self._check_supported(tensor, id=id) return actual, expected def _to_tensor(self, tensor_like: Any) -> torch.Tensor: if isinstance(tensor_like, torch.Tensor): return tensor_like try: return torch.as_tensor(tensor_like) except Exception: self._inputs_not_supported() def _check_supported(self, tensor: torch.Tensor, *, id: tuple[Any, ...]) -> None: if tensor.layout not in { torch.strided, torch.jagged, torch.sparse_coo, torch.sparse_csr, torch.sparse_csc, torch.sparse_bsr, torch.sparse_bsc, }: raise ErrorMeta( ValueError, f"Unsupported tensor layout {tensor.layout}", id=id ) def compare(self) -> None: actual, expected = self.actual, self.expected self._compare_attributes(actual, expected) if any(input.device.type == "meta" for input in (actual, expected)): return actual, expected = self._equalize_attributes(actual, expected) self._compare_values(actual, expected) def _compare_attributes( self, actual: torch.Tensor, expected: torch.Tensor, ) -> None: """Checks if the attributes of two tensors match. Always checks - the :attr:`~torch.Tensor.shape`, - whether both inputs are quantized or not, - and if they use the same quantization scheme. Checks for - :attr:`~torch.Tensor.layout`, - :meth:`~torch.Tensor.stride`, - :attr:`~torch.Tensor.device`, and - :attr:`~torch.Tensor.dtype` are optional and can be disabled through the corresponding ``check_*`` flag during construction of the pair. 
""" def raise_mismatch_error( attribute_name: str, actual_value: Any, expected_value: Any ) -> NoReturn: self._fail( AssertionError, f"The values for attribute '{attribute_name}' do not match: {actual_value} != {expected_value}.", ) if actual.shape != expected.shape: raise_mismatch_error("shape", actual.shape, expected.shape) if actual.is_quantized != expected.is_quantized: raise_mismatch_error( "is_quantized", actual.is_quantized, expected.is_quantized ) elif actual.is_quantized and actual.qscheme() != expected.qscheme(): raise_mismatch_error("qscheme()", actual.qscheme(), expected.qscheme()) if actual.layout != expected.layout: if self.check_layout: raise_mismatch_error("layout", actual.layout, expected.layout) elif ( actual.layout == torch.strided and self.check_stride and actual.stride() != expected.stride() ): raise_mismatch_error("stride()", actual.stride(), expected.stride()) if self.check_device and actual.device != expected.device: raise_mismatch_error("device", actual.device, expected.device) if self.check_dtype and actual.dtype != expected.dtype: raise_mismatch_error("dtype", actual.dtype, expected.dtype) def _equalize_attributes( self, actual: torch.Tensor, expected: torch.Tensor ) -> tuple[torch.Tensor, torch.Tensor]: """Equalizes some attributes of two tensors for value comparison. If ``actual`` and ``expected`` are ... - ... not on the same :attr:`~torch.Tensor.device`, they are moved CPU memory. - ... not of the same ``dtype``, they are promoted to a common ``dtype`` (according to :func:`torch.promote_types`). - ... not of the same ``layout``, they are converted to strided tensors. Args: actual (Tensor): Actual tensor. expected (Tensor): Expected tensor. Returns: (Tuple[Tensor, Tensor]): Equalized tensors. """ # The comparison logic uses operators currently not supported by the MPS backends. # See https://github.com/pytorch/pytorch/issues/77144 for details. 
# TODO: Remove this conversion as soon as all operations are supported natively by the MPS backend if actual.is_mps or expected.is_mps: # type: ignore[attr-defined] actual = actual.cpu() expected = expected.cpu() if actual.device != expected.device: actual = actual.cpu() expected = expected.cpu() if actual.dtype != expected.dtype: actual_dtype = actual.dtype expected_dtype = expected.dtype # For uint64, this is not sound in general, which is why promote_types doesn't # allow it, but for easy testing, we're unlikely to get confused # by large uint64 overflowing into negative int64 if actual_dtype in [torch.uint64, torch.uint32, torch.uint16]: actual_dtype = torch.int64 if expected_dtype in [torch.uint64, torch.uint32, torch.uint16]: expected_dtype = torch.int64 dtype = torch.promote_types(actual_dtype, expected_dtype) actual = actual.to(dtype) expected = expected.to(dtype) if actual.layout != expected.layout: # These checks are needed, since Tensor.to_dense() fails on tensors that are already strided actual = actual.to_dense() if actual.layout != torch.strided else actual expected = ( expected.to_dense() if expected.layout != torch.strided else expected ) return actual, expected def _compare_values(self, actual: torch.Tensor, expected: torch.Tensor) -> None: if actual.is_quantized: compare_fn = self._compare_quantized_values elif actual.is_sparse: compare_fn = self._compare_sparse_coo_values elif actual.layout in { torch.sparse_csr, torch.sparse_csc, torch.sparse_bsr, torch.sparse_bsc, }: compare_fn = self._compare_sparse_compressed_values elif actual.layout == torch.jagged: actual, expected = actual.values(), expected.values() compare_fn = self._compare_regular_values_close else: compare_fn = self._compare_regular_values_close compare_fn( actual, expected, rtol=self.rtol, atol=self.atol, equal_nan=self.equal_nan ) def _compare_quantized_values( self, actual: torch.Tensor, expected: torch.Tensor, *, rtol: float, atol: float, equal_nan: bool, ) -> None: """Compares quantized tensors by comparing the :meth:`~torch.Tensor.dequantize`'d variants for closeness. .. note:: A detailed discussion about why only the dequantized variant is checked for closeness rather than checking the individual quantization parameters for closeness and the integer representation for equality can be found in https://github.com/pytorch/pytorch/issues/68548. """ return self._compare_regular_values_close( actual.dequantize(), expected.dequantize(), rtol=rtol, atol=atol, equal_nan=equal_nan, identifier=lambda default_identifier: f"Quantized {default_identifier.lower()}", ) def _compare_sparse_coo_values( self, actual: torch.Tensor, expected: torch.Tensor, *, rtol: float, atol: float, equal_nan: bool, ) -> None: """Compares sparse COO tensors by comparing - the number of sparse dimensions, - the number of non-zero elements (nnz) for equality, - the indices for equality, and - the values for closeness. 
""" if actual.sparse_dim() != expected.sparse_dim(): self._fail( AssertionError, ( f"The number of sparse dimensions in sparse COO tensors does not match: " f"{actual.sparse_dim()} != {expected.sparse_dim()}" ), ) if actual._nnz() != expected._nnz(): self._fail( AssertionError, ( f"The number of specified values in sparse COO tensors does not match: " f"{actual._nnz()} != {expected._nnz()}" ), ) self._compare_regular_values_equal( actual._indices(), expected._indices(), identifier="Sparse COO indices", ) self._compare_regular_values_close( actual._values(), expected._values(), rtol=rtol, atol=atol, equal_nan=equal_nan, identifier="Sparse COO values", ) def _compare_sparse_compressed_values( self, actual: torch.Tensor, expected: torch.Tensor, *, rtol: float, atol: float, equal_nan: bool, ) -> None: """Compares sparse compressed tensors by comparing - the number of non-zero elements (nnz) for equality, - the plain indices for equality, - the compressed indices for equality, and - the values for closeness. """ format_name, compressed_indices_method, plain_indices_method = { torch.sparse_csr: ( "CSR", torch.Tensor.crow_indices, torch.Tensor.col_indices, ), torch.sparse_csc: ( "CSC", torch.Tensor.ccol_indices, torch.Tensor.row_indices, ), torch.sparse_bsr: ( "BSR", torch.Tensor.crow_indices, torch.Tensor.col_indices, ), torch.sparse_bsc: ( "BSC", torch.Tensor.ccol_indices, torch.Tensor.row_indices, ), }[actual.layout] if actual._nnz() != expected._nnz(): self._fail( AssertionError, ( f"The number of specified values in sparse {format_name} tensors does not match: " f"{actual._nnz()} != {expected._nnz()}" ), ) # Compressed and plain indices in the CSR / CSC / BSR / BSC sparse formats can be `torch.int32` _or_ # `torch.int64`. While the same dtype is enforced for the compressed and plain indices of a single tensor, it # can be different between two tensors. Thus, we need to convert them to the same dtype, or the comparison will # fail. 
actual_compressed_indices = compressed_indices_method(actual) expected_compressed_indices = compressed_indices_method(expected) indices_dtype = torch.promote_types( actual_compressed_indices.dtype, expected_compressed_indices.dtype ) self._compare_regular_values_equal( actual_compressed_indices.to(indices_dtype), expected_compressed_indices.to(indices_dtype), identifier=f"Sparse {format_name} {compressed_indices_method.__name__}", ) self._compare_regular_values_equal( plain_indices_method(actual).to(indices_dtype), plain_indices_method(expected).to(indices_dtype), identifier=f"Sparse {format_name} {plain_indices_method.__name__}", ) self._compare_regular_values_close( actual.values(), expected.values(), rtol=rtol, atol=atol, equal_nan=equal_nan, identifier=f"Sparse {format_name} values", ) def _compare_regular_values_equal( self, actual: torch.Tensor, expected: torch.Tensor, *, equal_nan: bool = False, identifier: Optional[Union[str, Callable[[str], str]]] = None, ) -> None: """Checks if the values of two tensors are equal.""" self._compare_regular_values_close( actual, expected, rtol=0, atol=0, equal_nan=equal_nan, identifier=identifier ) def _compare_regular_values_close( self, actual: torch.Tensor, expected: torch.Tensor, *, rtol: float, atol: float, equal_nan: bool, identifier: Optional[Union[str, Callable[[str], str]]] = None, ) -> None: """Checks if the values of two tensors are close up to a desired tolerance.""" matches = torch.isclose( actual, expected, rtol=rtol, atol=atol, equal_nan=equal_nan ) if torch.all(matches): return if actual.shape == torch.Size([]): msg = make_scalar_mismatch_msg( actual.item(), expected.item(), rtol=rtol, atol=atol, identifier=identifier, ) else: msg = make_tensor_mismatch_msg( actual, expected, matches, rtol=rtol, atol=atol, identifier=identifier ) self._fail(AssertionError, msg) def extra_repr(self) -> Sequence[str]: return ( "rtol", "atol", "equal_nan", "check_device", "check_dtype", "check_layout", "check_stride", ) def originate_pairs( actual: Any, expected: Any, *, pair_types: Sequence[type[Pair]], sequence_types: tuple[type, ...] = (collections.abc.Sequence,), mapping_types: tuple[type, ...] = (collections.abc.Mapping,), id: tuple[Any, ...] = (), **options: Any, ) -> list[Pair]: """Originates pairs from the individual inputs. ``actual`` and ``expected`` can be possibly nested :class:`~collections.abc.Sequence`'s or :class:`~collections.abc.Mapping`'s. In this case the pairs are originated by recursing through them. Args: actual (Any): Actual input. expected (Any): Expected input. pair_types (Sequence[Type[Pair]]): Sequence of pair types that will be tried to construct with the inputs. First successful pair will be used. sequence_types (Tuple[Type, ...]): Optional types treated as sequences that will be checked elementwise. mapping_types (Tuple[Type, ...]): Optional types treated as mappings that will be checked elementwise. id (Tuple[Any, ...]): Optional id of a pair that will be included in an error message. **options (Any): Options passed to each pair during construction. Raises: ErrorMeta: With :class`AssertionError`, if the inputs are :class:`~collections.abc.Sequence`'s, but their length does not match. ErrorMeta: With :class`AssertionError`, if the inputs are :class:`~collections.abc.Mapping`'s, but their set of keys do not match. ErrorMeta: With :class`TypeError`, if no pair is able to handle the inputs. ErrorMeta: With any expected exception that happens during the construction of a pair. Returns: (List[Pair]): Originated pairs. 
""" # We explicitly exclude str's here since they are self-referential and would cause an infinite recursion loop: # "a" == "a"[0][0]... if ( isinstance(actual, sequence_types) and not isinstance(actual, str) and isinstance(expected, sequence_types) and not isinstance(expected, str) ): actual_len = len(actual) # type: ignore[arg-type] expected_len = len(expected) # type: ignore[arg-type] if actual_len != expected_len: raise ErrorMeta( AssertionError, f"The length of the sequences mismatch: {actual_len} != {expected_len}", id=id, ) pairs = [] for idx in range(actual_len): pairs.extend( originate_pairs( actual[idx], # type: ignore[index] expected[idx], # type: ignore[index] pair_types=pair_types, sequence_types=sequence_types, mapping_types=mapping_types, id=(*id, idx), **options, ) ) return pairs elif isinstance(actual, mapping_types) and isinstance(expected, mapping_types): actual_keys = set(actual.keys()) # type: ignore[attr-defined] expected_keys = set(expected.keys()) # type: ignore[attr-defined] if actual_keys != expected_keys: missing_keys = expected_keys - actual_keys additional_keys = actual_keys - expected_keys raise ErrorMeta( AssertionError, ( f"The keys of the mappings do not match:\n" f"Missing keys in the actual mapping: {sorted(missing_keys)}\n" f"Additional keys in the actual mapping: {sorted(additional_keys)}" ), id=id, ) keys: Collection = actual_keys # Since the origination aborts after the first failure, we try to be deterministic with contextlib.suppress(Exception): keys = sorted(keys) pairs = [] for key in keys: pairs.extend( originate_pairs( actual[key], # type: ignore[index] expected[key], # type: ignore[index] pair_types=pair_types, sequence_types=sequence_types, mapping_types=mapping_types, id=(*id, key), **options, ) ) return pairs else: for pair_type in pair_types: try: return [pair_type(actual, expected, id=id, **options)] # Raising an `UnsupportedInputs` during origination indicates that the pair type is not able to handle the # inputs. Thus, we try the next pair type. except UnsupportedInputs: continue # Raising an `ErrorMeta` during origination is the orderly way to abort and so we simply re-raise it. This # is only in a separate branch, because the one below would also except it. except ErrorMeta: raise # Raising any other exception during origination is unexpected and will give some extra information about # what happened. If applicable, the exception should be expected in the future. except Exception as error: raise RuntimeError( f"Originating a {pair_type.__name__}() at item {''.join(str([item]) for item in id)} with\n\n" f"{type(actual).__name__}(): {actual}\n\n" f"and\n\n" f"{type(expected).__name__}(): {expected}\n\n" f"resulted in the unexpected exception above. " f"If you are a user and see this message during normal operation " "please file an issue at https://github.com/pytorch/pytorch/issues. " "If you are a developer and working on the comparison functions, " "please except the previous error and raise an expressive `ErrorMeta` instead." ) from error else: raise ErrorMeta( TypeError, f"No comparison pair was able to handle inputs of type {type(actual)} and {type(expected)}.", id=id, ) def not_close_error_metas( actual: Any, expected: Any, *, pair_types: Sequence[type[Pair]] = (ObjectPair,), sequence_types: tuple[type, ...] = (collections.abc.Sequence,), mapping_types: tuple[type, ...] = (collections.abc.Mapping,), **options: Any, ) -> list[ErrorMeta]: """Asserts that inputs are equal. 
``actual`` and ``expected`` can be possibly nested :class:`~collections.abc.Sequence`'s or :class:`~collections.abc.Mapping`'s. In this case the comparison happens elementwise by recursing through them. Args: actual (Any): Actual input. expected (Any): Expected input. pair_types (Sequence[Type[Pair]]): Sequence of :class:`Pair` types that will be tried to construct with the inputs. First successful pair will be used. Defaults to only using :class:`ObjectPair`. sequence_types (Tuple[Type, ...]): Optional types treated as sequences that will be checked elementwise. mapping_types (Tuple[Type, ...]): Optional types treated as mappings that will be checked elementwise. **options (Any): Options passed to each pair during construction. """ # Hide this function from `pytest`'s traceback __tracebackhide__ = True try: pairs = originate_pairs( actual, expected, pair_types=pair_types, sequence_types=sequence_types, mapping_types=mapping_types, **options, ) except ErrorMeta as error_meta: # Explicitly raising from None to hide the internal traceback raise error_meta.to_error() from None # noqa: RSE102 error_metas: list[ErrorMeta] = [] for pair in pairs: try: pair.compare() except ErrorMeta as error_meta: error_metas.append(error_meta) # Raising any exception besides `ErrorMeta` while comparing is unexpected and will give some extra information # about what happened. If applicable, the exception should be expected in the future. except Exception as error: raise RuntimeError( f"Comparing\n\n" f"{pair}\n\n" f"resulted in the unexpected exception above. " f"If you are a user and see this message during normal operation " "please file an issue at https://github.com/pytorch/pytorch/issues. " "If you are a developer and working on the comparison functions, " "please except the previous error and raise an expressive `ErrorMeta` instead." ) from error # [ErrorMeta Cycles] # ErrorMeta objects in this list capture # tracebacks that refer to the frame of this function. # The local variable `error_metas` refers to the error meta # objects, creating a reference cycle. Frames in the traceback # would not get freed until cycle collection, leaking cuda memory in tests. # We break the cycle by removing the reference to the error_meta objects # from this frame as it returns. error_metas = [error_metas] return error_metas.pop() def assert_close( actual: Any, expected: Any, *, allow_subclasses: bool = True, rtol: Optional[float] = None, atol: Optional[float] = None, equal_nan: bool = False, check_device: bool = True, check_dtype: bool = True, check_layout: bool = True, check_stride: bool = False, msg: Optional[Union[str, Callable[[str], str]]] = None, ): r"""Asserts that ``actual`` and ``expected`` are close. If ``actual`` and ``expected`` are strided, non-quantized, real-valued, and finite, they are considered close if .. math:: \lvert \text{actual} - \text{expected} \rvert \le \texttt{atol} + \texttt{rtol} \cdot \lvert \text{expected} \rvert Non-finite values (``-inf`` and ``inf``) are only considered close if and only if they are equal. ``NaN``'s are only considered equal to each other if ``equal_nan`` is ``True``. In addition, they are only considered close if they have the same - :attr:`~torch.Tensor.device` (if ``check_device`` is ``True``), - ``dtype`` (if ``check_dtype`` is ``True``), - ``layout`` (if ``check_layout`` is ``True``), and - stride (if ``check_stride`` is ``True``). If either ``actual`` or ``expected`` is a meta tensor, only the attribute checks will be performed. 
If ``actual`` and ``expected`` are sparse (either having COO, CSR, CSC, BSR, or BSC layout), their strided members are checked individually. Indices, namely ``indices`` for COO, ``crow_indices`` and ``col_indices`` for CSR and BSR, or ``ccol_indices`` and ``row_indices`` for CSC and BSC layouts, respectively, are always checked for equality whereas the values are checked for closeness according to the definition above. If ``actual`` and ``expected`` are quantized, they are considered close if they have the same :meth:`~torch.Tensor.qscheme` and the result of :meth:`~torch.Tensor.dequantize` is close according to the definition above. ``actual`` and ``expected`` can be :class:`~torch.Tensor`'s or any tensor-or-scalar-likes from which :class:`torch.Tensor`'s can be constructed with :func:`torch.as_tensor`. Except for Python scalars the input types have to be directly related. In addition, ``actual`` and ``expected`` can be :class:`~collections.abc.Sequence`'s or :class:`~collections.abc.Mapping`'s in which case they are considered close if their structure matches and all their elements are considered close according to the above definition. .. note:: Python scalars are an exception to the type relation requirement, because their :func:`type`, i.e. :class:`int`, :class:`float`, and :class:`complex`, is equivalent to the ``dtype`` of a tensor-like. Thus, Python scalars of different types can be checked, but require ``check_dtype=False``. Args: actual (Any): Actual input. expected (Any): Expected input. allow_subclasses (bool): If ``True`` (default) and except for Python scalars, inputs of directly related types are allowed. Otherwise type equality is required. rtol (Optional[float]): Relative tolerance. If specified ``atol`` must also be specified. If omitted, default values based on the :attr:`~torch.Tensor.dtype` are selected with the below table. atol (Optional[float]): Absolute tolerance. If specified ``rtol`` must also be specified. If omitted, default values based on the :attr:`~torch.Tensor.dtype` are selected with the below table. equal_nan (Union[bool, str]): If ``True``, two ``NaN`` values will be considered equal. check_device (bool): If ``True`` (default), asserts that corresponding tensors are on the same :attr:`~torch.Tensor.device`. If this check is disabled, tensors on different :attr:`~torch.Tensor.device`'s are moved to the CPU before being compared. check_dtype (bool): If ``True`` (default), asserts that corresponding tensors have the same ``dtype``. If this check is disabled, tensors with different ``dtype``'s are promoted to a common ``dtype`` (according to :func:`torch.promote_types`) before being compared. check_layout (bool): If ``True`` (default), asserts that corresponding tensors have the same ``layout``. If this check is disabled, tensors with different ``layout``'s are converted to strided tensors before being compared. check_stride (bool): If ``True`` and corresponding tensors are strided, asserts that they have the same stride. msg (Optional[Union[str, Callable[[str], str]]]): Optional error message to use in case a failure occurs during the comparison. Can also passed as callable in which case it will be called with the generated message and should return the new message. Raises: ValueError: If no :class:`torch.Tensor` can be constructed from an input. ValueError: If only ``rtol`` or ``atol`` is specified. AssertionError: If corresponding inputs are not Python scalars and are not directly related. 
AssertionError: If ``allow_subclasses`` is ``False``, but corresponding inputs are not Python scalars and have different types. AssertionError: If the inputs are :class:`~collections.abc.Sequence`'s, but their length does not match. AssertionError: If the inputs are :class:`~collections.abc.Mapping`'s, but their set of keys do not match. AssertionError: If corresponding tensors do not have the same :attr:`~torch.Tensor.shape`. AssertionError: If ``check_layout`` is ``True``, but corresponding tensors do not have the same :attr:`~torch.Tensor.layout`. AssertionError: If only one of corresponding tensors is quantized. AssertionError: If corresponding tensors are quantized, but have different :meth:`~torch.Tensor.qscheme`'s. AssertionError: If ``check_device`` is ``True``, but corresponding tensors are not on the same :attr:`~torch.Tensor.device`. AssertionError: If ``check_dtype`` is ``True``, but corresponding tensors do not have the same ``dtype``. AssertionError: If ``check_stride`` is ``True``, but corresponding strided tensors do not have the same stride. AssertionError: If the values of corresponding tensors are not close according to the definition above. The following table displays the default ``rtol`` and ``atol`` for different ``dtype``'s. In case of mismatching ``dtype``'s, the maximum of both tolerances is used. +---------------------------+------------+----------+ | ``dtype`` | ``rtol`` | ``atol`` | +===========================+============+==========+ | :attr:`~torch.float16` | ``1e-3`` | ``1e-5`` | +---------------------------+------------+----------+ | :attr:`~torch.bfloat16` | ``1.6e-2`` | ``1e-5`` | +---------------------------+------------+----------+ | :attr:`~torch.float32` | ``1.3e-6`` | ``1e-5`` | +---------------------------+------------+----------+ | :attr:`~torch.float64` | ``1e-7`` | ``1e-7`` | +---------------------------+------------+----------+ | :attr:`~torch.complex32` | ``1e-3`` | ``1e-5`` | +---------------------------+------------+----------+ | :attr:`~torch.complex64` | ``1.3e-6`` | ``1e-5`` | +---------------------------+------------+----------+ | :attr:`~torch.complex128` | ``1e-7`` | ``1e-7`` | +---------------------------+------------+----------+ | :attr:`~torch.quint8` | ``1.3e-6`` | ``1e-5`` | +---------------------------+------------+----------+ | :attr:`~torch.quint2x4` | ``1.3e-6`` | ``1e-5`` | +---------------------------+------------+----------+ | :attr:`~torch.quint4x2` | ``1.3e-6`` | ``1e-5`` | +---------------------------+------------+----------+ | :attr:`~torch.qint8` | ``1.3e-6`` | ``1e-5`` | +---------------------------+------------+----------+ | :attr:`~torch.qint32` | ``1.3e-6`` | ``1e-5`` | +---------------------------+------------+----------+ | other | ``0.0`` | ``0.0`` | +---------------------------+------------+----------+ .. note:: :func:`~torch.testing.assert_close` is highly configurable with strict default settings. Users are encouraged to :func:`~functools.partial` it to fit their use case. For example, if an equality check is needed, one might define an ``assert_equal`` that uses zero tolerances for every ``dtype`` by default: >>> import functools >>> assert_equal = functools.partial(torch.testing.assert_close, rtol=0, atol=0) >>> assert_equal(1e-9, 1e-10) Traceback (most recent call last): ... AssertionError: Scalars are not equal! <BLANKLINE> Expected 1e-10 but got 1e-09. 
Absolute difference: 9.000000000000001e-10 Relative difference: 9.0 Examples: >>> # tensor to tensor comparison >>> expected = torch.tensor([1e0, 1e-1, 1e-2]) >>> actual = torch.acos(torch.cos(expected)) >>> torch.testing.assert_close(actual, expected) >>> # scalar to scalar comparison >>> import math >>> expected = math.sqrt(2.0) >>> actual = 2.0 / math.sqrt(2.0) >>> torch.testing.assert_close(actual, expected) >>> # numpy array to numpy array comparison >>> import numpy as np >>> expected = np.array([1e0, 1e-1, 1e-2]) >>> actual = np.arccos(np.cos(expected)) >>> torch.testing.assert_close(actual, expected) >>> # sequence to sequence comparison >>> import numpy as np >>> # The types of the sequences do not have to match. They only have to have the same >>> # length and their elements have to match. >>> expected = [torch.tensor([1.0]), 2.0, np.array(3.0)] >>> actual = tuple(expected) >>> torch.testing.assert_close(actual, expected) >>> # mapping to mapping comparison >>> from collections import OrderedDict >>> import numpy as np >>> foo = torch.tensor(1.0) >>> bar = 2.0 >>> baz = np.array(3.0) >>> # The types and a possible ordering of mappings do not have to match. They only >>> # have to have the same set of keys and their elements have to match. >>> expected = OrderedDict([("foo", foo), ("bar", bar), ("baz", baz)]) >>> actual = {"baz": baz, "bar": bar, "foo": foo} >>> torch.testing.assert_close(actual, expected) >>> expected = torch.tensor([1.0, 2.0, 3.0]) >>> actual = expected.clone() >>> # By default, directly related instances can be compared >>> torch.testing.assert_close(torch.nn.Parameter(actual), expected) >>> # This check can be made more strict with allow_subclasses=False >>> torch.testing.assert_close( ... torch.nn.Parameter(actual), expected, allow_subclasses=False ... ) Traceback (most recent call last): ... TypeError: No comparison pair was able to handle inputs of type <class 'torch.nn.parameter.Parameter'> and <class 'torch.Tensor'>. >>> # If the inputs are not directly related, they are never considered close >>> torch.testing.assert_close(actual.numpy(), expected) Traceback (most recent call last): ... TypeError: No comparison pair was able to handle inputs of type <class 'numpy.ndarray'> and <class 'torch.Tensor'>. >>> # Exceptions to these rules are Python scalars. They can be checked regardless of >>> # their type if check_dtype=False. >>> torch.testing.assert_close(1.0, 1, check_dtype=False) >>> # NaN != NaN by default. >>> expected = torch.tensor(float("Nan")) >>> actual = expected.clone() >>> torch.testing.assert_close(actual, expected) Traceback (most recent call last): ... AssertionError: Scalars are not close! <BLANKLINE> Expected nan but got nan. Absolute difference: nan (up to 1e-05 allowed) Relative difference: nan (up to 1.3e-06 allowed) >>> torch.testing.assert_close(actual, expected, equal_nan=True) >>> expected = torch.tensor([1.0, 2.0, 3.0]) >>> actual = torch.tensor([1.0, 4.0, 5.0]) >>> # The default error message can be overwritten. >>> torch.testing.assert_close(actual, expected, msg="Argh, the tensors are not close!") Traceback (most recent call last): ... AssertionError: Argh, the tensors are not close! >>> # If msg is a callable, it can be used to augment the generated message with >>> # extra information >>> torch.testing.assert_close( ... actual, expected, msg=lambda msg: f"Header\n\n{msg}\n\nFooter" ... ) Traceback (most recent call last): ... AssertionError: Header <BLANKLINE> Tensor-likes are not close! 
<BLANKLINE> Mismatched elements: 2 / 3 (66.7%) Greatest absolute difference: 2.0 at index (1,) (up to 1e-05 allowed) Greatest relative difference: 1.0 at index (1,) (up to 1.3e-06 allowed) <BLANKLINE> Footer """ # Hide this function from `pytest`'s traceback __tracebackhide__ = True error_metas = not_close_error_metas( actual, expected, pair_types=( NonePair, BooleanPair, NumberPair, TensorLikePair, ), allow_subclasses=allow_subclasses, rtol=rtol, atol=atol, equal_nan=equal_nan, check_device=check_device, check_dtype=check_dtype, check_layout=check_layout, check_stride=check_stride, msg=msg, ) if error_metas: # TODO: compose all metas into one AssertionError raise error_metas[0].to_error(msg) @deprecated( "`torch.testing.assert_allclose()` is deprecated since 1.12 and will be removed in a future release. " "Please use `torch.testing.assert_close()` instead. " "You can find detailed upgrade instructions in https://github.com/pytorch/pytorch/issues/61844.", category=FutureWarning, ) def assert_allclose( actual: Any, expected: Any, rtol: Optional[float] = None, atol: Optional[float] = None, equal_nan: bool = True, msg: str = "", ) -> None: """ .. warning:: :func:`torch.testing.assert_allclose` is deprecated since ``1.12`` and will be removed in a future release. Please use :func:`torch.testing.assert_close` instead. You can find detailed upgrade instructions `here <https://github.com/pytorch/pytorch/issues/61844>`_. """ if not isinstance(actual, torch.Tensor): actual = torch.tensor(actual) if not isinstance(expected, torch.Tensor): expected = torch.tensor(expected, dtype=actual.dtype) if rtol is None and atol is None: rtol, atol = default_tolerances( actual, expected, dtype_precisions={ torch.float16: (1e-3, 1e-3), torch.float32: (1e-4, 1e-5), torch.float64: (1e-5, 1e-8), }, ) torch.testing.assert_close( actual, expected, rtol=rtol, atol=atol, equal_nan=equal_nan, check_device=True, check_dtype=False, check_stride=False, msg=msg or None, ) ```
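To make the dispatch above concrete, here is a hedged sketch (not part of the file) that drives the pair machinery directly through `not_close_error_metas`, the non-raising entry point that `assert_close` wraps. It originates one `Pair` per leaf of a nested input and collects an `ErrorMeta` per mismatching leaf instead of raising on the first failure; importing from the private `torch.testing._comparison` module is done purely for illustration.

```py
import torch
from torch.testing._comparison import (
    BooleanPair,
    NonePair,
    NumberPair,
    TensorLikePair,
    not_close_error_metas,
)

actual = {"t": torch.tensor([1.0, 2.0]), "n": 1.0, "flag": True}
expected = {"t": torch.tensor([1.0, 2.5]), "n": 1.0, "flag": True}

# Pairs are originated per leaf after recursing through the mapping. assert_close
# forwards rtol/atol/equal_nan and the check_* flags through the same **options;
# here the defaults are used. Only the tensor leaf mismatches, so exactly one
# ErrorMeta is collected.
error_metas = not_close_error_metas(
    actual,
    expected,
    pair_types=(NonePair, BooleanPair, NumberPair, TensorLikePair),
)
for meta in error_metas:
    print(meta.to_error())  # AssertionError noting the failure occurred for item ['t']
```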
================================================================================================================== SOURCE CODE FILE: _creation.py LINES: 1 SIZE: 12.18 KB PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_creation.py ENCODING: utf-8 ```py """ This module contains tensor creation utilities. """ import collections.abc import functools import math import warnings from typing import cast, Optional, Union import torch _INTEGRAL_TYPES = [ torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64, torch.uint16, torch.uint32, torch.uint64, ] _FLOATING_TYPES = [torch.float16, torch.bfloat16, torch.float32, torch.float64] _FLOATING_8BIT_TYPES = [ torch.float8_e4m3fn, torch.float8_e5m2, torch.float8_e4m3fnuz, torch.float8_e5m2fnuz, ] _COMPLEX_TYPES = [torch.complex32, torch.complex64, torch.complex128] _BOOLEAN_OR_INTEGRAL_TYPES = [torch.bool, *_INTEGRAL_TYPES] _FLOATING_OR_COMPLEX_TYPES = [*_FLOATING_TYPES, *_COMPLEX_TYPES] def _uniform_random_(t: torch.Tensor, low: float, high: float) -> torch.Tensor: # uniform_ requires to-from <= std::numeric_limits<scalar_t>::max() # Work around this by scaling the range before and after the PRNG if high - low >= torch.finfo(t.dtype).max: return t.uniform_(low / 2, high / 2).mul_(2) else: return t.uniform_(low, high) def make_tensor( *shape: Union[int, torch.Size, list[int], tuple[int, ...]], dtype: torch.dtype, device: Union[str, torch.device], low: Optional[float] = None, high: Optional[float] = None, requires_grad: bool = False, noncontiguous: bool = False, exclude_zero: bool = False, memory_format: Optional[torch.memory_format] = None, ) -> torch.Tensor: r"""Creates a tensor with the given :attr:`shape`, :attr:`device`, and :attr:`dtype`, and filled with values uniformly drawn from ``[low, high)``. If :attr:`low` or :attr:`high` are specified and are outside the range of the :attr:`dtype`'s representable finite values then they are clamped to the lowest or highest representable finite value, respectively. If ``None``, then the following table describes the default values for :attr:`low` and :attr:`high`, which depend on :attr:`dtype`. +---------------------------+------------+----------+ | ``dtype`` | ``low`` | ``high`` | +===========================+============+==========+ | boolean type | ``0`` | ``2`` | +---------------------------+------------+----------+ | unsigned integral type | ``0`` | ``10`` | +---------------------------+------------+----------+ | signed integral types | ``-9`` | ``10`` | +---------------------------+------------+----------+ | floating types | ``-9`` | ``9`` | +---------------------------+------------+----------+ | complex types | ``-9`` | ``9`` | +---------------------------+------------+----------+ Args: shape (Tuple[int, ...]): Single integer or a sequence of integers defining the shape of the output tensor. dtype (:class:`torch.dtype`): The data type of the returned tensor. device (Union[str, torch.device]): The device of the returned tensor. low (Optional[Number]): Sets the lower limit (inclusive) of the given range. If a number is provided it is clamped to the least representable finite value of the given dtype. When ``None`` (default), this value is determined based on the :attr:`dtype` (see the table above). Default: ``None``. high (Optional[Number]): Sets the upper limit (exclusive) of the given range. If a number is provided it is clamped to the greatest representable finite value of the given dtype. 
When ``None`` (default) this value is determined based on the :attr:`dtype` (see the table above). Default: ``None``. .. deprecated:: 2.1 Passing ``low==high`` to :func:`~torch.testing.make_tensor` for floating or complex types is deprecated since 2.1 and will be removed in 2.3. Use :func:`torch.full` instead. requires_grad (Optional[bool]): If autograd should record operations on the returned tensor. Default: ``False``. noncontiguous (Optional[bool]): If `True`, the returned tensor will be noncontiguous. This argument is ignored if the constructed tensor has fewer than two elements. Mutually exclusive with ``memory_format``. exclude_zero (Optional[bool]): If ``True`` then zeros are replaced with the dtype's small positive value depending on the :attr:`dtype`. For bool and integer types zero is replaced with one. For floating point types it is replaced with the dtype's smallest positive normal number (the "tiny" value of the :attr:`dtype`'s :func:`~torch.finfo` object), and for complex types it is replaced with a complex number whose real and imaginary parts are both the smallest positive normal number representable by the complex type. Default ``False``. memory_format (Optional[torch.memory_format]): The memory format of the returned tensor. Mutually exclusive with ``noncontiguous``. Raises: ValueError: If ``requires_grad=True`` is passed for integral `dtype` ValueError: If ``low >= high``. ValueError: If either :attr:`low` or :attr:`high` is ``nan``. ValueError: If both :attr:`noncontiguous` and :attr:`memory_format` are passed. TypeError: If :attr:`dtype` isn't supported by this function. Examples: >>> # xdoctest: +SKIP >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA) >>> from torch.testing import make_tensor >>> # Creates a float tensor with values in [-1, 1) >>> make_tensor((3,), device='cpu', dtype=torch.float32, low=-1, high=1) >>> # xdoctest: +SKIP tensor([ 0.1205, 0.2282, -0.6380]) >>> # Creates a bool tensor on CUDA >>> make_tensor((2, 2), device='cuda', dtype=torch.bool) tensor([[False, False], [False, True]], device='cuda:0') """ def modify_low_high( low: Optional[float], high: Optional[float], *, lowest_inclusive: float, highest_exclusive: float, default_low: float, default_high: float, ) -> tuple[float, float]: """ Modifies (and raises ValueError when appropriate) low and high values given by the user (input_low, input_high) if required. """ def clamp(a: float, l: float, h: float) -> float: return min(max(a, l), h) low = low if low is not None else default_low high = high if high is not None else default_high if any(isinstance(value, float) and math.isnan(value) for value in [low, high]): raise ValueError( f"`low` and `high` cannot be NaN, but got {low=} and {high=}" ) elif low == high and dtype in _FLOATING_OR_COMPLEX_TYPES: warnings.warn( "Passing `low==high` to `torch.testing.make_tensor` for floating or complex types " "is deprecated since 2.1 and will be removed in 2.3. " "Use `torch.full(...)` instead.", FutureWarning, stacklevel=3, ) elif low >= high: raise ValueError(f"`low` must be less than `high`, but got {low} >= {high}") elif high < lowest_inclusive or low >= highest_exclusive: raise ValueError( f"The value interval specified by `low` and `high` is [{low}, {high}), " f"but {dtype} only supports [{lowest_inclusive}, {highest_exclusive})" ) low = clamp(low, lowest_inclusive, highest_exclusive) high = clamp(high, lowest_inclusive, highest_exclusive) if dtype in _BOOLEAN_OR_INTEGRAL_TYPES: # 1. 
`low` is ceiled to avoid creating values smaller than `low` and thus outside the specified interval # 2. Following the same reasoning as for 1., `high` should be floored. However, the higher bound of # `torch.randint` is exclusive, and thus we need to ceil here as well. return math.ceil(low), math.ceil(high) return low, high if len(shape) == 1 and isinstance(shape[0], collections.abc.Sequence): shape = shape[0] # type: ignore[assignment] shape = cast(tuple[int, ...], tuple(shape)) if noncontiguous and memory_format is not None: raise ValueError( f"The parameters `noncontiguous` and `memory_format` are mutually exclusive, " f"but got {noncontiguous=} and {memory_format=}" ) if requires_grad and dtype in _BOOLEAN_OR_INTEGRAL_TYPES: raise ValueError( f"`requires_grad=True` is not supported for boolean and integral dtypes, but got {dtype=}" ) noncontiguous = noncontiguous and functools.reduce(lambda x, y: x * y, shape, 1) > 1 if noncontiguous: # Double the size of the shape in the last dimension, so that we have # non-identical values when we make the non-contiguous operation. shape = cast(tuple[int, ...], (*shape[:-1], 2 * shape[-1])) if dtype is torch.bool: low, high = cast( tuple[int, int], modify_low_high( low, high, lowest_inclusive=0, highest_exclusive=2, default_low=0, default_high=2, ), ) result = torch.randint(low, high, shape, device=device, dtype=dtype) elif dtype in _BOOLEAN_OR_INTEGRAL_TYPES: low, high = cast( tuple[int, int], modify_low_high( low, high, lowest_inclusive=torch.iinfo(dtype).min, highest_exclusive=torch.iinfo(dtype).max # In theory, `highest_exclusive` should always be the maximum value + 1. However, `torch.randint` # internally converts the bounds to an int64 and would overflow. In other words: `torch.randint` cannot # sample 2**63 - 1, i.e. the maximum value of `torch.int64` and we need to account for that here. + (1 if dtype is not torch.int64 else 0), # This is incorrect for `torch.uint8`, but since we clamp to `lowest`, i.e. 0 for `torch.uint8`, # _after_ we use the default value, we don't need to special case it here default_low=-9, default_high=10, ), ) result = torch.randint(low, high, shape, device=device, dtype=dtype) elif dtype in _FLOATING_OR_COMPLEX_TYPES: low, high = modify_low_high( low, high, lowest_inclusive=torch.finfo(dtype).min, highest_exclusive=torch.finfo(dtype).max, default_low=-9, default_high=9, ) result = torch.empty(shape, device=device, dtype=dtype) _uniform_random_( torch.view_as_real(result) if dtype in _COMPLEX_TYPES else result, low, high ) elif dtype in _FLOATING_8BIT_TYPES: low, high = modify_low_high( low, high, lowest_inclusive=torch.finfo(dtype).min, highest_exclusive=torch.finfo(dtype).max, default_low=-9, default_high=9, ) result = torch.empty(shape, device=device, dtype=torch.float32) _uniform_random_(result, low, high) result = result.to(dtype) else: raise TypeError( f"The requested dtype '{dtype}' is not supported by torch.testing.make_tensor()." " To request support, file an issue at: https://github.com/pytorch/pytorch/issues" ) if noncontiguous: # Offset by 1 to also catch offsetting issues result = result[..., 1::2] elif memory_format is not None: result = result.clone(memory_format=memory_format) if exclude_zero: result[result == 0] = ( 1 if dtype in _BOOLEAN_OR_INTEGRAL_TYPES else torch.finfo(dtype).tiny ) if dtype in _FLOATING_OR_COMPLEX_TYPES: result.requires_grad = requires_grad return result ```
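The `make_tensor` helper above is the workhorse behind the sample-input generators used later in this package. A minimal usage sketch follows; the shapes, bounds, and variable names are illustrative examples, not values taken from the module itself:

```py
# Illustrative use of torch.testing.make_tensor; the shapes and bounds here
# are example values, not defaults from the module above.
import torch
from torch.testing import make_tensor

# Float tensor with values drawn uniformly from [-1, 1)
a = make_tensor((3, 4), dtype=torch.float32, device="cpu", low=-1, high=1)

# Integral dtype: falls back to the [-9, 10) range from the docstring table
b = make_tensor((5,), dtype=torch.int32, device="cpu")

# noncontiguous=True builds a double-width last dimension and strides into it
c = make_tensor((2, 6), dtype=torch.float64, device="cpu", noncontiguous=True)
assert not c.is_contiguous()

# exclude_zero=True replaces any sampled zeros with the dtype's "tiny" value
d = make_tensor((8,), dtype=torch.float16, device="cpu", exclude_zero=True)
assert bool((d != 0).all())
```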
=========================================================================================================================== SOURCE CODE FILE: __init__.py LINES: 1 SIZE: 0.00 KB PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\__init__.py ENCODING: utf-8 ```py ```
====================================================================================================================================== SOURCE CODE FILE: autocast_test_lists.py LINES: 1 SIZE: 28.19 KB PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\autocast_test_lists.py ENCODING: utf-8 ```py # mypy: ignore-errors import collections import torch from torch.testing._internal.common_utils import TEST_WITH_ROCM from torch.testing._internal.common_utils import TestCase class AutocastTestLists: def _rnn_cell_args(self, n, num_chunks, is_lstm, dev, dtype): input = (torch.randn((n, n), device=dev, dtype=torch.float32),) hx = ((torch.randn((n, n), device=dev, dtype=torch.float32), torch.randn((n, n), device=dev, dtype=torch.float32)) if is_lstm else torch.randn((n, n), device=dev, dtype=torch.float32),) weights = (torch.randn((num_chunks * n, n), device=dev, dtype=torch.float32), # weight_ih torch.randn((num_chunks * n, n), device=dev, dtype=torch.float32), # weight_hh torch.randn((num_chunks * n), device=dev, dtype=torch.float32), # bias_ih torch.randn((num_chunks * n), device=dev, dtype=torch.float32)) # bias_hh # returns args as a tuple return input + hx + weights # Supplies ops and arguments for test_autocast_* in test/test_cuda.py def __init__(self, dev): super().__init__() n = 8 # Utility arguments, created as one-element tuples pointwise0_fp16 = (torch.randn(n, dtype=torch.float16, device=dev),) pointwise1_fp16 = (torch.randn(n, dtype=torch.float16, device=dev),) pointwise2_fp16 = (torch.randn(n, dtype=torch.float16, device=dev),) mat0_fp16 = (torch.randn((n, n), dtype=torch.float16, device=dev),) mat1_fp16 = (torch.randn((n, n), dtype=torch.float16, device=dev),) mat2_fp16 = (torch.randn((n, n), dtype=torch.float16, device=dev),) dimsets = ((n, n, n), (n, n, n, n), (n, n, n, n, n)) conv_args_fp32 = [(torch.randn(dimset, dtype=torch.float32, device=dev), torch.randn(dimset, dtype=torch.float32, device=dev)) for dimset in dimsets] bias_fp32 = (torch.randn((n,), dtype=torch.float32, device=dev),) element0_fp32 = (torch.randn(1, dtype=torch.float32, device=dev),) pointwise0_fp32 = (torch.randn(n, dtype=torch.float32, device=dev),) pointwise1_fp32 = (torch.randn(n, dtype=torch.float32, device=dev),) mat0_fp32 = (torch.randn((n, n), dtype=torch.float32, device=dev),) mat1_fp32 = (torch.randn((n, n), dtype=torch.float32, device=dev),) mat2_fp32 = (torch.randn((n, n), dtype=torch.float32, device=dev),) mat3_fp32 = (torch.randn((n, n), dtype=torch.float32, device=dev),) # The lists below organize ops that autocast needs to test. # self.list_name corresponds to test_autocast_list_name in test/test_cuda.py. # Each op is associated with a tuple of valid arguments. # In addition, cudnn conv ops are not supported on ROCm and hence will # be skipped by passing TEST_WITH_ROCM flag to those ops in self.torch_fp16 list. # Some ops implement built-in type promotion. These don't need autocasting, # but autocasting relies on their promotion, so we include tests to double-check. 
self.torch_expect_builtin_promote = [ ("eq", pointwise0_fp32 + pointwise1_fp16, torch.bool), ("ge", pointwise0_fp32 + pointwise1_fp16, torch.bool), ("gt", pointwise0_fp32 + pointwise1_fp16, torch.bool), ("le", pointwise0_fp32 + pointwise1_fp16, torch.bool), ("lt", pointwise0_fp32 + pointwise1_fp16, torch.bool), ("ne", pointwise0_fp32 + pointwise1_fp16, torch.bool), ("add", pointwise0_fp32 + pointwise1_fp16, torch.float32), ("div", pointwise0_fp32 + pointwise1_fp16, torch.float32), ("mul", pointwise0_fp32 + pointwise1_fp16, torch.float32), ("cat", (pointwise0_fp16 + pointwise1_fp32,), torch.float32), ("equal", pointwise0_fp32 + pointwise1_fp16, torch.float32), ("stack", (pointwise0_fp16 + pointwise1_fp32,), torch.float32), ] self.methods_expect_builtin_promote = [ ("__eq__", pointwise0_fp32 + pointwise1_fp16, torch.bool), ("__ge__", pointwise0_fp32 + pointwise1_fp16, torch.bool), ("__gt__", pointwise0_fp32 + pointwise1_fp16, torch.bool), ("__le__", pointwise0_fp32 + pointwise1_fp16, torch.bool), ("__lt__", pointwise0_fp32 + pointwise1_fp16, torch.bool), ("__ne__", pointwise0_fp32 + pointwise1_fp16, torch.bool), ("__add__", pointwise0_fp32 + pointwise1_fp16, torch.float32), ("__div__", pointwise0_fp32 + pointwise1_fp16, torch.float32), ("__mul__", pointwise0_fp32 + pointwise1_fp16, torch.float32), ] # The remaining lists organize ops that autocast treats explicitly. self.torch_fp16 = [ # deprecated _convolution ("_convolution", conv_args_fp32[1] + bias_fp32 + ((1, 1), (0, 0), (1, 1), False, (0, 0), 1, False, True, True)), # the current _convolution ("_convolution", conv_args_fp32[1] + bias_fp32 + ((1, 1), (0, 0), (1, 1), False, (0, 0), 1, False, True, True, True)), ("conv1d", conv_args_fp32[0]), ("conv2d", conv_args_fp32[1]), ("conv3d", conv_args_fp32[2]), ("conv_tbc", conv_args_fp32[0] + bias_fp32), ("conv_transpose1d", conv_args_fp32[0]), ("conv_transpose2d", conv_args_fp32[1]), ("conv_transpose3d", conv_args_fp32[2]), ("convolution", conv_args_fp32[1] + bias_fp32 + ((1, 1), (0, 0), (1, 1), False, (0, 0), 1)), ("cudnn_convolution", conv_args_fp32[1] + ((0, 0), (1, 1), (1, 1), 1, False, True, True), TEST_WITH_ROCM), ("cudnn_convolution_transpose", conv_args_fp32[1] + ((0, 0), (0, 0), (1, 1), (1, 1), 1, False, True, True), TEST_WITH_ROCM), ("prelu", pointwise0_fp32 + element0_fp32), ("addmm", mat1_fp32 + mat2_fp32 + mat3_fp32), ("addmv", pointwise0_fp32 + mat2_fp32 + pointwise1_fp32), ("addr", mat0_fp32 + pointwise0_fp32 + pointwise1_fp32), ("matmul", mat0_fp32 + mat1_fp32), ("einsum", "bkhd,bqhd->bqkh", mat0_fp32 + mat1_fp32), ("mm", mat0_fp32 + mat1_fp32), ("mv", mat0_fp32 + pointwise0_fp32), ("chain_matmul", mat0_fp32 + mat1_fp32 + mat2_fp32), ("addbmm", mat0_fp32 + (torch.randn((n, n, n), device=dev, dtype=torch.float32), torch.randn((n, n, n), device=dev, dtype=torch.float32))), ("baddbmm", (torch.randn((n, n, n), device=dev, dtype=torch.float32), torch.randn((n, n, n), device=dev, dtype=torch.float32), torch.randn((n, n, n), device=dev, dtype=torch.float32))), ("bmm", (torch.randn((n, n, n), device=dev, dtype=torch.float32), torch.randn((n, n, n), device=dev, dtype=torch.float32))), # _thnn_fused_lstm_cell and _thnn_fused_gru_cell are not Python-exposed as far as I can tell. 
# ("_thnn_fused_lstm_cell", mat0_fp32 + mat1_fp32 + mat2_fp32 + pointwise0_fp32 + pointwise1_fp32), # ("_thnn_fused_gru_cell", mat0_fp32 + mat1_fp32 + mat2_fp32 + pointwise0_fp32 + pointwise1_fp32), ("lstm_cell", self._rnn_cell_args(n, num_chunks=4, is_lstm=True, dev=dev, dtype=torch.float32)), ("gru_cell", self._rnn_cell_args(n, num_chunks=3, is_lstm=False, dev=dev, dtype=torch.float32)), ("rnn_tanh_cell", self._rnn_cell_args(n, num_chunks=1, is_lstm=False, dev=dev, dtype=torch.float32)), ("rnn_relu_cell", self._rnn_cell_args(n, num_chunks=1, is_lstm=False, dev=dev, dtype=torch.float32)), ] self.torch_fp32 = [ ("acos", (pointwise0_fp16[0].clamp(-.9, 0.9),)), ("asin", (pointwise0_fp16[0].clamp(-.9, 0.9),)), ("cosh", pointwise0_fp16), ("erfinv", (pointwise0_fp16[0].clamp(-.9, .9),)), ("exp", pointwise0_fp16), ("expm1", pointwise0_fp16), ("log", (pointwise0_fp16[0].clamp(0.1, 100.0),)), ("log10", (pointwise0_fp16[0].clamp(0.1, 100.0),)), ("log2", (pointwise0_fp16[0].clamp(0.1, 100.0),)), ("log1p", (pointwise0_fp16[0].clamp(-0.9, 100.0),)), ("reciprocal", pointwise0_fp16), ("rsqrt", (pointwise0_fp16[0].clamp(0.0, 100.0),)), ("sinh", pointwise0_fp16), ("tan", (pointwise0_fp16[0].clamp(-3.1 / 2, 3.1 / 2),)), ("pow", ((pointwise0_fp16[0] + 1.).clamp(0.0, 100.0),) + pointwise1_fp16), ("pow", ((pointwise0_fp16[0] + 1.).clamp(0.0, 100.0),) + (1.7,)), # ("pow", (1.7,) + pointwise0_fp16), # This variant has a backend, but is not documented in the API. ("softmax", pointwise0_fp16 + (0,)), ("log_softmax", pointwise0_fp16 + (0,)), ("layer_norm", pointwise0_fp16 + ((pointwise0_fp16[0].numel(),),)), ("group_norm", mat0_fp16 + (1,)), ("norm", pointwise0_fp16), ("norm", pointwise0_fp16, {"dim": 0}), # these need magma # ("norm", mat0_fp16, {"p": "nuc"}), # ("norm", mat0_fp16, {"p": "nuc", "dim": 0}), ("norm", pointwise0_fp16, {"p": 1}), ("norm", pointwise0_fp16, {"p": 1, "dim": 0}), ("cosine_similarity", mat0_fp16 + mat1_fp16), ("poisson_nll_loss", mat0_fp16 + mat1_fp16 + (True, False, 1.e-8, torch.nn._reduction.get_enum('mean'))), ("cosine_embedding_loss", (torch.tensor([[1, 2, 3]], device=dev, dtype=torch.float16), torch.tensor([[1, 3, 4]], device=dev, dtype=torch.float16), torch.tensor([1], device=dev, dtype=torch.int))), ("hinge_embedding_loss", mat0_fp16 + (torch.ones(n, device=dev, dtype=torch.int),)), ("kl_div", mat0_fp16 + (torch.rand((n, n), device=dev, dtype=torch.float16),)), ("margin_ranking_loss", mat0_fp16 + mat1_fp16 + (torch.ones((n,), device=dev, dtype=torch.float16),)), ("triplet_margin_loss", mat0_fp16 + mat1_fp16 + mat2_fp16), ("binary_cross_entropy_with_logits", mat0_fp16 + (torch.rand((n, n), device=dev, dtype=torch.float16),)), ("cumprod", pointwise0_fp16 + (0,)), ("cumsum", pointwise0_fp16 + (0,)), ("dist", pointwise0_fp16 + pointwise1_fp16), ("pdist", mat0_fp16), ("cdist", mat0_fp16 + mat1_fp16), ("prod", pointwise0_fp16), ("prod", pointwise0_fp16 + (0,)), ("renorm", mat0_fp16 + (2, 0, 1.0)), ("sum", pointwise0_fp16), ("sum", mat0_fp16 + (1,)), ("logsumexp", mat0_fp16 + (1,)), ] self.torch_need_autocast_promote = [ ("addcdiv", pointwise0_fp32 + pointwise1_fp16 + (pointwise2_fp16[0].clamp(0.1, 100),)), ("addcmul", pointwise0_fp32 + pointwise1_fp16 + pointwise2_fp16), ("atan2", pointwise0_fp32 + (pointwise1_fp16[0].clamp(0.1, 100),)), ("bilinear", (torch.randn((1, 2), dtype=torch.float16, device=dev), torch.randn((1, 2), dtype=torch.float32, device=dev), torch.randn((1, 2, 2), dtype=torch.float16, device=dev), torch.randn((1,), dtype=torch.float32, device=dev))), ("cross", 
(torch.randn(3, dtype=torch.float32, device=dev), torch.randn(3, dtype=torch.float16, device=dev))), ("dot", pointwise0_fp16 + pointwise1_fp32), ("vdot", pointwise0_fp16 + pointwise1_fp32), ("grid_sampler", (torch.randn((2, 3, 33, 22), dtype=torch.float16, device=dev), torch.randn((2, 22, 11, 2), dtype=torch.float32, device=dev), 0, 0, False)), ("index_put", pointwise0_fp32 + ((torch.tensor([1], device=dev, dtype=torch.long),), torch.randn(1, device=dev, dtype=torch.float16))), ("index_put", pointwise0_fp16 + ((torch.tensor([1], device=dev, dtype=torch.long),), torch.randn(1, device=dev, dtype=torch.float32))), ("tensordot", (torch.randn((2, 2, 2), dtype=torch.float32, device=dev), torch.randn((2, 2, 2), dtype=torch.float16, device=dev))), ("scatter_add", (torch.zeros(2, 2, 2, dtype=torch.float32, device=dev), 0, torch.randint(0, 2, (2, 2, 2), device=dev), torch.randn((2, 2, 2), dtype=torch.float16, device=dev))), ("scatter_add", (torch.zeros(2, 2, 2, dtype=torch.float16, device=dev), 0, torch.randint(0, 2, (2, 2, 2), device=dev), torch.randn((2, 2, 2), dtype=torch.float32, device=dev))), ] self.nn_fp16 = [ ("linear", mat0_fp32 + mat1_fp32 + mat2_fp32), ] self.nn_fp32 = [ ("softplus", pointwise0_fp16), ("nll_loss", (torch.rand((n, n), device=dev, dtype=torch.float), torch.zeros((n,), device=dev, dtype=torch.long))), ("nll_loss2d", (torch.rand((n, n, n, n), device=dev, dtype=torch.half), torch.zeros((n, n, n), device=dev, dtype=torch.long))), ("l1_loss", mat0_fp16 + mat1_fp16), ("smooth_l1_loss", mat0_fp16 + mat1_fp16), ("mse_loss", mat0_fp16 + mat1_fp16), ("multilabel_margin_loss", mat0_fp16 + (torch.ones((n, n), device=dev, dtype=torch.long),)), ("soft_margin_loss", mat0_fp16 + (torch.ones((n, n), device=dev, dtype=torch.long),)), ("multi_margin_loss", mat0_fp16 + (torch.ones((n,), device=dev, dtype=torch.long),)), ] self.linalg_fp16 = [ ("linalg_vecdot", mat0_fp32 + mat0_fp32), ("linalg_multi_dot", (mat0_fp32 + mat1_fp32 + mat2_fp32,)), ] self.methods_fp16 = [ ("__matmul__", mat0_fp32 + mat1_fp32) ] self.methods_fp32 = [ ("__pow__", (torch.rand(n, device=dev, dtype=torch.float16), 1.5)), ] self.banned = [ ("binary_cross_entropy", (torch.rand((n, n), device=dev, dtype=torch.float32), torch.rand((n, n), device=dev, dtype=torch.float32)), torch._C._nn), ] class AutocastCPUTestLists: # Supplies ops and arguments for test_autocast_* in test/test_cpu.py def __init__(self, dev): super().__init__() n = 8 # Utility arguments, created as one-element tuples pointwise0_bf16 = (torch.randn(n, dtype=torch.bfloat16, device=dev),) pointwise1_bf16 = (torch.randn(n, dtype=torch.bfloat16, device=dev),) mat0_bf16 = (torch.randn((n, n), dtype=torch.bfloat16, device=dev),) mat1_bf16 = (torch.randn((n, n), dtype=torch.bfloat16, device=dev),) mat2_bf16 = (torch.randn((n, n), dtype=torch.bfloat16, device=dev),) pointwise0_fp16 = (torch.randn(n, dtype=torch.float16, device=dev),) pointwise1_fp16 = (torch.randn(n, dtype=torch.float16, device=dev),) dummy_dimsets = ((n,), (n, n), (n, n, n), (n, n, n, n), (n, n, n, n, n)) dummy_bf16 = [(torch.randn(dimset, dtype=torch.bfloat16, device=dev),) for dimset in dummy_dimsets] dimsets = ((n, n, n), (n, n, n, n), (n, n, n, n, n)) conv_args_fp32 = [(torch.randn(dimset, dtype=torch.float32, device=dev), torch.randn(dimset, dtype=torch.float32, device=dev)) for dimset in dimsets] element0_fp32 = (torch.randn(1, dtype=torch.float32, device=dev),) pointwise0_fp32 = (torch.randn(n, dtype=torch.float32, device=dev),) pointwise1_fp32 = (torch.randn(n, dtype=torch.float32, 
device=dev),) mat0_fp32 = (torch.randn((n, n), dtype=torch.float32, device=dev),) mat1_fp32 = (torch.randn((n, n), dtype=torch.float32, device=dev),) mat2_fp32 = (torch.randn((n, n), dtype=torch.float32, device=dev),) mat3_fp32 = (torch.randn((n, n), dtype=torch.float32, device=dev),) dummy_fp32 = [ # noqa: F841 (torch.randn(dimset, dtype=torch.float32, device=dev),) for dimset in dummy_dimsets ] # The lists below organize ops that autocast needs to test. # self.list_name corresponds to test_autocast_list_name in test/test_cpu.py. # Each op is associated with a tuple of valid arguments. # Some ops implement built-in type promotion. These don't need autocasting, # but autocasting relies on their promotion, so we include tests to double-check. self.torch_expect_builtin_promote = [ ("eq", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool), ("ge", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool), ("gt", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool), ("le", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool), ("lt", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool), ("ne", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool), ("add", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.float32), ("div", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.float32), ("mul", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.float32), ] self.methods_expect_builtin_promote = [ ("__eq__", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool), ("__ge__", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool), ("__gt__", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool), ("__le__", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool), ("__lt__", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool), ("__ne__", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.bool), ("__add__", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.float32), ("__div__", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.float32), ("__mul__", pointwise0_fp32 + pointwise1_bf16, pointwise0_fp32 + pointwise1_fp16, torch.float32), ] # The remaining lists organize ops that autocast treats explicitly. 
self.torch_16 = [ ("conv1d", conv_args_fp32[0]), ("conv2d", conv_args_fp32[1]), ("conv3d", conv_args_fp32[2]), ("bmm", (torch.randn((n, n, n), device=dev, dtype=torch.float32), torch.randn((n, n, n), device=dev, dtype=torch.float32))), ("mm", mat0_fp32 + mat1_fp32), ("matmul", mat0_fp32 + mat1_fp32), ("baddbmm", (torch.randn((n, n, n), device=dev, dtype=torch.float32), torch.randn((n, n, n), device=dev, dtype=torch.float32), torch.randn((n, n, n), device=dev, dtype=torch.float32))), ("addmm", mat1_fp32 + mat2_fp32 + mat3_fp32), ("_addmm_activation", mat1_fp32 + mat2_fp32 + mat3_fp32, {"beta": 1, "alpha": 1, "use_gelu": True}), ("addbmm", mat0_fp32 + (torch.randn((n, n, n), device=dev, dtype=torch.float32), torch.randn((n, n, n), device=dev, dtype=torch.float32))), ("conv_tbc", (torch.randn((10, 7, 3), device=dev, dtype=torch.float32), torch.randn((5, 3, 5), device=dev, dtype=torch.float32), torch.randn(5, device=dev, dtype=torch.float32), 0)), ("conv_transpose1d", conv_args_fp32[0]), ("conv_transpose2d", conv_args_fp32[1]), ("conv_transpose3d", conv_args_fp32[2]), ("prelu", pointwise0_fp32 + element0_fp32), ("_native_multi_head_attention", (torch.randn((n, n, n), device=dev, dtype=torch.float32), torch.randn((n, n, n), device=dev, dtype=torch.float32), torch.randn((n, n, n), device=dev, dtype=torch.float32), n, 4, torch.randn((3 * n, n), device=dev, dtype=torch.float32), torch.randn((3 * n), device=dev, dtype=torch.float32), torch.randn((n, n), device=dev, dtype=torch.float32), torch.randn((n), device=dev, dtype=torch.float32))), ] self.torch_fp32 = [ ("poisson_nll_loss", mat0_bf16 + mat1_bf16 + (True, False, 1.e-8, torch.nn._reduction.get_enum('mean'))), ("cosine_embedding_loss", (torch.tensor([[1, 2, 3]], device=dev, dtype=torch.bfloat16), torch.tensor([[1, 3, 4]], device=dev, dtype=torch.bfloat16), torch.tensor([1], device=dev, dtype=torch.int))), ("hinge_embedding_loss", mat0_bf16 + (torch.ones(n, device=dev, dtype=torch.int),)), ("margin_ranking_loss", mat0_bf16 + mat1_bf16 + (torch.ones((n,), device=dev, dtype=torch.bfloat16),)), ("triplet_margin_loss", mat0_bf16 + mat1_bf16 + mat2_bf16), ("binary_cross_entropy_with_logits", mat0_bf16 + (torch.rand((n, n), device=dev, dtype=torch.bfloat16),)), ] self.nn_16 = [ ("linear", mat0_fp32 + mat1_fp32, {}), ] self.nn_fp32 = [ ("avg_pool3d", dummy_bf16[3], {"kernel_size": (3, 3, 3), "stride": (1, 1, 1)}), ("binary_cross_entropy", (torch.rand((n, n), device=dev, dtype=torch.bfloat16),) + (torch.rand((n, n), device=dev, dtype=torch.bfloat16),)), ("reflection_pad1d", dummy_bf16[2], {"padding": (3, 3)}), ("nll_loss", (torch.rand((n, n), device=dev, dtype=torch.bfloat16), torch.zeros((n,), device=dev, dtype=torch.long))), ("nll_loss2d", (torch.rand((n, n, n, n), device=dev, dtype=torch.bfloat16), torch.zeros((n, n, n), device=dev, dtype=torch.long))), ("l1_loss", mat0_bf16 + mat1_bf16), ("smooth_l1_loss", mat0_bf16 + mat1_bf16), ("mse_loss", mat0_bf16 + mat1_bf16), ("multilabel_margin_loss", mat0_bf16 + (torch.ones((n, n), device=dev, dtype=torch.long),)), ("soft_margin_loss", mat0_bf16 + (torch.ones((n, n), device=dev, dtype=torch.long),)), ("multi_margin_loss", mat0_bf16 + (torch.ones((n,), device=dev, dtype=torch.long),)), ("huber_loss", mat0_bf16 + mat1_bf16), ] self.torch_need_autocast_promote = [ ("cat", (pointwise0_bf16 + pointwise1_fp32,), (pointwise0_fp16 + pointwise1_fp32,)), ("stack", (pointwise0_bf16 + pointwise1_fp32,), (pointwise0_fp16 + pointwise1_fp32,)), ] class TestAutocast(TestCase): def args_maybe_kwargs(self, op_with_args): if 
len(op_with_args) == 2: return op_with_args[0], op_with_args[1], {} else: return op_with_args[0], op_with_args[1], op_with_args[2] def _run_autocast_outofplace( self, op, args, run_as_type, device, out_type=None, module=torch, add_kwargs=None, amp_dtype=torch.bfloat16, ): # helper to cast args def cast(val, to_type): if isinstance(val, torch.Tensor): return val.to(to_type) if val.is_floating_point() else val elif isinstance(val, collections.abc.Iterable): return type(val)(cast(v, to_type) for v in val) else: return val if add_kwargs is None: add_kwargs = {} self.assertFalse(torch.is_autocast_enabled(device_type=device)) with torch.amp.autocast(device_type=device, dtype=amp_dtype): self.assertTrue(torch.is_autocast_enabled(device_type=device)) out_type = out_type if out_type is not None else run_as_type output = output_method = None # Try module.* variant, if requested: if module is not None and hasattr(module, op): output = getattr(module, op)(*args, **add_kwargs) if isinstance(output, torch.Tensor): self.assertTrue( out_type == output.dtype, f"autocast for torch.{op} produced {output.dtype}, should produce {out_type}", ) # Try Tensor.* variant: if hasattr(torch.Tensor, op): output_method = getattr(args[0], op)(*args[1:], **add_kwargs) if isinstance(output_method, torch.Tensor): self.assertTrue( out_type == output_method.dtype, f"autocast for torch.{op} produced {output_method.dtype}, should produce torch.{out_type}", ) self.assertTrue( (output is not None) or (output_method is not None), f"{op} not found as an attribute on either Tensor or the requested module {module}", ) # Accounts for ops that return Tensors, iterables, and other non-Tensors. # For example, lstm_cell returns a tuple and equal returns bool. def compare(first, second): if isinstance(first, torch.Tensor): return torch.equal(first, second) elif isinstance(first, collections.abc.Iterable): return all(compare(f, s) for f, s in zip(first, second)) else: return first == second # If both torch.* and Tensor.* variants were found, check outputs are identical if (output is not None) and (output_method is not None): self.assertTrue(type(output) == type(output_method)) comparison = compare(output, output_method) self.assertTrue( comparison, f"torch.{op} result did not match Tensor.{op} result" ) # Compare numerics to Python-side "autocasting" that (we expect) does the same thing # as the C++-side autocasting, and should be bitwise accurate. output_to_compare = output if output is not None else output_method with torch.amp.autocast(device_type=device, enabled=False): self.assertFalse( torch.is_autocast_enabled(device_type=device) ) if module is not None and hasattr(module, op): control = getattr(module, op)( *cast(args, run_as_type), **add_kwargs ) else: control = getattr(args[0].to(run_as_type), op)( *cast(args[1:], run_as_type), **add_kwargs ) self.assertTrue(type(output_to_compare) == type(control)) comparison = compare(output_to_compare, control) self.assertTrue(comparison, f"torch.{op} result did not match control") self.assertTrue(torch.is_autocast_enabled(device_type=device)) self.assertFalse(torch.is_autocast_enabled(device_type=device)) ```
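The lists above are consumed by `TestAutocast._run_autocast_outofplace`, which runs each op under `torch.amp.autocast` and compares the result against an explicit cast-then-compute control. Below is a simplified sketch of how a CUDA test file might drive one of these lists; the class name, skip condition, and choice of `amp_dtype` are illustrative assumptions, while the real consumers are test/test_cuda.py and test/test_cpu.py, which add more dtypes, skip logic, and op lists:

```py
# Simplified sketch of a consumer of AutocastTestLists; not taken verbatim
# from the actual test files.
import unittest
import torch
from torch.testing._internal.autocast_test_lists import AutocastTestLists, TestAutocast

@unittest.skipUnless(torch.cuda.is_available(), "requires CUDA")
class MyCudaAutocastTest(TestAutocast):
    def setUp(self):
        super().setUp()
        self.autocast_lists = AutocastTestLists(torch.device("cuda:0"))

    def test_builtin_promote(self):
        # Each entry is (op_name, args, expected_output_dtype)
        for op, args, out_type in self.autocast_lists.torch_expect_builtin_promote:
            self._run_autocast_outofplace(
                op, args, torch.float32, device="cuda",
                out_type=out_type, amp_dtype=torch.float16,
            )

if __name__ == "__main__":
    unittest.main()
```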
======================================================================================================================================= SOURCE CODE FILE: autograd_function_db.py LINES: 1 SIZE: 19.77 KB PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\autograd_function_db.py ENCODING: utf-8 ```py # mypy: ignore-errors import torch from functools import partial from torch.testing import make_tensor from torch.testing._internal.opinfo.core import ( OpInfo, SampleInput, ) from torch.testing._internal.common_dtype import all_types_and import numpy as np # Note: [autograd.Function db] # # This is a collection of autograd.Function test cases written as OpInfos # so they can easily be consumed by OpInfo-based tests to check if a subsystem # supports autograd.Function. # # Axes: # - saves {output, input, intermediate, non-tensor} # - {inputs, output} x {single tensor, tensors, arbitrary objects} # - Uses {mark_dirty, mark_non_differentiable, once_differentiable} def to_numpy(tensor): return tensor.cpu().numpy() class NumpyCube(torch.autograd.Function): @staticmethod def forward(input): input_np = to_numpy(input) dinput = torch.tensor(3 * input_np ** 2, device=input.device) return torch.tensor(input_np ** 3, device=input.device), dinput @staticmethod def setup_context(ctx, inputs, output): ctx.save_for_backward(inputs[0], output[1]) ctx.save_for_forward(inputs[0], output[1]) @staticmethod def backward(ctx, grad_output, grad_saved): input, dinput = ctx.saved_tensors return NumpyMul.apply(grad_output, dinput) + 6 * NumpyMul.apply(grad_saved, input) @staticmethod def vmap(info, in_dims, input): result = NumpyCube.apply(input) return result, (in_dims[0], in_dims[0]) @staticmethod def jvp(ctx, input_tangent): input, dinput = ctx.saved_tensors return NumpyMul.apply(input_tangent, dinput), 6 * NumpyMul.apply(input_tangent, input) class CubeGenVmap(torch.autograd.Function): generate_vmap_rule = True @staticmethod def forward(x): return x ** 3, 3 * x ** 2 @staticmethod def setup_context(ctx, inputs, outputs): ctx.save_for_backward(inputs[0], outputs[1]) ctx.save_for_forward(inputs[0], outputs[1]) @staticmethod def backward(ctx, grad_output, grad_saved): _input, dinput = ctx.saved_tensors result = grad_output * dinput + 6 * dinput return result @staticmethod def jvp(ctx, input_tangent): input, dinput = ctx.saved_tensors return MulGenVmap.apply(input_tangent, dinput), 6 * NumpyMul.apply(input_tangent, input) def sample_inputs_numpy_cube(opinfo, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) yield SampleInput(make_arg(1, low=0.8, high=2), args=()) class NumpyCubeNotComposable(torch.autograd.Function): @staticmethod def forward(input): input_np = to_numpy(input) return torch.tensor(input_np ** 3, device=input.device), input_np @staticmethod def setup_context(ctx, inputs, output): _, input_np = output ctx.input_np = input_np ctx.device = inputs[0].device @staticmethod @torch.autograd.function.once_differentiable def backward(ctx, grad_output, grad_saved): result_np = 3 * (ctx.input_np ** 2) return torch.tensor(result_np, device=ctx.device) class NumpyMul(torch.autograd.Function): @staticmethod def forward(x, y): return torch.tensor(to_numpy(x) * to_numpy(y), device=x.device) @staticmethod def setup_context(ctx, inputs, output): ctx.save_for_backward(*inputs) ctx.save_for_forward(*inputs) @staticmethod def backward(ctx, grad_output): x, y = ctx.saved_tensors gx = None if ctx.needs_input_grad[0]: gx = 
NumpyMul.apply(grad_output, y) gy = None if ctx.needs_input_grad[1]: gy = NumpyMul.apply(grad_output, x) return gx, gy @staticmethod def vmap(info, in_dims, x, y): x_bdim, y_bdim = in_dims x = x.movedim(x_bdim, -1) if x_bdim is not None else x.unsqueeze(-1) y = y.movedim(y_bdim, -1) if y_bdim is not None else y.unsqueeze(-1) result = NumpyMul.apply(x, y) result = result.movedim(-1, 0) return result, 0 @staticmethod def jvp(ctx, x_tangent, y_tangent): x, y = ctx.saved_tensors return x_tangent * y + y_tangent * x def sample_inputs_numpy_mul(opinfo, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) # Broadcasting yield SampleInput(make_arg(4, low=0.9, high=2), args=(make_arg(3, 4, low=0.9, high=2),)) def sample_inputs_numpy_mul_scalar(opinfo, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) yield SampleInput(make_arg(4, low=0.9, high=2), args=(), kwargs={"scalar": 3.14}) class MulGenVmap(torch.autograd.Function): generate_vmap_rule = True @staticmethod def forward(x, y): return x * y @staticmethod def setup_context(ctx, inputs, outputs): ctx.save_for_backward(*inputs) ctx.save_for_forward(*inputs) @staticmethod def backward(ctx, grad_output): x, y = ctx.saved_tensors gx = None if ctx.needs_input_grad[0]: gx = MulGenVmap.apply(grad_output, y) gy = None if ctx.needs_input_grad[1]: gy = MulGenVmap.apply(grad_output, x) return gx, gy @staticmethod def jvp(ctx, x_tangent, y_tangent): x, y = ctx.saved_tensors return x_tangent * y + y_tangent * x class NumpyExp_(torch.autograd.Function): @staticmethod def forward(x): x_np = to_numpy(x) np.exp(x_np, x_np) return x @staticmethod def setup_context(ctx, inputs, output): x, = inputs ctx.mark_dirty(x) ctx.save_for_backward(output) ctx.save_for_forward(output) @staticmethod def backward(ctx, grad_output): output, = ctx.saved_tensors return NumpyMul.apply(grad_output, output) @staticmethod def vmap(info, in_dims, x): NumpyExp_.apply(x) return x, in_dims[0] @staticmethod def jvp(ctx, x_tangent): # Doesn't call numpy operations because I didn't want to write NumpyMul_ output, = ctx.saved_tensors x_tangent.mul_(output) return x_tangent class NumpySort(torch.autograd.Function): @staticmethod def forward(x, dim): device = x.device x = to_numpy(x) ind = np.argsort(x, axis=dim) ind_inv = np.argsort(ind, axis=dim) return ( torch.tensor(x, device=device), torch.tensor(ind, device=device), torch.tensor(ind_inv, device=device), ) @staticmethod def setup_context(ctx, inputs, output): _x, dim = inputs _, ind, ind_inv = output ctx.mark_non_differentiable(ind, ind_inv) ctx.save_for_backward(ind, ind_inv) ctx.save_for_forward(ind, ind_inv) ctx.dim = dim @staticmethod def backward(ctx, grad_output, _0, _1): ind, ind_inv = ctx.saved_tensors return NumpyTake.apply(grad_output, ind_inv, ind, ctx.dim), None @staticmethod def vmap(info, in_dims, x, dim): x_bdim, _ = in_dims x = x.movedim(x_bdim, 0) # wrap dim dim = dim if dim >= 0 else dim + x.dim() - 1 return NumpySort.apply(x, dim + 1), (0, 0, 0) @staticmethod def jvp(ctx, x_tangent, _): ind, ind_inv = ctx.saved_tensors return NumpyTake.apply(x_tangent, ind, ind_inv, ctx.dim), None, None class SortGenVmap(torch.autograd.Function): generate_vmap_rule = True @staticmethod def forward(x, dim): ind = torch.argsort(x, dim=dim) ind_inv = torch.argsort(ind, axis=dim) result = torch.take_along_dim(x, ind, dim=dim) return result, ind, ind_inv @staticmethod def 
setup_context(ctx, inputs, outputs): x, dim = inputs _, ind, ind_inv = outputs ctx.mark_non_differentiable(ind, ind_inv) ctx.save_for_backward(ind, ind_inv) ctx.save_for_forward(ind, ind_inv) ctx.dim = dim @staticmethod def backward(ctx, grad_output, _0, _1): ind, ind_inv = ctx.saved_tensors return TakeGenVmap.apply(grad_output, ind_inv, ind, ctx.dim), None @staticmethod def jvp(ctx, x_tangent, _): ind, ind_inv = ctx.saved_tensors return TakeGenVmap.apply(x_tangent, ind, ind_inv, ctx.dim), None, None def sample_inputs_numpy_sort(opinfo, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) yield SampleInput(make_arg(3, 5), args=(1,)) def sample_inputs_numpy_take(opinfo, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) tensor = make_arg(3, 5) dim = 1 _, ind, ind_inv = NumpySort.apply(tensor, 1) yield SampleInput(tensor, args=(ind, ind_inv, dim)) class NumpyTake(torch.autograd.Function): @staticmethod def forward(x, ind, ind_inv, dim): device = x.device x = to_numpy(x) ind = to_numpy(ind) return torch.tensor(np.take_along_axis(x, ind, dim), device=device) @staticmethod def setup_context(ctx, inputs, output): _x, ind, ind_inv, dim = inputs ctx.save_for_backward(ind, ind_inv) ctx.save_for_forward(ind, ind_inv) ctx.dim = dim @staticmethod def backward(ctx, grad_output): ind, ind_inv = ctx.saved_tensors result = NumpyTake.apply(grad_output, ind_inv, ind, ctx.dim) return result, None, None, None @staticmethod def vmap(info, in_dims, x, ind, ind_inv, dim): x_bdim, ind_bdim, ind_inv_bdim, _ = in_dims # wrap dim logical_dim = x.dim() if x_bdim is None else x_bdim - 1 dim = dim if dim >= 0 else dim + logical_dim def expand_bdim(x, x_bdim): if x_bdim is None: return x.expand(info.batch_size, *x.shape) return x.movedim(x_bdim, 0) x = expand_bdim(x, x_bdim) ind = expand_bdim(ind, ind_bdim) ind_inv = expand_bdim(ind_inv, ind_inv_bdim) return NumpyTake.apply(x, ind, ind_inv, dim + 1), 0 @staticmethod def jvp(ctx, x_tangent, ind_tangent, ind_inv_tangent, _): assert ind_tangent is None assert ind_inv_tangent is None ind, ind_inv = ctx.saved_tensors return NumpyTake.apply(x_tangent, ind, ind_inv, ctx.dim) class TakeGenVmap(torch.autograd.Function): generate_vmap_rule = True @staticmethod def forward(x, ind, ind_inv, dim): return torch.take_along_dim(x, ind, dim) @staticmethod def setup_context(ctx, inputs, outputs): _x, ind, ind_inv, dim = inputs ctx.save_for_backward(ind, ind_inv) ctx.save_for_forward(ind, ind_inv) ctx.dim = dim @staticmethod def backward(ctx, grad_output): ind, ind_inv = ctx.saved_tensors result = TakeGenVmap.apply(grad_output, ind_inv, ind, ctx.dim) return result, None, None, None @staticmethod def jvp(ctx, x_tangent, ind_tangent, ind_inv_tangent, _): ind, ind_inv = ctx.saved_tensors return TakeGenVmap.apply(x_tangent, ind, ind_inv, ctx.dim) class Select(torch.autograd.Function): @staticmethod def forward(x, idx): return x[idx] @staticmethod def setup_context(ctx, inputs, output): x, idx = inputs ctx.x_shape = x.shape ctx.idx = idx @staticmethod def backward(ctx, grad_output): result = grad_output.new_zeros(ctx.x_shape) result[ctx.idx] = grad_output return result, None @staticmethod def vmap(info, in_dims, x, idx): x_bdim, _ = in_dims x = x.movedim(x_bdim, 1) return Select.apply(x, idx), 0 @staticmethod def jvp(ctx, x_tangent, _): return Select.apply(x_tangent, ctx.idx) class SelectGenVmap(torch.autograd.Function): generate_vmap_rule 
= True @staticmethod def forward(x, idx): return x[idx] @staticmethod def setup_context(ctx, inputs, outputs): x, idx = inputs ctx.x_shape = x.shape ctx.idx = idx @staticmethod def backward(ctx, grad_output): result = grad_output.new_zeros(ctx.x_shape) result[ctx.idx] = grad_output return result, None @staticmethod def jvp(ctx, x_tangent, _): return SelectGenVmap.apply(x_tangent, ctx.idx) def sample_inputs_select(opinfo, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) yield SampleInput(make_arg(3, 5), args=(2,)) class ScaleGradGenVmap(torch.autograd.Function): generate_vmap_rule = True scale = 3.14 @staticmethod def forward(x): return x.clone() @staticmethod def setup_context(ctx, inputs, outputs): pass @staticmethod def backward(ctx, grad_output): return grad_output * ScaleGradGenVmap.scale @staticmethod def jvp(ctx, x_tangent): return x_tangent * ScaleGradGenVmap.scale class ZeroGradientsGenVmap(torch.autograd.Function): generate_vmap_rule = True @staticmethod def forward(x, y): return x.clone(), y.clone() @staticmethod def setup_context(ctx, inputs, outputs): pass @staticmethod def backward(ctx, gx, gy): # Intentionally returning torch.zeros instead of zeros_like or new_zeros. # Also intentionally not None. return ( # Intentionally too-large gradient torch.zeros(3, 4, *gx.shape, dtype=gx.dtype, device=gx.device), torch.zeros(gy.shape, dtype=gy.dtype, device=gy.device), ) @staticmethod def jvp(ctx, gx, gy): # Intentionally returning torch.zeros instead of zeros_like or new_zeros. # Also intentionally not None. return ( torch.zeros(gx.shape, dtype=gx.dtype, device=gx.device), torch.zeros(gy.shape, dtype=gy.dtype, device=gy.device), ) def sample_inputs_forward_default_args(opinfo, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) yield SampleInput(make_arg(3, 5)) class ForwardHasDefaultArgs(torch.autograd.Function): @staticmethod def forward(x, idx=(2,)): return x[idx] @staticmethod def setup_context(ctx, inputs, output): x, idx = inputs ctx.x_shape = x.shape ctx.idx = idx @staticmethod def backward(ctx, grad_output): result = grad_output.new_zeros(ctx.x_shape) result[ctx.idx] = grad_output return result, None @staticmethod def vmap(info, in_dims, x, idx): x_bdim, _ = in_dims x = x.movedim(x_bdim, 1) return ForwardHasDefaultArgs.apply(x, idx), 0 @staticmethod def jvp(ctx, x_tangent, _): return ForwardHasDefaultArgs.apply(x_tangent, ctx.idx) autograd_function_db = [ OpInfo( 'NumpyCubeAutogradFunction', op=NumpyCube.apply, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_numpy_cube, dtypes=all_types_and(torch.bool, torch.half), supports_out=False, ), OpInfo( 'NumpyExpMarkDirtyAutogradFunction', op=lambda x: NumpyExp_.apply(x.clone()), inplace_variant=NumpyExp_.apply, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_numpy_cube, dtypes=all_types_and(torch.bool, torch.half), supports_out=False, ), OpInfo( 'NumpyMulAutogradFunction', op=NumpyMul.apply, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_numpy_mul, dtypes=all_types_and(torch.bool, torch.half), supports_out=False, ), OpInfo( 'NumpyCubeNotComposableAutogradFunction', op=lambda x: NumpyCubeNotComposable.apply(x)[0], supports_forward_ad=False, supports_fwgrad_bwgrad=False, sample_inputs_func=sample_inputs_numpy_cube, dtypes=all_types_and(torch.bool, torch.half), 
supports_out=False, ), OpInfo( 'NumpySortAutogradFunction', op=NumpySort.apply, supports_forward_ad=False, supports_fwgrad_bwgrad=False, sample_inputs_func=sample_inputs_numpy_sort, dtypes=all_types_and(torch.bool, torch.half), supports_out=False, gradcheck_wrapper=lambda y, ind: y, ), OpInfo( 'NumpyTakeAutogradFunction', op=NumpyTake.apply, supports_forward_ad=False, supports_fwgrad_bwgrad=False, sample_inputs_func=sample_inputs_numpy_take, dtypes=all_types_and(torch.bool, torch.half), supports_out=False, ), OpInfo( 'SelectAutogradFunction', op=Select.apply, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_select, dtypes=all_types_and(torch.bool, torch.half), supports_out=False, ), OpInfo( 'CubeGenVmapAutogradFunction', op=CubeGenVmap.apply, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_numpy_cube, dtypes=all_types_and(torch.bool, torch.half), supports_out=False, ), OpInfo( 'MulGenVmapAutogradFunction', op=MulGenVmap.apply, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_numpy_mul, dtypes=all_types_and(torch.bool, torch.half), supports_out=False, ), OpInfo( 'SortGenVmapAutogradFunction', op=SortGenVmap.apply, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_numpy_sort, dtypes=all_types_and(torch.bool, torch.half), supports_out=False, gradcheck_wrapper=lambda y, ind: y, ), OpInfo( 'SelectGenVmapAutogradFunction', op=SelectGenVmap.apply, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_select, dtypes=all_types_and(torch.bool, torch.half), supports_out=False, ), OpInfo( 'ScaleGradGenVmapAutogradFunction', op=ScaleGradGenVmap.apply, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_numpy_cube, dtypes=all_types_and(torch.bool, torch.half), supports_out=False, ), OpInfo( 'ZeroGradientsGenVmapAutogradFunction', op=ZeroGradientsGenVmap.apply, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_numpy_mul, dtypes=all_types_and(torch.bool, torch.half), supports_out=False, ), OpInfo( 'ForwardHasDefaultArgsAutogradFunction', op=ForwardHasDefaultArgs.apply, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_forward_default_args, dtypes=all_types_and(torch.bool, torch.half), supports_out=False, ), ] ```
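Each entry in `autograd_function_db` wraps one of the `autograd.Function` classes above in an `OpInfo`, so the generic OpInfo-driven suites can exercise backward, forward-mode AD, and vmap support. The same functions can also be checked directly; the sketch below is not part of the original file, and the input shapes and bounds mirror the sample-input helpers only loosely:

```py
# Minimal sketch: gradcheck the autograd.Functions directly, which is what the
# OpInfo entries automate across dtypes and devices.
import torch
from torch.autograd import gradcheck
from torch.testing import make_tensor
from torch.testing._internal.autograd_function_db import NumpyCube, NumpyMul

x = make_tensor((4,), device="cpu", dtype=torch.double, requires_grad=True, low=0.9, high=2)
y = make_tensor((4,), device="cpu", dtype=torch.double, requires_grad=True, low=0.9, high=2)

# NumpyCube returns (x ** 3, 3 * x ** 2); gradcheck covers both outputs.
assert gradcheck(NumpyCube.apply, (x,))

# NumpyMul multiplies element-wise through NumPy and defines its own backward.
assert gradcheck(NumpyMul.apply, (x, y))
```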
======================================================================================================================================== SOURCE CODE FILE: check_kernel_launches.py LINES: 4 SIZE: 6.05 KB PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\check_kernel_launches.py ENCODING: utf-8 ```py # mypy: ignore-errors import os import re import sys __all__ = [ "check_code_for_cuda_kernel_launches", "check_cuda_kernel_launches", ] # FILES TO EXCLUDE (match is done with suffix using `endswith`) # You wouldn't drive without a seatbelt, though, so why would you # launch a kernel without some safety? Use this as a quick workaround # for a problem with the checker, fix the checker, then de-exclude # the files in question. exclude_files: list[str] = [] # Without using a C++ AST we can't 100% detect kernel launches, so we # model them as having the pattern "<<<parameters>>>(arguments);" # We then require that `C10_CUDA_KERNEL_LAUNCH_CHECK` be # the next statement. # # We model the next statement as ending at the next `}` or `;`. # If we see `}` then a clause ended (bad) if we see a semi-colon then # we expect the launch check just before it. # # Since the kernel launch can include lambda statements, it's important # to find the correct end-paren of the kernel launch. Doing this with # pure regex requires recursive regex, which aren't part of the Python # standard library. To avoid an additional dependency, we build a prefix # regex that finds the start of a kernel launch, use a paren-matching # algorithm to find the end of the launch, and then another regex to # determine if a launch check is present. # Finds potential starts of kernel launches kernel_launch_start = re.compile( r"^.*<<<[^>]+>>>\s*\(", flags=re.MULTILINE ) # This pattern should start at the character after the final paren of the # kernel launch. It returns a match if the launch check is not the next statement has_check = re.compile( r"\s*;(?![^;}]*C10_CUDA_KERNEL_LAUNCH_CHECK\(\);)", flags=re.MULTILINE ) def find_matching_paren(s: str, startpos: int) -> int: """Given a string "prefix (unknown number of characters) suffix" and the position of the first `(` returns the index of the character 1 past the `)`, accounting for paren nesting """ opening = 0 for i, c in enumerate(s[startpos:]): if c == '(': opening += 1 elif c == ')': opening -= 1 if opening == 0: return startpos + i + 1 raise IndexError("Closing parens not found!") def should_exclude_file(filename) -> bool: for exclude_suffix in exclude_files: if filename.endswith(exclude_suffix): return True return False def check_code_for_cuda_kernel_launches(code, filename=None): """Checks code for CUDA kernel launches without cuda error checks. Args: filename - Filename of file containing the code. Used only for display purposes, so you can put anything here. 
code - The code to check Returns: The number of unsafe kernel launches in the code """ if filename is None: filename = "##Python Function Call##" # We break the code apart and put it back together to add # helpful line numberings for identifying problem areas code = enumerate(code.split("\n")) # Split by line breaks code = [f"{lineno}: {linecode}" for lineno, linecode in code] # Number the lines code = '\n'.join(code) # Put it back together num_launches_without_checks = 0 for m in kernel_launch_start.finditer(code): end_paren = find_matching_paren(code, m.end() - 1) if has_check.match(code, end_paren): num_launches_without_checks += 1 context = code[m.start():end_paren + 1] print(f"Missing C10_CUDA_KERNEL_LAUNCH_CHECK in '{filename}'. Context:\n{context}", file=sys.stderr) return num_launches_without_checks def check_file(filename): """Checks a file for CUDA kernel launches without cuda error checks Args: filename - File to check Returns: The number of unsafe kernel launches in the file """ if not (filename.endswith((".cu", ".cuh"))): return 0 if should_exclude_file(filename): return 0 with open(filename) as fo: contents = fo.read() unsafeCount = check_code_for_cuda_kernel_launches(contents, filename) return unsafeCount def check_cuda_kernel_launches(): """Checks all pytorch code for CUDA kernel launches without cuda error checks Returns: The number of unsafe kernel launches in the codebase """ torch_dir = os.path.dirname(os.path.realpath(__file__)) torch_dir = os.path.dirname(torch_dir) # Go up to parent torch torch_dir = os.path.dirname(torch_dir) # Go up to parent caffe2 kernels_without_checks = 0 files_without_checks = [] for root, dirnames, filenames in os.walk(torch_dir): # `$BASE/build` and `$BASE/torch/include` are generated # so we don't want to flag their contents if root == os.path.join(torch_dir, "build") or root == os.path.join(torch_dir, "torch/include"): # Curtail search by modifying dirnames and filenames in place # Yes, this is the way to do this, see `help(os.walk)` dirnames[:] = [] continue for x in filenames: filename = os.path.join(root, x) file_result = check_file(filename) if file_result > 0: kernels_without_checks += file_result files_without_checks.append(filename) if kernels_without_checks > 0: count_str = f"Found {kernels_without_checks} instances in " \ f"{len(files_without_checks)} files where kernel " \ "launches didn't have checks." print(count_str, file=sys.stderr) print("Files without checks:", file=sys.stderr) for x in files_without_checks: print(f"\t{x}", file=sys.stderr) print(count_str, file=sys.stderr) return kernels_without_checks if __name__ == "__main__": unsafe_launches = check_cuda_kernel_launches() sys.exit(0 if unsafe_launches == 0 else 1) ```
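Because the checker operates on plain strings, its behaviour is easy to probe directly. A small example follows; the kernel and argument names in the snippets are made up for illustration:

```py
# Feed small CUDA snippets (as strings) to the launch checker.
from torch.testing._internal.check_kernel_launches import (
    check_code_for_cuda_kernel_launches,
)

good = """
my_kernel<<<blocks, threads>>>(arg1, arg2);
C10_CUDA_KERNEL_LAUNCH_CHECK();
"""

bad = """
my_kernel<<<blocks, threads>>>(arg1, arg2);
some_unrelated_call();
"""

# The launch check immediately follows the launch, so nothing is flagged.
assert check_code_for_cuda_kernel_launches(good) == 0
# Here another statement intervenes, so the launch is reported as unsafe.
assert check_code_for_cuda_kernel_launches(bad) == 1
```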
=================================================================================================================================== SOURCE CODE FILE: __init__.py LINES: 1 SIZE: 0.02 KB PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\codegen\__init__.py ENCODING: utf-8 ```py # mypy: ignore-errors ```
============================================================================================================================== SOURCE CODE FILE: common_cuda.py LINES: 2 SIZE: 13.64 KB PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\common_cuda.py ENCODING: utf-8 ```py # mypy: ignore-errors r"""This file is allowed to initialize CUDA context when imported.""" import functools import torch import torch.cuda from torch.testing._internal.common_utils import LazyVal, TEST_NUMBA, TEST_WITH_ROCM, TEST_CUDA, IS_WINDOWS import inspect import contextlib import os import unittest CUDA_ALREADY_INITIALIZED_ON_IMPORT = torch.cuda.is_initialized() TEST_MULTIGPU = TEST_CUDA and torch.cuda.device_count() >= 2 CUDA_DEVICE = torch.device("cuda:0") if TEST_CUDA else None # note: if ROCm is targeted, TEST_CUDNN is code for TEST_MIOPEN if TEST_WITH_ROCM: TEST_CUDNN = LazyVal(lambda: TEST_CUDA) else: TEST_CUDNN = LazyVal(lambda: TEST_CUDA and torch.backends.cudnn.is_acceptable(torch.tensor(1., device=CUDA_DEVICE))) TEST_CUDNN_VERSION = LazyVal(lambda: torch.backends.cudnn.version() if TEST_CUDNN else 0) SM53OrLater = LazyVal(lambda: torch.cuda.is_available() and torch.cuda.get_device_capability() >= (5, 3)) SM60OrLater = LazyVal(lambda: torch.cuda.is_available() and torch.cuda.get_device_capability() >= (6, 0)) SM70OrLater = LazyVal(lambda: torch.cuda.is_available() and torch.cuda.get_device_capability() >= (7, 0)) SM75OrLater = LazyVal(lambda: torch.cuda.is_available() and torch.cuda.get_device_capability() >= (7, 5)) SM80OrLater = LazyVal(lambda: torch.cuda.is_available() and torch.cuda.get_device_capability() >= (8, 0)) SM89OrLater = LazyVal(lambda: torch.cuda.is_available() and torch.cuda.get_device_capability() >= (8, 9)) SM90OrLater = LazyVal(lambda: torch.cuda.is_available() and torch.cuda.get_device_capability() >= (9, 0)) SM100OrLater = LazyVal(lambda: torch.cuda.is_available() and torch.cuda.get_device_capability() >= (10, 0)) IS_THOR = LazyVal(lambda: torch.cuda.is_available() and torch.cuda.get_device_capability()[0] == 10 and torch.cuda.get_device_capability()[1] > 0) IS_JETSON = LazyVal(lambda: torch.cuda.is_available() and (torch.cuda.get_device_capability() in [(7, 2), (8, 7)] or IS_THOR)) IS_SM89 = LazyVal(lambda: torch.cuda.is_available() and torch.cuda.get_device_capability() == (8, 9)) def CDNA2OrLater(): if TEST_WITH_ROCM: gcn_arch_name = torch.cuda.get_device_properties('cuda').gcnArchName return any(arch in gcn_arch_name for arch in {"gfx90a", "gfx942"}) return False def evaluate_gfx_arch_exact(matching_arch): if not torch.cuda.is_available(): return False gcn_arch_name = torch.cuda.get_device_properties('cuda').gcnArchName arch = os.environ.get('PYTORCH_DEBUG_FLASH_ATTENTION_GCN_ARCH_OVERRIDE', gcn_arch_name) return arch == matching_arch GFX90A_Exact = LazyVal(lambda: evaluate_gfx_arch_exact('gfx90a:sramecc+:xnack-')) GFX942_Exact = LazyVal(lambda: evaluate_gfx_arch_exact('gfx942:sramecc+:xnack-')) def evaluate_platform_supports_flash_attention(): if TEST_WITH_ROCM: return evaluate_gfx_arch_exact('gfx90a:sramecc+:xnack-') or evaluate_gfx_arch_exact('gfx942:sramecc+:xnack-') if TEST_CUDA: return not IS_WINDOWS and SM80OrLater return False def evaluate_platform_supports_efficient_attention(): if TEST_WITH_ROCM: return evaluate_gfx_arch_exact('gfx90a:sramecc+:xnack-') or evaluate_gfx_arch_exact('gfx942:sramecc+:xnack-') if TEST_CUDA: return True return False def evaluate_platform_supports_cudnn_attention(): return (not TEST_WITH_ROCM) and SM80OrLater and 
(TEST_CUDNN_VERSION >= 90000) PLATFORM_SUPPORTS_FLASH_ATTENTION: bool = LazyVal(lambda: evaluate_platform_supports_flash_attention()) PLATFORM_SUPPORTS_MEM_EFF_ATTENTION: bool = LazyVal(lambda: evaluate_platform_supports_efficient_attention()) PLATFORM_SUPPORTS_CUDNN_ATTENTION: bool = LazyVal(lambda: evaluate_platform_supports_cudnn_attention()) # This condition always evaluates to PLATFORM_SUPPORTS_MEM_EFF_ATTENTION but for logical clarity we keep it separate PLATFORM_SUPPORTS_FUSED_ATTENTION: bool = LazyVal(lambda: PLATFORM_SUPPORTS_FLASH_ATTENTION or PLATFORM_SUPPORTS_CUDNN_ATTENTION or PLATFORM_SUPPORTS_MEM_EFF_ATTENTION) PLATFORM_SUPPORTS_FUSED_SDPA: bool = TEST_CUDA and not TEST_WITH_ROCM PLATFORM_SUPPORTS_BF16: bool = LazyVal(lambda: TEST_CUDA and SM80OrLater) def evaluate_platform_supports_fp8(): if torch.cuda.is_available(): if torch.version.hip: ROCM_VERSION = tuple(int(v) for v in torch.version.hip.split('.')[:2]) archs = ['gfx94'] if ROCM_VERSION >= (6, 3): archs.extend(['gfx120']) if ROCM_VERSION >= (6, 5): archs.append('gfx95') for arch in archs: if arch in torch.cuda.get_device_properties(0).gcnArchName: return True else: return SM90OrLater or torch.cuda.get_device_capability() == (8, 9) return False PLATFORM_SUPPORTS_FP8: bool = LazyVal(lambda: evaluate_platform_supports_fp8()) PLATFORM_SUPPORTS_MX_GEMM: bool = LazyVal(lambda: TEST_CUDA and SM100OrLater) if TEST_NUMBA: try: import numba.cuda TEST_NUMBA_CUDA = numba.cuda.is_available() except Exception: TEST_NUMBA_CUDA = False TEST_NUMBA = False else: TEST_NUMBA_CUDA = False # Used below in `initialize_cuda_context_rng` to ensure that CUDA context and # RNG have been initialized. __cuda_ctx_rng_initialized = False # after this call, CUDA context and RNG must have been initialized on each GPU def initialize_cuda_context_rng(): global __cuda_ctx_rng_initialized assert TEST_CUDA, 'CUDA must be available when calling initialize_cuda_context_rng' if not __cuda_ctx_rng_initialized: # initialize cuda context and rng for memory tests for i in range(torch.cuda.device_count()): torch.randn(1, device=f"cuda:{i}") __cuda_ctx_rng_initialized = True @contextlib.contextmanager def tf32_off(): old_allow_tf32_matmul = torch.backends.cuda.matmul.allow_tf32 try: torch.backends.cuda.matmul.allow_tf32 = False with torch.backends.cudnn.flags(enabled=None, benchmark=None, deterministic=None, allow_tf32=False): yield finally: torch.backends.cuda.matmul.allow_tf32 = old_allow_tf32_matmul @contextlib.contextmanager def tf32_on(self, tf32_precision=1e-5): old_allow_tf32_matmul = torch.backends.cuda.matmul.allow_tf32 old_precision = self.precision try: torch.backends.cuda.matmul.allow_tf32 = True self.precision = tf32_precision with torch.backends.cudnn.flags(enabled=None, benchmark=None, deterministic=None, allow_tf32=True): yield finally: torch.backends.cuda.matmul.allow_tf32 = old_allow_tf32_matmul self.precision = old_precision @contextlib.contextmanager def tf32_enabled(): """ Context manager to temporarily enable TF32 for CUDA operations. Restores the previous TF32 state after exiting the context. """ old_allow_tf32_matmul = torch.backends.cuda.matmul.allow_tf32 try: torch.backends.cuda.matmul.allow_tf32 = True with torch.backends.cudnn.flags( enabled=None, benchmark=None, deterministic=None, allow_tf32=True ): yield finally: torch.backends.cuda.matmul.allow_tf32 = old_allow_tf32_matmul # This is a wrapper that wraps a test to run this test twice, one with # allow_tf32=True, another with allow_tf32=False. 
When running with # allow_tf32=True, it will use reduced precision as specified by the # argument. For example: # @dtypes(torch.float32, torch.float64, torch.complex64, torch.complex128) # @tf32_on_and_off(0.005) # def test_matmul(self, device, dtype): # a = ...; b = ...; # c = torch.matmul(a, b) # self.assertEqual(c, expected) # In the above example, when testing torch.float32 and torch.complex64 on CUDA # on a CUDA >= 11 build on an >=Ampere architecture, the matmul will be running at # TF32 mode and TF32 mode off, and on TF32 mode, the assertEqual will use reduced # precision to check values. # # This decorator can be used for function with or without device/dtype, such as # @tf32_on_and_off(0.005) # def test_my_op(self) # @tf32_on_and_off(0.005) # def test_my_op(self, device) # @tf32_on_and_off(0.005) # def test_my_op(self, device, dtype) # @tf32_on_and_off(0.005) # def test_my_op(self, dtype) # if neither device nor dtype is specified, it will check if the system has ampere device # if device is specified, it will check if device is cuda # if dtype is specified, it will check if dtype is float32 or complex64 # tf32 and fp32 are different only when all the three checks pass def tf32_on_and_off(tf32_precision=1e-5): def with_tf32_disabled(self, function_call): with tf32_off(): function_call() def with_tf32_enabled(self, function_call): with tf32_on(self, tf32_precision): function_call() def wrapper(f): params = inspect.signature(f).parameters arg_names = tuple(params.keys()) @functools.wraps(f) def wrapped(*args, **kwargs): for k, v in zip(arg_names, args): kwargs[k] = v cond = torch.cuda.is_tf32_supported() if 'device' in kwargs: cond = cond and (torch.device(kwargs['device']).type == 'cuda') if 'dtype' in kwargs: cond = cond and (kwargs['dtype'] in {torch.float32, torch.complex64}) if cond: with_tf32_disabled(kwargs['self'], lambda: f(**kwargs)) with_tf32_enabled(kwargs['self'], lambda: f(**kwargs)) else: f(**kwargs) return wrapped return wrapper # This is a wrapper that wraps a test to run it with TF32 turned off. # This wrapper is designed to be used when a test uses matmul or convolutions # but the purpose of that test is not testing matmul or convolutions. # Disabling TF32 will enforce torch.float tensors to be always computed # at full precision. 
def with_tf32_off(f): @functools.wraps(f) def wrapped(*args, **kwargs): with tf32_off(): return f(*args, **kwargs) return wrapped def _get_magma_version(): if 'Magma' not in torch.__config__.show(): return (0, 0) position = torch.__config__.show().find('Magma ') version_str = torch.__config__.show()[position + len('Magma '):].split('\n')[0] return tuple(int(x) for x in version_str.split(".")) def _get_torch_cuda_version(): if torch.version.cuda is None: return (0, 0) cuda_version = str(torch.version.cuda) return tuple(int(x) for x in cuda_version.split(".")) def _get_torch_rocm_version(): if not TEST_WITH_ROCM: return (0, 0) rocm_version = str(torch.version.hip) rocm_version = rocm_version.split("-")[0] # ignore git sha return tuple(int(x) for x in rocm_version.split(".")) def _check_cusparse_generic_available(): return not TEST_WITH_ROCM def _check_hipsparse_generic_available(): if not TEST_WITH_ROCM: return False if not torch.version.hip: return False rocm_version = str(torch.version.hip) rocm_version = rocm_version.split("-")[0] # ignore git sha rocm_version_tuple = tuple(int(x) for x in rocm_version.split(".")) return not (rocm_version_tuple is None or rocm_version_tuple < (5, 1)) TEST_CUSPARSE_GENERIC = _check_cusparse_generic_available() TEST_HIPSPARSE_GENERIC = _check_hipsparse_generic_available() # Shared by test_torch.py and test_multigpu.py def _create_scaling_models_optimizers(device="cuda", optimizer_ctor=torch.optim.SGD, optimizer_kwargs=None): # Create a module+optimizer that will use scaling, and a control module+optimizer # that will not use scaling, against which the scaling-enabled module+optimizer can be compared. mod_control = torch.nn.Sequential(torch.nn.Linear(8, 8), torch.nn.Linear(8, 8)).to(device=device) mod_scaling = torch.nn.Sequential(torch.nn.Linear(8, 8), torch.nn.Linear(8, 8)).to(device=device) with torch.no_grad(): for c, s in zip(mod_control.parameters(), mod_scaling.parameters()): s.copy_(c) kwargs = {"lr": 1.0} if optimizer_kwargs is not None: kwargs.update(optimizer_kwargs) opt_control = optimizer_ctor(mod_control.parameters(), **kwargs) opt_scaling = optimizer_ctor(mod_scaling.parameters(), **kwargs) return mod_control, mod_scaling, opt_control, opt_scaling # Shared by test_torch.py, test_cuda.py and test_multigpu.py def _create_scaling_case(device="cuda", dtype=torch.float, optimizer_ctor=torch.optim.SGD, optimizer_kwargs=None): data = [(torch.randn((8, 8), dtype=dtype, device=device), torch.randn((8, 8), dtype=dtype, device=device)), (torch.randn((8, 8), dtype=dtype, device=device), torch.randn((8, 8), dtype=dtype, device=device)), (torch.randn((8, 8), dtype=dtype, device=device), torch.randn((8, 8), dtype=dtype, device=device)), (torch.randn((8, 8), dtype=dtype, device=device), torch.randn((8, 8), dtype=dtype, device=device))] loss_fn = torch.nn.MSELoss().to(device) skip_iter = 2 return _create_scaling_models_optimizers( device=device, optimizer_ctor=optimizer_ctor, optimizer_kwargs=optimizer_kwargs, ) + (data, loss_fn, skip_iter) def xfailIfSM89(func): return func if not IS_SM89 else unittest.expectedFailure(func) # Importing this module should NOT eagerly initialize CUDA if not CUDA_ALREADY_INITIALIZED_ON_IMPORT: assert not torch.cuda.is_initialized() ```
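The scaling helpers above (`_create_scaling_models_optimizers` and `_create_scaling_case`) are shared fixtures for the gradient-scaling tests in test_torch.py, test_cuda.py and test_multigpu.py. The sketch below shows one plausible way to drive them with `torch.amp.GradScaler`; the function name `run_scaling_sketch` and the loop body are illustrative assumptions, not the exact loop used by those tests.

```py
import torch
from torch.testing._internal.common_cuda import _create_scaling_case


def run_scaling_sketch():
    # _create_scaling_case returns paired control/scaling modules and optimizers,
    # a small synthetic dataset, a loss function, and the iteration index at
    # which tests typically inject a non-finite gradient.
    (mod_control, mod_scaling, opt_control, opt_scaling,
     data, loss_fn, skip_iter) = _create_scaling_case(device="cuda")
    scaler = torch.amp.GradScaler("cuda")
    for i, (inp, target) in enumerate(data):
        opt_scaling.zero_grad()
        loss = loss_fn(mod_scaling(inp), target)
        scaler.scale(loss).backward()
        if i == skip_iter:
            # Corrupt a gradient so GradScaler skips this optimizer step.
            mod_scaling[1].weight.grad.fill_(float("inf"))
        scaler.step(opt_scaling)
        scaler.update()
```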
===================================================================================================================================== SOURCE CODE FILE: common_device_type.py LINES: 1 SIZE: 72.97 KB PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\common_device_type.py ENCODING: utf-8 ```py # mypy: ignore-errors import copy import gc import inspect import os import runpy import sys import threading import unittest from collections import namedtuple from collections.abc import Iterable, Sequence from enum import Enum from functools import partial, wraps from typing import Any, Callable, ClassVar, Optional, TypeVar, Union from typing_extensions import ParamSpec import torch from torch._inductor.utils import GPU_TYPES from torch.testing._internal.common_cuda import ( _get_torch_cuda_version, _get_torch_rocm_version, TEST_CUSPARSE_GENERIC, TEST_HIPSPARSE_GENERIC, ) from torch.testing._internal.common_dtype import get_all_dtypes from torch.testing._internal.common_utils import ( _TestParametrizer, clear_tracked_input, compose_parametrize_fns, dtype_name, get_tracked_input, IS_FBCODE, is_privateuse1_backend_available, IS_REMOTE_GPU, IS_SANDCASTLE, IS_WINDOWS, NATIVE_DEVICES, PRINT_REPRO_ON_FAILURE, skipCUDANonDefaultStreamIf, skipIfTorchDynamo, TEST_HPU, TEST_MKL, TEST_MPS, TEST_WITH_ASAN, TEST_WITH_MIOPEN_SUGGEST_NHWC, TEST_WITH_ROCM, TEST_WITH_TORCHINDUCTOR, TEST_WITH_TSAN, TEST_WITH_UBSAN, TEST_XPU, TestCase, ) _T = TypeVar("_T") _P = ParamSpec("_P") try: import psutil # type: ignore[import] HAS_PSUTIL = True except ModuleNotFoundError: HAS_PSUTIL = False psutil = None # Note [Writing Test Templates] # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # # This note was written shortly after the PyTorch 1.9 release. # If you notice it's out-of-date or think it could be improved then please # file an issue. # # PyTorch has its own framework for instantiating test templates. That is, for # taking test classes that look similar to unittest or pytest # compatible test classes and optionally doing the following: # # - instantiating a version of the test class for each available device type # (often the CPU, CUDA, and META device types) # - further instantiating a version of each test that's always specialized # on the test class's device type, and optionally specialized further # on datatypes or operators # # This functionality is similar to pytest's parametrize functionality # (see https://docs.pytest.org/en/6.2.x/parametrize.html), but with considerable # additional logic that specializes the instantiated test classes for their # device types (see CPUTestBase and CUDATestBase below), supports a variety # of composable decorators that allow for test filtering and setting # tolerances, and allows tests parametrized by operators to instantiate # only the subset of device type x dtype that operator supports. # # This framework was built to make it easier to write tests that run on # multiple device types, multiple datatypes (dtypes), and for multiple # operators. It's also useful for controlling which tests are run. For example, # only tests that use a CUDA device can be run on platforms with CUDA. 
# Let's dive in with an example to get an idea for how it works: # # -------------------------------------------------------- # A template class (looks like a regular unittest TestCase) # class TestClassFoo(TestCase): # # # A template test that can be specialized with a device # # NOTE: this test case is not runnable by unittest or pytest because it # # accepts an extra positional argument, "device", that they do not understand # def test_bar(self, device): # pass # # # Function that instantiates a template class and its tests # instantiate_device_type_tests(TestCommon, globals()) # -------------------------------------------------------- # # In the above code example we see a template class and a single test template # that can be instantiated with a device. The function # instantiate_device_type_tests(), called at file scope, instantiates # new test classes, one per available device type, and new tests in those # classes from these templates. It actually does this by removing # the class TestClassFoo and replacing it with classes like TestClassFooCPU # and TestClassFooCUDA, instantiated test classes that inherit from CPUTestBase # and CUDATestBase respectively. Additional device types, like XLA, # (see https://github.com/pytorch/xla) can further extend the set of # instantiated test classes to create classes like TestClassFooXLA. # # The test template, test_bar(), is also instantiated. In this case the template # is only specialized on a device, so (depending on the available device # types) it might become test_bar_cpu() in TestClassFooCPU and test_bar_cuda() # in TestClassFooCUDA. We can think of the instantiated test classes as # looking like this: # # -------------------------------------------------------- # # An instantiated test class for the CPU device type # class TestClassFooCPU(CPUTestBase): # # # An instantiated test that calls the template with the string representation # # of a device from the test class's device type # def test_bar_cpu(self): # test_bar(self, 'cpu') # # # An instantiated test class for the CUDA device type # class TestClassFooCUDA(CUDATestBase): # # # An instantiated test that calls the template with the string representation # # of a device from the test class's device type # def test_bar_cuda(self): # test_bar(self, 'cuda:0') # -------------------------------------------------------- # # These instantiated test classes ARE discoverable and runnable by both # unittest and pytest. One thing that may be confusing, however, is that # attempting to run "test_bar" will not work, despite it appearing in the # original template code. This is because "test_bar" is no longer discoverable # after instantiate_device_type_tests() runs, as the above snippet shows. # Instead "test_bar_cpu" and "test_bar_cuda" may be run directly, or both # can be run with the option "-k test_bar". # # Removing the template class and adding the instantiated classes requires # passing "globals()" to instantiate_device_type_tests(), because it # edits the file's Python objects. # # As mentioned, tests can be additionally parametrized on dtypes or # operators. 
# Datatype parametrization uses the @dtypes decorator and
# requires a test template like this:
#
# --------------------------------------------------------
# # A template test that can be specialized with a device and a datatype (dtype)
# @dtypes(torch.float32, torch.int64)
# def test_car(self, device, dtype)
#   pass
# --------------------------------------------------------
#
# If the CPU and CUDA device types are available this test would be
# instantiated as 4 tests that cover the cross-product of the two dtypes
# and two device types:
#
# - test_car_cpu_float32
# - test_car_cpu_int64
# - test_car_cuda_float32
# - test_car_cuda_int64
#
# The dtype is passed as a torch.dtype object.
#
# Tests parametrized on operators (actually on OpInfos, more on that in a
# moment...) use the @ops decorator and require a test template like this:
# --------------------------------------------------------
# # A template test that can be specialized with a device, dtype, and OpInfo
# @ops(op_db)
# def test_car(self, device, dtype, op)
#   pass
# --------------------------------------------------------
#
# See the documentation for the @ops decorator below for additional details
# on how to use it and see the note [OpInfos] in
# common_methods_invocations.py for more details on OpInfos.
#
# A test parametrized over the entire "op_db", which contains hundreds of
# OpInfos, will likely have hundreds or thousands of instantiations. The
# test will be instantiated on the cross-product of device types, operators,
# and the dtypes the operator supports on that device type. The instantiated
# tests will have names like:
#
# - test_car_add_cpu_float32
# - test_car_sub_cuda_int64
#
# The first instantiated test calls the original test_car() with the OpInfo
# for torch.add as its "op" argument, the string 'cpu' for its "device" argument,
# and the dtype torch.float32 for its "dtype" argument. The second instantiated
# test calls test_car() with the OpInfo for torch.sub, a CUDA device string
# like 'cuda:0' or 'cuda:1' for its "device" argument, and the dtype
# torch.int64 for its "dtype" argument.
#
# In addition to parametrizing over device, dtype, and ops via OpInfos, the
# @parametrize decorator is supported for arbitrary parametrizations:
# --------------------------------------------------------
# # A template test that can be specialized with a device, dtype, and value for x
# @parametrize("x", range(5))
# def test_car(self, device, dtype, x)
#   pass
# --------------------------------------------------------
#
# See the documentation for @parametrize in common_utils.py for additional details
# on this. Note that the instantiate_device_type_tests() function will handle
# such parametrizations; there is no need to additionally call
# instantiate_parametrized_tests().
#
# Clever test filtering can be very useful when working with parametrized
# tests. "-k test_car" would run every instantiated variant of the test_car()
# test template, and "-k test_car_add" runs every variant instantiated with
# torch.add.
#
# It is important to use the passed device and dtype as appropriate. Use
# helper functions like make_tensor() that require explicitly specifying
# the device and dtype so they're not forgotten.
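#
# For example (an illustrative sketch, not an existing test), a template body
# that respects the passed device and dtype might look like:
#
# --------------------------------------------------------
# def test_baz(self, device, dtype):
#   a = make_tensor((8, 8), device=device, dtype=dtype)
#   b = make_tensor((8, 8), device=device, dtype=dtype)
#   self.assertEqual((a + b).dtype, dtype)
# --------------------------------------------------------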
# # Test templates can use a variety of composable decorators to specify # additional options and requirements, some are listed here: # # - @deviceCountAtLeast(<minimum number of devices to run test with>) # Passes a list of strings representing all available devices of # the test class's device type as the test template's "device" argument. # If there are fewer devices than the value passed to the decorator # the test is skipped. # - @dtypes(<list of tuples of dtypes>) # In addition to accepting multiple dtypes, the @dtypes decorator # can accept a sequence of tuple pairs of dtypes. The test template # will be called with each tuple for its "dtype" argument. # - @onlyNativeDeviceTypes # Skips the test if the device is not a native device type (currently CPU, CUDA, Meta) # - @onlyCPU # Skips the test if the device is not a CPU device # - @onlyCUDA # Skips the test if the device is not a CUDA device # - @onlyMPS # Skips the test if the device is not a MPS device # - @skipCPUIfNoLapack # Skips the test if the device is a CPU device and LAPACK is not installed # - @skipCPUIfNoMkl # Skips the test if the device is a CPU device and MKL is not installed # - @skipCUDAIfNoMagma # Skips the test if the device is a CUDA device and MAGMA is not installed # - @skipCUDAIfRocm # Skips the test if the device is a CUDA device and ROCm is being used # Note [Adding a Device Type] # ~~~~~~~~~~~~~~~~~~~~~~~~~~~ # # To add a device type: # # (1) Create a new "TestBase" extending DeviceTypeTestBase. # See CPUTestBase and CUDATestBase below. # (2) Define the "device_type" attribute of the base to be the # appropriate string. # (3) Add logic to this file that appends your base class to # device_type_test_bases when your device type is available. # (4) (Optional) Write setUpClass/tearDownClass class methods that # instantiate dependencies (see MAGMA in CUDATestBase). # (5) (Optional) Override the "instantiate_test" method for total # control over how your class creates tests. # # setUpClass is called AFTER tests have been created and BEFORE and ONLY IF # they are run. This makes it useful for initializing devices and dependencies. # Note [Overriding methods in generic tests] # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # # Device generic tests look a lot like normal test classes, but they differ # from ordinary classes in some important ways. In particular, overriding # methods in generic tests doesn't work quite the way you expect. # # class TestFooDeviceType(TestCase): # # Intention is to override # def assertEqual(self, x, y): # # This DOESN'T WORK! # super().assertEqual(x, y) # # If you try to run this code, you'll get an error saying that TestFooDeviceType # is not in scope. This is because after instantiating our classes, we delete # it from the parent scope. Instead, you need to hardcode a direct invocation # of the desired subclass call, e.g., # # class TestFooDeviceType(TestCase): # # Intention is to override # def assertEqual(self, x, y): # TestCase.assertEqual(x, y) # # However, a less error-prone way of customizing the behavior of TestCase # is to either (1) add your functionality to TestCase and make it toggled # by a class attribute, or (2) create your own subclass of TestCase, and # then inherit from it for your generic test. 
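#
# For example (an illustrative sketch, not code that exists in the test suite),
# approach (2) might look like:
#
# --------------------------------------------------------
# class MyTestCase(TestCase):
#   def assertEqual(self, x, y, **kwargs):
#     # custom behavior here, then defer to the base implementation
#     super().assertEqual(x, y, **kwargs)
#
# class TestFooDeviceType(MyTestCase):
#   def test_bar(self, device):
#     ...
#
# instantiate_device_type_tests(TestFooDeviceType, globals())
# --------------------------------------------------------
#
# MyTestCase is not a template class, so it is not removed from the enclosing
# scope and zero-argument super() works as usual; the instantiated classes
# pick up the customized assertEqual through the recreated base class.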
def _dtype_test_suffix(dtypes): """Returns the test suffix for a dtype, sequence of dtypes, or None.""" if isinstance(dtypes, (list, tuple)): if len(dtypes) == 0: return "" return "_" + "_".join(dtype_name(d) for d in dtypes) elif dtypes: return f"_{dtype_name(dtypes)}" else: return "" def _update_param_kwargs(param_kwargs, name, value): """Adds a kwarg with the specified name and value to the param_kwargs dict.""" # Make name plural (e.g. devices / dtypes) if the value is composite. plural_name = f"{name}s" # Clear out old entries of the arg if any. if name in param_kwargs: del param_kwargs[name] if plural_name in param_kwargs: del param_kwargs[plural_name] if isinstance(value, (list, tuple)): param_kwargs[plural_name] = value elif value is not None: param_kwargs[name] = value # Leave param_kwargs as-is when value is None. class DeviceTypeTestBase(TestCase): device_type: str = "generic_device_type" # Flag to disable test suite early due to unrecoverable error such as CUDA error. _stop_test_suite = False # Precision is a thread-local setting since it may be overridden per test _tls = threading.local() _tls.precision = TestCase._precision _tls.rel_tol = TestCase._rel_tol @property def precision(self): return self._tls.precision @precision.setter def precision(self, prec): self._tls.precision = prec @property def rel_tol(self): return self._tls.rel_tol @rel_tol.setter def rel_tol(self, prec): self._tls.rel_tol = prec # Returns a string representing the device that single device tests should use. # Note: single device tests use this device exclusively. @classmethod def get_primary_device(cls): return cls.device_type @classmethod def _init_and_get_primary_device(cls): try: return cls.get_primary_device() except Exception: # For CUDATestBase, XPUTestBase, XLATestBase, and possibly others, the primary device won't be available # until setUpClass() sets it. Call that manually here if needed. if hasattr(cls, "setUpClass"): cls.setUpClass() return cls.get_primary_device() # Returns a list of strings representing all available devices of this # device type. The primary device must be the first string in the list # and the list must contain no duplicates. # Note: UNSTABLE API. Will be replaced once PyTorch has a device generic # mechanism of acquiring all available devices. @classmethod def get_all_devices(cls): return [cls.get_primary_device()] # Returns the dtypes the test has requested. # Prefers device-specific dtype specifications over generic ones. 
@classmethod def _get_dtypes(cls, test): if not hasattr(test, "dtypes"): return None default_dtypes = test.dtypes.get("all") msg = f"@dtypes is mandatory when using @dtypesIf however '{test.__name__}' didn't specify it" assert default_dtypes is not None, msg return test.dtypes.get(cls.device_type, default_dtypes) def _get_precision_override(self, test, dtype): if not hasattr(test, "precision_overrides"): return self.precision return test.precision_overrides.get(dtype, self.precision) def _get_tolerance_override(self, test, dtype): if not hasattr(test, "tolerance_overrides"): return self.precision, self.rel_tol return test.tolerance_overrides.get(dtype, tol(self.precision, self.rel_tol)) def _apply_precision_override_for_test(self, test, param_kwargs): dtype = param_kwargs["dtype"] if "dtype" in param_kwargs else None dtype = param_kwargs["dtypes"] if "dtypes" in param_kwargs else dtype if dtype: self.precision = self._get_precision_override(test, dtype) self.precision, self.rel_tol = self._get_tolerance_override(test, dtype) # Creates device-specific tests. @classmethod def instantiate_test(cls, name, test, *, generic_cls=None): def instantiate_test_helper( cls, name, *, test, param_kwargs=None, decorator_fn=lambda _: [] ): # Add the device param kwarg if the test needs device or devices. param_kwargs = {} if param_kwargs is None else param_kwargs test_sig_params = inspect.signature(test).parameters if "device" in test_sig_params or "devices" in test_sig_params: device_arg: str = cls._init_and_get_primary_device() if hasattr(test, "num_required_devices"): device_arg = cls.get_all_devices() _update_param_kwargs(param_kwargs, "device", device_arg) # Apply decorators based on param kwargs. for decorator in decorator_fn(param_kwargs): test = decorator(test) # Constructs the test @wraps(test) def instantiated_test(self, param_kwargs=param_kwargs): # Sets precision and runs test # Note: precision is reset after the test is run guard_precision = self.precision guard_rel_tol = self.rel_tol try: self._apply_precision_override_for_test(test, param_kwargs) result = test(self, **param_kwargs) except RuntimeError as rte: # check if rte should stop entire test suite. self._stop_test_suite = self._should_stop_test_suite() # Check if test has been decorated with `@expectedFailure` # Using `__unittest_expecting_failure__` attribute, see # https://github.com/python/cpython/blob/ffa505b580464/Lib/unittest/case.py#L164 # In that case, make it fail with "unexpected success" by suppressing exception if ( getattr(test, "__unittest_expecting_failure__", False) and self._stop_test_suite ): import sys print( "Suppressing fatal exception to trigger unexpected success", file=sys.stderr, ) return # raise the runtime error as is for the test suite to record. raise rte finally: self.precision = guard_precision self.rel_tol = guard_rel_tol return result assert not hasattr(cls, name), f"Redefinition of test {name}" setattr(cls, name, instantiated_test) def default_parametrize_fn(test, generic_cls, device_cls): # By default, no parametrization is needed. yield (test, "", {}, lambda _: []) # Parametrization decorators set the parametrize_fn attribute on the test. parametrize_fn = getattr(test, "parametrize_fn", default_parametrize_fn) # If one of the @dtypes* decorators is present, also parametrize over the dtypes set by it. 
dtypes = cls._get_dtypes(test) if dtypes is not None: def dtype_parametrize_fn(test, generic_cls, device_cls, dtypes=dtypes): for dtype in dtypes: param_kwargs: dict[str, Any] = {} _update_param_kwargs(param_kwargs, "dtype", dtype) # Note that an empty test suffix is set here so that the dtype can be appended # later after the device. yield (test, "", param_kwargs, lambda _: []) parametrize_fn = compose_parametrize_fns( dtype_parametrize_fn, parametrize_fn ) # Instantiate the parametrized tests. for ( test, # noqa: B020 test_suffix, param_kwargs, decorator_fn, ) in parametrize_fn(test, generic_cls, cls): test_suffix = "" if test_suffix == "" else "_" + test_suffix cls_device_type = ( cls.device_type if cls.device_type != "privateuse1" else torch._C._get_privateuse1_backend_name() ) device_suffix = "_" + cls_device_type # Note: device and dtype suffix placement # Special handling here to place dtype(s) after device according to test name convention. dtype_kwarg = None if "dtype" in param_kwargs or "dtypes" in param_kwargs: dtype_kwarg = ( param_kwargs["dtypes"] if "dtypes" in param_kwargs else param_kwargs["dtype"] ) test_name = ( f"{name}{test_suffix}{device_suffix}{_dtype_test_suffix(dtype_kwarg)}" ) instantiate_test_helper( cls=cls, name=test_name, test=test, param_kwargs=param_kwargs, decorator_fn=decorator_fn, ) def run(self, result=None): super().run(result=result) # Early terminate test if _stop_test_suite is set. if self._stop_test_suite: result.stop() class CPUTestBase(DeviceTypeTestBase): device_type = "cpu" # No critical error should stop CPU test suite def _should_stop_test_suite(self): return False class CUDATestBase(DeviceTypeTestBase): device_type = "cuda" _do_cuda_memory_leak_check = True _do_cuda_non_default_stream = True primary_device: ClassVar[str] cudnn_version: ClassVar[Any] no_magma: ClassVar[bool] no_cudnn: ClassVar[bool] def has_cudnn(self): return not self.no_cudnn @classmethod def get_primary_device(cls): return cls.primary_device @classmethod def get_all_devices(cls): primary_device_idx = int(cls.get_primary_device().split(":")[1]) num_devices = torch.cuda.device_count() prim_device = cls.get_primary_device() cuda_str = "cuda:{0}" non_primary_devices = [ cuda_str.format(idx) for idx in range(num_devices) if idx != primary_device_idx ] return [prim_device] + non_primary_devices @classmethod def setUpClass(cls): # has_magma shows up after cuda is initialized t = torch.ones(1).cuda() cls.no_magma = not torch.cuda.has_magma # Determines if cuDNN is available and its version cls.no_cudnn = not torch.backends.cudnn.is_acceptable(t) cls.cudnn_version = None if cls.no_cudnn else torch.backends.cudnn.version() # Acquires the current device as the primary (test) device cls.primary_device = f"cuda:{torch.cuda.current_device()}" # See Note [Lazy Tensor tests in device agnostic testing] lazy_ts_backend_init = False class LazyTestBase(DeviceTypeTestBase): device_type = "lazy" def _should_stop_test_suite(self): return False @classmethod def setUpClass(cls): import torch._lazy import torch._lazy.metrics import torch._lazy.ts_backend global lazy_ts_backend_init if not lazy_ts_backend_init: # Need to connect the TS backend to lazy key before running tests torch._lazy.ts_backend.init() lazy_ts_backend_init = True class MPSTestBase(DeviceTypeTestBase): device_type = "mps" primary_device: ClassVar[str] @classmethod def get_primary_device(cls): return cls.primary_device @classmethod def get_all_devices(cls): # currently only one device is supported on MPS backend prim_device = 
cls.get_primary_device()
        return [prim_device]

    @classmethod
    def setUpClass(cls):
        cls.primary_device = "mps:0"

    def _should_stop_test_suite(self):
        return False


class XPUTestBase(DeviceTypeTestBase):
    device_type = "xpu"
    primary_device: ClassVar[str]

    @classmethod
    def get_primary_device(cls):
        return cls.primary_device

    @classmethod
    def get_all_devices(cls):
        # currently only one device is supported on XPU backend
        prim_device = cls.get_primary_device()
        return [prim_device]

    @classmethod
    def setUpClass(cls):
        cls.primary_device = f"xpu:{torch.xpu.current_device()}"

    def _should_stop_test_suite(self):
        return False


class HPUTestBase(DeviceTypeTestBase):
    device_type = "hpu"
    primary_device: ClassVar[str]

    @classmethod
    def get_primary_device(cls):
        return cls.primary_device

    @classmethod
    def setUpClass(cls):
        cls.primary_device = "hpu:0"


class PrivateUse1TestBase(DeviceTypeTestBase):
    primary_device: ClassVar[str]
    device_mod = None
    device_type = "privateuse1"

    @classmethod
    def get_primary_device(cls):
        return cls.primary_device

    @classmethod
    def get_all_devices(cls):
        primary_device_idx = int(cls.get_primary_device().split(":")[1])
        num_devices = cls.device_mod.device_count()
        prim_device = cls.get_primary_device()
        device_str = f"{cls.device_type}:{{0}}"
        non_primary_devices = [
            device_str.format(idx)
            for idx in range(num_devices)
            if idx != primary_device_idx
        ]
        return [prim_device] + non_primary_devices

    @classmethod
    def setUpClass(cls):
        cls.device_type = torch._C._get_privateuse1_backend_name()
        cls.device_mod = getattr(torch, cls.device_type, None)
        assert (
            cls.device_mod is not None
        ), f"""torch has no module of `{cls.device_type}`, you should register a module by `torch._register_device_module`."""
        cls.primary_device = f"{cls.device_type}:{cls.device_mod.current_device()}"


# Adds available device-type-specific test base classes
def get_device_type_test_bases():
    # set type to List[Any] due to mypy list-of-union issue:
    # https://github.com/python/mypy/issues/3351
    test_bases: list[Any] = []
    if IS_SANDCASTLE or IS_FBCODE:
        if IS_REMOTE_GPU:
            # Skip if sanitizer is enabled
            if not TEST_WITH_ASAN and not TEST_WITH_TSAN and not TEST_WITH_UBSAN:
                test_bases.append(CUDATestBase)
        else:
            test_bases.append(CPUTestBase)
    else:
        test_bases.append(CPUTestBase)
        if torch.cuda.is_available():
            test_bases.append(CUDATestBase)
        if is_privateuse1_backend_available():
            test_bases.append(PrivateUse1TestBase)
        # Disable MPS testing in generic device testing temporarily while we're
        # ramping up support.
# elif torch.backends.mps.is_available(): # test_bases.append(MPSTestBase) return test_bases device_type_test_bases = get_device_type_test_bases() def filter_desired_device_types(device_type_test_bases, except_for=None, only_for=None): # device type cannot appear in both except_for and only_for intersect = set(except_for if except_for else []) & set( only_for if only_for else [] ) assert ( not intersect ), f"device ({intersect}) appeared in both except_for and only_for" # Replace your privateuse1 backend name with 'privateuse1' if is_privateuse1_backend_available(): privateuse1_backend_name = torch._C._get_privateuse1_backend_name() except_for = ( ["privateuse1" if x == privateuse1_backend_name else x for x in except_for] if except_for is not None else None ) only_for = ( ["privateuse1" if x == privateuse1_backend_name else x for x in only_for] if only_for is not None else None ) if except_for: device_type_test_bases = filter( lambda x: x.device_type not in except_for, device_type_test_bases ) if only_for: device_type_test_bases = filter( lambda x: x.device_type in only_for, device_type_test_bases ) return list(device_type_test_bases) # Note [How to extend DeviceTypeTestBase to add new test device] # The following logic optionally allows downstream projects like pytorch/xla to # add more test devices. # Instructions: # - Add a python file (e.g. pytorch/xla/test/pytorch_test_base.py) in downstream project. # - Inside the file, one should inherit from `DeviceTypeTestBase` class and define # a new DeviceTypeTest class (e.g. `XLATestBase`) with proper implementation of # `instantiate_test` method. # - DO NOT import common_device_type inside the file. # `runpy.run_path` with `globals()` already properly setup the context so that # `DeviceTypeTestBase` is already available. # - Set a top-level variable `TEST_CLASS` equal to your new class. # E.g. TEST_CLASS = XLATensorBase # - To run tests with new device type, set `TORCH_TEST_DEVICE` env variable to path # to this file. Multiple paths can be separated by `:`. # See pytorch/xla/test/pytorch_test_base.py for a more detailed example. _TORCH_TEST_DEVICES = os.environ.get("TORCH_TEST_DEVICES", None) if _TORCH_TEST_DEVICES: for path in _TORCH_TEST_DEVICES.split(":"): # runpy (a stdlib module) lacks annotations mod = runpy.run_path(path, init_globals=globals()) # type: ignore[func-returns-value] device_type_test_bases.append(mod["TEST_CLASS"]) PYTORCH_CUDA_MEMCHECK = os.getenv("PYTORCH_CUDA_MEMCHECK", "0") == "1" PYTORCH_TESTING_DEVICE_ONLY_FOR_KEY = "PYTORCH_TESTING_DEVICE_ONLY_FOR" PYTORCH_TESTING_DEVICE_EXCEPT_FOR_KEY = "PYTORCH_TESTING_DEVICE_EXCEPT_FOR" PYTORCH_TESTING_DEVICE_FOR_CUSTOM_KEY = "PYTORCH_TESTING_DEVICE_FOR_CUSTOM" def get_desired_device_type_test_bases( except_for=None, only_for=None, include_lazy=False, allow_mps=False, allow_xpu=False ): # allow callers to specifically opt tests into being tested on MPS, similar to `include_lazy` test_bases = device_type_test_bases.copy() if allow_mps and TEST_MPS and MPSTestBase not in test_bases: test_bases.append(MPSTestBase) if allow_xpu and TEST_XPU and XPUTestBase not in test_bases: test_bases.append(XPUTestBase) if TEST_HPU and HPUTestBase not in test_bases: test_bases.append(HPUTestBase) # Filter out the device types based on user inputs desired_device_type_test_bases = filter_desired_device_types( test_bases, except_for, only_for ) if include_lazy: # Note [Lazy Tensor tests in device agnostic testing] # Right now, test_view_ops.py runs with LazyTensor. 
        # We don't want to opt every device-agnostic test into using the lazy device,
        # because many of them will fail.
        # So instead, the only way to opt a specific device-agnostic test file into
        # lazy tensor testing is with include_lazy=True
        if IS_FBCODE:
            print(
                "TorchScript backend not yet supported in FBCODE/OVRSOURCE builds",
                file=sys.stderr,
            )
        else:
            desired_device_type_test_bases.append(LazyTestBase)

    def split_if_not_empty(x: str):
        return x.split(",") if x else []

    # run some cuda testcases on other devices if available
    # Usage:
    # export PYTORCH_TESTING_DEVICE_FOR_CUSTOM=privateuse1
    env_custom_only_for = split_if_not_empty(
        os.getenv(PYTORCH_TESTING_DEVICE_FOR_CUSTOM_KEY, "")
    )
    if env_custom_only_for:
        desired_device_type_test_bases += filter(
            lambda x: x.device_type in env_custom_only_for, test_bases
        )
        desired_device_type_test_bases = list(set(desired_device_type_test_bases))

    # Filter out the device types based on environment variables if available
    # Usage:
    # export PYTORCH_TESTING_DEVICE_ONLY_FOR=cuda,cpu
    # export PYTORCH_TESTING_DEVICE_EXCEPT_FOR=xla
    env_only_for = split_if_not_empty(
        os.getenv(PYTORCH_TESTING_DEVICE_ONLY_FOR_KEY, "")
    )
    env_except_for = split_if_not_empty(
        os.getenv(PYTORCH_TESTING_DEVICE_EXCEPT_FOR_KEY, "")
    )

    return filter_desired_device_types(
        desired_device_type_test_bases, env_except_for, env_only_for
    )


# Adds 'instantiated' device-specific test cases to the given scope.
# The tests in these test cases are derived from the generic tests in
# generic_test_class. This function should be used instead of
# instantiate_parametrized_tests() if the test class contains
# device-specific tests (NB: this supports additional @parametrize usage).
#
# See note "Writing Test Templates"
# TODO: remove the "allow_xpu" option once Intel GPU supports all test cases instantiated by this function.
def instantiate_device_type_tests(
    generic_test_class,
    scope,
    except_for=None,
    only_for=None,
    include_lazy=False,
    allow_mps=False,
    allow_xpu=False,
):
    # Removes the generic test class from its enclosing scope so its tests
    # are not discoverable.
    del scope[generic_test_class.__name__]

    # Creates an 'empty' version of the generic_test_class
    # Note: we don't inherit from the generic_test_class directly because
    # that would add its tests to our test classes and they would be
    # discovered (despite not being runnable). Inherited methods also
    # can't be removed later, and we can't rely on load_tests because
    # pytest doesn't support it (as of this writing).
empty_name = generic_test_class.__name__ + "_base" empty_class = type(empty_name, generic_test_class.__bases__, {}) # Acquires members names # See Note [Overriding methods in generic tests] generic_members = set(generic_test_class.__dict__.keys()) - set( empty_class.__dict__.keys() ) generic_tests = [x for x in generic_members if x.startswith("test")] # Creates device-specific test cases for base in get_desired_device_type_test_bases( except_for, only_for, include_lazy, allow_mps, allow_xpu ): class_name = generic_test_class.__name__ + base.device_type.upper() # type set to Any and suppressed due to unsupport runtime class: # https://github.com/python/mypy/wiki/Unsupported-Python-Features device_type_test_class: Any = type(class_name, (base, empty_class), {}) for name in generic_members: if name in generic_tests: # Instantiates test member test = getattr(generic_test_class, name) # XLA-compat shim (XLA's instantiate_test takes doesn't take generic_cls) sig = inspect.signature(device_type_test_class.instantiate_test) if len(sig.parameters) == 3: # Instantiates the device-specific tests device_type_test_class.instantiate_test( name, copy.deepcopy(test), generic_cls=generic_test_class ) else: device_type_test_class.instantiate_test(name, copy.deepcopy(test)) else: # Ports non-test member assert ( name not in device_type_test_class.__dict__ ), f"Redefinition of directly defined member {name}" nontest = getattr(generic_test_class, name) setattr(device_type_test_class, name, nontest) # The dynamically-created test class derives from the test template class # and the empty class. Arrange for both setUpClass and tearDownClass methods # to be called. This allows the parameterized test classes to support setup # and teardown. @classmethod def _setUpClass(cls): base.setUpClass() empty_class.setUpClass() @classmethod def _tearDownClass(cls): empty_class.tearDownClass() base.tearDownClass() device_type_test_class.setUpClass = _setUpClass device_type_test_class.tearDownClass = _tearDownClass # Mimics defining the instantiated class in the caller's file # by setting its module to the given class's and adding # the module to the given scope. # This lets the instantiated class be discovered by unittest. device_type_test_class.__module__ = generic_test_class.__module__ scope[class_name] = device_type_test_class # Category of dtypes to run an OpInfo-based test for # Example use: @ops(dtype=OpDTypes.supported) # # There are 7 categories: # - supported: Every dtype supported by the operator. Use for exhaustive # testing of all dtypes. # - unsupported: Run tests on dtypes not supported by the operator. e.g. for # testing the operator raises an error and doesn't crash. # - supported_backward: Every dtype supported by the operator's backward pass. # - unsupported_backward: Run tests on dtypes not supported by the operator's backward pass. # - any_one: Runs a test for one dtype the operator supports. Prioritizes dtypes the # operator supports in both forward and backward. # - none: Useful for tests that are not dtype-specific. No dtype will be passed to the test # when this is selected. # - any_common_cpu_cuda_one: Pick a dtype that supports both CPU and CUDA. 
class OpDTypes(Enum): supported = 0 # Test all supported dtypes (default) unsupported = 1 # Test only unsupported dtypes supported_backward = 2 # Test all supported backward dtypes unsupported_backward = 3 # Test only unsupported backward dtypes any_one = 4 # Test precisely one supported dtype none = 5 # Instantiate no dtype variants (no dtype kwarg needed) any_common_cpu_cuda_one = ( 6 # Test precisely one supported dtype that is common to both cuda and cpu ) # Arbitrary order ANY_DTYPE_ORDER = ( torch.float32, torch.float64, torch.complex64, torch.complex128, torch.float16, torch.bfloat16, torch.long, torch.int32, torch.int16, torch.int8, torch.uint8, torch.bool, ) def _serialize_sample(sample_input): # NB: For OpInfos, SampleInput.summary() prints in a cleaner way. if getattr(sample_input, "summary", None) is not None: return sample_input.summary() return str(sample_input) # Decorator that defines the OpInfos a test template should be instantiated for. # # Example usage: # # @ops(unary_ufuncs) # def test_numerics(self, device, dtype, op): # <test_code> # # This will instantiate variants of test_numerics for each given OpInfo, # on each device the OpInfo's operator supports, and for every dtype supported by # that operator. There are a few caveats to the dtype rule, explained below. # # The @ops decorator can accept two # additional arguments, "dtypes" and "allowed_dtypes". If "dtypes" is specified # then the test variants are instantiated for those dtypes, regardless of # what the operator supports. If given "allowed_dtypes" then test variants # are instantiated only for the intersection of allowed_dtypes and the dtypes # they would otherwise be instantiated with. That is, allowed_dtypes composes # with the options listed above and below. # # The "dtypes" argument can also accept additional values (see OpDTypes above): # OpDTypes.supported - the test is instantiated for all dtypes the operator # supports # OpDTypes.unsupported - the test is instantiated for all dtypes the operator # doesn't support # OpDTypes.supported_backward - the test is instantiated for all dtypes the # operator's gradient formula supports # OpDTypes.unsupported_backward - the test is instantiated for all dtypes the # operator's gradient formula doesn't support # OpDTypes.any_one - the test is instantiated for one dtype the # operator supports. The dtype supports forward and backward if possible. # OpDTypes.none - the test is instantiated without any dtype. The test signature # should not include a dtype kwarg in this case. # OpDTypes.any_common_cpu_cuda_one - the test is instantiated for a dtype # that supports both CPU and CUDA. # # These options allow tests to have considerable control over the dtypes # they're instantiated for. 
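#
# For example (illustrative sketches, not existing tests):
#
#   @ops(op_db, allowed_dtypes=(torch.float32,))
#   def test_foo_float32_only(self, device, dtype, op):
#       ...
#
#   @ops(op_db, dtypes=OpDTypes.none)
#   def test_foo_no_dtype(self, device, op):
#       ...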
class ops(_TestParametrizer): def __init__( self, op_list, *, dtypes: Union[OpDTypes, Sequence[torch.dtype]] = OpDTypes.supported, allowed_dtypes: Optional[Sequence[torch.dtype]] = None, skip_if_dynamo=True, ): self.op_list = list(op_list) self.opinfo_dtypes = dtypes self.allowed_dtypes = ( set(allowed_dtypes) if allowed_dtypes is not None else None ) self.skip_if_dynamo = skip_if_dynamo def _parametrize_test(self, test, generic_cls, device_cls): """Parameterizes the given test function across each op and its associated dtypes.""" if device_cls is None: raise RuntimeError( "The @ops decorator is only intended to be used in a device-specific " "context; use it with instantiate_device_type_tests() instead of " "instantiate_parametrized_tests()" ) op = check_exhausted_iterator = object() for op in self.op_list: # Determine the set of dtypes to use. dtypes: Union[set[torch.dtype], set[None]] if isinstance(self.opinfo_dtypes, Sequence): dtypes = set(self.opinfo_dtypes) elif self.opinfo_dtypes == OpDTypes.unsupported_backward: dtypes = set(get_all_dtypes()).difference( op.supported_backward_dtypes(device_cls.device_type) ) elif self.opinfo_dtypes == OpDTypes.supported_backward: dtypes = op.supported_backward_dtypes(device_cls.device_type) elif self.opinfo_dtypes == OpDTypes.unsupported: dtypes = set(get_all_dtypes()).difference( op.supported_dtypes(device_cls.device_type) ) elif self.opinfo_dtypes == OpDTypes.supported: dtypes = set(op.supported_dtypes(device_cls.device_type)) elif self.opinfo_dtypes == OpDTypes.any_one: # Tries to pick a dtype that supports both forward or backward supported = op.supported_dtypes(device_cls.device_type) supported_backward = op.supported_backward_dtypes( device_cls.device_type ) supported_both = supported.intersection(supported_backward) dtype_set = supported_both if len(supported_both) > 0 else supported for dtype in ANY_DTYPE_ORDER: if dtype in dtype_set: dtypes = {dtype} break else: dtypes = {} elif self.opinfo_dtypes == OpDTypes.any_common_cpu_cuda_one: # Tries to pick a dtype that supports both CPU and CUDA supported = set(op.dtypes).intersection(op.dtypesIfCUDA) if supported: dtypes = { next(dtype for dtype in ANY_DTYPE_ORDER if dtype in supported) } else: dtypes = {} elif self.opinfo_dtypes == OpDTypes.none: dtypes = {None} else: raise RuntimeError(f"Unknown OpDType: {self.opinfo_dtypes}") if self.allowed_dtypes is not None: dtypes = dtypes.intersection(self.allowed_dtypes) # Construct the test name; device / dtype parts are handled outside. # See [Note: device and dtype suffix placement] test_name = op.formatted_name # Filter sample skips / xfails to only those that apply to the OpInfo. # These are defined on the test function via decorators. sample_skips_and_xfails = getattr(test, "sample_skips_and_xfails", None) if sample_skips_and_xfails is not None: sample_skips_and_xfails = [ rule for rule in sample_skips_and_xfails if rule.op_match_fn(device_cls.device_type, op) ] for dtype in dtypes: # Construct parameter kwargs to pass to the test. param_kwargs = {"op": op} _update_param_kwargs(param_kwargs, "dtype", dtype) # NOTE: test_wrapper exists because we don't want to apply # op-specific decorators to the original test. # Test-specific decorators are applied to the original test, # however. 
try: @wraps(test) def test_wrapper(*args, **kwargs): try: return test(*args, **kwargs) except unittest.SkipTest as e: raise e except Exception as e: tracked_input = get_tracked_input() if PRINT_REPRO_ON_FAILURE and tracked_input is not None: e_tracked = Exception( # noqa: TRY002 f"Caused by {tracked_input.type_desc} " f"at index {tracked_input.index}: " f"{_serialize_sample(tracked_input.val)}" ) e_tracked._tracked_input = tracked_input # type: ignore[attr] raise e_tracked from e raise e finally: clear_tracked_input() if self.skip_if_dynamo and not TEST_WITH_TORCHINDUCTOR: test_wrapper = skipIfTorchDynamo( "Policy: we don't run OpInfo tests w/ Dynamo" )(test_wrapper) # Initialize info for the last input seen. This is useful for tracking # down which inputs caused a test failure. Note that TrackedInputIter is # responsible for managing this. test.tracked_input = None decorator_fn = partial( op.get_decorators, generic_cls.__name__, test.__name__, device_cls.device_type, dtype, ) if sample_skips_and_xfails is not None: test_wrapper.sample_skips_and_xfails = sample_skips_and_xfails yield (test_wrapper, test_name, param_kwargs, decorator_fn) except Exception as ex: # Provides an error message for debugging before rethrowing the exception print(f"Failed to instantiate {test_name} for op {op.name}!") raise ex if op is check_exhausted_iterator: raise ValueError( "An empty op_list was passed to @ops. " "Note that this may result from reuse of a generator." ) # Decorator that skips a test if the given condition is true. # Notes: # (1) Skip conditions stack. # (2) Skip conditions can be bools or strings. If a string the # test base must have defined the corresponding attribute to be False # for the test to run. If you want to use a string argument you should # probably define a new decorator instead (see below). # (3) Prefer the existing decorators to defining the 'device_type' kwarg. class skipIf: def __init__(self, dep, reason, device_type=None): self.dep = dep self.reason = reason self.device_type = device_type def __call__(self, fn): @wraps(fn) def dep_fn(slf, *args, **kwargs): if ( self.device_type is None or self.device_type == slf.device_type or ( isinstance(self.device_type, Iterable) and slf.device_type in self.device_type ) ): if (isinstance(self.dep, str) and getattr(slf, self.dep, True)) or ( isinstance(self.dep, bool) and self.dep ): raise unittest.SkipTest(self.reason) return fn(slf, *args, **kwargs) return dep_fn # Skips a test on CPU if the condition is true. class skipCPUIf(skipIf): def __init__(self, dep, reason): super().__init__(dep, reason, device_type="cpu") # Skips a test on CUDA if the condition is true. class skipCUDAIf(skipIf): def __init__(self, dep, reason): super().__init__(dep, reason, device_type="cuda") # Skips a test on XPU if the condition is true. class skipXPUIf(skipIf): def __init__(self, dep, reason): super().__init__(dep, reason, device_type="xpu") # Skips a test on XPU or CUDA if the condition is true. class skipGPUIf(skipIf): def __init__(self, dep, reason): super().__init__(dep, reason, device_type=GPU_TYPES) # Skips a test on Lazy if the condition is true. class skipLazyIf(skipIf): def __init__(self, dep, reason): super().__init__(dep, reason, device_type="lazy") # Skips a test on Meta if the condition is true. class skipMetaIf(skipIf): def __init__(self, dep, reason): super().__init__(dep, reason, device_type="meta") # Skips a test on MPS if the condition is true. 
class skipMPSIf(skipIf): def __init__(self, dep, reason): super().__init__(dep, reason, device_type="mps") class skipHPUIf(skipIf): def __init__(self, dep, reason): super().__init__(dep, reason, device_type="hpu") # Skips a test on XLA if the condition is true. class skipXLAIf(skipIf): def __init__(self, dep, reason): super().__init__(dep, reason, device_type="xla") class skipPRIVATEUSE1If(skipIf): def __init__(self, dep, reason): device_type = torch._C._get_privateuse1_backend_name() super().__init__(dep, reason, device_type=device_type) def _has_sufficient_memory(device, size): if torch.device(device).type == "cuda": if not torch.cuda.is_available(): return False gc.collect() torch.cuda.empty_cache() # torch.cuda.mem_get_info, aka cudaMemGetInfo, returns a tuple of (free memory, total memory) of a GPU if device == "cuda": device = "cuda:0" return ( torch.cuda.memory.mem_get_info(device)[0] * torch.cuda.memory.get_per_process_memory_fraction(device) ) >= size if device == "xla": raise unittest.SkipTest("TODO: Memory availability checks for XLA?") if device == "xpu": raise unittest.SkipTest("TODO: Memory availability checks for Intel GPU?") if device != "cpu": raise unittest.SkipTest("Unknown device type") # CPU if not HAS_PSUTIL: raise unittest.SkipTest("Need psutil to determine if memory is sufficient") # The sanitizers have significant memory overheads if TEST_WITH_ASAN or TEST_WITH_TSAN or TEST_WITH_UBSAN: effective_size = size * 10 else: effective_size = size if psutil.virtual_memory().available < effective_size: gc.collect() return psutil.virtual_memory().available >= effective_size def largeTensorTest(size, device=None, inductor=TEST_WITH_TORCHINDUCTOR): """Skip test if the device has insufficient memory to run the test size may be a number of bytes, a string of the form "N GB", or a callable If the test is a device generic test, available memory on the primary device will be checked. It can also be overriden by the optional `device=` argument. In other tests, the `device=` argument needs to be specified. """ if isinstance(size, str): assert size.endswith(("GB", "gb")), "only bytes or GB supported" size = 1024**3 * int(size[:-2]) def inner(fn): @wraps(fn) def dep_fn(self, *args, **kwargs): size_bytes: int = size(self, *args, **kwargs) if callable(size) else size _device = device if _device is None: if hasattr(self, "get_primary_device"): _device = self.get_primary_device() else: _device = self.device # If this is running with GPU cpp_wrapper, the autotuning step will generate # an additional array of the same size as the input. 
if inductor and torch._inductor.config.cpp_wrapper and _device != "cpu": size_bytes *= 2 if not _has_sufficient_memory(_device, size_bytes): raise unittest.SkipTest(f"Insufficient {_device} memory") return fn(self, *args, **kwargs) return dep_fn return inner class expectedFailure: def __init__(self, device_type): self.device_type = device_type def __call__(self, fn): @wraps(fn) def efail_fn(slf, *args, **kwargs): if ( not hasattr(slf, "device_type") and hasattr(slf, "device") and isinstance(slf.device, str) ): target_device_type = slf.device else: target_device_type = slf.device_type if self.device_type is None or self.device_type == target_device_type: try: fn(slf, *args, **kwargs) except Exception: return else: slf.fail("expected test to fail, but it passed") return fn(slf, *args, **kwargs) return efail_fn class onlyOn: def __init__(self, device_type): self.device_type = device_type def __call__(self, fn): @wraps(fn) def only_fn(slf, *args, **kwargs): if self.device_type != slf.device_type: reason = f"Only runs on {self.device_type}" raise unittest.SkipTest(reason) return fn(slf, *args, **kwargs) return only_fn # Decorator that provides all available devices of the device type to the test # as a list of strings instead of providing a single device string. # Skips the test if the number of available devices of the variant's device # type is less than the 'num_required_devices' arg. class deviceCountAtLeast: def __init__(self, num_required_devices): self.num_required_devices = num_required_devices def __call__(self, fn): assert not hasattr( fn, "num_required_devices" ), f"deviceCountAtLeast redefinition for {fn.__name__}" fn.num_required_devices = self.num_required_devices @wraps(fn) def multi_fn(slf, devices, *args, **kwargs): if len(devices) < self.num_required_devices: reason = f"fewer than {self.num_required_devices} devices detected" raise unittest.SkipTest(reason) return fn(slf, devices, *args, **kwargs) return multi_fn # Only runs the test on the native device type (currently CPU, CUDA, Meta and PRIVATEUSE1) def onlyNativeDeviceTypes(fn: Callable[_P, _T]) -> Callable[_P, _T]: @wraps(fn) def only_fn(self, *args: _P.args, **kwargs: _P.kwargs) -> _T: if self.device_type not in NATIVE_DEVICES: reason = f"onlyNativeDeviceTypes: doesn't run on {self.device_type}" raise unittest.SkipTest(reason) return fn(self, *args, **kwargs) return only_fn # Only runs the test on the native device types and devices specified in the devices list def onlyNativeDeviceTypesAnd(devices=None): def decorator(fn): @wraps(fn) def only_fn(self, *args, **kwargs): if ( self.device_type not in NATIVE_DEVICES and self.device_type not in devices ): reason = f"onlyNativeDeviceTypesAnd {devices} : doesn't run on {self.device_type}" raise unittest.SkipTest(reason) return fn(self, *args, **kwargs) return only_fn return decorator # Specifies per-dtype precision overrides. # Ex. # # @precisionOverride({torch.half : 1e-2, torch.float : 1e-4}) # @dtypes(torch.half, torch.float, torch.double) # def test_X(self, device, dtype): # ... # # When the test is instantiated its class's precision will be set to the # corresponding override, if it exists. # self.precision can be accessed directly, and it also controls the behavior of # functions like self.assertEqual(). # # Note that self.precision is a scalar value, so if you require multiple # precisions (or are working with multiple dtypes) they should be specified # explicitly and computed using self.precision (e.g. # self.precision *2, max(1, self.precision)). 
class precisionOverride:
    def __init__(self, d):
        assert isinstance(
            d, dict
        ), "precisionOverride not given a dtype : precision dict!"
        for dtype in d.keys():
            assert isinstance(
                dtype, torch.dtype
            ), f"precisionOverride given unknown dtype {dtype}"

        self.d = d

    def __call__(self, fn):
        fn.precision_overrides = self.d
        return fn


# Specifies per-dtype tolerance overrides tol(atol, rtol). It has priority over
# precisionOverride.
# Ex.
#
#   @toleranceOverride({torch.float : tol(atol=1e-2, rtol=1e-3),
#                       torch.double : tol(atol=1e-4, rtol=0)})
#   @dtypes(torch.half, torch.float, torch.double)
#   def test_X(self, device, dtype):
#     ...
#
# When the test is instantiated its class's tolerance will be set to the
# corresponding override, if it exists.
# self.rtol and self.precision can be accessed directly, and they also control
# the behavior of functions like self.assertEqual().
#
# The above example sets atol = 1e-2 and rtol = 1e-3 for torch.float and
# atol = 1e-4 and rtol = 0 for torch.double.
tol = namedtuple("tol", ["atol", "rtol"])


class toleranceOverride:
    def __init__(self, d):
        assert isinstance(d, dict), "toleranceOverride not given a dtype : tol dict!"
        for dtype, prec in d.items():
            assert isinstance(
                dtype, torch.dtype
            ), f"toleranceOverride given unknown dtype {dtype}"
            assert isinstance(
                prec, tol
            ), "toleranceOverride not given a dtype : tol dict!"

        self.d = d

    def __call__(self, fn):
        fn.tolerance_overrides = self.d
        return fn


# Decorator that instantiates a variant of the test for each given dtype.
# Notes:
#   (1) Tests that accept the dtype argument MUST use this decorator.
#   (2) Can be overridden for CPU or CUDA, respectively, using dtypesIfCPU
#       or dtypesIfCUDA.
#   (3) Can accept an iterable of dtypes or an iterable of tuples
#       of dtypes.
# Examples:
#   @dtypes(torch.float32, torch.float64)
#   @dtypes((torch.long, torch.float32), (torch.int, torch.float64))
class dtypes:
    def __init__(self, *args, device_type="all"):
        if len(args) > 0 and isinstance(args[0], (list, tuple)):
            for arg in args:
                assert isinstance(arg, (list, tuple)), (
                    "When one dtype variant is a tuple or list, "
                    "all dtype variants must be. "
                    f"Received non-list non-tuple dtype {str(arg)}"
                )
                assert all(
                    isinstance(dtype, torch.dtype) for dtype in arg
                ), f"Unknown dtype in {str(arg)}"
        else:
            assert all(
                isinstance(arg, torch.dtype) for arg in args
            ), f"Unknown dtype in {str(args)}"

        self.args = args
        self.device_type = device_type

    def __call__(self, fn):
        d = getattr(fn, "dtypes", {})
        assert self.device_type not in d, f"dtypes redefinition for {self.device_type}"
        d[self.device_type] = self.args
        fn.dtypes = d
        return fn


# Overrides specified dtypes on the CPU.
class dtypesIfCPU(dtypes):
    def __init__(self, *args):
        super().__init__(*args, device_type="cpu")


# Overrides specified dtypes on CUDA.
class dtypesIfCUDA(dtypes): def __init__(self, *args): super().__init__(*args, device_type="cuda") class dtypesIfMPS(dtypes): def __init__(self, *args): super().__init__(*args, device_type="mps") class dtypesIfHPU(dtypes): def __init__(self, *args): super().__init__(*args, device_type="hpu") class dtypesIfPRIVATEUSE1(dtypes): def __init__(self, *args): super().__init__(*args, device_type=torch._C._get_privateuse1_backend_name()) def onlyCPU(fn): return onlyOn("cpu")(fn) def onlyCUDA(fn): return onlyOn("cuda")(fn) def onlyMPS(fn): return onlyOn("mps")(fn) def onlyXPU(fn): return onlyOn("xpu")(fn) def onlyHPU(fn): return onlyOn("hpu")(fn) def onlyPRIVATEUSE1(fn): device_type = torch._C._get_privateuse1_backend_name() device_mod = getattr(torch, device_type, None) if device_mod is None: reason = f"Skip as torch has no module of {device_type}" return unittest.skip(reason)(fn) return onlyOn(device_type)(fn) def onlyCUDAAndPRIVATEUSE1(fn): @wraps(fn) def only_fn(self, *args, **kwargs): if self.device_type not in ("cuda", torch._C._get_privateuse1_backend_name()): reason = f"onlyCUDAAndPRIVATEUSE1: doesn't run on {self.device_type}" raise unittest.SkipTest(reason) return fn(self, *args, **kwargs) return only_fn def disablecuDNN(fn): @wraps(fn) def disable_cudnn(self, *args, **kwargs): if self.device_type == "cuda" and self.has_cudnn(): with torch.backends.cudnn.flags(enabled=False): return fn(self, *args, **kwargs) return fn(self, *args, **kwargs) return disable_cudnn def disableMkldnn(fn): @wraps(fn) def disable_mkldnn(self, *args, **kwargs): if torch.backends.mkldnn.is_available(): with torch.backends.mkldnn.flags(enabled=False): return fn(self, *args, **kwargs) return fn(self, *args, **kwargs) return disable_mkldnn def expectedFailureCPU(fn): return expectedFailure("cpu")(fn) def expectedFailureCUDA(fn): return expectedFailure("cuda")(fn) def expectedFailureXPU(fn): return expectedFailure("xpu")(fn) def expectedFailureMeta(fn): return skipIfTorchDynamo()(expectedFailure("meta")(fn)) def expectedFailureXLA(fn): return expectedFailure("xla")(fn) def expectedFailureHPU(fn): return expectedFailure("hpu")(fn) def expectedFailureMPS(fn): return expectedFailure("mps")(fn) def expectedFailureMPSPre15(fn): import platform version = float(".".join(platform.mac_ver()[0].split(".")[:2]) or -1) if not version or version < 1.0: # cpu or other unsupported device return fn if version < 15.0: return expectedFailure("mps")(fn) return fn def expectedFailureMPSPre14(fn): import platform version = float(".".join(platform.mac_ver()[0].split(".")[:2]) or -1) if not version or version < 1.0: # cpu or other unsupported device return fn if version < 14.0: return expectedFailure("mps")(fn) return fn # Skips a test on CPU if LAPACK is not available. def skipCPUIfNoLapack(fn): return skipCPUIf(not torch._C.has_lapack, "PyTorch compiled without Lapack")(fn) # Skips a test on CPU if FFT is not available. def skipCPUIfNoFFT(fn): return skipCPUIf(not torch._C.has_spectral, "PyTorch is built without FFT support")( fn ) # Skips a test on CPU if MKL is not available. def skipCPUIfNoMkl(fn): return skipCPUIf(not TEST_MKL, "PyTorch is built without MKL support")(fn) # Skips a test on CPU if MKL Sparse is not available (it's not linked on Windows). def skipCPUIfNoMklSparse(fn): return skipCPUIf( IS_WINDOWS or not TEST_MKL, "PyTorch is built without MKL support" )(fn) # Skips a test on CPU if mkldnn is not available. 
def skipCPUIfNoMkldnn(fn): return skipCPUIf( not torch.backends.mkldnn.is_available(), "PyTorch is built without mkldnn support", )(fn) # Skips a test on CUDA if MAGMA is not available. def skipCUDAIfNoMagma(fn): return skipCUDAIf("no_magma", "no MAGMA library detected")( skipCUDANonDefaultStreamIf(True)(fn) ) def has_cusolver(): return not TEST_WITH_ROCM def has_hipsolver(): rocm_version = _get_torch_rocm_version() # hipSOLVER is disabled on ROCM < 5.3 return rocm_version >= (5, 3) # Skips a test on CUDA/ROCM if cuSOLVER/hipSOLVER is not available def skipCUDAIfNoCusolver(fn): return skipCUDAIf( not has_cusolver() and not has_hipsolver(), "cuSOLVER not available" )(fn) # Skips a test if both cuSOLVER and MAGMA are not available def skipCUDAIfNoMagmaAndNoCusolver(fn): if has_cusolver(): return fn else: # cuSolver is disabled on cuda < 10.1.243, tests depend on MAGMA return skipCUDAIfNoMagma(fn) # Skips a test if both cuSOLVER/hipSOLVER and MAGMA are not available def skipCUDAIfNoMagmaAndNoLinalgsolver(fn): if has_cusolver() or has_hipsolver(): return fn else: # cuSolver is disabled on cuda < 10.1.243, tests depend on MAGMA return skipCUDAIfNoMagma(fn) # Skips a test on CUDA when using ROCm. def skipCUDAIfRocm(func=None, *, msg="test doesn't currently work on the ROCm stack"): def dec_fn(fn): reason = f"skipCUDAIfRocm: {msg}" return skipCUDAIf(TEST_WITH_ROCM, reason=reason)(fn) if func: return dec_fn(func) return dec_fn # Skips a test on CUDA when not using ROCm. def skipCUDAIfNotRocm(fn): return skipCUDAIf( not TEST_WITH_ROCM, "test doesn't currently work on the CUDA stack" )(fn) # Skips a test on CUDA if ROCm is unavailable or its version is lower than requested. def skipCUDAIfRocmVersionLessThan(version=None): def dec_fn(fn): @wraps(fn) def wrap_fn(self, *args, **kwargs): if self.device_type == "cuda": if not TEST_WITH_ROCM: reason = "ROCm not available" raise unittest.SkipTest(reason) rocm_version_tuple = _get_torch_rocm_version() if ( rocm_version_tuple is None or version is None or rocm_version_tuple < tuple(version) ): reason = ( f"ROCm {rocm_version_tuple} is available but {version} required" ) raise unittest.SkipTest(reason) return fn(self, *args, **kwargs) return wrap_fn return dec_fn # Skips a test on CUDA when using ROCm. def skipCUDAIfNotMiopenSuggestNHWC(fn): return skipCUDAIf( not TEST_WITH_MIOPEN_SUGGEST_NHWC, "test doesn't currently work without MIOpen NHWC activation", )(fn) # Skips a test for specified CUDA versions, given in the form of a list of [major, minor]s. def skipCUDAVersionIn(versions: Optional[list[tuple[int, int]]] = None): def dec_fn(fn): @wraps(fn) def wrap_fn(self, *args, **kwargs): version = _get_torch_cuda_version() if version == (0, 0): # cpu or rocm return fn(self, *args, **kwargs) if version in (versions or []): reason = f"test skipped for CUDA version {version}" raise unittest.SkipTest(reason) return fn(self, *args, **kwargs) return wrap_fn return dec_fn # Skips a test for CUDA versions less than specified, given in the form of [major, minor]. def skipCUDAIfVersionLessThan(versions: Optional[tuple[int, int]] = None): def dec_fn(fn): @wraps(fn) def wrap_fn(self, *args, **kwargs): version = _get_torch_cuda_version() if version == (0, 0): # cpu or rocm return fn(self, *args, **kwargs) if version < versions: reason = f"test skipped for CUDA versions < {version}" raise unittest.SkipTest(reason) return fn(self, *args, **kwargs) return wrap_fn return dec_fn # Skips a test on CUDA if cuDNN is unavailable or its version is lower than requested. 
def skipCUDAIfCudnnVersionLessThan(version=0): def dec_fn(fn): @wraps(fn) def wrap_fn(self, *args, **kwargs): if self.device_type == "cuda": if self.no_cudnn: reason = "cuDNN not available" raise unittest.SkipTest(reason) if self.cudnn_version is None or self.cudnn_version < version: reason = f"cuDNN version {self.cudnn_version} is available but {version} required" raise unittest.SkipTest(reason) return fn(self, *args, **kwargs) return wrap_fn return dec_fn # Skips a test on CUDA if cuSparse generic API is not available def skipCUDAIfNoCusparseGeneric(fn): return skipCUDAIf(not TEST_CUSPARSE_GENERIC, "cuSparse Generic API not available")( fn ) def skipCUDAIfNoHipsparseGeneric(fn): return skipCUDAIf( not TEST_HIPSPARSE_GENERIC, "hipSparse Generic API not available" )(fn) def skipCUDAIfNoSparseGeneric(fn): return skipCUDAIf( not (TEST_CUSPARSE_GENERIC or TEST_HIPSPARSE_GENERIC), "Sparse Generic API not available", )(fn) def skipCUDAIfNoCudnn(fn): return skipCUDAIfCudnnVersionLessThan(0)(fn) def skipCUDAIfMiopen(fn): return skipCUDAIf(torch.version.hip is not None, "Marked as skipped for MIOpen")(fn) def skipCUDAIfNoMiopen(fn): return skipCUDAIf(torch.version.hip is None, "MIOpen is not available")( skipCUDAIfNoCudnn(fn) ) def skipLazy(fn): return skipLazyIf(True, "test doesn't work with lazy tensors")(fn) def skipMeta(fn): return skipMetaIf(True, "test doesn't work with meta tensors")(fn) def skipXLA(fn): return skipXLAIf(True, "Marked as skipped for XLA")(fn) def skipMPS(fn): return skipMPSIf(True, "test doesn't work on MPS backend")(fn) def skipHPU(fn): return skipHPUIf(True, "test doesn't work on HPU backend")(fn) def skipPRIVATEUSE1(fn): return skipPRIVATEUSE1If(True, "test doesn't work on privateuse1 backend")(fn) # TODO: the "all" in the name isn't true anymore for quite some time as we have also have for example XLA and MPS now. # This should probably enumerate all available device type test base classes. def get_all_device_types() -> list[str]: return ["cpu"] if not torch.cuda.is_available() else ["cpu", "cuda"] flex_attention_supported_platform = unittest.skipUnless( torch.cuda.is_available() and torch.utils._triton.has_triton() and torch.cuda.get_device_capability() >= (8, 0), "Requires CUDA and Triton", ) ```
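The decorators defined above are normally stacked on a generic test class and then expanded per device type by `instantiate_device_type_tests` from this same module. A minimal, hypothetical usage sketch (the test class, test body, and tolerance values below are illustrative and not part of the file above):

```py
# Hypothetical example: how dtypes/dtypesIfCUDA/toleranceOverride compose on a
# device-generic test. Only the decorators and instantiate_device_type_tests
# come from torch.testing._internal.common_device_type.
import torch
from torch.testing._internal.common_device_type import (
    dtypes,
    dtypesIfCUDA,
    instantiate_device_type_tests,
    tol,
    toleranceOverride,
)
from torch.testing._internal.common_utils import TestCase, run_tests


class TestArithmetic(TestCase):
    # Instantiated once per device type and once per listed dtype.
    @toleranceOverride({torch.float16: tol(atol=1e-2, rtol=1e-2)})
    @dtypes(torch.float32, torch.float64)
    @dtypesIfCUDA(torch.float16, torch.float32)
    def test_add(self, device, dtype):
        a = torch.randn(8, device=device, dtype=dtype)
        b = torch.randn(8, device=device, dtype=dtype)
        # self.assertEqual honors the per-dtype tolerance override above.
        self.assertEqual(a + b, torch.add(a, b))


# Generates TestArithmeticCPU, TestArithmeticCUDA, ... in this namespace.
instantiate_device_type_tests(TestArithmetic, globals())

if __name__ == "__main__":
    run_tests()
```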
========================================================================================================================================= SOURCE CODE FILE: common_dist_composable.py LINES: 1 SIZE: 3.60 KB PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\common_dist_composable.py ENCODING: utf-8 ```py # mypy: ignore-errors # Owner(s): ["oncall: distributed"] import torch import torch.nn as nn class UnitModule(nn.Module): def __init__(self, device: torch.device): super().__init__() self.l1 = nn.Linear(100, 100, device=device) self.seq = nn.Sequential( nn.ReLU(), nn.Linear(100, 100, device=device), nn.ReLU(), ) self.l2 = nn.Linear(100, 100, device=device) def forward(self, x): return self.l2(self.seq(self.l1(x))) class CompositeModel(nn.Module): def __init__(self, device: torch.device): super().__init__() self.l1 = nn.Linear(100, 100, device=device) self.u1 = UnitModule(device) self.u2 = UnitModule(device) self.l2 = nn.Linear(100, 100, device=device) def forward(self, x): return self.l2(self.u2(self.u1(self.l1(x)))) class UnitParamModule(nn.Module): def __init__(self, device: torch.device): super().__init__() self.l = nn.Linear(100, 100, device=device) self.seq = nn.Sequential( nn.ReLU(), nn.Linear(100, 100, device=device), nn.ReLU(), ) self.p = nn.Parameter(torch.randn((100, 100), device=device)) def forward(self, x): return torch.mm(self.seq(self.l(x)), self.p) class CompositeParamModel(nn.Module): def __init__(self, device: torch.device): super().__init__() self.l = nn.Linear(100, 100, device=device) self.u1 = UnitModule(device) self.u2 = UnitModule(device) self.p = nn.Parameter(torch.randn((100, 100), device=device)) self.register_buffer( "buffer", torch.randn((100, 100), device=device), persistent=True ) def forward(self, x): a = self.u2(self.u1(self.l(x))) b = self.p return torch.mm(a, b) class FakeSequential(nn.Module): # Define this class to achieve a desired nested wrapping using the module # wrap policy with `nn.Sequential` def __init__(self, *modules: tuple[nn.Module, ...]) -> None: super().__init__() self._module_sequence = list(modules) def forward(self, x: torch.Tensor) -> torch.Tensor: for module in self._module_sequence: x = module(x) return x class NestedSequentialModel(nn.Module): def __init__(self, device: torch.device) -> None: super().__init__() # This nested structure exercises traversal order to catch differences # between valid traversals (e.g. BFS and DFS variations). self.seq1 = nn.Sequential( nn.Linear(1, 1, device=device), FakeSequential( nn.Linear(1, 1, device=device), nn.ReLU(), FakeSequential( nn.Linear(1, 1, device=device), ), nn.ReLU(), ), nn.Linear(1, 2, device=device), ) self.lin = nn.Linear(2, 2, device=device) self.seq2 = nn.Sequential( nn.ReLU(), nn.Linear(2, 3, device=device), FakeSequential( nn.Linear(3, 2, bias=False, device=device), nn.Linear(2, 4, bias=False, device=device), ), ) # FIXME(rec): forward() is not a method, it's a local function inside __init__ # that is never used. It should probabkly be outdented by four spaces, or removed. def forward(self, x: torch.Tensor) -> torch.Tensor: return self.seq2(self.lin(self.seq1(x))) ```
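The toy modules above are consumed by composable-API (e.g. `fully_shard` / `checkpoint`) tests. A quick, self-contained smoke run of `CompositeParamModel` outside any distributed setup (illustrative only; the CPU device and input shape are assumptions, not taken from the file):

```py
# Illustrative sketch: exercising CompositeParamModel on CPU.
import torch
from torch.testing._internal.common_dist_composable import CompositeParamModel

device = torch.device("cpu")  # the real tests typically use "cuda"
model = CompositeParamModel(device)
x = torch.randn(4, 100, device=device)  # all submodules are 100 -> 100
loss = model(x).sum()
loss.backward()  # the raw nn.Parameter `p` receives a gradient as well
assert model.p.grad is not None
```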
===================================================================================================================================== SOURCE CODE FILE: common_distributed.py LINES: 12 SIZE: 56.43 KB PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\common_distributed.py ENCODING: utf-8 ```py # mypy: ignore-errors import abc import faulthandler import itertools import logging import multiprocessing import os import queue import subprocess import sys import tempfile import threading import time import traceback import types import unittest from contextlib import contextmanager from dataclasses import dataclass from datetime import timedelta from enum import Enum from functools import partial, reduce, wraps from io import StringIO from typing import NamedTuple, Optional, Union, Any, Callable from unittest.mock import patch from torch._logging._internal import trace_log import torch import torch._dynamo.test_case import torch.cuda.nccl import torch.distributed as c10d from torch._C._autograd import DeviceType from torch._C._distributed_c10d import _SymmetricMemory import torch.nn as nn from torch.testing._internal.common_utils import ( FILE_SCHEMA, find_free_port, IS_SANDCASTLE, retry_on_connect_failures, skip_but_pass_in_sandcastle, skip_but_pass_in_sandcastle_if, TEST_WITH_ROCM, TEST_WITH_TSAN, TestCase, run_tests, TEST_HPU, TEST_XPU, ) from torch.testing._internal.distributed.multi_threaded_pg import ( _install_threaded_pg, _uninstall_threaded_pg, ProcessLocalGroup, ) import operator logging.basicConfig(level=logging.INFO) logger = logging.getLogger(__name__) class TestSkip(NamedTuple): exit_code: int message: str TEST_SKIPS = { "backend_unavailable": TestSkip( 72, "Skipped because distributed backend is not available." ), "small_worldsize": TestSkip(73, "Skipped due to small world size."), "odd_worldsize": TestSkip(87, "Skipped due to odd world size."), "no_cuda": TestSkip(74, "CUDA is not available."), "multi-gpu-1": TestSkip(75, "Need at least 1 CUDA device"), "multi-gpu-2": TestSkip(77, "Need at least 2 CUDA devices"), "multi-gpu-3": TestSkip(80, "Need at least 3 CUDA devices"), "multi-gpu-4": TestSkip(81, "Need at least 4 CUDA devices"), "multi-gpu-5": TestSkip(82, "Need at least 5 CUDA devices"), "multi-gpu-6": TestSkip(83, "Need at least 6 CUDA devices"), "multi-gpu-7": TestSkip(84, "Need at least 7 CUDA devices"), "multi-gpu-8": TestSkip(85, "Need at least 8 CUDA devices"), "nccl": TestSkip(76, "c10d not compiled with NCCL support"), "skipIfRocm": TestSkip(78, "Test skipped for ROCm"), "no_peer_access": TestSkip(79, "Test skipped because no GPU peer access"), "generic": TestSkip( 86, "Test skipped at subprocess level, look at subprocess log for skip reason" ), "importerror": TestSkip(88, "Test skipped due to missing import"), "no_accelerator": TestSkip(89, "accelerator is not available."), } @dataclass class DistTestCases: # Backends that do not support a specific collective skip_collective = {} skip_collective["allgather_coalesced"] = {"nccl", "mpi", "ucc"} skip_collective["reduce"] = set() skip_collective["sendrecv anysource"] = {"nccl", "ucc"} skip_collective["cpu barrier"] = {"nccl", "ucc"} # Sets showing that something is implemented backend_feature = {} backend_feature["gpu"] = {"nccl", "gloo", "ucc"} backend_feature["cuda"] = {"nccl", "gloo", "ucc"} backend_feature["ddp"] = {"nccl", "gloo", "ucc"} backend_feature["subgroup"] = {"nccl", "gloo", "ucc"} backend_feature["plugin"] = set() if TEST_HPU: backend_feature["hpu"] = {"hccl"} if TEST_XPU: 
backend_feature["xpu"] = {"xccl"} def skip_if_no_gpu(func): """Skips if the world size exceeds the number of GPUs, ensuring that if the test is run, each rank has its own GPU via ``torch.cuda.device(rank)``.""" @wraps(func) def wrapper(*args, **kwargs): if not torch.cuda.is_available(): sys.exit(TEST_SKIPS["no_cuda"].exit_code) world_size = int(os.environ["WORLD_SIZE"]) if torch.cuda.device_count() < world_size: sys.exit(TEST_SKIPS[f"multi-gpu-{world_size}"].exit_code) if TEST_HPU and torch.hpu.device_count < world_size: sys.exit(TEST_SKIPS[f"multi-gpu-{world_size}"].exit_code) if TEST_XPU and torch.xpu.device_count < world_size: sys.exit(TEST_SKIPS[f"multi-xpu-{world_size}"].exit_code) return func(*args, **kwargs) return wrapper # TODO (kwen2501): what is the purpose of this decorator? Tests with this # decorator were always skipped. So they may be outdated already. # Oct 2024: bumping the small-world criteria to < 8, as we are increasing the # number of GPUs in CI from 2 to 4, and we need to continue skipping those tests # to keep CI green. But this is just a temporary solution. We should clean up # those tests somehow. def skip_if_small_worldsize(func): @wraps(func) def wrapper(*args, **kwargs): if (os.environ["BACKEND"] != "mpi") and int(os.environ["WORLD_SIZE"]) < 8: sys.exit(TEST_SKIPS["small_worldsize"].exit_code) return func(*args, **kwargs) return wrapper def skip_if_odd_worldsize(func): @wraps(func) def wrapper(*args, **kwargs): if (os.environ["BACKEND"] != "mpi") and int(os.environ["WORLD_SIZE"]) % 2 == 1: sys.exit(TEST_SKIPS["odd_worldsize"].exit_code) return func(*args, **kwargs) return wrapper def require_n_gpus_for_nccl_backend(n, backend): def decorator(func): @wraps(func) def wrapper(*args, **kwargs): if backend == "nccl" and torch.cuda.device_count() < n: sys.exit(TEST_SKIPS[f"multi-gpu-{n}"].exit_code) else: return func(*args, **kwargs) return wrapper return decorator def import_transformers_or_skip(): def decorator(func): @wraps(func) def wrapper(*args, **kwargs): try: from transformers import ( # noqa: F401 AutoModelForMaskedLM, BertConfig, ) return func(*args, **kwargs) except ImportError: sys.exit(TEST_SKIPS["importerror"].exit_code) return wrapper return decorator def at_least_x_gpu(x): return torch.cuda.is_available() and torch.cuda.device_count() >= x def skip_if_lt_x_gpu(x): def decorator(func): @wraps(func) def wrapper(*args, **kwargs): if torch.cuda.is_available() and torch.cuda.device_count() >= x: return func(*args, **kwargs) if TEST_HPU and torch.hpu.device_count() >= x: return func(*args, **kwargs) if TEST_XPU and torch.xpu.device_count() >= x: return func(*args, **kwargs) sys.exit(TEST_SKIPS[f"multi-gpu-{x}"].exit_code) return wrapper return decorator # This decorator helps avoiding initializing cuda while testing other backends def nccl_skip_if_lt_x_gpu(backend, x): def decorator(func): @wraps(func) def wrapper(*args, **kwargs): if backend != "nccl": return func(*args, **kwargs) if torch.cuda.is_available() and torch.cuda.device_count() >= x: return func(*args, **kwargs) sys.exit(TEST_SKIPS[f"multi-gpu-{x}"].exit_code) return wrapper return decorator def verify_ddp_error_logged(model_DDP, err_substr): # Verify error was logged in ddp_logging_data. ddp_logging_data = model_DDP._get_ddp_logging_data() assert "iteration" in ddp_logging_data assert "has_error" in ddp_logging_data assert "error" in ddp_logging_data logging_err = ddp_logging_data["error"] # Remove C++ stacktrace if needed. 
actual = ( err_substr if err_substr.find("\nException raised from ") == -1 else err_substr.split("\nException raised from ")[0] ) assert ( actual in logging_err ), f"Did not find expected {actual} in ddp logging data error: {logging_err}" def with_nccl_blocking_wait(func): """ Convenience decorator to set/unset TORCH_NCCL_BLOCKING_WAIT flag. Note that use of this decorator will override the setting of TORCH_NCCL_ASYNC_ERROR_HANDLING for the particular test. After the test, both TORCH_NCCL_BLOCKING_WAIT and TORCH_NCCL_ASYNC_ERROR_HANDLING will be restored to their original values. """ @wraps(func) def wrapper(*args, **kwargs): # Save and unset TORCH_NCCL_ASYNC_ERROR_HANDLING try: cached_nccl_async_error_handling: Union[str, None] = os.environ[ "TORCH_NCCL_ASYNC_ERROR_HANDLING" ] del os.environ["TORCH_NCCL_ASYNC_ERROR_HANDLING"] except KeyError: # TORCH_NCCL_ASYNC_ERROR_HANDLING was unset cached_nccl_async_error_handling = None # Save val of TORCH_NCCL_BLOCKING_WAIT and set it. try: cached_nccl_blocking_wait: Union[str, None] = os.environ[ "TORCH_NCCL_BLOCKING_WAIT" ] except KeyError: cached_nccl_blocking_wait = None finally: os.environ["TORCH_NCCL_BLOCKING_WAIT"] = "1" try: ret = func(*args, **kwargs) return ret finally: # restore old values. if cached_nccl_async_error_handling is not None: os.environ[ "TORCH_NCCL_ASYNC_ERROR_HANDLING" ] = cached_nccl_async_error_handling if cached_nccl_blocking_wait is not None: os.environ["TORCH_NCCL_BLOCKING_WAIT"] = cached_nccl_blocking_wait return wrapper def with_dist_debug_levels(levels): """ Runs a test for each distributed debug level specified in levels. """ def decorator(func): @wraps(func) def wrapper(*args, **kwargs): old_level = os.environ.get("TORCH_DISTRIBUTED_DEBUG", None) for level in levels: os.environ["TORCH_DISTRIBUTED_DEBUG"] = level c10d.set_debug_level_from_env() ret = func(*args, **kwargs) c10d.barrier() if old_level is not None: os.environ["TORCH_DISTRIBUTED_DEBUG"] = old_level # Only returns test return for last test, but since these are # unittests the return value is not really used and earlier tests # would've raised had they failed. 
return ret return wrapper return decorator def requires_gloo(): return skip_but_pass_in_sandcastle_if( not c10d.is_gloo_available(), "c10d was not compiled with the Gloo backend", ) def requires_nccl_version(version, msg): if not c10d.is_nccl_available(): return skip_but_pass_in_sandcastle( "c10d was not compiled with the NCCL backend", ) else: return skip_but_pass_in_sandcastle_if( torch.cuda.nccl.version() < version, f"Requires NCCL version greater than or equal to: {version}, found: {torch.cuda.nccl.version()}, reason: {msg}", ) def requires_nccl(): return skip_but_pass_in_sandcastle_if( not c10d.is_nccl_available(), "c10d was not compiled with the NCCL backend", ) def requires_ucc(): return skip_but_pass_in_sandcastle_if( not c10d.is_ucc_available(), "c10d was not compiled with the UCC backend", ) def requires_mpi(): return skip_but_pass_in_sandcastle_if( not c10d.is_mpi_available(), "c10d was not compiled with the MPI backend", ) def requires_multicast_support(): has_multicast_support = ( torch.cuda.is_available() and _SymmetricMemory.has_multicast_support(DeviceType.CUDA, 0) ) return skip_but_pass_in_sandcastle_if( not has_multicast_support, "multicast support is not available", ) def skip_if_rocm_multiprocess(func): """Skips a test for ROCm""" func.skip_if_rocm_multiprocess = True @wraps(func) def wrapper(*args, **kwargs): if not TEST_WITH_ROCM: return func(*args, **kwargs) sys.exit(TEST_SKIPS["skipIfRocm"].exit_code) return wrapper def skip_if_win32(): return skip_but_pass_in_sandcastle_if( sys.platform == "win32", "This unit test case is not supported on Windows platform", ) def sm_is_or_higher_than(device: torch.device, major: int, minor: int) -> bool: """ Returns True if the device's compute capability is (major, minor) or higher. Error out if the device is not a CUDA device. Returns False if device is a RoCM device. """ if device.type != "cuda": raise ValueError("sm_is_or_later() is only supported for CUDA devices") if torch.version.hip is not None: # ROCm devices may have different compute capability codes return False return torch.cuda.get_device_capability(device) >= (major, minor) @retry_on_connect_failures def create_tcp_store( addr="localhost", world_size=1, is_master=True, timeout=timedelta(minutes=5), wait_for_workers=True, jit_class=False, use_libuv=True, ): """ Creates a TCP store. Retries if the chosen port is already in use. """ port = find_free_port() if jit_class: timeout_millisecond = int(timeout / timedelta(milliseconds=1)) return torch.classes.dist_c10d.TCPStore( addr, port, world_size, is_master, timeout_millisecond ) else: return c10d.TCPStore( addr, port, world_size, is_master, wait_for_workers=wait_for_workers, use_libuv=use_libuv ) if TEST_WITH_TSAN: # TSAN runs much slower. 
TIMEOUT_DEFAULT = 500 else: TIMEOUT_DEFAULT = int(os.getenv('DISTRIBUTED_TESTS_DEFAULT_TIMEOUT', '300')) TIMEOUT_OVERRIDE = {"test_ddp_uneven_inputs": 400} # https://github.com/pytorch/pytorch/issues/75665 if TEST_WITH_ROCM: TIMEOUT_OVERRIDE["test_join_kwargs"] = 200 def create_device(interface=None): if sys.platform == "win32" or interface is None: return c10d.ProcessGroupGloo.create_device(hostname="127.0.0.1") else: return c10d.ProcessGroupGloo.create_device(interface=interface) def get_timeout(test_id) -> int: return TIMEOUT_OVERRIDE.get(test_id.split(".")[-1], TIMEOUT_DEFAULT) @contextmanager def captured_output(): new_out, new_err = StringIO(), StringIO() old_out, old_err = sys.stdout, sys.stderr try: sys.stdout, sys.stderr = new_out, new_err yield sys.stdout, sys.stderr finally: sys.stdout, sys.stderr = old_out, old_err def simple_sparse_reduce_tests(rank: int, world_size: int, num_inputs: int = 1): """ Generate a number of basic test cases for sparse reduction. These cover tensors with a varying number of sparse dimensions and a varying number of dense dimensions. The only reduction operation we support is sum. """ def generate(rank: int, world_size: int, sparse_dims: int = 1, dense_dims: int = 0): # First sparse dimension is [0..rank]. # Subsequent dimensions are always 0, so we know there is # a non-empty intersection between any two sparse tensors. indices = torch.reshape(torch.arange(rank + 1), (1, rank + 1)) shape = [world_size] + [2 for _ in range(dense_dims)] for _ in range(sparse_dims - 1): indices = torch.cat((indices, torch.zeros(1, rank + 1))) shape.append(world_size) values = torch.ones([rank + 1] + [2 for _ in range(dense_dims)]) return torch.sparse_coo_tensor(indices, values, shape) def compute_sum(fn, world_size: int): return reduce( operator.add, [fn(rank, world_size) for rank in range(world_size)] ) return [ ( [ fn(num_inputs * rank + i, num_inputs * world_size) for i in range(num_inputs) ], [compute_sum(fn, num_inputs * world_size) for i in range(num_inputs)], ) for fn in [ partial(generate, sparse_dims=1), partial(generate, sparse_dims=2), partial(generate, sparse_dims=3), partial(generate, dense_dims=1), partial(generate, dense_dims=2), partial(generate, dense_dims=3), ] ] # HELPER FOR MULTIGPU TESTS def init_multigpu_helper(world_size: int, backend: str): """Multigpu tests are designed to simulate the multi nodes with multi GPUs on each node. Nccl backend requires equal #GPUs in each process. On a single node, all visible GPUs are evenly divided to subsets, each process only uses a subset. """ nGPUs = torch.cuda.device_count() if TEST_HPU: nGPUs = torch.hpu.device_count() if TEST_XPU: nGPUs = torch.xpu.device_count() visible_devices = range(nGPUs) # If rank is less than or equal to number of available GPU's # then each rank can be mapped to corresponding GPU. nGPUs_per_process = 1 if world_size > nGPUs: nGPUs_per_process = nGPUs // world_size rank_to_GPU = { i: list(visible_devices[i * nGPUs_per_process : (i + 1) * nGPUs_per_process]) for i in range(world_size) } return rank_to_GPU tmp_dir: Optional[tempfile.TemporaryDirectory] = None def initialize_temp_directories(init_method: Optional[str] = None) -> None: global tmp_dir tmp_dir = tempfile.TemporaryDirectory() os.environ["TEMP_DIR"] = tmp_dir.name os.mkdir(os.path.join(tmp_dir.name, "barrier")) os.mkdir(os.path.join(tmp_dir.name, "test_dir")) init_dir_path = os.path.join(tmp_dir.name, "init_dir") os.mkdir(init_dir_path) # Set init method if specified. 
if init_method is not None: os.environ["INIT_METHOD"] = init_method else: os.environ["INIT_METHOD"] = FILE_SCHEMA + os.path.join( init_dir_path, "shared_init_file" ) def cleanup_temp_dir() -> None: if tmp_dir is not None: tmp_dir.cleanup() # Most tests operate with this worldsize DEFAULT_WORLD_SIZE = 4 # [How does MultiProcessTestCase work?] # Each MultiProcessTestCase instance uses 1 + `world_size()` processes, by # default `world_size()` returns 4. Let's take `test_rpc_spawn.py` as an # example which inherits from this class. Its `Setup()` methods calls into # `MultiProcessTestCase._spawn_processes()` which spawns `world_size()` # subprocesses. During the spawn, the main process passes the test name to # subprocesses, and the name is acquired from self.id(). The subprocesses # then use the provided test function name to retrieve the function attribute # from the test instance and run it. The main process simply waits for all # subprocesses to join. class MultiProcessTestCase(TestCase): MAIN_PROCESS_RANK = -1 # This exit code is used to indicate that the test code had an error and # exited abnormally. There are certain tests that might use sys.exit() to # simulate failures and in those cases, we can't have an exit code of 0, # but we still want to ensure we didn't run into any other errors. TEST_ERROR_EXIT_CODE = 10 # do not early terminate for distributed tests. def _should_stop_test_suite(self) -> bool: return False # Many test cases init a process group but do not destroy it. This property # determines whether this base test class should call # `destroy_process_group` on behalf of the test. Its value is customizable # by derived TestCase's but it is a pan-TestCase value (cannot be customized # for each test). @property def destroy_pg_upon_exit(self) -> bool: return True @property def world_size(self) -> int: return DEFAULT_WORLD_SIZE def join_or_run(self, fn): @wraps(fn) def wrapper(self): if self.rank == self.MAIN_PROCESS_RANK: self._join_processes(fn) else: fn() return types.MethodType(wrapper, self) # The main process spawns N subprocesses that run the test. # Constructor patches current instance test method to # assume the role of the main process and join its subprocesses, # or run the underlying test function. def __init__(self, method_name: str = "runTest", methodName: str = "runTest") -> None: # methodName is the correct naming in unittest and testslide uses keyword arguments. # So we need to use both to 1) not break BC and, 2) support testslide. if methodName != "runTest": method_name = methodName super().__init__(method_name) try: fn = getattr(self, method_name) setattr(self, method_name, self.join_or_run(fn)) except AttributeError as e: if methodName != 'runTest': # we allow instantiation with no explicit method name # but not an *incorrect* or missing method name raise ValueError(f"no such test method in {self.__class__}: {methodName}") from e def setUp(self) -> None: super().setUp() self.skip_return_code_checks = [] # type: ignore[var-annotated] self.processes = [] # type: ignore[var-annotated] self.rank = self.MAIN_PROCESS_RANK self.file_name = tempfile.NamedTemporaryFile(delete=False).name # pid to pipe consisting of error message from process. self.pid_to_pipe = {} # type: ignore[var-annotated] def tearDown(self) -> None: super().tearDown() for p in self.processes: p.terminate() # Each Process instance holds a few open file descriptors. The unittest # runner creates a new TestCase instance for each test method and keeps # it alive until the end of the entire suite. 
We must thus reset the # processes to prevent an effective file descriptor leak. self.processes = [] def _current_test_name(self) -> str: # self.id() == e.g. '__main__.TestDistributed.TestAdditive.test_get_rank' return self.id().split(".")[-1] def _start_processes(self, proc) -> None: self.processes = [] for rank in range(int(self.world_size)): parent_conn, child_conn = torch.multiprocessing.Pipe() process = proc( target=self.__class__._run, name="process " + str(rank), args=(rank, self._current_test_name(), self.file_name, child_conn), kwargs={ "fake_pg": getattr(self, "fake_pg", False), } ) process.start() logger.info("Started process %s with pid %s", rank, process.pid) self.pid_to_pipe[process.pid] = parent_conn self.processes.append(process) def _spawn_processes(self) -> None: proc = torch.multiprocessing.get_context("spawn").Process self._start_processes(proc) class Event(Enum): GET_TRACEBACK = 1 @staticmethod def _event_listener(parent_pipe, signal_pipe, rank: int): logger.info("Starting event listener thread for rank %s", rank) while True: ready_pipes = multiprocessing.connection.wait([parent_pipe, signal_pipe]) if parent_pipe in ready_pipes: if parent_pipe.closed: logger.info( "Pipe closed for process %s, stopping event listener thread", rank ) return event = parent_pipe.recv() logger.info("Received event %s on process %s", event, rank) if event == MultiProcessTestCase.Event.GET_TRACEBACK: # Return traceback to the parent process. with tempfile.NamedTemporaryFile(mode="r+") as tmp_file: faulthandler.dump_traceback(tmp_file) # Flush buffers and seek to read from the beginning tmp_file.flush() tmp_file.seek(0) parent_pipe.send(tmp_file.read()) logger.info("Process %s sent traceback", rank) if signal_pipe in ready_pipes: return @classmethod def _run(cls, rank: int, test_name: str, file_name: str, parent_pipe, **kwargs) -> None: self = cls(test_name) self.rank = rank self.file_name = file_name self.run_test(test_name, parent_pipe) def run_test(self, test_name: str, parent_pipe) -> None: # Start event listener thread. signal_recv_pipe, signal_send_pipe = torch.multiprocessing.Pipe(duplex=False) event_listener_thread = threading.Thread( target=MultiProcessTestCase._event_listener, args=(parent_pipe, signal_recv_pipe, self.rank), daemon=True, ) event_listener_thread.start() if sys.platform != "win32" and sys.platform != "darwin": # Register signal handler to dump stack traces on FATALs. # Windows and MacOS do not support the signal handlers. torch._C._set_print_stack_traces_on_fatal_signal(True) # Show full C++ stacktraces when a Python error originating from C++ is raised. os.environ["TORCH_SHOW_CPP_STACKTRACES"] = "1" # self.id() == e.g. '__main__.TestDistributed.test_get_rank' # We're retrieving a corresponding test and executing it. try: getattr(self, test_name)() except unittest.SkipTest as se: logger.info( "Process %s skipping test %s for following reason: %s", self.rank, test_name, str(se) ) sys.exit(TEST_SKIPS["generic"].exit_code) except Exception: logger.error( "Caught exception: \n%s exiting " "process %s with exit code: %s", traceback.format_exc(), self.rank, MultiProcessTestCase.TEST_ERROR_EXIT_CODE ) # Send error to parent process. parent_pipe.send(traceback.format_exc()) sys.exit(MultiProcessTestCase.TEST_ERROR_EXIT_CODE) finally: if signal_send_pipe is not None: signal_send_pipe.send(None) assert event_listener_thread is not None event_listener_thread.join() # Close pipe after done with test. 
parent_pipe.close() if self.destroy_pg_upon_exit: try: # Some tests do destroy the pgs, and destroy can't be called twice. # This avoids spewing warnings about improperly shutting down. c10d.destroy_process_group() except (AssertionError, ValueError): pass def _get_timedout_process_traceback(self) -> None: pipes = [] for i, process in enumerate(self.processes): if process.exitcode is None: pipe = self.pid_to_pipe[process.pid] try: pipe.send(MultiProcessTestCase.Event.GET_TRACEBACK) pipes.append((i, pipe)) except ConnectionError as e: logger.error( "Encountered error while trying to get traceback for process %s: %s", i, e ) # Wait for results. for rank, pipe in pipes: try: # Wait for traceback if pipe.poll(5): if pipe.closed: logger.info( "Pipe closed for process %s, cannot retrieve traceback", rank ) continue traceback = pipe.recv() logger.error( "Process %s timed out with traceback: \n\n%s", rank, traceback ) else: logger.error( "Could not retrieve traceback for timed out process: %s", rank ) except ConnectionError as e: logger.error( "Encountered error while trying to get traceback for process %s: %s", rank, e ) def _join_processes(self, fn) -> None: timeout = get_timeout(self.id()) start_time = time.time() subprocess_error = False try: while True: # check to see if any subprocess exited with an error early. for (i, p) in enumerate(self.processes): # This is the exit code processes exit with if they # encountered an exception. if p.exitcode == MultiProcessTestCase.TEST_ERROR_EXIT_CODE: print( f"Process {i} terminated with exit code {p.exitcode}, terminating remaining processes." ) active_children = torch.multiprocessing.active_children() for ac in active_children: ac.terminate() subprocess_error = True break if subprocess_error: break # All processes have joined cleanly if they all a valid exitcode if all(p.exitcode is not None for p in self.processes): break # Check if we should time out the test. If so, we terminate each process. elapsed = time.time() - start_time if elapsed > timeout: self._get_timedout_process_traceback() print( f"Timing out after {timeout} seconds and killing subprocesses." ) for p in self.processes: p.terminate() break # Sleep to avoid excessive busy polling. time.sleep(0.1) elapsed_time = time.time() - start_time if fn in self.skip_return_code_checks: self._check_no_test_errors(elapsed_time) else: self._check_return_codes(elapsed_time) finally: # Close all pipes for pipe in self.pid_to_pipe.values(): pipe.close() def _check_no_test_errors(self, elapsed_time) -> None: """ Checks that we didn't have any errors thrown in the child processes. """ for i, p in enumerate(self.processes): if p.exitcode is None: raise RuntimeError( f"Process {i} timed out after {elapsed_time} seconds" ) self.assertNotEqual(self.TEST_ERROR_EXIT_CODE, p.exitcode) def _check_return_codes(self, elapsed_time) -> None: """ Checks that the return codes of all spawned processes match, and skips tests if they returned a return code indicating a skipping condition. """ # If no processes are spawned, there is nothing to check. if not self.processes: logger.warning("Note: no subprocesses were spawned, test was likely skipped.") return first_process = self.processes[0] # first, we check if there are errors in actual processes # (via TEST_ERROR_EXIT CODE), and raise an exception for those. # the reason we do this is to attempt to raise a more helpful error # message than "Process x terminated/timed out" # TODO: we should pipe the exception of the failed subprocess here. 
# Currently, the actual exception is displayed as a logging output. errored_processes = [ (i, p) for i, p in enumerate(self.processes) if p.exitcode == MultiProcessTestCase.TEST_ERROR_EXIT_CODE ] if errored_processes: error = "" for i, process in errored_processes: # Get error from pipe. error_message = self.pid_to_pipe[process.pid].recv() error += ( f"Process {i} exited with error code {MultiProcessTestCase.TEST_ERROR_EXIT_CODE} " f"and exception:\n{error_message}\n" ) raise RuntimeError(error) # If no process exited uncleanly, we check for timeouts, and then ensure # each process exited cleanly. for i, p in enumerate(self.processes): if p.exitcode is None: raise RuntimeError( f"Process {i} terminated or timed out after {elapsed_time} seconds" ) self.assertEqual( p.exitcode, first_process.exitcode, msg=f"Expect process {i} exit code to match Process 0 exit code of {first_process.exitcode}, but got {p.exitcode}", ) for skip in TEST_SKIPS.values(): if first_process.exitcode == skip.exit_code: if IS_SANDCASTLE: # Don't use unittest.skip to skip the test on sandcastle # since it creates tasks for skipped tests assuming there # is some follow-up needed. Instead just "pass" the test # with an appropriate message. logger.info( "Skipping %s on sandcastle for the following reason: %s", self.id(), skip.message ) return else: raise unittest.SkipTest(skip.message) self.assertEqual( first_process.exitcode, 0, msg=f"Expected zero exit code but got {first_process.exitcode} for pid: {first_process.pid}", ) @property def is_master(self) -> bool: return self.rank == 0 # Utility base class for distributed Multi Process Test cases # This abstracts the PG creation and deletion, the backends are selected based # on device type. The tests functions can be instantiated per device type using # common_device_type.instantiate_device_type_tests # other backends can add entry in backend() function class DistributedTestBase(MultiProcessTestCase): def setUp(self): super().setUp() self._spawn_processes() def tearDown(self): try: os.remove(self.file_name) except OSError: pass def backend(self, device) -> str: if "cuda" in device: return "nccl" elif "hpu" in device : # intel gaudi return "hccl" elif "xpu" in device: return "xccl" else : return "gloo" def create_pg(self, device): num_visible_devices = torch.get_device_module(device).device_count() store = torch.distributed.FileStore(self.file_name, num_visible_devices) torch.distributed.init_process_group( backend=self.backend(device), world_size=self.world_size, rank=self.rank, store=store ) if "nccl" in self.backend(device) or "xccl" in self.backend(device): torch.accelerator.set_device_index(self.rank) return torch.distributed.distributed_c10d._get_default_group() def rank_to_device(self, device): num_visible_devices = torch.get_device_module(device).device_count() return {i: [i % num_visible_devices] for i in range(self.world_size)} def run_subtests( cls_inst, subtest_config: dict[str, list[Any]], test_fn: Callable, *test_args, **test_kwargs: Any, ): """ Runs a test function given by ``test_fn`` as a subtest according to the configurations specified by ``subtest_config``. This amortizes the costly setup overhead (including process spawn and initializing the process group) over the subtests. Args: subtest_config (Dict[str, List[Any]]): A mapping from subtest keyword argument name to a list of its possible values. test_fn (Callable): A callable that runs the actual test. test_args: Positional arguments to pass to ``test_fn``. 
test_kwargs: Keyword arguments to pass to ``test_fn``. """ # Convert the config mapping to a list to have a fixed order subtest_config_items: list[tuple[str, list[Any]]] = list(subtest_config.items()) subtest_config_keys: list[str] = [item[0] for item in subtest_config_items] subtest_config_values: list[list[Any]] = [item[1] for item in subtest_config_items] for values in itertools.product(*subtest_config_values): # Map keyword to chosen value subtest_kwargs = dict(zip(subtest_config_keys, values)) with cls_inst.subTest(**subtest_kwargs): torch._dynamo.reset() test_fn(*test_args, **test_kwargs, **subtest_kwargs) torch._dynamo.reset() c10d.barrier() # Cannot use functools.cache as it requires python 3.9 EFA_PROBE_RESULT = None def has_efa() -> bool: """ If shell command `fi_info -p efa -t FI_EP_RDM` returns exit code 0 then we assume that the machine has Libfabric EFA interfaces and EFA software components installed, see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/efa-start.html. """ global EFA_PROBE_RESULT if EFA_PROBE_RESULT is not None: return EFA_PROBE_RESULT try: EFA_PROBE_RESULT = ( subprocess.run(["fi_info", "-p", "efa", "-t", "FI_EP_RDM"], check=False).returncode == 0 ) except FileNotFoundError: EFA_PROBE_RESULT = False return EFA_PROBE_RESULT def tp_transports(): """ If the machine has Libfabric EFA interfaces and EFA software components installed it may cause 'RuntimeError: In operator() at tensorpipe/common/ibv.h:172 "": Operation not supported' if tensorpipe uses InfiniBand transport, so we exclude it from tensorpipe transports, see https://github.com/pytorch/pytorch/issues/73885 and https://github.com/pytorch/pytorch/issues/65022 """ return ["shm", "uv"] if has_efa() else None def spawn_threads_and_init_comms( func=None, timeout=TIMEOUT_DEFAULT, world_size=DEFAULT_WORLD_SIZE ): """ Wrapper to use with a test method """ if func is None: return partial( spawn_threads_and_init_comms, timeout=timeout, world_size=world_size ) def _run_test_method_with_multi_threads(world_size, callback): world = _install_threaded_pg() global_store = c10d.HashStore() def world_is_valid(): return world == c10d.distributed_c10d._world def worker(rank, world_pg, store): c10d.init_process_group( backend="threaded", rank=rank, world_size=world_size, store=store ) try: callback() except BaseException as ex: # Exceptions are handled in MultiThreadedTestCase MultiThreadedTestCase.exception_queue.put((rank, sys.exc_info())) ProcessLocalGroup.exception_handle(ex) # trigger _terminate event and awaken worker threads finally: if world_is_valid(): c10d.destroy_process_group() threads = [] for rank in range(world_size): t = threading.Thread(target=worker, args=(rank, world, global_store)) t.start() threads.append(t) return threads @wraps(func) def wrapper(self, *args, **kwargs): # TODO: get test name from kwargs torch._C._distributed_c10d._set_thread_isolation_mode(True) try: threads = _run_test_method_with_multi_threads(world_size, lambda: func(self, *args, **kwargs)) # join and error handling MultiThreadedTestCase._join_threads(threads, func) finally: torch._C._distributed_c10d._set_thread_isolation_mode(False) return wrapper class MultiThreadedTestCase(TestCase): """ Test runner that runs all tests with the in-proc process group using multiple threads with the threaded process group. Each test spawns world_size threads and run the test method in each thread. Difference from regular MultiProcess test runner: Must explicitly defines SetUp and call self._spawn_threads() to run the tests. 
Cannot use setUp / tearDown (must use perThreadSetup / perThreadShutdown) to set up / tear down each thread when running each test. No global state possible How bad of a limitation is this? """ exception_queue = queue.Queue() MAIN_THREAD_RANK = -1 def join_or_run(self, fn): @wraps(fn) def wrapper(self): if self.rank == self.MAIN_THREAD_RANK: self._join_threads(self.threads, fn) else: fn() return types.MethodType(wrapper, self) def __init__(self, method_name: str = "runTest", methodName: str = "runTest") -> None: # methodName is the correct naming in unittest and testslide uses keyword arguments. # So we need to use both to 1) not break BC and, 2) support testslide. if methodName != "runTest": method_name = methodName super().__init__(method_name) try: fn = getattr(self, method_name) setattr(self, method_name, self.join_or_run(fn)) except AttributeError as e: if methodName != 'runTest': # we allow instantiation with no explicit method name # but not an *incorrect* or missing method name raise ValueError(f"no such test method in {self.__class__}: {methodName}") from e def perThreadSetUp(self): # super().setUp() # TestCase.setUp() calls torch.manual_seed() pass def perThreadTearDown(self): pass def setUp(self) -> None: """ setUp only set up things in the main thread, if you want to configure things in the spawned threads, use perThreadSetUp """ super().setUp() self.rank = self.MAIN_THREAD_RANK self.threads = [] # Show full C++ stacktraces when a Python error originating from C++ is raised. os.environ["TORCH_SHOW_CPP_STACKTRACES"] = "1" def tearDown(self): """ tearDown only set up things in the main thread, if you want to configure things in the spawned threads, use perThreadTearDown """ super().tearDown() self.threads = [] def _spawn_threads(self): """ class method to spawn threads and run test, use this method in the SetUp of your TestCase """ torch._C._distributed_c10d._set_thread_isolation_mode(True) test_name = self._current_test_name # for each test case, we need to create thread local world, and a global store world = _install_threaded_pg() self.__class__.global_store = c10d.HashStore() def world_is_valid(): return world == c10d.distributed_c10d._world if not world_is_valid(): raise RuntimeError("Invalid world") for rank in range(self.world_size): t = threading.Thread(target=self.__class__._run, args=(test_name, rank, self.world_size)) t.start() self.threads.append(t) @classmethod def _run(cls, test_name, rank, world_size, **kwargs): self = cls(test_name) self.rank = rank # precision/rel_tol is a thread-local setting since it may be overridden per test, need to make # every thread have the same value. This would be relevant when we use op db tests, where it # needs those states to be set i.e. using instantiate_device_type_tests() # TODO: figure out a better way to do this if hasattr(self, "_tls"): self._tls = threading.local() self._tls.precision = TestCase._precision self._tls.rel_tol = TestCase._rel_tol self.run_test_with_threaded_pg(test_name, rank, world_size) def run_test_with_threaded_pg(self, test_name, rank, world_size): """ Run the current test associated with `test_name` using the threaded process group. 
""" c10d.init_process_group( backend="threaded", rank=rank, world_size=world_size, store=self.__class__.global_store ) self.perThreadSetUp() try: getattr(self, test_name)() except BaseException as ex: self.exception_queue.put((rank, sys.exc_info())) ProcessLocalGroup.exception_handle(ex) # trigger _terminate event and awaken worker threads finally: c10d.destroy_process_group() self.perThreadTearDown() @classmethod def _join_threads(cls, threads, fn): timeout = TIMEOUT_DEFAULT try: for idx, thread in enumerate(threads): thread.join(max(0, timeout)) if thread.is_alive(): MultiThreadedTestCase.exception_queue.put( ( idx, ( TimeoutError, TimeoutError( f"Rank failed to join in under {timeout} seconds" ), None, ), ) ) ProcessLocalGroup.reset() failed_ranks = [] while not cls.exception_queue.empty(): failure = cls.exception_queue.get() failed_ranks.append(failure) finally: _uninstall_threaded_pg() torch._C._distributed_c10d._set_thread_isolation_mode(False) cls._check_return_codes(failed_ranks, timeout, fn) @classmethod def _check_return_codes(cls, failed_ranks, timeout, fn): # Print based on exceptions raised from threads # SkipTest: print info for each thread # TimeoutError: raise RuntimeError for any timed out thread # Normal Exception: print error for each thread that raises exception # and raise a RuntimeError error_msg = "" skip_code = -1 for rank, exc_info in failed_ranks: exc = exc_info[1] if isinstance(exc, unittest.SkipTest): logger.info( "Thread %s skipping test %s for following reason: %s", rank, fn, str(exc) ) if skip_code < 0: skip_code = TEST_SKIPS["generic"].exit_code elif isinstance(exc, TimeoutError): msg = f"Thread {rank} terminated or timed out after {timeout} seconds\n" logger.error(msg) raise RuntimeError(msg) elif isinstance(exc, Exception): msg = "".join(traceback.format_exception(*exc_info)) logger.error( "Caught exception: \n%s exiting thread %s", msg, rank ) error_msg += ( f"Thread {rank} exited with exception:\n{msg}\n" ) elif isinstance(exc, SystemExit): if type(exc.code) == int and skip_code < 0: skip_code = exc.code # check exceptions if len(error_msg) > 0: raise RuntimeError(error_msg) # check skip if skip_code > 0: for skip in TEST_SKIPS.values(): if skip_code == skip.exit_code: if IS_SANDCASTLE: # "pass" the test with an appropriate message. logger.info( "Skipping %s on sandcastle for the following reason: %s", fn, skip.message ) return else: raise unittest.SkipTest(skip.message) @property def world_size(self) -> int: return DEFAULT_WORLD_SIZE @property def _current_test_name(self) -> str: # self.id() == e.g. 
'__main__.TestDistributed.TestAdditive.test_get_rank' return self.id().split(".")[-1] def assertEqualOnRank(self, x, y, msg=None, *, rank=0): """ The reason why we have this util function instead of self.assertEqual is all threads are sharing one CPU RNG so the assertion result is only reliable on rank 0 """ if self.rank == rank: self.assertEqual(x, y, msg) def assertNotEqualOnRank(self, x, y, msg=None, *, rank=0): if self.rank == rank: self.assertNotEqual(x, y) class SaveForwardInputsModule(nn.Module): def __init__( self, forward_inputs: dict[nn.Module, torch.Tensor], cast_forward_inputs: bool, ) -> None: super().__init__() self.l = nn.Linear(100, 100) self.forward_inputs = forward_inputs self.cast_forward_inputs = cast_forward_inputs def forward(self, x: torch.Tensor) -> torch.Tensor: self.forward_inputs[self] = x return self.l(x.to(self.l.weight.dtype) if self.cast_forward_inputs else x) class SaveForwardInputsModel(nn.Module): def __init__( self, forward_inputs: dict[nn.Module, torch.Tensor], cast_forward_inputs: bool, ) -> None: super().__init__() self.c1 = SaveForwardInputsModule(forward_inputs, cast_forward_inputs) self.c2 = SaveForwardInputsModule(forward_inputs, cast_forward_inputs) self.forward_inputs = forward_inputs def forward(self, x: torch.Tensor) -> torch.Tensor: self.forward_inputs[self] = x return self.c2(self.c1(x)) @contextmanager def _dynamo_dist_per_rank_init(rank, world_size, init_pg=True, fake_pg=False): # To avoid multiple inheritance from _dynamo.test_case.TestCase and MultiProcessTestCase, # Just manually implement the most important part of the dynamo behavior to reset/clear. if not fake_pg: torch.accelerator.set_device_index(rank) os.environ['MASTER_ADDR'] = 'localhost' os.environ['MASTER_PORT'] = '6789' if init_pg: if fake_pg: store = torch.testing._internal.distributed.fake_pg.FakeStore() c10d.init_process_group( backend="fake", world_size=world_size, rank=rank, store=store, ) else: c10d.init_process_group("nccl", rank=rank, world_size=world_size) torch._dynamo.reset() torch._dynamo.utils.counters.clear() try: yield finally: torch._dynamo.reset() torch._dynamo.utils.counters.clear() if init_pg: c10d.destroy_process_group() class DynamoDistributedSingleProcTestCase(torch._dynamo.test_case.TestCase): """ Test harness for single-process dynamo distributed tests, initializes dist process group. Prefer this for simple tests, as it's easier to debug. """ @classmethod def setUpClass(cls): super().setUpClass() # _exit_stack is set up in TestCase cls._exit_stack.enter_context( patch.dict( os.environ, { "MASTER_ADDR": "localhost", "MASTER_PORT": "12355", }, ) ) cls.rank = 0 cls.device = f"cuda:{cls.rank}" cls.device_ids = None if "cuda" in cls.device else [cls.rank] c10d.init_process_group("nccl", rank=cls.rank, world_size=1) @classmethod def tearDownClass(cls): c10d.destroy_process_group() super().tearDownClass() class DynamoDistributedMultiProcTestCase(MultiProcessTestCase): """ Use this for tests that actually run on multiple GPUs. Decorate tests with @skip_if_lt_x_gpu(ngpu) Note: MultiProcTestCase spawns processes per test and is slow. Prefer MultiThreadedTestCase for most tests. Perhaps use this one sparingly for integration tests. 
""" def setUp(self): super().setUp() self._spawn_processes() def tearDown(self): super().tearDown() try: os.remove(self.file_name) except OSError: pass @property def world_size(self) -> int: return torch.cuda.device_count() @classmethod def _run(cls, rank: int, test_name: str, file_name: str, parent_pipe, **kwargs) -> None: trace_log.addHandler(logging.NullHandler()) # The rest is copypasta from MultiProcessTestCase._run self = cls(test_name) self.rank = rank self.file_name = file_name self.run_test(test_name, parent_pipe) class MultiProcContinousTest(TestCase): # Class variables: # number of test processes world_size: int = 2 # rank of the current process rank: int = -1 # unset state # Rendezvous file rdvz_file: Optional[str] = None # timeout configured per class timeout: timedelta = timedelta(seconds=120) @classmethod @abc.abstractmethod def backend_str(cls) -> str: """ ProcessGroup backend str. To be customized by sub test classes, e.g. "nccl". Here we raise error. """ raise NotImplementedError("Please implement backend_str in your test class") @classmethod def opts(cls, high_priority_stream=False): """ ProcessGroup init options. To be customized by sub test classes, e.g. ProcessGroupNCCLOpTest Here we return None. """ return None @classmethod def setUpClass(cls): """ Class-scope test fixture. Run once for entire test class, before any test starts. Set up the process group. """ super().setUpClass() if not 0 <= cls.rank < cls.world_size: raise RuntimeError( "Rank must be set and in the range of 0 to world_size. " f"World size: {cls.world_size} Rank: {cls.rank}" ) if cls.rdvz_file: store = c10d.FileStore(cls.rdvz_file, cls.world_size) else: # torchrun takes care of rendezvous store = None opts = cls.opts() backend = cls.backend_str() print(f"Testing {backend=}") # create nccl processgroup with opts c10d.init_process_group( backend=backend, world_size=cls.world_size, rank=cls.rank, store=store, pg_options=opts, timeout=cls.timeout, ) cls.pg = c10d.distributed_c10d._get_default_group() print(f"Rank {cls.rank} setup complete") @classmethod def tearDownClass(cls): """ Class-scope test fixture. Run once for entire test class, after all tests finish. Tear down the process group. """ c10d.destroy_process_group() super().tearDownClass() # Clear up the rendezvous file if cls.rdvz_file: try: os.remove(cls.rdvz_file) except OSError: pass print(f"Rank {cls.rank} teardown complete") @classmethod def run_rank( cls, rank: int, world_size: int, rdvz_file: Optional[str] = None, ): """ This is an entry point for each rank to run the tests in `MultiProcContinousTest`. In this entry point, we set the class variables for the test class. Then we run all tests. Note: - This helper only works for a subclass of `MultiProcContinousTest`. Example: - See `test_c10d_ops_nccl.py`. """ # set class variables for the test class cls.rank = rank cls.world_size = world_size cls.rdvz_file = rdvz_file # Launch tests via `common_utils` infra run_tests() ```
=============================================================================================================================== SOURCE CODE FILE: common_dtype.py LINES: 1 SIZE: 4.96 KB PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\common_dtype.py ENCODING: utf-8 ```py # mypy: ignore-errors import torch # Functions and classes for describing the dtypes a function supports # NOTE: these helpers should correspond to PyTorch's C++ dispatch macros # Verifies each given dtype is a torch.dtype def _validate_dtypes(*dtypes): for dtype in dtypes: assert isinstance(dtype, torch.dtype) return dtypes # class for tuples corresponding to a PyTorch dispatch macro class _dispatch_dtypes(tuple): __slots__ = () def __add__(self, other): assert isinstance(other, tuple) return _dispatch_dtypes(tuple.__add__(self, other)) _empty_types = _dispatch_dtypes(()) def empty_types(): return _empty_types _floating_types = _dispatch_dtypes((torch.float32, torch.float64)) def floating_types(): return _floating_types _floating_types_and_half = _floating_types + (torch.half,) def floating_types_and_half(): return _floating_types_and_half def floating_types_and(*dtypes): return _floating_types + _validate_dtypes(*dtypes) _floating_and_complex_types = _floating_types + (torch.cfloat, torch.cdouble) def floating_and_complex_types(): return _floating_and_complex_types def floating_and_complex_types_and(*dtypes): return _floating_and_complex_types + _validate_dtypes(*dtypes) _double_types = _dispatch_dtypes((torch.float64, torch.complex128)) def double_types(): return _double_types # NB: Does not contain uint16/uint32/uint64 for BC reasons _integral_types = _dispatch_dtypes( (torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64) ) def integral_types(): return _integral_types def integral_types_and(*dtypes): return _integral_types + _validate_dtypes(*dtypes) _all_types = _floating_types + _integral_types def all_types(): return _all_types def all_types_and(*dtypes): return _all_types + _validate_dtypes(*dtypes) _complex_types = _dispatch_dtypes((torch.cfloat, torch.cdouble)) def complex_types(): return _complex_types def complex_types_and(*dtypes): return _complex_types + _validate_dtypes(*dtypes) _all_types_and_complex = _all_types + _complex_types def all_types_and_complex(): return _all_types_and_complex def all_types_and_complex_and(*dtypes): return _all_types_and_complex + _validate_dtypes(*dtypes) _all_types_and_half = _all_types + (torch.half,) def all_types_and_half(): return _all_types_and_half _float8_types = _dispatch_dtypes( ( torch.float8_e4m3fn, torch.float8_e4m3fnuz, torch.float8_e5m2, torch.float8_e5m2fnuz, ) ) def float8_types(): return _float8_types def float8_types_and(*dtypes): return _float8_types + _validate_dtypes(*dtypes) def all_types_complex_float8_and(*dtypes): return _all_types + _complex_types + _float8_types + _validate_dtypes(*dtypes) def custom_types(*dtypes): """Create a list of arbitrary dtypes""" return _empty_types + _validate_dtypes(*dtypes) # The functions below are used for convenience in our test suite and thus have no corresponding C++ dispatch macro # See AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_AND_QINTS. 
def get_all_dtypes( include_half=True, include_bfloat16=True, include_bool=True, include_complex=True, include_complex32=False, include_qint=False, ) -> list[torch.dtype]: dtypes = get_all_int_dtypes() + get_all_fp_dtypes( include_half=include_half, include_bfloat16=include_bfloat16 ) if include_bool: dtypes.append(torch.bool) if include_complex: dtypes += get_all_complex_dtypes(include_complex32) if include_qint: dtypes += get_all_qint_dtypes() return dtypes def get_all_math_dtypes(device) -> list[torch.dtype]: return ( get_all_int_dtypes() + get_all_fp_dtypes( include_half=device.startswith("cuda"), include_bfloat16=False ) + get_all_complex_dtypes() ) def get_all_complex_dtypes(include_complex32=False) -> list[torch.dtype]: return ( [torch.complex32, torch.complex64, torch.complex128] if include_complex32 else [torch.complex64, torch.complex128] ) def get_all_int_dtypes() -> list[torch.dtype]: return [torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64] def get_all_fp_dtypes(include_half=True, include_bfloat16=True) -> list[torch.dtype]: dtypes = [torch.float32, torch.float64] if include_half: dtypes.append(torch.float16) if include_bfloat16: dtypes.append(torch.bfloat16) return dtypes def get_all_qint_dtypes() -> list[torch.dtype]: return [torch.qint8, torch.quint8, torch.qint32, torch.quint4x2, torch.quint2x4] float_to_corresponding_complex_type_map = { torch.float16: torch.complex32, torch.float32: torch.complex64, torch.float64: torch.complex128, } ```
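The helpers in this file are typically composed when declaring which dtypes an operator or test supports. A small sketch, using only the functions defined above:

```py
# Minimal sketch of composing the dtype helpers (only names defined above are used).
import torch
from torch.testing._internal.common_dtype import (
    all_types_and_complex_and,
    floating_types_and,
    get_all_dtypes,
)

# Dispatch-style tuples mirror the C++ macros: start from a base set and extend it.
supported = all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16)
assert torch.float32 in supported and torch.bool in supported

# Convenience helpers used directly in tests.
fp_dtypes = floating_types_and(torch.half)            # (float32, float64, float16)
everything = get_all_dtypes(include_complex32=True)   # ints + floats + bool + complex
assert torch.complex32 in everything and torch.qint8 not in everything
```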
============================================================================================================================== SOURCE CODE FILE: common_fsdp.py LINES: 3 SIZE: 58.17 KB PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\common_fsdp.py ENCODING: utf-8 ```py # mypy: allow-untyped-defs # Owner(s): ["oncall: distributed"] import contextlib import os import re import sys import time import warnings from abc import ABC, abstractmethod from contextlib import nullcontext from copy import deepcopy from enum import auto, Enum from functools import wraps from typing import Any, Callable, cast, no_type_check, Optional, Union from unittest import mock import torch import torch.distributed as dist import torch.nn as nn import torch.nn.functional as F from torch.distributed._composable import checkpoint from torch.distributed.device_mesh import DeviceMesh from torch.distributed.fsdp import ( CPUOffload, fully_shard, FullyShardedDataParallel as FSDP, ) from torch.distributed.fsdp._common_utils import TrainingState from torch.distributed.fsdp._fully_shard._fsdp_param_group import ( FSDPParamGroup, RegisterPostBackwardFunction, ) from torch.distributed.fsdp._init_utils import NO_RESHARD_AFTER_FORWARD_STRATEGIES from torch.distributed.fsdp.fully_sharded_data_parallel import ( BackwardPrefetch, MixedPrecision, ShardingStrategy, ) from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler from torch.distributed.fsdp.wrap import always_wrap_policy, ModuleWrapPolicy, wrap from torch.distributed.tensor import distribute_tensor, DTensor, Shard from torch.distributed.tensor.parallel import ( ColwiseParallel, parallelize_module, RowwiseParallel, SequenceParallel, ) from torch.nn import TransformerDecoderLayer, TransformerEncoderLayer from torch.nn.parallel.distributed import DistributedDataParallel as DDP from torch.testing._internal.common_distributed import ( MultiProcessTestCase, MultiThreadedTestCase, run_subtests, TEST_SKIPS, ) from torch.testing._internal.common_utils import ( FILE_SCHEMA, get_cycles_per_ms, TEST_CUDA, TEST_HPU, TEST_XPU, ) from torch.utils._triton import has_triton DEVICE_COUNT = 4 # default if TEST_CUDA: DEVICE_TYPE = "cuda" DISTRIBUTED_BACKEND = "nccl" DEVICE_COUNT = torch.cuda.device_count() elif TEST_HPU: DEVICE_TYPE = "hpu:0" DISTRIBUTED_BACKEND = "hccl" elif TEST_XPU: DEVICE_TYPE = "xpu" DISTRIBUTED_BACKEND = "xccl" DEVICE_COUNT = torch.xpu.device_count() else: DEVICE_TYPE = "cpu" DISTRIBUTED_BACKEND = "gloo" DEVICE_COUNT = 1 class FSDPInitMode(Enum): # No FSDP wrapping NO_FSDP = auto() # FSDP recursive wrapping RECURSIVE = auto() # TODO: FSDP non-recursive wrapping # NONRECURSIVE = auto() class DEVICEInitMode(Enum): # Move model to DEVICE before passing to the FSDP constructor DEVICE_BEFORE = auto() # Move model to DEVICE after passing to the FSDP constructor DEVICE_AFTER = auto() # Keep on CPU DEVICE_NEVER = auto() class FSDPTestModel(nn.Module, ABC): """This defines the interface expected from all models used commonly for FSDP unit tests.""" @abstractmethod def get_input(self, device) -> tuple[torch.Tensor, ...]: """Returns an input for the model as as tuple.""" ... @abstractmethod def get_loss(self, input, output) -> torch.Tensor: """Returns the loss given the input and output.""" ... @abstractmethod def run_backward(self, loss) -> None: """Runs the backward pass (e.g. including ``loss.backward()``).""" ... @staticmethod @abstractmethod def init(*args: Any, **kwargs: Any) -> nn.Module: """Initializes an instance of this model.""" ... 
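# --- Illustrative sketch, not part of the original module --------------------
# A minimal concrete implementation of the FSDPTestModel interface defined
# above. The class name and layer size are arbitrary choices for the example.
class _TinyFSDPTestModel(FSDPTestModel):
    def __init__(self) -> None:
        super().__init__()
        self.lin = nn.Linear(8, 8)

    def get_input(self, device) -> tuple[torch.Tensor, ...]:
        return (torch.rand(4, 8, device=device),)

    def forward(self, x):
        return self.lin(x)

    def get_loss(self, input, output) -> torch.Tensor:
        return output.sum()

    def run_backward(self, loss) -> None:
        loss.backward()

    @staticmethod
    def init(*args: Any, **kwargs: Any) -> nn.Module:
        return _TinyFSDPTestModel()
# ------------------------------------------------------------------------------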
def _assert_module_states( model: nn.Module, process_group: dist.ProcessGroup, assert_fn: Callable, ): """ All-gathers module states across ranks and calls ``assert_fn`` on each pair of corresponding states from rank 0 and a nonzero rank. For example, if ``assert_fn`` is ``self.assertEqual()``, then this checks that all module states are equal across ranks. """ # Include names for debugging convenience named_module_states = [ (param_name, param.detach().cpu()) for param_name, param in model.named_parameters() ] named_module_states += [ (buffer_name, buffer.detach().cpu()) for buffer_name, buffer in model.named_buffers() ] world_size = dist.get_world_size(process_group) olist = [None for _ in range(world_size)] dist.all_gather_object(olist, named_module_states, group=process_group) rank0_states = olist[0] assert rank0_states is not None # mypy for state in olist[1:]: assert state is not None # mypy for (_, p1), (_, p2) in zip(rank0_states, state): assert_fn(p1, p2) def get_devtype(): return torch.device(DEVICE_TYPE) def _zero_model( model: nn.Module, zero_buffers: bool = False, summon_full=True, ): """Zeros the parameters and optionally buffers of ``model`` in place.""" ctx = FSDP.summon_full_params(model) if summon_full else nullcontext() with ctx: for param in model.parameters(): with torch.no_grad(): param.zero_() if zero_buffers: for buffer in model.buffers(): with torch.no_grad(): buffer.zero_() def _get_state_dict(model, cpu_offload=False, half=False): if not cpu_offload: model = model.to(DEVICE_TYPE) if half: model.half() return model.state_dict() def subtest_name(test_name_mapping, *args): return "_".join( [test_name_mapping[str(s)] if s is not None else "none" for s in args] ) def _broadcast_state_dict(rank, state_dict): # For non-FSDP roots, some parts of the model state on rank 0 may # not be on CPU, so we move everything to CPU to avoid issues like: # https://github.com/pytorch/pytorch/issues/77113. for param_name, param in state_dict.items(): if param.device != torch.device("cpu"): state_dict[param_name] = param.cpu() olist = [state_dict if rank == 0 else None] dist.broadcast_object_list(olist) state_dict = cast(dict[str, torch.Tensor], olist[0]) # Ensure that the state is on DEVICE for param_name in state_dict.keys(): state_dict[param_name] = state_dict[param_name].to(DEVICE_TYPE) return state_dict def get_full_params(model: nn.Module, recurse: bool = True): """ Returns the full unsharded parameters of ``model``. Any FSDP-managed parameters offloaded to CPU are moved to GPU in the returned list. Args: recurse (bool): If ``False``, only unshards the parameters immediate to ``model``; if ``True``, recurses through the module hierarchy rooted at ``model``. 
""" with FSDP.summon_full_params(model, recurse=recurse): return deepcopy(list(model.parameters())) def _move_to_device(model: nn.Module, move_to_device: bool): return model.to(DEVICE_TYPE) if move_to_device else model def _maybe_wrap_fsdp(model: nn.Module, wrap_fsdp: bool, *args, **kwargs): return model if not wrap_fsdp else FSDP(model, *args, **kwargs) class DummyProcessGroup: def __init__(self, rank: int, size: int): self._rank = rank self._size = size def rank(self) -> int: return self._rank def size(self) -> int: return self._size def allreduce(self, *args, **kwargs): dist_wait = mock.Mock() def get_future(): future: torch.futures.Future = torch.futures.Future() future.set_result(1) return future dist_wait.get_future = get_future return dist_wait class TransformerWithSharedParams(FSDPTestModel): def __init__( self, group: dist.ProcessGroup, device_init_mode: DEVICEInitMode, add_bn: bool, deterministic: bool, ): super().__init__() self.rank = group.rank() self.world_size = group.size() if deterministic: torch.manual_seed(0) d_vocab = 23 d_model = 16 self.embed_tokens = nn.Embedding(d_vocab, d_model) self.transformer = nn.Transformer( d_model=d_model, num_encoder_layers=2, num_decoder_layers=2, dim_feedforward=8, dropout=0.1, ) self.output_proj = nn.Linear(d_model, d_vocab) # share the embedding and output projection weights self.output_proj.weight = self.embed_tokens.weight self.register_buffer( "vocab_bias", self.embed_tokens.weight.new_ones((d_model,)) ) self.register_buffer( "long_buffer", torch.zeros_like(self.vocab_bias, dtype=torch.long), # type: ignore[arg-type] ) # type: ignore[arg-type] self.bs = 2 self.bn = torch.nn.BatchNorm1d(self.bs) if add_bn else torch.nn.Identity() if device_init_mode == DEVICEInitMode.DEVICE_BEFORE: self = self.to(DEVICE_TYPE) if deterministic: self.eval() def get_input(self, device): torch.manual_seed(1 + self.rank) # keep everything deterministic src = torch.arange(12, device=device).view(6, self.bs) # T x B tgt = torch.arange(self.bs * 4, device=device).view(4, self.bs) # T x B return (src, tgt) def forward(self, src_ids, tgt_ids): src = self.embed_tokens(src_ids) src = src + self.vocab_bias + self.long_buffer.type_as(src) # type: ignore[operator] tgt = self.embed_tokens(tgt_ids) tgt = self.bn(tgt) x = self.transformer(src, tgt) return self.output_proj(x) def get_loss(self, input, output): _, tgt = input return nn.functional.cross_entropy( output.view(-1, output.size(-1)), tgt.view(-1), reduction="sum" ) def run_backward(self, loss): loss.backward() @staticmethod def init( group: dist.ProcessGroup, fsdp_init_mode: FSDPInitMode, device_init_mode: DEVICEInitMode, fsdp_kwargs: Optional[dict[str, Any]] = None, deterministic: bool = False, add_bn: bool = True, ) -> Union[nn.Module, FSDP]: """ Initializes a :class:`TransformerWithSharedParams` instance. Args: fsdp_init_mode (FSDPInitMode): If ``NO_FSDP``, then does not wrap any modules with FSDP. If ``RECURSIVE``, then wraps with top-level FSDP. By default, the top-level FSDP uses the ``ModuleWrapPolicy`` for encoder and decoder layers, but a different auto wrap policy may be specified via ``fsdp_kwargs``. device_init_mode (DEVICEInitMode): Determines model movement to DEVICE. fsdp_kwargs (Optional[Dict[str, Any]]): Optional keyword arguments forwarded to the FSDP constructor. deterministic (bool): Whether to make the model deterministic across constructions. add_bn (bool): Whether to include batch norm in the model. 
""" if fsdp_kwargs is None: fsdp_kwargs = {} if fsdp_init_mode == FSDPInitMode.NO_FSDP: if isinstance(group, tuple): pg = group[0] else: pg = group return TransformerWithSharedParams( pg, device_init_mode, add_bn, deterministic ) elif fsdp_init_mode == FSDPInitMode.RECURSIVE: # Default to the `ModuleWrapPolicy` if "auto_wrap_policy" not in fsdp_kwargs: auto_wrap_policy = ModuleWrapPolicy( { TransformerEncoderLayer, TransformerDecoderLayer, } ) else: auto_wrap_policy = fsdp_kwargs.pop("auto_wrap_policy") if ( "sharding_strategy" in fsdp_kwargs and fsdp_kwargs["sharding_strategy"] in {ShardingStrategy.HYBRID_SHARD, ShardingStrategy._HYBRID_SHARD_ZERO2} and not isinstance(group, tuple) ): fsdp_pg = None else: fsdp_pg = group if isinstance(group, tuple): tformer_pg = group[0] else: tformer_pg = group m = TransformerWithSharedParams( tformer_pg, device_init_mode, add_bn, deterministic ) fsdp_model = FSDP( m, fsdp_pg, auto_wrap_policy=auto_wrap_policy, **fsdp_kwargs, ) if device_init_mode == DEVICEInitMode.DEVICE_AFTER: fsdp_model = fsdp_model.to(DEVICE_TYPE) return fsdp_model raise ValueError(f"Unsupported FSDP init mode: {fsdp_init_mode}") def get_ignored_modules(self): return [self.transformer] class NestedWrappedModule(FSDPTestModel): def __init__( self, group: dist.ProcessGroup, wrap_fsdp: bool, device_init_mode: DEVICEInitMode, deterministic: bool, **fsdp_kwargs, ): super().__init__() self.rank = group.rank() self.world_size = group.size() move_to_device = device_init_mode == DEVICEInitMode.DEVICE_BEFORE def _maybe_wrap(layer): if wrap_fsdp: return FSDP(layer, group, **fsdp_kwargs) return layer if deterministic: torch.manual_seed(0) self.module = nn.Sequential( _move_to_device(nn.Linear(8, 4), move_to_device), _maybe_wrap( nn.Sequential( _maybe_wrap(_move_to_device(nn.Linear(4, 16), move_to_device)), _move_to_device(nn.Linear(16, 16), move_to_device), ), ), _maybe_wrap(_move_to_device(nn.Linear(16, 4), move_to_device)), _move_to_device(nn.Linear(4, 8), move_to_device), ) def get_input(self, device): torch.manual_seed(1 + self.rank) # keep everything deterministic return (torch.rand(4, 8, device=device),) def forward(self, x): return self.module(x) def get_loss(self, input, output): loss = output.sum() return loss def run_backward(self, loss): loss.backward() @staticmethod def init( group: dist.ProcessGroup, fsdp_init_mode: FSDPInitMode, device_init_mode: DEVICEInitMode, fsdp_kwargs: Optional[dict[str, Any]] = None, deterministic: bool = False, ) -> nn.Module: """ Initializes a :class:`NestedWrappedModule` instance. Args: fsdp_init_mode (FSDPInitMode): If ``NO_FSDP``, then does not wrap any modules with FSDP. If ``RECURSIVE``, then wraps some nested modules with FSDP but not the top-level module. The model may later be wrapped with a top-level FSDP external to this method if desired. device_init_mode (DEVICEInitMode): Determines model movement to DEVICE. fsdp_kwargs (Optional[Dict[str, Any]]): Optional keyword arguments forwarded to the FSDP constructor. deterministic (bool): Whether to make the model deterministic across constructions. 
""" if fsdp_kwargs is None: fsdp_kwargs = {} if fsdp_init_mode == FSDPInitMode.NO_FSDP: return NestedWrappedModule( group, wrap_fsdp=False, device_init_mode=device_init_mode, deterministic=deterministic, ) elif fsdp_init_mode == FSDPInitMode.RECURSIVE: # Does not wrap with top-level FSDP fsdp_model = NestedWrappedModule( group, wrap_fsdp=True, device_init_mode=device_init_mode, deterministic=deterministic, **fsdp_kwargs, ) if device_init_mode == DEVICEInitMode.DEVICE_AFTER: fsdp_model = fsdp_model.to(DEVICE_TYPE) return fsdp_model raise ValueError(f"Unsupported FSDP init mode: {fsdp_init_mode}") class AlwaysWrapNestedWrappedModule(NestedWrappedModule): @staticmethod def init( group: dist.ProcessGroup, fsdp_init_mode: FSDPInitMode, device_init_mode: DEVICEInitMode, fsdp_kwargs: Optional[dict[str, Any]] = None, deterministic: bool = False, ): """ Initializes a :class:`NestedWrappedModule` instance, but unlike :meth:`NestedWrappedModule.init`, for the ``RECURSIVE`` init mode, this wraps with top-level FSDP and the ``always_wrap_policy()`` auto wrap policy. """ model = super( AlwaysWrapNestedWrappedModule, AlwaysWrapNestedWrappedModule ).init( group=group, fsdp_init_mode=FSDPInitMode.NO_FSDP, device_init_mode=device_init_mode, fsdp_kwargs=fsdp_kwargs, deterministic=deterministic, ) if fsdp_init_mode == FSDPInitMode.NO_FSDP: return model elif fsdp_init_mode == FSDPInitMode.RECURSIVE: fsdp_kwargs = fsdp_kwargs or {} fsdp_model = FSDP(model, auto_wrap_policy=always_wrap_policy, **fsdp_kwargs) if device_init_mode == DEVICEInitMode.DEVICE_AFTER: fsdp_model = fsdp_model.to(DEVICE_TYPE) return fsdp_model class NonUniformReqGradNWM(NestedWrappedModule): def __init__( self, group: dist.ProcessGroup, wrap_fsdp: bool, device_init_mode: DEVICEInitMode, deterministic: bool, **fsdp_kwargs, ): super(NestedWrappedModule, self).__init__() # This `__init__` only differs from `NestedWrappedModule.__init__` in that # the last two `nn.Linear` layers are FSDP wrapped in a `nn.Sequential` # container. This arrangement results in all elements of the last two parameters # residing on a single rank. Freezing all parameters except those two allows us # to verify that `ShardedGradScaler` accommodates situations where some ranks # have no (non-zero sized) parameter shards. self.rank = group.rank() self.world_size = group.size() move_to_device = device_init_mode == DEVICEInitMode.DEVICE_BEFORE def _maybe_wrap(layer): if wrap_fsdp: return FSDP(layer, group, **fsdp_kwargs) return layer if deterministic: torch.manual_seed(0) self.module = nn.Sequential( _move_to_device(nn.Linear(8, 4), move_to_device), _maybe_wrap( nn.Sequential( _maybe_wrap(_move_to_device(nn.Linear(4, 16), move_to_device)), _move_to_device(nn.Linear(16, 16), move_to_device), ), ), _maybe_wrap( nn.Sequential( _move_to_device(nn.Linear(16, 4), move_to_device), _move_to_device(nn.Linear(4, 8), move_to_device), ), ), ) @staticmethod def _set_nonuniform_req_grad(model, req_grad_mask) -> None: for n, p in model.named_parameters(): if not re.match(req_grad_mask, n): p.requires_grad_(False) @staticmethod def init( group: dist.ProcessGroup, fsdp_init_mode: FSDPInitMode, device_init_mode: DEVICEInitMode, fsdp_kwargs: Optional[dict[str, Any]] = None, deterministic: bool = False, ): """ Initializes a :class:`NestedWrappedModule` instance, but unlike :meth:`NestedWrappedModule.init`, it wraps a second :class:`torch.nn.Sequential` container to enable the desired non-uniform ``requires_grad`` ``use_orig_params=True`` tests. 
For both ``RECURSIVE`` and ``NO_FSDP`` init modes, freezes all parameters except the last two to validate ``ShardedGradScaler`` support for ranks with no (non-zero sized) local shards in FSDP ``use_orig_params=True`` mode. """ # The parameters that should remain unfrozen are in `module.2.1`. The regex # pattern below matches the relevant parameter names both with and without # an interstitial FSDP module indicator (`_fsdp_wrapped_module`) present. req_grad_pattern = re.compile(r"module\.2.*\.1.*") if fsdp_init_mode == FSDPInitMode.NO_FSDP: ddp_model = NonUniformReqGradNWM( group, wrap_fsdp=False, device_init_mode=device_init_mode, deterministic=deterministic, ) NonUniformReqGradNWM._set_nonuniform_req_grad(ddp_model, req_grad_pattern) return ddp_model elif fsdp_init_mode == FSDPInitMode.RECURSIVE: if fsdp_kwargs is None: fsdp_kwargs = {} fsdp_model = NonUniformReqGradNWM( group, wrap_fsdp=True, device_init_mode=device_init_mode, deterministic=deterministic, **fsdp_kwargs, ) if device_init_mode == DEVICEInitMode.DEVICE_AFTER: fsdp_model = fsdp_model.to(DEVICE_TYPE) NonUniformReqGradNWM._set_nonuniform_req_grad(fsdp_model, req_grad_pattern) return fsdp_model raise ValueError(f"Unsupported FSDP init mode: {fsdp_init_mode}") class ModuleWithDelay(FSDPTestModel): """This class wraps a :class:`FSDPTestModel` to optionally add a delay after computing the loss and/or before the gradient reduction.""" def __init__( self, module: nn.Module, delay_after_loss_ms: int, delay_before_reduction_ms: int, ): super().__init__() self.delay_after_loss_ms = delay_after_loss_ms self.delay_before_reduction_ms = delay_before_reduction_ms self.module = module def get_input(self, device): return self.module.get_input(device) # type: ignore[operator] def forward(self, x): return self.module(x) def get_loss(self, input, output): loss = self.module.get_loss(input, output) # type: ignore[operator] if self.delay_after_loss_ms > 0: if TEST_HPU or TEST_XPU: time.sleep(self.delay_after_loss_ms / 1000) elif TEST_CUDA: torch.cuda._sleep(int(self.delay_after_loss_ms * get_cycles_per_ms())) return loss def run_backward(self, loss): orig_reduce_scatter = torch.distributed.reduce_scatter_tensor def _delayed_reduce_scatter(*args, **kwargs): if self.delay_before_reduction_ms > 0: if TEST_CUDA: torch.cuda._sleep( int(self.delay_before_reduction_ms * get_cycles_per_ms()) ) elif TEST_HPU or TEST_XPU: time.sleep(self.delay_before_reduction_ms / 1000) return orig_reduce_scatter(*args, **kwargs) with mock.patch( "torch.distributed.reduce_scatter_tensor", _delayed_reduce_scatter ): self.module.run_backward(loss) # type: ignore[operator] @staticmethod def init( module_class: type[FSDPTestModel], *model_args: Any, delay_after_loss_ms: int, delay_before_reduction_ms: int, **model_kwargs: Any, ): """ Args: module_class (Type[FSDPTestModel]): Wrapped module class to which to add delays. model_args: Positional arguments forwarded to the ``module_class`` ``init()``. delay_after_loss_ms (int): Delay after computing the loss/before the optimizer step (in ms). delay_before_reduction_ms (int): Delay before reduce-scattering gradients (in ms). model_kwargs: Keyword arguments forwarded to the ``module_class`` ``init()``. 
""" return ModuleWithDelay( module_class.init(*model_args, **model_kwargs), delay_after_loss_ms, delay_before_reduction_ms, ) class NestedWrappedModuleWithDelay(ModuleWithDelay): @staticmethod def init( # type: ignore[override] group: dist.ProcessGroup, fsdp_init_mode: FSDPInitMode, device_init_mode: DEVICEInitMode = DEVICEInitMode.DEVICE_AFTER, fsdp_kwargs: Optional[dict[str, Any]] = None, deterministic: bool = False, delay_after_loss_ms: int = 0, delay_before_reduction_ms: int = 0, ): return ModuleWithDelay.init( NestedWrappedModule, group=group, fsdp_init_mode=fsdp_init_mode, device_init_mode=device_init_mode, fsdp_kwargs=fsdp_kwargs, deterministic=deterministic, delay_after_loss_ms=delay_after_loss_ms, delay_before_reduction_ms=delay_before_reduction_ms, ) class DummyDDP(nn.Module): def __init__(self, module): super().__init__() self.module = module def forward(self, *args, **kwargs): return self.module(*args, **kwargs) class MixtureOfExperts(NestedWrappedModule): def __init__( self, group: dist.ProcessGroup, wrap_fsdp: bool, device_init_mode: DEVICEInitMode, delay_before_free_ms: int, deterministic: bool, **fsdp_kwargs, ): super().__init__( group=group, wrap_fsdp=wrap_fsdp, device_init_mode=device_init_mode, deterministic=deterministic, ) self.group = group self.delay_before_free_ms = delay_before_free_ms self.wrap_fsdp = wrap_fsdp self.move_to_device = device_init_mode == DEVICEInitMode.DEVICE_BEFORE if deterministic: # Give each rank different expert parameters torch.manual_seed(42 + self.rank) d_expert = 23 d_shared = 12 d_input = 8 expert = _move_to_device(nn.Linear(d_expert, d_shared), self.move_to_device) self.num_expert_params = sum(p.numel() for p in expert.parameters()) for p in expert.parameters(): p.expert = True # type: ignore[attr-defined] if deterministic: # Keep all other parameters the same across ranks torch.manual_seed(0) shared = _move_to_device(nn.Linear(d_shared, d_expert), self.move_to_device) if wrap_fsdp: # we create a process group of size 1 for the expert params expert_group = torch.distributed.new_group( [group.rank()] ) # world size 1 means no shard expert = FSDP(expert, expert_group, **fsdp_kwargs) # type: ignore[assignment] shared = FSDP(shared, group, **fsdp_kwargs) # type: ignore[assignment] self.module = nn.Sequential( _move_to_device(nn.Linear(d_input, d_shared), self.move_to_device), shared, expert, _move_to_device(nn.Linear(d_shared, d_input), self.move_to_device), ) def forward(self, x): if self.delay_before_free_ms > 0: expert = self.module[2] if isinstance(expert, FSDP): orig_reshard = torch.distributed.fsdp._runtime_utils._reshard def _delayed_reshard(*args, **kwargs): if TEST_CUDA: torch.cuda._sleep( int(self.delay_before_free_ms * get_cycles_per_ms()) ) elif TEST_HPU or TEST_XPU: time.sleep(self.delay_before_free_ms / 1000) return orig_reshard(*args, **kwargs) # This patch covers any `import torch..._reshard` uses. 
with mock.patch( "torch.distributed.fsdp._runtime_utils._reshard", _delayed_reshard ): return self.module(x) return self.module(x) def run_backward(self, loss): loss.backward() # Manually reduce gradients if not wrapped in FullyShardedDataParallel if not self.wrap_fsdp: with torch.no_grad(): for p in self.parameters(): if hasattr(p, "expert"): continue # these params don't need grad reduction if p.grad is not None: p.grad.div_(self.world_size) torch.distributed.all_reduce(p.grad, group=self.group) @staticmethod def init( group: dist.ProcessGroup, fsdp_init_mode: FSDPInitMode, device_init_mode: DEVICEInitMode, fsdp_kwargs: Optional[dict[str, Any]] = None, deterministic: bool = False, delay_before_free_ms: int = 0, ): """ Initializes a :class:`MixtureOfExperts` instance. Args: fsdp_init_mode (FSDPInitMode): If ``NO_FSDP``, then does not wrap any modules with FSDP. If ``RECURSIVE``, then wraps some nested modules with FSDP, including the expert and shared layers, but not the top-level module. The model may later be wrapped with a top-level FSDP external to this method if desired. device_init_mode (DEVICEInitMode): Determines model movement to DEVICE. fsdp_kwargs (Optional[Dict[str, Any]]): Optional keyword arguments forwarded to the FSDP constructor. deterministic (bool): Whether to make the model deterministic across constructions. delay_before_free_ms (int): Delay before resharding expert parameters in the forward pass (in ms). """ if fsdp_kwargs is None: fsdp_kwargs = {} if fsdp_init_mode == FSDPInitMode.NO_FSDP: return MixtureOfExperts( group, wrap_fsdp=False, device_init_mode=device_init_mode, delay_before_free_ms=delay_before_free_ms, deterministic=deterministic, ) elif fsdp_init_mode == FSDPInitMode.RECURSIVE: # Does not wrap with top-level FSDP fsdp_model = MixtureOfExperts( group, wrap_fsdp=True, device_init_mode=device_init_mode, delay_before_free_ms=delay_before_free_ms, deterministic=deterministic, **fsdp_kwargs, ) if device_init_mode == DEVICEInitMode.DEVICE_AFTER: fsdp_model = fsdp_model.to(DEVICE_TYPE) return fsdp_model raise ValueError(f"Unsupported FSDP init mode: {fsdp_init_mode}") class MLP(nn.Module): def __init__( self, dim: int, device: Optional[torch.device] = None, *, bias: bool = True, with_buffer: bool = False, dim_multiplier: int = 4, ): super().__init__() self.in_proj = nn.Linear(dim, dim_multiplier * dim, device=device, bias=bias) self.out_proj = nn.Linear(dim_multiplier * dim, dim, device=device, bias=bias) if with_buffer: self.register_buffer("buffer", torch.randn((dim,), device=device)) else: self.buffer = None def forward(self, x: torch.Tensor) -> torch.Tensor: z = self.in_proj(x) z = F.relu(z) z = self.out_proj(z) z = F.relu(z) if self.buffer is not None: z = z + self.buffer return z def reset_parameters(self): if self.buffer is not None: torch.nn.init.normal_(self.buffer) class MLPStack(nn.Sequential): def __init__(self, mlp_dim: int, *, with_seq_parallel: bool = False): modules: list[nn.Module] = [ # Use multiplier of 3 to exercise uneven case MLP(mlp_dim, dim_multiplier=3), MLP(mlp_dim), MLP(mlp_dim, dim_multiplier=3), ] if with_seq_parallel: modules.append(nn.LayerNorm(mlp_dim, bias=False)) super().__init__(*modules) self.with_seq_parallel = with_seq_parallel def parallelize( self, tp_mesh: DeviceMesh, dp_mesh: DeviceMesh, use_activation_checkpointing: bool, **fsdp_kwargs, ) -> "MLPStack": parallelize_plan = { # Pass `use_local_output=False` to keep as DTensor to preserve # uneven activation dims "0.in_proj": ColwiseParallel(use_local_output=False), 
"0.out_proj": RowwiseParallel(use_local_output=False), "1.in_proj": ColwiseParallel(use_local_output=False), "1.out_proj": RowwiseParallel(use_local_output=False), "2.in_proj": ColwiseParallel(use_local_output=False), "2.out_proj": RowwiseParallel(output_layouts=Shard(1)) if self.with_seq_parallel else RowwiseParallel(), } if self.with_seq_parallel: parallelize_plan["3"] = SequenceParallel(sequence_dim=1) parallelize_module(self, device_mesh=tp_mesh, parallelize_plan=parallelize_plan) for module in self: if isinstance(module, nn.LayerNorm): continue if use_activation_checkpointing: checkpoint(module) fully_shard(module, mesh=dp_mesh, **fsdp_kwargs) fully_shard(self, mesh=dp_mesh, **fsdp_kwargs) return self class DoubleLinear(nn.Module): """ This can be used for returning multiple outputs from a module (``use_second_linear=True``) or for having an unused module (``False``). """ def __init__(self, dim: int, use_second_linear: bool = True): super().__init__() self.lin1 = nn.Linear(dim, dim) self.lin2 = nn.Linear(dim, dim) self.relu = nn.ReLU() self.use_second_linear = use_second_linear def forward( self, x: torch.Tensor ) -> Union[tuple[torch.Tensor, torch.Tensor], torch.Tensor]: if self.use_second_linear: return self.relu(self.lin1(x)), self.relu(self.lin2(x)) return self.relu(self.lin1(x)) # NOTE: For these patch methods, if we want safety under multi-threading (e.g. # when using multi-threaded process group), then we want: # (1) a barrier immediately after reading the original value to ensure that all # threads see the same original value # (2) a barrier immediately before restoring the original value to ensure that # all threads use the patched value inside the context @contextlib.contextmanager def patch_all_gather(new_all_gather_into_tensor: Callable): orig_all_gather = dist.all_gather_into_tensor dist.barrier() dist.all_gather_into_tensor = new_all_gather_into_tensor try: yield finally: dist.barrier() dist.all_gather_into_tensor = orig_all_gather @contextlib.contextmanager def patch_reduce_scatter(new_reduce_scatter_tensor: Callable): orig_reduce_scatter = dist.reduce_scatter_tensor dist.barrier() dist.reduce_scatter_tensor = new_reduce_scatter_tensor try: yield finally: dist.barrier() dist.reduce_scatter_tensor = orig_reduce_scatter @contextlib.contextmanager def patch_all_reduce(new_all_reduce: Callable): orig_all_reduce = dist.all_reduce dist.barrier() dist.all_reduce = new_all_reduce try: yield finally: dist.barrier() dist.all_reduce = orig_all_reduce @no_type_check @contextlib.contextmanager def patch_unshard(new_unshard: Callable): orig_unshard = FSDPParamGroup.unshard dist.barrier() FSDPParamGroup.unshard = new_unshard try: yield finally: dist.barrier() FSDPParamGroup.unshard = orig_unshard @no_type_check @contextlib.contextmanager def patch_reshard(new_reshard: Callable): orig_reshard = FSDPParamGroup.reshard dist.barrier() FSDPParamGroup.reshard = new_reshard try: yield finally: dist.barrier() FSDPParamGroup.reshard = orig_reshard @no_type_check @contextlib.contextmanager def patch_post_backward(new_post_backward: Callable): orig_post_backward = FSDPParamGroup.post_backward dist.barrier() FSDPParamGroup.post_backward = new_post_backward try: yield finally: dist.barrier() FSDPParamGroup.post_backward = orig_post_backward @no_type_check @contextlib.contextmanager def patch_register_post_backward_hook_backward(new_backward: Callable): orig_backward = RegisterPostBackwardFunction.backward dist.barrier() RegisterPostBackwardFunction.backward = new_backward try: yield finally: 
dist.barrier() RegisterPostBackwardFunction.backward = orig_backward def reduce_scatter_with_assert( cls, orig_reduce_scatter: Callable, assert_fn: Callable, # `assert_fn(output: Tensor)` *args: Any, **kwargs: Any, ): if len(args) > 0: output = args[0] elif "output" in kwargs: output = kwargs["output"] else: raise AssertionError( f"Cannot get reduce-scatter output from\nargs: {args}\nkwargs: {kwargs}" ) assert_fn(output) return orig_reduce_scatter(*args, **kwargs) def check_sharded_parity( cls, # unit test class replicated_module: nn.Module, sharded_module: nn.Module, prefixes_to_ignore: tuple[str, ...] = (), ): for (replicated_name, replicated_param), (sharded_name, sharded_param) in zip( replicated_module.named_parameters(), sharded_module.named_parameters() ): clean_sharded_name = sharded_name for prefix in prefixes_to_ignore: clean_sharded_name = clean_sharded_name.replace(prefix, "") cls.assertEqual(replicated_name, clean_sharded_name) cls.assertIsInstance(sharded_param, DTensor) assert isinstance(sharded_param, DTensor) # mypy mesh, placements = sharded_param.device_mesh, sharded_param.placements if tuple(placements) == (Shard(0), Shard(0)): raise AssertionError( "FSDP's (Shard(0), Shard(0)) layout differs from distribute_tensor(), " "so we cannot check for equality using it" ) sharded_ref_param = distribute_tensor(replicated_param, mesh, placements) cls.assertEqual(sharded_param.to_local(), sharded_ref_param.to_local()) if replicated_param.grad is None: cls.assertIsNone(sharded_param.grad) continue cls.assertIsNotNone(sharded_param.grad) sharded_ref_grad = distribute_tensor(replicated_param.grad, mesh, placements) cls.assertIsInstance(sharded_param.grad, DTensor) assert isinstance(sharded_param.grad, DTensor) # mypy cls.assertEqual(sharded_param.grad.to_local(), sharded_ref_grad.to_local()) class FSDPTestMultiThread(MultiThreadedTestCase): @property def world_size(self): return DEVICE_COUNT def setUp(self): super().setUp() self._spawn_threads() def run_subtests(self, *args, **kwargs): return run_subtests(self, *args, **kwargs) def perThreadSetUp(self): torch._dynamo.reset() def perThreadTearDown(self): torch._dynamo.reset() class FSDPTest(MultiProcessTestCase): def setUp(self): super().setUp() # Set TORCH_NCCL_DESYNC_DEBUG=0 to disable the NCCL `workCleanupLoop()`, # which can cause unit test flakiness: # https://github.com/pytorch/pytorch/issues/90848 os.environ["TORCH_NCCL_DESYNC_DEBUG"] = "0" self._spawn_processes() @property def world_size(self): return DEVICE_COUNT @property def process_group(self): return dist.distributed_c10d._get_default_group() @property def destroy_pg_upon_exit(self) -> bool: # Overriding base test class: do not auto destroy PG upon exit. 
return False @property def init_method(self): return f"{FILE_SCHEMA}{self.file_name}" def _check_cpu_offload(self, fsdp_model, cpu_offload): self.assertEqual(cpu_offload, fsdp_model.cpu_offload) def _check_backward_prefetch(self, fsdp_model, backward_prefetch): self.assertEqual(backward_prefetch, fsdp_model.backward_prefetch) def _check_forward_prefetch(self, fsdp_model, forward_prefetch): self.assertEqual(forward_prefetch, fsdp_model.forward_prefetch) def run_subtests(self, *args, **kwargs): return run_subtests(self, *args, **kwargs) @classmethod def _run(cls, rank, test_name, file_name, pipe, **kwargs): self = cls(test_name) self.rank = rank self.file_name = file_name fake_pg = kwargs.get("fake_pg", False) print(f"dist init r={self.rank}, world={self.world_size}") # Specify gloo backend to make 'init_process_group()' succeed, # Actual tests will be skipped if there is no enough GPUs. try: if fake_pg: store = torch.testing._internal.distributed.fake_pg.FakeStore() dist.init_process_group( backend="fake", world_size=self.world_size, rank=rank, store=store, ) else: dist.init_process_group( init_method=self.init_method, backend=DISTRIBUTED_BACKEND, world_size=int(self.world_size), rank=self.rank, ) except RuntimeError as e: if "recompile" in e.args[0]: sys.exit(TEST_SKIPS["backend_unavailable"].exit_code) raise device_ids = None device_id = self.rank % DEVICE_COUNT if TEST_CUDA or TEST_XPU: torch.accelerator.set_device_index(device_id) device_ids = [device_id] # Execute barrier prior to running test to ensure that every process # has finished initialization and that the following test # immediately exiting due to a skip doesn't cause flakiness. dist.barrier(device_ids=device_ids) torch._dynamo.reset() self.run_test(test_name, pipe) torch._dynamo.reset() dist.barrier(device_ids=device_ids) dist.destroy_process_group() def _train_for_several_steps( self, model: nn.Module, num_steps: int, autocast: bool, lr: float = 0.01, fsdp_cpu_offload: Optional[CPUOffload] = None, save_model: bool = False, mixed_precision: Optional[MixedPrecision] = None, enable_sharded_grad_scaler: bool = False, use_pure_fp16: bool = False, sharded_grad_scaler_kwargs: Optional[dict[str, Any]] = None, ): cpu_offload_params = fsdp_cpu_offload and fsdp_cpu_offload.offload_params model_device = next(model.parameters()).device if sharded_grad_scaler_kwargs is None: sharded_grad_scaler_kwargs = {} sharded_grad_scaler = ShardedGradScaler( enabled=enable_sharded_grad_scaler, **sharded_grad_scaler_kwargs ) # use SGD with momentum instead of Adam, since Adam is scale invariant # and this makes it bad for tests optim = torch.optim.SGD(model.parameters(), lr=lr, momentum=0.9) for _ in range(num_steps): optim.zero_grad() with torch.amp.autocast(DEVICE_TYPE, enabled=autocast): # Inputs always cuda regardless of cpu offloading, or model.device input = model.module.get_input(torch.device(DEVICE_TYPE)) # type: ignore[operator, union-attr] if use_pure_fp16 or (mixed_precision and not isinstance(model, FSDP)): if isinstance(input, torch.Tensor): input = input.half() else: input = tuple(x.half() for x in input) output = model(*input) # Post-forward, if CPU offloading model param should be on CPU. 
if ( cpu_offload_params and isinstance(model, FSDP) # If not resharding after forward, the parameters are still # exposed as unsharded views into the GPU flat parameter and model.sharding_strategy not in NO_RESHARD_AFTER_FORWARD_STRATEGIES ): for p in model.parameters(): # Params should always be on CPU self.assertEqual(p.device, torch.device("cpu")) loss = model.module.get_loss(input, output).to(model_device) # type: ignore[operator, union-attr] loss = sharded_grad_scaler.scale(loss) if not mixed_precision and not use_pure_fp16: assert ( loss.dtype == torch.float32 ), "loss data type should be float32, as the original \ parameter data type is float32." else: if use_pure_fp16: self.assertEqual(loss.dtype, torch.float16) # FSDP loss is fp16, DDP AMP loss is fp32 elif isinstance(model, FSDP): assert mixed_precision is not None # mypy self.assertEqual(loss.dtype, mixed_precision.param_dtype) else: self.assertEqual(loss.dtype, torch.float32) model.module.run_backward(loss) # type: ignore[operator, union-attr] # Post-backward, if CPU offloading model params should be on CPU. if cpu_offload_params and isinstance(model, FSDP): for p in model.parameters(): # Params should always be on CPU self.assertEqual(p.device, torch.device("cpu")) # Unscale the gradients and step sharded_grad_scaler.step(optim) # Update the scale factor sharded_grad_scaler.update() # if save_model, simulate save + load. if save_model: state_dict = {k: v.clone() for k, v in model.state_dict().items()} # Zero params, if save/load state_dict did not work properly, this # would break the parity test with DDP. _zero_model(model) model.load_state_dict(state_dict) if isinstance(model, FSDP): model._assert_state(TrainingState.IDLE) return loss.detach() # type: ignore[possibly-undefined] def _test_fsdp_parity( self, model_class: type[FSDPTestModel], fsdp_init_mode: FSDPInitMode, device_init_mode: DEVICEInitMode, ref_init_fn: Optional[Callable] = None, num_iters: int = 2, save_model: bool = True, cpu_offload: CPUOffload = CPUOffload(), backward_prefetch: Optional[BackwardPrefetch] = None, sharding_strategy: Optional[ShardingStrategy] = None, mixed_precision: Optional[MixedPrecision] = None, forward_prefetch: bool = False, use_orig_params: bool = False, enable_sharded_grad_scaler: bool = False, use_pure_fp16: bool = False, init_kwargs: Optional[dict[str, Any]] = None, sharded_grad_scaler_kwargs: Optional[dict[str, Any]] = None, **fsdp_kwargs, ): """ Tests FSDP training against a reference, which defaults to DDP but may be customized with ``ref_init_fn``. Args: model_class (Type[FSDPTestModel]): A model class that inherits from ``FSDPTestModel``, which defines the expected interface. fsdp_init_mode (FSDPInitMode): The mode to initialize the FSDP-wrapped model. This should not be ``NO_FSDP``. ref_init_fn (Optional[Callable]): A callable to invoke that wraps a non-wrapped model to construct the reference model, where this wrapper should provide data parallel semantics. If ``None``, then the callable defaults to the DDP constructor. 
""" assert ( fsdp_init_mode != FSDPInitMode.NO_FSDP ), "Expects an FSDP init mode that wraps with FSDP" if init_kwargs is None: init_kwargs = {} lr = 1e-2 rank = self.process_group.rank() # Establish reference behavior with DDP model = model_class.init( self.process_group, FSDPInitMode.NO_FSDP, DEVICEInitMode.DEVICE_BEFORE, deterministic=True, **init_kwargs, ) if ref_init_fn is None: if TEST_HPU: ref_model = DDP( model, device_ids=[DEVICE_TYPE], output_device=DEVICE_TYPE ) else: ref_model = DDP(model, device_ids=[rank], output_device=rank) else: ref_model = ref_init_fn(model) if use_pure_fp16: ref_model = ref_model.half() ref_loss = self._train_for_several_steps( ref_model, num_iters, autocast=mixed_precision is not None, lr=lr, fsdp_cpu_offload=cpu_offload, mixed_precision=mixed_precision, enable_sharded_grad_scaler=enable_sharded_grad_scaler, use_pure_fp16=use_pure_fp16, sharded_grad_scaler_kwargs=sharded_grad_scaler_kwargs, ) ddp_params = list(ref_model.parameters()) # Check against FSDP behavior fsdp_kwargs.update( { "cpu_offload": cpu_offload, "backward_prefetch": backward_prefetch, "sharding_strategy": sharding_strategy, "mixed_precision": mixed_precision, "forward_prefetch": forward_prefetch, "use_orig_params": use_orig_params, } ) try: fsdp_model = model_class.init( self.process_group, fsdp_init_mode, device_init_mode, fsdp_kwargs, deterministic=True, **init_kwargs, ) except Exception as e: raise ValueError(f"Initializing {model_class} raised error {str(e)}") from e if not isinstance(fsdp_model, FSDP): # Enforce that we wrap with top-level FSDP since we are comparing # assuming a data parallel reference and some test models may not # do so in their `init()` method fsdp_model = FSDP(fsdp_model, self.process_group, **fsdp_kwargs) if use_pure_fp16: # Change the model parameter dtype after FSDP initialization fsdp_model = fsdp_model.half() if device_init_mode == DEVICEInitMode.DEVICE_AFTER: fsdp_model = fsdp_model.to(DEVICE_TYPE) offload_params = cpu_offload is not None and cpu_offload.offload_params # Offloading parameters with `DEVICE_AFTER` should raise an error during # lazy initialization due to the parameter devices not being CPU; # otherwise, all parameter devices should be CPU expects_device_error = ( offload_params and device_init_mode == DEVICEInitMode.DEVICE_AFTER ) expects_cpu_device = ( offload_params and device_init_mode != DEVICEInitMode.DEVICE_AFTER ) if expects_cpu_device: cpu_device = torch.device("cpu") for param in fsdp_model.parameters(): self.assertEqual(param.device, cpu_device) context = ( self.assertRaisesRegex( RuntimeError, "An FSDP-managed module with parameter CPU offloading enabled " f"has parameters on {DEVICE_TYPE}", ) if expects_device_error else nullcontext() ) with context: fsdp_loss = self._train_for_several_steps( fsdp_model, num_iters, autocast=False, lr=lr, fsdp_cpu_offload=cpu_offload, save_model=save_model, mixed_precision=mixed_precision, enable_sharded_grad_scaler=enable_sharded_grad_scaler, use_pure_fp16=use_pure_fp16, sharded_grad_scaler_kwargs=sharded_grad_scaler_kwargs, ) # No need to check for parameter and loss parity if expecting an error if expects_device_error: return # Check parameter devices are CPU if offloading to CPU before calling # `get_full_params()`, which will cast the parameters to FP32 if offload_params: cpu_device = torch.device("cpu") for param in fsdp_model.parameters(): self.assertEqual(param.device, cpu_device) fsdp_loss = fsdp_loss.to(DEVICE_TYPE) fsdp_unsharded_params = get_full_params(fsdp_model) # Do not check 
dtype since the reference DDP loss may not be the same # dtype as the FSDP loss in the case of mixed precision torch.testing.assert_close(ref_loss, fsdp_loss, check_dtype=False) # Do not check for parameter parity if using mixed precision since (1) # the DDP parameters are in FP16 (from `half()`) while the FSDP # parameters are in FP32 (from `summon_full_params()`) and (2) DDP runs # the optimizer in FP16 while FSDP runs it in FP32 # TODO: Disable checking the parameters for pure FP16 due to floating # point inaccuracy. Note that this means that the backward pass is not # checked: https://github.com/pytorch/pytorch/issues/90784 if mixed_precision is None and not use_pure_fp16: self.assertEqual( ddp_params, fsdp_unsharded_params, exact_device=True, msg="FSDP did not match DDP", ) def compiled_fsdp_test(compile_compute_on_module: Optional[type] = None): def fully_shard_with_compiled_compute(*args, **kwargs): torch.distributed.fsdp.fully_shard(*args, **kwargs) # type: ignore[operator] if compile_compute_on_module is None or isinstance( args[0], compile_compute_on_module ): args[0].compile() class FullyShardMode(Enum): EAGER = auto() COMPILED_COMPUTE = auto() def decorator(func): @wraps(func) def wrapper(*args, **kwargs): original_fully_shard: Any = torch.distributed.fsdp.fully_shard for mode in FullyShardMode: if mode != FullyShardMode.EAGER and not has_triton(): warnings.warn("Inductor on GPU needs Triton and recent GPU arch") continue # barrier to ensure thread reading the same value original_skip_fsdp_hooks = torch._dynamo.config.skip_fsdp_hooks original_compile_threads = torch._inductor.config.compile_threads torch.distributed.barrier() if mode == FullyShardMode.EAGER: fully_shard_patch = original_fully_shard elif mode == FullyShardMode.COMPILED_COMPUTE: torch._dynamo.config.skip_fsdp_hooks = True torch._inductor.config.compile_threads = 1 fully_shard_patch = fully_shard_with_compiled_compute # type: ignore[assignment] else: raise NotImplementedError( f"Need to implement FullyShardMode={mode}" ) # fully_shard is imported as a global # through `from ... import fully_shard` func.__globals__[original_fully_shard.__name__] = fully_shard_patch func(*args, **kwargs) # other threads use patched func before this thread restores torch.distributed.barrier() func.__globals__[original_fully_shard.__name__] = original_fully_shard torch._dynamo.config.skip_fsdp_hooks = original_skip_fsdp_hooks torch._inductor.config.compile_threads = original_compile_threads return wrapper return decorator class SkipModule(nn.Module): def __init__(self) -> None: super().__init__() self.lin = nn.Linear(10, 10, bias=False) def forward(self, x): return self.lin(x) class NestedLinear(nn.Module): def __init__(self, fsdp_wrap): super().__init__() if fsdp_wrap: self.nested_linear = wrap(nn.Linear(10, 10, bias=False).to(DEVICE_TYPE)) else: self.nested_linear = nn.Linear(10, 10, bias=False).to(DEVICE_TYPE) def forward(self, x): return self.nested_linear(x) class SkipModel(nn.Module): def __init__(self, double_nest): super().__init__() self.linear = nn.Linear(10, 10, bias=False).to(DEVICE_TYPE) self.linear_skip = SkipModule().to(DEVICE_TYPE) self.nested_linear = wrap( NestedLinear(fsdp_wrap=double_nest), device_id=DEVICE_TYPE ) def forward(self, x): x = self.linear(x) x = self.linear_skip(x) x = self.nested_linear(x) return x ```
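The `FSDPTest` base class above is intended to be subclassed by individual unit-test files, which spawn one process per rank and can sweep configurations with `run_subtests`. The sketch below illustrates that pattern; the test names, the two-GPU requirement, and the swept `use_orig_params` values are assumptions for the example, while `MLP` is the helper module defined in this file.

```py
# Hypothetical FSDP unit test built on the fixtures above (sketch only).
import torch
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
from torch.testing._internal.common_fsdp import MLP, FSDPTest
from torch.testing._internal.common_utils import run_tests


class TestMyFSDPFeature(FSDPTest):
    @property
    def world_size(self) -> int:
        return 2

    @skip_if_lt_x_gpu(2)
    def test_mlp_training(self):
        # Sweep several configurations inside one spawned multi-process run.
        self.run_subtests(
            {"use_orig_params": [False, True]},
            self._test_mlp_training,
        )

    def _test_mlp_training(self, use_orig_params: bool):
        torch.manual_seed(0)
        model = FSDP(MLP(8).cuda(), use_orig_params=use_orig_params)
        optim = torch.optim.SGD(model.parameters(), lr=1e-2)
        for _ in range(3):
            loss = model(torch.randn(4, 8, device="cuda")).sum()
            loss.backward()
            optim.step()
            optim.zero_grad()


if __name__ == "__main__":
    run_tests()
```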
============================================================================================================================= SOURCE CODE FILE: common_jit.py LINES: 10 SIZE: 15.79 KB PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\common_jit.py ENCODING: utf-8 ```py # mypy: ignore-errors # Torch import torch import torch.cuda import torch.jit import torch.jit._logging import torch.jit.frontend import torch.jit.quantized # Testing utils from torch.testing._internal.common_dtype import floating_and_complex_types_and from torch.testing._internal.common_utils import TestCase, \ freeze_rng_state, TemporaryFileName, enable_profiling_mode_for_profiling_tests, is_iterable_of_tensors from torch.testing._internal.common_utils import enable_profiling_mode # noqa: F401 # Standard library from itertools import chain from typing import Union from torch._C import TensorType import io def check_output_types(self, func, ref_outputs, args, kwargs): graph = getattr(func, 'last_graph', None) types = [o.type() for o in graph.outputs()] self.assertTrue(len(types) == 1) t = types[0] torch._C._jit_assert_is_instance(ref_outputs, t) # Test names in this set are only checked for a single derivative nn_functional_single_grad = frozenset('test_nn_' + name for name in [ 'pdist', 'multilabel_margin_loss', 'max_unpool3d', 'multi_margin_loss', 'binary_cross_entropy', 'binary_cross_entropy_size_average', 'ctc_loss', 'grid_sample', ]) def check_against_reference(self, func, reference_func, output_func, args, kwargs=None, allow_unused=True, check_types=True, no_grad=False, no_gradgrad=False): """Verifies a function performs identically to some reference implementation. Commonly, this is used to verify that a JIT implementation (output_func) matches the behavior of the eager implementation (reference_func). 
""" kwargs = kwargs if kwargs else {} def allSum(vs): if isinstance(vs, torch.Tensor): vs = (vs,) return sum((i + 1) * v.sum().abs() if v.dtype.is_complex else (i + 1) * v.sum() for i, v in enumerate(vs) if v is not None and v.dtype in floating_and_complex_types_and(torch.half, torch.bfloat16)) def clone_tensor(t, preserve_requires_grad): require_grad = preserve_requires_grad and t.requires_grad return t.detach().clone().requires_grad_(require_grad) def clone_inputs(preserve_requires_grad: bool): inputs: list[Union[torch.Tensor, list[torch.Tensor]]] = [] for arg in args: if isinstance(arg, torch.Tensor): inputs.append(clone_tensor(arg, preserve_requires_grad)) elif is_iterable_of_tensors(arg): inputs.append([clone_tensor(t, preserve_requires_grad) for t in arg]) else: inputs.append(arg) return inputs # Returns tensors in args that requires_grad, including tensors in TensorList args def get_recording_tensors(args): recording_tensors: list[torch.Tensor] = [] for arg in args: if isinstance(arg, torch.Tensor) and arg.requires_grad: recording_tensors.append(arg) elif is_iterable_of_tensors(arg): recording_tensors.extend(filter(lambda t: t.requires_grad, arg)) return recording_tensors # test no gradients case nograd_inputs = clone_inputs(preserve_requires_grad=False) outputs = self.runAndSaveRNG(reference_func, nograd_inputs, kwargs) with enable_profiling_mode_for_profiling_tests(): outputs_test = self.runAndSaveRNG(func, nograd_inputs, kwargs) self.assertEqual(outputs, outputs_test) if check_types: check_output_types(self, func, outputs_test, nograd_inputs, kwargs) if no_grad: # skip grad tests return with enable_profiling_mode_for_profiling_tests(): # test single grad case recording_inputs = clone_inputs(preserve_requires_grad=True) recording_tensors = get_recording_tensors(recording_inputs) outputs = output_func(self.runAndSaveRNG(reference_func, recording_inputs, kwargs)) grads = torch.autograd.grad(allSum(outputs), recording_tensors, allow_unused=allow_unused) outputs_test = output_func(self.runAndSaveRNG(func, recording_inputs, kwargs)) grads_test = torch.autograd.grad(allSum(outputs_test), recording_tensors, allow_unused=allow_unused) self.assertEqual(outputs, outputs_test) self.assertEqual(grads, grads_test) # test the grad grad case if self._testMethodName in nn_functional_single_grad or no_gradgrad: return outputs = output_func(self.runAndSaveRNG(reference_func, recording_inputs, kwargs)) l1 = allSum(outputs) grads = torch.autograd.grad(l1, recording_tensors, create_graph=True, allow_unused=allow_unused) l2 = (allSum(grads) * l1) grads2 = torch.autograd.grad(l2, recording_tensors, allow_unused=allow_unused) recording_inputs = clone_inputs(preserve_requires_grad=True) recording_tensors = get_recording_tensors(recording_inputs) outputs_test = output_func(self.runAndSaveRNG(func, recording_inputs, kwargs)) l1_test = allSum(outputs_test) grads_test = torch.autograd.grad( l1_test, recording_tensors, create_graph=True, allow_unused=allow_unused) l2_test = (allSum(grads_test) * l1_test) grads2_test = torch.autograd.grad(l2_test, recording_tensors, allow_unused=allow_unused) self.assertEqual(outputs, outputs_test) self.assertEqual(grads, grads_test) for g2, g2_test in zip(grads2, grads2_test): if g2 is None and g2_test is None: continue self.assertEqual(g2, g2_test, atol=5e-4, rtol=1e-4) class JitCommonTestCase(TestCase): def createFunctionFromGraph(self, trace): graph = trace if isinstance(trace, torch._C.Graph) else trace.graph() return torch._C._create_function_from_graph("forward", graph) 
    def assertExportImport(self, trace, inputs):
        m = self.createFunctionFromGraph(trace)
        self.assertExportImportModule(m, inputs)

    def assertExportImportModule(self, m, inputs):
        m_import = self.getExportImportCopy(m)
        a = self.runAndSaveRNG(m, inputs)
        b = self.runAndSaveRNG(m_import, inputs)
        self.assertEqual(a, b, "Results of original model and "
                               "exported/imported version of model differed")

    def runAndSaveRNG(self, func, inputs, kwargs=None):
        kwargs = kwargs if kwargs else {}
        with freeze_rng_state():
            results = func(*inputs, **kwargs)
        return results

    def getExportImportCopy(self, m, also_test_file=True, map_location=None):
        buffer = io.BytesIO()
        torch.jit.save(m, buffer)
        buffer.seek(0)
        imported = torch.jit.load(buffer, map_location=map_location)

        if not also_test_file:
            return imported

        with TemporaryFileName() as fname:
            torch.jit.save(imported, fname)
            return torch.jit.load(fname, map_location=map_location)

    def autoDiffErrorMessage(self, should_autodiff_node, nodes_not_in_diff_graph,
                             fusion_nodes_not_found, non_fusible_nodes_being_fused,
                             fusion_nodes_found, nodes_in_diff_graph):
        err_msg = "\nFailure in testing nodes' autodifferentiation. "
        if should_autodiff_node:
            err_msg += "One or more nodes were expected to be autodiffed, " \
                "but were not found in specified fusible/nonfusible " \
                "DifferentiableGraph groups. \nSpecifically:"
            # The node is intended to appear in a differentiable graph but doesn't
            diff_nodes_missing = []
            # The node is intended to appear in a differentiable graph
            # outside of a fusion group but instead is in a fusion group
            diff_nodes_in_fusion = []
            # The node is intended to appear in a fusion group but doesn't
            fusion_nodes_missing = []
            # The node is intended to appear in a fusion group but instead
            # is just in an outer differentiable graph
            fusion_nodes_in_diff = []
            for node in nodes_not_in_diff_graph:
                if node in non_fusible_nodes_being_fused:
                    diff_nodes_in_fusion.append(node)
                else:
                    diff_nodes_missing.append(node)
            for node in fusion_nodes_not_found:
                if node in nodes_in_diff_graph:
                    fusion_nodes_in_diff.append(node)
                else:
                    fusion_nodes_missing.append(node)
            if len(diff_nodes_missing) > 0:
                err_msg += f"\n {diff_nodes_missing} were not in one of the " \
                    "DifferentiableGraphs when they were expected to be. " \
                    "Did you intend for these nodes to be autodiffed? " \
                    "If not, remove them from the list of nonfusible nodes."
            if len(diff_nodes_in_fusion) > 0:
                err_msg += f"\n {diff_nodes_in_fusion} were found in one of the FusionGroups " \
                    "when they were expected to be just in a DifferentiableGraph. If it was " \
                    "intended for these nodes to be in FusionGroups, reclassify these nodes as " \
                    "fusible nodes. If these nodes were not intended to be fused, your " \
                    "autodifferentiation logic might be wrong."
            if len(fusion_nodes_missing) > 0:
                err_msg += f"\n {fusion_nodes_missing} were not in one of the FusionGroups " \
                    "of the DifferentiableGraphs when they were expected to be. " \
                    "They were also not found in an outer DifferentiableGraph. Did you " \
                    "intend for these nodes to be autodifferentiated? If not, you should " \
                    "remove these nodes from the test's fusible nodes. Otherwise your " \
                    "autodifferentiation logic might be wrong."
            if len(fusion_nodes_in_diff) > 0:
                err_msg += f"\n {fusion_nodes_in_diff} were not in one of the FusionGroups " \
                    "of the DifferentiableGraphs when they were expected to be, " \
                    "instead they were found just in an outer DifferentiableGraph. " \
                    "Did you intend for these nodes to be fused? If not, you should " \
                    "move these nodes into the test's nonfusible nodes. Otherwise your " \
                    "autodifferentiation logic might be wrong."
        else:
            err_msg += "One or more nodes were not expected to be autodiffed " \
                "but were found in a DifferentiableGraph or in a FusionGroup " \
                "of a DifferentiableGraph. Did you intend for these nodes to be " \
                "autodiffed? If so, change this test to expect autodifferentiation. " \
                "\nSpecifically:"
            if len(fusion_nodes_found) > 0:
                err_msg += f"\n {fusion_nodes_found} were not expected to be in " \
                    "one of the DifferentiableGraphs, but appeared in a FusionGroup " \
                    "of a DifferentiableGraph. "
            if len(nodes_in_diff_graph) > 0:
                err_msg += f"\n {nodes_in_diff_graph} were not expected to " \
                    "be in one of the DifferentiableGraphs but were."
        return err_msg

    def assertAutodiffNode(self, graph, should_autodiff_node, nonfusible_nodes, fusible_nodes):
        diff_nodes = graph.findAllNodes('prim::DifferentiableGraph')
        diff_subgraphs = [node.g('Subgraph') for node in diff_nodes]

        # Note: currently no tests have fusible_nodes
        fusion_nodes = list(chain.from_iterable([g.findAllNodes('prim::FusionGroup') for g in diff_subgraphs]))
        fusion_subgraphs = [node.g('Subgraph') for node in fusion_nodes]

        # For any non-fusible node, it must show up in one of the DifferentiableGraphs.
        nodes_in_diff_graph = []
        nodes_not_in_diff_graph = []
        non_fusible_nodes_being_fused = []
        for node in nonfusible_nodes:
            if any(g.findNode(node) is not None for g in diff_subgraphs):
                nodes_in_diff_graph.append(node)
            else:
                nodes_not_in_diff_graph.append(node)
            if any(g.findNode(node) is not None for g in fusion_subgraphs):
                non_fusible_nodes_being_fused.append(node)
        found_all_nonfusible_nodes = len(nodes_in_diff_graph) == len(nonfusible_nodes)

        # For any fusible node, it must show up in one of the FusionGroups in one of the DifferentiableGraphs.
        fusion_nodes_found = []
        fusion_nodes_not_found = []
        for node in fusible_nodes:
            if any(g.findNode(node) is not None for g in fusion_subgraphs):
                fusion_nodes_found.append(node)
            else:
                fusion_nodes_not_found.append(node)
        found_all_fusible_nodes = len(fusion_nodes_found) == len(fusible_nodes)

        if should_autodiff_node is not None:
            err_msg = self.autoDiffErrorMessage(should_autodiff_node,
                                                nodes_not_in_diff_graph,
                                                fusion_nodes_not_found,
                                                non_fusible_nodes_being_fused,
                                                fusion_nodes_found,
                                                nodes_in_diff_graph)
            self.assertEqual(should_autodiff_node,
                             found_all_nonfusible_nodes and found_all_fusible_nodes,
                             err_msg)

    def checkShapeAnalysis(self, out_sizes: Union[list[int], list[list[int]]],
                           traced_graph, assert_propagation, constant_prop=True):
        # repropagate input shapes provided by tracing,
        prev_symbolic_shapes_test_enabled = torch._C._jit_symbolic_shapes_test_mode_enabled()
        for enable_test_mode in [True, False]:
            # here we are testing allowing/disallowing substituting in complete shapes as constants,
            # disallowing constants helps stress test partial eval and substitution pipeline
            torch._C._jit_set_symbolic_shapes_test_mode(enable_test_mode)
            torch._C._jit_erase_non_input_shape_information(traced_graph)
            if constant_prop:
                torch._C._jit_pass_constant_propagation(traced_graph)
            torch._C._jit_pass_propagate_shapes_on_graph(traced_graph)
            # Add sizes to default tensor type to avoid checking something out of scope
            # and difficulties with tracer leaving in other parts of tensor type
            output = next(traced_graph.outputs()).type()

            def test_type(type, actual_size):
                sizes = type.symbolic_sizes()
                out_type = TensorType.get().with_sizes(sizes)
                actual_type = TensorType.get().with_sizes(actual_size)

                # always check actual shape is a subtype of the output
                self.assertTrue(actual_type.isSubtypeOf(out_type))

                # and then if assertion flag is provided, check shape analysis
                # is successful
                if assert_propagation:
                    self.assertEqual(out_type.sizes(), actual_size)

            if output.isSubtypeOf(torch._C.TensorType.get()):
                test_type(output, out_sizes)
            else:
                tuple_elements = output.elements()
                for i in range(len(tuple_elements)):
                    test_type(tuple_elements[i], out_sizes[i])

        torch._C._jit_set_symbolic_shapes_test_mode(prev_symbolic_shapes_test_enabled)
```
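
For readers unfamiliar with the JIT serialization helpers above, the following standalone sketch shows the same save/load round-trip that `getExportImportCopy` performs through an in-memory buffer before comparing results. It is not part of the original file; the `AddOne` module is a made-up example.

```py
# Minimal sketch of the export/import round-trip used by getExportImportCopy.
import io

import torch


class AddOne(torch.nn.Module):
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return x + 1


scripted = torch.jit.script(AddOne())

# Serialize to an in-memory buffer and load it back, as the helper does.
buffer = io.BytesIO()
torch.jit.save(scripted, buffer)
buffer.seek(0)
reimported = torch.jit.load(buffer)

# The reimported module should behave identically to the original.
x = torch.randn(3)
assert torch.equal(scripted(x), reimported(x))
```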
============================================================================================================================================= SOURCE CODE FILE: common_methods_invocations.py LINES: 1 SIZE: 1192.54 KB PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\common_methods_invocations.py ENCODING: utf-8 ```py # mypy: ignore-errors from functools import wraps, partial from itertools import product, chain, islice import itertools import functools import copy import operator import random import unittest import math import enum import torch import numpy as np import numpy.typing as npt from torch import inf, nan from typing import Any, Union from collections.abc import Sequence from torch.testing import make_tensor from torch.testing._internal.common_dtype import ( _dispatch_dtypes, floating_types, floating_types_and, complex_types, floating_and_complex_types, floating_and_complex_types_and, all_types_and_complex_and, all_types_and, all_types_and_complex, integral_types_and, empty_types, complex_types_and, integral_types, custom_types, all_types_complex_float8_and, ) from torch.testing._internal.common_device_type import \ (onlyCPU, onlyCUDA, onlyNativeDeviceTypes, disablecuDNN, skipCUDAIfNoMagma, skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfNoCusolver, skipCPUIfNoLapack, skipCPUIfNoFFT, skipCUDAIf, precisionOverride, skipCPUIfNoMklSparse, toleranceOverride, tol) from torch.testing._internal.common_cuda import ( PLATFORM_SUPPORTS_FLASH_ATTENTION, PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, SM53OrLater, SM80OrLater, SM89OrLater, with_tf32_off, TEST_CUDNN, _get_torch_cuda_version, _get_torch_rocm_version, ) from torch.testing._internal.common_utils import ( make_fullrank_matrices_with_distinct_singular_values, TEST_WITH_ROCM, IS_FBCODE, IS_WINDOWS, IS_MACOS, TEST_SCIPY, torch_to_numpy_dtype_dict, numpy_to_torch_dtype, TEST_WITH_ASAN, GRADCHECK_NONDET_TOL, slowTest, TEST_WITH_SLOW, TEST_WITH_TORCHINDUCTOR ) from torch.testing._utils import wrapper_set_seed import torch._refs as refs # noqa: F401 import torch._refs.nn.functional import torch._refs.special import torch._refs.linalg import torch._prims as prims # noqa: F401 from torch.utils import _pytree as pytree from packaging import version from torch.testing._internal.opinfo.core import ( # noqa: F401 L, M, S, XS, _NOTHING, _getattr_qual, DecorateInfo, SampleInput, ErrorInput, AliasInfo, NumericsFilter, OpInfo, _generate_reduction_inputs, _generate_reduction_kwargs, sample_inputs_reduction, ReductionOpInfo, reference_inputs_elementwise_binary, make_error_inputs_elementwise_binary, generate_elementwise_binary_tensors, generate_elementwise_binary_arbitrarily_strided_tensors, generate_elementwise_binary_small_value_tensors, generate_elementwise_binary_large_value_tensors, generate_elementwise_binary_extremal_value_tensors, generate_elementwise_binary_broadcasting_tensors, generate_elementwise_binary_with_scalar_samples, generate_elementwise_binary_with_scalar_and_type_promotion_samples, generate_elementwise_binary_noncontiguous_tensors, sample_inputs_elementwise_binary, BinaryUfuncInfo, sample_inputs_elementwise_unary, generate_elementwise_unary_tensors, generate_elementwise_unary_small_value_tensors, generate_elementwise_unary_large_value_tensors, generate_elementwise_unary_extremal_value_tensors, reference_inputs_elementwise_unary, UnaryUfuncInfo, sample_inputs_spectral_ops, SpectralFuncType, SpectralFuncInfo, ShapeFuncInfo, sample_inputs_foreach, ForeachFuncInfo, gradcheck_wrapper_hermitian_input, 
gradcheck_wrapper_triangular_input, gradcheck_wrapper_triangular_input_real_positive_diagonal, gradcheck_wrapper_masked_operation, gradcheck_wrapper_masked_pointwise_operation, clone_sample, ) from torch.testing._internal.opinfo.refs import ( # NOQA: F401 _find_referenced_opinfo, _inherit_constructor_args, PythonRefInfo, ReductionPythonRefInfo, ElementwiseUnaryPythonRefInfo, ElementwiseBinaryPythonRefInfo, ) from torch.testing._internal.opinfo.utils import ( np_unary_ufunc_integer_promotion_wrapper, reference_reduction_numpy, prod_numpy ) from torch.testing._internal import opinfo from torch.testing._internal.opinfo.definitions.linalg import ( sample_inputs_linalg_cholesky, sample_inputs_linalg_cholesky_inverse, sample_inputs_cross, sample_inputs_linalg_qr_geqrf, sample_inputs_linalg_invertible, sample_inputs_lu_solve, sample_inputs_legacy_solve, sample_inputs_svd, sample_inputs_linalg_det_logdet_slogdet, sample_inputs_linalg_lu, sample_inputs_diagonal_diag_embed, error_inputs_diagonal_diag_embed, ) from torch.testing._internal.opinfo.definitions.special import ( sample_inputs_i0_i1, sample_inputs_polygamma, reference_polygamma, ) from torch.testing._internal.opinfo.definitions._masked import ( sample_inputs_softmax_variant, ) from torch.testing._internal.opinfo.definitions.sparse import ( error_inputs_sparse_like_fns, sample_inputs_sparse_like_fns, error_inputs_sparse_mul, sample_inputs_sparse_mul, error_inputs_sparse_reduction_sum, sample_inputs_sparse_reduction_sum ) if TEST_SCIPY: from scipy import stats import scipy.spatial import scipy.special # test if a tensor is close to an integer def close_to_int(x, eps=0.1): if x.is_complex(): y = torch.abs(torch.view_as_complex(torch.frac(torch.view_as_real(x)))) else: y = torch.abs(torch.frac(x)) return (y < eps) | (y > (1 - eps)) def sample_inputs_slice(op_info, device, dtype, requires_grad, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, low=None, high=None, requires_grad=requires_grad) yield SampleInput(make_input(3), 0) yield SampleInput(make_input(20, 30, 40), dim=1, start=1, end=-2) yield SampleInput(make_input(20, 30, 40), dim=1, start=1, end=-2, step=3) yield SampleInput(make_input(20, 30, 40), dim=0, start=-10, end=-2, step=2) def sample_inputs_tensor_split(op_info, device, dtype, requires_grad, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, low=None, high=None, requires_grad=requires_grad) args_cases = ( # Cases with tensor indices. (torch.tensor([1, 2, 3]),), (torch.tensor(1),), (torch.tensor([1, 2, 3]), 1), (torch.tensor([1, 4, 2, 5, 3, 6])[::2], 1), # Cases with list of indices. ((2, 4),), ((2, 4), 1), ((2, 4), -1), # Cases with integer section. 
(3,), (3, 1), (3, -1), ) for args in args_cases: yield SampleInput(make_input((S, S, S)), args=args) def sample_inputs_hsplit(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad) yield SampleInput(make_arg(6), 2) yield SampleInput(make_arg(S, S, S), [1, 2, 3]) def sample_inputs_vsplit(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad) yield SampleInput(make_arg(6, S), 2) yield SampleInput(make_arg(S, S, S), [1, 2, 3]) def sample_inputs_dsplit(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad) yield SampleInput(make_arg(S, S, S), [1, 2, 3]) yield SampleInput(make_arg(S, S, 6), 2) def error_inputs_hsplit(op_info, device, **kwargs): make_arg = partial(make_tensor, dtype=torch.float32, device=device) err_msg1 = ("torch.hsplit requires a tensor with at least 1 dimension, " "but got a tensor with 0 dimensions!") yield ErrorInput(SampleInput(make_arg(()), 0), error_regex=err_msg1) err_msg2 = (f"torch.hsplit attempted to split along dimension 1, " f"but the size of the dimension {S} " f"is not divisible by the split_size 0!") yield ErrorInput(SampleInput(make_arg((S, S, S)), 0), error_regex=err_msg2) # Incorrect type for indices_or_section argument err_msg3 = ("received an invalid combination of arguments.") yield ErrorInput( SampleInput(make_arg((S, S, S)), "abc"), error_type=TypeError, error_regex=err_msg3) def error_inputs_vsplit(op_info, device, **kwargs): make_arg = partial(make_tensor, dtype=torch.float32, device=device) err_msg1 = ("torch.vsplit requires a tensor with at least 2 dimension, " "but got a tensor with 1 dimensions!") yield ErrorInput(SampleInput(make_arg(S), 0), error_regex=err_msg1) err_msg2 = (f"torch.vsplit attempted to split along dimension 0, " f"but the size of the dimension {S} " f"is not divisible by the split_size 0!") yield ErrorInput(SampleInput(make_arg(S, S, S), 0), error_regex=err_msg2) # Incorrect type for indices_or_section argument err_msg3 = ("received an invalid combination of arguments.") yield ErrorInput(SampleInput(make_arg(S, S, S), "abc"), error_type=TypeError, error_regex=err_msg3) def error_inputs_dsplit(op_info, device, **kwargs): make_arg = partial(make_tensor, dtype=torch.float32, device=device) err_msg1 = ("torch.dsplit requires a tensor with at least 3 dimension, " "but got a tensor with 1 dimensions!") yield ErrorInput(SampleInput(make_arg(S), 0), error_regex=err_msg1) err_msg2 = (f"torch.dsplit attempted to split along dimension 2, " f"but the size of the dimension {S} " f"is not divisible by the split_size 0!") yield ErrorInput(SampleInput(make_arg(S, S, S), 0), error_regex=err_msg2) def sample_inputs_as_strided(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) # input shape, output shape, output stride, output storage offset test_cases = ( ((1,), (1,), (1,), 0), ((3, 3), (2, 2), (1, 2), 0), ((3, 3), (2, 2), (1, 2), 1), ((16,), (2, 2, 2, 2), (1, 1, 1, 1), 0), ((16,), (2, 1, 1, 2), (1, 7, 7, 1), 0), ) for input_shape, output_shape, stride, storage_offset in test_cases: input_t = make_arg(input_shape) kwargs = dict(storage_offset=storage_offset) yield SampleInput(input_t, args=(output_shape, stride), kwargs=kwargs) def 
sample_inputs_as_strided_partial_views(op_info, device, dtype, requires_grad, **kwargs): def make_arg(): base = make_tensor((20,), device=device, dtype=dtype) return base[5:15].requires_grad_(requires_grad) # as_strided on offset, partial views yield SampleInput(make_arg(), (2, 2), (1, 2)) yield SampleInput(make_arg(), (2, 2), (1, 2), storage_offset=0) yield SampleInput(make_arg(), (2, 2), (1, 2), storage_offset=10) def sample_inputs_as_strided_scatter(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) # input shape, output shape, output stride, output storage offset test_cases = [ ((1,), (), (), 0), ((1,), (1,), (1,), 0), ((3, 3), (2, 2), (1, 2), 0), ((3, 3), (2, 2), (1, 2), 1), ((3, 3), (2, 2), (2, 1), 0), # Scatter to larger dimensions ((16,), (2, 2, 2, 2), (8, 4, 2, 1), 0), # Scatter to larger dimensions with strides inverted ((16,), (2, 1, 1, 2), (1, 2, 4, 8), 0), ] for input_shape, output_shape, stride, storage_offset in test_cases: input_t = make_arg(input_shape) input_src = make_arg(output_shape) yield SampleInput(input_t, input_src, output_shape, stride, storage_offset=storage_offset) def error_inputs_as_strided_scatter(op_info, device, **kwargs): make_arg = partial(make_tensor, device=device, dtype=torch.float32, requires_grad=False) # Create a small tensor and try to scatter it out of bounds input_t = make_arg([4, 4]) input_src = make_arg([2, 2]) yield ErrorInput( SampleInput(input_t, input_src, [2, 2], [200, 200], storage_offset=0), error_regex="itemsize 4 requiring a storage size of 1604 are out of bounds for storage of size 64" ) def sample_inputs_combinations(op_info, device, dtype, requires_grad, **kwargs): inputs = ( (0,), (0, 1), (0, 1, 2, 3), ) rvals = [1, 2, 4] products = product(inputs, rvals, [False, True]) for input_data, r, with_replacement in products: input_t = torch.tensor(input_data, device=device, dtype=dtype, requires_grad=requires_grad) yield SampleInput(input_t, r=r, with_replacement=with_replacement) def sample_inputs_cartesian_prod(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(torch.tensor, device=device, dtype=dtype, requires_grad=requires_grad) # constructs 1-D tensors with varying number of elements a = make_arg((0,)) b = make_arg((0, 1)) c = make_arg((0, 1, 2, 3)) # sample with only 1 tensor yield SampleInput(a) # sample with 2 tensors yield SampleInput(a, b) # sample with 3 tensors yield SampleInput(a, b, c) def sample_inputs_cosine_similarity(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) # Ordered as input_shape, dict of dim and eps cases: tuple[tuple, dict] = ( # type: ignore[assignment] ((S, S), {'dim': 1}), ((S, 2), {'dim': -1}), ((S,), {'dim': 0, 'eps': 0.5}), ((), {'dim': 0}), ((S, S, M), {'dim': 2}), ((S, S), {}) ) for input_shape, kwargs in cases: yield SampleInput(make_arg(input_shape), args=(make_arg(input_shape),), kwargs=kwargs) # Test for Broadcasting yield SampleInput(make_arg((1, 2, 3)), args=(make_arg((2, 1, 3)),), kwargs={'dim': -1}) yield SampleInput(make_arg((1, 2, 3)), args=(make_arg((2, 1, 3)),), kwargs={'dim': -2}) yield SampleInput(make_arg((2, 3)), args=(make_arg((2, 1, 3)),), kwargs={'dim': -1}) def sample_inputs_item(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=False) cases = ( (), (()), (1), ((1,)), ) for shape in cases: yield 
SampleInput(make_arg(shape)) def error_inputs_item(op, device, **kwargs): make_arg = partial(make_tensor, dtype=torch.float32, device=device, requires_grad=False) cases = ( (M), ((S,)), (S, S), (S, M, L), ) for shape in cases: yield ErrorInput( SampleInput(make_arg(shape)), error_type=RuntimeError, error_regex="elements cannot be converted to Scalar") def sample_inputs_batch_norm(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) make_arg_without_requires_grad = partial(make_tensor, device=device, dtype=dtype, requires_grad=False) # Ordered as: input shape, kwargs for training, momentum, eps cases: tuple[tuple[int], dict] = ( # type: ignore[assignment] ((S, S, S), {'training': True, 'momentum': 0.5, 'eps': 0.6}), ((3, 2, 4), {'training': False, 'momentum': -1.2}), ((3, 1), {'training': True, 'momentum': 0.0}), ((0,), {'training': True}), ((0,), {'training': False}), ((3, 2, 3, 4), {'training': True, 'momentum': -1.0, 'eps': 0.5}), ((3, 2, 3, 4), {'training': False, 'momentum': -1.0, 'eps': 0.5}), ((2, 1), {}), ) for input_shape, kwargs in cases: # args: running mean, running var, weight and bias should necessarily be of shape: (channels,) channels = input_shape[1] if len(input_shape) > 1 else 0 weight = make_arg(channels) if channels > 0 else None bias = make_arg(channels) if channels > 0 else None running_mean = make_arg_without_requires_grad(channels, low=0) running_var = make_arg_without_requires_grad(channels, low=0) yield SampleInput( make_arg(input_shape), args=( running_mean, running_var, weight, bias ), kwargs=kwargs ) # Checking for permutations of weights and biases as `None` weights = [channels, None, None] biases = [None, channels, None] is_training = [True, False, False] for weight, bias, training in zip(weights, biases, is_training): yield SampleInput( make_arg(input_shape), args=( running_mean, running_var, make_arg(channels), make_arg(channels) ), kwargs={'training': training} ) # Test case for no optional kwargs # running_mean and running_var are required in evaluation mode (training: False) but not in training mode yield SampleInput(make_arg((1, 2, 3)), args=(None, None, None, None), kwargs={'training': True}) def sample_inputs_softmax_backward_data(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial( make_tensor, device=device, dtype=dtype, requires_grad=requires_grad ) cases = [ ((S,), 0), ((S, S), 0), ((S, M, S), -1), ] input_dtypes = [dtype] if dtype == torch.float and device == 'cuda': input_dtypes += [torch.float16] for (shape, dim), input_dtype in product(cases, input_dtypes): input = make_arg(shape) output = torch.nn.functional.softmax(input, dim=dim, dtype=input_dtype) yield SampleInput(make_arg(shape), output, dim, input_dtype) def sample_inputs_native_batch_norm(op_info, device, dtype, requires_grad, **kwargs): samples = sample_inputs_batch_norm(op_info, device, dtype, requires_grad, **kwargs) for sample in samples: # torch.native_batch_norm does not support 0 numel tensors # IndexError: Dimension out of range (expected to be in range of [-1, 0], but got 1) if sample.input.numel() == 0: continue args = sample.args training = sample.kwargs.get('training', True) momentum = sample.kwargs.get('momentum', 0.5) eps = sample.kwargs.get('eps', 1e-5) yield SampleInput(sample.input, args=(args[2], args[3], args[0], args[1], training, momentum, eps)) def sample_inputs__native_batch_norm_legit(op_info, device, dtype, requires_grad, **kwargs): samples = 
sample_inputs_batch_norm(op_info, device, dtype, requires_grad, **kwargs) for sample in samples: # torch.native_batch_norm does not support 0 numel tensors # IndexError: Dimension out of range (expected to be in range of [-1, 0], but got 1) if sample.input.numel() == 0: continue args = sample.args training = sample.kwargs.get('training', True) momentum = sample.kwargs.get('momentum', 0.5) eps = sample.kwargs.get('eps', 1e-5) if args[0] is not None and args[1] is not None: yield SampleInput(sample.input, args=(args[2], args[3], args[0], args[1], training, momentum, eps)) else: yield SampleInput(sample.input, args=(args[2], args[3], training, momentum, eps)) def sample_inputs__batch_norm_with_update(op_info, device, dtype, requires_grad, **kwargs): samples = sample_inputs_batch_norm(op_info, device, dtype, requires_grad, **kwargs) for sample in samples: # torch.native_batch_norm does not support 0 numel tensors # IndexError: Dimension out of range (expected to be in range of [-1, 0], but got 1) if sample.input.numel() == 0: continue args = sample.args momentum = sample.kwargs.get('momentum', 0.5) eps = sample.kwargs.get('eps', 1e-5) if any(args[i] is None for i in range(4)): continue yield SampleInput(sample.input, args=(args[2], args[3], args[0], args[1], momentum, eps)) def sample_inputs_nn_activation_relu(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) cases = ( (()), ((S, )), ((S, S)), ((S, M, S)) ) for shape in cases: yield SampleInput(make_arg(shape)) def sample_inputs_prelu(op_info, device, dtype, requires_grad, **kwargs): op_kwargs = op_info.sample_kwargs(device, dtype, None)[0] yield from sample_inputs_elementwise_unary(op_info, device, dtype, requires_grad, op_kwargs=op_kwargs) make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) cases = ( (()), ((S, )), ((S, S)), ((S, M, S)) ) for shape in cases: for weight in [-1., 0., 0.8, 1.]: weight_tensor = torch.tensor(weight, device=device, dtype=dtype, requires_grad=requires_grad) yield SampleInput(make_arg(shape), args=(weight_tensor,)) channel_size = shape[1] if len(shape) >= 2 else 1 yield SampleInput(make_arg(shape), args=(make_arg((channel_size,)),)) weight_tensor = torch.tensor(1., device=device, dtype=dtype, requires_grad=requires_grad) yield SampleInput(make_arg((S, S)), kwargs=dict(weight=weight_tensor,)) yield SampleInput(make_arg((S, S)), kwargs=dict(weight=make_arg((S,)),)) def reference_inputs_prelu(op, device, dtype, requires_grad, **kwargs): yield from sample_inputs_prelu(op, device, dtype, requires_grad, **kwargs) yield from reference_inputs_elementwise_unary(op, device, dtype, requires_grad, **kwargs) def sample_kwargs_prelu_scalar_weight(device, dtype, input): weight = torch.rand((), device=device, dtype=dtype) # NumPy does not support bfloat16, so we default to float32 (only for NumPy) in that case if dtype == torch.bfloat16: weight_cpu = weight.to(dtype=torch.float32, device="cpu") else: weight_cpu = weight.cpu() np_weight = weight_cpu.numpy() return ({'weight': weight}, {'weight': np_weight}) def error_inputs_prelu(op, device): # Weight has numel != 1, but self.ndim is zero-dim tensor inp = make_tensor((), device=device, dtype=torch.float32) weight = make_tensor((2,), device=device, dtype=torch.float32) yield ErrorInput(SampleInput(inp, kwargs={'weight': weight}), error_regex="Not allow zero-dim input tensor.") # Weight has numel != 1, but numel does not match channel size inp = make_tensor((2, 
8, 3), device=device, dtype=torch.float32) weight = make_tensor((9,), device=device, dtype=torch.float32) yield ErrorInput(SampleInput(inp, kwargs={'weight': weight}), error_regex="Mismatch of parameter numbers and input channel size.") # Weight is neither a scalar nor 1-D tensor inp = make_tensor((2, 8, 3), device=device, dtype=torch.float32) weight = make_tensor((2, 4), device=device, dtype=torch.float32) yield ErrorInput(SampleInput(inp, kwargs={'weight': weight}), error_regex="prelu: Expected `weight` to be a scalar or 1D tensor, but got: ndim = 2") # src and index tensors must have the same # of dimensions def sample_inputs_norm(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) # ord = inf is tested in inputs_norm_inf as it fails on some tests cases = [ ((S, S), (2,), '2'), ((S, S), (0,), '0'), ((S, S), (0.5,), '0_5'), ((S, S), (1,), '1'), ((S, S), (3,), '3'), ((S, S), (-1,), 'neg_1'), ((S, S), (-2,), 'neg_2'), ((S, S), (-0.5,), 'neg_0_5'), ((S, S), (-1.5,), 'neg_1_5'), ] cases_nonzero_input = ( ((S, S, S), (1.5,), '1_5_default'), ((S, S, S), (1.5, 1), '1_5_dim'), ((S, S, S), (1.5, -1), '1_5_neg_dim'), ((S, S, S), (1.5, 1, True), 'keepdim_1_5_dim'), ((S, S, S), (1.5, -1, True), 'keepdim_1_5_neg_dim'), ) cases_posdim = ( ((S, S), (-2, 1,), 'neg_2_dim'), ((S, S), (-1, 1,), 'neg_1_dim'), ((S, S), (0, 1,), '0_dim'), ((S, S), (1, 1,), '1_dim'), ((S, S), (2, 1,), '2_dim'), ((S, S), (3, 1,), '3_dim'), ((S, S, S), (2, 1), '2_dim'), ((S, S, S), (3, 1), '3_dim'), ((S, S, S), (2, 1, True), 'keepdim_2_dim'), ((S, S, S), (3, 1, True), 'keepdim_3_dim'), ((), (2, 0), '2_dim_scalar'), ((), (3, 0), '3_dim_scalar'), ((), (2, 0, True), 'keepdim_2_dim_scalar'), ((), (3, 0, True), 'keepdim_3_dim_scalar'), ) cases_negdim = ((shape, args[:1] + (-args[1],) + args[2:], name.replace("_dim", "_neg_dim")) for shape, args, name in cases_posdim) for shape, args, name in itertools.chain(cases, cases_posdim, cases_negdim): yield SampleInput(make_arg(shape), args=args, name=name) for shape, args, name in cases_nonzero_input: yield SampleInput(make_arg(shape, exclude_zero=True), args=args, name=name) def sample_inputs_norm_fro(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) cases = ( ((S, S), (), 'default'), ((S, S), ('fro',), 'fro_default'), ((S, S), ('fro', [0, 1],), 'fro'), ) for shape, args, name in cases: yield SampleInput(make_arg(shape), args=args, name=name) def sample_inputs_norm_nuc(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) cases = ( ((S, S), ('nuc',), 'nuc'), ((S, S, S), ('nuc', [1, 2]), 'nuc_batched'), ) for shape, args, name in cases: yield SampleInput(make_arg(shape), args=args, name=name) def sample_inputs_norm_inf(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) cases = ( ((S, S), (-inf,), '-inf'), ((S, S), (inf,), 'inf'), ((S, S), (inf, 1,), 'inf_2_dim'), ((S, S), (inf, -1,), 'inf_2_neg_dim'), ) for shape, args, name in cases: yield SampleInput(make_arg(shape), args=args, name=name) def sample_inputs_equal(op, device, dtype, requires_grad, **kwargs): make_arg = partial( make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) shapes = ( ((), ()), ((S,), ()), ((), (S,)), ((S, 1), (S,)), ((M, S), ()), ((S, S), (S, S)) ) for 
shape_lhs, shape_rhs in shapes: lhs = make_arg(shape_lhs) rhs = make_arg(shape_rhs) broadcasts_input = shape_lhs != torch.broadcast_shapes(shape_lhs, shape_rhs) yield SampleInput(lhs, args=(rhs,), broadcasts_input=broadcasts_input) if shape_lhs == shape_rhs: yield SampleInput(lhs, args=(lhs.clone().detach_(),)) def sample_inputs_jiterator(op, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) shapes = ( ((), ()), ((S,), ()), ((S, 1), (S,)), ((M, S), ()), ((S, M, S), (M, S)), ((S, M, S), (S, M, S)), ((M, 1, S), (M, S)), ((M, 1, S), (1, M, S)), ((0, 1, 3), (0, 10, 3)) ) num_inputs = kwargs.get('num_inputs') sample_kwargs = kwargs.get('sample_kwargs', {}) for shape_lhs, shape_rhs in shapes: lhs = make_arg(shape_lhs) args = [make_arg(shape_rhs) for _ in range(num_inputs - 1)] broadcasts_input = (shape_lhs != torch.broadcast_shapes(shape_lhs, shape_rhs)) yield SampleInput(lhs, args=tuple(args), kwargs=sample_kwargs, broadcasts_input=broadcasts_input) def sample_inputs_broadcast_shapes(op, device, dtype, requires_grad, **kwargs): shapes = ( ((), ()), ((S,), ()), ((S, 1), (S,)), ((S, 1), S), ((M, S), ()), ((S, M, S), (M, S)), ((S, M, S), (S, M, S)), ((M, 1, S), (M, S)), ((M, 1, S), (1, M, S)), ((0, 1, 3), (0, 10, 3)) ) for shape in shapes: inp, *arg0 = shape yield SampleInput(inp, args=tuple(arg0)) def sample_inputs_add_sub(op, device, dtype, requires_grad, **kwargs): yield from sample_inputs_elementwise_binary(op, device, dtype, requires_grad, **kwargs) # Adds alpha kwarg cases make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) lhs = make_arg((S, S), **op.lhs_make_tensor_kwargs) rhs = make_arg((S, S), **op.rhs_make_tensor_kwargs) if dtype is not torch.bool: yield SampleInput(lhs, args=(rhs,), kwargs={'alpha': 2}) else: yield SampleInput(lhs, args=(rhs,), kwargs={'alpha': True}) neg_alpha = -3.125 if (dtype.is_floating_point or dtype.is_complex) else -3 lhs = make_arg((S, S), **op.lhs_make_tensor_kwargs) rhs = make_arg((S, S), **op.rhs_make_tensor_kwargs) if dtype is not torch.bool: yield SampleInput(lhs, args=(rhs,), kwargs={'alpha': neg_alpha}) else: yield SampleInput(lhs, args=(rhs,), kwargs={'alpha': False}) def error_inputs_arange(op, device, **kwargs): yield ErrorInput(SampleInput(0, args=(3, 0)), error_type=RuntimeError, error_regex='step must be nonzer') yield ErrorInput(SampleInput(0, args=(-3, 2)), error_type=RuntimeError, error_regex='bound inconsistent with step sign') yield ErrorInput(SampleInput(0, args=(3, -2)), error_type=RuntimeError, error_regex='bound inconsistent with step sign') yield ErrorInput(SampleInput(0, args=(float('inf'), 2)), error_type=RuntimeError, error_regex='unsupported range') yield ErrorInput(SampleInput(float('-inf'), args=(1, 2)), error_type=RuntimeError, error_regex='unsupported range') def sample_inputs_arange(op, device, dtype, requires_grad, **kwargs): int_samples = ( # positive direction (-1, 2, 2), # negative direction (2, -3, -1), # start == end (1, 1, 1), (1, 1, -1), # divides evenly (0, -8, -4), (1, 5, 2), # bool (False, True, True), # default step (0, 1, None), # default start (None, 3, None), ) def to_float(start, end, step): start = start + 0.1 if start is not None else None end = end + 0.1 step = float(step) if step is not None else None return start, end, step float_samples = ( # includes endpoint (0., -8. - 1e-6, -4.), (1., 5. 
+ 1e-6, 2.), (0., -8., -4.), (1., 5., 2.), *(to_float(start, end, step) for (start, end, step) in int_samples), ) large_samples = ( (0, 10000, None), ) samples = int_samples + float_samples if dtype not in (torch.int8, torch.uint8): samples += large_samples for start, end, step in samples: if start is None: assert step is None # Pass end as positional arg yield SampleInput(end, kwargs={"dtype": dtype, "device": device}) # (Similar to) calling torch.arange(end=3) yield SampleInput(0, kwargs={"end": end, "dtype": dtype, "device": device}) elif step is None: yield SampleInput(start, args=(end,), kwargs={"dtype": dtype, "device": device}) else: yield SampleInput(start, args=(end, step), kwargs={"dtype": dtype, "device": device}) yield SampleInput(2) yield SampleInput(1, args=(3, 1)) def sample_inputs_randn(op, device, dtype, requires_grad, **kwargs): shapes = ( (M,), (S, S) ) for shape in shapes: yield SampleInput(input=shape, kwargs=dict(dtype=dtype, device=device, requires_grad=requires_grad)) def sample_inputs_normal(op, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=False) samples = ( ((S, S), 0, 5), ((S, S, S), -2, 0.5), ) for shape, mean, std in samples: yield SampleInput(make_arg(shape), args=(mean, std)) def error_inputs_normal(op, device, **kwargs): t = torch.zeros([10], device=device) invalid_std = -1 yield ErrorInput( SampleInput(t, args=(0, invalid_std)), error_type=RuntimeError, error_regex=fr"normal expects std >= 0.0, but found std {invalid_std}", ) def sample_inputs_cauchy(op, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=False) samples = ( ((M,), 0, 0.5), ((S, S), 0, 1), ((S, S, S), -2, 1), ) for shape, median, gamma in samples: yield SampleInput(make_arg(shape), args=(median, gamma)) def error_inputs_cauchy(op, device, **kwargs): t = torch.zeros([10], device=device) invalid_scale = 0 yield ErrorInput( SampleInput(t, args=(0, invalid_scale,)), error_type=RuntimeError, error_regex=fr"cauchy_ expects sigma > 0.0, but found sigma={invalid_scale}", ) def sample_inputs_exponential(op, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=False) samples = ( ((M,), 0.5), ((S, S), 1), ((S, S, S), 1.5), ) for shape, rate in samples: yield SampleInput(make_arg(shape), args=(rate,)) def error_inputs_exponential(op, device, **kwargs): t = torch.zeros([10], device=device) invalid_rate = 0 yield ErrorInput( SampleInput(t, args=(invalid_rate,)), error_type=RuntimeError, error_regex=fr"exponential_ expects lambda > 0.0, but found lambda={invalid_rate}", ) def sample_inputs_geometric(op, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=False) samples = ( ((M,), 0.2), ((S, S), 0.5), ((S, S, S), 0.8), ) for shape, rate in samples: yield SampleInput(make_arg(shape), args=(rate,)) def error_inputs_geometric(op, device, **kwargs): t = torch.zeros([10], device=device) neg_prob = -1 yield ErrorInput( SampleInput(t, args=(neg_prob,)), error_type=RuntimeError, error_regex=fr"geometric_ expects p to be in \(0, 1\), but got p={neg_prob}", ) def sample_inputs_log_normal(op, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=False) samples = ( ((M,), 0, 0.25), ((S, S), 0.5, 1), ((S, S, S), 0, 0.5), ) for shape, mean, std in samples: yield SampleInput(make_arg(shape), args=(mean, std)) 
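# NOTE [editorial, illustrative only]: the sample_inputs_* / error_inputs_*
# generators in this file all follow the same contract -- each yields
# SampleInput (or ErrorInput) objects that OpInfo-based tests unpack roughly as
#
#     for sample in op_info.sample_inputs(device, dtype, requires_grad=False):
#         result = op_info(sample.input, *sample.args, **sample.kwargs)
#
# (sketch only; the exact test drivers live in the OpInfo test templates).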
def error_inputs_log_normal(op, device, **kwargs): t = torch.zeros([10], device=device) invalid_std = 0 yield ErrorInput( SampleInput(t, args=(0, invalid_std)), error_type=RuntimeError, error_regex=fr"log_normal_ expects std > 0.0, but found std={invalid_std}", ) def sample_inputs_uniform(op, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=False) samples = ( ((M,), -100, 100), ((S, S), 0, 1), ((S, S, S), 1, 2), ) for shape, hi, lo in samples: yield SampleInput(make_arg(shape), args=(hi, lo)) def sample_inputs_ones_zeros(op, device, dtype, requires_grad, **kwargs): # this is a bit messy, as we want the args to be tuples # so if we pass size as a tuple, we have a tuple containing a tuple sizes = ( (M,), (S, S), ) for size in sizes: yield SampleInput(size, kwargs={'dtype': dtype, 'device': device}) def sample_inputs_full(op, device, dtype, requires_grad, **kwargs): def get_val(dtype): return make_tensor([], dtype=dtype, device="cpu").item() sizes = ( (M,), (S, S), ) fill_values = [get_val(dtype), get_val(torch.int)] for size, fill_value in product(sizes, fill_values): yield SampleInput(size, fill_value, dtype=dtype, device=device) def error_inputs_uniform(op, device, **kwargs): t = torch.zeros([10], device=device) yield ErrorInput( SampleInput(t, args=(3, -1)), error_type=RuntimeError, error_regex=r"uniform_ expects to return a \[from, to\) range, but found from=3 > to=-1", ) def error_inputs_linspace(op, device, **kwargs): yield ErrorInput(SampleInput(0, args=(3, -1)), error_type=RuntimeError, error_regex='number of steps must be non-negative') yield ErrorInput( SampleInput(0, args=(3, 1.)), error_type=TypeError, error_regex="received an invalid combination of arguments - got \\(int, int, float", ) yield ErrorInput( SampleInput(torch.tensor([1, 1], device=device), args=(torch.tensor([3, 3], device=device), 1)), error_type=RuntimeError, error_regex="only supports 0-dimensional start and end tensors" ) def sample_inputs_linspace(op, device, dtype, requires_grad, **kwargs): ends = (-3, 0, 1, 4, 50) starts = (-2., 0, 4.3, 50) nsteps = (0, 1, 50) # Extra case to replicate off-by-one issue on CUDA cases = list(product(starts, ends, nsteps)) + [(0, 7, 50)] for start, end, nstep in cases: if dtype == torch.uint8 and (end < 0 or start < 0): continue yield SampleInput(start, args=(end, nstep), kwargs={"dtype": dtype, "device": device}) yield SampleInput(1, args=(3, 1)) def sample_inputs_linspace_tensor_overload(op, device, dtype, requires_grad, **kwargs): ends = (-3, 0, 1, 4, 50) starts = (-2., 0, 4.3, 50) nsteps = (0, 1, 50) is_start_end_tensors = ((True, True), (True, False), (False, True)) make_arg = partial(torch.tensor, device=device, requires_grad=False) # Extra case to replicate off-by-one issue on CUDA cases = list(product(starts, ends, nsteps, is_start_end_tensors)) + [(0, 7, 50, (True, True))] for start, end, nstep, (is_start_tensor, is_end_tensor) in cases: if dtype == torch.uint8 and (end < 0 or start < 0): continue tensor_options = {"dtype": dtype, "device": device} if is_start_tensor: start = make_arg(start, dtype=torch.float32 if isinstance(start, float) else torch.int64) if is_end_tensor: end = make_arg(end, dtype=torch.float32 if isinstance(end, float) else torch.int64) yield SampleInput(start, args=(end, nstep), kwargs=tensor_options) yield SampleInput(1, args=(3, 1)) def sample_inputs_logspace(op, device, dtype, requires_grad, **kwargs): ends = (-3, 0, 1.2, 2, 4) starts = (-2., 0, 1, 2, 4.3) nsteps = (0, 1, 2, 4) 
bases = (2., 1.1) if dtype in (torch.int8, torch.uint8) else (None, 2., 3., 1.1, 5.) for start, end, nstep, base in product(starts, ends, nsteps, bases): if dtype == torch.uint8 and end < 0 or start < 0: continue if nstep == 1 and isinstance(start, float) and not (dtype.is_complex or dtype.is_floating_point): # https://github.com/pytorch/pytorch/issues/82242 continue if base is None: yield SampleInput(start, args=(end, nstep), kwargs={"dtype": dtype, "device": device}) else: yield SampleInput(start, args=(end, nstep, base), kwargs={"dtype": dtype, "device": device}) yield SampleInput(1, args=(3, 1, 2.)) def sample_inputs_logspace_tensor_overload(op, device, dtype, requires_grad, **kwargs): ends = (-3, 0, 1.2, 2, 4) starts = (-2., 0, 1, 2, 4.3) nsteps = (0, 1, 2, 4) bases = (2., 1.1) if dtype in (torch.int8, torch.uint8) else (None, 2., 3., 1.1, 5.) is_start_end_tensors = ((True, True), (True, False), (False, True)) make_arg = partial(torch.tensor, device=device) for start, end, nstep, base, (is_start_tensor, is_end_tensor) in product(starts, ends, nsteps, bases, is_start_end_tensors): if dtype == torch.uint8 and end < 0 or start < 0: continue if nstep == 1 and isinstance(start, float) and not (dtype.is_complex or dtype.is_floating_point): # https://github.com/pytorch/pytorch/issues/82242 continue tensor_options = {"dtype": dtype, "device": device} if (is_start_tensor): start = make_arg(start, dtype=torch.float32 if isinstance(start, float) else torch.int64) if (is_end_tensor): end = make_arg(end, dtype=torch.float32 if isinstance(end, float) else torch.int64) if base is None: yield SampleInput(start, args=(end, nstep), kwargs=tensor_options) else: yield SampleInput(start, args=(end, nstep, base), kwargs=tensor_options) yield SampleInput(1, args=(3, 1, 2.)) def sample_inputs_isclose(op, device, dtype, requires_grad, **kwargs): yield from sample_inputs_elementwise_binary(op, device, dtype, requires_grad, **kwargs) # Creates additional inputs to test the rtol, atol, and equal_nan params rtols = [0., 1e-7] atols = [0., 1e-7] equal_nans = [False, True] products = product(rtols, atols, equal_nans) make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) for rtol, atol, equal_nan in products: lhs = make_arg((S, S), **op.lhs_make_tensor_kwargs) rhs = make_arg((S, S), **op.rhs_make_tensor_kwargs) yield SampleInput(lhs, args=(rhs,), kwargs=dict(rtol=rtol, atol=atol, equal_nan=equal_nan)) def error_inputs_isclose(op, device, **kwargs): make_float_arg = partial(make_tensor, device=device, dtype=torch.float, requires_grad=False) yield ErrorInput(SampleInput(make_float_arg(()), args=(make_float_arg(()),), kwargs={'rtol': -0.4}), error_type=RuntimeError, error_regex='rtol must be greater than or equal to zero') yield ErrorInput(SampleInput(make_float_arg(()), args=(make_float_arg(()),), kwargs={'atol': -0.4}), error_type=RuntimeError, error_regex='atol must be greater than or equal to zero') def sample_inputs_t(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) yield SampleInput(make_arg((1, 2))) yield SampleInput(make_arg((2,))) yield SampleInput(make_arg(())) def sample_inputs_mm(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) def make_arg_conj(size): return make_arg(size).conj().requires_grad_(requires_grad) first_shape, second_shape = (S, M), (M, S) yield SampleInput(make_arg(first_shape), 
args=(make_arg(second_shape),)) if dtype.is_complex: yield SampleInput(make_arg(first_shape), args=(make_arg_conj(second_shape),)) # Matmul of empty matrices yield SampleInput(make_arg((0, S)), args=(make_arg(S, M),)) yield SampleInput(make_arg((S, 0)), args=(make_arg(0, M),)) def sample_inputs_addmm(op_info, device, dtype, requires_grad, **kwargs): alpha_val = kwargs.get('alpha', 2 + 3j if dtype.is_complex else 0.6) beta_val = kwargs.get('beta', 1 + 2j if dtype.is_complex else 0.2) tests_list = [ ((2, 3), (2, 2), (2, 3), False), ((3, 3), (3, 3), (3, 3), False), ] tests_with_lhs_broadcasting = [ ((1,), (2, 2), (2, 3), True), ((), (2, 2), (2, 3), True), ] test_cases = tests_list + tests_with_lhs_broadcasting # type: ignore[operator] kwargs = dict(alpha=alpha_val, beta=beta_val) make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) for shape_a, shape_b, shape_c, broadcasts_input in test_cases: yield SampleInput( make_arg(shape_a), make_arg(shape_b), make_arg(shape_c), **kwargs, ).with_metadata(broadcasts_input=broadcasts_input) if dtype.is_complex: shape = (3, 3) yield SampleInput( make_arg(shape), make_arg(shape, requires_grad=False).mH.requires_grad_(requires_grad), make_arg(shape), **kwargs, ) yield SampleInput( make_arg(shape), make_arg(shape), make_arg(shape, requires_grad=False).mH.requires_grad_(requires_grad), **kwargs, ) # addmm of empty matrices if dtype.is_floating_point: yield SampleInput(make_arg(S, M), make_arg(S, 0), make_arg(0, M), **kwargs) # empty matmul with broadcastable input yield SampleInput(make_arg(M), make_arg(S, 0), make_arg(0, M), **kwargs).with_metadata(broadcasts_input=True) def sample_inputs_sparse_sampled_addmm(op_info, device, dtype, requires_grad, **kwargs): alpha = 2 + 3j if dtype.is_complex else 0.6 beta = 1 + 2j if dtype.is_complex else 0.2 make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) # sparse.sampled_addmm performs: alpha * (A @ B) * sparse_ones_like(C) + beta * C for m, n, k in itertools.product([0, 5], repeat=3): yield SampleInput( torch.eye(m, n, device=device, dtype=dtype) .to_sparse_csr() .requires_grad_(requires_grad), make_arg((m, k)), make_arg((k, n)), alpha=alpha, beta=beta, ) def sample_inputs_sparse_mm_reduce(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) reductions = ["sum", "mean", "amax", "amin"] for m, k, reduce in product([5, 7], [3, 11], reductions): yield SampleInput( torch.eye(m, m) .to(device=device, dtype=dtype) .to_sparse_csr() .requires_grad_(requires_grad), make_arg((m, k)), reduce, ) def sample_inputs_mv(self, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad) yield SampleInput(make_arg(S, M), make_arg(M)) def sample_inputs_bmm(self, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad) yield SampleInput(make_arg(M, S, M), make_arg(M, M, S)) def sample_inputs_dot_vdot(self, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) def make_arg_conj(size): return make_arg(size).conj().requires_grad_(requires_grad) yield SampleInput(make_arg((S, )), make_arg((S, ))) if dtype.is_complex: # dot/vdot for (conj(input), conj(arg_tensor)) and (conj(input), arg_tensor) # is tested in test_conj_view (which 
tests operations with only conjugated input tensor # -- not conjugated arg tensors) yield SampleInput(make_arg((S, )), make_arg_conj((S, ))) def error_inputs_dot_vdot(op_info, device, is_ref=False, **kwargs): make_input = partial(make_tensor, device=device, dtype=torch.float32) yield ErrorInput(SampleInput(make_input(1), args=(make_input(3, dtype=torch.float16),)), error_regex='dot : expected both vectors to have same dtype') yield ErrorInput(SampleInput(make_input(1, 1), args=(make_input(3),)), error_regex='1D tensors expected') yield ErrorInput(SampleInput(make_input(9), args=(make_input(3),)), error_regex='inconsistent tensor size') if device != "cpu" and not is_ref: yield ErrorInput(SampleInput(make_input(3), args=(make_input(3, device="cpu"),)), error_regex='Expected all tensors to be on the same device') def sample_inputs_addmv(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) test_cases = (((S,), (S, M), (M,), 1, 1, False), ((S,), (S, M), (M,), 0.2, 0.6, False), ) test_cases_with_broadcast = (((1,), (S, M), (M,), 1, 1, True), ((1,), (S, M), (M,), 0.2, 0.6, True), ((), (S, M), (M,), 1, 1, True), ((), (S, M), (M,), 0.2, 0.6, True), ) cases = test_cases + test_cases_with_broadcast # addmv performs: beta * M + alpha * (mat @ vec) for size, mat, vec, beta, alpha, broadcasts_input in cases: yield SampleInput(make_arg(size), args=(make_arg(mat), make_arg(vec)), kwargs=dict(beta=beta, alpha=alpha), broadcasts_input=broadcasts_input) def sample_inputs_addbmm(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) # input_shape, batch1_shape, batch2_shape, beta_val, alpha_val, is_broadcasting test_cases = [((S, M), (S, S, S), (S, S, M), 1, 1, False), ((1,), (S, S, S), (S, S, M), 1, 1, True), ((S, M), (S, S, S), (S, S, M), 0.6, 0.2, False), ((1,), (S, S, S), (S, S, M), 0.6, 0.2, True), ((), (S, S, S), (S, S, M), 1, 1, True), ((), (S, S, S), (S, S, M), 0.6, 0.2, True), ] for input_shape, batch1_shape, batch2_shape, beta, alpha, is_broadcasting in test_cases: if dtype.is_complex: beta_complex, alpha_complex = beta * (1 + 2j), alpha * (2 + 3j) yield SampleInput(make_arg(input_shape), args=(make_arg(batch1_shape), make_arg(batch2_shape)), kwargs=dict(beta=beta_complex, alpha=alpha_complex), broadcasts_input=is_broadcasting) yield SampleInput(make_arg(input_shape), args=(make_arg(batch1_shape), make_arg(batch2_shape)), kwargs=dict(beta=beta, alpha=alpha), broadcasts_input=is_broadcasting) def sample_inputs_addcmul_addcdiv(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) test_cases = [(((S, S), (S, S), (S, S)), False), (((S, S), (S, 1), (1, S)), False), (((1,), (S, S, 1), (1, S)), True), (((), (), ()), False), (((S, S), (), ()), True), (((), (S, S, 1), (1, S)), True) ] for input_args, broadcasts_input in test_cases: # addcdiv should accept inputs with zero value # Currently, it throws ZeroDivisionError when the denominator is zero # TODO: exclude_zeros can be removed after https://github.com/pytorch/pytorch/issues/73638 is fixed args = tuple(make_arg(arg, exclude_zero=True) if isinstance(arg, tuple) else arg for arg in input_args) yield SampleInput(*args).with_metadata(broadcasts_input=broadcasts_input) # addcdiv should accept inputs with zero value # Currently, it throws ZeroDivisionError when the denominator is zero # TODO: exclude_zeros 
can be removed after https://github.com/pytorch/pytorch/issues/73638 is fixed args = tuple(make_arg(arg, exclude_zero=True) if isinstance(arg, tuple) else arg for arg in input_args) yield SampleInput( *args, value=3.14 if dtype.is_floating_point or dtype.is_complex else 3 ).with_metadata(broadcasts_input=broadcasts_input) def reference_inputs_addcmul_addcdiv(op_info, device, dtype, requires_grad, **kwargs): yield from sample_inputs_addcmul_addcdiv( op_info, device, dtype, requires_grad, **kwargs) # type promotion cases supported_dtypes = op_info.supported_dtypes(device) make_arg = partial(make_tensor, device=device, requires_grad=requires_grad) types = ( (torch.float64, torch.complex128), (torch.bfloat16, torch.float32), ) values = ( None, True, False, 3.14, 3, 1.0, 1, 0.0, 0, -3.14, -3, 3.14 + 2.71j, ) for (type2, type3), value in product(types, values): if (type2 not in supported_dtypes or type3 not in supported_dtypes): continue # RuntimeError: value cannot be converted without overflow if (type(value) is complex and type2 is not torch.complex128): continue arg1 = make_arg([5, 5], dtype=dtype) arg2 = make_arg([5, 5], dtype=type2) arg3 = make_arg([1, 5], dtype=type3) # TypeError: addcdiv(): argument 'value' must be Number, not NoneType if value is not None: yield SampleInput(arg1, args=(arg2, arg3), kwargs=dict(value=value)) else: yield SampleInput(arg1, args=(arg2, arg3)) def sample_inputs_baddbmm(op_info, device, dtype, requires_grad, **kwargs): test_cases = [((S, S, M), (S, S, S), (S, S, M), 1, 1, False), ((1,), (S, S, S), (S, S, M), 1, 1, True), ((S, S, M), (S, S, S), (S, S, M), 0.6, 0.2, False), ((1,), (S, S, S), (S, S, M), 0.6, 0.2, True), ((), (S, S, S), (S, S, M), 1, 1, True), ((), (S, S, S), (S, S, M), 0.6, 0.2, True), ] make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad, low=None, high=None) for (input_shape, batch1_shape, batch2_shape, alpha, beta, broadcasts_input) in test_cases: yield SampleInput( make_arg(input_shape), make_arg(batch1_shape), make_arg(batch2_shape), beta=beta, alpha=alpha ).with_metadata(broadcasts_input=broadcasts_input) if dtype.is_complex: yield SampleInput( make_arg(input_shape), make_arg(batch1_shape), make_arg(batch2_shape), beta=beta * (1 + 2j), alpha=alpha * (2 + 3j), ).with_metadata(broadcasts_input=broadcasts_input) if dtype.is_complex: shapes = [(S, S, S), (S, M, S), (S, S, M)] args = tuple(make_arg(s) for s in shapes) yield SampleInput( args[0].transpose_(-1, 1), args[1].transpose(-1, 1).conj().requires_grad_(requires_grad), args[2].transpose(-1, 1).conj().requires_grad_(requires_grad), beta=beta * (1 + 2j), alpha=alpha * (2 + 3j), ) # TODO: add reduction kwargs def sample_inputs_multilabel_soft_margin_loss(op_info, device, dtype, requires_grad, **kwargs): _make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) shapes = ( (S,), (S, S), ) for shape in shapes: # Produce one with weight and one without. 
yield SampleInput(_make_tensor(shape), args=(_make_tensor(shape, requires_grad=False),), kwargs={}) yield SampleInput(_make_tensor(shape), args=(_make_tensor(shape, requires_grad=False),), kwargs={'weight': _make_tensor(shape, requires_grad=False)}) def sample_inputs_addr(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial( make_tensor, device=device, dtype=dtype, requires_grad=requires_grad, low=None, high=None ) yield SampleInput(make_arg(S, M), make_arg(S), make_arg(M)) yield SampleInput(make_arg(), make_arg(S), make_arg(M)).with_metadata(broadcasts_input=True) if dtype.is_complex: alpha, beta = 0.1 + 0.3j, 0.4 + 0.6j elif dtype.is_floating_point: alpha, beta = 0.2, 0.6 else: alpha, beta = 2, 3 yield SampleInput(make_arg(S, M), make_arg(S), make_arg(M), beta=beta, alpha=alpha) yield SampleInput( make_arg(), make_arg(S), make_arg(M), beta=beta, alpha=alpha, ).with_metadata(broadcasts_input=True) # These samples fail gradcheck if dtype.is_floating_point and not requires_grad: tensor_options = dict(device=device, dtype=dtype, requires_grad=requires_grad) yield SampleInput( torch.tensor([[math.nan]], **tensor_options), torch.tensor([0.0], **tensor_options), torch.tensor([0.0], **tensor_options), beta=0.0, alpha=0.0, ).with_metadata(broadcasts_input=True) yield SampleInput( torch.tensor([[0.0]], **tensor_options), torch.tensor([math.nan], **tensor_options), torch.tensor([math.nan], **tensor_options), beta=0.0, alpha=0.0, ).with_metadata(broadcasts_input=True) def sample_inputs_zero_(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) cases = ((), (S, S, S), (S,)) for shape in cases: yield SampleInput(make_arg(shape)) def sample_inputs_multi_margin_loss(op_info, device, dtype, requires_grad, **kwargs): _make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) make_target = partial(_make_tensor, dtype=torch.long, requires_grad=False) make_weight = partial(_make_tensor, requires_grad=False) inputs = ( ((), make_target([], low=0, high=1), {}), ((S,), make_target([], low=0, high=S), {"p": 1}), ((S,), make_target([1], low=0, high=S), {"p": 2}), ((S, M), make_target([S], low=0, high=M), {"margin": 1.0}), ((S, M), make_target([S], low=0, high=M), {"margin": -3.14}), ((M, S), make_target([M], low=0, high=S), {"weight": None}), ((M, S), make_target([M], low=0, high=S), {"weight": make_weight([S], low=-10., high=10.)}), ((M, S), make_target([M], low=0, high=S), {"reduction": "none"}), ((M, S), make_target([M], low=0, high=S), {"reduction": "mean"}), ((M, S), make_target([M], low=0, high=S), {"reduction": "sum"}), ) for input_shape, target, kwargs in inputs: yield SampleInput(_make_tensor(input_shape), args=(target,), kwargs=kwargs) def reference_inputs_multi_margin_loss(op_info, device, dtype, requires_grad, **kwargs): yield from sample_inputs_multi_margin_loss(op_info, device, dtype, requires_grad, **kwargs) _make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) make_target = partial(_make_tensor, dtype=torch.long, requires_grad=False) make_weight = partial(_make_tensor, requires_grad=False) inputs = ( ((), make_target([], low=0, high=1)), ((S,), make_target([], low=0, high=S)), ((S,), make_target([1], low=0, high=S)), ((M, S), make_target([M], low=0, high=S)), ) ps = (1, 2) margins = (0, 7, -3.14) weights = (False, True) reductions = (None, "none", "mean", "sum") for (input_shape, target), p, margin, weight, reduction in 
product(inputs, ps, margins, weights, reductions): input = _make_tensor(input_shape) weight_shape = [input.size(-1)] if input.ndim > 0 else [1] weight = make_weight(weight_shape, low=-10., high=10.) if weight else None kwargs = {"p": p, "margin": margin, "weight": weight} if reduction is not None: kwargs["reduction"] = reduction yield SampleInput(input, args=(target,), kwargs=kwargs) def error_inputs_multi_margin_loss(op, device, **kwargs): make_input = partial(make_tensor, device=device, dtype=torch.float32) # invalid reduction yield ErrorInput(SampleInput(make_input(5, 4), args=(make_input(5,),), kwargs={'reduction': 'abc'}), error_type=ValueError, error_regex='abc is not a valid value for reduction') # invalid input yield ErrorInput(SampleInput(make_input(5, 0), args=(make_input(5,),), kwargs={}), error_type=RuntimeError, error_regex=r'Expected non-empty vector or matrix with optional 0-dim batch size, but got: \[5, 0\]') yield ErrorInput(SampleInput(make_input(0,), args=(make_input(5,),), kwargs={}), error_type=RuntimeError, error_regex=r'Expected non-empty vector or matrix with optional 0-dim batch size, but got: \[0\]') # invalid target yield ErrorInput(SampleInput(make_input(5, 4), args=(make_input(5, 4),), kwargs={}), error_type=RuntimeError, error_regex=r'inconsistent target size, expected 5 but got \[5, 4\]') # invalid target dtype yield ErrorInput(SampleInput(make_input(5, 4), args=(make_input(5,),), kwargs={}), error_type=RuntimeError, error_regex='expected scalar type Long but found Float') # invalid weight yield ErrorInput(SampleInput(make_input(5, 4), args=(make_input(5,),), kwargs={'weight': make_input(())}), error_type=ValueError, error_regex='weight must be one-dimensional') yield ErrorInput(SampleInput(make_input(5, 4), args=(make_input(5,),), kwargs={'weight': make_input(5, 4)}), error_type=ValueError, error_regex='weight must be one-dimensional') yield ErrorInput(SampleInput(make_input(5, 4), args=(make_input(5,),), kwargs={'weight': make_input(5,)}), error_type=RuntimeError, error_regex=r'inconsistent weight size, expected 4 but got \[5\]') # invalid p yield ErrorInput(SampleInput(make_input(5, 4), args=(make_input(5,),), kwargs={'p': 3}), error_type=ValueError, error_regex='only p == 1 and p == 2 supported') def sample_inputs_logsumexp(self, device, dtype, requires_grad, **kwargs): inputs = ( ((), (0,), True), ((S, S), (1,), True), ((S, S), (1,), False), ((S, S), (-2,), False), ((S, S), (0, 1), False), ) # Test large inputs to check numerical stability lows = (None, 1e3, 1e6) if dtype in (torch.float32, torch.float64, torch.complex64, torch.complex128) else (None,) for low in lows: high = low * 2 if low is not None else None for shape, dim, keepdim in inputs: t = make_tensor(shape, dtype=dtype, device=device, low=low, high=high, requires_grad=requires_grad) yield SampleInput(t, dim, keepdim) def reference_inputs_logsumexp(op, device, dtype, requires_grad, **kwargs): yield from sample_inputs_logsumexp(op, device, dtype, requires_grad, **kwargs) # https://github.com/pytorch/pytorch/issues/91843 t = torch.tensor([20, 30, 100], dtype=dtype, device=device, requires_grad=requires_grad) yield SampleInput(t, 0, False) t = torch.tensor((), dtype=dtype, device=device, requires_grad=requires_grad) yield SampleInput(t, 0, False) # tests masking # https://github.com/pytorch/pytorch/pull/91860#pullrequestreview-1241344073 t = torch.tensor(float("inf")) yield SampleInput(t, 0, True) def sample_inputs_like_fns(self, device, dtype, requires_grad, **kwargs): inputs = [ ((), {}), ((S, 
S), {}), ((0, S, 0), {}), ((S,), {'dtype': dtype, 'device': device}), # Hard-code some dtypes/devices. We want to test cases where the # (dtype, device) is different from the input's (dtype, device) ((S,), {'dtype': torch.double}), ((S,), {'device': 'cpu'}), ((S,), {'dtype': torch.double, 'device': 'cpu'}), ] if torch.cuda.is_available(): inputs.append(((S,), {'device': 'cuda'})) for shape, kwargs in inputs: t = make_tensor(shape, dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad) yield SampleInput(t, **kwargs) def reference_inputs_like_fns(op, device, dtype, requires_grad, **kwargs): yield from sample_inputs_like_fns(op, device, dtype, requires_grad, **kwargs) # shape cases = ( (), (0,), (1, 0), (1, 1, 4, 5), (5, 3, 0, 1), (1, 4, 3, 1, 1) ) make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) for shape in cases: yield SampleInput(make_arg(shape)) yield SampleInput(make_arg(shape).transpose(0, -1)) yield SampleInput(make_arg(shape, noncontiguous=True)) yield SampleInput(make_arg(shape, noncontiguous=True).transpose(0, -1)) def sample_inputs_multilabel_margin_loss(op_info, device, dtype, requires_grad, **kwargs): _make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) make_target = partial(_make_tensor, dtype=torch.long, requires_grad=False) inputs = ( ([], make_target([], low=0, high=1), {}), ([S], make_target([S], low=0, high=S), {}), ([M, S], make_target([M, S], low=0, high=S), {}), ([M, S], make_target([M, S], low=0, high=S), {"reduction": "none"}), ([M, S], make_target([M, S], low=0, high=S), {"reduction": "mean"}), ([M, S], make_target([M, S], low=0, high=S), {"reduction": "sum"}), ) for shape, target, kwargs in inputs: yield SampleInput(_make_tensor(shape), args=(target,), kwargs=kwargs) def reference_inputs_multilabel_margin_loss(op_info, device, dtype, requires_grad, **kwargs): yield from sample_inputs_multilabel_margin_loss(op_info, device, dtype, requires_grad, **kwargs) _make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) make_target = partial(_make_tensor, dtype=torch.long, requires_grad=False) make_target_tensor = partial(torch.tensor, device=device, dtype=torch.long, requires_grad=False) inputs = ( # random tests including -1 target labels ([], make_target([], low=-1, high=1)), ([S], make_target([S], low=-1, high=S)), ([M, S], make_target([M, S], low=-1, high=S)), # repeated target labels and -1 (labels after the first -1 are ignored) ([], make_target_tensor(-1)), ([7], make_target_tensor([2, 0, 6, -1, 4, -1, 6])), ([4, 5], make_target_tensor([[4, -1, 0, -1, 2], [0, 0, 4, 1, 4], [-1, 3, -1, 1, 0], [4, 3, 2, 1, 0]])), ) reductions = (None, "none", "mean", "sum") for (shape, target), reduction in product(inputs, reductions): kwargs = {} if reduction is not None: kwargs["reduction"] = reduction yield SampleInput(_make_tensor(shape), args=(target,), kwargs=kwargs) def error_inputs_multilabel_margin_loss(op, device, **kwargs): make_input = partial(make_tensor, device=device, dtype=torch.float32) # invalid reduction yield ErrorInput(SampleInput(make_input(5, 4), args=(make_input(5, 4),), kwargs={'reduction': 'abc'}), error_type=ValueError, error_regex='abc is not a valid value for reduction') # invalid input yield ErrorInput(SampleInput(make_input(5, 0), args=(make_input(5, 4),), kwargs={}), error_type=RuntimeError, error_regex=r'Expected non-empty vector or matrix with optional 0-dim batch size, but got: \[5, 0\]') yield 
ErrorInput(SampleInput(make_input(0,), args=(make_input(0,),), kwargs={}), error_type=RuntimeError, error_regex=r'Expected non-empty vector or matrix with optional 0-dim batch size, but got: \[0\]') # invalid target yield ErrorInput(SampleInput(make_input(5, 4), args=(make_input(4,),), kwargs={}), error_type=RuntimeError, error_regex=r'inconsistent target size: \[4\] for input of size: \[5, 4\]') yield ErrorInput(SampleInput(make_input(5, 4), args=(make_input((),),), kwargs={}), error_type=RuntimeError, error_regex=r'inconsistent target size: \[\] for input of size: \[5, 4\]') def get_independent_tensor(tensor): return tensor.clone().requires_grad_(tensor.requires_grad) def sample_inputs_randint(self, device, dtype, requires_grad, **kwargs): low = 2 high = 10 for sample in sample_inputs_like_fns(self, device, dtype, requires_grad, **kwargs): sample.kwargs.setdefault('device', device) # With high yield SampleInput(high, sample.input.shape, *sample.args, **sample.kwargs) # With low and high yield SampleInput(low, high, sample.input.shape, *sample.args, **sample.kwargs) def sample_inputs_randint_like(self, device, dtype, requires_grad, **kwargs): low = 2 high = 10 for sample in sample_inputs_like_fns(self, device, dtype, requires_grad, **kwargs): # With high yield SampleInput( sample.input, high, *sample.args, **sample.kwargs) # With low and high yield SampleInput( get_independent_tensor(sample.input), low, high, *sample.args, **sample.kwargs) def sample_inputs_margin_ranking_loss(op_info, device, dtype, requires_grad, **kwargs): _make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) shapes = ( (), (S,), (S, S), (S, S, S), ) margins = (0., 1.) reductions = ('sum', 'mean', 'none') for shape in shapes: for margin, reduction in product(margins, reductions): kwargs = {'margin': margin, 'reduction': reduction} yield SampleInput(_make_tensor(shape), args=(_make_tensor(shape, requires_grad=False), _make_tensor(shape, requires_grad=False)), kwargs=kwargs) def reference_inputs_margin_ranking_loss(op, device, dtype, requires_grad, **kwargs): yield from sample_inputs_margin_ranking_loss(op, device, dtype, requires_grad, **kwargs) make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) for reduction in ('sum', 'mean', 'none'): if dtype.is_floating_point: # only supports ints and floats # NaN propagation inp1 = make_input((10, )) inp1[2] = float('nan') inp2 = make_input((10, )) inp2[4] = float('nan') target = make_input((10, )) target[9] = float('nan') yield SampleInput(inp1, args=(inp2, target), kwargs={'reduction': reduction}) # Inf handling inp1 = make_input((10, )) inp1[1] = float('inf') inp2 = make_input((10, )) inp2[4] = float('inf') target = make_input((10, )) target[7] = float('inf') yield SampleInput(inp1, args=(inp2, target), kwargs={'reduction': reduction}) # Broadcasting inp1 = make_input((5, 2)) inp2 = make_input((5, 1)) target = make_input((1, 2)) yield SampleInput(inp1, args=(inp2, target), kwargs={'reduction': reduction}) def error_inputs_margin_ranking_loss(op, device, **kwargs): make_input = partial(make_tensor, device=device, dtype=torch.float32) # invalid reduction value.
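# (a hedged illustration of the failure this case targets; F is assumed to stand for
# torch.nn.functional here:
#   F.margin_ranking_loss(torch.randn(5, 4), torch.randn(5, 4), torch.randn(5, 4), reduction='abc')
# is expected to raise ValueError, matching the 'is not a valid value' regex below)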
yield ErrorInput(SampleInput(make_input(5, 4), args=(make_input(5, 4), make_input(5, 4),), kwargs={'reduction': 'abc'}), error_type=ValueError, error_regex='is not a valid value') # invalid input shapes yield ErrorInput(SampleInput(make_input(5, 4), args=(make_input(5, 4), make_input(5,),)), error_regex='margin_ranking_loss : All input tensors should') def sample_inputs_new_fns(self, device, dtype, requires_grad, *, is_strided=False, **kwargs): other_dtype = torch.half if torch.backends.mps.is_available() else torch.double # input_shape, output_shape, strides, kwargs # lengths of output_shape and strides must be equal inputs = [ ((), (), (), {}), ((S, S), (2, 0), (3, 4), {}), ((0, S, 0), (3, 2, 2), (1, 2, 3), {}), ((S,), (2, 3), (7, 8), {'dtype': dtype, 'device': device}), # Hard-code some dtypes/devices. We want to test cases where the # (dtype, device) is different from the input's (dtype, device) ((S,), (10,), (S,), {'dtype': other_dtype}), ((S,), (1, 1, 12), (S, L, M), {'device': 'cpu'}), ((S,), (2, 2, 2), (L, M, S), {'dtype': other_dtype, 'device': 'cpu'}), ] if torch.cuda.is_available(): inputs.append(((S,), (7, 2), (3, 4), {'device': 'cuda'})) for input_shape, output_shape, strides, kwargs in inputs: t = make_tensor(input_shape, dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad) if is_strided: yield SampleInput(t, output_shape, strides, **kwargs) else: yield SampleInput(t, output_shape, **kwargs) def sample_inputs_empty_strided(op, device, dtype, requires_grad=False, **kwargs): inputs = [ ((), (), {'dtype': dtype, 'device': device}), ((S,), (4,), {'dtype': dtype, 'device': device}), ((S, S), (2, 1), {'dtype': dtype, 'device': device}), ((S, S, S), (2, 0, 1), {'dtype': dtype, 'device': device}), ] for shape, strides, kwargs in inputs: yield SampleInput(shape, strides, requires_grad=requires_grad, **kwargs) def sample_inputs_empty(op, device, dtype, requires_grad, **kwargs): # shape cases = ( (), (0,), (1,), (1, 3, 5), (5, 3, 1), (1, 0, 5, 1), ) for case in cases: yield SampleInput(case, device=device, dtype=dtype, requires_grad=requires_grad) def sample_inputs_empty_permuted(op, device, dtype, requires_grad, **kwargs): # shape cases = ( (), (0,), (1,), (1, 3, 5), (5, 3, 1), (1, 0, 5, 1), ) for case in cases: for layout in itertools.permutations(range(len(case))): yield SampleInput(case, layout, device=device, dtype=dtype, requires_grad=requires_grad) def error_inputs_empty_permuted(op_info, device, **kwargs): yield ErrorInput( SampleInput((2,), args=((0, 1),)), error_type=RuntimeError, error_regex="Number of dimensions in size does not match the length of the physical_layout" ) yield ErrorInput( SampleInput((2,), args=((3,),)), error_type=RuntimeError, error_regex="Dimension out of range" ) yield ErrorInput( SampleInput((2, 3), args=((0, 0),)), error_type=RuntimeError, error_regex="Duplicate dim not allowed" ) def sample_inputs_scalar_tensor(op, device, dtype, requires_grad, **kwargs): # Not including a scalar tensor in vals because meta tests start failing due to # lack of meta support for _local_scalar_dense # torch.tensor(2, device=device) vals = (-5, 0, 1) for item in vals: yield SampleInput(item, device=device, dtype=dtype, requires_grad=requires_grad) def sample_inputs_eye(op, device, dtype, requires_grad, **kwargs): # only ints >= 0 are allowed for both arguments, unless m is omitted sizes = (None, 0, 1, 2, 3, 4, 7, L, M, S) for n, m in product(sizes, sizes): if n is None: continue # TODO: no layout _kwargs = {'device': device, 'dtype': dtype, 
'requires_grad': requires_grad} if m is None: yield SampleInput(n, args=(), kwargs=_kwargs) else: yield SampleInput(n, args=(m,), kwargs=_kwargs) def error_inputs_eye(op_info, device, **kwargs): # TODO: no layout _kwargs = {'device': device, 'dtype': torch.float32} yield ErrorInput( SampleInput(-1, args=(), kwargs=_kwargs), error_regex="n must be greater or equal to 0, got -1" ) yield ErrorInput( SampleInput(-7, args=(42,), kwargs=_kwargs), error_regex="n must be greater or equal to 0, got -7" ) yield ErrorInput( SampleInput(0, args=(-3,), kwargs=_kwargs), error_regex="m must be greater or equal to 0, got -3" ) def sample_inputs_new_full(self, device, dtype, requires_grad, **kwargs): def get_val(dtype): return make_tensor([], dtype=dtype, device="cpu").item() for sample in sample_inputs_new_fns(self, device, dtype, requires_grad, **kwargs): # The scalar we are passing to new_full must be the same dtype # as the one of the resulting tensor use_dtype = sample.kwargs['dtype'] if 'dtype' in sample.kwargs else dtype yield SampleInput( sample.input, *sample.args, get_val(use_dtype), **sample.kwargs) def sample_inputs_full_like(self, device, dtype, requires_grad, **kwargs): def get_val(dtype): return make_tensor([], dtype=dtype, device="cpu").item() inputs = [ ((), get_val(dtype), {}), ((S, S), get_val(dtype), {}), ((0, S, 0), get_val(dtype), {}), ((S,), get_val(dtype), {'dtype': dtype, 'device': device}), # Hard-code some dtypes/devices. We want to test cases where the # (dtype, device) is different from the input's (dtype, device) ((S,), get_val(torch.double), {'dtype': torch.double}), ((S,), get_val(dtype), {'device': 'cpu'}), ((S,), get_val(torch.double), {'dtype': torch.double, 'device': 'cpu'}), ] if torch.cuda.is_available(): inputs.append(((S,), get_val(dtype), {'device': 'cuda'})) for shape, fill_value, kwargs in inputs: t = make_tensor(shape, dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad) yield SampleInput(t, fill_value, **kwargs) def sample_inputs_multinomial(self, device, dtype, requires_grad, **kwargs): cases = [ ([3], 3, {}), ([10], 3, {}), ([3, 10], 3, {}), ([3], 3, dict(replacement=False)), ([3], 3, dict(replacement=True)), ([3, 4], 4, dict(replacement=True)), ([3, 4], 4, dict(replacement=False)), ] for shape, num_samples, kwargs in cases: t = make_tensor(shape, dtype=dtype, device=device, low=0, high=None, requires_grad=requires_grad) yield SampleInput(t, num_samples, **kwargs) def sample_inputs_normal_common(self, device, dtype, requires_grad, cases, **kwargs): def get_value_or_make_tensor(value_or_shape): if isinstance(value_or_shape, list): return make_tensor(value_or_shape, dtype=dtype, device=device, low=0, high=None, requires_grad=requires_grad) return value_or_shape for value_or_mean_shape, value_or_std_shape, kwargs in cases: mean = get_value_or_make_tensor(value_or_mean_shape) std = get_value_or_make_tensor(value_or_std_shape) yield SampleInput(mean, std, **kwargs) def sample_inputs_normal_tensor_first(self, device, dtype, requires_grad, **kwargs): # value_or_size, value_or_size, kwargs cases = [ ([], [], {}), ([3], [3], {}), ([3, 4, 2], [3, 4, 2], {}), ([2, 3], 1.1, {}), ([1, 2, 3], [5, 2, 3], {}), # broadcasting ] return sample_inputs_normal_common(self, device, dtype, requires_grad, cases, **kwargs) def sample_inputs_normal_tensor_second(self, device, dtype, requires_grad, **kwargs): yield SampleInput(1.6, 0.3, [2, 3], dtype=dtype, device=device) yield SampleInput(1.6, 0.3, [2, 2, 2], dtype=dtype, layout=torch.strided, device=device) 
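# The next sample pairs a Python float mean with a tensor std, exercising the
# torch.normal(float, Tensor) overload; a minimal sketch of the call it maps to
# (illustrative values, not taken from this file):
#   torch.normal(2.7, torch.rand(4, 3) + 0.1)  # result takes the std tensor's shape, (4, 3)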
yield SampleInput(2.7, make_tensor([4, 3], dtype=dtype, device=device, low=0, high=None, requires_grad=requires_grad)) def sample_inputs_bernoulli(self, device, dtype, requires_grad, **kwargs): shapes = [ [3], [], [0, 3], [2, 3, 4], ] for shape in shapes: t = make_tensor(shape, dtype=dtype, device=device, low=0, high=1, requires_grad=requires_grad) yield SampleInput(t) def error_inputs_bernoulli(op_info, device, **kwargs): # more than one element of the written-to tensor refers to a single memory location x = torch.rand((1,), device=device).expand((6,)) err_msg = 'unsupported operation' yield ErrorInput(SampleInput(torch.rand_like(x), kwargs={'out': x}), error_regex=err_msg) def sample_inputs_logcumsumexp(self, device, dtype, requires_grad, **kwargs): inputs = ( ((S, S, S), 0), ((S, S, S), 1), ((), 0), ) for large_number in (True, False): for shape, dim in inputs: t = make_tensor(shape, dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad) if large_number and t.dim() > 0: t[0] = 10000 yield SampleInput(t, dim) def sample_inputs_trace(self, device, dtype, requires_grad, **kwargs): yield SampleInput( make_tensor((S, S), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad)) def error_inputs_trace(op, device): yield ErrorInput(SampleInput(make_tensor((3, 4, 5), dtype=torch.float32, device=device)), error_regex="expected a matrix") def sample_inputs_renorm(self, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) cases = (((S, S, S), (2, 1, 0.5)), ((S, S, S), (2, -1, 0.5)), ((S, S, S), (1, 2, 3)), ((S, S, S), (float('inf'), 2, 0.5)), ) for shape, args in cases: yield SampleInput(make_arg(shape), args=args) def sample_inputs_transpose_swapdims(self, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) cases = (((1, 2, 3), (-1, -2)), ((1, 2, 3), (-1, 2)), ((1, 2, 3), (1, -2)), ((1, 2, 3), (1, 2)), ((), (0, 0)), ((1, ), (0, 0)), ((M, M), (0, 1)), ((S, S, S), (2, 0)), ) for shape, args in cases: yield SampleInput(make_arg(shape), args=args) def _numpy_ref_transpose(a, dim0, dim1): if a.ndim <= 1: return a return np.swapaxes(a, dim0, dim1) def sample_inputs_adjoint(self, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) shapes = ((1, 2, 3), (M, M), (S, S, S), (S, M, S), (M, S, M, S)) return (SampleInput(make_arg(shape)) for shape in shapes) def sample_inputs_T(self, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) shapes = ((M, M), (M, L)) return (SampleInput(make_arg(shape)) for shape in shapes) def error_inputs_T(self, device, has_ndims_error=False): make_arg = partial(make_tensor, device=device, dtype=torch.float32) # Deprecated behavior in regular PyTorch, but throws an error in primTorch: # https://github.com/pytorch/pytorch/issues/86968 if has_ndims_error: # ndims == 1 yield ErrorInput(SampleInput(make_arg(M)), error_regex=(r'The use of `x\.T` on tensors of dimension other than 0 or 2 ' r'to reverse their shape is not supported\.')) # ndims > 2 yield ErrorInput(SampleInput(make_arg(M, S, L)), error_regex=(r'The use of `x\.T` on tensors of dimension other than 0 or 2 ' r'to reverse their shape is not supported\.')) def sample_inputs_singular_matrix_factors(op_info, device, dtype, requires_grad=False): """ This function produces two 
tensors of shape (*, m, k) and (*, n, k) with k <= min(m, n). Their matrix product could be used to generate tensor of shape (*, m, n) of rank k. """ make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) batches = [(), (2,)] size = [3, 4] for batch, m, n in product(batches, size, size): k = 2 a = make_arg((*batch, m, k)) b = make_arg((*batch, n, k)) yield a, b def sample_inputs_svd_lowrank(op_info, device, dtype, requires_grad=False, **kwargs): # Function that's well defined on the outputs for complex inputs def fn(usv): U, S, V = usv return U @ V.mH, S for (a, b) in sample_inputs_singular_matrix_factors(op_info, device, dtype, requires_grad): *batch, m, k = a.shape n = b.shape[-2] # NOTE: since svd_lowrank relies on non rank-revealing SVD, # it inherits the problem of unstable behavior with repeated # singular values including zeros. # Since we want to avoid (repeated) zeros as singular values, # we can only use k for q. # This issues could be resolved with using a rank-revealing SVD # which does not include "zero" singular values. yield SampleInput(a, b, q=k, M=None).with_metadata(output_process_fn_grad=fn) for (a, b) in sample_inputs_singular_matrix_factors(op_info, device, dtype, requires_grad): *batch, m, k = a.shape n = b.shape[-2] M = make_tensor((*batch, m, n), dtype=dtype, device=device, requires_grad=requires_grad) yield SampleInput(a, b, q=k, M=M).with_metadata(output_process_fn_grad=fn) def chunk_iter(iterable, size): it = iter(iterable) while True: chunk = tuple(islice(it, size)) if not chunk: break yield chunk def sample_inputs_pca_lowrank(op_info, device, dtype, requires_grad=False, **kwargs): # we reuse samples from svd_lowrank which come in group of two with # kwarg['M'] = None and with kwarg['M'] = <some tensor> samples = sample_inputs_svd_lowrank(op_info, device, dtype, requires_grad, **kwargs) for s1, s2 in chunk_iter(samples, 2): del s1.kwargs['M'] del s2.kwargs['M'] s1.kwargs['center'] = False s2.kwargs['center'] = True yield s1 yield s2 def np_sinc_with_fp16_as_fp32(x): # Wraps numpy's sinc function so that fp16 values are promoted to fp32 # before sinc is invoked. Context: numpy's sinc returns NaN when evaluated # at 0 for fp16. 
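# A hedged usage sketch (the wrapper defined here only widens the argument dtype;
# np.sinc itself is untouched):
#   np_sinc_with_fp16_as_fp32(np.zeros(3, dtype=np.float16))
#   # evaluates np.sinc on float32 values, sidestepping the fp16 NaN-at-zero issue noted above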
if x.dtype == np.float16: return np.sinc(x.astype(np.float32)) else: return np.sinc(x) def sample_inputs_broadcast_to(op_info, device, dtype, requires_grad, **kwargs): test_cases = ( ((S, 1, 1), (S, S, S)), ((S, 1, S), (S, S, S)), ((S, 1), (S, S, S)), ((1,), (S, S, S)), ((1, S), (1, 1, S)), ((), ()), ((), (1, 3, 2)), ) return ( SampleInput( make_tensor(size, dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad), shape, ) for size, shape in test_cases) def sample_inputs_broadcast_tensors(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) test_cases: tuple[tuple] = (((3,), (1, 2, 1), (1, 1), (5, 1, 1),),) for shape, *other_shapes in test_cases: yield SampleInput(make_arg(shape), args=tuple(make_arg(s) for s in other_shapes)) def reference_inputs_broadcast_tensors(op, device, dtype, requires_grad, **kwargs): yield from sample_inputs_broadcast_tensors(op, device, dtype, requires_grad, **kwargs) m = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) n = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad, noncontiguous=True) cases = ( ((), (1, 1), (1, 1, 7, 1), (3, 1, 1)), ((3, 5, 6), (1, 3, 5, 6), (1, 1, 1, 1, 6), (8, 3, 5, 6)) ) for a, b, c, d in cases: yield SampleInput(m(a), args=(m(b), m(c), m(d))) yield SampleInput(n(a), args=(n(b), n(c), n(d))) def sample_inputs_block_diag(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) test_cases: tuple[tuple] = ( ((1, S), (2, S), (3, S),), ((S, 1), (S, 2), (S, 3),), ((1,), (2,), (3,),), ((2, S), (S,)) ) for shape, *other_shapes in test_cases: yield SampleInput(make_arg(shape), args=tuple(make_arg(s) for s in other_shapes)) # We also want to test mixed complex-non-complex inputs to block_diag if dtype == torch.complex32 or dtype == torch.complex64: non_complex_dtype = torch.float32 if dtype == torch.complex32 else torch.float64 make_arg_non_complex = partial(make_tensor, dtype=non_complex_dtype, device=device, requires_grad=requires_grad) yield SampleInput(make_arg_non_complex(shape), args=tuple(make_arg(s) for s in other_shapes)) def sample_inputs_cdist(op_info, device, dtype, requires_grad, **kwargs): small_S = 2 test_cases = ( ((S, S, 2), (S, S + 1, 2)), ((S, S), (S, S)), ((S, S, S), (S, S, S)), ((3, 5), (3, 5)), ((2, 3, 5), (2, 3, 5)), ((1, 2, 3), (1, 2, 3)), ((1, 1), (S, 1)), ((0, 5), (4, 5)), ((4, 5), (0, 5)), ((0, 4, 5), (3, 5)), ((4, 5), (0, 3, 5)), ((0, 4, 5), (1, 3, 5)), ((1, 4, 5), (0, 3, 5)), # Using S here would make this one test take 9s ((small_S, small_S, small_S + 1, 2), (small_S, small_S, small_S + 2, 2)), ((small_S, 1, 1, small_S), (1, small_S, small_S)), ((1, 1, small_S), (small_S, 1, small_S, small_S)), ) make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) for cm in ['use_mm_for_euclid_dist', 'donot_use_mm_for_euclid_dist']: # FIXME add an override for JIT and revert 0. 
back to 0 # since it's accepted by eager for p in [0., 1., 2., 3., 0.5, 1.5, 2.5, float("inf")]: for t1_size, t2_size in test_cases: # The args should never be non-contiguous as this is not supported in the backward yield SampleInput(make_arg(t1_size), make_arg(t2_size), p, cm) def _fill_np(a, value): a = a.copy() a.fill(value) return a def _fill_sample_kwargs(device, dtype, input): if dtype is torch.bool: value = True else: value = 3 return ({'value': value}, {'value': value}) def sample_inputs_comparison_ops(op, device, dtype, requires_grad, **kwargs): yield from sample_inputs_elementwise_binary(op, device, dtype, requires_grad, **kwargs) # Adds a sample input where both tensors have the same values make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) lhs = make_arg((S, S)) yield SampleInput(lhs, args=(lhs.clone(),)) def sample_inputs_stack(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) # shape x number of tensors cases = ( ((3, 4), 1), ((1, 2, 1, 4), 3), ((0, 1, 0), 2),) for shape, num_tensors in cases: tensors = [make_arg(shape) for _ in range(num_tensors)] for dim in range(-1, len(shape) - 1): yield SampleInput(tensors, args=(dim,)) def sample_inputs_chunk_cat(op_info, device, dtype, requires_grad, **kwargs): # 1. If input tensors have different ndims, dim should be non-negative and be less than the ndims of every input tensors. # If all input tensors have the same ndims, we support both negative and non-negative dim. # 2. For wrapped_dim, all tensors should have the same size for 0,...,wrapped_dim-1 dimensions. # No requirements for (wrapped_dim, ...)-th dimension. # 3. Expect positive num_chunks # 4. Expect non-empty input tensor list and each input tensor should have at least 1 element make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) same_ndim_cases = ( ( [ torch.Size([1, 2, 3]), torch.Size([1, 2, 3]), ], -1, 5 ), ( [ torch.Size([1, 2, 129]), torch.Size([1, 2, 297]), ], -1, 5 ), ( [ torch.Size([1, 2, 3]), torch.Size([1, 2, 3]), ], 1, 5 ), ( [ torch.Size([3, 3, 2, 1]), torch.Size([1, 4, 2, 2]), torch.Size([2, 1, 3, 3]), ], 0, 2 ), ) for sizes, dim, num_chunks in same_ndim_cases: tensors = [make_arg(size) for size in sizes] yield SampleInput(tensors, args=(dim, num_chunks)) different_ndim_case = [ torch.Size([2, 3, 3]), torch.Size([2, 3, 1, 2]), torch.Size([2, 3]), torch.Size([2, 3, 2]), torch.Size([2, 3, 271]), ] max_dim, num_chunks = 2, 3 for dim in range(max_dim): tensors = [] for size in different_ndim_case: tensors.append(make_arg(size)) yield SampleInput(tensors, args=(dim, num_chunks)) def error_inputs_chunk_cat(op_info, device, **kwargs): make_arg = partial(make_tensor, device=device, dtype=torch.float32) # input tensors have different ndims but dim is negative sizes, dim, num_chunks = [torch.Size([2, 3]), torch.Size([4,])], -1, 3 tensors = [make_arg(size) for size in sizes] yield ErrorInput( SampleInput(tensors, args=(dim, num_chunks)), error_regex='_chunk_cat expects non-negative dim when input tensors have different ndims', ) # input tensors have different ndims but dim >= ndim of some input tensors sizes, dim, num_chunks = [torch.Size([2, 3]), torch.Size([4,])], 1, 3 tensors = [make_arg(size) for size in sizes] yield ErrorInput( SampleInput(tensors, args=(dim, num_chunks)), error_regex='_chunk_cat expects dim < ndim for all input tensors', ) # some tensors have different sizes for 0, ..., dim-1 dimensions. 
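# (concretely: the shapes chosen below, (2, 3, 4) and (4, 3) with dim=1, disagree in
# dimension 0, one of the leading dims that _chunk_cat requires to match; the
# torch._chunk_cat(tensors, dim, num_chunks) entry point is assumed here from the
# error messages used in this file)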
sizes, dim, num_chunks = [torch.Size([2, 3, 4]), torch.Size([4, 3])], 1, 3 tensors = [make_arg(size) for size in sizes] yield ErrorInput( SampleInput(tensors, args=(dim, num_chunks)), error_regex='_chunk_cat expects same sizes of 0,...,dim-1 dimensions for all tensors', ) # negative num_chunks sizes, dim, num_chunks = [torch.Size([2,]), torch.Size([3,])], 0, -1 tensors = [make_arg(size) for size in sizes] yield ErrorInput( SampleInput(tensors, args=(dim, num_chunks)), error_regex='_chunk_cat expects positive num_chunks', ) # zero as num_chunks sizes, dim, num_chunks = [torch.Size([2,]), torch.Size([3,])], 0, 0 tensors = [make_arg(size) for size in sizes] yield ErrorInput( SampleInput(tensors, args=(dim, num_chunks)), error_regex='_chunk_cat expects positive num_chunks', ) # empty input tensor list dim, num_chunks = 0, 1 yield ErrorInput( SampleInput([], args=(dim, num_chunks)), error_regex='_chunk_cat expects a non-empty input tensor list', ) # empty input tensor with 0 elements sizes, dim, num_chunks = [torch.Size([0,]), torch.Size([3,])], 0, 1 tensors = [make_arg(size) for size in sizes] yield ErrorInput( SampleInput(tensors, args=(dim, num_chunks)), error_regex='_chunk_cat expects non-empty tensor', ) def sample_inputs_cat_concat(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) cases: tuple[tuple, tuple, dict] = ( # type: ignore[assignment] ((S, S), (S, S), {'dim': -1}), ((S, S), (S, S), {'dim': 1}), ((M, S), (S, S), {'dim': 0}), # different shapes ((1, 2, 3), (1, 2, 3), {'dim': -2}), ((0,), (0,), {'dim': 0}), # empty tensor ((0,), (S, S), {'dim': 1}), # empty tensor with unempty and dim=1 (special case for legacy_cat_wrap_dim) ((0, S), (S, S), {'dim': 0}), ((1,), (1,), {}) # dim not passed, fallback to default ) for input_shape1, input_shape2, kwargs in cases: yield SampleInput([make_arg(input_shape1), make_arg(input_shape2)], kwargs=kwargs) # from coat_lite_mini yield SampleInput([make_arg((2, 2, 2, 2), memory_format=torch.channels_last)], args=(1,),) def error_inputs_cat(op_info, device, **kwargs): make_arg = partial(make_tensor, device=device, dtype=torch.float32) # error inputs for more than one element of the written-to tensor refer to a single memory location yield ErrorInput(SampleInput([make_arg((S, S)), make_arg((S, S))], kwargs={'out': make_arg((1, S)).expand((2 * S, S))}), error_regex='unsupported operation') # error inputs for empty tensors yield ErrorInput(SampleInput([], kwargs={'dim': 1}), error_regex='non-empty list of Tensors') # error inputs for different sizes yield ErrorInput(SampleInput([make_arg((S, S, L, L)), make_arg((S, 0, L - 1, L))], kwargs={'dim': 1}), error_regex='Sizes of tensors must match except in dimension') yield ErrorInput(SampleInput([make_arg((S, 0, L - 1, L)), make_arg((S, S, L, L))], kwargs={'dim': 1}), error_regex='Sizes of tensors must match except in dimension') # error inputs for different dimensions yield ErrorInput(SampleInput([make_arg((S - 1, 0)), make_arg((S, 0, L - 1, L))], kwargs={'dim': 1}), error_regex='Tensors must have same number of dimensions') yield ErrorInput(SampleInput([make_arg((S, 0, L - 1, L)), make_arg((S - 1, 0))], kwargs={'dim': 1}), error_regex='Tensors must have same number of dimensions') # error inputs for same memory locations x = torch.zeros((0), device=device) y = torch.randn((4, 6), device=device) err_msg = "the written-to tensor refer to a single memory location" yield ErrorInput(SampleInput((x, y), kwargs={'dim': 0, 
'out': x}), error_regex=err_msg) yield ErrorInput(SampleInput((x, y), kwargs={'dim': 0, 'out': y}), error_regex=err_msg) z = torch.zeros((4, 6), device=device) yield ErrorInput(SampleInput((y, z), kwargs={'out': z[:2, :]}), error_regex=err_msg) # error inputs for different devices if torch.device(device).type == 'cuda': x_cuda = make_tensor((3, 3), device=device, dtype=torch.float32) y_cpu = make_tensor((3, 3), device='cpu', dtype=torch.float32) yield ErrorInput(SampleInput((x_cuda, y_cpu)), error_regex='Expected all tensors to be on the same device') # error inputs for different input sizes for more than 2 tensors yield ErrorInput(SampleInput([make_arg((L, 1)), make_arg((L, 1, 1)), make_arg((L, 1, 1))]), error_regex='Tensors must have same number of dimensions') yield ErrorInput(SampleInput([make_arg((S, 1, M)), make_arg((S, 1, 1)), make_arg((S, M, 1))], kwargs={'dim': 1}), error_regex='Sizes of tensors must match') # error inputs for None input yield ErrorInput(SampleInput((make_arg((S, 1, 1)), None)), error_type=TypeError, error_regex='got None') # error inputs for zero-dimensional tensors yield ErrorInput(SampleInput([make_arg(()), make_arg(())]), error_regex='zero-dimensional.*cannot be concatenated') # error inputs for different dtype of out tensors d = make_tensor((2, 3), device=device, dtype=torch.double) x = make_tensor((2, 3), device=device, dtype=torch.float32) yield ErrorInput(SampleInput(x, kwargs={'out': d}), error_type=TypeError, error_regex='invalid combination of arguments') def reference_inputs_cat(op, device, dtype, requires_grad, **kwargs): yield from sample_inputs_cat_concat(op, device, dtype, requires_grad, **kwargs) make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) # Noncontiguous type promoting tensors a = make_arg((3, 4, 2)) b = make_arg((3, 2, 2), noncontiguous=True, dtype=torch.double) c = make_arg((3, 3, 2), dtype=torch.float16).permute(1, 0, 2) yield SampleInput((a, b, c), kwargs={'dim': 1}) # Special 1D tensor with dim length of 0 case a = make_arg((0,)) b = make_arg((3, 2, 2)) yield SampleInput((a, b, a)) yield SampleInput((a, a, a)) def _elementwise_type_promo_np(*args, type_promotion_kind): def _maybe_torch(x): if isinstance(x, np.ndarray): return torch.from_numpy(x) return x flattened = pytree.arg_tree_leaves(*args) transformed = tuple(_maybe_torch(a) for a in flattened) result_dtype, _ = prims.utils.elementwise_dtypes( *transformed, type_promotion_kind=type_promotion_kind) return torch_to_numpy_dtype_dict[result_dtype] def _cat_np(input_seq, dim=0): inputs = tuple(a for a in input_seq if not (a.ndim == 1 and a.size == 0)) if len(inputs) == 0: np_dtype = _elementwise_type_promo_np( input_seq, type_promotion_kind=prims.utils.ELEMENTWISE_TYPE_PROMOTION_KIND.NO_OPMATH) return np.empty(0, dtype=np_dtype) return np.concatenate(inputs, axis=dim) def _floor_divide_np(a, b): dtype = _elementwise_type_promo_np( a, b, type_promotion_kind=prims.utils.ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT) if isinstance(a, np.ndarray): a = a.astype(dtype) if isinstance(b, np.ndarray): b = b.astype(dtype) return np.floor_divide(a, b) def sample_inputs_hstack_dstack_vstack(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) tensor_shapes = ( # First Tensor being 1-D is special # case for hstack ((S,), (S,), (S,)), ((S, S), (S, S), (S, S)), ) for s1, s2, s3 in tensor_shapes: tensors = (make_arg(s1,), make_arg(s2,), make_arg(s3)) yield SampleInput(tensors) def 
error_inputs_hstack_dstack_vstack(op, device): make_arg = partial(make_tensor, dtype=torch.int32, device=device, requires_grad=False) tensor_shapes = ( ((S,), (S, S, S, S), (S,)), ) for s1, s2, s3 in tensor_shapes: tensors = (make_arg(s1,), make_arg(s2,), make_arg(s3)) # Different dimension tensor yield ErrorInput(SampleInput(tensors), error_regex="Tensors must have same number of dimensions") # empty tensor list yield ErrorInput(SampleInput(()), error_regex="expects a non-empty TensorList") def sample_inputs_unbind(op_info, device, dtype, requires_grad, **kwargs): # Note: we don't do any tests where we unbind along 0-length dims # because in that case unbind returns an empty tuple, and that breaks # some assumptions in some backward tests in test_ops.py shape_dims = (((S,), 0), ((S, S), 0), ((S, S), 1), ((S, S), -1), ((S, 0, S), 0), ((S, S, S), 1), ) for shape, dim in shape_dims: yield SampleInput(make_tensor(shape, dtype=dtype, device=device, requires_grad=requires_grad), args=(dim,)) def error_inputs_unbind(op_info, device): make_arg = partial(make_tensor, dtype=torch.int32, device=device, requires_grad=False) yield ErrorInput(SampleInput(make_arg(()), args=(0,)), error_type=IndexError, error_regex="Dimension specified as 0 but tensor has no dimensions") yield ErrorInput(SampleInput(make_arg((2,)), args=(2,)), error_type=IndexError, error_regex="Dimension out of range") def reference_unbind(t, dim): """A numpy implementation of torch.unbind""" return tuple(s.squeeze(dim) for s in np.split(t, t.shape[dim], dim)) def sample_inputs_gather(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad, low=None, high=None) yield SampleInput( make_arg((M, S)), 0, gather_variable((S, S), 1, M, True, device=device)) yield SampleInput( make_arg((M, S)), 1, gather_variable((M, S // 2), 0, S, True, device=device)) # Empty index tensor case, see: https://github.com/pytorch/pytorch/pull/65006 yield SampleInput( make_arg((S,)), 0, torch.tensor([], dtype=torch.uint8, device=device)) yield SampleInput( make_arg((S,)), 0, torch.tensor([[], []], dtype=torch.uint8, device=device)) # 0D tensor case yield SampleInput( make_arg(()), 0, torch.tensor([0], dtype=torch.int64, device=device)) yield SampleInput( make_arg(()), 0, torch.tensor(0, dtype=torch.int64, device=device)) def _fill_indices(idx, dim, dim_size, elems_per_row, m, n, o): for i in range(1 if dim == 0 else m): for j in range(1 if dim == 1 else n): for k in range(1 if dim == 2 else o): ii = [i, j, k] ii[dim] = slice(0, idx.size(dim) + 1) idx[tuple(ii)] = torch.randperm(dim_size)[0:elems_per_row] def error_inputs_gather(op_info, device, **kwargs): # src is [1, 2] # [3, 4] src = torch.tensor(((1, 2), (3, 4)), device=device, dtype=torch.float32) # idx is [0, 0] # [1, 0] idx = torch.tensor(((0, 0), (1, 0)), device=device, dtype=torch.long) # Index should be smaller than self except on dimension 1 bad_src = make_tensor((1, 1), device=device, dtype=torch.float32) yield ErrorInput(SampleInput(bad_src, args=(1, idx,)), error_regex="Size does not match at dimension 0") # Index must have long dtype bad_idx = idx.to(torch.int32) yield ErrorInput(SampleInput(src, args=(1, bad_idx)), error_regex="Expected dtype int64 for index") # TODO: FIXME # out.dtype must match src.dtype # Creates new src & idx since SampleInputs can't share tensors src = torch.tensor(((1, 2), (3, 4)), device=device, dtype=torch.float32) idx = torch.tensor(((0, 0), (1, 0)), device=device, dtype=torch.long) out = 
torch.empty((2, 2), device=device, dtype=torch.float64) yield ErrorInput(SampleInput(src, args=(1, idx), kwargs={'out': out}), error_regex="Expected out tensor to have dtype") # src and index tensors must have the same # of dimensions # idx too few dimensions src = torch.tensor(((1, 2), (3, 4)), device=device, dtype=torch.float32) idx = torch.tensor((0, 0), device=device, dtype=torch.long) yield ErrorInput(SampleInput(src, args=(1, idx)), error_regex="Index tensor must have the same number of dimensions") # src too few dimensions src = torch.tensor((1, 2), device=device, dtype=torch.float32) idx = torch.tensor(((0, 0), (1, 0)), device=device, dtype=torch.long) yield ErrorInput(SampleInput(src, args=(0, idx)), error_regex="Index tensor must have the same number of dimensions") # index out of bounds # NOTE: this ErrorInput is guarded because bounds checking does not occur on CUDA devices if torch.device(device).type == 'cpu': src = torch.tensor(((1, 2), (3, 4)), device=device, dtype=torch.float32) idx = torch.tensor(((0, 23), (1, 0)), device=device, dtype=torch.long) yield ErrorInput(SampleInput(src, args=(1, idx,)), error_regex="index 23 is out of bounds for dimension") x = torch.rand((1,), device=device).expand((3,)) src = torch.rand((6,), device=device) ind = torch.tensor([2, 1, 0], device=device, dtype=torch.int64) yield ErrorInput(SampleInput(src, args=(0, ind,), kwargs=dict(out=x)), error_type=RuntimeError, error_regex='unsupported operation') yield ErrorInput(SampleInput(src, args=(0, ind,), kwargs=dict(out=src)), error_type=RuntimeError, error_regex='unsupported operation') yield ErrorInput(SampleInput(ind.clone(), args=(0, ind[1:],), kwargs=dict(out=ind[:1])), error_type=RuntimeError, error_regex='unsupported operation') def error_inputs_take(op_info, device, **kwargs): x = torch.rand((1,), device=device).expand((3,)) src = torch.rand((6,), device=device) ind = torch.tensor([2, 1, 0], device=device, dtype=torch.int64) yield ErrorInput(SampleInput(src, args=(ind,), kwargs=dict(out=x)), error_type=RuntimeError, error_regex='unsupported operation') yield ErrorInput(SampleInput(src, args=(ind,), kwargs=dict(out=src)), error_type=RuntimeError, error_regex='unsupported operation') yield ErrorInput(SampleInput(ind.clone(), args=(ind[1:],), kwargs=dict(out=ind[:-1])), error_type=RuntimeError, error_regex='unsupported operation') # Error inputs for scatter def error_inputs_scatter_and_scatter_add(op_info, device, **kwargs): # Error when self.dtype != src.dtype (and src is not a scalar) src = make_tensor((2, 5), device=device, dtype=torch.float32) idx = torch.tensor(((0, 1), (1, 2)), device=device, dtype=torch.long) dst = torch.zeros((3, 5), device=device, dtype=torch.double) yield ErrorInput(SampleInput(dst, args=(0, idx, src)), error_regex="Expected self.dtype to be equal to src.dtype") # Index dtype must be long src = make_tensor((2, 5), device=device, dtype=torch.float32) idx = torch.tensor(((0, 1), (1, 2)), device=device, dtype=torch.int32) dst = torch.zeros((3, 5), device=device, dtype=torch.float32) yield ErrorInput(SampleInput(dst, args=(0, idx, src)), error_regex="Expected dtype int64 for index") # Index and destination must have the same number of dimensions src = make_tensor((2, 5), device=device, dtype=torch.float32) idx = torch.tensor(((0, 1), (1, 2)), device=device, dtype=torch.long) dst = torch.zeros((3, 5, 3), device=device, dtype=torch.float32) yield ErrorInput(SampleInput(dst, args=(0, idx, src)), error_regex="Index tensor must have the same number of dimensions as self 
tensor") # Index and src must have the same number of dimensions when src is not a scalar src = make_tensor((2, 5, 2), device=device, dtype=torch.float32) idx = torch.tensor(((34, 1), (1, 2)), device=device, dtype=torch.long) dst = torch.zeros((3, 5), device=device, dtype=torch.float32) yield ErrorInput(SampleInput(dst, args=(0, idx, src)), error_regex="Index tensor must have the same number of dimensions as src tensor") # Index out of bounds # NOTE: this ErrorInput is guarded because bounds checking does not occur on CUDA devices if torch.device(device).type == 'cpu': src = make_tensor((2, 5), device=device, dtype=torch.float32) idx = torch.tensor(((34, 1), (1, 2)), device=device, dtype=torch.long) dst = torch.zeros((3, 5), device=device, dtype=torch.float32) yield ErrorInput(SampleInput(dst, args=(0, idx, src)), error_regex="index 34 is out of bounds for dimension 0 with size 3") def error_inputs_renorm(op_info, device, **kwargs): zero_d = torch.randn((), device=device) yield ErrorInput(SampleInput(zero_d, args=(0.5, 0, 1.0)), error_type=RuntimeError, error_regex="needs at least 2 dimensions, got 0 dimensions") def error_inputs_ormqr(op_info, device, **kwargs): zero_d = torch.randn((), device=device) yield ErrorInput(SampleInput(zero_d, args=(zero_d, zero_d)), error_type=RuntimeError, error_regex="input must have at least 2 dimensions") # https://github.com/pytorch/pytorch/issues/85218 tensor_0 = torch.full((5, 0,), 1, device=device) tensor_1 = torch.full((5,), 1, device=device) tensor_2 = torch.full((5, 5,), 1, device=device) bool_3 = True bool_4 = True yield ErrorInput(SampleInput(tensor_0, args=(tensor_1, tensor_2, bool_3, bool_4)), error_type=RuntimeError, error_regex=r"tau.shape\[-1\] must be less than or equal to input.shape\[-1\]") def error_inputs_diag(op_info, device, **kwargs): zero_d = torch.randn((), device=device) yield ErrorInput(SampleInput(zero_d, args=(0,)), error_type=RuntimeError, error_regex="1D or 2D") zero_d = torch.randn(1, 1, 1, device=device) yield ErrorInput(SampleInput(zero_d, args=(0,)), error_type=RuntimeError, error_regex="1D or 2D") def error_inputs_embedding(op_info, device, **kwargs): indices = torch.rand(2, 2, device=device).long() weights = [ torch.tensor(1.0, device=device), torch.tensor(1.0, device=device).reshape(1, 1, 1), ] for weight in weights: yield ErrorInput(SampleInput(weight, args=(indices,)), error_type=RuntimeError, error_regex="'weight' must be 2-D") def error_inputs_t(op_info, device, **kwargs): yield ErrorInput( SampleInput(torch.randn(2, 3, 4, 5, device=device)), error_regex="expects a tensor with <= 2", ) def error_inputs_multinomial(op_info, device, **kwargs): x = torch.empty(1, 2, 3, dtype=torch.double, device=device) yield ErrorInput(SampleInput(x, args=(2,)), error_regex="prob_dist must be 1 or 2 dim") x = torch.empty(1, 2, dtype=torch.long, device=device) yield ErrorInput(SampleInput(x, args=(2,)), error_regex="multinomial only supports floating-point dtypes for input") x = torch.empty(1, 2, dtype=torch.double, device=device) y = torch.empty(1, 2, dtype=torch.double, device=device) yield ErrorInput(SampleInput(x, args=(2,), kwargs=dict(out=y)), error_regex="multinomial expects Long tensor out") x = torch.empty(2, dtype=torch.double, device=device) yield ErrorInput(SampleInput(x, args=(0,)), error_regex="cannot sample n_sample <= 0 samples") x = torch.empty(2, dtype=torch.double, device=device) yield ErrorInput(SampleInput(x, args=(-1,)), error_regex="cannot sample n_sample <= 0 samples") x = torch.empty(2, dtype=torch.double, 
device=device) yield ErrorInput(SampleInput(x, args=(3, False,)), error_regex="cannot sample n_sample > prob_dist") x = torch.empty(16777217, dtype=torch.double, device=device) yield ErrorInput(SampleInput(x, args=(3,)), error_regex="number of categories cannot exceed") inputs = ((1., -1., 1.), (1., inf, 1.), (1., -inf, 1.), (1., 1., nan)) err_msg1 = "probability tensor contains either `inf`, `nan` or element < 0" err_msg2 = "invalid multinomial distribution" rep_arg = (False, True) if torch.device(device).type == 'cpu' else (False,) if torch.device(device).type == 'cpu': for rep in rep_arg: kwargs = {'num_samples': 2, 'replacement': rep} for shape in inputs: # error case when input tensor contains `inf`, `nan` or negative element yield ErrorInput(SampleInput(torch.tensor(shape), kwargs=kwargs), error_regex=err_msg1 if rep is False else err_msg2) # error case for the invalid multinomial distribution (sum of probabilities <= 0), 1-D input x = torch.zeros(3, device=device) yield ErrorInput(SampleInput(x, kwargs=kwargs), error_regex=err_msg2) # error case for the invalid multinomial distribution (sum of probabilities <= 0), 2-D input x = torch.zeros(3, 3, device=device) yield ErrorInput(SampleInput(x, kwargs=kwargs), error_regex=err_msg2) # error case for the invalid multinomial distribution x[1, :] = 1 yield ErrorInput(SampleInput(x, kwargs=kwargs), error_regex=err_msg2) def error_inputs_gradient(op_info, device, **kwargs): for dtype in [torch.long, torch.float32, torch.complex64]: t = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], device=device, dtype=dtype) dim = (1, 0) spacing = [0.1] yield ErrorInput(SampleInput(t, kwargs=dict(spacing=spacing, dim=dim, edge_order=1)), error_type=RuntimeError, error_regex='torch.gradient expected spacing to be unspecified, a scalar ') yield ErrorInput(SampleInput(t, kwargs=dict(edge_order=3)), error_type=RuntimeError, error_regex='torch.gradient only supports edge_order=1 and edge_order=2.') dim = (1, 1) spacing = 0.1 yield ErrorInput(SampleInput(t, kwargs=dict(spacing=spacing, dim=dim, edge_order=1)), error_type=RuntimeError, error_regex='dim 1 appears multiple times in the list of dims') dim = (0, 1) coordinates = [torch.tensor([1, 2, 4], device='cpu'), torch.tensor([1, 2, 4], device='meta')] yield ErrorInput(SampleInput(t, kwargs=dict(spacing=coordinates, dim=dim, edge_order=1)), error_type=RuntimeError, error_regex='torch.gradient expected each tensor to be on the same device,') yield ErrorInput(SampleInput(t, kwargs=dict(dim=3)), error_type=IndexError, error_regex='') t = torch.tensor([[1], [2], [3]]) yield ErrorInput(SampleInput(t, kwargs=dict(edge_order=1)), error_type=RuntimeError, error_regex='torch.gradient expected each dimension size to be at least') t = torch.tensor([[1, 2], [3, 4]]) yield ErrorInput(SampleInput(t, kwargs=dict(edge_order=2)), error_type=RuntimeError, error_regex='torch.gradient expected each dimension size to be at least') def sample_inputs_rrelu(op_info, device, dtype, requires_grad, **kwargs): yield from sample_inputs_elementwise_unary( op_info, device, dtype, requires_grad, op_kwargs=dict(lower=0., upper=1., training=True)) make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) yield SampleInput(make_arg(S)) yield SampleInput(make_arg(S), training=False) def error_inputs_rrelu(op_info, device, **kwargs): input = make_tensor((S, S), device=device, dtype=torch.float32) yield ErrorInput(SampleInput(input, kwargs={'lower': 0.3, 'upper': 0.1}), error_regex='Lower bound should be less than or 
equal to the upper bound') def error_inputs_masked_select(op_info, device, **kwargs): x = torch.rand((1,), device=device).expand((3,)) y = torch.rand((6,), device=device) mask = torch.tensor([True, False, True, True, False, False], device=device) yield ErrorInput(SampleInput(y, args=(mask,), kwargs=dict(out=x)), error_type=RuntimeError, error_regex='unsupported operation') yield ErrorInput(SampleInput(y, args=(mask,), kwargs=dict(out=y)), error_type=RuntimeError, error_regex='unsupported operation') yield ErrorInput(SampleInput(mask.clone(), args=(mask,), kwargs=dict(out=mask)), error_type=RuntimeError, error_regex='unsupported operation') def error_inputs_median(op_info, device, **kwargs): x = torch.tensor([[[[[[[[[[[[[[[[[[[[[[[[[nan], [nan]]]]]]]]]]]]]]]]]]]]]]]]], device=device) if device == 'cuda': yield ErrorInput(SampleInput(x, kwargs=dict(dim=(-1))), error_type=RuntimeError, error_regex='CUDA Tensors cannot have more than 25 dimensions') else: return def error_inputs_index_select(op_info, device, **kwargs): x = torch.rand((1, 6), device=device).expand((2, 6)) y = torch.rand((3, 6), device=device) ind = torch.tensor([0, 1], dtype=torch.int64, device=device) yield ErrorInput(SampleInput(y, args=(1, ind,), kwargs=dict(out=x)), error_type=RuntimeError, error_regex='unsupported operation') def error_inputs_index_add(op_info, device, **kwargs): result = torch.tensor([[1., 2.], [4., 5.], [7., 8.]]) source = torch.tensor([2., 4.]) yield ErrorInput(SampleInput(result, args=(0, torch.tensor([0, 2]), source)), error_type=RuntimeError, error_regex=r'source tensor shape must match self tensor shape, ' r'excluding the specified dimension. Got self.shape = \[3, 2\] source.shape = \[2\]') def error_inputs_logcumsumexp(op_info, device, **kwargs): dim = 3 srcs = [torch.randn(5, 2, device=device), torch.randn(0, 2, device=device)] for src in srcs: yield ErrorInput(SampleInput(src, args=(dim,)), error_type=IndexError, error_regex='Dimension out of range') def sample_inputs_take_along_dim(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad, low=None, high=None) yield SampleInput( make_arg((S, S)), gather_variable((S, S), 1, S, True, device=device), 0) # `indices` broadcast yield SampleInput( make_arg((S, S)), gather_variable((1, S // 2), 0, S, True, device=device), 1) # `self` broadcast yield SampleInput( make_arg((1, S)), gather_variable((S, S // 2), 0, S, True, device=device), 1) # without `dim` arg yield SampleInput( make_arg((S, S)), gather_variable((S, S // 2), 0, S, True, device=device)) def error_inputs_aminmax_amax_amin(op_info, device, is_ref=False, **kwargs): # Error Inputs for zero-dim tensors, when 'dim' arg is not provided. 
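# (more precisely, the shape used below contains a zero-sized dimension, so a full
# reduction has no identity value and amax/amin/aminmax are expected to raise)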
shape = (S, 0, S) err_msg_amax_amin = "reduction" err_msg_aminmax = "cannot compute aminmax over an empty dimension as the operation has no identity" if op_info.name in ['amax', 'amin', '_refs.amax', '_refs.amin']: yield ErrorInput(SampleInput(torch.rand(shape, device=device)), error_regex=err_msg_amax_amin) elif op_info.name in ['aminmax']: yield ErrorInput(SampleInput(torch.rand(shape, device=device)), error_regex=err_msg_aminmax) # Error Inputs for tensors with more than 64 dimension sizes = [1] * 65 err_msg1 = "only tensors with up to 64 dims are supported" yield ErrorInput(SampleInput(torch.randn(sizes, device=device), kwargs={'dim': -1}), error_regex=err_msg1) yield ErrorInput(SampleInput(torch.randn(sizes, device=device), kwargs={'dim': 64}), error_regex=err_msg1) # Error Inputs for repeated 'dim' if op_info.name in ['amax', 'amin', '_refs.amax', '_refs.amin']: dims = [(0, 0), (0, -4)] err_msg2 = "in the list of dims" x = torch.randn(S, S, S, S, device=device) for dim in dims: yield ErrorInput(SampleInput(x, kwargs={'dim': dim}), error_regex=err_msg2) # Error Input for illegal dtype input5 = torch.randn(L, L, dtype=torch.float32, device=device) max_values = torch.empty(L, dtype=torch.float32, device=device) min_values = torch.empty(L, dtype=torch.double, device=device) illegal_values = torch.empty(L, dtype=torch.int, device=device) # Unlike regular PyTorch, amax and amin refs don't require input and out # dtypes to match exactly: # https://github.com/pytorch/pytorch/pull/87765#pullrequestreview-1162023824 if is_ref: err_msg_amax_amin2 = ("Attempting to cast from torch.float32 to out tensor with dtype " "torch.int32, but this can't be cast because it is not safe!") else: err_msg_amax_amin2 = ("Expected the dtype for input and out to match, but got Float " "for input's dtype and Int for out's dtype.") err_msg_aminmax2 = "Expected out tensor to have dtype float, but got double instead" if op_info.name in ['amax', 'amin', '_refs.amax', '_refs.amin']: yield ErrorInput(SampleInput(input5, kwargs={'dim': 0, 'out': illegal_values}), error_regex=err_msg_amax_amin2) elif op_info.name in ['aminmax']: yield ErrorInput(SampleInput(input5, kwargs={'dim': 0, 'out': (max_values, min_values)}), error_regex=err_msg_aminmax2) # Error Inputs for functions to raise an error on specified zero'd dimension as reduction dim err_msg3 = "reduction" # FIXME: eager and ref impl throw different types of errors error_type = IndexError if 'refs' not in op_info.name else RuntimeError yield ErrorInput(SampleInput(torch.rand(shape, device=device), kwargs={'dim': 1}), error_type=error_type, error_regex=err_msg3) def sample_inputs_aminmax(op_info, device, dtype, requires_grad, **kwargs): test_cases: tuple[tuple, dict] = ( # type: ignore[assignment] ((S, S, S), {}), ((S, S, S), {'dim': 1}), ((S, S, S), {'dim': 1, 'keepdim': True}), ((), {'dim': 0}), ((), {}), ((), {'dim': 0, 'keepdim': True}), ((S, 0, S), {'dim': 0}), ) for shape, kwargs in test_cases: yield SampleInput( make_tensor(shape, dtype=dtype, device=device, requires_grad=requires_grad), **kwargs) def error_inputs_diff(op_info, device, **kwargs): t = torch.rand((1, 3), device=device) n = -1 yield ErrorInput(SampleInput(t, args=(n, ), kwargs=kwargs), error_type=RuntimeError, error_regex=f'order must be non-negative but got {n}') def sample_inputs_diff(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) test_cases = ( ((1,), 0, None, None), ((S,), 0, None, None), ((S, 1), 0, 
None, None), ((S, 1), 1, None, None), ((S, S), 0, None, None), ((S, S), 1, None, None), ((S, S), 0, (1, S), (2, S)), ((S, S), 0, None, (2, S)), ((XS, XS, XS), 1, None, None), ((XS, XS, XS), 2, None, None), ((XS, XS, XS), 1, (XS, 1, XS), (XS, 1, XS)), ((XS, XS, XS), 2, (XS, XS, 1), (XS, XS, 1)), ((XS, XS, XS), 2, (XS, XS, XS), (XS, XS, XS)),) for size, dim, size_prepend, size_append in test_cases: prepend_size = 0 if (size_prepend is None) else size_prepend[dim] append_size = 0 if (size_append is None) else size_append[dim] dim_size = size[dim] + prepend_size + append_size for n in range(dim_size): input_tensor = make_arg(size) prepend = make_arg(size_prepend) if size_prepend else None append = make_arg(size_append) if size_append else None yield SampleInput(input_tensor, n, dim, prepend, append) # add some samples with n > dim_size yield SampleInput(make_arg((XS, XS, XS)), S + 1, 1) yield SampleInput(make_arg((XS, XS, XS)), S * 3 + 2, 2, make_arg((XS, XS, XS)), make_arg((XS, XS, XS))) def sample_inputs_histogram(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) sizes = ((), (S,), (S, S), (S, S, S), (S, 1, S), (S, 0, S)) for size, bin_ct, weighted, density in product(sizes, range(1, 5), [False, True], [False, True]): input_tensor = make_arg(size) weight_tensor = make_arg(size) if weighted else None yield SampleInput(input_tensor, bin_ct, weight=weight_tensor, density=density) bins_tensor = make_arg((bin_ct + 1,)) sorted_bins, _bins_indices = torch.sort(bins_tensor) yield SampleInput(input_tensor, sorted_bins, weight=weight_tensor, density=density) def sample_inputs_histogramdd(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) sizes = ((S, S), (S, S, S), (S, 1, S), (S, 0, S)) bin_ct_patterns = ((1, 1, 1, 1, 1), (2, 3, 2, 3, 2), (3, 2, 3, 2, 3)) for size, bin_ct_pattern, weighted, density in product(sizes, bin_ct_patterns, [False, True], [False, True]): input_tensor = make_arg(size) bin_ct = bin_ct_pattern[:size[-1]] weight_tensor = make_arg(size[:-1]) if weighted else None yield SampleInput(input_tensor, bin_ct, weight=weight_tensor, density=density) bins_tensor = [make_arg(ct + 1) for ct in bin_ct] yield SampleInput(input_tensor, bins_tensor, weight=weight_tensor, density=density) def error_inputs_histogramdd(opinfo, device, **kwargs): invalid_bins = [1, 1, 1, 1, 1] make_arg = partial(make_tensor, dtype=torch.float, device=device, requires_grad=False) msg = "histogramdd: The size of bins must be equal to the innermost dimension of the input." 
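# For the (5, 6) input below, the innermost dimension is 6, so `bins` must be a
# sequence of 6 bin counts; `invalid_bins` has only 5 entries and should trigger `msg`.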
yield ErrorInput(SampleInput(make_arg(5, 6), invalid_bins), error_regex=msg) def sample_inputs_histc(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) sizes = ((), (S,), (S, S), (S, S, S), (S, 1, S), (S, 0, S)) for size, min, max in product(sizes, [0, -10], [0, 10]): # construct sample input omitting bins arg yield SampleInput(make_arg(size), min=min, max=max) # construct sample inputs with a few different bins values for bins in [1, 3, 10]: yield SampleInput(make_arg(size), bins=bins, min=min, max=max) def sample_inputs_bincount(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) for size, weighted in product((S, M), [False, True]): input_tensor = torch.randint(0, size, (size,), dtype=dtype, device=device) weight_tensor = make_arg((size,)) if weighted else None max_val = int(input_tensor.max().item()) for minlength in [0, max_val // 2, max_val, 2 * max_val]: yield SampleInput( input_tensor, weights=weight_tensor, minlength=minlength) def sample_inputs_bucketize(op_info, device, dtype, requires_grad, reference_inputs_mode=False, **kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) sizes = (((), S), ((S,), S), ((S, S), S), ((S, S, S), S), ((S, 1, S), S), ((S, 0, S), S)) if reference_inputs_mode: sizes += (((256,), 128), ((128,), 256), ((32, 32), 11), ((32, 4, 32), 33)) for (input_shape, nb), out_int32, right in product(sizes, [False, True], [False, True]): input_tensor = make_arg(input_shape) boundaries = make_arg(nb).msort() yield SampleInput(input_tensor, boundaries, out_int32=out_int32, right=right) reference_inputs_bucketize = partial(sample_inputs_bucketize, reference_inputs_mode=True) def error_inputs_bucketize(opinfo, device, **kwargs): make_arg = partial(make_tensor, dtype=torch.float, device=device, requires_grad=False) yield ErrorInput(SampleInput(make_arg((S, S, S)), make_arg((S, S))), error_regex="boundaries tensor must be 1 dimension") def sample_inputs_searchsorted(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) # (unsorted tensor size, (input sizes,), is_scalar) sizes = ( ((0,), ((0,),), False), ((M,), ((), (M,), (M, M)), False), ((0, 0), ((0, 0),), False), ((M, M), ((M, M),), False), ((0, 0, 0), ((0, 0, 0),), False), ((M, M, M), ((M, M, M),), False), ((L,), ((),), True), ) for (size, input_sizes, is_scalar), noncontiguous, out_int32, right in product( sizes, [False, True], [False, True], [False, True] ): unsorted_tensor = make_arg(size, noncontiguous=noncontiguous) for input_size in input_sizes: input = make_arg(input_size, noncontiguous=noncontiguous) if is_scalar: input = input.item() if np.prod(size) == 0: boundary_tensor = unsorted_tensor sorter = make_tensor(size, dtype=torch.int64, device=device, noncontiguous=noncontiguous) else: boundary_tensor, sorter = torch.sort(unsorted_tensor) side = "right" if right else "left" yield SampleInput(boundary_tensor, input, out_int32=out_int32, right=right) yield SampleInput(boundary_tensor, input, out_int32=out_int32, side=side) yield SampleInput(unsorted_tensor, input, out_int32=out_int32, right=right, sorter=sorter) yield SampleInput(unsorted_tensor, input, out_int32=out_int32, side=side, sorter=sorter) def sample_inputs_gradient(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, 
device=device, dtype=dtype, requires_grad=requires_grad, low=None, high=None) test_cases_float = ( ((S,), None, None, 1), ((S,), 2., None, 1), ((S, S), None, None, 2), ((S, S), [2.0, 2.1], None, 1), ((S, S), [2.0, 2.1], (0, 1), 1), ((4, 4, 4), [2., 1.], (0, 1), 2), ) for size, spacing, dim, edge_order in test_cases_float: t = make_arg(size) yield SampleInput(t, dim=dim, spacing=spacing, edge_order=edge_order) test_cases_tensor = ( ((3, 3, 3), ((1.1, 2.0, 3.5), (4.0, 2, 6.0)), (0, -1), 1), ((3, 3, 3), ((1.0, 3.0, 2.0), (8.0, 6.0, 1.0)), (0, 1), 2), ) for size, coordinates, dim, edge_order in test_cases_tensor: t = make_arg(size) coordinates_tensor_list = [] for coords in coordinates: # `coords` will always contain floating point values and Python 3.10 does not support this # implicit conversion to an integer using `__int__` # TODO: this can be simplified after https://github.com/pytorch/pytorch/issues/69316 is fixed a = torch.tensor(coords, device=device) coordinates_tensor_list.append(a.to(dtype)) yield SampleInput(t, dim=dim, spacing=coordinates_tensor_list, edge_order=edge_order) def sample_inputs_getitem(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) test_args = [ ([1, 2],), (slice(0, 3),), ([slice(0, 3), 1],), ([[0, 2, 3], [1, 3, 3], [0, 0, 2]],), ([[0, 0, 3], [1, 1, 3], [0, 0, 2]],), ([slice(None), slice(None), [0, 3]],), ([slice(None), [0, 3], slice(None)],), ([[0, 3], slice(None), slice(None)],), ([[0, 3], [1, 2], slice(None)],), ([[0, 3], ],), ([[0, 3], slice(None)],), ([[0, 3], Ellipsis],), ([[0, 2, 3], [1, 3, 3], torch.LongTensor([0, 0, 2])],), (index_variable(2, S, device=device),), (mask_not_all_zeros((S,)),), ] for args in test_args: yield SampleInput(make_arg((S, S, S)), args=args) yield SampleInput(make_arg((S, S, S, S)), args=([slice(None), [0, 1], slice(None), [0, 1]],)) def sample_inputs_index_put(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) for accumulate in [False, True]: # Test with indices arg yield SampleInput( make_arg((S, S,)), # As defined in the docs, if accumulate is false, duplicate indices are not supported (index_variable(2 if accumulate else 1, S, device=device),), make_arg((2 if accumulate else 1, S)), accumulate=accumulate) # Test with mask arg mask = torch.zeros(S, dtype=torch.bool) if accumulate else mask_not_all_zeros((S,)) yield SampleInput( make_arg((S, S)), (mask, ), make_arg((S,)), accumulate=accumulate) def sample_inputs_sort(op_info, device, dtype, requires_grad, **kwargs): def small_3d_unique(): res = torch.randperm(S * S * S, dtype=torch.int64, device=device).view(S, S, S) res = res.to(dtype).requires_grad_(requires_grad) return res def large_1d_unique(): res = torch.randperm(L * L * L, dtype=torch.int64, device=device) res = res.to(dtype).requires_grad_(requires_grad) return res # Test case for large tensor. yield SampleInput(large_1d_unique()) # Test cases for small 3d tensors. 
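# (small_3d_unique() builds each tensor from a randperm, so every element is distinct
# and the expected ordering is unambiguous even without a stable sort)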
# Imitates legacy tests from test/test_torch.py dims = range(-3, 3) flag = [True, False] for dim, descending, stable in product(dims, flag, flag): # default schema without stable sort if not (dtype == torch.bool and torch.device(device).type == 'cuda'): # bool and cuda requires stable sort for stable results, at least # for the return index yield SampleInput(small_3d_unique(), dim, descending) # schema with stable sort, no CUDA support yet if torch.device(device).type == 'cpu': yield SampleInput( small_3d_unique(), dim=dim, descending=descending, stable=stable) # Test cases for scalar tensor tensor_opt = dict(dtype=dtype, device=device, requires_grad=requires_grad) yield SampleInput(torch.tensor(1, **tensor_opt)) yield SampleInput(torch.tensor(1, **tensor_opt), 0) yield SampleInput(torch.tensor(1, **tensor_opt), 0, True) # Test cases for empty tensor yield SampleInput(torch.tensor((), **tensor_opt)) yield SampleInput(torch.tensor((), **tensor_opt), 0) yield SampleInput(torch.tensor((), **tensor_opt), 0, True) # Test cases for stable sort yield SampleInput(small_3d_unique(), stable=True) yield SampleInput(small_3d_unique(), dim=0, stable=True) yield SampleInput(small_3d_unique(), dim=0, descending=True, stable=True) def sample_inputs_threshold(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) sizes = ((), (S,), (S, S), (S, S, S)) for x_size in sizes: # threshold and values args must be numbers yield SampleInput(make_arg(x_size), make_arg(()).item(), make_arg(()).item()) def sample_inputs_unique(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) sizes = ((), (S,), (S, S), (S, S, S), (S, 1, S), (S, 0, S)) for shape, sorted, return_inverse, return_counts, dim in \ product(sizes, [False, True], [False, True], [False, True], [None, -2, -1, 0, 1, 2]): # torch.unique cannot be called if the input tensor has a zero dimension which isn't the selected dim if 0 in shape and shape.index(0) is not dim: continue # skip invalid dim args if dim is not None and (dim < -len(shape) or dim >= len(shape)): continue kwargs = dict(sorted=sorted, return_inverse=return_inverse, return_counts=return_counts, dim=dim) # construct a test case with only one distinct value input_t = torch.zeros(shape, dtype=dtype, device=device, requires_grad=requires_grad) yield SampleInput(input_t, **kwargs) # construct a test case with mixed 0s and 1s input_t = make_arg(shape, dtype=torch.bool, requires_grad=False)\ .to(dtype).requires_grad_(requires_grad) yield SampleInput(input_t, **kwargs) # construct a test case with many different values yield SampleInput(make_arg(shape), **kwargs) def sample_inputs_unique_consecutive(*args, **kwargs): for sample_input in sample_inputs_unique(*args, **kwargs): if not sample_input.kwargs["sorted"]: sample_input.kwargs.pop("sorted") yield sample_input def sample_inputs_adaptive_avg_pool1d(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) # Ordered as (input shape, output size) cases = ( ((0, 8, 8), (5,)), ((3, 8, 8), 5), ((3, 8, 8), 1) ) for input_shape, output_size in cases: # Batched yield SampleInput(make_arg(input_shape), args=(output_size,)) # Unbatched yield SampleInput(make_arg(input_shape[1:]), args=(output_size,)) def error_inputs_adaptive_avg_pool1d(opinfo, device, **kwargs): make_arg = partial(make_tensor, device=device, 
dtype=torch.float32) # error inputs for empty output yield ErrorInput(SampleInput(make_arg((1, 2, 3)), output_size=()), error_regex="'output_size' should contain one int") # error inputs for output_size lesser than 0 yield ErrorInput(SampleInput(make_arg((1, 1, 1)), output_size=(-1,)), error_regex="elements of output_size must be greater than or equal to 0") def sample_inputs_adaptive_avg_pool2d(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) # Ordered as (input shape, output size) cases = ( ((1, 8, 8, 8), (5, 7)), ((2, 8, 8, 8), (None, 7)), ((1, 8, 4, 3), (5, None)), ((1, 8, 4, 3), (None, None)), ((1, 8, 4, 3), (5)), ) for input_shape, output_size in cases: # Batched yield SampleInput(make_arg(input_shape), args=(output_size,)) # Unbatched yield SampleInput(make_arg(input_shape[1:]), args=(output_size,)) def error_inputs_adaptive_avg_pool2d(opinfo, device, **kwargs): make_arg = partial(make_tensor, device=device, dtype=torch.float32) # error inputs for incorrect input dimension yield ErrorInput(SampleInput(make_arg((2, 2)), output_size=(2, 2)), error_type=ValueError, error_regex="Input dimension should be at least 3") # error inputs for empty output yield ErrorInput(SampleInput(make_arg((1, 2, 3, 4)), output_size=()), error_regex="output_size must be 2") # error inputs for output_size lesser than 0 yield ErrorInput(SampleInput(make_arg((1, 1, 1, 1)), output_size=(-1, 0)), error_regex="elements of output_size must be greater than or equal to 0") def sample_inputs_adaptive_avg_pool3d(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) # Ordered as (input shape, output size) cases = ( ((0, 8, 8, 8, 8), (5, 7, 4)), ((1, 8, 4, 3, 7), (None, None, None)), ((1, 8, 4, 3, 7), (1, 1, 1)), ((3, 3, 8, 8, 6), (5, 7, None)), ((1, 3, 8, 8, 6), (5, None, 2)), ((3, 3, 8, 8, 6), (None, 3, 2)), ) for input_shape, output_size in cases: # Batched yield SampleInput(make_arg(input_shape), args=(output_size,)) # Unbatched yield SampleInput(make_arg(input_shape[1:]), args=(output_size,)) def error_inputs_adaptive_avg_pool3d(opinfo, device, **kwargs): make_arg = partial(make_tensor, device=device, dtype=torch.float32) # error inputs for incorrect input dimension yield ErrorInput(SampleInput(make_arg((2, 2, 2)), output_size=(2, 2, 2)), error_type=ValueError, error_regex="Input dimension should be at least 4") # error inputs for empty output yield ErrorInput(SampleInput(make_arg((1, 2, 3, 4)), output_size=()), error_regex="output_size must be 3") # error inputs for output_size lesser than 0 yield ErrorInput(SampleInput(make_arg((1, 1, 1, 1, 1)), output_size=(-1, 0, 2)), error_regex="elements of output_size must be greater than or equal to 0") def sample_inputs_adaptive_max_pool1d(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) # Ordered as (input shape, output size) cases = ( # ((0, 8, 8), (5,)), # 0 batch size doesn't work, cannot reshape tensor of 0 elements into shape [0, 8, -1] ((3, 4, 4), 3), ((3, 4, 4), 1) ) for shapes, return_idx in product(cases, (True, False)): # Batched yield SampleInput(make_arg(shapes[0]), args=(shapes[1], return_idx)) # Unbatched yield SampleInput(make_arg(shapes[0][1:]), args=(shapes[1], return_idx)) def error_inputs_adaptive_max_pool1d(opinfo, device, **kwargs): make_arg = partial(make_tensor, device=device, 
dtype=torch.float32) # error inputs for empty output yield ErrorInput(SampleInput(make_arg((1, 2, 3)), output_size=()), error_regex="'output_size' should contain one int") # error inputs for output_size lesser than 0 yield ErrorInput(SampleInput(make_arg((1, 1, 1)), output_size=(-1,)), error_regex="Trying to create tensor with negative dimension") def sample_inputs_adaptive_max_pool2d(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) # Ordered as (input shape, output size) cases = ( # ((0, 8, 8, 8), (5, 7)), # 0 batch size doesn't work, cannot reshape tensor of 0 elements into shape [0, 8, -1] ((1, 4, 4, 4), (2, 3)), ((2, 4, 4, 4), (None, 3)), ((2, 4, 4, 4), (1, 1)), ((1, 4, 4, 3), (3, None)), ((1, 4, 4, 3), (None, None)), ((1, 4, 4, 3), (3)), ) for shapes, return_idx in product(cases, (True, False)): # Batched yield SampleInput(make_arg(shapes[0]), args=(shapes[1], return_idx)) # Unbatched yield SampleInput(make_arg(shapes[0][1:]), args=(shapes[1], return_idx)) def error_inputs_adaptive_max_pool2d(opinfo, device, **kwargs): make_arg = partial(make_tensor, device=device, dtype=torch.float32) # error inputs for incorrect input dimension yield ErrorInput(SampleInput(make_arg((2, 2)), output_size=(2, 2)), error_type=ValueError, error_regex="Input dimension should be at least 3") # error inputs for empty output yield ErrorInput(SampleInput(make_arg((1, 2, 3, 4)), output_size=()), error_regex="internal error") # error inputs for output_size lesser than 0 yield ErrorInput(SampleInput(make_arg((1, 1, 1, 1)), output_size=(-1, 0)), error_regex="Trying to create tensor with negative dimension") def sample_inputs_adaptive_max_pool3d(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) # Ordered as (input shape, output size) cases = ( # ((0, 8, 8, 8, 8), (5, 7, 4)), # 0 batch size doesn't work, cannot reshape tensor of 0 elements into shape [0, 8, -1] ((1, 4, 4, 3, 5), (None, None, None)), ((1, 4, 4, 3, 5), (1, 1, 1)), ((3, 3, 4, 4, 6), (2, 3, None)), ((1, 3, 4, 4, 6), (3, None, 2)), ((3, 3, 4, 4, 6), (None, 3, 2)), ) for shapes, return_idx in product(cases, (True, False)): # Batched yield SampleInput(make_arg(shapes[0]), args=(shapes[1], return_idx)) # Unbatched yield SampleInput(make_arg(shapes[0][1:]), args=(shapes[1], return_idx)) def error_inputs_adaptive_max_pool3d(opinfo, device, **kwargs): make_arg = partial(make_tensor, device=device, dtype=torch.float32) # error inputs for incorrect input dimension yield ErrorInput(SampleInput(make_arg((2, 2, 2)), output_size=(2, 2, 2)), error_type=ValueError, error_regex="Input dimension should be at least 4") # error inputs for empty output yield ErrorInput(SampleInput(make_arg((1, 2, 3, 4)), output_size=()), error_regex="internal error") # error inputs for output_size lesser than 0 yield ErrorInput(SampleInput(make_arg((1, 1, 1, 1, 1)), output_size=(-1, 0, 2)), error_regex="Trying to create tensor with negative dimension") class _TestParamsMaxPoolBase: def __init__(self) -> None: self.kwargs = { 'kernel_size': [3], 'stride': [2, None], 'ceil_mode': [True, False], 'padding': [0, 1], 'dilation': [1], 'return_indices': [True, False] } self.shapes = [ [1, 2, None], # batch [2], # channels [3, 6] # signal ] def _gen_shape(self): for shape in product(*self.shapes): # shape[0] is None indicates missing batch dimension if shape[0] is None: shape = shape[1:] yield shape, 
torch.contiguous_format # only 2d (N, C, H, W) rank 4 tensors support channels_last memory format if len(self.shapes) == 4 and len(shape) == 4: yield shape, torch.channels_last def _gen_kwargs(self): keys = self.kwargs.keys() for values in product(*self.kwargs.values()): yield dict(zip(keys, values)) def gen_input_params(self): yield from product(self._gen_shape(), self._gen_kwargs()) class _TestParamsMaxPool1d(_TestParamsMaxPoolBase): def __init__(self) -> None: super().__init__() self.kwargs['kernel_size'] += [(3,)] self.kwargs['stride'] += [(2,)] self.kwargs['padding'] += [(1,)] self.kwargs['dilation'] += [(1,)] class _TestParamsMaxPool2d(_TestParamsMaxPoolBase): def __init__(self) -> None: super().__init__() self.kwargs['kernel_size'] += [(3, 2)] self.kwargs['stride'] += [(2, 1)] self.kwargs['padding'] += [(1, 1)] self.kwargs['dilation'] += [(1, 2)] self.shapes.append([6]) class _TestParamsMaxPool3d(_TestParamsMaxPoolBase): def __init__(self) -> None: super().__init__() self.kwargs['kernel_size'] += [(3, 2, 3)] self.kwargs['stride'] += [(2, 1, 2)] self.kwargs['dilation'] += [(1, 2, 1)] self.shapes.append([6]) self.shapes.append([5]) def sample_inputs_max_pool(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=False) params_generator_type_dict = { 'nn.functional.max_pool1d': _TestParamsMaxPool1d, 'nn.functional.max_pool2d': _TestParamsMaxPool2d, 'nn.functional.max_pool3d': _TestParamsMaxPool3d, 'max_pool2d_with_indices_backward': _TestParamsMaxPool2d, } params_generator = params_generator_type_dict[op_info.name]() for (shape, memory_format), kwargs in params_generator.gen_input_params(): arg = make_arg(shape).to(memory_format=memory_format).requires_grad_(requires_grad) yield SampleInput(arg, kwargs=kwargs) def max_pool2d_backward(*args, kernel_size=(), stride=(), padding=(0,), dilation=(1,), ceil_mode=False, **kwargs): out, indices = torch.nn.functional.max_pool2d_with_indices( *args, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, ceil_mode=ceil_mode, return_indices=True) grad_out = torch.ones_like(out) if stride is None: stride = kernel_size out_b = torch.ops.aten.max_pool2d_with_indices_backward.default( grad_out, *args, kernel_size, stride, padding, dilation, ceil_mode, indices) return out_b def error_inputs_max_pool1d(op_info, device, **kwargs): # Toggle requires_grad because `max_pool1d` has different path # based on whether `requires_grad` is set or not. 
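    # (The two code paths can surface shape/argument errors from different kernels, so
    # the entire body below sits inside the loop and every ErrorInput is yielded once
    # per requires_grad setting.)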
for requires_grad in (True, False): make_arg = partial(make_tensor, device=device, dtype=torch.float, requires_grad=requires_grad) # error inputs when pad is negative x = make_arg((0, 1, 49)) yield ErrorInput(SampleInput(x, kwargs={'kernel_size': 2, 'stride': 50, 'padding': -1, 'return_indices': True}), error_regex='pad must be non-negative') # error inputs when pad > kernel_size / 2 yield ErrorInput(SampleInput(x, kwargs={'kernel_size': 2, 'stride': 50, 'padding': 4, 'return_indices': True}), error_regex='pad should be at most half of effective kernel size') # error inputs when pad > ((kernel_size - 1) * dilation + 1) / 2, when dilation is not default yield ErrorInput(SampleInput(x, kwargs={'kernel_size': 3, 'dilation': 2, 'stride': 1, 'padding': 3, 'return_indices': True}), error_regex='pad should be at most half of effective kernel size') # error inputs for input tensor error_msg = r'Expected 2D or 3D \(batch mode\) tensor with optional 0 dim batch size for input' yield ErrorInput(SampleInput(make_arg((), requires_grad=requires_grad), kwargs={'kernel_size': 1}), error_regex=error_msg) # error inputs for empty input yield ErrorInput(SampleInput(torch.tensor([], device=device, requires_grad=requires_grad), kwargs={'kernel_size': 1}), error_regex=error_msg) # error: unbatched input with 0 sized non-batch dims. yield ErrorInput(SampleInput(make_arg((0, 10), requires_grad=requires_grad), kwargs={'kernel_size': 1}), error_regex=error_msg) # error: batched input with 0 sized non-batch dims. yield ErrorInput(SampleInput(make_arg((1, 10, 0), requires_grad=requires_grad), kwargs={'kernel_size': 1}), error_regex=error_msg) # error inputs for empty input with stride=0 error_msg = 'stride must be greater than zero, but got 0' yield ErrorInput(SampleInput(make_arg((3, 3, 3)), kwargs={'kernel_size': 1, 'stride': 0}), error_regex=error_msg) # error inputs for empty input with dilation=0 error_msg = 'dilation must be greater than zero, but got 0' yield ErrorInput(SampleInput(make_arg((3, 3, 3)), kwargs={'kernel_size': 1, 'stride': 1, 'padding': 0, 'dilation': 0}), error_regex=error_msg) # error inputs for invalid output size error_msg = 'Invalid computed output size: -2' yield ErrorInput(SampleInput(make_arg((2, 2, 2)), kwargs={'kernel_size': 5, 'stride': 1, 'padding': 0, 'dilation': 1}), error_regex=error_msg) # error inputs when kernel_size=0 error_msg = 'kernel_size must be greater than zero' yield ErrorInput(SampleInput(x, kwargs={'kernel_size': 0}), error_regex=error_msg) # error inputs for strides > 0 error_msg = 'stride must be greater than zero' yield ErrorInput(SampleInput(x, kwargs={'kernel_size': 2, 'stride': 0}), error_regex=error_msg) def error_inputs_max_pool2d(op_info, device, **kwargs): make_arg = partial(make_tensor, device=device, dtype=torch.float, requires_grad=False) # error inputs when pad is negative x = make_arg((0, 1, 49)) yield ErrorInput(SampleInput(x, kwargs={'kernel_size': 2, 'stride': 50, 'padding': -1, 'return_indices': True}), error_regex='pad must be non-negative') # 2-dimensional kernel yield ErrorInput(SampleInput(x, kwargs={'kernel_size': (3, 2), 'stride': 50, 'padding': -1, 'return_indices': True}), error_regex='pad must be non-negative') # error inputs when pad > kernel_size / 2 (kernel_size : int) yield ErrorInput(SampleInput(x, kwargs={'kernel_size': 2, 'stride': 50, 'padding': 4, 'return_indices': True}), error_regex='pad should be at most half of effective kernel size') # error inputs when pad > kernel_size / 2 (kernel_size : tuple) yield 
ErrorInput(SampleInput(x, kwargs={'kernel_size': (3, 2), 'stride': 50, 'padding': 4, 'return_indices': True}), error_regex='pad should be at most half of effective kernel size') # error: unbatched input with 0 sized non-batch dims. err_msg = r'Expected 3D or 4D \(batch mode\) tensor with optional 0 dim batch size for input' yield ErrorInput(SampleInput(make_arg((1, 0, 10)), kwargs={'kernel_size': 1}), error_regex=err_msg) # error: batched input with 0 sized non-batch dims. yield ErrorInput(SampleInput(make_arg((2, 1, 10, 0)), kwargs={'kernel_size': 1}), error_regex=err_msg) def error_inputs_max_pool3d(op_info, device, **kwargs): make_arg = partial(make_tensor, device=device, dtype=torch.float, requires_grad=False) # error inputs when pad is negative x = make_arg((0, 1, 49, 50)) yield ErrorInput(SampleInput(x, kwargs={'kernel_size': 2, 'stride': 50, 'padding': -1, 'return_indices': True}), error_regex='pad must be non-negative') # 3-dimensional kernel yield ErrorInput(SampleInput(x, kwargs={'kernel_size': (3, 2, 2), 'stride': 50, 'padding': -1, 'return_indices': True}), error_regex='pad must be non-negative') # error inputs when pad > kernel_size / 2 (kernel_size: int) yield ErrorInput(SampleInput(x, kwargs={'kernel_size': 2, 'stride': 50, 'padding': 4, 'return_indices': True}), error_regex='pad should be at most half of effective kernel size') # error inputs when pad > kernel_size / 2 (kernel_size: tuple) yield ErrorInput(SampleInput(x, kwargs={'kernel_size': (3, 2, 2), 'stride': 50, 'padding': 4, 'return_indices': True}), error_regex='pad should be at most half of effective kernel size') # error: unbatched input with 0 sized non-batch dims. err_msg = r'Expected input\'s non-batch dimensions to have positive length' yield ErrorInput(SampleInput(make_arg((0, 1, 2, 10)), kwargs={'kernel_size': 1}), error_regex=err_msg) # error: batched inputs with 0 sized non-batch dims. 
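    # (Note: unlike the 1d/2d variants, max_pool3d reports this case by checking that the
    # non-batch dimensions have positive length, hence the different error_regex above.)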
yield ErrorInput(SampleInput(make_arg((2, 1, 0, 1, 2)), kwargs={'kernel_size': 1}), error_regex=err_msg) def sample_inputs_normalize(self, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, low=-1, high=1, device=device, dtype=dtype, requires_grad=requires_grad) cases: tuple[tuple[int], dict] = ( # type: ignore[assignment] ((2, 1, 4, 5), {'p': 1., 'dim': 2}), ((2, 3, 4, 5), {'p': 2., 'dim': 1}), ((1, 2, 4, 5), {'p': 0.5, 'dim': 0}), ((1, 3, 4, 5), {'p': -1., 'dim': 1}), ((1, 3, 4, 5), {'p': 0., 'dim': -1}), ((), {'p': 1.2, 'dim': 0}), ((2, 3, 4, 5), {}), ((2, 3, 4, 5), {'eps': 1e-4})) for input_shape, kwargs in cases: yield SampleInput(make_arg(input_shape), kwargs=kwargs) def complex_conv(fn, input_size, weight, grad_output, stride, padding, dilation, groups): # conv(W, x, b) = conv(Wr, xr, br) - conv(Wi, xi, 0) + i(conv(Wi, xr, bi) + conv(Wr, xi, 0)) # a = conv(Wr, xr, br), # b = conv(Wi, xi, 0), # c = conv(Wr + Wi, xr + xi, br + bi) # conv(W, x, b) = a - b + i(c - a - b) grad_output_ = torch.view_as_real(grad_output) grad_output_r = grad_output_[..., 0] grad_output_i = grad_output_[..., 1] weight_ = torch.view_as_real(weight) weight_r = weight_[..., 0] weight_i = weight_[..., 1] a = fn(input_size, weight_r, grad_output_r, stride, padding, dilation, groups) b = fn(input_size, weight_i, grad_output_i, stride, padding, dilation, groups) c = fn(input_size, weight_r + weight_i, grad_output_r + grad_output_i, stride, padding, dilation, groups) return (a - b) + 1j * (c - a - b) def conv_transpose_ref(input, weight, bias, stride=1, padding=0, output_padding=0, dilation=1, groups=1, fn=None): # Derivative of `conv` is `conv_transpose`. # To verify the correctness of `conv_transpose`, # we rely `torch.nn.grad` implementation (which is tested in test_nn.py) # for floating dtypes. assert fn is not None grad_fn_map = {torch.nn.functional.conv_transpose1d: torch.nn.grad.conv1d_input, torch.nn.functional.conv_transpose2d: torch.nn.grad.conv2d_input, torch.nn.functional.conv_transpose3d: torch.nn.grad.conv3d_input} batched_dim_map = {torch.nn.functional.conv_transpose1d: 3, torch.nn.functional.conv_transpose2d: 4, torch.nn.functional.conv_transpose3d: 5} # Input for `ref` is ndarray. input, weight = torch.from_numpy(input), torch.from_numpy(weight) is_batched = len(input.shape) == batched_dim_map[fn] if not is_batched: input = input.unsqueeze(0) if bias is not None: bias = torch.from_numpy(bias) unsqueeze_dims = input.ndim - 2 for _ in range(unsqueeze_dims): bias = bias.unsqueeze(1) grad_output = input # Get the input shape for grad_fn. 
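    # The forward conv_transpose call is evaluated on 'meta' tensors purely for shape
    # inference: torch.nn.grad.conv{1,2,3}d_input needs the size of the tensor that the
    # matching forward convolution would have consumed, and the meta device produces
    # that shape without allocating or computing any real data.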
conv_transpose_output = fn(grad_output.to('meta'), weight.to('meta'), None, stride=stride, padding=padding, output_padding=output_padding, groups=groups, dilation=dilation) input_size = conv_transpose_output.shape grad_fn = grad_fn_map[fn] if weight.dtype.is_complex: out = complex_conv(grad_fn, input_size, weight, grad_output, stride, padding, dilation, groups) else: # Floating out = grad_fn(input_size, weight, grad_output, stride, padding, dilation, groups) if bias is not None: out = out + bias return out.squeeze(0) if not is_batched else out def sample_inputs_conv_transpose1d(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) # Ordered as shapes for input, weight, bias # and a dict of values of (stride, padding, output_padding, groups, dilation) cases: tuple[tuple[int], tuple[int], tuple[int], dict] = ( # type: ignore[assignment] ((1, 3, 4), (3, 3, 3), (3,), {'stride': (2,), 'padding': 2, 'output_padding': (1,), 'groups': 1}), ((2, 2, 4), (2, 2, 4), (4,), {'stride': (3,), 'padding': (1,), 'output_padding': (2,), 'groups': 2, 'dilation': (4,)}), ((1, 1, 4), (1, 1, 4), (1,), {'stride': 2, 'padding': 1, 'output_padding': 1, 'groups': 1, 'dilation': (2,)}), ((1, 1, 4), (1, 2, 3), None, {'stride': 2, 'padding': 1, 'output_padding': 1, 'groups': 1}), ((1, 4, 5), (4, 8, 3), None, {}) ) for input_shape, weight, bias, kwargs in cases: # Batched yield SampleInput(make_arg(input_shape), args=( make_arg(weight), make_arg(bias) if bias is not None else bias ), kwargs=kwargs) # Unbatched yield SampleInput(make_arg(input_shape[1:]), args=( make_arg(weight), make_arg(bias) if bias is not None else bias ), kwargs=kwargs) def sample_inputs_conv_transpose2d(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) # Ordered as shapes for input, weight, bias # and a dict of values of (stride, padding, output_padding, groups, dilation) cases: tuple[tuple[int], tuple[int], tuple[int], dict] = ( # type: ignore[assignment] ((1, 3, 4, 4), (3, 3, 3, 3), (3,), {'stride': (2, 2), 'padding': 2, 'output_padding': (1, 1), 'groups': 1}), ((2, 2, 4, 4), (2, 2, 4, 5), (4,), {'stride': (3, 2), 'padding': (1, 2), 'output_padding': (2, 3), 'groups': 2, 'dilation': (4, 4)}), ((1, 1, 4, 5), (1, 1, 4, 3), (1,), {'stride': 2, 'padding': 1, 'output_padding': 1, 'groups': 1, 'dilation': (2, 3)}), ((1, 1, 4, 3), (1, 2, 3, 4), None, {'stride': 2, 'padding': 1, 'output_padding': 1, 'groups': 1}), ((2, 4, 4, 4), (4, 1, 3, 3), None, {'groups': 4}), ((1, 2, 5, 5), (2, 4, 3, 3), None, {}) ) for input_shape, weight, bias, kwargs in cases: # Batched yield SampleInput(make_arg(input_shape), args=( make_arg(weight), make_arg(bias) if bias is not None else bias ), kwargs=kwargs) # Unbatched yield SampleInput(make_arg(input_shape[1:]), args=( make_arg(weight), make_arg(bias) if bias is not None else bias ), kwargs=kwargs) def sample_inputs_conv_transpose3d(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) # Ordered as shapes for input, weight, bias # and a dict of values of (stride, padding, output_padding, groups, dilation) cases: tuple[tuple[int], tuple[int], tuple[int], dict] = ( # type: ignore[assignment] ((1, 3, 4, 4, 4), (3, 3, 3, 3, 3), (3,), {'stride': (2, 2, 2), 'padding': 2, 'output_padding': (1, 1, 1), 'groups': 1}), ((2, 2, 4, 4, 4), (2, 2, 4, 5, 6), (4,), {'stride': (3, 2, 1), 
'padding': (1, 2, 3), 'output_padding': (2, 3, 1), 'groups': 2, 'dilation': (4, 4, 4)}), ((1, 1, 4, 5, 2), (1, 1, 4, 3, 1), (1,), {'stride': 2, 'padding': 1, 'output_padding': 1, 'groups': 1, 'dilation': (2, 3, 2)}), ((1, 1, 4, 3, 4), (1, 2, 3, 4, 5), None, {'stride': 2, 'padding': 1, 'output_padding': 1, 'groups': 1}), ((1, 4, 5, 5, 5), (4, 8, 3, 3, 3), None, {}) ) for input_shape, weight, bias, kwargs in cases: # Batched yield SampleInput(make_arg(input_shape), args=( make_arg(weight), make_arg(bias) if bias is not None else bias ), kwargs=kwargs) # Unbatched yield SampleInput(make_arg(input_shape[1:]), args=( make_arg(weight), make_arg(bias) if bias is not None else bias ), kwargs=kwargs) def sample_inputs_conv1d(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) # Ordered as shapes for input, weight, bias, # and a dict of values of (stride, padding, dilation, groups) cases: tuple = ( ((1, 3, 4), (3, 3, 3), (3,), {'stride': (2,), 'padding': 2, 'groups': 1}), ((2, 4, 8), (2, 2, 3), (2,), {'stride': 3, 'padding': 1, 'groups': 2, 'dilation': 2}), ((1, 4, 5), (1, 4, 3), None, {'stride': (2,), 'padding': 'valid'}), ((2, 2, 4), (2, 1, 4), (2,), {'stride': (1,), 'padding': 'same', 'groups': 2, 'dilation': (2,)}), # With defaults ((1, 4, 5), (3, 4, 3), None, {}), ) for input_shape, weight, bias, kwargs in cases: # Batched yield SampleInput(make_arg(input_shape), args=( make_arg(weight), make_arg(bias) if bias is not None else bias ), kwargs=kwargs) # Unbatched yield SampleInput(make_arg(input_shape[1:]), args=( make_arg(weight), make_arg(bias) if bias is not None else bias ), kwargs=kwargs) def error_inputs_conv1d(opinfo, device, **kwargs): make_arg = partial(make_tensor, device=device, dtype=torch.float64) make_int_arg = partial(make_tensor, device=device, dtype=torch.int64) make_complex_arg = partial(make_tensor, device=device, dtype=torch.complex128) # error inputs for different dtypes of input tensor and bias yield ErrorInput( SampleInput(make_int_arg((1, 1, 4)), args=(make_int_arg((1, 1, 2)), make_arg((1,)))), error_regex="should be the same") # error inputs for different dtypes of input tensor and bias yield ErrorInput( SampleInput(make_arg((1, 1, 4)), args=(make_arg((1, 1, 2)), make_complex_arg((1,)))), error_regex="should be the same") # error inputs for negative strides yield ErrorInput( SampleInput(make_arg((1, 1, 4)), args=(make_arg((1, 2, 2)), make_arg((1,))), kwargs={'stride': (-1,)}), error_regex="non-positive stride is not supported") # error inputs for negative padding yield ErrorInput( SampleInput(make_arg((1, 1, 4)), args=(make_arg((1, 2, 2)), make_arg((1,))), kwargs={'padding': (-1,)}), error_regex="negative padding is not supported") # error inputs for negative dilation yield ErrorInput( SampleInput(make_arg((1, 1, 4)), args=(make_arg((1, 1, 2)), make_arg((1,))), kwargs={'dilation': (-1,)}), error_regex="dilation should be greater than zero") # FIXME: https://github.com/pytorch/pytorch/issues/85656 # error inputs for bias shape not equal to the output channels # yield ErrorInput(SampleInput(make_arg((1, 1, 4)), args=(make_arg((1, 1, 3)), make_arg((2,)))), # error_regex="expected bias to be 1-dimensional with 1 elements") # error inputs for input.ndim != weight.ndim yield ErrorInput(SampleInput(make_arg((1, 1, 4)), args=(make_arg((1, 2)), make_arg((1,)))), error_regex="weight should have at least three dimensions") # error inputs for the weight[0] are less than the number of groups yield 
ErrorInput( SampleInput(make_arg((2, 2, 4)), args=(make_arg((2, 2, 2)), make_arg((2,))), kwargs={'padding': 'same', 'groups': 3}), error_regex="expected weight to be at least 3 at dimension 0") # error inputs for the weight[0] are less than the number of groups yield ErrorInput( SampleInput(make_arg((2, 2, 4)), args=(make_arg((2, 2, 2)), make_arg((2,))), kwargs={'groups': 3}), error_regex="expected weight to be at least 3 at dimension 0") # error inputs for invalid groups yield ErrorInput( SampleInput(make_arg((2, 2, 4)), args=(make_arg((2, 2, 2)), make_arg((2,))), kwargs={'padding': 'same', 'groups': -1}), error_regex="non-positive groups is not supported") # error inputs for invalid groups yield ErrorInput( SampleInput(make_arg((2, 2, 4)), args=(make_arg((2, 2, 2)), make_arg((2,))), kwargs={'padding': 'same', 'groups': 0}), error_regex="non-positive groups is not supported") def error_inputs_conv2d(opinfo, device, **kwargs): make_arg = partial(make_tensor, device=device, dtype=torch.float64) make_int_arg = partial(make_tensor, device=device, dtype=torch.int64) make_complex_arg = partial(make_tensor, device=device, dtype=torch.complex128) # error inputs for different dtypes of input tensor and bias yield ErrorInput( SampleInput(make_int_arg((2, 4, 4)), args=(make_int_arg((3, 2, 3, 3)), make_arg((3,)))), error_regex="should be the same") # error inputs for different dtypes of input tensor and bias yield ErrorInput( SampleInput(make_arg((2, 4, 4)), args=(make_arg((3, 2, 3, 3)), make_complex_arg((3,)))), error_regex="should be the same") # error inputs for negative strides yield ErrorInput( SampleInput(make_arg((1, 1, 4, 4)), args=(make_arg((1, 2, 2, 3)), make_arg((1,))), kwargs={'stride': (-1,)}), error_regex="non-positive stride is not supported") # error inputs for negative padding yield ErrorInput( SampleInput(make_arg((1, 1, 4, 3)), args=(make_arg((1, 2, 2, 4)), make_arg((1,))), kwargs={'padding': (-1,)}), error_regex="negative padding is not supported") # error inputs for negative dilation yield ErrorInput( SampleInput(make_arg((1, 1, 4, 2)), args=(make_arg((1, 1, 2, 5)), make_arg((1,))), kwargs={'dilation': (-1,)}), error_regex="dilation should be greater than zero") # FIXME: https://github.com/pytorch/pytorch/issues/85656 # error inputs for bias shape not equal to the output channels # yield ErrorInput(SampleInput(make_arg((1, 1, 4, 4)), args=(make_arg((1, 1, 3, 2)), make_arg((2,)))), # error_regex="expected bias to be 1-dimensional with 1 elements") # error inputs for input.ndim != weight.ndim yield ErrorInput( SampleInput(make_arg((1, 1, 4, 3)), args=(make_arg((1, 2, 2)), make_arg((1,))), kwargs={'padding': 'same'}), error_regex="Expected 3-dimensional input for 3-dimensional weight") # error inputs for the weight[0] are less than the number of groups yield ErrorInput( SampleInput(make_arg((2, 2, 4, 3)), args=(make_arg((2, 2, 1, 3)), make_arg((2,))), kwargs={'groups': 3}), error_regex="expected weight to be at least 3 at dimension 0") # error inputs for groups the weight[0] are less than the number of groups yield ErrorInput( SampleInput(make_arg((2, 2, 4, 3)), args=(make_arg((2, 2, 1, 3)), make_arg((2,))), kwargs={'padding': 'same', 'groups': 3}), error_regex="expected weight to be at least 3 at dimension 0") # error inputs for invalid groups yield ErrorInput( SampleInput(make_arg((2, 2, 4, 5)), args=(make_arg((2, 2, 1, 4)), make_arg((2,))), kwargs={'padding': 'same', 'groups': -1}), error_regex="non-positive groups is not supported") # error inputs for invalid groups yield 
ErrorInput( SampleInput(make_arg((2, 2, 4, 3)), args=(make_arg((2, 2, 4, 3)), make_arg((2,))), kwargs={'padding': 'same', 'groups': 0}), error_regex="non-positive groups is not supported") def sample_inputs_conv2d(op_info, device, dtype, requires_grad, jit_fail_sample=False, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) # Ordered as shapes for input, weight, bias # and a dict of values of (stride, padding, groups, dilation) cases: tuple = ( ((1, 3, 4, 4), (3, 3, 3, 3), (3,), {'stride': (2, 2), 'padding': 2, 'groups': 1}), ((2, 4, 8, 8), (2, 2, 3, 3), (2,), {'stride': (3, 2), 'padding': (2, 1), 'groups': 2, 'dilation': (4, 4)}), ((1, 4, 5, 5), (1, 4, 2, 3), (1,), {'stride': 2, 'padding': 1, 'groups': 1, 'dilation': (2, 3)}), ((1, 4, 5, 5), (1, 4, 2, 3), (1,), {'stride': 2, 'padding': 1, 'groups': 1, 'dilation': (2, 3)}), ((1, 2, 4, 3), (4, 2, 3, 4), None, {'stride': 2, 'padding': 1, 'groups': 1}), ((1, 4, 5, 5), (1, 4, 2, 3), (1,), {'stride': 2, 'padding': "valid"}), ((1, 4, 5, 5), (1, 4, 2, 3), (1,), {'stride': 1, 'padding': "same", 'dilation': 3}), # Below are the group related samples from common_nn.py ((2, 4, 6, 6), (4, 1, 3, 3), (4,), {'groups': 4}), ((2, 4, 6, 6), (8, 1, 3, 3), (8,), {'groups': 4}), ((2, 4, 6, 6), (8, 1, 3, 3), None, {'groups': 4}), ((2, 4, 6, 6), (4, 1, 3, 3), (4,), {'groups': 4, 'stride': (3, 2)}), ((2, 4, 6, 6), (4, 1, 3, 3), (4,), {'groups': 4, 'padding': (1, 1)}), ((2, 4, 5, 5), (4, 1, 2, 2), (4,), {'groups': 4, 'dilation': (2, 2)}), ((2, 4, 6, 5), (6, 2, 3, 2), (6,), {'groups': 2}), # With defaults ((1, 4, 5, 5), (3, 4, 3, 3), None, {}), ) for input_shape, weight, bias, kwargs in cases: # Batched yield SampleInput(make_arg(input_shape), args=( make_arg(weight), make_arg(bias) if bias is not None else bias ), kwargs=kwargs) # Unbatched yield SampleInput(make_arg(input_shape[1:]), args=( make_arg(weight), make_arg(bias) if bias is not None else bias ), kwargs=kwargs) def sample_inputs_conv3d(opinfo, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) # Ordered as shapes for input, weight, bias # and dict of values of (stride, padding, dilation, groups) cases: tuple = ( ((1, 1, 4, 4, 4), (1, 1, 1, 1, 1), (1,), {'padding': 'same'}), ((1, 1, 4, 4, 4), (1, 1, 4, 4, 4), (1,), {'stride': (2, 2, 2)}), ((1, 1, 5, 5, 5), (1, 1, 3, 3, 3), (1,), {'dilation': 2}), ((1, 1, 1, 1, 10), (1, 1, 1, 1, 4), None, {'padding': 'valid'}), ((1, 1, 10, 11, 12), (1, 1, 1, 2, 5), None, {'padding': 'same'}), ((1, 1, 10, 11, 12), (1, 1, 1, 2, 5), None, {'padding': 'same', 'dilation': 2}), ((1, 1, 10, 11, 12), (1, 1, 4, 4, 4), None, {'padding': 'same', 'dilation': 3}), ((1, 1, 1, 1, 10), (1, 1, 1, 1, 4), None, {'padding': 'valid'}), ((3, 9, 3, 1, 9), (3, 3, 3, 1, 9), (3,), {'groups': 3}), ((3, 9, 3, 1, 9), (3, 3, 3, 1, 9), (3,), {'stride': (2, 2, 2), 'dilation': 1, 'groups': 3}), ) for input_shape, weight, bias, kwargs in cases: # Batched yield SampleInput(make_arg(input_shape), args=( make_arg(weight), make_arg(bias) if bias is not None else bias ), kwargs=kwargs) # Unbatched yield SampleInput(make_arg(input_shape[1:]), args=( make_arg(weight), make_arg(bias) if bias is not None else bias ), kwargs=kwargs) def error_inputs_conv3d(opinfo, device, **kwargs): make_arg = partial(make_tensor, device=device, dtype=torch.float64) make_int_arg = partial(make_tensor, device=device, dtype=torch.int64) make_complex_arg = partial(make_tensor, device=device, 
dtype=torch.complex128) # error inputs for different dtypes of input tensor and bias yield ErrorInput( SampleInput(make_int_arg((1, 1, 4, 4, 4)), args=(make_int_arg((1, 1, 2, 2, 2)), make_arg((1,)))), error_regex="should be the same") # error inputs for different dtypes of input tensor and bias yield ErrorInput( SampleInput(make_arg((1, 1, 4, 4, 4)), args=(make_arg((1, 1, 2, 2, 2)), make_complex_arg((1,)))), error_regex="should be the same") # error inputs for negative strides yield ErrorInput( SampleInput(make_arg((1, 1, 4, 4, 4)), args=(make_arg((1, 1, 2, 2, 2)), make_arg((1,))), kwargs={'stride': (-1,)}), error_regex="non-positive stride is not supported") # error inputs for negative padding yield ErrorInput( SampleInput(make_arg((1, 1, 4, 4, 4)), args=(make_arg((1, 1, 2, 2, 2)), make_arg((1,))), kwargs={'padding': (-1,)}), error_regex="negative padding is not supported") # error inputs for negative dilation yield ErrorInput( SampleInput(make_arg((1, 1, 4, 4, 4)), args=(make_arg((1, 1, 2, 2, 2)), make_arg((1,))), kwargs={'dilation': (-1,)}), error_regex="dilation should be greater than zero") # FIXME: https://github.com/pytorch/pytorch/issues/85656 # error inputs for bias shape not equal to the output channels # yield ErrorInput(SampleInput(make_arg((1, 1, 4, 4, 4)), args=(make_arg((1, 1, 3, 3, 3)), make_arg((2,)))), # error_regex="expected bias to be 1-dimensional with 1 elements") # error inputs for input.ndim != weight.ndim yield ErrorInput( SampleInput(make_arg((1, 1, 3, 4, 5)), args=(make_arg((1, 1, 4, 3)), make_arg((1,))), kwargs={'padding': 'same'}), error_regex="Expected 4-dimensional input for 4-dimensional weight") # error inputs for the weight[0] are less than the number of groups yield ErrorInput( SampleInput(make_arg((2, 2, 3, 4, 5)), args=(make_arg((2, 2, 4, 3, 3)), make_arg((2,))), kwargs={'groups': 3}), error_regex="expected weight to be at least 3 at dimension 0") # error inputs for the weight[0] are less than the number of groups yield ErrorInput( SampleInput(make_arg((2, 2, 3, 4, 5)), args=(make_arg((2, 2, 4, 3, 3)), make_arg((2,))), kwargs={'padding': 'same', 'groups': 3}), error_regex="expected weight to be at least 3 at dimension 0") # error inputs for invalid groups yield ErrorInput( SampleInput(make_arg((2, 2, 3, 4, 5)), args=(make_arg((2, 2, 4, 3, 3)), make_arg((2,))), kwargs={'padding': 'same', 'groups': 0}), error_regex="non-positive groups is not supported") # error inputs for padding='same' not supported by strided convolutions yield ErrorInput( SampleInput(make_arg((18, 27, 9, 1, 9)), args=(make_arg((9, 9, 9, 1, 9)), make_arg((9,))), kwargs={'stride': 2, 'padding': 'same', 'groups': 3}), error_regex="padding='same' is not supported for strided convolutions") def sample_inputs_group_norm(opinfo, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) # Ordered as input shape, num groups, and kwargs for eps cases: tuple[tuple[int], int, float] = ( # type: ignore[assignment] ((1, 6, 3), 2, {'eps' : 0.5}), ((2, 6, 3), 2, {'eps' : -0.5}), ((1, 3), 1, {'eps' : 1e-5}), ((0, 2), 1, {'eps' : 1e-5}), ((S, S, S), 1, {'eps' : 0.5}), ) # num_channels is inferred to be input.shape[1] dimension for input_shape, num_groups, kwargs in cases: # Shape of weight and bias should be the same as num_channels channels = input_shape[1] if len(input_shape) > 1 else 0 weight_tensor = make_arg(channels) bias_tensor = make_arg(channels) # Checking for permutations of weights and biases as `None` weights = 
[weight_tensor, None] biases = [bias_tensor, None] for weight, bias in itertools.product(weights, biases): kwargs = { 'weight': weight, 'bias': bias, **kwargs } yield SampleInput(make_arg(input_shape), num_groups, **kwargs) # Without any optional args yield SampleInput(make_arg((1, 2)), args=(1,)) def reference_inputs_group_norm(op_info, device, dtype, requires_grad, **kwargs): yield from sample_inputs_group_norm( op_info, device, dtype, requires_grad, **kwargs) make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) # Ordered as input shape, num groups, and kwargs for eps cases: tuple[tuple[int], int, float] = ( # type: ignore[assignment] ((20, 6, 10, 10), 3, {'eps' : 1e-5}), # equivalent with InstanceNorm # GroupNorm(C, num_groups=C) == InstanceNorm(num_features=C) ((20, 6, 10, 10), 6, {'eps' : 1e-5}), # equivalent with LayerNorm # GroupNorm(C, num_groups=1, affine=False) == LayerNorm(normalized_shape=[C, H, W], elementwise_affine=False) ((20, 6, 10, 10), 1, {'eps' : 1e-5}), ) # num_channels is inferred to be input.shape[1] dimension for input_shape, num_groups, kwargs in cases: # Shape of weight and bias should be the same as num_channels channels = input_shape[1] if len(input_shape) > 1 else 0 input_tensor = make_arg(input_shape) weight_tensor = make_arg(channels) bias_tensor = make_arg(channels) # Checking for permutations of weights and biases as `None` weights = [weight_tensor, None] biases = [bias_tensor, None] for weight, bias in itertools.product(weights, biases): kwargs = { 'weight': weight, 'bias': bias, **kwargs } yield SampleInput(input_tensor, num_groups, **kwargs) def sample_inputs_instance_norm(opinfo, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) make_arg_without_requires_grad = partial(make_tensor, device=device, dtype=dtype, requires_grad=False) # Ordered as: input shape, kwargs for momentum, eps cases: tuple[tuple[int], dict] = ( # type: ignore[assignment] ((S, S, S), {'momentum': 0.5, 'eps': 0.6}), ((S, S, S), {'momentum': 0.5, 'eps': 0.6, 'use_input_stats': True}), ((3, 2, 4), {'momentum': -1.2}), ((3, 2, 4), {'momentum': 0.0}), ((3, 2, 3, 4), {'momentum': -1.0, 'eps': 0.5}), ((3, 2, 3, 4), {'momentum': -1.0, 'eps': 0.5}), ) for input_shape, kwargs in cases: # args: running mean, running var, weight and bias should necessarily be of shape: (channels,) channels = input_shape[1] weight = make_arg(channels) bias = make_arg(channels) running_mean = make_arg_without_requires_grad(channels, low=0) running_var = make_arg_without_requires_grad(channels, low=0) new_kwargs = { 'running_mean': running_mean, 'running_var': running_var, 'weight': weight, 'bias': bias, **kwargs } yield SampleInput( make_arg(input_shape), args=(), kwargs=new_kwargs ) # Checking for permutations of weights and biases as `None` # instance_norm assumes that if there's a bias, there's a weight weights = [channels, None] biases = [None, None] for weight_channels, bias_channels in zip(weights, biases): running_mean = make_arg_without_requires_grad(channels, low=0) running_var = make_arg_without_requires_grad(channels, low=0) yield SampleInput( make_arg(input_shape), args=(), kwargs={ 'running_mean': running_mean, 'running_var': running_var, 'weight': make_arg(weight_channels) if weight_channels is not None else None, 'bias': make_arg(bias_channels) if bias_channels is not None else None } ) # Test case for no optional kwargs yield SampleInput(make_arg((1, 2, 3)), kwargs={}) def 
sample_inputs_safe_softmax(opinfo, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=False) def make_bool_mask(*shape): return torch.randint(0, 2, shape, device=device, dtype=torch.bool) def mask_two_rows(rows, cols): mask_two_rows = torch.ones((rows, cols), dtype=torch.bool, device=device) mask_two_rows[rows - 1] = False mask_two_rows[rows - 3] = False return mask_two_rows def convert_to_float_mask(mask: torch.Tensor) -> torch.Tensor: return torch.where(~mask, float('-inf'), 0.0) def with_requires_grad(tensor): return tensor.requires_grad_(requires_grad) def generate_input_from_mask(mask_shape, dim): mask = make_bool_mask(*mask_shape) input_tensor = make_arg(mask_shape) masked_input = input_tensor + convert_to_float_mask(mask) return SampleInput(with_requires_grad(masked_input), kwargs={'dim': dim}) samples = [ # Basic 3D tensor with mask generate_input_from_mask((2, 3, 4), dim=1), # 2D tensor with mask, testing different dim generate_input_from_mask((5, 5), dim=0), # 4D tensor, testing with a different dim generate_input_from_mask((2, 3, 4, 5), dim=2), # Edge case: 1D tensor generate_input_from_mask((10,), dim=0), # Edge case: tensor with one dimension of size 1 generate_input_from_mask((1, 5, 5), dim=1), # Testing with all elements masked SampleInput( with_requires_grad( make_arg((3, 3)) + convert_to_float_mask( torch.zeros((3, 3), dtype=torch.bool, device=device) ) ), kwargs={"dim": 1}, ), # Testing with no elements masked SampleInput( with_requires_grad( make_arg((3, 3)) + convert_to_float_mask( torch.ones((3, 3), dtype=torch.bool, device=device) ) ), kwargs={"dim": 1}, ), # Testing with two rows masked SampleInput( with_requires_grad( make_arg((6, 3)) + convert_to_float_mask(mask_two_rows(6, 3)) ), kwargs={"dim": 1}, ), ] yield from samples def sample_inputs_layer_norm(opinfo, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) # Ordered as input shape, normalized_shape and a kwarg dict for eps cases: tuple[tuple[int], tuple[int], dict] = ( # type: ignore[assignment] ((1, 2, 3), (1, 2, 3), {'eps': 0.5}), ((2, 2, 3), (2, 3), {'eps': -0.5}), ((1,), (1,), {}), ((1, 2), (2,), {}), ((0, 1), (1,), {}), ) for input_shape, normalized_shape, kwargs in cases: # Shape of weight and bias should be the same as normalized_shape weight = make_arg(normalized_shape) bias = make_arg(normalized_shape) yield SampleInput( make_arg(input_shape), args=(normalized_shape, weight, bias), kwargs=kwargs ) # Without any optional args yield SampleInput(make_arg((1, 2)), args=((2,),)) # TODO: @krshrimali, once to_numpy method in SampleInput class is modified to take None inputs, # enable these inputs; see https://github.com/pytorch/pytorch/pull/63276#discussion_r691950400 # With weight and a `None` bias # yield SampleInput(make_arg((1, 2)), args=((2,), make_arg((2,)), None)) # With `None` weight and bias (tests failing for this, see the link above) # yield SampleInput(make_arg((1, 2)), args=((2,), None, make_arg((2,)))) def sample_inputs_native_layer_norm(opinfo, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) # Ordered as input shape, normalized_shape, eps cases: tuple[tuple[int], tuple[int], float] = ( # type: ignore[assignment] ((1, 2, 3), (1, 2, 3), 0.5), ((2, 2, 3), (2, 3), -0.5), ((1,), (1,), 1e-5), ((1, 2), (2,), 1e-5), ((0, 1), (1,), 1e-5), ) for input_shape, normalized_shape, eps in 
cases: # Shape of weight and bias should be the same as normalized_shape weight = make_arg(normalized_shape) bias = make_arg(normalized_shape) yield SampleInput( make_arg(input_shape), args=(normalized_shape, weight, bias, eps), ) yield SampleInput( make_arg(input_shape), args=(normalized_shape, None, bias, eps), ) yield SampleInput( make_arg(input_shape), args=(normalized_shape, weight, None, eps), ) yield SampleInput( make_arg(input_shape), args=(normalized_shape, None, None, eps), ) def sample_inputs_rms_norm(opinfo, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad, high=1000) # Ordered as input shape, normalized_shape and a kwarg dict for eps cases: tuple[tuple[int], tuple[int], dict] = ( # type: ignore[assignment] ((1, 2, 3), (1, 2, 3), {'eps': 0.5}), ((2, 2, 3), (2, 3), {'eps': -0.5}), ((1,), (1,), {}), ((1, 2), (2,), {}), ((0, 1), (1,), {}), ) for input_shape, normalized_shape, kwargs in cases: # Shape of weight and bias should be the same as normalized_shape weight = make_arg(normalized_shape) yield SampleInput( make_arg(input_shape), args=(normalized_shape, weight), kwargs=kwargs ) # Without any optional args yield SampleInput(make_arg((1, 2)), args=((2,),)) def error_inputs_group_norm(opinfo, device, **kwargs): make_arg = partial(make_tensor, device=device, dtype=torch.float32, requires_grad=False) # check that input has minimum number of dimensions err_msg1 = "Expected at least 2 dimensions for input tensor but received" s1 = SampleInput(make_arg(1), args=(1,)) yield ErrorInput(s1, error_regex=err_msg1) # check that the channels dimension is compatible with number of groups err_msg2 = "Expected number of channels in input to be divisible by num_groups, but got input of shape" s2 = SampleInput(make_arg((2, 7, 4)), args=(2,)) yield ErrorInput(s2, error_regex=err_msg2) def error_inputs_native_layer_norm(opinfo, device, **kwargs): make_arg = partial(make_tensor, device=device, dtype=torch.float32, requires_grad=False) input_shape = (1, 2, 3) err_msg1 = "Expected normalized_shape to be at least 1-dimensional" s1 = SampleInput( make_arg(input_shape), args=((), None, None, 1e-5) ) yield ErrorInput(s1, error_regex=err_msg1) normalized_shape = (1, 2, 3) weight = make_arg((1, 2)) err_msg2 = "Expected weight to be of same shape as normalized_shape" s2 = SampleInput( make_arg(input_shape), args=(normalized_shape, weight, None, 1e-5) ) yield ErrorInput(s2, error_regex=err_msg2) bias = make_arg((1, 2)) err_msg3 = "Expected bias to be of same shape as normalized_shape" s3 = SampleInput( make_arg(input_shape), args=(normalized_shape, None, bias, 1e-5) ) yield ErrorInput(s3, error_regex=err_msg3) err_msg4 = "Given normalized_shape=" s4 = SampleInput( make_arg((2, 2, 3)), args=((2, 2), None, None, 1e-5) ) yield ErrorInput(s4, error_regex=err_msg4) def error_inputs_rms_norm(opinfo, device, **kwargs): make_arg = partial(make_tensor, device=device, dtype=torch.float32, requires_grad=False) input_shape = (1, 2, 3) err_msg1 = "Expected normalized_shape to be at least 1-dimensional" s1 = SampleInput( make_arg(input_shape), args=((), None, 1e-5) ) yield ErrorInput(s1, error_regex=err_msg1) normalized_shape = (1, 2, 3) weight = make_arg((1, 2)) err_msg2 = "Expected weight to be of same shape as normalized_shape" s2 = SampleInput( make_arg(input_shape), args=(normalized_shape, weight, 1e-5) ) yield ErrorInput(s2, error_regex=err_msg2) err_msg4 = "Given normalized_shape=" s4 = SampleInput( make_arg((2, 2, 3)), args=((2, 2), 
None, 1e-5) ) yield ErrorInput(s4, error_regex=err_msg4) def sample_inputs_local_response_norm(opinfo, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) # Ordered as input shape, size and a kwarg dict for alpha, beta, and k cases: tuple[tuple[int], tuple[int], dict] = ( # type: ignore[assignment] ((1, 6, 3), 2, {'alpha': 3e-05, 'beta': 0.5, 'k': 1.25}), ((1, 6, 3), 2, {'beta': 0.5, 'k': 1.25}), ((1, 6, 3), 2, {'alpha': 3e-05, 'k': 1.25}), ((1, 6, 3), 2, {'alpha': 3e-05, 'beta': 0.5}), ((1, 6, 3), 2, {'alpha': 3e-05}), ((1, 6, 3), 2, {'beta': 0.5}), ((1, 6, 3), 2, {'k': 1.25}), ((1, 6, 3), 2, {}), ((2, 6, 3), 2, {'alpha': 3e-05, 'beta': 0.5, 'k': 1.25}), ((1, 1, 2), 1, {'alpha': 3e-05, 'beta': 0.5, 'k': 1.25}), ((0, 1, 2), 1, {'alpha': 3e-05, 'beta': 0.5, 'k': 1.25}), ) for input_shape, size, kwargs in cases: yield SampleInput(make_arg(input_shape), args=(size,), kwargs=kwargs) def sample_inputs_hardswish(self, device, dtype, requires_grad, **kwargs): N = 5 # make sure we are testing -3 -> 3 range. default is -10 -> 10 so maybe unnecessary ? make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad, low=-5, high=5) return (SampleInput(make_arg((N * 2, N * 2))) for _ in range(1, N)) def sample_inputs_linear(self, device, dtype, requires_grad, **kwargs): features_options = [[3, 4], [8, 8]] batch_options: list[list[int]] = [ [], # no batch [0], [8], [2, 3], ] create_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad, low=-2, high=2) for has_bias, (in_feat, out_feat), batch_shape in \ itertools.product([True, False], features_options, batch_options): input_tensor = create_tensor(batch_shape + [in_feat]) weight = create_tensor([out_feat, in_feat]) if not has_bias: yield SampleInput(input_tensor, weight) continue bias = create_tensor([out_feat]) yield SampleInput(input_tensor, weight, bias) # 5D tensor, used to crash on MPS, see https://github.com/pytorch/pytorch/issues/114942 yield SampleInput(create_tensor(2, 1, 2, 1, 2), create_tensor(4, 2)) yield SampleInput(create_tensor(2, 1, 2, 1, 2), create_tensor(4, 2), create_tensor(4)) def sample_inputs_bilinear(self, device, dtype, requires_grad, **kwargs): features_options = [[3, 4, 5], [8, 8, 8]] batch_options: list[list[int]] = [ [], # no batch [0], [8], [2, 3], ] create_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad, low=-2, high=2) for has_bias, (in_feat1, in_feat2, out_feat), batch_shape in \ itertools.product([True, False], features_options, batch_options): input_tensor1 = create_tensor(batch_shape + [in_feat1]) input_tensor2 = create_tensor(batch_shape + [in_feat2]) weight = create_tensor([out_feat, in_feat1, in_feat2]) if not has_bias: yield SampleInput(input_tensor1, input_tensor2, weight) continue bias = create_tensor([out_feat]) yield SampleInput(input_tensor1, input_tensor2, weight, bias) def sample_inputs_glu(self, device, dtype, requires_grad, **kwargs): features_options = [[2], [2, 4], [8, 8], [3, 6, 8], [1, 4, 6, 7]] batch_options: list[list[int]] = [ [], # no batch [0], [8], [2, 3], ] create_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad, low=-2, high=2) for features, batch_shape in itertools.product(features_options, batch_options): ndim = len(features) + len(batch_shape) for dim in range(ndim): input_tensor = create_tensor(batch_shape + features) dim_size = input_tensor.size(dim) if dim_size > 0 and dim_size % 2 
== 0: yield SampleInput(input_tensor, dim) def sample_inputs_interpolate(mode, self, device, dtype, requires_grad, **kwargs): N, C = 2, 3 D = 4 S = 3 L = 5 align_corners_options: tuple[Any, ...] = (None,) if mode in ('linear', 'bilinear', 'bicubic', 'trilinear'): align_corners_options = (True, False, None) ranks_for_mode = { 'nearest': [1, 2, 3], 'nearest-exact': [1, 2, 3], 'linear': [1], 'bilinear': [2], 'bicubic': [2], 'trilinear': [3], 'area': [1, 2, 3] } def shape(size, rank, with_batch_channel=True): if with_batch_channel: return tuple([N, C] + ([size] * rank)) return tuple([size] * rank) if mode in ('bilinear', 'bicubic') and dtype == torch.uint8: make_arg = partial( make_tensor, device=device, dtype=dtype, requires_grad=requires_grad, # we pick more realistic upper bound 256 instead of default 10 for uint8 dtype high=256 if dtype == torch.uint8 else None, ) # provide few samples for a more close to typical image processing usage rank = 2 for memory_format in [torch.contiguous_format, torch.channels_last]: yield SampleInput( make_arg(shape(270, rank), memory_format=memory_format), shape(130, rank, False), scale_factor=None, mode=mode, align_corners=False, ) make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) for align_corners in align_corners_options: for rank in ranks_for_mode[mode]: yield SampleInput( make_arg(shape(D, rank)), shape(S, rank, False), scale_factor=None, mode=mode, align_corners=align_corners, ) yield SampleInput( make_arg(shape(D, rank)), shape(L, rank, False), scale_factor=None, mode=mode, align_corners=align_corners, ) for recompute_scale_factor in [False, True]: for scale_factor in [1.7, 0.6]: yield SampleInput( make_arg(shape(D, rank)), size=None, scale_factor=scale_factor, mode=mode, align_corners=align_corners, recompute_scale_factor=recompute_scale_factor, ) def reference_inputs_interpolate(mode, self, device, dtype, requires_grad, **kwargs): yield from sample_inputs_interpolate(mode, self, device, dtype, requires_grad, **kwargs) if mode in ('bilinear', 'bicubic'): make_arg = partial( make_tensor, device=device, dtype=dtype, requires_grad=requires_grad, # we pick more realistic upper bound 256 instead of default 10 for uint8 dtype high=256 if dtype == torch.uint8 else None, ) # provide few samples for more typical image processing usage for memory_format in [torch.contiguous_format, torch.channels_last]: for aa in [True, False]: yield SampleInput( make_arg((2, 3, 345, 456), memory_format=memory_format), (270, 270), scale_factor=None, mode=mode, align_corners=False, antialias=aa, ) def sample_inputs_upsample(mode, self, device, dtype, requires_grad, **kwargs): N, C = 2, 3 D = 4 S = 3 L = 5 ranks_for_mode = { 'nearest': [1, 2, 3], 'bilinear': [2], } def shape(size, rank, with_batch_channel=True): if with_batch_channel: return torch.Size([N, C] + ([size] * rank)) return torch.Size([size] * rank) make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) for rank in ranks_for_mode[mode]: yield SampleInput(make_arg(shape(D, rank)), size=shape(S, rank, False)) yield SampleInput(make_arg(shape(D, rank)), size=shape(L, rank, False)) yield SampleInput(make_arg(shape(D, rank)), scale_factor=1.7) yield SampleInput(make_arg(shape(D, rank)), scale_factor=0.6) def reference_inputs_upsample(mode, self, device, dtype, requires_grad, **kwargs): yield from sample_inputs_upsample(mode, self, device, dtype, requires_grad, **kwargs) if mode in ('bilinear', ): make_arg = partial( make_tensor, device=device, 
dtype=dtype, requires_grad=requires_grad, # we pick more realistic upper bound 256 instead of default 10 for uint8 dtype high=256 if dtype == torch.uint8 else None, ) # provide a single sample for more typical image processing usage for memory_format in [torch.contiguous_format, torch.channels_last]: yield SampleInput( make_arg((2, 3, 345, 456), memory_format=memory_format), (270, 270), ) def sample_inputs_upsample_aa(mode, self, device, dtype, requires_grad, **kwargs): N = 6 C = 3 H = 10 W = 20 S = 3 L = 5 input_tensor = make_tensor(torch.Size([N, C, H, W]), device=device, dtype=dtype, requires_grad=requires_grad) yield SampleInput(input_tensor, output_size=torch.Size([S, S]), align_corners=False, scale_factors=None) yield SampleInput(input_tensor, output_size=torch.Size([L, L]), align_corners=False, scale_factors=None) yield SampleInput(input_tensor, output_size=None, align_corners=False, scale_factors=[1.7, 0.9]) yield SampleInput(input_tensor, output_size=None, align_corners=True, scale_factors=[0.8, 1.0]) yield SampleInput(input_tensor, output_size=torch.Size([S, S]), align_corners=False, scales_h=None, scales_w=None) yield SampleInput(input_tensor, output_size=torch.Size([S, S]), align_corners=False, scales_h=1.7, scales_w=0.9) yield SampleInput(input_tensor, output_size=torch.Size([S, S]), align_corners=True, scales_h=1.7, scales_w=0.9) def sample_inputs_gelu(self, device, dtype, requires_grad, **kwargs): N = 5 for _ in range(1, N): for approximate in ['none', 'tanh']: yield SampleInput( make_tensor((N * 2, N * 2), device=device, dtype=dtype, requires_grad=requires_grad, low=-3, high=3), approximate=approximate) def error_inputs_gelu(op, device, **kwargs): # Tests that gelu errors out when passed an approximation we don't know. yield ErrorInput(SampleInput(make_tensor((), dtype=torch.float, device=device), kwargs={"approximate": "asdf"}), error_regex="approximate argument must be either") def sample_inputs_max_min_reduction_with_dim(op_info, device, dtype, requires_grad, **kwargs): args_for_reduction_with_dim = ( ((S, S, S), (1,),), ((S, S, S), (1, True, ),), ((), (0,),), ((), (0, True,),), ) return ((SampleInput(make_tensor(input_tensor, dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad), *args)) for input_tensor, args in args_for_reduction_with_dim) def sample_inputs_max_min_reduction_no_dim(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad, low=None, high=None) yield SampleInput(make_arg((S, S, S))) yield SampleInput(make_arg(())) def _generate_nan_reduction_inputs(device, dtype, requires_grad, **kwargs): yield from _generate_reduction_inputs(device, dtype, requires_grad) # NaN only exists for floating point numbers if dtype.is_complex or dtype.is_floating_point: yield torch.tensor([2, torch.nan, -1], device=device, dtype=dtype, requires_grad=requires_grad) yield torch.tensor([[torch.nan, 2], [0, 1]], device=device, dtype=dtype, requires_grad=requires_grad) def sample_inputs_nan_reduction(supports_multiple_dims): # Generates sample inputs for reduction ops that contain the input tensor # and dim and keepdim kwargs. 
If a reduction op needs to test additional # args/kwargs then create a separate sample_inputs function def fn(op_info, device, dtype, requires_grad, **kwargs): for t in _generate_nan_reduction_inputs(device, dtype, requires_grad): # Add case without dim and keepdim kwargs yield SampleInput(t.clone().requires_grad_(requires_grad)) for kwargs in _generate_reduction_kwargs(t.ndim, supports_multiple_dims): yield SampleInput(t.clone().requires_grad_(requires_grad), **kwargs) return fn def sample_inputs_reduction_quantile(op_info, device, dtype, requires_grad, **kwargs): test_quantiles = (0.5, make_tensor((2,), dtype=dtype, device=device, low=0, high=1, requires_grad=requires_grad)) test_interpolations = ['linear', 'midpoint'] for quantiles in test_quantiles: for t in _generate_reduction_inputs(device, dtype, requires_grad): # Add case without dim and keepdim kwargs input = t.clone().requires_grad_(requires_grad) yield SampleInput(input, quantiles) for kwargs in _generate_reduction_kwargs(t.ndim, supports_multiple_dims=False): # Interpolation kwarg for now is only supported when providing both dim and keepdim kwargs.setdefault('dim', 0) kwargs.setdefault('keepdim', False) for interpolation in test_interpolations: kwargs['interpolation'] = interpolation input = t.clone().requires_grad_(requires_grad) yield SampleInput(input, quantiles, **kwargs) def sample_inputs_reduction_count_nonzero(*args, **kwargs): """Sample inputs for count_nonzero""" # count_nonzero does not support keepdim yet for sample in sample_inputs_reduction(*args, **kwargs): sample.kwargs.pop('keepdim', None) yield sample def sample_inputs_leaky_relu(op_info, device, dtype, requires_grad, **kwargs): N = 10 make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) return (SampleInput(make_arg((N, N))) for _ in range(1, N)) def sample_inputs_fractional_max_pool2d(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) # Order: input_shape, kernel_size cases = (((1, 3, 9, 9), 3), ((1, 3, 9, 9), (4, 4)), ((1, 3, 9, 9), (6, 6)), ((2, 3, 9, 9), (3, 3)), ((1, 1, 4, 4), (2, 2)), ((1, 2, 6, 6), (4, 4))) for input_shape, kernel_size in cases: for return_indices in [False, True]: # test case passing a single output size yield SampleInput( make_arg(input_shape), kernel_size, output_size=2, return_indices=return_indices, ) # test case passing a tuple output size yield SampleInput( make_arg(input_shape), kernel_size, output_size=(2, 3), return_indices=return_indices, ) # test case passing an output ratio yield SampleInput( make_arg(input_shape), kernel_size, output_ratio=(0.5, 0.5), return_indices=return_indices, ) yield SampleInput( make_arg((1, 1, 16, 16)), (1, 1), output_ratio=(0.5, 0.5), return_indices=True, _random_samples=make_tensor((1, 1, 2), device=device, dtype=dtype, requires_grad=False), ) def sample_inputs_fractional_max_pool3d(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) # Order: input_shape, kernel_size cases = (((2, 3, 5, 5, 5), (2, 2, 2)), ((1, 2, 6, 5, 4), 2), ((1, 2, 5, 6, 5), (2, 3, 2)), ((1, 2, 6, 6, 6), (2, 3, 2)), ((1, 1, 7, 6, 7), (2, 3, 4)), ((1, 1, 4, 5, 4), (2, 2, 1)), ((1, 1, 8, 7, 6), (4, 3, 2)), ((0, 1, 4, 5, 4), (2, 2, 1))) for input_shape, kernel_size in cases: for return_indices in [False, True]: # test case passing a single output size yield SampleInput( make_arg(input_shape), kernel_size, output_size=2, 
                return_indices=return_indices,
            )
            # test case passing a tuple output size
            yield SampleInput(
                make_arg(input_shape),
                kernel_size,
                output_size=(2, 3, 2),
                return_indices=return_indices,
            )
            # test case passing an output ratio
            yield SampleInput(
                make_arg(input_shape),
                kernel_size,
                output_ratio=(0.5, 0.5, 0.5),
                return_indices=return_indices,
            )


def sample_inputs_avgpool2d(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    # Order: input_shape, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override
    cases = (((1, 3, 9, 9), 3, 1, 1, True, False, 2),
             ((1, 3, 9, 9), (4, 4), (2, 3), 1, True, False, 2),
             ((1, 3, 9, 9), (6, 6), (3, 3), (2, 3), True, True, 2),
             ((2, 3, 9, 9), (3, 3), (1, 1), (1, ), True, False, 2),
             ((1, 1, 4, 4), (2, 2), (), (0, ), False, True, -2),
             ((1, 2, 6, 6), (4, 4), (2, 2), (2, ), True, True, None))

    for input_shape, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override in cases:
        yield SampleInput(make_arg(input_shape),
                          args=(kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override))
    # Case with just input_shape and kernel_size
    yield SampleInput(make_arg((1, 3, 9, 9)), args=((3, 3),))


def sample_inputs_avgpool1d(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    # Order: input_shape, kernel_size, kwargs
    cases: list[tuple[tuple[int, ...], Union[int, tuple[int, ...]], dict]] = [
        ((2, 3, 9), (3,), {}),
        ((1, 3, 9), 3, dict(stride=1, padding=1, ceil_mode=True, count_include_pad=False)),
        ((1, 3, 9), (6,), dict(stride=(3,), padding=(2,), ceil_mode=True, count_include_pad=True)),
        ((2, 3, 9), (3,), dict(stride=(1,), padding=(1,), ceil_mode=False, count_include_pad=True)),
        ((0, 3, 9), (6,), dict(stride=(3,), padding=(2,), ceil_mode=False, count_include_pad=True)),
        ((1, 2, 9), (7,), dict(stride=(3,), padding=(2,), ceil_mode=False)),
        ((1, 2, 9), (7,), dict(stride=(3,), padding=(3,), ceil_mode=True)),
        ((1, 2, 9), (7,), dict(stride=(3,), ceil_mode=False)),
        ((1, 2, 9), (7,), dict(stride=(3,), ceil_mode=True)),
    ]

    for input_shape, kernel_size, kwargs in cases:
        yield SampleInput(make_arg(input_shape), args=(kernel_size,), kwargs=kwargs)


def sample_inputs_avgpool3d(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    # Order: input_shape, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override
    cases: list[tuple[tuple[int, ...], Union[int, tuple[int, ...]], dict]] = [
        ((2, 3, 3, 4, 4), (2, 2, 2), {}),
        ((1, 2, 4, 4, 4), 2, dict(stride=1, padding=1, ceil_mode=True, count_include_pad=False, divisor_override=2)),
        ((1, 2, 5, 5, 5), (2, 3, 4), dict(stride=(1, 2, 2), padding=(0, 1, 2), ceil_mode=True, count_include_pad=True, divisor_override=2)),
        ((1, 2, 5, 5, 5), (2, 3, 4), dict(stride=(1, 2, 2), padding=(0, 1, 2), ceil_mode=False)),
        ((1, 1, 7, 5, 7), (6, 3, 4), dict(stride=(2, 3, 2), padding=(3, 1, 0), ceil_mode=False, count_include_pad=False, divisor_override=2)),
        ((1, 1, 4, 5, 4), (2, 2, 3), dict(stride=(2, 2, 1), padding=0, ceil_mode=False, count_include_pad=True, divisor_override=-2)),
        ((1, 1, 6, 5, 6), (4, 5, 6), dict(stride=(2, 3, 2), padding=2, ceil_mode=True, count_include_pad=True, divisor_override=None)),
        ((0, 1, 4, 5, 4), (2, 3, 1), dict(stride=(2, 1, 2), padding=0, ceil_mode=False, count_include_pad=True, divisor_override=None)),
    ]

    for input_shape,
kernel_size, kwargs in cases: yield SampleInput(make_arg(input_shape), args=(kernel_size,), kwargs=kwargs) def error_inputs_avg_pool1d(op_info, device, **kwargs): # error inputs when pad is negative x = torch.rand([0, 1, 49], dtype=torch.float32) yield ErrorInput(SampleInput(x, kwargs={'kernel_size': 2, 'stride': 50, 'padding': -1}), error_regex='pad must be non-negative') # error inputs when pad > kernel_size / 2 yield ErrorInput(SampleInput(x, kwargs={'kernel_size': 2, 'stride': 50, 'padding': 4}), error_regex='pad should be at most half of effective kernel size') def error_inputs_avg_pool2d(op_info, device, **kwargs): # error inputs when pad is negative x = torch.rand([0, 1, 49], dtype=torch.float32) yield ErrorInput(SampleInput(x, kwargs={'kernel_size': 2, 'stride': 50, 'padding': -1}), error_regex='pad must be non-negative') # 2-dimensional kernel yield ErrorInput(SampleInput(x, kwargs={'kernel_size': (3, 2), 'stride': 50, 'padding': -1}), error_regex='pad must be non-negative') # error inputs when pad > kernel_size / 2 yield ErrorInput(SampleInput(x, kwargs={'kernel_size': 2, 'stride': 50, 'padding': 4}), error_regex='pad should be at most half of effective kernel size') # 2-dimensional kernel yield ErrorInput(SampleInput(x, kwargs={'kernel_size': (3, 2), 'stride': 50, 'padding': 4}), error_regex='pad should be at most half of effective kernel size') # error inputs for zero divisor x = torch.zeros(3, 3, 3) yield ErrorInput(SampleInput(x, kwargs={'kernel_size': (2, 2), 'divisor_override': 0}), error_regex='divisor must be not zero') def error_inputs_avg_pool3d(op_info, device, **kwargs): # error inputs when pad is negative x = torch.rand([0, 1, 49, 50], dtype=torch.float32) yield ErrorInput(SampleInput(x, kwargs={'kernel_size': 2, 'stride': 50, 'padding': -1}), error_regex='pad must be non-negative') # 3-dimensional kernel yield ErrorInput(SampleInput(x, kwargs={'kernel_size': (3, 2, 2), 'stride': 50, 'padding': -1}), error_regex='pad must be non-negative') # error inputs when pad > kernel_size / 2 yield ErrorInput(SampleInput(x, kwargs={'kernel_size': 2, 'stride': 50, 'padding': 4}), error_regex='pad should be at most half of effective kernel size') # 3-dimensional kernel yield ErrorInput(SampleInput(x, kwargs={'kernel_size': (3, 2, 2), 'stride': 50, 'padding': 4}), error_regex='pad should be at most half of effective kernel size') # error inputs for zero divisor x = torch.zeros(3, 3, 3, 3) yield ErrorInput(SampleInput(x, kwargs={'kernel_size': (2, 2, 2), 'divisor_override': 0}), error_regex='divisor must be not zero') # error inputs for invalid input dimension x = torch.rand([0, 1, 49], dtype=torch.float32) yield ErrorInput(SampleInput(x, kwargs={'kernel_size': 2, 'stride': 50, 'padding': 0}), error_regex='non-empty 4D or 5D') def sample_inputs_to(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) # test_multiple_devices_to_cuda would fail if we use a different device than given devices = [device] if torch.device(device).type == 'cpu': devices = [torch.device('cpu'), torch.device('cuda:0')] if torch.cuda.is_available() else devices memory_formats = [torch.preserve_format, torch.channels_last] # TODO: can't switch `to.device` overload to use positional arguments # https://github.com/pytorch/pytorch/issues/84265 # to.device overload for device, nb, cp, mem_f in product(devices, [True, False], [True, False], memory_formats): kwargs = { "memory_format": mem_f, } yield SampleInput(make_arg((S, S, S, 
S)), args=(device, torch.float64, nb, cp), kwargs=kwargs) # to.dtype overload for nb, cp, mem_f in product([True, False], [True, False], memory_formats): kwargs = { "memory_format": mem_f, } yield SampleInput(make_arg((S, S, S, S)), args=(torch.float64, nb, cp), kwargs=kwargs) # to.other overload for device, nb, cp, mem_f in product(devices, [True, False], [True, False], memory_formats): kwargs = { "memory_format": mem_f, } other = make_arg((S, S, S, S), dtype=torch.float64, device=device) yield SampleInput(make_arg((S, S, S, S)), args=(other, nb, cp), kwargs=kwargs) def sample_inputs_topk(op_info, device, dtype, requires_grad, **kwargs): def get_tensor_input(size): return make_tensor(size, dtype=dtype, device=device, requires_grad=requires_grad) yield SampleInput(get_tensor_input((S, M, S)), 3) yield SampleInput(get_tensor_input((S, M, S)), 3, 1) yield SampleInput(get_tensor_input((S, M, S)), 3, -2) yield SampleInput(get_tensor_input((S, M, S)), 3, 1, True) yield SampleInput(get_tensor_input((S, M, S)), 3, -2, True) yield SampleInput(get_tensor_input((S, M, S)), 3, 1, True, True) yield SampleInput(get_tensor_input((S, M, S)), 3, -2, True, True) yield SampleInput(get_tensor_input(()), 1) yield SampleInput(get_tensor_input(()), 1, 0) yield SampleInput(get_tensor_input(()), 1, -1) yield SampleInput(get_tensor_input(()), 1, 0, True) yield SampleInput(get_tensor_input(()), 1, -1, True) yield SampleInput(get_tensor_input(()), 1, 0, True, True) yield SampleInput(get_tensor_input(()), 1, -1, True, True) def sample_inputs_outer(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) yield SampleInput(make_arg(S), make_arg(M)) def sample_inputs_dist(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) sizes = ((S, S, S), (S,), (S, 1, S), (), (S, S)) ps = (2, 4) for size_x, size_y, p in product(sizes, sizes, ps): yield SampleInput(make_arg(size_x), args=(make_arg(size_y), p)) # Missing to test the nondeterminism of the operation # https://github.com/pytorch/pytorch/issues/53352 def sample_inputs_index(op_info, device, dtype, requires_grad, reference=False, **kwargs): # target.index_add(dim, idx, source, *, alpha=1) add = "index_add" in op_info.name # target.index_copy(dim, idx, source) copy = "index_copy" in op_info.name # target.index_fill(dim, idx, value) fill = "index_fill" in op_info.name # Extended reference inputs. We generate that exercise atomic adds / writing # several times to one location if reference: make_arg = partial(torch.ones, device=device, dtype=dtype, requires_grad=requires_grad) make_idx = partial(torch.zeros, device=device, dtype=torch.int64) else: make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) # idx They need to be different for copy and add to be deterministic if copy or add: make_idx = partial(torch.randperm, device=device, dtype=torch.int64) else: def make_idx(n): return make_tensor((n,), device=device, dtype=torch.int64, low=0, high=n) shapes = [(), (1,), (S, S)] # extra parameter for add if add: if dtype == torch.bool: alphas = (True, False) else: alphas = (-1, 0, 2) else: alphas = (None,) if fill: # A weird number to catch errors. # The former one tests `index_fill.int_Scalar`, and the latter one tests `index_fill.int_Tensor`. 
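        # index_fill accepts either a Python scalar or a 0-dim tensor as the fill value,
        # roughly:
        #   x.index_fill(0, idx, 3.14)                # Scalar overload
        #   x.index_fill(0, idx, torch.tensor(3.14))  # 0-dim Tensor overload
        # so yielding one value of each kind below covers both overloads.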
values = (make_arg((1,)).item(), make_arg(())) else: values = (None,) for shape, alpha, value in product(shapes, alphas, values): t = make_arg(shape) args = [] # dim. We handle the scalar case dim = -1 if t.ndim == 2 else 0 args.append(dim) idx = make_idx(t.shape[dim] if t.ndim != 0 else 1) args.append(idx) # source if copy or add: args.append(make_arg(shape)) elif fill: args.append(value) args = tuple(args) kwargs = {} if alpha is None else {"alpha": alpha} yield SampleInput(t, args=args, kwargs=kwargs) def sample_inputs_index_reduce(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) def make_idx(n, m): return make_tensor((n,), device=device, dtype=torch.int64, low=0, high=m) shapes = [((), ()), ((1,), (1,)), ((S, S), (S, M)), ((S, S, S), (S, M, S))] include_selfs = (True, False) reduce = op_info.variant_test_name assert reduce in ('prod', 'mean', 'amin', 'amax') for shape, include_self in product(shapes, include_selfs): self_shape, src_shape = shape # dim. We handle the scalar case dim = 1 if len(self_shape) >= 2 else 0 idx = make_idx(src_shape[dim] if len(src_shape) != 0 else 1, self_shape[dim] if len(self_shape) != 0 else 1) args = (dim, idx, make_arg(src_shape), reduce) yield SampleInput(make_arg(self_shape), args=args, kwargs={'include_self' : include_self}) # Sample inputs to test edge cases for backward if requires_grad and reduce == 'prod': # Check that gradients are propagated correctly for prod when zeros in self/src are reduced # This sample tests gradients for the following cases # (a) 1 zero reduced (from source (self[0, 1]), from self (self[0, 0])) # (b) 2 zeros reduced (1 from src and 1 from self (self[1, 0], self[1, 1]) # (c) no zeros reduced (self[2, 1], self[2, 2]) # (d) 2 zeros reduced (both from src) is tested in test/test_autograd.py # test_scatter_index_reduce_prod_gradgrad_error as this case is not supported for gradgrad input = torch.tensor([[0, 13], [0, 0], [15, 19]], dtype=dtype, device=device, requires_grad=requires_grad) src = torch.tensor([[2, 0], [0, 0], [2, 3], [2, 2]], dtype=dtype, device=device, requires_grad=requires_grad) idx = torch.tensor([0, 1, 2, 0], dtype=torch.long, device=device) yield SampleInput(input, args=(0, idx, src, reduce), kwargs={'include_self': True}) def sample_inputs__unsafe_masked_index(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) def make_idx(n, m, dim, d): view_shape = [1] * dim view_shape[d] = n return make_tensor((n,), device=device, dtype=torch.int64, low=0, high=m).view(view_shape) cases = [ ((S, S), S, M), ((S, S), M, S), ((S, S, S), S, M), ] fill_value = make_tensor([], dtype=dtype, device="cpu").item() for c in cases: self_shape, high, idx_size = c dim = len(self_shape) indices = [make_idx(idx_size, high, dim, d) for d in range(dim)] masks = [torch.logical_and(idx >= 0, idx < self_shape[i]) for i, idx in enumerate(indices) if idx is not None] mask = functools.reduce(torch.logical_and, masks) yield SampleInput(make_arg(self_shape), mask, indices, fill_value) masks = [torch.logical_and(idx >= 1, idx < self_shape[i] - 1) for i, idx in enumerate(indices) if idx is not None] mask = functools.reduce(torch.logical_and, masks) yield SampleInput(make_arg(self_shape), mask, indices, fill_value) def sample_inputs__unsafe_masked_index_put_accumulate(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, 
requires_grad=requires_grad) def make_idx(n, m, dim, d): view_shape = [1] * dim view_shape[d] = n return make_tensor((n,), device=device, dtype=torch.int64, low=0, high=m).view(view_shape) cases = [ ((S, S), S, (M, M)), ((S, S), M, (S, S + 1)), ((S, S, S), S, (M, M - 1, M + 1)), ] for c in cases: self_shape, high, idx_sizes = c dim = len(self_shape) indices = [make_idx(idx_sizes[d], high, dim, d) for d in range(dim)] masks = [torch.logical_and(idx >= 0, idx < self_shape[i]) for i, idx in enumerate(indices) if idx is not None] mask = functools.reduce(torch.logical_and, masks) values = make_arg(idx_sizes) yield SampleInput(make_arg(self_shape), mask, indices, values) masks = [torch.logical_and(idx >= 1, idx < self_shape[i] - 1) for i, idx in enumerate(indices) if idx is not None] mask = functools.reduce(torch.logical_and, masks) yield SampleInput(make_arg(self_shape), mask, indices, values) def sample_inputs_mode(op_info, device, dtype, requires_grad, **kwargs): args = ( ((S, S, S), (),), ((S, S, S), (1, ),), ((S, S, S), (1, True, ),), ((), (),), ((), (0,),), ((), (0, True,),), # Non-fused mode kernel on CUDA ((3000,), ()), ) make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad, low=None, high=None) return (SampleInput(make_arg(input_tensor), *args) for input_tensor, args in args) # Missing to test the nondeterminism of the operation # https://github.com/pytorch/pytorch/issues/53352 def sample_inputs_put(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) make_idx = partial(make_tensor, low=0, dtype=torch.int64, device=device, requires_grad=False) S = 3 # Generic inputs idx = torch.randperm(S * S, device=device, dtype=torch.int64)[:S] idx_list = [idx, -idx - 1] for idx, acc in product(idx_list, (True, False)): yield SampleInput(input=make_arg((S, S)), args=(idx.clone(), make_arg((S,)), acc)) # Scalar cases scalar_sizes = [(), (1,)] tgt_gen = (make_arg(size) for size in scalar_sizes) idx_gen = (make_idx(size, high=1) for size in scalar_sizes) src_gen = (make_arg(size) for size in scalar_sizes) for tgt, idx, src, acc in product(tgt_gen, idx_gen, src_gen, (True, False)): yield SampleInput(input=tgt.clone().requires_grad_(requires_grad), args=(idx.clone(), src.clone().requires_grad_(requires_grad), acc)) # Empty cases tgt_sizes = [(0,), (), (1,), (3, 2)] tgt_gen = (make_arg(size) for size in tgt_sizes) idx = make_idx((0,), high=1) src = make_arg((0,)) for tgt, acc in product(tgt_gen, (True, False)): yield SampleInput(input=tgt.clone().requires_grad_(requires_grad), args=(idx.clone(), src.clone().requires_grad_(requires_grad), acc)) def sample_inputs_take(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) make_idx = partial(make_tensor, low=0, dtype=torch.int64, device=device, requires_grad=False) S = 3 # Generic inputs: take S elements out of S * S index = make_idx((S,), high=(S * S)) for idx in (index, -index - 1): yield SampleInput(input=make_arg((S, S)), args=(idx,)) # Scalar cases scalar_sizes = [(), (1,)] src_gen = (make_arg(size) for size in scalar_sizes) idx_gen = (make_idx(size, high=1) for size in scalar_sizes) for src, idx in product(src_gen, idx_gen): yield SampleInput(input=src.clone().requires_grad_(requires_grad), args=(idx.clone(),)) # Empty cases src_sizes = [(0,), (), (1,), (3, 2)] src_gen = (make_arg(size) for size in src_sizes) idx = make_idx((0,), high=1) for src in 
src_gen: yield SampleInput(input=src.clone().requires_grad_(requires_grad), args=(idx.clone(),)) def sample_movedim_moveaxis(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad) yield SampleInput(make_arg((4, 3, 2, 1)), [0, 1, 2, 3], [3, 2, 1, 0]) yield SampleInput(make_arg((4, 3, 2, 1)), [0, -1, -2, -3], [-3, -2, -1, -0]) def reference_movedim_moveaxis(op_info, device, dtype, requires_grad, **kwargs): yield from sample_movedim_moveaxis(op_info, device, dtype, requires_grad, **kwargs) make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) # shape, source, destination args = ( # empty inputs ((), (), ()), # int inputs, negative ((3, 5, 7, 2), -2, 1), # swap bounds ((3, 5, 7, 2), (-1, 0), (0, -1)), # non-sequential, negative ((2, 3, 4, 5, 6), (3, -3, 4), (1, 0, -1)), # idempotence, negative ((2, 3, 4, 5, 6), (-3, 4, 3, 1), (-3, 4, 3, 1)), # reverse, sequential, positive ((6, 2, 3, 5, 4), (4, 3, 2, 1, 0), (0, 1, 2, 3, 4)), # reverse, non-sequential ((6, 2, 3, 5, 4), (-3, -2, -4, -5, -1), (2, 1, 3, 4, 0)), # reverse, sequential, negative ((6, 2, 3, 5, 4), (4, -2, 2, -4, -5), (-5, 1, 2, -2, -1)), ) for shape, source, destination in args: yield SampleInput(make_arg(shape), args=(source, destination)) def error_movedim_moveaxis(op_info, device, **kwargs): make_arg = partial(make_tensor, device=device, dtype=torch.float32) # source length < destination length yield ErrorInput( SampleInput(make_arg(2, 3, 4, 5, 6), args=((3, -3), (1, 0, -1))), error_regex=(r"movedim: Invalid source or destination dims: source " r"\(\[3, -3\] dims\) should contain the same number of " r"dims as destination \(\[1, 0, -1\] dims\)"), ) # source length > destination length yield ErrorInput( SampleInput(make_arg(2, 3, 4, 5, 6), args=((3, -3, 4), (1, 0))), error_regex=(r"movedim: Invalid source or destination dims: source " r"\(\[3, -3, 4\] dims\) should contain the same number of " r"dims as destination \(\[1, 0\] dims\)"), ) # repeated source dim, with negative indices yield ErrorInput( SampleInput(make_arg(2, 3, 4, 5, 6), args=((0, 4, -5), (1, 0, 2))), error_regex=r"movedim: repeated dim in `source` \(\[0, 4, -5\]\)", ) # repeated destination dim, with negative indices yield ErrorInput( SampleInput(make_arg(2, 3, 4, 5, 6), args=((1, 0, 2), (0, 4, -5))), error_regex=r"movedim: repeated dim in `destination` \(\[0, 4, -5\]\)", ) # repeated dim (both), with negative indices yield ErrorInput( SampleInput(make_arg(2, 3, 4, 5, 6), args=((1, 0, -4), (0, 4, -5))), error_regex=r"movedim: repeated dim in `source` \(\[1, 0, -4\]\)", ) # out of bounds source inputs, with negative indices yield ErrorInput( SampleInput(make_arg(2, 3, 4, 5, 6), args=((0, 1, -6), (1, 4, 2))), error_regex=r"Dimension out of range \(expected to be in range of \[-5, 4\], but got -6\)", error_type=IndexError, ) # out of bounds destination inputs, with negative indices yield ErrorInput( SampleInput(make_arg(2, 3, 4, 5, 6), args=((1, 4, 2), (0, 1, -6))), error_regex=r"Dimension out of range \(expected to be in range of \[-5, 4\], but got -6\)", error_type=IndexError, ) # out of bounds source input, int yield ErrorInput( SampleInput(make_arg(2, 3, 4, 5, 6), args=(-6, 1)), error_regex=r"Dimension out of range \(expected to be in range of \[-5, 4\], but got -6\)", error_type=IndexError, ) # out of bounds destination input, int yield ErrorInput( SampleInput(make_arg(2, 3, 4, 5, 6), args=(3, -6)), error_regex=r"Dimension out 
of range \(expected to be in range of \[-5, 4\], but got -6\)", error_type=IndexError, ) def sample_repeat_tile(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) rep_dims = ((), (0, ), (1, ), (0, 2), (1, 1), (2, 3), (2, 3, 2), (0, 2, 3), (2, 1, 1, 1),) shapes = ((), (0,), (2,), (3, 0), (3, 2), (3, 0, 1)) if requires_grad: # Tests for variant_consistency_jit, grad, gradgrad # are slower. Use smaller bags of `rep_dims` and `shapes` # in this case. rep_dims = ((), (0, ), (0, 2), (1, 1), (2, 3), (1, 3, 2), (3, 1, 1)) # type: ignore[assignment] shapes = ((), (0,), (2,), (3, 2)) # type: ignore[assignment] is_repeat_op = op_info.name in ['repeat', '_refs.repeat'] for rep_dim, shape in product(rep_dims, shapes): # `torch.repeat` errors for `len(rep_dims) < t.dim()`, # so we filter such combinations. if is_repeat_op and len(rep_dim) < len(shape): continue yield SampleInput(make_arg(shape), rep_dim) def sample_inputs_narrow_narrow_copy(op_info, device, dtype, requires_grad, *, is_narrow, **kwargs): shapes_and_args = ( ((S, S, S), 1, 2, 2), ((S, S, S), -1, 2, 2), ((S, S, S), 1, 0, 0), ((S, S, S), -1, 0, 0), ((S, S, S), 2, 1, 2), ) for shape, dim, start, length in shapes_and_args: tensor = make_tensor(shape, dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad) yield SampleInput(tensor, dim, start, length) # narrow also accepts the start argument being a Tensor if is_narrow: yield SampleInput(tensor, dim, torch.tensor(start), length) def reference_inputs_narrow_narrow_copy(op_info, device, dtype, requires_grad, *, is_narrow, **kwargs): yield from sample_inputs_narrow_narrow_copy(op_info, device, dtype, requires_grad, is_narrow=is_narrow, **kwargs) shapes_and_args = ( # 1-dim ((M,), 0, 0, 0), # 0 elems from the left ((M,), -1, -1, 0), # 0 elems from the right ((M,), 0, 5, 3), # 3 elems from the left ((M,), 0, -5, 2), # 2 elems from the right ((M,), -1, 0, M), # M elems from the left ((M,), 0, -M, M), # M elems from the right # 2-dim ((M, S), 1, 0, 0), # dim 1, 0 elems from the left ((S, M), -2, -1, 0), # dim 0, 0 elems from the right ((L, S), 1, 2, 3), # dim 1, 3 elems from the left ((L, S), -1, 3, 2), # dim 1, 2 elems from the left ((M, L), 0, 0, M), # dim 0, M elems from the left ((M, L), -1, -L, L), # dim 1, L elems from the right # 3-dim ((L, M, S), 2, 0, 0), # dim 2, 0 elems from the left ((M, S, L), -1, -1, 0), # dim 2, 0 elems from the right ((S, L, M), 2, 0, M), # dim 2, M elems from the left ((L, S, M), -1, -M, M), # dim 2, M elems from the right ((S, L, M), 1, 0, 0), # dim 1, 0 elems from the left ((S, L, M), 0, 2, 1), # dim 0, 1 elem from the left ((M, S, M), -1, -5, 4), # dim 2, 4 elems from the right ) for shape, dim, start, length in shapes_and_args: tensor = make_tensor(shape, dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad) yield SampleInput(tensor, dim, start, length) # narrow also accepts the start argument being a Tensor if is_narrow: yield SampleInput(tensor, dim, torch.tensor(start), length) def error_inputs_narrow_narrow_copy(op_info, device, *, is_narrow, is_ref): make_arg = partial(make_tensor, device=device, dtype=torch.float32) # 0-dim yield ErrorInput(SampleInput(make_arg(()), 0, 0, 1), error_type=RuntimeError, error_regex=r"narrow\(\) cannot be applied to a 0-dim tensor\.") # out of bounds dim if not is_narrow and not is_ref and torch.device(device).type == 'cpu': # narrow_copy_dense_cpu_out yield ErrorInput(SampleInput(make_arg((M, S, 
L)), 3, 0, 0), error_type=RuntimeError, error_regex=r"Expected dim < static_cast<int64_t>\(self_sizes.size\(\)\) to be true, but got false\.") else: yield ErrorInput(SampleInput(make_arg((M, S, L)), 3, 0, 0), error_type=IndexError, error_regex=r"Dimension out of range \(expected to be in range of \[-3, 2\], but got 3\)") # out of bounds dim (negative) yield ErrorInput(SampleInput(make_arg((L, S, M)), -4, 0, 0), error_type=IndexError, error_regex=r"Dimension out of range \(expected to be in range of \[-3, 2\], but got -4\)") # out of bounds start yield ErrorInput(SampleInput(make_arg((L, M, S)), 1, M + 1, 0), error_type=IndexError, error_regex=r"start out of range \(expected to be in range of \[-10, 10\], but got 11\)") # out of bounds start (negative) yield ErrorInput(SampleInput(make_arg((L, M, S)), 1, -M - 1, 0), error_type=IndexError, error_regex=r"start out of range \(expected to be in range of \[-10, 10\], but got -11\)") # out of bounds length yield ErrorInput(SampleInput(make_arg((S, L, M)), 2, 0, M + 1), error_type=RuntimeError, error_regex=r"start \(0\) \+ length \(11\) exceeds dimension size \(10\)\.") # out of bounds length (negative) if not is_narrow and not is_ref and torch.device(device).type == 'cpu': # narrow_copy_dense_cpu_out yield ErrorInput(SampleInput(make_arg((M,)), 0, 0, -1), error_type=RuntimeError, error_regex=r"start \(0\) \+ length \(-1\) exceeds dimension size \(10\)\.") else: yield ErrorInput(SampleInput(make_arg((M,)), 0, 0, -1), error_type=RuntimeError, error_regex=r"narrow\(\): length must be non-negative\.") # Test Tensor overload that was added for XLA. Start must be an 0-dim # integral Tensor. narrow_copy doesn't have this overload. # https://github.com/pytorch/pytorch/issues/31558 if is_narrow: # *1-dim* integral Tensor yield ErrorInput(SampleInput(make_arg((L, M, S)), 1, make_arg(S, dtype=torch.int), 2), error_type=RuntimeError, error_regex=r"start must be an 0-dim integral Tensor\.") # 0-dim *bool* Tensor (bools are not allowed) yield ErrorInput(SampleInput(make_arg((L, M, S)), -3, make_arg((), dtype=torch.bool), 3), error_type=RuntimeError, error_regex=r"start must be an 0-dim integral Tensor\.") def sample_trapezoid(op_info, device, dtype, requires_grad, **kwargs): y_shape_x_shape_and_kwargs = [ ((2, 3), (2, 3), {}), ((2, 3), (2, 3), {'dim': 1}), ((6,), (6,), {}), ((6,), None, {}), # When 'trapezoid' is called with an empty input, it does not produce an output with requires_grad # See Issue #{61619} # ((6,0), (6,0), {}), ((2, 3), (1, 3), {}), ((3, 3), (3, 3), {}), ((3, 3), (3, 3), {'dim': -2}), ((5,), None, {'dx': 2.0}), ((2, 2), None, {'dx': 3.0}) ] make_arg = partial(make_tensor, dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad) for y_shape, x_shape, kwarg in y_shape_x_shape_and_kwargs: y_tensor = make_arg(y_shape) if x_shape is not None: x_tensor = make_arg(x_shape) yield SampleInput(y_tensor, x_tensor, **kwarg) else: yield SampleInput(y_tensor, **kwarg) def sample_cumulative_trapezoid(op_info, device, dtype, requires_grad, **kwargs): y_shape_x_shape_and_kwargs = [ ((2, 3), (2, 3), {}), ((2, 3), (2, 3), {'dim': 1}), ((6,), (6,), {}), ((6,), None, {}), # When 'cumulative_trapezoid' is called with an empty input, it does not produce an output with requires_grad # See Issue #{61619} # ((6,0), (6,0), {}), ((2, 3), (1, 3), {}), ((3, 3), (3, 3), {}), ((3, 3), (3, 3), {'dim': -2}), ((5,), None, {'dx': 2.0}), ((2, 2), None, {'dx': 3.0}) ] make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad, 
low=None, high=None) for y_shape, x_shape, kwarg in y_shape_x_shape_and_kwargs: y_tensor = make_arg(y_shape) if x_shape is not None: x_tensor = make_arg(x_shape) yield SampleInput(y_tensor, x_tensor, **kwarg) else: yield SampleInput(y_tensor, **kwarg) def sample_unsqueeze(op_info, device, dtype, requires_grad, **kwargs): shapes_and_axes = [ ((3, 4, 5), 0), ((3, 4, 5), 1), ((3, 4, 5), 3), ((3, 4, 5), -1), ((3, 4, 5), -3), ((), 0), ((), -1), ((1,), 0), ((1,), -1), ] for shape, axis in shapes_and_axes: tensor = make_tensor(shape, dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad) yield SampleInput(tensor, axis) def sample_inputs_nn_unfold(op_info, device, dtype, requires_grad, **kwargs): shapes = ((0, 1, 5, 5), (2, 3, 5, 5)) kernel_sizes = (2, (2, 2), (2, 3)) dilations = (1, 2, (1, 2)) paddings = (0, 1, (1, 2)) strides = (1, 2, (1, 2)) cases = product(shapes, kernel_sizes, dilations, paddings, strides) make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) for shape, kernel_size, dilation, padding, stride in cases: tensor = make_arg(shape) yield SampleInput(tensor, kernel_size, dilation, padding, stride) # With default args yield SampleInput(make_arg((1, 1, 5, 5)), (3, 3)) def sample_inputs_squeeze(op_info, device, dtype, requires_grad, **kwargs): shapes_and_args = ( ((S, 1, S, 1), ()), ((1, 1, 1, 1), ()), ((1, 1, 1, 1), (0,)), ((S, 1, S, 1), (1,)), ((S, 1, S, 1), (-1,)), ((S, 1, S, 1), (2,)), ((S, 1, S, 1), (-2,)), ((), (0, )), ) for shape, args in shapes_and_args: tensor = make_tensor(shape, dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad) yield SampleInput(tensor, args=args) def sample_inputs_squeeze_multiple(op_info, device, dtype, requires_grad, **kwargs): shapes_and_args = ( ((1, 1, 1, 1), ()), ((S, 1, S, 1), (1,)), ((S, 1, S, 1), (-1,)), ((S, 1, S, 1), (1, 3)), ((S, 1, S, 1), (1, 2,)), ((), (0,)), ) for shape, dims in shapes_and_args: tensor = make_tensor(shape, dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad) yield SampleInput(tensor, dims) def _squeeze_ref(x, axis=None): # NumPy doesn't allow squeezing scalars if x.ndim == 0: return x if isinstance(axis, Sequence): # Numpy doesn't allow specifying non-singular dimensions axis = tuple(a for a in axis if x.shape[a] == 1) if isinstance(axis, int) and x.shape[axis] != 1: return x return np.squeeze(x, axis) def sample_inputs_nn_pad(op_info, device, dtype, requires_grad, mode, **kwargs): assert mode in ('constant', 'reflect', 'replicate', 'circular') if mode in ['reflect', 'replicate']: cases: tuple = ( # ignore ((1, 3), (1, 2)), ((1, 3), (0, 1)), ((0, 3, 3), (1, 2)), ((0, 3, 3), (0, 1)), ((1, 3, 3), (1, 2)), ((1, 3, 3), (0, 1)), ((1, 3, 3), (0, 2, 0, 1)), ((0, 3, 3, 3), (0, 2, 0, 1)), ((3, 3, 5, 5), (0, 2, 0, 1)), ((3, 3, 5, 5), (1, 1, 1, 1, 1, 1)), ((1, 3, 3, 3, 3), (1, 1, 1, 1, 1, 1)), ((1, 3, 4, 4), (-1, 1, -2, 1)), ) elif mode == 'constant': cases = ( ((1, 3), (1, 2)), ((1, 3), (0, 1)), ((1, 3), (0, 2, 0, 1)), ((0, 3, 3), (1, 2)), ((0, 3, 3), (0, 1)), ((0, 3, 3), (0, 2, 0, 1)), ((0, 3, 3), (1, 1, 1, 1, 1, 1)), ((1, 3, 3), (1, 2)), ((1, 3, 3), (0, 1)), ((1, 3, 3), (0, 2, 0, 1)), ((1, 3, 3), (1, 1, 1, 1, 1, 1)), ((0, 3, 3, 3), (1, 2)), ((0, 3, 3, 3), (0, 1)), ((0, 3, 3, 3), (0, 2, 0, 1)), ((0, 3, 3, 3), (1, 1, 1, 1, 1, 1)), ((3, 3, 5, 5), (1, 2)), ((3, 3, 5, 5), (0, 1)), ((3, 3, 5, 5), (0, 2, 0, 1)), ((3, 3, 5, 5), (1, 1, 1, 1, 1, 1)), ((1, 3, 3, 3, 3), (1, 2)), ((1, 3, 3, 3, 3), (0, 1)), ((1, 3, 3, 3, 3), (0, 2, 0, 
1)), ((1, 3, 3, 3, 3), (1, 1, 1, 1, 1, 1)), ((1, 3, 4, 4), (-1, 1, -2, 1)), ) else: # mode == 'circular' if dtype == torch.bool: # test_dtypes fails on ASAN with for the case ab # runtime error: load of value 190, which is not a valid value for type 'bool' # Reference: https://github.com/pytorch/pytorch/pull/62814#issuecomment-894156562 # Reference Issue: https://github.com/pytorch/pytorch/issues/63034 cases = ( ((2, 3, 3), (1, 2)), ((1, 3, 3), (1, 2)), ) else: cases = ( ((0, 3, 3), (1, 2)), ((0, 3, 3), (0, 1)), ((1, 3, 3), (1, 2)), ((1, 3, 3), (0, 1)), ((0, 3, 3, 3), (0, 2, 0, 1)), ((3, 3, 5, 5), (0, 2, 0, 1)), ((1, 3, 3, 3, 3), (1, 1, 1, 1, 1, 1)), ((1, 3, 4, 4), (-1, 1, -2, 1)), ) make_inp = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) if mode == 'constant': # Default args yield SampleInput(make_inp((1, 3, 3)), args=((2, 2),)) if mode in ['reflect', 'replicate', 'circular']: for shape, pad in cases: yield SampleInput(make_inp(shape), args=(pad, mode)) else: # mode == 'constant' for pad_value in (1., 2.): for shape, pad in cases: yield SampleInput(make_inp(shape), args=(pad, mode, pad_value)) def sample_inputs_nn_pad_replicate_negative(op_info, device, dtype, requires_grad, **kwargs): cases: tuple = ( ((5, 3, 4, 4), (-4, 5, 0, 0)), ((6, 2, 4, 4), (0, 0, 2, -4)), ((5, 6, 4, 4), (5, -4, -4, 3)), ((4, 2, 5, 5), (-2, -1, 4, 6)), ((2, 6, 5, 5), (8, -1, -1, -3)), ((8, 1, 5, 5), (-2, -1, -1, -3)), ) make_inp = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) for shape, pad in cases: yield SampleInput(make_inp(shape), args=(pad, 'replicate')) def sample_inputs_constant_pad_nd(op_info, device, dtype, *args, **kwargs): # Inherit sample inputs from nn.pad, but transform them to fit # constant_pad_nd's interface nn_samples = sample_inputs_nn_pad(op_info, device, dtype, *args, mode='constant', **kwargs) # NOTE: primTorch is more strict about the type of the fill value argument # So we must cast it to the correct dtype from torch._prims_common import dtype_to_type scalar_type = dtype_to_type(dtype) def drop_mode_argument(input, pad, mode=None, value=None): if value is None: return SampleInput(input, args=(pad,)) else: return SampleInput(input, args=(pad, scalar_type(value))) for sample in nn_samples: yield drop_mode_argument(sample.input, *sample.args, **sample.kwargs) def sample_inputs_repeat_interleave(op_info, device, dtype, requires_grad, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) yield SampleInput(make_input(()), repeats=2) yield SampleInput(make_input((2, 3, 4)), repeats=2) yield SampleInput(make_input((2, 3, 4)), repeats=2, dim=1) yield SampleInput(make_input((2, 3, 4)), repeats=torch.arange(3, device=device), dim=1) def sample_inputs_stft(op_info, device, dtype, requires_grad, **kwargs): def mt(shape, **kwargs): return make_tensor(shape, device=device, dtype=dtype, requires_grad=requires_grad, **kwargs) yield SampleInput(mt(100), n_fft=10, return_complex=True) yield SampleInput(mt(100), n_fft=10, return_complex=False) if dtype.is_complex: yield SampleInput(mt(100), n_fft=10) for center in [False, True]: yield SampleInput(mt(10), n_fft=7, center=center, return_complex=True) yield SampleInput(mt((10, 100)), n_fft=16, hop_length=4, center=center, return_complex=True) window = mt(16, low=.5, high=2.0) yield SampleInput( mt((2, 100)), kwargs=dict(n_fft=16, window=window, return_complex=True, center=center)) yield SampleInput( mt((3, 100)), kwargs=dict(n_fft=16, window=window, 
return_complex=True, center=center)) if not dtype.is_complex: yield SampleInput( mt((10, 100)), n_fft=16, window=window, onesided=False, return_complex=True) def sample_inputs_istft(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) def mt(shape, **kwargs): real_shape = shape if dtype.is_complex else shape + (2,) return make_arg(real_shape, **kwargs) yield SampleInput(mt((10, 2)), kwargs=dict(n_fft=10)) yield SampleInput(mt((6, 3)), kwargs=dict(n_fft=6, onesided=False)) yield SampleInput(mt((6, 4)), kwargs=dict(n_fft=10, onesided=True)) for center in [False, True]: yield SampleInput(mt((10, 10, 6)), kwargs=dict(n_fft=10, center=center)) yield SampleInput(mt((1, 9, 10)), kwargs=dict(n_fft=16, hop_length=4, center=center)) window = make_arg(10, low=.5, high=2.0) yield SampleInput(mt((10, 10, 6)), kwargs=dict( n_fft=10, window=window, center=center, return_complex=dtype.is_complex)) yield SampleInput(mt((10, 10, 10)), kwargs=dict( n_fft=10, window=window[:8], win_length=8, center=center, return_complex=True)) real_window = window if not dtype.is_complex else window.real yield SampleInput(mt((10, 5, 6)), kwargs=dict(n_fft=8, window=real_window[:8], center=center)) def sample_inputs_ormqr(op_info, device, dtype, requires_grad, **kwargs): # create a helper function wrapping `make_tensor` make_input = partial(make_tensor, dtype=dtype, device=device, low=-1, high=1) batches = [(), (0, ), (2, ), (2, 1)] ns = [5, 2, 0] tf = [True, False] for batch, (m, n), left, transpose in product(batches, product(ns, ns), tf, tf): input = make_input((*batch, m, n)) reflectors, tau = torch.geqrf(input) reflectors.requires_grad_(requires_grad) tau.requires_grad_(requires_grad) other_matrix_shape = (m, n) if left else (n, m) other = make_input((*batch, *other_matrix_shape), requires_grad=requires_grad) yield SampleInput(reflectors, tau, other, left=left, transpose=transpose) def sample_inputs_cholesky_solve(op_info, device, dtype, requires_grad=False, **kwargs): cholesky_inverse_samples = sample_inputs_linalg_cholesky_inverse( op_info, device, dtype, requires_grad=False ) for sample in cholesky_inverse_samples: psd_matrix = sample.input sample.input = make_tensor(psd_matrix.shape, dtype=dtype, device=device, requires_grad=requires_grad, low=None, high=None) sample.args = (psd_matrix.requires_grad_(requires_grad),) yield sample def sample_inputs_lu(op_info, device, dtype, requires_grad=False, **kwargs): make_arg = partial(make_fullrank_matrices_with_distinct_singular_values, dtype=dtype, device=device, requires_grad=requires_grad) # not needed once OpInfo tests support Iterables batch_shapes = ((), (3,), (3, 3)) for batch_shape, get_infos, size_delta in product(batch_shapes, (True, False), (-2, -1, 0, +1, +2)): shape = batch_shape + (S + size_delta, S) input = make_arg(*shape) yield SampleInput(input, args=(True, get_infos)) def sample_inputs_lu_unpack(op_info, device, dtype, requires_grad=False, **kwargs): def out_fn(output): return output[1], output[2] for lu_sample in sample_inputs_linalg_lu(op_info, device, dtype, requires_grad, **kwargs): lu_data, pivots = torch.linalg.lu_factor(lu_sample.input) lu_data.requires_grad_(requires_grad) yield SampleInput(lu_data, pivots).with_metadata(output_process_fn_grad=out_fn) def sample_inputs_roll(op_info, device, dtype, requires_grad=False, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) args = ((0, 0), (1, 2), (0, 2), (2, 0), (-1, 0), 
(10000, 1), (2,), ((1, 2, -1), (0, 1, 2))) for arg in args: yield SampleInput(make_arg((0, 0, 0)), args=arg) yield SampleInput(make_arg((S, S, S)), args=arg) # Scalar tensor yield SampleInput(make_arg(()), args=(10, )) def error_inputs_roll(op_info, device, **kwargs): make_arg = partial(make_tensor, device=device, dtype=torch.float32) err_msg1 = "`shifts` required" s1 = SampleInput(make_arg((S,)), ()) yield ErrorInput(s1, error_regex=err_msg1) err_msg2 = ("shifts and dimensions must align") s2 = SampleInput(make_arg((S, S)), (2, 1), 0) yield ErrorInput(s2, error_regex=err_msg2) err_msg3 = ("out of range") s3 = SampleInput(make_arg((S, )), 0, 2) yield ErrorInput(s3, error_regex=err_msg3, error_type=IndexError) err_msg4 = ("Dimension specified as 0") s4 = SampleInput(make_arg(()), 0, 0) yield ErrorInput(s4, error_regex=err_msg4, error_type=IndexError) def sample_inputs_rot90(op_info, device, dtype, requires_grad=False, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) args = itertools.product(range(-5, 6), [(0, 1), (1, 2), (1, -1)]) yield SampleInput(make_arg((S, S, S))) for arg in args: yield SampleInput(make_arg((S, S, S)), args=arg) def error_inputs_rot90(op_info, device, **kwargs): make_arg = partial(make_tensor, device=device, dtype=torch.float32) err_msg1 = "expected total rotation dims" s1 = SampleInput(make_arg((S, S)), dims=(0,)) yield ErrorInput(s1, error_regex=err_msg1) err_msg2 = "expected total dims >= 2" s2 = SampleInput(make_arg((S,))) yield ErrorInput(s2, error_regex=err_msg2) err_msg3 = "expected rotation dims to be different" s3 = SampleInput(make_arg((S, S)), dims=(1, 1)) yield ErrorInput(s3, error_regex=err_msg3) def sample_inputs_std_var(op_info, device, dtype, requires_grad, **kwargs): tensor_nd = partial(make_tensor, (S, S, S), device=device, dtype=dtype, requires_grad=requires_grad) tensor_1d = partial(make_tensor, (S,), device=device, dtype=dtype, requires_grad=requires_grad) yield SampleInput(tensor_nd()) yield SampleInput(tensor_nd(), dim=1) yield SampleInput(tensor_nd(), dim=1, unbiased=True, keepdim=True) yield SampleInput(tensor_1d(), dim=0, unbiased=True, keepdim=True) yield SampleInput(tensor_1d(), dim=0, unbiased=False, keepdim=False) yield SampleInput(tensor_nd(), dim=(1,), correction=1.3) yield SampleInput(tensor_nd(), dim=(1,), correction=S // 2) yield SampleInput(tensor_nd(), dim=None, correction=0, keepdim=True) yield SampleInput(tensor_nd(), dim=None, correction=None) yield SampleInput(tensor_nd(), correction=0, keepdim=True) yield SampleInput(make_tensor(3, 4, 5, device=device, dtype=dtype, requires_grad=requires_grad), dim=-3) def sample_inputs_std_var_unbiased(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) # Test var_mean(Tensor self, bool unbiased=True) -> (Tensor, Tensor) yield SampleInput(make_arg((S, S)), True) yield SampleInput(make_arg((S,)), False) def _generate_correlation_inputs(device, dtype, requires_grad, **kwargs): shapes = [(2,), (1, 2), (3, 2), (2, 3)] for shape in shapes: yield make_tensor(shape, dtype=dtype, device=device, requires_grad=requires_grad) def sample_inputs_corrcoef(op_info, device, dtype, requires_grad, **kwargs): return (SampleInput(t) for t in _generate_correlation_inputs(device, dtype, requires_grad)) def sample_inputs_cov(op_info, device, dtype, requires_grad, **kwargs): for t in _generate_correlation_inputs(device, dtype, requires_grad): yield SampleInput(t) num_observations = 
t.numel() if t.ndimension() < 2 else t.size(1) fweights = make_tensor((num_observations,), dtype=torch.int, device=device, low=1, high=10) aweights = make_tensor((num_observations,), dtype=torch.float, device=device, low=0, high=1, requires_grad=requires_grad) for correction, fw, aw in product(range(num_observations), [None, fweights], [None, aweights]): yield SampleInput(t.clone().requires_grad_(requires_grad), correction=correction, fweights=fw, aweights=aw) def error_inputs_cov(op_info, device, **kwargs): a = torch.rand(S, device=device) yield ErrorInput( SampleInput(torch.rand(S, S, S, device=device)), error_regex="expected input to have two or fewer dimensions") yield ErrorInput( SampleInput(a, fweights=torch.rand(S, S, device=device)), error_regex="expected fweights to have one or fewer dimensions") yield ErrorInput( SampleInput(a, aweights=torch.rand(S, S, device=device)), error_regex="expected aweights to have one or fewer dimensions") yield ErrorInput( SampleInput(a, fweights=torch.rand(S, device=device)), error_regex="expected fweights to have integral dtype") yield ErrorInput( SampleInput(a, aweights=torch.tensor([1, 1], device=device)), error_regex="expected aweights to have floating point dtype") yield ErrorInput( SampleInput(a, fweights=torch.tensor([1], device=device)), error_regex="expected fweights to have the same numel") yield ErrorInput( SampleInput(a, aweights=torch.rand(1, device=device)), error_regex="expected aweights to have the same numel") yield ErrorInput( SampleInput(a, fweights=torch.tensor([-1, -2, -3, -4 , -5], device=device)), error_regex="fweights cannot be negative") yield ErrorInput( SampleInput(a, aweights=torch.tensor([-1., -2., -3., -4., -5.], device=device)), error_regex="aweights cannot be negative") def sample_inputs_permute(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) cases = [((1, 2, 3, 4), (0, 2, 3, 1)), ((1, 2, 3, 4), (0, -2, -1, 1)), ((), ()), ((1, 2, 3, 4), (2, 1, 3, 0))] for shape, args in cases: yield SampleInput(make_arg(shape), args=(args,)) def reference_inputs_permute(op, device, dtype, requires_grad, **kwargs): yield from sample_inputs_permute(op, device, dtype, requires_grad, **kwargs) make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) cases = ( ((), ()), ((1,), (0,)), ((2, 2), (1, 0)), ((2, 2), (0, 1)), ((2, 0, 1), (0, 2, 1)), ((3, 4, 2), (2, 1, 0)), ((3, 4, 2), (1, 0, 2)), ((3, 4, 2), (0, 1, 2)), ) # Adds tricky permutations and permutations with noncontiguity for shape, permutation in cases: for p in itertools.permutations(permutation): a = make_arg(shape).permute(p) yield SampleInput(a, args=(permutation,)) a = make_arg(shape, noncontiguous=True).permute(p) yield SampleInput(a, args=(permutation,)) def error_inputs_softshrink(op, device, **kwargs): yield ErrorInput(SampleInput(make_tensor((1,), dtype=torch.float, device=device), kwargs={"lambd": -0.5}), error_regex="lambda must be greater or equal to 0, but found to be -0.5") def sample_inputs_softshrink(op_info, device, dtype, requires_grad=False, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) # The additional sample is to check additional values of lambd beyond the default # value (what is already checked by sample_inputs_elementwise_unary) for lbda in (0., 0.5): yield SampleInput(make_arg(S, S), kwargs={"lambd": lbda}) yield from sample_inputs_elementwise_unary(op_info, device, dtype, 
requires_grad) def sample_inputs_hardshrink(op_info, device, dtype, requires_grad=False, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) # The additional sample is to check additional values of lambd beyond the default # value (what is already checked by sample_inputs_elementwise_unary) # Note that unlike softshrink, lambd is allowed to be negative for hardshrink for lbda in (-0.5, 0., 0.5): yield SampleInput(make_arg(S, S), kwargs={"lambd": lbda}) yield from sample_inputs_elementwise_unary(op_info, device, dtype, requires_grad) def sample_inputs_hardtanh(op_info, device, dtype, requires_grad=False, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) # The additional sample is to check additional values of min_val and max_val beyond the default # value (what is already checked by sample_inputs_elementwise_unary) for max_val, min_val in ((0.5, -0.5), (0., 0.)): yield SampleInput(make_arg(S, S), kwargs={"min_val": min_val, "max_val": max_val}) yield from sample_inputs_elementwise_unary(op_info, device, dtype, requires_grad) def error_inputs_hardtanh(op_info, device, **kwargs): # Tests that hardtanh errors out when passed min_val > max_val. yield ErrorInput(SampleInput(make_tensor((1,), dtype=torch.float, device=device), kwargs={"min_val": 0.5, "max_val": -0.5}), error_type=ValueError, error_regex="min_val cannot be greater than max_val") def sample_inputs_einsum(op_info, device, dtype, requires_grad=False, **kwargs): def c(t): return t.clone().requires_grad_(requires_grad) make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) x = make_arg((3,)) y = make_arg((4,)) A = make_arg((2, 3,)) B = make_arg((1, 3,)) C = make_arg((1, 2, 3,)) D = make_arg((1, 3, 4,)) E = make_arg((4, 4,)) H = make_arg((3, 3,)) I = make_arg((1, 3, 1,)) # Vector operations yield SampleInput([c(x)], 'i->') # sum yield SampleInput([c(x), c(y)], 'i,j->ij') # outer # Matrix operations yield SampleInput([c(A)], "ij->i") # col sum yield SampleInput([c(A), c(B)], "ij,kj->ik") # matmul yield SampleInput([c(A), c(E)], "ij,Ab->ijAb") # matrix outer product # Tensor operations yield SampleInput([c(C), c(D)], "aij,ajk->aik") # batch matmul yield SampleInput([c(D), c(E)], "aij,jk->aik") # tensor matrix contraction yield SampleInput([c(C), c(B)], "ijk,ik->j") # non contiguous # Test diagonals yield SampleInput([c(I)], 'iji->j') # non-contiguous trace # Test ellipsis yield SampleInput([c(H)], "i...->...") yield SampleInput([c(C), c(x)], '...ik, ...j -> ij') def sample_inputs_flip(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) sizes = ((S, M, S), (S, 0, M)) all_dims = ((0, 1, 2), (0,), (0, 2), (-1,), ()) for size, dims in product(sizes, all_dims): yield SampleInput(make_arg(size), kwargs={"dims": dims}) def sample_inputs_fliplr_flipud(op_info, device, dtype, requires_grad, **kwargs): shapes = [ (S, M, S), (S, 0, M), ] make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) return (SampleInput(make_arg(shape, low=None, high=None)) for shape in shapes) def error_inputs_fliplr(op, device, **kwargs): yield ErrorInput(SampleInput(make_tensor((1,), dtype=torch.float, device=device)), error_regex="Input must be >= 2-d.") def error_inputs_flipud(op, device, **kwargs): yield ErrorInput(SampleInput(make_tensor((), dtype=torch.float, device=device)), error_regex="Input must be >= 1-d.") def 
sample_inputs_clamp(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad) make_integral_arg = partial(make_tensor, dtype=torch.int32, device=device, low=None, high=None, requires_grad=False) shape = (S, M, S) yield SampleInput(make_arg(shape), args=(make_arg(shape), make_arg(shape))) yield SampleInput(make_arg(shape), args=(make_arg(shape[1:]), make_arg(shape[1:]))) yield SampleInput(make_arg(shape), args=(make_arg((S, 1, S)),)) yield SampleInput(make_arg(shape), args=(None, make_arg(shape))) yield SampleInput(make_arg(shape), args=(make_arg(shape), None)) # test type promotion yield SampleInput(make_arg(shape), args=(make_integral_arg(shape), None)) yield SampleInput(make_arg(shape), args=(make_arg(shape), make_integral_arg(shape))) def reference_inputs_elementwise_ternary(op, device, dtype, requires_grad, *, sample_inputs_func, supports_scalars=False, **kwargs): yield from sample_inputs_func(op, device, dtype, requires_grad, **kwargs) make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) make_scalar_tensor = partial(make_tensor, (), device='cpu', dtype=dtype, requires_grad=requires_grad) supported_dtypes = op.supported_dtypes(device) # broadcasting and oncontiguous cases cases = ( ((4, 4), (4, 4), (4, 4)), ((4, 4), (1, 4, 4), (4, 4)), ((4, 4), (1, 4, 4), (4, 1, 4)), ((4, 4, 1), (1, 4, 4), (4, 4)), ((4, 1), (1, 4, 4), (1, 4)), ((4, 4), (), (4, 4)), ((4, 4), (), ()), ((), (4, 4), (1, 4, 4)), ) for a, b, c in cases: yield SampleInput(make_arg(a), args=(make_arg(b), make_arg(c))) yield SampleInput(make_arg(a, noncontiguous=True), args=(make_arg(b).transpose(0, -1), make_arg(c, noncontiguous=True).transpose(0, -1))) # scalar cases if supports_scalars: cases = [ ((), 1, 2,), ((), 1., 2), ((4, 4), 1., 2,), ((3, 4), make_scalar_tensor(), make_scalar_tensor()), ] if torch.complex64 in supported_dtypes: cases.extend([ ((3, 1, 4), complex(1, 2), 3.), ]) for a, b, c in cases: yield SampleInput(make_arg(a), args=(b, c)) # type promotion cases # int x float if torch.float in supported_dtypes and torch.long in supported_dtypes: a = make_arg((), dtype=torch.long) b = make_arg((1, 4), dtype=torch.float) c = make_arg((3, 4)) cases = ( (a, b, c), (c, a, b), ) for a, b, c in cases: yield SampleInput(a, args=(b, c)) # NaN propagation if dtype.is_floating_point or dtype.is_complex: nan = float('nan') if dtype.is_floating_point else complex(float('nan'), float('nan')) a = make_arg((12,)) a[4] = nan a[7] = nan b = make_arg((12,)) b[1] = nan b[7] = nan c = make_arg((12,)) c[9] = nan yield SampleInput(a, args=(b, c)) def _clamp_min_numpy(a, min=None): return np.maximum(a, min) def _clamp_max_numpy(a, max=None): return np.minimum(a, max) def _clamp_numpy(a, min=None, max=None): if min is None: return np.minimum(a, max) if max is None: return np.maximum(a, min) return np.minimum(max, np.maximum(a, min)) def sample_inputs_cumprod(op_info, device, dtype, requires_grad, **kwargs): def make_arg(shape): # shrink values to be in the interval [-1, +1] for better precision in gradgradcheck return make_tensor(shape, dtype=dtype, device=device, low=-1, high=+1, requires_grad=requires_grad) def prod_zeros(dim_select): assert len(dim_select) == 2 result = make_arg(3 * (S,)) result.narrow(dim_select[0], 0, 1).narrow(dim_select[1], 1, 1).zero_() result.narrow(dim_select[0], 2, 1).narrow(dim_select[1], 3, 1).zero_() result.narrow(dim_select[0], 4, 1).narrow(dim_select[1], 3, 1).zero_() 
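        # Zeroing out scattered entries gives reduced slices containing zero, one, or
        # multiple zeros. prod/cumprod backward treats these cases differently (with a
        # single zero, that element still receives a nonzero gradient from the product
        # of the remaining entries, while two or more zeros make every gradient in the
        # slice zero), so these samples are presumably meant to exercise those branches.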
return result for dim in range(3): yield SampleInput(make_arg((S, S, S)), args=(dim,)) # Scalar tensors and empty tensor for size in [(), (1,), (0,)]: yield SampleInput(make_arg(size), args=(0,)) yield SampleInput(prod_zeros([0, 1]), args=(1,)) yield SampleInput(prod_zeros([0, 2]), args=(1,)) yield SampleInput(prod_zeros([1, 2]), args=(1,)) # test dtype kwarg yield SampleInput(prod_zeros([1, 2]), args=(1,), kwargs={'dtype': dtype}) def sample_inputs_view_as_complex(op_info, device, dtype, requires_grad, **kwargs): yield SampleInput(make_tensor((S, 2), dtype=dtype, device=device, requires_grad=requires_grad)) def sample_inputs_view_as_real(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) sizes = ((S, S), ()) return (SampleInput(make_arg(size)) for size in sizes) def error_inputs_complex(op_info, device, is_ref=False, **kwargs): make_arg = partial(make_tensor, dtype=torch.float32, device=device) if is_ref: error_float = "Expected both inputs to be Half, Float or Double tensors but got torch.float32 and torch.int32" error_dtype = "Expected object of scalar type torch.float32 but got scalar type torch.float64 for second argument" error_out = "Expected out tensor to have dtype torch.complex128 but got torch.complex64 instead" else: error_float = "Expected both inputs to be Half, Float or Double tensors but got Float and Int" error_dtype = "Expected object of scalar type Float but got scalar type Double for second argument" error_out = "Expected object of scalar type ComplexDouble but got scalar type ComplexFloat for argument 'out'" yield ErrorInput(SampleInput(make_arg(M, S), make_arg(M, S, dtype=torch.int)), error_type=RuntimeError, error_regex=error_float) yield ErrorInput(SampleInput(make_arg(M, S), make_arg(M, S, dtype=torch.float64)), error_type=RuntimeError, error_regex=error_dtype) yield ErrorInput(SampleInput(make_arg(M, S, dtype=torch.float64), make_arg(M, S, dtype=torch.float64), out=make_arg(M, S, dtype=torch.complex64)), error_type=RuntimeError, error_regex=error_out) def sample_inputs_logaddexp(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) shape = (S, S) yield SampleInput(make_arg(shape), make_arg(shape)) def sample_inputs_prod(op_info, device, dtype, requires_grad, **kwargs): def make_arg(shape): # shrink values to be in the interval [-1, +1] for better precision in gradgradcheck return make_tensor(shape, dtype=dtype, device=device, low=-1, high=+1, requires_grad=requires_grad) def prod_single_zero(): result = make_arg(2 * (S,)) result[0, 1] = 0 return result for sample in sample_inputs_cumprod(op_info, device, dtype, requires_grad): # only Tensor, ignore other inputs yield SampleInput(sample.input.clone().requires_grad_(requires_grad)) yield sample # Generates samples with keepdim = True for sample in sample_inputs_cumprod(op_info, device, dtype, requires_grad): sample.kwargs['keepdim'] = True yield sample yield SampleInput(prod_single_zero()) yield SampleInput(make_arg((3, 3, 3)), args=(1,)) yield SampleInput(make_arg((3, 3, 3)), args=(1,), kwargs={'keepdim': True}) yield SampleInput(make_arg((3, 0)), args=(1,)) yield SampleInput(make_arg((3, 0)), args=(1,), kwargs={'keepdim': True}) yield SampleInput(torch.tensor([2., 3, 0, 0], dtype=dtype, device=device, requires_grad=requires_grad)) # test zero scalar tensor zero = make_arg(()) zero.zero_() yield 
SampleInput(zero.clone().requires_grad_(requires_grad)) yield SampleInput(zero.clone().requires_grad_(requires_grad), args=(0,)) yield SampleInput(zero.clone().requires_grad_(requires_grad), args=(0,), kwargs={'keepdim': True}) def error_inputs_neg(op_info, device, **kwargs): si = SampleInput(torch.tensor((False, True), device=device)) msg = ("Negation, the `\\-` operator, on a bool tensor is not supported." " If you are trying to invert a mask, use the `\\~` or" " `logical_not\\(\\)` operator instead.") yield ErrorInput(si, error_regex=msg) def sample_inputs_diag(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad, low=None, high=None) yield SampleInput(make_arg(M)) tensors = ( make_arg((M, M)), make_arg((3, 5)), make_arg((5, 3)), ) args = ((), (2,), (-2,), (1,), (2,)) for tensor, arg in product(tensors, args): yield SampleInput(tensor.clone().requires_grad_(requires_grad), *arg) def reference_inputs_diagonal_diag_embed(op_info, device, dtype, requires_grad, **kwargs): yield from sample_inputs_diagonal_diag_embed( op_info, device, dtype, requires_grad, **kwargs) make_arg = partial( make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) shapes1d = ((0,), (1,)) shapes2d = ((L, M),) shapes3d = ((L, M, S),) kwargs1d = {} kwargs2d = ( # dim1 > dim2 is allowed dict(dim1=1, dim2=0), # negative dims are allowed dict(dim1=-2, dim2=-1), # one dim negative and the other nonnegative is allowed dict(dim1=-1, dim2=0), # out of bounds offset should return an empty tensor in diagonal and # offset the diagonal in diag_embed dict(offset=100), ) kwargs3d = kwargs2d + ( # make sure we can use non-sequential dims dict(offset=-1, dim1=0, dim2=2), ) samples1d = product(shapes1d, kwargs1d) samples2d = product(shapes2d, kwargs2d) samples3d = product(shapes3d, kwargs3d) for shape, kwargs in chain(samples1d, samples2d, samples3d): if 'diagonal' in op_info.name: # these are error inputs for diagonal if shape in ((0,), (1,)): continue yield SampleInput(input=make_arg(shape), kwargs=kwargs) def sample_inputs_diagonal_scatter(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) # Shapes for 2D Tensors shapes_2d = ((M, M), (3, 5), (5, 3)) # Shapes for 3D Tensors shapes_3d = ((M, M, M),) args_2d = ((), (2,), (-2,), (1,)) args_3d = ((1, 1, 2), (2, 0, 1), (-2, 0, 1)) for input_shape, arg in chain(product(shapes_2d, args_2d), product(shapes_3d, args_3d)): input_ = make_arg(input_shape) # We can programmatically figure out the right shape for src: # It should be the same size as input.diagonal(other_args...) 
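        # For example (illustrative, shapes only):
        #   >>> torch.zeros(5, 5).diagonal(2).shape
        #   torch.Size([3])
        #   >>> torch.zeros(4, 4, 4).diagonal(1, 0, 1).shape   # (offset, dim1, dim2)
        #   torch.Size([4, 3])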
if not isinstance(arg, tuple): arg_tuple = (arg,) else: arg_tuple = arg src_shape = input_.diagonal(*arg_tuple).size() src = make_arg(src_shape) yield SampleInput(input_, args=(src, *arg_tuple)) def sample_inputs_to_sparse(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) yield SampleInput(make_arg((S, S))).with_metadata(output_process_fn_grad=lambda x: x.to_dense()) yield SampleInput(make_arg((S, S)), 1).with_metadata(output_process_fn_grad=lambda x: x.to_dense()) def sample_inputs_cross_entropy(op_info, device, dtype, requires_grad, **kwargs): batch_size, num_classes = shape = (2, 3) reductions = ("mean", "sum", "none") input_shape_and_kwargs: list[tuple[tuple[int, ...], dict[str, Any]]] = [ (shape, {}), ((*shape, 1), {}), ((*shape, 1, 2), {}), ((*shape, 1, 2, 3), {}), *[(shape, dict(reduction=reduction)) for reduction in reductions], *[ ( shape, dict( weight=make_tensor((num_classes,), device=device, dtype=dtype), reduction=reduction, ), ) for reduction in reductions ], (shape, dict(ignore_index=1)), ] for (input_shape, kwargs), probabilities_target in itertools.product(input_shape_and_kwargs, (False, True)): input = make_tensor(input_shape, device=device, dtype=dtype, requires_grad=requires_grad) if probabilities_target: # ignore_index is not supported for probabilities target if "ignore_index" in kwargs: continue target = make_tensor( input_shape, low=0, high=1, device=device, dtype=dtype, requires_grad=requires_grad, ) else: target = make_tensor( (batch_size, *input_shape[2:]), low=0, high=num_classes, device=device, dtype=torch.long, ) if "ignore_index" in kwargs and torch.all(target == kwargs["ignore_index"]): # make sure at least one item in target is not ignored target[0] = random.sample(sorted(set(range(num_classes)) - {kwargs["ignore_index"]}), 1)[0] yield SampleInput(input, target, **kwargs) def sample_inputs_logit(op_info, device, dtype, requires_grad, **kwargs): low, high = op_info.domain # Note: Operator is very sensitive at points near the # start and end of domain and leads to NaN for float16 # if domain_eps is 1e-5. if dtype.is_floating_point or dtype.is_complex: domain_eps = op_info._domain_eps if dtype != torch.float16 else 3e-2 low = low + domain_eps high = high - domain_eps make_arg = partial(make_tensor, dtype=dtype, device=device, low=low, high=high, requires_grad=requires_grad) yield SampleInput(make_arg((S, S, S))) yield SampleInput(make_arg((S, S, S)), 0.2) yield SampleInput(make_arg(())) yield SampleInput(make_arg(()), 0.2) def sample_inputs_isin(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) # isin has two paths based on the size of elements and test_elements. 
# if elements.numel() < 10 * pow(test_elements.numel(), 0.145): yield SampleInput(make_arg((L,)), args=(make_arg((S,)),)) # else: yield SampleInput(make_arg((S,)), args=(make_arg((L,)),)) def sample_inputs_masked_scatter(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) yield SampleInput(make_arg((S, S)), args=(torch.randn(S, S, device=device) > 0, make_arg((S, S)))) yield SampleInput(make_arg((S, S)), args=(torch.randn((S,), device=device) > 0, make_arg((S, S)))) yield SampleInput(make_arg((S, S)), args=(bernoulli_scalar().to(device), make_arg((S, S)))) yield SampleInput(make_arg((S,)), args=(torch.randn(S, S, device=device) > 0, make_arg((S, S))), broadcasts_input=True) def error_inputs_masked_scatter(op_info, device, **kwargs): make_arg = partial(make_tensor, device=device, dtype=torch.float) for mask_dtype in [torch.float, torch.uint8]: yield ErrorInput(SampleInput(make_arg(1, 3), args=(torch.ones(1, 3, device=device, dtype=mask_dtype), make_arg(3, 4))), error_regex=r"masked_scatter_ only supports boolean masks") def sample_inputs_masked_fill(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) yield SampleInput(make_arg((S, S)), args=(torch.randn(S, S, device=device) > 0, 10)) yield SampleInput(make_arg((S, S)), args=(torch.randn(S, S, device=device) > 0, make_arg(()))) yield SampleInput(make_arg((S, S)), args=(torch.randn(S, device=device) > 0, 10)) yield SampleInput(make_arg(()), args=(torch.randn((), device=device) > 0, 10)) yield SampleInput(make_arg(()), args=(torch.randn((), device=device) > 0, make_arg(()))) yield SampleInput(make_arg((S, S)), args=(torch.randn((), device=device) > 0, 10)) yield SampleInput(make_arg((S,)), args=(torch.randn(S, S, device=device) > 0, make_arg(())), broadcasts_input=True) yield SampleInput(make_arg((S,)), args=(torch.randn(S, S, device=device) > 0, 10), broadcasts_input=True) if torch.device(device).type == 'cuda': # `self` and `mask` on CUDA but `value` is a CPU scalar tensor. yield SampleInput(make_arg((S, S)), args=(torch.randn(S, S, device=device) > 0, make_tensor((), device="cpu", dtype=dtype))) def error_inputs_masked_fill(op_info, device, **kwargs): make_arg = partial(make_tensor, device=device, dtype=torch.float, requires_grad=False) # `value` is not a 0-D tensor. yield ErrorInput(SampleInput(make_arg((2, 2)), args=(make_arg(()) > 0, make_arg((1,)))), error_regex="only supports a 0-dimensional value tensor, but got tensor with 1 dimension") # downcasting complex value (scalar overload) yield ErrorInput(SampleInput(make_arg((2, 2)), args=(make_arg(()) > 0, 1j)), error_regex=r"value cannot be converted to type .* without overflow") # downcasting complex value (tensor overload) yield ErrorInput(SampleInput(torch.ones(2, dtype=torch.long, device=device), args=(make_arg(()) > 0, torch.tensor(1j, device=device))), error_regex=r"value cannot be converted to type .* without overflow") if torch.device(device).type == 'cuda': # `self` and `mask` on CPU but `value` is a CUDA scalar tensor. 
yield ErrorInput(SampleInput(torch.randn((S, S), device='cpu'), args=(torch.randn(S, S, device='cpu') > 0, torch.randn((), device='cuda'))), error_regex=r"to be on same device") def sample_inputs_masked_select(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial( make_tensor, device=device, dtype=dtype, requires_grad=requires_grad, low=None, high=None) yield SampleInput(make_arg((M, M)), torch.randn(M, M, device=device) > 0) yield SampleInput(make_arg((M, M)), torch.randn((M,), device=device) > 0) yield SampleInput(make_arg((M,)), torch.randn((M, M), device=device) > 0) yield SampleInput(make_arg((M, 1, M)), torch.randn((M, M), device=device) > 0) yield SampleInput(make_arg(()), torch.tensor(1, device=device, dtype=torch.bool)) yield SampleInput(make_arg((M, M)), torch.tensor(1, device=device, dtype=torch.bool)) yield SampleInput(make_arg(()), torch.randn((M, M), device=device) > 0) def sample_inputs_matrix_exp(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) yield SampleInput(make_arg((S, S))) yield SampleInput(make_arg((S, S, S))) def sample_inputs_matmul(op_info, device, dtype, requires_grad, is_rmatmul=False, **kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad) test_cases = (((L,), (L,)), ((S, M), (M,)), ((M,), (M, S)), ((S, M), (M, S)), ((S, 0), (0, M)), ((S, S, M), (M,)), ((S, S, M), (M, S)), ((S, S, 0), (0, S)), ((M,), (S, M, S)), ((S, M), (S, M, S)), ((0, 0), (S, 0, 0)), ((S, S, M, M), (S, S, M, S)), ((S, S, M, M), (M,)), ((M,), (S, S, M, S)), ((S, S, S), (1, S, S)) ) for lhs_shape, rhs_shape in test_cases: lhs = make_arg(lhs_shape) rhs = make_arg(rhs_shape) if not is_rmatmul: yield SampleInput(lhs, rhs) else: yield SampleInput(rhs, lhs) def sample_inputs_meshgrid(op_info: OpInfo, device: torch.device, dtype: torch.dtype, requires_grad: bool, *, variant: str, **kwargs) -> list[SampleInput]: if variant == 'variadic': def make_inputs( tensors: list[torch.Tensor]) -> tuple[Union[torch.Tensor, list[torch.Tensor]], tuple[torch.Tensor, ...]]: return tensors elif variant == 'list': def make_inputs( tensors: list[torch.Tensor]) -> tuple[Union[torch.Tensor, list[torch.Tensor]], tuple[torch.Tensor, ...]]: return [tensors] else: raise ValueError( 'Unsupported variant, must be one of {"variadic", "list"}. ' f'Got "{variant}".') SCALAR = torch.Size([]) VECTOR = torch.Size([3]) test_cases: list[list[torch.Size]] = [ [SCALAR], [VECTOR], [VECTOR, SCALAR], [VECTOR, SCALAR, VECTOR], [VECTOR, SCALAR, VECTOR, SCALAR], ] for shapes, indexing in itertools.product(test_cases, {'xy', 'ij'}): args = make_inputs( [make_tensor(shape, dtype=dtype, device=device, requires_grad=requires_grad) for shape in shapes]) yield SampleInput(*args, indexing=indexing) def sample_inputs_mvlgamma(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) tensor_shapes = ((S, S), ()) ns = (1, 2, 3, 4, 5) # Since the accepted lower bound for input # to mvlgamma depends on `p` argument, # the following function computes the lower bound # which we pass to `make_tensor`. def compute_min_val(p): return (p - 1.) 
/ 2 for shape, n in product(tensor_shapes, ns): min_val = compute_min_val(n) if not dtype.is_floating_point: # Round-up minimum value for integral dtypes min_val += 1 else: min_val += 2 * torch.finfo(dtype).eps yield SampleInput(make_arg(shape, low=min_val), args=(n,)) # Since `mvlgamma` has multiple entries, # there are multiple common skips for the additional # entries. Following function is a helper to that end. def skips_mvlgamma(skip_redundant=False): skips = ( # outside domain values are hard error for mvlgamma op. DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_float_domains'), DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs', 'test_reference_numerics_extremal'), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', dtypes=(torch.float16, torch.int8)), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small', dtypes=(torch.int8,)), ) if skip_redundant: # Redundant tests skips = skips + ( # type: ignore[assignment] DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients'), DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients'), DecorateInfo(unittest.skip("Skipped!"), 'TestJit'), DecorateInfo(unittest.skip("Skipped!"), 'TestCommon'), ) return skips # To test reference numerics against multiple values of argument `p`, # we make multiple OpInfo entries with each entry corresponding to different value of p. # We run the op tests from test_ops.py only for `p=1` to avoid redundancy in testing. def make_mvlgamma_opinfo(variant_test_name, domain, skips, sample_kwargs): return UnaryUfuncInfo('mvlgamma', ref=reference_mvlgamma if TEST_SCIPY else None, aliases=('special.multigammaln',), variant_test_name=variant_test_name, domain=domain, decorators=(precisionOverride({torch.float16: 5e-2}),), dtypes=all_types_and(torch.half, torch.bfloat16), dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16), sample_inputs_func=sample_inputs_mvlgamma, supports_forward_ad=True, supports_fwgrad_bwgrad=True, promotes_int_to_float=True, skips=skips, sample_kwargs=sample_kwargs) def sample_inputs_cumulative_ops(op_info, device, dtype, requires_grad, supports_dtype_kwargs=True, **kwargs): def _make_tensor_helper(shape, low=None, high=None): return make_tensor(shape, dtype=dtype, device=device, low=low, high=high, requires_grad=requires_grad) yield SampleInput(_make_tensor_helper((S, S, S)), 0) yield SampleInput(_make_tensor_helper((S, S, S)), 1) yield SampleInput(_make_tensor_helper(()), 0) if supports_dtype_kwargs: # NOTE: if `dtype` is not same as input, then inplace variants fail with # `provided dtype must match the dtype of self tensor in cumsum` yield SampleInput(_make_tensor_helper((S, S, S)), 1, dtype=dtype) def sample_inputs_unfold(op_info, device, dtype, requires_grad, **kwargs): test_cases = ( ((), (0, 1, 1)), ((S, S, S, S), (0, 3, 1)), ((S, S, S, S), (1, 3, 1)), ((S, S, S, S), (2, 3, 1)), ((S, S, S, S), (3, 3, 1)), ((S, S, S, S), (0, 3, 2)), ((S, S, S, S), (1, 3, 2)), ((S, S, S, S), (2, 3, 2)), ((S, S, S, S), (3, 3, 2)), ((S, S, S, S), (0, 4, 1)), ((S, S, S, S), (1, 4, 1)), ((S, S, S, S), (2, 4, 1)), ((S, S, S, S), (3, 4, 1)), ((M,), (0, 3, 1)), ((M,), (0, 3, 2)), ((M,), (0, 3, 3)), ((1000,), (0, 3, 11)), ((1000,), (0, 2, 27)), ((10, 10), (0, 1, 2)), ((10, 10), (1, 2, 3)), ((10, 10), (1, 2, 2)), ((S, S, S), (2, 3, 2)), ) for shape, arguments in test_cases: yield SampleInput(make_tensor(shape, dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad), *arguments) def 
sample_inputs_split(op_info, device, dtype, requires_grad, *, list_args=False, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) if list_args: cases = ( ((S, S, S), (torch.Size([int(S / 3), S - int(S / 3) * 2, int(S / 3)]),)), ((S, S, S), (torch.Size([int(S / 2), S - int(S / 2) * 2, int(S / 2)]), 2),), ((S, S, S), (torch.Size([int(S / 2), S - int(S / 2) * 2, int(S / 2)]), -2),) ) else: cases = ( # type: ignore[assignment] ((S, S, S), (2,)), ((S, S, S), (S, 1)), ) for shape, args in cases: yield SampleInput(make_arg(shape), args=args) def sample_inputs_split_with_sizes(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) cases = (((S, S, S), (torch.Size([int(S / 3), S - int(S / 3) * 2, int(S / 3)]),)), ((S, S, S), (torch.Size([int(S / 3), S - int(S / 3), 0]),)), ((S, S, S), (torch.Size([int(S / 3), S - int(S / 3) * 2, int(S / 3)]), 2)), ((S, S, S), (torch.Size([int(S / 3), S - int(S / 3) * 2, int(S / 3)]), -2)), ) for shape, args in cases: yield SampleInput(make_arg(shape), args=args) def sample_inputs_msort(op_info, device, dtype, requires_grad, **kwargs): def apply_grad(t): if dtype in floating_types_and(torch.float16, torch.bfloat16): t.requires_grad_(requires_grad) def large_1d_unique(dtype, device): res = torch.randperm(L * L * L, dtype=torch.int64, device=device) res = res.to(dtype) apply_grad(res) return res # Test case for large tensor. yield SampleInput(large_1d_unique(dtype, device)) yield SampleInput(make_tensor((S, M, S), dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad)) def sample_inputs_lerp(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) # no broadcast yield SampleInput(make_arg((S, S)), make_arg((S, S)), 0.4) # broadcast rhs yield SampleInput(make_arg((S, S)), make_arg((S,)), 0.4) # scalar tensor yield SampleInput(make_arg(()), make_arg(()), 0.4) # broadcast rhs scalar-tensor yield SampleInput(make_arg((S, S)), make_arg(()), 0.4) # broadcast rhs with weight tensor yield SampleInput(make_arg((S, S)), make_arg((S,)), make_arg((S, S))) # broadcast rhs and weight tensor yield SampleInput(make_arg((S, S)), make_arg((S, 1)), make_arg((S,))) # broadcast lhs yield SampleInput(make_arg((S,)), make_arg((S, S)), 0.4).with_metadata(broadcasts_input=True) # scalar broadcast_lhs yield SampleInput(make_arg(()), make_arg((S, S)), 0.4).with_metadata(broadcasts_input=True) # broadcast all yield SampleInput(make_arg((S, 1)), make_arg((S, S)), 0.4).with_metadata(broadcasts_input=True) # tensor broadcast all yield SampleInput(make_arg((S, 1)), make_arg((S, S)), make_arg((S, 1))).with_metadata( broadcasts_input=True) # no broadcast with weight tensor yield SampleInput(make_arg((S, S)), make_arg((S, S)), make_arg((S, S))) # broadcast lhs with weight tensor yield SampleInput(make_arg((S,)), make_arg((S, S)), make_arg((S, S))).with_metadata( broadcasts_input=True) # broadcast lhs and weight tensor yield SampleInput(make_arg((S,)), make_arg((S, S, S)), make_arg((S, S))).with_metadata( broadcasts_input=True) # broadcast lhs and weight tensor variant yield SampleInput(make_arg((S, S)), make_arg((S, S, S)), make_arg((S,))).with_metadata( broadcasts_input=True) if dtype.is_complex: # no broadcast yield SampleInput(make_arg((S, S)), make_arg((S, S)), 0.4j) yield SampleInput(make_arg((S, S)), make_arg((S, S)), 1.2 + 0.1j) # broadcast rhs yield 
SampleInput(make_arg((S, S)), make_arg((S,)), 0.4j) yield SampleInput(make_arg((S, S)), make_arg((S, S)), 5.4 + 9j) # scalar tensor yield SampleInput(make_arg(()), make_arg(()), 0.4j) yield SampleInput(make_arg(()), make_arg(()), 6.1 + 0.004j) # broadcast rhs scalar-tensor yield SampleInput(make_arg((S, S)), make_arg(()), 0.4j) yield SampleInput(make_arg((S, S)), make_arg(()), 1 + 2j) def sample_inputs_tensordot(self, device, dtype, requires_grad, **kwargs): cases = ( ((2, 2, 2), (2, 2, 2), (2)), ((2, 2, 1), (2, 1, 2), ([0, 1], [2, 0])), ) for first_shape, second_shape, dims in cases: yield SampleInput(make_tensor(first_shape, dtype=dtype, device=device, requires_grad=requires_grad), make_tensor(second_shape, dtype=dtype, device=device, requires_grad=requires_grad), dims=dims) def sample_inputs_kron(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial( make_tensor, dtype=dtype, device=device, requires_grad=requires_grad, low=None, high=None) test_cases = ( ((S, S), (M, L)), ) for input_shape, other_shape in test_cases: input = make_arg(input_shape) other = make_arg(other_shape) yield SampleInput(input, other) def sample_inputs_inner(self, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) yield SampleInput(make_arg(S), make_arg(S)) yield SampleInput(make_arg(), make_arg(S, S)) def sample_inputs_scatter(op_info, device, dtype, requires_grad, **kwargs): def _tensor(shape, dtype=dtype, low=None, high=None): return make_tensor(shape, dtype=dtype, device=device, low=low, high=high, requires_grad=requires_grad) def _gather(shape, index_dim, max_indices): return gather_variable(shape, index_dim, max_indices, device=device) zero = torch.tensor(0, dtype=torch.long, device=device) test_cases = ( (_tensor((M, S)), (0, _gather((S, S), 1, M), _tensor((S, S)))), (_tensor((M, S)), (1, _gather((S, S), 0, S), _tensor((S, S)))), (_tensor((M, S)), (-1, _gather((S, S), 0, S), _tensor((S, S)))), (_tensor((M, S)), (0, _gather((M, S // 2), 1, M), _tensor((M, S // 2)))), (_tensor((M, S)), (1, _gather((M, S // 2), 0, S), _tensor((M, S // 2)))), (_tensor((M, S)), (-1, _gather((M, S // 2), 0, S), _tensor((M, S // 2)))), (_tensor(()), (0, zero.detach().clone(), _tensor(()))), (_tensor(()), (0, zero.detach().clone(), 2.5)), ) for tensor, args in test_cases: yield SampleInput(tensor, *args) if not requires_grad: yield SampleInput(tensor.detach().clone(), *args, reduce='add') if dtype.is_floating_point: yield SampleInput(tensor.detach().clone(), *args, reduce='multiply') def sample_inputs_scatter_add(op_info, device, dtype, requires_grad, **kwargs): def _tensor(shape, dtype=dtype, low=None, high=None): return make_tensor(shape, dtype=dtype, device=device, low=low, high=high, requires_grad=requires_grad) def _gather(shape, index_dim, max_indices): return gather_variable(shape, index_dim, max_indices, device=device) zero = torch.tensor(0, dtype=torch.long, device=device) yield SampleInput(_tensor((M, S)), 0, _gather((S, S), 1, M), _tensor((S, S))) yield SampleInput(_tensor((M, S)), 1, _gather((S, S), 0, S), _tensor((S, S))) yield SampleInput(_tensor((M, S)), -1, _gather((S, S), 0, S), _tensor((S, S))) yield SampleInput(_tensor((M, S)), 0, _gather((M, S // 2), 1, M), _tensor((M, S // 2))) yield SampleInput(_tensor((M, S)), 1, _gather((M, S // 2), 0, S), _tensor((M, S // 2))) yield SampleInput(_tensor((M, S)), -1, _gather((M, S // 2), 0, S), _tensor((M, S // 2))) yield SampleInput(_tensor(()), 0, zero.detach().clone(), 
_tensor(()))


def sample_inputs_scatter_reduce(op_info, device, dtype, requires_grad, **kwargs):
    make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad)
    gather = partial(gather_variable, device=device)

    zero = torch.tensor(0, dtype=torch.long, device=device)
    test_cases = (
        ((M, S), 0, gather((S, S), 1, M), (S, S)),
        ((M, S), 1, gather((S, S), 0, S), (S, S)),
        ((M, S), -1, gather((S, S), 0, S), (S, S)),
        ((M, S), 0, gather((M, S // 2), 1, M), (M, S // 2)),
        ((M, S), 1, gather((M, S // 2), 0, S), (M, S // 2)),
        ((M, S), -1, gather((M, S // 2), 0, S), (M, S // 2)),
        ((), 0, zero.detach().clone(), ()),
    )

    reduce = op_info.variant_test_name
    for (inp_shape, dim, index, src_shape), include_self in product(test_cases, [False, True, False]):
        yield SampleInput(make_arg(inp_shape),
                          args=(dim, index, make_arg(src_shape), reduce),
                          kwargs={'include_self': include_self})

    # Sample inputs to test edge cases for backward
    # Check that gradients are propagated correctly for prod when zeros in self/src are reduced
    if requires_grad and reduce == 'prod':
        # This sample tests gradients for the following cases
        # (a) 1 zero reduced (from src (self[0, 1], self[1, 1]), from self (self[0, 0], self[2, 0]))
        # (b) 2 zeros reduced (1 from src and 1 from self (self[1, 0]))
        # (c) no zeros reduced (self([2, 1]))
        # (d) 2 zeros reduced (both from src) is tested in test/test_autograd.py
        # test_scatter_index_reduce_prod_gradgrad_error as this case is not supported for gradgrad
        input = torch.tensor([[0, 13], [0, 17], [0, 19]], dtype=dtype, device=device, requires_grad=requires_grad)
        src = torch.tensor([[0, 1, 2, 3], [0, 4, 0, 1], [2, 3, 5, 6]], dtype=dtype, device=device, requires_grad=requires_grad)
        idx = torch.tensor([[1, 1, 0, 0], [0, 0, 1, 1], [0, 0, 0, 1]], dtype=torch.long, device=device)

        yield SampleInput(input,
                          args=(1, idx, src, reduce),
                          kwargs={'include_self': True})


def sample_inputs_segment_reduce(op_info, device, dtype, requires_grad, *, mode='lengths', **kwargs):
    def _tensor(shape, dtype=dtype, low=None, high=None):
        return make_tensor(shape, dtype=dtype, device=device, low=low, high=high, requires_grad=requires_grad)

    test_cases = (
        # inp_shape, dim, lengths, unsafe
        ((S,), 0, [0, 1, 2, 2], False),
        ((S,), 0, [0, 1, 2, 2], True),
        ((S,), 0, [2, 0, 3, 0], False),
        ((S, S), 0, [0, 1, 2, 2], False),
        # test when lengths do not sum to dim size
        ((M, S, S), 0, [1, 2, 0, 6, 0], True),
        # test for higher dimensions
        ((S, S), 1, [[0, 1, 2, 2] for _ in range(S)], False),
        ((S, S), 1, [[2, 0, 3, 0], [0, 1, 2, 2], [3, 0, 2, 0], [1, 1, 1, 2], [0, 1, 2, 2]], False),
        ((S, S, S), 1, [[0, 1, 2, 2] for _ in range(S)], False),
        ((S, S, S), 1, [[2, 0, 3, 0], [0, 1, 2, 2], [3, 0, 2, 0], [1, 1, 1, 2], [0, 1, 2, 2]], False),
    )
    reductions = ["max", "mean", "min", "sum", "prod"]
    for args, reduce, initial in product(test_cases, reductions, [1, 2]):
        inp_shape, dim, lengths, unsafe = args
        lengths_t = torch.tensor(lengths, dtype=torch.long, device=device)

        sample_input_kwargs = {'axis': dim, 'unsafe': unsafe, 'initial': initial}
        if mode == 'lengths':
            sample_input_kwargs['lengths'] = lengths_t
        elif mode == 'offsets':
            zeros_shape = list(lengths_t.shape)
            zeros_shape[dim] = 1
            offsets_t = torch.cat((lengths_t.new_zeros(zeros_shape), lengths_t), dim).cumsum_(dim)
            sample_input_kwargs['offsets'] = offsets_t
        else:
            raise RuntimeError(f"mode must be one of 'offsets' or 'lengths' got '{mode}'.")

        yield SampleInput(_tensor(inp_shape),
                          args=(reduce,),
                          kwargs=sample_input_kwargs)


def sample_inputs_ravel(op_info, device, dtype, requires_grad,
**kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device, low=None, high=None, requires_grad=requires_grad) yield SampleInput(make_arg((S, S, S))) yield SampleInput(make_arg(())) yield SampleInput(make_arg((S, S, S), noncontiguous=True)) def sample_inputs_unravel_index(op_info, device, dtype, requires_grad, **kwargs): yield SampleInput( torch.tensor( [[3, 8, 13], [0, 5, 10]], device=device, dtype=dtype), (4, 5)) yield SampleInput( torch.tensor([[3, 8, 13], [0, 5, 10]], device=device, dtype=dtype), (4, 2**30)) yield SampleInput( torch.tensor([[3, 8, 13], [0, 5, 10]], device=device, dtype=dtype), (2**30, 4)) yield SampleInput( torch.tensor(2, device=device, dtype=dtype), (2, 2)) max_val = 2**(8 * dtype.itemsize - (1 if dtype.is_signed else 0)) - 1 yield SampleInput( torch.tensor(max_val - 1, device=device, dtype=dtype), (1, max_val)) yield SampleInput( torch.tensor([22, 41, 37], device=device, dtype=dtype), (7, 6)) yield SampleInput( torch.tensor(min(1621, max_val), device=device, dtype=dtype), (6, 7, 8, 9)) yield SampleInput( torch.tensor([], device=device, dtype=dtype), (10, 3, 5)) yield SampleInput( torch.tensor( [[1, 0, 1, 2, 3, 4], [1, 6, 1, 3, 2, 0]], device=device, dtype=dtype), (5, 8)) yield SampleInput( torch.tensor( [[1, 0, 1, 2, 3, 4], [1, 6, 1, 3, 2, 0], [1, 3, 1, 0, 9, 5]], device=device, dtype=dtype), (5, 8, 10)) yield SampleInput( torch.tensor(0, device=device, dtype=dtype), ()) a = np.array([[2, 4, 5, 6], [7, 8, 1, 15]]) b = np.array([[3, 2, 7, 6], [10, 12, 8, 9]]) _, i1, i2 = np.intersect1d(a, b, assume_unique=True, return_indices=True) yield SampleInput(torch.tensor(i1, device=device, dtype=dtype), a.shape) yield SampleInput(torch.tensor(i2, device=device, dtype=dtype), b.shape) a = np.array([[2, 4, 5, 6, 6], [4, 7, 8, 7, 2]]) b = np.array([[3, 2, 7, 7], [10, 12, 8, 7]]) _, i1, i2 = np.intersect1d(a, b, return_indices=True) yield SampleInput(torch.tensor(i1, device=device, dtype=dtype), a.shape) yield SampleInput(torch.tensor(i2, device=device, dtype=dtype), b.shape) def sample_inputs_tril_triu(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) cases = (((M, M), ()), ((M, M), (2,),), ((M, S), ()), ((M, S), (-1,)), ((M, M), (2,),), ((S, M, S), ()), ((S, M, S), (2,)), ((3, 3, S, S), ()),) for shape, args in cases: yield SampleInput(make_arg(shape), args=args) def error_inputs_tril_triu(opinfo, device, **kwargs): make_arg = partial(make_tensor, device=device, dtype=torch.float32) # error inputs for input.ndim <= 2 yield ErrorInput(SampleInput(make_arg((4,))), error_regex="input tensor must have at least 2 dimensions") def sample_inputs_trilu_indices(op_info, device, dtype, requires_grad, **kwargs): # (row, col, offset) args_list = ((0, 0), (20, 0), (0, 20), (20, 21, 0), (20, 21, 7), (20, 21, -7), # Large test cases below are deliberately commented out to speed up CI # tests and to avoid OOM error. When modifying implementations of # tril_indices and triu_indices, please enable these tests and make sure # they pass. 
# (2, 68435455, 3), # (5000, 5000), # (5000, 5000, 1234), # (5000, 5000, -1233), ) for args in args_list: yield SampleInput(args[0], args=args[1:], kwargs={"dtype": dtype, "device": device}) def sample_inputs_clone_contiguous(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) yield SampleInput(make_arg((S, M, S))) yield SampleInput(make_arg(())) def reference_inputs_clone_contiguous(op, device, dtype, requires_grad, **kwargs): # NOTE: the default memory format for clone is torch.preserve_format, for contiguous it's torch.contiguous_format # This exploits that default to test torch.preserve_format for clone, without causing an error when testing contiguous yield from sample_inputs_clone_contiguous(op, device, dtype, requires_grad, **kwargs) shapes = ( (3, 5, 6), (1, 1, 3, 5, 6), (1, 1, 3, 5, 6, 1, 1), (1, 0, 3, 5, 0, 2), (1, 0, 3, 5, 0, 0, 1, 1, 2), (), ) make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) for shape in shapes: yield SampleInput(make_arg(shape)) yield SampleInput(make_arg(shape).transpose(0, -1)) yield SampleInput(make_arg(shape, noncontiguous=True)) yield SampleInput(make_arg(shape, noncontiguous=True).transpose(0, -1)) yield SampleInput(make_arg(shape), kwargs={'memory_format': torch.contiguous_format}) yield SampleInput(make_arg(shape).transpose(0, -1), kwargs={'memory_format': torch.contiguous_format}) yield SampleInput(make_arg(shape, noncontiguous=True), kwargs={'memory_format': torch.contiguous_format}) yield SampleInput(make_arg(shape, noncontiguous=True).transpose(0, -1), kwargs={'memory_format': torch.contiguous_format}) # shape, strides, offset strided_cases = ( ((5, 6, 2), (1, 1, 7), 2), ((5, 5, 4), (1, 1, 7), 2), ((5, 5, 2), (4, 5, 7), 3), ((5, 5, 2), (5, 5, 7), 3), ((5, 5, 2), (5, 5, 5), 3), ((9, 5, 2), (0, 1, 7), 3), ) for shape, strides, offset in strided_cases: yield SampleInput(make_arg(500,).as_strided(shape, strides, offset)) yield SampleInput(make_arg(500,).as_strided(shape, strides, offset), kwargs={'memory_format': torch.contiguous_format}) # channels last 2D yield SampleInput(make_arg((2, 2, 2, 2)), kwargs={'memory_format': torch.channels_last}) a = make_arg((2, 2, 2, 2)).permute(0, 3, 1, 2) yield SampleInput(a, kwargs={'memory_format': torch.channels_last}) # channels last 3D yield SampleInput(make_arg((2, 2, 2, 2, 2)), kwargs={'memory_format': torch.channels_last_3d}) a = make_arg((2, 2, 2, 2, 2)).permute(0, 4, 1, 2, 3) yield SampleInput(a, kwargs={'memory_format': torch.channels_last_3d}) def sample_inputs_sum_to_size(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) # list of tuples (shape, shape) defining the shapes of the input and output tensors sample_shapes = [ ((), ()), ((S,), (1,)), ((S, S), (1, 1)), ((S, S), (1, S)), ((S, S), (S, S)), ((S, S, S), (S, 1, S)), ] for input_shape, output_shape in sample_shapes: yield SampleInput(make_arg(input_shape), args=(output_shape,)) if output_shape == (): continue yield SampleInput(make_arg(input_shape), args=(list(output_shape),)) yield SampleInput(make_arg(input_shape), args=(*output_shape,)) def error_inputs_sum_to_size(op_info, device, **kwargs): shape = (M, S, M) err_msg = "is not expandable to size" si = SampleInput(make_tensor(shape, device=device, dtype=torch.float32), args=(M, M)) yield ErrorInput(si, error_regex=err_msg) shape = (M + 1, S, S, M) err_msg = "is not expandable to size" 
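    # For example, torch.ones(2, 3).sum_to_size((3,)) sums out the leading dim and
    # returns a tensor of shape (3,), while a target such as (4,) is rejected with
    # the "is not expandable to size" error checked here.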
si = SampleInput(make_tensor(shape, device=device, dtype=torch.float32), args=(M + 1, 1)) yield ErrorInput(si, error_regex=err_msg) def sample_inputs_resize_ops(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device) cases = (((S, S, S), (S * S, S)), ((), ()), ((), (1, 1, 1)), ) for shape, args_or_shape in cases: # Update `args` based on operator if op_info.name == 'resize_': # resize_ takes shape/tuple of ints, args = (args_or_shape, ) elif op_info.name == 'resize_as_': # resize_as_ takes another tensor args = (make_arg(shape, requires_grad=False), ) # type:ignore[assignment] else: raise ValueError("sample_inputs_resize_ops is being used with incorrect operator") yield SampleInput(make_arg(shape, requires_grad=requires_grad), args=args) def sample_inputs_view_reshape(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) cases = ( # a, b, is_tensor_supported ((S, S, S), (S * S, S), True), ((S * S, S), (S, S, S), True), ((S * S, S), (S, -1, S), False), # neg index ((S * S * 2, S), (S, -1), False), # neg index ((S,), (S,), True), ((), (), False), # empty ((), (1,), True), ) for a, b, is_tensor_supported in cases: # skip unsupported cases if kwargs.get("tensor_arg") and not is_tensor_supported: continue # convert to tensor if kwargs.get("tensor_arg"): b = make_arg(b, requires_grad=False) yield SampleInput(make_arg(a), args=(b,)) def reference_inputs_view_reshape(op, device, dtype, requires_grad, **kwargs): yield from sample_inputs_view_reshape(op, device, dtype, requires_grad, **kwargs) cases = ( # a, b, is_tensor_supported ((125,), (25, 5), True), ((25, 25), (1, 5, 5, 1, 5, 1, 5, 1), True), ((16, 32), (2, 4, 1, 4, 4, 1, 4), True), ((16, 12), (12, 16), True), ((1, 16, 12), (12, 16), True), ((1, 5, 1, 5), (25, 1), True), ((2, 4, 2), (4, 4), True), ((1, 4), (1, 1, 2, 1, 2), True), ((3, 5, 7), (7, 5, 3), True), ((1,), (), False), # empty ((5, 0, 2, 3), (5, 0, 2, 3), True), ((2, 1, 0, 3, 1), (5, 0), True), ((1,), (), False), # empty ((4, 5, 6), (4, 5, 6, 1, 1, 1), True), ((), (1, 1, 1, 1), False), # empty ) irreversible_cases = ( ((), (-1,), False), # neg index, empty ((4, 7, 9, 1, 1), (1, 4, 3, -1, 1), False), # neg index ) make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) for a, b, is_tensor_supported in cases: # skip unsupported cases if kwargs.get("tensor_arg") and not is_tensor_supported: continue if kwargs.get("tensor_arg"): # convert to tensor yield SampleInput(make_arg(a), args=(make_arg(b, requires_grad=False),)) yield SampleInput(make_arg(b), args=(make_arg(a, requires_grad=False),)) else: yield SampleInput(make_arg(a), args=(b,)) yield SampleInput(make_arg(b), args=(a,)) for a, b, is_tensor_supported in irreversible_cases: # skip unsupported cases if kwargs.get("tensor_arg") and not is_tensor_supported: continue # convert to tensor if kwargs.get("tensor_arg"): b = make_arg(b, requires_grad=False) yield SampleInput(make_arg(a), args=(b,)) def error_inputs_view_reshape(op, device, **kwargs): cases = ( # a, b, is_tensor_supported # Reshape to different numel ((2,), (), False), # empty ((1, 3, 0), (), False), # empty ((4, 3), (4, 2), True), ((1, 3, 5), (5, 2, 2), True), # No valid inference ((1, 3, 5), (5, -1, 2), False), # neg index # Two inferred shapes ((1, 3, 5), (5, -1, -1), False), # neg index ((1), (0, -1), False), # neg index ((0, 5), (0, -1), False), # neg index ) make_arg = partial(make_tensor, 
dtype=torch.float32, device=device, requires_grad=False) for a, b, is_tensor_supported in cases: # skip unsupported cases if kwargs.get("tensor_arg") and not is_tensor_supported: continue if b == (5, -1, -1): error_regex = "only one dimension can be inferred" elif a == (0, 5): error_regex = (r"cannot reshape tensor of 0 elements into shape " r"\[0, -1\] because the unspecified dimension size " r"-1 can be any value and is ambiguous") else: # to avoid having issues with a regex shape = ', '.join(map(str, b)) size = a if type(a) is int else functools.reduce(operator.mul, a, 1) error_regex = rf"shape '\[{shape}\]' is invalid for input of size {size}" # convert to tensor if kwargs.get("tensor_arg"): b = make_arg(b, requires_grad=False) yield ErrorInput(SampleInput(make_arg(a), args=(b,)), error_type=Exception, error_regex=error_regex) def sample_inputs_atleast1d2d3d(op_info, device, dtype, requires_grad, **kwargs): shapes = ((S, S, S, S), (S, S, S), (S, S), (S, ), (),) make_tensor_partial = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) for shape in shapes: yield SampleInput(make_tensor_partial(shape)) yield SampleInput([make_tensor_partial(shape) for shape in shapes]) def sample_inputs_column_stack(op_info, device, dtype, requires_grad, **kwargs): cases: tuple[tuple, tuple] = ( # type: ignore[assignment] ((S, 2, 1), (S, 3, 1)), ((S), (S, 5)), ((), (1, S)) ) make_tensor_partial = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) for shape1, shape2 in cases: yield SampleInput([make_tensor_partial(shape1), make_tensor_partial(shape2)]) def sample_inputs_flatten(op_info, device, dtype, requires_grad, **kwargs): shapes = ((S, S, S), (S, S), (S, ), (),) make_tensor_partial = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) for shape in shapes: yield SampleInput(make_tensor_partial(shape)) if len(shape) > 1: yield SampleInput(make_tensor_partial(shape), start_dim=1, end_dim=-1) def reference_inputs_flatten(op, device, dtype, requires_grad, **kwargs): yield from sample_inputs_flatten(op, device, dtype, requires_grad, **kwargs) # shape x start_dim x end_dim cases = ( ((5, 4, 0, 1, 3, 7), 1, 3), ((5, 4, 0, 1, 3, 7), 4, 5), ((5, 4, 1, 1, 3, 7), 2, 3), ((), 0, -1), ((1,), 0, -1), ((3, 7, 5), 1, 2), ((4, 5), 1, 1), ((1, 5, 5, 1, 5, 1, 5, 1), 0, 2), ((1, 5, 5, 1, 5, 1, 5, 1), 3, -1), ((1, 5, 5, 1, 5, 7, 5, 1), -2, -1), ((2, 4, 2), 0, 1), ((4, 2, 2), 1, 2), ((0, 3, 4, 5), 1, 3), ) make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) for shape, start, end in cases: yield SampleInput(make_arg(shape), args=(start, end,)) yield SampleInput(make_arg(shape, noncontiguous=True).transpose(0, -1), args=(start, end,)) yield SampleInput(make_arg(shape).transpose(0, -1), args=(start, end,)) def sample_inputs_unflatten(op_info, device, dtype, requires_grad, **kwargs): # in_shape, dim, sizes args = (((8,), 0, (8,)), ((8,), 0, (4, 2)), ((8,), -1, (2, 2, 2)), ((8,), -1, (-1, 2)), ((3, 6, 2), 1, (2, 3)), ((3, 6, 2), -2, (2, 3)), ((3, 6, 2), -2, (-1, 3)), ((3, 2, 12), 2, (3, 2, 2)), ((4, 0), 0, (2, 2)), ((4, 0), 1, (2, 0, 0, 0)), ) make_tensor_partial = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) for in_shape, dim, sizes in args: yield SampleInput(make_tensor_partial(in_shape), args=(dim, sizes)) def sample_inputs_select(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) cases = 
(((S, S, S), (1, 2)), ((S, S, S), (-1, 2)), ((S, S, S), (-1, -1)), ((S, S, S), (1, -1)), ((S,), (0, 2)) ) for shape, args in cases: yield SampleInput(make_arg(shape), args=args) def sample_inputs_select_scatter(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) cases = (((S, S, S), (S, S), (1, 2)), ((S, S, S), (S, S), (-1, 2)), ((S, S, S), (S, S), (-1, -1)), ((S, S, S), (S, S), (1, -1)), ((S,), (), (0, 2)) ) for input_shape, src_shape, args in cases: input_ = make_arg(input_shape) src = make_arg(src_shape) yield SampleInput(input_, args=(src, *args)) def sample_inputs_slice_scatter(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) cases = (((L, L, L), (L, L, L,), (0, 0, L, 1)), ((L, L, L), (L // 2, L, L,), (0, L // 2, L, 1)), ((L, L, L), (L // 4, L, L,), (0, L // 2, L, 2)), ((L, L, L), (L, L, L,), (1, 0, L, 1)), ((L, L, L), (L, L // 2, L,), (1, L // 2, L, 1)), ((L, L, L), (L, L // 4, L,), (1, L // 2, L, 2)), ((L, L, L), (L, L, L,), (2, 0, L, 1)), ((L, L, L), (L, L, L // 2,), (2, L // 2, L, 1)), ((L, L, L), (L, L, L // 4,), (2, L // 2, L, 2)), ) for input_shape, src_shape, args in cases: input_ = make_arg(input_shape) src = make_arg(src_shape) yield SampleInput(input_, args=(src, *args)) def sample_inputs_expand(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) cases = (((S, 1, 1), (S, S, S)), ((S, 1, S), (S, S, S)), ((S, 1, S), (-1, S, -1)), ((S, 1, S), (-1, S, S)), ((S, 1), (S, S, S)), ((1,), (S, S, S)), ((1, S), (1, 1, S)), ((), ()), ((), (1, 3, 2)), ) for case in cases: shape, args = case yield SampleInput(make_arg(shape), args=(args,)) def sample_inputs_conversion(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) shapes = ((), (2, 3)) memory_format_options = [None, torch.contiguous_format] for shape, memory_format in itertools.product(shapes, memory_format_options): yield SampleInput(make_arg(shape), kwargs={'memory_format': memory_format} if memory_format else {}) yield SampleInput(make_arg((2, 3, 2, 3)), kwargs={'memory_format': torch.channels_last}) def sample_inputs_byte(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device, low=0, high=255, requires_grad=requires_grad) shapes = ((), (2, 3)) memory_format_options = [None, torch.contiguous_format] for shape, memory_format in itertools.product(shapes, memory_format_options): yield SampleInput(make_arg(shape), kwargs={'memory_format': memory_format} if memory_format else {}) yield SampleInput(make_arg((2, 3, 2, 3)), kwargs={'memory_format': torch.channels_last}) def sample_inputs_expand_as(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device) cases = (((S, 1, 1), (S, S, S)), ((), ()), ((), (1, 1)), ) for shape, shape_other in cases: yield SampleInput(make_arg(shape, requires_grad=requires_grad), args=(make_arg(shape_other, requires_grad=False),)) def sample_inputs_where(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) def make_bool_mask(shape): # Make sure atleast one element is nonzero, # except for empty tensor mask_t = make_tensor(shape, dtype=torch.bool, device=device, 
requires_grad=False) if mask_t.numel() == 0: return mask_t elif mask_t.numel() == 1: mask_t.fill_(True) return mask_t if mask_t.sum() == 0: def random_index(shape): return tuple(random.randrange(0, max_idx) for max_idx in shape) mask_t[random_index(mask_t.shape)] = True return mask_t return mask_t cases = (((M, M), (M, M), (M, M), False), ((M, 1, M), (M, M), (M, M, 1), True), ((), (), (), False), ((M, 1, M), (), (M, M, 1), True), ((), (M, M), (), True), ((), (2), (1, 1), True), ) for shape, mask_shape, other_shape, broadcasts_input in cases: yield SampleInput(make_arg(shape), args=(make_bool_mask(mask_shape), make_arg(other_shape)), broadcasts_input=broadcasts_input) # TODO: add reference inputs for where(condition) signature def reference_inputs_where(op, device, dtype, requires_grad, **kwargs): yield from sample_inputs_where(op, device, dtype, requires_grad, **kwargs) make_cond = partial(make_tensor, dtype=torch.bool, device=device, requires_grad=requires_grad) make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) # noncontiguous c = make_cond((10, 3), noncontiguous=True) a = make_arg((10, 1), noncontiguous=True) b = make_arg((3, 10, 3)).transpose(0, -1) # NOTE that the OpInfo for where takes samples of the form a, cond, b yield SampleInput(a, args=(c, b)) # type promoting # FIXME(rec): shouldn't other_dtype be used two lines below? other_dtype = torch.double if dtype is not torch.double else torch.long # noqa: F841 c = make_cond((10, 3), noncontiguous=True) a = make_arg((10, 1), dtype=torch.long) b = make_arg((10, 1)) yield SampleInput(a, args=(c, b)) # two python scalars c = make_cond((10, 3), noncontiguous=True) a = make_arg((1,)).item() b = make_arg((1,)).item() yield SampleInput(a, args=(c, b)) # NaN propagation if dtype.is_floating_point or dtype.is_complex: if dtype.is_floating_point: nan = float('nan') else: # dtype.is_complex nan = complex(float('nan'), float('nan')) c = make_cond((1, 10, 3)) a = make_arg((10, 3), noncontiguous=True) a[2, 1] = nan b = make_arg((1, 3)) b[0, 2] = nan yield SampleInput(a, args=(c, b)) # Python scalars type promotion for scalar in (0, 0.0, 2j, False): yield SampleInput(scalar, args=(c, b)) yield SampleInput(a, args=(c, scalar)) def error_inputs_where(op_info, device, **kwargs): shape = (S,) err_msg = "Expected all tensors to be on the same device" for devices in product(('cpu', device), repeat=3): if len(set(devices)) == 2: si = SampleInput(make_tensor(shape, device=devices[0], dtype=torch.float32), args=(make_tensor(shape, dtype=torch.bool, device=devices[1]), make_tensor(shape, device=devices[2], dtype=torch.float32))) yield ErrorInput(si, error_regex=err_msg) def sample_inputs_nonzero(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) sizes = ((), (S,), (S, S), (S, S, S), (S, 1, S), (S, 0, S)) inputs = [] for shape in sizes: # construct input without any non-zero elements zeros = torch.zeros(shape, dtype=dtype, device=device, requires_grad=requires_grad) inputs.append(zeros) # construct input with mixed zero and non-zero elements mixed = make_arg(shape).requires_grad_(False) mask_t = make_tensor(shape, dtype=torch.bool, device=device, requires_grad=False) mixed[mask_t] = 0 inputs.append(mixed) for input_t, as_tuple in product(inputs, [False, True]): yield SampleInput(input_t.clone().requires_grad_(requires_grad), kwargs=dict(as_tuple=as_tuple)) def sample_inputs_nonzero_static(op_info, device, dtype, requires_grad, **kwargs): 
make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) sizes = ((), (S,), (S, S), (S, S, S), (S, 1, S), (S, 0, S)) inputs = [] for shape in sizes: # construct input without any non-zero elements zeros = torch.zeros(shape, dtype=dtype, device=device, requires_grad=requires_grad) inputs.append(zeros) # construct input with mixed zero and non-zero elements mixed = make_arg(shape).requires_grad_(False) mask_t = make_tensor(shape, dtype=torch.bool, device=device, requires_grad=False) mixed[mask_t] = 0 inputs.append(mixed) nonzero_sizes = [0, 1, XS, S, M] for input_t, nonzero_size in product(inputs, nonzero_sizes): yield SampleInput(input_t.clone().requires_grad_(requires_grad), kwargs=dict(size=nonzero_size)) def sample_inputs_chunk(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) cases = (((S, S, S), (2,)), ((S, S, S), (S, 1)), ((S, S, S), (S, -1))) for case in cases: shape, args = case yield SampleInput(make_arg(shape), args=args) def reference_inputs_chunk(op, device, dtype, requires_grad, **kwargs): yield from sample_inputs_chunk(op, device, dtype, requires_grad, **kwargs) make_arg = partial(make_tensor, dtype=dtype, device=device, requires_grad=requires_grad) # shape x chunks x dim cases = ( ((13, 9, 11), 17, -1), ((13, 9, 11), 11, -1), ((13,), 12, -1), ((15,), 12, -1), ((15,), 7, 0), ((15,), 9, 0), ((3, 7), 9, 1), ((3, 7), 9, 0), ((3, 7), 2, 0), ((3, 7), 3, 0), ((3, 7), 1, 0), ((3, 7), 1, 1), ((4, 4), 2, 0), ) for shape, chunks, dim in cases: yield SampleInput(make_arg(shape), args=(chunks, dim)) def sample_inputs_kthvalue(op_info, device, dtype, requires_grad, **kwargs): def _tensor(shape, dtype=dtype, low=None, high=None): return make_tensor(shape, dtype=dtype, device=device, low=low, high=high, requires_grad=requires_grad) test_cases = [ ((S, S, S), (2,)), ((S, S, S), (2, 1,)), ((S, S, S), (2, -1,)), ((S, S, S), (2, 1, True,)), ((S, S, S), (2, -1, True,)), ((S,), (2, 0,)), ((S,), (2, 0, True,)), ((), (1,)), ((), (1, 0,)), ((), (1, 0, True)), ] yield from (SampleInput(_tensor(tensor), *args) for tensor, args in test_cases) def error_inputs_kthvalue(op_info, device, **kwargs): # tests overlapping output fails t = make_tensor(10, dtype=torch.float32, device=device) indices = torch.empty((), device=device, dtype=torch.long) yield ErrorInput(SampleInput(t, 5, out=(t, indices)), error_regex="unsupported operation") k_out_of_range_err = "selected number k out of range for dimension" yield ErrorInput(SampleInput(torch.randn(2, 2, device=device), 3, 0), error_regex=k_out_of_range_err) yield ErrorInput(SampleInput(torch.randn(2, 2, device=device), 3), error_regex=k_out_of_range_err) yield ErrorInput(SampleInput(torch.tensor(2, device=device), 3), error_regex=k_out_of_range_err) def sample_inputs_dropout(op_info, device, dtype, requires_grad, *, train=None, valid_input_dim=None, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) if valid_input_dim: cases = ((S,) * i for i in valid_input_dim) else: cases = ((S, S), (S,), ()) p_vals = [0.0, 0.5, 1.0] # This is to handle special case for feature_alpha_dropout which has different # supported dtypes depending on `train` parameter training_vals = [train] if train is not None else [True, False] for case, p, training in product(cases, p_vals, training_vals): yield SampleInput(make_arg(case), p=p, training=training) yield SampleInput(make_arg(case)) def 
sample_inputs_dropout_backward(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) make_mask = partial(make_tensor, device=device, dtype=torch.bool, requires_grad=False) cases = ((S, S, S, S), (S,), ()) scale_vals = [0.0, 1.0, 2.0] for case, scale in product(cases, scale_vals): yield SampleInput(make_arg(case), make_mask(case), scale) def sample_inputs_embedding_bag(op_info, device, dtype, requires_grad, **kwargs): def make_input(shape): return make_tensor(shape, device=device, dtype=dtype, requires_grad=requires_grad) def make_long_input(shape, *, low, high, noncontiguous=False): return make_tensor(shape, device=device, dtype=torch.long, low=low, high=high, noncontiguous=noncontiguous) def make_per_sample_weight(flag, idx): # a tensor of float / double weights, or None # to indicate all weights should be taken to be 1 if flag: return make_input(idx.shape) return None offsets = torch.tensor([0, 3], device=device, dtype=torch.long) for generate_per_sample_weight in (True, False): for mode in ('sum', 'mean', 'max'): # per_sample_weights is only supported for mode='sum' (got mode='****') if generate_per_sample_weight and mode in ('mean', 'max'): continue # 1-D index tensor idx = make_long_input((S,), low=0, high=M) per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx) yield SampleInput(make_input((M, S)), args=(idx,), kwargs={'offsets': offsets, 'mode': mode, 'per_sample_weights': per_sample_weights}) idx = make_long_input((S,), low=0, high=M, noncontiguous=True) per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx) yield SampleInput(make_input((M, S)), args=(idx,), kwargs={'offsets': offsets, 'mode': mode, 'per_sample_weights': per_sample_weights}) # bag with zero length idx = make_long_input((S,), low=0, high=M, noncontiguous=True) per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx) yield SampleInput(make_input((M, S)), args=(idx,), kwargs={'offsets': torch.tensor([0, 0, 3], device=device, dtype=torch.long), 'mode': mode, 'per_sample_weights': per_sample_weights}) # 2-D index tensor idx = make_long_input((S, S), low=0, high=M) per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx) yield SampleInput(make_input((M, S)), args=(idx,), kwargs={'mode': mode, 'per_sample_weights': per_sample_weights}) idx = make_long_input((S, S), low=0, high=M, noncontiguous=True) per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx) yield SampleInput(make_input((M, S)), args=(idx,), kwargs={'mode': mode, 'per_sample_weights': per_sample_weights}) # The gradient vector at `padding_idx` is not updated. 
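            # Illustrative: bag entries equal to `padding_idx` are skipped in the
            # reduction. Assuming a weight matrix `w` with at least 3 rows,
            #   torch.nn.functional.embedding_bag(torch.tensor([0, 2, 2]), w,
            #                                     torch.tensor([0]), mode='sum',
            #                                     padding_idx=2)
            # reduces to w[0] alone for the single bag.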
# Negative padding_idx idx = make_long_input((6,), low=0, high=S) idx[0] = 4 idx[4] = 4 per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx) yield SampleInput(make_input((S, S)), args=(idx,), kwargs={'padding_idx': -1, 'offsets': offsets, 'mode': mode, 'per_sample_weights': per_sample_weights},) idx = make_long_input((3, 3), low=0, high=S) # Positive padding_idx idx[0, 0] = 2 idx[1, 1] = 2 per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx) yield SampleInput(make_input((S, S)), args=(idx,), kwargs={'padding_idx': 2, 'mode': mode, 'per_sample_weights': per_sample_weights},) idx = make_long_input((6, ), low=0, high=S) weights = make_input((S, S)) offsets_ = torch.tensor([0, 3, 6], device=device, dtype=torch.long) per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx) yield SampleInput(weights, args=(idx,), kwargs={'mode': mode, 'offsets': offsets_, 'include_last_offset': True},) if not requires_grad: # Following inputs return different gradient from the numerical gradient. # This is expected and relevant tests are present in `test_nn.py`. # Due to inplace renorming of weight, the numerical gradient doesn't match the # analytical gradient. idx = make_long_input((2, 2), low=0, high=S) weights = make_input((S, S)) * 2 per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx) yield SampleInput(weights, args=(idx,), kwargs={'max_norm': 1., 'mode': mode, 'per_sample_weights': per_sample_weights},) idx = make_long_input((6, ), low=0, high=S) weights = make_input((S, S)) * 2 per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx) yield SampleInput(weights, args=(idx,), kwargs={'max_norm': 1., 'norm_type': 1.0, 'mode': mode, 'offsets': offsets, 'per_sample_weights': per_sample_weights},) if mode != 'max': # Scale the gradient based on the inverse frequency of a particular index. # Note : smax mode does not support sparse weights idx = make_long_input((2, 2), low=0, high=S) idx[0, 0] = 1 idx[0, 1] = 1 weights = make_input((S, S)) per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx) yield SampleInput(weights, args=(idx,), kwargs={'scale_grad_by_freq': True, 'mode': mode, 'per_sample_weights': per_sample_weights},) # gradcheck not implemented for sparse tensors. 
# Note : max mode does not support sparse weights idx = make_long_input((6, ), low=0, high=S) weights = make_input((S, S)) per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx) yield SampleInput(weights, args=(idx,), kwargs={'sparse': True, 'offsets': offsets, 'mode': mode, 'per_sample_weights': per_sample_weights}) idx = make_long_input((6, ), low=0, high=S) idx[0] = 1 # freq more than 1 idx[1] = 1 # freq more than 1 idx[3] = 0 # padding_idx weights = make_input((S, S)) * 2 per_sample_weights = make_per_sample_weight(generate_per_sample_weight, idx) yield SampleInput(weights, args=(idx,), kwargs={'sparse': True, 'scale_grad_by_freq': True, 'padding_idx': 0, 'max_norm': 1., 'offsets': offsets, 'mode': mode, 'per_sample_weights': per_sample_weights}) def sample_inputs_embedding(op_info, device, dtype, requires_grad, **kwargs): def make_input(shape): return make_tensor(shape, device=device, dtype=dtype, requires_grad=requires_grad) def make_long_input(shape, *, low, high): return make_tensor(shape, device=device, dtype=torch.long, low=low, high=high) # 0-D index tensor idx = make_long_input((), low=0, high=M) yield SampleInput(make_input((M, S)), args=(idx,),) # 1-D index tensor idx = make_long_input((S,), low=0, high=M) yield SampleInput(make_input((M, S)), args=(idx,),) # 2-D index tensor idx = make_long_input((S, S), low=0, high=M) yield SampleInput(make_input((M, S)), args=(idx,),) if not requires_grad: # Following inputs return different gradient from the numerical gradient. # This is expected and relevant tests are present in `test_nn.py`. # The gradient vector at `padding_idx` is not updated. idx = make_long_input((2, 2), low=0, high=S) idx[0, 0] = 2 idx[1, 1] = 2 yield SampleInput(make_input((S, S)), args=(idx,), kwargs={'padding_idx': 2},) idx = make_long_input((2, 2), low=0, high=S) idx[0, 0] = 4 idx[1, 1] = 4 yield SampleInput(make_input((S, S)), args=(idx,), kwargs={'padding_idx': -1},) # Due to inplace renorming of weight, the numerical gradient doesn't match the # analytical gradient. idx = make_long_input((2, 2), low=0, high=S) weights = make_input((S, S)) * 2 yield SampleInput(weights, args=(idx,), kwargs={'max_norm': 1.},) idx = make_long_input((2, 2), low=0, high=S) weights = make_input((S, S)) * 2 yield SampleInput(weights, args=(idx,), kwargs={'max_norm': 1., 'norm_type': 1.0},) # Scale the gradient based on the inverse frequency of a particular index. idx = make_long_input((2, 2), low=0, high=S) idx[0, 0] = 1 idx[0, 1] = 1 weights = make_input((S, S)) yield SampleInput(weights, args=(idx,), kwargs={'scale_grad_by_freq': True},) # gradcheck not implemented for sparse tensors. 
idx = make_long_input((2, 2), low=0, high=S) weights = make_input((S, S)) yield SampleInput(weights, args=(idx,), kwargs={'sparse': True}) idx = make_long_input((3, 3), low=0, high=S) idx[0, 0] = 1 # freq more than 1 idx[0, 1] = 1 # freq more than 1 idx[1, 0] = 0 # padding_idx weights = make_input((S, S)) * 2 yield SampleInput(weights, args=(idx,), kwargs={'sparse': True, 'scale_grad_by_freq': True, 'padding_idx': 0, 'max_norm': 1.}) def sample_inputs_one_hot(op_info, device, dtype, requires_grad, **kwargs): def make_input(shape, *, low, high): return make_tensor(shape, device=device, dtype=dtype, low=low, high=high, requires_grad=requires_grad) shapes = ((), (S,), (L, M, S)) num_classess = (-1, 10) return ( SampleInput( make_input( shape, low=0, high=10 if num_classes == -1 else num_classes // 2, ), kwargs=dict(num_classes=num_classes), ) for shape, num_classes in itertools.product(shapes, num_classess) ) def sample_inputs_loss(op_info, device, dtype, requires_grad, **kwargs): rhs_requires_grad = kwargs.get('rhs_requires_grad', requires_grad) _make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) # Although most losses also support the reduce and size_average combination instead of reduce, the former is # deprecated since 0.4.1 and thus is not tested shapes_and_kwargs = ( ((), None), ((S,), dict(reduction="mean")), ((S,), dict(reduction="sum")), ((S,), dict(reduction="none")), ((S, S), None), ((S, S, S), None), ) for shape, kwargs in shapes_and_kwargs: yield SampleInput(_make_tensor(shape), args=(_make_tensor(shape, requires_grad=rhs_requires_grad),), kwargs=kwargs) def sample_inputs_grid_sample(op_info, device, dtype, requires_grad, **kwargs): # We get better tests if we change the range of the values to something like [-2,2] # because for grid (second tensor argument) the "useful" range is [-1,1] and this way # you get a better combination of out-of-range and in-range test cases _make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad, low=-2, high=2) batch_size = 2 num_channels = 3 modes = ("bilinear", "nearest") align_cornerss = (False, True) padding_modes = ("zeros", "border", "reflection") for dim in (2, 3): modes_ = (*modes, "bicubic") if dim == 2 else modes for mode, padding_mode, align_corners in itertools.product(modes_, padding_modes, align_cornerss): yield SampleInput( _make_tensor((batch_size, num_channels, *[S] * dim)), _make_tensor((batch_size, *[S] * dim, dim)), mode=mode, padding_mode=padding_mode, align_corners=align_corners, ) def reference_inputs_grid_sample(op_info, device, dtype, requires_grad, **kwargs): batch_size = 2 num_channels = 3 height = 345 width = 456 modes = ("bilinear", "nearest", "bicubic") align_cornerss = (False, True) padding_modes = ('zeros', 'border', 'reflection') # Create an affine transformation matrix a = torch.deg2rad(torch.tensor(45.0)) ca, sa = torch.cos(a), torch.sin(a) # rotation angles s1, s2 = 1.23, 1.34 # scales theta = torch.tensor([[ [ca / s1, sa, 0.0], [-sa, ca / s2, 0.0], ]], dtype=dtype, device=device) theta = theta.expand(batch_size, 2, 3).contiguous() x = torch.arange(batch_size * num_channels * height * width, device=device) x = x.reshape(batch_size, num_channels, height, width).to(torch.uint8) x = x.to(dtype=dtype) x.requires_grad_(requires_grad) for mode, padding_mode, align_corners in itertools.product(modes, padding_modes, align_cornerss): grid = torch.nn.functional.affine_grid( theta, size=(batch_size, num_channels, height, width), 
align_corners=align_corners ) yield SampleInput( x, grid, mode, padding_mode, align_corners, ) def sample_inputs_grid_sampler_2d(op_info, device, dtype, requires_grad, **kwargs): # We get better tests if we change the range of the values to something like [-2,2] # because for grid (second tensor argument) the "useful" range is [-1,1] and this way # you get a better combination of out-of-range and in-range test cases _make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad, low=-2, high=2) batch_size = 2 num_channels = 3 modes = (0, 1, 2) align_cornerss = (False, True) padding_modes = (0, 1, 2) for mode, padding_mode, align_corners in itertools.product(modes, padding_modes, align_cornerss): yield SampleInput( _make_tensor((batch_size, num_channels, S, L)), _make_tensor((batch_size, M + 3, M, 2)), mode, padding_mode, align_corners, ) def sample_inputs_cosine_embedding_loss(op_info, device, dtype, requires_grad, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) def make_target(shape): shape = () if len(shape) == 1 else (shape[0], ) t = torch.randint(0, 2, shape, device=device, dtype=torch.long) # Label with -1 or 1 t = t * 2 - 1 target = t.to(dtype=dtype).detach_().requires_grad_(requires_grad) return target shapes = ((S, S), (S,)) reductions = ('none', 'mean', 'sum') for s, r in product(shapes, reductions): yield SampleInput( make_input(s), args=(make_input(s), make_target(s)), kwargs=dict(reduction=r, margin=random.uniform(-1, 1)) ) def sample_inputs_ctc_loss(op_info, device, dtype, requires_grad, **kwargs): input_length = 50 batch = 16 num_char = 20 target_length = 30 def make_log_probs(s): t = make_tensor(s, device=device, dtype=dtype) log_probs = t.log_softmax(2).to(device=device, dtype=dtype).detach().requires_grad_(requires_grad=requires_grad) return log_probs reductions = ('none', 'mean', 'sum') zero_inf = (True, False) lengths_type = (list, torch.Tensor) for r, z, lt in product(reductions, zero_inf, lengths_type): log_probs = make_log_probs((input_length, batch, num_char)) targets = torch.randint(1, num_char, (batch, target_length), dtype=torch.long, device=device) input_lengths = torch.full((batch, ), input_length, dtype=torch.long, device=device) target_lengths = torch.randint(10, target_length, (batch, ), dtype=torch.long, device=device) # Dont generate int[] types if reduction = "Mean" since this results in non composite compliant calls # to ctc_loss.IntList since a tensor needs to be created from the target lengths. # Creating such a tensor requires the use of pointers to copy data from int[] -> torch.Tensor # e.g. via std::copy. Similarly symbolic/real tracing with fx will also not work if lt is list and r in ["none", "sum"]: input_lengths = input_lengths.tolist() target_lengths = target_lengths.tolist() yield SampleInput(log_probs, args=(targets, input_lengths, target_lengths,), kwargs=dict(reduction=r, zero_infinity=z)) def sample_inputs_nll_loss(op_info, device, dtype, requires_grad, **kwargs): shape = (2, 3) num_classes = shape[1] make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) # FIXME: Derivative wrt. 
weight not implemented make_weight = partial(make_tensor, num_classes, device=device, dtype=dtype, requires_grad=False) def make_target(shape, zeros=False): s = (shape[0], *shape[2:]) if len(shape) > 1 else () if zeros: return torch.zeros(s, device=device, dtype=torch.long) else: return make_tensor(s, low=0, high=shape[1] if len(shape) > 1 else shape[0], device=device, dtype=torch.long) def gen_shape_kwargs(): # Batched, non-batched and 2d shapes = (shape, (num_classes,), shape + (2, 2)) reductions = ('none', 'mean', 'sum') for reduction, s in product(reductions, shapes): yield make_input(s), make_target(s), dict(reduction=reduction) yield make_input(s), make_target(s), dict(weight=make_weight(), reduction=reduction) yield make_input(s), make_target(s), dict(weight=make_weight(low=0), reduction=reduction) yield make_input(s), make_target(s), dict(weight=make_weight(high=0), reduction=reduction) t = make_target(s) ignore = num_classes // 2 # If "mean", nll returns NaN, so it's not differentiable at those points if t.eq(ignore).all() and reduction == "mean": t.fill_(0) yield make_input(s), t, dict(ignore_index=num_classes // 2, reduction=reduction) yield make_input(s), t, dict(ignore_index=num_classes // 2, reduction=reduction, weight=make_weight()) # Test ignoring all the targets # If "mean", nll returns NaN, so it's not differentiable at those points if reduction != "mean": yield make_input(s), make_target(s, zeros=True), dict(ignore_index=0, reduction=reduction) for input, target, kwargs in gen_shape_kwargs(): yield SampleInput(input, args=(target,), kwargs=kwargs) target = torch.tensor([-1, 2], device=device, dtype=torch.long) yield SampleInput(make_input(shape), args=(target,), kwargs={'ignore_index': -1}) def sample_inputs_binary_cross_entropy_with_logits( op_info, device, dtype, requires_grad, **kwargs ): make = partial(make_tensor, device=device, dtype=dtype) make_prob = partial(make, low=0, high=1) reductions = ("mean", "sum", "none") def make_weight_shape_kwargs(): kwargs = [] for shape in ((1,), (1, S), (S), (S, S)): kwargs.extend([((S, S), dict(reduction=reduction, weight=make(shape))) for reduction in reductions]) return kwargs shapes_and_kwargs = [ *[(shape, None) for shape in ((), (1,), (S,), (S, S), (S, S, S))], *[((S, S), dict(reduction=reduction)) for reduction in reductions], *make_weight_shape_kwargs(), *[((S, S), dict(reduction=reduction, pos_weight=make((S,), low=0))) for reduction in reductions], *[((S, S), dict(reduction=reduction, weight=make((S, S)), pos_weight=make((S,), low=0))) for reduction in reductions], ] for shape, kwargs in shapes_and_kwargs: yield SampleInput( make(shape, requires_grad=requires_grad), args=(make_prob(shape, requires_grad=requires_grad),), kwargs=kwargs, ) def sample_inputs_argwhere(op_info, device, dtype, requires_grad, **kwargs): yield SampleInput(torch.tensor([1, 0, 2, 0], dtype=dtype, device=device, requires_grad=requires_grad)) mask = torch.tensor([[0, 1, 0, 1, 0], [1, 1, 1, 1, 0], [0, 0, 0, 1, 0], [1, 0, 1, 1, 0], [1, 0, 0, 1, 0]], dtype=torch.bool, device=device) t = make_tensor((S, S), dtype=dtype, device=device, requires_grad=requires_grad) t[mask] = 0 yield SampleInput(t) t = make_tensor((S, S), dtype=dtype, device=device, requires_grad=requires_grad, noncontiguous=True) t[mask] = 0 yield SampleInput(t) t = make_tensor((S, 0), dtype=dtype, device=device, requires_grad=requires_grad) yield SampleInput(t) yield SampleInput(torch.zeros((S,), dtype=dtype, device=device, requires_grad=requires_grad)) yield SampleInput(make_tensor((), 
dtype=dtype, device=device, requires_grad=requires_grad)) def _generate_sample_shape_reduction(): shapes = ((S,), (S, S), (S, S, S)) reductions = ('none', 'mean', 'sum') yield from product(shapes, reductions) def sample_inputs_gaussian_nll_loss(op_info, device, dtype, requires_grad, **kwargs): _make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) # Set low slightly above 0 so gradcheck doesn't accidentally dip below 0 make_var = partial(make_tensor, low=0.1, device=device, dtype=dtype, requires_grad=requires_grad) def gen_shape(shape): yield shape # Broadcast yield (*shape[:-1], 1) yield shape[:-1] def gen_shape_kwargs(): for s, r in _generate_sample_shape_reduction(): for t_s, v_s in product(gen_shape(s), gen_shape(s)): yield _make_tensor(s), _make_tensor(t_s), make_var(v_s), dict(reduction=r) yield ( _make_tensor(s), _make_tensor(t_s), make_var(v_s), dict(full=True, reduction=r) ) yield ( _make_tensor(s), _make_tensor(t_s), make_var(v_s), dict(eps=random.uniform(1e-6, 1e-3), reduction=r) ) yield ( _make_tensor(s), _make_tensor(t_s), make_var(v_s), dict(full=True, eps=random.uniform(1e-6, 1e-3), reduction=r) ) for input, target, var, kwargs in gen_shape_kwargs(): yield SampleInput(input, args=(target, var, ), kwargs=kwargs) def error_inputs_gaussian_nll_loss(op_info, device, **kwargs): _make = partial(make_tensor, device=device, dtype=torch.float32) # invalid reduction value yield ErrorInput(SampleInput(_make(10, 2, 3), _make(10, 2, 3), _make((10, 2, 3), low=0), reduction="abc"), error_type=ValueError, error_regex="abc is not valid") # var is of incorrect shape yield ErrorInput(SampleInput(_make(10, 2, 3), _make(10, 2, 3), _make((10, 2, 2), low=0)), error_type=ValueError, error_regex="var is of incorrect size") # target is of incorrect shape yield ErrorInput(SampleInput(_make(10, 2, 3), _make(10, 2, 2), _make((10, 2, 3), low=0)), error_type=RuntimeError, error_regex=(r"The size of tensor a \(3\) must match the size of tensor b \(2\) " r"at non-singleton dimension 2")) def _generate_sample_inputs_nn_loss(op_info, device, dtype, requires_grad, **kwargs): _make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) for s, r in _generate_sample_shape_reduction(): yield _make_tensor(s), _make_tensor(s), dict(reduction=r) def sample_inputs_hinge_embedding_loss(op_info, device, dtype, requires_grad, **kwargs): for input, target, d in _generate_sample_inputs_nn_loss(op_info, device, dtype, requires_grad, **kwargs): # target should contain either 1 or -1 as per docs mask = torch.rand_like(target) > 0.5 target[mask] = 1 target[~mask] = -1 d['margin'] = random.uniform(-9, 9) yield SampleInput(input, args=(target, ), kwargs=d) # scalar input and target. 
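    # (Per the docs the target should only contain 1 or -1; the 0-d pair below does not
    #  enforce that and mainly exercises scalar input/target shapes.)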
    _make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    yield SampleInput(_make_tensor(()), args=(_make_tensor(()), ))


def error_inputs_hinge_embedding_loss(op, device, **kwargs):
    make_input = partial(make_tensor, device=device, dtype=torch.float32)
    # invalid reduction value
    yield ErrorInput(SampleInput(make_input(5, 4), args=(make_input(5, 4),), kwargs={'reduction': 'abc'}),
                     error_type=ValueError, error_regex='is not a valid value')


def reference_inputs_hinge_embedding_loss(op, device, dtype, requires_grad, **kwargs):
    yield from sample_inputs_hinge_embedding_loss(op, device, dtype, requires_grad, **kwargs)
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    for reduction in ('sum', 'mean', 'none'):
        if dtype.is_floating_point:  # only supports ints and floats
            # NaN propagation
            inp = make_input((10, ))
            inp[2] = float('nan')
            target = make_input((10, ))
            # target should contain either 1 or -1 as per docs
            mask = torch.rand_like(target) > 0.5
            target[mask] = -1
            target[~mask] = 1
            yield SampleInput(inp, args=(target,), kwargs={'reduction': reduction})

            # Inf Handling
            inp = make_input((10, ))
            inp[4] = float('inf')
            target = make_input((10, ))
            mask = torch.rand_like(target) > 0.5
            target[mask] = -1
            target[~mask] = 1
            yield SampleInput(inp, args=(target,), kwargs={'reduction': reduction})

        # Broadcasting
        inp = make_input((5, 5))
        target = make_input((1, 5))
        mask = torch.rand_like(target) > 0.5
        target[mask] = -1
        target[~mask] = 1
        yield SampleInput(inp, args=(target,), kwargs={'reduction': reduction})


def sample_inputs_huber_loss(op_info, device, dtype, requires_grad, **kwargs):
    for input, target, d in _generate_sample_inputs_nn_loss(op_info, device, dtype, requires_grad, **kwargs):
        d['delta'] = random.uniform(1e-3, 9)
        yield SampleInput(input, args=(target, ), kwargs=d)


def error_inputs_huber_loss(op, device, **kwargs):
    make_input = partial(make_tensor, device=device, dtype=torch.float32)
    # invalid reduction value
    err = 'is not a valid value for reduction'
    yield ErrorInput(SampleInput(make_input(5, 4), args=(make_input(5, 4),), kwargs={'reduction': 'abc'}),
                     error_type=ValueError, error_regex=err)
    # delta <= 0
    for delta in (0, -1):
        err = 'huber_loss does not support non-positive values for delta.'
yield ErrorInput(SampleInput(make_input(5, 4), args=(make_input(5, 4),), kwargs={'delta': delta}), error_type=RuntimeError, error_regex=err) def sample_inputs_poisson_nll_loss(op_info, device, dtype, requires_grad, **kwargs): _make_tensor = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) def gen_shape_kwargs(): for s, r in _generate_sample_shape_reduction(): for li in (True, False): for f in (True, False): i1 = _make_tensor(s) i2 = _make_tensor(s) # For Poisson NLL Loss, # target is assumed to be from # Poisson Distribution which # always has positive samples t1 = _make_tensor(s, low=0) t2 = _make_tensor(s, low=0) if not li: i1.abs_() i2.abs_() t1.abs_() t2.abs_() yield ( i1, t1, dict(log_input=li, full=f, reduction=r) ) yield ( i2, t2, dict(log_input=li, full=f, eps=random.uniform(1e-8, 1e-3), reduction=r) ) for input, target, kwargs in gen_shape_kwargs(): yield SampleInput(input, args=(target, ), kwargs=kwargs) # test INT_TO_FLOAT promotion if dtype.is_complex: for d in (torch.bool, torch.int64): yield SampleInput(_make_tensor(dtype=dtype), args=(_make_tensor(dtype=d),)) yield SampleInput(_make_tensor(dtype=d), args=(_make_tensor(dtype=dtype),)) def error_inputs_poisson_nll_loss(op_info, device, **kwargs): make = partial(make_tensor, device=device, dtype=torch.float32) # invalid reduction value yield ErrorInput(SampleInput(make(5, 4), args=(make(5, 4),), kwargs={'reduction': 'abc'}), error_type=ValueError, error_regex='abc is not a valid value for reduction') # invalid input shapes yield ErrorInput(SampleInput(make(5, 4), args=(make(5,),)), error_regex=(r'(Attempting to broadcast a dimension of length|' r'The size of tensor a \(5\) must match the ' r'size of tensor b \(4\) at non-singleton ' r'dimension 1)')) def error_inputs_soft_margin_loss(op_info, device, **kwargs): make = partial(make_tensor, device=device, dtype=torch.float32) # invalid reduction value yield ErrorInput(SampleInput(make(5, 4), args=(make(5, 4),), kwargs={'reduction': 'abc'}), error_type=ValueError, error_regex='abc is not a valid value for reduction') # invalid input shapes yield ErrorInput(SampleInput(make(5, 4), args=(make(5,),)), error_regex=(r'(Attempting to broadcast a dimension of length|' r'The size of tensor a \(4\) must match the ' r'size of tensor b \(5\) at non-singleton ' r'dimension 1)')) def sample_inputs_triplet_margin_loss(op_info, device, dtype, requires_grad, with_distance=False, **kwargs): make = partial(make_tensor, (S, M), device=device, dtype=dtype, requires_grad=requires_grad) kwargss = ( *[dict(margin=margin) for margin in (1e-6, 1.0, 10.0)], dict(swap=True), *[dict(reduction=reduction) for reduction in ("mean", "sum", "none")], ) for kwargs in kwargss: input = make() args = (make(), make()) if with_distance: kwargs["distance_function"] = torch.nn.PairwiseDistance() yield SampleInput(input, args=args, kwargs=kwargs) def error_inputs_triplet_margin_loss(op_info, device, **kwargs): make_input = partial(make_tensor, device=device, dtype=torch.float32) samples = ( # input, args, kwargs, error_type, error_regex # invalid reduction (make_input(3, 4), (make_input(3, 4), make_input(3, 4)), dict(reduction="abc"), ValueError, "abc is not a valid value for reduction"), # invalid margin (make_input(3, 4), (make_input(3, 4), make_input(3, 4)), dict(margin=-1.0), ValueError, "margin must be greater than 0, got -1.0"), # shape mismatch (make_input(3, 5), (make_input(3, 4), make_input(3, 4)), {}, RuntimeError, (r'(Attempting to broadcast a dimension of length|' r"The size of 
tensor a \(5\) must match the size of tensor b \(4\) " r"at non-singleton dimension 1)")), (make_input(3, 4), (make_input(3, 5), make_input(3, 4)), {}, RuntimeError, (r'(Attempting to broadcast a dimension of length|' r"The size of tensor a \(4\) must match the size of tensor b \(5\) " r"at non-singleton dimension 1)")), (make_input(3, 4), (make_input(3, 4), make_input(3, 5)), {}, RuntimeError, (r'(Attempting to broadcast a dimension of length|' r"The size of tensor a \(4\) must match the size of tensor b \(5\) " r"at non-singleton dimension 1)")), # different dimensions (make_input(3,), (make_input(3, 4), make_input(3, 4)), {}, RuntimeError, (r"The anchor, positive, and negative tensors are expected to have " r"the same number of dimensions, but got: anchor 1D, positive 2D, " r"and negative 2D inputs")), (make_input(3, 4), (make_input(3,), make_input(3, 4)), {}, RuntimeError, (r"The anchor, positive, and negative tensors are expected to have " r"the same number of dimensions, but got: anchor 2D, positive 1D, " r"and negative 2D inputs")), (make_input(3, 4), (make_input(3, 4), make_input(3,)), {}, RuntimeError, (r"The anchor, positive, and negative tensors are expected to have " r"the same number of dimensions, but got: anchor 2D, positive 2D, " r"and negative 1D inputs")), ) for input, args, kwargs, error_type, error_regex in samples: yield ErrorInput(SampleInput(input, args=args, kwargs=kwargs), error_type=error_type, error_regex=error_regex) def sample_inputs_scaled_mm(op_info, device, dtype, requires_grad, **kwargs): make_mat_e4m3 = partial(make_tensor, device=device, dtype=torch.float8_e4m3fn, requires_grad=requires_grad) make_mat_e5m2 = partial(make_tensor, device=device, dtype=torch.float8_e5m2, requires_grad=requires_grad) make_scale = partial(make_tensor, device=device, dtype=torch.float, requires_grad=False) M, N, K = 15, 32, 16 samples = [] # two e4m3 mat1 = make_mat_e4m3((M, K)) mat2 = make_mat_e4m3((K, N)).t().contiguous().t() scale1 = make_scale((1,)) scale2 = make_scale((1,)) samples.append(SampleInput(mat1, mat2, scale1, scale2)) # mat1 e4m3 mat2 e5m2 mat1 = make_mat_e4m3((M, K)) mat2 = make_mat_e5m2((K, N)).t().contiguous().t() scale1 = make_scale((1,)) scale2 = make_scale((1,)) samples.append(SampleInput(mat1, mat2, scale1, scale2)) # mat1 e5m2 mat2 e4m3 mat1 = make_mat_e5m2((M, K)) mat2 = make_mat_e4m3((K, N)).t().contiguous().t() scale1 = make_scale((1,)) scale2 = make_scale((1,)) samples.append(SampleInput(mat1, mat2, scale1, scale2)) yield from samples def sample_inputs_scaled_dot_product_attention(op_info, device, dtype, requires_grad, **kwargs): make = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) batch, seq_q, seq_kv, num_heads, head_dim = 4, 3, 6, 4, 8 dim_3_q_shape = (batch, seq_q, head_dim) dim_3_kv_shape = (batch, seq_kv, head_dim) dim_4_q_shape = (batch, num_heads, seq_q, head_dim) dim_4_kv_shape = (batch, num_heads, seq_kv, head_dim) broadcast_tuple = ((num_heads, seq_q, head_dim), (batch, num_heads, seq_kv, head_dim)) qkv_shapes = [(dim_3_q_shape, dim_3_kv_shape), (dim_4_q_shape, dim_4_kv_shape), broadcast_tuple] samples = [] gqa_options = [True, False] causal_options = [True, False] for qkv_shape, is_causal, dropout_p, _enable_gqa in product( qkv_shapes, causal_options, [0.0, 0.5], gqa_options): shape_q, shape_kv = qkv_shape samples.append(SampleInput( make(shape_q), make(shape_kv), make(shape_kv), is_causal=is_causal, dropout_p=dropout_p )) # Add non standard shapes # FIXME(rec): should diff_v_head_dim be appended to 
samples? diff_v_head_dim = SampleInput( # noqa: F841 make((batch, num_heads, seq_q, head_dim)), make((batch, num_heads, seq_kv, head_dim)), make((batch, num_heads, seq_kv, head_dim + 8)), is_causal=is_causal, dropout_p=dropout_p ) # Add an attn_mask samples.append( SampleInput( make((batch, num_heads, seq_q, head_dim)), make((batch, num_heads, seq_kv, head_dim)), make((batch, num_heads, seq_kv, head_dim)), attn_mask=make((seq_q, seq_kv)), is_causal=False, dropout_p=0.0) ) yield from samples def sample_inputs_efficient_attention_forward(op_info, device, dtype, requires_grad, **kwargs): make = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) batch, num_heads, head_dim = 4, 4, 8 seq_q = 11 seq_kv = 32 dim_4_q_shape = (batch, num_heads, seq_q, head_dim) dim_4_kv_shape = (batch, num_heads, seq_kv, head_dim) qkv_shapes = [(dim_4_q_shape, dim_4_kv_shape)] samples = [] mask_types = [1, 2] # UpperLeft, LowerRight scales = [None, 1.0] for qkv_shape, _is_causal, dropout_p, mask_type, scale in product( qkv_shapes, [True, False], [0.0, 0.5], mask_types, scales): shape_q, shape_kv = qkv_shape samples.append(SampleInput( make(shape_q).transpose(1, 2), make(shape_kv).transpose(1, 2), make(shape_kv).transpose(1, 2), bias=None, cu_seqlens_q=None, cu_seqlens_k=None, max_seqlen_q=None, max_seqlen_k=None, dropout_p=dropout_p, custom_mask_type=mask_type, compute_log_sumexp=requires_grad, scale=scale, seqlen_k=None )) # Add non standard shapes # FIXME(rec): should diff_v_head_dim be appended to samples? diff_v_head_dim = SampleInput( # noqa: F841 make((batch, seq_q, num_heads, head_dim)), make((batch, seq_kv, num_heads, head_dim)), make((batch, seq_kv, num_heads, head_dim + 8)), bias=None, cu_seqlens_q=None, cu_seqlens_k=None, max_seqlen_q=None, max_seqlen_k=None, dropout_p=dropout_p, custom_mask_type=0, # No Mask compute_log_sumexp=requires_grad, scale=None, seqlen_k=None ) # Add an attn_mask samples.append( SampleInput( make((batch, seq_q, num_heads, head_dim)), make((batch, seq_kv, num_heads, head_dim)), make((batch, seq_kv, num_heads, head_dim)), bias=make(batch, num_heads, seq_q, seq_kv), cu_seqlens_q=None, cu_seqlens_k=None, max_seqlen_q=None, max_seqlen_k=None, dropout_p=dropout_p, custom_mask_type=0, # No Mask compute_log_sumexp=requires_grad, scale=None, seqlen_k=None ) ) # jagged (with query/keys offsets) cu_seqlens_k = torch.arange(-1, 32 * 2 + 1, 2, dtype=torch.int32, device=device) cu_seqlens_k[-1] = 62 cu_seqlens_k[0] = 0 samples.append( SampleInput( make((32, 2, 64)).view(-1, 8, 8).unsqueeze(0), make((64, 64)).view(-1, 8, 8).unsqueeze(0), make((64, 64)).view(-1, 8, 8).unsqueeze(0), bias=None, cu_seqlens_q=torch.arange(0, 32 * 2 + 2, 2, dtype=torch.int32, device=device), cu_seqlens_k=cu_seqlens_k, max_seqlen_q=2, max_seqlen_k=2, dropout_p=0.0, custom_mask_type=0, # No Mask compute_log_sumexp=requires_grad, scale=None, seqlen_k=None, ) ) yield from samples def sample_inputs_flash_attention_forward(op_info, device, dtype, requires_grad, **kwargs): make = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) batch, num_heads, head_dim = 4, 4, 8 seq_q = 11 seq_kv = 32 dim_4_q_shape = (batch, num_heads, seq_q, head_dim) dim_4_kv_shape = (batch, num_heads, seq_kv, head_dim) qkv_shapes = [(dim_4_q_shape, dim_4_kv_shape)] samples = [] scales = [None, 1.0] for qkv_shape, is_causal, dropout_p, scale in product( qkv_shapes, [True, False], [0.0, 0.5], scales): shape_q, shape_kv = qkv_shape samples.append(SampleInput( make(shape_q).transpose(1, 2), 
make(shape_kv).transpose(1, 2), make(shape_kv).transpose(1, 2), cum_seq_q=None, cum_seq_k=None, max_q=seq_q, max_k=seq_kv, dropout_p=dropout_p, is_causal=is_causal, return_debug_mask=False, scale=scale, )) yield from samples def sample_inputs_pairwise_distance(op_info, device, dtype, requires_grad, **kwargs): make = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) shape = (3,) batched_shape = (2, *shape) shapes_and_kwargs = [ (shape, None), (batched_shape, None), (shape, dict(keepdim=True)), (batched_shape, dict(keepdim=True)), (shape, dict(p=5.0)), (shape, dict(p=-1.0)), (shape, dict(eps=1.0)), ] return ( SampleInput(make(shape), args=(make(shape),), kwargs=kwargs) for shape, kwargs in shapes_and_kwargs ) def sample_inputs_pixel_shuffle(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) yield from ( SampleInput(make_arg((1, 9, 2, 2)), upscale_factor=upscale_factor) for upscale_factor in (1, 3) ) yield from ( SampleInput(make_arg(shape), upscale_factor=1) for shape in [ (1, 0, 1, 1), (1, 1, 0, 1), (1, 1, 1, 0), ] ) def sample_inputs_pixel_unshuffle(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) yield from ( SampleInput(make_arg((1, 1, 6, 6)), downscale_factor=downscale_factor) for downscale_factor in (1, 3) ) yield from ( SampleInput(make_arg(shape), downscale_factor=1) for shape in [ (1, 0, 1, 1), (1, 1, 0, 1), (1, 1, 1, 0), ] ) def sample_inputs_channel_shuffle(op_info, device, dtype, requires_grad, **kwargs): make_arg = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) shapes_groups = [ ((1, 4, 10, 10), 2), ((2, 6, 8, 8), 3), ((2, 8, 5, 5), 4), ] yield from ( SampleInput(make_arg(shape), args=(groups,)) for shape, groups in shapes_groups ) def sample_inputs_binary_cross_entropy(op_info, device, dtype, requires_grad, logits=False, **kwargs): make = partial(make_tensor, device=device, dtype=dtype) # Lower bounds must be greater than 'eps' defined in gradcheck.py::gradgradcheck() -> eps # otherwise perturbation calculation causes Tensor value to become negative triggering # a device-side hardware assertion make_prob = partial(make, low=1e-6, high=1) reductions = ("mean", "sum", "none") shapes_and_kwargs = [ *[(shape, None) for shape in ((), (1,), (S,), (S, S), (S, S, S))], *[((S, S), dict(reduction=reduction)) for reduction in reductions], *[((S, S), dict(reduction=reduction, weight=make((S, S)))) for reduction in reductions], ] if logits: shapes_and_kwargs.extend( [((S, S), dict(reduction=reduction, pos_weight=make((S,), low=0))) for reduction in reductions] ) for shape, kwargs in shapes_and_kwargs: yield SampleInput( (make if logits else make_prob)(shape, requires_grad=requires_grad), args=(make_prob(shape, requires_grad=requires_grad),), kwargs=kwargs, ) def sample_inputs_allclose(op_info, device, dtype, requires_grad, **kwargs): sample_shapes = [(), (S), (S, S, S)] atols = [1e-2, 1e-16] rtols = [1e-1, 0.5] for s, rtol, atol in product(sample_shapes, rtols, atols): # close sample t = make_tensor(s, device=device, dtype=dtype, requires_grad=requires_grad) close = (t + atol).detach().requires_grad_(requires_grad) yield SampleInput(t, close, rtol=rtol, atol=atol) # random sample a = make_tensor(s, device=device, dtype=dtype, requires_grad=requires_grad) b = make_tensor(s, device=device, dtype=dtype, requires_grad=requires_grad) yield SampleInput(a, b, 
rtol=rtol, atol=atol) def sample_inputs_l1_loss(op_info, device, dtype, requires_grad, **kwargs): yield from sample_inputs_loss(op_info, device, dtype, requires_grad, **kwargs) # test COMPLEX_TO_FLOAT promotion if dtype.is_complex: make = partial(make_tensor, (), device=device, requires_grad=requires_grad) yield SampleInput(make(dtype=dtype), args=(make(dtype=torch.double),)) yield SampleInput(make(dtype=torch.double), args=(make(dtype=dtype),)) def error_inputs_l1_loss(op_info, device, **kwargs): make = partial(make_tensor, device=device, dtype=torch.float32) # invalid reduction value yield ErrorInput(SampleInput(make(5, 4), args=(make(5, 4),), kwargs={'reduction': 'abc'}), error_type=ValueError, error_regex='abc is not a valid value for reduction') # invalid input shapes yield ErrorInput(SampleInput(make(5, 4), args=(make(5,),)), error_regex=(r'(Attempting to broadcast a dimension of length|' r'The size of tensor a \(4\) must match the ' r'size of tensor b \(5\) at non-singleton ' r'dimension 1)') ) def sample_inputs_smooth_l1_loss(op_info, device, dtype, requires_grad, **kwargs): yield from sample_inputs_loss(op_info, device, dtype, requires_grad, **kwargs) make = partial(make_tensor, (S, S), device=device, dtype=dtype, requires_grad=requires_grad) # This test case always triggers the smooth condition, since absolute difference of input and target # is smaller than beta yield SampleInput(make(low=0, high=2), args=(make(low=-2, high=0),), kwargs=dict(beta=5)) yield SampleInput(make(), args=(make(),), kwargs=dict(beta=0)) def sample_inputs_kl_div(op_info, device, dtype, requires_grad, **kwargs): # kl_div works with inputs in [0, 1] (aka the pdf of a probability measure) # Then log [0, 1] = (-inf, 0], so this is the log space make_arg = partial(make_tensor, low=0., device=device, dtype=dtype, requires_grad=requires_grad) def make_log(shape): out = torch.nn.functional.log_softmax(make_arg(shape), -1) out.requires_grad_(requires_grad) return out def make_prob(shape): out = torch.nn.functional.softmax(make_arg(shape), -1) out.requires_grad_(requires_grad) return out shapes = ((2,), (2, 3)) reductions = ("none", "mean", "batchmean", "sum") for shape, reduction, log_target in product(shapes, reductions, (True, False)): input = make_log(shape) target = make_log(shape) if log_target else make_prob(shape) yield SampleInput(input, args=(target,), kwargs=dict(reduction=reduction, log_target=log_target)) def sample_inputs_pdist(op_info, device, dtype, requires_grad, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) yield from (SampleInput(make_input((n, m))) for n, m in itertools.product((1, S), repeat=2)) yield from (SampleInput(make_input((S, S)), kwargs=dict(p=p)) for p in (0.0, 1.0, 2.0, 10.0, float("inf"))) def reference_pdist(input, p=2): pdist = scipy.spatial.distance.pdist if p == 0: output = pdist(input, "hamming") * input.shape[1] elif p == float("inf"): output = pdist(input, lambda x, y: np.abs(x - y).max()) else: output = pdist(input, "minkowski", p=p) return output.astype(input.dtype) def sample_inputs_diagflat(op_info, device, dtype, requires_grad, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) yield SampleInput(make_input(())) yield SampleInput(make_input((2,))) yield SampleInput(make_input((2, 2))) yield SampleInput(make_input((2,)), offset=1) yield SampleInput(make_input((2,)), offset=-1) def sample_inputs_max_unpool(op_info, device, dtype, requires_grad, **kwargs): 
    unpool_name_to_pool_method_dict = {
        'nn.functional.max_unpool1d': torch.nn.functional.max_pool1d,
        'nn.functional.max_unpool2d': torch.nn.functional.max_pool2d,
        'nn.functional.max_unpool3d': torch.nn.functional.max_pool3d
    }

    unpool_name_to_dim = {
        'nn.functional.max_unpool1d': 1,
        'nn.functional.max_unpool2d': 2,
        'nn.functional.max_unpool3d': 3
    }

    unpool_to_pool_name_dict = {k: f'nn.functional.{v.__name__}'
                                for k, v in unpool_name_to_pool_method_dict.items()}

    pool_dim = unpool_name_to_dim[op_info.name]
    pool_method = unpool_name_to_pool_method_dict[op_info.name]

    pool_op_info = copy.copy(op_info)
    pool_op_info.name = unpool_to_pool_name_dict[op_info.name]

    for sample in sample_inputs_max_pool(pool_op_info, device, dtype, requires_grad, **kwargs):
        # shapes (C, ...) do not work as of now,
        # see https://github.com/pytorch/pytorch/issues/68337
        # TODO: remove once the issue is resolved
        if sample.input.dim() != pool_dim + 2:
            continue

        # No dilation > 1 for max_unpool,
        # see https://github.com/pytorch/pytorch/issues/68420
        if sample.kwargs['dilation'] != 1:
            continue

        # Can't unpool without indices
        if sample.kwargs['return_indices']:
            pool, indices = pool_method(sample.input, **sample.kwargs)
            # arg has to be a leaf
            arg = pool.detach().requires_grad_(requires_grad)
            sample_kwargs = {
                'kernel_size': sample.kwargs['kernel_size'],
                'stride': sample.kwargs['stride'],
                'padding': sample.kwargs['padding'],
                # output_size could be None but we specify it explicitly
                # to compensate for the information loss in pool due
                # to the floor/ceil operation used to compute the shapes
                'output_size': sample.input.size()
            }

            yield SampleInput(arg, args=(indices,), kwargs=sample_kwargs)


def sample_inputs_max_unpool_grad(op_info, device, dtype, requires_grad, **kwargs):
    for sample in sample_inputs_max_unpool(op_info, device, dtype, requires_grad, **kwargs):
        indices = sample.args[0]
        # The samples for max_unpool are generated with max_pool.
        # It could be that a single element from the max_pool's
        # input is mapped to several locations in its output.
        # This situation leads to failed gradchecks because
        # the finite difference algorithm perturbs the elements
        # of the output one by one, and not in classes of
        # equivalences determined by whether two elements
        # in the output are coming from the same location in the
        # input (simply put, they have the same corresponding index).
        # So, there are two ways to resolve this issue:
        # 1. Extract a perturbation for one element and apply it to all
        #    the elements from the same equivalence class, or
        # 2. Make sure that the equivalence classes are all singletons,
        #    i.e. the index tensor has to be comprised of only unique
        #    indices.
        # Here we go with solution 2, the easiest of all.
        if indices.unique().numel() == indices.numel():
            yield sample


def sample_inputs_multi_head_attention_forward(opinfo, device, dtype, requires_grad, **kwargs):
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)
    if requires_grad:
        # backward tests would take too long to complete, causing the job timeout.
bsz = 2 is_batcheds = (True,) use_separate_proj_weights = (False,) emb_sizes = (2,) src_lens = (XS,) tgt_lens = (XS,) heads = (2,) dropouts = (0.5,) mask_types = ("2d",) else: bsz = 2 is_batcheds = (False, True) use_separate_proj_weights = (False, True) emb_sizes = (2, 4) src_lens = (XS,) tgt_lens = (XS, S) heads = (1, 2) dropouts = (0.0, 0.5) mask_types = (None, "2d", "3d") for is_batched, use_separate_proj_weight, mask_type, emb_size, src_len, tgt_len, num_heads, dropout_p in itertools.product( is_batcheds, use_separate_proj_weights, mask_types, emb_sizes, src_lens, tgt_lens, heads, dropouts ): attn_mask = None if mask_type == "2d": attn_mask = make_input(src_len, tgt_len) elif mask_type == "3d": attn_mask = make_input((bsz if is_batched else 1) * num_heads, src_len, tgt_len) if is_batched: q = make_input(src_len, bsz, emb_size) k = make_input(tgt_len, bsz, emb_size) v = make_input(tgt_len, bsz, emb_size) else: q = make_input(src_len, emb_size) k = make_input(tgt_len, emb_size) v = make_input(tgt_len, emb_size) if use_separate_proj_weight: in_proj_weight = None q_proj_weight = make_input(emb_size, emb_size) k_proj_weight = make_input(emb_size, emb_size) v_proj_weight = make_input(emb_size, emb_size) else: in_proj_weight = make_input(emb_size * 3, emb_size) q_proj_weight = None k_proj_weight = None v_proj_weight = None bias_k = make_input(emb_size) bias_v = make_input(emb_size) in_proj_bias = make_input(emb_size * 3) out_proj_weight = make_input(emb_size, emb_size) out_proj_bias = make_input(emb_size) sample_args = ( k, v, emb_size, num_heads, in_proj_weight, in_proj_bias, bias_k, bias_v, False, dropout_p, out_proj_weight, out_proj_bias ) sample_kwargs = { "q_proj_weight" : q_proj_weight, "k_proj_weight" : k_proj_weight, "v_proj_weight" : v_proj_weight, "attn_mask" : attn_mask, "training" : True if dropout_p > 0.0 else False, "use_separate_proj_weight" : use_separate_proj_weight } yield SampleInput(q, args=sample_args, kwargs=sample_kwargs) # Includes some values such that N * N won't be a multiple of 4, # which should ensure we test the vectorized and non-vectorized # kernel code paths. NUM_SIZE0_TENSORS = 10000 foreach_num_tensors = [20, 23] if not TEST_WITH_SLOW else [23, 30, 300] _foreach_inputs_default_kwargs = {"noncontiguous": False, "same_size": False, "low": None, "high": None} class ForeachRightmostArgType(enum.Enum): TensorList = enum.auto() ScalarList = enum.auto() Scalar = enum.auto() Tensor = enum.auto() class ForeachSampleInput(SampleInput): # For TensorList <op> Scalar/Tensor, we compute the reference # by converting it into TensorList <op> ScalarList/TensorList and # then converting into multiple Tensor <op> Scalar/Tensor. 
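    # (e.g. the reference for torch._foreach_add(tensors, 2) is effectively
    #  [t + 2 for t in tensors], with the scalar first expanded to a per-tensor list.)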
# ref_args contains the args converted to TensorList <op> ScalarList/TensorList ref_args: Any disable_fastpath: bool def __init__(self, *args, disable_fastpath=False, ref_args=None, **kwargs): super().__init__(*args, **kwargs) self.ref_args = ref_args or self.args self.disable_fastpath = disable_fastpath class foreach_inputs_sample_func: def __init__( self, arity: int, rightmost_supports_scalar: bool, rightmost_supports_scalarlist: bool, rightmost_supports_tensor: bool = False, ) -> None: self.arity = arity self._set_rightmost_arg_types( rightmost_supports_scalar, rightmost_supports_scalarlist, rightmost_supports_tensor, ) self._intersperse_empty = (True, False) def _set_rightmost_arg_types( self, rightmost_supports_scalar: bool, rightmost_supports_scalarlist: bool, rightmost_supports_tensor: bool, ) -> None: self._rightmost_arg_types = [ForeachRightmostArgType.TensorList] if self.arity > 1: if rightmost_supports_scalar: self._rightmost_arg_types.append(ForeachRightmostArgType.Scalar) if rightmost_supports_scalarlist: self._rightmost_arg_types.append(ForeachRightmostArgType.ScalarList) if rightmost_supports_tensor: self._rightmost_arg_types.append(ForeachRightmostArgType.Tensor) def _sample_rightmost_arg( self, opinfo, rightmost_arg_type, device, dtype, num_tensors, allow_higher_dtype_scalars, **_foreach_inputs_kwargs, ): if rightmost_arg_type == ForeachRightmostArgType.TensorList: return [sample_inputs_foreach(None, device, dtype, num_tensors, **_foreach_inputs_kwargs)] if rightmost_arg_type == ForeachRightmostArgType.Tensor: return [make_tensor( (), device=device, dtype=dtype, noncontiguous=_foreach_inputs_kwargs["noncontiguous"], requires_grad=_foreach_inputs_kwargs.get("requires_grad", False), )] should_use_simpler_scalars = opinfo.name == "_foreach_pow" and dtype in (torch.float16, torch.bfloat16) def sample_float(): s = random.random() if should_use_simpler_scalars: return 1.0 if s > 0.5 else 2.0 else: return 1.0 - s high = 2 if should_use_simpler_scalars else 9 if rightmost_arg_type == ForeachRightmostArgType.ScalarList: scalarlist_list = [] scalarlist_list.append([random.randint(0, high) + 1 for _ in range(num_tensors)]) if allow_higher_dtype_scalars or dtype.is_floating_point: scalarlist_list.append([sample_float() for _ in range(num_tensors)]) if allow_higher_dtype_scalars or dtype.is_complex: scalarlist_list.append([complex(sample_float(), sample_float()) for _ in range(num_tensors)]) scalarlist_list.append([1, 2.0, 3.0 + 4.5j] + [3.0 for _ in range(num_tensors - 3)]) scalarlist_list.append([True, 1, 2.0, 3.0 + 4.5j] + [3.0 for _ in range(num_tensors - 4)]) return scalarlist_list if rightmost_arg_type == ForeachRightmostArgType.Scalar: scalars = [] scalars.append(random.randint(1, high + 1)) if allow_higher_dtype_scalars or dtype.is_floating_point: scalars.append(sample_float()) if allow_higher_dtype_scalars or dtype.is_complex: scalars.append(complex(sample_float(), sample_float())) scalars.append(True) return scalars raise AssertionError(f"Invalid rightmost_arg_type of {rightmost_arg_type}") def _should_disable_fastpath(self, opinfo, rightmost_arg, rightmost_arg_type, dtype): if self.arity == 1: if "foreach_abs" in opinfo.name and dtype in complex_types(): return True # unary if opinfo.ref in (torch.abs, torch.neg): return False if opinfo.ref_inplace in (torch.Tensor.zero_,): return False return dtype in integral_types_and(torch.bool) if self.arity < 2 or rightmost_arg_type == ForeachRightmostArgType.Tensor: return None if "foreach_pow" in opinfo.name and dtype in 
integral_types_and(torch.bool): return True if any( foreach_name in opinfo.name for foreach_name in ("foreach_clamp_max", "foreach_clamp_min", "foreach_maximum", "foreach_minimum") ) and dtype in integral_types_and(torch.bool): return True if rightmost_arg_type == ForeachRightmostArgType.TensorList: disable_fastpath = "foreach_div" in opinfo.name and dtype in integral_types_and(torch.bool) if "foreach_add" in opinfo.name and dtype == torch.bool: disable_fastpath = True return disable_fastpath elif rightmost_arg_type == ForeachRightmostArgType.Scalar: disable_fastpath = "foreach_div" in opinfo.name and dtype in integral_types_and(torch.bool) if isinstance(rightmost_arg, bool): disable_fastpath |= dtype == torch.bool if opinfo.ref in (torch.add, torch.mul): disable_fastpath = False elif isinstance(rightmost_arg, int): disable_fastpath |= dtype == torch.bool elif isinstance(rightmost_arg, float): disable_fastpath |= dtype in integral_types_and(torch.bool) elif isinstance(rightmost_arg, complex): disable_fastpath |= dtype not in complex_types() else: raise AssertionError(f"Invalid scalar of type {rightmost_arg_type} - {rightmost_arg}") return disable_fastpath elif rightmost_arg_type == ForeachRightmostArgType.ScalarList: disable_fastpath = opinfo.ref == torch.div and dtype in integral_types_and(torch.bool) elmt_t = type(rightmost_arg[0]) has_same_type = all(isinstance(v, elmt_t) for v in rightmost_arg) if not has_same_type: return dtype not in complex_types() if isinstance(rightmost_arg[0], bool): if ("foreach_add" in opinfo.name or "foreach_mul" in opinfo.name) and dtype == torch.bool: disable_fastpath = False elif isinstance(rightmost_arg[0], int): disable_fastpath |= dtype == torch.bool elif isinstance(rightmost_arg[0], float): disable_fastpath |= dtype in integral_types_and(torch.bool) elif isinstance(rightmost_arg[0], complex): disable_fastpath |= dtype not in complex_types() else: raise AssertionError(f"Invalid scalarlist of {rightmost_arg}") return disable_fastpath else: raise AssertionError(f"Invalid rightmost_arg_type of {rightmost_arg_type}") def _sample_kwargs(self, opinfo, rightmost_arg, rightmost_arg_type, dtype): kwargs = {} if rightmost_arg_type == ForeachRightmostArgType.TensorList and opinfo.supports_alpha_param: if dtype in integral_types_and(torch.bool): kwargs["alpha"] = 3 elif dtype.is_complex: kwargs["alpha"] = complex(3, 3) else: kwargs["alpha"] = 3.14 if self.arity > 1: kwargs["disable_fastpath"] = self._should_disable_fastpath(opinfo, rightmost_arg, rightmost_arg_type, dtype) return kwargs def sample_zero_size_tensor_inputs(self, opinfo, device, dtype, requires_grad, **kwargs): assert "num_input_tensors" not in kwargs _foreach_inputs_kwargs = {k: kwargs.pop(k, v) for k, v in _foreach_inputs_default_kwargs.items()} _foreach_inputs_kwargs["requires_grad"] = requires_grad allow_higher_dtype_scalars = kwargs.pop("allow_higher_dtype_scalars", False) for _rightmost_arg_type in self._rightmost_arg_types: zero_size_foreach_inputs_kwargs = copy.deepcopy(_foreach_inputs_kwargs) zero_size_foreach_inputs_kwargs["zero_size"] = True input = sample_inputs_foreach(None, device, dtype, NUM_SIZE0_TENSORS, **zero_size_foreach_inputs_kwargs) if self.arity > 1: args = [ sample_inputs_foreach(None, device, dtype, NUM_SIZE0_TENSORS, **zero_size_foreach_inputs_kwargs) for _ in range(self.arity - 2) ] args.append( self._sample_rightmost_arg( opinfo, ForeachRightmostArgType.TensorList, device, dtype, NUM_SIZE0_TENSORS, allow_higher_dtype_scalars=allow_higher_dtype_scalars, 
**zero_size_foreach_inputs_kwargs, )[0]) kwargs = self._sample_kwargs( opinfo, args[-1], ForeachRightmostArgType.TensorList, dtype) else: args = [] kwargs = {} if opinfo.ref in (torch.abs, torch.neg): kwargs["disable_fastpath"] = False else: kwargs["disable_fastpath"] = dtype in integral_types_and(torch.bool) yield ForeachSampleInput(input, *args, **kwargs) def __call__(self, opinfo, device, dtype, requires_grad, **kwargs): num_input_tensors_specified = "num_input_tensors" in kwargs num_input_tensors = kwargs.pop("num_input_tensors") if num_input_tensors_specified else foreach_num_tensors assert isinstance(num_input_tensors, list) _foreach_inputs_kwargs = {k: kwargs.pop(k, v) for k, v in _foreach_inputs_default_kwargs.items()} _foreach_inputs_kwargs["requires_grad"] = requires_grad _foreach_inputs_kwargs["zero_size"] = False allow_higher_dtype_scalars = kwargs.pop("allow_higher_dtype_scalars", False) # add empty tensor interspersion to test fully fixing #100701 for num_tensors, rightmost_arg_type, intersperse_empty_tensors in itertools.product( num_input_tensors, self._rightmost_arg_types, self._intersperse_empty): if intersperse_empty_tensors and (num_tensors != max(num_input_tensors) or str(device) == 'cpu'): # generate interspersed empty tensors for only 1 N on non-cpu device to lessen redundancy continue _foreach_inputs_kwargs["intersperse_empty_tensors"] = intersperse_empty_tensors input = sample_inputs_foreach( None, device, dtype, num_tensors, **_foreach_inputs_kwargs) args = [] if self.arity > 1: args = [ sample_inputs_foreach( None, device, dtype, num_tensors, **_foreach_inputs_kwargs) for _ in range(self.arity - 2) ] rightmost_arg_list = self._sample_rightmost_arg( opinfo, rightmost_arg_type, device, dtype, num_tensors, allow_higher_dtype_scalars, **_foreach_inputs_kwargs) for rightmost_arg in rightmost_arg_list: args.append(rightmost_arg) kwargs = self._sample_kwargs(opinfo, rightmost_arg, rightmost_arg_type, dtype) ref_args = args if rightmost_arg_type in (ForeachRightmostArgType.Scalar, ForeachRightmostArgType.Tensor): ref_args = args[:-1] + [[args[-1] for _ in range(num_tensors)]] sample = ForeachSampleInput(input, *args, ref_args=ref_args, **kwargs) yield sample args.pop() else: yield ForeachSampleInput( input, *args, disable_fastpath=self._should_disable_fastpath(opinfo, None, None, dtype), ) class foreach_max_sample_func(foreach_inputs_sample_func): def __init__( self, arity: int, rightmost_supports_scalar: bool, rightmost_supports_scalarlist: bool, rightmost_supports_tensor: bool = False, ) -> None: super().__init__(arity, rightmost_supports_scalar, rightmost_supports_scalarlist, rightmost_supports_tensor) self._intersperse_empty = (False,) def sample_zero_size_tensor_inputs(self, opinfo, device, dtype, requires_grad, **kwargs): return [] def _should_disable_fastpath(self, opinfo, rightmost_arg, rightmost_arg_type, dtype): return False class foreach_norm_sample_func(foreach_inputs_sample_func): def sample_zero_size_tensor_inputs(self, opinfo, device, dtype, requires_grad, **kwargs): assert "num_input_tensors" not in kwargs _foreach_inputs_kwargs = {k: kwargs.pop(k, v) for k, v in _foreach_inputs_default_kwargs.items()} _foreach_inputs_kwargs["requires_grad"] = requires_grad for ord in (0, 1, 2, -1, -2, float('inf'), float('-inf')): input = sample_inputs_foreach(None, device, dtype, NUM_SIZE0_TENSORS, zero_size=True, **_foreach_inputs_kwargs) disable_fastpath = True if ord in (1, 2, float('inf')) and dtype in floating_types_and(torch.half, torch.bfloat16): disable_fastpath 
= False yield ForeachSampleInput(input, ord=ord, disable_fastpath=disable_fastpath) def __call__(self, opinfo, device, dtype, requires_grad, **kwargs): num_input_tensors = kwargs.pop("num_input_tensors", foreach_num_tensors) assert isinstance(num_input_tensors, list) _foreach_inputs_kwargs = {k: kwargs.pop(k, v) for k, v in _foreach_inputs_default_kwargs.items()} _foreach_inputs_kwargs["requires_grad"] = requires_grad _allow_higher_dtype_scalars = kwargs.pop("allow_higher_dtype_scalars", False) for num_tensors, ord, out_dtype, intersperse_empty_tensors in product( num_input_tensors, (0, 1, 2, -1, -2, float('inf'), float('-inf')), (None,) + (torch.complex128,) if dtype in complex_types() else (torch.float64,), (True, False), ): # inf norm and negative norms on empty tensors is not supported by our reference func vector norm: # linalg.vector_norm cannot compute the inf norm on an empty tensor because the operation does not have an identity if (ord in [float('inf'), float('-inf')] or ord < 0) and intersperse_empty_tensors: continue _foreach_inputs_kwargs["intersperse_empty_tensors"] = intersperse_empty_tensors input = sample_inputs_foreach(None, device, dtype, num_tensors, zero_size=False, **_foreach_inputs_kwargs) disable_fastpath = True if ord in (1, 2, float('inf')) and dtype in floating_types_and(torch.half, torch.bfloat16): disable_fastpath = False yield ForeachSampleInput(input, ord=ord, disable_fastpath=disable_fastpath, dtype=out_dtype) # Also test nan propagation with a single tensor, but skip autograd testing if not requires_grad: nan_inputs = [ [float('nan')], [float('nan'), 1.0], [1.0, float('nan')], [1.0, 2.0, 3.0, float('nan'), float('nan'), 7.0, float('nan'), float('nan'), -1.5, 6.0], [7.0, 3.0, float('nan'), float('nan'), -1.5, 6.0], [3.0, float('nan'), float('nan'), -1.5, 6.0], ] for input in nan_inputs: x = torch.tensor(input, device=device) disable_fastpath = True if ord in (1, 2, float('inf')) and dtype in floating_types_and(torch.half, torch.bfloat16): disable_fastpath = False yield ForeachSampleInput([x], ord=ord, disable_fastpath=disable_fastpath) class foreach_pointwise_sample_func(foreach_inputs_sample_func): def __init__( self, arity: int = 3, rightmost_supports_scalar: bool = False, rightmost_supports_scalarlist: bool = False, ): super().__init__(arity, rightmost_supports_scalar, rightmost_supports_scalarlist) def _should_disable_fastpath(self, opinfo, rightmost_arg, rightmost_arg_type, dtype): return dtype in integral_types_and(torch.bool) and opinfo.ref in (torch.addcmul,) def sample_zero_size_tensor_inputs(self, opinfo, device, dtype, requires_grad, **kwargs): assert "num_input_tensors" not in kwargs _foreach_inputs_kwargs = {k: kwargs.pop(k, v) for k, v in _foreach_inputs_default_kwargs.items()} _foreach_inputs_kwargs["requires_grad"] = requires_grad # zero_size tensor input = sample_inputs_foreach(None, device, dtype, NUM_SIZE0_TENSORS, zero_size=True, **_foreach_inputs_kwargs) args = [ sample_inputs_foreach(None, device, dtype, NUM_SIZE0_TENSORS, zero_size=True, **_foreach_inputs_kwargs) for _ in range(2) ] if "scalars" in kwargs: del kwargs["scalars"] kwargs.update(self._sample_kwargs(opinfo, args[-1], ForeachRightmostArgType.TensorList, dtype)) yield ForeachSampleInput(input, *args, **kwargs) def __call__(self, opinfo, device, dtype, requires_grad, **kwargs): num_input_tensors_specified = "num_input_tensors" in kwargs num_input_tensors = kwargs.pop("num_input_tensors") if num_input_tensors_specified else foreach_num_tensors assert 
isinstance(num_input_tensors, list) _foreach_inputs_kwargs = {k: kwargs.pop(k, v) for k, v in _foreach_inputs_default_kwargs.items()} _foreach_inputs_kwargs["requires_grad"] = requires_grad allow_higher_dtype_scalars = kwargs.pop("allow_higher_dtype_scalars", False) for num_tensors, rightmost_arg_type, intersperse_empty_tensors in itertools.product( num_input_tensors, self._rightmost_arg_types, (True, False)): _foreach_inputs_kwargs["intersperse_empty_tensors"] = intersperse_empty_tensors input = sample_inputs_foreach(None, device, dtype, num_tensors, zero_size=False, **_foreach_inputs_kwargs) args = [ sample_inputs_foreach(None, device, dtype, num_tensors, zero_size=False, **_foreach_inputs_kwargs) for _ in range(2 - int(rightmost_arg_type == ForeachRightmostArgType.TensorList)) ] rightmost_arg_list = self._sample_rightmost_arg( opinfo, rightmost_arg_type, device, dtype, num_tensors, zero_size=False, allow_higher_dtype_scalars=False if intersperse_empty_tensors else allow_higher_dtype_scalars, **_foreach_inputs_kwargs, ) for rightmost_arg in rightmost_arg_list: kwargs = {} if rightmost_arg_type == ForeachRightmostArgType.TensorList: args.append(rightmost_arg) elif rightmost_arg_type in [ForeachRightmostArgType.Tensor, ForeachRightmostArgType.ScalarList]: kwargs["scalars"] = rightmost_arg else: kwargs["value"] = rightmost_arg kwargs.update(self._sample_kwargs(opinfo, rightmost_arg, rightmost_arg_type, dtype)) assert len(args) == 2, f"{len(args)=}" sample = ForeachSampleInput(input, *args, **kwargs) yield sample if rightmost_arg_type == ForeachRightmostArgType.TensorList: args.pop() foreach_unary_op_db: list[OpInfo] = [ ForeachFuncInfo( 'exp', sample_inputs_func=foreach_inputs_sample_func(1, False, False), dtypesIfHpu=custom_types(torch.float32), backward_requires_result=True, supports_autograd=True, supports_inplace_autograd=True, supports_forward_ad=True, decorators=( DecorateInfo( unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace", dtypes=integral_types_and(torch.bool,), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace", dtypes=integral_types_and(torch.bool,), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_meta_inplace", dtypes=integral_types_and(torch.bool,), ), ), ), ForeachFuncInfo( 'acos', sample_inputs_func=foreach_inputs_sample_func(1, False, False), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16), supports_autograd=True, supports_inplace_autograd=True, supports_forward_ad=True, decorators=( DecorateInfo( unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace", dtypes=integral_types_and(torch.bool,), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace", dtypes=integral_types_and(torch.bool,), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_meta_inplace", dtypes=integral_types_and(torch.bool,), ), ), ), ForeachFuncInfo( 'asin', sample_inputs_func=foreach_inputs_sample_func(1, False, False), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16), supports_autograd=True, supports_inplace_autograd=True, supports_forward_ad=True, decorators=( DecorateInfo( unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace", dtypes=integral_types_and(torch.bool,), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace", dtypes=integral_types_and(torch.bool,), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_meta_inplace", dtypes=integral_types_and(torch.bool,), ), ), ), ForeachFuncInfo( 'atan', 
sample_inputs_func=foreach_inputs_sample_func(1, False, False), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16), supports_autograd=True, supports_inplace_autograd=True, supports_forward_ad=True, decorators=( DecorateInfo( unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace", dtypes=integral_types_and(torch.bool,), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace", dtypes=integral_types_and(torch.bool,), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_meta_inplace", dtypes=integral_types_and(torch.bool,), ), ), ), ForeachFuncInfo( 'cos', sample_inputs_func=foreach_inputs_sample_func(1, False, False), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16), supports_autograd=True, supports_inplace_autograd=True, supports_forward_ad=True, decorators=( DecorateInfo( unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace", dtypes=integral_types_and(torch.bool,), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace", dtypes=integral_types_and(torch.bool,), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_meta_inplace", dtypes=integral_types_and(torch.bool,), ), ), ), ForeachFuncInfo( 'cosh', sample_inputs_func=foreach_inputs_sample_func(1, False, False), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16), supports_autograd=True, supports_inplace_autograd=True, supports_forward_ad=True, decorators=( DecorateInfo( unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace", dtypes=integral_types_and(torch.bool,), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace", dtypes=integral_types_and(torch.bool,), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_meta_inplace", dtypes=integral_types_and(torch.bool,), ), ), ), ForeachFuncInfo( 'log', sample_inputs_func=foreach_inputs_sample_func(1, False, False), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16), supports_autograd=True, supports_inplace_autograd=True, supports_forward_ad=True, decorators=( DecorateInfo( unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace", dtypes=integral_types_and(torch.bool,), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace", dtypes=integral_types_and(torch.bool,), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_meta_inplace", dtypes=integral_types_and(torch.bool,), ), ), ), ForeachFuncInfo( 'log10', sample_inputs_func=foreach_inputs_sample_func(1, False, False), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16), supports_autograd=True, supports_inplace_autograd=True, supports_forward_ad=True, decorators=( DecorateInfo( unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace", dtypes=integral_types_and(torch.bool,), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace", dtypes=integral_types_and(torch.bool,), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_meta_inplace", dtypes=integral_types_and(torch.bool,), ), ), ), ForeachFuncInfo( 'log2', sample_inputs_func=foreach_inputs_sample_func(1, False, False), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16), supports_autograd=True, supports_inplace_autograd=True, supports_forward_ad=True, decorators=( DecorateInfo( unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace", dtypes=integral_types_and(torch.bool,), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace", 
dtypes=integral_types_and(torch.bool,), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_meta_inplace", dtypes=integral_types_and(torch.bool,), ), ), ), ForeachFuncInfo( 'tan', sample_inputs_func=foreach_inputs_sample_func(1, False, False), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16), backward_requires_result=True, supports_autograd=True, supports_inplace_autograd=True, supports_forward_ad=True, decorators=( # due to https://github.com/pytorch/pytorch/pull/102427 enabling jiterator for complex DecorateInfo( unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace", dtypes=integral_types_and(torch.bool,), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace", dtypes=integral_types_and(torch.bool,), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_meta_inplace", dtypes=integral_types_and(torch.bool,), ), DecorateInfo( toleranceOverride( { torch.complex64: tol(atol=3e-04, rtol=2e-05) } ), 'TestForeach', 'test_parity', device_type='cuda' ), ), ), ForeachFuncInfo( 'tanh', sample_inputs_func=foreach_inputs_sample_func(1, False, False), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16), backward_requires_result=True, supports_autograd=True, supports_inplace_autograd=True, supports_forward_ad=True, decorators=( DecorateInfo( unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace", dtypes=integral_types_and(torch.bool,), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace", dtypes=integral_types_and(torch.bool,), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_meta_inplace", dtypes=integral_types_and(torch.bool,), ), DecorateInfo( toleranceOverride( {torch.complex64: tol(atol=5e-03, rtol=1e-04)} ), 'TestForeach', 'test_parity', device_type='cuda' ), ), ), ForeachFuncInfo( 'sin', sample_inputs_func=foreach_inputs_sample_func(1, False, False), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16), supports_autograd=True, supports_inplace_autograd=True, supports_forward_ad=True, decorators=( DecorateInfo( unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace", dtypes=integral_types_and(torch.bool,), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace", dtypes=integral_types_and(torch.bool,), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_meta_inplace", dtypes=integral_types_and(torch.bool,), ), ), ), ForeachFuncInfo( 'sinh', sample_inputs_func=foreach_inputs_sample_func(1, False, False), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16), supports_autograd=True, supports_inplace_autograd=True, supports_forward_ad=True, decorators=( DecorateInfo( unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace", dtypes=integral_types_and(torch.bool), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace", dtypes=integral_types_and(torch.bool), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_meta_inplace", dtypes=integral_types_and(torch.bool), ), ), ), ForeachFuncInfo( 'neg', sample_inputs_func=foreach_inputs_sample_func(1, False, False), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16), supports_autograd=True, supports_inplace_autograd=True, supports_forward_ad=True, decorators=( DecorateInfo( unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace", dtypes=(torch.bool,), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_dispatch_meta_outplace", dtypes=(torch.bool,), ), DecorateInfo( 
unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace", dtypes=(torch.bool,), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace", dtypes=(torch.bool,), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_meta_inplace", dtypes=(torch.bool,), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_meta_outplace", dtypes=(torch.bool,), ), DecorateInfo( unittest.expectedFailure, "TestForeach", "test_unary_op_tensors_on_different_devices", device_type="cuda", dtypes=(torch.bool,), ), ), ), ForeachFuncInfo( 'sqrt', sample_inputs_func=foreach_inputs_sample_func(1, False, False), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16), supports_autograd=True, supports_inplace_autograd=True, supports_forward_ad=True, backward_requires_result=True, decorators=( DecorateInfo( unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace", dtypes=integral_types_and(torch.bool), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace", dtypes=integral_types_and(torch.bool), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_meta_inplace", dtypes=integral_types_and(torch.bool), ), ), ), ForeachFuncInfo( 'rsqrt', sample_inputs_func=foreach_inputs_sample_func(1, False, False), supports_autograd=True, supports_inplace_autograd=True, supports_forward_ad=True, backward_requires_result=True, decorators=( DecorateInfo( unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace", dtypes=integral_types_and(torch.bool), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace", dtypes=integral_types_and(torch.bool), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_meta_inplace", dtypes=integral_types_and(torch.bool), ), ), ), ForeachFuncInfo( 'ceil', sample_inputs_func=foreach_inputs_sample_func(1, False, False), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32, torch.int8), supports_autograd=True, supports_inplace_autograd=True, supports_forward_ad=True, decorators=( DecorateInfo( unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace", dtypes=complex_types_and(torch.bool), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_dispatch_meta_outplace", dtypes=complex_types_and(torch.bool), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace", dtypes=complex_types_and(torch.bool), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace", dtypes=complex_types_and(torch.bool), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_meta_inplace", dtypes=complex_types_and(torch.bool), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_meta_outplace", dtypes=complex_types_and(torch.bool), ), DecorateInfo( unittest.expectedFailure, "TestForeach", "test_autodiff", device_type="cuda", dtypes=(torch.complex128,), ), ), ), ForeachFuncInfo( 'erf', sample_inputs_func=foreach_inputs_sample_func(1, False, False), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16), supports_autograd=True, supports_inplace_autograd=True, supports_forward_ad=True, decorators=( DecorateInfo( unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace", dtypes=integral_types_and(torch.bool) + complex_types(), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_dispatch_meta_outplace", dtypes=complex_types(), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace", 
dtypes=integral_types_and(torch.bool) + complex_types(), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace", dtypes=complex_types(), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_meta_inplace", dtypes=integral_types_and(torch.bool) + complex_types(), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_meta_outplace", dtypes=complex_types(), ), DecorateInfo( unittest.expectedFailure, "TestForeach", "test_autodiff", device_type="cuda", dtypes=(torch.complex128,), ), ), ), ForeachFuncInfo( 'erfc', sample_inputs_func=foreach_inputs_sample_func(1, False, False), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16), supports_autograd=True, supports_inplace_autograd=True, supports_forward_ad=True, decorators=( DecorateInfo( unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace", dtypes=integral_types_and(torch.bool) + complex_types(), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_dispatch_meta_outplace", dtypes=complex_types(), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace", dtypes=integral_types_and(torch.bool) + complex_types(), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace", dtypes=complex_types(), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_meta_inplace", dtypes=integral_types_and(torch.bool) + complex_types(), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_meta_outplace", dtypes=complex_types(), ), DecorateInfo( unittest.expectedFailure, "TestForeach", "test_autodiff", device_type="cuda", dtypes=(torch.complex128,), ), ), ), ForeachFuncInfo( 'expm1', sample_inputs_func=foreach_inputs_sample_func(1, False, False), dtypesIfHpu=custom_types(torch.float32), supports_autograd=True, supports_inplace_autograd=True, supports_forward_ad=True, backward_requires_result=True, decorators=( DecorateInfo( unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace", dtypes=integral_types_and(torch.bool), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace", dtypes=integral_types_and(torch.bool), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_meta_inplace", dtypes=integral_types_and(torch.bool), ), ), ), ForeachFuncInfo( 'floor', sample_inputs_func=foreach_inputs_sample_func(1, False, False), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16), supports_autograd=True, supports_inplace_autograd=True, supports_forward_ad=True, decorators=( DecorateInfo( unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace", dtypes=complex_types_and(torch.bool), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_dispatch_meta_outplace", dtypes=complex_types_and(torch.bool), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace", dtypes=complex_types_and(torch.bool), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace", dtypes=complex_types_and(torch.bool), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_meta_inplace", dtypes=complex_types_and(torch.bool), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_meta_outplace", dtypes=complex_types_and(torch.bool), ), DecorateInfo( unittest.expectedFailure, "TestForeach", "test_autodiff", device_type="cuda", dtypes=(torch.complex128,), ), ), ), ForeachFuncInfo( 'log1p', sample_inputs_func=foreach_inputs_sample_func(1, False, False), dtypesIfHpu=custom_types(torch.float32, 
torch.bfloat16), supports_autograd=True, supports_inplace_autograd=True, supports_forward_ad=True, decorators=( DecorateInfo( unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace", dtypes=integral_types_and(torch.bool), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace", dtypes=integral_types_and(torch.bool), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_meta_inplace", dtypes=integral_types_and(torch.bool), ), ), ), ForeachFuncInfo( 'round', sample_inputs_func=foreach_inputs_sample_func(1, False, False), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16), supports_autograd=True, supports_inplace_autograd=True, supports_forward_ad=True, decorators=( DecorateInfo( unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace", dtypes=complex_types_and(torch.bool), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_dispatch_meta_outplace", dtypes=complex_types_and(torch.bool), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace", dtypes=complex_types_and(torch.bool), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace", dtypes=complex_types_and(torch.bool), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_meta_inplace", dtypes=complex_types_and(torch.bool), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_meta_outplace", dtypes=complex_types_and(torch.bool), ), DecorateInfo( unittest.expectedFailure, "TestForeach", "test_autodiff", device_type="cuda", dtypes=(torch.complex128,), ), ), ), ForeachFuncInfo( 'frac', sample_inputs_func=foreach_inputs_sample_func(1, False, False), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16), supports_autograd=True, supports_inplace_autograd=True, supports_forward_ad=True, decorators=( DecorateInfo( unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace", dtypes=integral_types_and(torch.bool) + complex_types(), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_dispatch_meta_outplace", dtypes=integral_types_and(torch.bool) + complex_types(), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace", dtypes=integral_types_and(torch.bool) + complex_types(), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace", dtypes=integral_types_and(torch.bool) + complex_types(), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_meta_inplace", dtypes=integral_types_and(torch.bool) + complex_types(), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_meta_outplace", dtypes=integral_types_and(torch.bool) + complex_types(), ), DecorateInfo( unittest.expectedFailure, "TestForeach", "test_autodiff", device_type="cuda", dtypes=(torch.complex128,), ), ), ), ForeachFuncInfo( 'reciprocal', sample_inputs_func=foreach_inputs_sample_func(1, False, False), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16), supports_autograd=True, supports_inplace_autograd=True, supports_forward_ad=True, backward_requires_result=True, decorators=( DecorateInfo( unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace", dtypes=integral_types_and(torch.bool), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace", dtypes=integral_types_and(torch.bool), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_meta_inplace", dtypes=integral_types_and(torch.bool), ), ), ), ForeachFuncInfo( 'sigmoid', 
sample_inputs_func=foreach_inputs_sample_func(1, False, False), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.float16), supports_autograd=True, supports_inplace_autograd=True, supports_forward_ad=True, backward_requires_result=True, decorators=( DecorateInfo( unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace", dtypes=integral_types_and(torch.bool), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace", dtypes=integral_types_and(torch.bool), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_meta_inplace", dtypes=integral_types_and(torch.bool), ), ), ), ForeachFuncInfo( 'trunc', sample_inputs_func=foreach_inputs_sample_func(1, False, False), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32, torch.int8), supports_autograd=True, supports_inplace_autograd=True, supports_forward_ad=True, decorators=( DecorateInfo( unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace", dtypes=complex_types_and(torch.bool), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_dispatch_meta_outplace", dtypes=complex_types_and(torch.bool), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace", dtypes=complex_types_and(torch.bool), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace", dtypes=complex_types_and(torch.bool), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_meta_inplace", dtypes=complex_types_and(torch.bool), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_meta_outplace", dtypes=complex_types_and(torch.bool), ), DecorateInfo( unittest.expectedFailure, "TestForeach", "test_autodiff", device_type="cuda", dtypes=(torch.complex128,), ), ), ), ForeachFuncInfo( 'abs', sample_inputs_func=foreach_inputs_sample_func(1, False, False), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16), supports_autograd=True, supports_inplace_autograd=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, decorators=( DecorateInfo( unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace", dtypes=complex_types(), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace", dtypes=complex_types(), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_dispatch_meta_outplace", device_type="cpu", dtypes=(torch.bool,), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace", device_type="cpu", dtypes=(torch.bool,), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace", device_type="cpu", dtypes=(torch.bool,), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_meta_inplace", device_type="cpu", dtypes=(torch.bool,), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_meta_outplace", device_type="cpu", dtypes=(torch.bool,), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace", device_type="cpu", dtypes=(torch.bool,), ), DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace", dtypes=complex_types()), ), ), ForeachFuncInfo( 'zero', sample_inputs_func=foreach_inputs_sample_func(1, False, False), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16), supports_autograd=True, supports_inplace_autograd=True, supports_forward_ad=True, supports_out=False, ), ForeachFuncInfo( 'sign', sample_inputs_func=foreach_inputs_sample_func(1, False, False), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16), 
supports_autograd=True, supports_inplace_autograd=True, supports_forward_ad=True, decorators=( DecorateInfo( unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace", dtypes=complex_types(), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_dispatch_meta_outplace", dtypes=complex_types(), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace", dtypes=complex_types(), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace", dtypes=complex_types(), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_meta_inplace", dtypes=complex_types(), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_meta_outplace", dtypes=complex_types(), ), DecorateInfo( unittest.expectedFailure, "TestForeach", "test_autodiff", device_type="cuda", dtypes=(torch.complex128,), ), ), ), ForeachFuncInfo( 'lgamma', sample_inputs_func=foreach_inputs_sample_func(1, False, False), supports_autograd=True, supports_inplace_autograd=True, supports_forward_ad=True, decorators=( DecorateInfo(unittest.skip("In-place lgamma not supported for integral tensors"), "TestMeta", "test_dispatch_symbolic_meta_inplace", dtypes=integral_types_and(torch.bool)), # DecorateInfo(unittest.skip("In-place lgamma not supported for integral tensors"), "TestMeta", # "test_dispatch_meta_inplace", dtypes=integral_types_and(torch.bool)), DecorateInfo(unittest.skip("In-place lgamma not supported for integral tensors"), "TestMeta", "test_meta_inplace", dtypes=integral_types_and(torch.bool)), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace", dtypes=complex_types() + integral_types_and(torch.bool), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_dispatch_meta_outplace", dtypes=complex_types(), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace", dtypes=complex_types() + integral_types_and(torch.bool), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace", dtypes=complex_types(), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_meta_inplace", dtypes=complex_types() + integral_types_and(torch.bool), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_meta_outplace", dtypes=complex_types(), ), DecorateInfo( unittest.expectedFailure, "TestForeach", "test_autodiff", device_type="cuda", dtypes=(torch.complex128,), ), ), ), ] foreach_binary_op_db: list[OpInfo] = [ ForeachFuncInfo( "add", sample_inputs_func=foreach_inputs_sample_func(2, True, True, True), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.float16, torch.int32), supports_alpha_param=True, supports_autograd=True, supports_inplace_autograd=True, supports_forward_ad=True, decorators=( # These tests fail with aten._local_scalar_dense not being implemented. DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_outplace"), DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace", dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16)), # Samples have complex types and inplace only works if the dtype is complex. 
DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace", dtypes=(torch.bool,)), DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace", dtypes=(torch.bool,)), DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace_all_strides", dtypes=integral_types() + complex_types_and(torch.bool, torch.bfloat16, torch.float16, torch.float64)), ), ), ForeachFuncInfo( "sub", sample_inputs_func=foreach_inputs_sample_func(2, True, True), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16), supports_alpha_param=True, supports_autograd=True, supports_inplace_autograd=True, supports_forward_ad=True, decorators=( DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace"), DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace"), DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace"), DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_outplace"), DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace"), DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_outplace"), DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace_all_strides"), DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace_all_strides"), DecorateInfo(unittest.skip("consistently fails internally and causes other tests to appear flaky"), "TestForeach", "test_parity", dtypes=(torch.complex128,), active_if=lambda kwargs: IS_FBCODE and not kwargs["noncontiguous"]), ), ), ForeachFuncInfo( "mul", sample_inputs_func=foreach_inputs_sample_func(2, True, True, True), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16), supports_autograd=True, supports_inplace_autograd=True, supports_forward_ad=True, decorators=( # Samples have complex types and inplace only works if the dtype is complex. DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace", dtypes=(torch.bool,)), DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace", dtypes=(torch.bool,)), DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace", dtypes=(torch.bool,)), DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace_all_strides", dtypes=(torch.bool,)), DecorateInfo(unittest.skip("consistently fails internally and causes other tests to appear flaky"), "TestForeach", "test_parity", dtypes=(torch.complex128,), active_if=lambda kwargs: IS_FBCODE and not kwargs["noncontiguous"]), ), ), ForeachFuncInfo( "div", sample_inputs_func=foreach_inputs_sample_func(2, True, True, True), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.float16, torch.int32, torch.int8), supports_autograd=True, supports_inplace_autograd=True, supports_forward_ad=True, decorators=( # Samples have complex types and inplace only works if the dtype is complex. 
DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace", dtypes=integral_types_and(torch.bool)), DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace", dtypes=integral_types_and(torch.bool)), DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace", dtypes=integral_types_and(torch.bool)), DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace_all_strides", dtypes=integral_types_and(torch.bool)), ), ), ForeachFuncInfo( "clamp_min", sample_inputs_func=foreach_inputs_sample_func(2, True, True), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.float16, torch.int64, torch.int32, torch.int8, torch.bool), supports_autograd=True, supports_inplace_autograd=True, supports_forward_ad=True, decorators=( DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace", dtypes=complex_types_and(torch.bool)), DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace", dtypes=complex_types_and(torch.bool)), DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace", dtypes=complex_types_and(torch.bool)), DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_outplace", dtypes=complex_types_and(torch.bool)), DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace", dtypes=complex_types_and(torch.bool)), DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_outplace", dtypes=complex_types_and(torch.bool)), DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace_all_strides", dtypes=complex_types_and(torch.bool)), DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace_all_strides", dtypes=complex_types_and(torch.bool)), DecorateInfo( unittest.expectedFailure, "TestForeach", "test_autodiff", device_type="cuda", dtypes=(torch.complex128,), ), DecorateInfo( unittest.expectedFailure, "TestForeach", "test_binary_op_scalar_with_overlapping_tensors", dtypes=complex_types(), ), ), ), ForeachFuncInfo( "clamp_max", sample_inputs_func=foreach_inputs_sample_func(2, True, True), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.float16, torch.int64, torch.int32, torch.int8, torch.bool), supports_autograd=True, supports_inplace_autograd=True, supports_forward_ad=True, decorators=( DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace", dtypes=complex_types_and(torch.bool)), DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace", dtypes=complex_types_and(torch.bool)), DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace", dtypes=complex_types_and(torch.bool)), DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_outplace", dtypes=complex_types_and(torch.bool)), DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace", dtypes=complex_types_and(torch.bool)), DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_outplace", dtypes=complex_types_and(torch.bool)), DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace_all_strides", dtypes=complex_types_and(torch.bool)), DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace_all_strides", dtypes=complex_types_and(torch.bool)), DecorateInfo( unittest.expectedFailure, "TestForeach", "test_autodiff", device_type="cuda", dtypes=(torch.complex128,), ), DecorateInfo( 
unittest.expectedFailure, "TestForeach", "test_binary_op_scalar_with_overlapping_tensors", dtypes=complex_types(), ), ), ), # note(crcrpar): forward ad not implemented. ForeachFuncInfo( "minimum", sample_inputs_func=foreach_inputs_sample_func(2, True, True), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32), supports_autograd=True, supports_inplace_autograd=False, supports_forward_ad=False, decorators=( DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace", dtypes=complex_types_and(torch.bool)), DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace", dtypes=complex_types_and(torch.bool)), DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace", dtypes=complex_types_and(torch.bool)), DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_outplace", dtypes=complex_types_and(torch.bool)), DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace", dtypes=complex_types_and(torch.bool)), DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_outplace", dtypes=complex_types_and(torch.bool)), DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace_all_strides", dtypes=complex_types_and(torch.bool)), DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace_all_strides", dtypes=complex_types_and(torch.bool)), DecorateInfo( unittest.expectedFailure, "TestForeach", "test_autodiff", device_type="cuda", dtypes=(torch.complex128,), ), DecorateInfo( unittest.expectedFailure, "TestForeach", "test_binary_op_scalar_with_overlapping_tensors", dtypes=complex_types(), ), ), ), # note(crcrpar): forward ad not implemented. ForeachFuncInfo( "maximum", sample_inputs_func=foreach_inputs_sample_func(2, True, True), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32), supports_autograd=True, supports_forward_ad=False, supports_inplace_autograd=False, decorators=( DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace", dtypes=complex_types_and(torch.bool)), DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace", dtypes=complex_types_and(torch.bool)), DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace", dtypes=complex_types_and(torch.bool)), DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_outplace", dtypes=complex_types_and(torch.bool)), DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace", dtypes=complex_types_and(torch.bool)), DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_outplace", dtypes=complex_types_and(torch.bool)), DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace_all_strides", dtypes=complex_types_and(torch.bool)), DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace_all_strides", dtypes=complex_types_and(torch.bool)), DecorateInfo( unittest.expectedFailure, "TestForeach", "test_autodiff", device_type="cuda", dtypes=(torch.complex128,), ), DecorateInfo( unittest.expectedFailure, "TestForeach", "test_binary_op_scalar_with_overlapping_tensors", dtypes=complex_types(), ), ), ), ForeachFuncInfo( "pow", supports_alpha_param=False, supports_scalar_self_arg=True, sample_inputs_func=foreach_inputs_sample_func(2, True, True), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.float16, torch.int32, torch.int8, torch.bool), supports_autograd=True, 
supports_inplace_autograd=True, supports_forward_ad=True, decorators=( DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace", dtypes=(torch.bool,)), DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace", dtypes=(torch.bool,)), DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace", dtypes=(torch.bool,)), DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_outplace", dtypes=(torch.bool,)), DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_outplace", dtypes=(torch.bool,)), DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace", dtypes=(torch.bool,)), DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_outplace", dtypes=(torch.bool,)), DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace", dtypes=(torch.bool,),), DecorateInfo(unittest.skip("flaky"), "TestForeach", "test_parity", device_type="cpu", dtypes=(torch.complex64,)), DecorateInfo( unittest.skip("failed starting on ROCm 6.2"), "TestForeach", "test_parity", device_type="cuda", dtypes=(torch.complex64,), active_if=TEST_WITH_ROCM), DecorateInfo( unittest.expectedFailure, "TestForeach", "test_binary_op_with_scalar_self_support", device_type="cuda", dtypes=(torch.bool,), active_if=lambda kwargs: kwargs["is_fastpath"], ), ), backward_requires_result=True, ), ForeachFuncInfo( "copy", sample_inputs_func=foreach_inputs_sample_func(2, False, False), supports_out=False, supports_forward_ad=False, supports_autograd=False, supports_inplace_autograd=False, ) ] foreach_pointwise_op_db: list[ForeachFuncInfo] = [ ForeachFuncInfo( "addcmul", sample_inputs_func=foreach_pointwise_sample_func(4, True, True), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16), supports_autograd=True, supports_inplace_autograd=True, supports_forward_ad=True, decorators=( DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_outplace", dtypes=(torch.bool,)), DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace", dtypes=(torch.bool,)), DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_outplace", dtypes=(torch.bool,)), DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace_all_strides", dtypes=(torch.bool,)), # # Samples have complex types and inplace only works if the dtype is complex. DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace", dtypes=(torch.bool,)), DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace", dtypes=(torch.bool,)), DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace", dtypes=(torch.bool,)), DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace_all_strides", dtypes=integral_types() + complex_types_and(torch.bool)), ), ), ForeachFuncInfo( "addcdiv", sample_inputs_func=foreach_pointwise_sample_func(4, True, True), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16), supports_autograd=True, supports_inplace_autograd=True, supports_forward_ad=True, decorators=( # Samples have complex types and inplace only works if the dtype is complex. 
DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace", dtypes=integral_types_and(torch.bool)), DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace", dtypes=integral_types_and(torch.bool)), DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace", dtypes=integral_types_and(torch.bool)), DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace_all_strides", dtypes=integral_types() + complex_types_and(torch.bool)), # fails with div_cpu is not implemented with ComplexHalf DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_outplace", dtypes=integral_types_and(torch.bool)), DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace", dtypes=integral_types_and(torch.bool)), DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_outplace", dtypes=integral_types_and(torch.bool)), DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace_all_strides", dtypes=integral_types() + complex_types_and(torch.bool)), ), ), ] foreach_reduce_op_db: list[ForeachFuncInfo] = [ ForeachFuncInfo( "max", sample_inputs_func=foreach_max_sample_func(1, False, False), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32), supports_autograd=True, supports_inplace_autograd=True, supports_forward_ad=True, decorators=( # no complex support for ordering ops like max DecorateInfo( unittest.expectedFailure, "TestForeach", "test_autodiff", dtypes=(torch.complex128, torch.complex64), ), DecorateInfo( unittest.expectedFailure, "TestForeach", "test_foreach_reduce_large_input", dtypes=(torch.complex128, torch.complex64), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace", dtypes=(torch.complex128, torch.complex64), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_meta_outplace", dtypes=(torch.complex128, torch.complex64), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_dispatch_meta_outplace", dtypes=(torch.complex128, torch.complex64), ), ), ), ForeachFuncInfo( "norm", sample_inputs_func=foreach_norm_sample_func(1, False, False), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16), supports_autograd=True, supports_inplace_autograd=True, supports_forward_ad=True, decorators=( DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace"), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_dispatch_meta_outplace", dtypes=integral_types_and(torch.bool), ), DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace"), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace", dtypes=integral_types_and(torch.bool), ), DecorateInfo(unittest.expectedFailure, "TestMeta", "test_meta_inplace"), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_meta_outplace", dtypes=integral_types_and(torch.bool), ), DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace_all_strides"), DecorateInfo( unittest.expectedFailure, "TestForeach", "test_foreach_reduce_large_input", device_type="cuda", dtypes=integral_types_and(torch.bool), ), ), ), ] foreach_other_op_db: list[ForeachFuncInfo] = [ ForeachFuncInfo( "lerp", sample_inputs_func=foreach_inputs_sample_func(3, True, True), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16), supports_autograd=True, supports_inplace_autograd=True, supports_forward_ad=True, decorators=( DecorateInfo( 
unittest.expectedFailure, "TestMeta", "test_dispatch_meta_inplace", dtypes=integral_types_and(torch.bool), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_dispatch_meta_outplace", dtypes=integral_types_and(torch.bool), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace", dtypes=integral_types_and(torch.bool), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace", dtypes=integral_types_and(torch.bool), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_meta_inplace", dtypes=integral_types_and(torch.bool), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_meta_outplace", dtypes=integral_types_and(torch.bool), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_inplace_all_strides", dtypes=integral_types_and(torch.bool), ), DecorateInfo( unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace_all_strides", dtypes=integral_types_and(torch.bool), ), ), ), ] def reference_sign(x): if x.dtype == np.bool_: # `np.sign` doesn't support `bool`. # >>> np.sign(True) # ufunc 'sign' did not contain a loop # with signature matching types dtype('bool') -> dtype('bool') return np.sign(x, dtype=np.uint8).astype(np.bool_) return np.sign(x) def reference_sgn(x): # NumPy doesn't have an equivalent to `torch.sgn` when the dtype is complex. # For complex inputs, `np.sign` returns sign(x.real) + 0j if x.real != 0 else sign(x.imag) + 0j. # while `torch.sgn` returns, 0 if abs(input) == 0 else input/abs(input) if x.dtype not in [np.complex64, np.complex128]: return reference_sign(x) out = (x / np.abs(x)) if out.ndim == 0: # Handle x == 0 case if (x == 0): # Can't assign to np.complex object # So make a new one. return np.array(complex(0, 0), dtype=x.dtype) return out # Handle x == 0 case mask = (x == 0) out[mask] = complex(0, 0) return out def reference_sigmoid(x): # 'scipy.special.expit' not supported for the input types if x.dtype in [np.complex64, np.complex128]: return (1 / (1 + np.exp(-x))) return scipy.special.expit(x) def reference_logsigmoid(x): return np.where( x < 0, x - np.log1p(np.exp(x)), -np.log1p(np.exp(-x))) def reference_hardsigmoid(x): intermediate = x / 6 + 0.5 y = np.clip(intermediate, 0, None) return np.where(y > 1, 1, y).astype(x.dtype) def reference_lgamma(x): # scipy.special.gammaln returns `-inf` when input is `-inf`. # While Pytorch, C and C++, all return `inf` when input is `-inf`. # Reference: # https://en.cppreference.com/w/cpp/numeric/math/lgamma # https://en.cppreference.com/w/c/numeric/math/lgamma # To handle the above discrepancy, # we replace -inf with inf so values # that were originally -inf map to inf as expected if x.dtype.kind == 'f': x = np.where(x == float('-inf'), np.array(float('inf'), dtype=x.dtype), x) out = scipy.special.gammaln(x) if x.dtype == np.float16: # `scipy.special.gammaln` returns output of float32 when input is float16, # while `torch.lgamma` preserves `float16`. But due to smaller range of float16, # Pytorch version outputs `inf` while SciPy returns finite values. 
out = out.astype(np.float16) return out def reference_mvlgamma(x, d): if x.dtype == np.float16: return scipy.special.multigammaln(x, d).astype(np.float16) return scipy.special.multigammaln(x, d) def reference_softplus(input, beta=1, threshold=20): non_linear = input * beta <= threshold output = input.copy() output[non_linear] = np.log(1 + np.exp(beta * input[non_linear])) / beta return output def reference_gelu(X, *, approximate='none'): def _gelu_ref(X): return X * stats.norm.cdf(X) def _tanh_gelu_ref(X): M_SQRT_2_PI = math.sqrt(2 / math.pi) Z = M_SQRT_2_PI * (X + 0.044715 * np.power(X, 3.0)) return 0.5 * X * (1.0 + np.tanh(Z)) if approximate == 'tanh': return _tanh_gelu_ref(X) else: return _gelu_ref(X) def reference_one_hot(a: npt.NDArray, num_classes: int = -1) -> npt.NDArray: if num_classes == -1: num_classes = int(np.amax(a) + 1) idcs = a.reshape(-1) + np.arange(0, a.size, dtype=np.int64) * num_classes one_hot = np.zeros((a.size, num_classes), dtype=a.dtype) np.put(one_hot, idcs, 1) return one_hot.reshape(*a.shape, -1) def reference_mse_loss(input, target, reduction="mean"): se = (input - target) ** 2 if reduction == "mean": return np.mean(se) elif reduction == "sum": return np.sum(se) else: # reduction == "none" return se def reference_layer_norm(inp: npt.NDArray, normalized_shape: tuple[int], weight=None, bias=None, eps=1e-5): return reference_native_layer_norm(inp, normalized_shape, weight, bias, eps)[0] def reference_native_layer_norm(inp: npt.NDArray, normalized_shape: tuple[int], weight, bias, eps): feature_size = np.prod(normalized_shape) inp_view = inp.reshape(-1, feature_size) # type: ignore[call-overload] mean = inp_view.mean(axis=-1, keepdims=True) var = inp_view.var(axis=-1, ddof=0, keepdims=True) Y = (inp_view - mean) / np.sqrt(var + eps) if weight is None and bias is not None: Y = Y + bias.reshape(-1) elif weight is not None and bias is None: Y = Y * weight.reshape(-1) elif weight is not None and bias is not None: Y = Y * weight.reshape(-1) + bias.reshape(-1) axis = inp.ndim - len(normalized_shape) stat_shape = inp.shape[:axis] + (1,) * len(normalized_shape) return Y.reshape(*inp.shape), mean.reshape(stat_shape), (1.0 / np.sqrt(var + eps)).reshape(stat_shape) def reference_rms_norm(inp: npt.NDArray, normalized_shape: tuple[int], weight=None, eps=None): if eps is None: eps = torch.finfo(numpy_to_torch_dtype(inp.dtype)).eps feature_size = np.prod(normalized_shape) inp_view = inp.reshape(-1, feature_size) # type: ignore[call-overload] rms = np.sqrt((inp_view**2).mean(axis=-1, keepdims=True) + eps) Y = inp_view / rms if weight is not None: Y = Y * weight.reshape(-1) return Y.reshape(*inp.shape) def reference_group_norm(inp: npt.NDArray, num_groups: int, weight=None, bias=None, eps=1e-5): inp_view = inp if np.prod(inp.shape) != 0: inp_view = inp.reshape((inp.shape[0], num_groups, -1)) mean = inp_view.mean(axis=-1, keepdims=True) var = inp_view.var(axis=-1, ddof=0, keepdims=True) Y = (inp_view - mean) / np.sqrt(var + eps) Y = Y.reshape(inp.shape) if weight is not None: # weight is a vector of length equal to the channel if len(Y.shape) > 2: weight = np.expand_dims(weight, [0] + [idx + 2 for idx in range(inp.ndim - 2)]) Y = Y * weight if bias is not None: # bias is a vector of length equal to the channel if len(Y.shape) > 2: bias = np.expand_dims(bias, [0] + [idx + 2 for idx in range(inp.ndim - 2)]) Y = Y + bias return Y # using a custom reference function since numpy only has a string side arg (instead of right and side) and doesn't # have an out_int32 arg. 
Additionally, numpy doesn't support searchsorted with ND arrays, so this splits those into # stacked 1D cases def reference_searchsorted(sorted_sequence, boundary, out_int32=False, right=False, side='left', sorter=None): side = 'right' if (right or side == 'right') else 'left' if len(sorted_sequence.shape) == 1 : ret = np.searchsorted(sorted_sequence, boundary, side=side, sorter=sorter) return ret.astype(np.int32) if out_int32 else ret elif sorted_sequence.shape[0] == 0: if sorter is not None: sorter = sorter.flatten() ret = np.searchsorted(sorted_sequence.flatten(), boundary.flatten(), side=side, sorter=sorter) ret = ret.astype(np.int32) if out_int32 else ret return ret.reshape(boundary.shape) else: # numpy searchsorted only supports 1D inputs so we split up ND inputs orig_shape = boundary.shape num_splits = np.prod(sorted_sequence.shape[:-1]) splits = range(0, num_splits) sorted_sequence, boundary = sorted_sequence.reshape(num_splits, -1), boundary.reshape(num_splits, -1) if sorter is not None: sorter = sorter.reshape(num_splits, -1) split_sequence = [sorted_sequence[i] for i in splits] split_boundary = [boundary[i] for i in splits] split_sorter = [sorter[i] if (sorter is not None) else None for i in splits] split_ret = [np.searchsorted(s_seq, b, side=side, sorter=s_sort) for (s_seq, b, s_sort) in zip(split_sequence, split_boundary, split_sorter)] split_ret = [i.astype(np.int32) for i in split_ret] if out_int32 else split_ret return np.stack(split_ret).reshape(orig_shape) def loss_reference_reduction_wrapper(fn): def wrapper(input, target, *, size_average=None, reduce=None, reduction="mean", **other_kwargs): if size_average is not None or reduce is not None: raise RuntimeError( "The keyword arguments 'size_average' and 'reduce' are deprecated and not supported by this wrapper" ) output = fn(input, target, **other_kwargs) if reduction == "mean": return np.mean(output) elif reduction == "sum": return np.sum(output) else: # reduction == "none" return output return wrapper @loss_reference_reduction_wrapper def reference_smooth_l1_loss(input, target, beta=1.0): diff = input - target abs_diff = np.abs(diff) above_threshold = abs_diff >= beta loss = np.empty_like(input) loss[above_threshold] = abs_diff[above_threshold] - 0.5 * beta loss[~above_threshold] = diff[~above_threshold] ** 2 / (2 * beta) return loss def reference_std_var(f): """Forwards unbiased/correction kwargs as NumPy's equivalent ddof""" g = reference_reduction_numpy(f) @wraps(g) def wrapper(x: npt.NDArray, *args, **kwargs): assert not ('unbiased' in kwargs and 'correction' in kwargs) if 'unbiased' in kwargs: kwargs['ddof'] = int(kwargs.pop('unbiased')) elif 'correction' in kwargs: kwargs['ddof'] = kwargs.pop('correction') return g(x, *args, **kwargs) return wrapper def generate_std_var_kwargs(t: torch.Tensor, **kwargs): """Generates unbiased/correction kwargs for std/var operators""" yield ((), {'unbiased': True}) yield ((), {'unbiased': False}) # Currently, calling std with correction is only enabled when # both dim and keepdim are provided. if 'dim' in kwargs and 'keepdim' in kwargs: yield ((), {'correction': 0}) yield ((), {'correction': 1}) numel = torch.tensor(t.shape)[kwargs.get('dim')].prod() yield ((), {'correction': numel // 2}) def error_inputs_mean(op_info, device, is_ref=False, **kwargs): if is_ref: err_msg1 = (r"mean\(\): could not infer output dtype. " r"Input dtype must be either a floating point or complex dtype. " r"Got: torch.int64") else: err_msg1 = (r"mean\(\): could not infer output dtype. 
" r"Input dtype must be either a floating point or complex dtype. " r"Got: Long") yield ErrorInput( SampleInput(make_tensor((3, 4, 5), dtype=torch.int64, device=device), []), error_regex=err_msg1, ) if is_ref: err_msg2 = (r"mean\(\): could not infer output dtype. " r"Optional dtype must be either a floating point or complex dtype. " r"Got: torch.int64") else: err_msg2 = (r"mean\(\): could not infer output dtype. " r"Optional dtype must be either a floating point or complex dtype. " r"Got: Long") yield ErrorInput( SampleInput( make_tensor((3, 4, 5), dtype=torch.float32, device=device), [], dtype=torch.int64), error_regex=err_msg2 ) # numpy implementation of torch.flatten # unfortunately there's no np.flatten. we figure out the desired shape and call np.reshape def reference_flatten(input, start_dim=0, end_dim=-1): in_shape = input.shape in_rank = len(in_shape) for d in start_dim, end_dim: if not ((in_rank == 0 and d in (-1, 0)) or -in_rank <= d < in_rank): raise IndexError(f"Dimension out of range (expected to be in range of [{-in_rank}, {in_rank - 1}], but got {d}") end_dim = end_dim if end_dim >= 0 else in_rank + end_dim start_dim = start_dim if start_dim >= 0 else in_rank + start_dim if in_rank == 0: end_dim = start_dim if end_dim < start_dim: raise RuntimeError("flatten() has invalid args: start_dim cannot come after end_dim") flatten_bit_dim = functools.reduce(operator.mul, in_shape[start_dim:end_dim + 1], 1) out_shape = in_shape[:start_dim] + (flatten_bit_dim,) + in_shape[end_dim + 1:] return np.reshape(input, out_shape) def sample_inputs_alias_copy(op_info, device, dtype, requires_grad, **kwargs): yield SampleInput(make_tensor((S,), dtype=dtype, device=device, requires_grad=requires_grad)) yield SampleInput(make_tensor((), dtype=dtype, device=device, requires_grad=requires_grad)) # Operator database (sorted alphabetically) op_db: list[OpInfo] = [ UnaryUfuncInfo('abs', aliases=('absolute', ), ref=np.abs, dtypes=all_types_and_complex_and(torch.half, torch.bfloat16, torch.chalf), dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16), skips=( DecorateInfo(unittest.skip("In-place abs not supported for complex tensors"), 'TestBwdGradients', 'test_inplace_grad', dtypes=(torch.cdouble,)), DecorateInfo(unittest.skip("In-place abs not supported for complex tensors"), 'TestBwdGradients', 'test_inplace_gradgrad', dtypes=(torch.cdouble,)), DecorateInfo(unittest.skip("In-place abs not supported for complex tensors"), 'TestFwdGradients', 'test_inplace_forward_mode_AD', dtypes=(torch.cdouble,)), DecorateInfo(unittest.skip("In-place abs not supported for complex tensors"), "TestSparseUnaryUfuncs", "test_inplace", dtypes=(torch.cdouble, torch.cfloat, torch.chalf)), # Reference: https://github.com/pytorch/pytorch/issues/49224 DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small', dtypes=[torch.int8], active_if=TEST_WITH_ASAN), # TODO: Fix test_out_arg_all_dtypes as torch.empty_like(expected_output) where expected_output=op(input) # We can break the logic of the loop over all possible types but it is OK. 
# https://github.com/pytorch/pytorch/blob/master/test/test_unary_ufuncs.py#L440-L449 DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_out_arg_all_dtypes', dtypes=[torch.cfloat, torch.cdouble]), DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_meta_inplace', dtypes=(torch.cdouble, torch.cfloat, torch.chalf)), DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_meta_inplace', dtypes=(torch.cdouble, torch.cfloat, torch.chalf)), DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_symbolic_meta_inplace', dtypes=(torch.cdouble, torch.cfloat, torch.chalf)), DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_symbolic_meta_inplace_all_strides', dtypes=(torch.cdouble, torch.cfloat, torch.chalf)), ), supports_fwgrad_bwgrad=True, assert_autodiffed=True, supports_sparse=True, supports_sparse_csr=True, supports_sparse_csc=True, supports_sparse_bsr=True, supports_sparse_bsc=True, supports_forward_ad=True), # NOTE: CPU complex acos produces incorrect outputs (https://github.com/pytorch/pytorch/issues/42952) UnaryUfuncInfo('acos', aliases=('arccos', ), ref=np.arccos, domain=(-1, 1), dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16), assert_autodiffed=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, promotes_int_to_float=True, decorators=(precisionOverride({torch.float16: 1e-2, torch.bfloat16: 1e-1, torch.complex64: 1e-2}),), skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal', device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS), # Failing with wrong imaginary sign on at least some Windows jobs DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small', device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS), # Failing with wrong imaginary sign on at least some Windows jobs DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS), DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', 'test_fn_grad', dtypes=[torch.cdouble], active_if=IS_WINDOWS), DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', 'test_method_grad', dtypes=[torch.cdouble], active_if=IS_WINDOWS), DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', 'test_inplace_grad', dtypes=[torch.cdouble], active_if=IS_WINDOWS), DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients', 'test_forward_mode_AD', dtypes=[torch.cdouble], active_if=IS_WINDOWS), DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients', 'test_inplace_forward_mode_AD', dtypes=[torch.cdouble], active_if=IS_WINDOWS),)), # NOTE: the derivative for inplace acosh is not implemented UnaryUfuncInfo('acosh', aliases=('arccosh', ), ref=np.arccosh, domain=(1, None), dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16), decorators=(precisionOverride({torch.bfloat16: 5e-2}),), supports_inplace_autograd=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, promotes_int_to_float=True, 
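# Added illustrative note (not in the original): promotes_int_to_float means integer
# inputs produce floating-point outputs, e.g. torch.acosh(torch.tensor([2])) gives
# roughly tensor([1.3170]), matching np.arccosh(2) ≈ 1.3169579.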
skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal', device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS), # Failing with wrong imaginary sign on at least some Windows jobs DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small', device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS), ), # acosh is not defined at x < 1 (real) reference_numerics_filter=NumericsFilter( condition=lambda x: (x < 1 if not x.is_complex() else torch.zeros_like(x, dtype=torch.bool)), safe_val=2)), BinaryUfuncInfo('add', # NumPy has no builtin reference for the alpha kwarg, but it is easy enough to emulate ref=lambda input, other, *, alpha=1: np.add(input, other) if alpha == 1 \ else np.add(input, np.multiply(alpha, other)), dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32), assert_autodiffed=True, sample_inputs_func=sample_inputs_add_sub, supports_fwgrad_bwgrad=True, supports_forward_ad=True, supports_two_python_scalars=True, decorators=( DecorateInfo( toleranceOverride({torch.chalf: tol(atol=1e-2, rtol=0)}), 'TestBinaryUfuncs', 'test_reference_numerics'), ), skips=( # boolean alpha not handled properly DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness', dtypes=(torch.bool,)), DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_numpy_refs', dtypes=(torch.complex128,)), DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics_extremal_values', dtypes=(torch.complex64, torch.complex128)), )), OpInfo('item', op=lambda inp, *args, **kwargs: wrapper_set_seed(torch.Tensor.item, inp, *args, **kwargs), ref=np.ndarray.item, method_variant=None, dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16, torch.chalf, torch.bool), dtypesIfHpu=custom_types(torch.float32), supports_out=False, supports_autograd=False, error_inputs_func=error_inputs_item, sample_inputs_func=sample_inputs_item, skips=( # Error testing item function variant DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32, torch.complex64)), # FX failed to normalize op - add the op to the op_skip list. DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), # RuntimeError: Composite compliance check failed with the above error. 
DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_operator'), # Booleans mismatch: AssertionError: False is not true DecorateInfo(unittest.expectedFailure, 'TestFakeTensor', 'test_fake_autocast'), # Booleans mismatch: AssertionError: False is not true DecorateInfo(unittest.expectedFailure, 'TestFakeTensor', 'test_fake'), )), OpInfo('arange', dtypes=all_types_and(torch.bfloat16, torch.float16), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32, torch.int8), supports_out=True, supports_autograd=False, is_factory_function=True, error_inputs_func=error_inputs_arange, sample_inputs_func=sample_inputs_arange, skips=( # https://github.com/pytorch/pytorch/issues/81774 DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), # Tests that assume input is a tensor or sequence of tensors DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), # Lazy tensor failures DecorateInfo(unittest.expectedFailure, 'TestLazyOpInfo', 'test_dispatched_to_lazy'), DecorateInfo(unittest.skip("Skipped!"), 'TestLazyOpInfo', 'test_correctness'), DecorateInfo(unittest.skip("Skipped!"), 'TestLazyOpInfo', 'test_correctness_with_reusing_ir'), # Exception raised from analyzeImpl at ../torch/csrc/jit/ir/alias_analysis.cpp:608 # We don't have an op for aten::arange but it isn't a special case. # Argument types: bool, bool, bool, int, int, Device, boo DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness'), # Captured graph does not contain aten::arange (succeeds on complex!) # g: graph(): # %25 : Long(1, strides=[1], requires_grad=0, device=cpu) = prim::Constant[value={1}]() # return (%25) DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)), # UserWarning not triggered : Resized a non-empty tensor but did not warn about it. DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), )), OpInfo('cauchy', op=lambda inp, *args, **kwargs: wrapper_set_seed(torch.Tensor.cauchy_, inp, *args, **kwargs), inplace_variant=torch.Tensor.cauchy_, dtypes=floating_types_and(torch.float16, torch.bfloat16), supports_out=False, supports_autograd=False, allow_cow_input_materialize_forward=[0], sample_inputs_func=sample_inputs_cauchy, error_inputs_func=error_inputs_cauchy, skips=( # Tests that assume input tensor has a meaningful effect on output tensor DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), # AssertionError: JIT Test does not execute any logic DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), # AssertionError: Tensor-likes are not close! DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), # FX failed to normalize op - add the op to the op_skip list. 
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), # vmap: calling random operator not supported DecorateInfo(unittest.skip("Test expects tensor input"), "TestVmapOperatorsOpInfo", "test_vmap_exhaustive"), DecorateInfo(unittest.skip("Test expects tensor input"), "TestVmapOperatorsOpInfo", "test_op_has_batch_rule"), DecorateInfo(unittest.skip("make_traced() doesn't set seed properly!"), 'TestCommon', 'test_python_ref_executor'), DecorateInfo(unittest.expectedFailure, 'TestDecomp', 'test_quick'), )), OpInfo('exponential', op=lambda inp, *args, **kwargs: wrapper_set_seed(torch.Tensor.exponential_, inp, *args, **kwargs), inplace_variant=torch.Tensor.exponential_, dtypes=floating_types_and(torch.float16, torch.bfloat16), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16), supports_out=False, supports_autograd=False, allow_cow_input_materialize_forward=[0], sample_inputs_func=sample_inputs_exponential, error_inputs_func=error_inputs_exponential, skips=( # Tests that assume input tensor has a meaningful effect on output tensor DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), # AssertionError: JIT Test does not execute any logic DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), # AssertionError: Tensor-likes are not close! DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), # FX failed to normalize op - add the op to the op_skip list. DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), # vmap: calling random operator not supported DecorateInfo(unittest.expectedFailure, "TestVmapOperatorsOpInfo", "test_vmap_exhaustive"), DecorateInfo(unittest.expectedFailure, "TestVmapOperatorsOpInfo", "test_op_has_batch_rule"), DecorateInfo(unittest.expectedFailure, 'TestDecomp', 'test_quick'), DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), )), OpInfo('geometric', op=lambda inp, *args, **kwargs: wrapper_set_seed(torch.Tensor.geometric_, inp, *args, **kwargs), inplace_variant=torch.Tensor.geometric_, dtypes=floating_types_and(torch.float16, torch.bfloat16, torch.int8, torch.int16, torch.int32, torch.int64, torch.uint8), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16), supports_out=False, supports_autograd=False, allow_cow_input_materialize_forward=[0], sample_inputs_func=sample_inputs_geometric, error_inputs_func=error_inputs_geometric, skips=( # Tests that assume input tensor has a meaningful effect on output tensor DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), # AssertionError: JIT Test does not execute any logic DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), # AssertionError: Tensor-likes are not close! DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), # FX failed to normalize op - add the op to the op_skip list. 
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), # vmap: calling random operator not supported DecorateInfo(unittest.skip("Test expects tensor input"), "TestVmapOperatorsOpInfo", "test_vmap_exhaustive"), DecorateInfo(unittest.skip("Test expects tensor input"), "TestVmapOperatorsOpInfo", "test_op_has_batch_rule"), DecorateInfo(unittest.expectedFailure, 'TestDecomp', 'test_quick'), )), OpInfo('log_normal', op=lambda inp, *args, **kwargs: wrapper_set_seed(torch.Tensor.log_normal_, inp, *args, **kwargs), inplace_variant=torch.Tensor.log_normal_, dtypes=floating_types_and(torch.float16, torch.bfloat16), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16), supports_out=False, supports_autograd=False, allow_cow_input_materialize_forward=[0], sample_inputs_func=sample_inputs_log_normal, error_inputs_func=error_inputs_log_normal, skips=( # Tests that assume input tensor has a meaningful effect on output tensor DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), # AssertionError: JIT Test does not execute any logic DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), # AssertionError: Tensor-likes are not close! DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), # FX failed to normalize op - add the op to the op_skip list. DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), # vmap: calling random operator not supported DecorateInfo(unittest.skip("Test expects tensor input"), "TestVmapOperatorsOpInfo", "test_vmap_exhaustive"), DecorateInfo(unittest.skip("Test expects tensor input"), "TestVmapOperatorsOpInfo", "test_op_has_batch_rule"), DecorateInfo(unittest.expectedFailure, 'TestDecomp', 'test_quick'), )), OpInfo('normal', variant_test_name='in_place', op=lambda inp, *args, **kwargs: wrapper_set_seed(torch.Tensor.normal_, inp, *args, **kwargs), inplace_variant=torch.Tensor.normal_, dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16), supports_out=False, supports_autograd=False, allow_cow_input_materialize_forward=[0], sample_inputs_func=sample_inputs_normal, error_inputs_func=error_inputs_normal, skips=( # Tests that assume input is a tensor or sequence of tensors DecorateInfo(unittest.skip("Test expects tensor input"), "TestCommon", "test_noncontiguous_samples"), # Tests that assume input tensor has a meaningful effect on output tensor DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), DecorateInfo(unittest.expectedFailure, 'TestDecomp', 'test_quick'), # AssertionError: JIT Test does not execute any logic DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), # AssertionError: Tensor-likes are not close! DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), # FX failed to normalize op - add the op to the op_skip list. 
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), # vmap: calling random operator not supported DecorateInfo(unittest.skip("Test expects tensor input"), "TestVmapOperatorsOpInfo", "test_vmap_exhaustive"), DecorateInfo(unittest.skip("Test expects tensor input"), "TestVmapOperatorsOpInfo", "test_op_has_batch_rule"), )), OpInfo('uniform', op=lambda inp, *args, **kwargs: wrapper_set_seed(torch.Tensor.uniform_, inp, *args, **kwargs), method_variant=None, inplace_variant=torch.Tensor.uniform_, dtypes=floating_and_complex_types_and(torch.bfloat16, torch.float16), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16), supports_out=False, supports_autograd=False, is_factory_function=False, allow_cow_input_materialize_forward=[0], sample_inputs_func=sample_inputs_uniform, error_inputs_func=error_inputs_uniform, skips=( # FX failed to normalize op - add the op to the op_skip list. DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), # Tests that assume input tensor has a meaningful effect on output tensor DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), # AssertionError: JIT Test does not execute any logic DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), # aten.uniform was not decomposed DecorateInfo(unittest.expectedFailure, 'TestDecomp', 'test_quick'), DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), )), BinaryUfuncInfo('clamp_max', ref=_clamp_max_numpy, dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32, torch.int8), supports_forward_ad=True, supports_rhs_python_scalar=False, supports_fwgrad_bwgrad=True, rhs_make_tensor_kwargs=dict(exclude_zero=False), skips=( # RuntimeError: "max_elementwise_cuda" not implemented for 'ComplexFloat' DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion', device_type='cuda'), # dispatch to lazy test failed DecorateInfo(unittest.expectedFailure, 'TestLazyOpInfo', 'test_dispatched_to_lazy'), # test error disabled since rhs non-tensor python scalar is supported DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_errors'), )), BinaryUfuncInfo('clamp_min', ref=_clamp_min_numpy, dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32, torch.int8), supports_forward_ad=True, supports_rhs_python_scalar=False, supports_fwgrad_bwgrad=True, rhs_make_tensor_kwargs=dict(exclude_zero=False), skips=( # RuntimeError: "min_elementwise_cuda" not implemented for 'ComplexFloat' DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion', device_type='cuda'), # dispatch to lazy test failed DecorateInfo(unittest.expectedFailure, 'TestLazyOpInfo', 'test_dispatched_to_lazy'), # test error disabled since rhs non-tensor python scalar is supported DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_errors'), )), BinaryUfuncInfo('mul', aliases=('multiply',), dtypes=all_types_and_complex_and(torch.chalf, torch.float16, torch.bfloat16, torch.bool), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16), assert_autodiffed=True, 
supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_two_python_scalars=True, error_inputs_sparse_func=error_inputs_sparse_mul, sample_inputs_sparse_coo_func=partial(sample_inputs_sparse_mul, layout=torch.sparse_coo), sample_inputs_sparse_csr_func=partial(sample_inputs_sparse_mul, layout=torch.sparse_csr), sample_inputs_sparse_csc_func=partial(sample_inputs_sparse_mul, layout=torch.sparse_csc), sample_inputs_sparse_bsr_func=partial(sample_inputs_sparse_mul, layout=torch.sparse_bsr), sample_inputs_sparse_bsc_func=partial(sample_inputs_sparse_mul, layout=torch.sparse_bsc)), BinaryUfuncInfo('sub', # NumPy has no builtin reference for the alpha kwarg, but it is easy enough to emulate ref=lambda input, other, *, alpha=1: np.subtract(input, np.multiply(alpha, other)), aliases=('subtract',), dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16, torch.chalf), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16), assert_autodiffed=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_add_sub, supports_two_python_scalars=True, decorators=( DecorateInfo( toleranceOverride({torch.float16: tol(atol=1e-2, rtol=0), torch.bfloat16: tol(atol=1e-5, rtol=5e-3), torch.complex32: tol(atol=1e-5, rtol=1e-3)}), 'TestBinaryUfuncs', 'test_reference_numerics'), DecorateInfo( toleranceOverride({torch.chalf: tol(atol=1e-2, rtol=0)}), 'TestCommon', 'test_complex_half_reference_testing', device_type='cpu'), DecorateInfo( toleranceOverride({torch.chalf: tol(atol=5e-3, rtol=0)}), 'TestDecomp', 'test_comprehensive', device_type='cpu'), DecorateInfo( toleranceOverride({torch.chalf: tol(atol=5e-3, rtol=0)}), 'TestDecomp', 'test_quick', device_type='cpu'), ), skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics', dtypes=(torch.uint8,)), DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics_small_values', dtypes=(torch.uint8,)), )), OpInfo('addmm', # This addmm OpInfo is for when alpha and beta are not both equal to 1. # alpha=beta=1 is tested in the following opinfo, because that special case will # trigger addmm being decomposed by a jit pass. dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), dtypesIfROCM=floating_and_complex_types_and(torch.float16, torch.bfloat16), dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16), assert_autodiffed=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, sample_inputs_func=sample_inputs_addmm, skips=( # Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479 DecorateInfo( unittest.skip("Skipped!"), 'TestSchemaCheckModeOpInfo', 'test_schema_correctness', dtypes=(torch.complex64, torch.complex128)), )), OpInfo('addmm', # When alpha=beta=1 as compile-time constants, JIT will decompose addmm into mm and add. 
variant_test_name='decomposed', dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16), assert_autodiffed=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, autodiff_nonfusible_nodes=['aten::add', 'aten::mm'], sample_inputs_func=partial(sample_inputs_addmm, alpha=1, beta=1), skips=( # Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479 DecorateInfo( unittest.skip("Skipped!"), 'TestSchemaCheckModeOpInfo', 'test_schema_correctness', dtypes=(torch.complex64, torch.complex128)), # https://github.com/pytorch/pytorch/issues/71784 DecorateInfo(unittest.skip('Skipped!'), 'TestNNCOpInfo', 'test_nnc_correctness', device_type='cpu', dtypes=(torch.float16,)), )), OpInfo('addmv', dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16), dtypesIfCUDA=floating_types_and(torch.float16, torch.complex64, torch.complex128, torch.bfloat16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, decorators=[ DecorateInfo( toleranceOverride({torch.half: tol(atol=1e-5, rtol=3e-3)}), 'TestInductorOpInfo', 'test_comprehensive', device_type='cpu'), ], sample_inputs_func=sample_inputs_addmv), OpInfo('addbmm', ref=lambda M, batch1, batch2, beta=1, alpha=1: np.add(np.multiply(np.asarray(beta, dtype=M.dtype), M), np.multiply(np.asarray(alpha, dtype=batch1.dtype), np.sum(np.matmul(batch1, batch2), axis=0))), dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16), dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if SM53OrLater or TEST_WITH_ROCM else []), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16), # Runs very slowly on slow gradcheck - alternatively reduce input sizes gradcheck_fast_mode=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, decorators=[ DecorateInfo( toleranceOverride({torch.float32: tol(atol=1.3e-05, rtol=1.3e-05), torch.complex64: tol(atol=1e-05, rtol=1.2e-03)}), 'TestCommon', 'test_numpy_refs'), # MPS has slightly worse precision. Is this acceptable? 
DecorateInfo( toleranceOverride({torch.float32: tol(atol=1.3e-04, rtol=1.3e-04), torch.complex64: tol(atol=1e-05, rtol=1.2e-03)}), 'TestCommon', 'test_numpy_ref_mps'), DecorateInfo( toleranceOverride({torch.float32: tol(atol=1e-5, rtol=1e-5)}), 'TestConsistency', 'test_output_match', ), DecorateInfo( toleranceOverride({torch.float32: tol(atol=1.5e-05, rtol=1e-05)}), 'TestCommon', 'test_out'), DecorateInfo( toleranceOverride({torch.half: tol(atol=6e-3, rtol=1e-2)}), 'TestInductorOpInfo', 'test_comprehensive', device_type='cpu'), ], skips=( # NVIDIA only assures that bfloat16 is supported by bmm if SM >= 5.3 DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes', device_type='cuda', active_if=not SM53OrLater), # addbmm does not correctly warn when resizing out= inputs DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), # https://github.com/pytorch/pytorch/issues/55907 DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager'), ), sample_inputs_func=sample_inputs_addbmm), OpInfo('baddbmm', dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16), dtypesIfCUDA=floating_types_and(torch.float16, torch.complex64, torch.complex128, torch.bfloat16), backward_dtypesIfCUDA=floating_types_and(torch.float16, *[torch.bfloat16] if SM53OrLater or TEST_WITH_ROCM else [], torch.complex64, torch.complex128), # Runs very slowly on slow gradcheck - alternatively reduce input sizes dtypesIfHpu=custom_types(torch.float32, torch.bfloat16), gradcheck_fast_mode=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, decorators=[ DecorateInfo( toleranceOverride({torch.complex64: tol(atol=1e-05, rtol=1.2e-03)}), 'TestCommon', 'test_variant_consistency_eager', device_type='cuda'), DecorateInfo( toleranceOverride({torch.complex64: tol(atol=1e-05, rtol=1.2e-03)}), 'TestMathBits', 'test_conj_view', device_type='cuda'), ], sample_inputs_func=sample_inputs_baddbmm, skips=( # Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479 DecorateInfo( unittest.skip("Skipped!"), 'TestSchemaCheckModeOpInfo', 'test_schema_correctness', dtypes=(torch.complex64, torch.complex128)), )), OpInfo('dot', dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16), assert_autodiffed=True, sample_inputs_func=sample_inputs_dot_vdot, error_inputs_func=error_inputs_dot_vdot, supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( # Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479 DecorateInfo( unittest.skip("Skipped!"), 'TestSchemaCheckModeOpInfo', 'test_schema_correctness', dtypes=(torch.complex64, torch.complex128)), )), OpInfo('vdot', dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16), sample_inputs_func=sample_inputs_dot_vdot, error_inputs_func=error_inputs_dot_vdot, supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( # Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479 DecorateInfo( unittest.skip("Skipped!"), 'TestSchemaCheckModeOpInfo', 'test_schema_correctness', dtypes=(torch.complex64, torch.complex128)), )), OpInfo('bmm', dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] 
if SM53OrLater or TEST_WITH_ROCM else []), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16), assert_autodiffed=True, assert_jit_shape_analysis=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( # NVIDIA only assures that bfloat16 is supported by bmm if SM >= 5.3 DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes', device_type='cuda', active_if=not SM53OrLater), DecorateInfo(toleranceOverride({torch.float32: tol(atol=1e-5, rtol=1e-5)}), "TestCommon", "test_out") ), sample_inputs_func=sample_inputs_bmm), OpInfo('mv', dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16), assert_autodiffed=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_mv), OpInfo('addr', dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16), # Reference: https://github.com/pytorch/pytorch/issues/50747 supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( # Reference: https://github.com/pytorch/pytorch/issues/50747 DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager', dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16)), ), sample_inputs_func=sample_inputs_addr, gradcheck_nondet_tol=GRADCHECK_NONDET_TOL), OpInfo('addcmul', dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16), assert_autodiffed=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( # TODO: update sample inputs with for_inplace_variant kwarg to support this test DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), ), sample_inputs_func=sample_inputs_addcmul_addcdiv, reference_inputs_func=partial( reference_inputs_elementwise_ternary, sample_inputs_func=reference_inputs_addcmul_addcdiv)), OpInfo('addcdiv', dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( # TODO: update sample inputs with for_inplace_variant kwarg to support this test DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), ), sample_inputs_func=sample_inputs_addcmul_addcdiv, reference_inputs_func=partial( reference_inputs_elementwise_ternary, sample_inputs_func=reference_inputs_addcmul_addcdiv)), UnaryUfuncInfo('asin', aliases=('arcsin', ), ref=np.arcsin, domain=(-1, 1), supports_sparse=True, supports_sparse_csr=True, supports_sparse_csc=True, supports_sparse_bsr=True, supports_sparse_bsc=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, promotes_int_to_float=True, dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16), assert_autodiffed=True, decorators=[ DecorateInfo( toleranceOverride({torch.float16: tol(atol=1e-05, rtol=1e-03)}), 'TestUnaryUfuncs', device_type='cuda' ), DecorateInfo( toleranceOverride({torch.float32: tol(atol=8e-5, rtol=4e-5)}), 'TestInductorOpInfo', 'test_comprehensive', device_type='cuda' ), DecorateInfo( toleranceOverride({torch.complex64: tol(atol=5e-05, rtol=2e-05)}), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cpu' ), precisionOverride({torch.bfloat16: 1e-2}), ], skips=( 
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS), DecorateInfo(unittest.skip("Skipped! sparse backward not supported"), 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'), )), # NOTE: derivative for inplace asinh is not implemented UnaryUfuncInfo('asinh', aliases=('arcsinh', ), ref=np.arcsinh, dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16), decorators=(precisionOverride({torch.bfloat16: 5e-2}),), supports_inplace_autograd=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_sparse=True, supports_sparse_csr=True, supports_sparse_csc=True, supports_sparse_bsr=True, supports_sparse_bsc=True, promotes_int_to_float=True, skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS), DecorateInfo(unittest.skip("Skipped! sparse backward not supported"), 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'), )), UnaryUfuncInfo('atan', aliases=('arctan', ), ref=np.arctan, dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16), assert_autodiffed=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_sparse=True, supports_sparse_csr=True, supports_sparse_csc=True, supports_sparse_bsr=True, supports_sparse_bsc=True, promotes_int_to_float=True, decorators=(precisionOverride({torch.bfloat16: 1e-2}),), skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', dtypes=[torch.cfloat, torch.cdouble], active_if=(IS_MACOS or IS_WINDOWS)), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', dtypes=[torch.cfloat, torch.cdouble], active_if=(IS_MACOS or IS_WINDOWS)), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small', dtypes=[torch.cfloat, torch.cdouble], active_if=(IS_MACOS or IS_WINDOWS)), DecorateInfo(unittest.skip("Skipped! 
sparse backward not supported"), 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'), )), BinaryUfuncInfo('atan2', aliases=('arctan2',), dtypes=all_types_and(torch.bool, torch.bfloat16, torch.half), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, promotes_int_to_float=True, supports_rhs_python_scalar=False, skips=( # Incorrectly attempts to use a scalar for the second argument DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_jit_alias_remapping'), )), UnaryUfuncInfo('atanh', aliases=('arctanh', ), ref=np.arctanh, domain=(-1, 1), dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16), decorators=[ precisionOverride({torch.bfloat16: 1e-2}), DecorateInfo( toleranceOverride({torch.float32: tol(atol=9e-3, rtol=8e-5)}), "TestInductorOpInfo", "test_comprehensive", device_type="cuda" ), ], supports_inplace_autograd=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_sparse=True, supports_sparse_csr=True, supports_sparse_csc=True, supports_sparse_bsr=True, supports_sparse_bsc=True, promotes_int_to_float=True, skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cuda', dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', device_type='cuda', dtypes=[torch.cfloat], active_if=IS_WINDOWS), DecorateInfo(unittest.skip("Skipped! 
sparse backward not supported"), 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'), )), OpInfo('allclose', dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16), ref=np.allclose, supports_autograd=False, supports_forward_ad=False, sample_inputs_func=sample_inputs_allclose, skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness'), DecorateInfo(unittest.skip("Skipped!"), 'TestCudaFuserOpInfo'), ), supports_out=False), OpInfo('broadcast_to', ref=np.broadcast_to, dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # See https://github.com/pytorch/pytorch/pull/78358 check_batched_forward_grad=False, sample_inputs_func=sample_inputs_broadcast_to), OpInfo('broadcast_shapes', op=torch.broadcast_shapes, ref=np.broadcast_shapes if np.lib.NumpyVersion(np.__version__) >= '1.20.0' else None, dtypes=_dispatch_dtypes((torch.float32,)), supports_out=False, supports_gradgrad=False, assert_autodiffed=False, supports_autograd=False, supports_scripting=False, sample_inputs_func=sample_inputs_broadcast_shapes, skips=( # https://github.com/pytorch/pytorch/issues/64997 DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), # skip dtype tests since broadcast_shape is not device dependent. # having dtypes limited to torch.float32 would cause test_dtypes to report unexpected success DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_dtypes'), # skip these tests since we have non tensor input DecorateInfo(unittest.skip('Skipped!'), "TestCommon", "test_noncontiguous_samples"), DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_variant_consistency_eager'), DecorateInfo(unittest.skip('Skipped!'), 'TestJit', 'test_variant_consistency_jit'), )), OpInfo('broadcast_tensors', ref=np.broadcast_arrays, dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16), sample_inputs_func=sample_inputs_broadcast_tensors, reference_inputs_func=reference_inputs_broadcast_tensors, supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # See https://github.com/pytorch/pytorch/pull/78358 check_batched_forward_grad=False, skips=( # https://github.com/pytorch/pytorch/issues/64997 DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), # JIT does not support variadic tensors. # RuntimeError: input->type()->kind() == TypeKind::OptionalType # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":252, # please report a bug to PyTorch. DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=[torch.float32]), )), OpInfo('block_diag', dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # Default batching rule in core doesn't work for ops with TensorList args check_batched_forward_grad=False, skips=( # https://github.com/pytorch/pytorch/issues/64997 DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), # JIT does not support variadic tensors. 
# RuntimeError: input->type()->kind() == TypeKind::OptionalType # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":252, # please report a bug to PyTorch. DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=[torch.float32]), ), sample_inputs_func=sample_inputs_block_diag), UnaryUfuncInfo('bitwise_not', ref=np.bitwise_not, dtypes=integral_types_and(torch.bool), dtypesIfHpu=custom_types(torch.bool), operator_variant=operator.invert, supports_autograd=False), BinaryUfuncInfo('bitwise_left_shift', op=torch.bitwise_left_shift, dtypes=integral_types(), dtypesIfCUDA=integral_types(), dtypesIfHpu=custom_types(torch.int32, torch.int8, torch.bool), operator_variant=operator.lshift, inplace_operator_variant=operator.ilshift, supports_autograd=False, supports_one_python_scalar=True, rhs_make_tensor_kwargs=dict(low=0), skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_type_promotion'), # https://github.com/pytorch/pytorch/issues/70904 DecorateInfo(unittest.skip("Some inputs produce undefined outputs"), 'TestCommon', 'test_compare_cpu'), )), BinaryUfuncInfo('bitwise_right_shift', op=torch.bitwise_right_shift, dtypes=integral_types(), dtypesIfCUDA=integral_types(), dtypesIfHpu=custom_types(torch.int32, torch.int8, torch.bool), operator_variant=operator.rshift, inplace_operator_variant=operator.irshift, supports_autograd=False, supports_one_python_scalar=True, rhs_make_tensor_kwargs=dict(low=0), skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_type_promotion'), # https://github.com/pytorch/pytorch/issues/70904 DecorateInfo(unittest.skip("Some inputs produce undefined outputs"), 'TestCommon', 'test_compare_cpu'), )), OpInfo('combinations', op=torch.combinations, dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, # See https://github.com/pytorch/pytorch/pull/78358 check_batched_forward_grad=False, supports_out=False, sample_inputs_func=sample_inputs_combinations), OpInfo('cartesian_prod', op=torch.cartesian_prod, dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # See https://github.com/pytorch/pytorch/pull/78358 check_batched_forward_grad=False, sample_inputs_func=sample_inputs_cartesian_prod, skips=( DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), # RuntimeError: input->type()->kind() == TypeKind::OptionalType # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":270 DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)), )), OpInfo('cdist', dtypes=floating_types(), supports_out=False, supports_gradgrad=False, assert_autodiffed=False, sample_inputs_func=sample_inputs_cdist), UnaryUfuncInfo('ceil', ref=np.ceil, dtypes=all_types_and(torch.half, torch.bfloat16), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32, torch.int8), supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness', dtypes=tuple(t for t in integral_types() if t != torch.uint8)), ), supports_sparse=True, supports_sparse_csr=True, supports_sparse_csc=True, 
supports_sparse_bsr=True, supports_sparse_bsc=True, assert_autodiffed=True), OpInfo('cholesky', dtypes=floating_and_complex_types(), sample_inputs_func=sample_inputs_linalg_cholesky, gradcheck_wrapper=gradcheck_wrapper_hermitian_input, decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack],), OpInfo('cholesky_inverse', dtypes=floating_and_complex_types(), backward_dtypes=floating_and_complex_types(), # https://github.com/pytorch/pytorch/issues/80411 gradcheck_fast_mode=True, supports_fwgrad_bwgrad=True, supports_forward_ad=True, check_batched_gradgrad=True, sample_inputs_func=sample_inputs_linalg_cholesky_inverse, gradcheck_wrapper=gradcheck_wrapper_triangular_input_real_positive_diagonal, decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack], skips=( # Strides are not the same! Original strides were ((4, 2, 1),) and strides are now ((4, 1, 2),) DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'),)), OpInfo('cholesky_solve', op=torch.cholesky_solve, dtypes=floating_and_complex_types(), sample_inputs_func=sample_inputs_cholesky_solve, check_batched_gradgrad=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, gradcheck_wrapper=lambda *args, **kwargs: gradcheck_wrapper_triangular_input(*args, idx=1, **kwargs), decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack]), OpInfo('chunk', dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32), sample_inputs_func=sample_inputs_chunk, reference_inputs_func=reference_inputs_chunk, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False), OpInfo('unsafe_chunk', dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), sample_inputs_func=sample_inputs_chunk, check_batched_forward_grad=False, reference_inputs_func=reference_inputs_chunk, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False), OpInfo('clone', ref=np.copy, dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32, torch.int8, torch.bool), sample_inputs_func=sample_inputs_clone_contiguous, reference_inputs_func=reference_inputs_clone_contiguous, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False, skips=( # TypeError: _copy_dispatcher() got an unexpected keyword argument 'memory_format' # (NumPy reference needs to be extended with memory_format) DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_numpy_ref'), DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_numpy_ref_mps'), ),), OpInfo('contiguous', op=lambda x, *args, **kwargs: x.contiguous(*args, **kwargs), dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), sample_inputs_func=sample_inputs_clone_contiguous, reference_inputs_func=reference_inputs_clone_contiguous, supports_forward_ad=True, supports_fwgrad_bwgrad=True, autodiff_fusible_nodes=['aten::contiguous'], assert_jit_shape_analysis=True, supports_out=False, skips=( DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), )), OpInfo('sum_to_size', op=lambda x, *args, **kwargs: x.sum_to_size(*args, **kwargs), dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), sample_inputs_func=sample_inputs_sum_to_size, error_inputs_func=error_inputs_sum_to_size, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False, skips=( # lambda impl 
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float,)), )), OpInfo('clamp', aliases=('clip',), ref=_clamp_numpy, dtypes=all_types_and(torch.bfloat16, torch.half), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32, torch.int8, torch.bool), sample_inputs_func=sample_inputs_clamp, reference_inputs_func=partial(reference_inputs_elementwise_ternary, sample_inputs_func=sample_inputs_clamp), assert_autodiffed=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( # NNC appear to not handle boolean clamp DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness', dtypes=(torch.bool,)), # MPS does not support float64, while numpy does internal computations in float64. # See https://github.com/pytorch/pytorch/blob/3c1cf03fde145bdbe1f5ffb81765d076c10b4c04/test/test_ops.py#L260-L264 DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_numpy_ref_mps'), )), UnaryUfuncInfo('positive', ref=np.positive, dtypes=all_types_and_complex_and(torch.half, torch.bfloat16, torch.chalf), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_sparse=True, supports_sparse_csr=True, supports_sparse_csc=True, supports_sparse_bsr=True, supports_sparse_bsc=True, ), UnaryUfuncInfo('conj', ref=np.conj, dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half, torch.chalf), dtypesIfHpu=custom_types(torch.float32, torch.int32), supports_sparse=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # See https://github.com/pytorch/pytorch/pull/78358 check_batched_forward_grad=False, supports_out=False), UnaryUfuncInfo('conj_physical', decomp_aten_name='_conj_physical', ref=np.conj, dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half, torch.chalf), supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_sparse=True, supports_sparse_csr=True, supports_sparse_csc=True, supports_sparse_bsr=True, supports_sparse_bsc=True, skips=( # RuntimeError: inputSet && outputSet # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":118, # please report a bug to PyTorch. DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32, )), DecorateInfo(unittest.skip("Skipped! 
conj_physical_ not implemented for sparse"), 'TestSparseUnaryUfuncs', 'test_inplace'), )), OpInfo('resolve_conj', dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16), sample_inputs_func=sample_inputs_view_as_real, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False, ), OpInfo('resolve_neg', dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16), sample_inputs_func=sample_inputs_view_as_real, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False, ), OpInfo('view_as_real', dtypes=complex_types(), supports_forward_ad=True, supports_out=False, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_view_as_real, test_conjugated_samples=False, ), OpInfo('view_as_complex', dtypes=floating_types_and(torch.half), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, test_neg_view=False, sample_inputs_func=sample_inputs_view_as_complex, skips=( # RuntimeError: Tensor must have a last dimension with stride 1 DecorateInfo(unittest.expectedFailure, "TestCommon", "test_noncontiguous_samples"), # RuntimeError: "eq_cpu" not implemented for 'ComplexHalf' DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness', dtypes=(torch.half,)), # RuntimeError: view size is not compatible with input tensor's size and stride DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace_all_strides"), )), BinaryUfuncInfo('complex', dtypes=floating_types_and(torch.half), supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_rhs_python_scalar=False, error_inputs_func=error_inputs_complex, skips=( # Tests don't account for complex's type promotion semantics DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion'), DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out', device_type='mps'), DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_binary_ufuncs_mixed_dtype'),)), BinaryUfuncInfo('copysign', dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16), promotes_int_to_float=True, # https://github.com/pytorch/pytorch/issues/80411 gradcheck_fast_mode=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True), OpInfo('corrcoef', dtypes=all_types_and_complex_and(torch.half, torch.bfloat16), sample_inputs_func=sample_inputs_corrcoef, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # See https://github.com/pytorch/pytorch/pull/78358 check_batched_forward_grad=False, skips=( # Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479 DecorateInfo( unittest.skip("Skipped!"), 'TestSchemaCheckModeOpInfo', 'test_schema_correctness', dtypes=(torch.complex64, torch.complex128)), ), supports_out=False), UnaryUfuncInfo('cos', ref=np.cos, dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16), assert_autodiffed=True, handles_large_floats=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, promotes_int_to_float=True, decorators=(precisionOverride({torch.bfloat16: 1e-2}),), skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', dtypes=(torch.cfloat, torch.cdouble,), device_type='cpu', 
active_if=IS_WINDOWS), # This fails on CUDA but passes on ROCm DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', dtypes=(torch.cdouble,), device_type='cuda'), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], active_if=IS_MACOS), # AssertionError: Tensor-likes are not close! # Greatest absolute difference: nan at index (700,) (up to 1e-05 allowed) # Greatest relative difference: nan at index (700,) (up to 0.001 allowed) DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs', 'test_reference_numerics_large', device_type='cuda', dtypes=(torch.chalf,), active_if=IS_WINDOWS), )), UnaryUfuncInfo('cosh', ref=np_unary_ufunc_integer_promotion_wrapper(np.cosh), dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16), assert_autodiffed=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, promotes_int_to_float=True, skips=( # Reference: https://github.com/pytorch/pytorch/issues/48641 DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', device_type='cpu', dtypes=[torch.int8]), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', dtypes=[torch.cdouble]), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], active_if=IS_MACOS), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], active_if=IS_MACOS), # AssertionError: Tensor-likes are not close! 
# Greatest absolute difference: nan at index (6000,) (up to 1e-05 allowed) # Greatest relative difference: nan at index (6000,) (up to 0.001 allowed) DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs', 'test_reference_numerics_large', device_type='cuda', dtypes=(torch.chalf,), active_if=IS_WINDOWS), )), OpInfo('cov', dtypes=all_types_and_complex_and(torch.half, torch.bfloat16), sample_inputs_func=sample_inputs_cov, error_inputs_func=error_inputs_cov, supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( # Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479 DecorateInfo( unittest.skip("Skipped!"), 'TestSchemaCheckModeOpInfo', 'test_schema_correctness', dtypes=(torch.complex64, torch.complex128)), # Float did not match double DecorateInfo(unittest.expectedFailure, 'TestBwdGradients', 'test_fn_grad'), # Jacobian mismatch DecorateInfo(unittest.expectedFailure, 'TestBwdGradients', 'test_fn_gradgrad'), DecorateInfo(unittest.expectedFailure, 'TestFwdGradients', 'test_forward_mode_AD'), DecorateInfo(unittest.skip("Barely fails"), 'TestFwdGradients', 'test_fn_fwgrad_bwgrad'), # JIT test not working for tensor kwargs (https://github.com/pytorch/pytorch/issues/58507) # RuntimeError: # undefined value tensor: # File "<string>", line 3 # def the_method(i0): # return torch.cov(i0, correction=0, fweights=None, aweights=tensor([0.0518, 0.4681], dtype=torch.float32, requires_grad=True)) # noqa: B950 # ~~~~~~ <--- HERE DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), DecorateInfo(toleranceOverride({torch.float16: tol(atol=8e-3, rtol=1.4e-3)}), "TestInductorOpInfo", "test_comprehensive", device_type="cpu"), )), OpInfo('cross', dtypes=all_types_and_complex_and(torch.half, torch.bfloat16), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32), sample_inputs_func=sample_inputs_cross, supports_fwgrad_bwgrad=True, supports_out=True, supports_forward_ad=True), OpInfo('cumsum', dtypes=all_types_and_complex_and(torch.half, torch.bfloat16), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32), supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( # cumsum does not handle correctly out= dtypes DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), ), sample_inputs_func=sample_inputs_cumulative_ops), OpInfo('cumprod', dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( # cumprod does not handle correctly out= dtypes DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), ), # gradgradcheck fails in fast_mode=True: #56275 sample_inputs_func=sample_inputs_cumprod, gradcheck_fast_mode=False), OpInfo('cummax', dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), sample_inputs_func=partial(sample_inputs_cumulative_ops, supports_dtype_kwargs=False), supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( ), gradcheck_nondet_tol=GRADCHECK_NONDET_TOL), OpInfo('cummin', dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), sample_inputs_func=partial(sample_inputs_cumulative_ops, supports_dtype_kwargs=False), supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( ), gradcheck_nondet_tol=GRADCHECK_NONDET_TOL), UnaryUfuncInfo('deg2rad', ref=np.radians, decorators=(precisionOverride({torch.bfloat16: 7e-1, torch.float16: 7e-1}),), dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), 
supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_sparse=True, supports_sparse_csr=True, supports_sparse_csc=True, supports_sparse_bsr=True, supports_sparse_bsc=True, promotes_int_to_float=True), OpInfo('diff', op=torch.diff, # np.diff has np._NoValue as default values for prepend and append, compare_with_reference breaks if prepend/append # are set as None when converting to numpy ref=lambda input, n=1, dim=-1, prepend=np._NoValue, append=np._NoValue: ( np.diff(input, n, dim, np._NoValue if prepend is None else prepend, np._NoValue if append is None else append) ), dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), # Runs very slowly on slow gradcheck - alternatively reduce input sizes gradcheck_fast_mode=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_diff, error_inputs_func=error_inputs_diff, # See https://github.com/pytorch/pytorch/pull/78358 check_batched_forward_grad=False, skips=( )), BinaryUfuncInfo('div', aliases=('divide',), variant_test_name='no_rounding_mode', dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32, torch.int8), # Runs very slowly on slow gradcheck - alternatively reduce input sizes gradcheck_fast_mode=True, supports_forward_ad=True, promotes_int_to_float=True, supports_fwgrad_bwgrad=True, supports_two_python_scalars=True, assert_autodiffed=True, rhs_make_tensor_kwargs=dict(exclude_zero=True),), BinaryUfuncInfo('div', aliases=('divide',), variant_test_name='trunc_rounding', dtypes=all_types_and(torch.half, torch.bfloat16), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32, torch.int8), sample_kwargs=lambda device, dtype, input: ({"rounding_mode": "trunc"}, {"rounding_mode": "trunc"}), # https://github.com/pytorch/pytorch/issues/80411 gradcheck_fast_mode=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_two_python_scalars=True, assert_autodiffed=True, rhs_make_tensor_kwargs=dict(exclude_zero=True), decorators=( # See https://github.com/pytorch/pytorch/issues/111126 DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion'), ), skips=( # RuntimeError: MALFORMED INPUT: Unhandled node kind (in computeValue): aten::div DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_working'), # FIXME: # torch.autograd.gradcheck.GradcheckError: Jacobian mismatch for # output 0 with respect to input 1, # numerical:tensor(-17746.9307, dtype=torch.float64) # analytical:tensor(0., dtype=torch.float64) DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', 'test_fn_grad', device_type='cpu', dtypes=(torch.float64,)), )), BinaryUfuncInfo('div', aliases=('divide',), variant_test_name='floor_rounding', dtypes=all_types_and(torch.half, torch.bfloat16), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32, torch.int8), sample_kwargs=lambda device, dtype, input: ({"rounding_mode": "floor"}, {"rounding_mode": "floor"}), # https://github.com/pytorch/pytorch/issues/80411 gradcheck_fast_mode=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_two_python_scalars=True, assert_autodiffed=True, rhs_make_tensor_kwargs=dict(exclude_zero=True), decorators=( # See https://github.com/pytorch/pytorch/issues/111126 DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion'), ), skips=( # RuntimeError: MALFORMED 
INPUT: Unhandled node kind (in computeValue): aten::div DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_working'), # FIXME: # torch.autograd.gradcheck.GradcheckError: Jacobian mismatch for # output 0 with respect to input 1, # numerical:tensor(-17746.9307, dtype=torch.float64) # analytical:tensor(0., dtype=torch.float64) DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', 'test_fn_grad', dtypes=(torch.float64,), device_type='cpu'), )), BinaryUfuncInfo('true_divide', dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), supports_forward_ad=True, promotes_int_to_float=True, supports_fwgrad_bwgrad=True, supports_two_python_scalars=True, rhs_make_tensor_kwargs=dict(exclude_zero=True)), OpInfo('equal', dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32, torch.int8, torch.bool), ref=lambda input, other: (input == other).all(), sample_inputs_func=sample_inputs_equal, supports_autograd=False, supports_tracing=False, skips=( )), UnaryUfuncInfo('exp', ref=np_unary_ufunc_integer_promotion_wrapper(np.exp), dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16), skips=( # Reference: https://github.com/pytorch/pytorch/issues/48010 DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS), ), assert_autodiffed=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, promotes_int_to_float=True), OpInfo('expand', op=lambda self, shape: self.expand(shape), dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32), sample_inputs_func=sample_inputs_expand, supports_forward_ad=True, supports_fwgrad_bwgrad=True, assert_jit_shape_analysis=True, supports_out=False, skips=( DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), )), OpInfo('expand_as', op=lambda self, other: self.expand_as(other), dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32, torch.int8, torch.bool), supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_expand_as, supports_out=False, skips=( DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'),), ), OpInfo('expand_copy', dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), sample_inputs_func=sample_inputs_expand, supports_forward_ad=True, supports_fwgrad_bwgrad=True, assert_jit_shape_analysis=True, supports_out=True, skips=( DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)), )), OpInfo('diag', ref=np.diag, dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16), dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, check_batched_forward_grad=False, sample_inputs_func=sample_inputs_diag, error_inputs_func=error_inputs_diag), 
OpInfo('diag_embed', dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), supports_out=False, # Runs very slowly on slow gradcheck - alternatively reduce input sizes gradcheck_fast_mode=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_diagonal_diag_embed, reference_inputs_func=reference_inputs_diagonal_diag_embed, error_inputs_func=error_inputs_diagonal_diag_embed), OpInfo('diagonal', aten_backward_name='diagonal_backward', dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_diagonal_diag_embed, reference_inputs_func=reference_inputs_diagonal_diag_embed, error_inputs_func=error_inputs_diagonal_diag_embed), OpInfo('diagonal_copy', dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_diagonal_diag_embed, reference_inputs_func=reference_inputs_diagonal_diag_embed, error_inputs_func=error_inputs_diagonal_diag_embed), OpInfo('diagonal_scatter', dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_diagonal_scatter), OpInfo('alias_copy', dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), sample_inputs_func=sample_inputs_alias_copy, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=True), BinaryUfuncInfo('eq', ref=np.equal, dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32, torch.int8, torch.bool), always_returns_bool=True, supports_autograd=False, sample_inputs_func=sample_inputs_comparison_ops, skips=( )), BinaryUfuncInfo('fmax', op=torch.fmax, dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32, torch.int8, torch.bool), supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_rhs_python_scalar=False, skips=( # RuntimeError: "max_elementwise_cuda" not implemented for 'ComplexFloat' DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_type_promotion'), )), BinaryUfuncInfo('fmin', op=torch.fmin, dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32, torch.int8, torch.bool), supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_rhs_python_scalar=False, skips=( # RuntimeError: "min_elementwise_cuda" not implemented for 'ComplexFloat' DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_type_promotion'), )), BinaryUfuncInfo('fmod', ref=np.fmod, dtypes=all_types_and(torch.float16, torch.bfloat16), dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32), # https://github.com/pytorch/pytorch/issues/80411 gradcheck_fast_mode=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, assert_autodiffed=None, rhs_make_tensor_kwargs={'exclude_zero': True}, decorators=( DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_contig_vs_every_other', dtypes=(torch.bfloat16,)), DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_non_contig', dtypes=(torch.bfloat16,)), 
DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics', dtypes=(torch.bfloat16,)), DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics_small_values', dtypes=(torch.uint8,)), # FIXME: # torch.autograd.gradcheck.GradcheckError: Jacobian mismatch for # output 0 with respect to input 1, # numerical:tensor(101.6283, dtype=torch.float64) # analytical:tensor(-18.3575, dtype=torch.float64) DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', 'test_fn_grad', dtypes=(torch.float64,), device_type='cpu'), )), BinaryUfuncInfo('remainder', ref=np.remainder, dtypes=all_types_and(torch.float16, torch.bfloat16), dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32, torch.int8, torch.bool), # https://github.com/pytorch/pytorch/issues/80411 gradcheck_fast_mode=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, assert_autodiffed=None, operator_variant=operator.mod, inplace_operator_variant=operator.imod, supports_one_python_scalar=True, rhs_make_tensor_kwargs={'exclude_zero': True}, decorators=( DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_contig_vs_every_other', dtypes=(torch.bfloat16,)), DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_non_contig', dtypes=(torch.bfloat16,)), DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics', dtypes=(torch.bfloat16,)), DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics_small_values', dtypes=(torch.uint8,)), DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness', dtypes=(torch.bfloat16,)), # Fails on XLA # False is not true : Tensors failed to compare as equal! # Attempted to compare equality of tensors with different dtypes DecorateInfo(unittest.skip("Skipped!"), 'TestOpInfo', device_type='xla', dtypes=(torch.long,)), # FIXME: # torch.autograd.gradcheck.GradcheckError: Jacobian mismatch for # output 0 with respect to input 1, # numerical:tensor(102.4676, dtype=torch.float64) # analytical:tensor(-17.5182, dtype=torch.float64) DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', 'test_fn_grad', device_type='cpu', dtypes=(torch.float64,)), DecorateInfo( toleranceOverride({ torch.float16: tol(atol=5e-4, rtol=3e-3), }), "TestInductorOpInfo", "test_comprehensive", device_type="cuda" ), )), UnaryUfuncInfo('frac', ref=lambda x: np.modf(x)[0], dtypes=floating_types_and(torch.bfloat16, torch.float16), dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16), assert_autodiffed=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_sparse=True, supports_sparse_csr=True, supports_sparse_csc=True, supports_sparse_bsr=True, supports_sparse_bsc=True, skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', dtypes=(torch.bfloat16, torch.float16, torch.float32, torch.float64)), # 76047 DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness', dtypes=(torch.bfloat16, torch.float32, torch.float64)), )), OpInfo('stft', decorators=[ skipCPUIfNoFFT, DecorateInfo(unittest.skip("Skipped! 
stft does not match the native function"), 'TestJit', 'test_variant_consistency_jit'), ], dtypes=floating_and_complex_types(), sample_inputs_func=sample_inputs_stft, # Runs very slowly on slow gradcheck - alternatively reduce input sizes gradcheck_fast_mode=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, check_batched_forward_grad=False, check_batched_grad=False, check_batched_gradgrad=False, supports_out=False, gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, ), OpInfo('istft', dtypes=complex_types(), sample_inputs_func=sample_inputs_istft, # Runs very slowly on slow gradcheck - alternatively reduce input sizes gradcheck_fast_mode=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, check_batched_forward_grad=False, check_batched_grad=False, check_batched_gradgrad=False, supports_out=False, decorators=( DecorateInfo(unittest.skip("Skipped! istft does not match the native function"), 'TestJit', 'test_variant_consistency_jit'), ), skips=( skipCPUIfNoFFT, # gradcheck fails on ROCm (gh-68429) # grad is computed improperly (probably for weights tensor) DecorateInfo(unittest.expectedFailure, 'TestBwdGradients', 'test_fn_grad'), # Pre-existing condition (calls .item); needs to be fixed DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_backward'), )), UnaryUfuncInfo('floor', ref=np.floor, dtypes=all_types_and(torch.half, torch.bfloat16), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32, torch.int8), supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness', dtypes=tuple(t for t in integral_types() if t != torch.uint8)), ), supports_sparse=True, supports_sparse_csr=True, supports_sparse_csc=True, supports_sparse_bsr=True, supports_sparse_bsc=True, assert_autodiffed=True), OpInfo('flip', op=torch.flip, dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32, torch.int8, torch.bool), sample_inputs_func=sample_inputs_flip, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False), OpInfo('fliplr', op=torch.fliplr, dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), sample_inputs_func=sample_inputs_fliplr_flipud, error_inputs_func=error_inputs_fliplr, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False), OpInfo('flipud', op=torch.flipud, dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), sample_inputs_func=sample_inputs_fliplr_flipud, error_inputs_func=error_inputs_flipud, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False), OpInfo('sparse.sampled_addmm', dtypes=floating_and_complex_types(), supports_autograd=True, sample_inputs_func=sample_inputs_sparse_sampled_addmm, decorators=[ skipCUDAIf(not ((_get_torch_cuda_version() >= (11, 3)) or (_get_torch_rocm_version() >= (5, 2))), "cusparseSDDMM was added in 11.2.1"), skipCPUIfNoMklSparse, ], skips=( # NotImplementedError: Tensors of type SparseCsrTensorImpl do not have is_contiguous DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_noncontiguous_samples'), # RuntimeError: Sparse CSR tensors do not have strides. 
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out'), DecorateInfo(unittest.skip("Skipped!"), 'TestTags', 'test_tags'), # RuntimeError: sampled_addmm: Expected result to have sparse csr layout, but got Strided DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out_warning'), # RuntimeError: Sparse CSR tensors do not have strides DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager'), # RuntimeError: Sparse CSR tensors do not have strides DecorateInfo(unittest.skip("Skipped!"), 'TestCompositeCompliance', 'test_operator'), # RuntimeError: Sparse CSR tensors do not have strides DecorateInfo(unittest.skip("Skipped!"), 'TestCompositeCompliance', 'test_backward'), # RuntimeError: Sparse CSR tensors do not have strides DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_conj_view'), # RuntimeError: Sparse CSR tensors do not have strides DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_conj_view'), # RuntimeError: Sparse CSR tensors do not have strides DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view'), # RuntimeError: Sparse CSR tensors do not have strides DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), # RuntimeError: unsupported memory format option Preserve DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), # RuntimeError: sparse_mask does not support automatic differentiation for outputs with complex dtype # RuntimeError: Sparse CSR tensors do not have strides DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients', 'test_fn_fwgrad_bwgrad'), # ValueError: Sparse output is not supported at gradcheck yet. Please call to_dense(masked_grad=...) ... DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', 'test_fn_grad'), # RuntimeError: sparse_mask does not support automatic differentiation for outputs with complex dtype. # RuntimeError: Sparse CSR tensors do not have is_contiguous DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', 'test_fn_gradgrad'), # ValueError: Sparse output is not supported at gradcheck yet. Please call to_dense(masked_grad=...) ... DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients', 'test_forward_mode_AD'), # NotImplementedError: Could not run 'aten::sparse_sampled_addmm' with arguments from the 'SparseCsrMeta' backend. DecorateInfo(unittest.skip("Skipped!"), 'TestMeta', 'test_dispatch_meta_outplace'), DecorateInfo(unittest.skip("Skipped!"), 'TestMeta', 'test_dispatch_symbolic_meta_outplace'), DecorateInfo(unittest.skip("Skipped!"), 'TestMeta', 'test_meta_outplace'), DecorateInfo(unittest.skip("Skipped!"), 'TestMeta', 'test_dispatch_symbolic_meta_outplace_all_strides'), DecorateInfo(unittest.skip("Skipped!"), 'TestFakeTensor', 'test_fake_crossref_backward_no_amp'), )), OpInfo('sparse.mm', dtypes=floating_types_and(torch.bfloat16, torch.float16), variant_test_name='reduce', supports_autograd=True, supports_out=False, supports_gradgrad=False, supports_forward_ad=False, sample_inputs_func=sample_inputs_sparse_mm_reduce, decorators=[onlyCPU], skips=( # NotImplementedError: Tensors of type SparseCsrTensorImpl do not have is_contiguous DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_noncontiguous_samples'), # RuntimeError: Sparse CSR tensors do not have strides. 
DecorateInfo(unittest.skip("Skipped!"), 'TestTags', 'test_tags'), # RuntimeError: Sparse CSR tensors do not have strides DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager'), # RuntimeError: Sparse CSR tensors do not have strides DecorateInfo(unittest.skip("Skipped!"), 'TestCompositeCompliance', 'test_operator'), # RuntimeError: Sparse CSR tensors do not have strides DecorateInfo(unittest.skip("Skipped!"), 'TestCompositeCompliance', 'test_backward'), # RuntimeError: Sparse CSR tensors do not have strides DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_conj_view'), # RuntimeError: Sparse CSR tensors do not have strides DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_conj_view'), # RuntimeError: Sparse CSR tensors do not have strides DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view'), # RuntimeError: Sparse CSR tensors do not have strides DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), # RuntimeError: unsupported memory format option Preserve DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), # ValueError: Sparse output is not supported at gradcheck yet. Please call to_dense(masked_grad=...) ... DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients', 'test_fn_fwgrad_bwgrad'), # RuntimeError: Sparse CSR tensors do not have is_contiguou DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', 'test_fn_grad'), # ValueError: Sparse output is not supported at gradcheck yet. Please call to_dense(masked_grad=...) ... DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', 'test_fn_gradgrad'), # RuntimeError: Sparse CSR tensors do not have strides DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients', 'test_forward_mode_AD'), # ValueError: Sparse output is not supported at gradcheck yet. Please call to_dense(masked_grad=...) ... 
DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', 'test_fn_fail_gradgrad'), # NotImplementedError: Could not run 'aten::_sparse_mm_reduce_impl' with arguments from the 'SparseCsrMeta' backend DecorateInfo(unittest.skip("Skipped!"), 'TestMeta', 'test_dispatch_meta_outplace'), DecorateInfo(unittest.skip("Skipped!"), 'TestMeta', 'test_dispatch_symbolic_meta_outplace'), DecorateInfo(unittest.skip("Skipped!"), 'TestMeta', 'test_meta_outplace'), )), UnaryUfuncInfo('i0', ref=np_unary_ufunc_integer_promotion_wrapper( scipy.special.i0) if TEST_SCIPY else None, aliases=('special.i0',), decorators=(precisionOverride({torch.bfloat16: 3e-1, torch.float16: 5e-1}),), dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, promotes_int_to_float=True, sample_inputs_func=sample_inputs_i0_i1, skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', dtypes=(torch.int8,)), )), BinaryUfuncInfo('floor_divide', ref=_floor_divide_np, dtypes=all_types_and(torch.half, torch.bfloat16), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32, torch.int8, torch.bool), supports_autograd=False, rhs_make_tensor_kwargs=dict(exclude_zero=True), supports_two_python_scalars=True, skips=( # AssertionError: Results of original model and exported/imported version of model differed DecorateInfo(unittest.skip('Skipped!'), 'TestJit', 'test_variant_consistency_jit'), # bfloat16 floor_divide compared with a float32 reference works inconsistently DecorateInfo(unittest.skip('Skipped!'), 'TestBinaryUfuncs', dtypes=(torch.bfloat16,)), # int8 floor divide has different results for -128 // -1 vs. NumPy DecorateInfo(unittest.skip('Skipped!'), 'TestBinaryUfuncs', 'test_reference_numerics_small_values', dtypes=(torch.int8,)), # The following tests fails on some jobs DecorateInfo(unittest.skip('Skipped!'), 'TestBinaryUfuncs', 'test_reference_numerics_extremal_values', dtypes=(torch.float16,)), DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-3, rtol=5e-3)}), 'TestBinaryUfuncs', 'test_reference_numerics'), )), UnaryUfuncInfo('frexp', op=torch.frexp, ref=np.frexp, dtypes=floating_types_and(torch.half, torch.bfloat16), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16), # skip testing torch.frexp as it is not supported by ROCm platform yet decorators=[], supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( # skips below tests as torch.frexp returns tuple-like (mantissa, exponent) as outputs, # while theses tests currently requires output to a single tensor. DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_batch_vs_slicing'), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_contig_vs_every_other'), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_contig_vs_transposed'), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_non_contig_expand'), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_variant_consistency'), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_out_arg_all_dtypes'), # skips test_reference_numerics due to error in Windows CI. 
# The np.frexp returns exponent as np.intc dtype on Windows platform, # and np.intc does not have the correspond torch dtype DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small', active_if=IS_WINDOWS), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', active_if=IS_WINDOWS), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', active_if=IS_WINDOWS), )), UnaryUfuncInfo('log1p', ref=np.log1p, aliases=('special.log1p',), domain=(-1, None), dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16), decorators=(precisionOverride({torch.bfloat16: 1e-1}),), supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_sparse=True, supports_sparse_csr=True, supports_sparse_csc=True, supports_sparse_bsr=True, supports_sparse_bsc=True, assert_autodiffed=True, promotes_int_to_float=True), BinaryUfuncInfo('ge', ref=np.greater_equal, aliases=('greater_equal',), dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32), always_returns_bool=True, supports_autograd=False, skips=( )), OpInfo('geqrf', dtypes=floating_and_complex_types(), sample_inputs_func=sample_inputs_linalg_qr_geqrf, decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack], supports_autograd=False, skips=( # FIXME: geqrf can't forward with complex inputs that require grad DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_dtypes'), # Strides are not the same! DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), )), BinaryUfuncInfo('gt', ref=np.greater, aliases=('greater',), dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32, torch.int8), always_returns_bool=True, supports_autograd=False, skips=( )), UnaryUfuncInfo('imag', ref=np.imag, dtypes=complex_types_and(torch.chalf), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # See https://github.com/pytorch/pytorch/issues/66357 # RuntimeError: view_as_real doesn't work on unresolved conjugated tensors. check_batched_forward_grad=False, skips=( # Skip since real and imag don't have out variants. DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs', 'test_out_arg_all_dtypes'), )), OpInfo('gradient', dtypes=floating_and_complex_types_and(torch.int8, torch.int16, torch.int32, torch.int64, torch.bfloat16, torch.half), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # See https://github.com/pytorch/pytorch/pull/78358 check_batched_forward_grad=False, skips=( DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), # following tests give a runtime error with undefined value tensor # see discussion : https://github.com/pytorch/pytorch/issues/56660 # RuntimeError: # Arguments for call are not valid. 
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32, torch.complex64)), # noqa: B950 DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness'), DecorateInfo(unittest.skip("Skipped!"), 'TestCudaFuserOpInfo'), ), supports_inplace_autograd=False, sample_inputs_func=sample_inputs_gradient, error_inputs_func=error_inputs_gradient), OpInfo('isin', dtypes=all_types_and(torch.bfloat16, torch.half), supports_autograd=False, sample_inputs_func=sample_inputs_isin), OpInfo('kthvalue', dtypes=all_types_and(torch.bfloat16, torch.float16), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32), supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_kthvalue, error_inputs_func=error_inputs_kthvalue), BinaryUfuncInfo('le', ref=np.less_equal, aliases=('less_equal',), dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16), always_returns_bool=True, supports_autograd=False, skips=( )), OpInfo('linspace', dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32), is_factory_function=True, supports_out=True, supports_autograd=False, error_inputs_func=error_inputs_linspace, sample_inputs_func=sample_inputs_linspace, skips=( # FX failed to normalize op - add the op to the op_skip list. DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), # Tests that assume input is a tensor or sequence of tensors DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), # Same failure as arange: cannot find linspace in captured graph DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)), # UserWarning not triggered : Resized a non-empty tensor but did not warn about it. DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), # UserWarning: CUDA caching allocator reports a memory leak not verified by the driver API # in __main__.TestJitCUDA.test_variant_consistency_jit_logspace_cuda_complex64! # Caching allocator allocated memory was 0 and is now reported as 307200 on device 0. # CUDA driver allocated memory was 1254555648 and is now 1242955776. DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.cfloat,), device_type="cuda"), )), OpInfo('linspace', dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32), is_factory_function=True, supports_out=True, supports_autograd=False, error_inputs_func=error_inputs_linspace, sample_inputs_func=sample_inputs_linspace_tensor_overload, variant_test_name="tensor_overload", skips=( # FX failed to normalize op - add the op to the op_skip list. 
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), # TypeError: 'int' object is not subscriptable DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), # Same failure as arange: cannot find linspace in captured graph DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)), # UserWarning not triggered : Resized a non-empty tensor but did not warn about it. DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), # UserWarning: CUDA caching allocator reports a memory leak not verified by the driver API # in __main__.TestJitCUDA.test_variant_consistency_jit_logspace_cuda_complex64! # Caching allocator allocated memory was 0 and is now reported as 307200 on device 0. # CUDA driver allocated memory was 1254555648 and is now 1242955776. DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.cfloat,), device_type="cuda"), )), OpInfo('logspace', dtypes=all_types_and_complex_and(torch.half, torch.bfloat16), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16), is_factory_function=True, supports_out=True, supports_autograd=False, error_inputs_func=error_inputs_linspace, sample_inputs_func=sample_inputs_logspace, skips=( # FX failed to normalize op - add the op to the op_skip list. DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), # Tests that assume input is a tensor or sequence of tensors DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), # Same failure as arange: cannot find linspace in captured graph DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)), # UserWarning not triggered : Resized a non-empty tensor but did not warn about it. DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), # Off-by-one issue when casting floats to ints DecorateInfo(unittest.expectedFailure, 'TestDecomp', 'test_quick', dtypes=(torch.int16, torch.int32, torch.int64), device_type="cuda"), DecorateInfo(unittest.expectedFailure, 'TestDecomp', 'test_comprehensive', dtypes=(torch.int16, torch.int32, torch.int64), device_type="cuda"), # UserWarning: CUDA caching allocator reports a memory leak not verified by the driver API # in __main__.TestJitCUDA.test_variant_consistency_jit_logspace_cuda_complex64! # Caching allocator allocated memory was 0 and is now reported as 307200 on device 0. # CUDA driver allocated memory was 1254555648 and is now 1242955776. DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.cfloat,), device_type="cuda"), )), OpInfo('logspace', dtypes=all_types_and_complex_and(torch.half, torch.bfloat16), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16), is_factory_function=True, supports_out=True, supports_autograd=False, error_inputs_func=error_inputs_linspace, sample_inputs_func=sample_inputs_logspace_tensor_overload, variant_test_name="tensor_overload", skips=( # FX failed to normalize op - add the op to the op_skip list. 
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), # TypeError: 'int' object is not subscriptable DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), # Same failure as arange: cannot find linspace in captured graph DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)), # UserWarning not triggered : Resized a non-empty tensor but did not warn about it. DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), # Off-by-one issue when casting floats to ints DecorateInfo(unittest.expectedFailure, 'TestDecomp', 'test_quick', dtypes=(torch.int16, torch.int32, torch.int64), device_type="cuda"), DecorateInfo(unittest.expectedFailure, 'TestDecomp', 'test_comprehensive', dtypes=(torch.int16, torch.int32, torch.int64), device_type="cuda"), # UserWarning: CUDA caching allocator reports a memory leak not verified by the driver API # in __main__.TestJitCUDA.test_variant_consistency_jit_logspace_cuda_complex64! # Caching allocator allocated memory was 0 and is now reported as 307200 on device 0. # CUDA driver allocated memory was 1254555648 and is now 1242955776. DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.cfloat,), device_type="cuda"), )), UnaryUfuncInfo('log', ref=np.log, domain=(0, None), dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), backward_dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16, torch.chalf), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16), assert_autodiffed=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, promotes_int_to_float=True, decorators=(precisionOverride({torch.bfloat16: 5e-2}),), skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS), ), # log(z)->-inf for |z|->0 reference_numerics_filter=NumericsFilter(condition=lambda x: torch.abs(x) < 0.1, safe_val=1)), UnaryUfuncInfo('log10', ref=np.log10, domain=(0, None), decorators=(precisionOverride({torch.bfloat16: 5e-2}),), dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16), assert_autodiffed=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, promotes_int_to_float=True, skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS), ), # log10(z)->-inf for |z|->0 reference_numerics_filter=NumericsFilter(condition=lambda x: torch.abs(x) < 0.1, safe_val=1)), UnaryUfuncInfo('log2', ref=np.log2, domain=(0, None), dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16), assert_autodiffed=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, promotes_int_to_float=True, decorators=(precisionOverride({torch.bfloat16: 1e-1}),), skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', dtypes=[torch.cfloat, torch.cdouble]), ), # log2(z)->-inf for |z|->0 
reference_numerics_filter=NumericsFilter(condition=lambda x: torch.abs(x) < 0.1, safe_val=1)), BinaryUfuncInfo('ldexp', dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), # Runs very slowly on slow gradcheck - alternatively reduce input sizes gradcheck_fast_mode=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_inplace_autograd=False, promotes_int_to_float=True, supports_out=True, supports_rhs_python_scalar=False, skips=( # RuntimeError: mul(): functions with out=... arguments don't support # automatic differentiation, but one of the arguments requires grad # https://github.com/pytorch/pytorch/issues/68966 DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), ), decorators=[ DecorateInfo( toleranceOverride({ torch.complex64: tol(atol=1e-05, rtol=1e-05) }), 'TestCommon', device_type='cpu', ), ], ), BinaryUfuncInfo('logaddexp', dtypes=floating_and_complex_types_and(torch.bfloat16, torch.float16), dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.float16), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_rhs_python_scalar=False, skips=( # TODO: FIXME: RuntimeError: not implemented for 'ComplexFloat' DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion', device_type='cuda'), )), OpInfo('logaddexp2', dtypes=floating_types_and(torch.bfloat16, torch.half), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_logaddexp), UnaryUfuncInfo('logical_not', ref=np.logical_not, decorators=(precisionOverride({torch.bfloat16: 7e-1, torch.float16: 5e-1}),), dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int8, torch.bool), supports_autograd=False, skips=( # The function variant always returns BoolTensor # while the inplace variant preserves the input dtype. 
# >>> t = torch.randn(3) # >>> torch.logical_not(t) # tensor([False, False, False]) # >>> torch.logical_not(t).dtype # torch.bool # >>> t.logical_not_().dtype # torch.float32 DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_variant_consistency', dtypes=all_types_and_complex_and(torch.half, torch.bfloat16)), DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager', dtypes=all_types_and_complex_and(torch.half, torch.bfloat16)), )), BinaryUfuncInfo('lt', ref=np.less, aliases=('less',), dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int8, torch.int32), always_returns_bool=True, supports_autograd=False, skips=( )), OpInfo('lu_unpack', op=torch.lu_unpack, dtypes=floating_and_complex_types(), # Runs very slowly on slow gradcheck - alternatively reduce input sizes gradcheck_fast_mode=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=(skipCPUIfNoLapack,), sample_inputs_func=sample_inputs_lu_unpack), OpInfo('lu', op=torch.lu, dtypes=floating_and_complex_types(), # Runs very slowly on slow gradcheck - alternatively reduce input sizes gradcheck_fast_mode=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # https://github.com/pytorch/pytorch/issues/66357 check_batched_forward_grad=False, sample_inputs_func=sample_inputs_lu, decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack], skips=( # we skip jit tests because `lu` is a torch function # RuntimeError: # 'Tensor (inferred)' object has no attribute or method 'lu'.: # File "<string>", line 3 # def the_method(i0): # return i0.lu(True, True) # ~~~~~ <--- HERE DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), # RuntimeError not raised: Expected RuntimeError when calling with input.device=cpu and out.device=cuda DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), # UserWarning not triggered : Resized a non-empty tensor but did not warn about it. 
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), )), OpInfo('lu_solve', op=torch.lu_solve, dtypes=floating_and_complex_types(), supports_forward_ad=True, # See https://github.com/pytorch/pytorch/issues/66357 check_batched_forward_grad=False, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_lu_solve, skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out', device_type='mps', dtypes=[torch.float32]), DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager', device_type='mps', dtypes=[torch.float32]), DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', device_type='mps', dtypes=[torch.float32]), DecorateInfo(unittest.skip("Tests different backward paths"), "TestCommon", "test_floating_inputs_are_differentiable"),), decorators=[skipCPUIfNoLapack, skipCUDAIfNoMagmaAndNoCusolver]), OpInfo('masked_fill', dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int8, torch.bool, torch.int32), sample_inputs_func=sample_inputs_masked_fill, error_inputs_func=error_inputs_masked_fill, supports_forward_ad=True, supports_fwgrad_bwgrad=True, check_batched_forward_grad=False, supports_out=False), OpInfo('masked_scatter', dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int8, torch.bool, torch.int32), sample_inputs_func=sample_inputs_masked_scatter, error_inputs_func=error_inputs_masked_scatter, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # https://github.com/pytorch/pytorch/issues/66357 check_batched_forward_grad=False, supports_out=False, skips=( )), OpInfo('masked_select', dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32), supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_masked_select, error_inputs_func=error_inputs_masked_select, skips=( # Compiler issue on ROCm. 
Might need to skip until ROCm5.5 DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_non_standard_bool_values', dtypes=[torch.bool], active_if=TEST_WITH_ROCM), )), OpInfo('matrix_exp', dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16), aliases=('linalg.matrix_exp',), sample_inputs_func=sample_inputs_matrix_exp, # Needs to construct a 2nx2n matrix by copy_ ing into it check_batched_grad=False, check_batched_gradgrad=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # https://github.com/pytorch/pytorch/issues/66357 check_batched_forward_grad=False, skips=( # mexp does not support bf16 and fp16 DecorateInfo(unittest.skip('Skipped!'), 'TestInductorOpInfo', 'test_comprehensive', dtypes=[torch.half], device_type="cpu"), ), supports_out=False, ), OpInfo('matmul', aliases=('linalg.matmul',), dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if SM53OrLater or TEST_WITH_ROCM else []), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16), assert_autodiffed=True, assert_jit_shape_analysis=True, # Runs very slowly on slow gradcheck - alternatively reduce input sizes gradcheck_fast_mode=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, check_batched_forward_grad=False, sample_inputs_func=partial(sample_inputs_matmul, is_rmatmul=False), decorators=[ # NVIDIA only assures that bfloat16 is supported by bmm if SM >= 5.3 DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes', device_type='cuda', active_if=not SM53OrLater), # ROCm intermittently fails the test with standard atol/rtol DecorateInfo(toleranceOverride({torch.float32: tol(atol=1e-4, rtol=0)}), 'TestCommon', 'test_noncontiguous_samples', device_type='cuda', active_if=TEST_WITH_ROCM), DecorateInfo(toleranceOverride({torch.float32: tol(atol=1e-4, rtol=0)}), 'TestCommon', 'test_out', device_type='cuda', active_if=TEST_WITH_ROCM), # mv for the sample with shapes (S, S, M, M), (M,) has some variance in the # backward on CPU DecorateInfo(toleranceOverride({torch.float32: tol(atol=0, rtol=1e-5)}), 'TestCommon', 'test_noncontiguous_samples', device_type='cpu'), DecorateInfo( toleranceOverride({ torch.float32: tol(atol=1e-5, rtol=1e-5), torch.complex64: tol(atol=1e-5, rtol=1e-5), }), "TestDecomp", "test_comprehensive", device_type="cuda", ), ], skips=( # Strides are not the same! DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), # https://github.com/pytorch/pytorch/issues/67470 DecorateInfo(unittest.skip("67470!"), 'TestCommon', 'test_noncontiguous_samples', device_type='cpu', dtypes=(torch.long,)), # AssertionError: False is not true : Tensors failed to compare as equal! 
DecorateInfo(unittest.skip("Skipped!"), 'TestOpInfo', device_type='xla', dtypes=(torch.long,)), # https://github.com/pytorch/pytorch/issues/71774 DecorateInfo(unittest.skip('Skipped!'), 'TestNNCOpInfo', 'test_nnc_correctness', device_type='cpu', dtypes=(torch.long,)), )), OpInfo('max', variant_test_name='reduction_with_dim', dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32), sample_inputs_func=sample_inputs_max_min_reduction_with_dim, supports_fwgrad_bwgrad=True, skips=( ), supports_forward_ad=True), OpInfo('max', variant_test_name='reduction_no_dim', dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32), supports_out=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_max_min_reduction_no_dim, skips=( )), OpInfo('median', dtypes=all_types_and(torch.bfloat16, torch.float16), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32), # TODO: some signatures of median do support out supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, error_inputs_func=error_inputs_median, sample_inputs_func=partial(sample_inputs_reduction, supports_multiple_dims=False)), OpInfo('nanmedian', dtypes=all_types_and(torch.bfloat16, torch.float16), # TODO: some signatures of nanmedian do support out supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=partial(sample_inputs_reduction, supports_multiple_dims=False)), OpInfo('var_mean', dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16), sample_inputs_func=sample_inputs_std_var, # TODO: some signatures of var_mean do support out supports_out=False, supports_forward_ad=True, check_batched_forward_grad=False, supports_fwgrad_bwgrad=True, decorators=( DecorateInfo(toleranceOverride({torch.float64: tol(atol=2e-7, rtol=2e-7)}), "TestDecomp", "test_comprehensive", device_type="cuda"), DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-3, rtol=2e-3)}), "TestInductorOpInfo", "test_comprehensive", device_type="cuda"), )), OpInfo('var_mean', variant_test_name='unbiased', dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16), sample_inputs_func=sample_inputs_std_var_unbiased, # TODO: some signatures of var_mean do support out supports_out=False, supports_forward_ad=True, check_batched_forward_grad=False, supports_fwgrad_bwgrad=True, decorators=( DecorateInfo(toleranceOverride({torch.float64: tol(atol=2e-7, rtol=2e-7)}), "TestDecomp", "test_comprehensive", device_type="cuda"), DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-3, rtol=2e-3)}), "TestInductorOpInfo", "test_comprehensive", device_type="cuda"), )), OpInfo('std_mean', dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16), sample_inputs_func=sample_inputs_std_var, # TODO: some signatures of std_mean do support out supports_out=False, supports_forward_ad=True, check_batched_forward_grad=False, supports_fwgrad_bwgrad=True, decorators=( DecorateInfo(toleranceOverride({torch.float64: tol(atol=2e-7, rtol=2e-7)}), "TestDecomp", "test_comprehensive", device_type="cuda"), )), OpInfo('std_mean', variant_test_name='unbiased', dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16), dtypesIfHpu=custom_types(torch.float32, 
torch.bfloat16), sample_inputs_func=sample_inputs_std_var_unbiased, # TODO: some signatures of var_mean do support out supports_out=False, supports_forward_ad=True, check_batched_forward_grad=False, supports_fwgrad_bwgrad=True, decorators=( DecorateInfo( toleranceOverride({ torch.float16: tol(atol=4e-5, rtol=9e-3), torch.float64: tol(atol=2e-7, rtol=2e-7), }), "TestDecomp", "test_comprehensive", device_type="cuda" ), DecorateInfo( toleranceOverride({ torch.float16: tol(atol=4e-5, rtol=9e-3), torch.float64: tol(atol=2e-7, rtol=2e-7), }), "TestInductorOpInfo", "test_comprehensive", device_type="cuda" ), )), OpInfo('meshgrid', variant_test_name='variadic_tensors', ref=np.meshgrid, dtypes=all_types_and_complex_and(torch.bfloat16, torch.bool, torch.float16), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16), sample_inputs_func=partial(sample_inputs_meshgrid, variant='variadic'), skips=[ # JIT does not support variadic tensors. # RuntimeError: input->type()->kind() == TypeKind::OptionalType # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":252, # please report a bug to PyTorch. DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), # meshgrid is defined in torch.functional to take a # variadic list of tensors. Variadic parameters are not # compatible with the normalize operator tests. DecorateInfo(unittest.skip("Skipped!"), 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), # Skip operator schema test because this is a functional and not an operator DecorateInfo(unittest.skip("Skipped!"), 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'), ], supports_out=False, supports_fwgrad_bwgrad=True, supports_forward_ad=True, # See https://github.com/pytorch/pytorch/pull/78358 check_batched_forward_grad=False,), OpInfo('meshgrid', variant_test_name='list_of_tensors', # Unlike the variant above, we do not use np.meshgrid as a # ref since it does not officially support list of numpy # arrays. dtypes=all_types_and_complex_and(torch.bfloat16, torch.bool, torch.float16), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16), sample_inputs_func=partial(sample_inputs_meshgrid, variant='list'), skips=[ # meshgrid is defined in torch.functional to take a # variadic list of tensors. Variadic parameters are not # compatible with the normalize operator tests. 
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), ], assert_autodiffed=True, supports_out=False, autodiff_nonfusible_nodes=[], supports_fwgrad_bwgrad=True, supports_forward_ad=True, # See https://github.com/pytorch/pytorch/pull/78358 check_batched_forward_grad=False,), OpInfo('min', variant_test_name='reduction_with_dim', dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32), sample_inputs_func=sample_inputs_max_min_reduction_with_dim, supports_fwgrad_bwgrad=True, supports_forward_ad=True, skips=( )), OpInfo('min', variant_test_name='reduction_no_dim', dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), supports_out=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_max_min_reduction_no_dim, skips=( )), OpInfo('quantile', dtypes=floating_types(), sample_inputs_func=sample_inputs_reduction_quantile, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # See https://github.com/pytorch/pytorch/issues/66357 # Relies on copy_ to broadcast, but the forward AD path calls broadcast_to which # does not have a batching rule in core check_batched_forward_grad=False), OpInfo('nanquantile', dtypes=floating_types(), sample_inputs_func=sample_inputs_reduction_quantile, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # See https://github.com/pytorch/pytorch/issues/66357 # Relies on copy_ to broadcast, but the forward AD path calls broadcast_to which # does not have a batching rule in core check_batched_forward_grad=False), BinaryUfuncInfo( 'max', aliases=('maximum',), variant_test_name='binary', dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32), supports_forward_ad=True, supports_fwgrad_bwgrad=True, assert_autodiffed=True, ref=np.maximum, supports_rhs_python_scalar=False, skips=( # Incorrectly attempts to use a scalar for the second argument DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_jit_alias_remapping'), # TODO: FIXME: RuntimeError: "max_elementwise_cuda" not implemented for 'ComplexFloat' DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion', device_type='cuda'), )), BinaryUfuncInfo( 'maximum', dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32), supports_forward_ad=True, supports_fwgrad_bwgrad=True, ref=np.maximum, supports_rhs_python_scalar=False, skips=( # TODO: FIXME: RuntimeError: "max_elementwise_cuda" not implemented for 'ComplexFloat' DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion', device_type='cuda'), )), BinaryUfuncInfo( 'min', aliases=('minimum',), variant_test_name='binary', dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32), supports_forward_ad=True, supports_fwgrad_bwgrad=True, assert_autodiffed=True, ref=np.minimum, supports_rhs_python_scalar=False, skips=( # Incorrectly attempts to use a scalar for the second argument DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_jit_alias_remapping'), # TODO: FIXME: RuntimeError: "min_elementwise_cuda" not implemented for 'ComplexFloat' DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion', device_type='cuda'), )), BinaryUfuncInfo( 'minimum', dtypes=all_types_and(torch.float16, torch.bfloat16, 
torch.bool), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32), supports_forward_ad=True, supports_fwgrad_bwgrad=True, ref=np.minimum, supports_rhs_python_scalar=False, skips=( # TODO: FIXME: RuntimeError: "min_elementwise_cuda" not implemented for 'ComplexFloat' DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion', device_type='cuda'), ), ), BinaryUfuncInfo('logical_and', ref=np.logical_and, dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32, torch.int8, torch.bool), supports_autograd=False, always_returns_bool=True, supports_rhs_python_scalar=False), BinaryUfuncInfo('logical_or', ref=np.logical_or, dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int8, torch.bool), supports_autograd=False, always_returns_bool=True, supports_rhs_python_scalar=False), BinaryUfuncInfo('logical_xor', ref=np.logical_xor, dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int8, torch.bool), supports_autograd=False, always_returns_bool=True, supports_rhs_python_scalar=False, skips=( )), BinaryUfuncInfo('bitwise_and', ref=np.bitwise_and, dtypes=integral_types_and(torch.bool), dtypesIfHpu=custom_types(torch.bool), operator_variant=operator.and_, inplace_operator_variant=operator.iand, supports_autograd=False, supports_one_python_scalar=True, skips=( # RuntimeError: "bitwise_and_cuda" not implemented for 'Half' DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion', device_type='cuda'), )), BinaryUfuncInfo('bitwise_or', ref=np.bitwise_or, dtypes=integral_types_and(torch.bool), dtypesIfHpu=custom_types(torch.bool), operator_variant=operator.or_, inplace_operator_variant=operator.ior, supports_autograd=False, supports_one_python_scalar=True, skips=( # TODO: FIXME: RuntimeError: "bitwise_or_cuda" not implemented for 'Half' DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion', device_type='cuda'), )), BinaryUfuncInfo('bitwise_xor', ref=np.bitwise_xor, dtypes=integral_types_and(torch.bool), dtypesIfHpu=custom_types(torch.bool), operator_variant=operator.xor, inplace_operator_variant=operator.ixor, supports_autograd=False, supports_one_python_scalar=True, skips=( # TODO: FIXME: RuntimeError: "bitwise_xor_cuda" not implemented for 'Half' DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion', device_type='cuda'), )), BinaryUfuncInfo('heaviside', ref=lambda a, b: ( # necessary because np.heaviside incorrectly returns float64 when passed args of dtype int64 np.int64(np.heaviside(a, b)) if a.dtype == np.int64 and b.dtype == np.int64 else np.heaviside(a, b) ), dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32), supports_autograd=False, supports_rhs_python_scalar=False, skips=( # RuntimeError: heaviside is not yet implemented for tensors with different dtypes. 
DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion'), DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_binary_ufuncs_mixed_dtype'), # PyTorch's heaviside does not appear to propagate NaNs DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics_extremal_values'), )), BinaryUfuncInfo('lcm', ref=np.lcm, dtypes=integral_types_and(), supports_autograd=False, supports_rhs_python_scalar=False), BinaryUfuncInfo('gcd', ref=np.gcd, dtypes=integral_types_and(), supports_autograd=False, supports_rhs_python_scalar=False, skips=( DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_reference_numerics_small_values', dtypes=(torch.int8,)),)), BinaryUfuncInfo('isclose', ref=np.isclose, dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), sample_inputs_func=sample_inputs_isclose, error_inputs_func=error_inputs_isclose, supports_autograd=False, supports_out=False, supports_rhs_python_scalar=False, skips=( DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_numpy_refs', dtypes=(torch.complex128,)), # RuntimeError: Short did not match Int DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion'), DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_binary_ufuncs_mixed_dtype'), DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics_extremal_values'), )), # `softmax` supports different dtypes based on whether `dtype` argument, # is passed or not. Hence two OpInfo entries, one with dtype and other without. # https://github.com/pytorch/pytorch/issues/68752 OpInfo('softmax', aliases=('special.softmax', 'nn.functional.softmax',), aten_name='softmax', aten_backward_name='_softmax_backward_data', dtypes=floating_types_and(torch.half, torch.bfloat16), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16), sample_inputs_func=sample_inputs_softmax_variant, assert_jit_shape_analysis=True, assert_autodiffed=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=True), OpInfo('softmax', aliases=('special.softmax', 'nn.functional.softmax',), variant_test_name="with_dtype", aten_name='softmax', dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16), sample_inputs_func=partial(sample_inputs_softmax_variant, with_dtype=True), assert_autodiffed=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=True), OpInfo( '_softmax_backward_data', op=torch.ops.aten._softmax_backward_data, aten_name='_softmax_backward_data', dtypes=floating_types_and(torch.bfloat16, torch.float16), sample_inputs_func=sample_inputs_softmax_backward_data, assert_autodiffed=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False, skips=( DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)), ), ), # `softmin` supports different dtypes based on whether `dtype` argument, # is passed or not. Hence two OpInfo entries, one with dtype and other without. 
# https://github.com/pytorch/pytorch/issues/68752 OpInfo('nn.functional.softmin', aten_name='softmin', dtypes=floating_types_and(torch.half, torch.bfloat16), sample_inputs_func=sample_inputs_softmax_variant, assert_jit_shape_analysis=False, assert_autodiffed=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False), OpInfo('nn.functional.softmin', variant_test_name="with_dtype", aten_name='softmin', dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), sample_inputs_func=partial(sample_inputs_softmax_variant, with_dtype=True), assert_autodiffed=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False), OpInfo( "nn.functional.cross_entropy", dtypes=floating_types_and(torch.float16, torch.bfloat16), sample_inputs_func=sample_inputs_cross_entropy, supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, decorators=( DecorateInfo( toleranceOverride({torch.float32: tol(atol=3e-3, rtol=1e-3)}), "TestJit", "test_variant_consistency_jit", device_type="cpu", ), ), skips=( # AssertionError: False is not true : Scalars failed to compare as equal! 0 != 1536 # test_ops.TestJitCUDA.test_variant_consistency_jit_nn_functional_cross_entropy_cuda_float32 leaked # 1536 bytes CUDA memory on device 0 DecorateInfo( unittest.expectedFailure, "TestJit", "test_variant_consistency_jit", device_type="cuda", ), DecorateInfo(unittest.skip("FP16 cross_entropy cases have not been enabled on MPS yet"), dtypes=(torch.half,), device_type="mps"), ) ), OpInfo('nn.functional.normalize', dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16), sample_inputs_func=sample_inputs_normalize, supports_forward_ad=True, supports_fwgrad_bwgrad=True), OpInfo('aminmax', ref=lambda x, dim=None, keepdim=False: (np.amin(x, axis=dim, keepdims=keepdim), np.amax(x, axis=dim, keepdims=keepdim)), dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32, torch.int8), decorators=(onlyNativeDeviceTypes,), supports_autograd=False, sample_inputs_func=sample_inputs_aminmax, error_inputs_func=error_inputs_aminmax_amax_amin), OpInfo('as_strided', dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32, torch.int8, torch.bool), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # vmap does not support inplace views check_inplace_batched_forward_grad=False, sample_inputs_func=sample_inputs_as_strided, skips=( # Note: This xfail is fine -- it's inherent to how as_strided works DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_noncontiguous_samples'), # AssertionError: False is not true : Scalars failed to compare as equal! 
DecorateInfo(unittest.skip("Errors when storage_offset is included"), 'TestCommon', 'test_variant_consistency_eager'), # Not close DecorateInfo(unittest.skip("Errors when storage_offset is included"), 'TestCommon', 'test_complex_half_reference_testing'), # Not close DecorateInfo(unittest.skip("Errors when storage_offset is included"), 'TestMathBits', 'test_conj_view'), DecorateInfo(unittest.skip("Errors when storage_offset is included"), 'TestMathBits', 'test_neg_view'), DecorateInfo(unittest.skip("Numerous errors"), 'TestFwdGradients'), DecorateInfo(unittest.skip("Numerous errors"), 'TestBwdGradients'), )), OpInfo('as_strided', variant_test_name='partial_views', dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.int32, torch.int8, torch.bool), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # vmap does not support inplace views check_inplace_batched_forward_grad=False, sample_inputs_func=sample_inputs_as_strided_partial_views, skips=( # Note: This xfail is fine -- it's inherent to how as_strided works DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_noncontiguous_samples'), # These fail because the test changes the input's in-memory layout DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_complex_half_reference_testing'), DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_compare_cpu'), DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), DecorateInfo(unittest.expectedFailure, 'TestFwdGradients', 'test_fn_fwgrad_bwgrad', dtypes=(torch.complex64, torch.complex128)), DecorateInfo(unittest.expectedFailure, 'TestFwdGradients', 'test_forward_mode_AD'), DecorateInfo(unittest.expectedFailure, 'TestFwdGradients', 'test_inplace_forward_mode_AD'), DecorateInfo(unittest.expectedFailure, 'TestBwdGradients', 'test_inplace_grad'), DecorateInfo(unittest.expectedFailure, 'TestBwdGradients', 'test_inplace_gradgrad'), DecorateInfo(unittest.expectedFailure, 'TestProxyTensorOpInfo', 'test_make_fx_symbolic_exhaustive_inplace'), DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness'), # Fail but are also flaky DecorateInfo(unittest.skip("Test changes in memory layout"), 'TestMathBits'), DecorateInfo(unittest.skip("Modifies input strides and storage_offset"), 'TestCommon', 'test_non_standard_bool_values'), # RuntimeError: setStorage: sizes [2, 2], strides [1, 2], storage offset 10, and itemsize 2 requiring a # storage size of 28 are out of bounds for storage of size 20 DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_meta_inplace'), DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_meta_inplace'), DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_symbolic_meta_inplace'), DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_symbolic_meta_inplace_all_strides'), )), OpInfo('as_strided_copy', dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), supports_out=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # vmap does not support inplace views check_inplace_batched_forward_grad=False, sample_inputs_func=sample_inputs_as_strided, skips=( # Note: This xfail is fine -- it's inherent to how as_strided works DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_noncontiguous_samples'), # AssertionError: False is not true : 
Scalars failed to compare as equal! DecorateInfo(unittest.skip("Errors when storage_offset is included"), 'TestCommon', 'test_variant_consistency_eager'), # Not close DecorateInfo(unittest.skip("Errors when storage_offset is included"), 'TestCommon', 'test_complex_half_reference_testing'), # Not close DecorateInfo(unittest.skip("Errors when storage_offset is included"), 'TestMathBits', 'test_conj_view'), DecorateInfo(unittest.skip("Errors when storage_offset is included"), 'TestMathBits', 'test_neg_view'), DecorateInfo(unittest.skip("Numerous errors"), 'TestFwdGradients'), DecorateInfo(unittest.skip("Numerous errors"), 'TestBwdGradients'), DecorateInfo(unittest.expectedFailure, 'TestDTensorOps', 'test_dtensor_op_db'), )), OpInfo('as_strided_scatter', dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # vmap does not support inplace views check_inplace_batched_forward_grad=False, sample_inputs_func=sample_inputs_as_strided_scatter, error_inputs_func=error_inputs_as_strided_scatter, skips=( DecorateInfo(unittest.skip('Works for int64, fails for everything else'), 'TestCommon', 'test_noncontiguous_samples'), # noqa: B950 DecorateInfo(unittest.skip('Fails in most cases, passes on LAZY for some reason'), 'TestCommon', 'test_variant_consistency_eager'), # noqa: B950 DecorateInfo(unittest.skip('Fails on cuda + rocm'), 'TestCommon', 'test_complex_half_reference_testing'), DecorateInfo(unittest.expectedFailure, 'TestBwdGradients', 'test_fn_grad'), DecorateInfo(unittest.expectedFailure, 'TestFwdGradients', 'test_forward_mode_AD'), DecorateInfo(unittest.skip('Passes on complex128 and float64 only'), 'TestFwdGradients', 'test_fn_fwgrad_bwgrad'), # AssertionError: Tensor-likes are not close! 
(new_empty_strided.default) DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"), 'TestDecomp', 'test_comprehensive'),)), OpInfo('native_layer_norm', aten_name='native_layer_norm', ref=reference_native_layer_norm, dtypes=floating_types_and(torch.half, torch.bfloat16), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16), supports_out=False, assert_jit_shape_analysis=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_native_layer_norm, error_inputs_func=error_inputs_native_layer_norm, skips=( # IndexError: tuple index out of range DecorateInfo(unittest.skip('Skipped!'), 'TestFwdGradients', 'test_forward_mode_AD'), # Tests fail when weight=None and bias is defined # https://github.com/pytorch/pytorch/issues/79705 DecorateInfo(unittest.expectedFailure, 'TestBwdGradients', 'test_fn_gradgrad'), # JIT test also tries to compute double backward, which fails DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), DecorateInfo(unittest.skip("Unsupported on MPS for now"), 'TestCommon', 'test_numpy_ref_mps'), DecorateInfo(toleranceOverride({torch.float32: tol(atol=2e-03, rtol=5e-03)}), "TestDecomp", "test_comprehensive", device_type="cpu"), )), OpInfo('native_batch_norm', aten_name='native_batch_norm', dtypes=floating_types_and(torch.float16, torch.bfloat16), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, assert_jit_shape_analysis=True, allow_cow_input_materialize_forward=[3, 4], allow_cow_input_materialize_backward=[3, 4], sample_inputs_func=sample_inputs_native_batch_norm, skips=( # NotImplementedError: Could not run # 'aten::native_batch_norm.out' with arguments from the 'CPU' backend. DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning', device_type="cpu"), # RuntimeError: out_invstd.dim() == 1 && out_invstd.is_contiguous() && out_invstd.sizes()[0] DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out', device_type="cuda"), # Problem with _get_numerical_jacobian # IndexError: tuple index out of range DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients', 'test_forward_mode_AD'), # RuntimeError: deepEquals(input.iValue, deepCopiedInput) INTERNAL ASSERT FAILED DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), # https://github.com/pytorch/pytorch/issues/85960 DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_compare_cpu'), # AssertionError: Booleans mismatch: True is not False DecorateInfo(unittest.skip("Skipped!"), 'TestFakeTensor', 'test_fake_autocast'), DecorateInfo(unittest.skip("Skipped!"), 'TestFakeTensor', 'test_fake'), DecorateInfo(toleranceOverride({torch.float32: tol(atol=5e-5, rtol=5e-5)}), "TestCompositeCompliance", "test_forward_ad"), ) ), OpInfo('_native_batch_norm_legit', aten_name='_native_batch_norm_legit', dtypes=floating_types_and(torch.float16, torch.bfloat16), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, assert_jit_shape_analysis=True, allow_cow_input_materialize_forward=[3, 4], allow_cow_input_materialize_backward=[3, 4], sample_inputs_func=sample_inputs__native_batch_norm_legit, skips=( # NotImplementedError: Could not run # 'aten::native_batch_norm.out' with arguments from the 'CPU' backend. 
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning', device_type="cpu"), # RuntimeError: out_invstd.dim() == 1 && out_invstd.is_contiguous() && out_invstd.sizes()[0] DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out', device_type="cuda"), # Problem with _get_numerical_jacobian # IndexError: tuple index out of range DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients', 'test_forward_mode_AD'), # RuntimeError: deepEquals(input.iValue, deepCopiedInput) INTERNAL ASSERT FAILED DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), # https://github.com/pytorch/pytorch/issues/85960 DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_compare_cpu'), DecorateInfo(toleranceOverride({torch.float32: tol(atol=5e-5, rtol=5e-5)}), "TestCompositeCompliance", "test_forward_ad"), ) ), OpInfo('_batch_norm_with_update', op=torch.ops.aten._batch_norm_with_update, aten_name='_batch_norm_with_update', dtypes=floating_types_and(torch.float16, torch.bfloat16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, assert_jit_shape_analysis=True, allow_cow_input_materialize_forward=[3, 4], allow_cow_input_materialize_backward=[3, 4], sample_inputs_func=sample_inputs__batch_norm_with_update, skips=( # NotImplementedError: Could not run # 'aten::native_batch_norm.out' with arguments from the 'CPU' backend. DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning', device_type="cpu"), # RuntimeError: out_invstd.dim() == 1 && out_invstd.is_contiguous() && out_invstd.sizes()[0] DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out', device_type="cuda"), # Problem with _get_numerical_jacobian # IndexError: tuple index out of range DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients', 'test_forward_mode_AD'), # RuntimeError: deepEquals(input.iValue, deepCopiedInput) INTERNAL ASSERT FAILED DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), DecorateInfo(toleranceOverride({torch.float32: tol(atol=5e-5, rtol=5e-5)}), "TestCompositeCompliance", "test_forward_ad"), # _batch_norm_with_update expects contiguous inputs for cudnn and miopen DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_noncontiguous_samples', device_type="cuda"), DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_symbolic_meta_outplace_all_strides', device_type="cuda"), # _batch_norm_with_update does not have python bindings DecorateInfo(unittest.skip("Skipped!"), 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), # aten out variants do not accept out= kwarg, only python out variants DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), ) ), OpInfo('nn.functional.cosine_similarity', aten_name="cosine_similarity", dtypes=floating_types_and(torch.half, torch.bfloat16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, decorators=[ DecorateInfo( toleranceOverride({torch.float16: tol(atol=1.3e-5, rtol=2e-2)}), "TestInductorOpInfo", "test_comprehensive", device_type="cuda" ), ], sample_inputs_func=sample_inputs_cosine_similarity), OpInfo('nn.functional.adaptive_avg_pool1d', dtypes=floating_types_and(torch.half, torch.bfloat16), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.float16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, error_inputs_func=error_inputs_adaptive_avg_pool1d, 
sample_inputs_func=sample_inputs_adaptive_avg_pool1d), OpInfo('nn.functional.adaptive_avg_pool2d', dtypes=floating_types_and(torch.half, torch.bfloat16), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.float16), decorators=( # RuntimeError: # adaptive_avg_pool2d(Tensor input, int[2] output_size) -> (Tensor): # Expected a value of type 'List[int]' for argument 'output_size' but # instead found type 'Tuple[NoneType, int]'. : # File "<string>", line 3 # def the_method(i0): # return torch.nn.functional.adaptive_avg_pool2d(i0, (None, 7)) # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ <--- HERE DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), ), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, error_inputs_func=error_inputs_adaptive_avg_pool2d, sample_inputs_func=sample_inputs_adaptive_avg_pool2d), OpInfo('nn.functional.adaptive_avg_pool3d', dtypes=floating_types_and(torch.half, torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.float16), decorators=( # RuntimeError: # adaptive_avg_pool3d(Tensor input, int[3] output_size) -> (Tensor): # Expected a value of type 'List[int]' for argument 'output_size' but # instead found type 'Tuple[NoneType, NoneType, NoneType]'. : # File "<string>", line 3 # # def the_method(i0): # return torch.nn.functional.adaptive_avg_pool3d(i0, (None, None, None)) # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ <--- HERE # DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), ), # Runs very slowly on slow gradcheck - alternatively reduce input sizes gradcheck_fast_mode=True, supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, error_inputs_func=error_inputs_adaptive_avg_pool3d, sample_inputs_func=sample_inputs_adaptive_avg_pool3d), OpInfo('nn.functional.adaptive_max_pool1d', dtypes=floating_types_and(torch.half, torch.bfloat16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # got: Batching rule not implemented for aten::flatten.using_ints check_batched_forward_grad=False, gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, error_inputs_func=error_inputs_adaptive_max_pool1d, sample_inputs_func=sample_inputs_adaptive_max_pool1d), OpInfo('nn.functional.adaptive_max_pool2d', dtypes=floating_types_and(torch.half, torch.bfloat16), decorators=( # RuntimeError: # adaptive_max_pool2d(Tensor input, int[2] output_size) -> (Tensor): # Expected a value of type 'List[int]' for argument 'output_size' but # instead found type 'Tuple[NoneType, int]'. 
: # File "<string>", line 3 # def the_method(i0): # return torch.nn.functional.adaptive_max_pool2d(i0, (None, 7)) # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ <--- HERE DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), ), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # got: Batching rule not implemented for aten::flatten.using_ints check_batched_forward_grad=False, gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, error_inputs_func=error_inputs_adaptive_max_pool2d, sample_inputs_func=sample_inputs_adaptive_max_pool2d), OpInfo('nn.functional.adaptive_max_pool3d', dtypes=floating_types_and(torch.bfloat16, torch.half), decorators=( # RuntimeError: # adaptive_max_pool3d(Tensor input, int[3] output_size) -> (Tensor): # Expected a value of type 'List[int]' for argument 'output_size' but # instead found type 'Tuple[NoneType, NoneType, NoneType]'. : # File "<string>", line 3 # # def the_method(i0): # return torch.nn.functional.adaptive_max_pool3d(i0, (None, None, None)) # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ <--- HERE # DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), ), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # got: Batching rule not implemented for aten::flatten.using_ints check_batched_forward_grad=False, gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, error_inputs_func=error_inputs_adaptive_max_pool3d, sample_inputs_func=sample_inputs_adaptive_max_pool3d), OpInfo('nn.functional.avg_pool1d', aten_name='avg_pool1d', supports_autograd=True, supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, dtypes=floating_types_and(torch.int64, torch.float16, torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.float16), gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, error_inputs_func=error_inputs_avg_pool1d, sample_inputs_func=sample_inputs_avgpool1d), OpInfo('nn.functional.avg_pool3d', aten_name='avg_pool3d', supports_autograd=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, dtypes=floating_types_and(torch.int64), dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.float16), gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, error_inputs_func=error_inputs_avg_pool3d, sample_inputs_func=sample_inputs_avgpool3d, skips=( # AssertionError: Tensor-likes are not close! 
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out', device_type='cpu'), )), OpInfo( "nn.functional.binary_cross_entropy_with_logits", aten_name="binary_cross_entropy_with_logits", supports_autograd=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False, dtypes=floating_types_and(torch.half, torch.bfloat16), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.float16), gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, sample_inputs_func=sample_inputs_binary_cross_entropy_with_logits, skips=( DecorateInfo( unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,) ), ), ), UnaryUfuncInfo( 'nn.functional.relu', aten_name="relu", ref=lambda a: np.where(a <= 0, 0, a), supports_autograd=True, supports_sparse=True, supports_sparse_csr=True, supports_sparse_csc=True, supports_sparse_bsr=True, supports_sparse_bsc=True, dtypes=all_types_and(torch.half, torch.bfloat16), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16, torch.float16), sample_inputs_func=sample_inputs_nn_activation_relu, supports_out=False, supports_fwgrad_bwgrad=True, supports_forward_ad=True), OpInfo('nn.functional.conv_transpose1d', # `ref` for this function is backward of # corresponding `conv*d` ref=partial(conv_transpose_ref, fn=torch.nn.functional.conv_transpose1d), aten_name='conv_transpose1d', aliases=('conv_transpose1d',), dtypes=floating_and_complex_types_and(torch.int64, torch.float16, torch.bfloat16), dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.chalf, torch.bfloat16), sample_inputs_func=sample_inputs_conv_transpose1d, supports_forward_ad=True, supports_fwgrad_bwgrad=True, assert_jit_shape_analysis=True, gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, decorators=( DecorateInfo( toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1.3e-06), }), 'TestCommon', 'test_variant_consistency_eager', device_type='cuda'), DecorateInfo( toleranceOverride({torch.chalf: tol(atol=5e-2, rtol=5e-2), }), 'TestCommon', 'test_complex_half_reference_testing'), DecorateInfo( toleranceOverride({torch.float: tol(atol=1.5e-5, rtol=1.5e-5), }), 'TestCommon', 'test_numpy_ref_mps'), DecorateInfo( toleranceOverride({torch.half: tol(atol=1e-3, rtol=5e-3), }), 'TestInductorOpInfo', 'test_comprehensive', device_type='cpu'), ), skips=( # Reason for Skip: https://github.com/pytorch/pytorch/pull/79694#issuecomment-1186949486 DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.complex64,)), # RuntimeError: UNSUPPORTED DTYPE: complex DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness', dtypes=(torch.complex64, torch.complex128)), # RuntimeError: !lhs.isAliasOf(rhs)INTERNAL ASSERT FAILED at # "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":104, please report a bug to PyTorch. 
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float,)), # RuntimeError: "slow_conv2d_cpu_grad_input" not implemented for 'Long' DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_numpy_ref', dtypes=(torch.int64,)), ), supports_out=False,), OpInfo('nn.functional.conv_transpose2d', aten_name='conv_transpose2d', aliases=('conv_transpose2d',), # `ref` for this function is backward of # corresponding `conv*d` ref=partial(conv_transpose_ref, fn=torch.nn.functional.conv_transpose2d), dtypes=floating_and_complex_types_and(torch.int64, torch.float16, torch.bfloat16), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16), dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.chalf, torch.bfloat16), sample_inputs_func=sample_inputs_conv_transpose2d, # Runs very slowly on slow-gradcheck for complex. gradcheck_fast_mode=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, assert_jit_shape_analysis=True, gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, decorators=[ DecorateInfo( toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1.3e-06), }), 'TestCommon', 'test_variant_consistency_eager', device_type='cuda'), DecorateInfo( toleranceOverride({torch.float32: tol(atol=2e-05, rtol=5e-05), }), 'TestCommon', 'test_noncontiguous_samples', device_type='cuda'), DecorateInfo( toleranceOverride({torch.chalf: tol(atol=8e-2, rtol=8e-2), }), 'TestCommon', 'test_complex_half_reference_testing'), DecorateInfo( toleranceOverride({torch.half: tol(atol=1e-3, rtol=4e-3), }), 'TestInductorOpInfo', 'test_comprehensive', device_type='cpu')], skips=( # RuntimeError: !lhs.isAliasOf(rhs)INTERNAL ASSERT FAILED at # "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":104, please report a bug to PyTorch. DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), # RuntimeError: UNSUPPORTED DTYPE: complex DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness', dtypes=(torch.complex64, torch.complex128)), # RuntimeError: "slow_conv2d_cpu_grad_input" not implemented for 'Long' DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_numpy_ref', dtypes=(torch.int64,)), # Reference: https://github.com/pytorch/pytorch/issues/86356 DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_numpy_ref', dtypes=(torch.double, torch.cdouble)), DecorateInfo(unittest.skip("Unsupported on MPS for now"), 'TestCommon', 'test_numpy_ref_mps'), # AssertionError: None mismatch: torch.complex64 is not None DecorateInfo(unittest.expectedFailure, 'TestDtypeCustomRules', 'test_custom_rules', dtypes=(torch.complex64, torch.complex128)), ), supports_out=False,), OpInfo('nn.functional.conv_transpose3d', aten_name='conv_transpose3d', aliases=('conv_transpose3d',), # `ref` for this function is backward of # corresponding `conv*d` ref=partial(conv_transpose_ref, fn=torch.nn.functional.conv_transpose3d), dtypes=floating_and_complex_types_and(torch.int64, torch.float16, torch.bfloat16), dtypesIfCUDA=floating_and_complex_types_and( torch.float16, torch.chalf, torch.bfloat16), sample_inputs_func=sample_inputs_conv_transpose3d, supports_forward_ad=True, supports_fwgrad_bwgrad=True, assert_jit_shape_analysis=True, # Runs very slowly on slow-gradcheck - alternatively reduce input sizes gradcheck_fast_mode=True, gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, decorators=[ DecorateInfo( toleranceOverride({torch.float16: tol(atol=5e-2, rtol=5e-2), }), 'TestInductorOpInfo', 'test_comprehensive', device_type='cuda'), DecorateInfo( 
toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1.3e-06), torch.complex64: tol(atol=1.3e-04, rtol=1.3e-05)}), 'TestCommon', 'test_variant_consistency_eager', device_type='cuda'), DecorateInfo( toleranceOverride({torch.float32: tol(atol=2e-04, rtol=2e-04), }), 'TestCompositeCompliance', 'test_operator', device_type='cuda'), DecorateInfo( toleranceOverride({torch.float32: tol(atol=1.3e-04, rtol=1.3e-06), torch.complex64: tol(atol=1.3e-04, rtol=1.3e-05)}), 'TestCommon', 'test_noncontiguous_samples', device_type='cuda'), DecorateInfo( toleranceOverride({torch.float32: tol(atol=1e-04, rtol=2e-05), }), 'TestCompositeCompliance', 'test_forward_ad', device_type='cuda', active_if=TEST_CUDNN), DecorateInfo( toleranceOverride({torch.complex64: tol(atol=1e-4, rtol=1e-4)}), "TestMathBits", "test_conj_view", device_type='cuda'), DecorateInfo( toleranceOverride({torch.chalf: tol(atol=9e-2, rtol=9e-2), }), 'TestCommon', 'test_complex_half_reference_testing'), DecorateInfo( toleranceOverride({torch.half: tol(atol=9e-3, rtol=2e-1), }), 'TestInductorOpInfo', 'test_comprehensive', device_type='cpu')], skips=( # RuntimeError: !lhs.isAliasOf(rhs)INTERNAL ASSERT FAILED at # "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":104, please report a bug to PyTorch. DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), # RuntimeError: "slow_conv3d_cpu_grad_input" not implemented for 'Long' DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_numpy_ref', dtypes=(torch.int64,)), # Reference: https://github.com/pytorch/pytorch/issues/86356 DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_numpy_ref', dtypes=(torch.double, torch.cdouble)), DecorateInfo(unittest.skip("Unsupported on MPS for now"), 'TestCommon', 'test_numpy_ref_mps'), # RuntimeError: UNSUPPORTED DTYPE: complex DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness', dtypes=(torch.complex64, torch.complex128)), DecorateInfo(unittest.skip('Skipped for ROCm!'), 'TestCommon', 'test_complex_half_reference_testing', dtypes=[torch.complex32], active_if=TEST_WITH_ROCM), ), supports_out=False,), OpInfo('nn.functional.conv1d', aliases=('conv1d',), aten_name='conv1d', dtypes=floating_and_complex_types_and(torch.int64, torch.float16, torch.bfloat16), dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.chalf, torch.bfloat16), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16), sample_inputs_func=sample_inputs_conv1d, error_inputs_func=error_inputs_conv1d, supports_forward_ad=True, supports_fwgrad_bwgrad=True, assert_jit_shape_analysis=True, gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, decorators=( DecorateInfo( toleranceOverride({torch.chalf: tol(atol=1e-2, rtol=5e-2)}), 'TestCommon', 'test_complex_half_reference_testing' ), DecorateInfo( toleranceOverride({torch.float16: tol(atol=2e-3, rtol=1e-3)}), 'TestInductorOpInfo', 'test_comprehensive', device_type='cuda', ), ), skips=( # RuntimeError: !lhs.isAliasOf(rhs)INTERNAL ASSERT FAILED at # "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":103, please report a bug to PyTorch. 
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), # Ref: https://github.com/pytorch/pytorch/issues/75309 # AssertionError: None mismatch: torch.complex128 is not None DecorateInfo(unittest.expectedFailure, 'TestDtypeCustomRules', 'test_custom_rules', dtypes=(torch.complex64, torch.complex128)), # Ref: https://github.com/pytorch/pytorch/issues/75309 # RuntimeError: UNSUPPORTED DTYPE: complex DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness', dtypes=(torch.complex64, torch.complex128)), ), supports_expanded_weight=True, supports_out=False,), OpInfo('nn.functional.conv2d', aliases=('conv2d',), aten_name='conv2d', dtypes=floating_and_complex_types_and(torch.int64, torch.float16, torch.bfloat16), dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.chalf, torch.bfloat16), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16), sample_inputs_func=partial(sample_inputs_conv2d), error_inputs_func=error_inputs_conv2d, gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, # Runs very slowly on slow gradcheck - alternatively reduce input sizes gradcheck_fast_mode=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, assert_jit_shape_analysis=True, decorators=( DecorateInfo( toleranceOverride({torch.chalf: tol(atol=6e-2, rtol=5e-2)}), 'TestCommon', 'test_complex_half_reference_testing', ), DecorateInfo( toleranceOverride({torch.float16: tol(atol=5e-3, rtol=1e-3)}), 'TestInductorOpInfo', 'test_comprehensive', ), ), skips=( # RuntimeError: !lhs.isAliasOf(rhs)INTERNAL ASSERT FAILED at # "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":103, please report a bug to PyTorch. DecorateInfo(unittest.skip("Works on some configs!"), 'TestJit', 'test_variant_consistency_jit'), # Ref: https://github.com/pytorch/pytorch/issues/75309 # AssertionError: None mismatch: torch.complex128 is not None DecorateInfo(unittest.expectedFailure, 'TestDtypeCustomRules', 'test_custom_rules', dtypes=(torch.complex64, torch.complex128)), # RuntimeError: UNSUPPORTED DTYPE: complex DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness', dtypes=(torch.complex64, torch.complex128)), ), supports_expanded_weight=True, supports_out=False,), OpInfo('nn.functional.conv3d', aliases=('conv3d',), aten_name='conv3d', dtypes=floating_and_complex_types_and(torch.int64, torch.bfloat16, torch.float16), dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.chalf, torch.bfloat16), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16), sample_inputs_func=sample_inputs_conv3d, error_inputs_func=error_inputs_conv3d, gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, gradcheck_fast_mode=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, decorators=( DecorateInfo( toleranceOverride({torch.chalf: tol(atol=6e-2, rtol=5e-2)}), 'TestCommon', 'test_complex_half_reference_testing', ), # TF32 DecorateInfo( toleranceOverride({torch.float32: tol(atol=5e-3, rtol=1e-3), torch.complex64: tol(atol=5e-3, rtol=1e-3)}), 'TestCommon', 'test_noncontiguous_samples', ), DecorateInfo( toleranceOverride({torch.complex64: tol(atol=5e-5, rtol=5e-6)}), 'TestMathBits', 'test_conj_view', ), DecorateInfo( toleranceOverride({torch.float32: tol(atol=5e-5, rtol=5e-6)}), 'TestOperators', 'test_vjpvmap', ), DecorateInfo( toleranceOverride({torch.float16: tol(atol=5e-3, rtol=1e-3)}), 'TestInductorOpInfo', 'test_comprehensive', ), ), skips=( # RuntimeError: !lhs.isAliasOf(rhs) INTERNAL ASSERT FAILED at # "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":103, 
please report a bug to PyTorch. DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), # RuntimeError: UNSUPPORTED DTYPE: complex DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness', dtypes=(torch.complex64, torch.complex128)), # AssertionError: Tensor-likes are not close! # break slow tests DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_compare_cpu'), ), supports_expanded_weight=True, supports_out=False,), OpInfo('nn.functional.group_norm', aten_name='group_norm', aliases=('group_norm',), ref=reference_group_norm, dtypes=floating_types_and(torch.float16, torch.bfloat16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, error_inputs_func=error_inputs_group_norm, decorators=[ # RuntimeError: Cannot insert a Tensor that requires grad as a constant. # Consider making it a parameter or input, or detaching the gradient DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)), DecorateInfo( toleranceOverride({torch.float32: tol(atol=5e-05, rtol=3e-03)}), "TestDecomp", "test_comprehensive", device_type="cpu" ), ], sample_inputs_func=sample_inputs_group_norm, reference_inputs_func=reference_inputs_group_norm, supports_expanded_weight=True,), OpInfo('nn.functional.instance_norm', # no ref because instance_norm will often have numerical instability (large numbers or nan) dtypes=floating_types_and(torch.float16, torch.bfloat16), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, allow_cow_input_materialize_forward=['running_mean', 'running_var'], decorators=[ # RuntimeError: Cannot insert a Tensor that requires grad as a constant. # Consider making it a parameter or input, or detaching the gradient DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)), ], sample_inputs_func=sample_inputs_instance_norm, supports_expanded_weight=True,), OpInfo('nn.functional.layer_norm', aten_name='layer_norm', aten_backward_name='layer_norm_backward', aliases=('layer_norm',), ref=reference_layer_norm, dtypes=floating_types_and(torch.half, torch.bfloat16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, assert_jit_shape_analysis=True, decorators=[ DecorateInfo( toleranceOverride({torch.float32: tol(atol=1e-05, rtol=1e-03)}), 'TestCommon', 'test_numpy_refs' ), DecorateInfo(unittest.skip("Bug in MPS backend!"), 'TestCommon', 'test_numpy_ref_mps'), ], sample_inputs_func=sample_inputs_layer_norm, supports_expanded_weight=True,), OpInfo('nn.functional.rms_norm', aten_name='rms_norm', aliases=('rms_norm',), ref=reference_rms_norm, dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_rms_norm, error_inputs_func=error_inputs_rms_norm,), OpInfo('nn.functional.local_response_norm', dtypes=floating_types_and(torch.int64, torch.float16, torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, decorators=[ # RuntimeError: falseINTERNAL ASSERT FAILED at # "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, please report a bug to PyTorch. 
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)), ], sample_inputs_func=sample_inputs_local_response_norm,), OpInfo('constant_pad_nd', supports_forward_ad=True, supports_fwgrad_bwgrad=True, dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half), sample_inputs_func=sample_inputs_constant_pad_nd, supports_out=False, skips=( # bool can't be passed to Scalar arguments in JIT tracer because # BoolType is not a subtype of ScalarType. DecorateInfo( unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness', dtypes=(torch.bool,)), )), OpInfo('nn.functional.pad', variant_test_name='constant', aten_name='constant_pad_nd', # Runs very slowly on slow gradcheck - alternatively reduce input sizes gradcheck_fast_mode=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half), sample_inputs_func=partial(sample_inputs_nn_pad, mode='constant'), supports_out=False), OpInfo('nn.functional.pad', variant_test_name='reflect', supports_forward_ad=True, supports_fwgrad_bwgrad=True, dtypes=all_types_and_complex_and(torch.bfloat16, torch.half), sample_inputs_func=partial(sample_inputs_nn_pad, mode='reflect'), skips=( # Doesn't have a corresponding aten operator. # RuntimeError: falseINTERNAL ASSERT FAILED at # "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, please report a bug to PyTorch. DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)), ), gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, supports_out=False), OpInfo('nn.functional.pad', variant_test_name='replicate', supports_forward_ad=True, supports_fwgrad_bwgrad=True, dtypes=all_types_and_complex_and(torch.half, torch.bfloat16), sample_inputs_func=partial(sample_inputs_nn_pad, mode='replicate'), skips=( # Doesn't have a corresponding aten operator. # RuntimeError: falseINTERNAL ASSERT FAILED at # "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, please report a bug to PyTorch. DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)), ), gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, supports_out=False), OpInfo('nn.functional.pad', variant_test_name='replicate_negative', supports_forward_ad=True, supports_fwgrad_bwgrad=True, dtypes=all_types_and_complex_and(torch.half, torch.bfloat16), sample_inputs_func=sample_inputs_nn_pad_replicate_negative, skips=( # Doesn't have a corresponding aten operator. # RuntimeError: falseINTERNAL ASSERT FAILED at # "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, please report a bug to PyTorch. DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)), # Some negative padding cases cause a segfault on MPS DecorateInfo(unittest.skip("Not fully supported on MPS"), 'TestConsistency'), ), gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, supports_out=False), OpInfo('nn.functional.pad', variant_test_name='circular', dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half), sample_inputs_func=partial(sample_inputs_nn_pad, mode='circular'), supports_forward_ad=True, supports_fwgrad_bwgrad=True, check_batched_grad=False, # https://github.com/pytorch/pytorch/issues/66357 check_batched_forward_grad=False, skips=( # Doesn't have a corresponding aten operator. 
# RuntimeError: falseINTERNAL ASSERT FAILED at # "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, please report a bug to PyTorch. DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)), # Difference from <type> is larger with decomposition new_empty_strided.default than original on output 0 DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"), 'TestDecomp', 'test_comprehensive'), ), supports_out=False), OpInfo('nn.functional.hardswish', aten_name="hardswish", aten_backward_name='hardswish_backward', supports_autograd=True, assert_autodiffed=True, sample_inputs_func=sample_inputs_hardswish, dtypes=floating_types_and(torch.bfloat16, torch.half), supports_gradgrad=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False, autodiff_nonfusible_nodes=["aten::hardswish"]), OpInfo('nn.functional.unfold', aten_name='im2col', dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16, torch.bool), dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16, torch.bool), sample_inputs_func=sample_inputs_nn_unfold, # Runs very slowly on slow gradcheck - alternatively reduce input sizes gradcheck_fast_mode=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False, skips=( # NOTE: this failure may not reproduce consistently on different systems # false INTERNAL ASSERT FAILED at "...torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185 DecorateInfo(unittest.skip("Internal assert failed!"), 'TestJit', 'test_variant_consistency_jit'), )), OpInfo('nn.functional.interpolate', aten_name="interpolate", variant_test_name='nearest', supports_autograd=True, supports_fwgrad_bwgrad=True, supports_forward_ad=True, dtypes=floating_types_and(torch.uint8, torch.half, torch.bfloat16), sample_inputs_func=partial(sample_inputs_interpolate, 'nearest'), skips=( # RuntimeError: false # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, # please report a bug to PyTorch. DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), ), supports_out=False), OpInfo('nn.functional.interpolate', aten_name="interpolate", variant_test_name='nearest-exact', supports_autograd=True, supports_fwgrad_bwgrad=True, supports_forward_ad=True, dtypes=floating_types_and(torch.half, torch.bfloat16, torch.uint8), sample_inputs_func=partial(sample_inputs_interpolate, 'nearest-exact'), skips=( # RuntimeError: false # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, # please report a bug to PyTorch. DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), # RuntimeError: aten::_upsample_nearest_exact*d hit the vmap fallback which is currently disabled DecorateInfo(unittest.expectedFailure, 'TestOperators', 'test_vmapjvpall_has_batch_rule'), DecorateInfo(unittest.expectedFailure, 'TestOperators', 'test_vmapvjp_has_batch_rule'), DecorateInfo(unittest.expectedFailure, 'TestVmapOperatorsOpInfo', 'test_op_has_batch_rule'), # NotImplementedError: The operator 'aten::_upsample_nearest_exact3d.out' is not currently implemented # for the MPS device. 
DecorateInfo(unittest.expectedFailure, 'TestConsistency'), ), supports_out=False), OpInfo('nn.functional.interpolate', aten_name="interpolate", variant_test_name='linear', supports_autograd=True, supports_fwgrad_bwgrad=True, supports_forward_ad=True, dtypes=floating_types_and(torch.half, torch.bfloat16), sample_inputs_func=partial(sample_inputs_interpolate, 'linear'), skips=( # RuntimeError: false # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, # please report a bug to PyTorch. DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), ), supports_out=False), OpInfo('nn.functional.interpolate', aten_name="interpolate", variant_test_name='bilinear', supports_fwgrad_bwgrad=True, supports_autograd=True, supports_forward_ad=True, dtypes=floating_types_and(torch.uint8, torch.half, torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, sample_inputs_func=partial(sample_inputs_interpolate, 'bilinear'), reference_inputs_func=partial(reference_inputs_interpolate, 'bilinear'), skips=( # RuntimeError: false # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, # please report a bug to PyTorch. DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), ), supports_out=False), OpInfo('nn.functional.interpolate', aten_name="interpolate", variant_test_name='bicubic', supports_autograd=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, dtypes=floating_types_and(torch.uint8, torch.half, torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), sample_inputs_func=partial(sample_inputs_interpolate, 'bicubic'), reference_inputs_func=partial(reference_inputs_interpolate, 'bicubic'), gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, skips=( # RuntimeError: false # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, # please report a bug to PyTorch. DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), ), supports_out=False), OpInfo('nn.functional.interpolate', aten_name="interpolate", variant_test_name='trilinear', supports_autograd=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, dtypes=floating_types_and(torch.half, torch.bfloat16), gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, sample_inputs_func=partial(sample_inputs_interpolate, 'trilinear'), skips=( # RuntimeError: false # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, # please report a bug to PyTorch. DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), ), supports_out=False), OpInfo('nn.functional.interpolate', aten_name="interpolate", variant_test_name='area', supports_autograd=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, dtypes=floating_types_and(torch.half, torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), sample_inputs_func=partial(sample_inputs_interpolate, 'area'), gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, skips=( # RuntimeError: false # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, # please report a bug to PyTorch. 
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), ), supports_out=False), OpInfo('nn.functional.upsample_bilinear', supports_autograd=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, dtypes=floating_types_and(torch.uint8, torch.half, torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, sample_inputs_func=partial(sample_inputs_upsample, 'bilinear'), reference_inputs_func=partial(reference_inputs_upsample, 'bilinear'), skips=( # RuntimeError: false # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, # please report a bug to PyTorch. DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), ), supports_out=False), OpInfo('_upsample_bilinear2d_aa', op=torch.ops.aten._upsample_bilinear2d_aa, aten_name='_upsample_bilinear2d_aa', supports_autograd=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, dtypes=floating_types_and(torch.uint8), dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, sample_inputs_func=partial(sample_inputs_upsample_aa, 'bilinear'), supports_out=False, skips=( DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), DecorateInfo(unittest.expectedFailure, 'TestDTensorOps', 'test_dtensor_op_db'), DecorateInfo(unittest.expectedFailure, 'TestInductorOpInfo', 'test_comprehensive'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), )), OpInfo( "nn.functional.soft_margin_loss", dtypes=floating_types_and(torch.half, torch.bfloat16), supports_out=False, supports_forward_ad=True, # doesn't support grad on target sample_inputs_func=partial(sample_inputs_loss, rhs_requires_grad=False), error_inputs_func=error_inputs_soft_margin_loss, ), OpInfo('nn.functional.upsample_nearest', supports_autograd=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, dtypes=floating_types_and(torch.uint8, torch.half, torch.bfloat16), gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, sample_inputs_func=partial(sample_inputs_upsample, 'nearest'), skips=( # RuntimeError: false # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":185, # please report a bug to PyTorch. 
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), ), supports_out=False), OpInfo( "nn.functional.margin_ranking_loss", dtypes=all_types_and(torch.half, torch.bfloat16), supports_out=False, sample_inputs_func=sample_inputs_margin_ranking_loss, error_inputs_func=error_inputs_margin_ranking_loss, reference_inputs_func=reference_inputs_margin_ranking_loss, supports_forward_ad=True, supports_fwgrad_bwgrad=True), OpInfo( "nn.functional.multi_margin_loss", dtypes=floating_types(), dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.float16), supports_out=False, supports_gradgrad=False, sample_inputs_func=sample_inputs_multi_margin_loss, reference_inputs_func=reference_inputs_multi_margin_loss, error_inputs_func=error_inputs_multi_margin_loss, decorators=( DecorateInfo( toleranceOverride({torch.float32: tol(atol=1e-4, rtol=1e-4)}), "TestJit", "test_variant_consistency_jit", ), ), ), OpInfo( "nn.functional.multilabel_margin_loss", dtypes=floating_types(), dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.float16), supports_out=False, supports_gradgrad=False, sample_inputs_func=sample_inputs_multilabel_margin_loss, reference_inputs_func=reference_inputs_multilabel_margin_loss, error_inputs_func=error_inputs_multilabel_margin_loss, ), OpInfo('nn.functional.leaky_relu', aliases=None, aten_name="leaky_relu", aten_backward_name='leaky_relu_backward', sample_inputs_func=sample_inputs_leaky_relu, dtypes=floating_types_and(torch.bfloat16, torch.float16), inplace_variant=lambda x, negative_slope=0.01: torch.nn.functional.leaky_relu(x, negative_slope, inplace=True), supports_autograd=True, assert_autodiffed=True, supports_gradgrad=True, supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, autodiff_nonfusible_nodes=["aten::leaky_relu"]), OpInfo( "nn.functional.multilabel_soft_margin_loss", supports_out=False, dtypes=floating_types_and(torch.half, torch.bfloat16), sample_inputs_func=sample_inputs_multilabel_soft_margin_loss, supports_forward_ad=True, supports_fwgrad_bwgrad=True, decorators=( DecorateInfo( toleranceOverride({torch.float32: tol(atol=1e-4, rtol=1e-4)}), "TestJit", "test_variant_consistency_jit", ), DecorateInfo( toleranceOverride({torch.float16: tol(atol=4e-3, rtol=1.3e-3)}), "TestInductorOpInfo", "test_comprehensive", device_type="cuda" ), ), skips=( # AssertionError: False is not true : Scalars failed to compare as equal! 0 != 4096 # __main__.TestJitCUDA.test_variant_consistency_jit_nn_functional_multilabel_soft_margin_loss_cuda_float32 # leaked 4096 bytes CUDA memory on device 0 DecorateInfo( # Skip instead of expectedFailure because this fails # locally for me but passes in CI. 
unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit", device_type="cuda", ), ), ), OpInfo('nn.functional.avg_pool2d', aten_name='avg_pool2d', supports_autograd=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, dtypes=floating_types_and(torch.int64, torch.float16, torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), error_inputs_func=error_inputs_avg_pool2d, sample_inputs_func=sample_inputs_avgpool2d, skips=( DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out', device_type='cuda'), )), OpInfo('nn.functional.fractional_max_pool2d', supports_autograd=True, supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, op=lambda input, *args, **kwargs: wrapper_set_seed(torch.nn.functional.fractional_max_pool2d, input, *args, **kwargs), # vmap does not support random operations check_batched_forward_grad=False, dtypes=floating_types_and(torch.bfloat16, torch.float16), test_neg_view=False, sample_inputs_func=sample_inputs_fractional_max_pool2d, decorators=( # FIXME: AssertionError: False is not true : Tensors failed to compare as equal! DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), # RuntimeError: input->type()->kind() == TypeKind::OptionalType # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":270 DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit')), skips=( DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'),)), OpInfo('nn.functional.fractional_max_pool3d', supports_autograd=True, supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, op=lambda input, *args, **kwargs: wrapper_set_seed(torch.nn.functional.fractional_max_pool3d, input, *args, **kwargs), # vmap does not support random operations check_batched_forward_grad=False, dtypes=floating_types_and(torch.bfloat16, torch.float16), test_neg_view=False, gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, sample_inputs_func=sample_inputs_fractional_max_pool3d, decorators=( # FIXME: both derivatives are implemented incorrectly # https://github.com/pytorch/pytorch/issues/69322 # FIXME: AssertionError: False is not true : Tensors failed to compare as equal! DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), # RuntimeError: input->type()->kind() == TypeKind::OptionalType # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":270 DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit')), skips=( DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'),)), OpInfo('nn.functional.max_pool1d', aten_name='max_pool1d', supports_autograd=True, supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # got: Batching rule not implemented for aten::flatten.using_ints check_batched_forward_grad=False, # TODO: add shape checks assert_jit_shape_analysis=False, dtypes=floating_types_and(torch.bfloat16, torch.float16), dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), skips=( # Pre-existing condition; Needs to be fixed DecorateInfo(unittest.skip("Works on some configs"), 'TestNNCOpInfo', 'test_nnc_correctness', dtypes=(torch.bfloat16,)), # RuntimeError: The tensor has a non-zero number of elements, but its data is not allocated yet. 
# Caffe2 uses a lazy allocation, so you will need to call mutable_data() or raw_mutable_data() # to actually allocate memory DecorateInfo(unittest.skip("Skipped!"), 'TestTags', 'test_tags'), ), error_inputs_func=error_inputs_max_pool1d, sample_inputs_func=sample_inputs_max_pool), OpInfo('nn.functional.max_pool2d', aten_name='max_pool2d', # Runs very slowly on slow gradcheck - alternatively reduce input sizes gradcheck_fast_mode=True, # Vmap is not happy with non-contiguous (channels_last) inputs check_batched_gradgrad=False, supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # got: Batching rule not implemented for aten::flatten.using_ints check_batched_forward_grad=False, assert_jit_shape_analysis=True, dtypes=all_types_and(torch.float16, torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), error_inputs_func=error_inputs_max_pool2d, sample_inputs_func=sample_inputs_max_pool), OpInfo('max_pool2d_with_indices_backward', op=max_pool2d_backward, # We've defined a custom op, so there's no corresponding aten op aten_name=None, method_variant=None, inplace_variant=None, operator_variant=None, inplace_operator_variant=None, check_batched_gradgrad=False, supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, check_batched_forward_grad=False, assert_jit_shape_analysis=False, dtypes=floating_types_and(torch.bfloat16, torch.float16), sample_inputs_func=sample_inputs_max_pool, skips=( # We've defined a custom op here, and we don't handle the case where we receive an out kwarg DecorateInfo(unittest.skip("Skipped!"), "TestCommon", "test_out"), DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), # FX failed to normalize op - add the op to the op_skip list. DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), # object has no attribute max_pool2d_with_indices_backward (It's not available on torch -- so expected) DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit') )), OpInfo('nn.functional.max_pool3d', aten_name='max_pool3d', # Runs very slowly on slow gradcheck - alternatively reduce input sizes gradcheck_fast_mode=True, supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # got: Batching rule not implemented for aten::flatten.using_ints check_batched_forward_grad=False, # TODO: add shape checks assert_jit_shape_analysis=False, dtypes=all_types_and(torch.bfloat16, torch.float16), dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), # TODO: investigate nondeterminism gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, error_inputs_func=error_inputs_max_pool3d, sample_inputs_func=sample_inputs_max_pool), OpInfo('nn.functional.max_unpool1d', aten_name='max_unpool1d', supports_autograd=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False, assert_jit_shape_analysis=False, dtypes=floating_types_and(torch.float16, torch.bfloat16), sample_inputs_func=sample_inputs_max_unpool, skips=( # Gradients are tested in `variant_test_name=grad` below. # We skip tests here because there is non-determinism in backward # with gather, when there are writes into the same memory location, # and if there are several indices pointing to the same memory, # gradcheck is oblivious about that and cannot perturb them all at once # (see sample_inputs_max_unpool_grad to find out more). 
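# Rough illustration of the duplicate-index situation described above (comment only; the shapes and
# values are made-up assumptions, not taken from the actual sample inputs):
#   vals    = torch.tensor([[[2., 3.]]])
#   indices = torch.tensor([[[1, 1]]])   # two entries scatter into the same output position
#   torch.nn.functional.max_unpool1d(vals, indices, kernel_size=2)
# With repeated indices the forward scatter (and the corresponding gather in backward) depends on write
# order, so finite-difference gradcheck cannot be expected to match the analytical gradient.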
DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', 'test_fn_grad'), DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', 'test_fn_gradgrad'), DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients', 'test_forward_mode_AD', active_if=(not IS_MACOS)), DecorateInfo(unittest.skip("Skipped!"), 'TestCompositeCompliance', 'test_forward_ad', device_type='cpu'), DecorateInfo(unittest.skip("Skipped!"), 'TestDecomp', 'test_quick_core_backward'), )), OpInfo('nn.functional.max_unpool1d', variant_test_name='grad', aten_name='max_unpool1d', supports_autograd=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False, assert_jit_shape_analysis=False, dtypes=floating_types_and(torch.float16, torch.bfloat16), sample_inputs_func=sample_inputs_max_unpool_grad), OpInfo('nn.functional.max_unpool2d', aten_name='max_unpool2d', supports_autograd=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False, assert_jit_shape_analysis=False, dtypes=floating_types_and(torch.float16, torch.bfloat16), sample_inputs_func=sample_inputs_max_unpool, skips=( # Gradients are tested in `variant_test_name=grad` below. # We skip tests here because there is non-determinism in backward # with gather, when there are writes into the same memory location, # and if there are several indices pointing to the same memory, # gradcheck is oblivious about that and cannot perturb them all at once # (see sample_inputs_max_unpool_grad to find out more). DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients', 'test_forward_mode_AD', active_if=(not IS_MACOS)), DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', 'test_fn_gradgrad'), DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', 'test_fn_grad'), DecorateInfo(unittest.skip("Skipped!"), 'TestCompositeCompliance', 'test_forward_ad'), DecorateInfo(unittest.skip("Skipped!"), 'TestDecomp', 'test_quick_core_backward'), )), OpInfo('nn.functional.max_unpool2d', variant_test_name='grad', aten_name='max_unpool2d', # Runs very slowly on slow gradcheck - alternatively reduce input sizes gradcheck_fast_mode=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # Vmap is not happy with non-contiguous (channels_last) inputs check_batched_grad=False, supports_out=False, assert_jit_shape_analysis=False, dtypes=floating_types_and(torch.float16, torch.bfloat16), sample_inputs_func=sample_inputs_max_unpool_grad), OpInfo('nn.functional.max_unpool3d', aten_name='max_unpool3d', # Runs very slowly on slow gradcheck - alternatively reduce input sizes gradcheck_fast_mode=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False, assert_jit_shape_analysis=False, dtypes=floating_types_and(torch.float16, torch.bfloat16), sample_inputs_func=sample_inputs_max_unpool, skips=( # Gradients are tested in `variant_test_name=grad` below. # We skip tests here because there is non-determinism in backward # with gather, when there are writes into the same memory location, # and if there are several indices pointing to the same memory, # gradcheck is oblivious about that and cannot perturb them all at once # (see sample_inputs_max_unpool_grad to find out more). 
DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients', 'test_forward_mode_AD', active_if=(not IS_MACOS)), DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', 'test_fn_gradgrad'), DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', 'test_fn_grad'), DecorateInfo(unittest.skip("Skipped!"), 'TestCompositeCompliance', 'test_forward_ad'), DecorateInfo(unittest.skip("Skipped!"), 'TestDecomp', 'test_quick_core_backward'), )), OpInfo('nn.functional.max_unpool3d', variant_test_name='grad', aten_name='max_unpool3d', supports_autograd=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False, assert_jit_shape_analysis=False, dtypes=floating_types_and(torch.float16, torch.bfloat16), sample_inputs_func=sample_inputs_max_unpool_grad), OpInfo('nn.functional.linear', aten_name='linear', supports_autograd=True, supports_gradgrad=True, sample_inputs_func=sample_inputs_linear, dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), dtypesIfROCM=floating_and_complex_types_and(torch.float16, torch.bfloat16), dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16), backward_dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16), # linear calls mm under the hood which is nondeterministic on CUDA # https://pytorch.org/docs/stable/generated/torch.use_deterministic_algorithms.html#torch.use_deterministic_algorithms gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # See https://github.com/pytorch/pytorch/issues/66357 check_batched_forward_grad=False, supports_expanded_weight=True, decorators=( # Strides are not the same! DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), )), OpInfo('nn.functional.bilinear', aten_name='bilinear', supports_autograd=True, sample_inputs_func=sample_inputs_bilinear, dtypes=all_types_and(torch.float16, torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.float16, *[torch.bfloat16] if SM53OrLater or TEST_WITH_ROCM else []), decorators=( DecorateInfo(toleranceOverride({torch.float16: tol(atol=2e-03, rtol=1.3e-03)}), 'TestInductorOpInfo', 'test_comprehensive', device_type='cpu'), ), skips=( # NVIDIA only assures that bfloat16 is supported by bmm if SM >= 5.3 DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes', device_type='cuda', active_if=not SM53OrLater), DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness', dtypes=(torch.bfloat16,)), ), # Runs very slowly on slow gradcheck - alternatively reduce input sizes gradcheck_fast_mode=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False), OpInfo('nn.functional.glu', aten_name='glu', # Runs very slowly on slow gradcheck - alternatively reduce input sizes gradcheck_fast_mode=True, sample_inputs_func=sample_inputs_glu, dtypes=floating_types_and(torch.bfloat16, torch.float16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False), UnaryUfuncInfo( 'nn.functional.elu', aten_backward_name='elu_backward', ref=lambda x, alpha=1.0, inplace=False: np.maximum(0., x) + np.minimum(0., alpha * (np.exp(x) - 1)), dtypes=floating_types_and(torch.bfloat16, torch.float16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_autograd=True, assert_autodiffed=False, supports_gradgrad=True, supports_out=False, sample_kwargs=lambda device, dtype, input: ({'alpha': 0.8}, {'alpha': 0.8}), inplace_variant=lambda x, alpha=1.0: torch.nn.functional.elu(x, alpha, inplace=True), decorators=[ DecorateInfo( toleranceOverride({ 
torch.float16: tol(atol=1e-03, rtol=1.2e-03), torch.bfloat16: tol(atol=1e-03, rtol=1.2e-03) }), 'TestUnaryUfuncs', device_type='cuda', ), ], ), # Marked as a Unary function because it has some rather odd broadcasting semantics in its # second argument UnaryUfuncInfo( 'nn.functional.prelu', aten_backward_name='_prelu_kernel_backward', ref=lambda x, weight: np.maximum(0., x) + np.minimum(0., x) * (weight if x.ndim == 1 else weight.reshape([weight.size if i == 1 else 1 for i in range(0, x.ndim)])), dtypes=floating_types_and(torch.bfloat16, torch.float16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_autograd=True, assert_autodiffed=False, supports_gradgrad=True, supports_out=False, # test_reference_numerics only tests the case when the weight tensor is a scalar sample_kwargs=sample_kwargs_prelu_scalar_weight, error_inputs_func=error_inputs_prelu, sample_inputs_func=sample_inputs_prelu, reference_inputs_func=reference_inputs_prelu, decorators=[ # RuntimeError: Cannot insert a Tensor that requires grad as a constant. # Consider making it a parameter or input, or detaching the gradient # https://github.com/pytorch/pytorch/issues/68752 DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), ], ), UnaryUfuncInfo( 'nn.functional.celu', ref=lambda x, alpha=1.0, inplace=False: np.maximum(0., x) + np.minimum(0., alpha * (np.exp(x / alpha) - 1)), dtypes=floating_types_and(torch.bfloat16, torch.float16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_autograd=True, assert_autodiffed=False, supports_gradgrad=True, supports_out=False, sample_kwargs=lambda device, dtype, input: ({'alpha': 0.8}, {'alpha': 0.8}), inplace_variant=lambda x, alpha=1.0: torch.nn.functional.celu(x, alpha, inplace=True), decorators=[ DecorateInfo( toleranceOverride({ torch.float16: tol(atol=1e-03, rtol=1.2e-03), torch.bfloat16: tol(atol=1e-03, rtol=1.2e-03) }), 'TestUnaryUfuncs', device_type='cuda', ), ], ), UnaryUfuncInfo( 'nn.functional.rrelu', aten_backward_name='rrelu_with_noise_backward', op=lambda input, *args, **kwargs: wrapper_set_seed(torch.nn.functional.rrelu, input, *args, **kwargs), inplace_variant=lambda input, *args, **kwargs: wrapper_set_seed(torch.nn.functional.rrelu, input, *args, inplace=True, **kwargs), dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), gradcheck_wrapper=wrapper_set_seed, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False, sample_kwargs=lambda device, dtype, input: (dict(lower=0., upper=1., training=True), dict(lower=0., upper=1., training=True)), sample_inputs_func=sample_inputs_rrelu, error_inputs_func=error_inputs_rrelu, decorators=( DecorateInfo( toleranceOverride({ torch.float16: tol(atol=1e-03, rtol=1.2e-03), torch.bfloat16: tol(atol=1e-03, rtol=1.2e-03) }), 'TestUnaryUfuncs', device_type='cuda', ),), skips=( # lambda impl DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), # lambda impl DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), # In-place operations do not play well with forward AD # https://github.com/pytorch/pytorch/issues/77447 DecorateInfo(unittest.expectedFailure, 'TestFwdGradients', 'test_inplace_forward_mode_AD'), # The noise vector that's generated in these tests is not the same elementwise DecorateInfo(unittest.skip("Different noise"), 'TestUnaryUfuncs', 'test_batch_vs_slicing'), DecorateInfo(unittest.skip("Different noise"), 'TestUnaryUfuncs', 
'test_contig_vs_every_other'), DecorateInfo(unittest.skip("Different noise"), 'TestUnaryUfuncs', 'test_non_contig_expand'), DecorateInfo(unittest.skip("Different noise"), 'TestUnaryUfuncs', 'test_contig_vs_transposed'), DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu')), skip_correctness_check_compile_vs_eager=True, ), UnaryUfuncInfo( 'nn.functional.selu', ref=lambda x, inplace=False: 1.0507009873554804934193349852946 * ( np.maximum(0., x) + np.minimum(0., 1.6732632423543772848170429916717 * (np.exp(x) - 1)) ), dtypes=floating_types_and(torch.bfloat16, torch.float16), supports_forward_ad=True, # depends on 'elu' supports_fwgrad_bwgrad=True, supports_autograd=True, assert_autodiffed=False, supports_gradgrad=True, supports_out=False, inplace_variant=lambda x: torch.nn.functional.selu(x, inplace=True), decorators=[ DecorateInfo( toleranceOverride({ torch.float16: tol(atol=1e-2, rtol=1.8e-2), torch.bfloat16: tol(atol=1e-2, rtol=1.8e-2) }), 'TestUnaryUfuncs', device_type='cuda', ), ], ), OpInfo( 'torch._scaled_mm', sample_inputs_func=sample_inputs_scaled_mm, dtypes=empty_types(), dtypesIfCUDA=empty_types() + (torch.float8_e4m3fn,), supports_out=True, supports_forward_ad=False, supports_autograd=False, decorators=[skipCUDAIf(not SM89OrLater or TEST_WITH_ROCM, 'Requires CUDA SM >= 8.9')], skips=( # Sample inputs isn't really parametrized on dtype DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes', device_type='cuda'), # "mul_cuda" not implemented for float8_e4m3fn # https://github.com/pytorch/pytorch/issues/107256 DecorateInfo(unittest.skip("Skipped!"), 'TestSchemaCheckModeOpInfo', 'test_schema_correctness', dtypes=(torch.float8_e4m3fn,)), ) ), OpInfo( 'torch.ops.aten._safe_softmax.default', dtypes=all_types_and(torch.half, torch.bfloat16, torch.bool), sample_inputs_func=sample_inputs_safe_softmax, assert_jit_shape_analysis=True, assert_autodiffed=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False, supports_cow_input_no_materialize_backward=False, decorators=[], skips=( DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), ), ), OpInfo( 'nn.functional.scaled_dot_product_attention', op=lambda *args, **kwargs: wrapper_set_seed(torch.nn.functional.scaled_dot_product_attention, *args, **kwargs), sample_inputs_func=sample_inputs_scaled_dot_product_attention, dtypes=floating_types_and(torch.float16, torch.bfloat16), supports_out=False, supports_forward_ad=False, supports_fwgrad_bwgrad=True, check_batched_forward_grad=False, decorators=[DecorateInfo(toleranceOverride( {torch.float32: tol(atol=5e-05, rtol=5e-6)}), 'TestCommon',), ], skips=( # When attn mask is a composite tensor this fails backward by returning a none DecorateInfo(unittest.skip("Skipped!"), 'TestCompositeCompliance', 'test_backward', device_type='cuda'), # This is only failing on Linux Bionic 3.10 Cuda 11.6 DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes', device_type='cuda', active_if=_get_torch_cuda_version() >= (11, 6)), DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_noncontiguous_samples', dtypes=(torch.float32,)), # AssertionError: JIT Test does not execute any logic DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), # Forward works for dtype=float64 which is the math path DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients', 'test_forward_mode_AD'), # Not implemented for Forward AD DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients', 
'test_fn_fwgrad_bwgrad', device_type='cpu'), # Not implemented for backward derivative DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients', 'test_fn_gradgrad', device_type='cpu'), # CPU and CUDA have inconsistencies for intermediate outputs DecorateInfo(unittest.skip("Skipped!"), 'TestMeta', 'test_dispatch_meta_outplace', device_type='cpu'), DecorateInfo(unittest.skip("Skipped!"), 'TestMeta', 'test_dispatch_symbolic_meta_outplace', device_type='cpu'), # When changing input from Tensor to CompositeCompliantTensor, input.requires_grad() changes from true to false DecorateInfo(unittest.skip("Skipped!"), 'TestCompositeCompliance', 'test_backward', device_type='cpu'), # OpInfo was implemented with a lambda DecorateInfo(unittest.skip("Skipped!"), 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), # TODO Need to understand what this is testing and why it doesn't work DecorateInfo(unittest.skip("Skipped"), 'TestDecomp', 'test_comprehensive'), DecorateInfo(unittest.skip('output is non-deterministic (when dropout_p > 0)'), 'TestCommon', 'test_compare_cpu'), # TODO skip this for now since we can't skip on runtime arch support DecorateInfo(unittest.skip('This is '), 'TestInductorOpInfo', 'test_comprehensive'), # skip for sm < 80 DecorateInfo(unittest.skip("Skipped!"), 'TestSchemaCheckModeOpInfo', 'test_schema_correctness', device_type='cuda', dtypes=(torch.bfloat16,), active_if=not SM80OrLater), # FIXME DecorateInfo(unittest.skip('test_cow_input does not work with efficient attention on ROCM'), 'TestCompositeCompliance', 'test_cow_input', device_type='cuda', dtypes=(torch.bfloat16, torch.float16, torch.float32), active_if=TEST_WITH_ROCM and PLATFORM_SUPPORTS_MEM_EFF_ATTENTION),), ), OpInfo( 'torch.ops.aten._flash_attention_forward', sample_inputs_func=sample_inputs_flash_attention_forward, dtypes=empty_types(), dtypesIfCUDA=custom_types(torch.float16) if not SM80OrLater else custom_types(torch.float16, torch.bfloat16), supports_out=False, supports_autograd=True, supports_fwgrad_bwgrad=False, supports_forward_ad=False, check_batched_forward_grad=False, decorators=[skipCUDAIf(not PLATFORM_SUPPORTS_FLASH_ATTENTION, "This platform doesn't support Flash Attention")], skips=( # Checking the scalar value of the philox seed and offset DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_operator', device_type='cuda'), DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_noncontiguous_samples', device_type='cuda'), DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', device_type='cuda'), # None Mismatch Tensor DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_backward', device_type='cuda'), ) ), OpInfo( 'torch.ops.aten._efficient_attention_forward', sample_inputs_func=sample_inputs_efficient_attention_forward, dtypes=empty_types(), dtypesIfCUDA=custom_types(torch.float16, torch.float32) if not SM80OrLater else custom_types(torch.float16, torch.float32, torch.bfloat16), supports_out=False, supports_autograd=True, supports_fwgrad_bwgrad=False, supports_forward_ad=False, check_batched_forward_grad=False, # TODO: Skip because it produces a CUDA illegal memory access for some reason skip_cow_input_backward=True, # FIXME: mask_type == 2 (LowerRight) decorators=[ skipCUDAIf(not PLATFORM_SUPPORTS_MEM_EFF_ATTENTION, "This platform doesn't support efficient attention"), skipCUDAIf(TEST_WITH_ROCM, "Efficient attention on ROCM doesn't support custom_mask_type==2")], skips=( # Checking the scaler value of the philox seed and 
offset DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_operator', device_type='cuda'), DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_noncontiguous_samples', device_type='cuda'), DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', device_type='cuda'), # None Mismatch Tensor DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_backward', device_type='cuda'), ) ), UnaryUfuncInfo( 'nn.functional.silu', aten_backward_name='silu_backward', ref=lambda x, inplace=False: x / (1 + np.exp(-x)), dtypes=floating_types_and(torch.bfloat16, torch.float16), supports_forward_ad=True, supports_autograd=True, supports_fwgrad_bwgrad=True, assert_autodiffed=True, supports_out=False, inplace_variant=lambda x: torch.nn.functional.silu(x, inplace=True), decorators=[ DecorateInfo( toleranceOverride({ torch.float16: tol(atol=1e-3, rtol=1e-3), torch.bfloat16: tol(atol=1e-4, rtol=1e-4) }), 'TestUnaryUfuncs', device_type='cuda', ), ], skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal', dtypes=(torch.cfloat,), device_type='cpu'), ), autodiff_nonfusible_nodes=["aten::silu"], ), # TODO: combine this with the nn.functional.silu OpInfo when # complex autodiff for silu is supported or when # the forward bug is fixed # Note: silu errors when given inputs that require grad # but it doesn't support grad in their dtype # This is why the dtypes list above passes test_dtypes, # because it's getting lucky and failing in forward # because test_dtypes sets requires_grad to True # THIS IS A BUG UnaryUfuncInfo( 'nn.functional.silu', variant_test_name='complex', ref=lambda x, inplace=False: x / (1 + np.exp(-x)), dtypes=complex_types(), dtypesIfCUDA=complex_types(), supports_forward_ad=False, supports_autograd=False, assert_autodiffed=False, supports_out=False, inplace_variant=lambda x: torch.nn.functional.silu(x, inplace=True), decorators=[ DecorateInfo( toleranceOverride({ torch.float16: tol(atol=1e-3, rtol=1e-3), torch.bfloat16: tol(atol=1e-4, rtol=1e-4) }), 'TestUnaryUfuncs', device_type='cuda', ), ], skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal', dtypes=(torch.cfloat,)), # FIXME: intentionally misreports dtypes DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_dtypes'), # FIXME: numpy reference diverges: Comparing (nan+nanj) and (-0+0j) DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', dtypes=(torch.complex64, torch.cdouble)), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small', dtypes=(torch.complex64,)), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', dtypes=(torch.complex64,)))), UnaryUfuncInfo( 'nn.functional.hardsigmoid', aten_backward_name='hardsigmoid_backward', ref=reference_hardsigmoid, dtypes=floating_types_and(torch.bfloat16, torch.float16), supports_autograd=True, assert_autodiffed=False, supports_gradgrad=False, supports_forward_ad=True, supports_out=False, inplace_variant=partial(torch.nn.functional.hardsigmoid, inplace=True), decorators=[ DecorateInfo( toleranceOverride({torch.float16: tol(atol=1e-04, rtol=0.001)}), 'TestUnaryUfuncs', device_type='cuda',), ], skips=[ # still want to test that first derivative works though second derivative isn't supported DecorateInfo(unittest.expectedFailure, 'TestBwdGradients', "test_inplace_gradgrad"), # produces 0 instead of nan on ROCM 
DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs', "test_reference_numerics_extremal", device_type='cuda', active_if=(TEST_WITH_ROCM)), ] ), UnaryUfuncInfo( 'nn.functional.logsigmoid', aten_name="log_sigmoid", aten_backward_name='log_sigmoid_backward', ref=reference_logsigmoid, dtypes=floating_types_and(torch.half, torch.bfloat16), supports_autograd=True, assert_autodiffed=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_gradgrad=True, # autodiff_nonfusible_nodes=["aten::log_sigmoid"], decorators=[ DecorateInfo( precisionOverride({torch.float16: 1e-2, torch.bfloat16: 5e-3}), 'TestUnaryUfuncs', 'test_reference_numerics_small'), DecorateInfo( precisionOverride({torch.float16: 1e-2, torch.bfloat16: 5e-3}), 'TestUnaryUfuncs', 'test_reference_numerics_large'), DecorateInfo( precisionOverride({torch.float16: 1e-2, torch.bfloat16: 5e-3}), 'TestUnaryUfuncs', 'test_reference_numerics_extremal'), ], skips=( # Resized a non-empty tensor but did not warn about it. DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning', device_type='cpu'), ), ), UnaryUfuncInfo( 'nn.functional.mish', aten_backward_name='mish_backward', ref=lambda x: x * np.tanh(reference_softplus(x)), dtypes=floating_types_and(torch.bfloat16, torch.float16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_autograd=True, assert_autodiffed=False, supports_gradgrad=True, supports_out=False, inplace_variant=partial(torch.nn.functional.mish, inplace=True), decorators=[ DecorateInfo( toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-03)}), 'TestUnaryUfuncs',), ], ), UnaryUfuncInfo( 'nn.functional.softsign', ref=lambda x: x / (np.abs(x) + 1), dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.float16, torch.bfloat16, torch.bool), supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_autograd=True, assert_autodiffed=False, supports_gradgrad=True, supports_out=False, decorators=[ DecorateInfo( toleranceOverride({torch.float16: tol(atol=1e-03, rtol=1.3e-04)}), 'TestUnaryUfuncs',), ], skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small', dtypes=(torch.int, torch.int8)),), ), UnaryUfuncInfo( 'nn.functional.tanhshrink', ref=lambda x: x - np.tanh(x), dtypes=all_types_and_complex_and(torch.half, torch.bfloat16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_autograd=True, assert_autodiffed=False, supports_gradgrad=True, supports_out=False, decorators=[ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), DecorateInfo( toleranceOverride({torch.bfloat16: tol(atol=1e-02, rtol=1.6e-02)}), 'TestUnaryUfuncs',), DecorateInfo(toleranceOverride({torch.complex64: tol(atol=6e-04, rtol=1e-05), torch.bfloat16: tol(atol=1e-02, rtol=1.6e-02)}), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cuda'), ], skips=( # in each case, pytorch will produce a nan while numpy will not DecorateInfo(unittest.skip("Fails on some jobs works on others!"), 'TestUnaryUfuncs', "test_reference_numerics_large", dtypes=(torch.complex64, torch.complex128), active_if=(IS_MACOS)), DecorateInfo(unittest.skip("Fails on some jobs works on others!"), 'TestUnaryUfuncs', "test_reference_numerics_extremal", dtypes=(torch.complex64, torch.complex128), device_type='cpu', active_if=(IS_MACOS or IS_WINDOWS)), ), # tan(j * pi/2 * odd_number) is nan which also make tanhshrink nan. 
reference_numerics_filter=NumericsFilter( condition=lambda x: (close_to_int(x / (math.pi * 0.5j)) if x.is_complex() else x.new_tensor(False, dtype=torch.bool)), safe_val=0) ), UnaryUfuncInfo( 'nn.functional.threshold', ref=lambda x, threshold, value: np.where(x <= threshold, value, x).astype(x.dtype), dtypes=all_types_and(torch.half, torch.bfloat16), inplace_variant=lambda x, threshold, value: torch.nn.functional.threshold(x, threshold, value, inplace=True), supports_forward_ad=True, supports_fwgrad_bwgrad=True, assert_autodiffed=False, supports_gradgrad=True, supports_out=False, sample_kwargs=lambda device, dtype, input: ({'threshold': float.fromhex('0x1.3ap-3'), 'value': -9}, {'threshold': float.fromhex('0x1.3ap-3'), 'value': -9}), # TODO(whc) should not need sample_inputs_func, but without it # kwargs aren't being hooked up properly sample_inputs_func=sample_inputs_threshold, ), OpInfo( "nn.functional.triplet_margin_loss", sample_inputs_func=sample_inputs_triplet_margin_loss, error_inputs_func=error_inputs_triplet_margin_loss, dtypes=all_types_and_complex_and(torch.half, torch.bfloat16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, ), OpInfo( "nn.functional.triplet_margin_with_distance_loss", sample_inputs_func=partial(sample_inputs_triplet_margin_loss, with_distance=True), error_inputs_func=error_inputs_triplet_margin_loss, dtypes=all_types_and_complex_and(torch.half, torch.bfloat16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( # This test cannot handle a callable passed to `distance_function`. If we would use # `distance_function=None`, the test would pass fine. DecorateInfo( unittest.expectedFailure, "TestJit", "test_variant_consistency_jit", ), DecorateInfo( unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive", ), ), ), BinaryUfuncInfo('nextafter', dtypes=floating_types_and(torch.bfloat16, torch.half), supports_autograd=False, supports_rhs_python_scalar=False), OpInfo( "to", op=lambda x, *args, **kwargs: x.to(*args, **kwargs), dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16, torch.bool), supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False, sample_inputs_func=sample_inputs_to, skips=( # RuntimeError: undefined value cpu DecorateInfo( unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit", device_type="cpu", ), # NotImplementedError: Cannot copy out of meta tensor; no data! 
DecorateInfo( unittest.skip("Skipped!"), "TestMeta", "test_meta_outplace", ), # https://github.com/pytorch/pytorch/issues/84335 DecorateInfo( unittest.skip("Skipped!"), "TestProxyTensorOpInfo", "test_make_fx_symbolic_exhaustive", ), DecorateInfo( unittest.skip("Skipped!"), "TestNormalizeOperators", "test_normalize_operator_exhaustive", ), ), ), OpInfo('topk', dtypes=all_types_and(torch.bfloat16, torch.float16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, assert_jit_shape_analysis=True, sample_inputs_func=sample_inputs_topk), # Multiple variants for batch_norm to test with and without cuDNN disabled # See https://github.com/pytorch/pytorch/pull/63218#discussion_r688549391 for more details OpInfo('nn.functional.batch_norm', aten_name='batch_norm', dtypes=floating_types_and(torch.float16, torch.bfloat16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, assert_jit_shape_analysis=True, allow_cow_input_materialize_forward=[1, 2], allow_cow_input_materialize_backward=[1, 2], sample_inputs_func=sample_inputs_batch_norm, skips=( # see https://github.com/pytorch/pytorch/issues/71286 DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness'), DecorateInfo(unittest.skip('Skipped!'), 'TestNNCOpInfo', 'test_nnc_correctness', device_type='cpu', dtypes=(torch.bfloat16, torch.float16)), DecorateInfo(toleranceOverride({torch.float32: tol(atol=5e-05, rtol=1e-05)}), 'TestCompositeCompliance', 'test_forward_ad', device_type="cpu"), )), # This variant tests batch_norm with cuDNN disabled only on CUDA devices OpInfo('nn.functional.batch_norm', variant_test_name='without_cudnn', aten_name='batch_norm', dtypes=empty_types(), dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, allow_cow_input_materialize_forward=[1, 2], allow_cow_input_materialize_backward=[1, 2], decorators=[onlyCUDA, disablecuDNN], skips=( DecorateInfo(toleranceOverride({torch.float32: tol(atol=1e-03, rtol=1e-04)}), 'TestJit', 'test_variant_consistency_jit'), ), sample_inputs_func=sample_inputs_batch_norm), OpInfo( "nn.functional.binary_cross_entropy", aten_backward_name='binary_cross_entropy_backward', sample_inputs_func=sample_inputs_binary_cross_entropy, dtypes=floating_types_and(torch.float16, torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), supports_out=False, gradcheck_fast_mode=False, supports_autograd=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, decorators=( # RuntimeError: expected int at position 0, but got: Tensor DecorateInfo( unittest.skip("Skipped!"), "TestCudaFuserOpInfo", ), # RuntimeError: expected int at position 0, but got: Tensor DecorateInfo( unittest.skip("Skipped!"), "TestNNCOpInfo", "test_nnc_correctness", ), # Fails for unknown reason: https://github.com/pytorch/pytorch/issues/120783 DecorateInfo( unittest.skip("Skipped!"), "TestCompositeCompliance", "test_cow_input", device_type='cuda', ), DecorateInfo( toleranceOverride({torch.float32: tol(atol=1e-3, rtol=1e-3)}), "TestJit", "test_variant_consistency_jit", ), # RuntimeError: output with shape [] doesn't match the broadcast shape [5, 5] DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_meta_outplace'), DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_symbolic_meta_outplace'), DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_symbolic_meta_outplace_all_strides'), ), skips=( # RuntimeError: expected int at position 0, but got: Tensor 
DecorateInfo( unittest.expectedFailure, "TestJit", "test_variant_consistency_jit", ), ), ), # We have to add 2 OpInfo entry for `igamma` and `igammac`.First is the # standard entry, second is to run gradcheck tests on the second argument. BinaryUfuncInfo('igamma', dtypes=floating_types_and(torch.bfloat16, torch.float16), aliases=('torch.special.gammainc',), dtypesIfCUDA=floating_types(), # TODO: FIXME supports_rhs_python_scalar=False, supports_autograd=False, skips=( # FIXME: incorrectly tries to pass a rhs scalar DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_jit_alias_remapping'), )), # TODO: FIXME, ideally by implemented grad for both inputs # BinaryUfuncInfo('igamma', # variant_test_name='grad_other', # # Since autograd formula is implemented only for other and # # gradcheck test verifies the formula for input in SampleInput, # # we permute the arguments. # op=lambda self, other, **kwargs: torch.igamma(other, self, **kwargs), # inplace_variant=None, # method_variant=None, # supports_rhs_python_scalar=False, # rhs_make_tensor_kwargs=dict(requires_grad=False), # dtypes=floating_types_and(torch.bfloat16, torch.float16), # backward_dtypesIfCPU=floating_types_and(torch.bfloat16), # dtypesIfCUDA=floating_types(), # backward_dtypesIfCUDA=floating_types(), # supports_inplace_autograd=False, # skips=( # # Derivative wrt first tensor not implemented # DecorateInfo(unittest.expectedFailure, "TestCommon", # "test_floating_inputs_are_differentiable"),"), # # test does not work with passing lambda for op # # AssertionError: False is not true : Tensors failed to compare as equal! # DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), # # test fails are we permute the arguments function variant # # but not for inplace or method. # DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager'), # # TypeError: igamma(): argument 'input' (position 1) must be Tensor, not float # DecorateInfo(unittest.skip('Skipped!'), 'TestBinaryUfuncs'), # )), BinaryUfuncInfo('igammac', dtypes=floating_types_and(torch.bfloat16, torch.float16), aliases=('torch.special.gammaincc',), dtypesIfCUDA=floating_types(), supports_autograd=False, supports_rhs_python_scalar=False, skips=( # FIXME: incorrectly tries to pass a rhs scalar DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_jit_alias_remapping'), )), # TODO: FIXME, ideally by implementing grad for both inputs # BinaryUfuncInfo('igammac', # variant_test_name='grad_other', # # Since autograd formula is implemented only for other and # # gradcheck test verifies the formula for input in SampleInput, # # we permute the arguments # op=lambda self, other, **kwargs: torch.igammac(other, self, **kwargs), # inplace_variant=None, # method_variant=None, # supports_rhs_python_scalar=False, # rhs_make_tensor_kwargs=dict(requires_grad=False), # dtypes=floating_types_and(torch.bfloat16, torch.float16), # backward_dtypesIfCPU=floating_types_and(torch.bfloat16), # dtypesIfCUDA=floating_types(), # backward_dtypesIfCUDA=floating_types(), # supports_inplace_autograd=False, # decorators=[ # # Derivative wrt first tensor not implemented # DecorateInfo(unittest.expectedFailure, "TestCommon", # "test_floating_inputs_are_differentiable"), # ], # skips=( # # test does not work with passing lambda for op # # AssertionError: False is not true : Tensors failed to compare as equal! 
# DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), # # test fails are we permute the arguments function variant # # but not for inplace or method. # DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager'), # # TypeError: igammac(): argument 'input' (position 1) must be Tensor, not float # DecorateInfo(unittest.skip('Skipped!'), 'TestBinaryUfuncs'), # )), UnaryUfuncInfo('nn.functional.softshrink', aten_name="softshrink", aten_backward_name='softshrink_backward', dtypes=floating_types_and(torch.bfloat16, torch.float16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, assert_autodiffed=False, sample_inputs_func=sample_inputs_softshrink, error_inputs_func=error_inputs_softshrink), UnaryUfuncInfo('nn.functional.hardshrink', aten_name="hardshrink", aten_backward_name='hardshrink_backward', dtypes=floating_types_and(torch.bfloat16, torch.float16), assert_autodiffed=True, sample_inputs_func=sample_inputs_hardshrink, supports_forward_ad=True, supports_fwgrad_bwgrad=True, autodiff_nonfusible_nodes=["aten::hardshrink"]), UnaryUfuncInfo('nn.functional.hardtanh', aten_name="hardtanh", aten_backward_name='hardtanh_backward', dtypes=floating_types_and(torch.int8, torch.int16, torch.int32, torch.int64, torch.half, torch.bfloat16), backward_dtypes=all_types_and(torch.half, torch.bfloat16), backward_dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), assert_autodiffed=True, sample_inputs_func=sample_inputs_hardtanh, error_inputs_func=error_inputs_hardtanh, supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, autodiff_nonfusible_nodes=["aten::hardtanh"]), OpInfo('nn.functional.gelu', aten_name="gelu", aten_backward_name='gelu_backward', ref=reference_gelu if TEST_SCIPY else None, error_inputs_func=error_inputs_gelu, supports_autograd=True, assert_autodiffed=True, sample_inputs_func=sample_inputs_gelu, dtypes=floating_types_and(torch.bfloat16, torch.half), supports_gradgrad=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, autodiff_nonfusible_nodes=["aten::gelu"], skips=( # AssertionError: Tensor-likes are not close! # May not replicate in CI DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out'), DecorateInfo(unittest.skip("Unsupported on MPS for now"), 'TestCommon', 'test_numpy_ref_mps'), )), UnaryUfuncInfo('nn.functional.relu6', aten_name="relu6", dtypes=all_types_and(torch.half, torch.bfloat16), backward_dtypes=floating_types_and(torch.half, torch.bfloat16), assert_autodiffed=True, supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, autodiff_nonfusible_nodes=["aten::relu6"]), OpInfo('mm', dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16), assert_autodiffed=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_mm, skips=( # Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479 DecorateInfo( unittest.skip("Skipped!"), 'TestSchemaCheckModeOpInfo', 'test_schema_correctness', dtypes=(torch.complex64, torch.complex128)), )), OpInfo('mode', op=torch.mode, dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( # Resized a non-empty tensor but did not warn about it DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), # FIXME: # Expected 2114 but got 1123. 
# Absolute difference: 991 (up to 0.001 allowed) # Relative difference: 0.46877956480605487 (up to 0.001 allowed) DecorateInfo( unittest.skip("Skipped!"), "TestCommon", "test_compare_cpu", dtypes=(torch.float32,), device_type="cuda", ), ), sample_inputs_func=sample_inputs_mode,), make_mvlgamma_opinfo(variant_test_name='mvlgamma_p_1', domain=(1, None), skips=skips_mvlgamma(), sample_kwargs=lambda device, dtype, input: ({'p': 1}, {'d': 1})), make_mvlgamma_opinfo(variant_test_name='mvlgamma_p_3', domain=(2, None), skips=skips_mvlgamma(), sample_kwargs=lambda device, dtype, input: ({'p': 3}, {'d': 3})), make_mvlgamma_opinfo(variant_test_name='mvlgamma_p_5', domain=(3, None), skips=skips_mvlgamma(), sample_kwargs=lambda device, dtype, input: ({'p': 5}, {'d': 5})), BinaryUfuncInfo('ne', ref=np.not_equal, aliases=('not_equal',), dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16), always_returns_bool=True, supports_autograd=False, skips=( )), OpInfo('narrow', dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=partial(sample_inputs_narrow_narrow_copy, is_narrow=True), reference_inputs_func=partial(reference_inputs_narrow_narrow_copy, is_narrow=True), error_inputs_func=partial(error_inputs_narrow_narrow_copy, is_narrow=True, is_ref=False), skips=( # Use of .item() DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_operator'), DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_backward'), DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_forward_ad'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), )), OpInfo('narrow_copy', dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), supports_out=True, supports_forward_ad=False, supports_fwgrad_bwgrad=False, supports_autograd=False, # https://github.com/pytorch/pytorch/issues/86931 sample_inputs_func=partial(sample_inputs_narrow_narrow_copy, is_narrow=False), reference_inputs_func=partial(reference_inputs_narrow_narrow_copy, is_narrow=False), error_inputs_func=partial(error_inputs_narrow_narrow_copy, is_narrow=False, is_ref=False), skips=( # https://github.com/pytorch/pytorch/issues/84577 DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), # Could not run 'aten::narrow_copy.out' with arguments from the 'CUDA' backend DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_meta_outplace', device_type='cuda'), DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_meta_outplace', device_type='cuda'), DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_symbolic_meta_outplace', device_type='cuda'), DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_symbolic_meta_outplace_all_strides'), )), OpInfo('view_copy', dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16), ref=lambda x, newshape: np.reshape(x, newshape).copy(), supports_out=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_autograd=True, sample_inputs_func=sample_inputs_view_reshape, error_inputs_func=error_inputs_view_reshape, skips=( # RuntimeError: view size is not compatible with input tensor's size and stride # (at least one dimension spans across two contiguous subspaces). 
Use .reshape(...) instead. DecorateInfo( unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace_all_strides" ), )), UnaryUfuncInfo('neg', aliases=('negative', ), ref=np.negative, dtypes=all_types_and_complex_and(torch.half, torch.bfloat16, torch.chalf), error_inputs_func=error_inputs_neg, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_sparse=True, supports_sparse_csr=True, supports_sparse_csc=True, supports_sparse_bsr=True, supports_sparse_bsc=True, assert_autodiffed=True), OpInfo('dist', op=torch.dist, dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16), # Runs very slowly on slow gradcheck - alternatively reduce input sizes gradcheck_fast_mode=True, supports_out=False, supports_forward_ad=True, # torch.autograd.gradcheck.GradcheckError: While computing batched gradients, got: # Could not allocate memory to change Tensor SizesAndStrides! check_batched_forward_grad=False, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_dist), OpInfo('outer', op=torch.outer, aliases=('ger', ), dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, # See https://github.com/pytorch/pytorch/pull/78358 check_batched_forward_grad=False, sample_inputs_func=sample_inputs_outer,), OpInfo('ormqr', op=torch.ormqr, dtypes=floating_and_complex_types(), # https://github.com/pytorch/pytorch/issues/80411 gradcheck_fast_mode=True, supports_forward_ad=False, supports_fwgrad_bwgrad=False, sample_inputs_func=sample_inputs_ormqr, error_inputs_func=error_inputs_ormqr, decorators=[skipCUDAIfNoCusolver, skipCPUIfNoLapack], skips=( # Strides are not the same! DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), )), OpInfo('permute', ref=np.transpose, dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), supports_out=False, assert_autodiffed=True, autodiff_fusible_nodes=[], # aliases inputs, shouldn't be fused autodiff_nonfusible_nodes=[], # aliases inputs, shouldn't be fused assert_jit_shape_analysis=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_varargs=True, sample_inputs_func=sample_inputs_permute, reference_inputs_func=reference_inputs_permute), OpInfo('permute_copy', dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), supports_out=True, assert_autodiffed=True, assert_jit_shape_analysis=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_varargs=False, # torch.permute is also not varargs sample_inputs_func=sample_inputs_permute, reference_inputs_func=reference_inputs_permute, skips=( DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)), )), BinaryUfuncInfo('pow', dtypes=all_types_and_complex_and(torch.half, torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.half, torch.bfloat16, torch.chalf), ref=np.power, # Due to AVX2 currently not being fully supported for Float16, log_vml_cpu can't be enabled # for Float16, causing this test to fail. pow's autograd for Float16 is thus currently # unsupported on CPU. 
backward_dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16), backward_dtypesIfCUDA=floating_and_complex_types_and(torch.bfloat16, torch.half, torch.chalf), # https://github.com/pytorch/pytorch/issues/80411 gradcheck_fast_mode=True, supports_inplace_autograd=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, assert_autodiffed=True, supports_one_python_scalar=True, # Integer types do not support negative exponentes rhs_make_tensor_kwargs=dict(low=0), # Raising negative real numbers to fractional powers is not supported lhs_make_tensor_kwargs=dict(low=0), decorators=( DecorateInfo(toleranceOverride({torch.complex64: tol(atol=1e-4, rtol=1.3e-05)}), 'TestBinaryUfuncs', 'test_reference_numerics'), DecorateInfo(toleranceOverride({torch.complex64: tol(atol=1e-4, rtol=1.3e-05), torch.complex128: tol(atol=1e-4, rtol=1.3e-05)}), 'TestBinaryUfuncs', 'test_scalar_support'), ), skips=( # Skipping integers because they are being raised to negative powers causing an error DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_reference_numerics_small_values', dtypes=[torch.int8, torch.int16, torch.int32, torch.int64]), DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_reference_numerics_large_values', dtypes=[torch.int16, torch.int32, torch.int64]), # FIXME Complex values error with: Greatest absolute difference: nan at index # Ref: https://github.com/pytorch/pytorch/issues/76853 # For `chalf`, reference computation in `numpy` is computed in `cfloat`. # Output of `chalf` saturates to `inf` quicker than reference due to its small range # which leads to failure of this test. DecorateInfo(unittest.skip("Skipped!"), 'TestDecomp', 'test_quick', dtypes=(torch.complex32,), active_if=TEST_WITH_ROCM), # FIXME: # Mismatched elements: 1 / 500 (0.2%) # Greatest absolute difference: nan at index (7, 9, 0) (up to 1e-05 allowed) # Greatest relative difference: nan at index (7, 9, 0) (up to 0.001 allowed) DecorateInfo(unittest.skip("Skipped!"), 'TestDecomp', 'test_comprehensive', dtypes=(torch.complex32,)), DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_complex_half_reference_testing', dtypes=(torch.complex32,), active_if=TEST_WITH_ROCM), DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_batch_vs_slicing', dtypes=(torch.complex32,)), DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_non_contig', dtypes=(torch.complex32,)), DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics', dtypes=(torch.complex32,)), DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics_small_values', dtypes=(torch.complex32, torch.complex64, torch.complex128)), DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics_large_values', dtypes=(torch.complex32, torch.complex64, torch.complex128)), DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics_extremal_values', dtypes=(torch.complex32, torch.complex64, torch.complex128)), )), BinaryUfuncInfo('float_power', ref=np.float_power, dtypes=all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool), promotes_int_to_float=True, # https://github.com/pytorch/pytorch/issues/80411 gradcheck_fast_mode=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_one_python_scalar=True, # Integer types do not support negative exponentes rhs_make_tensor_kwargs=dict(low=0), # Raising negative real numbers to fractional powers is not supported lhs_make_tensor_kwargs=dict(low=0), 
decorators=( DecorateInfo(toleranceOverride({torch.complex64: tol(atol=1e-4, rtol=1.3e-05), torch.complex128: tol(atol=1e-4, rtol=1.3e-05)}), 'TestBinaryUfuncs', 'test_scalar_support'), ), skips=( # FIXME # AssertionError: Object comparison failed: torch.float64 != torch.float32 DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_type_promotion'), # -3.43399e+38 is outside the range of representable values of type 'float' DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), # Complex values error with: Greatest absolute difference: nan at index DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics_small_values', dtypes=[torch.complex64, torch.complex128]), DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics_large_values', dtypes=[torch.complex64, torch.complex128]), DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics_extremal_values', dtypes=[torch.complex64, torch.complex128]), # Inplace always promotes to double and thus other floating dtypes are not supported DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_meta_inplace', dtypes=[torch.bfloat16, torch.float16, torch.float32]), )), OpInfo('qr', op=torch.qr, dtypes=floating_and_complex_types(), sample_inputs_func=sample_inputs_linalg_qr_geqrf, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # In-place ops check_batched_gradgrad=False, decorators=[skipCUDAIfNoCusolver, skipCPUIfNoLapack]), UnaryUfuncInfo('rad2deg', ref=np.degrees, decorators=(precisionOverride({torch.bfloat16: 7e-1, torch.float16: 7e-1}),), dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_sparse=True, supports_sparse_csr=True, supports_sparse_csc=True, supports_sparse_bsr=True, supports_sparse_bsc=True, promotes_int_to_float=True), UnaryUfuncInfo('real', ref=np.real, dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half, torch.chalf), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # See https://github.com/pytorch/pytorch/issues/66357 check_batched_forward_grad=False, skips=( # Skip since real and imag don't have out variants. DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs', 'test_out_arg_all_dtypes'), )), OpInfo( "roll", ref=np.roll, dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half, torch.chalf), error_inputs_func=error_inputs_roll, supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_roll, decorators=(onlyNativeDeviceTypes,), ), OpInfo( "rot90", dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half), error_inputs_func=error_inputs_rot90, # Runs very slowly on slow gradcheck - alternatively reduce input sizes gradcheck_fast_mode=True, supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_rot90, ), # To test reference numerics against multiple values of argument `decimals`, # we make multiple OpInfo entries with each entry corresponding to different value of decimals. 
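# Illustrative note (a minimal sketch in comment form, not an op_db entry): each
# `decimals` variant below appears to forward the same keyword to both the torch
# op and the NumPy reference through `sample_kwargs`, e.g.
# ({'decimals': 3}, {'decimals': 3}). A hypothetical standalone check of that
# pattern, assuming float64 inputs, would look like:
#     x = torch.randn(8, dtype=torch.float64)
#     expected = torch.from_numpy(np.round(x.numpy(), decimals=3))
#     torch.testing.assert_close(torch.round(x, decimals=3), expected)
# Here `decimals=3` mirrors the sample_kwargs of the 'decimals_3' variant.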
UnaryUfuncInfo('round', ref=np.round, aliases=('special.round',), dtypes=all_types_and(torch.half, torch.bfloat16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness', dtypes=tuple(t for t in integral_types() if t != torch.uint8)), DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness', dtypes=(torch.bfloat16,)), ), supports_sparse=True, supports_sparse_csr=True, supports_sparse_csc=True, supports_sparse_bsr=True, supports_sparse_bsc=True, assert_autodiffed=True, ), UnaryUfuncInfo('round', ref=np.round, variant_test_name='decimals_0', aliases=('special.round',), dtypes=floating_types_and(torch.half, torch.bfloat16), sample_kwargs=lambda device, dtype, input: ({'decimals': 0}, {'decimals': 0}), sample_inputs_func=partial(sample_inputs_elementwise_unary, op_kwargs={'decimals': 0}), supports_forward_ad=True, supports_fwgrad_bwgrad=True, assert_autodiffed=False, supports_sparse_csr=False), UnaryUfuncInfo('round', ref=np.round, variant_test_name='decimals_3', aliases=('special.round',), dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), sample_kwargs=lambda device, dtype, input: ({'decimals': 3}, {'decimals': 3}), sample_inputs_func=partial(sample_inputs_elementwise_unary, op_kwargs={'decimals': 3}), skips=( # test_ops already tested for this overload with `decimals_0` opinfo entry DecorateInfo(unittest.skip("Skipped!"), 'TestCommon'), DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients'), DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients'), DecorateInfo(unittest.skip("Skipped!"), 'TestJit'), DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits'), DecorateInfo(toleranceOverride({torch.bfloat16: tol(atol=1e-3, rtol=0.016)}), "TestUnaryUfuncs", "test_reference_numerics_extremal", device_type="cuda"), DecorateInfo(toleranceOverride({torch.bfloat16: tol(atol=1e-3, rtol=0.016)}), "TestUnaryUfuncs", "test_reference_numerics_normal", device_type="cuda"), ), supports_forward_ad=True, supports_fwgrad_bwgrad=True, assert_autodiffed=False, supports_sparse_csr=False), UnaryUfuncInfo('round', ref=np.round, variant_test_name='decimals_neg_3', aliases=('special.round',), dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16), sample_kwargs=lambda device, dtype, input: ({'decimals': -3}, {'decimals': -3}), sample_inputs_func=partial(sample_inputs_elementwise_unary, op_kwargs={'decimals': -3}), skips=( # test_ops already tested for this overload with `decimals_0` opinfo entry DecorateInfo(unittest.skip("Skipped!"), 'TestCommon'), DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients'), DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients'), DecorateInfo(unittest.skip("Skipped!"), 'TestJit'), DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits'), ), supports_forward_ad=True, supports_fwgrad_bwgrad=True, assert_autodiffed=False, supports_sparse_csr=False), UnaryUfuncInfo('sin', ref=np.sin, dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16), assert_autodiffed=True, handles_large_floats=False, supports_sparse=True, supports_sparse_csr=True, supports_sparse_csc=True, supports_sparse_bsr=True, supports_sparse_bsc=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, promotes_int_to_float=True, skips=( # Fails on CUDA but passes on ROCm 
DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', dtypes=(torch.cdouble,), device_type='cuda'), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', dtypes=(torch.cfloat, torch.cdouble,), device_type='cpu', active_if=IS_WINDOWS), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', dtypes=(torch.cfloat, torch.cdouble,), device_type='cpu', active_if=IS_WINDOWS), DecorateInfo(unittest.skip("Skipped! sparse backward not supported"), 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'), ), decorators=(precisionOverride({torch.bfloat16: 1e-2}),)), UnaryUfuncInfo('sinc', ref=np_sinc_with_fp16_as_fp32, aliases=('special.sinc',), dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), handles_large_floats=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, promotes_int_to_float=True), UnaryUfuncInfo('sinh', ref=np_unary_ufunc_integer_promotion_wrapper(np.sinh), dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16), assert_autodiffed=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_sparse=True, supports_sparse_csr=True, supports_sparse_csc=True, supports_sparse_bsr=True, supports_sparse_bsc=True, promotes_int_to_float=True, decorators=(precisionOverride({torch.float16: 1e-2}),), skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], active_if=(IS_MACOS or IS_WINDOWS)), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], active_if=(IS_MACOS or IS_WINDOWS)), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', dtypes=(torch.cdouble,)), # Reference: https://github.com/pytorch/pytorch/issues/48641 DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', device_type='cpu', dtypes=[torch.int8]), DecorateInfo(unittest.skip("Skipped! 
sparse backward not supported"), 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'), )), UnaryUfuncInfo('sign', ref=reference_sign, dtypes=all_types_and(torch.bool, torch.bfloat16, torch.half), dtypesIfCUDA=all_types_and(torch.bool, torch.bfloat16, torch.half), supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_sparse=True, supports_sparse_csr=True, supports_sparse_csc=True, supports_sparse_bsr=True, supports_sparse_bsc=True, skips=( # Reference: https://github.com/pytorch/pytorch/issues/41245 DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', dtypes=[torch.bfloat16, torch.float16, torch.float32, torch.float64]), )), UnaryUfuncInfo('sgn', ref=reference_sgn, dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half, torch.chalf), backward_dtypes=floating_and_complex_types_and(torch.bfloat16, torch.half), backward_dtypesIfCUDA=floating_and_complex_types_and(torch.bfloat16, torch.half, torch.chalf), supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_sparse=True, supports_sparse_csr=True, supports_sparse_csc=True, supports_sparse_bsr=True, supports_sparse_bsc=True, skips=( # Reference: https://github.com/pytorch/pytorch/issues/41245 DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', dtypes=[torch.bfloat16, torch.float16, torch.float32, torch.float64]), DecorateInfo(unittest.skip("Skipped! sparse backward not supported"), 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'), )), OpInfo('split', dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool, torch.chalf), sample_inputs_func=partial(sample_inputs_split, list_args=False), supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False, autodiff_fusible_nodes=[], # aliases inputs, shouldn't be fused autodiff_nonfusible_nodes=[], # aliases inputs, shouldn't be fused assert_autodiffed=True), OpInfo('split', # Cannot declare this aten_name because of # test_variant_consistency_jit_split_list_args_cpu_float32 decomp_aten_name='split_with_sizes', variant_test_name='list_args', dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool), sample_inputs_func=partial(sample_inputs_split, list_args=True), supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False), # `unsafe_split` supports only `int` for split_size argument OpInfo('unsafe_split', dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool, torch.chalf), sample_inputs_func=partial(sample_inputs_split, list_args=False), supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False, autodiff_fusible_nodes=[], # aliases inputs, shouldn't be fused autodiff_nonfusible_nodes=[], # aliases inputs, shouldn't be fused assert_autodiffed=True, check_batched_forward_grad=False), OpInfo('split_with_sizes', dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool, torch.chalf), sample_inputs_func=sample_inputs_split_with_sizes, autodiff_fusible_nodes=[], # aliases inputs, shouldn't be fused autodiff_nonfusible_nodes=[], # aliases inputs, shouldn't be fused supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, assert_autodiffed=True), OpInfo('split_with_sizes_copy', dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool, torch.chalf), sample_inputs_func=sample_inputs_split_with_sizes, supports_out=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( # No error raised DecorateInfo(unittest.expectedFailure, "TestCommon", 
"test_out_requires_grad_error"), )), BinaryUfuncInfo('__radd__', op=torch.Tensor.__radd__, dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool), supports_out=False, skips=( DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit',), ), assert_autodiffed=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, autodiff_nonfusible_nodes=['aten::add'],), BinaryUfuncInfo('__rdiv__', op=torch.Tensor.__rdiv__, dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool), promotes_int_to_float=True, lhs_make_tensor_kwargs={'exclude_zero': True}, # Runs very slowly on slow gradcheck - alternatively reduce input sizes gradcheck_fast_mode=True, supports_out=False, skips=( # https://github.com/pytorch/pytorch/issues/76806 DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion'), DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit',), ), supports_forward_ad=True, supports_fwgrad_bwgrad=True, assert_autodiffed=True, autodiff_nonfusible_nodes=['aten::mul', 'aten::reciprocal'],), BinaryUfuncInfo('__rmul__', op=torch.Tensor.__rmul__, dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool), supports_out=False, skips=( DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit',), ), assert_autodiffed=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, autodiff_nonfusible_nodes=['aten::mul'],), BinaryUfuncInfo('__rand__', op=torch.Tensor.__rand__, dtypes=integral_types_and(torch.bool), supports_out=False, supports_autograd=False, supports_forward_ad=True, skips=( DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), )), BinaryUfuncInfo('__ror__', op=torch.Tensor.__ror__, dtypes=integral_types_and(torch.bool), supports_out=False, supports_autograd=False, supports_forward_ad=True, skips=( DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), )), BinaryUfuncInfo('__rxor__', op=torch.Tensor.__rxor__, dtypes=integral_types_and(torch.bool), supports_out=False, supports_autograd=False, supports_forward_ad=True, skips=( DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), )), OpInfo('__rmatmul__', op=torch.Tensor.__rmatmul__, dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16), dtypesIfCUDA=floating_and_complex_types_and(torch.float16, *[torch.bfloat16] if SM53OrLater or TEST_WITH_ROCM else []), assert_autodiffed=True, sample_inputs_func=partial(sample_inputs_matmul, is_rmatmul=True), # Runs very slowly on slow gradcheck - alternatively reduce input sizes gradcheck_fast_mode=True, supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, check_batched_forward_grad=False, decorators=( # NVIDIA only assures that bfloat16 is supported by bmm if SM >= 5.3 DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes', device_type='cuda', active_if=not SM53OrLater), DecorateInfo(toleranceOverride({torch.complex64: tol(atol=1e-05, rtol=1.2e-03)}), 'TestMathBits', 'test_conj_view'), DecorateInfo(toleranceOverride({torch.float32: tol(atol=1e-05, rtol=1.2e-03)}), 'TestCommon', 
'test_noncontiguous_samples'), DecorateInfo(toleranceOverride({torch.complex64: tol(atol=1e-05, rtol=1e-05)}), "TestDecomp", "test_comprehensive", device_type="cuda", active_if=TEST_WITH_ROCM), ), skips=( DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit',), # https://github.com/pytorch/pytorch/issues/67470 DecorateInfo(unittest.skip("67470!"), 'TestCommon', 'test_noncontiguous_samples', device_type='cpu', dtypes=(torch.long,)), # Fails on XLA. # AssertionError: False is not true : Tensors failed to compare as equal DecorateInfo(unittest.skip("Skipped!"), 'TestOpInfo', device_type='xla', dtypes=(torch.long,)), # https://github.com/pytorch/pytorch/issues/71774 DecorateInfo(unittest.skip('Skipped!'), 'TestNNCOpInfo', 'test_nnc_correctness', device_type='cpu', dtypes=(torch.long,)), )), BinaryUfuncInfo('__rmod__', op=torch.Tensor.__rmod__, dtypes=floating_types_and(torch.bfloat16, torch.half,), dtypesIfCUDA=all_types_and(torch.bfloat16, torch.half), # https://github.com/pytorch/pytorch/issues/80411 gradcheck_fast_mode=True, supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_one_python_scalar=True, skips=( DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit',), ), # Support autograd after torch.remainder(Tensor, Tensor) supports # autograd of the second argument. # https://github.com/pytorch/pytorch/pull/58476/files#r637167630 # supports_autograd=False, assert_autodiffed=True, autodiff_nonfusible_nodes=['aten::remainder'],), BinaryUfuncInfo('__rpow__', op=torch.Tensor.__rpow__, dtypes=all_types_and_complex_and(torch.bfloat16, torch.half), # Reference: https://github.com/pytorch/pytorch/issues/54774 # "log2" "_vml_cpu" not implemented for Half backward_dtypes=all_types_and_complex_and(torch.bfloat16, torch.half), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_one_python_scalar=True, skips=( DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit',), # TODO: FIXME tolerance is too high DecorateInfo(unittest.skip('Skipped!'), 'TestFwdGradients'), DecorateInfo(unittest.skip('Skipped!'), 'TestBwdGradients'), ), assert_autodiffed=True, autodiff_nonfusible_nodes=['aten::pow'],), BinaryUfuncInfo('__rsub__', op=torch.Tensor.__rsub__, dtypes=all_types_and_complex_and(torch.bfloat16, torch.half), supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False, supports_one_python_scalar=True, skips=( DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit',), ), assert_autodiffed=True, autodiff_nonfusible_nodes=['aten::rsub'],), BinaryUfuncInfo('rsub', dtypes=all_types_and_complex_and(torch.bfloat16, torch.half), supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False, supports_inplace_autograd=False, assert_autodiffed=None, sample_inputs_func=sample_inputs_add_sub), OpInfo('select', aten_backward_name='select_backward', dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool, torch.chalf), sample_inputs_func=sample_inputs_select, assert_jit_shape_analysis=True, 
supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False), OpInfo('select_scatter', dtypes=all_types_and(torch.bfloat16, torch.half, torch.bool), sample_inputs_func=sample_inputs_select_scatter, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False), OpInfo('slice', op=torch.ops.aten.slice.Tensor, dtypes=all_types_and_complex_and(torch.bfloat16, torch.half, torch.bool, torch.chalf), sample_inputs_func=sample_inputs_slice, gradcheck_fast_mode=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_scripting=False, supports_inplace_autograd=False, supports_out=False), OpInfo('slice_scatter', dtypes=all_types_and(torch.bfloat16, torch.half, torch.bool), sample_inputs_func=sample_inputs_slice_scatter, # https://github.com/pytorch/pytorch/issues/80411 gradcheck_fast_mode=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=True), UnaryUfuncInfo('signbit', ref=np.signbit, dtypes=all_types_and(torch.bool, torch.bfloat16, torch.half), supports_sparse=True, supports_sparse_csr=True, supports_sparse_csc=True, supports_sparse_bsr=True, supports_sparse_bsc=True, supports_autograd=False,), UnaryUfuncInfo('tan', ref=np.tan, dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16), decorators=(DecorateInfo( toleranceOverride({torch.complex64: tol(atol=1e-04, rtol=1e-05)}), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cuda'),), assert_autodiffed=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_sparse=True, supports_sparse_csr=True, supports_sparse_csc=True, supports_sparse_bsr=True, supports_sparse_bsc=True, promotes_int_to_float=True, skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], active_if=(IS_MACOS or IS_WINDOWS)), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], active_if=(IS_MACOS or IS_WINDOWS)), DecorateInfo(unittest.skip("Skipped! 
sparse backward not supported"), 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'), # FIXME: # Mismatched elements: 2 / 400 (0.5%) # Greatest absolute difference: inf at index (7, 16) (up to 1e-05 allowed) # Greatest relative difference: nan at index (7, 16) (up to 0.001 allowed) DecorateInfo( unittest.skip("Skipped!"), "TestInductorOpInfo", "test_comprehensive", dtypes=(torch.float16,), device_type="cuda", ), ), # tan(pi/2 * odd_number) is nan reference_numerics_filter=NumericsFilter( condition=lambda x: close_to_int(x / (math.pi * 0.5)), safe_val=math.pi)), UnaryUfuncInfo('tanh', ref=np.tanh, aten_backward_name='tanh_backward', aliases=('nn.functional.tanh',), decorators=(precisionOverride({torch.bfloat16: 1e-2}), DecorateInfo( toleranceOverride({torch.complex64: tol(atol=1e-04, rtol=2e-05)}), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cuda'),), dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16), assert_autodiffed=True, assert_jit_shape_analysis=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_sparse=True, supports_sparse_csr=True, supports_sparse_csc=True, supports_sparse_bsr=True, supports_sparse_bsc=True, promotes_int_to_float=True, skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], active_if=(IS_MACOS or IS_WINDOWS)), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], active_if=(IS_MACOS or IS_WINDOWS)), DecorateInfo(unittest.skip("Skipped! sparse backward not supported"), 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'), ), # tan(j * pi/2 * odd_number) is nan reference_numerics_filter=NumericsFilter( condition=lambda x: (close_to_int(x / (math.pi * 0.5j)) if x.is_complex() else x.new_tensor(False, dtype=torch.bool)), safe_val=0)), OpInfo('tensor_split', ref=np.array_split, dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16), dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( # Pre-existing condition; Needs to be fixed DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_operator'), DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_backward'), DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_forward_ad'), ), sample_inputs_func=sample_inputs_tensor_split,), OpInfo('hsplit', dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.bfloat16, torch.float16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # See https://github.com/pytorch/pytorch/pull/78358 check_batched_forward_grad=False, sample_inputs_func=sample_inputs_hsplit, error_inputs_func=error_inputs_hsplit,), OpInfo('vsplit', dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.bfloat16, torch.float16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # See https://github.com/pytorch/pytorch/pull/78358 check_batched_forward_grad=False, sample_inputs_func=sample_inputs_vsplit, error_inputs_func=error_inputs_vsplit,), OpInfo('dsplit', dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.bfloat16, torch.float16), supports_out=False, supports_forward_ad=True, 
supports_fwgrad_bwgrad=True, # See https://github.com/pytorch/pytorch/pull/78358 check_batched_forward_grad=False, sample_inputs_func=sample_inputs_dsplit, error_inputs_func=error_inputs_dsplit,), OpInfo('triangular_solve', op=torch.triangular_solve, dtypes=floating_and_complex_types(), sample_inputs_func=sample_inputs_legacy_solve, check_batched_gradgrad=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, gradcheck_wrapper=lambda *args, **kwargs: gradcheck_wrapper_triangular_input(*args, idx=1, **kwargs), decorators=[ skipCUDAIfNoMagma, skipCPUIfNoLapack, DecorateInfo( toleranceOverride({torch.float32: tol(atol=3e-5, rtol=3e-6)}), 'TestConsistency', 'test_output_match', device_type='cpu', ), ], skips=( # AssertionError: Scalars are not equal! DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), # Gradcheck fails DecorateInfo(unittest.expectedFailure, 'TestFwdGradients', 'test_fn_fwgrad_bwgrad', dtypes=floating_and_complex_types()), DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out', device_type='mps', dtypes=[torch.float32]), DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager', device_type='mps', dtypes=[torch.float32]), DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', device_type='mps', dtypes=[torch.float32]), )), UnaryUfuncInfo('trunc', aliases=('fix', ), ref=np.trunc, dtypes=all_types_and(torch.half, torch.bfloat16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_sparse=True, skips=( DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness', dtypes=tuple(t for t in integral_types() if t != torch.uint8)), ), supports_sparse_csr=True, supports_sparse_csc=True, supports_sparse_bsr=True, supports_sparse_bsc=True, assert_autodiffed=True), UnaryUfuncInfo('exp2', aliases=('special.exp2', ), ref=np_unary_ufunc_integer_promotion_wrapper(np.exp2), dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, promotes_int_to_float=True, skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', dtypes=[torch.cdouble]), # Reference: https://github.com/pytorch/pytorch/issues/48010 DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS), )), UnaryUfuncInfo('expm1', aliases=('special.expm1', ), ref=np_unary_ufunc_integer_promotion_wrapper(np.expm1), dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_sparse=True, supports_sparse_csr=True, supports_sparse_csc=True, supports_sparse_bsr=True, supports_sparse_bsc=True, promotes_int_to_float=True, assert_autodiffed=True, skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', device_type='cuda', dtypes=[torch.complex128]), DecorateInfo(unittest.skip("Skipped! sparse backward not supported"), 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'), )), UnaryUfuncInfo('nan_to_num', ref=np.nan_to_num, dtypes=all_types_and(torch.half, torch.bool, torch.bfloat16), dtypesIfCUDA=all_types_and(torch.half, torch.bool, torch.bfloat16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_sparse=True, skips=( DecorateInfo(unittest.skip("Skipped! 
sparse backward not supported"), 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'), ), # Passing numpy_kwargs via sample_kwargs, as numpy does comparison # with BFloat16 in float, since it currently doesn't support BFloat16. # Ref: https://github.com/pytorch/pytorch/issues/57982#issuecomment-839150556 sample_kwargs=lambda device, dtype, input: ({}, {'posinf': torch.finfo(torch.bfloat16).max, 'neginf': torch.finfo(torch.bfloat16).min}) if dtype is torch.bfloat16 else ({}, {})), UnaryUfuncInfo('reciprocal', ref=np_unary_ufunc_integer_promotion_wrapper(np.reciprocal), dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), assert_autodiffed=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, promotes_int_to_float=True, skips=( # Reference: https://github.com/pytorch/pytorch/issues/45690 DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', dtypes=[torch.cfloat, torch.cdouble]), )), UnaryUfuncInfo('rsqrt', ref=lambda x: np.reciprocal(np.sqrt(x)), domain=(0, None), dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16), decorators=(precisionOverride({torch.half: 5e-2}),), assert_autodiffed=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, promotes_int_to_float=True, skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', dtypes=(torch.cfloat, torch.cdouble)), # AssertionError: Tensor-likes are not close! # Greatest absolute difference: nan at index (700,) (up to 0.01 allowed) # Greatest relative difference: nan at index (700,) (up to 0.001 allowed) DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs', 'test_reference_numerics_large', dtypes=(torch.chalf,)), )), UnaryUfuncInfo('sqrt', ref=np.sqrt, supports_sparse=True, domain=(0, None), dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16), assert_autodiffed=True, supports_forward_ad=True, supports_sparse_csr=True, supports_sparse_csc=True, supports_sparse_bsr=True, supports_sparse_bsc=True, supports_fwgrad_bwgrad=True, promotes_int_to_float=True, decorators=( precisionOverride({torch.bfloat16: 7e-2}), DecorateInfo( toleranceOverride({torch.chalf: tol(atol=1e-2, rtol=0)}), 'TestUnaryUfuncs', 'test_reference_numerics_large'), ), skips=( # Reference: https://github.com/pytorch/pytorch/issues/47358 DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', device_type='cpu', dtypes=(torch.cfloat, torch.cdouble), active_if=IS_MACOS), DecorateInfo(unittest.skip("Skipped! 
sparse backward not supported"), 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'), )), UnaryUfuncInfo('square', ref=np.square, dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), decorators=(precisionOverride({torch.complex64: 3e-4, torch.bfloat16: 3e-1}),), supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( # Reference: https://github.com/pytorch/pytorch/issues/52549 DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', dtypes=[torch.cfloat, torch.cdouble]), # >>> t = torch.tensor(complex(-0.01, float("inf"))) # >>> np.square(t.numpy()) # (-inf-infj) # >>> t.square() # tensor(-inf-infj) # >>> t.cuda().square() # tensor(inf+nanj, device='cuda:0') DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cuda', dtypes=[torch.cfloat, torch.cdouble]), DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_meta_inplace', dtypes=[torch.bool]), DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_meta_inplace', dtypes=[torch.bool]), DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_symbolic_meta_inplace', dtypes=[torch.bool]), ),), OpInfo('lerp', dtypes=floating_and_complex_types_and(torch.bfloat16, torch.half), dtypesIfCUDA=floating_and_complex_types_and(torch.chalf, torch.half, torch.bfloat16), sample_inputs_func=sample_inputs_lerp, supports_forward_ad=True, supports_fwgrad_bwgrad=True, assert_autodiffed=True), UnaryUfuncInfo('angle', ref=np.angle, dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16), dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool), decorators=(precisionOverride({torch.float16: 1e-2, torch.bfloat16: 1e-2}),), backward_dtypes=floating_and_complex_types_and(torch.bfloat16, torch.float16), backward_dtypesIfCUDA=floating_and_complex_types_and(torch.chalf), supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_sparse_csr=True, supports_sparse_csc=True, supports_sparse_bsr=True, supports_sparse_bsc=True, supports_complex_to_float=True, skips=( # Ref: https://github.com/pytorch/pytorch/issues/78413 DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs', 'test_reference_numerics_small', dtypes=(torch.bfloat16, torch.float16, torch.float32, torch.float64),), )), UnaryUfuncInfo('isfinite', ref=np.isfinite, dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), supports_out=False, supports_autograd=False), UnaryUfuncInfo('isinf', ref=np.isinf, dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), supports_out=False, supports_sparse=True, supports_sparse_csr=True, supports_sparse_csc=True, supports_sparse_bsr=True, supports_sparse_bsc=True, supports_autograd=False), UnaryUfuncInfo('isposinf', ref=np.isposinf, dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16), supports_sparse=True, supports_sparse_csr=True, supports_sparse_csc=True, supports_sparse_bsr=True, supports_sparse_bsc=True, supports_autograd=False), UnaryUfuncInfo('isneginf', ref=np.isneginf, dtypes=all_types_and(torch.bool, torch.bfloat16, torch.float16), supports_sparse=True, supports_sparse_csr=True, supports_sparse_csc=True, supports_sparse_bsr=True, supports_sparse_bsc=True, supports_autograd=False), UnaryUfuncInfo('isreal', ref=np.isreal, dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), supports_out=False, supports_autograd=False), UnaryUfuncInfo('isnan', ref=np.isnan, 
dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16), supports_out=False, supports_sparse=True, supports_sparse_csr=True, supports_sparse_csc=True, supports_sparse_bsr=True, supports_sparse_bsc=True, supports_autograd=False), OpInfo('einsum', # we need this lambda because SampleInput expects tensor input as the first argument # TODO(@heitorschueroff) update SampleInput to handle such cases op=lambda tensors, equation: torch.einsum(equation, tensors), dtypes=all_types_and_complex_and(torch.half, torch.bfloat16), dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16), backward_dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, check_batched_forward_grad=False, # See https://github.com/pytorch/pytorch/issues/66357 sample_inputs_func=sample_inputs_einsum, skips=( DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), # test does not work with passing lambda for op # there's a test `test_einsum` in `test_jit.py` to handle this case # AssertionError: JIT Test does not execute any logic DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), )), OpInfo('svd', op=torch.svd, dtypes=floating_and_complex_types(), sample_inputs_func=sample_inputs_svd, # Runs very slowly on slow-gradcheck - alternatively reduce input sizes gradcheck_fast_mode=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, check_batched_forward_grad=False, # We're using at::allclose, which does not have a batching rule check_batched_grad=False, check_batched_gradgrad=False, decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack, with_tf32_off], skips=( # Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479 DecorateInfo( unittest.skip("Skipped!"), 'TestSchemaCheckModeOpInfo', 'test_schema_correctness', dtypes=(torch.complex64, torch.complex128)), DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out', device_type='mps', dtypes=[torch.float32]), DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager', device_type='mps', dtypes=[torch.float32]), DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', device_type='mps', dtypes=[torch.float32]), )), OpInfo('svd_lowrank', op=lambda *args, **kwargs: wrapper_set_seed( lambda a, b, **kwargs: torch.svd_lowrank(a @ b.mT, **kwargs), *args, **kwargs ), dtypes=floating_and_complex_types(), # Runs very slowly on slow gradcheck - alternatively reduce input sizes gradcheck_fast_mode=True, supports_out=False, # Due to the use of randomness check_batched_grad=False, check_batched_gradgrad=False, check_batched_forward_grad=False, supports_fwgrad_bwgrad=True, supports_forward_ad=True, sample_inputs_func=sample_inputs_svd_lowrank, decorators=[skipCUDAIfNoCusolver, skipCPUIfNoLapack, with_tf32_off, DecorateInfo(toleranceOverride({torch.float32: tol(atol=1e-03, rtol=1e-03), torch.complex64: tol(atol=1e-02, rtol=1e-02)}), 'TestCommon', 'test_noncontiguous_samples'), # FIXME This should be the following, but the toleranceOverride does not seem to do anything! 
# DecorateInfo(toleranceOverride({torch.complex128: tol(atol=1e-04, rtol=1e-04)}), # 'TestFwdGradients', 'test_fn_fwgrad_bwgrad'), DecorateInfo(unittest.skip("See comment above"), 'TestFwdGradients', 'test_fn_fwgrad_bwgrad', dtypes=[torch.complex128]), ], skips=( # test does not work with passing lambda for op DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), # Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479 DecorateInfo(unittest.expectedFailure, 'TestSchemaCheckModeOpInfo', 'test_schema_correctness', dtypes=(torch.complex64, torch.complex128)), DecorateInfo(slowTest, 'TestCompositeCompliance', 'test_forward_ad'), )), OpInfo('pca_lowrank', op=lambda *args, **kwargs: wrapper_set_seed( lambda a, b, **kwargs: torch.pca_lowrank(a @ b.mT, **kwargs), *args, **kwargs ), dtypes=floating_and_complex_types(), # Runs very slowly on slow gradcheck - alternatively reduce input sizes gradcheck_fast_mode=True, supports_out=False, check_batched_forward_grad=False, check_batched_grad=False, check_batched_gradgrad=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_pca_lowrank, decorators=[skipCUDAIfNoCusolver, skipCPUIfNoLapack, with_tf32_off, DecorateInfo(toleranceOverride({torch.float32: tol(atol=1e-03, rtol=1e-03), torch.complex64: tol(atol=4e-02, rtol=4e-02)}), 'TestCommon', 'test_noncontiguous_samples'), DecorateInfo(toleranceOverride({torch.float32: tol(atol=1e-05, rtol=5e-05)}), 'TestOperators', 'test_grad'), # FIXME This should be the following, but the toleranceOverride does not seem to do anything! 
# DecorateInfo(toleranceOverride({torch.complex128: tol(atol=1e-04, rtol=1e-04)}), # 'TestFwdGradients', 'test_fn_fwgrad_bwgrad'), DecorateInfo(unittest.skip("See comment above"), 'TestFwdGradients', 'test_fn_fwgrad_bwgrad', dtypes=[torch.complex128]), DecorateInfo( toleranceOverride({torch.float32: tol(atol=3e-5, rtol=1e-3)}), 'TestInductorOpInfo', 'test_comprehensive', device_type='cuda'), ], skips=( # test does not work with passing lambda for op DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), # Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479 DecorateInfo(unittest.expectedFailure, 'TestSchemaCheckModeOpInfo', 'test_schema_correctness', dtypes=(torch.complex64, torch.complex128)), DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), )), BinaryUfuncInfo('polar', dtypes=floating_types(), # this function is undefined if 'abs' values are <0 supports_forward_ad=True, lhs_make_tensor_kwargs=dict(low=0), supports_rhs_python_scalar=False, skips=( # RuntimeError: Expected object of scalar type Float but got scalar type Double for second argument DecorateInfo(unittest.skip('Skipped!'), 'TestBinaryUfuncs', 'test_type_promotion'), DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_binary_ufuncs_mixed_dtype'), # GradcheckError: Jacobian computed with forward mode mismatch for output 0 with respect to input 0 # Numerical: # tensor([[0.]], dtype=torch.float64) # Analytical: # tensor([[-0.0047]], dtype=torch.float64, grad_fn=<CopySlices>) DecorateInfo(unittest.expectedFailure, 'TestFwdGradients', 'test_fn_fwgrad_bwgrad'), )), # TODO(@kshitij12345): Refactor similar to `mvlgamma` entries. # To test reference numerics against multiple values of argument `n`, # we make multiple OpInfo entries with each entry corresponding to different value of n (currently 0 to 4). # We run the op tests from test_ops.py only for `n=0` to avoid redundancy in testing. 
UnaryUfuncInfo('polygamma', op=lambda x, n, **kwargs: torch.polygamma(n, x, **kwargs), variant_test_name='polygamma_n_0', ref=reference_polygamma if TEST_SCIPY else None, dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, promotes_int_to_float=True, sample_inputs_func=sample_inputs_polygamma, skips=( DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), ), sample_kwargs=lambda device, dtype, input: ({'n': 0}, {'n': 0}), # polygamma functions have multiple singularities at x having non-positive integer value reference_numerics_filter=NumericsFilter(condition=lambda x: (x < 0.1) & ((x - x.round()).abs() < 1e-4), safe_val=1)), *(UnaryUfuncInfo('polygamma', op=lambda x, n, **kwargs: torch.polygamma(n, x, **kwargs), variant_test_name=f'polygamma_n_{n_}', ref=reference_polygamma if TEST_SCIPY else None, dtypes=all_types_and(torch.bool, torch.bfloat16), dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, promotes_int_to_float=True, sample_inputs_func=sample_inputs_polygamma, decorators=( DecorateInfo(toleranceOverride({torch.float32: tol(atol=1e-4, rtol=1e-3)}), 'TestUnaryUfuncs'), DecorateInfo(toleranceOverride({torch.bfloat16: tol(atol=1e1, rtol=1e-1), torch.float32: tol(atol=1e-4, rtol=1e-2)}), 'TestUnaryUfuncs', 'test_reference_numerics_normal', active_if=IS_WINDOWS), ), skips=( # Redundant tests DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients'), DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients'), DecorateInfo(unittest.skip("Skipped!"), 'TestJit'), DecorateInfo(unittest.skip("Skipped!"), 'TestNormalizeOperators'), DecorateInfo(unittest.skip("Skipped!"), 'TestCommon'), # Mismatch: https://github.com/pytorch/pytorch/issues/55357 DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal'), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large'), ), sample_kwargs=lambda device, dtype, input: ({'n': n_}, {'n': n_}), # polygamma functions have multiple singularities at x having non-positive integer value reference_numerics_filter=NumericsFilter(condition=lambda x: (x < 0.1) & ((x - x.round()).abs() < 1e-4), safe_val=1)) for n_ in (1, 2, 3, 4)), OpInfo('ravel', ref=np.ravel, dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # See https://github.com/pytorch/pytorch/pull/78358 check_batched_forward_grad=False, sample_inputs_func=sample_inputs_ravel, ), OpInfo('unravel_index', ref=np.unravel_index, dtypes=integral_types_and(), supports_out=False, supports_autograd=False, sample_inputs_func=sample_inputs_unravel_index, ), OpInfo('reshape', dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), sample_inputs_func=sample_inputs_view_reshape, reference_inputs_func=reference_inputs_view_reshape, error_inputs_func=error_inputs_view_reshape, supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, ), OpInfo('reshape_as', op=lambda x, other: x.reshape_as(other), dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), sample_inputs_func=partial(sample_inputs_view_reshape, tensor_arg=True), reference_inputs_func=partial(reference_inputs_view_reshape, tensor_arg=True), 
error_inputs_func=partial(error_inputs_view_reshape, tensor_arg=True), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), )), OpInfo('view', op=lambda x, shape: x.view(shape), dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, assert_jit_shape_analysis=True, sample_inputs_func=sample_inputs_view_reshape, reference_inputs_func=reference_inputs_view_reshape, error_inputs_func=error_inputs_view_reshape, skips=( DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), # RuntimeError: view size is not compatible with input tensor's size and stride # (at least one dimension spans across two contiguous subspaces). Use .reshape(...) instead. DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace_all_strides"), )), OpInfo('view_as', op=lambda x, other: x.view_as(other), dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=partial(sample_inputs_view_reshape, tensor_arg=True), reference_inputs_func=partial(reference_inputs_view_reshape, tensor_arg=True), error_inputs_func=partial(error_inputs_view_reshape, tensor_arg=True), skips=( DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), # RuntimeError: view size is not compatible with input tensor's size and stride DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace_all_strides") )), OpInfo('atleast_1d', dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16), # Runs very slowly on slow gradcheck - alternatively reduce input sizes gradcheck_fast_mode=True, supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # See https://github.com/pytorch/pytorch/pull/78358 check_batched_forward_grad=False, sample_inputs_func=sample_inputs_atleast1d2d3d, skips=( # JIT does not support variadic tensors. # RuntimeError: input->type()->kind() == TypeKind::OptionalType # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":252, # please report a bug to PyTorch. 
DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=[torch.float32]), ), ), OpInfo('atleast_2d', dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16), # Runs very slowly on slow gradcheck - alternatively reduce input sizes gradcheck_fast_mode=True, supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # See https://github.com/pytorch/pytorch/pull/78358 check_batched_forward_grad=False, skips=( DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=[torch.float32]), ), sample_inputs_func=sample_inputs_atleast1d2d3d, ), OpInfo('atleast_3d', dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16), # Runs very slowly on slow gradcheck - alternatively reduce input sizes gradcheck_fast_mode=True, supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # See https://github.com/pytorch/pytorch/pull/78358 check_batched_forward_grad=False, skips=( DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=[torch.float32]), ), sample_inputs_func=sample_inputs_atleast1d2d3d, ), OpInfo('flatten', dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), ref=reference_flatten, supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # See https://github.com/pytorch/pytorch/pull/78358 check_batched_forward_grad=False, sample_inputs_func=sample_inputs_flatten, reference_inputs_func=reference_inputs_flatten, ), OpInfo('unflatten', op=torch.unflatten, dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_unflatten, ), OpInfo('column_stack', dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), supports_forward_ad=True, supports_fwgrad_bwgrad=True, # See https://github.com/pytorch/pytorch/pull/78358 check_batched_forward_grad=False, sample_inputs_func=sample_inputs_column_stack,), OpInfo('pinverse', op=torch.pinverse, dtypes=floating_and_complex_types(), check_batched_grad=False, check_batched_gradgrad=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, supports_out=False, sample_inputs_func=sample_inputs_linalg_invertible, decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack], skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager', device_type='mps', dtypes=[torch.float32]), DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', device_type='mps', dtypes=[torch.float32]), )), OpInfo('gather', dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), sample_inputs_func=sample_inputs_gather, gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, supports_forward_ad=True, supports_fwgrad_bwgrad=True, error_inputs_func=error_inputs_gather, ), OpInfo('index_fill', dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, 
torch.complex32), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # https://github.com/pytorch/pytorch/issues/66357 check_batched_forward_grad=False, skips=( # RuntimeError: Mismatch on aten._unique.default: Shapes torch.Size([2]) and torch.Size([1]) are not equal! DecorateInfo(unittest.expectedFailure, 'TestFakeTensor', 'test_fake_crossref_backward_no_amp'), # RuntimeError: Mismatch on aten._unique.default: Shapes torch.Size([2]) and torch.Size([1]) are not equal! DecorateInfo(unittest.expectedFailure, 'TestFakeTensor', 'test_fake_crossref_backward_amp'), ), sample_inputs_func=sample_inputs_index, reference_inputs_func=partial(sample_inputs_index, reference=True)), OpInfo('index_copy', dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.complex32), supports_out=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # https://github.com/pytorch/pytorch/issues/66357 check_batched_forward_grad=False, sample_inputs_func=sample_inputs_index, reference_inputs_func=partial(sample_inputs_index, reference=True), gradcheck_nondet_tol=GRADCHECK_NONDET_TOL), OpInfo('index_select', dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), backward_dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16, torch.chalf), sample_inputs_func=sample_inputs_index, reference_inputs_func=partial(sample_inputs_index, reference=True), error_inputs_func=error_inputs_index_select, supports_forward_ad=True, supports_fwgrad_bwgrad=True, assert_jit_shape_analysis=True, gradcheck_nondet_tol=GRADCHECK_NONDET_TOL), OpInfo('index_add', dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), supports_out=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # https://github.com/pytorch/pytorch/issues/66357 check_batched_forward_grad=False, sample_inputs_func=sample_inputs_index, reference_inputs_func=partial(sample_inputs_index, reference=True), error_inputs_func=error_inputs_index_add, skips=( # boolean alpha not handled properly DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness', dtypes=(torch.bool,)), ), gradcheck_nondet_tol=GRADCHECK_NONDET_TOL), *(OpInfo('index_reduce', variant_test_name=reduction_type, dtypes=all_types_and(torch.float16, torch.bfloat16), skips=( DecorateInfo(toleranceOverride({torch.float16: tol(atol=2e-3, rtol=3e-3)}), 'TestInductorOpInfo', 'test_comprehensive'), ), supports_out=True, sample_inputs_func=sample_inputs_index_reduce, ) for reduction_type in ('mean', 'prod', 'amin', 'amax')), OpInfo('_unsafe_masked_index', dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16, torch.bool), supports_out=False, supports_inplace_autograd=False, supports_scripting=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs__unsafe_masked_index, skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness'), DecorateInfo(slowTest, 'TestDecomp', 'test_quick_core_backward', dtypes=(torch.float64,), active_if=IS_WINDOWS), ),), OpInfo('_unsafe_masked_index_put_accumulate', dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16, torch.bool), supports_out=False, supports_inplace_autograd=False, supports_scripting=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, decorators=( DecorateInfo( toleranceOverride({torch.float16: tol(atol=2e-3, rtol=3e-2)}), 'TestInductorOpInfo', 'test_comprehensive', device_type='cpu' ), ), 
sample_inputs_func=sample_inputs__unsafe_masked_index_put_accumulate, skips=( DecorateInfo(slowTest, 'TestDecomp', 'test_quick_core_backward', dtypes=(torch.float64,), active_if=IS_WINDOWS), ),), OpInfo('__getitem__', dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), # Runs very slowly on slow gradcheck - alternatively reduce input sizes gradcheck_fast_mode=True, supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_inplace_autograd=False, supports_scripting=False, op=torch.Tensor.__getitem__, skips=( DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), # AssertionError: False is not true : Scalars failed to compare as equal! 0 != 104448 DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit', device_type='cuda'),), sample_inputs_func=sample_inputs_getitem), OpInfo('index_put', dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), supports_out=False, supports_inplace_autograd=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # https://github.com/pytorch/pytorch/issues/66357 check_batched_forward_grad=False, test_neg_view=False, sample_inputs_func=sample_inputs_index_put, skips=( DecorateInfo(unittest.skip("Skipped"), 'TestBwdGradients', 'test_fn_grad', dtypes=[torch.float64], device_type='cuda', active_if=(TEST_WITH_ROCM and TEST_WITH_TORCHINDUCTOR)), )), OpInfo('sort', dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16), dtypesIfCUDA=all_types_and(torch.bool, torch.float16, torch.bfloat16), sample_inputs_func=sample_inputs_sort, supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_non_standard_bool_values', dtypes=[torch.bool], device_type='cuda', active_if=not TEST_WITH_ROCM), )), OpInfo('unique', dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16, torch.uint16, torch.uint32, torch.uint64), sample_inputs_func=sample_inputs_unique, supports_out=False, supports_autograd=False, skips=( # lambda impl DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), DecorateInfo(unittest.skip('Output order is undefined when sorted=False'), 'TestCommon', 'test_compare_cpu'), )), OpInfo('unique_consecutive', dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16), sample_inputs_func=sample_inputs_unique_consecutive, supports_out=False, supports_autograd=False, skips=( # lambda impl DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), )), OpInfo('put', dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, check_batched_forward_grad=False, check_batched_gradgrad=False, # vmap complains of the sizes sample_inputs_func=sample_inputs_put), OpInfo('take', dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), check_batched_grad=False, # vmap complains of the sizes supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_take, error_inputs_func=error_inputs_take), OpInfo('scatter', dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, 
sample_inputs_func=sample_inputs_scatter, error_inputs_func=error_inputs_scatter_and_scatter_add), UnaryUfuncInfo( 'bfloat16', op=lambda x, *args, **kwargs: x.bfloat16(*args, **kwargs), dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), supports_out=False, sample_inputs_func=sample_inputs_conversion, skips=( # autograd tests don't handle operators that change dtype DecorateInfo(unittest.expectedFailure, 'TestFwdGradients'), DecorateInfo(unittest.expectedFailure, 'TestBwdGradients'), DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), # RuntimeError: attribute lookup is not defined on builtin DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness'), )), UnaryUfuncInfo( 'bool', op=lambda x, *args, **kwargs: x.bool(*args, **kwargs), dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), supports_out=False, sample_inputs_func=sample_inputs_conversion, supports_autograd=False, skips=( DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), # RuntimeError: attribute lookup is not defined on builtin DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), )), UnaryUfuncInfo( 'byte', op=lambda x, *args, **kwargs: x.byte(*args, **kwargs), dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), supports_out=False, sample_inputs_func=sample_inputs_byte, # The autograd test runner cannot handle functions that change dtype supports_autograd=False, skips=( DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), # RuntimeError: attribute lookup is not defined on builtin DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), DecorateInfo(unittest.skip('Overflow when downcasting signed type is undefined'), 'TestCommon', 'test_compare_cpu'), )), UnaryUfuncInfo( 'char', op=lambda x, *args, **kwargs: x.char(*args, **kwargs), dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), supports_out=False, sample_inputs_func=sample_inputs_conversion, # The autograd test runner cannot handle functions that change dtype supports_autograd=False, skips=( DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), # RuntimeError: attribute lookup is not defined on builtin DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), DecorateInfo(unittest.skip('Overflow when downcasting signed type is undefined'), 'TestCommon', 'test_compare_cpu'), )), UnaryUfuncInfo( 'double', op=lambda x, *args, **kwargs: x.double(*args, **kwargs), dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), supports_out=False, sample_inputs_func=sample_inputs_conversion, supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), # RuntimeError: attribute lookup is not defined on builtin DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), )), UnaryUfuncInfo( 'float', op=lambda x, *args, **kwargs: x.float(*args, **kwargs), dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), supports_out=False, sample_inputs_func=sample_inputs_conversion, skips=( # autograd tests don't 
handle operators that change dtype DecorateInfo(unittest.expectedFailure, 'TestFwdGradients'), DecorateInfo(unittest.expectedFailure, 'TestBwdGradients'), DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), # RuntimeError: attribute lookup is not defined on builtin DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), )), UnaryUfuncInfo( 'half', op=lambda x, *args, **kwargs: x.half(*args, **kwargs), dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), supports_out=False, sample_inputs_func=sample_inputs_conversion, supports_autograd=True, skips=( # autograd tests don't handle operators that change dtype DecorateInfo(unittest.expectedFailure, 'TestFwdGradients'), DecorateInfo(unittest.expectedFailure, 'TestBwdGradients'), DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), # RuntimeError: attribute lookup is not defined on builtin DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), )), UnaryUfuncInfo( 'int', op=lambda x, *args, **kwargs: x.int(*args, **kwargs), dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), supports_out=False, sample_inputs_func=sample_inputs_conversion, supports_autograd=False, skips=( DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), # RuntimeError: attribute lookup is not defined on builtin DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), DecorateInfo(unittest.skip('Overflow when downcasting signed type is undefined'), 'TestCommon', 'test_compare_cpu'), )), UnaryUfuncInfo( 'long', op=lambda x, *args, **kwargs: x.long(*args, **kwargs), dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), supports_out=False, sample_inputs_func=sample_inputs_conversion, supports_autograd=False, skips=( DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), # RuntimeError: attribute lookup is not defined on builtin DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), DecorateInfo(unittest.skip('Overflow when downcasting signed type is undefined'), 'TestCommon', 'test_compare_cpu'), )), UnaryUfuncInfo( 'short', op=lambda x, *args, **kwargs: x.short(*args, **kwargs), dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), supports_out=False, sample_inputs_func=sample_inputs_conversion, supports_autograd=False, skips=( DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), # RuntimeError: attribute lookup is not defined on builtin DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), DecorateInfo(unittest.skip('Overflow when downcasting signed type is undefined'), 'TestCommon', 'test_compare_cpu'), )), UnaryUfuncInfo( 'cdouble', op=torch.Tensor.cdouble, dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), supports_out=False, sample_inputs_func=sample_inputs_conversion, supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), # RuntimeError: attribute lookup is not defined on builtin DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness'), )), 
UnaryUfuncInfo( 'cfloat', op=torch.Tensor.cfloat, dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), supports_out=False, sample_inputs_func=sample_inputs_conversion, skips=( # autograd tests don't handle operators that change dtype DecorateInfo(unittest.expectedFailure, 'TestFwdGradients'), DecorateInfo(unittest.expectedFailure, 'TestBwdGradients'), DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), # RuntimeError: attribute lookup is not defined on builtin DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness'), )), UnaryUfuncInfo( 'chalf', op=lambda x, *args, **kwargs: x.chalf(*args, **kwargs), dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), supports_out=False, sample_inputs_func=sample_inputs_conversion, skips=( # autograd tests don't handle operators that change dtype DecorateInfo(unittest.expectedFailure, 'TestFwdGradients'), DecorateInfo(unittest.expectedFailure, 'TestBwdGradients'), # use of lambda doesn't work with test_normalize_operator_exhaustive DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), # RuntimeError: "sum_cpu" not implemented for 'ComplexHalf' DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager', device_type='cpu'), # TypeError: 'int' object is not iterable DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), # RuntimeError: "sum_cpu" not implemented for 'ComplexHalf' DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view', device_type='cpu'), # RuntimeError: "sum_cpu" not implemented for 'ComplexHalf' DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view', device_type='cpu'), # RuntimeError: "sum_cpu" not implemented for 'ComplexHalf' # RuntimeError: "neg_conj_cuda" not implemented for 'ComplexHalf' DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), ) ), OpInfo('empty_like', dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), supports_out=False, sample_inputs_func=sample_inputs_like_fns, reference_inputs_func=reference_inputs_like_fns, supports_autograd=False, skips=( # Empty tensor data is garbage so it's hard to make comparisons with it. DecorateInfo(unittest.skip("Skipped!"), "TestNormalizeOperators", "test_normalize_operator_exhaustive"), # Empty tensor data is garbage so it's hard to make comparisons with it. DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_noncontiguous_samples'), # Empty tensor data is garbage so it's hard to make comparisons with it. DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), # Empty tensor data is garbage so it's hard to make comparisons with it. DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_conj_view'), # Empty tensor data is garbage so it's hard to make comparisons with it. DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view'), # Empty tensor data is garbage so it's hard to make comparisons with it. DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_conj_view'), # Empty tensor data is garbage so it's hard to make comparisons with it. DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness'), # Empty tensor data is garbage so it's hard to make comparisons with it. 
DecorateInfo(unittest.skip("Skipped!"), 'TestCudaFuserOpInfo'), # Empty tensor data is garbage so it's hard to make comparisons with it. DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_complex_half_reference_testing'), # Empty tensor data is garbage so it's hard to make comparisons with it. DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_non_standard_bool_values'), DecorateInfo(unittest.skip("Expected: empty_like is not comparable"), 'TestCompositeCompliance', 'test_operator'), DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), )), OpInfo('zeros_like', dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), supports_out=False, sample_inputs_func=sample_inputs_like_fns, supports_autograd=False, error_inputs_sparse_func=error_inputs_sparse_like_fns, sample_inputs_sparse_coo_func=partial(sample_inputs_sparse_like_fns, layout=torch.sparse_coo), sample_inputs_sparse_csr_func=partial(sample_inputs_sparse_like_fns, layout=torch.sparse_csr), sample_inputs_sparse_csc_func=partial(sample_inputs_sparse_like_fns, layout=torch.sparse_csc), sample_inputs_sparse_bsr_func=partial(sample_inputs_sparse_like_fns, layout=torch.sparse_bsr), sample_inputs_sparse_bsc_func=partial(sample_inputs_sparse_like_fns, layout=torch.sparse_bsc), skips=( )), OpInfo('ones_like', dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), supports_out=False, sample_inputs_func=sample_inputs_like_fns, supports_autograd=False, skips=( )), OpInfo('randn', dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16, torch.complex32), op=lambda *args, **kwargs: wrapper_set_seed(torch.randn, *args, **kwargs), supports_out=True, sample_inputs_func=sample_inputs_randn, supports_autograd=False, skips=( # Tests that assume input is a tensor or sequence of tensors DecorateInfo(unittest.skip("Test expects tensor input"), "TestCommon", "test_noncontiguous_samples"), DecorateInfo(unittest.skip("Test expects tensor input"), "TestVmapOperatorsOpInfo", "test_vmap_exhaustive"), DecorateInfo(unittest.skip("Test expects tensor input"), "TestVmapOperatorsOpInfo", "test_op_has_batch_rule"), # CPU randn generates different values based on the strides of out tensor DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out', device_type='cpu'), # randn fails to warn when resizing its out tensor DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), # FX failed to normalize op - add the op to the op_skip list. 
DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), # Tests that assume input tensor has a meaningful effect on output tensor DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), # AssertionError: JIT Test does not execute any logic DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), DecorateInfo(unittest.expectedFailure, 'TestDecomp', 'test_quick'), )), OpInfo('randn_like', dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16, torch.complex32), op=lambda inp, *args, **kwargs: wrapper_set_seed(torch.randn_like, inp, *args, **kwargs), supports_out=False, sample_inputs_func=sample_inputs_like_fns, supports_autograd=False, error_inputs_sparse_func=error_inputs_sparse_like_fns, sample_inputs_sparse_coo_func=partial(sample_inputs_sparse_like_fns, layout=torch.sparse_coo), sample_inputs_sparse_csr_func=partial(sample_inputs_sparse_like_fns, layout=torch.sparse_csr), sample_inputs_sparse_csc_func=partial(sample_inputs_sparse_like_fns, layout=torch.sparse_csc), sample_inputs_sparse_bsr_func=partial(sample_inputs_sparse_like_fns, layout=torch.sparse_bsr), sample_inputs_sparse_bsc_func=partial(sample_inputs_sparse_like_fns, layout=torch.sparse_bsc), skips=( DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), # AssertionError: JIT Test does not execute any logic DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), DecorateInfo(unittest.skip("Expected: randn_like is not comparable between dtypes"), 'TestCommon', 'test_complex_half_reference_testing'), DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), )), OpInfo('rand_like', dtypes=floating_types_and(torch.half, torch.bfloat16, torch.complex32, torch.complex64, torch.complex128), op=lambda inp, *args, **kwargs: wrapper_set_seed(torch.randn_like, inp, *args, **kwargs), supports_out=False, sample_inputs_func=sample_inputs_like_fns, supports_autograd=False, skips=( DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), # AssertionError: JIT Test does not execute any logic DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), DecorateInfo(unittest.skip("Expected: randn_like is not comparable between dtypes"), 'TestCommon', 'test_complex_half_reference_testing'), DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), )), OpInfo('randint', dtypes=all_types_and(torch.half, torch.bfloat16), op=lambda *args, **kwargs: wrapper_set_seed(torch.randint, *args, **kwargs), supports_out=False, sample_inputs_func=sample_inputs_randint, supports_autograd=False, skips=( # Tests that assume input is a tensor or sequence of tensors DecorateInfo(unittest.skip("Test expects tensor input"), "TestCommon", "test_noncontiguous_samples"), DecorateInfo(unittest.skip("Test expects tensor input"), "TestVmapOperatorsOpInfo", "test_vmap_exhaustive"), DecorateInfo(unittest.skip("Test expects tensor input"), "TestVmapOperatorsOpInfo", "test_op_has_batch_rule"), # CPU randint generates different values based on the strides of out tensor DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), 
# randint fails to warn when resizing its out tensor DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), # FX failed to normalize op - add the op to the op_skip list. DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), # Tests that assume input tensor has a meaningful effect on output tensor DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), # AssertionError: JIT Test does not execute any logic DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), # Might need to skip until ROCm5.5 DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_multiple_devices', dtypes=[torch.float32, torch.int64], active_if=TEST_WITH_ROCM), )), OpInfo('randint_like', dtypes=all_types_and(torch.half, torch.bfloat16), op=lambda inp, *args, **kwargs: wrapper_set_seed(torch.randint_like, inp, *args, **kwargs), supports_out=False, sample_inputs_func=sample_inputs_randint_like, supports_autograd=False, skips=( DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), # AssertionError: JIT Test does not execute any logic DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), )), OpInfo('full_like', dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), supports_out=False, sample_inputs_func=sample_inputs_full_like, supports_autograd=False, skips=( )), OpInfo('new_zeros', op=lambda x, *args, **kwargs: x.new_zeros(*args, **kwargs), dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), supports_out=False, sample_inputs_func=sample_inputs_new_fns, skips=( DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), ), supports_autograd=False), OpInfo('new_ones', op=lambda x, *args, **kwargs: x.new_ones(*args, **kwargs), dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), supports_out=False, sample_inputs_func=sample_inputs_new_fns, skips=( DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), ), supports_autograd=False), OpInfo('ones', op=torch.ones, supports_autograd=False, supports_varargs=True, is_factory_function=True, dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), supports_out=True, sample_inputs_func=sample_inputs_ones_zeros, skips=( # Tests that assume input is a tensor or sequence of tensors DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), # Same failure as arange: cannot find linspace in captured graph DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), # UserWarning not triggered : Resized a non-empty tensor but did not warn about it. 
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), )), OpInfo('zeros', op=torch.zeros, supports_autograd=False, is_factory_function=True, dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), supports_out=True, sample_inputs_func=sample_inputs_ones_zeros, skips=( # Tests that assume input is a tensor or sequence of tensors DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), # Same failure as arange: cannot find linspace in captured graph DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), # UserWarning not triggered : Resized a non-empty tensor but did not warn about it. DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), )), OpInfo('full', op=torch.full, supports_autograd=False, is_factory_function=True, dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), supports_out=True, sample_inputs_func=sample_inputs_full, skips=( # Tests that assume input is a tensor or sequence of tensors DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_variant_consistency_eager'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), # Same failure as arange: cannot find linspace in captured graph DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), # UserWarning not triggered : Resized a non-empty tensor but did not warn about it. DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), # RuntimeError: UNSUPPORTED DTYPE: bool DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness', dtypes=(torch.bool,)), )), OpInfo('new_empty', op=lambda x, *args, **kwargs: x.new_empty(*args, **kwargs), dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), supports_out=False, sample_inputs_func=sample_inputs_new_fns, skips=( DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), # Empty tensor data is garbage so it's hard to make comparisons with it. DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), # Empty tensor data is garbage so it's hard to make comparisons with it. DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager'), # Empty tensor data is garbage so it's hard to make comparisons with it. DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_noncontiguous_samples'), # Empty tensor data is garbage so it's hard to make comparisons with it. DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_conj_view'), # Empty tensor data is garbage so it's hard to make comparisons with it. DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view'), # Empty tensor data is garbage so it's hard to make comparisons with it. DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_conj_view'), # Empty tensor data is garbage so it's hard to make comparisons with it. 
DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness'), # Empty tensor data is garbage so it's hard to make comparisons with it. DecorateInfo(unittest.skip("Skipped!"), 'TestCudaFuserOpInfo'), # Empty tensor data is garbage so it's hard to make comparisons with it. DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_non_standard_bool_values'), DecorateInfo(unittest.skip("Expected: new_empty is not comparable"), 'TestCompositeCompliance', 'test_operator'), DecorateInfo(unittest.skip("Expected: new_empty is not comparable"), 'TestCommon', 'test_complex_half_reference_testing'), DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), ), supports_autograd=False), OpInfo('new_empty_strided', op=lambda x, *args, **kwargs: x.new_empty_strided(*args, **kwargs), dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), supports_out=False, sample_inputs_func=partial(sample_inputs_new_fns, is_strided=True), supports_autograd=False, skips=( # FX failed to normalize op DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), # Lazy tensor failures DecorateInfo(unittest.skip("Skipped!"), 'TestLazyOpInfo', 'test_correctness'), DecorateInfo(unittest.skip("Skipped!"), 'TestLazyOpInfo', 'test_correctness_with_reusing_ir'), # Empty tensor data is garbage so it's hard to make comparisons with it. DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"), 'TestCommon', 'test_variant_consistency_eager'), DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"), 'TestCommon', 'test_noncontiguous_samples'), DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"), 'TestMathBits', 'test_conj_view'), DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"), 'TestMathBits', 'test_neg_view'), DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"), 'TestMathBits', 'test_neg_conj_view'), DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"), 'TestCommon', 'test_non_standard_bool_values'), DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"), 'TestCommon', 'test_complex_half_reference_testing'), DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"), 'TestCompositeCompliance', 'test_operator'), DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"), 'TestDecomp', 'test_comprehensive'), DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"), 'TestDecomp', 'test_quick'), DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"), 'TestJit', 'test_variant_consistency_jit'), DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"), 'TestProxyTensorOpInfo', 'test_make_fx_exhaustive'), DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"), 'TestProxyTensorOpInfo', 'test_make_fx_fake_exhaustive'), DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"), 'TestProxyTensorOpInfo', 'test_make_fx_symbolic_exhaustive'), DecorateInfo(unittest.skip("Expected: new_empty_strided is not comparable"), 'TestNNCOpInfo', 'test_nnc_correctness'), DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), )), OpInfo('empty_strided', op=lambda inp, *args, **kwargs: wrapper_set_seed(torch.empty_strided, inp, *args, **kwargs), dtypes=all_types_and_complex_and(torch.bfloat16, torch.bool, 
torch.half), supports_out=False, supports_autograd=False, sample_inputs_func=sample_inputs_empty_strided, skips=( # FX failed to normalize op - add the op to the op_skip list. DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), # AssertionError: JIT Test does not execute any logic DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), # Empty tensor data is garbage so it's hard to make comparisons with it. DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_noncontiguous_samples'), DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager'), DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_non_standard_bool_values'), DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_conj_view'), DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view'), DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_conj_view'), DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_compare_cpu'), DecorateInfo(unittest.skip("Expected: empty is not comparable"), 'TestCompositeCompliance', 'test_operator'), # Lazy tensor failures DecorateInfo(unittest.skip("Expected: empty is not comparable"), 'TestLazyOpInfo'), # RuntimeError: unsupported operation: more than one element of the written-to tensor refers to a single # memory location. Please clone() the tensor before performing the operation. DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_meta_outplace'), DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_symbolic_meta_outplace'), DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_dispatch_symbolic_meta_outplace_all_strides'), )), OpInfo('empty', dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), sample_inputs_func=sample_inputs_empty, supports_autograd=False, skips=( DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), # Empty tensor data is garbage so it's hard to make comparisons with it. DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), # Empty tensor data is garbage so it's hard to make comparisons with it. DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager'), # Empty tensor data is garbage so it's hard to make comparisons with it. DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_noncontiguous_samples'), # Empty tensor data is garbage so it's hard to make comparisons with it. DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_conj_view'), # Empty tensor data is garbage so it's hard to make comparisons with it. DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view'), # Empty tensor data is garbage so it's hard to make comparisons with it. DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_conj_view'), # Empty tensor data is garbage so it's hard to make comparisons with it. DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness'), # Empty tensor data is garbage so it's hard to make comparisons with it. DecorateInfo(unittest.skip("Skipped!"), 'TestCudaFuserOpInfo'), # Empty tensor data is garbage so it's hard to make comparisons with it. 
DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_non_standard_bool_values'), DecorateInfo(unittest.skip("Expected: empty is not comparable"), 'TestCompositeCompliance', 'test_operator'), # requires_grad doesn't exist in the jit schema DecorateInfo(unittest.expectedFailure, 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'), DecorateInfo(unittest.skip("Expected: empty is not comparable"), 'TestCommon', 'test_out'), DecorateInfo(unittest.skip("Expected: empty is not comparable"), 'TestCommon', 'test_out_warning'), DecorateInfo(unittest.skip("Expected: empty is not comparable"), 'TestLazyOpInfo'), DecorateInfo(unittest.skip("Expected: empty is not comparable"), 'TestCommon', 'test_complex_half_reference_testing'), DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), )), OpInfo('eye', dtypes=all_types_complex_float8_and(torch.bool, torch.half, torch.bfloat16), sample_inputs_func=sample_inputs_eye, error_inputs_func=error_inputs_eye, supports_out=True, supports_autograd=False, skips=( DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), # TODO: same as this? # https://github.com/pytorch/pytorch/issues/81774 # also see: arange, new_full # fails to match any schemas despite working in the interpreter DecorateInfo(unittest.expectedFailure, 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'), # fails to match any schemas despite working in the interpreter DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), # skip these tests since we have non tensor input DecorateInfo(unittest.skip('Skipped!'), "TestCommon", "test_noncontiguous_samples"), DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_variant_consistency_eager'), DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_conj_view'), DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_conj_view'), DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view'), # UserWarning not triggered : Resized a non-empty tensor but did not warn about it. DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), # "mul_cpu_reduced_float" not implemented for 'Float8_e4m3fn' DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness', dtypes=(torch.float8_e4m3fn, torch.float8_e4m3fnuz, torch.float8_e5m2, torch.float8_e5m2fnuz)), )), OpInfo('empty_permuted', dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), sample_inputs_func=sample_inputs_empty_permuted, error_inputs_func=error_inputs_empty_permuted, supports_out=False, supports_autograd=False, skips=( DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), # Empty tensor data is garbage so it's hard to make comparisons with it. DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), # Empty tensor data is garbage so it's hard to make comparisons with it. DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager'), # Empty tensor data is garbage so it's hard to make comparisons with it. DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_noncontiguous_samples'), # Empty tensor data is garbage so it's hard to make comparisons with it. DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_conj_view'), # Empty tensor data is garbage so it's hard to make comparisons with it. 
DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view'), # Empty tensor data is garbage so it's hard to make comparisons with it. DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_conj_view'), # Empty tensor data is garbage so it's hard to make comparisons with it. DecorateInfo(unittest.skip("Skipped!"), 'TestNNCOpInfo', 'test_nnc_correctness'), # Empty tensor data is garbage so it's hard to make comparisons with it. DecorateInfo(unittest.skip("Skipped!"), 'TestCudaFuserOpInfo'), # Empty tensor data is garbage so it's hard to make comparisons with it. DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_non_standard_bool_values'), DecorateInfo(unittest.skip("Expected: empty_permuted is not comparable"), 'TestCompositeCompliance', 'test_operator'), # requires_grad doesn't exist in the jit schema DecorateInfo(unittest.expectedFailure, 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'), DecorateInfo(unittest.skip("Expected: empty_permuted is not comparable"), 'TestCommon', 'test_out'), DecorateInfo(unittest.skip("Expected: empty_permuted is not comparable"), 'TestCommon', 'test_out_warning'), DecorateInfo(unittest.skip("Expected: empty_permuted is not comparable"), 'TestLazyOpInfo'), DecorateInfo(unittest.skip("Expected: empty_permuted is not comparable"), 'TestCommon', 'test_complex_half_reference_testing'), DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), )), OpInfo('scalar_tensor', dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), sample_inputs_func=sample_inputs_scalar_tensor, supports_autograd=False, supports_out=False, skips=( DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), # fails to match any schemas despite working in the interpreter DecorateInfo(unittest.expectedFailure, 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'), # fails to match any schemas despite working in the interpreter DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), # skip these tests since we have non tensor input DecorateInfo(unittest.skip('Skipped!'), "TestCommon", "test_noncontiguous_samples"), DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_variant_consistency_eager'), DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_conj_view'), DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_conj_view'), DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view'), )), OpInfo('new_full', op=lambda x, *args, **kwargs: x.new_full(*args, **kwargs), dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), supports_out=False, sample_inputs_func=sample_inputs_new_full, skips=( DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), ), supports_autograd=False), OpInfo('multinomial', op=lambda inp, *args, **kwargs: wrapper_set_seed(torch.multinomial, inp, *args, **kwargs), method_variant=lambda inp, *args, **kwargs: wrapper_set_seed(torch.Tensor.multinomial, inp, *args, **kwargs), dtypes=floating_types_and(torch.bfloat16, torch.half), supports_out=True, sample_inputs_func=sample_inputs_multinomial, error_inputs_func=error_inputs_multinomial, skips=( DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), # Strides are not the same! 
               # This may not be reproducible in CI
               DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out'),
               # AssertionError: JIT Test does not execute any logic
               DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
               # UserWarning not triggered : Resized a non-empty tensor but did not warn about it.
               DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'),
               DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu')),
           supports_autograd=False),
    OpInfo('normal',
           op=lambda inp, *args, **kwargs: wrapper_set_seed(torch.normal, inp, *args, **kwargs),
           # The inplace variant (Tensor.normal_) is different from torch.normal
           inplace_variant=None,
           dtypes=floating_types_and(torch.bfloat16, torch.half),
           dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.half),
           supports_out=True,
           sample_inputs_func=sample_inputs_normal_tensor_first,
           skips=(
               DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
               # Tensor-likes are not close!
               DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'),
               # AssertionError: JIT Test does not execute any logic
               DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
               # UserWarning not triggered : Resized a non-empty tensor but did not warn about it.
               DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'),
               # Computed gradient is incorrect -- would be an exfail but gradgrad somehow passes
               DecorateInfo(unittest.skip("Gradients are incorrect!"), 'TestFwdGradients'),
               DecorateInfo(unittest.skip("Gradients are incorrect!"), 'TestBwdGradients'),
               DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'),
               # RuntimeError: Difference from {dtype} is larger with decomposition
               DecorateInfo(unittest.skip("Skipped!"), 'TestDecomp', 'test_comprehensive'),
               DecorateInfo(unittest.skip("Skipped!"), 'TestDecomp', 'test_quick'),
               # The inplace variant (Tensor.normal_) is different from torch.normal
               # inplace variant Tensor.normal_ is decomposed using randn_like()
               DecorateInfo(unittest.skip("Skipped!"), 'TestMeta', 'test_dispatch_symbolic_meta_outplace_all_strides'))),
    OpInfo('normal',
           # This has its own variant b/c OpInfos assume the first arg is a Tensor but it is not here
           variant_test_name='number_mean',
           op=lambda std, mean, *args, **kwargs: wrapper_set_seed(torch.normal, mean, std, *args, **kwargs),
           # The inplace variant (Tensor.normal_) is different from torch.normal
           inplace_variant=None,
           dtypes=floating_types_and(torch.bfloat16, torch.half),
           dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.half),
           supports_out=True,
           sample_inputs_func=sample_inputs_normal_tensor_second,
           skips=(
               DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
               # AssertionError: JIT Test does not execute any logic
               DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
               DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_noncontiguous_samples'),
               DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_variant_consistency_eager'),
               DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out'),
               DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out_warning'),
               DecorateInfo(unittest.skip("Skipped!"), 'TestCompositeCompliance', 'test_backward'),
               DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view'),
               DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients'),
               DecorateInfo(unittest.skip("Skipped!"), 'TestBwdGradients'),
               DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_compare_cpu'),
               DecorateInfo(unittest.skip("Skipped!"), 'TestEagerFusionOpInfo'),
               DecorateInfo(unittest.skip("Skipped!"), 'TestOperators'),
               # AssertionError
               DecorateInfo(unittest.skip("Skipped!"), 'TestDecomp', 'test_comprehensive'),
               # AssertionError
               DecorateInfo(unittest.skip("Skipped!"), 'TestDecomp', 'test_quick'),
               # AssertionError in CUDA variant
               DecorateInfo(unittest.skip("Skipped!"), 'TestFakeTensor', device_type='cuda'),
               DecorateInfo(unittest.skip("Skipped!"), 'TestDeviceUtils', 'test_device_mode_ops'))),
    OpInfo('bernoulli',
           op=lambda inp, *args, **kwargs: wrapper_set_seed(torch.bernoulli, inp, *args, **kwargs),
           # The inplace variant (Tensor.bernoulli_) is different from torch.bernoulli
           inplace_variant=None,
           method_variant=lambda inp, *args, **kwargs: wrapper_set_seed(torch.Tensor.bernoulli, inp, *args, **kwargs),
           dtypes=floating_types_and(torch.bfloat16, torch.half),
           supports_out=True,
           supports_forward_ad=True,
           supports_fwgrad_bwgrad=True,
           sample_inputs_func=sample_inputs_bernoulli,
           error_inputs_func=error_inputs_bernoulli,
           skips=(
               # vmap: We do not yet support calling random operations inside of vmap
               DecorateInfo(unittest.expectedFailure, 'TestFwdGradients', 'test_forward_mode_AD'),
               DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"),
               # AssertionError: JIT Test does not execute any logic
               DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
               # Expected RuntimeError when doing an unsafe cast from a result of
               # dtype torch.float32 into an out= with dtype torch.long
               DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'),
               # UserWarning not triggered : Resized a non-empty tensor but did not warn about it.
               DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'),
               DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'))),
    OpInfo('scatter_add',
           dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16),
           sample_inputs_func=sample_inputs_scatter_add,
           error_inputs_func=error_inputs_scatter_and_scatter_add,
           supports_forward_ad=True,
           supports_fwgrad_bwgrad=True,
           ),
    OpInfo('stack',
           dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16),
           sample_inputs_func=sample_inputs_stack,
           assert_autodiffed=True,
           supports_forward_ad=True,
           supports_fwgrad_bwgrad=True,
           skips=(
               # https://github.com/pytorch/pytorch/issues/77046
               DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'),
               DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'),
           ),
           ),
    OpInfo('_chunk_cat',
           dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16),
           sample_inputs_func=sample_inputs_chunk_cat,
           error_inputs_func=error_inputs_chunk_cat,
           supports_autograd=False,
           supports_out=True,
           ),
    OpInfo('hstack',
           dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16),
           sample_inputs_func=sample_inputs_hstack_dstack_vstack,
           error_inputs_func=error_inputs_hstack_dstack_vstack,
           supports_forward_ad=True,
           supports_fwgrad_bwgrad=True,
           ),
    BinaryUfuncInfo('hypot',
                    dtypes=floating_types_and(torch.bfloat16, torch.half),
                    dtypesIfCUDA=floating_types_and(torch.half, torch.bfloat16),
                    supports_forward_ad=True,
                    supports_fwgrad_bwgrad=True,
                    supports_rhs_python_scalar=False),
    OpInfo('histogram',
           dtypes=floating_types(),
           dtypesIfCUDA=_dispatch_dtypes(),  # histogram is only implemented on CPU
           sample_inputs_func=sample_inputs_histogram,
           supports_autograd=False,
           skips=(
               # JIT tests don't work with Tensor keyword arguments
               # https://github.com/pytorch/pytorch/issues/58507
               # RuntimeError:
               # undefined value tensor:
               #   File "<string>", line 3
               # def the_method(i0):
               #     return torch.histogram(i0, 1, weight=tensor(-0.5735, dtype=torch.float32), density=False)
               #            ~~~~~~ <--- HERE
               DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),
               # Not Implemented on XLA.
DecorateInfo(unittest.skip("Skipped!"), 'TestOpInfo', device_type='xla'), )), OpInfo('histogramdd', dtypes=floating_types(), dtypesIfCUDA=_dispatch_dtypes(), # histogramdd is only implemented on CPU sample_inputs_func=sample_inputs_histogramdd, error_inputs_func=error_inputs_histogramdd, supports_autograd=False, skips=( # Not implemented on CUDA DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_errors', device_type='cuda'), # JIT tests don't work with Tensor keyword arguments # https://github.com/pytorch/pytorch/issues/58507 DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), )), OpInfo('histc', dtypes=floating_types_and(torch.bfloat16, torch.float16), dtypesIfCUDA=floating_types_and(torch.int8, torch.uint8, torch.int16, torch.int32, torch.int64), sample_inputs_func=sample_inputs_histc, supports_out=True, supports_autograd=False, skips=( # CUDA histc returns a float tensor but does not correctly warn when passed an integral out tensor # "AssertionError: RuntimeError not raised : Expected RuntimeError when doing an unsafe cast # from a result of dtype torch.float32 into an out= with dtype torch.long" DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out', device_type='cuda'), )), OpInfo('bincount', dtypes=integral_types_and(), sample_inputs_func=sample_inputs_bincount, supports_out=False, supports_autograd=False, skips=( # JIT tests don't work with Tensor keyword arguments # https://github.com/pytorch/pytorch/issues/58507 DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), )), OpInfo('bucketize', dtypes=all_types_and(torch.float16, torch.bfloat16), dtypesIfCUDA=all_types_and(torch.bfloat16, torch.float16), sample_inputs_func=sample_inputs_bucketize, reference_inputs_func=reference_inputs_bucketize, error_inputs_func=error_inputs_bucketize, supports_autograd=False, skips=( # JIT tests don't work with Tensor keyword arguments DecorateInfo(unittest.skip("Expected failure!"), 'TestJit', 'test_variant_consistency_jit'), )), OpInfo('searchsorted', dtypes=all_types_and(torch.bfloat16, torch.float16), dtypesIfCUDA=all_types_and(torch.bfloat16, torch.float16), sample_inputs_func=sample_inputs_searchsorted, supports_autograd=False, ref=reference_searchsorted, skips=( # JIT tests don't work with Tensor keyword arguments # https://github.com/pytorch/pytorch/issues/58507 DecorateInfo(unittest.skip("Expected failure!"), 'TestJit', 'test_variant_consistency_jit'), )), OpInfo('cat', ref=_cat_np, aliases=('concat', 'concatenate'), dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.complex32), sample_inputs_func=sample_inputs_cat_concat, reference_inputs_func=reference_inputs_cat, error_inputs_func=error_inputs_cat, # https://github.com/pytorch/pytorch/issues/80411 gradcheck_fast_mode=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # See https://github.com/pytorch/pytorch/issues/66357 check_batched_forward_grad=False, assert_autodiffed=True, skips=( # https://github.com/pytorch/pytorch/issues/89353 DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_numpy_ref_mps'), # RuntimeError: Arguments for call not valid. # Expected a value of type 'List[Tensor]' for argument # 'tensors' but instead found type 'Tensor (inferred)'. 
DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_jit_alias_remapping'), # see https://github.com/pytorch/pytorch/issues/71286 DecorateInfo(unittest.expectedFailure, 'TestNNCOpInfo', 'test_nnc_correctness'), # see https://github.com/pytorch/pytorch/issues/99806 # RuntimeError: The size of tensor a (25) must match the size of tensor b (0) at non-singleton dimension 0. DecorateInfo(unittest.expectedFailure, 'TestBwdGradients', 'test_fn_gradgrad'), )), OpInfo('unbind', dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16), ref=reference_unbind, sample_inputs_func=sample_inputs_unbind, error_inputs_func=error_inputs_unbind, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_gradgrad=True, supports_out=False, ), OpInfo('unbind_copy', dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16), ref=reference_unbind, sample_inputs_func=sample_inputs_unbind, error_inputs_func=error_inputs_unbind, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_gradgrad=True, supports_out=True, check_batched_grad=False, skips=( # Expected __torch_dispatch__ for aten::unbind_copy.int_out to return None # but it returned something else instead. DecorateInfo( unittest.expectedFailure, 'TestProxyTensorOpInfo', 'test_make_fx_symbolic_exhaustive_out' ), )), OpInfo('vstack', aliases=('row_stack',), dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16), sample_inputs_func=sample_inputs_hstack_dstack_vstack, error_inputs_func=error_inputs_hstack_dstack_vstack, supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( # RuntimeError: _fn() Expected a value of type # 'Tensor (inferred)' for argument 't0' but instead found type 'tuple'. DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_jit_alias_remapping'),)), OpInfo('dstack', dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16), sample_inputs_func=sample_inputs_hstack_dstack_vstack, error_inputs_func=error_inputs_hstack_dstack_vstack, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # See https://github.com/pytorch/pytorch/pull/78358 check_batched_forward_grad=False, ), OpInfo('unfold', op=lambda x, *args: x.unfold(*args), dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), backward_dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16), # Runs very slowly on slow gradcheck - alternatively reduce input sizes gradcheck_fast_mode=True, supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, check_batched_gradgrad=False, # See https://github.com/pytorch/pytorch/issues/66357 check_batched_forward_grad=False, skips=( DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), # Skip operator schema test because this is a functional and not an operator DecorateInfo(unittest.expectedFailure, 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'), ), sample_inputs_func=sample_inputs_unfold), OpInfo('unfold_copy', dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), backward_dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16), # Runs very slowly on slow gradcheck - alternatively reduce input sizes gradcheck_fast_mode=True, supports_out=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, check_batched_gradgrad=False, # See https://github.com/pytorch/pytorch/issues/66357 
check_batched_forward_grad=False, sample_inputs_func=sample_inputs_unfold), OpInfo('msort', dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16), dtypesIfCUDA=all_types_and(torch.bool, torch.float16, torch.bfloat16), check_batched_gradgrad=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_msort), OpInfo('movedim', aliases=('moveaxis',), dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # See https://github.com/pytorch/pytorch/pull/78358 check_batched_forward_grad=False, sample_inputs_func=sample_movedim_moveaxis, reference_inputs_func=reference_movedim_moveaxis, error_inputs_func=error_movedim_moveaxis), OpInfo('renorm', dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16), sample_inputs_func=sample_inputs_renorm, error_inputs_func=error_inputs_renorm, supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( # RuntimeError: Difference from float64 is larger with decomposition # linalg_vector_norm.default than original on output 0. # Original max diff: 2.560596747969157e-07, # Decomp max diff: 1.8187482915266173e-06 DecorateInfo(unittest.skip("Inconsistent accuracy"), 'TestDecomp', 'test_comprehensive', device_type='cpu', dtypes=(torch.float16,)), )), ShapeFuncInfo('repeat', op=lambda x, dims: x.repeat(dims), ref=np.tile, dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), # https://github.com/pytorch/pytorch/issues/80411 gradcheck_fast_mode=True, supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_repeat_tile, skips=( DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), )), OpInfo('squeeze', ref=_squeeze_ref, dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), supports_out=False, assert_autodiffed=True, autodiff_fusible_nodes=[], # aliases inputs, shouldn't be fused autodiff_nonfusible_nodes=[], # aliases inputs, shouldn't be fused assert_jit_shape_analysis=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # vmap does not support inplace views check_inplace_batched_forward_grad=False, # https://github.com/pytorch/pytorch/issues/66357 check_batched_forward_grad=False, sample_inputs_func=sample_inputs_squeeze), OpInfo('squeeze', ref=_squeeze_ref, variant_test_name="multiple", dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), supports_out=False, assert_autodiffed=True, autodiff_fusible_nodes=[], # aliases inputs, shouldn't be fused autodiff_nonfusible_nodes=[], # aliases inputs, shouldn't be fused supports_forward_ad=True, supports_fwgrad_bwgrad=True, # vmap does not support inplace views check_inplace_batched_forward_grad=False, # https://github.com/pytorch/pytorch/issues/66357 check_batched_forward_grad=False, sample_inputs_func=sample_inputs_squeeze_multiple), OpInfo('squeeze_copy', ref=_squeeze_ref, dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), supports_out=True, assert_autodiffed=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # vmap does not support inplace views check_inplace_batched_forward_grad=False, # https://github.com/pytorch/pytorch/issues/66357 check_batched_forward_grad=False, sample_inputs_func=sample_inputs_squeeze, skips=( DecorateInfo( unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', 
dtypes=(torch.float32,), ), )), UnaryUfuncInfo( 'fill', ref=_fill_np, method_variant=None, sample_kwargs=_fill_sample_kwargs, sample_inputs_func=partial(sample_inputs_elementwise_unary, op_kwargs={'value': True}), supports_forward_ad=True, supports_fwgrad_bwgrad=True, # https://github.com/pytorch/pytorch/issues/66357 check_batched_forward_grad=False, dtypes=all_types_and_complex_and(torch.complex32, torch.bool, torch.float16, torch.bfloat16), supports_out=False, skips=( # JIT has issue when op is passed as lambda # AssertionError: JIT Test does not execute any logic DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), DecorateInfo(unittest.skip("No fill_ op"), 'TestCudaFuserOpInfo'), DecorateInfo(unittest.skip("No fill_ op"), 'TestNNCOpInfo'), )), OpInfo('resize_', op=lambda x, shape: x.clone().resize_(shape), method_variant=None, inplace_variant=torch.Tensor.resize_, # the test fails because resize_ doesn't work with imag views as expected by the test # https://github.com/pytorch/pytorch/issues/65945 test_neg_view=False, dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), supports_out=False, supports_autograd=False, skips=( # Cannot resize variables that require grad DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_dtypes'), DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), DecorateInfo(unittest.skip("Allowed exception"), 'TestCompositeCompliance', 'test_operator'), ), sample_inputs_func=sample_inputs_resize_ops), OpInfo('resize_as_', op=lambda x, other: torch.resize_as_(x.clone(), other), method_variant=None, inplace_variant=torch.Tensor.resize_as_, dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), supports_out=False, supports_autograd=False, skips=( # Cannot resize variables that require grad DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_dtypes'), DecorateInfo(unittest.skip('Allowed exemption'), 'TestCompositeCompliance', 'test_operator'), ), sample_inputs_func=sample_inputs_resize_ops), OpInfo('take_along_dim', dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), supports_inplace_autograd=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # See https://github.com/pytorch/pytorch/pull/78358 check_batched_forward_grad=False, sample_inputs_func=sample_inputs_take_along_dim, gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, decorators=( # RuntimeError: view size is not compatible with input tensor's size and stride DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace_all_strides"), )), ShapeFuncInfo('tile', ref=np.tile, dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), # https://github.com/pytorch/pytorch/issues/80411 gradcheck_fast_mode=True, supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_repeat_tile), OpInfo('trapz', # TODO: in the future, 'trapz' should be made a proper alias of 'trapezoid' dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # See https://github.com/pytorch/pytorch/pull/78358 check_batched_forward_grad=False, decorators=[ DecorateInfo( toleranceOverride({torch.half: tol(atol=9e-4, rtol=4.3e-3)}), 'TestInductorOpInfo', 'test_comprehensive', device_type='cuda' ), ], sample_inputs_func=sample_trapezoid), 
OpInfo('trapezoid', dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # See https://github.com/pytorch/pytorch/pull/78358 check_batched_forward_grad=False, decorators=[ DecorateInfo( toleranceOverride({torch.half: tol(atol=9e-4, rtol=4.3e-3)}), 'TestInductorOpInfo', 'test_comprehensive', device_type='cuda' ), ], sample_inputs_func=sample_trapezoid), OpInfo('cumulative_trapezoid', dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, # See https://github.com/pytorch/pytorch/pull/78358 check_batched_forward_grad=False, supports_out=False, decorators=( DecorateInfo( toleranceOverride({torch.float16: tol(atol=4e-3, rtol=4e-3)}), 'TestInductorOpInfo', 'test_comprehensive', ), ), sample_inputs_func=sample_cumulative_trapezoid,), OpInfo('unsqueeze', dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # See https://github.com/pytorch/pytorch/pull/78358 check_batched_forward_grad=False, # vmap does not support inplace views check_inplace_batched_forward_grad=False, assert_jit_shape_analysis=True, assert_autodiffed=True, autodiff_fusible_nodes=[], # aliases inputs, shouldn't be fused autodiff_nonfusible_nodes=[], # aliases inputs, shouldn't be fused sample_inputs_func=sample_unsqueeze), OpInfo('unsqueeze_copy', dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), supports_out=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # See https://github.com/pytorch/pytorch/pull/78358 check_batched_forward_grad=False, # vmap does not support inplace views check_inplace_batched_forward_grad=False, assert_jit_shape_analysis=True, assert_autodiffed=True, autodiff_fusible_nodes=[], # aliases inputs, shouldn't be fused autodiff_nonfusible_nodes=[], # aliases inputs, shouldn't be fused sample_inputs_func=sample_unsqueeze, skips=( DecorateInfo(unittest.expectedFailure, 'TestDTensorOps', 'test_dtensor_op_db'), DecorateInfo( unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,), ), )), BinaryUfuncInfo('xlogy', aliases=('special.xlogy',), dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), promotes_int_to_float=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_one_python_scalar=True, # We don't test 0 as the gradient will be NaN and it'll break rhs_make_tensor_kwargs=dict(low=0.01)), OpInfo('zero_', op=lambda x: torch.zero_(x.clone()), method_variant=None, inplace_variant=torch.Tensor.zero_, dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), # https://github.com/pytorch/pytorch/issues/80411 gradcheck_fast_mode=True, supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_gradgrad=True, skips=( DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), ), sample_inputs_func=sample_inputs_zero_), OpInfo('logsumexp', aliases=('special.logsumexp',), dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), assert_autodiffed=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, gradcheck_fast_mode=False, sample_inputs_func=sample_inputs_logsumexp, reference_inputs_func=reference_inputs_logsumexp), OpInfo('trace', dtypes=all_types_and_complex(), dtypesIfCUDA=all_types_and_complex_and(torch.chalf, torch.bool, torch.half, torch.bfloat16), 
error_inputs_func=error_inputs_trace, supports_inplace_autograd=False, supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_trace), OpInfo('transpose', ref=_numpy_ref_transpose, aliases=('swapdims', 'swapaxes'), assert_jit_shape_analysis=True, dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half, torch.chalf), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # vmap does not support inplace views check_inplace_batched_forward_grad=False, sample_inputs_func=sample_inputs_transpose_swapdims), OpInfo('transpose_copy', assert_jit_shape_analysis=True, dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half, torch.chalf), supports_out=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # vmap does not support inplace views check_inplace_batched_forward_grad=False, sample_inputs_func=sample_inputs_transpose_swapdims, skips=( DecorateInfo(unittest.expectedFailure, 'TestDTensorOps', 'test_dtensor_op_db'), DecorateInfo( unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,) ), )), OpInfo('T', op=lambda x: x.T, dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half, torch.chalf), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( # lambda impl DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), DecorateInfo(unittest.expectedFailure, "TestJit", "test_variant_consistency_jit"),), sample_inputs_func=sample_inputs_T, error_inputs_func=error_inputs_T), OpInfo('H', op=lambda x: x.H, dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half, torch.chalf), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # See https://github.com/pytorch/pytorch/pull/78358 check_batched_forward_grad=False, skips=( # lambda impl DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), DecorateInfo(unittest.expectedFailure, "TestJit", "test_variant_consistency_jit"),), sample_inputs_func=sample_inputs_T), OpInfo('mT', op=lambda x: x.mT, dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half, torch.chalf), # Runs very slowly on slow gradcheck - alternatively reduce input sizes gradcheck_fast_mode=True, supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( # lambda impl DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), DecorateInfo(unittest.expectedFailure, "TestJit", "test_variant_consistency_jit"),), sample_inputs_func=sample_inputs_adjoint), OpInfo('mH', op=lambda x: x.mH, aliases=('adjoint',), dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half, torch.chalf), # Runs very slowly on slow gradcheck - alternatively reduce input sizes gradcheck_fast_mode=True, supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # See https://github.com/pytorch/pytorch/pull/78358 check_batched_forward_grad=False, skips=( # lambda impl DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), DecorateInfo(unittest.expectedFailure, "TestJit", "test_variant_consistency_jit"),), sample_inputs_func=sample_inputs_adjoint), OpInfo('tril', dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), supports_forward_ad=True, supports_fwgrad_bwgrad=True, error_inputs_func=error_inputs_tril_triu, 
sample_inputs_func=sample_inputs_tril_triu), OpInfo('triu', dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf), supports_forward_ad=True, supports_fwgrad_bwgrad=True, error_inputs_func=error_inputs_tril_triu, sample_inputs_func=sample_inputs_tril_triu), OpInfo('triu_indices', dtypes=_dispatch_dtypes((torch.int32, torch.int64)), sample_inputs_func=sample_inputs_trilu_indices, ref=lambda h, w, ofs=0, dtype=torch.long, device='cpu' : np.array(np.triu_indices(h, ofs, w), dtype=dtype), supports_out=False, supports_autograd=False, skips=( # skip these tests since we have non tensor input DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_noncontiguous_samples'), DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_variant_consistency_eager'), DecorateInfo(unittest.skip('Skipped!'), 'TestJit', 'test_variant_consistency_jit'), DecorateInfo(unittest.skip('Skipped!'), 'TestMathBits', 'test_neg_view'), )), OpInfo('tril_indices', dtypes=_dispatch_dtypes((torch.int32, torch.int64)), sample_inputs_func=sample_inputs_trilu_indices, ref=lambda h, w, ofs=0, dtype=torch.long, device='cpu' : np.array(np.tril_indices(h, ofs, w), dtype=dtype), supports_out=False, supports_autograd=False, skips=( # skip these tests since we have non tensor input DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_noncontiguous_samples'), DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_variant_consistency_eager'), DecorateInfo(unittest.skip('Skipped!'), 'TestJit', 'test_variant_consistency_jit'), DecorateInfo(unittest.skip('Skipped!'), 'TestMathBits', 'test_neg_view'), )), OpInfo('kron', dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16), # Runs very slowly on slow gradcheck - alternatively reduce input sizes gradcheck_fast_mode=True, supports_inplace_autograd=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_kron, decorators=( # RuntimeError: view size is not compatible with input tensor's size and stride DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace_all_strides"), )), OpInfo('inner', dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16), dtypesIfROCM=floating_and_complex_types_and(torch.half, torch.bfloat16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, # See https://github.com/pytorch/pytorch/pull/78358 check_batched_forward_grad=False, sample_inputs_func=sample_inputs_inner, ), OpInfo('tensordot', dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16), dtypesIfROCM=floating_and_complex_types_and(torch.half, torch.bfloat16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, # See https://github.com/pytorch/pytorch/pull/78358 check_batched_forward_grad=False, sample_inputs_func=sample_inputs_tensordot, skips=( # Skip operator schema test because this is a functional and not an operator. 
# Reference: https://github.com/pytorch/pytorch/issues/54574 DecorateInfo(unittest.skip("Skipped!"), 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'), ) ), OpInfo('to_sparse', op=lambda x, *args: x.to_sparse(*args), sample_inputs_func=sample_inputs_to_sparse, dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), backward_dtypes=floating_types(), backward_dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), supports_out=False, supports_sparse_csr=True, supports_sparse_csc=True, check_batched_grad=False, check_batched_gradgrad=False, skips=( # NotImplementedError: Could not run 'aten::normal_' with arguments from the 'SparseCPU' backend DecorateInfo(unittest.skip(""), 'TestCommon', 'test_noncontiguous_samples'), # TODO: FIXME: complex inputs requiring grad error in forward DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_dtypes'), # lambda impl DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), # Allowed exception: sparse tensors don't have strides DecorateInfo(unittest.skip("Allowed exception"), 'TestCompositeCompliance', 'test_operator'), DecorateInfo(unittest.skip("Allowed exception"), 'TestCompositeCompliance', 'test_backward'), DecorateInfo(unittest.skip("Allowed exception"), 'TestTags', 'test_tags'), # TODO: implement csr.to_sparse(sample_dim) where sampled_dim is 1. DecorateInfo(unittest.skip("csr.to_sparse(1) not implemented. Skipped!"), 'TestSparseCSR', 'test_sparse_csr_consistency'), # Compiler issue on ROCm. Might need to skip until ROCm5.5 DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_non_standard_bool_values', dtypes=[torch.bool], active_if=TEST_WITH_ROCM), ) ), OpInfo('logcumsumexp', dtypes=floating_and_complex_types_and(torch.bfloat16, torch.half), backward_dtypes=floating_and_complex_types_and(torch.bfloat16), backward_dtypesIfCUDA=floating_and_complex_types_and(torch.bfloat16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( # AssertionError: UserWarning not triggered : Resized a non-empty tensor but did not warn about it. DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning', device_type='cuda'), # RuntimeError: "max_values_cpu" not implemented for 'ComplexDouble' # Falling back to non-numerically stablized exp, causing nan in the results. 
DecorateInfo(unittest.expectedFailure, 'TestFwdGradients', 'test_forward_mode_AD', dtypes=[torch.complex128]), DecorateInfo(unittest.expectedFailure, 'TestFwdGradients', 'test_fn_fwgrad_bwgrad', dtypes=[torch.complex128]), DecorateInfo( toleranceOverride({ torch.float16: tol(atol=7e-5, rtol=6e-3), }), "TestInductorOpInfo", "test_comprehensive", device_type="cuda" ), ), sample_inputs_func=sample_inputs_logcumsumexp, error_inputs_func=error_inputs_logcumsumexp), UnaryUfuncInfo('sigmoid', aliases=('special.expit', 'nn.functional.sigmoid'), aten_backward_name='sigmoid_backward', ref=reference_sigmoid if TEST_SCIPY else None, decorators=(precisionOverride({torch.float16: 1e-2, torch.complex64: 1e-1, torch.bfloat16: 1e-2}),), skips=( # Reference: https://github.com/pytorch/pytorch/issues/56012 DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', dtypes=[torch.complex64, torch.cdouble], device_type='cuda'), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', dtypes=[torch.chalf, torch.complex64, torch.cdouble], device_type='cuda')), dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.complex32, torch.bool, torch.half, torch.bfloat16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, promotes_int_to_float=True, assert_autodiffed=True, # sigmoid(z) = 1 / (1 + exp(-z)), at z = j * pi * odd_number, the denominator is zero reference_numerics_filter=NumericsFilter( condition=lambda x: (close_to_int(x / (math.pi * 1j)) if x.is_complex() else x.new_tensor(False, dtype=torch.bool)), safe_val=0)), UnaryUfuncInfo('digamma', ref=scipy.special.digamma if TEST_SCIPY else None, aliases=('special.psi', 'special.digamma',), decorators=(precisionOverride({torch.float16: 5e-1}),), dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, promotes_int_to_float=True), UnaryUfuncInfo('erf', ref=scipy.special.erf if TEST_SCIPY else None, aliases=('special.erf', ), decorators=(precisionOverride({torch.float16: 1e-2, torch.bfloat16: 1e-2}),), skips=( DecorateInfo(unittest.skip("Skipped! 
sparse backward not supported"), 'TestSparseUnaryUfuncs', 'test_sparse_fn_grad'), ), dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), assert_autodiffed=True, assert_jit_shape_analysis=True, supports_sparse=True, supports_sparse_csr=True, supports_sparse_csc=True, supports_sparse_bsr=True, supports_sparse_bsc=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, promotes_int_to_float=True), UnaryUfuncInfo('erfc', ref=scipy.special.erfc if TEST_SCIPY else None, aliases=('special.erfc', ), decorators=(precisionOverride({torch.float16: 1e-2, torch.bfloat16: 1e-2}),), dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), assert_autodiffed=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, promotes_int_to_float=True), UnaryUfuncInfo('erfinv', ref=scipy.special.erfinv if TEST_SCIPY else None, aliases=('special.erfinv', ), decorators=(precisionOverride({torch.float16: 1e-2, torch.bfloat16: 1e-2, torch.float32: 1e-4}),), dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16), supports_sparse_csr=True, supports_sparse_csc=True, supports_sparse_bsr=True, supports_sparse_bsc=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, promotes_int_to_float=True, domain=(-1, 1), skips=( # Reference: https://github.com/pytorch/pytorch/pull/49155#issuecomment-742664611 DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', active_if=TEST_SCIPY and version.parse(scipy.__version__) < version.parse("1.4.0")), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', active_if=TEST_SCIPY and version.parse(scipy.__version__) < version.parse("1.4.0")), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small', active_if=TEST_SCIPY and version.parse(scipy.__version__) < version.parse("1.4.0")), )), OpInfo("nn.functional.smooth_l1_loss", ref=reference_smooth_l1_loss, sample_inputs_func=sample_inputs_smooth_l1_loss, dtypes=floating_types_and(torch.float16, torch.bfloat16), backward_dtypes=floating_types_and(torch.bfloat16), dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), backward_dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( # RuntimeError: input->type()->kind() == TypeKind::OptionalTypeINTERNAL ASSERT FAILED # at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":270, please report a bug to PyTorch. DecorateInfo(unittest.expectedFailure, "TestJit", "test_variant_consistency_jit"),)), OpInfo( "nn.functional.l1_loss", ref=loss_reference_reduction_wrapper(lambda input, target: np.abs(input - target)), sample_inputs_func=sample_inputs_l1_loss, error_inputs_func=error_inputs_l1_loss, dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( # RuntimeError: input->type()->kind() == TypeKind::OptionalTypeINTERNAL ASSERT FAILED # at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":270, please report a bug to PyTorch. 
DecorateInfo( unittest.expectedFailure, "TestJit", "test_variant_consistency_jit", dtypes=(torch.float32,), ), ), ), UnaryUfuncInfo('lgamma', ref=reference_lgamma if TEST_SCIPY else None, aliases=('special.gammaln', ), decorators=(precisionOverride({torch.float16: 7e-1}),), dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), dtypesIfCUDA=all_types_and(torch.bool, torch.half, torch.bfloat16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, promotes_int_to_float=True, skips=( # Reference: https://github.com/pytorch/pytorch/pull/50140#issuecomment-756150214 DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', dtypes=[torch.float32, torch.float64], active_if=IS_WINDOWS), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', dtypes=[torch.float32, torch.float64], active_if=IS_WINDOWS), ), # lgamma have multiple singularities at x <= 0 reference_numerics_filter=NumericsFilter(condition=lambda x: x < 0.1, safe_val=1)), OpInfo( 'logdet', dtypes=floating_and_complex_types(), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_linalg_det_logdet_slogdet, decorators=[skipCUDAIfNoMagma, skipCPUIfNoLapack]), # `log_softmax` supports different dtypes based on whether `dtype` argument, # is passed or not. Hence two OpInfo entries, one with dtype and other without. OpInfo( 'log_softmax', aliases=('special.log_softmax', 'nn.functional.log_softmax'), supports_out=True, aten_backward_name='_log_softmax_backward_data', dtypes=floating_types_and(torch.float16, torch.bfloat16), sample_inputs_func=sample_inputs_softmax_variant, supports_forward_ad=True, supports_fwgrad_bwgrad=True, assert_autodiffed=True), OpInfo( 'log_softmax', variant_test_name='with_dtype', aliases=('special.log_softmax', 'nn.functional.log_softmax'), supports_out=True, dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), sample_inputs_func=partial(sample_inputs_softmax_variant, with_dtype=True), supports_forward_ad=True, supports_fwgrad_bwgrad=True, assert_autodiffed=True), UnaryUfuncInfo('logit', aten_backward_name='logit_backward', ref=scipy.special.logit if TEST_SCIPY else None, domain=(0, 1), aliases=('special.logit', ), supports_forward_ad=True, supports_fwgrad_bwgrad=True, promotes_int_to_float=True, decorators=(precisionOverride({torch.bfloat16: 5e-1, torch.float16: 5e-1}),), dtypes=all_types_and(torch.bool, torch.half, torch.bfloat16), sample_inputs_func=sample_inputs_logit), OpInfo('where', # Currently only the `input` is tested in gradcheck. # If we pass `condition` first, none of the input which supports # autograd will be tested. Hence the following lambda. 
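           # i.e. the wrapper below evaluates torch.where(condition, self, other) with
           # `self` (the differentiable tensor) exposed as SampleInput.input, so
           # gradcheck exercises an argument that actually requires grad.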
op=lambda self, condition, other, **kwargs: torch.where(condition, self, other, **kwargs), ref=lambda self, condition, other: np.where(condition, self, other), sample_inputs_func=sample_inputs_where, reference_inputs_func=reference_inputs_where, error_inputs_func=error_inputs_where, supports_forward_ad=True, supports_fwgrad_bwgrad=True, decorators=( DecorateInfo(onlyCUDA, "TestCommon", 'test_errors'),), skips=( # lambda impl DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), ), dtypes=all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.chalf)), OpInfo('nonzero', dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), sample_inputs_func=sample_inputs_nonzero, supports_autograd=False, skips=( DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), # nonzero(): argument 'out' must be Tensor, not tuple DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), # https://github.com/pytorch/pytorch/issues/67458 DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), # nonzero is not raising a warning when the out is resized DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), # Can't find schemas for this operator for some reason DecorateInfo(unittest.expectedFailure, 'TestOperatorSignatures', 'test_get_torch_func_signature_exhaustive'), # Compiler issue on ROCm. Might need to skip until ROCm5.5 DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_non_standard_bool_values', dtypes=[torch.bool], active_if=TEST_WITH_ROCM), )), OpInfo('nonzero_static', dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16, torch.chalf), sample_inputs_func=sample_inputs_nonzero_static, supports_out=False, supports_autograd=False, decorators=[onlyCPU], skips=( DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out_warning'), DecorateInfo(unittest.expectedFailure, 'TestDTensorOps', 'test_dtensor_op_db'), DecorateInfo(unittest.expectedFailure, 'TestInductorOpInfo', 'test_comprehensive'), DecorateInfo(unittest.expectedFailure, 'TestVmapOperatorsOpInfo', 'test_op_has_batch_rule'), DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_non_standard_bool_values', dtypes=[torch.bool], active_if=TEST_WITH_ROCM), )), # Following tests are for jiterator's python interface # Jiterator can be used to author elementwise CUDA kernel # jiterator._create_jit_fn returns a callable that behaves like a regular pytorch op # See create_jit_fn in jiterator.py for more information UnaryUfuncInfo( 'jiterator_unary', op=torch.cuda.jiterator._create_jit_fn("template <typename T> T unary(T x) { return x * x + x; }"), ref=lambda x: x * x + x, dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16, torch.bool), supports_out=False, supports_autograd=False, # jiterator ops doesn't have backward defined decorators=[ onlyCUDA, DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}), 'TestUnaryUfuncs', 'test_reference_numerics_extremal'), DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}), 'TestUnaryUfuncs', 'test_reference_numerics_hard'), DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}), 'TestUnaryUfuncs', 'test_reference_numerics_normal'), 
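        # Illustrative usage sketch for the jiterator interface exercised by these
        # jiterator_* entries (not part of the OpInfo database; assumes a CUDA
        # device is available):
        #   fn = torch.cuda.jiterator._create_jit_fn(
        #       "template <typename T> T unary(T x) { return x * x + x; }")
        #   out = fn(torch.rand(8, device='cuda'))  # elementwise, same as x * x + x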
DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-02)}), 'TestUnaryUfuncs', 'test_reference_numerics_small'), ], skips=( # Jiterator ops doesn't support neg or conj view DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), # Jiterator ops doesn't support CompositeCompliantTensor # Following test should expectedFailure, but it's causing cascading failures in CUDA, thus skipped DecorateInfo(unittest.skip("skip"), 'TestCompositeCompliance', 'test_operator'), # Skip reference_numerics tests for bool type, as the defined function doesn't work for bool DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', dtypes=[torch.bool]), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_hard', dtypes=[torch.bool]), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal', dtypes=[torch.bool]), # ROCm generates -inf+infj instead of nan+infj for complex64 for some of the results DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', dtypes=[torch.complex64], active_if=TEST_WITH_ROCM), # Expected failure: torch.jiterator_unary is not a valid op DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), # Skip Nvfuser DecorateInfo(unittest.skip('Skipped!'), 'TestCudaFuserOpInfo'), ) ), BinaryUfuncInfo( 'jiterator_binary', op=torch.cuda.jiterator._create_jit_fn( "template <typename T> T binary(T x, T y, T alpha) { return x + alpha * y; }", alpha=1), ref=lambda input, other, *, alpha=1: np.add(input, other) if alpha == 1 \ else np.add(input, np.multiply(alpha, other)), dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16, torch.bool), sample_inputs_func=partial(sample_inputs_jiterator, num_inputs=2, alpha=-3.14), supports_out=False, supports_autograd=False, # jiterator ops doesn't have backward defined supports_rhs_python_scalar=False, decorators=[onlyCUDA], skips=( # Jiterator ops doesn't support neg or conj view DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), # Jiterator ops doesn't support CompositeCompliantTensor # Following test should expectedFailure, but it's causing cascading failures in CUDA, thus skipped DecorateInfo(unittest.skip("skip"), 'TestCompositeCompliance', 'test_operator'), # Expected failure: torch.jiterator_binary is not a valid op DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), # Skip Nvfuser DecorateInfo(unittest.skip('Skipped!'), 'TestCudaFuserOpInfo'), ) ), OpInfo( 'jiterator_4inputs_with_extra_args', op=torch.cuda.jiterator._create_jit_fn( "template <typename T> T binary(T i0, T i1, T i2, T i3, T alpha, T beta) { return alpha * i0 + beta * i1 + i2 + i3; }", alpha=1, beta=1), ref=lambda i0, i1, i2, i3, *, alpha=1, beta=1: alpha * i0 + beta * i1 + i2 + i3, dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16, torch.bool), sample_inputs_func=partial(sample_inputs_jiterator, num_inputs=4, alpha=3.14, beta=-4.20), supports_out=False, supports_autograd=False, # jiterator ops doesn't have backward defined decorators=[onlyCUDA], skips=( # Jiterator ops doesn't support neg or conj view 
DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), # Jiterator ops doesn't support CompositeCompliantTensor # Following test should expectedFailure, but it's causing cascading failures in CUDA, thus skipped DecorateInfo(unittest.skip("skip"), 'TestCompositeCompliance', 'test_operator'), # Expected failure: torch.jiterator_4inputs_with_extra_args is not a valid op DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), # Skip Nvfuser DecorateInfo(unittest.skip('Skipped!'), 'TestCudaFuserOpInfo'), ) ), BinaryUfuncInfo( 'jiterator_binary_return_by_ref', op=torch.cuda.jiterator._create_multi_output_jit_fn( """ template <typename T> void binary_return_by_ref(T i0, T i1, T& out0) { out0 = i0 + i1; } """, num_outputs=1), ref=operator.add, dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16, torch.bool), sample_inputs_func=partial(sample_inputs_jiterator, num_inputs=2, alpha=-0.42), supports_out=False, supports_autograd=False, # jiterator ops doesn't have backward defined supports_rhs_python_scalar=False, decorators=[onlyCUDA], skips=( # Jiterator ops doesn't support neg or conj view DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), # Jiterator ops doesn't support CompositeCompliantTensor # Following test should expectedFailure, but it's causing cascading failures in CUDA, thus skipped DecorateInfo(unittest.skip("skip"), 'TestCompositeCompliance', 'test_operator'), # Expected failure: torch.jiterator_4inputs_with_extra_args is not a valid op DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), # Skip Nvfuser DecorateInfo(unittest.skip('Skipped!'), 'TestCudaFuserOpInfo'), ) ), OpInfo( 'jiterator_2inputs_2outputs', op=torch.cuda.jiterator._create_multi_output_jit_fn( """ template <typename T> void binary_2outputs(T i0, T i1, T& out0, T& out1) { out0 = i0 + i1; out1 = i0 - i1; } """, num_outputs=2), ref=lambda i0, i1, *, alpha=1: (i0 + i1, i0 - i1), dtypes=all_types_and_complex_and(torch.bfloat16, torch.float16, torch.bool), sample_inputs_func=partial(sample_inputs_jiterator, num_inputs=2), supports_out=False, supports_autograd=False, # jiterator ops doesn't have backward defined decorators=[onlyCUDA], skips=( # Jiterator ops doesn't support neg or conj view DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), # Jiterator ops doesn't support CompositeCompliantTensor # Following test should expectedFailure, but it's causing cascading failures in CUDA, thus skipped DecorateInfo(unittest.skip("skip"), 'TestCompositeCompliance', 'test_operator'), # Expected failure: torch.jiterator_4inputs_with_extra_args is not a valid op DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), # Skip Nvfuser DecorateInfo(unittest.skip('Skipped!'), 'TestCudaFuserOpInfo'), ) ), # `torch.norm` has multiple code paths depending on the value of `p`. # These paths have different dtype support. Also JIT supports, # most variants but not all of them. 
So we split the OpInfo entries, # for `norm` based on the code-paths and JIT support. OpInfo( "norm", sample_inputs_func=sample_inputs_norm, dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16, torch.chalf), dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16), # TODO Benchmark again with the new implementation # Runs very slowly on slow gradcheck - alternatively reduce input sizes gradcheck_fast_mode=True, check_batched_forward_grad=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( # Dispatches in Python to vector_norm. Not sure how to make this test happy # Happens to pass on complex64. Also a mystery DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)),) ), OpInfo('norm', variant_test_name='nuc', sample_inputs_func=sample_inputs_norm_nuc, decorators=[skipCUDAIfNoMagmaAndNoCusolver, skipCPUIfNoLapack], check_batched_gradgrad=False, # torch.autograd.gradcheck.GradcheckError: While computing batched gradients # got: Could not allocate memory to change Tensor SizesAndStrides! check_batched_forward_grad=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, dtypes=floating_and_complex_types(), dtypesIfCUDA=floating_and_complex_types(), skips=( # Dispatches in Python to matrix_norm. Not sure how to make this test happy DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.complex64, torch.float32,)),) ), OpInfo('norm', variant_test_name='fro', sample_inputs_func=sample_inputs_norm_fro, dtypes=floating_and_complex_types_and(torch.bfloat16, torch.float16), dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16), supports_forward_ad=True, # torch.autograd.gradcheck.GradcheckError: While computing batched gradients # got: Could not allocate memory to change Tensor SizesAndStrides! check_batched_forward_grad=False, supports_fwgrad_bwgrad=True, skips=( # MPS has some mild accuracy issues for float16. We divide the tolerances by 10 DecorateInfo( toleranceOverride({torch.float16: tol(atol=1e-4, rtol=0.01)}), 'TestConsistency', 'test_output_match', ), # Issue with conj and torch dispatch, see https://github.com/pytorch/pytorch/issues/82479 DecorateInfo( unittest.skip("Skipped!"), 'TestSchemaCheckModeOpInfo', 'test_schema_correctness', dtypes=(torch.complex64, torch.complex128)), # Dispatches in Python to vector_norm. Not sure how to make this test happy DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.complex64, torch.float32,)),) ), OpInfo( "norm", variant_test_name="inf", sample_inputs_func=sample_inputs_norm_inf, dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16, torch.chalf), dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16), supports_forward_ad=True, check_batched_forward_grad=False, supports_fwgrad_bwgrad=True, # fast gradcheck produces NaNs gradcheck_fast_mode=False, skips=( DecorateInfo( toleranceOverride({torch.float16: tol(atol=2e-3, rtol=1e-3)}), 'TestInductorOpInfo', 'test_comprehensive', device_type='cuda', ), # Dispatches in Python to vector_norm. Not sure how to make this test happy # Happens to pass on complex64. 
Also a mystery DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit', dtypes=(torch.float32,)) ), ), OpInfo('t', sample_inputs_func=sample_inputs_t, supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # See https://github.com/pytorch/pytorch/pull/78358 check_batched_forward_grad=False, # vmap does not support inplace views check_inplace_batched_forward_grad=False, autodiff_fusible_nodes=[], # aliases inputs, shouldn't be fused autodiff_nonfusible_nodes=[], # aliases inputs, shouldn't be fused dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), assert_autodiffed=True, error_inputs_func=error_inputs_t), OpInfo('t_copy', sample_inputs_func=sample_inputs_t, supports_out=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # See https://github.com/pytorch/pytorch/pull/78358 check_batched_forward_grad=False, # vmap does not support inplace views check_inplace_batched_forward_grad=False, autodiff_fusible_nodes=[], # aliases inputs, shouldn't be fused autodiff_nonfusible_nodes=[], # aliases inputs, shouldn't be fused dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), assert_autodiffed=True, error_inputs_func=error_inputs_t), OpInfo( "nn.functional.dropout", op=lambda input, *args, **kwargs: wrapper_set_seed(torch.nn.functional.dropout, input, *args, **kwargs), dtypes=floating_types_and(torch.float16, torch.bfloat16), skips=( DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), # Probably because we have used lambda for the op here # AssertionError: JIT Test does not execute any logic DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), # inplace variant dispatches to dropout kernel, while on CUDA # the op dispatches to _fused_dropout (with a few more conditions) # hence, different values and this skip here DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view', device_type='cuda'), DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu')), supports_forward_ad=True, supports_fwgrad_bwgrad=True, # https://github.com/pytorch/pytorch/issues/66357 check_batched_forward_grad=False, supports_out=False, sample_inputs_func=sample_inputs_dropout, inplace_variant=lambda input, *args, **kwargs: wrapper_set_seed(torch.nn.functional.dropout, input, *args, **kwargs, inplace=True)), OpInfo( "native_dropout_backward", op=torch.ops.aten.native_dropout_backward.default, aten_name="native_dropout_backward", dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), dtypesIfCUDA=floating_types_and(torch.float16, torch.bfloat16), supports_out=False, sample_inputs_func=sample_inputs_dropout_backward, skips=( DecorateInfo(unittest.skip('Skipped!'), 'TestJit', 'test_variant_consistency_jit'), # Lazy tensor failures DecorateInfo(unittest.skip('Skipped!'), 'TestLazyOpInfo', 'test_dispatched_to_lazy'), # These tests fail only when built with ASAN DecorateInfo(unittest.skip("Fails with ASAN"), 'TestLazyOpInfo', 'test_correctness', active_if=TEST_WITH_ASAN), DecorateInfo( unittest.skip("Fails with ASAN"), 'TestLazyOpInfo', 'test_correctness_with_reusing_ir', active_if=TEST_WITH_ASAN ), ), ), OpInfo( "nn.functional.dropout2d", op=lambda input, *args, **kwargs: wrapper_set_seed(torch.nn.functional.dropout2d, input, *args, **kwargs), dtypes=floating_types_and(torch.float16, torch.bfloat16), skips=( # lambda impl DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 
'test_normalize_operator_exhaustive'), DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu')), supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False, check_batched_forward_grad=False, # As per the docs, valid input dims are (3, 4) sample_inputs_func=partial(sample_inputs_dropout, valid_input_dim=(3, 4)), inplace_variant=lambda input, *args, **kwargs: wrapper_set_seed(torch.nn.functional.dropout2d, input, *args, **kwargs, inplace=True)), OpInfo( "nn.functional.dropout3d", op=lambda input, *args, **kwargs: wrapper_set_seed(torch.nn.functional.dropout3d, input, *args, **kwargs), dtypes=floating_types_and(torch.float16, torch.bfloat16), skips=( # lambda impl DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu')), supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False, check_batched_forward_grad=False, # As per the docs, valid input dims are (4, 5) sample_inputs_func=partial(sample_inputs_dropout, valid_input_dim=(4, 5)), inplace_variant=lambda input, *args, **kwargs: wrapper_set_seed(torch.nn.functional.dropout3d, input, *args, **kwargs, inplace=True)), OpInfo( "nn.functional.alpha_dropout", op=lambda input, *args, **kwargs: wrapper_set_seed(torch.nn.functional.alpha_dropout, input, *args, **kwargs), dtypes=floating_types_and(torch.float16, torch.bfloat16), gradcheck_wrapper=wrapper_set_seed, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False, sample_inputs_func=sample_inputs_dropout, check_batched_forward_grad=False, inplace_variant=lambda input, *args, **kwargs: wrapper_set_seed(torch.nn.functional.alpha_dropout, input, *args, **kwargs, inplace=True), skips=( # lambda impl DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), # AssertionError: Tensor-likes are not close! # Fails in cuda11.7 # Error Log: https://github.com/pytorch/pytorch/actions/runs/3440108478/jobs/5738475757 DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_compare_cpu', device_type='cuda'), DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),),), # In training mode, feature_alpha_dropout currently doesn't support inputs of complex dtype # unlike when `train=False`, it supports complex inputs, hence 2 OpInfos to cover all cases OpInfo( "nn.functional.feature_alpha_dropout", op=lambda input, *args, **kwargs: wrapper_set_seed(torch.nn.functional.feature_alpha_dropout, input, *args, **kwargs), variant_test_name="with_train", dtypes=floating_types_and(torch.float16, torch.bfloat16), skips=( # lambda impl DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), # torch.autograd.gradcheck.GradcheckError: While computing batched gradients, got: # vmap: We do not yet support calling random operations inside of vmap. 
# Please perform random operations outside of vmap as a workaround DecorateInfo(unittest.expectedFailure, 'TestFwdGradients', "test_forward_mode_AD"), DecorateInfo(unittest.expectedFailure, 'TestFwdGradients', "test_inplace_forward_mode_AD"), DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu')), # Runs very slowly on slow gradcheck - alternatively reduce input sizes gradcheck_fast_mode=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False, # As per the docs, valid input dims are (4, 5) sample_inputs_func=partial(sample_inputs_dropout, train=True, valid_input_dim=(4, 5)), inplace_variant=lambda input, *args, **kwargs: wrapper_set_seed(torch.nn.functional.feature_alpha_dropout, input, *args, **kwargs, inplace=True)), OpInfo( "nn.functional.feature_alpha_dropout", op=lambda input, *args, **kwargs: wrapper_set_seed(torch.nn.functional.feature_alpha_dropout, input, *args, **kwargs), variant_test_name="without_train", dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), skips=( # lambda impl DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'),), gradcheck_wrapper=wrapper_set_seed, supports_forward_ad=True, supports_fwgrad_bwgrad=True, supports_out=False, sample_inputs_func=partial(sample_inputs_dropout, train=False), inplace_variant=lambda input, *args, **kwargs: wrapper_set_seed(torch.nn.functional.feature_alpha_dropout, input, *args, **kwargs, inplace=True)), OpInfo( "nn.functional.one_hot", ref=reference_one_hot, supports_out=False, dtypes=_dispatch_dtypes((torch.int64,)), sample_inputs_func=sample_inputs_one_hot, ), OpInfo( "nn.functional.embedding", aten_backward_name="embedding_dense_backward", # We use lambda to reshuffle the positional arguments. # This is because currently only the `input` field of SampleInput # is tested in gradient tests. op=lambda weight, idx, **kwargs: torch.nn.functional.embedding(idx, weight, **kwargs), dtypes=floating_types_and(torch.bfloat16, torch.float16), sample_inputs_func=sample_inputs_embedding, allow_cow_input_materialize_forward=[0], error_inputs_func=error_inputs_embedding, supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( # lambda impl DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), # Fails on CI https://github.com/pytorch/pytorch/issues/85377 DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_compare_cpu'), # Reference: https://github.com/pytorch/pytorch/issues/67084 DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view', device_type='cuda'), # Not a problem: embedding does weird stuff to its input (it renormalizes) DecorateInfo(unittest.skip('Allowed exemption'), 'TestCompositeCompliance', 'test_operator'), # Fails due to non-determinism (see issue #74679) # TODO: Investigate why more granular skips in the test don't work in CI DecorateInfo(unittest.skip('Skipped!'), 'TestExpandedWeightFunctional', 'test_expanded_weight_forward'), ), supports_expanded_weight=True, supports_out=False, ), OpInfo( "nn.functional.embedding_bag", # We use lambda to reshuffle the positional arguments. # This is because currently only the `input` field of SampleInput # is tested in gradient tests. 
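    # i.e. the sample's `input` is the weight matrix (the tensor that requires
    # grad); the wrapper forwards it as the second positional argument of
    # torch.nn.functional.embedding_bag(indices, weight, ...).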
op=lambda weight, idx, **kwargs: torch.nn.functional.embedding_bag(idx, weight, **kwargs), dtypes=floating_types_and(torch.bfloat16, torch.float16), dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.float16), # backward is not supported for mode `max` and dtype `bfloat16` backward_dtypesIfCUDA=floating_types_and(torch.float16), sample_inputs_func=sample_inputs_embedding_bag, skips=( # lambda impl DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), DecorateInfo(unittest.expectedFailure, 'TestNormalizeOperators', 'test_normalize_operator_exhaustive'), # Not a problem: embedding_bag does weird stuff to its input (it renormalizes) DecorateInfo(unittest.skip('Allowed exemption'), 'TestCompositeCompliance', 'test_operator'), ), gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, supports_out=False, supports_gradgrad=False, allow_cow_input_materialize_forward=[0], ), OpInfo( "nn.functional.multi_head_attention_forward", op=lambda input, *args, **kwargs: wrapper_set_seed(torch.nn.functional.multi_head_attention_forward, input, *args, **kwargs), dtypes=floating_types_and(torch.bfloat16, torch.float16), sample_inputs_func=sample_inputs_multi_head_attention_forward, skips=( # Tensor-likes are not close DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_noncontiguous_samples', dtypes=(torch.float32,)), DecorateInfo(toleranceOverride({torch.float32: tol(atol=5e-3, rtol=0)}), 'TestDecomp', 'test_comprehensive'), # TODO skip this for now since we can't skip on runtime arch support (taken from scaled_dot_product_attention) DecorateInfo(unittest.skip("Skipped!"), 'TestInductorOpInfo', 'test_comprehensive'), # randomness DecorateInfo(unittest.skip("Skipped!"), 'TestFwdGradients', 'test_forward_mode_AD'), DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), # lambda impl # AssertionError: JIT Test does not execute any logic DecorateInfo(unittest.expectedFailure, 'TestJit', 'test_variant_consistency_jit'), DecorateInfo(unittest.expectedFailure, "TestNormalizeOperators", "test_normalize_operator_exhaustive"), # tests running very slowly break slow tests, so we skip them instead of using `slowTest`. 
DecorateInfo(unittest.skip("Skipped!"), 'TestCompositeCompliance', 'test_forward_ad'), DecorateInfo(unittest.skip("Skipped!"), 'TestCompositeCompliance', 'test_operator'), DecorateInfo( unittest.skip("Skipped - baddbmm decomp does not have enough precision for 16-bit float"), 'TestDecomp', 'test_comprehensive', dtypes=(torch.bfloat16, torch.float16), ), DecorateInfo( unittest.skip("Skipped - baddbmm decomp does not have enough precision for 16-bit float"), 'TestDecomp', 'test_quick', dtypes=(torch.bfloat16, torch.float16))), supports_out=False, supports_gradgrad=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # Runs very slowly on slow gradcheck - alternatively reduce input sizes gradcheck_fast_mode=True, ), UnaryUfuncInfo( "nn.functional.softplus", aten_backward_name='softplus_backward', ref=reference_softplus, sample_kwargs=lambda device, dtype, input: ({'beta': 3, 'threshold': .2}, {'beta': 3, 'threshold': .2}), sample_inputs_func=partial(sample_inputs_elementwise_unary, op_kwargs={'beta': 3, 'threshold': .2}), supports_forward_ad=True, supports_fwgrad_bwgrad=True, dtypes=floating_types_and(torch.bfloat16, torch.float16), decorators=( DecorateInfo( toleranceOverride ({ torch.half: tol(atol=1e-2, rtol=1e-2), torch.bfloat16: tol(atol=1e-2, rtol=1e-2), }), 'TestUnaryUfuncs'), ), ), OpInfo( "nn.functional.mse_loss", aten_backward_name='mse_loss_backward', ref=loss_reference_reduction_wrapper(lambda input, target: (input - target) ** 2), sample_inputs_func=sample_inputs_loss, supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, dtypes=floating_types_and(torch.float16, torch.bfloat16), skips=( # RuntimeError: input->type()->kind() == TypeKind::OptionalType # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":252, # please report a bug to PyTorch. DecorateInfo(unittest.expectedFailure, "TestJit", "test_variant_consistency_jit", dtypes=(torch.float32,),), ), ), OpInfo( "nn.functional.grid_sample", dtypes=floating_types_and(torch.float16, torch.bfloat16), supports_out=False, sample_inputs_func=sample_inputs_grid_sample, reference_inputs_func=reference_inputs_grid_sample, supports_gradgrad=False, gradcheck_nondet_tol=1e-15), # TODO: delete this OpInfo once we add meta support for grid_sampler_3d OpInfo( "grid_sampler_2d", dtypes=floating_types_and(torch.float16, torch.bfloat16), supports_out=False, sample_inputs_func=sample_inputs_grid_sampler_2d, supports_gradgrad=False, gradcheck_nondet_tol=1e-15, skips=( DecorateInfo(slowTest, 'TestDecomp', 'test_comprehensive', dtypes=(torch.float32, torch.float64), active_if=IS_WINDOWS), ),), OpInfo( "argwhere", ref=np.argwhere, dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), supports_out=False, supports_autograd=False, sample_inputs_func=sample_inputs_argwhere, skips=( # Compiler issue on ROCm. 
Might need to skip until ROCm5.5 DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_non_standard_bool_values', dtypes=[torch.bool], active_if=TEST_WITH_ROCM), ), ), ReductionOpInfo( 'all', identity=True, supports_autograd=False, result_dtype=torch.bool, dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), ref=reference_reduction_numpy(np.all), skips=( # FIXME: uint8 input returns uint8 instead of bool DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_result_dtype', dtypes=[torch.uint8]), ), ), ReductionOpInfo( 'any', identity=False, supports_autograd=False, result_dtype=torch.bool, dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), ref=reference_reduction_numpy(np.any), skips=( # FIXME: uint8 input returns uint8 instead of bool DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_result_dtype', dtypes=[torch.uint8]), ), ), ReductionOpInfo( 'amax', nan_policy='propagate', supports_forward_ad=True, check_batched_forward_grad=False, supports_fwgrad_bwgrad=True, dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), ref=reference_reduction_numpy(np.amax), skips=( # FIXME: reduces all dimensions when dim=[] DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty'), DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'), ), error_inputs_func=error_inputs_aminmax_amax_amin, ), ReductionOpInfo( 'amin', nan_policy='propagate', supports_forward_ad=True, check_batched_forward_grad=False, supports_fwgrad_bwgrad=True, dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), ref=reference_reduction_numpy(np.amin), skips=( # FIXME: reduces all dimensions when dim=[] DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty'), DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'), ), error_inputs_func=error_inputs_aminmax_amax_amin, ), ReductionOpInfo( 'argmax', supports_multiple_dims=False, supports_autograd=False, assert_jit_shape_analysis=True, result_dtype=torch.int64, dtypes=all_types_and(torch.float16, torch.bfloat16), ref=reference_reduction_numpy(np.argmax, supports_keepdims=False), ), ReductionOpInfo( 'argmin', supports_multiple_dims=False, supports_autograd=False, result_dtype=torch.int64, dtypes=all_types_and(torch.float16, torch.bfloat16), ref=reference_reduction_numpy(np.argmin, supports_keepdims=False), ), ReductionOpInfo( 'count_nonzero', identity=0, supports_out=False, supports_autograd=False, result_dtype=torch.int64, dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), sample_inputs_func=sample_inputs_reduction_count_nonzero, ref=reference_reduction_numpy(np.count_nonzero), skips=( # FIXME: count_nonzero does not accept keepdim kwarg DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_default_keepdim'), DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none_keepdim'), DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_single_keepdim'), DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'), DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_multi_keepdim'), DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_multi_unsorted_keepdim'), DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_offbounds_keepdim'), # FIXME: dim=[] reduces all dimensions DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'), ), ), ReductionOpInfo( 'mean', 
nan_policy='propagate', supports_forward_ad=True, supports_fwgrad_bwgrad=True, # FIXME: mean needs 'dim' parameter when using the 'out' overload. # Adding it with 'generate_args_kwargs' does not work, since these also get passed # onto the reference implementations. supports_out=True, assert_autodiffed=True, assert_jit_shape_analysis=True, promotes_int_to_float=True, dtypes=floating_and_complex_types_and(torch.float16, torch.bfloat16), ref=reference_reduction_numpy(np.mean), error_inputs_func=error_inputs_mean, skips=( # AssertionError: RuntimeError not raised : Expected RuntimeError when doing an unsafe cast from a result # of dtype torch.float32 into an out= with dtype torch.long DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_out', device_type='cuda', dtypes=[torch.float32]), # FIXME: mean does not support passing keepdim without passing dim DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_default_keepdim'), # FIXME: mean reduces all dimensions when dim=[] DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'), DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'), # FIXME: improve precision DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input', dtypes=[torch.float16]), DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_extremal_values', device_type='cuda', dtypes=[torch.complex64]), ), ), ReductionOpInfo( 'nanmean', nan_policy='omit', assert_autodiffed=True, promotes_int_to_float=True, supports_forward_ad=True, check_batched_forward_grad=False, supports_fwgrad_bwgrad=True, dtypes=floating_types_and(torch.float16, torch.bfloat16), dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16, torch.chalf), sample_inputs_func=sample_inputs_nan_reduction(supports_multiple_dims=True), ref=reference_reduction_numpy(np.nanmean), skips=( # AssertionError: False is not true : # Failure in testing nodes' autodifferentiation. 
DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), # FIXME: prod reduces all dimensions when dim=[] DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'), DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'), # FIXME: improve precision DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input', dtypes=[torch.float16]), DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_duplicate_values', device_type='cuda', dtypes=[torch.float16]), DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_extremal_values', device_type='cuda', dtypes=[torch.complex64]), ), ), ReductionOpInfo( 'std', nan_policy='propagate', supports_out=True, complex_to_real=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, assert_autodiffed=True, promotes_int_to_float=True, check_batched_forward_grad=False, dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16), dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16), sample_inputs_func=sample_inputs_std_var, ref=reference_std_var(np.std), generate_args_kwargs=generate_std_var_kwargs, skips=( # FIXME: cannot specify keepdim without dim DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_default_keepdim'), # FIXME: dim=[] reduces all dimensions DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'), DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'), # FIXME: improve precision DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input', dtypes=(torch.float16,)), DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_duplicate_values', dtypes=(torch.float16,)), ), ), ReductionOpInfo( 'std', variant_test_name='unbiased', nan_policy='propagate', supports_out=False, complex_to_real=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, assert_autodiffed=True, promotes_int_to_float=True, check_batched_forward_grad=False, dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16), dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16), sample_inputs_func=sample_inputs_std_var_unbiased, skips=( # FIXME: dim=[] reduces all dimensions DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'), DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'), ), ), ReductionOpInfo( 'var', nan_policy='propagate', supports_out=True, assert_autodiffed=True, promotes_int_to_float=True, complex_to_real=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, check_batched_forward_grad=False, dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16), dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16), sample_inputs_func=sample_inputs_std_var, ref=reference_std_var(np.var), generate_args_kwargs=generate_std_var_kwargs, skips=( # FIXME: cannot specify keepdim without dim DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_default_keepdim'), # FIXME: dim=[] reduces all dimensions DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'), DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'), # FIXME: improve precision DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input'), DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_duplicate_values'), # NumPy is giving NaN for this DecorateInfo(unittest.skip("Skipped!"), 
'TestReductions', 'test_ref_large_input'), ), ), ReductionOpInfo( 'var', variant_test_name='unbiased', nan_policy='propagate', supports_out=False, complex_to_real=True, supports_forward_ad=True, supports_fwgrad_bwgrad=True, assert_autodiffed=True, promotes_int_to_float=True, check_batched_forward_grad=False, dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16), dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16), sample_inputs_func=sample_inputs_std_var_unbiased, skips=( # FIXME: dim=[] reduces all dimensions DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'), DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'), ), ), ReductionOpInfo( 'prod', identity=1, nan_policy='propagate', supports_multiple_dims=False, # https://github.com/pytorch/pytorch/issues/80411 gradcheck_fast_mode=True, supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, promotes_int_to_int64=True, gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16), dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), sample_inputs_func=sample_inputs_prod, ref=prod_numpy, skips=( # FIXME: prod does not support passing keepdim without passing dim DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_default_keepdim'), # FIXME: prod reduces all dimensions when dim=[] DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'), DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'), # FIXME: prod does not support passing None to dim DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none'), DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none_keepdim'), DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input', dtypes=[torch.float16, torch.complex64]), DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_duplicate_values', dtypes=[torch.uint8, torch.float16, torch.complex64]), # FIXME: ValueError: The data in MaskedTensor a and Tensor b do not match DecorateInfo(unittest.skip("Skipped!"), 'TestOperators', 'test_reduction_all', dtypes=[torch.float16]), ), ), ReductionOpInfo( 'sum', identity=0, nan_policy='propagate', supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, promotes_int_to_int64=True, dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), ref=reference_reduction_numpy(np.sum), error_inputs_sparse_func=error_inputs_sparse_reduction_sum, sample_inputs_sparse_coo_func=partial(sample_inputs_sparse_reduction_sum, layout=torch.sparse_coo), sample_inputs_sparse_csr_func=partial(sample_inputs_sparse_reduction_sum, layout=torch.sparse_csr), sample_inputs_sparse_csc_func=partial(sample_inputs_sparse_reduction_sum, layout=torch.sparse_csc), sample_inputs_sparse_bsr_func=partial(sample_inputs_sparse_reduction_sum, layout=torch.sparse_bsr), sample_inputs_sparse_bsc_func=partial(sample_inputs_sparse_reduction_sum, layout=torch.sparse_bsc), skips=( # FIXME: sum does not support passing keepdim without passing dim DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_default_keepdim'), # FIXME: sum reduces all dimensions when dim=[] DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'), DecorateInfo(unittest.skip("Skipped!"), 
'TestReductions', 'test_dim_empty_keepdim'), # FIXME: improve precision DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input', dtypes=[torch.float16]), DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_duplicate_values', dtypes=[torch.float16]), DecorateInfo(unittest.skip("Skipped!"), 'TestOperators', 'test_reduction_all', dtypes=[torch.float32]), ), ), ReductionOpInfo( 'nansum', identity=0, nan_policy='omit', supports_out=True, promotes_int_to_int64=True, supports_forward_ad=True, check_batched_forward_grad=False, supports_fwgrad_bwgrad=True, dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16), dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), sample_inputs_func=sample_inputs_nan_reduction(supports_multiple_dims=True), ref=reference_reduction_numpy(np.nansum), skips=( # please report a bug to PyTorch. DecorateInfo(unittest.skip("Skipped!"), 'TestJit', 'test_variant_consistency_jit'), # FIXME: nansum reduces all dimensions when dim=[] DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty'), DecorateInfo(unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'), # FIXME: flaky test so skipped instead of xfailed # possibly bad low precision reference in numpy DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input', dtypes=[torch.float16]), ), ), OpInfo( "nn.functional.ctc_loss", dtypes=floating_types(), supports_out=False, sample_inputs_func=sample_inputs_ctc_loss, skips=( # https://github.com/pytorch/pytorch/issues/67462 # torch.autograd.gradcheck.GradcheckError: Jacobian mismatch for output 0 with respect to input 0 DecorateInfo( unittest.expectedFailure, "TestBwdGradients", "test_fn_grad", dtypes=(torch.float64,), ), # RuntimeError: derivative for aten::_ctc_loss_backward is not implemented DecorateInfo( unittest.expectedFailure, "TestBwdGradients", "test_fn_gradgrad", dtypes=(torch.float64,), ), # RuntimeError: derivative for aten::_ctc_loss_backward is not implemented DecorateInfo( unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit", dtypes=(torch.float32,), ), # Ref: https://github.com/pytorch/pytorch/issues/85231 DecorateInfo(unittest.skip("Fails with ASAN"), 'TestProxyTensorOpInfo', 'test_make_fx_fake_exhaustive', active_if=TEST_WITH_ASAN), ), ), OpInfo( "nn.functional.cosine_embedding_loss", dtypes=all_types_and(torch.half, torch.bfloat16, torch.bool), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, decorators=[ DecorateInfo( toleranceOverride({torch.float16: tol(atol=1e-4, rtol=2e-3)}), 'TestInductorOpInfo', 'test_comprehensive', device_type="cuda", ), ], sample_inputs_func=sample_inputs_cosine_embedding_loss, ), OpInfo( "nn.functional.nll_loss", dtypes=floating_types_and(torch.float16, torch.bfloat16), supports_out=False, sample_inputs_func=sample_inputs_nll_loss, supports_forward_ad=True, supports_fwgrad_bwgrad=True, assert_jit_shape_analysis=True, skips=( # RuntimeError: # undefined value tensor: # File "<string>", line 3 # def the_method(i0, i1): # return torch.nn.functional.nll_loss(i0, i1, weight=tensor([8.4784, 1.7658, 4.3228], dtype=torch.float32)) # ~~~~~~ <--- HERE DecorateInfo(unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit", dtypes=(torch.float32,),), # Fails for unknown reason: https://github.com/pytorch/pytorch/issues/120782 DecorateInfo( unittest.skip("Skipped!"), "TestCompositeCompliance", "test_cow_input", device_type='cuda', ), 
DecorateInfo(unittest.skip("FP16 nll_loss cases have not been enabled on MPS yet"), dtypes=(torch.half,), device_type="mps"), ), ), OpInfo( "nn.functional.gaussian_nll_loss", dtypes=floating_types_and(torch.half, torch.bfloat16), # Runs very slowly on slow gradcheck - alternatively reduce input sizes gradcheck_fast_mode=True, supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_gaussian_nll_loss, error_inputs_func=error_inputs_gaussian_nll_loss, skips=( # Pre-existing condition (calls .item); needs to be fixed DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_backward'), DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_forward_ad'), # Pre-existing condition (calls .item); needs to be fixed DecorateInfo(unittest.expectedFailure, 'TestCompositeCompliance', 'test_operator'), # JIT does not support variadic tensors. # RuntimeError: input->type()->kind() == TypeKind::OptionalType # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":270, # please report a bug to PyTorch. DecorateInfo(unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit", dtypes=(torch.float32,),), ), ), OpInfo( "nn.functional.hinge_embedding_loss", dtypes=floating_types_and(torch.half, torch.bfloat16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_hinge_embedding_loss, error_inputs_func=error_inputs_hinge_embedding_loss, reference_inputs_func=reference_inputs_hinge_embedding_loss, ), OpInfo( "nn.functional.huber_loss", aten_backward_name='huber_loss_backward', dtypes=floating_types_and(torch.float16, torch.bfloat16), supports_out=False, supports_forward_ad=True, sample_inputs_func=sample_inputs_huber_loss, error_inputs_func=error_inputs_huber_loss, skips=( # JIT does not support variadic tensors. # RuntimeError: input->type()->kind() == TypeKind::OptionalType # INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":270, # please report a bug to PyTorch. 
DecorateInfo(unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit", dtypes=(torch.float32,),), ) ), OpInfo( "nn.functional.pdist", ref=reference_pdist, sample_inputs_func=sample_inputs_pdist, dtypes=floating_types(), supports_out=False, supports_gradgrad=False, skips=( DecorateInfo(unittest.skip("Unsupported on MPS for now"), 'TestCommon', 'test_numpy_ref_mps'), ) ), OpInfo( "nn.functional.poisson_nll_loss", dtypes=all_types_and(torch.half, torch.bfloat16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_poisson_nll_loss, error_inputs_func=error_inputs_poisson_nll_loss, ), OpInfo( "argsort", dtypes=all_types_and(torch.bool, torch.float16, torch.bfloat16), dtypesIfCUDA=all_types_and(torch.bool, torch.float16, torch.bfloat16), sample_inputs_func=sample_inputs_sort, supports_out=False, supports_autograd=False, skips=( DecorateInfo( unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit", dtypes=(torch.float32,), ), DecorateInfo( unittest.expectedFailure, "TestCommon", "test_non_standard_bool_values", dtypes=[torch.bool], device_type='cuda', active_if=not TEST_WITH_ROCM ), ), ), OpInfo( "repeat_interleave", dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16, torch.chalf), backward_dtypesIfCUDA=floating_and_complex_types_and(torch.float16, torch.bfloat16, torch.chalf), sample_inputs_func=sample_inputs_repeat_interleave, supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # See https://github.com/pytorch/pytorch/pull/78358 check_batched_forward_grad=False, skips=( DecorateInfo( unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit", dtypes=(torch.float32, torch.complex64), ), ), ), OpInfo( "nn.functional.pairwise_distance", ref=lambda a, b, p=2.0, eps=1e-6, keepdim=False: ( np.sum(np.abs(a - b + eps) ** p, axis=-1, keepdims=keepdim) ** (1 / p) ), sample_inputs_func=sample_inputs_pairwise_distance, dtypes=all_types_and_complex_and(torch.float16, torch.bfloat16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( DecorateInfo( unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit", dtypes=(torch.float32, torch.complex64), ), ), ), OpInfo( "nn.functional.pixel_shuffle", sample_inputs_func=sample_inputs_pixel_shuffle, dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( DecorateInfo( unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit", dtypes=(torch.float32, torch.complex64), ), ), ), OpInfo( "nn.functional.pixel_unshuffle", sample_inputs_func=sample_inputs_pixel_unshuffle, dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, skips=( DecorateInfo( unittest.skip("Skipped!"), "TestJit", "test_variant_consistency_jit", dtypes=(torch.float32, torch.complex64), ), ), ), OpInfo( "nn.functional.channel_shuffle", sample_inputs_func=sample_inputs_channel_shuffle, dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, allow_cow_input_materialize_forward=[0], allow_cow_input_materialize_backward=[0, 'output grad 0'], skips=( # Skip due to NotImplementedError for MPS device. 
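# Illustrative sketch (hypothetical helper, not an op database entry): the `ref` lambda
# on nn.functional.pairwise_distance above encodes (sum(|a - b + eps|**p, dim=-1))**(1/p).
# A minimal check of the torch op against that NumPy formula:
def _sketch_pairwise_distance_reference():
    import numpy as np
    import torch
    import torch.nn.functional as F
    a = torch.randn(4, 3, dtype=torch.float64)
    b = torch.randn(4, 3, dtype=torch.float64)
    p, eps = 2.0, 1e-6
    ref = np.sum(np.abs(a.numpy() - b.numpy() + eps) ** p, axis=-1) ** (1 / p)
    out = F.pairwise_distance(a, b, p=p, eps=eps)
    torch.testing.assert_close(out, torch.from_numpy(ref))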
DecorateInfo(unittest.expectedFailure, 'TestConsistency'), DecorateInfo(unittest.expectedFailure, 'TestDTensorOps', 'test_dtensor_op_db'), DecorateInfo(unittest.expectedFailure, "TestMeta", "test_dispatch_symbolic_meta_outplace_all_strides"), ), ), OpInfo( "nn.functional.kl_div", sample_inputs_func=sample_inputs_kl_div, dtypes=floating_types_and(torch.float16, torch.bfloat16), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, ), OpInfo( "diagflat", ref=lambda input, offset=0: np.diagflat(input, k=offset), sample_inputs_func=sample_inputs_diagflat, dtypes=all_types_and_complex_and(torch.bool, torch.bfloat16, torch.float16), dtypesIfCUDA=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), supports_out=False, supports_forward_ad=True, supports_fwgrad_bwgrad=True, # See https://github.com/pytorch/pytorch/pull/78358 check_batched_forward_grad=False, ), OpInfo( 'scatter_reduce', variant_test_name='sum', # complex not added to dtypes as complex gradients are not properly handled # and scatter_reduce hasn't been added to the whitelist in gen_variable_type yet dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_scatter_reduce, ), OpInfo( 'scatter_reduce', variant_test_name='prod', # complex not added to dtypes as complex gradients are not properly handled # and scatter_reduce hasn't been added to the whitelist in gen_variable_type yet dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16), sample_inputs_func=sample_inputs_scatter_reduce, skips=( # Not implemented DecorateInfo(unittest.expectedFailure, 'TestFwdGradients', 'test_forward_mode_AD'), DecorateInfo(unittest.expectedFailure, 'TestFwdGradients', 'test_inplace_forward_mode_AD'), DecorateInfo(unittest.expectedFailure, 'TestFwdGradients', 'test_fn_fwgrad_bwgrad'), ), ), OpInfo( 'scatter_reduce', variant_test_name='mean', # complex not added to dtypes as complex gradients are not properly handled # and scatter_reduce hasn't been added to the whitelist in gen_variable_type yet dtypes=all_types_and(torch.float16, torch.bfloat16), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16), dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16), supports_forward_ad=True, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_scatter_reduce, ), OpInfo( 'scatter_reduce', variant_test_name='amin', dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16), supports_forward_ad=True, check_batched_forward_grad=False, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_scatter_reduce, ), OpInfo( 'scatter_reduce', variant_test_name='amax', dtypes=all_types_and(torch.float16, torch.bfloat16, torch.bool), dtypesIfCUDA=all_types_and(torch.float16, torch.bfloat16), dtypesIfHpu=custom_types(torch.float32, torch.bfloat16), supports_forward_ad=True, check_batched_forward_grad=False, supports_fwgrad_bwgrad=True, sample_inputs_func=sample_inputs_scatter_reduce, ), OpInfo( '_segment_reduce', aten_name='segment_reduce', variant_test_name='lengths', dtypes=floating_types_and(torch.float16, torch.bfloat16), supports_out=False, # RuntimeError: derivative for aten::_segment_reduce_backward is not implemented 
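# Illustrative sketch (hypothetical helper, not an op database entry): the scatter_reduce
# variants registered above ('sum', 'prod', 'mean', 'amin', 'amax') correspond to the
# `reduce=` argument of Tensor.scatter_reduce. A small worked example:
def _sketch_scatter_reduce_variants():
    import torch
    src = torch.tensor([1.0, 2.0, 3.0, 4.0])
    index = torch.tensor([0, 0, 1, 1])
    base = torch.zeros(2)
    out_sum = base.scatter_reduce(0, index, src, reduce="sum")                        # tensor([3., 7.])
    out_amax = base.scatter_reduce(0, index, src, reduce="amax", include_self=False)  # tensor([2., 4.])
    return out_sum, out_amax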
        supports_gradgrad=False,
        sample_inputs_func=sample_inputs_segment_reduce,
        skips=(
            # FIXME: CUDA driver API confirmed a leak in
            # __main__.TestJitCUDA.test_variant_consistency_jit_segment_reduce_cuda_float32
            DecorateInfo(
                unittest.skip("Skipped!"),
                "TestJit",
                "test_variant_consistency_jit",
                device_type="cuda",
            ),
        ),
    ),
    OpInfo(
        '_segment_reduce',
        aten_name='segment_reduce',
        variant_test_name='offsets',
        dtypes=floating_types_and(torch.float16, torch.bfloat16),
        supports_out=False,
        # RuntimeError: derivative for aten::_segment_reduce_backward is not implemented
        supports_gradgrad=False,
        sample_inputs_func=partial(sample_inputs_segment_reduce, mode='offsets'),
        skips=(
            # FIXME: CUDA driver API confirmed a leak in
            # __main__.TestJitCUDA.test_variant_consistency_jit_segment_reduce_cuda_float32
            DecorateInfo(
                unittest.skip("Skipped!"),
                "TestJit",
                "test_variant_consistency_jit",
                device_type="cuda",
            ),
        ),
    ),
]
op_db += opinfo.definitions.op_db

# Separate registry for experimental Python Reference OpInfos.
python_ref_db = [
    #
    # Elementwise Unary OpInfos
    #
    ElementwiseUnaryPythonRefInfo(
        "_refs.abs",
        torch_opinfo_name="abs",
        skips=(
            # Reference: https://github.com/pytorch/pytorch/issues/49224
            DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs',
                         'test_reference_numerics_small',
                         dtypes=[torch.int8], active_if=TEST_WITH_ASAN),
        ),
    ),
    ElementwiseUnaryPythonRefInfo(
        "_refs.acos",
        torch_opinfo_name="acos",
        skips=(
            DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs',
                         'test_reference_numerics_normal',
                         device_type='cuda', dtypes=[torch.cdouble],
                         active_if=IS_WINDOWS),
            DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs',
                         'test_reference_numerics_extremal',
                         device_type='cuda', dtypes=[torch.cdouble],
                         active_if=IS_WINDOWS),
            # Failing with wrong imaginary sign on at least some Windows jobs
            DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs',
                         'test_reference_numerics_small',
                         device_type='cuda', dtypes=[torch.cdouble],
                         active_if=IS_WINDOWS),
            # Failing with wrong imaginary sign on at least some Windows jobs
            DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs',
                         'test_reference_numerics_large',
                         device_type='cuda', dtypes=[torch.cdouble],
                         active_if=IS_WINDOWS),
        )
    ),
    ElementwiseUnaryPythonRefInfo(
        "_refs.acosh",
        torch_opinfo_name="acosh",
        skips=(
            DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs',
                         'test_reference_numerics_normal',
                         device_type='cuda', dtypes=[torch.cdouble],
                         active_if=IS_WINDOWS),
            DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs',
                         'test_reference_numerics_extremal',
                         device_type='cuda', dtypes=[torch.cdouble],
                         active_if=IS_WINDOWS),
            DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs',
                         'test_reference_numerics_extremal',
                         device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
            DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs',
                         'test_reference_numerics_large',
                         device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]),
            DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs',
                         'test_reference_numerics_extremal',
                         device_type='cuda', dtypes=[torch.cdouble],
                         active_if=IS_WINDOWS),
            DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs',
                         'test_reference_numerics_large',
                         device_type='cuda', dtypes=[torch.cdouble],
                         active_if=IS_WINDOWS),
            # Failing with wrong imaginary sign on at least some Windows jobs
            DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs',
                         'test_reference_numerics_small',
                         device_type='cuda', dtypes=[torch.cdouble],
                         active_if=IS_WINDOWS),
        ),
    ),
    ElementwiseUnaryPythonRefInfo(
        "_refs.asin",
        torch_opinfo_name="asin",
        decorators=[
            DecorateInfo(
toleranceOverride({torch.float16: tol(atol=1e-05, rtol=1e-03)}), 'TestUnaryUfuncs', device_type='cuda'), DecorateInfo( toleranceOverride({torch.complex64: tol(atol=5e-05, rtol=2e-05)}), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cpu' ), precisionOverride({torch.bfloat16: 1e-2}), ], skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS), ), ), ElementwiseUnaryPythonRefInfo( "_refs.asinh", torch_opinfo_name="asinh", decorators=(precisionOverride({torch.bfloat16: 5e-2}),), skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', device_type='cuda', dtypes=[torch.cdouble], active_if=IS_WINDOWS), ), ), PythonRefInfo( "_refs.lerp", torch_opinfo_name="lerp", ), PythonRefInfo( "_refs.ones", torch_opinfo_name="ones", skips=( # Tests that assume input is a tensor or sequence of tensors DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), ), ), PythonRefInfo( "_refs.zeros", torch_opinfo_name="zeros", skips=( # Tests that assume input is a tensor or sequence of tensors DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), ), ), PythonRefInfo( "_refs.cauchy", torch_opinfo_name="cauchy", decorators=( # TODO: RuntimeError: no _refs support for torch.rand_like DecorateInfo(unittest.skip("TODO: RuntimeError: no _refs support for torch.rand_like"), 'TestCommon', 'test_python_ref'), # AssertionError: Tensor-likes are not close! 
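# Illustrative sketch (hypothetical helper, not an op database entry): each
# PythonRefInfo / ElementwiseUnaryPythonRefInfo above pairs a torch._refs implementation
# with the OpInfo of the torch op named by torch_opinfo_name, and the test_python_ref*
# tests compare the two. A minimal eager comparison:
def _sketch_python_ref_comparison():
    import torch
    import torch._refs as refs
    x = torch.tensor([-1.5, 0.0, 0.5])
    torch.testing.assert_close(refs.abs(x), torch.abs(x))
    torch.testing.assert_close(refs.asin(x.clamp(-1, 1)), torch.asin(x.clamp(-1, 1)))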
DecorateInfo(unittest.skip("Expected: cauchy is not comparable"), 'TestCommon', 'test_out'), DecorateInfo(unittest.skip("Expected: cauchy is not comparable"), 'TestCommon', 'test_out_warning'), DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_executor'), DecorateInfo(unittest.skip("Expected: cauchy is not comparable"), 'TestCommon', 'test_python_ref_torch_fallback'), DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), ) ), PythonRefInfo( "_refs.exponential", torch_opinfo_name="exponential", supports_out=True, decorators=( # dtypes that do not support check_uniform_bounds of rand_like DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_meta', dtypes=(torch.int8, torch.uint8, torch.int16, torch.int32, torch.int64)), DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_dtypes'), # TODO: RuntimeError: no _refs support for torch.rand_like DecorateInfo(unittest.skip("TODO: RuntimeError: no _refs support for torch.rand_like"), 'TestCommon', 'test_python_ref'), # AssertionError: Tensor-likes are not close! DecorateInfo(unittest.skip("Expected: exponential is not comparable"), 'TestCommon', 'test_out'), DecorateInfo(unittest.skip("Expected: exponential is not comparable"), 'TestCommon', 'test_out_warning'), DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_executor'), DecorateInfo(unittest.skip("Expected: exponential is not comparable"), 'TestCommon', 'test_python_ref_torch_fallback'), DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), ) ), PythonRefInfo( "_refs.geometric", torch_opinfo_name="geometric", supports_out=True, decorators=( # dtypes that do not support check_uniform_bounds of rand_like DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_dtypes'), DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_meta', dtypes=(torch.int8, torch.uint8, torch.int16, torch.int32, torch.int64)), DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_torch_fallback', dtypes=(torch.int8, torch.uint8, torch.int16, torch.int32, torch.int64)), # TODO: RuntimeError: no _refs support for torch.rand_like DecorateInfo(unittest.skip("TODO: RuntimeError: no _refs support for torch.rand_like"), 'TestCommon', 'test_python_ref'), DecorateInfo(unittest.skip("Expected: geometric is not comparable"), 'TestCommon', 'test_python_ref_executor', device_type='cuda'), # AssertionError: Tensor-likes are not close! 
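# Illustrative sketch (hypothetical helper, not an op database entry): cauchy,
# exponential, geometric, log_normal and normal draw random samples, so their refs
# cannot be compared elementwise against the torch ops (hence the "is not comparable"
# skips above); outputs agree only when the generator state is identical.
def _sketch_random_op_nondeterminism():
    import torch
    torch.manual_seed(0)
    a = torch.empty(3).exponential_()
    torch.manual_seed(0)
    b = torch.empty(3).exponential_()
    torch.testing.assert_close(a, b)  # equal only because the seed was reset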
DecorateInfo(unittest.skip("Expected: geometric is not comparable"), 'TestCommon', 'test_out'), DecorateInfo(unittest.skip("Expected: geometric is not comparable"), 'TestCommon', 'test_out_warning'), DecorateInfo(unittest.skip("Expected: geometric is not comparable"), 'TestCommon', 'test_python_ref_torch_fallback'), DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), ) ), PythonRefInfo( "_refs.log_normal", torch_opinfo_name="log_normal", supports_out=True, decorators=( # TODO: RuntimeError: no _refs support for torch.rand_like DecorateInfo(unittest.skip("TODO: RuntimeError: no _refs support for torch.rand_like"), 'TestCommon', 'test_python_ref'), DecorateInfo(unittest.skip("Expected: log_normal is not comparable"), 'TestCommon', 'test_python_ref_executor', device_type='cuda'), # AssertionError: Tensor-likes are not close! DecorateInfo(unittest.skip("Expected: log_normal is not comparable"), 'TestCommon', 'test_out'), DecorateInfo(unittest.skip("Expected: log_normal is not comparable"), 'TestCommon', 'test_out_warning'), DecorateInfo(unittest.skip("Expected: log_normal is not comparable"), 'TestCommon', 'test_python_ref_torch_fallback'), DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), ) ), PythonRefInfo( "_refs.normal", torch_opinfo_name="normal", supports_out=True, decorators=( # TODO: RuntimeError: no _refs support for torch.rand_like DecorateInfo(unittest.skip("TODO: RuntimeError: no _refs support for torch.rand_like"), 'TestCommon', 'test_python_ref'), # AssertionError: Tensor-likes are not close! DecorateInfo(unittest.skip("Expected: normal is not comparable"), 'TestCommon', 'test_out'), DecorateInfo(unittest.skip("Expected: normal is not comparable"), 'TestCommon', 'test_out_warning'), DecorateInfo(unittest.skip("Expected: normal is not comparable"), 'TestCommon', 'test_python_ref_torch_fallback'), DecorateInfo(unittest.skip("Expected: normal is not comparable"), 'TestDecomp', 'test_comprehensive'), DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), DecorateInfo(unittest.skip("make_traced() doesn't set seed properly!"), 'TestCommon', 'test_python_ref_executor'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), ) ), PythonRefInfo( "_refs.normal", torch_opinfo_name="normal", torch_opinfo_variant_name="number_mean", supports_out=True, decorators=( # TODO: RuntimeError: no _refs support for torch.rand_like DecorateInfo(unittest.skip("TODO: RuntimeError: no _refs support for torch.rand_like"), 'TestCommon', 'test_python_ref'), # AssertionError: Tensor-likes are not close! 
DecorateInfo(unittest.skip("Expected: normal is not comparable"), 'TestCommon', 'test_out'), DecorateInfo(unittest.skip("Expected: normal is not comparable"), 'TestCommon', 'test_out_warning'), DecorateInfo(unittest.skip("Expected: normal is not comparable"), 'TestCommon', 'test_python_ref_torch_fallback'), DecorateInfo(unittest.skip("Expected: normal is not comparable"), 'TestDecomp', 'test_comprehensive'), DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), DecorateInfo(unittest.skip("make_traced() doesn't set seed properly!"), 'TestCommon', 'test_python_ref_executor'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), ) ), PythonRefInfo( "_refs.normal_", op=torch.Tensor.normal_, torch_opinfo_name="normal", torch_opinfo_variant_name="in_place", supports_out=False, decorators=( # TODO: RuntimeError: no _refs support for torch.rand_like DecorateInfo(unittest.skip("TODO: RuntimeError: no _refs support for torch.rand_like"), 'TestCommon', 'test_python_ref'), # AssertionError: Tensor-likes are not close! DecorateInfo(unittest.skip("Expected: normal is not comparable"), 'TestCommon', 'test_out'), DecorateInfo(unittest.skip("Expected: normal is not comparable"), 'TestCommon', 'test_out_warning'), DecorateInfo(unittest.skip("Expected: normal is not comparable"), 'TestCommon', 'test_python_ref_torch_fallback'), DecorateInfo(unittest.skip("Expected: normal is not comparable"), 'TestDecomp', 'test_comprehensive'), DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), DecorateInfo(unittest.skip("make_traced() doesn't set seed properly!"), 'TestCommon', 'test_python_ref_executor'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), ) ), PythonRefInfo( "_refs.arange", torch_opinfo_name="arange", skips=( # Tests that assume input is a tensor or sequence of tensors DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), ), ), PythonRefInfo( "_refs.linspace", torch_opinfo_name="linspace", skips=( # Tests that assume input is a tensor or sequence of tensors DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), # cpu implementation is wrong on some integral types # https://github.com/pytorch/pytorch/issues/81996 DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_torch_fallback', dtypes=(torch.int8, torch.uint8, torch.int16, torch.int32, torch.int64), device_type="cpu"), DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref', dtypes=(torch.int8, torch.uint8, torch.int16, torch.int32, torch.int64), device_type="cpu"), # cuda implementation is off-by-one on some inputs due to precision issues # https://github.com/pytorch/pytorch/issues/82230 DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_torch_fallback', dtypes=(torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64), device_type="cuda"), 
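# Illustrative sketch (hypothetical helper, not an op database entry): floating-point
# linspace is what the ref is validated against; the integer-dtype expectedFailures
# above track the truncation differences noted in the linked issues (#81996, #82230).
def _sketch_linspace_dtypes():
    import torch
    fp = torch.linspace(0.0, 1.0, steps=5)                    # tensor([0.00, 0.25, 0.50, 0.75, 1.00])
    ints = torch.linspace(0, 10, steps=4, dtype=torch.int64)  # intermediate values truncated toward zero
    return fp, ints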
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref', dtypes=(torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64), device_type="cuda"), DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_executor', dtypes=(torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64), device_type="cuda"), ), ), PythonRefInfo( "_refs.linspace", torch_opinfo_name="linspace", torch_opinfo_variant_name="tensor_overload", skips=( # TypeError: 'int' object is not subscriptable DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), # cpu implementation is wrong on some integral types # https://github.com/pytorch/pytorch/issues/81996 DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_torch_fallback', dtypes=(torch.int8, torch.uint8, torch.int16, torch.int32, torch.int64), device_type="cpu"), DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref', dtypes=(torch.int8, torch.uint8, torch.int16, torch.int32, torch.int64), device_type="cpu"), # cuda implementation is off-by-one on some inputs due to precision issues # https://github.com/pytorch/pytorch/issues/82230 DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_torch_fallback', dtypes=(torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64), device_type="cuda"), DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref', dtypes=(torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64), device_type="cuda"), # TODO torch.ops.aten.copy is not in _refs DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref', dtypes=(torch.float32, torch.float64, torch.float16, torch.complex64, torch.complex128, torch.bfloat16), device_type="cuda"), DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref', dtypes=(torch.float32, torch.float64, torch.float16, torch.complex64, torch.complex128, torch.bfloat16), device_type="cpu"), DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_executor', dtypes=(torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64), device_type="cuda"), ), ), PythonRefInfo( "_refs.logspace", torch_opinfo_name="logspace", skips=( # Tests that assume input is a tensor or sequence of tensors DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_conj_view'), # Off-by-one issue when casting floats to ints DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_torch_fallback', dtypes=(torch.int16, torch.int32, torch.int64), device_type="cuda"), DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref', dtypes=(torch.int16, torch.int32, torch.int64), device_type="cuda"), DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_executor', dtypes=(torch.int16, torch.int32, torch.int64), device_type="cuda"), ), ), PythonRefInfo( "_refs.logspace", torch_opinfo_name="logspace", torch_opinfo_variant_name="tensor_overload", skips=( # TypeError: 'int' object is not subscriptable DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_neg_view'), DecorateInfo(unittest.expectedFailure, 'TestMathBits', 'test_conj_view'), # Off-by-one issue when casting floats to ints DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_torch_fallback', dtypes=(torch.int16, torch.int32, torch.int64), 
device_type="cuda"), DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref', dtypes=(torch.int16, torch.int32, torch.int64), device_type="cuda"), DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_executor', dtypes=(torch.int16, torch.int32, torch.int64), device_type="cuda"), # TODO copy doesn't have prim refs DecorateInfo( unittest.expectedFailure, 'TestCommon', 'test_python_ref', dtypes=( torch.float32, torch.float64, torch.float16, torch.complex64, torch.complex128, torch.bfloat16, torch.int8, torch.uint8 ), device_type="cuda" ), DecorateInfo( unittest.expectedFailure, 'TestCommon', 'test_python_ref', dtypes=( torch.float32, torch.float64, torch.float16, torch.complex64, torch.complex128, torch.bfloat16, torch.int16, torch.int32, torch.int64, torch.int8, torch.uint8 ), device_type="cpu"), ), ), PythonRefInfo( "_refs.meshgrid", torch_opinfo_name="meshgrid", torch_opinfo_variant_name="variadic_tensors", ), PythonRefInfo( "_refs.take_along_dim", torch_opinfo_name="take_along_dim", skips=( DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref'), ), ), PythonRefInfo( "_refs.to", torch_opinfo_name="to", ), PythonRefInfo( "_refs.triu", torch_opinfo_name="triu", ), PythonRefInfo( "_refs.tril", torch_opinfo_name="tril", ), PythonRefInfo( "_refs.triu_indices", torch_opinfo_name="triu_indices", # the implementation uses torch.stack that violates view consistency validate_view_consistency=False, skips=( # skip these tests since we have non tensor input DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_noncontiguous_samples'), DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_variant_consistency_eager'), DecorateInfo(unittest.skip('Skipped!'), 'TestJit', 'test_variant_consistency_jit'), DecorateInfo(unittest.skip('Skipped!'), 'TestMathBits', 'test_neg_view'), )), PythonRefInfo( "_refs.tril_indices", torch_opinfo_name="tril_indices", # the implementation uses torch.stack that violates view consistency validate_view_consistency=False, skips=( # skip these tests since we have non tensor input DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_noncontiguous_samples'), DecorateInfo(unittest.skip('Skipped!'), 'TestCommon', 'test_variant_consistency_eager'), DecorateInfo(unittest.skip('Skipped!'), 'TestJit', 'test_variant_consistency_jit'), DecorateInfo(unittest.skip('Skipped!'), 'TestMathBits', 'test_neg_view'), )), PythonRefInfo( "_refs.meshgrid", torch_opinfo_name="meshgrid", torch_opinfo_variant_name="list_of_tensors", ), PythonRefInfo( "_refs.movedim", aliases=('moveaxis',), torch_opinfo_name="movedim", ), PythonRefInfo( "_refs.bucketize", torch_opinfo_name="bucketize", skips=( # RuntimeError: It appears that you're trying to get value out of a tracing tensor with # aten._local_scalar_dense.default - erroring out! [...] 
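# Illustrative sketch (hypothetical helper, not an op database entry): torch.bucketize
# returns, for each value, the index of the bucket it falls into given sorted
# boundaries; the binary search reads boundaries[mid], a data-dependent scalar access,
# which is what trips the tracing executor in the skip above.
def _sketch_bucketize():
    import torch
    boundaries = torch.tensor([1, 3, 5, 7, 9])
    values = torch.tensor([0, 4, 9, 10])
    return torch.bucketize(values, boundaries)  # tensor([0, 2, 4, 5])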
# triggered by mid_val = boundaries[mid] DecorateInfo(unittest.expectedFailure, "TestCommon", "test_python_ref_executor"), ) ), PythonRefInfo( "_refs.equal", torch_opinfo_name="equal", skips=( # RuntimeError: Cannot cast FakeTensor to number DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_meta',), ) ), ElementwiseUnaryPythonRefInfo( "_refs.atan", torch_opinfo_name="atan", decorators=(precisionOverride({torch.bfloat16: 1e-2}),), skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cuda', dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', device_type='cuda', dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS), ), ), ElementwiseUnaryPythonRefInfo( "_refs.atanh", torch_opinfo_name="atanh", decorators=(precisionOverride({torch.bfloat16: 1e-2}),), skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cuda', dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', device_type='cuda', dtypes=[torch.cfloat], active_if=IS_WINDOWS), ), ), ElementwiseUnaryPythonRefInfo( "_refs.bitwise_not", torch_opinfo_name="bitwise_not", ), ElementwiseUnaryPythonRefInfo( "_refs.ceil", torch_opinfo_name="ceil", # Fails on int32 # https://github.com/pytorch/pytorch/issues/85258 ), PythonRefInfo( "_refs.item", torch_opinfo_name="item", skips=( # RuntimeError: Cannot cast FakeTensor(FakeTensor(..., device='meta', size=()), cpu) to number DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_meta'), # ValueError: Can't convert a tensor with 10 elements to a number! 
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_errors'),), ), ElementwiseUnaryPythonRefInfo( "_refs.conj_physical", torch_opinfo_name="conj_physical", ), ElementwiseUnaryPythonRefInfo( "_refs.cos", torch_opinfo_name="cos", decorators=(precisionOverride({torch.bfloat16: 1e-2}),), skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', dtypes=(torch.cfloat, torch.cdouble,), device_type='cpu', active_if=IS_WINDOWS), # This fails on CUDA but passes on ROCm DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', dtypes=(torch.cdouble,), device_type='cuda'), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], active_if=IS_MACOS), # AssertionError: Tensor-likes are not close! # Greatest absolute difference: nan at index (700,) (up to 1e-05 allowed) # Greatest relative difference: nan at index (700,) (up to 0.001 allowed) DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs', 'test_reference_numerics_large', device_type='cuda', dtypes=(torch.chalf,), active_if=IS_WINDOWS), ), ), ElementwiseUnaryPythonRefInfo( "_refs.cosh", torch_opinfo_name="cosh", skips=( # Reference: https://github.com/pytorch/pytorch/issues/48641 DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', device_type='cpu', dtypes=[torch.int8]), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', dtypes=[torch.cdouble]), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], active_if=IS_MACOS), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], active_if=IS_MACOS), # AssertionError: Tensor-likes are not close! 
# Greatest absolute difference: nan at index (6000,) (up to 1e-05 allowed) # Greatest relative difference: nan at index (6000,) (up to 0.001 allowed) DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs', 'test_reference_numerics_large', device_type='cuda', dtypes=(torch.chalf,), active_if=IS_WINDOWS), ), ), ElementwiseUnaryPythonRefInfo( "_refs.digamma", torch_opinfo_name="digamma", ), ElementwiseUnaryPythonRefInfo( "_refs.erf", torch_opinfo_name="erf", ), ElementwiseUnaryPythonRefInfo( "_refs.erfinv", torch_opinfo_name="erfinv", decorators=(precisionOverride({torch.float16: 1e-2, torch.bfloat16: 1e-2, torch.float32: 1e-4}),), skips=( # Reference: https://github.com/pytorch/pytorch/pull/49155#issuecomment-742664611 DecorateInfo( unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', active_if=TEST_SCIPY and version.parse(scipy.__version__) < version.parse("1.4.0")), DecorateInfo( unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', active_if=TEST_SCIPY and version.parse(scipy.__version__) < version.parse("1.4.0")), DecorateInfo( unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small', active_if=TEST_SCIPY and version.parse(scipy.__version__) < version.parse("1.4.0")), ), ), ElementwiseUnaryPythonRefInfo( "_refs.erfc", torch_opinfo_name="erfc", ), ElementwiseUnaryPythonRefInfo( "_refs.exp", torch_opinfo_name="exp", skips=( # Reference: https://github.com/pytorch/pytorch/issues/48010 DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS), ), ), ElementwiseUnaryPythonRefInfo( "_refs.expm1", torch_opinfo_name="expm1", ), ElementwiseUnaryPythonRefInfo( "_refs.exp2", torch_opinfo_name="exp2", skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', dtypes=[torch.cdouble]), # Reference: https://github.com/pytorch/pytorch/issues/48010 DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS), ), ), ElementwiseUnaryPythonRefInfo( "_refs.fill", torch_opinfo_name="fill", supports_out=True, ), ElementwiseUnaryPythonRefInfo( "_refs.floor", torch_opinfo_name="floor", # Fails on int32 # https://github.com/pytorch/pytorch/issues/85258 ), ElementwiseUnaryPythonRefInfo( "_refs.frexp", torch_opinfo_name="frexp", # Skipped due to numerical failures on Windows CI. # This is also skipped in frexp earlier in the file. 
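# Illustrative sketch (hypothetical helper, not an op database entry): precisionOverride
# widens the allowed per-dtype error for the decorated tests, while active_if makes a
# DecorateInfo conditional on an environment check (above: the installed SciPy version).
# Assumes precisionOverride, DecorateInfo, unittest, torch, TEST_SCIPY, scipy and
# version are the names already imported by this module.
def _sketch_conditional_decorators():
    return (
        precisionOverride({torch.float16: 1e-2, torch.float32: 1e-4}),
        DecorateInfo(
            unittest.skip("Skipped!"),
            'TestUnaryUfuncs',
            'test_reference_numerics_extremal',
            active_if=TEST_SCIPY and version.parse(scipy.__version__) < version.parse("1.4.0"),
        ),
    )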
skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', active_if=IS_WINDOWS), ), ), ElementwiseUnaryPythonRefInfo( "_refs.frac", torch_opinfo_name="frac", skips=( DecorateInfo( unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', dtypes=(torch.bfloat16, torch.float16, torch.float32, torch.float64)), ), ), ElementwiseUnaryPythonRefInfo( "_refs.imag", torch_opinfo_name="imag", ), ElementwiseUnaryPythonRefInfo( "_refs.isfinite", torch_opinfo_name="isfinite", supports_out=True, ), ElementwiseUnaryPythonRefInfo( "_refs.isinf", torch_opinfo_name="isinf", supports_out=True, ), ElementwiseUnaryPythonRefInfo( "_refs.isposinf", torch_opinfo_name="isposinf", supports_out=True, ), ElementwiseUnaryPythonRefInfo( "_refs.isneginf", torch_opinfo_name="isneginf", supports_out=True, ), ElementwiseUnaryPythonRefInfo( "_refs.isnan", torch_opinfo_name="isnan", supports_out=True, ), ElementwiseUnaryPythonRefInfo( "_refs.isreal", torch_opinfo_name="isreal", supports_out=True, ), ElementwiseUnaryPythonRefInfo( "_refs.i0", torch_opinfo_name="i0", decorators=(precisionOverride({torch.bfloat16: 3e-1, torch.float16: 5e-1}),), skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', dtypes=(torch.int8,)), ), ), ElementwiseUnaryPythonRefInfo( "_refs.lgamma", torch_opinfo_name="lgamma", decorators=(precisionOverride({torch.float16: 7e-1}),), skips=( # Reference: https://github.com/pytorch/pytorch/pull/50140#issuecomment-756150214 DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', dtypes=[torch.float32, torch.float64], active_if=IS_WINDOWS), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', dtypes=[torch.float32, torch.float64], active_if=IS_WINDOWS), ), ), ElementwiseUnaryPythonRefInfo( "_refs.special.multigammaln", torch_opinfo_name="mvlgamma", torch_opinfo_variant_name="mvlgamma_p_1", skips=skips_mvlgamma(), decorators=( DecorateInfo(torch.testing._internal.common_utils.markDynamoStrictTest, 'TestUnaryUfuncs', 'test_reference_numerics_large'), DecorateInfo(torch.testing._internal.common_utils.xfailIfTorchDynamo, 'TestUnaryUfuncs', 'test_reference_numerics_large'), ), ), ElementwiseUnaryPythonRefInfo( "_refs.special.multigammaln", torch_opinfo_name="mvlgamma", torch_opinfo_variant_name="mvlgamma_p_3", skips=skips_mvlgamma(), ), ElementwiseUnaryPythonRefInfo( "_refs.special.multigammaln", torch_opinfo_name="mvlgamma", torch_opinfo_variant_name="mvlgamma_p_5", skips=skips_mvlgamma(), ), ElementwiseUnaryPythonRefInfo( "_refs.log", torch_opinfo_name="log", decorators=(precisionOverride({torch.bfloat16: 5e-2}),), skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS), ), ), ElementwiseUnaryPythonRefInfo( "_refs.log1p", torch_opinfo_name="log1p", ), ElementwiseUnaryPythonRefInfo( "_refs.log10", torch_opinfo_name="log10", decorators=(precisionOverride({torch.bfloat16: 5e-2}),), skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], active_if=IS_WINDOWS), ), ), ElementwiseUnaryPythonRefInfo( "_refs.log2", torch_opinfo_name="log2", decorators=(precisionOverride({torch.bfloat16: 1e-1}),), skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', 
dtypes=[torch.cfloat, torch.cdouble]), ), ), PythonRefInfo( "_refs.logsumexp", torch_opinfo_name="logsumexp", # When keepdim=False logsumexp function uses squeeze operation # that is not yet exposed in nvFuser's Python API. ), PythonRefInfo( "_refs.log_softmax", torch_opinfo_name="log_softmax", torch_opinfo_variant_name="with_dtype", ), ElementwiseUnaryPythonRefInfo( "_refs.nan_to_num", torch_opinfo_name="nan_to_num", ), ElementwiseUnaryPythonRefInfo( "_refs.neg", torch_opinfo_name="neg", ), ElementwiseUnaryPythonRefInfo( "_refs.positive", torch_opinfo_name="positive", ), ElementwiseUnaryPythonRefInfo( "_refs.real", torch_opinfo_name="real", ), ElementwiseUnaryPythonRefInfo( "_refs.reciprocal", torch_opinfo_name="reciprocal", skips=( # Reference: https://github.com/pytorch/pytorch/issues/45690 DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', dtypes=[torch.cfloat, torch.cdouble]), ), ), ElementwiseUnaryPythonRefInfo( "_refs.round", torch_opinfo_name="round", # Fails on int32 # https://github.com/pytorch/pytorch/issues/85258 skips=( DecorateInfo(toleranceOverride({torch.bfloat16: tol(atol=1e-3, rtol=0.016)}), "TestUnaryUfuncs", "test_reference_numerics_extremal", device_type="cuda"), DecorateInfo(toleranceOverride({torch.bfloat16: tol(atol=1e-3, rtol=0.016)}), "TestUnaryUfuncs", "test_reference_numerics_normal", device_type="cuda"), ), ), ElementwiseUnaryPythonRefInfo( "_refs.rsqrt", torch_opinfo_name="rsqrt", decorators=(precisionOverride({torch.half: 5e-2}),), skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', dtypes=(torch.cfloat, torch.cdouble)), # AssertionError: Tensor-likes are not close! # Greatest absolute difference: nan at index (700,) (up to 0.01 allowed) # Greatest relative difference: nan at index (700,) (up to 0.001 allowed) DecorateInfo(unittest.expectedFailure, 'TestUnaryUfuncs', 'test_reference_numerics_large', dtypes=(torch.chalf,)), ), ), ElementwiseUnaryPythonRefInfo( "_refs.sigmoid", torch_opinfo_name="sigmoid", aliases=('_refs.special.expit',), # Reference: https://github.com/pytorch/pytorch/issues/56012 handles_complex_extremal_values=False, handles_large_floats=False, decorators=(precisionOverride({torch.float16: 1e-2, torch.complex64: 1e-1, torch.bfloat16: 1e-2}),), skips=( # Reference: https://github.com/pytorch/pytorch/issues/56012 DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', dtypes=[torch.complex64, torch.cdouble], device_type='cuda'), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', dtypes=[torch.chalf, torch.complex64, torch.cdouble], device_type='cuda') ), ), ElementwiseUnaryPythonRefInfo( "_refs.sign", torch_opinfo_name="sign", skips=( # Reference: https://github.com/pytorch/pytorch/issues/41245 DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', dtypes=[torch.bfloat16, torch.float16, torch.float32, torch.float64]), ), ), ElementwiseUnaryPythonRefInfo( "_refs.sgn", torch_opinfo_name="sgn", # This is an issue with the vectorised abs on CPU handles_complex_extremal_values=False, handles_large_floats=False, skips=( # Reference: https://github.com/pytorch/pytorch/issues/41245 DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', dtypes=[torch.bfloat16, torch.float16, torch.float32, torch.float64]), ), ), ElementwiseUnaryPythonRefInfo( "_refs.signbit", torch_opinfo_name="signbit", ), 
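# Illustrative sketch (hypothetical helper, not an op database entry): the sigmoid ref
# above also registers the alias _refs.special.expit, mirroring torch.special.expit,
# which computes the same function as torch.sigmoid.
def _sketch_expit_alias():
    import torch
    x = torch.linspace(-3.0, 3.0, steps=7)
    torch.testing.assert_close(torch.special.expit(x), torch.sigmoid(x))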
ElementwiseUnaryPythonRefInfo( "_refs.sin", torch_opinfo_name="sin", decorators=(precisionOverride({torch.bfloat16: 1e-2}),), skips=( # Fails on CUDA but passes on ROCm DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', dtypes=(torch.cdouble,), device_type='cuda'), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', dtypes=(torch.cfloat, torch.cdouble,), device_type='cpu', active_if=IS_WINDOWS), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', dtypes=(torch.cfloat, torch.cdouble,), device_type='cpu', active_if=IS_WINDOWS), ), ), ElementwiseUnaryPythonRefInfo( "_refs.sinc", torch_opinfo_name="sinc", decorators=(precisionOverride({torch.bfloat16: 1e-2, torch.float16: 1e-2}),), skips=( # Reference: https://github.com/pytorch/pytorch/issues/49133 DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_small', dtypes=[torch.cfloat]), ), ), ElementwiseUnaryPythonRefInfo( "_refs.sinh", torch_opinfo_name="sinh", decorators=(precisionOverride({torch.float16: 1e-2}),), skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], active_if=(IS_MACOS or IS_WINDOWS)), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], active_if=(IS_MACOS or IS_WINDOWS)), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', dtypes=(torch.cdouble,)), # Reference: https://github.com/pytorch/pytorch/issues/48641 DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', device_type='cpu', dtypes=[torch.int8]), ), ), PythonRefInfo( "_refs.softmax", torch_opinfo_name="softmax", torch_opinfo_variant_name="with_dtype", ), ElementwiseUnaryPythonRefInfo( "_refs.sqrt", torch_opinfo_name="sqrt", decorators=( precisionOverride({torch.bfloat16: 7e-2}), DecorateInfo( toleranceOverride({torch.chalf: tol(atol=1e-2, rtol=0)}), 'TestUnaryUfuncs', 'test_reference_numerics_large'), ), skips=( # Reference: https://github.com/pytorch/pytorch/issues/47358 DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', device_type='cpu', dtypes=(torch.cfloat, torch.cdouble), active_if=IS_MACOS), # Reference: https://github.com/pytorch/pytorch/pull/47293#issuecomment-721774436 DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', dtypes=(torch.bfloat16,)), ), ), ElementwiseUnaryPythonRefInfo( "_refs.square", torch_opinfo_name="square", decorators=(precisionOverride({torch.complex64: 3e-4, torch.bfloat16: 3e-1}),), skips=( # AssertionError: Reference result was farther (2.2417024338305655e-07) from the precise computation DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_python_ref_executor', dtypes=(torch.complex64,)), # Reference: https://github.com/pytorch/pytorch/issues/52549 DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', dtypes=[torch.cfloat, torch.cdouble]), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cuda', dtypes=[torch.cfloat, torch.cdouble]), ), ), ElementwiseUnaryPythonRefInfo( "_refs.tan", torch_opinfo_name="tan", decorators=[ DecorateInfo( toleranceOverride({torch.complex64: tol(atol=1e-04, rtol=1e-05)}), 'TestUnaryUfuncs', 
'test_reference_numerics_extremal', device_type='cuda'), ], skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], active_if=(IS_MACOS or IS_WINDOWS)), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], active_if=(IS_MACOS or IS_WINDOWS)), ) ), ElementwiseUnaryPythonRefInfo( "_refs.tanh", torch_opinfo_name="tanh", decorators=[ DecorateInfo( toleranceOverride({torch.complex64: tol(atol=1e-04, rtol=2e-05)}), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cuda'), ], skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], active_if=(IS_MACOS or IS_WINDOWS)), DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble], active_if=(IS_MACOS or IS_WINDOWS)), ), ), ElementwiseUnaryPythonRefInfo( "_refs.trunc", torch_opinfo_name="trunc", # Fails on int32 # https://github.com/pytorch/pytorch/issues/85258 ), PythonRefInfo( "_refs.special.log_softmax", torch_opinfo_name="log_softmax", # alias torch_opinfo_variant_name="with_dtype", supports_out=False, ), PythonRefInfo( "_refs.special.softmax", torch_opinfo_name="softmax", # alias torch_opinfo_variant_name="with_dtype", supports_out=False, ), # # Elementwise Unary Special OpInfos # ElementwiseUnaryPythonRefInfo( "_refs.special.logit", torch_opinfo_name="logit", ), # # Elementwise Unary nn.functional OpInfos # PythonRefInfo( "_refs.nn.functional.alpha_dropout", torch_opinfo_name="nn.functional.alpha_dropout", decorators=( DecorateInfo(unittest.skip("Expected: dropout is not comparable"), 'TestCommon', 'test_python_ref'), # AssertionError: Tensor-likes are not close! DecorateInfo(unittest.skip("Expected: dropout is not comparable"), 'TestCommon', 'test_python_ref_torch_fallback'), DecorateInfo(unittest.skip("Expected: dropout is not comparable"), 'TestCommon', 'test_python_ref_executor', device_type='cuda'), # AssertionError: Tensor-likes are not close! DecorateInfo(unittest.skip("Expected: dropout is not comparable"), 'TestMathBits', 'test_neg_view'), # AssertionError: Tensor-likes are not close! 
DecorateInfo(unittest.skip("Expected: dropout is not comparable"), 'TestCommon', 'test_compare_cpu'), ) ), ElementwiseUnaryPythonRefInfo( "_refs.nn.functional.celu", torch_opinfo_name="nn.functional.celu", supports_out=True, ), PythonRefInfo( "_refs.nn.functional.channel_shuffle", torch_opinfo_name="nn.functional.channel_shuffle", supports_out=True, ), ElementwiseUnaryPythonRefInfo( "_refs.nn.functional.threshold", torch_opinfo_name="nn.functional.threshold", supports_out=True, ), PythonRefInfo( "_refs.nn.functional.dropout", torch_opinfo_name="nn.functional.dropout", decorators=( DecorateInfo(unittest.skip("Expected: dropout is not comparable"), 'TestCommon', 'test_python_ref'), DecorateInfo(unittest.skip("Expected: dropout is not comparable"), 'TestCommon', 'test_python_ref_torch_fallback'), DecorateInfo(unittest.skip("Expected: dropout is not comparable"), 'TestCommon', 'test_out'), DecorateInfo(unittest.skip("Expected: dropout is not comparable"), 'TestCommon', 'test_out_warning'), DecorateInfo(unittest.skip("Expected: dropout is not comparable"), 'TestMathBits', 'test_conj_view'), DecorateInfo(unittest.skip("Expected: dropout is not comparable"), 'TestMathBits', 'test_neg_conj_view'), DecorateInfo(unittest.skip("Expected: dropout is not comparable"), 'TestMathBits', 'test_neg_view'), # dropout is not comparable DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_executor'), DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), ) ), ElementwiseUnaryPythonRefInfo( "_refs.nn.functional.elu", torch_opinfo_name="nn.functional.elu", supports_out=True, decorators=[ DecorateInfo( toleranceOverride({ torch.float16: tol(atol=1e-03, rtol=1.2e-03), torch.bfloat16: tol(atol=1e-03, rtol=1.2e-03) }), 'TestUnaryUfuncs', device_type='cuda', ), ], ), ElementwiseUnaryPythonRefInfo( "_refs.nn.functional.hardtanh", torch_opinfo_name="nn.functional.hardtanh", supports_out=True, ), PythonRefInfo( # TODO: Port this to an UnaryOpInfo "_refs.nn.functional.gelu", torch_opinfo_name="nn.functional.gelu", ), PythonRefInfo( "_refs.nn.functional.layer_norm", torch_opinfo_name="nn.functional.layer_norm", skips=( # Reference result was farther (3.5762786809723224e-07) from the precise computation # than the torch result was (2.5068410824946596e-07)! DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_python_ref', dtypes=(torch.float32,), device_type='cpu'), ), ), PythonRefInfo( "_refs.nn.functional.glu", torch_opinfo_name="nn.functional.glu", supports_out=True, ), PythonRefInfo( "_refs.nn.functional.pairwise_distance", torch_opinfo_name="nn.functional.pairwise_distance", supports_out=True, ), PythonRefInfo( "_refs.nn.functional.pdist", torch_opinfo_name="nn.functional.pdist", supports_out=True, skips=( # RunTimeError: no _refs support for torch.Tensor.index_select DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref'), # Reference result was farther (1.946091651916504e-05) from the precise # computation than the torch result was (1.1920928955078125e-06)! 
DecorateInfo( unittest.expectedFailure, 'TestCommon', 'test_python_ref_torch_fallback', dtypes=(torch.float32,), device_type='cpu', ), )), PythonRefInfo( "_refs.nn.functional.leaky_relu", torch_opinfo_name="nn.functional.leaky_relu", supports_out=True, ), PythonRefInfo( "_refs.nn.functional.log_softmax", torch_opinfo_name="log_softmax", # alias torch_opinfo_variant_name="with_dtype", supports_out=False, ), PythonRefInfo( "_refs.nn.functional.pixel_shuffle", torch_opinfo_name="nn.functional.pixel_shuffle", ), PythonRefInfo( "_refs.nn.functional.pixel_unshuffle", torch_opinfo_name="nn.functional.pixel_unshuffle", ), PythonRefInfo( "_refs.nn.functional.poisson_nll_loss", torch_opinfo_name="nn.functional.poisson_nll_loss", ), ElementwiseUnaryPythonRefInfo( "_refs.nn.functional.prelu", torch_opinfo_name="nn.functional.prelu", ), ElementwiseUnaryPythonRefInfo( "_refs.nn.functional.relu", torch_opinfo_name="nn.functional.relu", supports_out=True, ), ElementwiseUnaryPythonRefInfo( "_refs.nn.functional.relu6", torch_opinfo_name="nn.functional.relu6", supports_out=True, ), ElementwiseUnaryPythonRefInfo( "_refs.nn.functional.mish", torch_opinfo_name="nn.functional.mish", supports_out=True, decorators=[ DecorateInfo( toleranceOverride({torch.float16: tol(atol=1e-02, rtol=1e-03)}), 'TestUnaryUfuncs',), ], ), ElementwiseUnaryPythonRefInfo( "_refs.nn.functional.selu", torch_opinfo_name="nn.functional.selu", supports_out=True, decorators=[ DecorateInfo( toleranceOverride({ torch.float16: tol(atol=1e-2, rtol=1.8e-2), torch.bfloat16: tol(atol=1e-2, rtol=1.8e-2) }), 'TestUnaryUfuncs', device_type='cuda', ), ], ), PythonRefInfo( "_refs.nn.functional.softmax", torch_opinfo_name="softmax", # alias torch_opinfo_variant_name="with_dtype", supports_out=False, ), PythonRefInfo( "_refs.nn.functional.softmin", torch_opinfo_name="nn.functional.softmin", torch_opinfo_variant_name="with_dtype", supports_out=False, ), ElementwiseUnaryPythonRefInfo( "_refs.nn.functional.softplus", torch_opinfo_name="nn.functional.softplus", ), PythonRefInfo( "_refs.nn.functional.l1_loss", torch_opinfo_name="nn.functional.l1_loss", ), PythonRefInfo( "_refs.nn.functional.margin_ranking_loss", torch_opinfo_name="nn.functional.margin_ranking_loss", ), PythonRefInfo( "_refs.nn.functional.mse_loss", torch_opinfo_name="nn.functional.mse_loss", ), PythonRefInfo( "_refs.nn.functional.smooth_l1_loss", torch_opinfo_name="nn.functional.smooth_l1_loss", ), PythonRefInfo( "_refs.nn.functional.hinge_embedding_loss", torch_opinfo_name="nn.functional.hinge_embedding_loss" ), PythonRefInfo( "_refs.nn.functional.nll_loss", torch_opinfo_name="nn.functional.nll_loss", # The corresponding PyTorch op doesn't support out. But the ref is # registered as a decomp and ATen has an out variant. supports_out=True, # For simpler indexing, we flatten target indices, then reshape the result tensor. # This creates inconsistent view state with reference impl. validate_view_consistency=False, skips=( # RuntimeError: It appears that you're trying to get value out of a tracing tensor - erroring out! DecorateInfo( unittest.expectedFailure, 'TestCommon', 'test_python_ref_executor', device_type="cuda" ), ), ), PythonRefInfo( "_refs.nn.functional.huber_loss", torch_opinfo_name="nn.functional.huber_loss", # The corresponding PyTorch op doesn't support out. But the ref is # registered as a decomp and ATen has an out variant. 
supports_out=True, ), ElementwiseUnaryPythonRefInfo( "_refs.nn.functional.tanhshrink", torch_opinfo_name="nn.functional.tanhshrink", decorators=[ DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_normal', device_type='cpu', dtypes=[torch.cfloat, torch.cdouble]), DecorateInfo( toleranceOverride({torch.bfloat16: tol(atol=1e-02, rtol=1.6e-02), torch.complex64: tol(atol=6e-04, rtol=1e-05)}), 'TestUnaryUfuncs', 'test_reference_numerics_extremal', device_type='cuda'), ], skips=( # in each case, pytorch will produce a nan while numpy will not DecorateInfo(unittest.skip("Fails on some jobs works on others!"), 'TestUnaryUfuncs', "test_reference_numerics_large", dtypes=(torch.complex64, torch.complex128), active_if=(IS_MACOS)), DecorateInfo(unittest.skip("Fails on some jobs works on others!"), 'TestUnaryUfuncs', "test_reference_numerics_extremal", dtypes=(torch.complex64, torch.complex128), device_type='cpu', active_if=(IS_MACOS or IS_WINDOWS)), ), ), ElementwiseUnaryPythonRefInfo( "_refs.nn.functional.hardshrink", torch_opinfo_name="nn.functional.hardshrink", ), ElementwiseUnaryPythonRefInfo( "_refs.nn.functional.softshrink", torch_opinfo_name="nn.functional.softshrink", ), # # Elementwise Binary Reference OpInfos # ElementwiseBinaryPythonRefInfo( "_refs.add", torch_opinfo_name="add", # https://github.com/pytorch/pytorch/issues/76944 supports_two_python_scalars=True, supports_one_python_scalar=True, decorators=( DecorateInfo( toleranceOverride({torch.chalf: tol(atol=1e-2, rtol=0)}), 'TestBinaryUfuncs', 'test_reference_numerics'), ), skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics_extremal_values', dtypes=(torch.complex64, torch.complex128)), ), ), ElementwiseBinaryPythonRefInfo( "_refs.atan2", torch_opinfo_name="atan2", ), ElementwiseBinaryPythonRefInfo( "_refs.bitwise_and", torch_opinfo_name="bitwise_and", ), ElementwiseBinaryPythonRefInfo( "_refs.bitwise_left_shift", torch_opinfo_name="bitwise_left_shift", skips=( # https://github.com/pytorch/pytorch/issues/70904 DecorateInfo(unittest.skip("Some inputs produce undefined outputs"), 'TestCommon', 'test_compare_cpu'), ), ), ElementwiseBinaryPythonRefInfo( "_refs.bitwise_right_shift", torch_opinfo_name="bitwise_right_shift", skips=( # # https://github.com/pytorch/pytorch/issues/70904 DecorateInfo(unittest.skip("Skipped some inputs produce undefined outputs"), 'TestCommon', 'test_compare_cpu'), ), ), ElementwiseBinaryPythonRefInfo( "_refs.bitwise_or", torch_opinfo_name="bitwise_or", ), ElementwiseBinaryPythonRefInfo( "_refs.bitwise_xor", torch_opinfo_name="bitwise_xor", ), ElementwiseBinaryPythonRefInfo( "_refs.copysign", torch_opinfo_name="copysign", skips=( # RuntimeError: Expected divisor (b) to be on the same device (cuda:0) as dividend (a), but it is found on cpu! 
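# Illustrative sketch (hypothetical helper, not an op database entry):
# supports_one_python_scalar / supports_two_python_scalars above mean the sample
# generators also exercise the ref with plain Python numbers as operands, not just
# tensors. A minimal one-scalar check:
def _sketch_ref_add_with_scalar():
    import torch
    import torch._refs as refs
    t = torch.tensor([1.0, 2.0])
    torch.testing.assert_close(refs.add(t, 3.0), torch.add(t, 3.0))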
DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_type_promotion'), # FIXME output 0: meta disagrees with real impl DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_binary_ufuncs_mixed_dtype'), ) ), ElementwiseBinaryPythonRefInfo( "_refs.div", torch_opinfo_name="div", torch_opinfo_variant_name="no_rounding_mode", # https://github.com/pytorch/pytorch/issues/76944 supports_two_python_scalars=True, supports_one_python_scalar=True, skips=( # NotImplementedError: argument of type: <class 'complex'> DecorateInfo( unittest.skip("Skipped!"), 'TestCommon', 'test_python_ref_executor', dtypes=(torch.complex32, torch.complex64, torch.complex128,) ), # Reference result was farther (0.7433461727239705) from the precise # computation than the torch result was (nan)! DecorateInfo( unittest.expectedFailure, 'TestCommon', 'test_python_ref', dtypes=(torch.complex32,), device_type="cuda" ), # Reference result was farther (0.7433461727239705) from the precise # computation than the torch result was (nan)! DecorateInfo( unittest.expectedFailure, 'TestCommon', 'test_python_ref_torch_fallback', dtypes=(torch.complex32,), device_type="cuda" ), ), ), ElementwiseBinaryPythonRefInfo( "_refs.div", torch_opinfo_name="div", torch_opinfo_variant_name="trunc_rounding", # https://github.com/pytorch/pytorch/issues/76944 supports_two_python_scalars=True, supports_one_python_scalar=True, decorators=( # See https://github.com/pytorch/pytorch/issues/111126 DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion'), ), ), ElementwiseBinaryPythonRefInfo( "_refs.div", torch_opinfo_name="div", torch_opinfo_variant_name="floor_rounding", # https://github.com/pytorch/pytorch/issues/76944 supports_two_python_scalars=True, supports_one_python_scalar=True, decorators=( # See https://github.com/pytorch/pytorch/issues/111126 DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion'), # Reference result was farther (nan) from the precise computation than the # torch result was (inf)! 
DecorateInfo( unittest.expectedFailure, "TestCommon", "test_python_ref", dtypes=(torch.bfloat16,), device_type="cpu", ), ), ), ElementwiseBinaryPythonRefInfo( "_refs.eq", torch_opinfo_name="eq", ), ElementwiseBinaryPythonRefInfo( "_refs.float_power", torch_opinfo_name="float_power", skips=( # Test doesn't account for float -> double type promotion DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion'), # Complex values error with: Greatest absolute difference: nan at index DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics_small_values', dtypes=[torch.complex64, torch.complex128]), DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics_large_values', dtypes=[torch.complex64, torch.complex128]), DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics_extremal_values', dtypes=[torch.complex64, torch.complex128]), ), ), ElementwiseBinaryPythonRefInfo( "_refs.logaddexp", torch_opinfo_name="logaddexp", skips=( # failure due to mismatch in edge cases, which boils down to what torch.exp(inf + infj) should be DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref', device_type='cpu', dtypes=(torch.complex64, torch.complex128)), DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_torch_fallback', device_type='cpu', dtypes=(torch.complex64, torch.complex128)), ), ), PythonRefInfo( "_refs.logaddexp2", torch_opinfo_name="logaddexp2", ), ElementwiseBinaryPythonRefInfo( "_refs.floor_divide", torch_opinfo_name="floor_divide", rhs_make_tensor_kwargs=dict(exclude_zero=True), # https://github.com/pytorch/pytorch/issues/76944 supports_two_python_scalars=True, supports_one_python_scalar=True, # bfloat16 floor_divide compared with a float32 reference works inconsistently skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_python_ref', dtypes=(torch.bfloat16,)), DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_python_ref_torch_fallback', dtypes=(torch.bfloat16,)), # bfloat16 floor_divide compared with a float32 reference works inconsistently DecorateInfo(unittest.skip('Skipped!'), 'TestBinaryUfuncs', dtypes=(torch.bfloat16,)), # int8 floor divide has different results for -128 // -1 vs. 
NumPy DecorateInfo(unittest.skip('Skipped!'), 'TestBinaryUfuncs', 'test_reference_numerics_small_values', dtypes=(torch.int8,)), # The following tests fails on some jobs DecorateInfo(unittest.skip('Skipped!'), 'TestBinaryUfuncs', 'test_reference_numerics_extremal_values', dtypes=(torch.float16,)), DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-3, rtol=5e-3)}), 'TestBinaryUfuncs', 'test_reference_numerics'), # FIXME output 0: meta disagrees with real impl DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_binary_ufuncs_mixed_dtype'), ), ), ElementwiseBinaryPythonRefInfo( "_refs.fmax", torch_opinfo_name="fmax", supports_rhs_python_scalar=False, ), ElementwiseBinaryPythonRefInfo( "_refs.fmin", torch_opinfo_name="fmin", supports_rhs_python_scalar=False, ), ElementwiseBinaryPythonRefInfo( "_refs.fmod", torch_opinfo_name="fmod", rhs_make_tensor_kwargs={'exclude_zero': True}, supports_rhs_python_scalar=True, skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_python_ref', dtypes=(torch.bfloat16,), device_type='cpu'), DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_python_ref_torch_fallback', dtypes=(torch.bfloat16,), device_type='cpu'), DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_contig_vs_every_other', dtypes=(torch.bfloat16,)), DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_non_contig', dtypes=(torch.bfloat16,)), DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics', dtypes=(torch.bfloat16,)), DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics_small_values', dtypes=(torch.uint8,)), ), ), ElementwiseBinaryPythonRefInfo( "_refs.gcd", torch_opinfo_name="gcd", skips=( DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_reference_numerics_small_values', dtypes=(torch.int8,)), ), ), ElementwiseBinaryPythonRefInfo( "_refs.ge", torch_opinfo_name="ge", ), ElementwiseBinaryPythonRefInfo( "_refs.gt", torch_opinfo_name="gt", ), ElementwiseBinaryPythonRefInfo( "_refs.heaviside", torch_opinfo_name="heaviside", supports_rhs_python_scalar=False, skips=( # PyTorch's heaviside does not appear to propagate NaNs DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics_extremal_values'), ), ), ElementwiseBinaryPythonRefInfo( "_refs.hypot", torch_opinfo_name="hypot", supports_rhs_python_scalar=False, ), ElementwiseBinaryPythonRefInfo( "_refs.igamma", torch_opinfo_name="igamma", ), ElementwiseBinaryPythonRefInfo( "_refs.igammac", torch_opinfo_name="igammac", ), ElementwiseBinaryPythonRefInfo( "_refs.isclose", torch_opinfo_name="isclose", skips=( # Intentional xfail -- isclose does not type promote DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion'), DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_binary_ufuncs_mixed_dtype'), DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics_extremal_values'), ), ), ElementwiseBinaryPythonRefInfo( "_refs.lcm", torch_opinfo_name="lcm", ), ElementwiseBinaryPythonRefInfo( "_refs.le", torch_opinfo_name="le", ), ElementwiseBinaryPythonRefInfo( "_refs.logical_and", torch_opinfo_name="logical_and", ), ElementwiseUnaryPythonRefInfo( "_refs.logical_not", torch_opinfo_name="logical_not", ), ElementwiseBinaryPythonRefInfo( "_refs.logical_or", torch_opinfo_name="logical_or", ), ElementwiseBinaryPythonRefInfo( "_refs.logical_xor", torch_opinfo_name="logical_xor", ), ElementwiseBinaryPythonRefInfo( "_refs.lt", torch_opinfo_name="lt", 
), ElementwiseBinaryPythonRefInfo( "_refs.maximum", torch_opinfo_name="maximum", skips=( DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_errors'), ), ), ElementwiseBinaryPythonRefInfo( "_refs.minimum", torch_opinfo_name="minimum", skips=( DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_errors'), ), ), ElementwiseBinaryPythonRefInfo( "_refs.mul", torch_opinfo_name="mul", # https://github.com/pytorch/pytorch/issues/76944 supports_two_python_scalars=True, supports_one_python_scalar=True, skips=( # Reference result was farther (0.0) from the precise computation # than the torch result was (nan)! DecorateInfo( unittest.expectedFailure, 'TestCommon', 'test_python_ref_executor', dtypes=(torch.complex32,), ), # Reference result was farther (0.0) from the precise computation # than the torch result was (nan)! DecorateInfo( unittest.expectedFailure, 'TestCommon', 'test_python_ref', dtypes=(torch.complex32,), device_type='cuda' ), # Reference result was farther (0.0) from the precise computation # than the torch result was (nan)! DecorateInfo( unittest.expectedFailure, 'TestCommon', 'test_python_ref_torch_fallback', dtypes=(torch.complex32,), device_type='cuda' ), ) ), ElementwiseBinaryPythonRefInfo( "_refs.ne", torch_opinfo_name="ne", ), ElementwiseBinaryPythonRefInfo( "_refs.nextafter", torch_opinfo_name="nextafter", ), ElementwiseBinaryPythonRefInfo( "_refs.pow", torch_opinfo_name="pow", decorators=( DecorateInfo( toleranceOverride({torch.complex64: tol(atol=1e-4, rtol=1.3e-05)}), 'TestBinaryUfuncs', 'test_reference_numerics'), DecorateInfo( toleranceOverride({torch.complex64: tol(atol=1e-4, rtol=1.3e-05), torch.complex128: tol(atol=1e-4, rtol=1.3e-05)}), 'TestBinaryUfuncs', 'test_scalar_support'), ), skips=( # Reference result was farther (inf) from the precise # computation than the torch result was (nan)! DecorateInfo( unittest.expectedFailure, 'TestCommon', 'test_python_ref_executor', dtypes=(torch.complex32,), ), # Reference result was farther (inf) from the precise # computation than the torch result was (nan)! DecorateInfo( unittest.expectedFailure, 'TestCommon', 'test_python_ref', dtypes=(torch.complex32,), device_type="cuda" ), # Reference result was farther (inf) from the precise # computation than the torch result was (nan)! 
DecorateInfo( unittest.expectedFailure, 'TestCommon', 'test_python_ref_torch_fallback', dtypes=(torch.complex32,), device_type="cuda" ), # Skipping integers because they are being raised to negative powers causing an error DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_reference_numerics_small_values', dtypes=[torch.int8, torch.int16, torch.int32, torch.int64]), DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_reference_numerics_large_values', dtypes=[torch.int16, torch.int32, torch.int64]), DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics', dtypes=(torch.complex32,)), DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics_small_values', dtypes=(torch.complex32, torch.complex64, torch.complex128)), DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics_large_values', dtypes=(torch.complex32, torch.complex64, torch.complex128)), DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics_extremal_values', dtypes=(torch.complex32, torch.complex64, torch.complex128)), ), ), ElementwiseBinaryPythonRefInfo( "_refs.remainder", torch_opinfo_name="remainder", skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_python_ref', dtypes=(torch.bfloat16,), device_type='cpu'), DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_python_ref_torch_fallback', dtypes=(torch.bfloat16,), device_type='cpu'), DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics', dtypes=(torch.bfloat16,)), DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics_small_values', dtypes=(torch.uint8,)), ), ), ElementwiseBinaryPythonRefInfo( "_refs.rsub", torch_opinfo_name="rsub", # https://github.com/pytorch/pytorch/issues/76944 skips=( # Reference result was farther (nan) from the precise computation than # the torch result was (nan)! DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref', dtypes=(torch.chalf,), device_type='cpu'), # Reference result was farther (nan) from the precise computation than # the torch result was (nan)! 
DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_torch_fallback', dtypes=(torch.chalf,), device_type='cpu'), ), ), ElementwiseBinaryPythonRefInfo( "_refs.sub", torch_opinfo_name="sub", # https://github.com/pytorch/pytorch/issues/76944 supports_two_python_scalars=True, supports_one_python_scalar=True, decorators=( DecorateInfo( toleranceOverride({torch.float16: tol(atol=1e-2, rtol=0), torch.bfloat16: tol(atol=1e-5, rtol=5e-3), torch.complex32: tol(atol=1e-5, rtol=1e-3)}), 'TestBinaryUfuncs', 'test_reference_numerics'), DecorateInfo( toleranceOverride({torch.chalf: tol(atol=1e-2, rtol=0)}), 'TestCommon', 'test_complex_half_reference_testing', device_type='cpu'), DecorateInfo( toleranceOverride({torch.chalf: tol(atol=5e-3, rtol=0)}), 'TestDecomp', 'test_comprehensive', device_type='cpu'), DecorateInfo( toleranceOverride({torch.chalf: tol(atol=5e-3, rtol=0)}), 'TestDecomp', 'test_quick', device_type='cpu'), ), skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics', dtypes=(torch.uint8,)), DecorateInfo(unittest.skip("Skipped!"), 'TestBinaryUfuncs', 'test_reference_numerics_small_values', dtypes=(torch.uint8,)), ), ), ElementwiseBinaryPythonRefInfo( "_refs.true_divide", torch_opinfo_name="true_divide", # https://github.com/pytorch/pytorch/issues/76944 supports_two_python_scalars=True, supports_one_python_scalar=True, skips=( # Reference result was farther (0.7433461727239705) from the precise # computation than the torch result was (nan)! DecorateInfo( unittest.expectedFailure, 'TestCommon', 'test_python_ref_executor', dtypes=(torch.complex32,), ), # Reference result was farther (0.7433461727239705) from the precise # computation than the torch result was (nan)! DecorateInfo( unittest.expectedFailure, 'TestCommon', 'test_python_ref', dtypes=(torch.complex32,), device_type="cuda" ), # Reference result was farther (0.7433461727239705) from the precise # computation than the torch result was (nan)! DecorateInfo( unittest.expectedFailure, 'TestCommon', 'test_python_ref_torch_fallback', dtypes=(torch.complex32,), device_type="cuda" ), ), ), # # Elementwise Ternary Reference OpInfos # PythonRefInfo( "_refs.addcdiv", torch_opinfo_name="addcdiv", ), PythonRefInfo( "_refs.addcmul", torch_opinfo_name="addcmul", skips=( # Reference result was farther (1.3343989849090576e-05) # from the precise computation than the torch result # was (9.592622518539429e-06)! # FIXME: enable dtype-based tolerances in test_ops.py:TestCommon._ref_test_helper DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_python_ref', dtypes=(torch.float16,), device_type="cpu"), DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_python_ref_torch_fallback', dtypes=(torch.float16,), device_type="cpu"), ), ), ElementwiseBinaryPythonRefInfo( "_refs.clamp_min", torch_opinfo_name="clamp_min", skips=( # test error disabled since rhs non-tensor python scalar is supported DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_errors'), ), ), ElementwiseBinaryPythonRefInfo( "_refs.clamp_max", torch_opinfo_name="clamp_max", skips=( # test error disabled since rhs non-tensor python scalar is supported DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_errors'), ), ), PythonRefInfo( "_refs.clamp", torch_opinfo_name="clamp", ), PythonRefInfo( "_refs.nn.functional.triplet_margin_loss", torch_opinfo_name="nn.functional.triplet_margin_loss", supports_out=False, # TODO: Uses minimum and clamp skips=( # AssertionError: Tensor-likes are not close! 
# Greatest absolute difference: 6.103515625e-05 at index (4,) (up to 1e-05 allowed) # Greatest relative difference: 8.519846983548175e-06 at index (4,) (up to 1.3e-06 allowed) DecorateInfo(unittest.skip("Skipped!"), 'TestCommon', 'test_python_ref', dtypes=(torch.uint8,), device_type="cpu"), ) ), ElementwiseBinaryPythonRefInfo( "_refs.xlogy", torch_opinfo_name="xlogy", supports_one_python_scalar=True, ), # # Elementwise Binary Special OpInfos # ElementwiseBinaryPythonRefInfo( "_refs.special.xlog1py", torch_opinfo_name="special.xlog1py", supports_one_python_scalar=True, ), # # Data Conversion & Data Movement Opinfos # ElementwiseUnaryPythonRefInfo( "_refs._conversions.bfloat16", torch_opinfo_name="bfloat16", # TODO: If self already has the correct dtype and device, then self is # returned ignoring memory_format. # https://github.com/pytorch/pytorch/issues/86558 validate_view_consistency=False, ), ElementwiseUnaryPythonRefInfo( "_refs._conversions.bool", torch_opinfo_name="bool", # TODO: If self already has the correct dtype and device, then self is # returned ignoring memory_format. # https://github.com/pytorch/pytorch/issues/86558 validate_view_consistency=False, ), ElementwiseUnaryPythonRefInfo( "_refs._conversions.byte", torch_opinfo_name="byte", # TODO: If self already has the correct dtype and device, then self is # returned ignoring memory_format. # https://github.com/pytorch/pytorch/issues/86558 validate_view_consistency=False, skips=( DecorateInfo(unittest.skip('Overflow when downcasting signed type is undefined'), 'TestCommon', 'test_compare_cpu'), ) ), ElementwiseUnaryPythonRefInfo( "_refs._conversions.char", torch_opinfo_name="char", # TODO: If self already has the correct dtype and device, then self is # returned ignoring memory_format. # https://github.com/pytorch/pytorch/issues/86558 validate_view_consistency=False, skips=( DecorateInfo(unittest.skip('Overflow when downcasting signed type is undefined'), 'TestCommon', 'test_compare_cpu'), ) ), ElementwiseBinaryPythonRefInfo( "_refs._conversions.complex", torch_opinfo_name="complex", error_inputs_func=partial(error_inputs_complex, is_ref=True), skips=( # Tests don't account for complex's type promotion semantics DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion'), DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_binary_ufuncs_mixed_dtype'), ) ), ElementwiseBinaryPythonRefInfo( "_refs._conversions.polar", torch_opinfo_name="polar", skips=( # Tests don't account for complex's type promotion semantics DecorateInfo(unittest.expectedFailure, 'TestBinaryUfuncs', 'test_type_promotion'), DecorateInfo(unittest.expectedFailure, 'TestMeta', 'test_binary_ufuncs_mixed_dtype'), ) ), ElementwiseUnaryPythonRefInfo( "_refs._conversions.double", torch_opinfo_name="double", # TODO: If self already has the correct dtype and device, then self is # returned ignoring memory_format. # https://github.com/pytorch/pytorch/issues/86558 validate_view_consistency=False, ), ElementwiseUnaryPythonRefInfo( "_refs._conversions.float", torch_opinfo_name="float", # TODO: If self already has the correct dtype and device, then self is # returned ignoring memory_format. # https://github.com/pytorch/pytorch/issues/86558 validate_view_consistency=False, ), ElementwiseUnaryPythonRefInfo( "_refs._conversions.half", torch_opinfo_name="half", # TODO: If self already has the correct dtype and device, then self is # returned ignoring memory_format. 
# https://github.com/pytorch/pytorch/issues/86558 validate_view_consistency=False, ), ElementwiseUnaryPythonRefInfo( "_refs._conversions.int", torch_opinfo_name="int", # TODO: If self already has the correct dtype and device, then self is # returned ignoring memory_format. # https://github.com/pytorch/pytorch/issues/86558 validate_view_consistency=False, skips=( DecorateInfo(unittest.skip('Overflow when downcasting signed type is undefined'), 'TestCommon', 'test_compare_cpu'), ) ), ElementwiseUnaryPythonRefInfo( "_refs._conversions.long", torch_opinfo_name="long", # TODO: If self already has the correct dtype and device, then self is # returned ignoring memory_format. # https://github.com/pytorch/pytorch/issues/86558 validate_view_consistency=False, skips=( DecorateInfo(unittest.skip('Overflow when downcasting signed type is undefined'), 'TestCommon', 'test_compare_cpu'), ) ), ElementwiseUnaryPythonRefInfo( "_refs._conversions.short", torch_opinfo_name="short", # TODO: If self already has the correct dtype and device, then self is # returned ignoring memory_format. # https://github.com/pytorch/pytorch/issues/86558 validate_view_consistency=False, skips=( DecorateInfo(unittest.skip('Overflow when downcasting signed type is undefined'), 'TestCommon', 'test_compare_cpu'), ) ), ElementwiseUnaryPythonRefInfo( "_refs._conversions.chalf", torch_opinfo_name="chalf", # TODO: If self already has the correct dtype and device, then self is # returned ignoring memory_format. # https://github.com/pytorch/pytorch/issues/86558 validate_view_consistency=False, ), ElementwiseUnaryPythonRefInfo( "_refs._conversions.cfloat", torch_opinfo_name="cfloat", # TODO: If self already has the correct dtype and device, then self is # returned ignoring memory_format. # https://github.com/pytorch/pytorch/issues/86558 validate_view_consistency=False, ), ElementwiseUnaryPythonRefInfo( "_refs._conversions.cdouble", torch_opinfo_name="cdouble", # TODO: If self already has the correct dtype and device, then self is # returned ignoring memory_format. 
# https://github.com/pytorch/pytorch/issues/86558 validate_view_consistency=False, ), PythonRefInfo( "_refs.clone", torch_opinfo_name="clone", ), # # View & Shape OpInfos # PythonRefInfo( "_refs.alias_copy", torch_opinfo_name="alias_copy", supports_out=True, ), PythonRefInfo( "_refs.atleast_1d", torch_opinfo_name="atleast_1d", validate_view_consistency=False, ), PythonRefInfo( "_refs.atleast_2d", torch_opinfo_name="atleast_2d", validate_view_consistency=False, ), PythonRefInfo( "_refs.atleast_3d", torch_opinfo_name="atleast_3d", validate_view_consistency=False, ), PythonRefInfo( "_refs.as_strided", torch_opinfo_name="as_strided", # FIXME: doesn't support chalf dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), skips=( # cloned_mutable_input.is_same(returned_output) INTERNAL ASSERT FAILED DecorateInfo(unittest.skip("Errors when storage_offset is included"), 'TestMathBits', 'test_neg_view'), DecorateInfo(unittest.skip("Errors when storage_offset is included"), 'TestMathBits', 'test_conj_view'), DecorateInfo(unittest.skip("Errors when storage_offset is included"), 'TestMathBits', 'test_neg_conj_view'), ), ), PythonRefInfo( "_refs.as_strided_copy", torch_opinfo_name="as_strided_copy", supports_out=True, # FIXME: doesn't support chalf dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), skips=( # cloned_mutable_input.is_same(returned_output) INTERNAL ASSERT FAILED DecorateInfo(unittest.skip("Errors when storage_offset is included"), 'TestMathBits', 'test_neg_view'), DecorateInfo(unittest.skip("Errors when storage_offset is included"), 'TestMathBits', 'test_conj_view'), DecorateInfo(unittest.skip("Errors when storage_offset is included"), 'TestMathBits', 'test_neg_conj_view'), # The view function this decompose into does not have a ref DecorateInfo(unittest.expectedFailure, "TestCommon", "test_python_ref"), ), ), PythonRefInfo( "_refs.as_strided", torch_opinfo_name="as_strided", torch_opinfo_variant_name="partial_views", # FIXME: doesn't support chalf dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16), skips=( # cloned_mutable_input.is_same(returned_output) INTERNAL ASSERT FAILED DecorateInfo(unittest.skip("Errors when storage_offset is included"), 'TestMathBits', 'test_neg_view'), DecorateInfo(unittest.skip("Errors when storage_offset is included"), 'TestMathBits', 'test_conj_view'), DecorateInfo(unittest.skip("Errors when storage_offset is included"), 'TestMathBits', 'test_neg_conj_view'), DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_compare_cpu'), ), ), PythonRefInfo( "_refs.as_strided_scatter", torch_opinfo_name="as_strided_scatter", # returns a view of an intermediate tensor (as_strided) validate_view_consistency=False, ), PythonRefInfo( "_refs.block_diag", torch_opinfo_name="block_diag", ), PythonRefInfo( "_refs.broadcast_shapes", torch_opinfo_name="broadcast_shapes", ), PythonRefInfo( "_refs.broadcast_tensors", torch_opinfo_name="broadcast_tensors", ), PythonRefInfo( "_refs.broadcast_to", torch_opinfo_name="broadcast_to", ), PythonRefInfo( "_refs.cat", torch_opinfo_name="cat", skips=( # FIXME: AssertionError: RuntimeError not raised DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_errors'), ), ), PythonRefInfo( "_refs.chunk", torch_opinfo_name="chunk", ), PythonRefInfo( "_refs.column_stack", torch_opinfo_name="column_stack", ), ElementwiseUnaryPythonRefInfo( "_refs.conj", torch_opinfo_name="conj", ), PythonRefInfo( "_refs.constant_pad_nd", torch_opinfo_name="constant_pad_nd", ), 
PythonRefInfo( "_refs.contiguous", torch_opinfo_name="contiguous", ), ElementwiseUnaryPythonRefInfo( "_refs.deg2rad", torch_opinfo_name="deg2rad", decorators=(precisionOverride({torch.bfloat16: 7e-1, torch.float16: 7e-1}),), ), PythonRefInfo( "_refs.dsplit", torch_opinfo_name="dsplit", ), PythonRefInfo( "_refs.diag", torch_opinfo_name="diag", ), PythonRefInfo( "_refs.diagonal", torch_opinfo_name="diagonal", ), PythonRefInfo( "_refs.diagonal_copy", torch_opinfo_name="diagonal_copy", supports_out=True, ), PythonRefInfo( "_refs.diagonal_scatter", torch_opinfo_name="diagonal_scatter", supports_out=True, # returns a view of an intermediate tensor (as_strided) validate_view_consistency=False, ), PythonRefInfo( "_refs.diag_embed", torch_opinfo_name="diag_embed", supports_out=True, ), PythonRefInfo( "_refs.dstack", torch_opinfo_name="dstack", skips=( DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_errors'), ), ), PythonRefInfo( "_refs.expand", torch_opinfo_name="expand", ), PythonRefInfo( "_refs.expand_as", torch_opinfo_name="expand_as", ), PythonRefInfo( "_refs.expand_copy", torch_opinfo_name="expand_copy", supports_out=True, ), PythonRefInfo( "_refs.flatten", torch_opinfo_name="flatten", ), PythonRefInfo( "_refs.flip", torch_opinfo_name="flip", ), PythonRefInfo( "_refs.fliplr", torch_opinfo_name="fliplr", ), PythonRefInfo( "_refs.flipud", torch_opinfo_name="flipud", ), PythonRefInfo( "_refs.hstack", torch_opinfo_name="hstack", skips=( # https://github.com/pytorch/pytorch/issues/78613 DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_errors'), ), ), PythonRefInfo( "_refs.narrow", torch_opinfo_name="narrow", error_inputs_func=partial(error_inputs_narrow_narrow_copy, is_narrow=True, is_ref=True), ), PythonRefInfo( "_refs.narrow_copy", torch_opinfo_name="narrow_copy", supports_out=True, error_inputs_func=partial(error_inputs_narrow_narrow_copy, is_narrow=False, is_ref=True), skips=( # The view function this decompose into does not have a ref DecorateInfo(unittest.expectedFailure, "TestCommon", "test_python_ref"), ), ), PythonRefInfo( "_refs.nn.functional.group_norm", torch_opinfo_name="nn.functional.group_norm", validate_view_consistency=False, ), PythonRefInfo( "_refs.native_layer_norm", torch_opinfo_name="native_layer_norm", skips=( DecorateInfo(unittest.skip("Skipped!"), "TestCommon", "test_python_ref", device_type="cpu", dtypes=(torch.float32,)), DecorateInfo(unittest.skip("Skipped!"), "TestCommon", "test_python_ref_torch_fallback", device_type="cpu", dtypes=(torch.float32,)), ), ), PythonRefInfo( "_refs.permute", torch_opinfo_name="permute", ), PythonRefInfo( "_refs.permute_copy", torch_opinfo_name="permute_copy", supports_out=True, ), ElementwiseUnaryPythonRefInfo( "_refs.rad2deg", torch_opinfo_name="rad2deg", decorators=(precisionOverride({torch.bfloat16: 7e-1, torch.float16: 7e-1}),), ), PythonRefInfo( "_refs.ravel", torch_opinfo_name="ravel", ), PythonRefInfo( "_refs.renorm", torch_opinfo_name="renorm", ), PythonRefInfo( "_refs.repeat", torch_opinfo_name="repeat", validate_view_consistency=False, ), PythonRefInfo( "_refs.reshape", torch_opinfo_name="reshape", ), PythonRefInfo( "_refs.reshape_as", torch_opinfo_name="reshape_as", ), PythonRefInfo( "_refs.roll", torch_opinfo_name="roll", validate_view_consistency=False, ), PythonRefInfo( "_refs.rot90", torch_opinfo_name="rot90", validate_view_consistency=False, ), PythonRefInfo( "_refs.select_scatter", torch_opinfo_name="select_scatter", ), PythonRefInfo( "_refs.stack", torch_opinfo_name="stack", 
validate_view_consistency=False, ), PythonRefInfo( "_refs.squeeze", torch_opinfo_name="squeeze", ), PythonRefInfo( "_refs.squeeze_copy", torch_opinfo_name="squeeze_copy", supports_out=True, ), PythonRefInfo( "_refs.squeeze", torch_opinfo_name="squeeze", torch_opinfo_variant_name="multiple", ), PythonRefInfo( "_refs.tensor_split", torch_opinfo_name="tensor_split", skips=( # RuntimeError: no _refs support for torch.Tensor.tolist DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref'), ), ), PythonRefInfo( "_refs.hsplit", torch_opinfo_name="hsplit", ), PythonRefInfo( "_refs.vsplit", torch_opinfo_name="vsplit", ), PythonRefInfo( "_refs.dot", torch_opinfo_name="dot", error_inputs_func=partial(error_inputs_dot_vdot, is_ref=True), # .conj() does not set ._is_view() correctly in ATen validate_view_consistency=False, skips=( # RuntimeError: no _refs support for torch.Tensor.is_conj DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref', dtypes=[torch.complex64, torch.complex128]), ), ), PythonRefInfo( "_refs.vdot", torch_opinfo_name="vdot", error_inputs_func=partial(error_inputs_dot_vdot, is_ref=True), # .conj() does not set ._is_view() correctly in ATen validate_view_consistency=False, skips=( # RuntimeError: no _refs support for torch.Tensor.is_conj DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref', dtypes=[torch.complex64, torch.complex128]), ), ), PythonRefInfo( "_refs.transpose", torch_opinfo_name="transpose", ), PythonRefInfo( "_refs.transpose_copy", torch_opinfo_name="transpose_copy", supports_out=True, ), PythonRefInfo( "_refs.t", torch_opinfo_name="t", ), PythonRefInfo( "_refs.t_copy", torch_opinfo_name="t_copy", supports_out=True, ), PythonRefInfo( "_refs.T", torch_opinfo_name="T", error_inputs_func=partial(error_inputs_T, has_ndims_error=True), ), PythonRefInfo( "_refs.unbind_copy", torch_opinfo_name="unbind_copy", ), PythonRefInfo( "_refs.unfold", torch_opinfo_name="unfold", ), PythonRefInfo( "_refs.unfold_copy", torch_opinfo_name="unfold_copy", supports_out=True, ), PythonRefInfo( "_refs.unsqueeze", torch_opinfo_name="unsqueeze", ), PythonRefInfo( "_refs.unsqueeze_copy", torch_opinfo_name="unsqueeze_copy", supports_out=True, ), PythonRefInfo( "_refs.view", torch_opinfo_name="view", ), PythonRefInfo( "_refs.view_as", torch_opinfo_name="view_as", ), PythonRefInfo( "_refs.view_copy", torch_opinfo_name="view_copy", supports_out=True, ), PythonRefInfo( "_refs.vstack", torch_opinfo_name="vstack", skips=( # https://github.com/pytorch/pytorch/issues/78613 DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_errors'), ), ), PythonRefInfo( "_refs.unflatten", torch_opinfo_name="unflatten", ), PythonRefInfo( "_refs.unbind", torch_opinfo_name="unbind", ), # # Reduction Reference OpInfos # ReductionPythonRefInfo( "_refs.all", torch_opinfo_name="all", skips=( # FIXME: uint8 input returns uint8 instead of bool DecorateInfo( unittest.expectedFailure, 'TestReductions', 'test_result_dtype', dtypes=[torch.uint8]), ), ), ReductionPythonRefInfo( "_refs.amax", torch_opinfo_name="amax", error_inputs_func=partial(error_inputs_aminmax_amax_amin, is_ref=True), skips=( # FIXME: reduces all dimensions when dim=[] DecorateInfo( unittest.expectedFailure, 'TestReductions', 'test_dim_empty'), DecorateInfo( unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'), ), ), ReductionPythonRefInfo( "_refs.amin", torch_opinfo_name="amin", error_inputs_func=partial(error_inputs_aminmax_amax_amin, is_ref=True), skips=( # FIXME: reduces all 
dimensions when dim=[] DecorateInfo( unittest.expectedFailure, 'TestReductions', 'test_dim_empty'), DecorateInfo( unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'), ), ), ReductionPythonRefInfo( "_refs.any", torch_opinfo_name="any", skips=( # FIXME: uint8 input returns uint8 instead of bool DecorateInfo( unittest.expectedFailure, 'TestReductions', 'test_result_dtype', dtypes=[torch.uint8]), ), ), ReductionPythonRefInfo( "_refs.count_nonzero", torch_opinfo_name="count_nonzero", skips=( # FIXME: count_nonzero does not accept keepdim kwarg DecorateInfo( unittest.skip("Skipped!"), 'TestReductions', 'test_dim_default_keepdim'), DecorateInfo( unittest.skip("Skipped!"), 'TestReductions', 'test_dim_none_keepdim'), DecorateInfo( unittest.skip("Skipped!"), 'TestReductions', 'test_dim_single_keepdim'), DecorateInfo( unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'), DecorateInfo( unittest.skip("Skipped!"), 'TestReductions', 'test_dim_multi_keepdim'), DecorateInfo( unittest.skip("Skipped!"), 'TestReductions', 'test_dim_multi_unsorted_keepdim'), # FIXME: dim=[] reduces all dimensions DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'), ), ), ReductionPythonRefInfo( "_refs.mean", torch_opinfo_name="mean", supports_out=True, error_inputs_func=partial(error_inputs_mean, is_ref=True), skips=( # FIXME: reduces all dimensions when dim=[] DecorateInfo( unittest.expectedFailure, 'TestReductions', 'test_dim_empty'), DecorateInfo( unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'), ), ), ReductionPythonRefInfo( "_refs.std", torch_opinfo_name="std", supports_out=True, skips=( # FIXME: reduces all dimensions when dim=[] DecorateInfo( unittest.expectedFailure, 'TestReductions', 'test_dim_empty'), DecorateInfo( unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'), # FIXME: improve precision DecorateInfo( unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input', dtypes=(torch.float16,)), DecorateInfo( unittest.skip("Skipped!"), 'TestReductions', 'test_ref_duplicate_values', dtypes=(torch.float16,)), ), ), # std_mean and var_mean are not ReductionInfos PythonRefInfo( "_refs.std_mean", torch_opinfo_name="std_mean", ), ReductionPythonRefInfo( "_refs.sum", torch_opinfo_name="sum", supports_out=True, skips=( # FIXME: doesn't test out behavior properly for this operator DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), # FIXME: mean reduces all dimensions when dim=[] DecorateInfo(unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty'), DecorateInfo( unittest.skip("Skipped!"), 'TestReductions', 'test_dim_empty_keepdim'), # FIXME: improve precision DecorateInfo( unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input', dtypes=[torch.float16]), DecorateInfo( unittest.skip("Skipped!"), 'TestReductions', 'test_ref_duplicate_values', dtypes=[torch.float16]), DecorateInfo( unittest.skip("Skipped!"), 'TestOperators', 'test_reduction_all', dtypes=[torch.float32]), ), ), PythonRefInfo( "_refs.cumsum", torch_opinfo_name="cumsum", supports_out=True, skips=( # doesn't test out behavior properly for this operator DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), ), ), PythonRefInfo( "_refs.cumprod", torch_opinfo_name="cumprod", supports_out=True, skips=( # doesn't test out behavior properly for this operator DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), ), ), PythonRefInfo( "_refs.sum_to_size", torch_opinfo_name="sum_to_size", 
validate_view_consistency=False, ), ReductionPythonRefInfo( "_refs.prod", torch_opinfo_name="prod", supports_out=True, supports_multiple_dims=True, skips=( # FIXME: doesn't test out behavior properly for this operator DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_out'), # FIXME: reduces all dimensions when dim=[] DecorateInfo( unittest.expectedFailure, 'TestReductions', 'test_dim_empty'), DecorateInfo( unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'), # FIXME: improve precision DecorateInfo( unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input', dtypes=[torch.float16, torch.complex64]), ), ), ReductionPythonRefInfo( "_refs.var", torch_opinfo_name="var", supports_out=True, skips=( # FIXME: reduces all dimensions when dim=[] DecorateInfo( unittest.expectedFailure, 'TestReductions', 'test_dim_empty'), DecorateInfo( unittest.expectedFailure, 'TestReductions', 'test_dim_empty_keepdim'), # FIXME: improve precision DecorateInfo( unittest.skip("Skipped!"), 'TestReductions', 'test_ref_small_input'), ), ), PythonRefInfo( "_refs.var_mean", torch_opinfo_name="var_mean", validate_view_consistency=False, ), # # Linear Algebra Operators # PythonRefInfo( "_refs.addr", torch_opinfo_name="addr", decorators=( DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref',), ), ), PythonRefInfo( "_refs.trace", torch_opinfo_name="trace", ), PythonRefInfo( "_refs.norm", torch_opinfo_name="norm", supports_out=True, # Uses vector_norm inside and vector_norm is affected by # https://github.com/pytorch/pytorch/issues/77216 validate_view_consistency=False, ), # # Tensor Creation Reference OpInfos # PythonRefInfo( "_refs.empty", torch_opinfo_name="empty", skips=( DecorateInfo(unittest.skip("Expected: empty is not comparable"), 'TestCommon', 'test_python_ref'), DecorateInfo(unittest.skip("Expected: empty is not comparable"), 'TestCommon', 'test_python_ref_torch_fallback'), DecorateInfo(unittest.skip("Expected: empty is not comparable"), 'TestCommon', 'test_out'), DecorateInfo(unittest.skip("Expected: empty is not comparable"), 'TestCommon', 'test_out_warning'), DecorateInfo(unittest.skip("Expected: empty is not comparable"), 'TestMathBits', 'test_conj_view'), DecorateInfo(unittest.skip("Expected: empty is not comparable"), 'TestMathBits', 'test_neg_conj_view'), DecorateInfo(unittest.skip("Expected: empty is not comparable"), 'TestMathBits', 'test_neg_view'), # FIXME: shouldn't check empty results DecorateInfo(unittest.skip("Can't check result for empty"), 'TestCommon', 'test_python_ref_executor'), DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), ), ), PythonRefInfo( "_refs.empty_like", torch_opinfo_name="empty_like", skips=( DecorateInfo(unittest.skip("Expected: empty is not comparable"), 'TestCommon', 'test_python_ref'), DecorateInfo(unittest.skip("Expected: empty is not comparable"), 'TestCommon', 'test_python_ref_torch_fallback'), DecorateInfo(unittest.skip("Expected: empty is not comparable"), 'TestCommon', 'test_out'), DecorateInfo(unittest.skip("Expected: empty is not comparable"), 'TestCommon', 'test_out_warning'), DecorateInfo(unittest.skip("Expected: empty is not comparable"), 'TestMathBits', 'test_conj_view'), DecorateInfo(unittest.skip("Expected: empty is not comparable"), 'TestMathBits', 'test_neg_conj_view'), DecorateInfo(unittest.skip("Expected: empty is not comparable"), 'TestMathBits', 'test_neg_view'), # FIXME: should not compare results of empty_like DecorateInfo(unittest.skip("Can't check result for 
empty_like"), 'TestCommon', 'test_python_ref_executor'), DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), ), ), PythonRefInfo( "_refs.randn", torch_opinfo_name="randn", op=lambda *args, **kwargs: wrapper_set_seed(refs.randn, *args, **kwargs), skips=( # see https://github.com/pytorch/pytorch/issues/85121 DecorateInfo(unittest.skip("make_traced() doesn't set seed properly!"), 'TestCommon', 'test_python_ref_executor'), # These tests expect the input to be a tensor or a sequence of tensors DecorateInfo(unittest.skip("Test expects tensor input"), "TestCommon", "test_noncontiguous_samples"), DecorateInfo(unittest.skip("Test expects tensor input"), 'TestMathBits', 'test_neg_view'), DecorateInfo(unittest.skip("Test expects tensor input"), 'TestMathBits', 'test_conj_view'), DecorateInfo(unittest.skip("Test expects tensor input"), 'TestMathBits', 'test_neg_conj_view'), ), ), PythonRefInfo( "_refs.eye", torch_opinfo_name="eye", skips=( # skip these tests since we have non tensor input DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_conj_view'), DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_conj_view'), DecorateInfo(unittest.skip("Skipped!"), 'TestMathBits', 'test_neg_view'), ), ), PythonRefInfo( "_refs.new_empty", torch_opinfo_name="new_empty", skips=( DecorateInfo(unittest.skip("Expected: empty is not comparable"), 'TestCommon', 'test_python_ref'), DecorateInfo(unittest.skip("Expected: empty is not comparable"), 'TestCommon', 'test_python_ref_torch_fallback'), DecorateInfo(unittest.skip("Expected: empty is not comparable"), 'TestCommon', 'test_out'), DecorateInfo(unittest.skip("Expected: empty is not comparable"), 'TestCommon', 'test_out_warning'), DecorateInfo(unittest.skip("Expected: empty is not comparable"), 'TestMathBits', 'test_conj_view'), DecorateInfo(unittest.skip("Expected: empty is not comparable"), 'TestMathBits', 'test_neg_conj_view'), DecorateInfo(unittest.skip("Expected: empty is not comparable"), 'TestMathBits', 'test_neg_view'), # FIXME: should not compare results of empty_like DecorateInfo(unittest.skip("Can't check result for new_empty"), 'TestCommon', 'test_python_ref_executor'), DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), ), ), PythonRefInfo( "_refs.new_empty_strided", torch_opinfo_name="new_empty_strided", skips=( DecorateInfo(unittest.skip("Expected: empty_strided is not comparable"), 'TestCommon', 'test_python_ref'), DecorateInfo(unittest.skip("Expected: empty_strided is not comparable"), 'TestCommon', 'test_python_ref_torch_fallback'), DecorateInfo(unittest.skip("Expected: empty_strided is not comparable"), 'TestMathBits', 'test_conj_view'), DecorateInfo(unittest.skip("Expected: empty_strided is not comparable"), 'TestMathBits', 'test_neg_conj_view'), DecorateInfo(unittest.skip("Expected: empty_strided is not comparable"), 'TestMathBits', 'test_neg_view'), DecorateInfo(unittest.skip("Expected: empty_strided is not comparable"), 'TestCommon', 'test_python_ref_executor'), DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), ), ), PythonRefInfo( "_refs.empty_strided", torch_opinfo_name="empty_strided", skips=( DecorateInfo(unittest.skip("Expected: empty_strided is not comparable"), 'TestCommon', 'test_python_ref'), DecorateInfo(unittest.skip("Expected: empty_strided is not comparable"), 'TestCommon', 'test_python_ref_torch_fallback'), DecorateInfo(unittest.skip("Expected: empty_strided is not comparable"), 
'TestMathBits', 'test_conj_view'), DecorateInfo(unittest.skip("Expected: empty_strided is not comparable"), 'TestMathBits', 'test_neg_conj_view'), DecorateInfo(unittest.skip("Expected: empty_strided is not comparable"), 'TestMathBits', 'test_neg_view'), DecorateInfo(unittest.skip("Expected: empty_strided is not comparable"), 'TestCommon', 'test_python_ref_executor'), DecorateInfo(unittest.skip('output is non-deterministic'), 'TestCommon', 'test_compare_cpu'), ), ), PythonRefInfo( "_refs.new_full", torch_opinfo_name="new_full", ), PythonRefInfo( "_refs.new_ones", torch_opinfo_name="new_ones", ), PythonRefInfo( "_refs.new_zeros", torch_opinfo_name="new_zeros", ), # # Conditional Reference OpInfos # PythonRefInfo( "_refs.masked_fill", torch_opinfo_name="masked_fill", skips=( DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_errors'), ), ), PythonRefInfo( "_refs.where", torch_opinfo_name="where", op=lambda self, condition, other: refs.where(condition, self, other), supports_out=False, skips=( DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_errors', device_type='cuda'), ), ), PythonRefInfo( "_refs.index_select", torch_opinfo_name="index_select", # empty_strided skips=( # no _refs support for Tensor.__setitem__ DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref'), # Sample out= with a stride of zero. This _out operation checks that the input has no # inner overlap DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_errors'),) ), PythonRefInfo( "_refs.index_copy", torch_opinfo_name="index_copy", # empty_strided skips=( # no _refs support for Tensor.__setitem__ DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref'), ), ), PythonRefInfo( "_refs.index_add", torch_opinfo_name="index_add", # empty_strided skips=( # no _refs support for Tensor.__setitem__ DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref'), DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref_errors'), ), ), PythonRefInfo( "_refs.index_fill", torch_opinfo_name="index_fill", # empty_strided skips=( # no _refs support for Tensor.__setitem__ DecorateInfo(unittest.expectedFailure, 'TestCommon', 'test_python_ref'),) ), # # Test-related functions # PythonRefInfo( "_refs.allclose", torch_opinfo_name="allclose", ), # # Misc functions # PythonRefInfo( "_refs.stft", torch_opinfo_name="stft", skips=[ # RuntimeError: no _refs support for aten.pad DecorateInfo( unittest.expectedFailure, 'TestCommon', 'test_python_ref' ), ], ), PythonRefInfo( "_refs.istft", torch_opinfo_name="istft", skips=[ # RuntimeError: no _refs support for aten.unfold_backward DecorateInfo( unittest.expectedFailure, 'TestCommon', 'test_python_ref' ), DecorateInfo( unittest.skip("Expected: unfold_backward() got an unexpected keyword argument 'input_sizes'"), 'TestCommon', 'test_python_ref_executor', dtypes=(torch.complex64, torch.complex128), ), ], ), PythonRefInfo( "_refs.view_as_complex", torch_opinfo_name="view_as_complex", ), PythonRefInfo( "_refs.split_with_sizes", torch_opinfo_name="split_with_sizes", ), ] python_ref_db += opinfo.definitions.python_ref_db # Common operator groupings ops_and_refs = op_db + python_ref_db unary_ufuncs = [op for op in ops_and_refs if isinstance(op, UnaryUfuncInfo)] binary_ufuncs = [op for op in ops_and_refs if isinstance(op, BinaryUfuncInfo)] binary_ufuncs_and_refs = tuple(op for op in ops_and_refs if isinstance(op, BinaryUfuncInfo)) spectral_funcs = [op for op in ops_and_refs if isinstance(op, SpectralFuncInfo)] 
sparse_unary_ufuncs = [op for op in op_db if isinstance(op, UnaryUfuncInfo) and op.supports_sparse]
sparse_csr_unary_ufuncs = [op for op in op_db if isinstance(op, UnaryUfuncInfo) and op.supports_sparse_csr]
sparse_reduction_ops = [op for op in op_db if isinstance(op, ReductionOpInfo) and op.supports_sparse]
shape_funcs = [op for op in ops_and_refs if isinstance(op, ShapeFuncInfo)]
reduction_ops = [op for op in ops_and_refs if isinstance(op, ReductionOpInfo)]
reference_filtered_ops = [op for op in reduction_ops if op.ref is not None]
reference_masked_ops = [op for op in reference_filtered_ops if op.name.startswith('masked.')]
sparse_masked_reduction_ops = [op for op in sparse_reduction_ops if op.name.startswith('masked.')]


def index_variable(shape, max_indices, device=torch.device('cpu')):
    if not isinstance(shape, tuple):
        shape = (shape,)
    return torch.testing.make_tensor(*shape, dtype=torch.long, device=device, low=0, high=max_indices)


def gather_variable(shape, index_dim, max_indices, duplicate=False, device=torch.device('cpu')):
    assert len(shape) == 2
    assert index_dim < 2
    batch_dim = 1 - index_dim
    index = torch.zeros(*shape, dtype=torch.long, device=device)
    for i in range(shape[index_dim]):
        index.select(index_dim, i).copy_(
            torch.randperm(max_indices, device=device)[:shape[batch_dim]])
    if duplicate:
        index.select(batch_dim, 0).copy_(index.select(batch_dim, 1))
    return index


def bernoulli_scalar():
    return torch.tensor(0, dtype=torch.bool).bernoulli_()


def mask_not_all_zeros(shape):
    assert len(shape) > 0
    while True:
        result = torch.randn(shape).gt(0)
        if result.sum() > 0:
            return result


# Copied from functorch
def xfail(op_name, variant_name='', *, device_type=None, dtypes=None):
    return (op_name, variant_name, device_type, dtypes, True)


def skip(op_name, variant_name='', *, device_type=None, dtypes=None):
    return (op_name, variant_name, device_type, dtypes, False)


def skipOps(test_case_name, base_test_name, to_skip):
    all_opinfos = op_db
    for xfail in to_skip:
        op_name, variant_name, device_type, dtypes, expected_failure = xfail
        matching_opinfos = [o for o in all_opinfos
                            if o.name == op_name and o.variant_test_name == variant_name]
        assert len(matching_opinfos) >= 1, f"Couldn't find OpInfo for {xfail}"
        for op in matching_opinfos:
            decorators = list(op.decorators)
            if expected_failure:
                decorator = DecorateInfo(unittest.expectedFailure,
                                         test_case_name, base_test_name,
                                         device_type=device_type, dtypes=dtypes)
                decorators.append(decorator)
            else:
                decorator = DecorateInfo(unittest.skip("Skipped!"),
                                         test_case_name, base_test_name,
                                         device_type=device_type, dtypes=dtypes)
                decorators.append(decorator)
            op.decorators = tuple(decorators)

    # This decorator doesn't modify fn in any way
    def wrapped(fn):
        return fn
    return wrapped
```
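A minimal usage sketch of the helpers defined at the end of the file above: tuples produced by `xfail` / `skip` are handed to `skipOps`, which attaches `unittest.expectedFailure` or `unittest.skip` `DecorateInfo`s to the matching `op_db` entries and returns the test function unchanged. The test class, test name, and the ops chosen below (`TestExample`, `test_example`, `'nn.functional.relu'`, `'add'`) are invented for illustration, not taken from the file; only `xfail`, `skip`, `skipOps`, and `op_db` come from it.

```py
# Illustrative sketch only -- class/test names and the selected ops are made up.
import torch
from torch.testing._internal.common_device_type import instantiate_device_type_tests, ops
from torch.testing._internal.common_methods_invocations import op_db, skip, skipOps, xfail
from torch.testing._internal.common_utils import TestCase, run_tests

known_issues = {
    xfail('nn.functional.relu'),               # mark as expectedFailure for this test
    skip('add', dtypes=(torch.float16,)),      # plain skip, float16 only
}


class TestExample(TestCase):
    @ops(op_db)
    @skipOps('TestExample', 'test_example', known_issues)
    def test_example(self, device, dtype, op):
        # Run each sample input through the op under test.
        for sample in op.sample_inputs(device, dtype):
            op.op(sample.input, *sample.args, **sample.kwargs)


instantiate_device_type_tests(TestExample, globals())

if __name__ == "__main__":
    run_tests()
```

Note that `skipOps` mutates the shared `op_db` entries at class-definition time, so it has to be applied before `instantiate_device_type_tests` reads those decorators.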
================================================================================================================================
SOURCE CODE FILE: common_mkldnn.py
LINES: 1 SIZE: 2.34 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\common_mkldnn.py
ENCODING: utf-8
```py
# mypy: ignore-errors

import contextlib
import functools
import inspect

import torch


# Test whether hardware BF32 math mode enabled. It is enabled only on:
# - MKLDNN is available
# - BF16 is supported by MKLDNN
def bf32_is_not_fp32():
    if not torch.backends.mkldnn.is_available():
        return False
    if not torch.ops.mkldnn._is_mkldnn_bf16_supported():
        return False
    return True


@contextlib.contextmanager
def bf32_off():
    old_matmul_precision = torch.get_float32_matmul_precision()
    try:
        torch.set_float32_matmul_precision("highest")
        yield
    finally:
        torch.set_float32_matmul_precision(old_matmul_precision)


@contextlib.contextmanager
def bf32_on(self, bf32_precision=1e-5):
    old_matmul_precision = torch.get_float32_matmul_precision()
    old_precision = self.precision
    try:
        torch.set_float32_matmul_precision("medium")
        self.precision = bf32_precision
        yield
    finally:
        torch.set_float32_matmul_precision(old_matmul_precision)
        self.precision = old_precision


# This is a wrapper that wraps a test to run this test twice, one with
# allow_bf32=True, another with allow_bf32=False. When running with
# allow_bf32=True, it will use reduced precision as specified by the
# argument
def bf32_on_and_off(bf32_precision=1e-5):
    def with_bf32_disabled(self, function_call):
        with bf32_off():
            function_call()

    def with_bf32_enabled(self, function_call):
        with bf32_on(self, bf32_precision):
            function_call()

    def wrapper(f):
        params = inspect.signature(f).parameters
        arg_names = tuple(params.keys())

        @functools.wraps(f)
        def wrapped(*args, **kwargs):
            for k, v in zip(arg_names, args):
                kwargs[k] = v
            cond = bf32_is_not_fp32()
            if "device" in kwargs:
                cond = cond and (torch.device(kwargs["device"]).type == "cpu")
            if "dtype" in kwargs:
                cond = cond and (kwargs["dtype"] == torch.float)
            if cond:
                with_bf32_disabled(kwargs["self"], lambda: f(**kwargs))
                with_bf32_enabled(kwargs["self"], lambda: f(**kwargs))
            else:
                f(**kwargs)

        return wrapped

    return wrapper
```
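As a hedged usage sketch of `bf32_on_and_off` from the file above: when `bf32_is_not_fp32()` is true and the test runs float32 on CPU, the decorator runs the body twice, once with `float32_matmul_precision` forced to `"highest"` and once with `"medium"` while `self.precision` is relaxed to the given tolerance; otherwise it runs the test once, unmodified. The test class and method below (`TestMatmulPrecision`, `test_mm`) are invented for illustration; only `bf32_on_and_off` comes from the file.

```py
# Illustrative sketch only -- TestMatmulPrecision/test_mm are made-up names.
import torch
from torch.testing._internal.common_device_type import dtypes, instantiate_device_type_tests
from torch.testing._internal.common_mkldnn import bf32_on_and_off
from torch.testing._internal.common_utils import TestCase, run_tests


class TestMatmulPrecision(TestCase):
    @dtypes(torch.float)
    @bf32_on_and_off(0.05)  # second run uses BF32 math mode with self.precision relaxed to 0.05
    def test_mm(self, device, dtype):
        a = torch.randn(64, 64, device=device, dtype=dtype)
        b = torch.randn(64, 64, device=device, dtype=dtype)
        ref = (a.double() @ b.double()).to(dtype)
        self.assertEqual(a @ b, ref, atol=self.precision, rtol=0)


# The decorator only activates for CPU float32, so instantiate the CPU variant.
instantiate_device_type_tests(TestMatmulPrecision, globals(), only_for="cpu")

if __name__ == "__main__":
    run_tests()
```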
=================================================================================================================================
SOURCE CODE FILE: common_modules.py
LINES: 1 SIZE: 219.49 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\common_modules.py
ENCODING: utf-8
```py
# mypy: ignore-errors

import torch
import unittest
from copy import deepcopy
from enum import Enum
from functools import wraps, partial
from itertools import chain, product
import itertools
import math
import torch.nn.functional as F
from torch.nn.utils.rnn import pack_padded_sequence
from torch.testing import make_tensor
from torch.testing._internal.common_cuda import TEST_CUDNN
from torch.testing._internal.common_dtype import (
    floating_types, floating_and_complex_types_and, get_all_fp_dtypes)
from torch.testing._internal.common_device_type import (
    _TestParametrizer, _update_param_kwargs, expectedFailureMPS, toleranceOverride, tol,
    skipCUDAIfCudnnVersionLessThan, skipCUDAIfRocm, precisionOverride, skipMeta, skipMPS,
    skipCUDAVersionIn)
from torch.testing._internal.common_methods_invocations import DecorateInfo
from torch.testing._internal.common_nn import (
    cosineembeddingloss_reference, cross_entropy_loss_reference, ctcloss_reference,
    hingeembeddingloss_reference, huberloss_reference, kldivloss_reference,
    marginrankingloss_reference, multimarginloss_reference, multilabelmarginloss_reference,
    nllloss_reference, nlllossNd_reference, smoothl1loss_reference, softmarginloss_reference,
    get_reduction)
from torch.testing._internal.common_utils import (
    freeze_rng_state, skipIfMPS, skipIfMPSOnMacOS13, GRADCHECK_NONDET_TOL, TEST_WITH_ROCM,
    IS_WINDOWS, skipIfTorchDynamo)
from types import ModuleType
import operator


# List of all namespaces containing modules to test.
MODULE_NAMESPACES: list[ModuleType] = [
    torch.nn.modules,
    torch.ao.nn.qat.modules,
    torch.ao.nn.quantizable.modules,
    torch.ao.nn.quantized.modules,
    torch.ao.nn.quantized.modules,
]

# Modules that shouldn't be tested for one reason or another.
MODULES_TO_SKIP: set[type] = {
    torch.nn.Module,  # abstract base class
    torch.nn.Container,  # deprecated
    torch.nn.NLLLoss2d,  # deprecated
    torch.ao.nn.quantized.MaxPool2d,  # aliases to nn.MaxPool2d
    torch.ao.nn.quantized.MaxPool2d,  # aliases to nn.MaxPool2d
}

# List of all module classes to test.
MODULE_CLASSES: list[type] = [*chain.from_iterable([
    [getattr(namespace, module_name) for module_name in namespace.__all__]  # type: ignore[attr-defined]
    for namespace in MODULE_NAMESPACES])]
MODULE_CLASSES = [cls for cls in MODULE_CLASSES if cls not in MODULES_TO_SKIP]

# Dict of module class -> common name. Useful for making test names more intuitive.
# Example: torch.nn.modules.linear.Linear -> "nn.Linear"
MODULE_CLASS_NAMES: dict[type, str] = {}
for namespace in MODULE_NAMESPACES:
    for module_name in namespace.__all__:  # type: ignore[attr-defined]
        module_cls = getattr(namespace, module_name)
        namespace_name = namespace.__name__.replace('torch.', '').replace('.modules', '')

        # Deal with any aliases by preferring earlier names.
        if module_cls not in MODULE_CLASS_NAMES:
            MODULE_CLASS_NAMES[module_cls] = f'{namespace_name}.{module_name}'


# Specifies the modes (i.e. train, eval) to test over.
TrainEvalMode = Enum('TrainEvalMode', ('train_only', 'eval_only', 'train_and_eval'))


class modules(_TestParametrizer):
    """ PROTOTYPE: Decorator for specifying a list of modules over which to run a test.
""" def __init__(self, module_info_iterable, allowed_dtypes=None, train_eval_mode=TrainEvalMode.train_and_eval, skip_if_dynamo=True): self.module_info_list = list(module_info_iterable) self.allowed_dtypes = set(allowed_dtypes) if allowed_dtypes is not None else None self.train_eval_mode = train_eval_mode self.skip_if_dynamo = skip_if_dynamo def _get_training_flags(self, module_info): training_flags = [] if (self.train_eval_mode == TrainEvalMode.train_only or self.train_eval_mode == TrainEvalMode.train_and_eval): training_flags.append(True) if (self.train_eval_mode == TrainEvalMode.eval_only or self.train_eval_mode == TrainEvalMode.train_and_eval): training_flags.append(False) # If train and eval modes don't differ for the module, don't bother using more than one. if not module_info.train_and_eval_differ: training_flags = training_flags[:1] return training_flags def _parametrize_test(self, test, generic_cls, device_cls): if device_cls is None: raise RuntimeError('The @modules decorator is only intended to be used in a device-specific ' 'context; use it with instantiate_device_type_tests() instead of ' 'instantiate_parametrized_tests()') for module_info in self.module_info_list: dtypes = set(module_info.supported_dtypes(device_cls.device_type)) if self.allowed_dtypes is not None: dtypes = dtypes.intersection(self.allowed_dtypes) training_flags = self._get_training_flags(module_info) for (training, dtype) in product(training_flags, dtypes): # Construct the test name; device / dtype parts are handled outside. # See [Note: device and dtype suffix placement] test_name = module_info.formatted_name if len(training_flags) > 1: test_name += f"_{'train_mode' if training else 'eval_mode'}" # Construct parameter kwargs to pass to the test. param_kwargs = {'module_info': module_info} _update_param_kwargs(param_kwargs, 'dtype', dtype) _update_param_kwargs(param_kwargs, 'training', training) try: @wraps(test) def test_wrapper(*args, **kwargs): return test(*args, **kwargs) if self.skip_if_dynamo and not torch.testing._internal.common_utils.TEST_WITH_TORCHINDUCTOR: test_wrapper = skipIfTorchDynamo("Policy: we don't run ModuleInfo tests w/ Dynamo")(test_wrapper) decorator_fn = partial(module_info.get_decorators, generic_cls.__name__, test.__name__, device_cls.device_type, dtype) yield (test_wrapper, test_name, param_kwargs, decorator_fn) except Exception as ex: # Provides an error message for debugging before rethrowing the exception print(f"Failed to instantiate {test_name} for module {module_info.name}!") raise ex def get_module_common_name(module_cls): if module_cls in MODULE_CLASS_NAMES: # Example: "nn.Linear" return MODULE_CLASS_NAMES[module_cls] else: return module_cls.__name__ class FunctionInput: """ Contains args and kwargs to pass as input to a function. """ __slots__ = ['args', 'kwargs'] def __init__(self, *args, **kwargs): self.args = args self.kwargs = kwargs class ModuleInput: """ Contains args / kwargs for module instantiation + forward pass. 
""" __slots__ = ['constructor_input', 'forward_input', 'desc', 'reference_fn'] def __init__(self, constructor_input, forward_input=None, desc='', reference_fn=None): self.constructor_input = constructor_input # Inputs to pass during construction self.forward_input = forward_input # Inputs to pass to forward() self.desc = desc # Description for this set of inputs self.reference_fn = reference_fn # Reference with signature: reference_fn(module, parameters, *args, **kwargs) if reference_fn is not None: @wraps(reference_fn) def copy_reference_fn(m, *args, **kwargs): # Copy inputs to avoid undesired side effects from calling the reference. args, kwargs = deepcopy(args), deepcopy(kwargs) # Note that module parameters are passed in for convenience. return reference_fn(m, list(m.parameters()), *args, **kwargs) self.reference_fn = copy_reference_fn class ModuleErrorEnum(Enum): """ Enumerates when error is raised when testing modules. """ CONSTRUCTION_ERROR = 0 FORWARD_ERROR = 1 class ErrorModuleInput: """ A ModuleInput that will cause the operation to throw an error plus information about the resulting error. """ __slots__ = ["module_error_input", "error_on", "error_type", "error_regex"] def __init__(self, module_error_input, *, error_on=ModuleErrorEnum.CONSTRUCTION_ERROR, error_type=RuntimeError, error_regex): self.module_error_input = module_error_input self.error_on = error_on self.error_type = error_type self.error_regex = error_regex class ModuleInfo: """ Module information to be used in testing. """ def __init__(self, module_cls, # Class object for the module under test *, module_inputs_func, # Function to generate module inputs skips=(), # Indicates which tests to skip decorators=None, # Additional decorators to apply to generated tests dtypes=floating_types(), # dtypes this function is expected to work with dtypesIfMPS=(torch.float16, torch.float32,), # dtypes this function is expected to work with on MPS dtypesIfHpu=(torch.bfloat16, torch.float32,), supports_gradgrad=True, # whether the op supports second order gradients gradcheck_nondet_tol=0.0, # tolerance for nondeterminism while performing gradcheck module_memformat_affects_out=False, # whether converting module to channels last will generate # channels last output train_and_eval_differ=False, # whether the module has differing behavior between train and eval module_error_inputs_func=None, # Function to generate module inputs that error gradcheck_fast_mode=None, # Whether to use the fast implementation for gradcheck/gradgradcheck. 
# When set to None, defers to the default value provided by the wrapper # function around gradcheck (testing._internal.common_utils.gradcheck) ): self.module_cls = module_cls self.module_inputs_func = module_inputs_func self.decorators = (*(decorators if decorators else []), *(skips if skips else [])) self.dtypes = dtypes self.dtypesIfMPS = dtypesIfMPS self.dtypesIfHpu = dtypesIfHpu self.supports_gradgrad = supports_gradgrad self.gradcheck_nondet_tol = gradcheck_nondet_tol self.module_memformat_affects_out = module_memformat_affects_out self.train_and_eval_differ = train_and_eval_differ self.module_error_inputs_func = module_error_inputs_func self.gradcheck_fast_mode = gradcheck_fast_mode self.is_lazy = issubclass(module_cls, torch.nn.modules.lazy.LazyModuleMixin) def get_decorators(self, test_class, test_name, device, dtype, param_kwargs): result = [] for decorator in self.decorators: if isinstance(decorator, DecorateInfo): if decorator.is_active(test_class, test_name, device, dtype, param_kwargs): result.extend(decorator.decorators) else: result.append(decorator) return result def supported_dtypes(self, device_type): if device_type == 'mps': return self.dtypesIfMPS elif device_type == 'hpu': return self.dtypesIfHpu else: return self.dtypes @property def name(self): return get_module_common_name(self.module_cls) @property def formatted_name(self): return self.name.replace('.', '_') # Start of module inputs functions. def module_inputs_torch_nn_Linear(module_info, device, dtype, requires_grad, training, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) module_inputs = [ ModuleInput(constructor_input=FunctionInput(10, 8), forward_input=FunctionInput(input=make_input((4, 10))), reference_fn=lambda m, p, input: torch.mm(input, p[0].t()) + p[1].view(1, -1).expand(4, 8)), ModuleInput(constructor_input=FunctionInput(10, 8, bias=False), forward_input=FunctionInput(make_input((4, 10))), desc='no_bias', reference_fn=lambda m, p, i: torch.mm(i, p[0].t())), ModuleInput(constructor_input=FunctionInput(3, 5), forward_input=FunctionInput(make_input(3)), desc='no_batch_dim', reference_fn=lambda m, p, i: torch.mm(i.view(1, -1), p[0].t()).view(-1) + p[1]) ] return module_inputs def module_inputs_torch_nn_Bilinear(module_info, device, dtype, requires_grad, training, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) def bilinear_reference_fn(m, p, x1, x2, bias=True): result = torch.einsum('bn,anm,bm->ba', x1, p[0], x2) if bias: if x1.shape[0] == 1: result = result.view(-1) + p[1] else: result = result + p[1].view(1, -1).expand(x1.shape[0], p[0].shape[0]) return result module_inputs = [ ModuleInput(constructor_input=FunctionInput(2, 3, 4), forward_input=FunctionInput(make_input((8, 2)), make_input((8, 3))), reference_fn=bilinear_reference_fn), ModuleInput(constructor_input=FunctionInput(2, 3, 4, bias=False), forward_input=FunctionInput(make_input((8, 2)), make_input((8, 3))), desc='no_bias', reference_fn=lambda m, p, x1, x2: bilinear_reference_fn(m, p, x1, x2, bias=False)), ModuleInput(constructor_input=FunctionInput(2, 3, 4), forward_input=FunctionInput(make_input(2), make_input(3)), desc='no_batch_dim', reference_fn=lambda m, p, x1, x2: bilinear_reference_fn(m, p, x1.view(1, -1), x2.view(1, -1))), ] return module_inputs def module_inputs_torch_nn_KLDivLoss(module_info, device, dtype, requires_grad, training, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, 
requires_grad=requires_grad) cases: list[tuple[str, dict]] = [ ('', {}), ('reduction_sum', {'reduction': 'sum'}), ('reduction_batchmean', {'reduction': 'batchmean'}), ('reduction_none', {'reduction': 'none'}), ('log_target', {'log_target': True}) ] module_inputs = [] for desc, constructor_kwargs in cases: def reference_fn(m, p, i, t, constructor_kwargs=constructor_kwargs): return kldivloss_reference(i, t, **constructor_kwargs) input = make_input((10, 10)).log() target = make_input((10, 10)) if kwargs.get('log_target', False) else make_input((10, 10)).log() module_inputs.append( ModuleInput(constructor_input=FunctionInput(**constructor_kwargs), forward_input=FunctionInput(input, target), desc=desc, reference_fn=reference_fn) ) scalar_input = make_input(()).log() # FIXME(rec): scalar_target is unused, perhaps should be argument to FunctionInput? scalar_target = ( # noqa: F841 make_input(()) if kwargs.get('log_target', False) else make_input(()).log() ) module_inputs.append( ModuleInput(constructor_input=FunctionInput(**constructor_kwargs), forward_input=FunctionInput(scalar_input, scalar_input), desc='scalar_' + desc, reference_fn=reference_fn) ) return module_inputs def module_inputs_torch_nn_NLLLoss(module_info, device, dtype, requires_grad, training, **kwargs): def make_input(shape, device=device, dtype=dtype, requires_grad=requires_grad): return make_tensor(shape, device=device, dtype=dtype, requires_grad=False).log_softmax(dim=1).requires_grad_(requires_grad) make_weight = partial(make_tensor, device=device, dtype=dtype, requires_grad=False) cases: list[tuple[str, dict]] = [ ('', {}), ('reduction_sum', {'reduction': 'sum'}), ('reduction_none', {'reduction': 'none'}), ('ignore_index', {'ignore_index': 2}), ('weights', {'weight': make_weight(4).abs()}), ('weights_ignore_index', {'weight': make_weight(4).abs(), 'ignore_index': 2}), ('weights_ignore_index_neg', {'weight': make_weight(4).abs(), 'ignore_index': -1}) ] # TODO: Uncomment when negative weights is supported. 
# negative_weight = make_weight(10) # negative_weight[0] = -1 # cases.append(('weights_negative', {'weight': negative_weight})) module_inputs = [] for desc, constructor_kwargs in cases: def reference_fn(m, p, i, t, constructor_kwargs=constructor_kwargs): return nllloss_reference(i, t, **constructor_kwargs) module_inputs.append( ModuleInput(constructor_input=FunctionInput(**constructor_kwargs), forward_input=FunctionInput(make_input((15, 4)), torch.empty(15, device=device).uniform_().mul(4).floor().long()), desc=desc, reference_fn=reference_fn) ) def nd_reference_fn(m, p, i, t, constructor_kwargs=constructor_kwargs): return nlllossNd_reference(i, t, **constructor_kwargs) module_inputs.append( ModuleInput(constructor_input=FunctionInput(**constructor_kwargs), forward_input=FunctionInput( make_input((2, 4, 5, 5)), torch.empty(2, 5, 5, device=device).uniform_().mul(4).floor().long()), desc=f"nd_{desc}", reference_fn=nd_reference_fn) ) module_inputs.append( ModuleInput(constructor_input=FunctionInput(**constructor_kwargs), forward_input=FunctionInput( make_input((2, 4, 5, 5, 2, 2)), torch.empty(2, 5, 5, 2, 2, device=device).uniform_().mul(4).floor().long()), desc=f"higher_dim_{desc}", reference_fn=nd_reference_fn) ) module_inputs.append( ModuleInput(constructor_input=FunctionInput(**constructor_kwargs), forward_input=FunctionInput( make_input((2, 4, 5)), torch.empty(2, 5, device=device).uniform_().mul(4).floor().long()), desc=f"3d_{desc}", reference_fn=nd_reference_fn) ) return module_inputs def module_inputs_torch_nn_GaussianNLLLoss(module_info, device, dtype, requires_grad, training, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) make_target = partial(make_tensor, device=device, dtype=dtype, requires_grad=False) cases: list[tuple[str, dict]] = [ ('', {}), ('reduction_sum', {'reduction': 'sum'}), ('reduction_mean', {'reduction': 'mean'}), ('reduction_none', {'reduction': 'none'}), ] module_inputs = [] for desc, constructor_kwargs in cases: module_inputs.append( ModuleInput(constructor_input=FunctionInput(**constructor_kwargs), forward_input=FunctionInput(make_input(3), make_target(3), make_input(1).abs()), desc=desc, reference_fn=no_batch_dim_reference_fn) ) return module_inputs def module_inputs_torch_nn_PoissonNLLLoss(module_info, device, dtype, requires_grad, training, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) make_target = partial(make_tensor, device=device, dtype=dtype, requires_grad=False) cases: list[tuple[str, dict]] = [ ('', {}), ('reduction_sum', {'reduction': 'sum'}), ('reduction_mean', {'reduction': 'mean'}), ('reduction_none', {'reduction': 'none'}), ('full', {'full': True}), ('no_log_input', {'log_input': False}), ('full_no_log_input', {'full': True, 'log_input': False}), ] def poissonnllloss_reference_fn(i, t, log_input=True, full=False, reduction='mean', eps=1e-8): if log_input: result = i.exp() - t.mul(i) else: result = i - t.mul((i + eps).log()) if full: result += (t.mul(t.log()) - t + 0.5 * (2. 
* math.pi * t).log()).masked_fill(t <= 1, 0) if reduction == 'none': return result elif reduction == 'mean': return result.sum() / i.numel() else: return result.sum() module_inputs = [] for desc, constructor_kwargs in cases: def reference_fn(m, p, i, t, constructor_kwargs=constructor_kwargs): return poissonnllloss_reference_fn(i, t, **constructor_kwargs) log_input = constructor_kwargs.get('log_input', True) input = make_input((2, 3, 4, 5)) if log_input else make_input((2, 3, 4, 5)).abs().add(0.001) module_inputs.append( ModuleInput(constructor_input=FunctionInput(**constructor_kwargs), forward_input=FunctionInput(input, make_target((2, 3, 4, 5)).floor_().abs_()), desc=desc, reference_fn=reference_fn) ) return module_inputs def module_inputs_torch_nn_MSELoss(module_info, device, dtype, requires_grad, training, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) make_target = partial(make_tensor, device=device, dtype=dtype, requires_grad=False) cases: list[tuple[str, dict]] = [ ('', {}), ('reduction_sum', {'reduction': 'sum'}), ('reduction_mean', {'reduction': 'mean'}), ('reduction_none', {'reduction': 'none'}), ] def mse_loss_reference_fn(m, p, i, t, reduction='mean'): if reduction == 'none': return (i - t).pow(2) elif reduction == 'mean': return (i - t).pow(2).sum() / i.numel() else: return (i - t).pow(2).sum() module_inputs = [] for desc, constructor_kwargs in cases: module_inputs.append( ModuleInput(constructor_input=FunctionInput(**constructor_kwargs), forward_input=FunctionInput(make_input((2, 3, 4, 5)), make_target((2, 3, 4, 5))), desc=desc, reference_fn=partial(mse_loss_reference_fn, **constructor_kwargs)) ) module_inputs.append( ModuleInput(constructor_input=FunctionInput(**constructor_kwargs), forward_input=FunctionInput(make_input(()), make_target(())), desc=f'{desc}_scalar', reference_fn=partial(mse_loss_reference_fn, **constructor_kwargs)) ) return module_inputs def no_batch_dim_reference_fn(m, p, *args, **kwargs): """Reference function for modules supporting no batch dimensions. Unbatched inputs are unsqueezed to form a single batch input before passing them to the module. The output is squeezed to compare with the output of unbatched input to the module. Currently it only supports modules which return a single Tensor as output. You can bind the following kwargs. Kwargs: batch_first[bool] : If True, all the Tensors in `args` while be unsqueezed at dim `0` . and output will be squeezed at dim `0` else dim `1` for both. kwargs_to_batchify[dict] : Dictionary specifying the name of the argument and dimension to unsqueeze. Useful if there are few arguments whose batch dimension are different from the ones selected by `batch_first`. is_criterion[bool] : Specify if the module is a criterion and handle the reduction for output accordingly. 
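    A hypothetical illustration of ``kwargs_to_batchify`` (the names ``m``, ``params``, ``inp``
    and ``lengths`` are made up for the example): for a module whose forward is invoked as
    ``m(inp, lengths=lengths)``, where ``lengths`` is batched along dim 0 even when
    ``batch_first=False`` selects dim 1 for the positional inputs, one could call

        no_batch_dim_reference_fn(m, params, inp, lengths=lengths,
                                  batch_first=False, kwargs_to_batchify={'lengths': 0})

    so that ``inp`` is unsqueezed at dim 1 while ``lengths`` is unsqueezed at dim 0 before the
    single-item batch is run through the module.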
""" def get_and_pop(key, default): v = kwargs.get(key, default) if key in kwargs: kwargs.pop(key) return v batch_dim = 0 if get_and_pop('batch_first', True) else 1 kwargs_to_batchify = get_and_pop('kwargs_to_batchify', None) is_criterion = get_and_pop('is_criterion', False) if kwargs_to_batchify is not None: assert isinstance(kwargs_to_batchify, dict) for k, v in kwargs.items(): if k in kwargs_to_batchify and v is not None: bdim = kwargs_to_batchify[k] kwargs[k] = v.unsqueeze(bdim) single_batch_input_args = [input.unsqueeze(batch_dim) for input in args] with freeze_rng_state(): output = m(*single_batch_input_args, **kwargs).squeeze(batch_dim) if is_criterion: reduction = get_reduction(m) if reduction == 'none': return output.squeeze(0) return output def no_batch_dim_reference_mha(m, p, *args, **kwargs): """Reference function for MultiheadAttention supporting no batch dimensions. Unbatched inputs are unsqueezed to form a single batch input before passing them to the module. The output is squeezed to compare with the output of unbatched input to the module. """ batch_dim = 0 if kwargs.get('batch_first', True) else 1 if 'batch_first' in kwargs: kwargs.pop('batch_first') if 'key_padding_mask' in kwargs and kwargs['key_padding_mask'] is not None: kwargs['key_padding_mask'] = kwargs['key_padding_mask'].unsqueeze(0) single_batch_input_args = [input.unsqueeze(batch_dim) for input in args] with freeze_rng_state(): output = m(*single_batch_input_args, **kwargs) return (output[0].squeeze(batch_dim), output[1].squeeze(0)) def no_batch_dim_reference_rnn_gru(m, p, *args, **kwargs): """Reference function for RNN and GRU supporting no batch dimensions. Unbatched inputs are unsqueezed to form a single batch input before passing them to the module. The output is squeezed to compare with the output of unbatched input to the module. """ if len(args) == 1: inp, = args h = None elif len(args) == 2: inp, h = args h = h.unsqueeze(1) batch_dim = 0 if kwargs['batch_first'] else 1 kwargs.pop('batch_first') inp = inp.unsqueeze(batch_dim) single_batch_input_args = (inp, h) with freeze_rng_state(): output = m(*single_batch_input_args, **kwargs) return (output[0].squeeze(batch_dim), output[1].squeeze(1)) def no_batch_dim_reference_lstm(m, p, *args, **kwargs): """Reference function for LSTM supporting no batch dimensions. Unbatched inputs are unsqueezed to form a single batch input before passing them to the module. The output is squeezed to compare with the output of unbatched input to the module. """ if len(args) == 1: inp, = args h = None elif len(args) == 2: inp, h = args h = (h[0].unsqueeze(1), h[1].unsqueeze(1)) batch_dim = 0 if kwargs['batch_first'] else 1 kwargs.pop('batch_first') inp = inp.unsqueeze(batch_dim) single_batch_input_args = (inp, h) with freeze_rng_state(): output = m(*single_batch_input_args, **kwargs) return (output[0].squeeze(batch_dim), (output[1][0].squeeze(1), output[1][1].squeeze(1))) def no_batch_dim_reference_lstmcell(m, p, *args, **kwargs): """Reference function for LSTMCell supporting no batch dimensions. The module is passed the input and target in batched form with a single item. The output is squeezed to compare with the no-batch input. 
""" inp, (h, c) = args single_batch_input_args = (inp.unsqueeze(0), (h.unsqueeze(0), c.unsqueeze(0))) with freeze_rng_state(): output = m(*single_batch_input_args, **kwargs) return (output[0].squeeze(0), output[1].squeeze(0)) def generate_regression_criterion_inputs(make_input): return [ ModuleInput( constructor_input=FunctionInput(reduction=reduction), forward_input=FunctionInput(make_input((4, )), make_input(4,)), reference_fn=partial(no_batch_dim_reference_fn, is_criterion=True), desc=f'no_batch_dim_{reduction}' ) for reduction in ['none', 'mean', 'sum']] def module_inputs_torch_nn_AvgPool1d(module_info, device, dtype, requires_grad, training, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) return [ ModuleInput(constructor_input=FunctionInput(kernel_size=2), forward_input=FunctionInput(make_input((3, 6))), desc='no_batch_dim', reference_fn=no_batch_dim_reference_fn), ModuleInput(constructor_input=FunctionInput(2), forward_input=FunctionInput(make_input((2, 3, 6)))), ModuleInput(constructor_input=FunctionInput((2,), (2,)), forward_input=FunctionInput(make_input((2, 3, 6))), desc='stride'), ModuleInput(constructor_input=FunctionInput(2, 2, 1), forward_input=FunctionInput(make_input((2, 3, 6))), desc='stride_pad')] def module_inputs_torch_nn_AvgPool2d(module_info, device, dtype, requires_grad, training, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) return [ ModuleInput(constructor_input=FunctionInput((2, 2)), forward_input=FunctionInput(make_input((3, 6, 6))), desc='no_batch_dim', reference_fn=no_batch_dim_reference_fn), ModuleInput(constructor_input=FunctionInput((2, 2)), forward_input=FunctionInput(make_input((2, 3, 6, 6)))), ModuleInput(constructor_input=FunctionInput((2, 2), (2, 2)), forward_input=FunctionInput(make_input((2, 3, 6, 6))), desc='stride'), ModuleInput(constructor_input=FunctionInput((2, 2), (2, 2), (1, 1)), forward_input=FunctionInput(make_input((2, 3, 6, 6))), desc='stride_pad'), ModuleInput(constructor_input=FunctionInput((2, 2), divisor_override=1), forward_input=FunctionInput(make_input((2, 3, 6, 6))), desc='divisor'), ModuleInput(constructor_input=FunctionInput((2, 2), (2, 2), divisor_override=1), forward_input=FunctionInput(make_input((2, 3, 6, 6))), desc='divisor_stride'), ModuleInput(constructor_input=FunctionInput((2, 2), (2, 2), (1, 1), divisor_override=1), forward_input=FunctionInput(make_input((2, 3, 6, 6))), desc='divisor_stride_pad')] def module_inputs_torch_nn_AvgPool3d(module_info, device, dtype, requires_grad, training, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) return [ ModuleInput(constructor_input=FunctionInput((2, 2, 2)), forward_input=FunctionInput(make_input((3, 4, 4, 4))), desc='no_batch_dim', reference_fn=no_batch_dim_reference_fn), ModuleInput(constructor_input=FunctionInput((2, 2, 2)), forward_input=FunctionInput(make_input((2, 3, 4, 4, 4)))), ModuleInput(constructor_input=FunctionInput(2, (2, 2, 2)), forward_input=FunctionInput(make_input((2, 3, 5, 5, 5))), desc='stride'), ModuleInput(constructor_input=FunctionInput(2, 2, (1, 1, 1)), forward_input=FunctionInput(make_input((2, 3, 5, 5, 5))), desc='stride_pad'), ModuleInput(constructor_input=FunctionInput(4, 2, (1, 2, 1)), forward_input=FunctionInput(make_input((2, 3, 5, 5, 5))), desc='stride_pad_gpu_fixedkw_output'), ModuleInput(constructor_input=FunctionInput((2, 4, 8), 1, (1, 1, 2)), forward_input=FunctionInput(make_input((2, 
3, 2, 4, 8))), desc='stride_pad_gpu_general_output'), ModuleInput(constructor_input=FunctionInput(3, 1, 0), forward_input=FunctionInput(make_input((2, 3, 4, 4, 4))), desc='stride1_pad0_gpu_input'), ModuleInput(constructor_input=FunctionInput(2, 2, (1, 1, 1)), forward_input=FunctionInput(make_input((2, 3, 4, 4, 4))), desc='stride_pad_gpu_input_nooverlap'), ModuleInput(constructor_input=FunctionInput((2, 2, 2), divisor_override=1), forward_input=FunctionInput(make_input((2, 3, 4, 4, 4))), desc='divisor'), ModuleInput(constructor_input=FunctionInput(2, (2, 2, 2), divisor_override=1), forward_input=FunctionInput(make_input((2, 3, 5, 5, 5))), desc='divisor_stride'), ModuleInput(constructor_input=FunctionInput(2, 2, (1, 1, 1), divisor_override=1), forward_input=FunctionInput(make_input((2, 3, 5, 5, 5))), desc='divisor_stride_pad'), ModuleInput(constructor_input=FunctionInput(4, 2, (1, 2, 1), divisor_override=1), forward_input=FunctionInput(make_input((2, 3, 5, 5, 5))), desc='divisor_stride_pad_gpu_fixedkw_output'), ModuleInput(constructor_input=FunctionInput((2, 4, 8), 1, (1, 1, 2), divisor_override=1), forward_input=FunctionInput(make_input((2, 3, 2, 4, 8))), desc='divisor_stride_pad_gpu_general_output'), ModuleInput(constructor_input=FunctionInput(3, 1, 0, divisor_override=1), forward_input=FunctionInput(make_input((2, 3, 4, 4, 4))), desc='divisor_stride1_pad0_gpu_input'), ModuleInput(constructor_input=FunctionInput(2, 2, (1, 1, 1), divisor_override=1), forward_input=FunctionInput(make_input((2, 3, 4, 4, 4))), desc='divisor_stride_pad_gpu_input_nooverlap')] def module_inputs_torch_nn_AdaptiveAvgPool1d(module_info, device, dtype, requires_grad, training, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) return [ ModuleInput(constructor_input=FunctionInput(3,), forward_input=FunctionInput(make_input((1, 3, 5))), desc='single'), ModuleInput(constructor_input=FunctionInput(3,), forward_input=FunctionInput(make_input((3, 5))), reference_fn=no_batch_dim_reference_fn, desc='no_batch_dim'), ModuleInput(constructor_input=FunctionInput(1,), forward_input=FunctionInput(make_input((1, 3, 5))), desc='one_output')] def module_inputs_torch_nn_AdaptiveAvgPool2d(module_info, device, dtype, requires_grad, training, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) return [ ModuleInput(constructor_input=FunctionInput(3,), forward_input=FunctionInput(make_input((1, 3, 5, 6))), desc='single'), ModuleInput(constructor_input=FunctionInput(3,), forward_input=FunctionInput(make_input((3, 5, 6))), reference_fn=no_batch_dim_reference_fn, desc='no_batch_dim'), ModuleInput(constructor_input=FunctionInput(1,), forward_input=FunctionInput(make_input((1, 3, 5, 6))), desc='single_1x1output'), ModuleInput(constructor_input=FunctionInput((3, 4)), forward_input=FunctionInput(make_input((1, 3, 5, 6))), desc='tuple'), ModuleInput(constructor_input=FunctionInput((3, None)), forward_input=FunctionInput(make_input((1, 3, 5, 6))), desc='tuple_none')] def module_inputs_torch_nn_AdaptiveAvgPool3d(module_info, device, dtype, requires_grad, training, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) return [ ModuleInput(constructor_input=FunctionInput(3,), forward_input=FunctionInput(make_input((2, 3, 5, 2, 7))), desc='single'), ModuleInput(constructor_input=FunctionInput(3,), forward_input=FunctionInput(make_input((3, 5, 2, 7))), reference_fn=no_batch_dim_reference_fn, 
desc='no_batch_dim'), ModuleInput(constructor_input=FunctionInput((3, 4, 5)), forward_input=FunctionInput(make_input((2, 3, 5, 3, 7))), desc='tuple'), ModuleInput(constructor_input=FunctionInput((None, 4, 5)), forward_input=FunctionInput(make_input((2, 3, 5, 3, 7))), desc='tuple_none'), ModuleInput(constructor_input=FunctionInput((3, 2, 2)), forward_input=FunctionInput(make_input((1, 1, 3, 2, 6))), desc='last_dim')] def module_inputs_torch_nn_AdaptiveMaxPool1d(module_info, device, dtype, requires_grad, training, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) return [ ModuleInput(constructor_input=FunctionInput(3,), forward_input=FunctionInput(make_input((1, 3, 5))), desc='single'), ModuleInput(constructor_input=FunctionInput(3,), forward_input=FunctionInput(make_input((3, 5))), reference_fn=no_batch_dim_reference_fn, desc='no_batch_dim')] def module_inputs_torch_nn_AdaptiveMaxPool2d(module_info, device, dtype, requires_grad, training, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) return [ ModuleInput(constructor_input=FunctionInput(3,), forward_input=FunctionInput(make_input((1, 3, 5, 6))), desc='single'), ModuleInput(constructor_input=FunctionInput(3,), forward_input=FunctionInput(make_input((3, 5, 6))), reference_fn=no_batch_dim_reference_fn, desc='no_batch_dim'), ModuleInput(constructor_input=FunctionInput((3, 4)), forward_input=FunctionInput(make_input((1, 3, 5, 6))), desc='tuple'), ModuleInput(constructor_input=FunctionInput((3, None)), forward_input=FunctionInput(make_input((1, 3, 5, 6))), desc='tuple_none')] def module_inputs_torch_nn_AdaptiveMaxPool3d(module_info, device, dtype, requires_grad, training, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) return [ ModuleInput(constructor_input=FunctionInput(3,), forward_input=FunctionInput(make_input((2, 3, 5, 6, 7))), desc='single'), ModuleInput(constructor_input=FunctionInput(3,), forward_input=FunctionInput(make_input((3, 5, 6, 7))), reference_fn=no_batch_dim_reference_fn, desc='no_batch_dim'), ModuleInput(constructor_input=FunctionInput((3, 4, 5)), forward_input=FunctionInput(make_input((2, 3, 5, 6, 7))), desc='tuple'), ModuleInput(constructor_input=FunctionInput((3, None, 5)), forward_input=FunctionInput(make_input((2, 3, 5, 6, 7))), desc='tuple_none'), ModuleInput(constructor_input=FunctionInput(3), forward_input=FunctionInput(make_input((2, 3, 12, 9, 3))), desc='single_nonatomic'), ModuleInput(constructor_input=FunctionInput((3, 4, 5)), forward_input=FunctionInput(make_input((2, 3, 6, 4, 10))), desc='tuple_nonatomic')] def module_inputs_torch_nn_BatchNorm1d(module_info, device, dtype, requires_grad, training, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) return [ ModuleInput(constructor_input=FunctionInput(10,), forward_input=FunctionInput(make_input((4, 10))), desc='affine'), ModuleInput(constructor_input=FunctionInput(5,), forward_input=FunctionInput(make_input((4, 5, 3))), desc='3d_input'), ModuleInput(constructor_input=FunctionInput(10, 1e-3, None), forward_input=FunctionInput(make_input((4, 10))), desc='affine_simple_average'), ModuleInput(constructor_input=FunctionInput(10, 1e-3, 0.3, False), forward_input=FunctionInput(make_input((4, 10))), desc='not_affine'), ModuleInput(constructor_input=FunctionInput(10, 1e-3, 0.3, True, False), forward_input=FunctionInput(make_input((4, 10))), 
desc='not_tracking_stats'), ModuleInput(constructor_input=FunctionInput(5, 1e-3, 0.3, False), forward_input=FunctionInput(make_input((4, 5, 3))), desc='3d_input_not_affine'), ModuleInput(constructor_input=FunctionInput(5, 1e-3, 0.3, False), forward_input=FunctionInput(make_input((0, 5, 9))), desc='zero_batch')] def module_inputs_torch_nn_BatchNorm2d(module_info, device, dtype, requires_grad, training, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) return [ ModuleInput(constructor_input=FunctionInput(3,), forward_input=FunctionInput(make_input((2, 3, 6, 6)))), ModuleInput(constructor_input=FunctionInput(3, 1e-3, None), forward_input=FunctionInput(make_input((2, 3, 6, 6))), desc='2d_simple_average'), ModuleInput(constructor_input=FunctionInput(3, 1e-3, 0.8), forward_input=FunctionInput(make_input((2, 3, 6, 6))), desc='momentum'), ModuleInput(constructor_input=FunctionInput(3, 1e-3, 0.8, False), forward_input=FunctionInput(make_input((2, 3, 6, 6))), desc='not_affine'), ModuleInput(constructor_input=FunctionInput(3, 1e-3, 0.8, True, False), forward_input=FunctionInput(make_input((2, 3, 6, 6))), desc='not_tracking_stats'), ModuleInput(constructor_input=FunctionInput(5, 1e-3, 0.3, False), forward_input=FunctionInput(make_input((0, 5, 2, 2))), desc='zero_batch')] def module_inputs_torch_nn_BatchNorm3d(module_info, device, dtype, requires_grad, training, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) return [ ModuleInput(constructor_input=FunctionInput(3,), forward_input=FunctionInput(make_input((2, 3, 4, 4, 4)))), ModuleInput(constructor_input=FunctionInput(3, 1e-3, None), forward_input=FunctionInput(make_input((2, 3, 4, 4, 4))), desc='3d_simple_average'), ModuleInput(constructor_input=FunctionInput(3, 1e-3, 0.7), forward_input=FunctionInput(make_input((2, 3, 4, 4, 4))), desc='momentum'), ModuleInput(constructor_input=FunctionInput(3, 1e-3, 0.7, False), forward_input=FunctionInput(make_input((2, 3, 4, 4, 4))), desc='not_affine'), ModuleInput(constructor_input=FunctionInput(3, 1e-3, 0.7, True, False), forward_input=FunctionInput(make_input((2, 3, 4, 4, 4))), desc='not_tracking_stats'), ModuleInput(constructor_input=FunctionInput(5, 1e-3, 0.3, False), forward_input=FunctionInput(make_input((0, 5, 2, 2, 2))), desc='zero_batch')] def module_inputs_torch_nn_ConvNd(module_info, device, dtype, requires_grad, training, **kwargs): N = kwargs['N'] lazy = kwargs.get('lazy', False) transposed = kwargs.get('transposed', False) make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) conv_kwargs_list = [{}] if transposed else [{}, {'padding': 'same'}] kernel_size, C_in, C_out = 3, 4, 5 input_no_batch_shape = (C_in,) + tuple(i + 3 for i in range(N)) input_batch_shape = (2,) + input_no_batch_shape return [ ModuleInput(constructor_input=(FunctionInput(C_out, kernel_size, **conv_kwargs) if lazy else FunctionInput(C_in, C_out, kernel_size, **conv_kwargs)), forward_input=FunctionInput(make_input( input_batch_shape if with_batch else input_no_batch_shape)), desc=('' if with_batch else 'no_batch_dim'), reference_fn=(None if with_batch else no_batch_dim_reference_fn)) for with_batch, conv_kwargs in itertools.product([True, False], conv_kwargs_list) ] def module_inputs_torch_nn_CosineEmbeddingLoss(module_info, device, dtype, requires_grad, training, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) make_target = 
partial(make_tensor, device=device, dtype=dtype, requires_grad=False) cases: list[tuple[str, dict]] = [ ('', {}), ('reduction_sum', {'reduction': 'sum'}), ('reduction_mean', {'reduction': 'mean'}), ('reduction_none', {'reduction': 'none'}), ('margin', {'margin': 0.7}) ] module_inputs = [] for desc, constructor_kwargs in cases: def reference_fn(m, p, i1, i2, t, constructor_kwargs=constructor_kwargs): return cosineembeddingloss_reference(i1, i2, t, **constructor_kwargs) module_inputs.append( ModuleInput(constructor_input=FunctionInput(**constructor_kwargs), forward_input=FunctionInput(make_input((15, 10)), make_input((15, 10)), make_target((15,)).sign()), desc=desc, reference_fn=reference_fn) ) return module_inputs def module_inputs_torch_nn_ELU(module_info, device, dtype, requires_grad, training, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) return [ ModuleInput(constructor_input=FunctionInput(alpha=2.), forward_input=FunctionInput(make_input((3, 2, 5))), reference_fn=lambda m, p, i: torch.where(i >= 0, i, 2 * (i.exp() - 1))), ModuleInput(constructor_input=FunctionInput(alpha=2.), forward_input=FunctionInput(make_input(())), desc='scalar'), ModuleInput(constructor_input=FunctionInput(), forward_input=FunctionInput(make_input((3,))), desc='no_batch_dim', reference_fn=no_batch_dim_reference_fn), ModuleInput(constructor_input=FunctionInput(alpha=2.), forward_input=FunctionInput(make_input((2, 3, 2, 5))), desc='4d_input')] def module_inputs_torch_nn_CELU(module_info, device, dtype, requires_grad, training, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) return [ ModuleInput(constructor_input=FunctionInput(alpha=2.), forward_input=FunctionInput(make_input((3, 2, 5))), reference_fn=lambda m, p, i: torch.where(i >= 0, i, 2. * ((.5 * i).exp() - 1))), ModuleInput(constructor_input=FunctionInput(alpha=2.), forward_input=FunctionInput(make_input(())), reference_fn=lambda m, p, i: torch.where(i >= 0, i, 2. 
* ((.5 * i).exp() - 1)), desc='scalar'), ModuleInput(constructor_input=FunctionInput(alpha=2.), forward_input=FunctionInput(make_input((3,))), desc='no_batch_dim', reference_fn=no_batch_dim_reference_fn)] def module_inputs_torch_nn_GLU(module_info, device, dtype, requires_grad, training, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) return [ ModuleInput(constructor_input=FunctionInput(), forward_input=FunctionInput(make_input((5, 6)))), ModuleInput(constructor_input=FunctionInput(1), forward_input=FunctionInput(make_input((5, 6, 7))), desc='dim'), ModuleInput(constructor_input=FunctionInput(), forward_input=FunctionInput(make_input((4,))), desc='no_batch_dim', reference_fn=no_batch_dim_reference_fn)] def module_inputs_torch_nn_GELU(module_info, device, dtype, requires_grad, training, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) return [ ModuleInput(constructor_input=FunctionInput('none'), forward_input=FunctionInput(make_input(())), reference_fn=lambda m, p, x, *_: x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0))), desc='scalar'), ModuleInput(constructor_input=FunctionInput('none'), forward_input=FunctionInput(make_input((3, 2, 5))), reference_fn=lambda m, p, x, *_: x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))), ModuleInput(constructor_input=FunctionInput(), forward_input=FunctionInput(make_input((3,))), desc='no_batch_dim', reference_fn=no_batch_dim_reference_fn)] def module_inputs_torch_nn_ReLU(module_info, device, dtype, requires_grad, training, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) return [ ModuleInput(constructor_input=FunctionInput(), forward_input=FunctionInput(make_input(())), desc='scalar'), ModuleInput(constructor_input=FunctionInput(), forward_input=FunctionInput(make_input(4)), reference_fn=no_batch_dim_reference_fn, desc='no_batch_dim'), ModuleInput(constructor_input=FunctionInput(), forward_input=FunctionInput(make_input((2, 3, 4, 5))), desc='channels_last_mem_format'), ModuleInput(constructor_input=FunctionInput(), forward_input=FunctionInput(make_input((2, 3, 3, 4, 5))), desc='channels_last_3d_mem_format')] def module_inputs_torch_nn_ReLU6(module_info, device, dtype, requires_grad, training, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) return [ ModuleInput(constructor_input=FunctionInput(), forward_input=FunctionInput(make_input(())), desc='scalar'), ModuleInput(constructor_input=FunctionInput(), forward_input=FunctionInput(make_input(4)), reference_fn=no_batch_dim_reference_fn, desc='no_batch_dim'), ModuleInput(constructor_input=FunctionInput(), forward_input=FunctionInput(make_input((2, 3, 4, 5))), desc='channels_last_mem_format'), ModuleInput(constructor_input=FunctionInput(), forward_input=FunctionInput(make_input((2, 3, 3, 4, 5))), desc='channels_last_3d_mem_format')] def module_inputs_torch_nn_LeakyReLU(module_info, device, dtype, requires_grad, training, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) return [ ModuleInput(constructor_input=FunctionInput(), forward_input=FunctionInput(make_input((3, 2, 5)))), ModuleInput(constructor_input=FunctionInput(), forward_input=FunctionInput(make_input(4)), reference_fn=no_batch_dim_reference_fn, desc='no_batch_dim'), ModuleInput(constructor_input=FunctionInput(0.5), forward_input=FunctionInput(make_input((3, 2, 5))), desc='with_negval'), 
ModuleInput(constructor_input=FunctionInput(0.0), forward_input=FunctionInput(make_input((10, 10))), desc='with_zero_negval'), ModuleInput(constructor_input=FunctionInput(0.5), forward_input=FunctionInput(make_input(())), desc='with_negval_scalar')] def module_inputs_torch_nn_PReLU(module_info, device, dtype, requires_grad, training, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) return [ ModuleInput(constructor_input=FunctionInput(), forward_input=FunctionInput(make_input(())), desc='scalar'), ModuleInput(constructor_input=FunctionInput(), forward_input=FunctionInput(make_input(4)), reference_fn=no_batch_dim_reference_fn, desc='no_batch_dim'), ModuleInput(constructor_input=FunctionInput(), forward_input=FunctionInput(make_input((2, 3, 4))), reference_fn=lambda m, p, i: torch.clamp(i, min=0) + torch.clamp(i, max=0) * p[0][0], desc='1d'), ModuleInput(constructor_input=FunctionInput(3), forward_input=FunctionInput(make_input((2, 3, 4))), reference_fn=lambda m, p, i: torch.clamp(i, min=0) + torch.clamp(i, max=0) * p[0][0], desc='1d_multiparam'), ModuleInput(constructor_input=FunctionInput(), forward_input=FunctionInput(make_input((2, 3, 4, 5))), reference_fn=lambda m, p, i: torch.clamp(i, min=0) + torch.clamp(i, max=0) * p[0][0], desc='2d'), ModuleInput(constructor_input=FunctionInput(3), forward_input=FunctionInput(make_input((2, 3, 4, 5))), reference_fn=lambda m, p, i: torch.clamp(i, min=0) + torch.clamp(i, max=0) * p[0][0], desc='2d_multiparam'), ModuleInput(constructor_input=FunctionInput(), forward_input=FunctionInput(make_input((2, 3, 4, 5, 6))), reference_fn=lambda m, p, i: torch.clamp(i, min=0) + torch.clamp(i, max=0) * p[0][0], desc='3d'), ModuleInput(constructor_input=FunctionInput(3), forward_input=FunctionInput(make_input((2, 3, 4, 5, 6))), reference_fn=lambda m, p, i: torch.clamp(i, min=0) + torch.clamp(i, max=0) * p[0][0], desc='3d_multiparam')] def module_inputs_torch_nn_SELU(module_info, device, dtype, requires_grad, training, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) return [ ModuleInput(constructor_input=FunctionInput(), forward_input=FunctionInput(make_input((3, 2, 5)))), ModuleInput(constructor_input=FunctionInput(), forward_input=FunctionInput(make_input(4)), reference_fn=no_batch_dim_reference_fn, desc='no_batch_dim'), ModuleInput(constructor_input=FunctionInput(), forward_input=FunctionInput(make_input(())), desc='scalar')] def module_inputs_torch_nn_SiLU(module_info, device, dtype, requires_grad, training, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) return [ ModuleInput(constructor_input=FunctionInput(), forward_input=FunctionInput(make_input(())), reference_fn=lambda m, p, x, *_: x * torch.sigmoid(x), desc='scalar'), ModuleInput(constructor_input=FunctionInput(), forward_input=FunctionInput(make_input(4)), reference_fn=no_batch_dim_reference_fn, desc='no_batch_dim'), ModuleInput(constructor_input=FunctionInput(), forward_input=FunctionInput(make_input((5, 6, 7))), reference_fn=lambda m, p, x, *_: x * torch.sigmoid(x))] def module_inputs_torch_nn_Softmax(module_info, device, dtype, requires_grad, training, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) return [ ModuleInput(constructor_input=FunctionInput(1), forward_input=FunctionInput(make_input((10, 20))), reference_fn=lambda m, p, i: torch.exp(i).div(torch.exp(i).sum(1, True).expand(10, 
20))), ModuleInput(constructor_input=FunctionInput(0), forward_input=FunctionInput(make_input(())), reference_fn=lambda m, p, i: torch.exp(i).div(torch.exp(i).sum(0, True)), desc='scalar'), ModuleInput(constructor_input=FunctionInput(-1), forward_input=FunctionInput(make_input((4, 5))), reference_fn=no_batch_dim_reference_fn, desc='no_batch_dim')] def module_inputs_torch_nn_Softmax2d(module_info, device, dtype, requires_grad, training, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) return [ ModuleInput(constructor_input=FunctionInput(), forward_input=FunctionInput(make_input((1, 3, 10, 20))), reference_fn=lambda m, p, i: torch.exp(i).div(torch.exp(i).sum(1, False))), ModuleInput(constructor_input=FunctionInput(), forward_input=FunctionInput(make_input((3, 4, 5))), reference_fn=no_batch_dim_reference_fn, desc='no_batch_dim')] def module_inputs_torch_nn_LogSoftmax(module_info, device, dtype, requires_grad, training, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) return [ ModuleInput(constructor_input=FunctionInput(1), forward_input=FunctionInput(make_input((10, 20))), reference_fn=lambda m, p, i: torch.exp(i).div_(torch.exp(i).sum(1, True).expand(10, 20)).log_()), ModuleInput(constructor_input=FunctionInput(1), forward_input=FunctionInput(make_input((1, 3, 10, 20))), reference_fn=lambda m, p, i: torch.exp(i).div_(torch.exp(i).sum(1, False)).log_(), desc='multiparam'), ModuleInput(constructor_input=FunctionInput(0), forward_input=FunctionInput(make_input(())), reference_fn=lambda m, p, i: torch.exp(i).div_(torch.exp(i).sum(0, False)).log_(), desc='multiparam_scalar'), ModuleInput(constructor_input=FunctionInput(-1), forward_input=FunctionInput(make_input((4, 5))), reference_fn=no_batch_dim_reference_fn, desc='no_batch_dim')] def module_inputs_torch_nn_Softmin(module_info, device, dtype, requires_grad, training, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) return [ ModuleInput(constructor_input=FunctionInput(1), forward_input=FunctionInput(make_input((10, 20)))), ModuleInput(constructor_input=FunctionInput(1), forward_input=FunctionInput(make_input((2, 3, 5, 10))), desc='multidim'), ModuleInput(constructor_input=FunctionInput(0), forward_input=FunctionInput(make_input(())), desc='scalar'), ModuleInput(constructor_input=FunctionInput(-1), forward_input=FunctionInput(make_input((3, 4, 10))), reference_fn=no_batch_dim_reference_fn, desc='no_batch_dim')] def module_inputs_torch_nn_Softplus(module_info, device, dtype, requires_grad, training, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) return [ ModuleInput(constructor_input=FunctionInput(), forward_input=FunctionInput(make_input((10, 20))), reference_fn=lambda m, p, i: torch.log1p(torch.exp(i))), ModuleInput(constructor_input=FunctionInput(2), forward_input=FunctionInput(make_input((10, 20))), reference_fn=lambda m, p, i: 1. / 2. * torch.log1p(torch.exp(2 * i)), desc='beta'), ModuleInput(constructor_input=FunctionInput(2, -100), forward_input=FunctionInput(make_input((10, 20))), reference_fn=( lambda m, p, i: ((i * 2) > -100).type_as(i) * i + ((i * 2) <= -100).type_as(i) * 1. / 2. 
* torch.log1p(torch.exp(2 * i))), desc='beta_threshold'), ModuleInput(constructor_input=FunctionInput(2, -100), forward_input=FunctionInput(make_input(())), reference_fn=( lambda m, p, i: ((i * 2) > -100).type_as(i) * i + ((i * 2) <= -100).type_as(i) * 1. / 2. * torch.log1p(torch.exp(2 * i))), desc='beta_threshold_scalar'), ModuleInput(constructor_input=FunctionInput(), forward_input=FunctionInput(make_input(4)), reference_fn=no_batch_dim_reference_fn, desc='no_batch_dim')] def module_inputs_torch_nn_Softshrink(module_info, device, dtype, requires_grad, training, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) return [ ModuleInput(constructor_input=FunctionInput(), forward_input=FunctionInput(make_input((3, 2, 5)))), ModuleInput(constructor_input=FunctionInput(1,), forward_input=FunctionInput(make_input((3, 2, 5))), desc='lambda'), ModuleInput(constructor_input=FunctionInput(1,), forward_input=FunctionInput(make_input(())), desc='lambda_scalar'), ModuleInput(constructor_input=FunctionInput(), forward_input=FunctionInput(make_input(4)), reference_fn=no_batch_dim_reference_fn, desc='no_batch_dim')] def module_inputs_torch_nn_Softsign(module_info, device, dtype, requires_grad, training, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) return [ ModuleInput(constructor_input=FunctionInput(), forward_input=FunctionInput(make_input((3, 2, 5))), reference_fn=lambda m, p, i: i.div(1 + torch.abs(i))), ModuleInput(constructor_input=FunctionInput(), forward_input=FunctionInput(make_input(())), reference_fn=lambda m, p, i: i.div(1 + torch.abs(i)), desc='scalar'), ModuleInput(constructor_input=FunctionInput(), forward_input=FunctionInput(make_input(4)), reference_fn=no_batch_dim_reference_fn, desc='no_batch_dim')] def module_inputs_torch_nn_Tanh(module_info, device, dtype, requires_grad, training, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) return [ ModuleInput(constructor_input=FunctionInput(), forward_input=FunctionInput(make_input((2, 3, 4, 5)))), ModuleInput(constructor_input=FunctionInput(), forward_input=FunctionInput(make_input(())), desc='scalar'), ModuleInput(constructor_input=FunctionInput(), forward_input=FunctionInput(make_input(4)), reference_fn=no_batch_dim_reference_fn, desc='no_batch_dim')] def module_inputs_torch_nn_Tanhshrink(module_info, device, dtype, requires_grad, training, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) return [ ModuleInput(constructor_input=FunctionInput(), forward_input=FunctionInput(make_input((2, 3, 4, 5)))), ModuleInput(constructor_input=FunctionInput(), forward_input=FunctionInput(make_input(())), desc='scalar'), ModuleInput(constructor_input=FunctionInput(), forward_input=FunctionInput(make_input(4)), reference_fn=no_batch_dim_reference_fn, desc='no_batch_dim')] def module_inputs_torch_nn_Threshold(module_info, device, dtype, requires_grad, training, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) return [ ModuleInput(constructor_input=FunctionInput(2., 1.), forward_input=FunctionInput(make_input((2, 3, 4, 5))), desc='threshold_value'), ModuleInput(constructor_input=FunctionInput(2., 10.), forward_input=FunctionInput(make_input((2, 3, 4, 5))), desc='large_value'), ModuleInput(constructor_input=FunctionInput(2., 1.), forward_input=FunctionInput(make_input(())), 
desc='threshold_value_scalar'), ModuleInput(constructor_input=FunctionInput(2., 1.), forward_input=FunctionInput(make_input(4)), reference_fn=no_batch_dim_reference_fn, desc='no_batch_dim')] def module_inputs_torch_nn_Mish(module_info, device, dtype, requires_grad, training, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) return [ ModuleInput(constructor_input=FunctionInput(), forward_input=FunctionInput(make_input((5, 6, 7))), reference_fn=lambda m, p, i: i * torch.tanh(F.softplus(i))), ModuleInput(constructor_input=FunctionInput(), forward_input=FunctionInput(make_input(())), reference_fn=lambda m, p, i: i * torch.tanh(F.softplus(i)), desc='scalar'), ModuleInput(constructor_input=FunctionInput(), forward_input=FunctionInput(make_input(4)), reference_fn=no_batch_dim_reference_fn, desc='no_batch_dim')] def module_inputs_torch_nn_L1Loss(module_info, device, dtype, requires_grad, training, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) return [ ModuleInput(constructor_input=FunctionInput(), forward_input=FunctionInput(make_input((2, 3, 4)), make_input((2, 3, 4))), reference_fn=lambda m, p, i, t: 1. / i.numel() * sum((a - b).abs().sum() for a, b in zip(i, t))), ModuleInput(constructor_input=FunctionInput(), forward_input=FunctionInput(make_input(()), make_input(())), reference_fn=lambda m, p, i, t: 1. / i.numel() * (i - t).abs().sum(), desc='scalar')] + generate_regression_criterion_inputs(make_input) def module_inputs_torch_nn_SmoothL1Loss(module_info, device, dtype, requires_grad, training, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) cases: list[tuple[str, dict]] = [ ('', {}), ('reduction_sum', {'reduction': 'sum'}), ('reduction_mean', {'reduction': 'mean'}), ('reduction_none', {'reduction': 'none'}), ] module_inputs = [] for desc, constructor_kwargs in cases: def reference_fn(m, p, i, t, constructor_kwargs=constructor_kwargs): return smoothl1loss_reference(i, t, **constructor_kwargs) module_inputs.append( ModuleInput(constructor_input=FunctionInput(**constructor_kwargs), forward_input=FunctionInput(make_input((5, 10)), make_input((5, 10))), desc=desc, reference_fn=reference_fn) ) module_inputs.append( ModuleInput(constructor_input=FunctionInput(**constructor_kwargs), forward_input=FunctionInput(make_input(()), make_input(())), desc=f'scalar_{desc}', reference_fn=reference_fn) ) return module_inputs def module_inputs_torch_nn_BCELoss(module_info, device, dtype, requires_grad, training, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) make_target = partial(make_tensor, device=device, dtype=dtype, requires_grad=False) make_weight = partial(make_tensor, device=device, dtype=dtype, requires_grad=False) cases: list[tuple[str, dict]] = [ ('', {}), ('reduction_sum', {'reduction': 'sum'}), ('reduction_mean', {'reduction': 'mean'}), ('reduction_none', {'reduction': 'none'}), ('weights', {'weight': make_weight((10,))}), ] def bce_loss_reference_fn(m, p, i, t, reduction='mean', weight=None): result = -(t * i.log() + (1 - t) * (1 - i).log()) if weight is not None: result = result * weight if reduction == 'none': return result elif reduction == 'mean': return result.sum() / i.numel() else: return result.sum() module_inputs = [] for desc, constructor_kwargs in cases: module_inputs.append( ModuleInput(constructor_input=FunctionInput(**constructor_kwargs), 
forward_input=FunctionInput(make_input((15, 10), low=1e-2, high=1 - 1e-2), make_target((15, 10)).gt(0).to(dtype)), desc=desc, reference_fn=partial(bce_loss_reference_fn, **constructor_kwargs)) ) scalar_weight = make_weight(()) module_inputs.append( ModuleInput(constructor_input=FunctionInput(weight=scalar_weight), forward_input=FunctionInput(make_input((), low=1e-2, high=1 - 1e-2), make_target(()).gt(0).to(dtype)), desc='scalar_weight', reference_fn=partial(bce_loss_reference_fn, weight=scalar_weight)) ) return module_inputs def module_inputs_torch_nn_BCEWithLogitsLoss(module_info, device, dtype, requires_grad, training, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) make_target = partial(make_tensor, device=device, dtype=dtype, requires_grad=False) make_weight = partial(make_tensor, device=device, dtype=dtype, requires_grad=False) cases: list[tuple[str, dict]] = [ ('', {}), ('reduction_sum', {'reduction': 'sum'}), ('reduction_mean', {'reduction': 'mean'}), ('reduction_none', {'reduction': 'none'}), ('weights', {'weight': make_weight((10,))}), ('scalar_weights', {'weight': make_weight(())}) ] def bce_withlogitsloss_reference_fn(m, p, i, t, reduction='mean', weight=None): # TODO: add pos_weight to the definition here and corresponding SampleInputs max_val = (-i).clamp(min=0) result = (1 - t).mul_(i).add_(max_val).add_((-max_val).exp_().add_((-i - max_val).exp_()).log_()) if weight is not None: result = result * weight if reduction == 'none': return result elif reduction == 'mean': return result.sum() / i.numel() else: return result.sum() module_inputs = [] for desc, constructor_kwargs in cases: module_inputs.append( ModuleInput(constructor_input=FunctionInput(**constructor_kwargs), forward_input=FunctionInput(make_input((15, 10), low=1e-2, high=1 - 1e-2), make_target((15, 10)).gt(0).to(dtype)), desc=desc, reference_fn=partial(bce_withlogitsloss_reference_fn, **constructor_kwargs)) ) return module_inputs def module_inputs_torch_nn_CrossEntropyLoss(module_info, device, dtype, requires_grad, training, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) make_target = partial(make_tensor, device=device, dtype=torch.long, requires_grad=False) make_weight = partial(make_tensor, device=device, dtype=dtype, requires_grad=False) reductions: list[str] = ['mean', 'sum', 'none'] cases: list[tuple[str, dict]] = [ ('', {}), ('weights', {'weight': make_weight((3,))}), ('ignore_index', {'ignore_index': 1}), ('label_smoothing', {'label_smoothing': 0.15}), ('ignore_index_label_smoothing', {'ignore_index': 1, 'label_smoothing': 0.15}) ] module_inputs = [] for reduction, (desc, constructor_kwargs) in product(reductions, cases): def reference_fn(m, p, i, t, reduction=reduction, constructor_kwargs=constructor_kwargs): return cross_entropy_loss_reference(i, t, reduction=reduction, **constructor_kwargs) module_inputs.append( ModuleInput(constructor_input=FunctionInput(reduction=reduction, **constructor_kwargs), forward_input=FunctionInput(make_input((2, 3, 5, 5)), make_target((2, 5, 5), low=0, high=3)), desc=f"4d_{desc}_{reduction}", reference_fn=reference_fn) ) module_inputs.append( ModuleInput(constructor_input=FunctionInput(reduction=reduction, **constructor_kwargs), forward_input=FunctionInput(make_input((2, 3, 5)), make_target((2, 5), low=0, high=3)), desc=f"3d_{desc}_{reduction}", reference_fn=reference_fn) ) module_inputs.append( ModuleInput(constructor_input=FunctionInput(reduction=reduction, 
**constructor_kwargs), forward_input=FunctionInput(make_input((2, 3)), make_target((2), low=0, high=3)), desc=f"2d_{desc}_{reduction}", reference_fn=reference_fn) ) module_inputs.append( ModuleInput(constructor_input=FunctionInput(reduction=reduction, **constructor_kwargs), forward_input=FunctionInput(make_input((2, 3, 5, 5, 2, 2)), make_target((2, 5, 5, 2, 2), low=0, high=3)), desc=f"higher_dim_{desc}_{reduction}", reference_fn=reference_fn) ) if constructor_kwargs.get('ignore_index', None) is None: module_inputs.append( ModuleInput(constructor_input=FunctionInput(reduction=reduction, **constructor_kwargs), forward_input=FunctionInput(make_input((5, 3, 4, 2)), make_input((5, 3, 4, 2)).softmax(dim=1)), desc=f"4d_prob_target_{desc}_{reduction}", reference_fn=reference_fn) ) module_inputs.append( ModuleInput(constructor_input=FunctionInput(reduction=reduction, **constructor_kwargs), forward_input=FunctionInput(make_input((5, 3, 4)), make_input((5, 3, 4)).softmax(dim=1)), desc=f"3d_prob_target_{desc}_{reduction}", reference_fn=reference_fn) ) module_inputs.append( ModuleInput(constructor_input=FunctionInput(reduction=reduction, **constructor_kwargs), forward_input=FunctionInput(make_input((5, 3)), make_input((5, 3)).softmax(dim=1)), desc=f"2d_prob_target_{desc}_{reduction}", reference_fn=reference_fn) ) module_inputs.append( ModuleInput(constructor_input=FunctionInput(reduction=reduction, **constructor_kwargs), forward_input=FunctionInput(make_input((2, 3, 5, 5, 2, 2)), make_input((2, 3, 5, 5, 2, 2)).softmax(dim=1)), desc=f"higher_dim_prob_target_{desc}_{reduction}", reference_fn=reference_fn) ) module_inputs.append( ModuleInput(constructor_input=FunctionInput(reduction=reduction, **constructor_kwargs), forward_input=FunctionInput(make_input((3,)), make_target((), low=0, high=3)), desc=f"no_batch_dim_{desc}_{reduction}", reference_fn=partial(no_batch_dim_reference_fn, is_criterion=True)) ) return module_inputs def module_inputs_torch_nn_CTCLoss(module_info, device, dtype, requires_grad, training, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) make_target = partial(make_tensor, device=device, requires_grad=False) cases: list[tuple[str, dict]] = [ ('', {}), ('reduction_sum', {'reduction': 'sum'}), ('reduction_mean', {'reduction': 'mean'}), ('reduction_none', {'reduction': 'none'}), ('blank', {'blank': 14}) ] target_dtypes = [torch.int, torch.long] module_inputs = [] for target_dtype, (desc, constructor_kwargs) in product(target_dtypes, cases): def reference_fn(m, p, i, t, il, tl, constructor_kwargs=constructor_kwargs): return ctcloss_reference(i, t, il, tl, **constructor_kwargs) blank = constructor_kwargs.get('blank', 0) low = 0 if blank == 14 else 1 high = 14 if blank == 14 else 15 module_inputs.append( ModuleInput( constructor_input=FunctionInput(**constructor_kwargs), forward_input=FunctionInput(make_input((50, 3, 15)).log_softmax(2), make_target((3, 30), dtype=target_dtype, low=low, high=high), (50, 50, 50), (30, 25, 20)), desc=f'{desc}_lengths_intlists', reference_fn=reference_fn) ) module_inputs.append( ModuleInput( constructor_input=FunctionInput(**constructor_kwargs), forward_input=FunctionInput(make_input((50, 3, 15)).log_softmax(2), make_target((3, 30), dtype=target_dtype, low=low, high=high), torch.tensor((50, 50, 50), device=device), torch.tensor((30, 25, 20), device=device)), desc=f'{desc}_lengths_tensors', reference_fn=reference_fn) ) module_inputs.append( ModuleInput( constructor_input=FunctionInput(**constructor_kwargs), 
forward_input=FunctionInput(make_input((50, 3, 15)).log_softmax(2), make_target((30 + 25 + 20,), dtype=target_dtype, low=low, high=high), (50, 50, 50), (30, 25, 20)), desc=f'{desc}_1d_target_lengths_intlists', reference_fn=reference_fn) ) module_inputs.append( ModuleInput( constructor_input=FunctionInput(**constructor_kwargs), forward_input=FunctionInput(make_input((50, 3, 15)).log_softmax(2), make_target((30 + 25 + 20,), dtype=target_dtype, low=low, high=high), torch.tensor((50, 50, 50), device=device), torch.tensor((30, 25, 20), device=device)), desc=f'{desc}_1d_target_lengths_tensors', reference_fn=reference_fn) ) return module_inputs def module_inputs_torch_nn_GroupNorm(module_info, device, dtype, requires_grad, training, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) return [ ModuleInput( constructor_input=FunctionInput(3, 6, 1e-3), forward_input=FunctionInput(make_input((4, 6, 5))), desc='1d_affine'), ModuleInput( constructor_input=FunctionInput(3, 12, 1e-3), forward_input=FunctionInput(make_input((4, 12))), desc='1d_affine_GN'), ModuleInput( constructor_input=FunctionInput(1, 6, 1e-3), forward_input=FunctionInput(make_input((150, 6))), desc='1d_affine_large_batch'), ModuleInput( constructor_input=FunctionInput(5, 5, 1e-3, False), forward_input=FunctionInput(make_input((4, 5, 5))), desc='1d_no_affine_IN'), ModuleInput( constructor_input=FunctionInput(1, 10, 1e-3, False), forward_input=FunctionInput(make_input((4, 10))), desc='1d_no_affine_LN'), ModuleInput( constructor_input=FunctionInput(3, 6, 1e-3), forward_input=FunctionInput(make_input((4, 6, 2, 3))), desc='2d_affine'), ModuleInput( constructor_input=FunctionInput(3, 3, 1e-3, False), forward_input=FunctionInput(make_input((4, 3, 2, 3))), desc='2d_no_affine_IN'), ModuleInput( constructor_input=FunctionInput(1, 3, 1e-3, False), forward_input=FunctionInput(make_input((4, 3, 2, 3))), desc='2d_no_affine_LN'), ] def module_inputs_torch_nn_Hardshrink(module_info, device, dtype, requires_grad, training, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) return [ ModuleInput( constructor_input=FunctionInput(2.), forward_input=FunctionInput(make_input((4, 3, 2, 4))), ), ModuleInput( constructor_input=FunctionInput(2.), forward_input=FunctionInput(make_input(())), desc='scalar', ), ModuleInput( constructor_input=FunctionInput(), forward_input=FunctionInput(make_input(4)), reference_fn=no_batch_dim_reference_fn, desc='no_batch_dim', ) ] def module_inputs_torch_nn_Hardswish(module_info, device, dtype, requires_grad, training, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) return [ ModuleInput( constructor_input=FunctionInput(), forward_input=FunctionInput(make_input(4)), reference_fn=no_batch_dim_reference_fn, desc='no_batch_dim', ), ModuleInput( constructor_input=FunctionInput(), forward_input=FunctionInput(make_input((2, 3, 2, 5))), desc='4d_input') ] def module_inputs_torch_nn_Hardtanh(module_info, device, dtype, requires_grad, training, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) return [ ModuleInput( constructor_input=FunctionInput(), forward_input=FunctionInput(make_input((3, 2, 5))), reference_fn=lambda m, p, i: i.clamp(-1, 1), ), ModuleInput( constructor_input=FunctionInput(), forward_input=FunctionInput(make_input(())), reference_fn=lambda m, p, i: i.clamp(-1, 1), desc='scalar', ), ModuleInput( 
constructor_input=FunctionInput(), forward_input=FunctionInput(make_input(4)), reference_fn=no_batch_dim_reference_fn, desc='no_batch_dim', ) ] def module_inputs_torch_nn_HingeEmbeddingLoss(module_info, device, dtype, requires_grad, training, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) make_target = partial(make_tensor, device=device, dtype=dtype, requires_grad=False) cases: list[tuple[str, dict]] = [ ('', {}), ('reduction_sum', {'reduction': 'sum'}), ('reduction_mean', {'reduction': 'mean'}), ('reduction_none', {'reduction': 'none'}), ('margin', {'margin': 0.5}) ] module_inputs = [] for desc, constructor_kwargs in cases: def reference_fn(m, p, i, t, constructor_kwargs=constructor_kwargs): return hingeembeddingloss_reference(i, t, **constructor_kwargs) module_inputs.append( ModuleInput(constructor_input=FunctionInput(**constructor_kwargs), forward_input=FunctionInput(make_input((10,)), make_target((10,)).gt(0).to(dtype).mul_(2).sub_(1)), desc=desc, reference_fn=reference_fn) ) module_inputs.append( ModuleInput(constructor_input=FunctionInput(**constructor_kwargs), forward_input=FunctionInput(make_input(()), make_target(()).gt(0).to(dtype).mul_(2).sub_(1)), desc=f'scalar_{desc}', reference_fn=reference_fn) ) return module_inputs def module_inputs_torch_nn_HuberLoss(module_info, device, dtype, requires_grad, training, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) cases: list[tuple[str, dict]] = [ ('', {}), ('reduction_sum', {'reduction': 'sum'}), ('reduction_mean', {'reduction': 'mean'}), ('reduction_none', {'reduction': 'none'}), ] module_inputs = [] for desc, constructor_kwargs in cases: def reference_fn(m, p, i, t, constructor_kwargs=constructor_kwargs): return huberloss_reference(i, t, **constructor_kwargs) module_inputs.append( ModuleInput(constructor_input=FunctionInput(**constructor_kwargs), forward_input=FunctionInput(make_input((5, 10)), make_input((5, 10))), desc=desc, reference_fn=reference_fn) ) return module_inputs def module_inputs_torch_nn_InstanceNormNd(module_info, device, dtype, requires_grad, training, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) lazy = kwargs.get('lazy', False) N = kwargs['N'] num_features, eps, momentum, affine, track_running_stats = 3, 1e-3, 0.3, False, True input_no_batch_shape_dict = {1: (3, 15), 2: (3, 6, 6), 3: (3, 4, 4, 4)} input_no_batch_shape = input_no_batch_shape_dict[N] input_batch_shape = (4,) + input_no_batch_shape return [ ModuleInput( constructor_input=( FunctionInput(eps, momentum) if lazy else FunctionInput(num_features, eps, momentum) ), forward_input=FunctionInput(make_input(input_batch_shape))), ModuleInput( constructor_input=( FunctionInput(eps, momentum, affine, track_running_stats) if lazy else FunctionInput(num_features, eps, momentum, affine, track_running_stats) ), forward_input=FunctionInput(make_input(input_batch_shape)), desc='tracking_stats'), ModuleInput( constructor_input=( FunctionInput(eps, momentum) if lazy else FunctionInput(num_features, eps, momentum) ), forward_input=FunctionInput(make_input(input_no_batch_shape)), reference_fn=no_batch_dim_reference_fn, desc='tracking_stats_no_batch_dim'), ModuleInput( constructor_input=( FunctionInput(eps, momentum, affine, track_running_stats) if lazy else FunctionInput(num_features, eps, momentum, affine, track_running_stats) ), forward_input=FunctionInput(make_input(input_no_batch_shape)), 
reference_fn=no_batch_dim_reference_fn,
            desc='no_batch_dim')
    ]


def module_inputs_torch_nn_LayerNorm(module_info, device, dtype, requires_grad, training, **kwargs):
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    return [
        ModuleInput(
            constructor_input=FunctionInput([5], 1e-3),
            forward_input=FunctionInput(make_input((4, 5, 5))),
            desc='1d_elementwise_affine'),
        ModuleInput(
            constructor_input=FunctionInput([5], 1e-3),
            forward_input=FunctionInput(make_input((128, 5, 5))),
            desc='1d_elementwise_affine_large_batch'),
        ModuleInput(
            constructor_input=FunctionInput([5], 1e-3, False),
            forward_input=FunctionInput(make_input((4, 5, 5))),
            desc='1d_no_elementwise_affine'),
        ModuleInput(
            constructor_input=FunctionInput([2, 2, 5], 1e-3),
            forward_input=FunctionInput(make_input((4, 2, 2, 5))),
            desc='3d_elementwise_affine'),
        ModuleInput(
            constructor_input=FunctionInput([2, 2, 5], 1e-3, False),
            forward_input=FunctionInput(make_input((4, 2, 2, 5))),
            desc='3d_no_elementwise_affine'),
        ModuleInput(
            constructor_input=FunctionInput([5], 1e-3),
            forward_input=FunctionInput(make_input((0, 5))),
            desc='1d_empty_elementwise_affine'),
        ModuleInput(
            constructor_input=FunctionInput([2, 2, 5], 1e-3, elementwise_affine=True, bias=False),
            forward_input=FunctionInput(make_input((4, 2, 2, 5))),
            desc='3d_elementwise_affine_no_bias'),
    ]


def module_inputs_torch_nn_RMSNorm(module_info, device, dtype, requires_grad, training, **kwargs):
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    def rms_norm_reference_fn(m, p, i):
        eps = m.eps
        if eps is None:
            eps = torch.finfo(i.dtype).eps
        ndim = i.ndim
        normalized_shape = m.normalized_shape
        weight = m.weight
        dims = [ndim - i - 1 for i in range(len(normalized_shape))]
        upcasted_i = i.float()
        # Use the resolved eps (with the dtype-based fallback) rather than m.eps,
        # which may be None.
        result = upcasted_i * torch.rsqrt(upcasted_i.pow(2).mean(dim=dims, keepdim=True) + eps)
        if weight is not None:
            result *= weight
        return result.type_as(i)

    return [
        ModuleInput(
            constructor_input=FunctionInput([5], 1e-3),
            forward_input=FunctionInput(make_input((4, 5, 5))),
            desc='1d_elementwise_affine',
            reference_fn=rms_norm_reference_fn),
        ModuleInput(
            constructor_input=FunctionInput([5], 1e-3),
            forward_input=FunctionInput(make_input((128, 5, 5))),
            desc='1d_elementwise_affine_large_batch',
            reference_fn=rms_norm_reference_fn),
        ModuleInput(
            constructor_input=FunctionInput([5], 1e-3, False),
            forward_input=FunctionInput(make_input((4, 5, 5))),
            desc='1d_no_elementwise_affine',
            reference_fn=rms_norm_reference_fn),
        ModuleInput(
            constructor_input=FunctionInput([2, 2, 5], 1e-3),
            forward_input=FunctionInput(make_input((4, 2, 2, 5))),
            desc='3d_elementwise_affine',
            reference_fn=rms_norm_reference_fn),
        ModuleInput(
            constructor_input=FunctionInput([2, 2, 5], 1e-3, False),
            forward_input=FunctionInput(make_input((4, 2, 2, 5))),
            desc='3d_no_elementwise_affine',
            reference_fn=rms_norm_reference_fn),
        ModuleInput(
            constructor_input=FunctionInput([5], 1e-3),
            forward_input=FunctionInput(make_input((0, 5))),
            desc='1d_empty_elementwise_affine',
            reference_fn=rms_norm_reference_fn),
    ]


def module_inputs_torch_nn_LocalResponseNorm(module_info, device, dtype, requires_grad, training, **kwargs):
    make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad)

    return [
        ModuleInput(
            constructor_input=FunctionInput(3,),
            forward_input=FunctionInput(make_input((1, 5, 7))),
            desc='1d'),
        ModuleInput(
            constructor_input=FunctionInput(2,),
            forward_input=FunctionInput(make_input((1, 5, 7, 7))),
            desc='2d_uneven_pad'),
        ModuleInput(
constructor_input=FunctionInput(1, 1., 0.5, 2.), forward_input=FunctionInput(make_input((1, 5, 7, 7, 7))), desc='3d_custom_params'), ] def module_inputs_torch_nn_LPPool1d(module_info, device, dtype, requires_grad, training, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) return [ ModuleInput( constructor_input=FunctionInput(1.5, 2), forward_input=FunctionInput(make_input((1, 3, 7))), desc='norm'), ModuleInput( constructor_input=FunctionInput(2, 2, 3), forward_input=FunctionInput(make_input((1, 3, 7)))), ModuleInput( constructor_input=FunctionInput(2, 2, 3), forward_input=FunctionInput(make_input((3, 7))), reference_fn=no_batch_dim_reference_fn, desc='no_batch_dim'), ] def module_inputs_torch_nn_LPPool2d(module_info, device, dtype, requires_grad, training, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) return [ ModuleInput( constructor_input=FunctionInput(2, 2, 2), forward_input=FunctionInput(make_input((1, 3, 7, 7)))), ModuleInput( constructor_input=FunctionInput(2, 2, 2), forward_input=FunctionInput(make_input((3, 7, 7))), reference_fn=no_batch_dim_reference_fn, desc='no_batch_dim'), ModuleInput( constructor_input=FunctionInput(1.5, 2), forward_input=FunctionInput(make_input((1, 3, 7, 7))), desc='norm'), ] def module_inputs_torch_nn_LPPool3d(module_info, device, dtype, requires_grad, training, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) return [ ModuleInput( constructor_input=FunctionInput(2, 2, 2), forward_input=FunctionInput(make_input((1, 3, 7, 7, 7)))), ModuleInput( constructor_input=FunctionInput(2, 2, 2), forward_input=FunctionInput(make_input((3, 7, 7, 7))), reference_fn=no_batch_dim_reference_fn, desc='no_batch_dim'), ModuleInput( constructor_input=FunctionInput(1.5, 2), forward_input=FunctionInput(make_input((1, 3, 7, 7, 7))), desc='norm'), ] def module_inputs_torch_nn_MaxPool1d(module_info, device, dtype, requires_grad, training, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) return [ ModuleInput( constructor_input=FunctionInput(4), forward_input=FunctionInput(make_input((2, 10, 4))), desc='3d_input'), ModuleInput( constructor_input=FunctionInput(4, 4), forward_input=FunctionInput(make_input((2, 10, 4))), desc='stride'), ModuleInput( constructor_input=FunctionInput(4, return_indices=True), forward_input=FunctionInput(make_input((2, 10, 4))), desc='return_indices'), ] def module_inputs_torch_nn_MaxPool2d(module_info, device, dtype, requires_grad, training, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) return [ ModuleInput( constructor_input=FunctionInput((3, 3), (2, 2), (1, 1)), forward_input=FunctionInput(make_input((3, 7, 7))), desc='3d_input'), ModuleInput( constructor_input=FunctionInput((3, 3), (2, 2), (1, 1)), forward_input=FunctionInput(make_input((1, 3, 7, 7))), desc='4d_input'), ModuleInput( constructor_input=FunctionInput((3, 3), (2, 2), (1, 1), return_indices=True), forward_input=FunctionInput(make_input((1, 3, 7, 7))), desc='return_indices'), ] def module_inputs_torch_nn_MaxPool3d(module_info, device, dtype, requires_grad, training, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) return [ ModuleInput( constructor_input=FunctionInput((2, 2, 2)), forward_input=FunctionInput(make_input((2, 3, 5, 5, 5)))), ModuleInput( 
constructor_input=FunctionInput(2, (2, 2, 2)), forward_input=FunctionInput(make_input((2, 3, 5, 5, 5))), desc='stride'), ModuleInput( constructor_input=FunctionInput(2, 2, (1, 1, 1)), forward_input=FunctionInput(make_input((2, 3, 5, 5, 5))), desc='stride_padding'), ModuleInput( constructor_input=FunctionInput(2, 2, (1, 1, 1), return_indices=True), forward_input=FunctionInput(make_input((2, 3, 5, 5, 5))), desc='return_indices'), ] def module_inputs_torch_nn_FractionalMaxPool2d(module_info, device, dtype, requires_grad, training, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) def make_random_samples(): return torch.empty((1, 3, 2), dtype=torch.double, device=device).uniform_() return [ ModuleInput( constructor_input=FunctionInput(2, output_ratio=0.5, _random_samples=make_random_samples()), forward_input=FunctionInput(make_input((1, 3, 5, 7))), desc='ratio'), ModuleInput( constructor_input=FunctionInput((2, 3), output_size=(4, 3), _random_samples=make_random_samples()), forward_input=FunctionInput(make_input((1, 3, 7, 6))), desc='size'), ModuleInput( constructor_input=FunctionInput( 2, output_ratio=0.5, _random_samples=make_random_samples(), return_indices=True ), forward_input=FunctionInput(make_input((1, 3, 5, 7))), desc='ratio_return_indices'), ModuleInput( constructor_input=FunctionInput(2, output_ratio=0.5, _random_samples=make_random_samples()), forward_input=FunctionInput(make_input((3, 5, 7))), reference_fn=no_batch_dim_reference_fn, desc='ratio_no_batch_dim'), ModuleInput( constructor_input=FunctionInput((2, 3), output_size=(4, 3), _random_samples=make_random_samples()), forward_input=FunctionInput(make_input((3, 7, 6))), reference_fn=no_batch_dim_reference_fn, desc='size_no_batch_dim'), ] def module_inputs_torch_nn_FractionalMaxPool3d(module_info, device, dtype, requires_grad, training, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) def make_random_samples(): return torch.empty((2, 4, 3), dtype=torch.double, device=device).uniform_() return [ ModuleInput( constructor_input=FunctionInput(2, output_ratio=0.5, _random_samples=make_random_samples()), forward_input=FunctionInput(make_input((2, 4, 5, 5, 5))), desc='ratio'), ModuleInput( constructor_input=FunctionInput((2, 2, 2), output_size=(4, 4, 4), _random_samples=make_random_samples()), forward_input=FunctionInput(make_input((2, 4, 7, 7, 7))), desc='size'), ModuleInput( constructor_input=FunctionInput((4, 2, 3), output_size=(10, 3, 2), _random_samples=make_random_samples()), forward_input=FunctionInput(make_input((2, 4, 16, 7, 5))), desc='asymsize'), ModuleInput( constructor_input=FunctionInput( 2, output_ratio=0.5, _random_samples=make_random_samples(), return_indices=True ), forward_input=FunctionInput(make_input((2, 4, 5, 5, 5))), desc='ratio_return_indices'), ModuleInput( constructor_input=FunctionInput(2, output_ratio=0.5, _random_samples=make_random_samples()), forward_input=FunctionInput(make_input((4, 5, 5, 5))), reference_fn=no_batch_dim_reference_fn, desc='ratio_no_batch_dim'), ModuleInput( constructor_input=FunctionInput((2, 2, 2), output_size=(4, 4, 4), _random_samples=make_random_samples()), forward_input=FunctionInput(make_input((4, 7, 7, 7))), reference_fn=no_batch_dim_reference_fn, desc='size_no_batch_dim'), ] def module_inputs_torch_nn_Sigmoid(module_info, device, dtype, requires_grad, training, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) return [ 
ModuleInput( constructor_input=FunctionInput(), forward_input=FunctionInput(make_input(())), desc='scalar' ), ModuleInput( constructor_input=FunctionInput(), forward_input=FunctionInput(make_input(4)), reference_fn=no_batch_dim_reference_fn, desc='no_batch_dim', ), ModuleInput( constructor_input=FunctionInput(), forward_input=FunctionInput(make_input((2, 3, 4, 5))), desc='channels_last_mem_format' ), ModuleInput( constructor_input=FunctionInput(), forward_input=FunctionInput(make_input((2, 3, 3, 4, 5))), desc='channels_last_3d_mem_format' ) ] def module_inputs_torch_nn_LogSigmoid(module_info, device, dtype, requires_grad, training, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) return [ ModuleInput( constructor_input=FunctionInput(), forward_input=FunctionInput(make_input(())), reference_fn=lambda m, p, i: i.sigmoid().log(), desc='scalar' ), ModuleInput( constructor_input=FunctionInput(), forward_input=FunctionInput(make_input((2, 3, 4))), reference_fn=lambda m, p, i: i.sigmoid().log(), ), ModuleInput( constructor_input=FunctionInput(), forward_input=FunctionInput(make_input(4)), reference_fn=no_batch_dim_reference_fn, desc='no_batch_dim', ), ] def module_inputs_torch_nn_MarginRankingLoss(module_info, device, dtype, requires_grad, training, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) make_target = partial(make_tensor, device=device, dtype=torch.long, requires_grad=False) cases: list[tuple[str, dict]] = [ ('', {}), ('reduction_sum', {'reduction': 'sum'}), ('reduction_mean', {'reduction': 'mean'}), ('reduction_none', {'reduction': 'none'}), ('margin', {'margin': 0.5}) ] module_inputs = [] for desc, constructor_kwargs in cases: def reference_fn(m, p, i1, i2, t, constructor_kwargs=constructor_kwargs): return marginrankingloss_reference(i1, i2, t, **constructor_kwargs) module_inputs.append( ModuleInput(constructor_input=FunctionInput(**constructor_kwargs), forward_input=FunctionInput(make_input((50,)), make_input((50,)), make_target((50,)).sign()), desc=desc, reference_fn=reference_fn) ) return module_inputs def module_inputs_torch_nn_MultiLabelMarginLoss(module_info, device, dtype, requires_grad, training, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) make_target = partial(make_tensor, device=device, dtype=torch.long, requires_grad=False) cases: list[tuple[str, dict]] = [ ('', {}), ('reduction_sum', {'reduction': 'sum'}), ('reduction_mean', {'reduction': 'mean'}), ('reduction_none', {'reduction': 'none'}), ] module_inputs = [] for desc, constructor_kwargs in cases: def reference_fn(m, p, i, t, constructor_kwargs=constructor_kwargs): return multilabelmarginloss_reference(i, t, **constructor_kwargs) module_inputs.append( ModuleInput(constructor_input=FunctionInput(**constructor_kwargs), forward_input=FunctionInput(make_input((10,)), make_target((10), low=0, high=10)), desc=f'1d_{desc}', reference_fn=reference_fn) ) module_inputs.append( ModuleInput(constructor_input=FunctionInput(**constructor_kwargs), forward_input=FunctionInput(make_input((5, 10)), make_target((5, 10), low=0, high=10)), desc=desc, reference_fn=reference_fn) ) return module_inputs def module_inputs_torch_nn_MultiMarginLoss(module_info, device, dtype, requires_grad, training, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) make_target = partial(make_tensor, device=device, dtype=torch.long, requires_grad=False) 
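    # MultiMarginLoss expects integer class-index targets in [0, num_classes); the
    # samples below use (5, 10) inputs (i.e. 10 classes) with targets drawn from
    # low=0, high=10, and an optional per-class weight vector of length 10.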
make_weight = partial(make_tensor, device=device, dtype=dtype, requires_grad=False) cases: list[tuple[str, dict]] = [ ('', {}), ('reduction_sum', {'reduction': 'sum'}), ('reduction_mean', {'reduction': 'mean'}), ('reduction_none', {'reduction': 'none'}), ('p', {'p': 2}), ('margin', {'margin': 0.5}), ('weights', {'weight': make_weight(10)}) ] module_inputs = [] for desc, constructor_kwargs in cases: def reference_fn(m, p, i, t, constructor_kwargs=constructor_kwargs): return multimarginloss_reference(i, t, **constructor_kwargs) module_inputs.append( ModuleInput(constructor_input=FunctionInput(**constructor_kwargs), forward_input=FunctionInput(make_input((5, 10)), make_target((5), low=0, high=10)), desc=desc, reference_fn=reference_fn) ) return module_inputs def module_inputs_torch_nn_MultiLabelSoftMarginLoss(module_info, device, dtype, requires_grad, training, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) make_target = partial(make_tensor, device=device, dtype=torch.long, requires_grad=False) make_weight = partial(make_tensor, device=device, dtype=dtype, requires_grad=False) cases: list[tuple[str, dict]] = [ ('', {}), ('reduction_sum', {'reduction': 'sum'}), ('reduction_mean', {'reduction': 'mean'}), ('reduction_none', {'reduction': 'none'}), ('weight', {'weight': make_weight(10)}), ] def multilabelsoftmargin_loss_reference_fn(m, p, i, t, reduction='mean', weight=None): result = t * i.sigmoid().log() + (1 - t) * (-i).sigmoid().log() if weight is not None: result *= weight result = (-result).sum(i.dim() - 1) / i.size(-1) if reduction == 'none': return result elif reduction == 'mean': return result.mean() else: return result.sum() module_inputs = [] for desc, constructor_kwargs in cases: module_inputs.append( ModuleInput(constructor_input=FunctionInput(**constructor_kwargs), forward_input=FunctionInput(make_input((5, 10)), make_target((5, 10), low=0, high=2)), desc=desc, reference_fn=partial(multilabelsoftmargin_loss_reference_fn, **constructor_kwargs)) ) return module_inputs def module_inputs_torch_nn_SoftMarginLoss(module_info, device, dtype, requires_grad, training, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) make_target = partial(make_tensor, device=device, dtype=dtype, requires_grad=False) cases: list[tuple[str, dict]] = [ ('', {}), ('reduction_sum', {'reduction': 'sum'}), ('reduction_mean', {'reduction': 'mean'}), ('reduction_none', {'reduction': 'none'}), ] module_inputs = [] for desc, constructor_kwargs in cases: def reference_fn(m, p, i, t, constructor_kwargs=constructor_kwargs): return softmarginloss_reference(i, t, **constructor_kwargs) module_inputs.append( ModuleInput(constructor_input=FunctionInput(**constructor_kwargs), forward_input=FunctionInput(make_input((5, 5)), make_target((5, 5)).sign()), desc=desc, reference_fn=reference_fn) ) return module_inputs def module_inputs_torch_nn_TransformerEncoder(module_info, device, dtype, requires_grad, training, **kwargs): # Reuse the TransformerEncoderLayer samples since the forward args are nearly the same. samples = [] for layer_module_input in module_inputs_torch_nn_TransformerEncoderLayer( None, device, dtype, requires_grad, training): # Construct a TransformerEncoderLayer object to pass to TransformerEncoder. 
l_args, l_kwargs = (layer_module_input.constructor_input.args, layer_module_input.constructor_input.kwargs) l_kwargs['device'] = device l_kwargs['dtype'] = dtype encoder_layer = torch.nn.TransformerEncoderLayer(*l_args, **l_kwargs) num_layers = 2 # Note: TransformerEncoderLayer takes a "src_mask" while # TransformerEncoder takes a "mask"; rename kwarg appropriately. forward_input = layer_module_input.forward_input if 'src_mask' in forward_input.kwargs: forward_input.kwargs['mask'] = forward_input.kwargs['src_mask'] del forward_input.kwargs['src_mask'] samples.append(ModuleInput( constructor_input=FunctionInput(encoder_layer, num_layers), forward_input=forward_input, desc=layer_module_input.desc )) return samples def module_inputs_torch_nn_TransformerEncoderLayer(module_info, device, dtype, requires_grad, training, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) samples = [ ModuleInput( constructor_input=FunctionInput(4, 2, 16, 0.0), forward_input=FunctionInput( make_input((2, 3, 4)) ), desc='relu_activation' ), ModuleInput( constructor_input=FunctionInput(4, 2, 8, 0.0, F.gelu), forward_input=FunctionInput( make_input((2, 3, 4)) ), desc='gelu_activation' ), ModuleInput( constructor_input=FunctionInput(4, 2, 8, 0.0, bias=False), forward_input=FunctionInput( make_input((2, 3, 4)) ), desc='no_bias' ), ] # Samples below are for validating the no-batch-dim support. key_padding_masks = (None, torch.tensor([False, False, True], device=device, dtype=torch.bool)) attn_masks = (None, torch.tensor([False, False, True], device=device, dtype=torch.bool).expand((3, 3))) for src_mask, src_key_padding_mask, norm_first, batch_first, bias in \ itertools.product(attn_masks, key_padding_masks, (True, False), (True, False), (True, False)): samples.append( ModuleInput( constructor_input=FunctionInput(d_model=4, nhead=2, dim_feedforward=8, dropout=0.0, batch_first=batch_first, norm_first=norm_first, bias=bias), forward_input=FunctionInput( make_input((3, 4)), src_mask=src_mask, src_key_padding_mask=src_key_padding_mask ), reference_fn=partial(no_batch_dim_reference_fn, batch_first=batch_first, kwargs_to_batchify={'src_key_padding_mask': 0}), desc=f'no_batch_dim_batch_first_{batch_first}' )) # Samples below where we pass reference_fn are for validating the fast path, # since the fast path requires no_grad mode, we run the fast path in .eval() # and no_grad() in the reference_fn and verify that against the results in train mode. 
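    # Note that this comparison is only meaningful because all fastpath samples below
    # are constructed with dropout=0.0; with dropout disabled, eval-mode (no_grad)
    # outputs should agree with train-mode outputs up to numerical tolerance.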
def fast_path_reference_fn(module, parameters, *args, **kwargs): assert module.training module.train(False) with torch.no_grad(): output = module(*args, **kwargs) module.train(True) return output if training: for norm_first, bias in itertools.product((True, False), (True, False)): samples.append( ModuleInput( constructor_input=FunctionInput( 4, 2, 8, dropout=0.0, batch_first=True, norm_first=norm_first, bias=bias ), forward_input=FunctionInput( make_input((2, 3, 4)), ), # fastpath doesn't run when bias=False reference_fn=fast_path_reference_fn if bias else None, desc=f'fastpath_{bias}_norm_first_{norm_first}' ) ) return samples def module_inputs_torch_nn_TransformerDecoderLayer(module_info, device, dtype, requires_grad, training, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) samples = [ ModuleInput( constructor_input=FunctionInput(4, 2, 16, 0.0), forward_input=FunctionInput( make_input((2, 3, 4)), make_input((2, 3, 4)) ), desc='relu_activation' ), ModuleInput( constructor_input=FunctionInput(4, 2, 8, 0.0, F.gelu), forward_input=FunctionInput( make_input((2, 3, 4)), make_input((2, 3, 4)) ), desc='gelu_activation' ), ModuleInput( constructor_input=FunctionInput(4, 2, 8, 0.0, bias=False), forward_input=FunctionInput( make_input((2, 3, 4)), make_input((2, 3, 4)) ), desc='no_bias' ), ] key_padding_masks = (None, torch.tensor([False, False, True], device=device, dtype=torch.bool)) attn_masks = (None, torch.tensor([False, False, True], device=device, dtype=torch.bool).expand((3, 3))) for tgt_mask, tgt_key_padding_mask, norm_first, bias, batch_first in \ itertools.product(attn_masks, key_padding_masks, (True, False), (True, False), (True, False)): # Using same mask for tgt and memory memory_mask = tgt_mask memory_key_padding_mask = tgt_key_padding_mask samples.append( ModuleInput( constructor_input=FunctionInput(d_model=4, nhead=2, dim_feedforward=8, dropout=0.0, batch_first=batch_first, norm_first=norm_first, bias=bias), forward_input=FunctionInput( make_input((3, 4)), make_input((3, 4)), tgt_mask=tgt_mask, memory_mask=memory_mask, tgt_key_padding_mask=tgt_key_padding_mask, memory_key_padding_mask=memory_key_padding_mask ), reference_fn=partial(no_batch_dim_reference_fn, batch_first=batch_first, kwargs_to_batchify={'tgt_key_padding_mask': 0, 'memory_key_padding_mask': 0}), desc=f'no_batch_dim_batch_first_{batch_first}' )) src, tgt = make_input((2, 3, 4)), make_input((2, 3, 4)) if not batch_first: src, tgt = src.transpose(0, 1), tgt.transpose(0, 1) if tgt_key_padding_mask is not None: memory_key_padding_mask, tgt_key_padding_mask = (tgt_key_padding_mask.expand(2, 3),) * 2 samples.append( ModuleInput( constructor_input=FunctionInput(d_model=4, nhead=2, dim_feedforward=8, dropout=0.0, batch_first=batch_first, norm_first=norm_first, bias=bias), forward_input=FunctionInput( src, tgt, tgt_mask=tgt_mask, memory_mask=memory_mask, tgt_key_padding_mask=tgt_key_padding_mask, memory_key_padding_mask=memory_key_padding_mask ), desc=f'norm_first_{norm_first}_batch_first_{batch_first}_bias_{bias}' )) return samples def module_inputs_torch_nn_Transformer(module_info, device, dtype, requires_grad, training, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) samples = [] # Samples below are for validating the no-batch-dim support. 
key_padding_masks = (None, torch.tensor([False, False, True], device=device, dtype=torch.bool)) attn_masks = (None, torch.tensor([False, False, True], device=device, dtype=torch.bool).expand((3, 3))) for mask, key_padding_mask, norm_first, bias, batch_first in \ itertools.product(attn_masks, key_padding_masks, (True, False), (True, False), (True, False)): # Using same mask for tgt and memory src_mask , tgt_mask = (mask,) * 2 src_key_padding_mask, tgt_key_padding_mask = (key_padding_mask,) * 2 samples.append( ModuleInput( constructor_input=FunctionInput(d_model=4, nhead=2, dim_feedforward=8, num_encoder_layers=1, num_decoder_layers=1, dropout=0.0, batch_first=batch_first, norm_first=norm_first, bias=bias), forward_input=FunctionInput( make_input((3, 4)), make_input((3, 4)), tgt_mask=tgt_mask, src_mask=src_mask, tgt_key_padding_mask=tgt_key_padding_mask, src_key_padding_mask=src_key_padding_mask ), reference_fn=partial(no_batch_dim_reference_fn, batch_first=batch_first, kwargs_to_batchify={'tgt_key_padding_mask': 0, 'src_key_padding_mask': 0}), desc=f'no_batch_dim_batch_first_{batch_first}' )) src, tgt = make_input((2, 3, 4)), make_input((2, 3, 4)) if not batch_first: src = src.transpose(0, 1) tgt = tgt.transpose(0, 1) if key_padding_mask is not None: src_key_padding_mask, tgt_key_padding_mask = (key_padding_mask.expand(2, 3),) * 2 samples.append( ModuleInput( constructor_input=FunctionInput(d_model=4, nhead=2, dim_feedforward=8, num_encoder_layers=1, num_decoder_layers=1, dropout=0.0, batch_first=batch_first, norm_first=norm_first, bias=bias), forward_input=FunctionInput( src, tgt, tgt_mask=tgt_mask, src_mask=src_mask, tgt_key_padding_mask=tgt_key_padding_mask, src_key_padding_mask=src_key_padding_mask ), )) return samples def module_inputs_torch_nn_Embedding(module_info, device, dtype, requires_grad, training, **kwargs): make_empty = partial(torch.empty, device=device, dtype=torch.long, requires_grad=False) return [ ModuleInput( constructor_input=FunctionInput(num_embeddings=4, embedding_dim=3), forward_input=FunctionInput(make_empty(2, 3).random_(4)) ), ModuleInput( constructor_input=FunctionInput(num_embeddings=4, embedding_dim=3), forward_input=FunctionInput(make_empty(1, 512).random_(4).expand(7, 512)), desc='discontiguous' ), ] def module_inputs_torch_nn_MultiheadAttention(module_info, device, dtype, requires_grad, training, **kwargs): # Currently all samples below are for validating the no-batch-dim support. 
make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) samples = [] bool_vals = (True, False) key_padding_masks = (None, torch.tensor([False, False, True], device=device, dtype=torch.bool)) attn_masks = (None, torch.tensor([False, False, True], device=device, dtype=torch.bool).expand((3, 3, 3))) products = itertools.product(bool_vals, bool_vals, bool_vals, key_padding_masks, attn_masks) for bias, add_bias_kv, add_zero_attn, key_padding_mask, attn_mask in products: samples.append( ModuleInput( constructor_input=FunctionInput(embed_dim=3, num_heads=3, batch_first=True, bias=bias, add_bias_kv=add_bias_kv, add_zero_attn=add_zero_attn), forward_input=FunctionInput(make_input((3, 3)), make_input((3, 3)), make_input((3, 3)), key_padding_mask=key_padding_mask, attn_mask=attn_mask), reference_fn=no_batch_dim_reference_mha, ) ) samples.append( ModuleInput( constructor_input=FunctionInput(embed_dim=3, num_heads=3, batch_first=False, bias=bias, add_bias_kv=add_bias_kv, add_zero_attn=add_zero_attn), forward_input=FunctionInput(make_input((3, 3)), make_input((3, 3)), make_input((3, 3)), key_padding_mask=key_padding_mask, attn_mask=attn_mask), reference_fn=partial(no_batch_dim_reference_mha, batch_first=False), ) ) return samples def module_inputs_torch_nn_RNN_GRU_Cell(module_info, device, dtype, requires_grad, training, **kwargs): # Currently all samples below are for validating the no-batch-dim support. make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) samples = [ ModuleInput( constructor_input=FunctionInput(5, 10), forward_input=FunctionInput(make_input(5), make_input(10)), reference_fn=no_batch_dim_reference_fn, ), ModuleInput( constructor_input=FunctionInput(5, 10, bias=True), forward_input=FunctionInput(make_input(5), make_input(10)), reference_fn=no_batch_dim_reference_fn, ) ] is_rnn = kwargs.get('is_rnn', False) if is_rnn: # RNN also supports `nonlinearity` argument. # `tanh` is the default, so we check with `relu` samples.append( ModuleInput( constructor_input=FunctionInput(5, 10, bias=True, nonlinearity='relu'), forward_input=FunctionInput(make_input(5), make_input(10)), reference_fn=no_batch_dim_reference_fn, ) ) return samples def module_inputs_torch_nn_LSTMCell(module_info, device, dtype, requires_grad, training, **kwargs): # Currently all samples below are for validating the no-batch-dim support. make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) samples = ( ModuleInput( constructor_input=FunctionInput(5, 10), forward_input=FunctionInput(make_input(5), (make_input(10), make_input(10))), reference_fn=no_batch_dim_reference_lstmcell, ), ModuleInput( constructor_input=FunctionInput(5, 10, bias=True), forward_input=FunctionInput(make_input(5), (make_input(10), make_input(10))), reference_fn=no_batch_dim_reference_lstmcell, ), ) return samples def make_packed_sequence(inp, batch_sizes): required_grad = inp.requires_grad inp.requires_grad_(False) # user won't have access to inp so won't be able to get its grads seq = pack_padded_sequence(inp, batch_sizes) seq.data.requires_grad_(required_grad) return seq def module_inputs_torch_nn_RNN_GRU(module_info, device, dtype, requires_grad, training, with_packed_sequence=False, **kwargs): # Currently all samples below are for validating the no-batch-dim support. 
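    # For unbatched inputs, the initial hidden state has shape
    # (num_layers * num_directions, hidden_size); with num_layers=2 this is
    # (4, hidden_size) when bidirectional and (2, hidden_size) otherwise, which is
    # where the `4 if bidir else 2` below comes from.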
make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) is_rnn = kwargs['is_rnn'] nonlinearity = ('relu', 'tanh') bias = (False, True) batch_first = (False, True) bidirectional = (False, True) samples = [] if is_rnn: prod_gen = product(nonlinearity, bias, batch_first, bidirectional) else: prod_gen = product(bias, batch_first, bidirectional) for args in prod_gen: if is_rnn: nl, b, b_f, bidir = args else: b, b_f, bidir = args cons_args = {'input_size': 2, 'hidden_size': 2, 'num_layers': 2, 'batch_first': b_f, 'bias': b, 'bidirectional': bidir} cons_args_hidden = {'input_size': 2, 'hidden_size': 3, 'num_layers': 2, 'batch_first': b_f, 'bias': b, 'bidirectional': bidir} if is_rnn: cons_args['nonlinearity'] = nl cons_args_hidden['nonlinearity'] = nl samples.append( ModuleInput( constructor_input=FunctionInput(**cons_args), forward_input=FunctionInput(make_input((3, 2))), reference_fn=partial(no_batch_dim_reference_rnn_gru, batch_first=b_f), ) ) samples.append( ModuleInput( constructor_input=FunctionInput(**cons_args_hidden), forward_input=FunctionInput(make_input((3, 2)), make_input((4 if bidir else 2, 3))), reference_fn=partial(no_batch_dim_reference_rnn_gru, batch_first=b_f), ) ) if with_packed_sequence: samples.append( ModuleInput( constructor_input=FunctionInput(**cons_args), forward_input=FunctionInput(make_packed_sequence(make_input((5, 2, 2)), torch.tensor([5, 3]))), reference_fn=partial(no_batch_dim_reference_rnn_gru, batch_first=b_f), ) ) samples.append( ModuleInput( constructor_input=FunctionInput(**cons_args), forward_input=FunctionInput(make_packed_sequence(make_input((5, 5, 2)), torch.tensor([5, 3, 3, 2, 2]))), reference_fn=partial(no_batch_dim_reference_rnn_gru, batch_first=b_f), ) ) return samples def module_inputs_torch_nn_LSTM(module_info, device, dtype, requires_grad, training, **kwargs): # Currently all samples below are for validating the no-batch-dim support. 
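    # When proj_size > 0, LSTM projects the hidden state, so h has last dimension
    # proj_size while the cell state keeps hidden_size; `h_out = proj_size if
    # proj_size > 0 else hidden_size` below builds hx accordingly.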
make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) bias = (False, True) batch_first = (False, True) bidirectional = (False, True) proj_sizes = (0, 2) samples = [] prod_gen = product(bias, batch_first, bidirectional, proj_sizes) for args in prod_gen: b, b_f, bidir, proj_size = args hidden_size = 3 cons_args = {'input_size': 2, 'hidden_size': hidden_size, 'num_layers': 2, 'proj_size': proj_size, 'batch_first': b_f, 'bias': b, 'bidirectional': bidir} cons_args_hidden = {'input_size': 2, 'hidden_size': hidden_size, 'num_layers': 2, 'proj_size': proj_size, 'batch_first': b_f, 'bias': b, 'bidirectional': bidir} samples.append( ModuleInput( constructor_input=FunctionInput(**cons_args), forward_input=FunctionInput(make_input((2, 2))), reference_fn=partial(no_batch_dim_reference_lstm, batch_first=b_f), ) ) h_out = proj_size if proj_size > 0 else hidden_size hx = (make_input((4 if bidir else 2, h_out)), make_input((4 if bidir else 2, hidden_size))) samples.append( ModuleInput( constructor_input=FunctionInput(**cons_args_hidden), forward_input=FunctionInput(make_input((3, 2)), hx), reference_fn=partial(no_batch_dim_reference_lstm, batch_first=b_f), ) ) return samples def module_inputs_torch_nn_ReflectionPad1d(module_info, device, dtype, requires_grad, training, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) return [ ModuleInput( constructor_input=FunctionInput(1), forward_input=FunctionInput(make_input((2, 3))), reference_fn=no_batch_dim_reference_fn, ), ModuleInput( constructor_input=FunctionInput((1, 2)), forward_input=FunctionInput(make_input((2, 3, 4))), ), ] def module_inputs_torch_nn_ReflectionPad2d(module_info, device, dtype, requires_grad, training, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) return [ ModuleInput( constructor_input=FunctionInput(1), forward_input=FunctionInput(make_input((3, 4, 5))), reference_fn=no_batch_dim_reference_fn, ), ModuleInput( constructor_input=FunctionInput((1, 2, 3, 4)), forward_input=FunctionInput(make_input((3, 4, 5, 6))), ), ] def module_inputs_torch_nn_ReflectionPad3d(module_info, device, dtype, requires_grad, training, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) return [ ModuleInput( constructor_input=FunctionInput(1), forward_input=FunctionInput(make_input((2, 3, 4, 5))), reference_fn=no_batch_dim_reference_fn ), ModuleInput( constructor_input=FunctionInput((1, 2, 1, 2, 1, 2)), forward_input=FunctionInput(make_input((3, 3, 3, 3, 3))), ), ] def module_inputs_torch_nn_ReplicationPad1d(module_info, device, dtype, requires_grad, training, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) return [ ModuleInput( constructor_input=FunctionInput(1), forward_input=FunctionInput(make_input((3, 4))), reference_fn=no_batch_dim_reference_fn ), ModuleInput( constructor_input=FunctionInput((1, 2)), forward_input=FunctionInput(make_input((3, 4, 5))), ), ] def module_inputs_torch_nn_ReplicationPad2d(module_info, device, dtype, requires_grad, training, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) return [ ModuleInput( constructor_input=FunctionInput(1), forward_input=FunctionInput(make_input((3, 4, 5))), reference_fn=no_batch_dim_reference_fn, ), ModuleInput( constructor_input=FunctionInput((1, 2, 3, 4)), forward_input=FunctionInput(make_input((3, 4, 5, 6))), 
), ] def module_inputs_torch_nn_ReplicationPad3d(module_info, device, dtype, requires_grad, training, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) return [ ModuleInput( constructor_input=FunctionInput(1), forward_input=FunctionInput(make_input((3, 4, 5, 6))), reference_fn=no_batch_dim_reference_fn, ), ModuleInput( constructor_input=FunctionInput((1, 2, 3, 4, 5, 6)), forward_input=FunctionInput(make_input((3, 4, 5, 6, 7))), ), ] def module_inputs_torch_nn_ZeroPad1d(module_info, device, dtype, requires_grad, training, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) return [ ModuleInput( constructor_input=FunctionInput(1), forward_input=FunctionInput(make_input((3, 4))), reference_fn=no_batch_dim_reference_fn, ), ModuleInput( constructor_input=FunctionInput((1, 2)), forward_input=FunctionInput(make_input((3, 4, 5))), ), ] def module_inputs_torch_nn_ZeroPad2d(module_info, device, dtype, requires_grad, training, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) return [ ModuleInput( constructor_input=FunctionInput(1), forward_input=FunctionInput(make_input((1, 2, 3))), reference_fn=no_batch_dim_reference_fn ), ModuleInput( constructor_input=FunctionInput((1, 2, 3, 4)), forward_input=FunctionInput(make_input((1, 2, 3, 4))), ), ] def module_inputs_torch_nn_ZeroPad3d(module_info, device, dtype, requires_grad, training, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) return [ ModuleInput( constructor_input=FunctionInput(1), forward_input=FunctionInput(make_input((3, 4, 5, 6))), reference_fn=no_batch_dim_reference_fn, ), ModuleInput( constructor_input=FunctionInput((1, 2, 3, 4, 5, 6)), forward_input=FunctionInput(make_input((1, 2, 3, 4, 5))), ), ] def module_inputs_torch_nn_ConstantPad1d(module_info, device, dtype, requires_grad, training, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) return [ ModuleInput( constructor_input=FunctionInput(1, 2), forward_input=FunctionInput(make_input((3, 4))), reference_fn=no_batch_dim_reference_fn, ), ModuleInput( constructor_input=FunctionInput((1, 2), 3), forward_input=FunctionInput(make_input((3, 4, 5))), ), ] def module_inputs_torch_nn_ConstantPad2d(module_info, device, dtype, requires_grad, training, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) return [ ModuleInput( constructor_input=FunctionInput(1, 3), forward_input=FunctionInput(make_input((3, 4, 5))), reference_fn=no_batch_dim_reference_fn ), ModuleInput( constructor_input=FunctionInput((1, 2, 3, 4), 5), forward_input=FunctionInput(make_input((1, 2, 3, 4))), ), ] def module_inputs_torch_nn_ConstantPad3d(module_info, device, dtype, requires_grad, training, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) return [ ModuleInput( constructor_input=FunctionInput(1, 3), forward_input=FunctionInput(make_input((3, 4, 5, 6))), reference_fn=no_batch_dim_reference_fn, ), ModuleInput( constructor_input=FunctionInput((1, 2, 3, 4, 5, 6), 7), forward_input=FunctionInput(make_input((1, 2, 1, 2, 1))), ), ] def module_inputs_torch_nn_CircularPad1d(module_info, device, dtype, requires_grad, training, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) def padding1d_circular_ref(inp, pad): r""" input: 
[[[0., 1., 2.], [3., 4., 5.]]] pad: (1, 2) output: [[[2., 0., 1., 2., 0., 1.], [5., 3., 4., 5., 3., 4.]]] """ return torch.cat([inp[:, :, -pad[0]:], inp, inp[:, :, :pad[1]]], dim=2) return [ ModuleInput( constructor_input=FunctionInput(1), forward_input=FunctionInput(make_input((3, 4))), reference_fn=no_batch_dim_reference_fn ), ModuleInput( constructor_input=FunctionInput((1, 2)), forward_input=FunctionInput(make_input((1, 2, 3))), reference_fn=lambda m, p, i: padding1d_circular_ref(i, m.padding), ), ModuleInput( constructor_input=FunctionInput((3, 1)), forward_input=FunctionInput(make_input((1, 2, 3))), reference_fn=lambda m, p, i: padding1d_circular_ref(i, m.padding), ), ModuleInput( constructor_input=FunctionInput((3, 3)), forward_input=FunctionInput(make_input((1, 2, 3))), reference_fn=lambda m, p, i: padding1d_circular_ref(i, m.padding), ), ] def module_inputs_torch_nn_CircularPad2d(module_info, device, dtype, requires_grad, training, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) def padding2d_circular_ref(inp, pad): r"""input: [[[[0., 1., 2], [3., 4., 5.]]]] pad: (1, 2, 2, 1) output: [[[[2., 0., 1., 2., 0., 1.], [5., 3., 4., 5., 3., 4.], [2., 0., 1., 2., 0., 1.], [5., 3., 4., 5., 3., 4.], [2., 0., 1., 2., 0., 1.]]]] """ inp = torch.cat([inp[:, :, -pad[2]:], inp, inp[:, :, :pad[3]]], dim=2) return torch.cat([inp[:, :, :, -pad[0]:], inp, inp[:, :, :, :pad[1]]], dim=3) return [ ModuleInput( constructor_input=FunctionInput(1), forward_input=FunctionInput(make_input((3, 4, 5))), reference_fn=no_batch_dim_reference_fn, ), ModuleInput( constructor_input=FunctionInput((1, 2, 2, 1)), forward_input=FunctionInput(make_input((1, 1, 2, 3))), reference_fn=lambda m, p, i: padding2d_circular_ref(i, m.padding), ), ModuleInput( constructor_input=FunctionInput((2, 3, 2, 2)), forward_input=FunctionInput(make_input((1, 1, 2, 3))), reference_fn=lambda m, p, i: padding2d_circular_ref(i, m.padding), ), ModuleInput( constructor_input=FunctionInput((3, 3, 3, 1)), forward_input=FunctionInput(make_input((1, 1, 3, 3))), reference_fn=lambda m, p, i: padding2d_circular_ref(i, m.padding), ), ] def module_inputs_torch_nn_CircularPad3d(module_info, device, dtype, requires_grad, training, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) def padding3d_circular_ref(inp, pad): r"""input: [[[[[ 0., 1., 2.], [ 3., 4., 5.]], [[ 6., 7., 8.], [ 9., 10., 11.]]]]] pad: (1, 2, 2, 1, 1, 2) output: [[[[[ 8., 6., 7., 8., 6., 7.], [11., 9., 10., 11., 9., 10.], [ 8., 6., 7., 8., 6., 7.], [11., 9., 10., 11., 9., 10.], [ 8., 6., 7., 8., 6., 7.]], [[ 2., 0., 1., 2., 0., 1.], [ 5., 3., 4., 5., 3., 4.], [ 2., 0., 1., 2., 0., 1.], [ 5., 3., 4., 5., 3., 4.], [ 2., 0., 1., 2., 0., 1.]], [[ 8., 6., 7., 8., 6., 7.], [11., 9., 10., 11., 9., 10.], [ 8., 6., 7., 8., 6., 7.], [11., 9., 10., 11., 9., 10.], [ 8., 6., 7., 8., 6., 7.]], [[ 2., 0., 1., 2., 0., 1.], [ 5., 3., 4., 5., 3., 4.], [ 2., 0., 1., 2., 0., 1.], [ 5., 3., 4., 5., 3., 4.], [ 2., 0., 1., 2., 0., 1.]], [[ 8., 6., 7., 8., 6., 7.], [11., 9., 10., 11., 9., 10.], [ 8., 6., 7., 8., 6., 7.], [11., 9., 10., 11., 9., 10.], [ 8., 6., 7., 8., 6., 7.]]]]] """ inp = torch.cat([inp[:, :, -pad[4]:], inp, inp[:, :, :pad[5]]], dim=2) inp = torch.cat([inp[:, :, :, -pad[2]:], inp, inp[:, :, :, :pad[3]]], dim=3) return torch.cat([inp[:, :, :, :, -pad[0]:], inp, inp[:, :, :, :, :pad[1]]], dim=4) return [ ModuleInput( constructor_input=FunctionInput(1), forward_input=FunctionInput(make_input((3, 
4, 5, 6))), reference_fn=no_batch_dim_reference_fn, ), ModuleInput( constructor_input=FunctionInput((1, 2, 1, 2, 1, 2)), forward_input=FunctionInput(make_input((1, 1, 2, 2, 3))), reference_fn=lambda m, p, i: padding3d_circular_ref(i, m.padding) ), ModuleInput( constructor_input=FunctionInput((3, 2, 2, 1, 1, 2)), forward_input=FunctionInput(make_input((1, 1, 2, 2, 3))), reference_fn=lambda m, p, i: padding3d_circular_ref(i, m.padding) ), ModuleInput( constructor_input=FunctionInput((3, 3, 2, 1, 2, 2)), forward_input=FunctionInput(make_input((1, 1, 2, 2, 3))), reference_fn=lambda m, p, i: padding3d_circular_ref(i, m.padding) ), ] # All these operators share similar issues on cuDNN and MIOpen rnn_gru_lstm_module_info_decorators = ( # RuntimeError: Batching rule not implemented for aten::_cudnn_rnn_backward. # We could not generate a fallback DecorateInfo( unittest.expectedFailure, "TestModule", "test_grad", active_if=(TEST_CUDNN and not TEST_WITH_ROCM), device_type='cuda' ), # NotImplementedError: the derivative for '_cudnn_rnn_backward' is not implemented. # Double backwards is not supported for CuDNN RNNs due to limitations in the CuDNN API DecorateInfo( unittest.expectedFailure, "TestModule", "test_gradgrad", active_if=(TEST_CUDNN and not TEST_WITH_ROCM), device_type='cuda' ), # CUDNN GRU doesn't accept non-contiguous hx DecorateInfo( unittest.expectedFailure, "TestModule", "test_non_contiguous_tensors", active_if=(TEST_CUDNN and not TEST_WITH_ROCM), device_type='cuda' ), # MIOPEN GRU doesn't accept non-contiguous hx (this is dispatched to miopen only for float). DecorateInfo( unittest.expectedFailure, "TestModule", "test_non_contiguous_tensors", active_if=(TEST_CUDNN and TEST_WITH_ROCM), dtypes=(torch.float,), device_type='cuda' ), DecorateInfo( skipCUDAVersionIn([(11, 7)]), "TestExpandedWeightModule", "test_module", device_type='cuda' ), DecorateInfo( skipCUDAVersionIn([(11, 7)]), "TestDecomp", "test_rnn_decomp_module", device_type='cuda' ) ) # Start of module error inputs functions. 
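# Each entry below wraps a ModuleInput in an ErrorModuleInput, recording when the error
# is expected (error_on: construction vs. forward), the exception type (error_type), and
# a regex the error message should match (error_regex).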
def module_error_inputs_torch_nn_RNN_GRU_Cell(module_info, device, dtype, requires_grad, training, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) samples = [ ErrorModuleInput( ModuleInput( constructor_input=FunctionInput(10, 20), forward_input=FunctionInput(make_input(3, 11), make_input(3, 20)), ), error_on=ModuleErrorEnum.FORWARD_ERROR, error_type=RuntimeError, error_regex="input has inconsistent input_size: got 11 expected 10" ), ErrorModuleInput( ModuleInput( constructor_input=FunctionInput(10, 20), forward_input=FunctionInput(make_input(3, 10), make_input(3, 21)), ), error_on=ModuleErrorEnum.FORWARD_ERROR, error_type=RuntimeError, error_regex="hidden0 has inconsistent hidden_size: got 21, expected 20" ), ErrorModuleInput( ModuleInput( constructor_input=FunctionInput(10, 20), forward_input=FunctionInput(make_input(3, 10), make_input(5, 20)), ), error_on=ModuleErrorEnum.FORWARD_ERROR, error_type=RuntimeError, error_regex="Input batch size 3 doesn't match hidden0 batch size 5" ), ErrorModuleInput( ModuleInput( constructor_input=FunctionInput(10, 20), forward_input=FunctionInput(make_input(3, 10), make_input(3, 1, 1, 20)), ), error_on=ModuleErrorEnum.FORWARD_ERROR, error_type=ValueError, error_regex="Expected hidden to be 1D or 2D, got 4D instead" ), ErrorModuleInput( ModuleInput( constructor_input=FunctionInput(10, 20, 'relu'), forward_input=FunctionInput(make_input(3, 10), make_input(3, 21)), ), error_on=ModuleErrorEnum.FORWARD_ERROR, error_type=RuntimeError, error_regex="hidden0 has inconsistent hidden_size: got 21, expected 20" ), ErrorModuleInput( ModuleInput( constructor_input=FunctionInput(10, 20, 'tanh'), forward_input=FunctionInput(make_input(3, 10), make_input(3, 21)), ), error_on=ModuleErrorEnum.FORWARD_ERROR, error_type=RuntimeError, error_regex="hidden0 has inconsistent hidden_size: got 21, expected 20" ), ] return samples def module_error_inputs_torch_nn_LSTMCell(module_info, device, dtype, requires_grad, training, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) samples = [ ErrorModuleInput( ModuleInput( constructor_input=FunctionInput(10, 20), forward_input=FunctionInput(make_input(3, 11), (make_input(3, 20), make_input(3, 20))), ), error_on=ModuleErrorEnum.FORWARD_ERROR, error_type=RuntimeError, error_regex="input has inconsistent input_size: got 11 expected 10" ), ErrorModuleInput( ModuleInput( constructor_input=FunctionInput(10, 20), forward_input=FunctionInput(make_input(3, 10), (make_input(3, 21), make_input(3, 21))), ), error_on=ModuleErrorEnum.FORWARD_ERROR, error_type=RuntimeError, error_regex="hidden0 has inconsistent hidden_size: got 21, expected 20" ), ErrorModuleInput( ModuleInput( constructor_input=FunctionInput(10, 20), forward_input=FunctionInput(make_input(3, 10), (make_input(5, 20), make_input(5, 20))), ), error_on=ModuleErrorEnum.FORWARD_ERROR, error_type=RuntimeError, error_regex="Input batch size 3 doesn't match hidden0 batch size 5" ), ErrorModuleInput( ModuleInput( constructor_input=FunctionInput(10, 20), forward_input=FunctionInput(make_input(3, 10), (make_input(3, 1, 1, 20), make_input(3, 1, 1, 20))), ), error_on=ModuleErrorEnum.FORWARD_ERROR, error_type=ValueError, error_regex="Expected hx\\[0\\] to be 1D or 2D, got 4D instead" ), ] return samples def module_error_inputs_torch_nn_RNN_GRU(module_info, device, dtype, requires_grad, training, **kwargs): samples = [ ErrorModuleInput( ModuleInput(constructor_input=FunctionInput(10, 0, 1)), 
error_on=ModuleErrorEnum.CONSTRUCTION_ERROR, error_type=ValueError, error_regex="hidden_size must be greater than zero" ), ErrorModuleInput( ModuleInput(constructor_input=FunctionInput(10, 10, 0)), error_on=ModuleErrorEnum.CONSTRUCTION_ERROR, error_type=ValueError, error_regex="num_layers must be greater than zero" ), ] return samples def module_error_inputs_torch_nn_Pad1d(module_info, device, dtype, requires_grad, training, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) is_constant = kwargs.get('is_constant', False) return [ ErrorModuleInput( ModuleInput( constructor_input=FunctionInput(1, 3) if is_constant else FunctionInput(3), forward_input=FunctionInput(make_input((2, 3, 4, 5))), ), error_on=ModuleErrorEnum.FORWARD_ERROR, error_type=ValueError, error_regex=r"expected 2D or 3D input \(got 4D input\)", ), ] def module_error_inputs_torch_nn_Pad2d(module_info, device, dtype, requires_grad, training, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) is_constant = kwargs.get('is_constant', False) return [ ErrorModuleInput( ModuleInput( constructor_input=FunctionInput(1, 3) if is_constant else FunctionInput(3), forward_input=FunctionInput(make_input((2, 3))), ), error_on=ModuleErrorEnum.FORWARD_ERROR, error_type=ValueError, error_regex=r"expected 3D or 4D input \(got 2D input\)", ), ] def module_error_inputs_torch_nn_Pad3d(module_info, device, dtype, requires_grad, training, **kwargs): make_input = partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) is_constant = kwargs.get('is_constant', False) return [ ErrorModuleInput( ModuleInput( constructor_input=FunctionInput(1, 3) if is_constant else FunctionInput(3), forward_input=FunctionInput(make_input((2, 3))), ), error_on=ModuleErrorEnum.FORWARD_ERROR, error_type=ValueError, error_regex=r"expected 4D or 5D input \(got 2D input\)", ), ] _macos15_or_newer = torch.backends.mps.is_available() and torch.backends.mps.is_macos_or_newer(15, 0) # Database of ModuleInfo entries in alphabetical order. 
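# Each ModuleInfo pairs a module class with its module_inputs_func, plus any
# device/dtype-specific skips and decorators expressed as DecorateInfo entries
# (optionally gated by active_if, device_type, or dtypes).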
module_db: list[ModuleInfo] = [ ModuleInfo(torch.nn.AdaptiveAvgPool1d, module_inputs_func=module_inputs_torch_nn_AdaptiveAvgPool1d, skips=( # Fails on MPS backend if input/output sizes are not divisible DecorateInfo(skipMPS),) ), ModuleInfo(torch.nn.AdaptiveAvgPool2d, gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, module_inputs_func=module_inputs_torch_nn_AdaptiveAvgPool2d, skips=( # Fails on MPS backend if input/output sizes are not divisible DecorateInfo(skipMPS), # Fails on backward check if output size is 1x1 DecorateInfo( unittest.expectedFailure, 'TestModule', 'test_memory_format', active_if=operator.itemgetter('training'), ),) ), ModuleInfo(torch.nn.AdaptiveAvgPool3d, gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, module_inputs_func=module_inputs_torch_nn_AdaptiveAvgPool3d, skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'), # not supported on MPS backend DecorateInfo(skipMPS),) ), ModuleInfo(torch.nn.AdaptiveMaxPool1d, module_inputs_func=module_inputs_torch_nn_AdaptiveMaxPool1d, ), ModuleInfo(torch.nn.AdaptiveMaxPool2d, gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, module_inputs_func=module_inputs_torch_nn_AdaptiveMaxPool2d, ), ModuleInfo(torch.nn.AdaptiveMaxPool3d, gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, module_inputs_func=module_inputs_torch_nn_AdaptiveMaxPool3d, skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'), # not supported on MPS backend DecorateInfo(skipMPS),) ), ModuleInfo(torch.nn.AvgPool1d, module_inputs_func=module_inputs_torch_nn_AvgPool1d, ), ModuleInfo(torch.nn.AvgPool2d, module_inputs_func=module_inputs_torch_nn_AvgPool2d, skips=( # The difference between channels last backward and # channels first backward of AvgPool2d on CUDA is too large # See https://github.com/pytorch/pytorch/issues/107201 DecorateInfo( unittest.expectedFailure, 'TestModule', 'test_memory_format', active_if=operator.itemgetter('training'), device_type='cuda', ), # error: input types 'tensor<f32>' and 'tensor<15x10xf16>' are not broadcast compatible DecorateInfo(skipIfMPSOnMacOS13, 'TestModule', dtypes=[torch.float16], device_type='mps',),), ), ModuleInfo(torch.nn.AvgPool3d, module_inputs_func=module_inputs_torch_nn_AvgPool3d, gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, skips=( # No channels_last support for AvgPool1d as it does not take 4D inputs DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'), # not supported on MPS backend DecorateInfo(skipMPS),) ), ModuleInfo(torch.nn.BatchNorm1d, train_and_eval_differ=True, module_inputs_func=module_inputs_torch_nn_BatchNorm1d, skips=( # tracking here rather than in the list in test_aotdispatch.py as eval mode passes # RuntimeError: tried to get Double out of SymInt DecorateInfo( unittest.expectedFailure, 'TestEagerFusionModuleInfo', 'test_aot_autograd_symbolic_module_exhaustive', active_if=operator.itemgetter('training') ), # torch._subclasses.fake_tensor.DataDependentOutputException: aten._local_scalar_dense.default DecorateInfo( unittest.expectedFailure, 'TestEagerFusionModuleInfo', 'test_aot_autograd_module_exhaustive', active_if=operator.itemgetter('training') )) ), ModuleInfo(torch.nn.BatchNorm2d, train_and_eval_differ=True, module_inputs_func=module_inputs_torch_nn_BatchNorm2d, skips=( # See https://github.com/pytorch/pytorch/issues/134580 DecorateInfo(expectedFailureMPS, 'TestModule', 'test_memory_format', active_if=operator.itemgetter('training')), # tracking here rather than in the list in test_aotdispatch.py as eval mode passes # RuntimeError: tried to get 
Double out of SymInt DecorateInfo( unittest.expectedFailure, 'TestEagerFusionModuleInfo', 'test_aot_autograd_symbolic_module_exhaustive', active_if=operator.itemgetter('training') ), # torch._subclasses.fake_tensor.DataDependentOutputException: aten._local_scalar_dense.default DecorateInfo( unittest.expectedFailure, 'TestEagerFusionModuleInfo', 'test_aot_autograd_module_exhaustive', active_if=operator.itemgetter('training') ),) ), ModuleInfo(torch.nn.BatchNorm3d, train_and_eval_differ=True, module_inputs_func=module_inputs_torch_nn_BatchNorm3d, skips=( # not supported on MPS backend DecorateInfo(skipMPS), # tracking here rather than in the list in test_aotdispatch.py as eval mode passes # RuntimeError: tried to get Double out of SymInt DecorateInfo( unittest.expectedFailure, 'TestEagerFusionModuleInfo', 'test_aot_autograd_symbolic_module_exhaustive', active_if=operator.itemgetter('training') ), # torch._subclasses.fake_tensor.DataDependentOutputException: aten._local_scalar_dense.default DecorateInfo( unittest.expectedFailure, 'TestEagerFusionModuleInfo', 'test_aot_autograd_module_exhaustive', active_if=operator.itemgetter('training') ),) ), ModuleInfo(torch.nn.CELU, module_inputs_func=module_inputs_torch_nn_CELU, # not MPS specific, will be xfailed for all devices in next PR skips=( DecorateInfo(unittest.expectedFailure, 'TestModule', 'test_check_inplace', device_type='mps', dtypes=[torch.float16]),) ), ModuleInfo(torch.nn.Conv1d, module_inputs_func=partial(module_inputs_torch_nn_ConvNd, N=1, lazy=False), gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, module_memformat_affects_out=True, skips=( # channels_last support on cuda requires cudnn >= 7603 DecorateInfo(skipCUDAIfCudnnVersionLessThan(version=7603), 'TestModule', 'test_memory_format'), # Failure on ROCM for float32 issue #70125 DecorateInfo(skipCUDAIfRocm, 'TestModule', 'test_memory_format', dtypes=[torch.float32]), # See #119108: MPSNDArrayConvolutionA14.mm:3976: failed assertion `destination datatype must be fp32' # xfail does not work due to Fatal Python error: Aborted DecorateInfo(skipIfMPSOnMacOS13, "TestModule", "test_memory_format", device_type='mps', dtypes=[torch.float16]), DecorateInfo(skipIfMPSOnMacOS13, "TestModule", "test_non_contiguous_tensors", device_type='mps', dtypes=[torch.float16]), ), decorators=( DecorateInfo(precisionOverride({torch.float32: 1e-04}), 'TestModule', 'test_memory_format'), )), ModuleInfo(torch.nn.Conv2d, module_inputs_func=partial(module_inputs_torch_nn_ConvNd, N=2, lazy=False), gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, module_memformat_affects_out=True, skips=( # channels_last support on cuda requires cudnn >= 7603 DecorateInfo(skipCUDAIfCudnnVersionLessThan(version=7603), 'TestModule', 'test_memory_format'), # Failure on ROCM for float32 issue #70125 DecorateInfo(skipCUDAIfRocm, 'TestModule', 'test_memory_format', dtypes=[torch.float32]), # This was wrongly being skipped before and needs investigation. 
# See https://github.com/pytorch/pytorch/issues/80247 DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format", device_type='cuda', dtypes=[torch.float64]), # Fails with channels last test on MPS backend DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format", device_type='mps', dtypes=[torch.float32, torch.float16]), # See #119108: MPSNDArrayConvolutionA14.mm:3976: failed assertion `destination datatype must be fp32' # xfail does not work due to Fatal Python error: Aborted DecorateInfo(skipIfMPSOnMacOS13, "TestModule", "test_memory_format", device_type='mps', dtypes=[torch.float16]), DecorateInfo(skipIfMPSOnMacOS13, "TestModule", "test_non_contiguous_tensors", device_type='mps', dtypes=[torch.float16]), ), decorators=( DecorateInfo(precisionOverride({torch.float32: 1e-04}), 'TestModule', 'test_memory_format'), )), ModuleInfo(torch.nn.Conv3d, module_inputs_func=partial(module_inputs_torch_nn_ConvNd, N=3, lazy=False), gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, module_memformat_affects_out=True, skips=( # channels_last support on cuda requires cudnn >= 8005 DecorateInfo(skipCUDAIfCudnnVersionLessThan(version=8005), 'TestModule', 'test_memory_format'), # Failure on ROCM for float32 issue #70125 DecorateInfo(skipCUDAIfRocm, 'TestModule', 'test_memory_format', dtypes=[torch.float32]), # Conv3d is not supported on MPS backend DecorateInfo(skipMPS, device_type="mps"), # This was wrongly being skipped before and needs investigation. # See https://github.com/pytorch/pytorch/issues/80247 DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format"), ), decorators=( DecorateInfo(precisionOverride({torch.float32: 1e-04}), 'TestModule', 'test_memory_format'), )), ModuleInfo(torch.nn.ConvTranspose1d, module_inputs_func=partial(module_inputs_torch_nn_ConvNd, N=1, lazy=False, transposed=True), gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, module_memformat_affects_out=True, dtypes=floating_and_complex_types_and(torch.chalf), skips=( # channels_last support on cuda requires cudnn >= 7603 DecorateInfo(skipCUDAIfCudnnVersionLessThan(version=7603), 'TestModule', 'test_memory_format'), # Failure on ROCM for float32 issue #70125 DecorateInfo(skipCUDAIfRocm, 'TestModule', 'test_memory_format', dtypes=[torch.float32]), # Not implemented for chalf on CPU DecorateInfo(unittest.expectedFailure, 'TestModule', 'test_cpu_gpu_parity', dtypes=(torch.chalf,), device_type='cuda'), # See #119108: MPSNDArrayConvolutionA14.mm:3976: failed assertion `destination datatype must be fp32' # xfail does not work due to Fatal Python error: Aborted DecorateInfo(skipIfMPSOnMacOS13, "TestModule", "test_memory_format", device_type='mps', dtypes=[torch.float16]), DecorateInfo(skipIfMPSOnMacOS13, "TestModule", "test_non_contiguous_tensors", device_type='mps', dtypes=[torch.float16]),), decorators=( DecorateInfo(precisionOverride({torch.float32: 1e-04}), 'TestModule', 'test_memory_format'), DecorateInfo(precisionOverride({torch.chalf: 5e-03}), 'TestModule', 'test_memory_format'), )), ModuleInfo(torch.nn.ConvTranspose2d, module_inputs_func=partial(module_inputs_torch_nn_ConvNd, N=2, lazy=False, transposed=True), gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, module_memformat_affects_out=True, dtypes=floating_and_complex_types_and(torch.chalf), skips=( # channels_last support on cuda requires cudnn >= 7603 DecorateInfo(skipCUDAIfCudnnVersionLessThan(version=7603), 'TestModule', 'test_memory_format'), # Failure on ROCM for float32 issue #70125 DecorateInfo(skipCUDAIfRocm, 'TestModule', 
'test_memory_format', dtypes=[torch.float32]), # Fails on backward check because ViewAsRealBackward apply contiguous for grad DecorateInfo(unittest.expectedFailure, 'TestModule', 'test_memory_format', dtypes=(torch.complex32, torch.complex64, torch.complex128)), # This was wrongly being skipped before and needs investigation. # See https://github.com/pytorch/pytorch/issues/80247 DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format", device_type='cuda', dtypes=[torch.float64, torch.complex128]), # Fails with channels last test on MPS backend DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format", device_type='mps', dtypes=[torch.float16, torch.float32]), # Not implemented for chalf on CPU DecorateInfo(unittest.expectedFailure, 'TestModule', 'test_cpu_gpu_parity', dtypes=(torch.chalf,), device_type='cuda'), # See #119108: MPSNDArrayConvolutionA14.mm:3976: failed assertion `destination datatype must be fp32' # xfail does not work due to Fatal Python error: Aborted DecorateInfo(skipIfMPSOnMacOS13, "TestModule", "test_memory_format", device_type='mps', dtypes=[torch.float16]), DecorateInfo(skipIfMPSOnMacOS13, "TestModule", "test_non_contiguous_tensors", device_type='mps', dtypes=[torch.float16]), ), decorators=( DecorateInfo(precisionOverride({torch.float32: 1e-04}), 'TestModule', 'test_memory_format'), DecorateInfo(precisionOverride({torch.chalf: 5e-03}), 'TestModule', 'test_memory_format'), )), ModuleInfo(torch.nn.ConvTranspose3d, module_inputs_func=partial(module_inputs_torch_nn_ConvNd, N=3, lazy=False, transposed=True), dtypes=floating_and_complex_types_and(torch.chalf), gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, module_memformat_affects_out=True, skips=( # channels_last support on cuda requires cudnn >= 8005 DecorateInfo(skipCUDAIfCudnnVersionLessThan(version=8005), 'TestModule', 'test_memory_format'), # Failure on ROCM for float32 issue #70125 DecorateInfo(skipCUDAIfRocm, 'TestModule', 'test_memory_format', dtypes=[torch.float32]), # ConvTranspose3d is not supported on MPS backend DecorateInfo(skipMPS), # This was wrongly being skipped before and needs investigation. # See https://github.com/pytorch/pytorch/issues/80247 DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format"), # These fail only on ROCm DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format", device_type='cuda', dtypes=[torch.complex32, torch.complex64], active_if=TEST_WITH_ROCM), # Not implemented for chalf on CPU DecorateInfo(unittest.expectedFailure, 'TestModule', 'test_cpu_gpu_parity', dtypes=(torch.chalf,), device_type='cuda'), ), decorators=( DecorateInfo(precisionOverride({torch.float32: 1e-04}), 'TestModule', 'test_memory_format'), DecorateInfo(precisionOverride({torch.complex64: 1e-04}), 'TestModule', 'test_cpu_gpu_parity'), DecorateInfo(precisionOverride({torch.chalf: 5e-03}), 'TestModule', 'test_memory_format'), )), ModuleInfo(torch.nn.CosineEmbeddingLoss, module_inputs_func=module_inputs_torch_nn_CosineEmbeddingLoss, skips=( # No channels_last support for loss functions. 
DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),) ), ModuleInfo(torch.nn.ELU, module_inputs_func=module_inputs_torch_nn_ELU, # not MPS specific, will be xfailed for all devices in next PR skips=( DecorateInfo(unittest.expectedFailure, 'TestModule', 'test_check_inplace', device_type='mps', dtypes=[torch.float16]),) ), ModuleInfo(torch.nn.FractionalMaxPool2d, module_inputs_func=module_inputs_torch_nn_FractionalMaxPool2d, gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, skips=( # not supported on MPS backend DecorateInfo(skipMPS), DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),) ), ModuleInfo(torch.nn.FractionalMaxPool3d, module_inputs_func=module_inputs_torch_nn_FractionalMaxPool3d, gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, skips=( # not supported on MPS backend DecorateInfo(skipMPS), DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),) ), ModuleInfo(torch.nn.L1Loss, module_inputs_func=module_inputs_torch_nn_L1Loss, skips=( # No channels_last support for loss functions. DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),) ), ModuleInfo(torch.nn.SmoothL1Loss, module_inputs_func=module_inputs_torch_nn_SmoothL1Loss, skips=( # No channels_last support for loss functions. DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'), # See #119108: input types 'tensor<f32>' and 'tensor<15x10xf16>' are not broadcast compatible # NS: Still fails on MacOS15.1 DecorateInfo(skipIfMPS, 'TestModule', 'test_non_contiguous_tensors', dtypes=[torch.float16], device_type='mps'),), ), ModuleInfo(torch.nn.LazyConv1d, module_inputs_func=partial(module_inputs_torch_nn_ConvNd, N=1, lazy=True), gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, module_memformat_affects_out=True, skips=( # channels_last support on cuda requires cudnn >= 7603 DecorateInfo(skipCUDAIfCudnnVersionLessThan(version=7603), 'TestModule', 'test_memory_format'), # Failure on ROCM for float32 issue #70125 DecorateInfo(skipCUDAIfRocm, 'TestModule', 'test_memory_format', dtypes=[torch.float32]), # Lazy modules don't currently play well with ModuleInfo tests on the meta device. # See https://github.com/pytorch/pytorch/issues/70505 for more info. DecorateInfo(skipMeta), # See #119108: MPSNDArrayConvolutionA14.mm:3976: failed assertion `destination datatype must be fp32' # xfail does not work due to Fatal Python error: Aborted DecorateInfo(skipIfMPSOnMacOS13, "TestModule", "test_memory_format", device_type='mps', dtypes=[torch.float16]), DecorateInfo(skipIfMPSOnMacOS13, "TestModule", "test_non_contiguous_tensors", device_type='mps', dtypes=[torch.float16]), ), decorators=( DecorateInfo(precisionOverride({torch.float32: 1e-04}), 'TestModule', 'test_memory_format'), )), ModuleInfo(torch.nn.LazyConv2d, module_inputs_func=partial(module_inputs_torch_nn_ConvNd, N=2, lazy=True), gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, module_memformat_affects_out=True, skips=( # channels_last support on cuda requires cudnn >= 7603 DecorateInfo(skipCUDAIfCudnnVersionLessThan(version=7603), 'TestModule', 'test_memory_format'), # Failure on ROCM for float32 issue #70125 DecorateInfo(skipCUDAIfRocm, 'TestModule', 'test_memory_format', dtypes=[torch.float32]), # Lazy modules don't currently play well with ModuleInfo tests on the meta device. # See https://github.com/pytorch/pytorch/issues/70505 for more info. DecorateInfo(skipMeta), # This was wrongly being skipped before and needs investigation. 
# See https://github.com/pytorch/pytorch/issues/80247 DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format", device_type='cuda', dtypes=[torch.float64]), # Fails with channels last test on MPS backend DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format", device_type='mps', dtypes=[torch.float32, torch.float16]), # See #119108: MPSNDArrayConvolutionA14.mm:3976: failed assertion `destination datatype must be fp32' # xfail does not work due to Fatal Python error: Aborted DecorateInfo(skipIfMPSOnMacOS13, "TestModule", "test_memory_format", device_type='mps', dtypes=[torch.float16]), DecorateInfo(skipIfMPSOnMacOS13, "TestModule", "test_non_contiguous_tensors", device_type='mps', dtypes=[torch.float16]), ), decorators=( DecorateInfo(precisionOverride({torch.float32: 1e-04}), 'TestModule', 'test_memory_format'), )), ModuleInfo(torch.nn.LazyConv3d, module_inputs_func=partial(module_inputs_torch_nn_ConvNd, N=3, lazy=True), gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, module_memformat_affects_out=True, skips=( # channels_last support on cuda requires cudnn >= 8005 DecorateInfo(skipCUDAIfCudnnVersionLessThan(version=8005), 'TestModule', 'test_memory_format'), # Failure on ROCM for float32 issue #70125 DecorateInfo(skipCUDAIfRocm, 'TestModule', 'test_memory_format', dtypes=[torch.float32]), # Lazy modules don't currently play well with ModuleInfo tests on the meta device. # See https://github.com/pytorch/pytorch/issues/70505 for more info. DecorateInfo(skipMeta), # LazyConv3d is not supported on MPS backend DecorateInfo(skipMPS), # This was wrongly being skipped before and needs investigation. # See https://github.com/pytorch/pytorch/issues/80247 DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format"), ), decorators=( DecorateInfo(precisionOverride({torch.float32: 1e-04}), 'TestModule', 'test_memory_format'), )), ModuleInfo(torch.nn.LazyConvTranspose1d, module_inputs_func=partial(module_inputs_torch_nn_ConvNd, N=1, lazy=True, transposed=True), gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, module_memformat_affects_out=True, skips=( # channels_last support on cuda requires cudnn >= 7603 DecorateInfo(skipCUDAIfCudnnVersionLessThan(version=7603), 'TestModule', 'test_memory_format'), # Failure on ROCM for float32 issue #70125 DecorateInfo(skipCUDAIfRocm, 'TestModule', 'test_memory_format', dtypes=[torch.float32]), # Lazy modules don't currently play well with ModuleInfo tests on the meta device. # See https://github.com/pytorch/pytorch/issues/70505 for more info. 
DecorateInfo(skipMeta), # See #119108: MPSNDArrayConvolutionA14.mm:3976: failed assertion `destination datatype must be fp32' # xfail does not work due to Fatal Python error: Aborted DecorateInfo(skipIfMPSOnMacOS13, "TestModule", "test_memory_format", device_type='mps', dtypes=[torch.float16]), DecorateInfo(skipIfMPSOnMacOS13, "TestModule", "test_non_contiguous_tensors", device_type='mps', dtypes=[torch.float16]), ), decorators=( DecorateInfo(precisionOverride({torch.float32: 1e-04}), 'TestModule', 'test_memory_format'), )), ModuleInfo(torch.nn.LazyConvTranspose2d, module_inputs_func=partial(module_inputs_torch_nn_ConvNd, N=2, lazy=True, transposed=True), gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, module_memformat_affects_out=True, skips=( # channels_last support on cuda requires cudnn >= 7603 DecorateInfo(skipCUDAIfCudnnVersionLessThan(version=7603), 'TestModule', 'test_memory_format'), # Failure on ROCM for float32 issue #70125 DecorateInfo(skipCUDAIfRocm, 'TestModule', 'test_memory_format', dtypes=[torch.float32]), # Lazy modules don't currently play well with ModuleInfo tests on the meta device. # See https://github.com/pytorch/pytorch/issues/70505 for more info. DecorateInfo(skipMeta), # This was wrongly being skipped before and needs investigation. # See https://github.com/pytorch/pytorch/issues/80247 DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format", device_type='cuda', dtypes=[torch.float64]), # Fails with channels last test on MPS backend DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format", device_type='mps', dtypes=[torch.float32, torch.float16]), # See #119108: MPSNDArrayConvolutionA14.mm:3976: failed assertion `destination datatype must be fp32' # xfail does not work due to Fatal Python error: Aborted DecorateInfo(skipIfMPSOnMacOS13, "TestModule", "test_memory_format", device_type='mps', dtypes=[torch.float16]), DecorateInfo(skipIfMPSOnMacOS13, "TestModule", "test_non_contiguous_tensors", device_type='mps', dtypes=[torch.float16]), ), decorators=( DecorateInfo(precisionOverride({torch.float32: 1e-04}), 'TestModule', 'test_memory_format'), )), ModuleInfo(torch.nn.LazyConvTranspose3d, module_inputs_func=partial(module_inputs_torch_nn_ConvNd, N=3, lazy=True, transposed=True), gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, module_memformat_affects_out=True, skips=( # channels_last support on cuda requires cudnn >= 8005 DecorateInfo(skipCUDAIfCudnnVersionLessThan(version=8005), 'TestModule', 'test_memory_format'), # Failure on ROCM for float32 issue #70125 DecorateInfo(skipCUDAIfRocm, 'TestModule', 'test_memory_format', dtypes=[torch.float32]), # Lazy modules don't currently play well with ModuleInfo tests on the meta device. # See https://github.com/pytorch/pytorch/issues/70505 for more info. DecorateInfo(skipMeta), # LazyConvTranspose3d is not supported on MPS backend DecorateInfo(skipMPS), # This was wrongly being skipped before and needs investigation. # See https://github.com/pytorch/pytorch/issues/80247 DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format"), ), decorators=( DecorateInfo(precisionOverride({torch.float32: 1e-04}), 'TestModule', 'test_memory_format'), )), ModuleInfo(torch.nn.Linear, module_inputs_func=module_inputs_torch_nn_Linear, skips=( # No channels_last support for Linear currently. 
DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),) ), ModuleInfo(torch.nn.Bilinear, module_inputs_func=module_inputs_torch_nn_Bilinear, decorators=[ DecorateInfo( toleranceOverride({ torch.float32: tol(atol=1e-4, rtol=1e-4), torch.float64: tol(atol=1e-4, rtol=1e-4)}), 'TestModule', 'test_forward', device_type='cpu'), ], skips=( # No channels_last support for Bilinear currently. DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'), # See #119108: tolerance issue DecorateInfo(unittest.expectedFailure, "TestModule", "test_forward", device_type='mps', dtypes=[torch.float16]),) ), ModuleInfo(torch.nn.LPPool1d, module_inputs_func=module_inputs_torch_nn_LPPool1d, skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_grad'), DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_gradgrad'),) ), ModuleInfo(torch.nn.LPPool2d, module_inputs_func=module_inputs_torch_nn_LPPool2d, skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_grad'), DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_gradgrad'), # Fails on backward check on MPS # See https://github.com/pytorch/pytorch/issues/107214 DecorateInfo( unittest.expectedFailure, 'TestModule', 'test_memory_format', active_if=operator.itemgetter('training'), device_type='mps', ),) ), ModuleInfo(torch.nn.LPPool3d, module_inputs_func=module_inputs_torch_nn_LPPool3d, skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_grad'), DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_gradgrad'), DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'), DecorateInfo(skipIfMPS, device_type='mps'),) ), ModuleInfo(torch.nn.MaxPool1d, module_inputs_func=module_inputs_torch_nn_MaxPool1d, ), ModuleInfo(torch.nn.MaxPool2d, module_inputs_func=module_inputs_torch_nn_MaxPool2d, ), ModuleInfo(torch.nn.MaxPool3d, module_inputs_func=module_inputs_torch_nn_MaxPool3d, gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, skips=( # not supported on MPS backend DecorateInfo(skipIfMPS, device_type='mps'),) ), ModuleInfo(torch.nn.KLDivLoss, module_inputs_func=module_inputs_torch_nn_KLDivLoss, skips=( # No channels_last support for loss functions. DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'), # https://github.com/pytorch/pytorch/issues/115588 DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_cpu_gpu_parity'), DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_grad'), DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_gradgrad'),) ), ModuleInfo(torch.nn.MSELoss, module_inputs_func=module_inputs_torch_nn_MSELoss, skips=( # No channels_last support for loss functions. DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'), # See #119108: input types 'tensor<f32>' and 'tensor<15x10xf16>' are not broadcast compatible DecorateInfo(skipIfMPSOnMacOS13, 'TestModule', 'test_non_contiguous_tensors', device_type='mps', dtypes=[torch.float16],), # See #119108: tolerance issue DecorateInfo(unittest.expectedFailure, "TestModule", "test_forward", device_type='mps', dtypes=[torch.float16]),) ), ModuleInfo(torch.nn.MarginRankingLoss, module_inputs_func=module_inputs_torch_nn_MarginRankingLoss, skips=( # No channels_last support for loss functions. DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),) ), ModuleInfo(torch.nn.MultiLabelMarginLoss, module_inputs_func=module_inputs_torch_nn_MultiLabelMarginLoss, skips=( # No channels_last support for loss functions. 
DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'), # 'aten::multilabel_margin_loss_forward' is not currently implemented for the MPS device. DecorateInfo(skipIfMPS, 'TestModule', device_type='mps'), # derivative for aten::multilabel_margin_loss_backward is not implemented DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_gradgrad'),) ), ModuleInfo(torch.nn.MultiMarginLoss, module_inputs_func=module_inputs_torch_nn_MultiMarginLoss, skips=( # No channels_last support for loss functions. DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'), # 'aten::multi_margin_loss' is not currently implemented for the MPS device. DecorateInfo(skipIfMPS, 'TestModule', device_type='mps'), # RuntimeError: derivative for aten::multi_margin_loss_backward is not implemented DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_gradgrad'),) ), ModuleInfo(torch.nn.SoftMarginLoss, module_inputs_func=module_inputs_torch_nn_SoftMarginLoss, skips=( # No channels_last support for loss functions. DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'), # See #119108: tolerance issue DecorateInfo(unittest.expectedFailure, "TestModule", "test_forward", device_type='mps', dtypes=[torch.float16]),) ), ModuleInfo(torch.nn.MultiLabelSoftMarginLoss, module_inputs_func=module_inputs_torch_nn_MultiLabelSoftMarginLoss, skips=( # No channels_last support for loss functions. DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),) ), ModuleInfo(torch.nn.NLLLoss, module_inputs_func=module_inputs_torch_nn_NLLLoss, skips=( # No channels_last support for loss functions. DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'), # See #119108: tolerance issue DecorateInfo(unittest.expectedFailure, "TestModule", "test_forward", device_type='mps', dtypes=[torch.float16]),) ), ModuleInfo(torch.nn.GaussianNLLLoss, module_inputs_func=module_inputs_torch_nn_GaussianNLLLoss, skips=( # No channels_last support for loss functions. DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),)), ModuleInfo(torch.nn.PoissonNLLLoss, module_inputs_func=module_inputs_torch_nn_PoissonNLLLoss, skips=( # No channels_last support for loss functions. DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),)), ModuleInfo(torch.nn.HingeEmbeddingLoss, module_inputs_func=module_inputs_torch_nn_HingeEmbeddingLoss, skips=( # No channels_last support for loss functions. DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),) ), ModuleInfo(torch.nn.HuberLoss, module_inputs_func=module_inputs_torch_nn_HuberLoss, skips=( # No channels_last support for loss functions. DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'), # See #119108: seemingly incorrect output dtype DecorateInfo(unittest.expectedFailure, "TestModule", "test_forward", device_type='mps', dtypes=[torch.float16]),) ), ModuleInfo(torch.nn.BCELoss, module_inputs_func=module_inputs_torch_nn_BCELoss, skips=( # No channels_last support for loss functions. DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'), # error: input types 'tensor<f32>' and 'tensor<15x10xf16>' are not broadcast compatible DecorateInfo(skipIfMPS, 'TestModule', dtypes=[torch.float16], device_type='mps'),) ), ModuleInfo(torch.nn.BCEWithLogitsLoss, module_inputs_func=module_inputs_torch_nn_BCEWithLogitsLoss, skips=( # No channels_last support for loss functions. 
DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'), # see #119108: tolerance issue DecorateInfo(skipIfMPS, 'TestModule', dtypes=[torch.float16], device_type='mps'),) ), ModuleInfo(torch.nn.CrossEntropyLoss, module_inputs_func=module_inputs_torch_nn_CrossEntropyLoss, dtypes=get_all_fp_dtypes(include_half=True, include_bfloat16=False), decorators=( # No channels_last support for loss functions. DecorateInfo(unittest.expectedFailure, 'TestModule', 'test_memory_format'), DecorateInfo(toleranceOverride({torch.float16: tol(atol=3e-2, rtol=1e-3)}), "TestModule", "test_forward", dtypes=[torch.float16], device_type='cpu'), DecorateInfo(unittest.expectedFailure, "TestModule", "test_cpu_gpu_parity", dtypes=[torch.float16], device_type='cuda'),), ), ModuleInfo(torch.nn.CTCLoss, module_inputs_func=module_inputs_torch_nn_CTCLoss, skips=( # No channels_last support for loss functions. DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'), # The operator aten::_ctc_loss is not currently implemented for the MPS device. DecorateInfo(skipIfMPS, 'TestModule', device_type='mps',), # derivative for aten::_ctc_loss_backward is not implemented DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_grad'), DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_gradgrad'), # https://github.com/pytorch/pytorch/issues/115585 DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_non_contiguous_tensors'),) ), ModuleInfo(torch.nn.GELU, module_inputs_func=module_inputs_torch_nn_GELU, skips=( # See #119108: tolerance issue DecorateInfo(unittest.expectedFailure, "TestModule", "test_forward", device_type='mps', dtypes=[torch.float16]),) ), ModuleInfo(torch.nn.GLU, module_inputs_func=module_inputs_torch_nn_GLU, ), ModuleInfo(torch.nn.GroupNorm, module_inputs_func=module_inputs_torch_nn_GroupNorm, dtypes=get_all_fp_dtypes(include_bfloat16=True, include_half=True), skips=( # Tracking at https://github.com/pytorch/pytorch/issues/98089 DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_cpu_gpu_parity'), DecorateInfo(toleranceOverride({torch.float32: tol(atol=1e-4, rtol=1e-4)}), 'TestModule', 'test_memory_format', device_type='cpu'), # No channels_last support for GroupNorm currently. 
DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format', device_type='cuda'), DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format', device_type='mps'), DecorateInfo(unittest.skip("Skipped!"), "TestModule", "test_grad", active_if=TEST_WITH_ROCM, device_type='cuda'),) ), ModuleInfo(torch.nn.Hardshrink, module_inputs_func=module_inputs_torch_nn_Hardshrink, skips=( # not supported on MPS backend DecorateInfo(unittest.expectedFailure, 'TestModule', 'test_forward', device_type='mps'), DecorateInfo(unittest.expectedFailure, 'TestModule', 'test_if_train_and_eval_modes_differ', device_type='mps'), DecorateInfo(unittest.expectedFailure, 'TestModule', 'test_memory_format', device_type='mps'), DecorateInfo(unittest.expectedFailure, 'TestModule', 'test_non_contiguous_tensors', device_type='mps'), DecorateInfo(unittest.expectedFailure, 'TestModule', 'test_save_load', device_type='mps'),), ), ModuleInfo(torch.nn.Hardswish, module_inputs_func=module_inputs_torch_nn_Hardswish, skips=None if _macos15_or_newer else ( # Fails on backward check on MPS # See https://github.com/pytorch/pytorch/issues/107214 DecorateInfo( unittest.expectedFailure, 'TestModule', 'test_memory_format', active_if=operator.itemgetter('training'), device_type='mps', ),), supports_gradgrad=False), ModuleInfo(torch.nn.Hardtanh, module_inputs_func=module_inputs_torch_nn_Hardtanh, ), ModuleInfo(torch.nn.InstanceNorm1d, module_inputs_func=partial(module_inputs_torch_nn_InstanceNormNd, N=1), train_and_eval_differ=True, skips=( # No channels_last support for InstanceNorm1d currently. DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),) ), ModuleInfo(torch.nn.InstanceNorm2d, module_inputs_func=partial(module_inputs_torch_nn_InstanceNormNd, N=2), train_and_eval_differ=True, skips=( # No channels_last support for InstanceNorm2d currently. DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),) ), ModuleInfo(torch.nn.InstanceNorm3d, module_inputs_func=partial(module_inputs_torch_nn_InstanceNormNd, N=3), train_and_eval_differ=True, skips=( # not supported on MPS backend DecorateInfo(expectedFailureMPS, 'TestModuleMPS', 'test_memory_format'), DecorateInfo(expectedFailureMPS, 'TestModuleMPS', 'test_non_contiguous_tensors'), DecorateInfo(expectedFailureMPS, 'TestModuleMPS', 'test_forward'), DecorateInfo(expectedFailureMPS, 'TestModuleMPS', 'test_non_contiguous'), DecorateInfo(expectedFailureMPS, 'TestModuleMPS', 'test_save_load'), # No channels_last support for InstanceNorm3d currently. DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),) ), ModuleInfo(torch.nn.LocalResponseNorm, module_inputs_func=module_inputs_torch_nn_LocalResponseNorm, skips=( # uses avg_pool3d which is not supported on MPS backend DecorateInfo(expectedFailureMPS, 'TestModule', 'test_memory_format'), DecorateInfo(expectedFailureMPS, 'TestModule', 'test_non_contiguous_tensors'), DecorateInfo(expectedFailureMPS, 'TestModule', 'test_forward'), DecorateInfo(expectedFailureMPS, 'TestModule', 'test_if_train_and_eval_modes_differ'), DecorateInfo(expectedFailureMPS, 'TestModule', 'test_non_contiguous'), DecorateInfo(expectedFailureMPS, 'TestModule', 'test_save_load'),) ), ModuleInfo(torch.nn.LayerNorm, module_inputs_func=module_inputs_torch_nn_LayerNorm, skips=( # No channels_last support for LayerNorm currently. 
DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),) ), ModuleInfo(torch.nn.RMSNorm, module_inputs_func=module_inputs_torch_nn_RMSNorm, ), # TransformerEncoder takes the same inputs as TransformerEncoderLayer ModuleInfo(torch.nn.TransformerEncoder, train_and_eval_differ=True, module_inputs_func=module_inputs_torch_nn_TransformerEncoder, decorators=[ # Not implemented for SDPA backward derivative DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_gradgrad', device_type='cpu'), ], skips=( # No channels_last support for TransformerEncoderLayer currently. DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'), # Doesn't support device / dtype kwargs directly because it is just a # container of TransformerEncoderLayers. DecorateInfo(unittest.expectedFailure, 'TestModule', 'test_factory_kwargs'),) ), ModuleInfo(torch.nn.TransformerEncoderLayer, train_and_eval_differ=True, module_inputs_func=module_inputs_torch_nn_TransformerEncoderLayer, decorators=[ DecorateInfo(toleranceOverride({torch.float32: tol(atol=1e-4, rtol=1e-4)}), 'TestModule', 'test_non_contiguous_tensors', device_type='cpu', active_if=IS_WINDOWS), DecorateInfo(toleranceOverride({torch.float16: tol(atol=1e-4, rtol=2e-3)}), 'TestModule', 'test_forward', device_type='mps'), # Not implemented for SDPA backward derivative DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_gradgrad', device_type='cpu'), ], skips=( # No channels_last support for TransformerEncoderLayer currently. DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),) ), ModuleInfo(torch.nn.TransformerDecoderLayer, module_inputs_func=module_inputs_torch_nn_TransformerDecoderLayer, decorators=[ # Not implemented for SDPA backward derivative DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_gradgrad', device_type='cpu'), ], skips=( # No channels_last support for TransformerDecoderLayer currently. DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),) ), ModuleInfo(torch.nn.Transformer, module_inputs_func=module_inputs_torch_nn_Transformer, # Inputs are too large to run with slow gradcheck # https://github.com/pytorch/pytorch/issues/117140 gradcheck_fast_mode=True, decorators=[ # Not implemented for SDPA backward derivative DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_gradgrad', device_type='cpu'), ], skips=( # No channels_last support for Transformer currently. DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),) ), ModuleInfo(torch.nn.MultiheadAttention, train_and_eval_differ=True, module_inputs_func=module_inputs_torch_nn_MultiheadAttention, skips=( # No channels_last support for MultiheadAttention currently. 
DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),) ), ModuleInfo(torch.nn.Embedding, module_inputs_func=module_inputs_torch_nn_Embedding, decorators=[ DecorateInfo(toleranceOverride({torch.float32: tol(atol=1e-4, rtol=1e-4)}), 'TestModule', 'test_non_contiguous_tensors', device_type='mps')], skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),) ), ModuleInfo(torch.nn.ReLU, module_inputs_func=module_inputs_torch_nn_ReLU, skips=None if _macos15_or_newer else ( # Fails on backward check on MPS # See https://github.com/pytorch/pytorch/issues/107214 DecorateInfo( unittest.expectedFailure, 'TestModule', 'test_memory_format', active_if=operator.itemgetter('training'), device_type='mps', ),) ), ModuleInfo(torch.nn.LeakyReLU, module_inputs_func=module_inputs_torch_nn_LeakyReLU, ), ModuleInfo(torch.nn.ReLU6, module_inputs_func=module_inputs_torch_nn_ReLU6, skips=( # test fails on MPS backend and is being investigated. # See https://github.com/pytorch/pytorch/issues/100914 DecorateInfo(skipMPS),) ), ModuleInfo(torch.nn.PReLU, module_inputs_func=module_inputs_torch_nn_PReLU, skips=( # test fails on MPS backend and is being investigated. # See https://github.com/pytorch/pytorch/issues/100914 DecorateInfo(skipMPS),) ), ModuleInfo(torch.nn.RNNCell, module_inputs_func=partial(module_inputs_torch_nn_RNN_GRU_Cell, is_rnn=True), module_error_inputs_func=module_error_inputs_torch_nn_RNN_GRU_Cell, ), ModuleInfo(torch.nn.GRUCell, module_inputs_func=module_inputs_torch_nn_RNN_GRU_Cell, module_error_inputs_func=module_error_inputs_torch_nn_RNN_GRU_Cell, ), ModuleInfo(torch.nn.LSTMCell, module_inputs_func=module_inputs_torch_nn_LSTMCell, module_error_inputs_func=module_error_inputs_torch_nn_LSTMCell, ), ModuleInfo(torch.nn.Sigmoid, module_inputs_func=module_inputs_torch_nn_Sigmoid, skips=None if _macos15_or_newer else ( # Fails on backward check on MPS # See https://github.com/pytorch/pytorch/issues/107214 DecorateInfo( unittest.expectedFailure, 'TestModule', 'test_memory_format', active_if=operator.itemgetter('training'), device_type='mps', ),) ), ModuleInfo(torch.nn.LogSigmoid, module_inputs_func=module_inputs_torch_nn_LogSigmoid, skips=( # See #119108: tolerance issue DecorateInfo(unittest.expectedFailure, "TestModule", "test_forward", device_type='mps', dtypes=[torch.float16]),) ), ModuleInfo(torch.nn.SiLU, module_inputs_func=module_inputs_torch_nn_SiLU, ), ModuleInfo(torch.nn.Softmax, module_inputs_func=module_inputs_torch_nn_Softmax, ), ModuleInfo(torch.nn.Softmax2d, module_inputs_func=module_inputs_torch_nn_Softmax2d, skips=( # no channels last support for Softmax2d currently DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'), # See #119108: tolerance issue DecorateInfo(unittest.expectedFailure, "TestModule", "test_forward", device_type='mps', dtypes=[torch.float16]),) ), ModuleInfo(torch.nn.LogSoftmax, module_inputs_func=module_inputs_torch_nn_LogSoftmax, skips=( # no channels last support for LogSoftmax currently DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'), # See #119108: inf nan error DecorateInfo(unittest.expectedFailure, "TestModule", "test_forward", device_type='mps', dtypes=[torch.float16]),) ), ModuleInfo(torch.nn.Softmin, module_inputs_func=module_inputs_torch_nn_Softmin, skips=( # no channels last support for Softmin currently DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format'),) ), ModuleInfo(torch.nn.Softplus, 
module_inputs_func=module_inputs_torch_nn_Softplus, skips=( # test fails on MPS backend and is being investigated. # See https://github.com/pytorch/pytorch/issues/100914 DecorateInfo(skipMPS),) ), ModuleInfo(torch.nn.Softshrink, module_inputs_func=module_inputs_torch_nn_Softshrink, skips=( # not supported on MPS backend DecorateInfo(skipMPS),) ), ModuleInfo(torch.nn.Softsign, module_inputs_func=module_inputs_torch_nn_Softsign, ), ModuleInfo(torch.nn.Tanh, module_inputs_func=module_inputs_torch_nn_Tanh, skips=None if _macos15_or_newer else ( # Fails on backward check on MPS # See https://github.com/pytorch/pytorch/issues/107214 DecorateInfo( unittest.expectedFailure, 'TestModule', 'test_memory_format', active_if=operator.itemgetter('training'), device_type='mps', ),) ), ModuleInfo(torch.nn.Tanhshrink, module_inputs_func=module_inputs_torch_nn_Tanhshrink, skips=None if _macos15_or_newer else ( # Fails on backward check on MPS # See https://github.com/pytorch/pytorch/issues/107214 DecorateInfo( unittest.expectedFailure, 'TestModule', 'test_memory_format', active_if=operator.itemgetter('training'), device_type='mps', ),) ), ModuleInfo(torch.nn.Threshold, module_inputs_func=module_inputs_torch_nn_Threshold, skips=( # test fails on MPS backend and is being investigated. # See https://github.com/pytorch/pytorch/issues/100914 DecorateInfo(skipMPS),) ), ModuleInfo(torch.nn.Mish, module_inputs_func=module_inputs_torch_nn_Mish, skips=( # not supported on MPS backend DecorateInfo(skipMPS),) ), ModuleInfo(torch.nn.RNN, train_and_eval_differ=True, module_inputs_func=partial(module_inputs_torch_nn_RNN_GRU, is_rnn=True), module_error_inputs_func=module_error_inputs_torch_nn_RNN_GRU, decorators=rnn_gru_lstm_module_info_decorators ), ModuleInfo(torch.nn.GRU, train_and_eval_differ=True, module_inputs_func=partial(module_inputs_torch_nn_RNN_GRU, is_rnn=False), module_error_inputs_func=module_error_inputs_torch_nn_RNN_GRU, decorators=rnn_gru_lstm_module_info_decorators), ModuleInfo(torch.nn.LSTM, train_and_eval_differ=True, module_inputs_func=module_inputs_torch_nn_LSTM, module_error_inputs_func=module_error_inputs_torch_nn_RNN_GRU, skips=( # LSTM with projections is not currently supported with MPS DecorateInfo(skipMPS),), decorators=rnn_gru_lstm_module_info_decorators), ModuleInfo(torch.nn.ReflectionPad1d, module_inputs_func=module_inputs_torch_nn_ReflectionPad1d, ), ModuleInfo(torch.nn.ReflectionPad2d, module_inputs_func=module_inputs_torch_nn_ReflectionPad2d, gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format', device_type='cuda'), DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format', device_type='mps'),) ), ModuleInfo(torch.nn.ReflectionPad3d, module_inputs_func=module_inputs_torch_nn_ReflectionPad3d, gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format', device_type='cuda'), DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format', device_type='mps'),) ), ModuleInfo(torch.nn.ReplicationPad1d, module_inputs_func=module_inputs_torch_nn_ReplicationPad1d, ), ModuleInfo(torch.nn.ReplicationPad2d, module_inputs_func=module_inputs_torch_nn_ReplicationPad2d, gradcheck_nondet_tol=GRADCHECK_NONDET_TOL, skips=( DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format', device_type='cuda'), DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format', device_type='mps'),) ), 
    ModuleInfo(torch.nn.ReplicationPad3d,
               module_inputs_func=module_inputs_torch_nn_ReplicationPad3d,
               gradcheck_nondet_tol=GRADCHECK_NONDET_TOL,
               skips=(
                   DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format', device_type='cuda'),
                   DecorateInfo(unittest.skip("Skipped!"), 'TestModule', 'test_memory_format', device_type='mps'),)
               ),
    ModuleInfo(torch.nn.SELU,
               module_inputs_func=module_inputs_torch_nn_SELU,
               skips=(
                   # test fails on MPS backend and is being investigated.
                   # See https://github.com/pytorch/pytorch/issues/100914
                   DecorateInfo(skipMPS),)
               ),
    ModuleInfo(torch.nn.ZeroPad1d,
               module_inputs_func=module_inputs_torch_nn_ZeroPad1d,
               ),
    ModuleInfo(torch.nn.ZeroPad2d,
               module_inputs_func=module_inputs_torch_nn_ZeroPad2d,
               skips=(
                   # Fails with channels last test on MPS backend
                   DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format", device_type='mps'),)
               ),
    ModuleInfo(torch.nn.ZeroPad3d,
               module_inputs_func=module_inputs_torch_nn_ZeroPad3d,
               skips=(
                   # Fails with channels last test on MPS backend
                   DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format", device_type='mps'),)
               ),
    ModuleInfo(torch.nn.CircularPad1d,
               module_inputs_func=module_inputs_torch_nn_CircularPad1d,
               module_error_inputs_func=module_error_inputs_torch_nn_Pad1d,
               ),
    ModuleInfo(torch.nn.CircularPad2d,
               module_inputs_func=module_inputs_torch_nn_CircularPad2d,
               module_error_inputs_func=module_error_inputs_torch_nn_Pad2d,
               ),
    ModuleInfo(torch.nn.CircularPad3d,
               module_inputs_func=module_inputs_torch_nn_CircularPad3d,
               module_error_inputs_func=module_error_inputs_torch_nn_Pad3d,
               skips=(
                   # Fails with channels last test on MPS backend
                   DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format"),)
               ),
    ModuleInfo(torch.nn.ConstantPad1d,
               module_inputs_func=module_inputs_torch_nn_ConstantPad1d,
               ),
    ModuleInfo(torch.nn.ConstantPad2d,
               module_inputs_func=module_inputs_torch_nn_ConstantPad2d,
               skips=(
                   # Fails with channels last test on MPS backend
                   DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format", device_type='mps'),)
               ),
    ModuleInfo(torch.nn.ConstantPad3d,
               module_inputs_func=module_inputs_torch_nn_ConstantPad3d,
               skips=(
                   # Fails with channels last test on MPS backend
                   DecorateInfo(unittest.expectedFailure, "TestModule", "test_memory_format", device_type='mps'),)
               )
]
```
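For orientation, the sketch below shows how `module_db` entries like the ones above are typically consumed by device-generic tests. It is illustrative only and not part of either file: the `modules` decorator, `instantiate_device_type_tests`, and the `test_forward` signature follow the usual pattern of PyTorch's `test_modules.py`, but exact names and behavior may differ between versions, so treat this as a hedged sketch rather than the canonical test implementation.

```py
# Hedged sketch: assumes the ModuleInfo/ModuleInput machinery defined above and
# the usual test_modules.py conventions; class and test names here are illustrative.
import torch
from torch.testing._internal.common_device_type import instantiate_device_type_tests
from torch.testing._internal.common_modules import module_db, modules
from torch.testing._internal.common_utils import TestCase, run_tests


class TestModuleSketch(TestCase):
    # `modules(module_db)` parametrizes the test over every ModuleInfo entry,
    # applying the per-entry dtypes, skips, and decorators declared in module_db.
    @modules(module_db)
    def test_forward(self, device, dtype, module_info, training):
        module_cls = module_info.module_cls
        module_inputs = module_info.module_inputs_func(
            module_info, device=device, dtype=dtype,
            requires_grad=False, training=training)
        for module_input in module_inputs:
            if module_input.forward_input is None:
                continue
            # Build the module from the declared constructor args/kwargs ...
            c_args = module_input.constructor_input.args
            c_kwargs = module_input.constructor_input.kwargs
            m = module_cls(*c_args, **c_kwargs).to(device=device, dtype=dtype)
            m.train(training)
            # ... and run a forward pass with the declared forward args/kwargs.
            fw_args = module_input.forward_input.args
            fw_kwargs = module_input.forward_input.kwargs
            with torch.no_grad():
                m(*fw_args, **fw_kwargs)


instantiate_device_type_tests(TestModuleSketch, globals())

if __name__ == "__main__":
    run_tests()
```

The per-entry `skips` and `decorators` seen above (for example `DecorateInfo(skipMPS)` or `precisionOverride`) are applied by this parametrization per test/device/dtype combination, which is why each ModuleInfo entry carries so much backend-specific metadata.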
============================================================================================================================
SOURCE CODE FILE: common_nn.py
LINES: 1
SIZE: 172.32 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\common_nn.py
ENCODING: utf-8
```py
# mypy: ignore-errors

from abc import abstractmethod
import tempfile
import unittest
from copy import deepcopy
from functools import reduce, partial
from itertools import product
from operator import mul

import torch
import torch.cuda
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import _reduction as _Reduction
from torch.testing._internal.common_utils import TestCase, to_gpu, freeze_rng_state, is_iterable, \
    gradcheck, gradgradcheck, set_default_dtype, skipIfTorchDynamo
from torch.testing._internal.common_cuda import TEST_CUDA, SM90OrLater
from torch.autograd.gradcheck import _get_numerical_jacobian, _iter_tensors
from torch.autograd import Variable
from torch.types import _TensorOrTensors
import torch.backends.cudnn

from typing import Callable, Union, Any
from collections.abc import Sequence

TemporaryFile = tempfile.TemporaryFile
PRECISION = 1e-5


def get_reduction(m):
    result = getattr(m, 'reduction', None)
    if result is None:
        result = _Reduction.legacy_get_string(getattr(m, 'sizeAverage', None), True, emit_warning=False)
    assert result is not None
    return result


def get_weight(m):
    result = getattr(m, 'weight', None)
    if result is not None:
        return result
    return getattr(m, 'weights', None)

# NOTE [How to check NN module / functional API parity between Python and C++ frontends]
#
# The way to check API parity is to add parity tests for the NN module / functional of interest.
# Here are the detailed steps:
#
# For NN module:
# 1. Make sure you already have a test dict with the module configuration you want to test.
# 2. Add `cpp_constructor_args` entry to the test dict, with its value exactly matching
#    the Python module constructor arguments. For example, if in the test dict we pass
#    `(10, 8)` to `torch.nn.Linear` constructor, then we should pass `torch::nn::LinearOptions(10, 8)`
#    as the corresponding C++ constructor argument to `torch::nn::Linear`.
# 3. If in the process of performing the above step you referenced any variables
#    in the `cpp_constructor_args` entry, you must add `cpp_var_map` entry
#    to the test dict to make sure that those variables are populated with the right Python values.
#    For example, if the Python constructor call is
#    `torch.nn.FractionalMaxPool2d(2, output_ratio=0.5, _random_samples=random_samples)`,
#    the corresponding C++ constructor argument is
#    `torch::nn::FractionalMaxPool2dOptions(2).output_ratio(0.5)._random_samples(random_samples)`,
#    and the `cpp_var_map` entry must be
#    `{'random_samples': random_samples}` in order to populate the C++ variable `random_samples`
#    used in the C++ constructor argument with the Python tensor value `random_samples`.
#
# For NN functional:
# 1. Make sure you already have a test dict with the functional configuration you want to test.
# 2. If the test dict's `constructor` entry looks like `wrap_functional(F.some_functional_name, ...)`,
#    then you must add `cpp_options_args` entry to the test dict, with its value exactly matching the Python
#    functional optional arguments.
For example, if the test dict's `constructor` entry is # `wrap_functional(F.interpolate, size=12, scale_factor=None, mode='nearest')`, # then the `cpp_options_args` entry should be # "F::InterpolateFuncOptions().size(std::vector<int64_t>({12})).scale_factor(std::nullopt).mode(torch::kNearest)". # 3. Otherwise, if the test dict's `constructor` entry looks like # `wrap_functional(lambda i: F.some_functional_name(...))`, # then you must add `cpp_function_call` entry to the test dict, with its value exactly matching the Python # functional function call. For example, if the test dict's `constructor` entry is # `wrap_functional(lambda i: F.poisson_nll_loss(i, t.type_as(i), reduction='none'))`, # then the `cpp_function_call` entry should be # "F::poisson_nll_loss(i, t.to(i.options()), F::PoissonNLLLossFuncOptions().reduction(torch::kNone))". # 4. If in the process of performing the above two steps you referenced any variables # in the `cpp_options_args` or `cpp_function_call` entry, you must # add `cpp_var_map` entry to the test dict to make sure that those variables # are populated with the right Python values. For example, if the test dict's `constructor` entry is # `wrap_functional(lambda i: F.poisson_nll_loss(i, t.type_as(i), reduction='none'))`, # then the `cpp_function_call` entry should be # "F::poisson_nll_loss(i, t.to(i.options()), F::PoissonNLLLossFuncOptions().reduction(torch::kNone))". # Notice that there are two variables `i` and `t` that need to have their values provided, # and the way to do so is to add a `cpp_var_map` entry: `cpp_var_map={'i': '_get_input()', 't': t}`. # (Note that for `i`, since we want it to take the Python input value, we pass '_get_input()' string as value # and the C++ parity test mechanism will populate `i` with the Python input value correctly.) # # There are also a few optional flags in the test dict to control the C++ parity test behavior: # # - `test_cpp_api_parity`: if `False`, skips the C++ parity test for this test dict. Default: True. # - `has_parity`: if `False`, expects this test dict to fail the C++ parity test. Default: True. module_tests = [ dict( module_name='Linear', constructor_args=(10, 8), cpp_constructor_args='torch::nn::LinearOptions(10, 8)', input_size=(4, 10), reference_fn=lambda i, p, _: torch.mm(i, p[0].t()) + p[1].view(1, -1).expand(4, 8), with_tf32=True, tf32_precision=0.005, default_dtype=torch.double, ), dict( module_name='Linear', constructor_args=(10, 8, False), cpp_constructor_args='torch::nn::LinearOptions(10, 8).bias(false)', input_size=(4, 10), desc='no_bias', reference_fn=lambda i, p, _: torch.mm(i, p[0].t()), with_tf32=True, tf32_precision=0.005, default_dtype=torch.double, ), dict( module_name='RReLU', input_size=(1, 2, 2), test_cuda=False, default_dtype=torch.double, ), dict( module_name='RReLU', constructor_args=(0.1, 0.9), cpp_constructor_args='torch::nn::RReLUOptions().lower(0.1).upper(0.9)', input_size=(4, 4, 5), desc='with_up_down', test_cuda=False, default_dtype=torch.double, ), dict( module_name='Flatten', input_size=(2, 3, 4, 5), reference_fn=lambda i, *_: torch.flatten(i, 1), default_dtype=torch.double, ), # TODO: reference function dict( module_name='CrossMapLRN2d', constructor_args=(5, 5e-3, 1e-3, 2), cpp_constructor_args='torch::nn::CrossMapLRN2dOptions(5).alpha(5e-3).beta(1e-3).k(2)', input_size=(2, 3, 6, 6), check_gradgrad=False, # TODO(#50743): Figure out the error. 
"RuntimeError: Unrecognized tensor type ID: Batched" check_batched_grad=False, default_dtype=torch.double, ), ] # Generates rand tensor with non-equal values. This ensures that duplicate # values won't be causing test failure for modules like MaxPooling. # size should be small, otherwise randperm fails / long overflows. def _rand_tensor_non_equal(*size): total = reduce(mul, size, 1) return torch.randperm(total).view(*size).double() def wrap_functional(fn, **kwargs): class FunctionalModule(nn.Module): def forward(self, *args): return fn(*args, **kwargs) return FunctionalModule def poissonnllloss_no_reduce_test(): t = torch.randn(10, 10) return dict( fullname='PoissonNLLLoss_no_reduce', constructor=wrap_functional( lambda i: F.poisson_nll_loss(i, t.type_as(i), reduction='none')), cpp_function_call='F::poisson_nll_loss(' 'i, t.to(i.options()), F::PoissonNLLLossFuncOptions().reduction(torch::kNone))', input_fn=lambda: torch.rand(10, 10), cpp_var_map={'i': '_get_input()', 't': t}, reference_fn=lambda i, *_: i.exp() - t.mul(i), pickle=False, default_dtype=torch.double) def bceloss_no_reduce_test(): t = Variable(torch.randn(15, 10).gt(0).to(torch.double)) return dict( fullname='BCELoss_no_reduce', constructor=wrap_functional( lambda i: F.binary_cross_entropy(i, t.type_as(i), reduction='none')), cpp_function_call='F::binary_cross_entropy(' 'i, t.to(i.options()), F::BinaryCrossEntropyFuncOptions().reduction(torch::kNone))', input_fn=lambda: torch.rand(15, 10).clamp_(2.8e-2, 1 - 2.8e-2), cpp_var_map={'i': '_get_input()', 't': t}, reference_fn=lambda i, *_: -(t * i.log() + (1 - t) * (1 - i).log()), pickle=False, precision=7e-4, default_dtype=torch.double) def bceloss_no_reduce_scalar_test(): t = torch.randn(()).gt(0).to(torch.double) return dict( fullname='BCELoss_no_reduce_scalar', constructor=wrap_functional( lambda i: F.binary_cross_entropy(i, t.type_as(i), reduction='none')), cpp_function_call='F::binary_cross_entropy(' 'i, t.to(i.options()), F::BinaryCrossEntropyFuncOptions().reduction(torch::kNone))', input_fn=lambda: torch.rand(()).clamp_(2.8e-2, 1 - 2.8e-2), cpp_var_map={'i': '_get_input()', 't': t}, reference_fn=lambda i, *_: -(t * i.log() + (1 - t) * (1 - i).log()), pickle=False, default_dtype=torch.double) def bceloss_weights_no_reduce_test(): t = Variable(torch.randn(15, 10, dtype=torch.double).gt(0).to(torch.double)) weights = torch.rand(10, dtype=torch.double) return dict( fullname='BCELoss_weights_no_reduce', constructor=wrap_functional( lambda i: F.binary_cross_entropy(i, t.type_as(i), weight=weights.type_as(i), reduction='none')), cpp_function_call='F::binary_cross_entropy(' 'i, t.to(i.options()), ' 'F::BinaryCrossEntropyFuncOptions().weight(weights.to(i.options())).reduction(torch::kNone))', input_fn=lambda: torch.rand(15, 10).clamp_(2.8e-2, 1 - 2.8e-2), cpp_var_map={'i': '_get_input()', 't': t, 'weights': weights}, reference_fn=lambda i, p, m: -(t * i.log() + (1 - t) * (1 - i).log()) * weights, pickle=False, precision=3e-4, default_dtype=torch.double, ) def bceloss_weights_no_reduce_scalar_test(): t = torch.randn(()).gt(0).to(torch.double) weights = torch.rand((), dtype=torch.double) return dict( fullname='BCELoss_weights_no_reduce_scalar', constructor=wrap_functional( lambda i: F.binary_cross_entropy(i, t.type_as(i), weight=weights.type_as(i), reduction='none')), cpp_function_call='''F::binary_cross_entropy( i, t.to(i.options()), F::BinaryCrossEntropyFuncOptions().weight(weights.to(i.options())).reduction(torch::kNone))''', cpp_var_map={'i': '_get_input()', 't': t, 'weights': 
weights}, input_fn=lambda: torch.rand(()).clamp_(2.8e-2, 1 - 2.8e-2), reference_fn=lambda i, *_: -(t * i.log() + (1 - t) * (1 - i).log()) * weights, pickle=False, default_dtype=torch.double, ) def bce_with_logistic_legacy_enum_test(): t = Variable(torch.randn(15, 10).gt(0).to(torch.double)) sigmoid = nn.Sigmoid() return dict( fullname='BCEWithLogitsLoss_legacy_enum', constructor=wrap_functional( lambda i: F.binary_cross_entropy_with_logits(i, t.type_as(i), reduce=False)), cpp_function_call='''F::binary_cross_entropy_with_logits( i, t.to(i.options()), F::BinaryCrossEntropyWithLogitsFuncOptions().reduction(torch::kNone))''', input_fn=lambda: torch.rand(15, 10).clamp_(2.8e-2, 1 - 2.8e-2), cpp_var_map={'i': '_get_input()', 't': t}, reference_fn=lambda i, *_: -(t * sigmoid(i).log() + (1 - t) * (1 - sigmoid(i)).log()), check_gradgrad=False, pickle=False, default_dtype=torch.double, ) def bce_with_logistic_no_reduce_test(): t = Variable(torch.randn(15, 10).gt(0).to(torch.double)) sigmoid = nn.Sigmoid() return dict( fullname='BCEWithLogitsLoss_no_reduce', constructor=wrap_functional( lambda i: F.binary_cross_entropy_with_logits(i, t.type_as(i), reduction='none')), cpp_function_call='''F::binary_cross_entropy_with_logits( i, t.to(i.options()), F::BinaryCrossEntropyWithLogitsFuncOptions().reduction(torch::kNone))''', input_fn=lambda: torch.rand(15, 10).clamp_(2.8e-2, 1 - 2.8e-2), cpp_var_map={'i': '_get_input()', 't': t}, reference_fn=lambda i, *_: -(t * sigmoid(i).log() + (1 - t) * (1 - sigmoid(i)).log()), check_gradgrad=False, pickle=False, default_dtype=torch.double, ) def bce_with_logistic_no_reduce_scalar_test(): t = torch.randn(()).gt(0).to(torch.double) sigmoid = nn.Sigmoid() return dict( fullname='BCEWithLogitsLoss_no_reduce_scalar', constructor=wrap_functional( lambda i: F.binary_cross_entropy_with_logits(i, t.type_as(i), reduction='none')), cpp_function_call='''F::binary_cross_entropy_with_logits( i, t.to(i.options()), F::BinaryCrossEntropyWithLogitsFuncOptions().reduction(torch::kNone))''', input_fn=lambda: torch.rand(()).clamp_(2.8e-2, 1 - 2.8e-2), cpp_var_map={'i': '_get_input()', 't': t}, reference_fn=lambda i, *_: -(t * sigmoid(i).log() + (1 - t) * (1 - sigmoid(i)).log()), check_gradgrad=False, pickle=False, default_dtype=torch.double, ) def kldivloss_with_target_no_reduce_test(): t = torch.rand(10, 10, dtype=torch.double) return dict( fullname='KLDivLoss_with_target_no_reduce', constructor=wrap_functional( lambda i: F.kl_div(i, t.type_as(i), reduction='none')), cpp_function_call='F::kl_div(i, t.to(i.options()), F::KLDivFuncOptions().reduction(torch::kNone))', input_fn=lambda: torch.rand(10, 10).log(), cpp_var_map={'i': '_get_input()', 't': t}, reference_fn=lambda i, *_: loss_reference_fns['KLDivLoss'](i, t.type_as(i), reduction='none'), supports_forward_ad=True, pickle=False, default_dtype=torch.double) def kldivloss_no_reduce_test(): t = torch.rand(10, 10, dtype=torch.double) return dict( fullname='KLDivLoss_no_reduce', constructor=wrap_functional( lambda i: F.kl_div(i, t.type_as(i), reduction='none')), cpp_function_call='F::kl_div(i, t.to(i.options()), F::KLDivFuncOptions().reduction(torch::kNone))', input_fn=lambda: torch.rand(10, 10).log(), cpp_var_map={'i': '_get_input()', 't': t}, reference_fn=lambda i, *_: loss_reference_fns['KLDivLoss'](i, t.type_as(i), reduction='none'), supports_forward_ad=True, pickle=False, default_dtype=torch.double, ) def kldivloss_no_reduce_scalar_test(): t = torch.rand((), dtype=torch.double) return dict( fullname='KLDivLoss_no_reduce_scalar', 
constructor=wrap_functional( lambda i: F.kl_div(i, t.type_as(i), reduction='none')), cpp_function_call='F::kl_div(i, t.to(i.options()), F::KLDivFuncOptions().reduction(torch::kNone))', input_fn=lambda: torch.rand(()).log(), cpp_var_map={'i': '_get_input()', 't': t}, reference_fn=lambda i, *_: loss_reference_fns['KLDivLoss'](i, t.type_as(i), reduction='none'), supports_forward_ad=True, pickle=False, default_dtype=torch.double) def kldivloss_with_log_target_no_reduce_test(): t = torch.rand(10, 10, dtype=torch.double).log() return dict( fullname='KLDivLoss_with_log_target_no_reduce', constructor=wrap_functional( lambda i: F.kl_div(i, t.type_as(i), reduction='none', log_target=True)), cpp_function_call='F::kl_div(i, t.to(i.options()), F::KLDivFuncOptions().reduction(torch::kNone).log_target(true))', input_fn=lambda: torch.rand(10, 10).log(), cpp_var_map={'i': '_get_input()', 't': t}, reference_fn=lambda i, *_: loss_reference_fns['KLDivLoss_log_target'](i, t.type_as(i), reduction='none'), supports_forward_ad=True, pickle=False, default_dtype=torch.double) def kldivloss_no_reduce_log_target_test(): t = torch.rand(10, 10, dtype=torch.double).log() return dict( fullname='KLDivLoss_no_reduce_log_target', constructor=wrap_functional( lambda i: F.kl_div(i, t.type_as(i), reduction='none', log_target=True)), cpp_function_call='F::kl_div(i, t.to(i.options()), F::KLDivFuncOptions().reduction(torch::kNone).log_target(true))', input_fn=lambda: torch.rand(10, 10).log(), cpp_var_map={'i': '_get_input()', 't': t}, reference_fn=lambda i, *_: loss_reference_fns['KLDivLoss_log_target'](i, t.type_as(i), reduction='none'), supports_forward_ad=True, pickle=False, default_dtype=torch.double, ) def kldivloss_no_reduce_scalar_log_target_test(): t = torch.rand((), dtype=torch.double).log() return dict( fullname='KLDivLoss_no_reduce_scalar_log_target', constructor=wrap_functional( lambda i: F.kl_div(i, t.type_as(i), reduction='none', log_target=True)), cpp_function_call='F::kl_div(i, t.to(i.options()), F::KLDivFuncOptions().reduction(torch::kNone).log_target(true))', input_fn=lambda: torch.rand(()).log(), cpp_var_map={'i': '_get_input()', 't': t}, reference_fn=lambda i, *_: loss_reference_fns['KLDivLoss_log_target'](i, t.type_as(i), reduction='none'), supports_forward_ad=True, pickle=False, default_dtype=torch.double) def l1loss_no_reduce_test(): t = torch.randn(2, 3, 4, dtype=torch.double) return dict( fullname='L1Loss_no_reduce', constructor=wrap_functional( lambda i: F.l1_loss(i, t.type_as(i), reduction='none')), cpp_function_call='F::l1_loss(i, t.to(i.options()), F::L1LossFuncOptions().reduction(torch::kNone))', input_fn=lambda: torch.randn(2, 3, 4), cpp_var_map={'i': '_get_input()', 't': t}, reference_fn=lambda i, *_: (i - t.type_as(i)).abs(), supports_forward_ad=True, pickle=False, default_dtype=torch.double) def l1loss_no_reduce_complex_test(): t = torch.randn(2, 3, 4, dtype=torch.cdouble) return dict( fullname='L1Loss_no_reduce_complex', constructor=wrap_functional( lambda i: F.l1_loss(i, t.type_as(i), reduction='none')), cpp_function_call='F::l1_loss(i, t.to(i.options()), F::L1LossFuncOptions().reduction(torch::kNone))', input_fn=lambda: torch.randn(2, 3, 4, dtype=torch.cdouble), cpp_var_map={'i': '_get_input()', 't': t}, reference_fn=lambda i, *_: (i - t.type_as(i)).abs(), supports_forward_ad=True, pickle=False) def l1loss_no_reduce_scalar_test(): t = torch.randn((), dtype=torch.double) return dict( fullname='L1Loss_no_reduce_scalar', constructor=wrap_functional( lambda i: F.l1_loss(i, t.type_as(i), 
reduction='none')), cpp_function_call='F::l1_loss(i, t.to(i.options()), F::L1LossFuncOptions().reduction(torch::kNone))', input_fn=lambda: torch.randn(()), cpp_var_map={'i': '_get_input()', 't': t}, reference_fn=lambda i, *_: (i - t.type_as(i)).abs(), supports_forward_ad=True, pickle=False, default_dtype=torch.double) def mseloss_no_reduce_test(): input_size = (2, 3, 4, 5) target = torch.randn(*input_size, dtype=torch.double) return dict( fullname='MSELoss_no_reduce', constructor=wrap_functional( lambda i: F.mse_loss(i, target.type_as(i), reduction='none')), cpp_function_call='F::mse_loss(i, target.to(i.options()), F::MSELossFuncOptions().reduction(torch::kNone))', input_size=input_size, cpp_var_map={'i': '_get_input()', 'target': target}, reference_fn=lambda i, *_: (i - target).pow(2), supports_forward_ad=True, pickle=False, default_dtype=torch.double) def mseloss_no_reduce_scalar_test(): input_size = () target = torch.randn(input_size, dtype=torch.double) return dict( fullname='MSELoss_no_reduce_scalar', constructor=wrap_functional( lambda i: F.mse_loss(i, target.type_as(i), reduction='none')), cpp_function_call='F::mse_loss(i, target.to(i.options()), F::MSELossFuncOptions().reduction(torch::kNone))', input_size=input_size, cpp_var_map={'i': '_get_input()', 'target': target}, reference_fn=lambda i, *_: (i - target).pow(2), supports_forward_ad=True, pickle=False, default_dtype=torch.double) def nllloss_no_reduce_test(): t = Variable(torch.empty(15).uniform_().mul(10).floor().long()) kwargs = {'reduction': 'none'} return dict( fullname='NLLLoss_no_reduce', constructor=wrap_functional( lambda i: F.nll_loss(i, t.type_as(i).long(), reduction=kwargs['reduction'])), cpp_function_call='''F::nll_loss( i, t.to(i.options()).to(torch::kLong), F::NLLLossFuncOptions().reduction(torch::kNone))''', input_fn=lambda: torch.rand(15, 10).log(), cpp_var_map={'i': '_get_input()', 't': t}, reference_fn=lambda i, *_: loss_reference_fns['NLLLoss'](i, t.type_as(i).long(), **kwargs), pickle=False, default_dtype=torch.double) def nllloss_no_reduce_ignore_index_test(): t = Variable(torch.empty(15).uniform_().mul(10).floor().long()) kwargs: dict[str, Union[int, str]] = {'ignore_index': 2, 'reduction': 'none'} return dict( fullname='NLLLoss_no_reduce_ignore_index', constructor=wrap_functional( lambda i: F.nll_loss(i, t.type_as(i).long(), ignore_index=int(kwargs['ignore_index']), reduction=str(kwargs['reduction']))), cpp_function_call='''F::nll_loss( i, t.to(i.options()).to(torch::kLong), F::NLLLossFuncOptions().ignore_index(2).reduction(torch::kNone))''', input_fn=lambda: torch.rand(15, 10).log(), cpp_var_map={'i': '_get_input()', 't': t}, reference_fn=lambda i, *_: loss_reference_fns['NLLLoss'](i, t.type_as(i).long(), **kwargs), pickle=False, default_dtype=torch.double) def nllloss_no_reduce_weights_test(): t = Variable(torch.empty(15).uniform_().mul(10).floor().long()) weight = torch.rand(10) def kwargs(i): return {'weight': weight.type_as(i), 'reduction': 'none'} return dict( fullname='NLLLoss_no_reduce_weights', constructor=wrap_functional( lambda i: F.nll_loss(i, t.type_as(i).long(), **kwargs(i))), cpp_function_call='''F::nll_loss( i, t.to(i.options()).to(torch::kLong), F::NLLLossFuncOptions().weight(weight.to(i.options())).reduction(torch::kNone))''', input_fn=lambda: torch.rand(15, 10).add(1e-2).log(), cpp_var_map={'i': '_get_input()', 't': t, 'weight': weight}, reference_fn=lambda i, *_: loss_reference_fns['NLLLoss'](i, t.type_as(i).long(), **kwargs(i)), pickle=False, default_dtype=torch.double) def 
nllloss_no_reduce_weights_ignore_index_test(): t = Variable(torch.empty(15).uniform_().mul(10).floor().long()) weight = torch.rand(10) def kwargs(i): return {'weight': weight.type_as(i), 'reduction': 'none', 'ignore_index': 2} return dict( fullname='NLLLoss_no_reduce_weights_ignore_index', constructor=wrap_functional( lambda i: F.nll_loss(i, t.type_as(i).long(), **kwargs(i.data))), cpp_function_call='''F::nll_loss( i, t.to(i.options()).to(torch::kLong), F::NLLLossFuncOptions().weight(weight.to(i.options())).reduction(torch::kNone).ignore_index(2))''', input_fn=lambda: torch.rand(15, 10).add(1e-2).log(), cpp_var_map={'i': '_get_input()', 't': t, 'weight': weight}, reference_fn=lambda i, *_: loss_reference_fns['NLLLoss'](i, t.type_as(i).long(), **kwargs(i)), pickle=False, default_dtype=torch.double) def nllloss_no_reduce_weights_ignore_index_neg_test(): t = Variable(torch.empty(15).uniform_().mul(10).floor().long()) weight = torch.rand(10) def kwargs(i): return {'weight': weight.type_as(i), 'reduction': 'none', 'ignore_index': -1} return dict( fullname='NLLLoss_no_reduce_weights_ignore_index_neg', constructor=wrap_functional( lambda i: F.nll_loss(i, t.type_as(i).long(), **kwargs(i))), cpp_function_call='''F::nll_loss( i, t.to(i.options()).to(torch::kLong), F::NLLLossFuncOptions().weight(weight.to(i.options())).reduction(torch::kNone).ignore_index(-1))''', input=torch.rand(15, 10, dtype=torch.double).add(1e-2).log(), cpp_var_map={'i': '_get_input()', 't': t, 'weight': weight}, reference_fn=lambda i, *_: loss_reference_fns['NLLLoss'](i, t.type_as(i).long(), **kwargs(i)), pickle=False, default_dtype=torch.double) def nllloss2d_no_reduce_test(): t = Variable(torch.rand(2, 5, 5).mul(3).floor().long()) kwargs = {'reduction': 'none'} return dict( fullname='NLLLoss2d_no_reduce', constructor=wrap_functional( lambda i: F.nll_loss(i, t.type_as(i).long(), reduction=kwargs['reduction'])), cpp_function_call='''F::nll_loss( i, t.to(i.options()).to(torch::kLong), F::NLLLossFuncOptions().reduction(torch::kNone))''', input_fn=lambda: torch.rand(2, 3, 5, 5).log(), cpp_var_map={'i': '_get_input()', 't': t}, reference_fn=lambda i, *_: loss_reference_fns['NLLLossNd'](i, t.type_as(i).long(), **kwargs), pickle=False, default_dtype=torch.double) def nllloss2d_no_reduce_ignore_index_test(): t = Variable(torch.rand(2, 5, 5).mul(3).floor().long()) kwargs: dict[str, Union[int, str]] = {'ignore_index': 1, 'reduction': 'none'} return dict( fullname='NLLLoss2d_no_reduce_ignore_index', constructor=wrap_functional( lambda i: F.nll_loss(i, t.type_as(i).long(), ignore_index=int(kwargs['ignore_index']), reduction=str(kwargs['reduction']))), cpp_function_call='''F::nll_loss( i, t.to(i.options()).to(torch::kLong), F::NLLLossFuncOptions().ignore_index(1).reduction(torch::kNone))''', input_fn=lambda: torch.rand(2, 3, 5, 5).log(), cpp_var_map={'i': '_get_input()', 't': t}, reference_fn=lambda i, *_: loss_reference_fns['NLLLossNd'](i, t.type_as(i).long(), **kwargs), pickle=False, default_dtype=torch.double) def nllloss2d_no_reduce_weights_test(): t = Variable(torch.rand(2, 5, 5).mul(3).floor().long()) weight = torch.rand(3) def kwargs(i): return {'weight': weight.type_as(i), 'reduction': 'none'} return dict( fullname='NLLLoss2d_no_reduce_weights', constructor=wrap_functional( lambda i: F.nll_loss(i, t.type_as(i).long(), **kwargs(i))), cpp_function_call='''F::nll_loss( i, t.to(i.options()).to(torch::kLong), F::NLLLossFuncOptions().weight(weight.to(i.options())).reduction(torch::kNone))''', input_fn=lambda: torch.rand(2, 3, 5, 
5).log(), cpp_var_map={'i': '_get_input()', 't': t, 'weight': weight}, reference_fn=lambda i, *_: loss_reference_fns['NLLLossNd'](i, t.type_as(i).long(), **kwargs(i)), pickle=False, default_dtype=torch.double) def nlllossNd_no_reduce_test(): t = Variable(torch.rand(2, 5, 5, 2, 2).mul(3).floor().long()) kwargs = {'reduction': 'none'} return dict( fullname='NLLLossNd_no_reduce', constructor=wrap_functional( lambda i: F.nll_loss(i, t.type_as(i).long(), reduction=kwargs['reduction'])), cpp_function_call='''F::nll_loss( i, t.to(i.options()).to(torch::kLong), F::NLLLossFuncOptions().reduction(torch::kNone))''', input_fn=lambda: torch.rand(2, 3, 5, 5, 2, 2).log(), cpp_var_map={'i': '_get_input()', 't': t}, reference_fn=lambda i, *_: loss_reference_fns['NLLLossNd'](i, t.type_as(i).long(), **kwargs), pickle=False, default_dtype=torch.double) def nlllossNd_no_reduce_ignore_index_test(): t = Variable(torch.rand(2, 5, 5, 2, 2).mul(3).floor().long()) kwargs: dict[str, Union[int, str]] = {'ignore_index': 1, 'reduction': 'none'} return dict( fullname='NLLLossNd_no_reduce_ignore_index', constructor=wrap_functional( lambda i: F.nll_loss(i, t.type_as(i).long(), ignore_index=int(kwargs['ignore_index']), reduction=str(kwargs['reduction']))), cpp_function_call='''F::nll_loss( i, t.to(i.options()).to(torch::kLong), F::NLLLossFuncOptions().ignore_index(1).reduction(torch::kNone))''', input_fn=lambda: torch.rand(2, 3, 5, 5, 2, 2).log(), cpp_var_map={'i': '_get_input()', 't': t}, reference_fn=lambda i, *_: loss_reference_fns['NLLLossNd'](i, t.type_as(i).long(), **kwargs), pickle=False, default_dtype=torch.double) def nlllossNd_no_reduce_weights_test(): t = Variable(torch.rand(2, 5, 5, 2, 2).mul(3).floor().long()) weight = torch.rand(3) def kwargs(i): return {'weight': weight.type_as(i), 'reduction': 'none'} return dict( fullname='NLLLossNd_no_reduce_weights', constructor=wrap_functional( lambda i: F.nll_loss(i, t.type_as(i).long(), **kwargs(i))), cpp_function_call='''F::nll_loss( i, t.to(i.options()).to(torch::kLong), F::NLLLossFuncOptions().weight(weight.to(i.options())).reduction(torch::kNone))''', input_fn=lambda: torch.rand(2, 3, 5, 5, 2, 2).log(), cpp_var_map={'i': '_get_input()', 't': t, 'weight': weight}, reference_fn=lambda i, *_: loss_reference_fns['NLLLossNd'](i, t.type_as(i).long(), **kwargs(i)), pickle=False, default_dtype=torch.double) def smoothl1loss_no_reduce_test(): t = torch.randn(2, 3, 4, dtype=torch.double) return dict( fullname='SmoothL1Loss_no_reduce', constructor=wrap_functional( lambda i: F.smooth_l1_loss(i, t.type_as(i), reduction='none')), cpp_function_call='''F::smooth_l1_loss( i, t.to(i.options()), F::SmoothL1LossFuncOptions().reduction(torch::kNone))''', input_fn=lambda: torch.randn(2, 3, 4), cpp_var_map={'i': '_get_input()', 't': t}, reference_fn=lambda i, *_: loss_reference_fns['SmoothL1Loss'](i, t.type_as(i), reduction='none'), supports_forward_ad=True, pickle=False, default_dtype=torch.double) def smoothl1loss_no_reduce_scalar_test(): t = torch.randn((), dtype=torch.double) return dict( fullname='SmoothL1Loss_no_reduce_scalar', constructor=wrap_functional( lambda i: F.smooth_l1_loss(i, t.type_as(i), reduction='none')), cpp_function_call='''F::smooth_l1_loss( i, t.to(i.options()), F::SmoothL1LossFuncOptions().reduction(torch::kNone))''', input_fn=lambda: torch.randn(()), cpp_var_map={'i': '_get_input()', 't': t}, reference_fn=lambda i, *_: loss_reference_fns['SmoothL1Loss'](i, t.type_as(i), reduction='none'), supports_forward_ad=True, pickle=False, default_dtype=torch.double) def 
smoothl1loss_beta_test(): t = torch.randn(2, 3, 4, dtype=torch.double) return dict( fullname='SmoothL1Loss_beta', constructor=wrap_functional( lambda i: F.smooth_l1_loss(i, t.type_as(i), reduction='none', beta=0.5)), cpp_function_call='''F::smooth_l1_loss( i, t.to(i.options()), F::SmoothL1LossFuncOptions().reduction(torch::kNone), 0.5)''', input_fn=lambda: torch.randn(2, 3, 4), cpp_var_map={'i': '_get_input()', 't': t}, reference_fn=lambda i, *_: loss_reference_fns['SmoothL1Loss'](i, t.type_as(i), reduction='none', beta=0.5), supports_forward_ad=True, pickle=False, default_dtype=torch.double) def smoothl1loss_zero_beta_test(): t = torch.randn(2, 3, 4, dtype=torch.double) return dict( fullname='SmoothL1Loss_zero_beta', constructor=wrap_functional( lambda i: F.smooth_l1_loss(i, t.type_as(i), reduction='none', beta=0)), cpp_function_call='''F::smooth_l1_loss( i, t.to(i.options()), F::SmoothL1LossFuncOptions().reduction(torch::kNone), 0)''', input_fn=lambda: torch.randn(2, 3, 4), cpp_var_map={'i': '_get_input()', 't': t}, reference_fn=lambda i, *_: loss_reference_fns['SmoothL1Loss'](i, t.type_as(i), reduction='none', beta=0), supports_forward_ad=True, pickle=False, default_dtype=torch.double) def huberloss_delta_test(): t = torch.randn(2, 3, 4) return dict( fullname='HuberLoss_delta', constructor=wrap_functional( lambda i: F.huber_loss(i, t.type_as(i), reduction='none', delta=0.5)), cpp_function_call='''F::huber_loss( i, t.to(i.options()), F::HuberLossFuncOptions().reduction(torch::kNone).delta(0.5))''', input_fn=lambda: torch.randn(2, 3, 4), cpp_var_map={'i': '_get_input()', 't': t}, reference_fn=lambda i, *_: loss_reference_fns['HuberLoss'](i, t.type_as(i), reduction='none', delta=0.5), supports_forward_ad=True, pickle=False, default_dtype=torch.double) def multilabelmarginloss_0d_no_reduce_test(): t = torch.zeros(()).long() return dict( fullname='MultiLabelMarginLoss_0d_no_reduce', constructor=wrap_functional( lambda i: F.multilabel_margin_loss(i, t.type_as(i).long(), reduction='none')), cpp_function_call='''F::multilabel_margin_loss( i, t.to(i.options()).to(torch::kLong), F::MultilabelMarginLossFuncOptions().reduction(torch::kNone))''', input_fn=lambda: torch.randn(()), cpp_var_map={'i': '_get_input()', 't': t}, reference_fn=lambda i, *_: loss_reference_fns['MultiLabelMarginLoss'](i, t.data.type_as(i).long(), reduction='none'), check_sum_reduction=True, check_gradgrad=False, pickle=False) def multilabelmarginloss_1d_no_reduce_test(): t = Variable(torch.rand(10).mul(10).floor().long()) return dict( fullname='MultiLabelMarginLoss_1d_no_reduce', constructor=wrap_functional( lambda i: F.multilabel_margin_loss(i, t.type_as(i).long(), reduction='none')), cpp_function_call='''F::multilabel_margin_loss( i, t.to(i.options()).to(torch::kLong), F::MultilabelMarginLossFuncOptions().reduction(torch::kNone))''', input_fn=lambda: torch.randn(10), cpp_var_map={'i': '_get_input()', 't': t}, reference_fn=lambda i, *_: loss_reference_fns['MultiLabelMarginLoss'](i, t.data.type_as(i).long(), reduction='none'), check_sum_reduction=True, check_gradgrad=False, pickle=False, default_dtype=torch.double) def multilabelmarginloss_index_neg_test(): t = Variable(torch.clamp(torch.rand(5, 10).add(-.5).mul(20).floor().long(), min=-1)) return dict( fullname='MultiLabelMarginLoss_index_neg', constructor=wrap_functional( lambda i: F.multilabel_margin_loss(i, t.type_as(i).long(), reduction='none')), cpp_function_call='''F::multilabel_margin_loss( i, t.to(i.options()).to(torch::kLong), 
F::MultilabelMarginLossFuncOptions().reduction(torch::kNone))''', input_fn=lambda: torch.randn(5, 10), cpp_var_map={'i': '_get_input()', 't': t}, reference_fn=lambda i, *_: loss_reference_fns['MultiLabelMarginLoss'](i, t.data.type_as(i).long(), reduction='none'), check_sum_reduction=True, check_gradgrad=False, pickle=False, default_dtype=torch.double) def multilabelmarginloss_no_reduce_test(): t = Variable(torch.rand(5, 10).mul(10).floor().long()) return dict( fullname='MultiLabelMarginLoss_no_reduce', constructor=wrap_functional( lambda i: F.multilabel_margin_loss(i, t.type_as(i).long(), reduction='none')), cpp_function_call='''F::multilabel_margin_loss( i, t.to(i.options()).to(torch::kLong), F::MultilabelMarginLossFuncOptions().reduction(torch::kNone))''', input_fn=lambda: torch.randn(5, 10), cpp_var_map={'i': '_get_input()', 't': t}, reference_fn=lambda i, *_: loss_reference_fns['MultiLabelMarginLoss'](i, t.data.type_as(i).long(), reduction='none'), check_sum_reduction=True, check_gradgrad=False, pickle=False, default_dtype=torch.double) def hingeembeddingloss_no_reduce_test(): t = Variable(torch.randn(10).gt(0).to(torch.double).mul_(2).sub(1)) return dict( fullname='HingeEmbeddingLoss_no_reduce', constructor=wrap_functional( lambda i: F.hinge_embedding_loss(i, t.type_as(i), reduction='none')), cpp_function_call='''F::hinge_embedding_loss( i, t.to(i.options()), F::HingeEmbeddingLossFuncOptions().reduction(torch::kNone))''', input_fn=lambda: torch.randn(10), cpp_var_map={'i': '_get_input()', 't': t}, reference_fn=lambda i, *_: loss_reference_fns['HingeEmbeddingLoss'](i, t.type_as(i), reduction='none'), check_sum_reduction=True, pickle=False, default_dtype=torch.double) def hingeembeddingloss_margin_no_reduce_test(): t = Variable(torch.randn(10).gt(0).to(torch.double).mul_(2).sub(1)) return dict( fullname='HingeEmbeddingLoss_margin_no_reduce', constructor=wrap_functional( lambda i: F.hinge_embedding_loss(i, t.type_as(i), margin=0.5, reduction='none')), cpp_function_call='''F::hinge_embedding_loss( i, t.to(i.options()), F::HingeEmbeddingLossFuncOptions().margin(0.5).reduction(torch::kNone))''', input_fn=lambda: torch.randn(10), cpp_var_map={'i': '_get_input()', 't': t}, reference_fn=lambda i, *_: loss_reference_fns['HingeEmbeddingLoss'](i, t.type_as(i), margin=0.5, reduction='none'), check_sum_reduction=True, pickle=False, default_dtype=torch.double) def softmarginloss_no_reduce_test(): t = torch.randn(5, 5, dtype=torch.double) return dict( fullname='SoftMarginLoss_no_reduce', constructor=wrap_functional( lambda i: F.soft_margin_loss(i, t.type_as(i), reduction='none')), cpp_function_call='''F::soft_margin_loss( i, t.to(i.options()), F::SoftMarginLossFuncOptions().reduction(torch::kNone))''', input_fn=lambda: torch.randn(5, 5), cpp_var_map={'i': '_get_input()', 't': t}, reference_fn=lambda i, *_: loss_reference_fns['SoftMarginLoss'](i, t.type_as(i), reduction='none'), supports_forward_ad=True, pickle=False, default_dtype=torch.double) def multilabelsoftmarginloss_no_reduce_test(): t = torch.rand(5, 10).mul(2).floor() return dict( fullname='MultiLabelSoftMarginLoss_no_reduce', constructor=wrap_functional( lambda i: F.multilabel_soft_margin_loss(i, t.type_as(i), reduction='none')), cpp_function_call='''F::multilabel_soft_margin_loss( i, t.to(i.options()), F::MultilabelSoftMarginLossFuncOptions().reduction(torch::kNone))''', input_fn=lambda: torch.randn(5, 10), cpp_var_map={'i': '_get_input()', 't': t}, reference_fn=lambda i, *_: (-(t * i.sigmoid().log() + (1 - t) * 
(-i).sigmoid().log())).sum(dim=1) / i.size(1), check_gradgrad=False, pickle=False, default_dtype=torch.double) def multilabelsoftmarginloss_weights_no_reduce_test(): t = torch.rand(5, 10).mul(2).floor() weights = torch.rand(10) return dict( fullname='MultiLabelSoftMarginLoss_weights_no_reduce', constructor=wrap_functional( lambda i: F.multilabel_soft_margin_loss(i, t.type_as(i), weight=weights.type_as(i), reduction='none')), cpp_function_call='''F::multilabel_soft_margin_loss( i, t.to(i.options()), F::MultilabelSoftMarginLossFuncOptions().weight(weights.to(i.options())).reduction(torch::kNone))''', input_fn=lambda: torch.randn(5, 10), cpp_var_map={'i': '_get_input()', 't': t, 'weights': weights}, reference_fn=lambda i, *_: (-(t * i.sigmoid().log() + (1 - t) * (-i).sigmoid().log()) * weights).sum(dim=1) / i.size(1), check_sum_reduction=True, check_gradgrad=False, pickle=False, default_dtype=torch.double) def multimarginloss_no_reduce_test(): t = torch.rand(5).mul(8).floor().long() return dict( fullname='MultiMarginLoss_no_reduce', constructor=wrap_functional( lambda i: F.multi_margin_loss(i, t.type_as(i).long(), reduction='none')), cpp_function_call='''F::multi_margin_loss( i, t.to(i.options()).to(torch::kLong), F::MultiMarginLossFuncOptions().reduction(torch::kNone))''', input_fn=lambda: torch.randn(5, 10), cpp_var_map={'i': '_get_input()', 't': t}, reference_fn=lambda i, *_: loss_reference_fns['MultiMarginLoss'](i, t.data.type_as(i).long(), reduction='none'), check_sum_reduction=True, check_gradgrad=False, pickle=False, default_dtype=torch.double) def multimarginloss_1d_no_reduce_test(): t = torch.rand(1).mul(8).floor().long() return dict( fullname='MultiMarginLoss_1d_no_reduce', constructor=wrap_functional( lambda i: F.multi_margin_loss(i, t.type_as(i).long(), reduction='none')), cpp_function_call='''F::multi_margin_loss( i, t.to(i.options()).to(torch::kLong), F::MultiMarginLossFuncOptions().reduction(torch::kNone))''', input_fn=lambda: torch.randn(10), cpp_var_map={'i': '_get_input()', 't': t}, reference_fn=lambda i, *_: loss_reference_fns['MultiMarginLoss'](i, t.data.type_as(i).long(), reduction='none'), check_sum_reduction=True, check_gradgrad=False, pickle=False, default_dtype=torch.double) def multimarginloss_1d_input_0d_target_no_reduce_test(): t = torch.rand(()).mul(8).floor().long() return dict( fullname='multimarginloss_1d_input_0d_target_no_reduce', constructor=wrap_functional( lambda i: F.multi_margin_loss(i, t.type_as(i).long(), reduction='none')), cpp_function_call='''F::multi_margin_loss( i, t.to(i.options()).to(torch::kLong), F::MultiMarginLossFuncOptions().reduction(torch::kNone))''', input_fn=lambda: torch.randn(10), cpp_var_map={'i': '_get_input()', 't': t}, reference_fn=lambda i, *_: loss_reference_fns['MultiMarginLoss'](i, t.data.type_as(i).long(), reduction='none'), check_sum_reduction=True, check_gradgrad=False, pickle=False, default_dtype=torch.double) def multimarginloss_p_no_reduce_test(): t = torch.rand(5).mul(8).floor().long() return dict( fullname='MultiMarginLoss_p_no_reduce', constructor=wrap_functional( lambda i: F.multi_margin_loss(i, t.type_as(i).long(), p=2, reduction='none')), cpp_function_call='''F::multi_margin_loss( i, t.to(i.options()).to(torch::kLong), F::MultiMarginLossFuncOptions().p(2).reduction(torch::kNone))''', input_fn=lambda: torch.randn(5, 10).clamp_(1e-2, 1 - 1e-2), cpp_var_map={'i': '_get_input()', 't': t}, reference_fn=lambda i, *_: loss_reference_fns['MultiMarginLoss'](i, t.data.type_as(i).long(), p=2, reduction='none'), 
        check_sum_reduction=True,
        check_gradgrad=False,
        pickle=False,
        default_dtype=torch.double)


def multimarginloss_margin_no_reduce_test():
    t = torch.rand(5).mul(8).floor().long()
    return dict(
        fullname='MultiMarginLoss_margin_no_reduce',
        constructor=wrap_functional(
            lambda i: F.multi_margin_loss(i, t.type_as(i).long(), margin=0.5, reduction='none')),
        cpp_function_call='''F::multi_margin_loss(
            i, t.to(i.options()).to(torch::kLong),
            F::MultiMarginLossFuncOptions().margin(0.5).reduction(torch::kNone))''',
        input_fn=lambda: torch.randn(5, 10),
        cpp_var_map={'i': '_get_input()', 't': t},
        reference_fn=lambda i, *_:
            loss_reference_fns['MultiMarginLoss'](i, t.data.type_as(i).long(), margin=0.5, reduction='none'),
        check_sum_reduction=True,
        check_gradgrad=False,
        pickle=False,
        default_dtype=torch.double)


def multimarginloss_weights_no_reduce_test():
    t = torch.rand(5).mul(8).floor().long()
    weights = torch.rand(10, dtype=torch.double)
    return dict(
        fullname='MultiMarginLoss_weights_no_reduce',
        constructor=wrap_functional(
            lambda i: F.multi_margin_loss(i, t.type_as(i).long(), weight=weights.type_as(i), reduction='none')),
        cpp_function_call='''F::multi_margin_loss(
            i, t.to(i.options()).to(torch::kLong),
            F::MultiMarginLossFuncOptions().weight(weights.to(i.options())).reduction(torch::kNone))''',
        input_fn=lambda: torch.randn(5, 10),
        cpp_var_map={'i': '_get_input()', 't': t, 'weights': weights},
        reference_fn=lambda i, *_:
            loss_reference_fns['MultiMarginLoss'](i, t.data.type_as(i).long(), weight=weights, reduction='none'),
        check_sum_reduction=True,
        check_gradgrad=False,
        pickle=False,
        default_dtype=torch.double)


def single_batch_reference_fn(input, parameters, module):
    """Reference function for modules supporting no batch dimensions.

    The module is passed the input and target in batched form with a single item.
    The output is squeezed to compare with the no-batch input.
""" def unsqueeze_inp(inp): if isinstance(inp, (list, tuple)): return [t.unsqueeze(0) for t in inp] return inp.unsqueeze(0) single_batch_input = unsqueeze_inp(input) single_batch_input = [single_batch_input] if isinstance(single_batch_input, torch.Tensor) else single_batch_input with freeze_rng_state(): return module(*single_batch_input).squeeze(0) def get_new_module_tests(): new_module_tests = [ poissonnllloss_no_reduce_test(), bceloss_no_reduce_test(), bceloss_weights_no_reduce_test(), bce_with_logistic_legacy_enum_test(), bce_with_logistic_no_reduce_test(), bceloss_no_reduce_scalar_test(), bceloss_weights_no_reduce_scalar_test(), bce_with_logistic_no_reduce_scalar_test(), kldivloss_with_target_no_reduce_test(), kldivloss_no_reduce_test(), kldivloss_no_reduce_scalar_test(), kldivloss_with_log_target_no_reduce_test(), kldivloss_no_reduce_log_target_test(), kldivloss_no_reduce_scalar_log_target_test(), l1loss_no_reduce_test(), l1loss_no_reduce_complex_test(), l1loss_no_reduce_scalar_test(), mseloss_no_reduce_test(), mseloss_no_reduce_scalar_test(), nllloss_no_reduce_test(), nllloss_no_reduce_ignore_index_test(), nllloss_no_reduce_weights_test(), nllloss_no_reduce_weights_ignore_index_test(), nllloss_no_reduce_weights_ignore_index_neg_test(), nllloss2d_no_reduce_test(), nllloss2d_no_reduce_weights_test(), nllloss2d_no_reduce_ignore_index_test(), nlllossNd_no_reduce_test(), nlllossNd_no_reduce_weights_test(), nlllossNd_no_reduce_ignore_index_test(), smoothl1loss_no_reduce_test(), smoothl1loss_no_reduce_scalar_test(), smoothl1loss_beta_test(), smoothl1loss_zero_beta_test(), huberloss_delta_test(), multilabelmarginloss_0d_no_reduce_test(), multilabelmarginloss_1d_no_reduce_test(), multilabelmarginloss_index_neg_test(), multilabelmarginloss_no_reduce_test(), hingeembeddingloss_no_reduce_test(), hingeembeddingloss_margin_no_reduce_test(), softmarginloss_no_reduce_test(), multilabelsoftmarginloss_no_reduce_test(), multilabelsoftmarginloss_weights_no_reduce_test(), multimarginloss_no_reduce_test(), multimarginloss_1d_no_reduce_test(), multimarginloss_1d_input_0d_target_no_reduce_test(), multimarginloss_p_no_reduce_test(), multimarginloss_margin_no_reduce_test(), multimarginloss_weights_no_reduce_test(), dict( module_name='Conv1d', constructor_args=(4, 5, 3), cpp_constructor_args='torch::nn::Conv1dOptions(4, 5, 3)', input_size=(2, 4, 10), cudnn=True, with_tf32=True, tf32_precision=0.005, default_dtype=torch.double, ), dict( module_name='Conv1d', constructor_args=(4, 5, 3, 2), cpp_constructor_args='torch::nn::Conv1dOptions(4, 5, 3).stride(2)', input_size=(2, 4, 10), cudnn=True, desc='stride', with_tf32=True, tf32_precision=0.005, default_dtype=torch.double, ), dict( module_name='Conv1d', constructor_args=(4, 5, 3, 1, 1), cpp_constructor_args='torch::nn::Conv1dOptions(4, 5, 3).stride(1).padding(1)', input_size=(2, 4, 10), cudnn=True, desc='pad1', with_tf32=True, tf32_precision=0.01, default_dtype=torch.double, ), dict( module_name='Conv1d', constructor_args=(4, 5, 5, 1, 2), cpp_constructor_args='torch::nn::Conv1dOptions(4, 5, 5).stride(1).padding(2)', input_size=(2, 4, 10), cudnn=True, desc='pad2', with_tf32=True, tf32_precision=0.005, default_dtype=torch.double, ), dict( module_name='Conv1d', constructor_args=(4, 4, 3, 1, 1), cpp_constructor_args='torch::nn::Conv1dOptions(4, 4, 3).stride(1).padding(1)', input_size=(1, 4, 1), cudnn=True, desc='pad1size1', with_tf32=True, tf32_precision=0.005, default_dtype=torch.double, ), dict( module_name='Conv1d', constructor_args=(4, 4, 5, 1, 2), 
cpp_constructor_args='torch::nn::Conv1dOptions(4, 4, 5).stride(1).padding(2)', input_size=(1, 4, 1), cudnn=True, desc='pad2size1', with_tf32=True, tf32_precision=0.005, default_dtype=torch.double, ), dict( module_name='Conv1d', constructor_args=(4, 5, 3), cpp_constructor_args='torch::nn::Conv1dOptions(4, 5, 3)', input_size=(0, 4, 10), cudnn=True, desc='zero_batch', with_tf32=True, tf32_precision=0.005, ), dict( fullname='Conv1d_dilated', constructor=lambda: nn.Conv1d(4, 5, kernel_size=3, dilation=2), cpp_constructor_args='torch::nn::Conv1dOptions(4, 5, 3).dilation(2)', input_size=(2, 4, 10), with_tf32=True, tf32_precision=0.005, default_dtype=torch.double, ), dict( fullname='Conv1d_groups', constructor=lambda: nn.Conv1d(4, 6, kernel_size=3, groups=2), cpp_constructor_args='torch::nn::Conv1dOptions(4, 6, 3).groups(2)', input_size=(2, 4, 6), cudnn=True, with_tf32=True, tf32_precision=0.005, default_dtype=torch.double, ), dict( fullname='Conv1d_pad_valid', constructor=lambda: nn.Conv1d(4, 5, 3, padding="valid"), cpp_constructor_args='torch::nn::Conv1dOptions(4, 5, 3).padding(torch::kValid)', input_size=(2, 4, 10), cudnn=True, with_tf32=True, tf32_precision=0.005, default_dtype=torch.double, ), dict( fullname='Conv1d_pad_same', constructor=lambda: nn.Conv1d(4, 5, 3, padding="same"), cpp_constructor_args='torch::nn::Conv1dOptions(4, 5, 3).padding(torch::kSame)', input_size=(2, 4, 10), cudnn=True, with_tf32=True, tf32_precision=0.005, default_dtype=torch.double, ), dict( fullname='Conv1d_pad_same2', constructor=lambda: nn.Conv1d(4, 5, 4, padding="same"), cpp_constructor_args='torch::nn::Conv1dOptions(4, 5, 4).padding(torch::kSame)', input_size=(2, 4, 10), cudnn=True, with_tf32=True, tf32_precision=0.005, default_dtype=torch.double, ), dict( fullname='Conv1d_pad_same_dilated', constructor=lambda: nn.Conv1d(4, 5, 4, padding="same", dilation=2), cpp_constructor_args='torch::nn::Conv1dOptions(4, 5, 3).padding(torch::kSame).dilation(2)', input_size=(2, 4, 10), cudnn=True, with_tf32=True, tf32_precision=0.005, default_dtype=torch.double, ), dict( fullname='ConvTranspose1d', constructor=lambda: nn.ConvTranspose1d(3, 4, kernel_size=3, stride=(3,), padding=1, output_padding=(1,)), cpp_constructor_args='torch::nn::ConvTranspose1dOptions(3, 4, 3).stride(3).padding(1).output_padding(1)', cudnn=True, input_size=(1, 3, 7), with_tf32=True, tf32_precision=0.005, default_dtype=torch.double, ), dict( module_name='ConvTranspose1d', constructor_args=(3, 4, 3, 2, 1, 1, 1, False), cpp_constructor_args='''torch::nn::ConvTranspose1dOptions(3, 4, 3) .stride(2).padding(1).output_padding(1).groups(1).bias(false)''', input_size=(1, 3, 6), cudnn=True, desc='no_bias', with_tf32=True, tf32_precision=0.005, default_dtype=torch.double, ), dict( module_name='ConvTranspose1d', constructor_args=(3, 4, 3, 2, 1, 1, 1, True, 2), cpp_constructor_args='''torch::nn::ConvTranspose1dOptions(3, 4, 3) .stride(2).padding(1).output_padding(1).groups(1).bias(true).dilation(2)''', input_size=(1, 3, 6), cudnn=True, desc='dilated', with_tf32=True, tf32_precision=0.005, default_dtype=torch.double, ), dict( fullname='ConvTranspose1d_groups', constructor=lambda: nn.ConvTranspose1d(4, 6, 3, stride=(3,), padding=1, output_padding=(1,), groups=2), cpp_constructor_args='''torch::nn::ConvTranspose1dOptions(4, 6, 3) .stride(3).padding(1).output_padding(1).groups(2)''', cudnn=True, input_size=(2, 4, 7), with_tf32=True, tf32_precision=0.005, default_dtype=torch.double, ), dict( module_name='Conv2d', constructor_args=(3, 4, (3, 2)), 
cpp_constructor_args='torch::nn::Conv2dOptions(3, 4, {3, 2})', input_size=(2, 3, 7, 5), cudnn=True, check_with_long_tensor=True, with_tf32=True, tf32_precision=0.005, default_dtype=torch.double, ), dict( module_name='Conv2d', constructor_args=(3, 4, (3, 3), (2, 2)), cpp_constructor_args='torch::nn::Conv2dOptions(3, 4, {3, 3}).stride({2, 2})', input_size=(2, 3, 6, 6), cudnn=True, desc='strided', check_with_long_tensor=True, with_tf32=True, tf32_precision=0.005, default_dtype=torch.double, ), dict( module_name='Conv2d', constructor_args=(3, 4, (3, 3), (2, 2), (1, 1)), cpp_constructor_args='torch::nn::Conv2dOptions(3, 4, {3, 3}).stride({2, 2}).padding({1, 1})', input_size=(2, 3, 6, 6), cudnn=True, desc='padding', check_with_long_tensor=True, with_tf32=True, tf32_precision=0.005, default_dtype=torch.double, ), dict( module_name='Conv2d', constructor_args=(3, 2, (3, 3), (2, 2), (1, 1), (2, 2)), cpp_constructor_args='torch::nn::Conv2dOptions(3, 2, {3, 3}).stride({2, 2}).padding({1, 1}).dilation({2, 2})', input_size=(2, 3, 8, 8), cudnn=True, desc='dilated', check_with_long_tensor=True, with_tf32=True, tf32_precision=0.005, default_dtype=torch.double, ), dict( module_name='Conv2d', constructor_args=(3, 4, (3, 2), 1, 0, 1, 1, False), cpp_constructor_args='''torch::nn::Conv2dOptions(3, 4, {3, 2}) .stride(1).padding(0).dilation(1).groups(1).bias(false)''', input_size=(2, 3, 6, 5), cudnn=True, desc='no_bias', check_with_long_tensor=True, with_tf32=True, tf32_precision=0.015, default_dtype=torch.double, ), dict( module_name='Conv2d', constructor_args=(3, 4, (3, 2)), cpp_constructor_args='torch::nn::Conv2dOptions(3, 4, {3, 2})', input_size=(0, 3, 7, 5), cudnn=True, desc='zero_batch', check_with_long_tensor=True, with_tf32=True, ), dict( fullname='Conv2d_groups', constructor=lambda: nn.Conv2d(4, 6, (3, 2), groups=2), cpp_constructor_args='torch::nn::Conv2dOptions(4, 6, {3, 2}).groups(2)', input_size=(2, 4, 6, 5), cudnn=True, check_with_long_tensor=True, with_tf32=True, tf32_precision=0.015, default_dtype=torch.double, ), dict( fullname='Conv2d_groups_thnn', constructor=lambda: nn.Conv2d(4, 6, (3, 2), groups=2), cpp_constructor_args='torch::nn::Conv2dOptions(4, 6, {3, 2}).groups(2)', input_size=(2, 4, 6, 5), check_with_long_tensor=True, with_tf32=True, tf32_precision=0.015, default_dtype=torch.double, ), dict( fullname='Conv2d_pad_valid', constructor=lambda: nn.Conv2d(2, 4, (3, 4), padding="valid"), cpp_constructor_args='torch::nn::Conv2dOptions(2, 4, {3, 4}).padding(torch::kValid)', input_size=(2, 2, 6, 5), cudnn=True, with_tf32=True, tf32_precision=0.005, default_dtype=torch.double, ), dict( fullname='Conv2d_pad_same', constructor=lambda: nn.Conv2d(2, 4, (3, 4), padding="same"), cpp_constructor_args='torch::nn::Conv2dOptions(2, 4, {3, 4}).padding(torch::kSame)', input_size=(2, 2, 6, 5), cudnn=True, with_tf32=True, tf32_precision=0.01, default_dtype=torch.double, ), dict( fullname='Conv2d_pad_same_dilated', constructor=lambda: nn.Conv2d(2, 4, (3, 4), padding="same", dilation=2), cpp_constructor_args='torch::nn::Conv2dOptions(2, 4, {3, 4}).padding(torch::kSame).dilation(2)', input_size=(2, 2, 6, 5), cudnn=True, with_tf32=True, tf32_precision=0.01, default_dtype=torch.double, ), dict( module_name='ConvTranspose2d', constructor_args=(3, 4, 3, (3, 2), 1, (1, 1)), cpp_constructor_args='''torch::nn::ConvTranspose2dOptions(3, 4, 3) .stride({3, 2}).padding(1).output_padding({1, 1})''', cudnn=True, input_size=(1, 3, 7, 6), check_with_long_tensor=True, with_tf32=True, tf32_precision=0.01, 
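            # NOTE: `with_tf32=True` appears to mark entries that may also run with
            # TF32 math enabled on CUDA, and `tf32_precision` is the looser comparison
            # tolerance used for those runs.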
default_dtype=torch.double, ), dict( module_name='ConvTranspose2d', constructor_args=(3, 4, 3, (2, 3), 1, (1, 1), 1, False, (2, 2)), cpp_constructor_args='''torch::nn::ConvTranspose2dOptions(3, 4, 3) .stride({2, 3}) .padding(1) .output_padding({1, 1}) .groups(1) .bias(false) .dilation({2, 2})''', input_size=(1, 3, 6, 7), cudnn=True, desc='dilated', check_with_long_tensor=True, with_tf32=True, tf32_precision=0.01, default_dtype=torch.double, ), dict( module_name='ConvTranspose2d', constructor_args=(3, 4, 3, (2, 3), 1, (1, 1), 1, False), cpp_constructor_args='''torch::nn::ConvTranspose2dOptions(3, 4, 3) .stride({2, 3}).padding(1).output_padding({1, 1}).groups(1).bias(false)''', input_size=(1, 3, 6, 7), cudnn=True, desc='no_bias', check_with_long_tensor=True, with_tf32=True, tf32_precision=0.01, default_dtype=torch.double, ), dict( fullname='ConvTranspose2d_groups', constructor=lambda: nn.ConvTranspose2d(2, 4, (2, 3), groups=2), cpp_constructor_args='torch::nn::ConvTranspose2dOptions(2, 4, {2, 3}).groups(2)', input_size=(1, 2, 4, 5), cudnn=True, check_with_long_tensor=True, with_tf32=True, tf32_precision=0.01, default_dtype=torch.double, ), dict( fullname='Conv2d_depthwise', constructor=lambda: nn.Conv2d(4, 4, (3, 3), groups=4), cpp_constructor_args='torch::nn::Conv2dOptions(4, 4, {3, 3}).groups(4)', input_size=(2, 4, 6, 6), with_tf32=True, tf32_precision=0.005, default_dtype=torch.double, ), dict( fullname='Conv2d_depthwise_with_multiplier', constructor=lambda: nn.Conv2d(4, 8, (3, 3), groups=4), cpp_constructor_args='torch::nn::Conv2dOptions(4, 8, {3, 3}).groups(4)', input_size=(2, 4, 6, 6), with_tf32=True, tf32_precision=0.005, default_dtype=torch.double, ), dict( fullname='Conv2d_depthwise_strided', constructor=lambda: nn.Conv2d(4, 4, (3, 3), stride=(2, 2), groups=4), cpp_constructor_args='torch::nn::Conv2dOptions(4, 4, {3, 3}).stride({2, 2}).groups(4)', input_size=(2, 4, 6, 6), with_tf32=True, tf32_precision=0.005, default_dtype=torch.double, ), dict( fullname='Conv2d_depthwise_padded', constructor=lambda: nn.Conv2d(4, 4, (3, 3), padding=(1, 1), groups=4), cpp_constructor_args='torch::nn::Conv2dOptions(4, 4, {3, 3}).padding({1, 1}).groups(4)', input_size=(2, 4, 6, 6), with_tf32=True, tf32_precision=0.005, default_dtype=torch.double, ), dict( fullname='Conv2d_depthwise_dilated', constructor=lambda: nn.Conv2d(4, 4, (2, 2), dilation=(2, 2), groups=4), cpp_constructor_args='torch::nn::Conv2dOptions(4, 4, {2, 2}).dilation({2, 2}).groups(4)', input_size=(2, 4, 5, 5), with_tf32=True, tf32_precision=0.005, default_dtype=torch.double, ), dict( module_name='Conv3d', constructor_args=(2, 3, (2, 3, 2)), cpp_constructor_args='torch::nn::Conv3dOptions(2, 3, {2, 3, 2})', input_size=(1, 2, 4, 5, 4), cudnn=True, check_with_long_tensor=True, with_tf32=True, tf32_precision=0.05, default_dtype=torch.double, ), dict( module_name='Conv3d', constructor_args=(2, 3, (2, 3, 4), 1, 0, 1, 1, False), cpp_constructor_args='''torch::nn::Conv3dOptions(2, 3, {2, 3, 4}) .stride(1).padding(0).dilation(1).groups(1).bias(false)''', input_size=(1, 2, 3, 4, 5), cudnn=True, desc='no_bias', check_with_long_tensor=True, with_tf32=True, tf32_precision=0.05, default_dtype=torch.double, ), dict( module_name='Conv3d', constructor_args=(2, 3, (1, 1, 1), 1, 0, 1, 1, False), cpp_constructor_args='''torch::nn::Conv3dOptions(2, 3, {2, 3, 4}) .stride(1).padding(0).dilation(1).groups(1).bias(false)''', input_size=(1, 2, 3, 4, 5), cudnn=True, desc='1x1x1_no_bias', check_with_long_tensor=False, with_tf32=True, tf32_precision=0.05, 
default_dtype=torch.double, ), dict( module_name='Conv3d', constructor_args=(3, 4, 2, 2), cpp_constructor_args='torch::nn::Conv3dOptions(3, 4, 2).stride(2)', input_size=(2, 3, 5, 5, 5), cudnn=True, desc='stride', check_with_long_tensor=True, with_tf32=True, tf32_precision=0.05, default_dtype=torch.double, ), dict( module_name='Conv3d', constructor_args=(3, 4, 2, 2, 1), cpp_constructor_args='torch::nn::Conv3dOptions(3, 4, 2).stride(2).padding(1)', input_size=(2, 3, 5, 5, 5), cudnn=True, desc='stride_padding', check_with_long_tensor=True, with_tf32=True, tf32_precision=0.05, default_dtype=torch.double, ), dict( module_name='Conv3d', constructor_args=(3, 4, (2, 3, 4)), cpp_constructor_args='torch::nn::Conv3dOptions(3, 4, {2, 3, 4})', input_size=(0, 3, 3, 4, 5), cudnn=True, check_with_long_tensor=True, desc='zero_batch', with_tf32=True, ), dict( fullname='Conv3d_groups', constructor=lambda: nn.Conv3d(2, 4, kernel_size=3, groups=2), cpp_constructor_args='torch::nn::Conv3dOptions(2, 4, 3).groups(2)', input_size=(1, 2, 4, 5, 4), cudnn=True, check_with_long_tensor=True, with_tf32=True, tf32_precision=0.005, default_dtype=torch.double, ), dict( fullname='Conv3d_dilated', constructor=lambda: nn.Conv3d(3, 4, kernel_size=2, dilation=2), cpp_constructor_args='torch::nn::Conv3dOptions(3, 4, 2).dilation(2)', input_size=(2, 3, 5, 5, 5), with_tf32=True, tf32_precision=0.05, default_dtype=torch.double, ), dict( fullname='Conv3d_dilated_strided', constructor=lambda: nn.Conv3d(3, 4, kernel_size=2, dilation=2, stride=2), cpp_constructor_args='torch::nn::Conv3dOptions(3, 4, 2).dilation(2).stride(2)', input_size=(2, 3, 5, 5, 5), with_tf32=True, tf32_precision=0.05, default_dtype=torch.double, ), dict( fullname='Conv3d_pad_valid', constructor=lambda: nn.Conv3d(3, 4, (2, 3, 4), padding="valid"), cpp_constructor_args='torch::nn::Conv3dOptions(3, 4, {2, 3, 4}).padding(torch::kValid)', input_size=(2, 3, 6, 5, 4), cudnn=True, with_tf32=True, tf32_precision=0.05, default_dtype=torch.double, ), dict( fullname='Conv3d_pad_same', constructor=lambda: nn.Conv3d(3, 4, (2, 3, 4), padding="same"), cpp_constructor_args='torch::nn::Conv3dOptions(3, 4, {2, 3, 4}).padding(torch::kSame)', input_size=(2, 3, 6, 5, 4), cudnn=True, with_tf32=True, tf32_precision=0.05, default_dtype=torch.double, ), dict( fullname='Conv3d_pad_same_dilated', constructor=lambda: nn.Conv3d(3, 4, (2, 3, 4), padding="same", dilation=2), cpp_constructor_args='torch::nn::Conv3dOptions(3, 4, {2, 3, 4}).padding(torch::kSame).dilation(2)', input_size=(2, 3, 6, 5, 4), cudnn=True, with_tf32=True, tf32_precision=0.05, default_dtype=torch.double, ), dict( module_name='ConvTranspose3d', constructor_args=(2, 3, (2, 3, 2)), cpp_constructor_args='torch::nn::ConvTranspose3dOptions(2, 3, {2, 3, 2})', cudnn=True, input_size=(1, 2, 4, 5, 4), with_tf32=True, tf32_precision=0.05, default_dtype=torch.double, ), dict( module_name='ConvTranspose3d', constructor_args=(2, 3, (2, 3, 2), 1, 0, 0, 1, True, (2, 2, 2)), cpp_constructor_args='''torch::nn::ConvTranspose3dOptions(2, 3, {2, 3, 2}) .stride(1).padding(0).output_padding(0).groups(1).bias(true).dilation({2, 2, 2})''', cudnn=True, input_size=(1, 2, 4, 5, 4), desc='dilated', with_tf32=True, tf32_precision=0.05, default_dtype=torch.double, ), dict( module_name='ReplicationPad3d', constructor_args=((1, 2, 3, 3, 2, 1),), cpp_constructor_args='torch::nn::ReplicationPad3dOptions({1, 2, 3, 3, 2, 1})', input_size=(2, 3, 2, 2, 2), default_dtype=torch.double, ), dict( module_name='ReplicationPad3d', constructor_args=((1, 2, 3, 3, 2, 1),), 
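            # NOTE: entries with `reference_fn=single_batch_reference_fn` and
            # `desc='no_batch_dim'` exercise unbatched inputs: the helper defined above
            # unsqueezes a batch dimension of size 1, runs the module under
            # freeze_rng_state(), and squeezes the result back for comparison.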
cpp_constructor_args='torch::nn::ReplicationPad3dOptions({1, 2, 3, 3, 2, 1})', input_size=(3, 2, 2, 2), reference_fn=single_batch_reference_fn, desc='no_batch_dim', default_dtype=torch.double, ), dict( module_name='ReplicationPad3d', constructor_args=((1, 2, 3, 3, 2, 1),), cpp_constructor_args='torch::nn::ReplicationPad3dOptions({1, 2, 3, 3, 2, 1})', input_fn=lambda: torch.rand(2, 3, 2, 2, 2, dtype=torch.complex128, requires_grad=True), skip_half=True, desc='complex' ), dict( module_name='Embedding', constructor_args=(4, 3), cpp_constructor_args='torch::nn::EmbeddingOptions(4, 3)', input_fn=lambda: torch.empty(2, 3, dtype=torch.long).random_(4), check_gradgrad=False, default_dtype=torch.double, decorator=skipIfTorchDynamo("https://github.com/pytorch/pytorch/issues/117971") ), dict( module_name='Embedding', constructor_args=(4, 3), cpp_constructor_args='torch::nn::EmbeddingOptions(4, 3)', input_fn=lambda: torch.empty(1, 512, dtype=torch.long).random_(4).expand(7, 512), check_gradgrad=False, desc='discontiguous', default_dtype=torch.double, decorator=skipIfTorchDynamo("https://github.com/pytorch/pytorch/issues/117971") ), dict( module_name='EmbeddingBag', constructor_args=(4, 3), cpp_constructor_args='torch::nn::EmbeddingBagOptions(4, 3)', input_fn=lambda: torch.empty(2, 3, dtype=torch.long).random_(4), check_gradgrad=False, desc='mean', default_dtype=torch.double, ), dict( module_name='EmbeddingBag', constructor_args=(4, 3), cpp_constructor_args='torch::nn::EmbeddingBagOptions(4, 3)', input_fn=lambda: torch.empty(1, 512, dtype=torch.long).random_(4).expand(7, 512), check_gradgrad=False, desc='discontiguous', default_dtype=torch.double, ), dict( module_name='EmbeddingBag', constructor_args=(4, 3, None, 2., False, 'sum'), cpp_constructor_args='''torch::nn::EmbeddingBagOptions(4, 3) .max_norm(std::nullopt).norm_type(2.).scale_grad_by_freq(false).mode(torch::kSum)''', input_fn=lambda: torch.empty(2, 3, dtype=torch.long).random_(4), check_gradgrad=False, desc='sum', default_dtype=torch.double, ), dict( module_name='EmbeddingBag', constructor_args=(4, 3, None, 2., False, 'max'), cpp_constructor_args='''torch::nn::EmbeddingBagOptions(4, 3) .max_norm(std::nullopt).norm_type(2.).scale_grad_by_freq(false).mode(torch::kMax)''', input_fn=lambda: torch.empty(2, 3, dtype=torch.long).random_(4), check_gradgrad=False, desc='max', default_dtype=torch.double, ), dict( fullname='EmbeddingBag_mean_padding_idx', constructor=lambda: nn.EmbeddingBag(4, 3, padding_idx=1), cpp_constructor_args='torch::nn::EmbeddingBagOptions(4, 3).padding_idx(1)', input_fn=lambda: torch.stack([torch.randperm(3), torch.randperm(3)]), check_gradgrad=False, default_dtype=torch.double, ), dict( fullname='EmbeddingBag_sum_padding_idx', constructor=lambda: nn.EmbeddingBag(4, 3, None, 2., False, 'sum', padding_idx=1), cpp_constructor_args='''torch::nn::EmbeddingBagOptions(4, 3) .max_norm(std::nullopt).norm_type(2.).scale_grad_by_freq(false).mode(torch::kSum).padding_idx(1)''', input_fn=lambda: torch.stack([torch.randperm(3), torch.randperm(3)]), check_gradgrad=False, default_dtype=torch.double, ), dict( fullname='EmbeddingBag_max_padding_idx', constructor=lambda: nn.EmbeddingBag(4, 3, None, 2., False, 'max', padding_idx=1), cpp_constructor_args='''torch::nn::EmbeddingBagOptions(4, 3) .max_norm(std::nullopt).norm_type(2.).scale_grad_by_freq(false).mode(torch::kMax).padding_idx(1)''', input_fn=lambda: torch.stack([torch.randperm(3), torch.randperm(3)]), check_gradgrad=False, default_dtype=torch.double, ), dict( 
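            # NOTE: the sparse embedding entries below build nn.EmbeddingBag / nn.Embedding
            # with sparse=True; `has_sparse_gradients=True` tells the harness that the
            # weight gradients are sparse tensors, and `check_gradgrad=False` presumably
            # skips the double-backward check, which sparse gradients do not support.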
fullname='EmbeddingBag_sparse', constructor=lambda: nn.EmbeddingBag(4, 3, sparse=True, dtype=torch.double), cpp_constructor_args='''torch::nn::EmbeddingBagOptions(4, 3) .sparse(true)._weight(torch::rand({4, 3}).to(torch::kFloat64))''', input_fn=lambda: torch.randperm(2).repeat(1, 2), check_gradgrad=False, has_sparse_gradients=True, ), dict( constructor=lambda: nn.Embedding(4, 3, dtype=torch.double, sparse=True), cpp_constructor_args='torch::nn::EmbeddingOptions(4, 3).sparse(true)._weight(torch::rand({4, 3}).to(torch::kFloat64))', input_fn=lambda: torch.randperm(2).repeat(1, 2), fullname='Embedding_sparse', check_gradgrad=False, has_sparse_gradients=True, ), dict( module_name='PixelShuffle', constructor_args=(3,), cpp_constructor_args='torch::nn::PixelShuffleOptions(3)', input_size=(1, 9, 4, 4), default_dtype=torch.double, ), dict( module_name='PixelUnshuffle', constructor_args=(3,), cpp_constructor_args='torch::nn::PixelUnshuffleOptions(3)', input_size=(1, 1, 12, 12), default_dtype=torch.double, ), dict( constructor=wrap_functional(F.interpolate, size=12, scale_factor=None, mode='nearest'), cpp_options_args='''F::InterpolateFuncOptions() .size(std::vector<int64_t>({12})).scale_factor(std::nullopt).mode(torch::kNearest)''', input_size=(1, 2, 4), fullname='interpolate_nearest_1d', pickle=False, default_dtype=torch.double, ), dict( constructor=wrap_functional(F.interpolate, size=12, scale_factor=None, mode='nearest'), cpp_options_args='''F::InterpolateFuncOptions() .size(std::vector<int64_t>({12})).scale_factor(std::nullopt).mode(torch::kNearest)''', input_size=(0, 2, 4), fullname='interpolate_nearest_1d_zero_dim', pickle=False, ), dict( constructor=wrap_functional(F.interpolate, size=(12, ), scale_factor=None, mode='nearest'), cpp_options_args='''F::InterpolateFuncOptions() .size(std::vector<int64_t>({12})).scale_factor(std::nullopt).mode(torch::kNearest)''', input_size=(1, 2, 3), fullname='interpolate_nearest_tuple_1d', pickle=False, default_dtype=torch.double, ), dict( constructor=wrap_functional(F.interpolate, size=None, scale_factor=4., mode='nearest'), cpp_options_args='''F::InterpolateFuncOptions() .size(std::nullopt).scale_factor(std::vector<double>({4.})).mode(torch::kNearest)''', input_size=(1, 2, 4), fullname='interpolate_nearest_scale_1d', pickle=False, default_dtype=torch.double, ), dict( constructor=wrap_functional(F.interpolate, size=12, scale_factor=None, mode='linear', align_corners=False), cpp_options_args='''F::InterpolateFuncOptions() .size(std::vector<int64_t>({12})) .scale_factor(std::nullopt) .mode(torch::kLinear) .align_corners(false)''', input_size=(1, 2, 4), fullname='interpolate_linear_1d', pickle=False, default_dtype=torch.double, ), dict( constructor=wrap_functional(F.interpolate, size=(4, ), scale_factor=None, mode='linear', align_corners=False), cpp_options_args='''F::InterpolateFuncOptions() .size(std::vector<int64_t>({4})) .scale_factor(std::nullopt) .mode(torch::kLinear) .align_corners(false)''', input_size=(1, 2, 3), fullname='interpolate_linear_tuple_1d', pickle=False, default_dtype=torch.double, ), dict( constructor=wrap_functional(F.interpolate, size=None, scale_factor=4., mode='linear', align_corners=False), cpp_options_args='''F::InterpolateFuncOptions() .size(std::nullopt) .scale_factor(std::vector<double>({4.})) .mode(torch::kLinear) .align_corners(false)''', input_size=(1, 2, 4), fullname='interpolate_linear_scale_1d', pickle=False, default_dtype=torch.double, ), dict( constructor=wrap_functional(F.interpolate, size=12, scale_factor=None, 
mode='linear', align_corners=False), cpp_options_args='''F::InterpolateFuncOptions() .size(std::vector<int64_t>({12})) .scale_factor(std::nullopt) .mode(torch::kLinear) .align_corners(false)''', input_size=(0, 2, 4), fullname='interpolate_linear_1d_zero_dim', pickle=False, ), dict( constructor=wrap_functional(F.interpolate, size=12, scale_factor=None, mode='linear', align_corners=True), cpp_options_args='''F::InterpolateFuncOptions() .size(std::vector<int64_t>({12})) .scale_factor(std::nullopt) .mode(torch::kLinear) .align_corners(true)''', input_size=(1, 2, 4), fullname='interpolate_linear_1d_align_corners', pickle=False, default_dtype=torch.double, ), dict( constructor=wrap_functional(F.interpolate, size=None, scale_factor=4., mode='linear', align_corners=True), cpp_options_args='''F::InterpolateFuncOptions() .size(std::nullopt) .scale_factor(std::vector<double>({4.})) .mode(torch::kLinear) .align_corners(true)''', input_size=(1, 2, 4), fullname='interpolate_linear_scale_1d_align_corners', pickle=False, default_dtype=torch.double, ), dict( constructor=wrap_functional(F.interpolate, size=2, scale_factor=None, mode='nearest'), cpp_options_args='''F::InterpolateFuncOptions() .size(std::vector<int64_t>({2, 2})) .scale_factor(std::nullopt) .mode(torch::kNearest)''', input_size=(1, 128, 1, 1), fullname='interpolate_nearest_2d_launch_configs', pickle=False, default_dtype=torch.double, ), dict( constructor=wrap_functional(F.interpolate, size=12, scale_factor=None, mode='nearest'), cpp_options_args='''F::InterpolateFuncOptions() .size(std::vector<int64_t>({12, 12})) .scale_factor(std::nullopt) .mode(torch::kNearest)''', input_size=(1, 2, 4, 4), fullname='interpolate_nearest_2d', pickle=False, default_dtype=torch.double, ), dict( constructor=wrap_functional(F.interpolate, size=(12, 16), scale_factor=None, mode='nearest'), cpp_options_args='''F::InterpolateFuncOptions() .size(std::vector<int64_t>({12, 16})) .scale_factor(std::nullopt) .mode(torch::kNearest)''', input_size=(1, 2, 3, 4), fullname='interpolate_nearest_tuple_2d', pickle=False, default_dtype=torch.double, ), dict( constructor=wrap_functional(F.interpolate, size=None, scale_factor=4., mode='nearest'), cpp_options_args='''F::InterpolateFuncOptions() .size(std::nullopt) .scale_factor(std::vector<double>({4., 4.})) .mode(torch::kNearest)''', input_size=(1, 2, 4, 4), fullname='interpolate_nearest_scale_2d', pickle=False, default_dtype=torch.double, ), dict( constructor=wrap_functional(F.interpolate, size=12, scale_factor=None, mode='nearest'), cpp_options_args='''F::InterpolateFuncOptions() .size(std::vector<int64_t>({12, 12})) .scale_factor(std::nullopt) .mode(torch::kNearest)''', input_size=(0, 2, 4, 4), fullname='interpolate_nearest_2d_zero_dim', pickle=False, ), dict( constructor=wrap_functional(F.interpolate, size=12, scale_factor=None, mode='bilinear', align_corners=False), cpp_options_args='''F::InterpolateFuncOptions() .size(std::vector<int64_t>({12, 12})) .scale_factor(std::nullopt) .mode(torch::kBilinear) .align_corners(false)''', input_size=(1, 2, 4, 4), fullname='interpolate_bilinear_2d', pickle=False, default_dtype=torch.double, ), dict( constructor=wrap_functional(F.interpolate, size=12, scale_factor=None, mode='bilinear', align_corners=False), cpp_options_args='''F::InterpolateFuncOptions() .size(std::vector<int64_t>({12, 12})) .scale_factor(std::nullopt) .mode(torch::kBilinear) .align_corners(false)''', input_size=(0, 2, 4, 4), fullname='interpolate_bilinear_2d_zero_dim', pickle=False, ), dict( 
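            # NOTE: the F.interpolate entries are expressed through wrap_functional with
            # `cpp_options_args` for the C++ functional parity test; the fullname encodes
            # mode, size-vs-scale_factor and align_corners, and the *_zero_dim variants
            # pass an input with a zero-sized batch dimension (input_size starting with 0).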
constructor=wrap_functional(F.interpolate, size=(4, 6), scale_factor=None, mode='bilinear', align_corners=False), cpp_options_args='''F::InterpolateFuncOptions() .size(std::vector<int64_t>({4, 6})) .scale_factor(std::nullopt) .mode(torch::kBilinear) .align_corners(false)''', input_size=(1, 2, 2, 3), fullname='interpolate_bilinear_tuple_2d', pickle=False, default_dtype=torch.double, ), dict( constructor=wrap_functional(F.interpolate, size=None, scale_factor=4., mode='bilinear', align_corners=False), cpp_options_args='''F::InterpolateFuncOptions() .size(std::nullopt) .scale_factor(std::vector<double>({4., 4.})) .mode(torch::kBilinear) .align_corners(false)''', input_size=(1, 2, 4, 4), fullname='interpolate_bilinear_scale_2d', pickle=False, default_dtype=torch.double, ), dict( constructor=wrap_functional(F.interpolate, size=None, scale_factor=(2., 2.), mode='bilinear', align_corners=False), cpp_options_args='''F::InterpolateFuncOptions() .size(std::nullopt) .scale_factor(std::vector<double>({2., 2.})) .mode(torch::kBilinear) .align_corners(false)''', input_size=(1, 2, 4, 4), fullname='interpolate_bilinear_scale_tuple_shared_2d', pickle=False, default_dtype=torch.double, ), dict( constructor=wrap_functional(F.interpolate, size=None, scale_factor=(2., 1.), mode='bilinear', align_corners=False), cpp_options_args='''F::InterpolateFuncOptions() .size(std::nullopt) .scale_factor(std::vector<double>({2., 1.})) .mode(torch::kBilinear) .align_corners(false)''', input_size=(1, 2, 4, 4), fullname='interpolate_bilinear_scale_tuple_skewed_2d', pickle=False, default_dtype=torch.double, ), dict( constructor=wrap_functional(F.interpolate, size=(4, 6), scale_factor=None, mode='bilinear', align_corners=True), cpp_options_args='''F::InterpolateFuncOptions() .size(std::vector<int64_t>({4, 6})) .scale_factor(std::nullopt) .mode(torch::kBilinear) .align_corners(true)''', input_size=(1, 2, 4, 4), fullname='interpolate_bilinear_tuple_2d_align_corners', pickle=False, default_dtype=torch.double, ), dict( constructor=wrap_functional(F.interpolate, size=None, scale_factor=(2., 1.), mode='bilinear', align_corners=True), cpp_options_args='''F::InterpolateFuncOptions() .size(std::nullopt) .scale_factor(std::vector<double>({2., 1.})) .mode(torch::kBilinear) .align_corners(true)''', input_size=(1, 2, 4, 4), fullname='interpolate_bilinear_scale_tuple_skewed_2d_align_corners', pickle=False, default_dtype=torch.double, ), dict( constructor=wrap_functional(F.interpolate, size=12, scale_factor=None, mode='bicubic', align_corners=False), cpp_options_args='''F::InterpolateFuncOptions() .size(std::vector<int64_t>({12, 12})) .scale_factor(std::nullopt) .mode(torch::kBicubic) .align_corners(false)''', input_size=(1, 2, 4, 4), fullname='interpolate_bicubic_2d', pickle=False, default_dtype=torch.double, ), dict( constructor=wrap_functional(F.interpolate, size=12, scale_factor=None, mode='bicubic', align_corners=False), cpp_options_args='''F::InterpolateFuncOptions() .size(std::vector<int64_t>({12, 12})) .scale_factor(std::nullopt) .mode(torch::kBicubic) .align_corners(false)''', input_size=(0, 2, 4, 4), fullname='interpolate_bicubic_2d_zero_dim', pickle=False, ), dict( constructor=wrap_functional(F.interpolate, size=(4, 6), scale_factor=None, mode='bicubic', align_corners=False), cpp_options_args='''F::InterpolateFuncOptions() .size(std::vector<int64_t>({4, 6})) .scale_factor(std::nullopt) .mode(torch::kBicubic) .align_corners(false)''', input_size=(1, 2, 2, 3), fullname='interpolate_bicubic_tuple_2d', pickle=False, 
default_dtype=torch.double, ), dict( constructor=wrap_functional(F.interpolate, size=None, scale_factor=4., mode='bicubic', align_corners=False), cpp_options_args='''F::InterpolateFuncOptions() .size(std::nullopt) .scale_factor(std::vector<double>({4., 4.})) .mode(torch::kBicubic) .align_corners(false)''', input_size=(1, 2, 4, 4), fullname='interpolate_bicubic_scale_2d', pickle=False, default_dtype=torch.double, ), dict( constructor=wrap_functional(F.interpolate, size=None, scale_factor=(2., 2.), mode='bicubic', align_corners=False), cpp_options_args='''F::InterpolateFuncOptions() .size(std::nullopt) .scale_factor(std::vector<double>({2., 2.})) .mode(torch::kBicubic) .align_corners(false)''', input_size=(1, 2, 4, 4), fullname='interpolate_bicubic_scale_tuple_shared_2d', pickle=False, default_dtype=torch.double, ), dict( constructor=wrap_functional(F.interpolate, size=None, scale_factor=(2., 1.), mode='bicubic', align_corners=False), cpp_options_args='''F::InterpolateFuncOptions() .size(std::nullopt) .scale_factor(std::vector<double>({2., 1.})) .mode(torch::kBicubic) .align_corners(false)''', input_size=(1, 2, 4, 4), fullname='interpolate_bicubic_scale_tuple_skewed_2d', pickle=False, default_dtype=torch.double, ), dict( constructor=wrap_functional(F.interpolate, size=(4, 6), scale_factor=None, mode='bicubic', align_corners=True), cpp_options_args='''F::InterpolateFuncOptions() .size(std::vector<int64_t>({4, 6})) .scale_factor(std::nullopt) .mode(torch::kBicubic) .align_corners(true)''', input_size=(1, 2, 4, 4), fullname='interpolate_bicubic_tuple_2d_align_corners', pickle=False, default_dtype=torch.double, ), dict( constructor=wrap_functional(F.interpolate, size=None, scale_factor=(2., 1.), mode='bicubic', align_corners=True), cpp_options_args='''F::InterpolateFuncOptions() .size(std::nullopt) .scale_factor(std::vector<double>({2., 1.})) .mode(torch::kBicubic) .align_corners(true)''', input_size=(1, 2, 4, 4), fullname='interpolate_bicubic_scale_tuple_skewed_2d_align_corners', pickle=False, default_dtype=torch.double, ), dict( constructor=wrap_functional(F.interpolate, size=12, scale_factor=None, mode='nearest'), cpp_options_args='''F::InterpolateFuncOptions() .size(std::vector<int64_t>({12, 12, 12})) .scale_factor(std::nullopt) .mode(torch::kNearest)''', input_size=(1, 2, 4, 4, 4), fullname='interpolate_nearest_3d', pickle=False, default_dtype=torch.double, ), dict( constructor=wrap_functional(F.interpolate, size=12, scale_factor=None, mode='nearest'), cpp_options_args='''F::InterpolateFuncOptions() .size(std::vector<int64_t>({12, 12, 12})) .scale_factor(std::nullopt) .mode(torch::kNearest)''', input_size=(0, 2, 4, 4, 4), fullname='interpolate_nearest_3d_zero_dim', pickle=False, ), dict( constructor=wrap_functional(F.interpolate, size=(12, 16, 16), scale_factor=None, mode='nearest'), cpp_options_args='''F::InterpolateFuncOptions() .size(std::vector<int64_t>({12, 16, 16})) .scale_factor(std::nullopt) .mode(torch::kNearest)''', input_size=(1, 2, 3, 4, 4), fullname='interpolate_nearest_tuple_3d', pickle=False, default_dtype=torch.double, ), dict( constructor=wrap_functional(F.interpolate, size=None, scale_factor=4., mode='nearest'), cpp_options_args='''F::InterpolateFuncOptions() .size(std::nullopt) .scale_factor(std::vector<double>({4., 4., 4.})) .mode(torch::kNearest)''', input_size=(1, 2, 4, 4, 4), fullname='interpolate_nearest_scale_3d', pickle=False, default_dtype=torch.double, ), dict( constructor=wrap_functional(F.interpolate, size=12, scale_factor=None, mode='trilinear', 
align_corners=False), cpp_options_args='''F::InterpolateFuncOptions() .size(std::vector<int64_t>({12, 12, 12})) .scale_factor(std::nullopt) .mode(torch::kTrilinear) .align_corners(false)''', input_size=(1, 2, 4, 4, 4), fullname='interpolate_trilinear_3d', pickle=False, default_dtype=torch.double, ), dict( constructor=wrap_functional(F.interpolate, size=12, scale_factor=None, mode='trilinear', align_corners=False), cpp_options_args='''F::InterpolateFuncOptions() .size(std::vector<int64_t>({12, 12, 12})) .scale_factor(std::nullopt) .mode(torch::kTrilinear) .align_corners(false)''', input_size=(0, 2, 4, 4, 4), fullname='interpolate_trilinear_3d_zero_dim', pickle=False, ), dict( constructor=wrap_functional(F.interpolate, size=(4, 6, 6), scale_factor=None, mode='trilinear', align_corners=False), cpp_options_args='''F::InterpolateFuncOptions() .size(std::vector<int64_t>({4, 6, 6})) .scale_factor(std::nullopt) .mode(torch::kTrilinear) .align_corners(false)''', input_size=(1, 2, 2, 3, 3), fullname='interpolate_trilinear_tuple_3d', pickle=False, default_dtype=torch.double, ), dict( constructor=wrap_functional(F.interpolate, size=None, scale_factor=3., mode='trilinear', align_corners=False), cpp_options_args='''F::InterpolateFuncOptions() .size(std::nullopt) .scale_factor(std::vector<double>({3., 3., 3.})) .mode(torch::kTrilinear) .align_corners(false)''', input_size=(1, 2, 3, 4, 5), fullname='interpolate_trilinear_scale_3d', # See https://github.com/pytorch/pytorch/issues/5006 precision=3e-4, pickle=False, default_dtype=torch.double, ), dict( constructor=wrap_functional(F.interpolate, size=(4, 6, 6), scale_factor=None, mode='trilinear', align_corners=True), cpp_options_args='''F::InterpolateFuncOptions() .size(std::vector<int64_t>({4, 6, 6})) .scale_factor(std::nullopt) .mode(torch::kTrilinear) .align_corners(true)''', input_size=(1, 2, 2, 3, 3), fullname='interpolate_trilinear_tuple_3d_align_corners', pickle=False, default_dtype=torch.double ), dict( constructor=wrap_functional(F.interpolate, size=None, scale_factor=3., mode='trilinear', align_corners=True), cpp_options_args='''F::InterpolateFuncOptions() .size(std::nullopt) .scale_factor(std::vector<double>({3., 3., 3.})) .mode(torch::kTrilinear) .align_corners(true)''', input_size=(1, 2, 3, 4, 4), fullname='interpolate_trilinear_scale_3d_align_corners', # See https://github.com/pytorch/pytorch/issues/5006 precision=3e-4, pickle=False, default_dtype=torch.double, ), dict( constructor=wrap_functional(F.softmax, dim=-1), cpp_options_args='F::SoftmaxFuncOptions(-1)', input_size=(2, 128), # trigger the last-dim algo in CUDA fullname='softmax_lastdim', pickle=False, default_dtype=torch.double, ), dict( constructor=wrap_functional(F.softmax, dim=1, dtype=torch.float64), cpp_options_args='F::SoftmaxFuncOptions(1).dtype(torch::kFloat64)', input_size=(2, 128), fullname='softmax_lastdim_dtype', pickle=False, test_cuda=False, default_dtype=torch.double, ), dict( constructor=wrap_functional(F.softmax, dim=1), cpp_options_args='F::SoftmaxFuncOptions(1)', input_size=(2, 128, 2, 2), # trigger special case of spatial CUDA algo fullname='softmax_spatial_special', pickle=False, default_dtype=torch.double, ), dict( constructor=wrap_functional(F.softmax, dim=1), cpp_options_args='F::SoftmaxFuncOptions(1)', input_size=(2, 2, 4, 4), # regular spatial algorithm fullname='softmax_spatial', pickle=False, default_dtype=torch.double, ), dict( constructor=wrap_functional(F.softmax, dim=1, dtype=torch.float64), 
cpp_options_args='F::SoftmaxFuncOptions(1).dtype(torch::kFloat64)', input_size=(2, 2, 4, 4), # regular spatial algorithm fullname='softmax_spatial_dtype', pickle=False, test_cuda=False, default_dtype=torch.double, ), dict( constructor=wrap_functional(F.softmax, dim=0), cpp_options_args='F::SoftmaxFuncOptions(0)', input_size=(2, 3, 4, 5), fullname='softmax_functional_dim0', test_cuda=False, pickle=False, default_dtype=torch.double, ), dict( constructor=wrap_functional(F.softmax, dim=3), cpp_options_args='F::SoftmaxFuncOptions(3)', input_size=(2, 3, 4, 5), fullname='softmax_functional_dim3', test_cuda=False, pickle=False, default_dtype=torch.double, ), dict( constructor=wrap_functional(F.softmax, dim=-1), cpp_options_args='F::SoftmaxFuncOptions(-1)', input_size=(), fullname='softmax_functional_scalar', test_cuda=False, pickle=False, ), dict( constructor=wrap_functional(F.log_softmax, dim=-1), cpp_options_args='F::LogSoftmaxFuncOptions(-1)', input_size=(2, 128), # trigger the last-dim algo in CUDA fullname='log_softmax_lastdim', pickle=False, default_dtype=torch.double, ), dict( constructor=wrap_functional(F.log_softmax, dim=1), cpp_options_args='F::LogSoftmaxFuncOptions(1)', input_size=(2, 128, 2, 2), # trigger special case of spatial CUDA algo fullname='log_softmax_spatial_special', pickle=False, default_dtype=torch.double, ), dict( constructor=wrap_functional(F.log_softmax, dim=1), cpp_options_args='F::LogSoftmaxFuncOptions(1)', input_size=(2, 2, 4, 4), # regular spatial algorithm fullname='log_softmax_spatial', pickle=False, default_dtype=torch.double, ), dict( constructor=wrap_functional(F.log_softmax, dim=0), cpp_options_args='F::LogSoftmaxFuncOptions(0)', input_size=(2, 3, 4, 5), fullname='log_softmax_dim0', pickle=False, default_dtype=torch.double, ), dict( constructor=wrap_functional(F.log_softmax, dim=3), cpp_options_args='F::LogSoftmaxFuncOptions(3)', input_size=(2, 3, 4, 5), fullname='log_softmax_dim3', pickle=False, default_dtype=torch.double, ), dict( constructor=wrap_functional(F.log_softmax, dim=0), cpp_options_args='F::LogSoftmaxFuncOptions(0)', input_size=(), fullname='log_softmax_scalar', pickle=False, ), dict( fullname='Unfold', constructor=lambda: nn.Unfold((2, 2), (1, 1), (0, 0), (1, 1)), cpp_constructor_args='torch::nn::UnfoldOptions({2, 2}).dilation({1, 1}).padding({0, 0}).stride({1, 1})', input_size=(2, 4, 3, 3), check_gradgrad=False, test_cuda=True, default_dtype=torch.double, ), dict( fullname='Fold', constructor=lambda: nn.Fold((3, 3), (2, 2), (1, 1), (0, 0), (1, 1)), cpp_constructor_args='torch::nn::FoldOptions({3, 3}, {2, 2}).dilation({1, 1}).padding({0, 0}).stride({1, 1})', input_size=(2, 16, 4), check_gradgrad=False, test_cuda=True, default_dtype=torch.double, ), dict( fullname='Fold_no_batch_dim_input', constructor=lambda: nn.Fold((3, 3), (2, 2), (1, 1), (0, 0), (1, 1)), cpp_constructor_args='torch::nn::FoldOptions({3, 3}, {2, 2}).dilation({1, 1}).padding({0, 0}).stride({1, 1})', input_size=(16, 4), check_gradgrad=False, ref=single_batch_reference_fn, test_cuda=True, default_dtype=torch.double, ), dict( fullname='Unfold_int_input', constructor=lambda: nn.Unfold(2, 1, 0, 1), cpp_constructor_args='torch::nn::UnfoldOptions(2).dilation(1).padding(0).stride(1)', input_size=(2, 4, 3, 3), check_gradgrad=False, test_cuda=True, default_dtype=torch.double, ), dict( fullname='Fold_int_input', constructor=lambda: nn.Fold(3, 2, 1, 0, 1), cpp_constructor_args='torch::nn::FoldOptions(3, 2).dilation(1).padding(0).stride(1)', input_size=(2, 16, 4), check_gradgrad=False, 
test_cuda=True, default_dtype=torch.double, ), dict( fullname='Fold_no_batch_dim_int_input', constructor=lambda: nn.Fold(3, 2, 1, 0, 1), cpp_constructor_args='torch::nn::FoldOptions(3, 2).dilation(1).padding(0).stride(1)', input_size=(16, 4), ref=single_batch_reference_fn, check_gradgrad=False, test_cuda=True, default_dtype=torch.double, ), dict( module_name='RReLU', constructor_args=(0.1, 0.9), cpp_constructor_args='torch::nn::RReLUOptions().lower(0.1).upper(0.9)', input_size=(), desc='with_up_down_scalar', test_cuda=False, default_dtype=torch.double, ), dict( module_name='PairwiseDistance', input_fn=lambda: (torch.randn(10, 8), torch.randn(10, 8)), default_dtype=torch.double, ), dict( module_name='PairwiseDistance', input_fn=lambda: (torch.randn(10, 1), torch.randn(10, 8)), desc='broadcast_lhs', default_dtype=torch.double, ), dict( module_name='PairwiseDistance', input_fn=lambda: (torch.randn(10, 8), torch.randn(1, 8)), desc='broadcast_rhs', default_dtype=torch.double, ), dict( module_name='PairwiseDistance', constructor_args=(1.5, 1e-05, True), cpp_constructor_args='torch::nn::PairwiseDistanceOptions().p(1.5).eps(1e-05).keepdim(true)', input_fn=lambda: (torch.randn(10, 8), torch.randn(10, 8)), desc='with_non_default_args', default_dtype=torch.double, ), dict( module_name='PairwiseDistance', input_fn=lambda: (torch.randn(8), torch.randn(8)), reference_fn=single_batch_reference_fn, desc='no_batch_dim', default_dtype=torch.double, ), dict( module_name='TransformerEncoderLayer', constructor_args=(4, 2, 16, 0.0), cpp_constructor_args='''torch::nn::TransformerEncoderLayerOptions(4, 2) .dim_feedforward(16) .dropout(0.0)''', input_size=(2, 3, 4), desc='relu_activation', with_tf32=True, tf32_precision=0.1, # TODO(#50743): figure out the error # RuntimeError: The size of tensor a (6) must match the size of tensor b (4) # at non-singleton dimension 2 check_batched_grad=False, check_gradgrad=False, default_dtype=torch.double, ), dict( module_name='TransformerEncoderLayer', constructor_args=(4, 2, 8, 0.0, F.gelu), cpp_constructor_args='''torch::nn::TransformerEncoderLayerOptions(4, 2) .dim_feedforward(8) .dropout(0.0) .activation(torch::kGELU)''', input_size=(2, 3, 4), check_gradgrad=False, desc='gelu_activation', with_tf32=True, tf32_precision=0.08 if SM90OrLater else 0.05, default_dtype=torch.double, ), dict( module_name='TransformerDecoderLayer', constructor_args=(4, 2, 8, 0.0), cpp_constructor_args='''torch::nn::TransformerDecoderLayerOptions(4, 2) .dim_feedforward(8) .dropout(0.0)''', input_fn=lambda: (torch.rand(3, 3, 4), torch.rand(2, 3, 4)), check_gradgrad=False, desc='relu_activation', with_tf32=True, tf32_precision=0.05, default_dtype=torch.double, ), dict( module_name='TransformerDecoderLayer', constructor_args=(4, 2, 8, 0.0, F.gelu), cpp_constructor_args='''torch::nn::TransformerDecoderLayerOptions(4, 2) .dim_feedforward(8) .dropout(0.0) .activation(torch::kGELU)''', input_fn=lambda: (torch.rand(3, 3, 4), torch.rand(2, 3, 4)), check_gradgrad=False, desc='gelu_activation', with_tf32=True, tf32_precision=0.05, default_dtype=torch.double, ), dict( module_name='Transformer', constructor_args=(4, 2, 2, 2, 8, 0.0, F.relu), cpp_constructor_args='''torch::nn::TransformerOptions() .d_model(4) .nhead(2) .num_encoder_layers(2) .num_decoder_layers(2) .dim_feedforward(8) .dropout(0.0) .activation(torch::kReLU)''', input_fn=lambda: (torch.rand(3, 3, 4), torch.rand(2, 3, 4), torch.rand(3, 3)), check_gradgrad=False, desc='multilayer_coder', with_tf32=True, tf32_precision=0.05 if SM90OrLater else 0.03, 
default_dtype=torch.double, ), dict( module_name='Linear', constructor_args=(3, 5), cpp_constructor_args='torch::nn::LinearOptions(3, 5)', input_fn=lambda: torch.rand(3), reference_fn=lambda i, p, _: torch.mm(i.view(1, -1), p[0].t()).view(-1) + p[1], desc="no_batch_dim", with_tf32=True, tf32_precision=0.005, default_dtype=torch.double, ), dict( module_name='Flatten', cpp_constructor_args='torch::nn::FlattenOptions().start_dim(-3).end_dim(-1)', constructor_args=(-3, -1), input_size=(3, 4, 5), reference_fn=single_batch_reference_fn, desc="no_batch_dim", default_dtype=torch.double, ), dict( module_name='Unflatten', cpp_constructor_args='torch::nn::UnflattenOptions(-2, {2, 2})', constructor_args=(-2, torch.Size([2, 2])), input_size=(3, 4, 5), reference_fn=single_batch_reference_fn, desc="no_batch_dim", default_dtype=torch.double, ), dict( module_name='LayerNorm', constructor_args=([56, 56, 56], 1e-5, False), cpp_constructor_args='torch::nn::LayerNormOptions({56, 56, 56}).eps(1e-5).elementwise_affine(false)', input_size=(4, 56, 56, 56), cudnn=True, check_eval=True, gradcheck_fast_mode=True, check_half=True, desc='3d_no_affine_large_feature', ), ] # add conv padding mode tests: for padding_mode, cpp_padding_mode in zip( ['reflect', 'circular', 'replicate', 'zeros'], ['torch::kReflect', 'torch::kCircular', 'torch::kReplicate', 'torch::kZeros']): # conv signature: # in_channels, out_channels, kernel_size, stride=1, # padding=0, dilation=1, groups=1, # bias=True, padding_mode='zeros' for d in (1, 2, 3): if d == 3 and padding_mode == 'reflect': # FIXME: remove after implementing reflection pad 3d # https://github.com/pytorch/pytorch/issues/27655 continue padding = tuple(range(1, d + 1)) cpp_padding = '{' + ', '.join(map(str, padding)) + '}' input_size = (2, 2) + (4,) * d output_size = (2, 3) + tuple(p + 1 for p in padding) # simplified from `(4 + 2 * p - 3) // 2 + 1` new_module_tests.append( dict( module_name=f'Conv{d}d', constructor_args=(2, 3, 3, 2, padding, 1, 1, True, padding_mode), cpp_constructor_args=f'''torch::nn::Conv{d}dOptions(2, 3, 3) .stride(2) .padding({cpp_padding}) .dilation(1) .groups(1) .bias(true) .padding_mode({cpp_padding_mode})''', input_size=input_size, output_size=output_size, cudnn=True, desc=f'{padding_mode}_stride2_pad2', with_tf32=True, tf32_precision=0.05, default_dtype=torch.double, ), ) # Check that non linear activations work with no batch dimensions non_linear_activations_no_batch = [ 'ELU', 'Hardshrink', 'Hardsigmoid', 'Hardtanh', 'Hardswish', 'LeakyReLU', 'LogSigmoid', 'PReLU', 'ReLU', 'ReLU6', 'RReLU', 'SELU', 'CELU', 'GELU', 'GLU', 'Sigmoid', 'SiLU', 'Mish', 'Softplus', 'Softshrink', 'Softsign', 'Tanh', 'Tanhshrink', 'Threshold' ] non_linear_activations_extra_info: dict[str, dict] = { 'CELU': {'constructor_args': (2.,), 'default_dtype': torch.double}, 'Threshold': {'constructor_args': (2., 1.)}, 'Hardsigmoid': {'check_gradgrad': False, 'check_jit': False, 'default_dtype': torch.double}, 'Hardswish': {'check_gradgrad': False, 'check_jit': False, 'default_dtype': torch.double}, # For RRelu, test that compare CPU and GPU results fail because RNG # is different between CPU and GPU 'RReLU': {'test_cuda': False, 'default_dtype': torch.double}, 'ELU': {'default_dtype': torch.double}, 'GELU': {'default_dtype': torch.double}, 'GLU': {'default_dtype': torch.double}, 'Hardshrink': {'default_dtype': torch.double}, 'Hardtanh': {'default_dtype': torch.double}, 'LeakyReLU': {'default_dtype': torch.double}, 'LogSigmoid': {'default_dtype': torch.double}, 'Mish': {'default_dtype': 
torch.double}, 'PReLU': {'default_dtype': torch.double}, 'ReLU6': {'default_dtype': torch.double}, 'ReLU': {'default_dtype': torch.double}, 'SELU': {'default_dtype': torch.double}, 'SiLU': {'default_dtype': torch.double}, 'Sigmoid': {'default_dtype': torch.double}, 'Softplus': {'default_dtype': torch.double}, 'Softshrink': {'default_dtype': torch.double}, 'Softsign': {'default_dtype': torch.double}, 'Tanh': {'default_dtype': torch.double}, 'Tanhshrink': {'default_dtype': torch.double}, } for non_linear_activation in non_linear_activations_no_batch: activation_test_info = dict( module_name=non_linear_activation, input_size=(4,), reference_fn=single_batch_reference_fn, desc='no_batch_dim', test_cpp_api_parity=False, ) extra_info = non_linear_activations_extra_info.get(non_linear_activation, {}) activation_test_info.update(extra_info) new_module_tests.append(activation_test_info) return new_module_tests def kldivloss_reference(input, target, reduction='mean', log_target=False): if log_target: result = torch.exp(target) * (target - input) else: result = target * (target.log() - input) if reduction == 'mean': return result.mean() elif reduction == 'sum': return result.sum() elif reduction == 'batchmean' and result.dim() != 0: return result.sum() / result.size(0) return result def nlllossNd_reference(input, target, weight=None, ignore_index=-100, reduction='mean'): assert input.dim() >= 3 N = input.size(0) C = input.size(1) out_size = (N,) + input.size()[2:] output = torch.zeros(out_size).type_as(input) if weight is None: weight = torch.ones(C).type_as(input) total_weight = 0 for tup in product(*[range(size) for size in out_size]): t_nx = target[tup] norm = 0. if ignore_index == t_nx else weight[t_nx].item() input_index = list(tup) input_index.insert(1, t_nx) output[tup] = -input[tuple(input_index)] * norm total_weight += norm if reduction == 'mean': return output.sum() / total_weight elif reduction == 'sum': return output.sum() return output def cross_entropy_loss_prob_target_reference(input, target, weight=None, reduction='mean', label_smoothing=0.0): assert input.dim() >= 2 input = torch.log_softmax(input, 1) C = input.size(1) if weight is None: weight = torch.ones(C).type_as(input) weight = weight.view(1, C, *(1 for _ in input.shape[2:])) if label_smoothing > 0.0: assert label_smoothing <= 1.0 target = (target * (1 - label_smoothing) + label_smoothing / C) output = -(input * target * weight).sum(dim=1) if reduction == 'mean': return output.mean() elif reduction == 'sum': return output.sum() return output def cross_entropy_loss_indices_target_reference(input, target, weight=None, ignore_index=-100, reduction='mean', label_smoothing=0.0): log_softmax_input = torch.log_softmax(input, 1) nllloss = F.nll_loss( log_softmax_input, target, weight, ignore_index=ignore_index, reduction=reduction) if label_smoothing == 0.0: return nllloss assert 0.0 < label_smoothing <= 1.0 input = torch.log_softmax(input, 1) C = input.size(1) if weight is not None: input = input * weight.view(1, C, *(1 for _ in input.shape[2:])) smooth_loss = -torch.sum(input, 1) ignore_mask = target == ignore_index smooth_loss.masked_fill_(ignore_mask, 0.0) if reduction == 'mean': if weight is not None: # TODO: This code can path can be removed if #61309 is resolved # loss is normalized by the weights to be consistent with nll_loss_nd ret = torch.sum(smooth_loss) / weight.gather(0, target.masked_select(ignore_mask.logical_not()).flatten()).sum() else: ret = torch.mean(smooth_loss.masked_select(ignore_mask.logical_not())) elif 
reduction == 'sum': ret = torch.sum(smooth_loss) else: ret = smooth_loss return (1 - label_smoothing) * nllloss + ret * (label_smoothing / C) def cross_entropy_loss_reference(input, target, weight=None, ignore_index=-100, reduction='mean', label_smoothing=0.0): if input.shape == target.shape: return cross_entropy_loss_prob_target_reference( input, target, weight=weight, reduction=reduction, label_smoothing=label_smoothing) else: return cross_entropy_loss_indices_target_reference( input, target, weight=weight, reduction=reduction, ignore_index=ignore_index, label_smoothing=label_smoothing ) def nllloss_reference(input, target, weight=None, ignore_index=-100, reduction='mean'): def nll_loss_helper(input, target, weight, ignore_index): if target == ignore_index: return (0, 0) norm = 1 if weight is None else weight[target] result = -input[target] * norm return (result, norm) losses_and_weights = [nll_loss_helper(i, t, weight, ignore_index) for i, t in zip(input, target)] losses, weights = zip(*losses_and_weights) losses_tensor = input.new_tensor(losses) if reduction == 'mean': return sum(losses_tensor) / sum(weights) elif reduction == 'sum': return sum(losses_tensor) else: return losses_tensor def smoothl1loss_reference(input, target, reduction='mean', beta=1.0): abs_diff = (input - target).abs() ge_beta_mask = (abs_diff >= beta).type_as(abs_diff) lt_beta_mask = (abs_diff < beta).type_as(abs_diff) # when beta <= 0 we should just use l1_loss if beta == 0: output = abs_diff else: output = ge_beta_mask * (abs_diff - 0.5 * beta) + lt_beta_mask * 0.5 * (abs_diff ** 2) / beta if reduction == 'mean': return output.mean() elif reduction == 'sum': return output.sum() return output def huberloss_reference(input, target, reduction='mean', delta=1.0): abs_diff = (input - target).abs() ge_delta_mask = (abs_diff >= delta) lt_delta_mask = (abs_diff < delta) output = ge_delta_mask * delta * (abs_diff - 0.5 * delta) + lt_delta_mask * 0.5 * (abs_diff ** 2) if reduction == 'mean': return output.mean() elif reduction == 'sum': return output.sum() return output def _multilabelmarginloss_reference(input, target): targets = [] for target_index in target: if target_index < 0: break targets.append(target_index) sum = 0 for target_index in targets: for i in range(0, len(input)): if i not in targets: sum += max(0, 1 - input[target_index] + input[i]) return sum def multilabelmarginloss_reference(input, target, reduction='mean'): # make everything 2-dimensional input_dim = input.dim() if input.dim() < 2: assert target.dim() < 2 input = input.unsqueeze(0) if input.dim() == 1 else input.unsqueeze(0).unsqueeze(0) target = target.unsqueeze(0) if target.dim() == 1 else target.unsqueeze(0).unsqueeze(0) n = input.size(0) dim = input.size(1) output = input.new(n).zero_() for i in range(0, n): output[i] = _multilabelmarginloss_reference(input[i], target[i]) if reduction == 'mean': return output.mean() / dim elif reduction == 'sum': return output.sum() / dim elif input_dim < 2: # we know we have (1, C) X (1, C) -> (1,), so squeeze will get us # back to correct dimensionality return output.squeeze() / dim else: return output / dim def hingeembeddingloss_reference(input, target, margin=1.0, reduction='mean'): margin_clamp = (margin - input).clamp(min=0).type_as(input) output = torch.where(target == 1, input, margin_clamp) if reduction == 'mean': return output.mean() elif reduction == 'sum': return output.sum() return output def softmarginloss_reference(input, target, reduction='mean'): output = (1 + (-input * target).exp()).log() if 
reduction == 'mean': return output.mean() elif reduction == 'sum': return output.sum() return output def _multimarginloss_reference(input, target_idx, p, margin, weight): if weight is None: weight = input.new(len(input)).fill_(1) output = 0 for i in range(0, len(input)): if i != target_idx: output += weight[target_idx] * (max(0, (margin - input[target_idx] + input[i])) ** p) return output def multimarginloss_reference(input, target, p=1, margin=1, weight=None, reduction='mean'): if input.dim() < 2: input = input.unsqueeze(0) if input.dim() == 1 else input.unsqueeze(0).unsqueeze(0) target_dim = target.dim() if target.dim() == 0: target = target.unsqueeze(0) n = input.size(0) dim = input.size(1) output = input.new(n) for x in range(0, n): output[x] = _multimarginloss_reference(input[x], target[x], p, margin, weight) if reduction == 'mean': return output.mean() / dim elif reduction == 'sum': return output.sum() / dim elif target_dim == 0: return output.squeeze(0) / dim return output / dim def cosineembeddingloss_reference(input1, input2, target, margin=0, reduction='mean'): def _cos(a, b): cos = a.new(a.size(0)) for i in range(0, a.size(0)): cos[i] = (a[i] * b[i]).sum() / ((((a[i] * a[i]).sum() + 1e-12) * ((b[i] * b[i]).sum() + 1e-12)) ** 0.5) return cos output = torch.where(target == 1, 1 - _cos(input1, input2), (_cos(input1, input2) - margin).clamp(min=0)) if reduction == 'mean': return output.mean() elif reduction == 'sum': return output.sum() return output def tripletmarginloss_reference(anchor, positive, negative, margin=1.0, p=2, eps=1e-6, swap=False, reduction='mean'): d_p = torch.pairwise_distance(anchor, positive, p, eps) d_n = torch.pairwise_distance(anchor, negative, p, eps) if swap: d_s = torch.pairwise_distance(positive, negative, p, eps) d_n = torch.min(d_n, d_s) output = torch.clamp(margin + d_p - d_n, min=0.0) if reduction == 'mean': return output.mean() elif reduction == 'sum': return output.sum() return output def marginrankingloss_reference(input1, input2, target, margin=0, reduction='mean'): output = (-target * (input1 - input2) + margin).clamp(min=0) if reduction == 'mean': return output.mean() elif reduction == 'sum': return output.sum() return output # this directly follows Graves et al.'s paper, in contrast to the production implementation, it does not use log-space def ctcloss_reference(log_probs, targets, input_lengths, target_lengths, blank=0, reduction='mean'): input_lengths = torch.as_tensor(input_lengths, dtype=torch.long) target_lengths = torch.as_tensor(target_lengths, dtype=torch.long) dt = log_probs.dtype log_probs = log_probs.double() # we need the accuracy as we are not in logspace targets = targets.long() cum_target_lengths = target_lengths.cumsum(0) losses = [] for i in range(log_probs.size(1)): input_length = input_lengths[i].item() target_length = target_lengths[i].item() cum_target_length = cum_target_lengths[i].item() targets_prime = targets.new_full((2 * target_length + 1,), blank) if targets.dim() == 2: targets_prime[1::2] = targets[i, :target_length] else: targets_prime[1::2] = targets[cum_target_length - target_length:cum_target_length] probs = log_probs[:input_length, i].exp() alpha = log_probs.new_zeros((target_length * 2 + 1,)) alpha[0] = probs[0, blank] alpha[1] = probs[0, targets_prime[1]] mask_third = (targets_prime[:-2] != targets_prime[2:]) for t in range(1, input_length): alpha_next = alpha.clone() alpha_next[1:] += alpha[:-1] alpha_next[2:] += torch.where(mask_third, alpha[:-2], alpha.new_zeros(1)) alpha = probs[t, targets_prime] * 
alpha_next losses.append(-alpha[-2:].sum().log()[None]) output = torch.cat(losses, 0) if reduction == 'mean': output = (output / target_lengths.to(dtype=output.dtype, device=output.device)).mean() elif reduction == 'sum': output = output.sum() output = output.to(dt) return output loss_reference_fns: dict['str', Callable] = { 'KLDivLoss': kldivloss_reference, 'KLDivLoss_log_target': partial(kldivloss_reference, log_target=True), 'NLLLoss': nllloss_reference, 'NLLLossNd': nlllossNd_reference, 'SmoothL1Loss': smoothl1loss_reference, 'HuberLoss': huberloss_reference, 'MultiLabelMarginLoss': multilabelmarginloss_reference, 'HingeEmbeddingLoss': hingeembeddingloss_reference, 'SoftMarginLoss': softmarginloss_reference, 'MultiMarginLoss': multimarginloss_reference, 'CosineEmbeddingLoss': cosineembeddingloss_reference, 'TripletMarginLoss': tripletmarginloss_reference, 'MarginRankingLoss': marginrankingloss_reference, 'CTCLoss': ctcloss_reference, 'CrossEntropyLoss': cross_entropy_loss_reference } criterion_tests = [] def single_batch_reference_criterion_fn(*args): """Reference function for criterion supporting no batch dimensions. The criterion is passed the input and target in batched form with a single item. The output is squeezed to compare with the no-batch input. """ criterion = args[-1] def unsqueeze_inp(inp): if isinstance(inp, (list, tuple)): return [t.unsqueeze(0) for t in inp] return inp.unsqueeze(0) def flatten(xs): result = [] if isinstance(xs, (list, tuple)): for x in xs: result.extend(flatten(x)) else: result.append(xs) return result single_batch_input_args = flatten([unsqueeze_inp(input) for input in args[:-1]]) output = criterion(*single_batch_input_args) reduction = get_reduction(criterion) if reduction == 'none': return output.squeeze(0) # reduction is 'sum' or 'mean' which results in a scalar return output # Check that regression criterion work with no batch dimensions regression_criterion_no_batch = [ 'L1Loss', 'MSELoss', 'PoissonNLLLoss', 'HuberLoss', 'SmoothL1Loss' ] reductions = ['none', 'mean', 'sum'] for name, reduction in product(regression_criterion_no_batch, reductions): regression_test_info = dict( fullname=f"{name}_no_batch_dim_{reduction}", constructor=lambda *args, name=name: getattr(nn, name)(reduction=reduction), input_size=(3, ), target_size=(3, ), reference_fn=single_batch_reference_criterion_fn, test_cpp_api_parity=False, default_dtype=torch.double, ) criterion_tests.append(regression_test_info) for reduction in reductions: regression_test_info = dict( fullname=f"KLDivLoss_no_batch_dim_{reduction}", constructor=lambda: nn.KLDivLoss(reduction=reduction), input_fn=lambda: torch.rand((3,)).log(), target_fn=lambda: torch.rand((3,)), reference_fn=single_batch_reference_criterion_fn, test_cpp_api_parity=False, default_dtype=torch.double, ) criterion_tests.append(regression_test_info) # Check that classification criterion work with no batch dimensions # List of tuples of (name, input_fn, target_fn) classification_criterion_no_batch = [ ( 'BCELoss', lambda: torch.sigmoid(torch.randn(9, dtype=torch.double)), lambda: torch.randn(9, dtype=torch.double).gt(0).to(torch.double) ), ('BCEWithLogitsLoss', lambda: torch.randn(9, dtype=torch.double), lambda: torch.randn(9, dtype=torch.double)), ('HingeEmbeddingLoss', lambda: torch.randn(9, dtype=torch.double), lambda: torch.tensor([-1, 1, 1] * 3)), ('MultiLabelMarginLoss', lambda: torch.randn(4, dtype=torch.double), lambda: torch.tensor([3, 0, -1, 1])), ('SoftMarginLoss', lambda: torch.randn(9, dtype=torch.double), lambda: 
torch.tensor([-1, 1, 1] * 3)), ('NLLLoss', lambda: F.log_softmax(torch.randn(3, dtype=torch.double), dim=0), lambda: torch.tensor(1)), ( 'CosineEmbeddingLoss', lambda: (torch.randn(9, dtype=torch.double), torch.randn(9, dtype=torch.double)), lambda: torch.tensor(1, dtype=torch.double) ), # For MarginRankingLoss, input_fn : (x1, x2) and target_fn : target ('MarginRankingLoss', lambda: (torch.randn(()), torch.randn(())), lambda: torch.randn(()).sign()), # For TripletMarginLoss, input_fn : (anchor, positive) and target_fn : negative ( 'TripletMarginLoss', lambda: (torch.randn(9, dtype=torch.double), torch.randn(9, dtype=torch.double)), lambda: torch.randn(9, dtype=torch.double) ), ('MultiLabelSoftMarginLoss', lambda: torch.randn(9, dtype=torch.double), lambda: torch.randn(9)), ] classification_criterion_no_batch_extra_info: dict[str, dict] = { 'MultiLabelMarginLoss': {'check_gradgrad': False}, } # TODO : Fix these discrepancies classification_cpp_parity = { 'BCELoss': False, 'BCEWithLogitsLoss': False, 'HingeEmbeddingLoss': False, 'NLLLoss': False, 'SoftMarginLoss': False, } reductions = ['none', 'mean', 'sum'] for (name, input_fn, target_fn), reduction in product(classification_criterion_no_batch, reductions): classification_test_info = dict( fullname=f"{name}_no_batch_dim_{reduction}", constructor=lambda *args, name=name: getattr(nn, name)(reduction=reduction), input_fn=lambda f=input_fn: f(), target_fn=lambda f=target_fn: f(), reference_fn=single_batch_reference_criterion_fn, test_cpp_api_parity=True, has_parity=classification_cpp_parity.get(name, True) ) extra_info = classification_criterion_no_batch_extra_info.get(name, {}) classification_test_info.update(extra_info) criterion_tests.append(classification_test_info) class NNTestCase(TestCase): # _forward is defined in classes inheriting from NNTestCase @abstractmethod def _forward(self, *args, **kwargs): raise NotImplementedError @abstractmethod def _get_parameters(self, module: nn.Module) -> tuple[list[nn.Parameter], list[nn.Parameter]]: raise NotImplementedError @abstractmethod def _zero_grad_parameters(self, module: nn.Module) -> None: raise NotImplementedError @abstractmethod def _backward(self, module: nn.Module, input: _TensorOrTensors, output: torch.Tensor, grad_output: Union[torch.Tensor, Sequence[torch.Tensor]], create_graph: bool = False): raise NotImplementedError def _jacobian(self, input, num_out): if isinstance(input, tuple): return tuple(self._jacobian(elem, num_out) for elem in input) elif isinstance(input, list): return [self._jacobian(elem, num_out) for elem in input] else: return torch.zeros(input.nelement(), num_out) def _flatten_tensors(self, x): if isinstance(x, torch.Tensor): if x.is_sparse: return x.to_dense().view(-1) else: return x.view(-1) else: return tuple(self._flatten_tensors(a) for a in x) def _zero_grad_input(self, input): if isinstance(input, torch.Tensor): if input.requires_grad and input.grad is not None: input.grad.zero_() input.grad.detach_() else: for i in input: self._zero_grad_input(i) def _analytical_jacobian(self, module, input: _TensorOrTensors, jacobian_input=True, jacobian_parameters=True): output = self._forward(module, input) output_size = output.nelement() if jacobian_input: jacobian_inp = self._jacobian(input, output_size) flat_jacobian_input = list(_iter_tensors(jacobian_inp)) if jacobian_parameters: num_param = sum(p.numel() for p in self._get_parameters(module)[0]) jacobian_param = torch.zeros(num_param, output_size) for i in range(output_size): param, d_param = 
self._get_parameters(module) # make non grad zeros d_param = [torch.zeros_like(p) if d is None else d for (p, d) in zip(param, d_param)] d_out = torch.zeros_like(output) flat_d_out = d_out.view(-1) flat_d_out[i] = 1 if jacobian_parameters: self._zero_grad_parameters(module) # Tensors will accumulate gradient from multiple steps if jacobian_input: self._zero_grad_input(input) d_input = self._backward(module, input, output, d_out) if jacobian_input: for jacobian_x, d_x in zip(flat_jacobian_input, _iter_tensors(d_input)): jacobian_x[:, i] = d_x.contiguous().view(-1) if jacobian_parameters: jacobian_param[:, i] = torch.cat(self._flatten_tensors(d_param), 0) res: tuple[torch.Tensor, ...] = () if jacobian_input: res += jacobian_inp, if jacobian_parameters: res += jacobian_param, return res def _numerical_jacobian(self, module, input: _TensorOrTensors, jacobian_input=True, jacobian_parameters=True): def fw(*input): return self._forward(module, input).detach() res: tuple[torch.Tensor, ...] = () if jacobian_input: res += _get_numerical_jacobian(fw, input, eps=1e-6), if jacobian_parameters: param, _ = self._get_parameters(module) to_cat = [] for p in param: jacobian = _get_numerical_jacobian(fw, input, target=p, eps=1e-6) # get_numerical_jacobian returns a list of tuples but we require a tensor to_cat.append(jacobian[0][0]) res += (torch.cat(to_cat, 0),) return res def check_jacobian(self, module, input: _TensorOrTensors, jacobian_input=True): jacobian_parameters = bool(self._get_parameters(module)[0]) analytical = self._analytical_jacobian(module, input, jacobian_input, jacobian_parameters) numerical = self._numerical_jacobian(module, input, jacobian_input, jacobian_parameters) analytical_t = list(_iter_tensors(analytical)) numerical_t = list(_iter_tensors(numerical)) differences = [] for a, n in zip(analytical_t, numerical_t): if a.numel() != 0: differences.append(a.add(n, alpha=-1).abs().max()) # TODO: compare structure (ensure analytic jacobian has correct shape) if len(differences) > 0: self.assertLessEqual(max(differences), PRECISION) # type: ignore[type-var] class TestBase: _required_arg_names = {'constructor_args', 'input', 'extra_args'} def __init__(self, constructor, desc='', reference_fn=None, fullname=None, **kwargs): self.desc = desc self.fullname = fullname self.constructor = constructor self.reference_fn = reference_fn for name in self._required_arg_names: if name not in kwargs and name + '_fn' not in kwargs and name + '_size' not in kwargs: if name in {'constructor_args', 'extra_args'}: kwargs[name] = () else: raise ValueError(f"{self.get_name()}: Specify {name} by a value, a function to generate it, or it's size!") self._extra_kwargs = kwargs self._arg_cache = {} def get_name(self): if self.fullname is not None: return 'test_' + self.fullname test_name = 'test_' + self.constructor.__name__ if self.desc: test_name += '_' + self.desc return test_name def _unpack(self, value): if isinstance(value, torch.Tensor): return value elif is_iterable(value): return type(value)(self._unpack(v) for v in value) else: return value @property def constructor_args(self): return self._get_arg('constructor_args', True) @property def extra_args(self): return self._get_arg('extra_args', True) def _get_arg(self, name, unpack): assert name in self._required_arg_names if name not in self._arg_cache: fn_name = name + '_fn' size_name = name + '_size' if name in self._extra_kwargs: self._arg_cache[name] = self._extra_kwargs[name] elif fn_name in self._extra_kwargs: self._arg_cache[name] = 
self._extra_kwargs[fn_name]() else: assert size_name in self._extra_kwargs, \ f"Missing `{name}`, `{size_name}` or `{fn_name}` for {self.get_name()}" def map_tensor_sizes(sizes): if isinstance(sizes, list): return [map_tensor_sizes(s) for s in sizes] elif isinstance(sizes, torch.Tensor): return sizes.double() else: return torch.randn(sizes) self._arg_cache[name] = map_tensor_sizes(self._extra_kwargs[size_name]) return self._unpack(self._arg_cache[name]) if unpack else self._arg_cache[name] def _get_input(self, unpack=True): return self._get_arg('input', unpack) def __call__(self, test_case): raise NotImplementedError class ModuleTest(TestBase): @abstractmethod def _do_test(self, test_case: Any, module: nn.Module, input: Any) -> Any: raise NotImplementedError def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.jacobian_input = kwargs.get('jacobian_input', True) self.should_test_cuda = kwargs.get('test_cuda', True) self.should_test_pickle = kwargs.get('pickle', True) self.check_gradgrad = kwargs.get('check_gradgrad', True) self.FIXME_no_cuda_gradgrad_comparison = \ kwargs.get('FIXME_no_cuda_gradgrad_comparison', False) self.precision = kwargs.get('precision', 2e-4) self.check_forward_only = kwargs.get('check_forward_only', False) self.default_dtype = kwargs.get('default_dtype', None) if self.default_dtype is None: self.default_dtype = torch.get_default_dtype() def __call__(self, test_case): with set_default_dtype(self.default_dtype): module = self.constructor(*self.constructor_args) input = self._get_input() if self.reference_fn is not None: out = test_case._forward(module, input) ref_input = deepcopy(input) ref_module = deepcopy(module) expected_out = self.reference_fn(ref_input, test_case._get_parameters(module)[0], ref_module) test_case.assertEqual(out, expected_out, exact_dtype=False) if self.check_forward_only: return self.test_noncontig(test_case, module, input) if self.should_test_pickle: # TODO: do this with in-memory files as soon as torch.save will support it with tempfile.TemporaryFile() as f: test_case._forward(module, input) torch.save(module, f) f.seek(0) # weights_only=False as this is legacy code that saves the model module_copy = torch.load(f, weights_only=False) test_case.assertEqual(test_case._forward(module, input), test_case._forward(module_copy, input)) self._do_test(test_case, module, input) def noncontiguize(self, obj): if isinstance(obj, list): return [self.noncontiguize(o) for o in obj] elif isinstance(obj, tuple): return tuple(self.noncontiguize(o) for o in obj) tensor = obj ndim = tensor.dim() # Always making only the last dimension noncontiguous is easy to hide # bugs because .view(-1) will still work. So try to find a dim with size # > 1 and make that non-contiguous, i.e., stack + select on the # dimension directly after that. 
dim = ndim for d in range(ndim): if tensor.size(d) > 1: dim = d + 1 break noncontig = torch.stack([torch.empty_like(tensor), tensor], dim).select(dim, 1).detach() assert noncontig.numel() == 1 or noncontig.numel() == 0 or not noncontig.is_contiguous() noncontig.requires_grad = tensor.requires_grad return noncontig def test_noncontig(self, test_case, module, input): # check no scalars, can't make non-contig if isinstance(input, torch.Tensor) and input.dim() == 0: return if any(i.dim() == 0 for i in input if isinstance(i, torch.Tensor)): return test_case._zero_grad_parameters(module) test_case._zero_grad_input(input) with freeze_rng_state(): output = test_case._forward(module, input) if getattr(module, "return_indices", False): output = output[0] grad_output = output.new(output.shape).normal_() output = output.clone() d_input = deepcopy(test_case._backward(module, input, output, grad_output)) d_param = deepcopy(test_case._get_parameters(module)[1]) nc_input = self.noncontiguize(input) nc_grad_output = self.noncontiguize(grad_output) for contig_i, contig_g in product((True, False), repeat=2): i = input if contig_i else nc_input # Some ops, e.g., nn.Flatten, return gradient that shares # storage with the grad_output. Hence we copy here. go = deepcopy(grad_output if contig_g else nc_grad_output) test_case._zero_grad_parameters(module) test_case._zero_grad_input(i) with freeze_rng_state(): out = test_case._forward(module, i) if getattr(module, "return_indices", False): out = out[0] grad = test_case._backward(module, i, out, go) test_case.assertEqual(out, output) test_case.assertEqual(grad, d_input, atol=1e-4, rtol=0) test_case.assertEqual(test_case._get_parameters(module)[1], d_param) def test_cuda(self, test_case): if not TEST_CUDA or not self.should_test_cuda: raise unittest.SkipTest('Excluded from CUDA tests') with set_default_dtype(self.default_dtype): cpu_input = self._get_input() type_map = {torch.double: torch.float} cpu_input_tuple = cpu_input if isinstance(cpu_input, tuple) else (cpu_input,) is_any_input_complex = any(isinstance(t, torch.Tensor) and t.dtype.is_complex for t in cpu_input_tuple) gpu_input_tuple = to_gpu(cpu_input_tuple, type_map=type_map) cpu_module = self.constructor(*self.constructor_args) gpu_module = self.constructor(*self.constructor_args).float().cuda() cpu_param = test_case._get_parameters(cpu_module) gpu_param = test_case._get_parameters(gpu_module) for cpu_p, gpu_p in zip(cpu_param[0], gpu_param[0]): gpu_p.data.copy_(cpu_p) test_case._zero_grad_input(cpu_input_tuple) test_case._zero_grad_input(gpu_input_tuple) test_case._zero_grad_parameters(cpu_module) test_case._zero_grad_parameters(gpu_module) cpu_output = test_case._forward(cpu_module, cpu_input_tuple) gpu_output = test_case._forward(gpu_module, gpu_input_tuple) if getattr(cpu_module, "return_indices", False): cpu_output = cpu_output[0] gpu_output = gpu_output[0] test_case.assertEqual(cpu_output, gpu_output, atol=self.precision, rtol=0, exact_dtype=False) # Run backwards on CPU and GPU and compare results for _ in range(5): cpu_gradOutput = cpu_output.clone().normal_() gpu_gradOutput = cpu_gradOutput.type_as(gpu_output) cpu_gradInput = test_case._backward(cpu_module, cpu_input_tuple, cpu_output, cpu_gradOutput) gpu_gradInput = test_case._backward(gpu_module, gpu_input_tuple, gpu_output, gpu_gradOutput) test_case.assertEqual(cpu_gradInput, gpu_gradInput, atol=self.precision, rtol=0, exact_dtype=False) for cpu_d_p, gpu_d_p in zip(cpu_param[1], gpu_param[1]): test_case.assertEqual(cpu_d_p, gpu_d_p, 
atol=self.precision, rtol=0) # Run double-backwards on CPU and GPU and compare results if self.check_gradgrad and not self.FIXME_no_cuda_gradgrad_comparison: cpu_output = cpu_module(*cpu_input_tuple) gpu_output = gpu_module(*gpu_input_tuple) if getattr(cpu_module, "return_indices", False): cpu_output = cpu_output[0] gpu_output = gpu_output[0] cpu_gradOutput = torch.randn_like(cpu_output, requires_grad=True) gpu_gradOutput = cpu_gradOutput.type_as(gpu_output).detach() gpu_gradOutput.requires_grad = True cpu_gradInputs = torch.autograd.grad( cpu_output, cpu_input_tuple + tuple(cpu_module.parameters()), cpu_gradOutput, create_graph=True) gpu_gradInputs = torch.autograd.grad( gpu_output, gpu_input_tuple + tuple(gpu_module.parameters()), gpu_gradOutput, create_graph=True) for cpu_d_i, gpu_d_i in zip(cpu_gradInputs, gpu_gradInputs): test_case.assertEqual(cpu_d_i, gpu_d_i, atol=self.precision, rtol=0, exact_dtype=False) # We mix output into the second backwards computation so that # torch.autograd.grad doesn't complain that some inputs # are unreachable (which can happen if you differentiate # only on the gradient. if is_any_input_complex: outputs_cpu = cpu_output.sum().abs() + sum(x.sum().abs() for x in cpu_gradInputs) outputs_gpu = gpu_output.sum().abs() + sum(x.sum().abs() for x in gpu_gradInputs) else: outputs_cpu = cpu_output.sum() + sum(x.sum() for x in cpu_gradInputs) outputs_gpu = gpu_output.sum() + sum(x.sum() for x in gpu_gradInputs) cpu_gg = torch.autograd.grad( outputs_cpu, cpu_input_tuple + (cpu_gradOutput,) + tuple(cpu_module.parameters()), retain_graph=True) gpu_gg = torch.autograd.grad( outputs_gpu, gpu_input_tuple + (gpu_gradOutput,) + tuple(gpu_module.parameters()), retain_graph=True) test_case.assertEqual(cpu_gradInput, gpu_gradInput, atol=self.precision, rtol=0, exact_dtype=False) for cpu_d_p, gpu_d_p in zip(cpu_gg, gpu_gg): test_case.assertEqual(cpu_d_p, gpu_d_p, atol=self.precision, rtol=0, exact_dtype=False) self.test_noncontig(test_case, gpu_module, gpu_input_tuple) class InputVariableMixin: def _get_input(self): input = TestBase._get_input(self, False) # type: ignore[arg-type] def map_variables(i): if isinstance(i, torch.Tensor): if i.is_floating_point() or i.is_complex(): i.requires_grad = True return i else: return type(i)(map_variables(elem) for elem in i) return map_variables(input) class NewModuleTest(InputVariableMixin, ModuleTest): # type: ignore[misc] def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.cudnn = kwargs.get('cudnn', False) self.check_inplace = kwargs.get('check_inplace', False) self.check_gradgrad = kwargs.get('check_gradgrad', True) self.skip_double = kwargs.get('skip_double', False) self.skip_half = kwargs.get('skip_half', False) self.with_tf32 = kwargs.get('with_tf32', False) self.tf32_precision = kwargs.get('tf32_precision', 0.001) self.test_cpu = kwargs.get('test_cpu', True) self.has_sparse_gradients = kwargs.get('has_sparse_gradients', False) self.check_batched_grad = kwargs.get('check_batched_grad', True) self.gradcheck_fast_mode = kwargs.get('gradcheck_fast_mode', None) self.supports_forward_ad = kwargs.get('supports_forward_ad', False) self.supports_fwgrad_bwgrad = kwargs.get('supports_fwgrad_bwgrad', False) def _check_gradients(self, test_case, module, input_tuple): params = tuple(x for x in module.parameters()) num_inputs = len(input_tuple) def fn_to_gradcheck(*inputs_and_params, **kwargs): assert not kwargs return test_case._forward(module, inputs_and_params[:num_inputs]) # gradcheck doesn't support operators that 
take in dense inputs but # return sparse parameters. This only happens in the case of nn.Embedding # and nn.EmbeddingBag. Instead, we call `self.check_jacobian`, which # is a slightly different version of gradcheck that can handle this. if self.has_sparse_gradients: assert num_inputs == 1 test_input_jacobian = torch.is_floating_point(input_tuple[0]) test_case.check_jacobian(module, input_tuple[0], test_input_jacobian) else: test_case.assertTrue(gradcheck(fn_to_gradcheck, input_tuple + params, check_batched_grad=self.check_batched_grad, fast_mode=self.gradcheck_fast_mode, check_forward_ad=self.supports_forward_ad)) if self.check_gradgrad: test_case.assertTrue(gradgradcheck(fn_to_gradcheck, input_tuple + params, check_batched_grad=self.check_batched_grad, fast_mode=self.gradcheck_fast_mode, check_fwd_over_rev=self.supports_fwgrad_bwgrad)) def _do_test(self, test_case, module, input): num_threads = torch.get_num_threads() torch.set_num_threads(1) input_tuple = input if isinstance(input, tuple) else (input,) self._check_gradients(test_case, module, input_tuple) # check if module can be printed module.__repr__() if self.check_inplace: # check if the inplace variant of the module gives the same result # as the out-of-place # check_inplace doesn't support multiple input tensors, since we don't have any modules # that modify the inputs in-place and that accept more than one input assert len(input_tuple) == 1 input = input_tuple[0] module_ip = self.constructor(*self.constructor_args, inplace=True) input_version = input._version with freeze_rng_state(): output = module(input) test_case.assertEqual(input._version, input_version) input_ip = deepcopy(input) input_ip_clone = input_ip.clone() with freeze_rng_state(): output_ip = module_ip(input_ip_clone) test_case.assertNotEqual(input_ip_clone._version, input_version) test_case.assertEqual(output, output_ip) grad = output.data.clone().normal_() if input.grad is not None: with torch.no_grad(): input.grad.zero_() if input_ip.grad is not None: with torch.no_grad(): input_ip.grad.zero_() output.backward(grad) output_ip.backward(grad) test_case.assertEqual(input.grad, input_ip.grad) def assert_module_parameters_are(tensor_type, device_id=None): for p in module.parameters(): test_case.assertIsInstance(p, tensor_type) if device_id is not None: test_case.assertEqual(p.get_device(), device_id) if all(isinstance(t, torch.LongTensor) for t in input_tuple) and TEST_CUDA: # check that cuda() moves module parameters to correct GPU device, # and that float() casts parameters correctly input_tuple = tuple(t.cuda() for t in input_tuple) module.float().cuda() module(*input_tuple) assert_module_parameters_are(torch.cuda.FloatTensor, 0) # type: ignore[attr-defined] if torch.cuda.device_count() > 1: input_tuple = tuple(t.cuda(1) for t in input_tuple) module.cuda(1) with torch.cuda.device(1): module(*input_tuple) assert_module_parameters_are(torch.cuda.FloatTensor, 1) # type: ignore[attr-defined] else: # check that float()/double() casters work correctly def to_type(tensor, real, complex): if tensor.is_complex(): return tensor.to(complex) elif tensor.is_floating_point(): return tensor.to(real) else: return tensor def to_half(x): # TODO: torch.complex32 when properly supported return to_type(x, torch.float16, None) def to_single(x): return to_type(x, torch.float32, torch.complex64) def to_double(x): return to_type(x, torch.float64, torch.complex128) # to float input_tuple = tuple(to_single(t) for t in input_tuple) module.float() module(*input_tuple) 
assert_module_parameters_are(torch.FloatTensor) # and back to double input_tuple = tuple(to_double(t) for t in input_tuple) module.double() module(*input_tuple) assert_module_parameters_are(torch.DoubleTensor) if TEST_CUDA and self.should_test_cuda: # check that cuda() moves module parameters to correct GPU device, # and that float() casts parameters correctly # to GPU0 input_tuple = tuple(to_single(t).cuda() for t in input_tuple) module.float().cuda() module(*input_tuple) assert_module_parameters_are(torch.cuda.FloatTensor, 0) # type: ignore[attr-defined] # to CPU input_tuple = tuple(t.cpu() for t in input_tuple) module.cpu() module(*input_tuple) assert_module_parameters_are(torch.FloatTensor) # back to GPU0 input_tuple = tuple(t.cuda() for t in input_tuple) module.cuda() module(*input_tuple) assert_module_parameters_are(torch.cuda.FloatTensor, 0) # type: ignore[attr-defined] # test that forwards of module runs correctly without cuDNN if self.cudnn: with torch.backends.cudnn.flags(enabled=False): module(*input_tuple) assert_module_parameters_are(torch.cuda.FloatTensor, 0) # type: ignore[attr-defined] if torch.cuda.device_count() >= 2: # test cross-GPU transfer works # to GPU1 input_tuple = tuple(t.cuda(1) for t in input_tuple) module.cuda(1) with torch.cuda.device(1): module(*input_tuple) assert_module_parameters_are(torch.cuda.FloatTensor, 1) # type: ignore[attr-defined] if not self.skip_double: # test double() input_tuple = tuple(to_double(t).cuda() for t in input_tuple) module.double().cuda() module(*input_tuple) assert_module_parameters_are(torch.cuda.DoubleTensor, 0) # type: ignore[attr-defined] # test half() if not self.skip_half: input_tuple = tuple(to_half(t).cuda() for t in input_tuple) module.half().cuda() module(*input_tuple) assert_module_parameters_are(torch.cuda.HalfTensor, 0) # type: ignore[attr-defined] torch.set_num_threads(num_threads) def _get_target(self): return self._get_arg('target', False) @property def constructor_args(self): return self._get_arg('constructor_args', False) class CriterionTest(InputVariableMixin, TestBase): # type: ignore[misc] # TODO: check that criterions don't ignore grad_output _required_arg_names = TestBase._required_arg_names.union({'target'}) def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.should_test_cuda = kwargs.get('test_cuda', True) self.check_forward_only = kwargs.get('check_forward_only', False) self.check_gradgrad = kwargs.get('check_gradgrad', True) self.check_half = kwargs.get('check_half', True) self.check_bfloat16 = kwargs.get('check_bfloat16', False) self.check_complex = kwargs.get('check_complex', False) self.test_cpu = kwargs.get('test_cpu', True) self.with_tf32 = kwargs.get('with_tf32', True) self.tf32_precision = kwargs.get('tf32_precision', 0.001) self.check_batched_grad = kwargs.get('check_batched_grad', True) self.default_dtype = kwargs.get('default_dtype', None) if self.default_dtype is None: self.default_dtype = torch.get_default_dtype() def __call__(self, test_case): with set_default_dtype(self.default_dtype): module = self.constructor(*self.constructor_args) input = self._get_input() # Check that these methods don't raise errors module.__repr__() str(module) target = self._get_target() if self.reference_fn is not None: out = test_case._forward_criterion(module, input, target, extra_args=self.extra_args) ref_args = (deepcopy(input), deepcopy(target)) + self.extra_args + (module,) expected_out = self.reference_fn(*ref_args) test_case.assertEqual(out, expected_out) if self.check_forward_only: 
return params = tuple(x for x in module.parameters()) if not isinstance(input, tuple): inputs = (input,) + params + (target,) def apply_fn(input, target, *params): return module(input, target) else: inputs = input + params + (target,) def apply_fn(input1, input2, target, *params): # type: ignore[misc] return module(input1, input2, target) gradcheck(apply_fn, inputs, check_batched_grad=self.check_batched_grad) if self.check_gradgrad: gradgradcheck(apply_fn, inputs, check_batched_grad=self.check_batched_grad) def test_cuda(self, test_case, dtype, extra_args=None): def convert_dtype(obj, dtype, requires_grad=False): if isinstance(obj, torch.Tensor): return obj.detach().to(dtype=dtype).requires_grad_(requires_grad) elif isinstance(obj, tuple): return tuple(convert_dtype(o, dtype, requires_grad) for o in obj) else: return obj if not TEST_CUDA or not self.should_test_cuda: raise unittest.SkipTest('Excluded from CUDA tests') with set_default_dtype(self.default_dtype): cpu_input = self._get_input() cpu_target = self._get_target() cpu_module = self.constructor(*self.constructor_args) gpu_module = self.constructor(*self.constructor_args) # Convert input, target and module parameters to dtype cpu_input = convert_dtype(cpu_input, dtype, True) if cpu_target.is_floating_point() or cpu_target.is_complex(): cpu_target = convert_dtype(cpu_target, dtype) cpu_module.type(dtype) gpu_module.type(dtype) # GPU setup gpu_input = to_gpu(cpu_input) gpu_target = to_gpu(cpu_target) gpu_module.cuda() # torch.HalfTensor doesn't support most operations, converting back to default if dtype in {torch.half, torch.bfloat16}: cpu_input = self._get_input() cpu_target = self._get_target() # Loss modules with weights require consistent input/module weight types cpu_module = self.constructor(*self.constructor_args) cpu_output = test_case._forward_criterion(cpu_module, cpu_input, cpu_target, extra_args=extra_args) gpu_output = test_case._forward_criterion(gpu_module, gpu_input, gpu_target, extra_args=extra_args) # dtype used to be able to be None, so set precision in this way instead of a precision map test_case.assertEqual(cpu_output, gpu_output, atol=1e-1 if dtype in {torch.half, torch.bfloat16} else 4e-4, rtol=0, exact_dtype=False) cpu_gradInput = test_case._backward_criterion( cpu_module, cpu_input, cpu_output, cpu_target, extra_args=extra_args) gpu_gradInput = test_case._backward_criterion( gpu_module, gpu_input, gpu_output, gpu_target, extra_args=extra_args) # dtype used to be able to be None, so set precision in this way instead of a precision map test_case.assertEqual(cpu_gradInput, gpu_gradInput, atol=1e-1 if dtype in {torch.half, torch.bfloat16} else 4e-4, rtol=0, exact_dtype=False) def _get_target(self): return self._get_arg('target', False) @property def constructor_args(self): return self._get_arg('constructor_args', False) @property def extra_args(self): return self._get_arg('extra_args', False) def _test_bfloat16_ops(test_case, op, device, inp_dims=(), prec=1e-2, scale_factor=None): # fp32 compute input1 = torch.randn(inp_dims, dtype=torch.float32, device=device, requires_grad=True) if scale_factor is not None: input1 = (torch.rand(inp_dims, dtype=torch.bfloat16, device=device) * scale_factor).float().requires_grad_() out1 = op(input1) grad_input1 = torch.randn_like(out1, device=device) out1.backward(grad_input1) # bfloat16 compute op_bfp16 = op.bfloat16() input2 = input1.detach().bfloat16().requires_grad_() grad_input2 = grad_input1.bfloat16() out2 = op_bfp16(input2) out2.backward(grad_input2) 
    test_case.assertEqual(out1, out2, atol=prec, rtol=prec, exact_dtype=False)
    test_case.assertEqual(input1.grad.data, input2.grad.data, atol=prec, rtol=prec, exact_dtype=False)


def _test_module_empty_input(test_case, module, inp, check_size=True, inference=False):
    if not inference:
        inp.requires_grad_(True)
    out = module(inp)
    if not inference:
        gO = torch.rand_like(out)
        out.backward(gO)
    if check_size:
        test_case.assertEqual(out.size(), inp.size())
    if not inference:
        for p in module.parameters():
            if p.requires_grad:
                test_case.assertEqual(p.grad, torch.zeros_like(p.grad))
        test_case.assertEqual(inp.grad, torch.zeros_like(inp))


def _create_basic_net():
    class Layer(nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.layer_dummy_param = nn.Parameter(torch.empty(3, 5))
            self.layer_dummy_buf = nn.Buffer(torch.zeros(1, 3, 3, 7))

    class Net(nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.l1 = Layer()
            self.dummy_param = nn.Parameter(torch.empty(3, 5))
            self.dummy_buf = nn.Buffer(torch.zeros(7, 3, 3, 1))

    l = Layer()
    n = Net()
    s = nn.Sequential(n, n)
    return l, n, s
```
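
The helpers at the end of this file (`_test_module_empty_input`, `_create_basic_net`) are building blocks that individual test files call into. The sketch below is a rough illustration, not part of the file above: it assumes the helpers are importable from `torch.testing._internal.common_nn` (inferred from this file's location) and uses PyTorch's tensor-aware `TestCase` from `common_utils` so that `assertEqual` works on tensors; the test class itself is hypothetical.

```py
# Illustrative usage sketch -- the import paths are assumed from the file's location
# under torch/testing/_internal/; the test class below is hypothetical.
import torch
from torch import nn
from torch.testing._internal.common_nn import _create_basic_net, _test_module_empty_input
from torch.testing._internal.common_utils import TestCase, run_tests


class ExampleEmptyInputTest(TestCase):
    def test_linear_empty_batch(self):
        # A zero-size batch should flow through forward/backward and leave every
        # parameter gradient (and the input gradient) exactly zero.
        module = nn.Linear(5, 5)
        inp = torch.randn(0, 5)
        _test_module_empty_input(self, module, inp, check_size=True)

    def test_basic_net_shapes(self):
        layer, net, seq = _create_basic_net()
        self.assertEqual(layer.layer_dummy_param.shape, torch.Size([3, 5]))
        self.assertEqual(net.dummy_buf.shape, torch.Size([7, 3, 3, 1]))
        self.assertEqual(len(seq), 2)


if __name__ == "__main__":
    run_tests()
```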
==================================================================================================================================== SOURCE CODE FILE: common_optimizers.py LINES: 1 SIZE: 84.79 KB PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\common_optimizers.py ENCODING: utf-8 ```py # mypy: ignore-errors import functools import itertools import sys import unittest from copy import deepcopy from enum import Enum from typing import Any, Union import torch from torch import Tensor from torch.nn import Parameter from torch.optim import ( Adadelta, Adafactor, Adagrad, Adam, Adamax, AdamW, ASGD, LBFGS, NAdam, Optimizer, RAdam, RMSprop, Rprop, SGD, SparseAdam, ) from torch.optim.lr_scheduler import ( ConstantLR, ExponentialLR, LinearLR, PolynomialLR, ReduceLROnPlateau, StepLR, ) from torch.testing._internal.common_device_type import tol, toleranceOverride from torch.testing._internal.common_methods_invocations import DecorateInfo from torch.testing._internal.common_utils import ( _TestParametrizer, skipIfMPS, skipIfTorchDynamo, skipIfXpu, TEST_WITH_TORCHDYNAMO, ) from torch.utils._foreach_utils import _get_foreach_kernels_supported_devices class OptimizerInput: """Contains args / kwargs to be passed to an optimizer constructor.""" __slots__ = ["params", "kwargs", "desc"] def __init__( self, params: Union[ list[Parameter], list[Tensor], dict[Any, Any], list[dict[str, Any]] ], kwargs: dict[str, Any], desc: str = "", ): # params can be a list of Tensors OR param_groups OR None self.params = params self.kwargs = kwargs self.desc = desc def __repr__(self): return f"params={self.params}, kwargs={self.kwargs}, desc={self.desc}" class OptimizerErrorEnum(Enum): """Enumerates when an error is raised when testing optimizers.""" CONSTRUCTION_ERROR = 0 STEP_ERROR = 1 class ErrorOptimizerInput: """ An OptimizerInput that will cause the optimizer to throw an error when constructed. Includes the type and string of the resulting error. """ __slots__ = ["optimizer_error_input", "error_on", "error_type", "error_regex"] def __init__( self, optimizer_error_input, *, error_on=OptimizerErrorEnum.CONSTRUCTION_ERROR, error_type=RuntimeError, error_regex="", ): self.optimizer_error_input = optimizer_error_input self.error_on = error_on self.error_type = error_type self.error_regex = error_regex class OptimizerInfo: """Optimizer information to be used in testing.""" def __init__( self, optim_cls: Optimizer, # Class object for the Optimizer under test *, # Function to generate optimizer inputs EXCLUDING params. We delegate params responsibility # to the test using the OptimizerInfo. OptimizerInput.params is likely None. # Can optionally take in device to filter out certain unsupported configs optim_inputs_func, # Tuple of lambdas to generate LRScheduler instances to run with the optimizer for the # LRScheduler tests like test_forloop_goes_right_direction with_lrsched. # We DO NOT expect to thoroughly test LRSchedulers through the optimizers, so not every # LRScheduler configuration will be included. See test_lrscheduler.py for that instead. # A few optimizers like SGD and Adam will test more LRSchedulers. scheduler_inputs=( [ lambda opt: StepLR(opt, gamma=0.9, step_size=10), lambda opt: ReduceLROnPlateau(opt), ], ), # A subset of the global-cliquey flags (fused, foreach, differentiable) the optimizer # supports. See NOTE: [optimizer kwarg categories] for what global-cliquey means. supported_impls: tuple[str, ...] 
= ("foreach", "differentiable"), # A subset of all flags, signifying which ones were only supported after the # original optimizer had already been released. aka impls where we need to check BC. not_og_supported_flags: tuple[str, ...] = ( "foreach", "differentiable", "maximize", "capturable", ), # the optim supports passing in sparse gradients as well as dense grads supports_sparse: bool = False, # the optimizer constructor supports passing in capturable as a kwarg has_capturable_arg: bool = False, # the optim only supports one config: sparse grads w/ dense params, see SparseAdam only_supports_sparse_grads: bool = False, # Tuple of (optimizer kwargs, schedulers_constructors) specifically for sparse tests, # with especially tuned hyperparameters. These only apply if the optimizer supports # sparse parameters or grads. metadata_for_sparse=({}, []), # the optim supports complex parameters supports_complex: bool = True, # whether the optimizer.step() function requires a closure to be passed step_requires_closure: bool = False, # whether the optimizer supports per-param options with parameter groups supports_param_groups: bool = True, # whether the optimizer supports parameters on multiple devices supports_multiple_devices: bool = True, skips=(), # Indicates which tests to skip decorators=None, # Additional decorators to apply to generated tests optim_error_inputs_func=None, # Function to generate optim inputs that error supports_fused_on: tuple[str, ...] = (), ): self.optim_cls = optim_cls self.optim_inputs_func = optim_inputs_func self.scheduler_inputs = scheduler_inputs self.supported_impls = supported_impls self.not_og_supported_flags = not_og_supported_flags self.supports_sparse = supports_sparse self.has_capturable_arg = has_capturable_arg self.metadata_for_sparse = metadata_for_sparse self.only_supports_sparse_grads = only_supports_sparse_grads self.supports_complex = supports_complex self.step_requires_closure = step_requires_closure self.supports_param_groups = supports_param_groups self.supports_multiple_devices = supports_multiple_devices self.decorators = ( *(decorators if decorators else []), *(skips if skips else []), ) self.optim_error_inputs_func = optim_error_inputs_func self.supports_fused_on = supports_fused_on def get_decorators(self, test_class, test_name, device, dtype, param_kwargs): result = [] for decorator in self.decorators: if isinstance(decorator, DecorateInfo): if decorator.is_active( test_class, test_name, device, dtype, param_kwargs ): result.extend(decorator.decorators) else: result.append(decorator) return result @property def name(self): return self.optim_cls.__name__ class optims(_TestParametrizer): """Decorator for specifying a list of optimizers over which to run a test.""" def __init__(self, optim_info_iterable, dtypes=None): self.optim_info_list = list(optim_info_iterable) # optimizers aren't limited to be one dtype as parameters can have different dtypes # We default to torch.float32, but dtypes should be specified through passed in # parameters. self.dtypes = dtypes if dtypes is not None else [torch.float32] def _parametrize_test(self, test, generic_cls, device_cls): if device_cls is None: raise RuntimeError( "The @optims decorator is only intended to be used in a device-specific " "context; use it with instantiate_device_type_tests() instead of " "instantiate_parametrized_tests()" ) for optim_info, dtype in itertools.product(self.optim_info_list, self.dtypes): # Construct the test name; device / dtype parts are handled outside. 
# See [Note: device and dtype suffix placement] test_name = optim_info.name # Construct parameter kwargs to pass to the test. param_kwargs = {"optim_info": optim_info, "dtype": dtype} try: @functools.wraps(test) def test_wrapper(*args, **kwargs): return test(*args, **kwargs) decorator_fn = functools.partial( optim_info.get_decorators, generic_cls.__name__, test.__name__, device_cls.device_type, dtype, ) yield (test_wrapper, test_name, param_kwargs, decorator_fn) except Exception as ex: # Provides an error message for debugging before rethrowing the exception print( f"Failed to instantiate {test_name} for module {optim_info.name}!" ) raise ex # Helper function for generating error inputs for all optimizers, used below. def get_error_inputs_for_all_optims(device, dtype): if _get_device_type(device) == "cpu": sample_param = Parameter(torch.randn(1, device=device, dtype=dtype)) sample_param2 = Parameter(torch.randn(1, device=device, dtype=dtype)) return [ ErrorOptimizerInput( OptimizerInput( params=sample_param, kwargs={}, desc="invalid param type", ), error_type=TypeError, error_regex="params argument given to the optimizer should be an iterable of Tensors or dicts", ), ErrorOptimizerInput( OptimizerInput( params=[sample_param, sample_param], kwargs={}, desc="a param group cannot have duplicate parameters", ), error_type=UserWarning, error_regex=".*a parameter group with duplicate parameters.*", ), ErrorOptimizerInput( OptimizerInput( params=[{"params": sample_param}, {"params": sample_param}], kwargs={}, desc="duplicate parameters should not occur across param groups either", ), error_type=ValueError, error_regex="some parameters appear in more than one parameter group", ), ErrorOptimizerInput( OptimizerInput( params=None, kwargs=dict(lr=torch.tensor([0.001, 0.001])), desc="Tensor lr must be 1-element", ), error_type=ValueError, error_regex="Tensor lr must be 1-element", ), ErrorOptimizerInput( OptimizerInput( params=[("weight", sample_param), sample_param2], kwargs={}, desc="all optimizer params should be with/without names", ), error_type=ValueError, error_regex="all optimizer params should be with/without names. Some param names are missing", ), ErrorOptimizerInput( OptimizerInput( params=[ {"params": [sample_param], "lr": 1e-2}, {"params": [("weight", sample_param2)]}, ], kwargs={}, desc="all optimizer param groups should be with/without names.", ), error_type=ValueError, error_regex="all optimizer param groups should be with/without names. " "cannot add param group with names to the optimizer", ), ] else: return [] # ------------------------------------------------------------------------------------------ # NOTE: [optimizer kwarg categories] # We categorize optimizer kwargs as 3 types: # 1. optimizer-specific flags are like amsgrad or rho or beta, flags that are specific to # algorithms and thus only show up for certain optimizers. There are many of these, so I # do not bother gathering them all and listing them here. The converse to these would be # global flags that every optimizer ideally _should_ support. We break global flags into # 2 further categories and list them all below. # 2. global-friendly = ["lr", "weight_decay", "maximize", "capturable"] # global-friendly flags are global flags who play nicely with all other global flags, # i.e., are mutually exclusive in function. This means that any pair of the following # flags can be toggled at once (e.g., maximize and weight_decay). 
Furthermore, any of the # following flags theoretically can be enabled with ANY other global flag, including the # cliquey ones (e.g, capturable and foreach). # 3. global-cliquey = ["foreach", "fused", "differentiable"] # global-cliquey flags are global flags that do NOT coexist with other cliquey flags, # usually because they contradict each other in function. For example, one should not flip # both foreach AND fused to True, because they are two differing performance optimizations # in which you can only opt into one. # # The following optim_inputs_func_* sampling functions only return constructor combinations of # optimizer-specific and global-friendly flags. This is because we are confident they would mesh # well with additional kwargs. On the flip side of the same coin, we reserve setting the # global-cliquey flags to individual tests and fully expect tests to edit OptimizerInput.kwargs. def optim_inputs_func_adadelta(device, dtype=None): cuda_supported_configs = [ OptimizerInput(params=None, kwargs={"capturable": True}, desc="capturable"), OptimizerInput( params=None, kwargs={"weight_decay": 0.1, "capturable": True}, desc="capturable with weight decay", ), OptimizerInput( params=None, kwargs={"lr": torch.tensor(0.001), "capturable": True}, desc="Tensor lr with capturable", ), ] return [ OptimizerInput(params=None, kwargs={}, desc="default"), OptimizerInput(params=None, kwargs={"lr": 0.01}, desc="non-default lr"), OptimizerInput( params=None, kwargs={"weight_decay": 0.1}, desc="nonzero weight_decay" ), OptimizerInput(params=None, kwargs={"maximize": True}, desc="maximize"), OptimizerInput( params=None, kwargs={"weight_decay": 0.1, "maximize": True}, desc="maximize, weight_decay", ), OptimizerInput( params=None, kwargs={"rho": 0.95, "weight_decay": 0.9}, desc="rho" ), ] + (cuda_supported_configs if _get_device_type(device) == "cuda" else []) def optim_error_inputs_func_adadelta(device, dtype): error_inputs = get_error_inputs_for_all_optims(device, dtype) if _get_device_type(device) == "cpu": error_inputs += [ ErrorOptimizerInput( OptimizerInput( params=None, kwargs=dict(lr=1e-2, rho=1.1), desc="rho should be between 0 and 1", ), error_type=ValueError, error_regex="Invalid rho value: 1.1", ), ] return error_inputs def optim_inputs_func_adafactor(device, dtype=None): return [ OptimizerInput(params=None, kwargs={}, desc="default"), OptimizerInput( params=None, kwargs={"weight_decay": 0.1, "lr": 0.01}, desc="nonzero weight_decay", ), OptimizerInput( params=None, kwargs={"weight_decay": 0.1, "maximize": True}, desc="maximize", ), OptimizerInput( params=None, kwargs={"beta2_decay": -1.0}, desc="non-default beta2_decay", ), OptimizerInput( params=None, kwargs={"d": 1.5}, desc="non-default clipping threshold d", ), ] def optim_error_inputs_func_adafactor(device, dtype): error_inputs = get_error_inputs_for_all_optims(device, dtype) if _get_device_type(device) == "cpu": complex_param = torch.rand(2, 3, device=device, dtype=torch.complex64) complex_param.grad = torch.rand_like(complex_param) error_inputs += [ ErrorOptimizerInput( OptimizerInput( params=None, kwargs=dict(eps=(-1e-30, 1e-3)), desc="epsilon1 should be >= 0", ), error_type=ValueError, error_regex="epsilon1 should be >= 0", ), ErrorOptimizerInput( OptimizerInput( params=None, kwargs=dict(d=0.0), desc="invalid d", ), error_type=ValueError, error_regex="Clipping threshold d should be >= 1", ), ErrorOptimizerInput( OptimizerInput( params=None, kwargs=dict(beta2_decay=0.8), desc="invalid beta2_decay", ), error_type=ValueError, 
error_regex="beta2_decay should be <= 0", ), ErrorOptimizerInput( OptimizerInput( params=[complex_param], kwargs=dict(), desc="does not support complex parameters", ), error_type=RuntimeError, error_regex="Adafactor does not support complex parameters", error_on=OptimizerErrorEnum.STEP_ERROR, ), ] return error_inputs def optim_inputs_func_adagrad(device, dtype=None): return [ OptimizerInput(params=None, kwargs={}, desc="default"), OptimizerInput( params=None, kwargs={"weight_decay": 0.1}, desc="nonzero weight_decay" ), OptimizerInput( params=None, kwargs={"weight_decay": 0.1, "maximize": True}, desc="maximize", ), OptimizerInput(params=None, kwargs={"lr": 0.1}, desc="non-default lr"), OptimizerInput( params=None, kwargs={"initial_accumulator_value": 0.1, "weight_decay": 0.1}, desc="initial_accumulator_value", ), OptimizerInput( params=None, kwargs={"lr": 0.1, "lr_decay": 0.5, "weight_decay": 0.1}, desc="lr_decay", ), # TODO: Move out to testing in param_group? OptimizerInput( params=None, kwargs={"lr": torch.tensor(0.001)}, desc="Tensor lr", ), ] def optim_error_inputs_func_adagrad(device, dtype): error_inputs = get_error_inputs_for_all_optims(device, dtype) if _get_device_type(device) == "cpu": error_inputs += [ ErrorOptimizerInput( OptimizerInput( params=None, kwargs=dict(lr=1e-2, lr_decay=-0.5), desc="lr_decay must be bigger than 0", ), error_type=ValueError, error_regex="Invalid lr_decay value: -0.5", ), ] return error_inputs # TODO: consider tensor LR! See multi_tensor_optimizer_configs in test_optim.py --> tensor LR should work # with all implementation code paths... def optim_inputs_func_adam(device, dtype=None): cuda_supported_configs = [ OptimizerInput(params=None, kwargs={"capturable": True}, desc="capturable"), OptimizerInput( params=None, kwargs={"weight_decay": 0.1, "amsgrad": True, "capturable": True}, desc="capturable, amsgrad", ), OptimizerInput( params=None, kwargs={"lr": torch.tensor(0.001), "amsgrad": True, "capturable": True}, desc="Tensor lr with capturable and amsgrad", ), OptimizerInput( params=None, kwargs={ "lr": torch.tensor(0.001), "betas": (torch.tensor(0.9), torch.tensor(0.99)), "amsgrad": True, "capturable": True, }, desc="Tensor lr, Tensor betas, with capturable and amsgrad", ), OptimizerInput( params=None, kwargs={ "lr": torch.tensor(0.001), "betas": (torch.tensor(0.9), torch.tensor(0.99)), "amsgrad": False, "capturable": True, }, desc="Tensor lr, Tensor betas, with capturable", ), ] mps_supported_configs = [ OptimizerInput( params=None, kwargs={"lr": torch.tensor(0.01)}, desc="Tensor lr" ), ] total = ( [ OptimizerInput(params=None, kwargs={}, desc="default"), OptimizerInput(params=None, kwargs={"lr": 0.01}, desc="non-default lr"), OptimizerInput( params=None, kwargs={"weight_decay": 0.1}, desc="nonzero weight_decay" ), OptimizerInput( params=None, kwargs={"weight_decay": 0.1, "maximize": True}, desc="maximize", ), OptimizerInput( params=None, kwargs={"weight_decay": 0.1, "amsgrad": True}, desc="amsgrad", ), ] + (cuda_supported_configs if _get_device_type(device) == "cuda" else []) + (mps_supported_configs if _get_device_type(device) == "mps" else []) ) if dtype in (torch.float16,): for input in total: """ Too small eps will make denom to be zero for low precision dtype denom = (exp_avg_sq.sqrt() / bias_correction2_sqrt).add_(eps) For example, >>> a tensor([0.], dtype=torch.float16) >>> a + 1e-8 tensor([0.], dtype=torch.float16) """ input.kwargs["eps"] = 0.1 return total def optim_error_inputs_func_adam(device, dtype): error_inputs = 
get_error_inputs_for_all_optims(device, dtype) if _get_device_type(device) == "cpu": error_inputs += [ ErrorOptimizerInput( OptimizerInput( params=None, kwargs=dict(lr=1e-2, betas=(1.0, 0.0)), desc="beta1 should be between 0 and 1", ), error_type=ValueError, error_regex="Invalid beta parameter at index 0: 1.0", ), ErrorOptimizerInput( OptimizerInput( params=None, kwargs=dict(lr=1e-2, weight_decay=-1), desc="weight_decay should > 0", ), error_type=ValueError, error_regex="Invalid weight_decay value: -1", ), ErrorOptimizerInput( OptimizerInput( params=None, kwargs=dict(lr=torch.tensor(0.001), foreach=True), desc="lr as Tensor doesn't work with foreach & not capturable", ), error_type=ValueError, error_regex="lr as a Tensor is not supported for capturable=False and foreach=True", ), ErrorOptimizerInput( OptimizerInput( params=None, kwargs=dict(lr=1e-2, betas=(0.9, torch.tensor(0.99))), desc="betas must be either both floats or both Tensors", ), error_type=ValueError, error_regex="betas must be either both floats or both Tensors", ), ErrorOptimizerInput( OptimizerInput( params=None, kwargs=dict(lr=1e-2, betas=(torch.tensor(0.9), 0.99)), desc="betas must be either both floats or both Tensors", ), error_type=ValueError, error_regex="betas must be either both floats or both Tensors", ), ErrorOptimizerInput( OptimizerInput( params=None, kwargs=dict( lr=1e-2, betas=(torch.tensor(0.9), torch.tensor(0.99)), foreach=True, ), desc=r"betas\[0\] as a Tensor is not supported for capturable=False and foreach=True", ), error_type=ValueError, error_regex=r"betas\[0\] as a Tensor is not supported for capturable=False and foreach=True", ), ] if _get_device_type(device) == "cuda": sample_tensor = torch.empty((), device=device, dtype=dtype) error_inputs += [ ErrorOptimizerInput( OptimizerInput( params=[sample_tensor], kwargs={"foreach": True, "fused": True}, desc="`fused` and `foreach` cannot be `True` together", ), error_type=RuntimeError, error_regex="`fused` and `foreach` cannot be `True` together", ), ErrorOptimizerInput( OptimizerInput( params=[sample_tensor], kwargs={"fused": True, "differentiable": True}, desc="`fused` does not support `differentiable`", ), error_type=RuntimeError, error_regex="`fused` does not support `differentiable`", ), ] return error_inputs def optim_inputs_func_adamax(device, dtype=None): cuda_supported_configs = [ OptimizerInput(params=None, kwargs={"capturable": True}, desc="capturable"), OptimizerInput( params=None, kwargs={"weight_decay": 0.9, "maximize": True, "capturable": True}, desc="capturable, maximize, weight_decay", ), OptimizerInput( params=None, kwargs={"weight_decay": 0, "maximize": True, "capturable": True}, desc="capturable, maximize", ), OptimizerInput( params=None, kwargs={"weight_decay": 0.9, "maximize": False, "capturable": True}, desc="capturable, weight_decay", ), OptimizerInput( params=None, kwargs={ "lr": torch.tensor(0.001), "weight_decay": 0.9, "maximize": False, "capturable": True, }, desc="capturable, weight_decay, tensor LR", ), ] return [ OptimizerInput(params=None, kwargs={}, desc="default"), OptimizerInput(params=None, kwargs={"lr": 0.1}, desc="non-default lr"), OptimizerInput( params=None, kwargs={"weight_decay": 0.1}, desc="nonzero weight_decay" ), OptimizerInput( params=None, kwargs={"maximize": True}, desc="maximize", ), OptimizerInput( params=None, kwargs={"weight_decay": 0.1, "maximize": True}, desc="maximize, weight_decay", ), ] + (cuda_supported_configs if _get_device_type(device) == "cuda" else []) def optim_error_inputs_func_adamax(device, 
dtype): error_inputs = get_error_inputs_for_all_optims(device, dtype) if _get_device_type(device) == "cpu": error_inputs += [ ErrorOptimizerInput( OptimizerInput( params=None, kwargs=dict(lr=1e-2, betas=(0.0, 1.0)), desc="beta2 should be between 0 and 1", ), error_type=ValueError, error_regex="Invalid beta parameter at index 1: 1.0", ), ] return error_inputs def optim_inputs_func_adamw(device, dtype=None): return optim_inputs_func_adam(device, dtype) def optim_error_inputs_func_adamw(device, dtype): return optim_error_inputs_func_adam(device, dtype) def optim_inputs_func_asgd(device, dtype=None): cuda_supported_configs = [ OptimizerInput(params=None, kwargs={"capturable": True}, desc="capturable"), OptimizerInput( params=None, kwargs={"maximize": True, "capturable": True}, desc="maximize, capturable", ), OptimizerInput( params=None, kwargs={"weight_decay": 0.1, "capturable": True}, desc="weight_decay, capturable", ), OptimizerInput( params=None, kwargs={"weight_decay": 0.1, "maximize": True, "capturable": True}, desc="maximize, weight_decay, capturable", ), OptimizerInput( params=None, kwargs={ "lr": torch.tensor(0.001), "weight_decay": 0.1, "maximize": True, "capturable": True, }, desc="maximize, weight_decay, capturable, tensor LR", ), ] return [ OptimizerInput(params=None, kwargs={}, desc="default"), OptimizerInput(params=None, kwargs={"lambd": 0.1}, desc="non-default lambd"), OptimizerInput(params=None, kwargs={"lr": 0.02}, desc="non-default lr"), OptimizerInput(params=None, kwargs={"t0": 100}, desc="t0"), OptimizerInput(params=None, kwargs={"maximize": True}, desc="maximize"), OptimizerInput( params=None, kwargs={"weight_decay": 0.1}, desc="nonzero weight_decay" ), OptimizerInput( params=None, kwargs={"weight_decay": 0.1, "maximize": True}, desc="maximize, nonzero weight_decay", ), ] + (cuda_supported_configs if _get_device_type(device) == "cuda" else []) def optim_error_inputs_func_asgd(device, dtype): error_inputs = get_error_inputs_for_all_optims(device, dtype) if _get_device_type(device) == "cpu": error_inputs += [ ErrorOptimizerInput( OptimizerInput( params=None, kwargs=dict(lr=1e-2, weight_decay=-0.5), desc="weight_decay should > 0", ), error_type=ValueError, error_regex="Invalid weight_decay value: -0.5", ), ] return error_inputs def optim_inputs_func_lbfgs(device, dtype=None): return [ OptimizerInput(params=None, kwargs={}, desc="default"), OptimizerInput(params=None, kwargs={"lr": 0.01}, desc="non-default lr"), OptimizerInput( params=None, kwargs={"lr": torch.tensor(0.001)}, desc="Tensor lr" ), OptimizerInput( params=None, kwargs={"tolerance_grad": 1e-6}, desc="tolerance_grad" ), OptimizerInput( params=None, kwargs={"line_search_fn": "strong_wolfe"}, desc="strong_wolfe", ), ] def optim_error_inputs_func_lbfgs(device, dtype): error_inputs = get_error_inputs_for_all_optims(device, dtype) return error_inputs def optim_inputs_func_nadam(device, dtype=None): cuda_supported_configs = [ OptimizerInput(params=None, kwargs={"capturable": True}, desc="capturable"), OptimizerInput( params=None, kwargs={"weight_decay": 0.9, "momentum_decay": 6e-3, "capturable": True}, desc="weight_decay, capturable", ), OptimizerInput( params=None, kwargs={ "weight_decay": 0.9, "momentum_decay": 6e-3, "decoupled_weight_decay": True, "capturable": True, }, desc="decoupled_weight_decay, capturable", ), OptimizerInput( params=None, kwargs={ "lr": torch.tensor(0.001), "weight_decay": 0.9, "momentum_decay": 6e-3, "decoupled_weight_decay": True, "capturable": True, }, desc="decoupled_weight_decay, capturable", 
), ] return [ OptimizerInput(params=None, kwargs={}, desc="default"), OptimizerInput(params=None, kwargs={"lr": 1e-3}, desc="non-default lr"), OptimizerInput( params=None, kwargs={"momentum_decay": 6e-3}, desc="non-zero momentum_decay", ), OptimizerInput( params=None, kwargs={ "weight_decay": 0.1, }, desc="weight_decay", ), OptimizerInput( params=None, kwargs={"weight_decay": 0.1, "momentum_decay": 6e-3}, desc="weight_decay, momentum_decay", ), OptimizerInput( params=None, kwargs={ "weight_decay": 0.1, "momentum_decay": 6e-3, "decoupled_weight_decay": True, }, desc="decoupled_weight_decay", ), OptimizerInput( params=None, kwargs={"weight_decay": 0.1, "maximize": True}, desc="maximize", ), ] + (cuda_supported_configs if _get_device_type(device) == "cuda" else []) def optim_error_inputs_func_nadam(device, dtype): error_inputs = get_error_inputs_for_all_optims(device, dtype) if _get_device_type(device) == "cpu": error_inputs += [ ErrorOptimizerInput( OptimizerInput( params=None, kwargs=dict(lr=1e-2, betas=(1.0, 0.0)), desc="beta1 should be between 0 and 1", ), error_type=ValueError, error_regex="Invalid beta parameter at index 0: 1.0", ), ErrorOptimizerInput( OptimizerInput( params=None, kwargs=dict(lr=1e-2, momentum_decay=-0.2), desc="momentum_decay should > 0", ), error_type=ValueError, error_regex="Invalid momentum_decay value: -0.2", ), ] return error_inputs # Weird story bro, NAdam and RAdam do not have maximize. def optim_inputs_func_radam(device=None, dtype=None): cuda_supported_configs = [ OptimizerInput(params=None, kwargs={"capturable": True}, desc="capturable"), OptimizerInput( params=None, kwargs={ "capturable": True, "weight_decay": 0.1, }, desc="capturable, weight_decay", ), OptimizerInput( params=None, kwargs={ "capturable": True, "weight_decay": 0.1, "decoupled_weight_decay": True, }, desc="capturable, weight_decay, decoupled_weight_decay", ), OptimizerInput( params=None, kwargs={ "lr": torch.tensor(0.001), "capturable": True, "weight_decay": 0.1, "decoupled_weight_decay": True, }, desc="capturable, weight_decay, decoupled_weight_decay, tensor LR", ), ] return [ OptimizerInput(params=None, kwargs={}, desc="default"), OptimizerInput(params=None, kwargs={"lr": 2e-3}, desc="non-default lr"), OptimizerInput(params=None, kwargs={"eps": 1e-6}, desc="non-default eps"), OptimizerInput( params=None, kwargs={"weight_decay": 0.1}, desc="nonzero weight_decay" ), OptimizerInput( params=None, kwargs={"weight_decay": 0.1, "decoupled_weight_decay": True}, desc="decoupled_weight_decay", ), OptimizerInput( params=None, kwargs={"weight_decay": 0.1, "maximize": True}, desc="maximize", ), ] + (cuda_supported_configs if _get_device_type(device) == "cuda" else []) def optim_error_inputs_func_radam(device, dtype): error_inputs = get_error_inputs_for_all_optims(device, dtype) if _get_device_type(device) == "cpu": error_inputs += [ ErrorOptimizerInput( OptimizerInput( params=None, kwargs=dict(lr=1e-2, betas=(1.0, 0.0)), desc="beta1 should be between 0 and 1", ), error_type=ValueError, error_regex="Invalid beta parameter at index 0: 1.0", ), ErrorOptimizerInput( OptimizerInput( params=None, kwargs=dict(lr=1e-2, weight_decay=-1), desc="weight_decay should > 0", ), error_type=ValueError, error_regex="Invalid weight_decay value: -1", ), ] return error_inputs def optim_inputs_func_rmsprop(device, dtype=None): cuda_supported_configs = [ OptimizerInput(params=None, kwargs={"capturable": True}, desc="capturable"), OptimizerInput( params=None, kwargs={"weight_decay": 0.1, "maximize": True, "capturable": True}, 
desc="capturable, maximize", ), OptimizerInput( params=None, kwargs={"lr": torch.tensor(0.001), "capturable": True}, desc="Tensor lr with capturable", ), ] return [ OptimizerInput(params=None, kwargs={}, desc="default"), OptimizerInput(params=None, kwargs={"lr": 1e-3}, desc="non-default lr"), OptimizerInput( params=None, kwargs={"weight_decay": 0.1}, desc="nonzero weight_decay" ), OptimizerInput( params=None, kwargs={ "maximize": True, }, desc="maximize", ), OptimizerInput( params=None, kwargs={"weight_decay": 0.1, "centered": True}, desc="centered", ), OptimizerInput( params=None, kwargs={ "maximize": True, "weight_decay": 0.1, }, desc="maximize, weight_decay", ), OptimizerInput( params=None, kwargs={"weight_decay": 0.1, "centered": True, "momentum": 0.1}, desc="momentum", ), OptimizerInput( params=None, kwargs={ "weight_decay": 0.1, "centered": True, "momentum": 0.1, "maximize": True, }, desc="maximize, centered, weight_decay, w/ momentum", ), ] + (cuda_supported_configs if _get_device_type(device) == "cuda" else []) def optim_error_inputs_func_rmsprop(device, dtype): error_inputs = get_error_inputs_for_all_optims(device, dtype) if _get_device_type(device) == "cpu": error_inputs += [ ErrorOptimizerInput( OptimizerInput( params=None, kwargs=dict(lr=1e-2, momentum=-1.0), desc="momentum should be between 0 and 1", ), error_type=ValueError, error_regex="Invalid momentum value: -1.0", ), ] return error_inputs def optim_inputs_func_rprop(device, dtype=None): cuda_supported_configs = [ OptimizerInput(params=None, kwargs={"capturable": True}, desc="capturable"), OptimizerInput( params=None, kwargs={"lr": torch.tensor(0.001), "capturable": True}, desc="Tensor lr with capturable", ), ] return [ OptimizerInput(params=None, kwargs={}, desc="default"), OptimizerInput(params=None, kwargs={"lr": 2e-4}, desc="non-default lr"), OptimizerInput( params=None, kwargs={"etas": (0.5, 1.5)}, desc="non-default etas" ), OptimizerInput( params=None, kwargs={"step_sizes": (2e-6, 100)}, desc="non-default step_sizes", ), OptimizerInput(params=None, kwargs={"maximize": True}, desc="maximize"), ] + (cuda_supported_configs if _get_device_type(device) == "cuda" else []) def optim_error_inputs_func_rprop(device, dtype): error_inputs = get_error_inputs_for_all_optims(device, dtype) if _get_device_type(device) == "cpu": error_inputs += [ ErrorOptimizerInput( OptimizerInput( params=None, kwargs=dict(lr=1e-2, etas=(1.0, 0.5)), desc="0 < eta1 < 1 < eta2", ), error_type=ValueError, error_regex="Invalid eta values: 1.0, 0.5", ), ] return error_inputs def optim_inputs_func_sgd(device, dtype=None): return [ OptimizerInput(params=None, kwargs={}, desc="default"), OptimizerInput(params=None, kwargs={"lr": 1e-2}, desc="non-default lr"), OptimizerInput( params=None, kwargs={"lr": torch.tensor(0.001)}, desc="tensor lr" ), OptimizerInput( params=None, kwargs={"weight_decay": 0.5}, desc="non-zero weight_decay" ), OptimizerInput(params=None, kwargs={"momentum": 0.9}, desc="momentum"), OptimizerInput( params=None, kwargs={"weight_decay": 0.1, "maximize": True}, desc="maximize", ), OptimizerInput( params=None, kwargs={"momentum": 0.9, "dampening": 0.5}, desc="dampening", ), OptimizerInput( params=None, kwargs={"momentum": 0.9, "weight_decay": 0.1}, desc="weight_decay w/ momentum", ), OptimizerInput( params=None, kwargs={"momentum": 0.9, "nesterov": True, "weight_decay": 0.1}, desc="nesterov", ), ] def optim_error_inputs_func_sgd(device, dtype): error_inputs = get_error_inputs_for_all_optims(device, dtype) if _get_device_type(device) == 
"cpu": error_inputs += [ ErrorOptimizerInput( OptimizerInput( params=None, kwargs=dict(lr=1e-2, momentum=-0.5), desc="momentum should be between 0 and 1", ), error_type=ValueError, error_regex="Invalid momentum value: -0.5", ), ] return error_inputs def optim_inputs_func_sparseadam(device, dtype=None): return [ OptimizerInput(params=None, kwargs={}, desc="default"), OptimizerInput( params=None, kwargs={"lr": 0.01}, desc="non-default lr" ), # TODO: Move out to testing in param_group? OptimizerInput( params=None, kwargs={"lr": torch.tensor(0.001)}, desc="Tensor lr" ), OptimizerInput(params=None, kwargs={"maximize": True}, desc="maximize"), ] def optim_error_inputs_func_sparseadam(device, dtype): error_inputs = get_error_inputs_for_all_optims(device, dtype) if _get_device_type(device) == "cpu": error_inputs += [ ErrorOptimizerInput( OptimizerInput( params=None, kwargs=dict(lr=1e-2, betas=(1.0, 0.0)), desc="beta1 should be between 0 and 1", ), error_type=ValueError, error_regex="Invalid beta parameter at index 0: 1.0", ), ErrorOptimizerInput( OptimizerInput( params=[ torch.zeros( 3, layout=torch.sparse_coo, device=device, dtype=dtype ) ], kwargs={}, desc="dense params required", ), error_type=ValueError, error_regex="SparseAdam requires dense parameter tensors", ), ErrorOptimizerInput( OptimizerInput( params=[ { "params": [ torch.zeros( 3, layout=torch.sparse_coo, device=device, dtype=dtype, ) ] } ], kwargs={}, desc="dense params required in param_groups", ), error_type=ValueError, error_regex="SparseAdam requires dense parameter tensors", ), ErrorOptimizerInput( OptimizerInput( params=[torch.rand(2, 3, device=device, dtype=torch.complex64)], kwargs={}, desc="complex not supported", ), error_type=ValueError, error_regex="SparseAdam does not support complex parameters", ), ] return error_inputs def _get_device_type(device: Union[str, torch.device]) -> str: # Returns the device type as a string, e.g., "cpu" or "cuda" if isinstance(device, torch.device): device = str(device.type) assert isinstance(device, str) return device.split(":")[0] def _get_optim_inputs_including_global_cliquey_kwargs( device, dtype, optim_info, skip=() ) -> list[OptimizerInput]: """ Return a list of all configs for a given optimizer as a list of OptimizerInputs, including configs that have supported global cliquey kwargs (foreach, fused, differentiable) based on optim_info.supported_impls. The configs (optim_inputs) returned by optim_info.optim_inputs_func(...) intentionally do NOT include global cliquey kwargs to give flexibility to tests. For example, testing correctness between toggling foreach on and off is now trivial. That said, we sometimes want to test for all possible configs on an optimizer including all supported flags, so this helper returns all optim inputs. 
""" assert all( x in ["foreach", "fused", "differentiable"] for x in skip ), "skip must be a subset of ['foreach', 'fused', 'differentiable']" optim_inputs = optim_info.optim_inputs_func(device) supported_impls = tuple( x for x in optim_info.supported_impls if x not in skip and (_get_device_type(device) in optim_info.supports_fused_on or x != "fused") and ( _get_device_type(device) in _get_foreach_kernels_supported_devices() or x != "foreach" ) ) all_optim_inputs = [] for optim_input in optim_inputs: # Add the base config where all the flags are False base_kwargs = deepcopy(optim_input.kwargs) if len(supported_impls) != 0: for flag in supported_impls: base_kwargs[flag] = False all_optim_inputs.append( OptimizerInput(params=None, kwargs=base_kwargs, desc=optim_input.desc) ) else: all_optim_inputs.append(optim_input) # Add a config for when each of the global cliquey kwargs is True # Note that in [optimizer kwarg categories], these kwargs are mutually # exclusive, so we do not need to product them together. for flag in supported_impls: new_kwargs = deepcopy(base_kwargs) new_kwargs[flag] = True all_optim_inputs.append( OptimizerInput( params=None, kwargs=new_kwargs, desc=f"{optim_input.desc} & {flag}" ) ) return all_optim_inputs # Database of OptimizerInfo entries in alphabetical order. optim_db: list[OptimizerInfo] = [ OptimizerInfo( Adadelta, optim_inputs_func=optim_inputs_func_adadelta, optim_error_inputs_func=optim_error_inputs_func_adadelta, supported_impls=("foreach", "differentiable"), has_capturable_arg=True, skips=( DecorateInfo( skipIfTorchDynamo("See #116028"), "TestOptimRenewed", "test_set_default_dtype_works_with_foreach", ), DecorateInfo( skipIfTorchDynamo( "Accessing grad.real errors, see https://github.com/pytorch/pytorch/issues/117184" ), "TestOptimRenewed", "test_complex_2d", ), # Note on tolerances: # test_correctness_Adadelta_cuda_float32 # Mismatched elements: 10 / 100 (10.0%) # Greatest absolute difference: 4.838220775127411e-05 at index (7, 4) (up to 1e-05 allowed) # Greatest relative difference: 0.007270356640219688 at index (7, 2) (up to 1e-05 allowed) # This is due to floating point ordering error + usage of sqrt DecorateInfo( toleranceOverride( { torch.float32: tol( rtol=5.5e-4, atol=5e-5, ) } ), "CompiledOptimizerParityTests", "test_correctness", ), DecorateInfo( skipIfTorchDynamo( "This test uses mocks, which dynamo does not support" ), "TestOptimRenewed", "test_defaults_changed_to_foreach", ), ), ), OptimizerInfo( Adafactor, optim_inputs_func=optim_inputs_func_adafactor, optim_error_inputs_func=optim_error_inputs_func_adafactor, supported_impls=("foreach",), not_og_supported_flags=("foreach",), supports_complex=False, skips=( DecorateInfo( unittest.skip("See #133268 regarding dtype being None"), "CompiledOptimizerParityTests", "test_correctness", device_type="cuda", active_if=lambda kwargs: kwargs.get("use_closure", False), ), DecorateInfo( skipIfTorchDynamo("See #133268 regarding dtype being None"), "TestOptimRenewed", "test_can_load_older_state_dict", device_type="cuda", ), DecorateInfo( skipIfTorchDynamo("See #133268 regarding dtype being None"), "TestOptimRenewed", "test_deepcopy_copies_all_public_attrs", device_type="cuda", ), DecorateInfo( skipIfTorchDynamo("See #133268 regarding dtype being None"), "TestOptimRenewed", "test_foreach_large_tensor", ), DecorateInfo( skipIfTorchDynamo("See #133268 regarding dtype being None"), "TestOptimRenewed", "test_foreach_matches_forloop", ), DecorateInfo( skipIfTorchDynamo("See #133268 regarding dtype being None"), 
"TestOptimRenewed", "test_load_nontensor_step", device_type="cuda", ), DecorateInfo( skipIfTorchDynamo("See #133268 regarding dtype being None"), "TestOptimRenewed", "test_mixed_device_dtype", ), DecorateInfo( skipIfTorchDynamo("See #133268 regarding dtype being None"), "TestOptimRenewed", "test_param_groups_lr", device_type="cuda", ), DecorateInfo( skipIfTorchDynamo("See #133268 regarding dtype being None"), "TestOptimRenewed", "test_param_groups_weight_decay", device_type="cuda", ), DecorateInfo( skipIfTorchDynamo("See #133268 regarding dtype being None"), "TestOptimRenewed", "test_peak_memory_foreach", ), DecorateInfo( skipIfTorchDynamo("See #133268 regarding dtype being None"), "TestOptimRenewed", "test_save_load_equality_with_weights_only", device_type="cuda", ), DecorateInfo( skipIfTorchDynamo("See #116028 regarding copy not supported"), "TestOptimRenewed", "test_set_default_dtype_works_with_foreach", ), DecorateInfo( skipIfTorchDynamo("See #133268 regarding dtype being None"), "TestOptimRenewed", "test_state_dict_deterministic", device_type="cuda", ), DecorateInfo( skipIfTorchDynamo("See #133268 regarding dtype being None"), "TestOptimRenewed", "test_step_is_noop_for_zero_grads", device_type="cuda", ), DecorateInfo( unittest.skip("See #133268 regarding dtype being None"), "CompiledOptimizerParityTests", "test_correctness", device_type="xpu", ), DecorateInfo( skipIfTorchDynamo("See #133268 regarding dtype being None"), "TestOptimRenewed", "test_can_load_older_state_dict", device_type="xpu", ), DecorateInfo( skipIfTorchDynamo("See #133268 regarding dtype being None"), "TestOptimRenewed", "test_deepcopy_copies_all_public_attrs", device_type="xpu", ), DecorateInfo( skipIfTorchDynamo("See #133268 regarding dtype being None"), "TestOptimRenewed", "test_load_nontensor_step", device_type="xpu", ), DecorateInfo( skipIfTorchDynamo("See #133268 regarding dtype being None"), "TestOptimRenewed", "test_param_groups_lr", device_type="xpu", ), DecorateInfo( skipIfTorchDynamo("See #133268 regarding dtype being None"), "TestOptimRenewed", "test_param_groups_weight_decay", device_type="xpu", ), DecorateInfo( skipIfTorchDynamo("See #133268 regarding dtype being None"), "TestOptimRenewed", "test_save_load_equality_with_weights_only", device_type="xpu", ), DecorateInfo( skipIfTorchDynamo("See #133268 regarding dtype being None"), "TestOptimRenewed", "test_state_dict_deterministic", device_type="xpu", ), DecorateInfo( skipIfTorchDynamo("See #133268 regarding dtype being None"), "TestOptimRenewed", "test_step_is_noop_for_zero_grads", device_type="xpu", ), ), ), OptimizerInfo( Adagrad, optim_inputs_func=optim_inputs_func_adagrad, optim_error_inputs_func=optim_error_inputs_func_adagrad, supported_impls=("foreach", "differentiable", "fused"), not_og_supported_flags=( "foreach", "differentiable", "fused", "maximize", "capturable", ), supports_fused_on=("cpu",), supports_sparse=True, metadata_for_sparse=( {"lr": 0.1, "weight_decay": 0, "lr_decay": 0}, [ lambda opt: StepLR(opt, gamma=1 - 1e-5, step_size=500), lambda opt: ReduceLROnPlateau(opt, threshold=1e-4), ], ), decorators=( DecorateInfo( # Note on tolerances: # difference comes from the fact that the non fused kernel have # more dtype cast operations. 
We have another test test_fused_cpu_matches_cuda # to make sure there is no discrepancies between cuda fused kernel # and cpu fused kernel toleranceOverride( { torch.bfloat16: tol(atol=5e-3, rtol=5e-3), torch.float16: tol(atol=5e-3, rtol=5e-3), } ), "TestOptimRenewed", "test_fused_matches_forloop", ), ), skips=( DecorateInfo( skipIfMPS, # addcdiv doesn't work for non-contiguous, see #118115 "TestOptimRenewed", "test_forloop_goes_right_direction", active_if=lambda kwargs: not kwargs["contiguous"], device_type="mps", ), DecorateInfo( skipIfTorchDynamo("See #116028"), "TestOptimRenewed", "test_set_default_dtype_works_with_foreach", ), DecorateInfo( skipIfTorchDynamo( "Accessing grad.real errors, see https://github.com/pytorch/pytorch/issues/117184" ), "TestOptimRenewed", "test_complex_2d", ), DecorateInfo( skipIfTorchDynamo( "This test uses mocks, which dynamo does not support" ), "TestOptimRenewed", "test_defaults_changed_to_foreach", ), ), ), OptimizerInfo( Adam, optim_inputs_func=optim_inputs_func_adam, scheduler_inputs=( [lambda opt: ExponentialLR(opt, gamma=0.9)], [lambda opt: LinearLR(opt, start_factor=0.4, total_iters=4)], [ lambda opt: ConstantLR(opt, factor=0.4, total_iters=4), lambda opt: ExponentialLR(opt, gamma=0.9), ], [ lambda opt: ExponentialLR(opt, gamma=0.9), lambda opt: ReduceLROnPlateau(opt), ], [lambda opt: ConstantLR(opt, factor=0.4, total_iters=4)], [lambda opt: PolynomialLR(opt, power=0.9, total_iters=4)], [ lambda opt: StepLR(opt, gamma=0.9, step_size=10), lambda opt: ReduceLROnPlateau(opt), ], ), optim_error_inputs_func=optim_error_inputs_func_adam, supported_impls=("foreach", "differentiable", "fused"), has_capturable_arg=True, not_og_supported_flags=( "foreach", "differentiable", "fused", "maximize", "capturable", ), supports_fused_on=("cpu", "cuda", "mps"), decorators=( # Expected floating point error between fused and compiled forloop DecorateInfo( toleranceOverride({torch.float64: tol(atol=4.5e-7, rtol=2.2e-6)}), "TestOptimRenewed", "test_fused_matches_forloop", active_if=lambda kwargs: TEST_WITH_TORCHDYNAMO and kwargs["dtype"] == torch.float64, ), DecorateInfo( # Note on tolerances: # difference comes from the fact that the non fused kernel have # more dtype cast operations. 
We have another test test_fused_cpu_matches_cuda # to make sure there is no discrepancies between cuda fused kernel # and cpu fused kernel toleranceOverride( { torch.bfloat16: tol(atol=5e-3, rtol=5e-3), torch.float16: tol(atol=5e-3, rtol=5e-3), } ), "TestOptimRenewed", "test_fused_matches_forloop", ), DecorateInfo( # Note on tolerances: # Tracking through #127000 toleranceOverride( { torch.float32: tol(atol=3e-5, rtol=1.3e-06), } ), "TestCudaOptims", "test_grad_scaling_autocast_fused_optimizers", ), ), skips=( DecorateInfo( skipIfMPS, # addcdiv doesn't work for non-contiguous, see #118115 "TestOptimRenewed", "test_forloop_goes_right_direction", active_if=lambda kwargs: not kwargs["contiguous"], device_type="mps", ), DecorateInfo( skipIfTorchDynamo( "Errors w/ Global state changed, see https://github.com/pytorch/pytorch/issues/116028" ), "TestOptimRenewed", "test_set_default_dtype_works_with_foreach", ), DecorateInfo( skipIfTorchDynamo( "Accessing grad.real errors, see https://github.com/pytorch/pytorch/issues/117184" ), "TestOptimRenewed", "test_complex_2d", ), DecorateInfo( skipIfTorchDynamo( "This test uses mocks, which dynamo does not support" ), "TestOptimRenewed", "test_defaults_changed_to_foreach", ), ), ), OptimizerInfo( Adamax, optim_inputs_func=optim_inputs_func_adamax, optim_error_inputs_func=optim_error_inputs_func_adamax, supported_impls=("foreach", "differentiable"), has_capturable_arg=True, skips=( DecorateInfo( skipIfMPS, # addcdiv doesn't work for non-contiguous, see #118115 "TestOptimRenewed", "test_forloop_goes_right_direction", active_if=lambda kwargs: not kwargs["contiguous"], device_type="mps", ), DecorateInfo( skipIfTorchDynamo("See #116028"), "TestOptimRenewed", "test_set_default_dtype_works_with_foreach", ), DecorateInfo( skipIfTorchDynamo( "Accessing grad.real errors, see https://github.com/pytorch/pytorch/issues/117184" ), "TestOptimRenewed", "test_complex_2d", ), DecorateInfo( unittest.skip("Uses too much memory, even for H100, surprisingly."), "TestOptimRenewed", "test_foreach_large_tensor", ), DecorateInfo( skipIfTorchDynamo( "This test uses mocks, which dynamo does not support" ), "TestOptimRenewed", "test_defaults_changed_to_foreach", ), ), ), OptimizerInfo( AdamW, optim_inputs_func=optim_inputs_func_adamw, optim_error_inputs_func=optim_error_inputs_func_adamw, supported_impls=("foreach", "differentiable", "fused"), not_og_supported_flags=( "foreach", "differentiable", "fused", "maximize", "capturable", ), supports_fused_on=("cpu", "cuda", "mps"), has_capturable_arg=True, decorators=( # Expected error between compiled forloop and fused optimizers DecorateInfo( toleranceOverride({torch.float64: tol(atol=4.5e-7, rtol=2.2e-6)}), "TestOptimRenewed", "test_fused_matches_forloop", active_if=lambda kwargs: TEST_WITH_TORCHDYNAMO and kwargs["dtype"] == torch.float64, ), DecorateInfo( toleranceOverride( # Note on tolerances: # difference comes from the fact that the non fused kernel have # more dtype cast operations. 
We have another test test_fused_cpu_matches_cuda # to make sure there is no discrepancies between cuda fused kernel # and cpu fused kernel { torch.bfloat16: tol(atol=5e-3, rtol=5e-3), torch.float16: tol(atol=5e-3, rtol=5e-3), } ), "TestOptimRenewed", "test_fused_matches_forloop", ), # Note on tolerances: # Tracking through #127000 DecorateInfo( toleranceOverride( { torch.float32: tol( atol=3e-5, rtol=1.3e-06, ) } ), "TestCudaOptims", "test_grad_scaling_autocast_fused_optimizers", ), ), skips=( DecorateInfo( skipIfMPS, # addcdiv doesn't work for non-contiguous, see #118115 "TestOptimRenewed", "test_forloop_goes_right_direction", active_if=lambda kwargs: not kwargs["contiguous"], device_type="mps", ), DecorateInfo( skipIfTorchDynamo( "Errors w/ Global state changed, see https://github.com/pytorch/pytorch/issues/116028" ), "TestOptimRenewed", "test_set_default_dtype_works_with_foreach", ), DecorateInfo( skipIfTorchDynamo( "Accessing grad.real errors, see https://github.com/pytorch/pytorch/issues/117184" ), "TestOptimRenewed", "test_complex_2d", ), DecorateInfo( skipIfTorchDynamo( "This test uses mocks, which dynamo does not support" ), "TestOptimRenewed", "test_defaults_changed_to_foreach", ), ), ), OptimizerInfo( ASGD, optim_inputs_func=optim_inputs_func_asgd, optim_error_inputs_func=optim_error_inputs_func_asgd, supported_impls=("foreach", "differentiable"), has_capturable_arg=True, skips=( DecorateInfo( skipIfTorchDynamo( "Errors w/ Global state changed, see https://github.com/pytorch/pytorch/issues/116028" ), "TestOptimRenewed", "test_set_default_dtype_works_with_foreach", ), DecorateInfo( skipIfTorchDynamo( "Accessing grad.real errors, see https://github.com/pytorch/pytorch/issues/117184" ), "TestOptimRenewed", "test_complex_2d", ), DecorateInfo( toleranceOverride( { torch.float32: tol(atol=1.5e-5, rtol=1e-5), } ), "TestOptimRenewed", "test_step_is_noop_for_zero_grads", ), DecorateInfo( skipIfTorchDynamo( "This test uses mocks, which dynamo does not support" ), "TestOptimRenewed", "test_defaults_changed_to_foreach", ), DecorateInfo( unittest.skip( "ASGD internally changes the weights even with zero grad" ), "TestOptimRenewed", "test_step_is_noop_for_zero_grads", ), ), ), OptimizerInfo( LBFGS, optim_inputs_func=optim_inputs_func_lbfgs, optim_error_inputs_func=optim_error_inputs_func_lbfgs, supported_impls=(), step_requires_closure=True, supports_param_groups=False, supports_multiple_devices=False, skips=( # Fails on MacOS 13.2.1 in CI https://github.com/pytorch/pytorch/issues/117094 DecorateInfo( skipIfMPS, "TestOptimRenewed", "test_can_load_older_state_dict", device_type="mps", ), DecorateInfo( toleranceOverride( { torch.complex64: tol( rtol=4.5e-5, atol=5e-5, ) } ), "TestOptimRenewed", "test_complex_2d", ), DecorateInfo( unittest.skip("Does not support param groups"), "TestOptimRenewed", "test_param_groups_lr", ), DecorateInfo( unittest.skip("Does not support param groups"), "TestOptimRenewed", "test_param_groups_weight_decay", ), DecorateInfo( unittest.skip("LBFGS doesn't support multidevice"), "TestOptimRenewed", "test_forloop_goes_right_direction_multigpu", ), DecorateInfo( unittest.skip("Does not support param groups"), "TestOptimRenewed", "test_param_group_with_lrscheduler_goes_right_direction", ), # https://github.com/pytorch/pytorch/issues/131398 DecorateInfo( unittest.expectedFailure, "CompiledOptimizerParityTests", "test_correctness", active_if=lambda kwargs: sys.platform == "darwin" and kwargs["use_closure"], ), ), ), OptimizerInfo( NAdam, 
optim_inputs_func=optim_inputs_func_nadam, optim_error_inputs_func=optim_error_inputs_func_nadam, supported_impls=("foreach", "differentiable"), has_capturable_arg=True, skips=( DecorateInfo( skipIfMPS, # addcdiv doesn't work for non-contiguous, see #118115 "TestOptimRenewed", "test_forloop_goes_right_direction", active_if=lambda kwargs: not kwargs["contiguous"], device_type="mps", ), DecorateInfo( skipIfTorchDynamo( "Errors w/ Global state changed, see https://github.com/pytorch/pytorch/issues/116028" ), "TestOptimRenewed", "test_set_default_dtype_works_with_foreach", ), DecorateInfo( skipIfTorchDynamo( "Accessing grad.real errors, see https://github.com/pytorch/pytorch/issues/117184" ), "TestOptimRenewed", "test_complex_2d", ), DecorateInfo( skipIfTorchDynamo( "Errors, https://github.com/pytorch/pytorch/issues/117150" ), "TestOptimRenewed", "test_load_nontensor_step", ), DecorateInfo( skipIfTorchDynamo( "This test uses mocks, which dynamo does not support" ), "TestOptimRenewed", "test_defaults_changed_to_foreach", ), ), ), OptimizerInfo( RAdam, optim_inputs_func=optim_inputs_func_radam, optim_error_inputs_func=optim_error_inputs_func_radam, supported_impls=("foreach", "differentiable"), has_capturable_arg=True, skips=( DecorateInfo( skipIfTorchDynamo( "Errors w/ Global state changed, see https://github.com/pytorch/pytorch/issues/116028" ), "TestOptimRenewed", "test_set_default_dtype_works_with_foreach", ), DecorateInfo( skipIfTorchDynamo( "Accessing grad.real errors, see https://github.com/pytorch/pytorch/issues/117184" ), "TestOptimRenewed", "test_complex_2d", ), DecorateInfo( toleranceOverride( { # previously atol=1e-7, rtol=1e-7 torch.float64: tol(atol=1.5e-7, rtol=1.1e-7) } ), "TestOptimRenewed", "test_foreach_matches_forloop", ), DecorateInfo( skipIfTorchDynamo( "This test uses mocks, which dynamo does not support" ), "TestOptimRenewed", "test_defaults_changed_to_foreach", ), ), ), OptimizerInfo( RMSprop, optim_inputs_func=optim_inputs_func_rmsprop, optim_error_inputs_func=optim_error_inputs_func_rmsprop, supported_impls=("foreach", "differentiable"), has_capturable_arg=True, skips=( DecorateInfo( skipIfMPS, # addcdiv doesn't work for non-contiguous, see #118115 "TestOptimRenewed", "test_forloop_goes_right_direction", active_if=lambda kwargs: not kwargs["contiguous"], device_type="mps", ), DecorateInfo( skipIfTorchDynamo("See #116028"), "TestOptimRenewed", "test_set_default_dtype_works_with_foreach", ), DecorateInfo( skipIfTorchDynamo( "Accessing grad.real errors, see https://github.com/pytorch/pytorch/issues/117184" ), "TestOptimRenewed", "test_complex_2d", ), DecorateInfo( toleranceOverride( { # previously atol=5-05, rtol=0.001, https://github.com/pytorch/pytorch/issues/116202 torch.float32: tol(atol=5e-04, rtol=0.01), } ), "TestOptimRenewed", "test_mixed_device_dtype", active_if=TEST_WITH_TORCHDYNAMO, ), DecorateInfo( skipIfTorchDynamo( "This test uses mocks, which dynamo does not support" ), "TestOptimRenewed", "test_defaults_changed_to_foreach", ), ), ), OptimizerInfo( Rprop, optim_inputs_func=optim_inputs_func_rprop, optim_error_inputs_func=optim_error_inputs_func_rprop, supported_impls=("foreach", "differentiable"), has_capturable_arg=True, skips=( DecorateInfo( skipIfMPS, # Rprop doesn't update for non-contiguous, see #118117 "TestOptimRenewed", "test_forloop_goes_right_direction", active_if=lambda kwargs: not kwargs["contiguous"], device_type="mps", ), DecorateInfo( skipIfTorchDynamo("See #116028"), "TestOptimRenewed", "test_set_default_dtype_works_with_foreach", ), 
DecorateInfo( skipIfTorchDynamo( "Accessing grad.real errors, see https://github.com/pytorch/pytorch/issues/117184" ), "TestOptimRenewed", "test_complex_2d", ), DecorateInfo( skipIfTorchDynamo( "This test uses mocks, which dynamo does not support" ), "TestOptimRenewed", "test_defaults_changed_to_foreach", ), ), ), OptimizerInfo( SGD, optim_inputs_func=optim_inputs_func_sgd, scheduler_inputs=( [lambda opt: StepLR(opt, gamma=0.9, step_size=10)], [ lambda opt: LinearLR( opt, start_factor=0.4, end_factor=0.8, total_iters=4 ) ], [ lambda opt: StepLR(opt, gamma=0.9, step_size=10), lambda opt: LinearLR( opt, start_factor=0.4, end_factor=0.6, total_iters=4 ), ], [ lambda opt: StepLR(opt, gamma=0.99, step_size=10), lambda opt: ExponentialLR(opt, gamma=0.99), lambda opt: ReduceLROnPlateau(opt), ], [lambda opt: ConstantLR(opt, factor=0.4, total_iters=4)], [lambda opt: PolynomialLR(opt, power=0.9, total_iters=4)], [ lambda opt: StepLR(opt, gamma=0.9, step_size=10), lambda opt: ReduceLROnPlateau(opt), ], ), optim_error_inputs_func=optim_error_inputs_func_sgd, supported_impls=("foreach", "differentiable", "fused"), not_og_supported_flags=( "foreach", "differentiable", "fused", "maximize", "capturable", ), supports_sparse=True, metadata_for_sparse=( { "lr": 4.8e-3, "maximize": False, "momentum": 0, "nesterov": False, "weight_decay": 0, }, [lambda opt: StepLR(opt, gamma=0.99999, step_size=300)], ), supports_fused_on=( "cpu", "cuda", "mps", ), skips=( DecorateInfo( skipIfTorchDynamo( "Errors w/ Global state changed, see https://github.com/pytorch/pytorch/issues/116028" ), "TestOptimRenewed", "test_set_default_dtype_works_with_foreach", ), DecorateInfo( skipIfTorchDynamo( "Accessing grad.real errors, see https://github.com/pytorch/pytorch/issues/117184" ), "TestOptimRenewed", "test_complex_2d", ), DecorateInfo( toleranceOverride( { # previously atol=5-05, rtol=0.001, https://github.com/pytorch/pytorch/issues/116202 torch.float32: tol(atol=5e-04, rtol=0.007), } ), "TestOptimRenewed", "test_mixed_device_dtype", active_if=TEST_WITH_TORCHDYNAMO, ), DecorateInfo( skipIfTorchDynamo( "This test uses mocks, which dynamo does not support" ), "TestOptimRenewed", "test_defaults_changed_to_foreach", ), ), ), OptimizerInfo( SparseAdam, optim_inputs_func=optim_inputs_func_sparseadam, optim_error_inputs_func=optim_error_inputs_func_sparseadam, supported_impls=(), only_supports_sparse_grads=True, metadata_for_sparse=({"lr": 4e-2}, []), supports_complex=False, # Missing complex support, see #118153 skips=( DecorateInfo( skipIfMPS, # SparseAdam does not support MPS "TestOptimRenewed", device_type="mps", ), DecorateInfo( skipIfXpu(msg="SparseAdam is not yet supported on the XPU stack"), ), DecorateInfo( skipIfTorchDynamo("cannot call to_sparse on p.grad, see #117184"), "TestOptimRenewed", "test_param_groups_lr", ), DecorateInfo( skipIfTorchDynamo("cannot call to_sparse on p.grad, see #117184"), "TestOptimRenewed", "test_tensor_lr", ), DecorateInfo( unittest.skip( "SparseAdam does not support dense gradients, see #116507" ), "TestOptimRenewed", "test_can_load_older_state_dict", ), DecorateInfo( skipIfTorchDynamo("cannot call to_sparse on p.grad, see #117184"), "TestOptimRenewed", "test_load_nontensor_step", ), DecorateInfo( skipIfTorchDynamo("cannot call to_sparse on p.grad, see #117184"), "TestOptimRenewed", "test_forloop_goes_right_direction", ), DecorateInfo( skipIfTorchDynamo("cannot call to_sparse on p.grad, see #117184"), "TestOptimRenewed", "test_forloop_goes_right_direction_multigpu", ), DecorateInfo( 
skipIfTorchDynamo("cannot call to_sparse on p.grad, see #117184"), "TestOptimRenewed", "test_param_group_with_lrscheduler_goes_right_direction", ), DecorateInfo( skipIfTorchDynamo("cannot call to_sparse on p.grad, see #117184"), "TestOptimRenewed", "test_state_dict_with_cuda_params", ), DecorateInfo( skipIfTorchDynamo("cannot call to_sparse on p.grad, see #117184"), "TestOptimRenewed", "test_deepcopy_copies_all_public_attrs", ), ), ), ] class TensorTracker: """ A utility to track tensor clones in a list, with the expectation of popping them later (in order) to make fair comparisons between two multi-step computation. The intended use case is usually when comparing two supposed equal computations, such as an optimizer step that each individually consists of multiple steps, where numerical deviation could multiply. The goal is to be able to compare and align numbers at every milestone so as to minimize numerical discrepancies, and so when the test fails, it is likely a real problem. """ def __init__(self, assert_eq_kwargs=None): if assert_eq_kwargs is None: assert_eq_kwargs = {} self.assert_eq_kwargs = assert_eq_kwargs self.tensors = [] def add(self, tensor): """ Add a detach().clone()'d version of the tensor """ self.tensors.append(tensor.detach().clone()) # pops from beginning, like a queue and not a stack! def pop_check_set(self, tensor_to_set, testcase): """ Pop the first element in the tensor tracker, assert equality between the popped tensor and the input tensor, and then set the input tensor to have the same values as the popped tensor (with copy_). """ testcase.assertGreater(len(self.tensors), 0, "no tensors to pop") ref = self.tensors.pop(0) testcase.assertTrue(isinstance(ref, Tensor), f"{type(ref)=}") testcase.assertEqual(tensor_to_set, ref, **self.assert_eq_kwargs) with torch.no_grad(): tensor_to_set.copy_(ref) def all_popped(self): return len(self.tensors) == 0 ```
================================================================================================================================= SOURCE CODE FILE: common_pruning.py LINES: 1 SIZE: 13.71 KB PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\common_pruning.py ENCODING: utf-8 ```py # Owner(s): ["module: unknown"] from typing import Any from torch.ao.pruning import BaseSparsifier import torch import torch.nn.functional as F from torch import nn class ImplementedSparsifier(BaseSparsifier): def __init__(self, **kwargs: dict[str, Any]) -> None: super().__init__(defaults=kwargs) def update_mask(self, module: nn.Module, tensor_name: str, **kwargs: dict[str, Any]) -> None: module.parametrizations.weight[0].mask[0] = 0 # type: ignore[index, union-attr] linear_state = self.state['linear1.weight'] linear_state['step_count'] = linear_state.get('step_count', 0) + 1 class MockSparseLinear(nn.Linear): """ This class is a MockSparseLinear class to check convert functionality. It is the same as a normal Linear layer, except with a different type, as well as an additional from_dense method. """ @classmethod def from_dense(cls, mod: nn.Linear) -> 'MockSparseLinear': """ """ linear = cls(mod.in_features, mod.out_features) return linear def rows_are_subset(subset_tensor: torch.Tensor, superset_tensor: torch.Tensor) -> bool: """ Checks to see if all rows in subset tensor are present in the superset tensor """ i = 0 for row in subset_tensor: while i < len(superset_tensor): if not torch.equal(row, superset_tensor[i]): i += 1 else: break else: return False return True class SimpleLinear(nn.Module): r"""Model with only Linear layers without biases, some wrapped in a Sequential, some following the Sequential. Used to test basic pruned Linear-Linear fusion.""" def __init__(self) -> None: super().__init__() self.seq = nn.Sequential( nn.Linear(7, 5, bias=False), nn.Linear(5, 6, bias=False), nn.Linear(6, 4, bias=False), ) self.linear1 = nn.Linear(4, 4, bias=False) self.linear2 = nn.Linear(4, 10, bias=False) def forward(self, x: torch.Tensor) -> torch.Tensor: x = self.seq(x) x = self.linear1(x) x = self.linear2(x) return x class LinearBias(nn.Module): r"""Model with only Linear layers, alternating layers with biases, wrapped in a Sequential. Used to test pruned Linear-Bias-Linear fusion.""" def __init__(self) -> None: super().__init__() self.seq = nn.Sequential( nn.Linear(7, 5, bias=True), nn.Linear(5, 6, bias=False), nn.Linear(6, 3, bias=True), nn.Linear(3, 3, bias=True), nn.Linear(3, 10, bias=False), ) def forward(self, x: torch.Tensor) -> torch.Tensor: x = self.seq(x) return x class LinearActivation(nn.Module): r"""Model with only Linear layers, some with bias, some in a Sequential and some following. Activation functions modules in between each Linear in the Sequential, and each outside layer. Used to test pruned Linear(Bias)-Activation-Linear fusion.""" def __init__(self) -> None: super().__init__() self.seq = nn.Sequential( nn.Linear(7, 5, bias=True), nn.ReLU(), nn.Linear(5, 6, bias=False), nn.Tanh(), nn.Linear(6, 4, bias=True), ) self.linear1 = nn.Linear(4, 3, bias=True) self.act1 = nn.ReLU() self.linear2 = nn.Linear(3, 10, bias=False) self.act2 = nn.Tanh() def forward(self, x: torch.Tensor) -> torch.Tensor: x = self.seq(x) x = self.linear1(x) x = self.act1(x) x = self.linear2(x) x = self.act2(x) return x class LinearActivationFunctional(nn.Module): r"""Model with only Linear layers, some with bias, some in a Sequential and some following. 
Activation functions modules in between each Linear in the Sequential, and functional activationals are called in between each outside layer. Used to test pruned Linear(Bias)-Activation-Linear fusion.""" def __init__(self) -> None: super().__init__() self.seq = nn.Sequential( nn.Linear(7, 5, bias=True), nn.ReLU(), nn.Linear(5, 6, bias=False), nn.ReLU(), nn.Linear(6, 4, bias=True), ) self.linear1 = nn.Linear(4, 3, bias=True) self.linear2 = nn.Linear(3, 8, bias=False) self.linear3 = nn.Linear(8, 10, bias=False) self.act1 = nn.ReLU() def forward(self, x: torch.Tensor) -> torch.Tensor: x = self.seq(x) x = self.linear1(x) x = F.relu(x) x = self.linear2(x) x = F.relu(x) x = self.linear3(x) x = F.relu(x) return x class SimpleConv2d(nn.Module): r"""Model with only Conv2d layers, all without bias, some in a Sequential and some following. Used to test pruned Conv2d-Conv2d fusion.""" def __init__(self) -> None: super().__init__() self.seq = nn.Sequential( nn.Conv2d(1, 32, 3, 1, bias=False), nn.Conv2d(32, 64, 3, 1, bias=False), ) self.conv2d1 = nn.Conv2d(64, 48, 3, 1, bias=False) self.conv2d2 = nn.Conv2d(48, 52, 3, 1, bias=False) def forward(self, x: torch.Tensor) -> torch.Tensor: x = self.seq(x) x = self.conv2d1(x) x = self.conv2d2(x) return x class Conv2dBias(nn.Module): r"""Model with only Conv2d layers, some with bias, some in a Sequential and some outside. Used to test pruned Conv2d-Bias-Conv2d fusion.""" def __init__(self) -> None: super().__init__() self.seq = nn.Sequential( nn.Conv2d(1, 32, 3, 1, bias=True), nn.Conv2d(32, 32, 3, 1, bias=True), nn.Conv2d(32, 64, 3, 1, bias=False), ) self.conv2d1 = nn.Conv2d(64, 48, 3, 1, bias=True) self.conv2d2 = nn.Conv2d(48, 52, 3, 1, bias=False) def forward(self, x: torch.Tensor) -> torch.Tensor: x = self.seq(x) x = self.conv2d1(x) x = self.conv2d2(x) return x class Conv2dActivation(nn.Module): r"""Model with only Conv2d layers, some with bias, some in a Sequential and some following. Activation function modules in between each Sequential layer, functional activations called in-between each outside layer. Used to test pruned Conv2d-Bias-Activation-Conv2d fusion.""" def __init__(self) -> None: super().__init__() self.seq = nn.Sequential( nn.Conv2d(1, 32, 3, 1, bias=True), nn.ReLU(), nn.Conv2d(32, 64, 3, 1, bias=True), nn.Tanh(), nn.Conv2d(64, 64, 3, 1, bias=False), nn.ReLU(), ) self.conv2d1 = nn.Conv2d(64, 48, 3, 1, bias=False) self.conv2d2 = nn.Conv2d(48, 52, 3, 1, bias=True) def forward(self, x: torch.Tensor) -> torch.Tensor: x = self.seq(x) x = self.conv2d1(x) x = F.relu(x) x = self.conv2d2(x) x = F.hardtanh(x) return x class Conv2dPadBias(nn.Module): r"""Model with only Conv2d layers, all with bias and some with padding > 0, some in a Sequential and some following. Activation function modules in between each layer. 
Used to test that bias is propagated correctly in the special case of pruned Conv2d-Bias-(Activation)Conv2d fusion, when the second Conv2d layer has padding > 0.""" def __init__(self) -> None: super().__init__() self.seq = nn.Sequential( nn.Conv2d(1, 32, 3, 1, padding=1, bias=True), nn.ReLU(), nn.Conv2d(32, 32, 3, 1, bias=False), nn.ReLU(), nn.Conv2d(32, 32, 3, 1, padding=1, bias=True), nn.ReLU(), nn.Conv2d(32, 32, 3, 1, padding=1, bias=True), nn.ReLU(), nn.Conv2d(32, 64, 3, 1, bias=True), nn.Tanh(), ) self.conv2d1 = nn.Conv2d(64, 48, 3, 1, padding=1, bias=True) self.act1 = nn.ReLU() self.conv2d2 = nn.Conv2d(48, 52, 3, 1, padding=1, bias=True) self.act2 = nn.Tanh() def forward(self, x: torch.Tensor) -> torch.Tensor: x = self.seq(x) x = self.conv2d1(x) x = self.act1(x) x = self.conv2d2(x) x = self.act2(x) return x class Conv2dPool(nn.Module): r"""Model with only Conv2d layers, all with bias, some in a Sequential and some following. Activation function modules in between each layer, Pool2d modules in between each layer. Used to test pruned Conv2d-Pool2d-Conv2d fusion.""" def __init__(self) -> None: super().__init__() self.seq = nn.Sequential( nn.Conv2d(1, 32, kernel_size=3, padding=1, bias=True), nn.MaxPool2d(kernel_size=2, stride=2, padding=1), nn.ReLU(), nn.Conv2d(32, 64, kernel_size=3, padding=1, bias=True), nn.Tanh(), nn.AvgPool2d(kernel_size=2, stride=2, padding=1), ) self.conv2d1 = nn.Conv2d(64, 48, kernel_size=3, padding=1, bias=True) self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2, padding=1) self.af1 = nn.ReLU() self.conv2d2 = nn.Conv2d(48, 52, kernel_size=3, padding=1, bias=True) self.conv2d3 = nn.Conv2d(52, 52, kernel_size=3, padding=1, bias=True) def forward(self, x: torch.Tensor) -> torch.Tensor: x = self.seq(x) x = self.conv2d1(x) x = self.maxpool(x) x = self.af1(x) x = self.conv2d2(x) x = F.avg_pool2d(x, kernel_size=2, stride=2, padding=1) x = F.relu(x) x = self.conv2d3(x) return x class Conv2dPoolFlattenFunctional(nn.Module): r"""Model with Conv2d layers, all with bias, some in a Sequential and some following, and then a Pool2d and a functional Flatten followed by a Linear layer. Activation functions and Pool2ds in between each layer also. Used to test pruned Conv2d-Pool2d-Flatten-Linear fusion.""" def __init__(self) -> None: super().__init__() self.seq = nn.Sequential( nn.Conv2d(1, 3, kernel_size=3, padding=1, bias=True), nn.MaxPool2d(kernel_size=2, stride=2, padding=1), nn.ReLU(), nn.Conv2d(3, 5, kernel_size=3, padding=1, bias=True), nn.Tanh(), nn.AvgPool2d(kernel_size=2, stride=2, padding=1), ) self.conv2d1 = nn.Conv2d(5, 7, kernel_size=3, padding=1, bias=True) self.af1 = nn.ReLU() self.conv2d2 = nn.Conv2d(7, 11, kernel_size=3, padding=1, bias=True) self.avg_pool = nn.AdaptiveAvgPool2d((1, 1)) self.fc = nn.Linear(11, 13, bias=True) def forward(self, x: torch.Tensor) -> torch.Tensor: x = self.seq(x) x = self.conv2d1(x) x = F.max_pool2d(x, kernel_size=2, stride=2, padding=1) x = self.af1(x) x = self.conv2d2(x) x = self.avg_pool(x) x = torch.flatten(x, 1) # test functional flatten x = self.fc(x) return x class Conv2dPoolFlatten(nn.Module): r"""Model with Conv2d layers, all with bias, some in a Sequential and some following, and then a Pool2d and a Flatten module followed by a Linear layer. Activation functions and Pool2ds in between each layer also. 
Used to test pruned Conv2d-Pool2d-Flatten-Linear fusion.""" def __init__(self) -> None: super().__init__() self.seq = nn.Sequential( nn.Conv2d(1, 3, kernel_size=3, padding=1, bias=True), nn.MaxPool2d(kernel_size=2, stride=2, padding=1), nn.ReLU(), nn.Conv2d(3, 5, kernel_size=3, padding=1, bias=True), nn.Tanh(), nn.AvgPool2d(kernel_size=2, stride=2, padding=1), ) self.conv2d1 = nn.Conv2d(5, 7, kernel_size=3, padding=1, bias=True) self.af1 = nn.ReLU() self.conv2d2 = nn.Conv2d(7, 11, kernel_size=3, padding=1, bias=True) self.avg_pool = nn.AdaptiveAvgPool2d((2, 2)) self.flatten = nn.Flatten() self.fc = nn.Linear(44, 13, bias=True) def forward(self, x: torch.Tensor) -> torch.Tensor: x = self.seq(x) x = self.conv2d1(x) x = F.max_pool2d(x, kernel_size=2, stride=2, padding=1) x = self.af1(x) x = self.conv2d2(x) x = self.avg_pool(x) x = self.flatten(x) x = self.fc(x) return x class LSTMLinearModel(nn.Module): """Container module with an encoder, a recurrent module, and a linear.""" def __init__( self, input_dim: int, hidden_dim: int, output_dim: int, num_layers: int ) -> None: super().__init__() self.lstm = nn.LSTM(input_dim, hidden_dim, num_layers) self.linear = nn.Linear(hidden_dim, output_dim) def forward(self, input: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]: output, _hidden = self.lstm(input) decoded = self.linear(output) return decoded, output class LSTMLayerNormLinearModel(nn.Module): """Container module with an LSTM, a LayerNorm, and a linear.""" def __init__( self, input_dim: int, hidden_dim: int, output_dim: int, num_layers: int ) -> None: super().__init__() self.lstm = nn.LSTM(input_dim, hidden_dim, num_layers) self.norm = nn.LayerNorm(hidden_dim) self.linear = nn.Linear(hidden_dim, output_dim) def forward(self, x: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]: x, state = self.lstm(x) x = self.norm(x) x = self.linear(x) return x, state ```
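
Before moving on, here is a small, illustrative sketch of the `rows_are_subset` helper defined above. The tensors are made up for the example and the import path `torch.testing._internal.common_pruning` is assumed from the file path. Note that the helper scans the superset in a single forward pass, so rows must appear in the same relative order to count as a subset.

```py
# Illustrative sketch only -- not part of the file above.
import torch
from torch.testing._internal.common_pruning import rows_are_subset  # assumed import path

superset = torch.tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])

# Rows drawn from `superset` in the same relative order -> True.
print(rows_are_subset(torch.tensor([[1.0, 2.0], [5.0, 6.0]]), superset))  # True

# A row that never appears in `superset` -> False.
print(rows_are_subset(torch.tensor([[9.0, 9.0]]), superset))  # False

# Rows that exist but appear out of order are also reported as not a subset,
# because the scan over the superset only moves forward.
print(rows_are_subset(torch.tensor([[5.0, 6.0], [1.0, 2.0]]), superset))  # False
```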
====================================================================================================================================== SOURCE CODE FILE: common_quantization.py LINES: 6 SIZE: 113.78 KB PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\common_quantization.py ENCODING: utf-8 ```py # mypy: ignore-errors r"""Importing this file includes common utility methods and base clases for checking quantization api and properties of resulting modules. """ from functorch.experimental import control_flow import torch import torch.nn as nn import torch.nn.functional as F import torch.ao.nn.intrinsic.quantized.dynamic as nniqd import torch.ao.nn.quantized as nnq import torch.ao.nn.quantized.dynamic as nnqd from torch.ao.nn.intrinsic import _FusedModule import torch.distributed as dist from torch.testing._internal.common_utils import TestCase, TEST_WITH_ROCM from torch.export import export_for_training from torch.ao.quantization import ( QuantType, default_dynamic_qat_qconfig, default_embedding_qat_qconfig, default_symmetric_qnnpack_qat_qconfig, ) from torch.ao.quantization.quantize_pt2e import ( _convert_to_reference_decomposed_fx, convert_pt2e, prepare_pt2e, prepare_qat_pt2e, ) from torch.ao.quantization.backend_config import ( get_executorch_backend_config, ) from torch.ao.quantization.quantizer.xnnpack_quantizer import ( XNNPACKQuantizer, get_symmetric_quantization_config, ) from torch.ao.quantization import QuantWrapper, QuantStub, DeQuantStub, \ default_qconfig, default_dynamic_qconfig, default_per_channel_qconfig, QConfig, default_observer, default_weight_observer, \ propagate_qconfig_, convert, get_default_qconfig, quantize_dynamic_jit, quantize_jit, float_qparams_weight_only_qconfig, \ get_default_qat_qconfig, PerChannelMinMaxObserver, default_dynamic_quant_observer, quantize, \ QConfigMapping, get_default_qconfig_mapping, get_default_qat_qconfig_mapping from torch.ao.quantization.quantization_mappings import ( get_default_dynamic_quant_module_mappings, get_default_qconfig_propagation_list, get_default_qat_module_mappings, ) from torch.testing._internal.common_quantized import ( override_quantized_engine, ) from torch.jit.mobile import _load_for_lite_interpreter try: # graph mode quantization based on fx from torch.ao.quantization.quantize_fx import ( prepare_fx, prepare_qat_fx, convert_fx, convert_to_reference_fx, ) from torch.ao.ns.fx.ns_types import NSSingleResultValuesType, NSSubgraph from torch.fx.graph import Node from torch.fx import GraphModule HAS_FX = True except ImportError: HAS_FX = False import copy import io import functools import os import unittest import numpy as np from torch.testing import FileCheck from typing import Callable, Any, Union, Optional import torch._dynamo as torchdynamo import torch.ao.quantization.quantizer.x86_inductor_quantizer as xiq import torch.ao.quantization.quantizer.xpu_inductor_quantizer as xpuiq from torch.ao.quantization.quantizer.x86_inductor_quantizer import X86InductorQuantizer from torch.ao.quantization.quantizer.xpu_inductor_quantizer import XPUInductorQuantizer import contextlib class NodeSpec: ''' Used for checking GraphModule Node ''' def __init__(self, op, target): ''' op: call_function | call_module target: for call_function, target would be a function for call_module, target would be the type of PyTorch module ''' self.op = op self.target = target @classmethod def call_function(cls, target): return NodeSpec('call_function', target) @classmethod def call_method(cls, target): return NodeSpec('call_method', 
target) @classmethod def call_module(cls, target): return NodeSpec('call_module', target) def __hash__(self): return hash((self.op, self.target)) def __eq__(self, other): if not isinstance(other, NodeSpec): return NotImplemented return self.op == other.op and self.target == other.target def __repr__(self): return repr(self.op) + " " + repr(self.target) def get_supported_device_types(): return ['cpu', 'cuda'] if torch.cuda.is_available() and not TEST_WITH_ROCM else ['cpu'] def test_only_eval_fn(model, calib_data): r""" Default evaluation function takes a torch.utils.data.Dataset or a list of input Tensors and run the model on the dataset """ for inp in calib_data: model(*inp) _default_loss_fn = torch.nn.CrossEntropyLoss() def test_only_train_fn(model, train_data, loss_fn=_default_loss_fn): r""" Default train function takes a torch.utils.data.Dataset and train the model on the dataset """ optimizer = torch.optim.Adam(model.parameters(), lr=0.001) train_loss, correct, total = 0, 0, 0 for _ in range(10): model.train() for data, target in train_data: optimizer.zero_grad() output = model(data) loss = loss_fn(output, target) loss.backward() optimizer.step() train_loss += loss.item() _, predicted = torch.max(output, 1) total += target.size(0) correct += (predicted == target).sum().item() return train_loss, correct, total class AverageMeter: """Computes and stores the average and current value""" def __init__(self, name, fmt=':f'): self.name = name self.fmt = fmt self.reset() def reset(self): self.val = 0 self.avg = 0 self.sum = 0 self.count = 0 def update(self, val, n=1): self.val = val self.sum += val * n self.count += n self.avg = self.sum / self.count def __str__(self): fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})' return fmtstr.format(**self.__dict__) def accuracy(output, target, topk=(1,)): """Computes the accuracy over the k top predictions for the specified values of k""" with torch.no_grad(): maxk = max(topk) batch_size = target.size(0) _, pred = output.topk(maxk, 1, True, True) pred = pred.t() correct = pred.eq(target.view(1, -1).expand_as(pred)) res = [] for k in topk: correct_k = correct[:k].view(-1).float().sum(0, keepdim=True) res.append(correct_k.mul_(100.0 / batch_size)) return res def train_one_epoch(model, criterion, optimizer, data_loader, device, ntrain_batches): model.train() for cnt, (image, target) in enumerate(data_loader, start=1): print('.', end='') image, target = image.to(device), target.to(device) output = model(image) loss = criterion(output, target) optimizer.zero_grad() loss.backward() optimizer.step() accuracy(output, target, topk=(1, 5)) if cnt >= ntrain_batches: return return def ddp_setup(rank, world_size): os.environ['MASTER_ADDR'] = 'localhost' os.environ['MASTER_PORT'] = '12355' # initialize the process group dist.init_process_group("gloo", rank=rank, world_size=world_size) def ddp_cleanup(): dist.destroy_process_group() def run_ddp(rank, world_size, prepared): ddp_setup(rank, world_size) prepared.cuda() prepared = torch.nn.parallel.DistributedDataParallel(prepared, device_ids=[rank]) prepared.to(rank) model_with_ddp = prepared optimizer = torch.optim.SGD(model_with_ddp.parameters(), lr=0.0001) train_one_epoch(model_with_ddp, criterion, optimizer, dataset, rank, 1) # noqa: F821 ddp_cleanup() def convert_dynamic(module): convert(module, get_default_dynamic_quant_module_mappings(), inplace=True) def prepare_dynamic(model, qconfig_dict=None): propagate_qconfig_(model, qconfig_dict) def _make_conv_test_input( batch_size, in_channels_per_group, 
input_feature_map_size, out_channels_per_group, groups, kernel_size, X_scale, X_zero_point, W_scale, W_zero_point, use_bias, use_channelwise, ): in_channels = in_channels_per_group * groups out_channels = out_channels_per_group * groups (X_value_min, X_value_max) = (0, 4) X_init = torch.randint( X_value_min, X_value_max, (batch_size, in_channels,) + input_feature_map_size) X = X_scale * (X_init - X_zero_point).float() X_q = torch.quantize_per_tensor( X, scale=X_scale, zero_point=X_zero_point, dtype=torch.quint8) W_scale = W_scale * out_channels W_zero_point = W_zero_point * out_channels # Resize W_scale and W_zero_points arrays equal to out_channels W_scale = W_scale[:out_channels] W_zero_point = W_zero_point[:out_channels] # For testing, we use small values for weights and for activations so that # no overflow occurs in vpmaddubsw instruction. If the overflow occurs in # qconv implementation and if there is no overflow. # In reference we can't exactly match the results with reference. # Please see the comment in qconv implementation file # aten/src/ATen/native/quantized/cpu/qconv.cpp for more details. (W_value_min, W_value_max) = (-5, 5) # The operator expects them in the format # (out_channels, in_channels/groups,) + kernel_size W_init = torch.randint( W_value_min, W_value_max, (out_channels, in_channels_per_group,) + kernel_size) b_init = torch.randint(0, 10, (out_channels,)) if use_channelwise: W_shape = (-1, 1) + (1,) * len(kernel_size) W_scales_tensor = torch.tensor(W_scale, dtype=torch.float) W_zero_points_tensor = torch.tensor(W_zero_point, dtype=torch.float) W = W_scales_tensor.reshape(*W_shape) * ( W_init.float() - W_zero_points_tensor.reshape(*W_shape)).float() b = X_scale * W_scales_tensor * b_init.float() W_q = torch.quantize_per_channel( W, W_scales_tensor.double(), W_zero_points_tensor.long(), 0, dtype=torch.qint8) else: W = W_scale[0] * (W_init - W_zero_point[0]).float() b = X_scale * W_scale[0] * b_init.float() W_q = torch.quantize_per_tensor( W, scale=W_scale[0], zero_point=W_zero_point[0], dtype=torch.qint8) return (X, X_q, W, W_q, b if use_bias else None) def _make_conv_add_extra_input_tensor(scale, zero_point, sizes): (X_value_min, X_value_max) = (0, 4) X_init = torch.randint( X_value_min, X_value_max, sizes # Infer the size of tensor to do the add ) X = scale * (X_init - zero_point).float() X_q = torch.quantize_per_tensor( X, scale=scale, zero_point=zero_point, dtype=torch.quint8) return X, X_q def skipIfNoFBGEMM(fn): reason = 'Quantized operations require FBGEMM. FBGEMM is only optimized for CPUs with instruction set support AVX2 or newer.' if isinstance(fn, type): if 'fbgemm' not in torch.backends.quantized.supported_engines: fn.__unittest_skip__ = True fn.__unittest_skip_why__ = reason return fn @functools.wraps(fn) def wrapper(*args, **kwargs): if 'fbgemm' not in torch.backends.quantized.supported_engines: raise unittest.SkipTest(reason) else: fn(*args, **kwargs) return wrapper def skipIfNoQNNPACK(fn): reason = 'Quantized operations require QNNPACK.' 
if isinstance(fn, type): if 'qnnpack' not in torch.backends.quantized.supported_engines: fn.__unittest_skip__ = True fn.__unittest_skip_why__ = reason return fn @functools.wraps(fn) def wrapper(*args, **kwargs): if 'qnnpack' not in torch.backends.quantized.supported_engines: raise unittest.SkipTest(reason) else: fn(*args, **kwargs) return wrapper def withQNNPACKBackend(fn): # TODO(future PR): consider combining with skipIfNoQNNPACK, # will require testing of existing callsites reason = 'Quantized operations require QNNPACK.' if isinstance(fn, type): if 'qnnpack' not in torch.backends.quantized.supported_engines: fn.__unittest_skip__ = True fn.__unittest_skip_why__ = reason return fn @functools.wraps(fn) def wrapper(*args, **kwargs): if 'qnnpack' not in torch.backends.quantized.supported_engines: raise unittest.SkipTest(reason) with override_quantized_engine('qnnpack'): fn(*args, **kwargs) return wrapper def skipIfNoONEDNN(fn): reason = 'Quantized operations require ONEDNN.' if isinstance(fn, type): if 'onednn' not in torch.backends.quantized.supported_engines: fn.__unittest_skip__ = True fn.__unittest_skip_why__ = reason return fn @functools.wraps(fn) def wrapper(*args, **kwargs): if 'onednn' not in torch.backends.quantized.supported_engines: raise unittest.SkipTest(reason) else: fn(*args, **kwargs) return wrapper def skipIfNoONEDNNBF16(fn): reason = 'Quantized operations require BF16 support.' if isinstance(fn, type): if not torch.ops.mkldnn._is_mkldnn_bf16_supported(): fn.__unittest_skip__ = True fn.__unittest_skip_why__ = reason return fn @functools.wraps(fn) def wrapper(*args, **kwargs): if not torch.ops.mkldnn._is_mkldnn_bf16_supported(): raise unittest.SkipTest(reason) else: fn(*args, **kwargs) return wrapper def skipIfNoX86(fn): reason = 'Quantized operations require X86.' if isinstance(fn, type): if 'x86' not in torch.backends.quantized.supported_engines: fn.__unittest_skip__ = True fn.__unittest_skip_why__ = reason return fn @functools.wraps(fn) def wrapper(*args, **kwargs): if 'x86' not in torch.backends.quantized.supported_engines: raise unittest.SkipTest(reason) else: fn(*args, **kwargs) return wrapper def skipIfNoDynamoSupport(fn): reason = "dynamo doesn't support." if isinstance(fn, type): if not torchdynamo.is_dynamo_supported(): fn.__unittest_skip__ = True fn.__unittest_skip_why__ = reason return fn @functools.wraps(fn) def wrapper(*args, **kwargs): if not torchdynamo.is_dynamo_supported(): raise unittest.SkipTest(reason) else: fn(*args, **kwargs) return wrapper def skipIfNoInductorSupport(fn): reason = "inductor doesn't support." 
if isinstance(fn, type): if not torchdynamo.is_inductor_supported(): fn.__unittest_skip__ = True fn.__unittest_skip_why__ = reason return fn @functools.wraps(fn) def wrapper(*args, **kwargs): if not torchdynamo.is_inductor_supported(): raise unittest.SkipTest(reason) else: fn(*args, **kwargs) return wrapper try: import torchvision # noqa: F401 HAS_TORCHVISION = True except ImportError: HAS_TORCHVISION = False skip_if_no_torchvision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision") def get_script_module(model, tracing, data): return torch.jit.trace(model, data) if tracing else torch.jit.script(model) def lengths_to_offsets(t, offset_type=np.int64, use_begin_offset=True): """ Convert lengths to offsets for embedding_bag """ tt = np.zeros((t.shape[0] + 1,), dtype=offset_type) tt[1:] = t tt = torch.from_numpy(np.cumsum(tt, dtype=offset_type)) if use_begin_offset: return tt[:-1] return tt[1:] def _group_quantize_tensor(w, n_bit=4, q_group_size=16): assert w.dim() == 2 w = w.transpose(0, 1).contiguous() assert q_group_size > 1 assert w.shape[-1] % q_group_size == 0 to_quant = w.reshape(-1, q_group_size) assert torch.isnan(to_quant).sum() == 0 max_val = to_quant.amax(dim=1, keepdim=True) min_val = to_quant.amin(dim=1, keepdim=True) max_int = 2 ** n_bit - 1 min_int = 0 scales = (max_val - min_val).clamp(min=1e-6) / max_int assert torch.isnan(scales).sum() == 0 zeros = min_val + scales * (2 ** (n_bit - 1)) assert torch.isnan(zeros).sum() == 0 out = to_quant.sub(min_val).div(scales).round().clamp_(min_int, max_int) assert torch.isnan(out).sum() == 0 out = out.to(dtype=torch.int32).reshape(w.shape) if out.device != torch.device('cpu'): out = (out[::, ::2] << 4 | out[::, 1::2]).to(torch.uint8) # Scales and zeros for the same q-group should be contiguous, so we can # load as a 32-bit word scales = scales.view(w.shape[0], -1) zeros = zeros.view(w.shape[0], -1) scales_and_zeros = ( torch.cat( [ scales.reshape(scales.size(0), scales.size(1), 1), zeros.reshape(zeros.size(0), zeros.size(1), 1), ], 2, ).transpose(0, 1).contiguous() ) return out, scales_and_zeros def _group_quantize_tensor_symmetric( w, n_bit=4, groupsize=32 ): # W is of shape [K x N] # We transpose W as Quantization is applied on [N x K] w = w.transpose(0, 1).contiguous() assert w.dim() == 2 assert groupsize > 1 assert w.shape[-1] % groupsize == 0 # Calculate scale and zeros to_quant = w.reshape(-1, groupsize) max_val = to_quant.abs().amax(dim=1, keepdim=True) eps = torch.finfo(max_val.dtype).eps max_int = 2 ** (n_bit - 1) - 1 # For 4-bit, this is 7 scales = max_val.clamp(min=eps) / max_int zeros = torch.zeros_like(scales) # Quantize the weight scales = scales.to(torch.float32).reshape(w.shape[0], -1) zeros = zeros.to(torch.float32).reshape(w.shape[0], -1) scales = scales.reshape(-1, 1) zeros = zeros.reshape(-1, 1) max_int = 2**n_bit - 1 w_int8 = to_quant.div(scales).add(8.5).to(torch.int8).clamp(max=max_int) # We pack 2 signed int4 values in unsigned uint8 container. 
# This reduces the weight size by half and improves load perf out_uint8 = (w_int8[::, 1::2] << 4 | w_int8[::, ::2]).to(torch.uint8) scales_and_zeros = scales.squeeze().contiguous() return out_uint8, scales_and_zeros def _dynamically_quantize_per_channel(x, quant_min, quant_max, target_dtype): # source: https://github.com/pytorch-labs/gpt-fast/blob/main/quantize.py # default setup for affine quantization of activations x_dtype = x.dtype x = x.float() eps = torch.finfo(torch.float32).eps # get min and max min_val, max_val = torch.aminmax(x, dim=1) # calculate scales and zero_points based on min and max # reference: https://fburl.com/code/srbiybme min_val_neg = torch.min(min_val, torch.zeros_like(min_val)) max_val_pos = torch.max(max_val, torch.zeros_like(max_val)) device = min_val_neg.device # reference: https://fburl.com/code/4wll53rk max_val_pos = torch.max(-min_val_neg, max_val_pos) scales = max_val_pos / (float(quant_max - quant_min) / 2) # ensure scales is the same dtype as the original tensor scales = torch.clamp(scales, min=eps).to(x.dtype) zero_points = torch.zeros(min_val_neg.size(), dtype=torch.int64, device=device) # quantize based on qmin/qmax/scales/zp x_div = x / scales.unsqueeze(-1) x_round = torch.round(x_div) x_zp = x_round + zero_points.unsqueeze(-1) quant = torch.clamp(x_zp, quant_min, quant_max).to(target_dtype) return quant, scales.to(x_dtype), zero_points # QuantizationTestCase used as a base class for testing quantization on modules class QuantizationTestCase(TestCase): def setUp(self): super().setUp() self.calib_data = [[torch.rand(2, 5, dtype=torch.float)] for _ in range(2)] self.train_data = [[torch.rand(2, 5, dtype=torch.float), torch.randint(0, 1, (2,), dtype=torch.long)] for _ in range(2)] self.img_data_1d = [[torch.rand(2, 3, 10, dtype=torch.float)] for _ in range(2)] self.img_data_2d = [[torch.rand(1, 3, 10, 10, dtype=torch.float)] for _ in range(2)] self.img_data_3d = [[torch.rand(1, 3, 5, 5, 5, dtype=torch.float)] for _ in range(2)] self.img_data_1d_train = [[torch.rand(2, 3, 10, dtype=torch.float), torch.randint(0, 1, (1,), dtype=torch.long)] for _ in range(2)] self.img_data_2d_train = [[torch.rand(1, 3, 10, 10, dtype=torch.float), torch.randint(0, 1, (1,), dtype=torch.long)] for _ in range(2)] self.img_data_3d_train = [[torch.rand(1, 3, 5, 5, 5, dtype=torch.float), torch.randint(0, 1, (1,), dtype=torch.long)] for _ in range(2)] self.img_data_dict = {1 : self.img_data_1d, 2 : self.img_data_2d, 3 : self.img_data_3d} # Quant types that produce statically quantized ops self.static_quant_types = [QuantType.STATIC, QuantType.QAT] # All quant types for (fx based) graph mode quantization self.all_quant_types = [QuantType.DYNAMIC, QuantType.STATIC, QuantType.QAT] def checkNoPrepModules(self, module): r"""Checks the module does not contain child modules for quantization preparation, e.g. quant, dequant and observer """ self.assertFalse(hasattr(module, 'quant')) self.assertFalse(hasattr(module, 'dequant')) def checkNoQconfig(self, module): r"""Checks the module does not contain qconfig """ self.assertFalse(hasattr(module, 'qconfig')) for child in module.children(): self.checkNoQconfig(child) def checkHasPrepModules(self, module): r"""Checks the module contains child modules for quantization preparation, e.g. 
quant, dequant and observer """ self.assertTrue(hasattr(module, 'module')) self.assertTrue(hasattr(module, 'quant')) self.assertTrue(hasattr(module, 'dequant')) def checkObservers(self, module, propagate_qconfig_list=None, prepare_custom_config_dict=None): r"""Checks the module or module's leaf descendants have observers in preparation for quantization """ if propagate_qconfig_list is None: propagate_qconfig_list = get_default_qconfig_propagation_list() if prepare_custom_config_dict is None: prepare_custom_config_dict = {} float_to_observed_module_class_mapping = prepare_custom_config_dict.get("float_to_observed_custom_module_class", {}) # check if a module is a leaf module, ignoring activation_post_process attribute def is_leaf_module(module): submodule_name_count = 0 for name, _ in module.named_children(): if name != 'activation_post_process': submodule_name_count += 1 return submodule_name_count == 0 if hasattr(module, 'qconfig') and module.qconfig is not None and \ ((is_leaf_module(module) and not isinstance(module, torch.nn.Sequential) and type(module) in propagate_qconfig_list) or type(module) in float_to_observed_module_class_mapping.keys()) and \ not isinstance(module, torch.ao.quantization.DeQuantStub): self.assertTrue(hasattr(module, 'activation_post_process'), 'module: ' + str(type(module)) + ' do not have observer') # we don't need to check observers for child modules of the # qat modules if type(module) not in get_default_qat_module_mappings().values() and \ type(module) not in float_to_observed_module_class_mapping.values() and \ not isinstance(module, _FusedModule): for child in module.children(): if type(child) in [nn.Dropout]: continue self.checkObservers(child, propagate_qconfig_list, prepare_custom_config_dict) def checkQuantDequant(self, mod): r"""Checks that mod has nn.Quantize and nn.DeQuantize submodules inserted """ self.assertEqual(type(mod.quant), nnq.Quantize) self.assertEqual(type(mod.dequant), nnq.DeQuantize) def checkWrappedQuantizedLinear(self, mod): r"""Checks that mod has been swapped for an nnq.Linear module, the bias is qint32, and that the module has Quantize and DeQuantize submodules """ self.assertEqual(type(mod.module), nnq.Linear) self.checkQuantDequant(mod) def checkQuantizedLinear(self, mod): self.assertEqual(type(mod), nnq.Linear) def checkDynamicQuantizedLinear(self, mod, dtype): r"""Checks that mod has been swapped for an nnqd.Linear module, the bias is float. """ self.assertEqual(type(mod), nnqd.Linear) self.assertEqual(mod._packed_params.dtype, dtype) def checkDynamicQuantizedLinearRelu(self, mod, dtype): r"""Checks that mod has been swapped for an nnqd.Linear module, the bias is float. 
""" self.assertEqual(type(mod), nniqd.LinearReLU) self.assertEqual(mod._packed_params.dtype, dtype) def check_eager_serialization(self, ref_model, loaded_model, x): # Check state dict serialization and torch.save APIs model_dict = ref_model.state_dict() b = io.BytesIO() torch.save(model_dict, b) b.seek(0) # weights_only=False as we sometimes get a ScriptObect here (weird) loaded_dict = torch.load(b, weights_only=False) loaded_model.load_state_dict(loaded_dict) ref_out = ref_model(*x) load_out = loaded_model(*x) def check_outputs(ref_out, load_out): self.assertEqual(ref_out[0], load_out[0]) if isinstance(ref_out[1], tuple): self.assertEqual(ref_out[1][0], load_out[1][0]) self.assertEqual(ref_out[1][1], load_out[1][1]) else: self.assertEqual(ref_out[1], load_out[1]) check_outputs(ref_out, load_out) b = io.BytesIO() torch.save(ref_model, b) b.seek(0) # weights_only=False as this is legacy code that saves the model loaded = torch.load(b, weights_only=False) load_out = loaded(*x) check_outputs(ref_out, load_out) def check_weight_bias_api(self, ref_model, weight_keys, bias_keys): weight = ref_model.get_weight() bias = ref_model.get_bias() self.assertEqual(weight_keys ^ weight.keys(), set()) self.assertEqual(bias_keys ^ bias.keys(), set()) def checkDynamicQuantizedLSTM(self, mod, reference_module_type, dtype): r"""Checks that mod has been swapped for an nnqd.LSTM type module, the bias is float. """ wt_dtype_map = {torch.qint8: 'quantized_dynamic', torch.float16: 'quantized_fp16'} self.assertEqual(type(mod), reference_module_type) for packed_params in mod._all_weight_values: self.assertEqual(packed_params.param.__getstate__()[0][0], wt_dtype_map[dtype]) def checkLinear(self, mod): self.assertEqual(type(mod), torch.nn.Linear) def checkDynamicQuantizedModule(self, mod, reference_module_type, dtype): r"""Checks that mod has been swapped for an nnqd.Linear module, the bias is float. 
""" wt_dtype_map = {torch.qint8: 'quantized_dynamic', torch.float16: 'quantized_fp16'} self.assertEqual(type(mod), reference_module_type) if hasattr(mod, '_all_weight_values'): for packed_params in mod._all_weight_values: self.assertEqual(packed_params.param.__getstate__()[0][0], wt_dtype_map[dtype]) def checkScriptable(self, orig_mod, calib_data, check_save_load=False): scripted = torch.jit.script(orig_mod) self._checkScriptable(orig_mod, scripted, calib_data, check_save_load) # Use first calib_data entry as trace input traced = torch.jit.trace(orig_mod, calib_data[0]) self._checkScriptable(orig_mod, traced, calib_data, check_save_load) # Call this twice: once for a scripted module and once for a traced module def _checkScriptable(self, orig_mod, script_mod, calib_data, check_save_load): self._checkModuleCorrectnessAgainstOrig(orig_mod, script_mod, calib_data) # Test save/load buffer = io.BytesIO() torch.jit.save(script_mod, buffer) buffer.seek(0) loaded_mod = torch.jit.load(buffer) # Pending __get_state_ and __set_state__ support # See tracking task https://github.com/pytorch/pytorch/issues/23984 if check_save_load: self._checkModuleCorrectnessAgainstOrig(orig_mod, loaded_mod, calib_data) def _checkModuleCorrectnessAgainstOrig(self, orig_mod, test_mod, calib_data): for inp in calib_data: ref_output = orig_mod(*inp) scripted_output = test_mod(*inp) self.assertEqual(scripted_output, ref_output) def checkGraphModeOp(self, module, inputs, quantized_op, tracing=False, debug=False, check=True, eval_mode=True, dynamic=False, qconfig=None): if debug: print('Testing:', str(module)) qconfig_dict = {'': get_default_qconfig(torch.backends.quantized.engine)} if eval_mode: module = module.eval() if dynamic: qconfig_dict = {'': default_dynamic_qconfig if qconfig is None else qconfig} model = get_script_module(module, tracing, inputs[0]).eval() if debug: print('input graph:', model.graph) models = {} outputs = {} for debug in [True, False]: if dynamic: models[debug] = quantize_dynamic_jit(model, qconfig_dict, debug=debug) # make sure it runs outputs[debug] = models[debug](inputs) else: # module under test can contain in-place ops, and we depend on # input data staying constant for comparisons inputs_copy = copy.deepcopy(inputs) models[debug] = quantize_jit( model, qconfig_dict, test_only_eval_fn, [inputs_copy], inplace=False, debug=debug) # make sure it runs outputs[debug] = models[debug](*inputs[0]) if debug: print('debug graph:', models[True].graph) print('non debug graph:', models[False].graph) if check: # debug and non-debug option should have the same numerics self.assertEqual(outputs[True], outputs[False]) # non debug graph should produce quantized op FileCheck().check(quantized_op) \ .run(models[False].graph) return models[False] def checkGraphModuleNodes( self, graph_module, expected_node=None, expected_node_occurrence=None, expected_node_list=None): """ Check if GraphModule contains the target node Args: graph_module: the GraphModule instance we want to check expected_node, expected_node_occurrence, expected_node_list: see docs for checkGraphModeFxOp """ nodes_in_graph = {} node_list = [] modules = dict(graph_module.named_modules(remove_duplicate=False)) for node in graph_module.graph.nodes: n = None if node.op == 'call_function' or node.op == 'call_method': n = NodeSpec(node.op, node.target) elif node.op == 'call_module': n = NodeSpec(node.op, type(modules[node.target])) if n is not None: node_list.append(n) if n in nodes_in_graph: nodes_in_graph[n] += 1 else: nodes_in_graph[n] = 1 if 
expected_node is not None: self.assertTrue(expected_node in nodes_in_graph, 'node:' + str(expected_node) + ' not found in the graph module') if expected_node_occurrence is not None: for expected_node, occurrence in expected_node_occurrence.items(): if occurrence != 0: self.assertTrue( expected_node in nodes_in_graph, 'Check failed for node:' + str(expected_node) + ' not found') self.assertTrue( nodes_in_graph[expected_node] == occurrence, 'Check failed for node:' + str(expected_node) + ' Expected occurrence:' + str(occurrence) + ' Found occurrence:' + str(nodes_in_graph[expected_node])) else: self.assertTrue( expected_node not in nodes_in_graph, 'Check failed for node:' + str(expected_node) + ' expected no occurrence but found') if expected_node_list is not None: cur_index = 0 for n in node_list: if cur_index == len(expected_node_list): return if n == expected_node_list[cur_index]: cur_index += 1 self.assertTrue( cur_index == len(expected_node_list), "Check failed for graph:" + self.printGraphModule(graph_module, print_str=False) + "Expected ordered list:" + str(expected_node_list)) def printGraphModule(self, graph_module, print_str=True): modules = dict(graph_module.named_modules(remove_duplicate=False)) node_infos = [] for n in graph_module.graph.nodes: node_info = ' '.join(map(repr, [n.op, n.name, n.target, n.args, n.kwargs])) if n.op == 'call_module': node_info += ' module type: ' + repr(type(modules[n.target])) node_infos.append(node_info) str_to_print = '\n'.join(node_infos) if print_str: print(str_to_print) return str_to_print if HAS_FX: def assert_types_for_matched_subgraph_pairs( self, matched_subgraph_pairs: dict[str, tuple[NSSubgraph, NSSubgraph]], expected_types: dict[str, tuple[tuple[Callable, Callable], tuple[Callable, Callable]]], gm_a: GraphModule, gm_b: GraphModule, ) -> None: """ Verifies that the types specified in expected_types match the underlying objects pointed to by the nodes in matched_subgraph_pairs. An example successful test case: matched_subgraph_pairs = {'x0': (graph_a_conv_0_node, graph_b_conv_0_node)} expected_types = {'x0': (nn.Conv2d, nnq.Conv2d)} The function tests for key equivalence, and verifies types with instance checks. 
""" def _get_underlying_op_type( node: Node, gm: GraphModule ) -> Union[Callable, str]: if node.op == 'call_module': mod = getattr(gm, node.target) return type(mod) else: assert node.op in ('call_function', 'call_method') return node.target self.assertTrue( len(matched_subgraph_pairs) == len(expected_types), f'Expected length of results to match, but got {len(matched_subgraph_pairs)} and {len(expected_types)}' ) for k, v in expected_types.items(): expected_types_a, expected_types_b = v exp_type_start_a, exp_type_end_a = expected_types_a exp_type_start_b, exp_type_end_b = expected_types_b subgraph_a, subgraph_b = matched_subgraph_pairs[k] act_type_start_a = _get_underlying_op_type(subgraph_a.start_node, gm_a) act_type_start_b = _get_underlying_op_type(subgraph_b.start_node, gm_b) act_type_end_a = _get_underlying_op_type(subgraph_a.end_node, gm_a) act_type_end_b = _get_underlying_op_type(subgraph_b.end_node, gm_b) types_match = (exp_type_start_a is act_type_start_a) and \ (exp_type_end_a is act_type_end_a) and \ (exp_type_start_b is act_type_start_b) and \ (exp_type_end_b is act_type_end_b) self.assertTrue( types_match, f'Type mismatch at {k}: expected {(exp_type_start_a, exp_type_end_a, exp_type_start_b, exp_type_end_b)}, ' f'got {(act_type_start_a, act_type_end_a, act_type_start_b, act_type_end_b)}' ) def assert_ns_compare_dict_valid( self, act_compare_dict: dict[str, dict[str, dict[str, Any]]], ) -> None: """ Verifies that the act_compare_dict (output of Numeric Suite APIs) is valid: 1. for each layer, results are recorded for two models 2. number of seen tensors match 3. shapes of each pair of seen tensors match """ for layer_name, result_type_to_data in act_compare_dict.items(): for result_type, layer_data in result_type_to_data.items(): self.assertTrue( len(layer_data) == 2, f"Layer {layer_name} does not have exactly two model results.") model_name_0, model_name_1 = layer_data.keys() for res_idx in range(len(layer_data[model_name_0])): layer_data_0 = layer_data[model_name_0][res_idx] layer_data_1 = layer_data[model_name_1][res_idx] self.assertTrue( layer_data_0['type'] == layer_data_0['type'], f"Layer {layer_name}, {model_name_0} and {model_name_1} do not have the same type.") self.assertTrue( len(layer_data_0['values']) == len(layer_data_1['values']), f"Layer {layer_name}, {model_name_0} and {model_name_1} do not have the same number of seen Tensors.") # F.conv1d weight has rank 3, and toq.conv1d unpacked weight # has rank 4. For now, skip the length check for conv1d only. 
is_weight_functional_conv1d = ( result_type == NSSingleResultValuesType.WEIGHT.value and ( 'conv1d' in layer_data_0['prev_node_target_type'] or 'conv1d' in layer_data_1['prev_node_target_type'] ) ) if not is_weight_functional_conv1d: for idx in range(len(layer_data_0['values'])): values_0 = layer_data_0['values'][idx] values_1 = layer_data_1['values'][idx] if isinstance(values_0, torch.Tensor): self.assertTrue( values_0.shape == values_1.shape, f"Layer {layer_name}, {model_name_0} and {model_name_1} " + f"have a shape mismatch at idx {idx}.") elif isinstance(values_0, list): values_0 = values_0[0] values_1 = values_1[0] self.assertTrue( values_0.shape == values_1.shape, f"Layer {layer_name}, {model_name_0} and {model_name_1} " + f"have a shape mismatch at idx {idx}.") else: assert isinstance(values_0, tuple), \ f"unhandled type {type(values_0)}" assert len(values_0) == 2 assert len(values_0[1]) == 2 assert values_0[0].shape == values_1[0].shape assert values_0[1][0].shape == values_1[1][0].shape assert values_0[1][1].shape == values_1[1][1].shape # verify that ref_node_name is valid ref_node_name_0 = layer_data_0['ref_node_name'] ref_node_name_1 = layer_data_1['ref_node_name'] prev_node_name_0 = layer_data_0['prev_node_name'] prev_node_name_1 = layer_data_1['prev_node_name'] if layer_data_0['type'] == NSSingleResultValuesType.NODE_OUTPUT.value: self.assertTrue(ref_node_name_0 == prev_node_name_0) self.assertTrue(ref_node_name_1 == prev_node_name_1) elif layer_data_0['type'] == NSSingleResultValuesType.NODE_INPUT.value: self.assertTrue(ref_node_name_0 != prev_node_name_0) self.assertTrue(ref_node_name_1 != prev_node_name_1) def checkGraphModeFxOp( self, model, inputs, quant_type, expected_node=None, expected_node_occurrence=None, expected_node_list=None, is_reference=False, print_debug_info=False, custom_qconfig_dict=None, prepare_expected_node=None, prepare_expected_node_occurrence=None, prepare_expected_node_list=None, prepare_custom_config=None, backend_config=None): """ Quantizes model with graph mode quantization on fx and check if the quantized model contains the quantized_node Args: model: floating point torch.nn.Module inputs: one positional sample input arguments for model expected_node: NodeSpec e.g. NodeSpec.call_function(torch.quantize_per_tensor) expected_node_occurrence: a dict from NodeSpec to expected number of occurrences (int) e.g. {NodeSpec.call_function(torch.quantize_per_tensor) : 1, NodeSpec.call_method('dequantize'): 1} expected_node_list: a list of NodeSpec, used to check the order of the occurrence of Node e.g. 
[NodeSpec.call_function(torch.quantize_per_tensor), NodeSpec.call_module(nnq.Conv2d), NodeSpec.call_function(F.hardtanh_), NodeSpec.call_method('dequantize')] is_reference: if True, enables reference mode print_debug_info: if True, prints debug info custom_qconfig_dict: overrides default qconfig_dict prepare_expected_node: same as expected_node, but for prepare prepare_expected_node_occurrence: same as expected_node_occurrence, but for prepare prepare_expected_node_list: same as expected_node_list, but for prepare Returns: A dictionary with the following structure: { "prepared": ..., # the prepared model "quantized": ..., # the quantized non-reference model "quantized_reference": ..., # the quantized reference model "result": ..., # the result for either quantized or # quantized_reference model depending on the # is_reference argument } """ # TODO: make img_data a single example instead of a list if type(inputs) == list: inputs = inputs[0] if quant_type == QuantType.QAT: qconfig_mapping = get_default_qat_qconfig_mapping(torch.backends.quantized.engine) model.train() elif quant_type == QuantType.STATIC: qconfig_mapping = get_default_qconfig_mapping(torch.backends.quantized.engine) model.eval() else: qconfig = default_dynamic_qconfig qconfig_mapping = QConfigMapping().set_global(qconfig) model.eval() if quant_type == QuantType.QAT: prepare = prepare_qat_fx else: prepare = prepare_fx # overwrite qconfig_dict with custom_qconfig_dict if custom_qconfig_dict is not None: assert type(custom_qconfig_dict) in (QConfigMapping, dict), \ 'custom_qconfig_dict should be a QConfigMapping or a dict' if isinstance(custom_qconfig_dict, QConfigMapping): qconfig_mapping = custom_qconfig_dict else: qconfig_mapping = QConfigMapping.from_dict(custom_qconfig_dict) prepared = prepare( model, qconfig_mapping, example_inputs=inputs, prepare_custom_config=prepare_custom_config, backend_config=backend_config) if not quant_type == QuantType.DYNAMIC: prepared(*inputs) if print_debug_info: print() print('quant type:\n', quant_type) print('original model:\n', model) print() print('prepared model:\n', prepared) self.checkGraphModuleNodes( prepared, prepare_expected_node, prepare_expected_node_occurrence, prepare_expected_node_list) prepared_copy = copy.deepcopy(prepared) qgraph = convert_fx(copy.deepcopy(prepared)) qgraph_reference = convert_to_reference_fx(copy.deepcopy(prepared)) result = qgraph(*inputs) result_reference = qgraph_reference(*inputs) qgraph_copy = copy.deepcopy(qgraph) qgraph_reference_copy = copy.deepcopy(qgraph_reference) qgraph_to_check = qgraph_reference if is_reference else qgraph if print_debug_info: print() print('quantized model:\n', qgraph_to_check) self.printGraphModule(qgraph_to_check) print() self.checkGraphModuleNodes( qgraph_to_check, expected_node, expected_node_occurrence, expected_node_list) return {"prepared": prepared_copy, "quantized": qgraph_copy, "quantized_reference": qgraph_reference_copy, "quantized_output": result, "quantized_reference_output": result_reference} def checkEmbeddingSerialization(self, qemb, num_embeddings, embedding_dim, indices, offsets, set_qconfig, is_emb_bag, dtype=torch.quint8): # Test serialization of dynamic EmbeddingBag module using state_dict if is_emb_bag: inputs = [indices, offsets] else: inputs = [indices] emb_dict = qemb.state_dict() b = io.BytesIO() torch.save(emb_dict, b) b.seek(0) loaded_dict = torch.load(b) embedding_unpack = torch.ops.quantized.embedding_bag_unpack # Check unpacked weight values explicitly for key in emb_dict: if 
isinstance(emb_dict[key], torch._C.ScriptObject): assert isinstance(loaded_dict[key], torch._C.ScriptObject) emb_weight = embedding_unpack(emb_dict[key]) loaded_weight = embedding_unpack(loaded_dict[key]) self.assertEqual(emb_weight, loaded_weight) # Check state dict serialization and torch.save APIs if is_emb_bag: loaded_qemb = nnq.EmbeddingBag(num_embeddings=num_embeddings, embedding_dim=embedding_dim, include_last_offset=True, mode='sum', dtype=dtype) else: loaded_qemb = nnq.Embedding(num_embeddings=num_embeddings, embedding_dim=embedding_dim, dtype=dtype) self.check_eager_serialization(qemb, loaded_qemb, inputs) loaded_qemb.load_state_dict(loaded_dict) self.assertEqual(embedding_unpack(qemb._packed_params._packed_weight), embedding_unpack(loaded_qemb._packed_params._packed_weight)) # Test JIT serialization self.checkScriptable(qemb, [inputs], check_save_load=True) # Test from_float call if is_emb_bag: float_embedding = torch.nn.EmbeddingBag(num_embeddings=num_embeddings, embedding_dim=embedding_dim, include_last_offset=True, scale_grad_by_freq=False, mode='sum') else: float_embedding = torch.nn.Embedding(num_embeddings=num_embeddings, embedding_dim=embedding_dim) if set_qconfig: float_qparams_observer = PerChannelMinMaxObserver.with_args(dtype=dtype, qscheme=torch.per_channel_affine_float_qparams, ch_axis=0) float_embedding.qconfig = QConfig(activation=default_dynamic_quant_observer, weight=float_qparams_observer) prepare_dynamic(float_embedding) float_embedding(*inputs) if is_emb_bag: q_embeddingbag = nnq.EmbeddingBag.from_float(float_embedding) expected_name = "QuantizedEmbeddingBag" else: q_embeddingbag = nnq.Embedding.from_float(float_embedding) expected_name = "QuantizedEmbedding" q_embeddingbag(*inputs) self.assertTrue(expected_name in str(q_embeddingbag)) class QuantizationLiteTestCase(QuantizationTestCase): def _create_quantized_model(self, model_class: type[torch.nn.Module], **kwargs): # Creates quantized model for testing mobile script modules qengine = "qnnpack" with override_quantized_engine(qengine): # FIXME(rec): shouldn't qconfig be passed to quantize? qconfig = torch.ao.quantization.get_default_qconfig(qengine) # noqa: F841 model = model_class(**kwargs) model = quantize(model, test_only_eval_fn, [self.calib_data]) return model def _compare_script_and_mobile(self, model: torch.nn.Module, input: torch.Tensor): # Compares the numerical outputs for script and lite modules qengine = "qnnpack" with override_quantized_engine(qengine): script_module = torch.jit.script(model) script_module_result = script_module(input) max_retry = 5 for retry in range(1, max_retry + 1): # retries `max_retry` times; breaks iff succeeds else throws exception try: buffer = io.BytesIO(script_module._save_to_buffer_for_lite_interpreter()) buffer.seek(0) mobile_module = _load_for_lite_interpreter(buffer) mobile_module_result = mobile_module(input) torch.testing.assert_close(script_module_result, mobile_module_result) mobile_module_forward_result = mobile_module.forward(input) torch.testing.assert_close(script_module_result, mobile_module_forward_result) mobile_module_run_method_result = mobile_module.run_method("forward", input) torch.testing.assert_close(script_module_result, mobile_module_run_method_result) except AssertionError as e: if retry == max_retry: raise e else: continue break class PT2EQuantizationTestCase(QuantizationTestCase): """ Base QuantizationTestCase for PT2 with some helper methods. 
""" _MAP_TO_FX_TRACED_OPS = { torch.ops.quantized_decomposed.quantize_per_tensor: torch.ops.quantized_decomposed.quantize_per_tensor.default, torch.ops.quantized_decomposed.dequantize_per_tensor: torch.ops.quantized_decomposed.dequantize_per_tensor.default, torch.ops.quantized_decomposed.quantize_per_channel: torch.ops.quantized_decomposed.quantize_per_channel.default, torch.ops.quantized_decomposed.dequantize_per_channel: torch.ops.quantized_decomposed.dequantize_per_channel.default, torch.ops.quantized_decomposed.quantize_per_tensor.tensor: torch.ops.quantized_decomposed.quantize_per_tensor.tensor, torch.ops.quantized_decomposed.dequantize_per_tensor.tensor: torch.ops.quantized_decomposed.dequantize_per_tensor.tensor, } def _test_quantizer( self, model, example_inputs, quantizer, expected_node_occurrence, expected_node_list=None, check_against_fx_quant=False, fx_qconfig_mapping=None, export_with_dynamic_shape=False, is_qat=False, is_debug_mode=False, training_ir_node_occurrence=None, ): # resetting dynamo cache torch._dynamo.reset() m_eager = model.eval() # program capture m = copy.deepcopy(m_eager) dynamic_shapes = tuple( {0: torch.export.Dim("dim")} if i == 0 else None for i in range(len(example_inputs)) ) m = export_for_training( m, example_inputs, dynamic_shapes=dynamic_shapes if export_with_dynamic_shape else None, ).module() if is_qat: m = prepare_qat_pt2e(m, quantizer) else: m = prepare_pt2e(m, quantizer) if is_debug_mode: print("prepared model:", m) # Calibrate m(*example_inputs) m = convert_pt2e(m) if is_debug_mode: print("quantized model", m) pt2_quant_output = m(*example_inputs) ns = NodeSpec node_occurrence = { ns.call_function(k): v for k, v in expected_node_occurrence.items() } if expected_node_list is None: expected_node_list = [] node_list = [ns.call_function(n) for n in expected_node_list] self.checkGraphModuleNodes( m, expected_node_occurrence=node_occurrence, expected_node_list=node_list ) if check_against_fx_quant: qconfig_mapping = fx_qconfig_mapping backend_config = get_executorch_backend_config() m_copy = copy.deepcopy(m_eager) m_fx = prepare_fx( m_copy, qconfig_mapping, example_inputs, backend_config=backend_config ) m_fx(*example_inputs) m_fx = _convert_to_reference_decomposed_fx( m_fx, backend_config=backend_config ) m_fx = export_for_training( m_fx, example_inputs, dynamic_shapes=dynamic_shapes if export_with_dynamic_shape else None, ).module() node_occurrence = {} for k, v in PT2EQuantizationTestCase._MAP_TO_FX_TRACED_OPS.items(): if k in expected_node_occurrence: node_occurrence[ns.call_function(v)] = expected_node_occurrence[k] if training_ir_node_occurrence is not None: node_occurrence = { ns.call_function(k): v for k, v in training_ir_node_occurrence.items() } self.checkGraphModuleNodes(m_fx, expected_node_occurrence=node_occurrence) fx_quant_output = m_fx(*example_inputs) self.assertEqual(fx_quant_output, pt2_quant_output) return m def _quantize(self, m, quantizer, example_inputs, is_qat: bool = False): # resetting dynamo cache torch._dynamo.reset() m = export_for_training( m, example_inputs, ).module() if is_qat: m = prepare_qat_pt2e(m, quantizer) else: m = prepare_pt2e(m, quantizer) m(*example_inputs) m = convert_pt2e(m) return m def _get_pt2e_quantized_linear(self, is_per_channel=False) -> torch.fx.GraphModule: class M(torch.nn.Module): def __init__(self) -> None: super().__init__() self.linear = torch.nn.Linear(2, 2) def forward(self, x): return self.linear(x) quantizer = XNNPACKQuantizer() operator_config = 
get_symmetric_quantization_config(is_per_channel=is_per_channel) quantizer.set_global(operator_config) example_inputs = (torch.randn(2, 2),) m = M().eval() return self._quantize(m, quantizer, example_inputs) # Below are a series of toy models to use in testing quantization class SingleLayerLinearModel(torch.nn.Module): def __init__(self) -> None: super().__init__() self.fc1 = torch.nn.Linear(5, 5).to(dtype=torch.float) def forward(self, x): x = self.fc1(x) return x def get_example_inputs(self) -> tuple[Any, ...]: return (torch.rand(1, 5),) class AnnotatedSingleLayerLinearModel(torch.nn.Module): def __init__(self, qengine='fbgemm'): super().__init__() self.qconfig = torch.ao.quantization.get_default_qconfig(qengine) self.fc1 = QuantWrapper(torch.nn.Linear(5, 5).to(dtype=torch.float)) def forward(self, x): x = self.fc1(x) return x def get_example_inputs(self) -> tuple[Any, ...]: return (torch.rand(1, 5),) class SingleLayerLinearDynamicModel(torch.nn.Module): def __init__(self, qengine='fbgemm'): super().__init__() self.qconfig = torch.ao.quantization.get_default_qconfig(qengine) self.fc1 = torch.nn.Linear(5, 5).to(dtype=torch.float) def forward(self, x): x = self.fc1(x) return x def get_example_inputs(self) -> tuple[Any, ...]: return (torch.rand(1, 5),) class LinearAddModel(nn.Module): def __init__(self) -> None: super().__init__() self.fc1 = torch.nn.Linear(5, 8).to(dtype=torch.float) self.fc2 = torch.nn.Linear(8, 5).to(dtype=torch.float) def forward(self, x): x = self.fc1(x) x = torch.add(x, 5) x = self.fc2(x) return x def get_example_inputs(self) -> tuple[Any, ...]: return (torch.rand(1, 5),) class RNNDynamicModel(torch.nn.Module): def __init__(self, mod_type): super().__init__() self.qconfig = default_dynamic_qconfig if mod_type == 'GRU': self.mod = torch.nn.GRU(2, 2).to(dtype=torch.float) if mod_type == 'LSTM': self.mod = torch.nn.LSTM(2, 2).to(dtype=torch.float) def forward(self, x): x = self.mod(x) return x class RNNCellDynamicModel(torch.nn.Module): def __init__(self, mod_type): super().__init__() self.qconfig = default_dynamic_qconfig if mod_type == 'GRUCell': self.mod = torch.nn.GRUCell(2, 2).to(dtype=torch.float) if mod_type == 'LSTMCell': self.mod = torch.nn.LSTMCell(2, 2).to(dtype=torch.float) if mod_type == 'RNNReLU': self.mod = torch.nn.RNNCell(2, 2, nonlinearity='relu').to(dtype=torch.float) if mod_type == 'RNNTanh': self.mod = torch.nn.RNNCell(2, 2, nonlinearity='tanh').to(dtype=torch.float) def forward(self, x): x = self.mod(x) return x class LSTMwithHiddenDynamicModel(torch.nn.Module): def __init__(self, qengine='fbgemm'): super().__init__() self.qconfig = torch.ao.quantization.get_default_qconfig(qengine) self.lstm = torch.nn.LSTM(2, 2).to(dtype=torch.float) def forward(self, x, hid): x, hid = self.lstm(x, hid) return x, hid class ConvModel(torch.nn.Module): def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d(3, 5, 3, bias=False).to(dtype=torch.float) def forward(self, x): x = self.conv(x) return x def get_example_inputs(self) -> tuple[Any, ...]: return (torch.rand(1, 3, 5, 5),) class ConvTransposeModel(torch.nn.Module): def __init__(self) -> None: super().__init__() self.conv = torch.nn.ConvTranspose2d(3, 5, 3, bias=False).to(dtype=torch.float) def forward(self, x): x = self.conv(x) return x def get_example_inputs(self) -> tuple[Any, ...]: return (torch.rand(1, 3, 5, 5),) class AnnotatedConvModel(torch.nn.Module): def __init__(self, qengine): super().__init__() self.qconfig = torch.ao.quantization.get_default_qconfig(qengine) self.conv = 
torch.nn.Conv2d(3, 5, 3, bias=False).to(dtype=torch.float) self.quant = QuantStub() self.dequant = DeQuantStub() def forward(self, x): x = self.quant(x) x = self.conv(x) x = self.dequant(x) return x def get_example_inputs(self) -> tuple[Any, ...]: return (torch.rand(1, 3, 5, 5),) class AnnotatedConvTransposeModel(torch.nn.Module): def __init__(self, qengine): super().__init__() self.qconfig = torch.ao.quantization.get_default_qconfig(qengine) self.conv = torch.nn.ConvTranspose2d(3, 5, 3, bias=False).to(dtype=torch.float) self.quant = QuantStub() self.dequant = DeQuantStub() def forward(self, x): x = self.quant(x) x = self.conv(x) x = self.dequant(x) return x def get_example_inputs(self) -> tuple[Any, ...]: return (torch.rand(1, 3, 5, 5),) class ConvBnModel(torch.nn.Module): def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d(3, 5, 3, bias=False).to(dtype=torch.float) self.bn = torch.nn.BatchNorm2d(5).to(dtype=torch.float) def forward(self, x): x = self.conv(x) x = self.bn(x) return x def get_example_inputs(self) -> tuple[Any, ...]: return (torch.rand(1, 3, 5, 5),) class AnnotatedConvBnModel(torch.nn.Module): def __init__(self) -> None: super().__init__() self.qconfig = default_qconfig self.conv = torch.nn.Conv2d(3, 5, 3, bias=False).to(dtype=torch.float) self.bn = torch.nn.BatchNorm2d(5).to(dtype=torch.float) self.quant = QuantStub() self.dequant = DeQuantStub() def forward(self, x): x = self.quant(x) x = self.conv(x) x = self.bn(x) x = self.dequant(x) return x def get_example_inputs(self) -> tuple[Any, ...]: return (torch.rand(1, 3, 5, 5),) class ConvBnReLUModel(torch.nn.Module): def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d(3, 5, 3, bias=False).to(dtype=torch.float) self.bn = torch.nn.BatchNorm2d(5).to(dtype=torch.float) self.relu = nn.ReLU(inplace=True) def forward(self, x): x = self.conv(x) x = self.bn(x) x = self.relu(x) return x def get_example_inputs(self) -> tuple[Any, ...]: return (torch.rand(1, 3, 5, 5),) class AnnotatedConvBnReLUModel(torch.nn.Module): def __init__(self, qengine='fbgemm'): super().__init__() self.qconfig = torch.ao.quantization.get_default_qconfig(qengine) self.conv = torch.nn.Conv2d(3, 5, 3, bias=False).to(dtype=torch.float) self.bn = torch.nn.BatchNorm2d(5).to(dtype=torch.float) self.relu = nn.ReLU(inplace=True) self.quant = QuantStub() self.dequant = DeQuantStub() def forward(self, x): x = self.quant(x) x = self.conv(x) x = self.bn(x) x = self.relu(x) x = self.dequant(x) return x def fuse_model(self): # TODO: remove this check and define two fuse_modules function on this module if self.training: torch.ao.quantization.fuse_modules_qat(self, [['conv', 'bn', 'relu']], inplace=True) else: torch.ao.quantization.fuse_modules(self, [['conv', 'bn', 'relu']], inplace=True) def get_example_inputs(self) -> tuple[Any, ...]: return (torch.rand(1, 3, 5, 5),) class TwoLayerConvModel(torch.nn.Module): def __init__(self) -> None: super().__init__() self.conv1 = torch.nn.Conv2d(3, 5, 3, bias=False).to(dtype=torch.float) self.conv2 = torch.nn.Conv2d(5, 5, 1, bias=False).to(dtype=torch.float) def forward(self, x): x = self.conv1(x) x = self.conv2(x) return x def get_example_inputs(self) -> tuple[Any, ...]: return (torch.rand(1, 3, 5, 5),) class TwoLayerLinearModel(torch.nn.Module): def __init__(self) -> None: super().__init__() self.fc1 = torch.nn.Linear(5, 8).to(dtype=torch.float) self.fc2 = torch.nn.Linear(8, 5).to(dtype=torch.float) def forward(self, x): x = self.fc1(x) x = self.fc2(x) return x def get_example_inputs(self) 
-> tuple[Any, ...]: return (torch.rand(1, 5),) class LinearModelWithSubmodule(nn.Module): def __init__(self) -> None: super().__init__() self.subm = TwoLayerLinearModel() self.fc = nn.Linear(5, 5) def forward(self, x): x = self.subm(x) x = self.fc(x) return x def get_example_inputs(self) -> tuple[Any, ...]: return self.subm.get_example_inputs() class AnnotatedTwoLayerLinearModel(torch.nn.Module): def __init__(self) -> None: super().__init__() self.fc1 = torch.nn.Linear(5, 8).to(dtype=torch.float) self.fc2 = QuantWrapper(torch.nn.Linear(8, 5).to(dtype=torch.float)) self.fc2.qconfig = torch.ao.quantization.get_default_qconfig("fbgemm") def forward(self, x): x = self.fc1(x) x = self.fc2(x) return x def get_example_inputs(self) -> tuple[Any, ...]: return (torch.rand(1, 5),) class ActivationsTestModel(torch.nn.Module): def __init__(self) -> None: super().__init__() self.qconfig = torch.ao.quantization.get_default_qconfig("fbgemm") self.quant = torch.ao.quantization.QuantStub() self.hardswish = torch.nn.Hardswish().to(dtype=torch.float) self.elu = torch.nn.ELU().to(dtype=torch.float) self.dequant = torch.ao.quantization.DeQuantStub() def forward(self, x): x = self.quant(x) x = self.hardswish(x) x = self.elu(x) x = self.dequant(x) return x class LinearReluModel(torch.nn.Module): def __init__(self) -> None: super().__init__() self.fc = torch.nn.Linear(5, 5).to(dtype=torch.float) self.relu = torch.nn.ReLU() def forward(self, x): x = self.relu(self.fc(x)) return x def get_example_inputs(self) -> tuple[Any, ...]: return (torch.rand(1, 5),) class LinearReluLinearModel(torch.nn.Module): def __init__(self) -> None: super().__init__() self.fc1 = torch.nn.Linear(5, 8).to(dtype=torch.float) self.relu = torch.nn.ReLU() self.fc2 = torch.nn.Linear(8, 5).to(dtype=torch.float) def forward(self, x): x = self.fc1(x) x = self.relu(x) x = self.fc2(x) return x def get_example_inputs(self) -> tuple[Any, ...]: return (torch.rand(1, 5),) class LinearReluAddModel(torch.nn.Module): def __init__(self) -> None: super().__init__() self.fc1 = torch.nn.Linear(5, 5).to(dtype=torch.float) self.relu = torch.nn.ReLU() self.fc2 = torch.nn.Linear(5, 5).to(dtype=torch.float) def forward(self, x): x = self.fc1(x) x = self.relu(x) x = torch.add(x, 5) x = self.fc2(x) self.relu = torch.nn.ReLU() return x def get_example_inputs(self) -> tuple[Any, ...]: return (torch.rand(1, 5),) class LinearBnLeakyReluModel(torch.nn.Module): def __init__(self, with_bn=True): super().__init__() self.linear = nn.Linear(5, 5) self.bn1d = nn.BatchNorm1d(5) self.leaky_relu = nn.LeakyReLU(0.01) self.with_bn = with_bn def forward(self, x): x = self.linear(x) if self.with_bn: x = self.bn1d(x) x = self.leaky_relu(x) return x def get_example_inputs(self) -> tuple[Any, ...]: return (torch.rand(1, 5),) class LinearTanhModel(torch.nn.Module): def __init__(self) -> None: super().__init__() self.linear = nn.Linear(5, 5) self.tanh = nn.Tanh() def forward(self, x): x = self.linear(x) x = self.tanh(x) return x def get_example_inputs(self) -> tuple[Any, ...]: return (torch.rand(1, 5),) class ConvBnAddReluModel(torch.nn.Module): def __init__(self, with_bn=True, with_relu=True, left_conv=True, two_conv=True, use_torch_add=True): super().__init__() self.conv = nn.Conv2d(5, 5, (2, 2)) self.conv2 = nn.Conv2d(5, 5, (2, 2)) self.bn = nn.BatchNorm2d(5) self.relu = nn.ReLU() self.with_bn = with_bn self.with_relu = with_relu self.two_conv = two_conv self.left_conv = left_conv self.use_torch_add = use_torch_add def forward(self, x1, x2): if self.two_conv: if self.use_torch_add: if 
self.with_bn: x = torch.add(self.bn(self.conv(x1)), self.conv2(x1)) else: x = torch.add(self.conv(x1), self.conv2(x1)) else: if self.with_bn: x = self.bn(self.conv(x1)) + self.conv2(x1) else: x = self.conv(x1) + self.conv2(x1) else: if self.use_torch_add: if self.left_conv: if self.with_bn: x = torch.add(self.bn(self.conv(x1)), x2) else: x = torch.add(self.conv(x1), x2) else: if self.with_bn: x = torch.add(x2, self.bn(self.conv(x1))) else: x = torch.add(x2, self.conv(x1)) else: if self.left_conv: if self.with_bn: x = self.bn(self.conv(x1)) + x2 else: x = self.conv(x1) + x2 else: if self.with_bn: x = x2 + self.bn(self.conv(x1)) else: x = x2 + self.conv(x1) if self.with_relu: x = self.relu(x) return x def get_example_inputs(self) -> tuple[Any, ...]: return (torch.rand(1, 5, 3, 3), torch.rand(1, 5, 2, 2)) # TODO: self.fc should be self.conv class ConvReluModel(torch.nn.Module): def __init__(self) -> None: super().__init__() self.fc = torch.nn.Conv2d(3, 5, 3).to(dtype=torch.float) self.relu = torch.nn.ReLU() def forward(self, x): x = self.relu(self.fc(x)) return x def get_example_inputs(self) -> tuple[Any, ...]: return (torch.rand(1, 3, 5, 5),) # TODO: self.fc should be self.conv class ConvReluConvModel(torch.nn.Module): def __init__(self) -> None: super().__init__() self.fc1 = torch.nn.Conv2d(3, 5, 3).to(dtype=torch.float) self.relu = torch.nn.ReLU() self.fc2 = torch.nn.Conv2d(5, 5, 1).to(dtype=torch.float) def forward(self, x): x = self.fc1(x) x = self.relu(x) x = self.fc2(x) return x def get_example_inputs(self) -> tuple[Any, ...]: return (torch.rand(1, 3, 5, 5),) # TODO: self.fc should be self.conv class ConvReluAddModel(torch.nn.Module): def __init__(self) -> None: super().__init__() self.fc1 = torch.nn.Conv2d(3, 5, 3).to(dtype=torch.float) self.relu = torch.nn.ReLU() self.fc2 = torch.nn.Conv2d(5, 5, 1).to(dtype=torch.float) def forward(self, x): x = self.fc1(x) x = self.relu(x) x = torch.add(x, 5) x = self.fc2(x) self.relu = torch.nn.ReLU() return x def get_example_inputs(self) -> tuple[Any, ...]: return (torch.rand(1, 3, 5, 5),) class NormalizationTestModel(torch.nn.Module): def __init__(self) -> None: super().__init__() self.quant = torch.ao.quantization.QuantStub() self.fc1 = torch.nn.Linear(5, 8).to(dtype=torch.float) self.layer_norm = torch.nn.LayerNorm(8) self.group_norm = torch.nn.GroupNorm(2, 8) self.instance_norm1d = torch.nn.InstanceNorm1d(8) self.instance_norm2d = torch.nn.InstanceNorm2d(8) self.instance_norm3d = torch.nn.InstanceNorm3d(8) def forward(self, x): x = self.quant(x) x = self.fc1(x) x = self.layer_norm(x) x = self.group_norm(x.unsqueeze(-1).repeat(1, 1, 3)) x = self.instance_norm1d(x) x = self.instance_norm2d(x.unsqueeze(-1)) x = self.instance_norm3d(x.unsqueeze(-1)) return x class NestedModel(torch.nn.Module): def __init__(self) -> None: super().__init__() self.sub1 = LinearReluModel() self.sub2 = TwoLayerLinearModel() self.fc3 = torch.nn.Linear(5, 5).to(dtype=torch.float) def forward(self, x): x = self.sub1(x) x = self.sub2(x) x = self.fc3(x) return x class AnnotatedNestedModel(torch.nn.Module): def __init__(self, qengine): super().__init__() self.sub1 = LinearReluModel() self.sub2 = TwoLayerLinearModel() self.fc3 = QuantWrapper(torch.nn.Linear(5, 5).to(dtype=torch.float)) self.fc3.qconfig = default_qconfig self.sub2.fc1 = QuantWrapper(self.sub2.fc1) if qengine == 'fbgemm': self.sub2.fc1.qconfig = default_per_channel_qconfig else: self.sub2.fc1.qconfig = default_qconfig def forward(self, x): x = self.sub1(x) x = self.sub2(x) x = self.fc3(x) return x class 
AnnotatedSubNestedModel(torch.nn.Module): def __init__(self) -> None: super().__init__() self.sub1 = LinearReluModel() self.sub2 = QuantWrapper(TwoLayerLinearModel()) self.fc3 = QuantWrapper(torch.nn.Linear(5, 5).to(dtype=torch.float)) self.fc3.qconfig = default_qconfig self.sub2.qconfig = default_qconfig def forward(self, x): x = self.sub1(x) x = self.sub2(x) x = self.fc3(x) return x class AnnotatedCustomConfigNestedModel(torch.nn.Module): def __init__(self) -> None: super().__init__() self.sub1 = LinearReluModel() self.sub2 = TwoLayerLinearModel() self.fc3 = QuantWrapper(torch.nn.Linear(5, 5).to(dtype=torch.float)) self.fc3.qconfig = default_qconfig self.sub2.qconfig = default_qconfig custom_options = { 'dtype': torch.quint8, 'qscheme': torch.per_tensor_affine } custom_qconfig = QConfig(activation=default_observer.with_args(**custom_options), weight=default_weight_observer) self.sub2.fc1.qconfig = custom_qconfig self.sub2.fc1 = QuantWrapper(self.sub2.fc1) self.sub2.fc2 = QuantWrapper(self.sub2.fc2) def forward(self, x): x = self.sub1(x) x = self.sub2(x) x = self.fc3(x) return x class QuantSubModel(torch.nn.Module): def __init__(self) -> None: super().__init__() self.sub1 = LinearReluModel() self.sub2 = QuantWrapper(TwoLayerLinearModel()) self.sub2.qconfig = default_qconfig self.fc3 = torch.nn.Linear(5, 5).to(dtype=torch.float) self.fc3.qconfig = default_qconfig def forward(self, x): x = self.sub1(x) x = self.sub2(x) x = self.fc3(x) return x class InnerModule(torch.nn.Module): def __init__(self) -> None: super().__init__() self.fc1 = torch.nn.Linear(5, 8).to(dtype=torch.float) self.relu1 = torch.nn.ReLU() self.fc2 = torch.nn.Linear(8, 5).to(dtype=torch.float) self.relu2 = torch.nn.ReLU() def forward(self, x): return self.relu2(self.fc2(self.relu1(self.fc1(x)))) def fuse_modules(self): fusable_layers = [] named_children = list(self.named_children()) for idx, (current_name, layer) in enumerate(named_children): if isinstance(layer, torch.nn.Linear): if idx >= len(named_children) - 1: break if isinstance(named_children[idx + 1][1], torch.nn.ReLU): fusable_layers.append([current_name, named_children[idx + 1][0]]) # TODO: remove this check and define two fuse_modules function on this module if self.training: torch.ao.quantization.fuse_modules_qat(self, fusable_layers, inplace=True) else: torch.ao.quantization.fuse_modules(self, fusable_layers, inplace=True) class FunctionalLinear(torch.nn.Module): def __init__(self) -> None: super().__init__() self.weight = torch.rand((5, 5)) self.bias = torch.zeros(5) def forward(self, x): return F.linear(x, self.weight, self.bias) def get_example_inputs(self) -> tuple[Any, ...]: return (torch.rand(1, 5),) class SingleLayerFunctionalLinearModel(torch.nn.Module): def __init__(self) -> None: super().__init__() self.linear1 = FunctionalLinear() def forward(self, x): x = self.linear1(x) return x def get_example_inputs(self) -> tuple[Any, ...]: return self.linear1.get_example_inputs() class TwoLayerFunctionalLinearModel(torch.nn.Module): def __init__(self) -> None: super().__init__() self.linear1 = FunctionalLinear() self.linear2 = FunctionalLinear() def forward(self, x): x = self.linear1(x) x = self.linear2(x) return x def get_example_inputs(self) -> tuple[Any, ...]: return self.linear1.get_example_inputs() class FunctionalLinearAddModel(torch.nn.Module): def __init__(self) -> None: super().__init__() self.linear1 = FunctionalLinear() self.linear2 = FunctionalLinear() def forward(self, x): x = self.linear1(x) x = torch.add(x, 5) x = self.linear2(x) return x def 
get_example_inputs(self) -> tuple[Any, ...]: return self.linear1.get_example_inputs() class FunctionalLinearReluModel(nn.Module): def __init__(self) -> None: super().__init__() self.linear = FunctionalLinear() def forward(self, x): x = self.linear(x) x = F.relu(x) return x def get_example_inputs(self) -> tuple[Any, ...]: return self.linear.get_example_inputs() class FunctionalLinearReluLinearModel(nn.Module): def __init__(self) -> None: super().__init__() self.linear1 = FunctionalLinear() self.relu = nn.ReLU() self.linear2 = FunctionalLinear() def forward(self, x): x = self.linear1(x) x = self.relu(x) x = self.linear2(x) return x def get_example_inputs(self) -> tuple[Any, ...]: return self.linear1.get_example_inputs() class FunctionalConv2d(torch.nn.Module): def __init__(self) -> None: super().__init__() self.weight = torch.rand(3, 3, 3, 3) self.bias = torch.rand(3) self.stride = (1, 1) self.padding = (0, 0) self.dilation = (1, 1) self.groups = 1 def forward(self, x): return F.conv2d(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups) def get_example_inputs(self) -> tuple[Any, ...]: return (torch.rand(1, 3, 5, 5),) class SingleLayerFunctionalConvModel(torch.nn.Module): def __init__(self) -> None: super().__init__() self.conv1 = FunctionalConv2d() def forward(self, x): x = self.conv1(x) return x def get_example_inputs(self) -> tuple[Any, ...]: return self.conv1.get_example_inputs() class TwoLayerFunctionalConvModel(torch.nn.Module): def __init__(self) -> None: super().__init__() self.conv1 = FunctionalConv2d() self.conv2 = FunctionalConv2d() def forward(self, x): x = self.conv1(x) x = self.conv2(x) return x def get_example_inputs(self) -> tuple[Any, ...]: return self.conv1.get_example_inputs() class FunctionalConvReluModel(nn.Module): def __init__(self) -> None: super().__init__() self.conv = FunctionalConv2d() def forward(self, x): x = self.conv(x) x = F.relu(x) return x def get_example_inputs(self) -> tuple[Any, ...]: return self.conv.get_example_inputs() class FunctionalConvReluConvModel(nn.Module): def __init__(self) -> None: super().__init__() self.conv1 = FunctionalConv2d() self.relu = nn.ReLU() self.conv2 = FunctionalConv2d() def forward(self, x): x = self.conv1(x) x = self.relu(x) x = self.conv2(x) return x def get_example_inputs(self) -> tuple[Any, ...]: return self.conv1.get_example_inputs() class SkipQuantModel(torch.nn.Module): r"""We can skip quantization by explicitly setting qconfig of a submodule to None """ def __init__(self) -> None: super().__init__() self.sub = InnerModule() self.fc = torch.nn.Linear(5, 5).to(dtype=torch.float) def forward(self, x): return self.fc(self.sub(x)) def fuse_modules(self): self.sub.fuse_modules() class AnnotatedSkipQuantModel(torch.nn.Module): r"""We can skip quantization by explicitly setting qconfig of a submodule to None """ def __init__(self, qengine): super().__init__() self.qconfig = torch.ao.quantization.get_default_qconfig(qengine) self.sub = QuantWrapper(InnerModule()) self.fc = torch.nn.Linear(5, 5).to(dtype=torch.float) # don't quantize this fc self.fc.qconfig = None def forward(self, x): return self.fc(self.sub(x)) def fuse_modules(self): self.sub.module.fuse_modules() class QuantStubModel(torch.nn.Module): r"""A Module with manually inserted `QuantStub` and `DeQuantStub` """ def __init__(self) -> None: super().__init__() self.qconfig = torch.ao.quantization.get_default_qconfig("qnnpack") self.quant = QuantStub() self.dequant = DeQuantStub() self.fc = torch.nn.Linear(5, 5).to(dtype=torch.float) def 
forward(self, x): x = self.quant(x) x = self.fc(x) return self.dequant(x) class ManualLinearQATModel(torch.nn.Module): r"""A Module with manually inserted `QuantStub` and `DeQuantStub` """ def __init__(self, qengine): super().__init__() self.qconfig = torch.ao.quantization.get_default_qat_qconfig(qengine) self.quant = QuantStub() self.dequant = DeQuantStub() self.fc1 = torch.nn.Linear(5, 1).to(dtype=torch.float) self.fc2 = torch.nn.Linear(1, 10).to(dtype=torch.float) def forward(self, x): x = self.quant(x) x = self.fc1(x) x = self.fc2(x) return self.dequant(x) class ManualDropoutQATModel(torch.nn.Module): r"""A Module with manually inserted `QuantStub` and `DeQuantStub` """ def __init__(self, qengine): super().__init__() self.qconfig = torch.ao.quantization.get_default_qat_qconfig(qengine) self.quant = QuantStub() self.dequant = DeQuantStub() self.fc1 = torch.nn.Linear(5, 1).to(dtype=torch.float) self.dropout = torch.nn.Dropout(0.5) def forward(self, x): x = self.quant(x) x = self.fc1(x) x = self.dropout(x) return self.dequant(x) class ManualLinearDynamicQATModel(torch.nn.Module): r"""A Module that uses a dynamic QAT by default. """ def __init__(self, qconfig=None): super().__init__() self.qconfig = qconfig or default_dynamic_qat_qconfig self.fc1 = torch.nn.Linear(5, 1).to(dtype=torch.float) self.fc2 = torch.nn.Linear(1, 10).to(dtype=torch.float) def forward(self, x): x = self.fc1(x) x = self.fc2(x) return x class ManualConvLinearQATModel(torch.nn.Module): r"""A module with manually inserted `QuantStub` and `DeQuantStub` and contains both linear and conv modules """ def __init__(self, qconfig=None): super().__init__() self.qconfig = qconfig if qconfig else torch.ao.quantization.get_default_qat_qconfig("qnnpack") self.quant = QuantStub() self.dequant = DeQuantStub() self.conv = torch.nn.Conv2d(3, 1, kernel_size=3).to(dtype=torch.float) self.fc1 = torch.nn.Linear(64, 10).to(dtype=torch.float) self.fc2 = torch.nn.Linear(10, 10).to(dtype=torch.float) def forward(self, x): x = self.quant(x) x = self.conv(x) x = x.view(-1, 64).contiguous() x = self.fc1(x) x = self.fc2(x) return self.dequant(x) class ManualConvLinearSymmQATModel(ManualConvLinearQATModel): r"""Same as ManualConvLinearQATModule but with Symmetric Quantization. Supported only with qnnpack. """ def __init__(self) -> None: super().__init__(default_symmetric_qnnpack_qat_qconfig) class ManualEmbeddingBagLinear(nn.Module): def __init__(self) -> None: super().__init__() self.emb = nn.EmbeddingBag(num_embeddings=10, embedding_dim=12, mode='sum') self.emb.qconfig = default_embedding_qat_qconfig self.quant = QuantStub() self.dequant = DeQuantStub() self.linear = nn.Linear(12, 1).to(dtype=torch.float) self.qconfig = get_default_qat_qconfig("qnnpack") def forward(self, input: torch.Tensor, offsets: Optional[torch.Tensor] = None, per_sample_weights: Optional[torch.Tensor] = None): x = self.emb(input, offsets, per_sample_weights) x = self.quant(x) x = self.linear(x) return self.dequant(x) class DeFusedEmbeddingBagLinear(nn.Module): r"""A module to simulate QAT embedding bag with a linear layer, this module uses a separate embedding and bagging op, similar to that which is described in the EmbeddingBag documentation. 
https://pytorch.org/docs/stable/generated/torch.nn.EmbeddingBag.html """ def __init__(self) -> None: super().__init__() self.emb = nn.Embedding(num_embeddings=10, embedding_dim=12) self.emb.qconfig = default_embedding_qat_qconfig self.bagging_op = torch.sum self.quant = QuantStub() self.dequant = DeQuantStub() self.linear = nn.Linear(12, 1).to(dtype=torch.float) self.qconfig = get_default_qat_qconfig("qnnpack") def forward(self, input: torch.Tensor) -> torch.Tensor: x = self.bagging_op(self.emb(input), dim=1) x = self.quant(x) x = self.linear(x) return self.dequant(x) class SubModelForFusion(nn.Module): def __init__(self) -> None: super().__init__() self.conv = nn.Conv2d(2, 2, 1, bias=None).to(dtype=torch.float) self.bn = nn.BatchNorm2d(2).to(dtype=torch.float) def forward(self, x): x = self.conv(x) x = self.bn(x) return x class SubModelWithoutFusion(nn.Module): def __init__(self) -> None: super().__init__() self.conv = nn.Conv2d(2, 2, 1, bias=None).to(dtype=torch.float) self.relu = nn.ReLU(inplace=False).to(dtype=torch.float) def forward(self, x): return self.relu(self.conv(x)) class ModelForFusion(nn.Module): def __init__(self, qconfig): super().__init__() self.conv1 = nn.Conv2d(3, 2, 1, bias=None).to(dtype=torch.float) self.bn1 = nn.BatchNorm2d(2).to(dtype=torch.float) self.relu1 = nn.ReLU(inplace=True).to(dtype=torch.float) self.sub1 = SubModelForFusion() self.sub2 = SubModelWithoutFusion() self.fc = nn.Linear(36, 10).to(dtype=torch.float) self.quant = QuantStub() self.dequant = DeQuantStub() self.qconfig = qconfig self.conv2 = nn.Conv3d(3, 2, (1, 1, 1), bias=None).to(dtype=torch.float) self.relu2 = nn.ReLU(inplace=False).to(dtype=torch.float) self.bn2 = nn.BatchNorm3d(2).to(dtype=torch.float) self.relu3 = nn.ReLU(inplace=True).to(dtype=torch.float) self.conv3 = nn.Conv1d(3, 3, 2).to(dtype=torch.float) self.bn3 = nn.BatchNorm1d(3).to(dtype=torch.float) self.relu4 = nn.ReLU(inplace=True).to(dtype=torch.float) # don't quantize sub2 self.sub2.qconfig = None self.fc.qconfig = None def forward(self, x): x = x.squeeze(2) x = self.quant(x) x = self.conv3(x) x = self.bn3(x) x = self.relu4(x) x = x.unsqueeze(2) y = x.unsqueeze(2) x = self.conv1(x) x = self.bn1(x) x = self.relu1(x) x = self.sub1(x) x = self.dequant(x) x = self.sub2(x) x = x.reshape(-1, 36).contiguous() x = self.fc(x) y = self.conv2(y) y = self.relu2(y) y = self.bn2(y) y = self.relu3(y) y = self.dequant(y) return x class ConvBNReLU(nn.Sequential): def __init__(self) -> None: super().__init__( nn.Conv2d(3, 3, 1, 1, bias=False), nn.BatchNorm2d(3), nn.ReLU(inplace=False) ) class ModelWithSequentialFusion(nn.Module): def __init__(self) -> None: super().__init__() self.conv1 = nn.Conv2d(3, 3, 1) self.relu1 = nn.ReLU(inplace=False) layers = [ConvBNReLU() for _ in range(3)] self.features = nn.Sequential(*layers) head = [nn.Linear(300, 10), nn.ReLU(inplace=False)] self.classifier = nn.Sequential(*head) self.seq = nn.Sequential() self.quant = QuantStub() self.dequant = DeQuantStub() def forward(self, x): x = self.quant(x) x = self.conv1(x) x = self.relu1(x) x = self.features(x) x = torch.reshape(x, (-1, 3 * 10 * 10)) x = self.classifier(x) x = self.seq(x) x = self.dequant(x) return x class ModelForFusionWithBias(nn.Module): def __init__(self) -> None: super().__init__() self.conv1 = nn.Conv2d(3, 2, 5, bias=True).to(dtype=torch.float) self.bn1 = nn.BatchNorm2d(2).to(dtype=torch.float) self.relu1 = nn.ReLU(inplace=True).to(dtype=torch.float) self.conv2 = nn.Conv2d(2, 2, 1, bias=True).to(dtype=torch.float) self.bn2 = 
nn.BatchNorm2d(2).to(dtype=torch.float) self.quant = QuantStub() self.dequant = DeQuantStub() def forward(self, x): x = self.quant(x) x = self.conv1(x) x = self.bn1(x) x = self.relu1(x) x = self.conv2(x) x = self.bn2(x) x = self.dequant(x) return x class ModelForLinearBNFusion(nn.Module): def __init__(self) -> None: super().__init__() self.fc = nn.Linear(20, 10) self.bn = nn.BatchNorm1d(10) nn.init.uniform_(self.bn.weight) nn.init.uniform_(self.bn.bias) def forward(self, x): return self.bn(self.fc(x)) class DummyObserver(torch.nn.Module): def calculate_qparams(self): return 1.0, 0 def forward(self, x): return x class ModelForConvTransposeBNFusion(nn.Module): def __init__(self) -> None: super().__init__() self.conv1 = nn.ConvTranspose1d(3, 3, 1) self.bn1 = nn.BatchNorm1d(3) self.conv2 = nn.ConvTranspose2d(3, 3, 1) self.bn2 = nn.BatchNorm2d(3) self.conv3 = nn.ConvTranspose3d(3, 3, 1) self.bn3 = nn.BatchNorm3d(3) def forward(self, x): x = self.conv1(x) x = self.bn1(x) x = x.unsqueeze(2) x = self.conv2(x) x = self.bn2(x) x = x.unsqueeze(2) x = self.conv3(x) x = self.bn3(x) return x class ModelWithFunctionals(torch.nn.Module): def __init__(self) -> None: super().__init__() self.mycat = nnq.FloatFunctional() self.myadd = nnq.FloatFunctional() self.myadd_relu = nnq.FloatFunctional() self.mymatmul = nnq.FloatFunctional() # Tracing doesnt work yet for c10 ops with scalar inputs # https://github.com/pytorch/pytorch/issues/27097 # self.my_scalar_add = nnq.FloatFunctional() # self.my_scalar_mul = nnq.FloatFunctional() def forward(self, x): y = self.mycat.cat([x, x, x]) z = self.myadd.add(y, y) w = self.myadd_relu.add_relu(z, z) u = self.mymatmul.matmul(w, w.T) # Tracing doesnt work yet for c10 ops with scalar inputs # https://github.com/pytorch/pytorch/issues/27097 # w = self.my_scalar_add.add_scalar(w, -0.5) # w = self.my_scalar_mul.mul_scalar(w, 0.5) return u class ResNetBase(torch.nn.Module): def __init__(self) -> None: super().__init__() norm_layer = nn.BatchNorm2d inplanes = 3 self.conv1 = nn.Conv2d(inplanes, inplanes, (1, 1), bias=False) self.bn1 = norm_layer(inplanes) self.relu1 = nn.ReLU() self.relu2 = nn.ReLU() self.downsample = torch.nn.Identity() self.myop = nn.quantized.FloatFunctional() self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) self.fc = torch.nn.Linear(inplanes, 1) def forward(self, x): out = self.conv1(x) out = self.bn1(out) out = self.relu1(out) identity = self.downsample(x) out = self.myop.add(out, identity) out = self.relu2(out) out = self.avgpool(out) out = torch.flatten(out, 1) out = self.fc(out) return out def fuse_model(self): # TODO: remove this check and define two fuse_model function on this module if self.training: torch.ao.quantization.fuse_modules_qat(self, [['conv1', 'bn1', 'relu1']], inplace=True) else: torch.ao.quantization.fuse_modules(self, [['conv1', 'bn1', 'relu1']], inplace=True) class ModelMultipleOps(torch.nn.Module): def __init__(self) -> None: super().__init__() norm_layer = nn.BatchNorm2d inplanes = 3 self.conv1 = nn.Conv2d(inplanes, inplanes, (1, 1), bias=False) self.conv2 = nn.Conv2d(inplanes, inplanes, (1, 1), bias=False) self.bn1 = norm_layer(inplanes) self.relu1 = nn.ReLU() self.relu2 = nn.ReLU() self.downsample = torch.nn.Identity() self.skip_add = nn.quantized.FloatFunctional() self.cat = nn.quantized.FloatFunctional() self.avgpool = nn.AdaptiveAvgPool2d((4, 4)) self.fc = nn.Linear(12, 6) def forward(self, x): out = self.conv1(x) out = self.bn1(out) out = self.relu1(out) identity = self.downsample(x) out = self.skip_add.add(out, identity) out = 
self.relu2(out) out = self.avgpool(out) out = self.conv2(out) out = torch.nn.functional.max_pool2d(out, 2, 2) out = self.cat.cat([out, out]) out = out.reshape(-1, 3 * 2 * 2) out = self.fc(out) return out # Model to ensure consistency of fake quant with true quant # Average pooling and mean operations are not modelled # accurately with fake-quant so this model does not # contain those operations class ModelMultipleOpsNoAvgPool(torch.nn.Module): def __init__(self) -> None: super().__init__() norm_layer = nn.BatchNorm2d inplanes = 3 self.conv1 = nn.Conv2d(inplanes, inplanes, (1, 1), bias=False) self.conv2 = nn.Conv2d(inplanes, inplanes, (1, 1), bias=False) self.bn1 = norm_layer(inplanes) self.relu1 = nn.ReLU() self.relu2 = nn.ReLU() self.skip_add = nn.quantized.FloatFunctional() self.cat = nn.quantized.FloatFunctional() self.maxpool = nn.MaxPool2d((4, 4)) self.fc = nn.Linear(12, 6) def forward(self, x): out = self.conv1(x) out = self.bn1(out) out = self.relu1(out) skip = self.conv2(x) out = self.skip_add.add(out, skip) out = self.relu2(out) out = self.maxpool(out) out = self.conv2(out) out = torch.nn.functional.max_pool2d(out, 2, 2) out = self.cat.cat([out, out]) out = out.reshape(-1, 3 * 2 * 2) out = self.fc(out) return out class EmbeddingBagModule(torch.nn.Module): def __init__(self) -> None: super().__init__() self.emb = torch.nn.EmbeddingBag(num_embeddings=10, embedding_dim=12, include_last_offset=True, scale_grad_by_freq=False, mode='sum') def forward(self, indices, offsets, per_sample_weights): return self.emb(indices, offsets, per_sample_weights) class EmbeddingModule(torch.nn.Module): def __init__(self) -> None: super().__init__() self.emb = torch.nn.Embedding(num_embeddings=10, embedding_dim=12) def forward(self, indices): return self.emb(indices) class EmbeddingWithStaticLinear(torch.nn.Module): def __init__(self) -> None: super().__init__() self.emb = torch.nn.EmbeddingBag(num_embeddings=10, embedding_dim=12) self.fc = torch.nn.Linear(4, 2) self.emb.qconfig = float_qparams_weight_only_qconfig self.qconfig = default_qconfig self.quant = QuantStub() self.dequant = DeQuantStub() def forward(self, indices, offsets, linear_in): emb = self.emb(indices, offsets) q_x = self.quant(linear_in) fc = self.fc(q_x) fc = self.dequant(fc) features = torch.cat([fc] + [emb], dim=1) return features class DenseTopMLP(nn.Module): def __init__(self, dense_dim, dense_out, embedding_dim, top_out_in, top_out_out) -> None: super().__init__() self.dense_mlp = nn.Sequential( nn.Linear(dense_dim, dense_out), ) self.top_mlp = nn.Sequential( nn.Linear(dense_out + embedding_dim, top_out_in), nn.Linear(top_out_in, top_out_out), ) def forward( self, sparse_feature: torch.Tensor, dense: torch.Tensor, ) -> torch.Tensor: dense_feature = self.dense_mlp(dense) features = torch.cat([dense_feature] + [sparse_feature], dim=1) out = self.top_mlp(features) return out # thin wrapper around embedding bag, because tracing inside nn.Embedding # bag is not supported at the moment and this is top level class EmbBagWrapper(nn.Module): def __init__(self, num_embeddings, embedding_dim): super().__init__() self.emb_bag = nn.EmbeddingBag(num_embeddings, embedding_dim, mode='sum') def forward(self, indices, offsets): return self.emb_bag(indices, offsets) class SparseNNModel(nn.Module): _NUM_EMBEDDINGS = 10 _EMBEDDING_DIM = 5 _DENSE_DIM = 4 _DENSE_OUTPUT = 2 _TOP_OUT_IN = 2 _TOP_OUT_OUT = 2 _TOP_MLP_DIM = 1 def __init__(self) -> None: super().__init__() self.model_sparse = EmbBagWrapper(self._NUM_EMBEDDINGS, self._EMBEDDING_DIM) 
self.dense_top = DenseTopMLP( self._DENSE_DIM, self._DENSE_OUTPUT, self._EMBEDDING_DIM, self._TOP_OUT_IN, self._TOP_OUT_OUT) def forward( self, sparse_indices: torch.Tensor, sparse_offsets: torch.Tensor, dense: torch.Tensor, ) -> torch.Tensor: sparse_feature = self.model_sparse(sparse_indices, sparse_offsets) out = self.dense_top(sparse_feature, dense) return out class TestHelperModules: class ControlFlow(torch.nn.Module): def forward( self, xs: torch.Tensor, pred1: torch.Tensor, pred2: torch.Tensor, y: torch.Tensor, ) -> torch.Tensor: def true_nested(y: torch.Tensor) -> torch.Tensor: y = y + y y = torch.mm(y, y) return y def false_nested(y: torch.Tensor) -> torch.Tensor: return torch.mm(y, y) def true_fn(x: torch.Tensor, pred2: torch.Tensor) -> torch.Tensor: z = control_flow.cond(pred2, true_nested, false_nested, [x]) return x + z def false_fn(x: torch.Tensor, _) -> torch.Tensor: return x.cos() def map_fn( x: torch.Tensor, pred1: torch.Tensor, pred2: torch.Tensor, y: torch.Tensor ) -> torch.Tensor: x = x.cos() y = control_flow.cond(pred1, true_fn, false_fn, [y, pred2]) x = x + y return x.sin() y = torch.mm(y, y) return control_flow.map(map_fn, xs, pred1, pred2, y) def example_inputs(self): return (torch.ones(2, 2), torch.tensor([False]), torch.tensor([False]), torch.ones(2, 2),) class Conv2dPropAnnotaton(torch.nn.Module): def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d(3, 3, 3) self.linear = torch.nn.Linear(3, 3) def forward(self, x): x = self.conv(x) x = x.view(-1, 3) x = torch.nn.functional.hardtanh(x, -0.5, 0.5) x = self.linear(x) return x class Conv2dWithObsSharingOps(torch.nn.Module): def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d(3, 3, 3) self.hardtanh = torch.nn.Hardtanh() self.adaptive_avg_pool2d = torch.nn.AdaptiveAvgPool2d((1, 1)) def forward(self, x): x = self.conv(x) x = self.adaptive_avg_pool2d(x) x = self.hardtanh(x) x = torch.mean(x) return x class Conv2dWithTwoLinearPermute(torch.nn.Module): def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d(3, 16, 3) self.linear1 = torch.nn.Linear(16, 8, bias=False) self.linear2 = torch.nn.Linear(8, 8) def forward(self, x): conv_out = self.conv(x) permute_out = torch.permute(conv_out, (0, 2, 3, 1)) return self.linear2(self.linear1(permute_out)) class Conv2dWithTwoLinear(torch.nn.Module): def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d(3, 16, 3) self.linear1 = torch.nn.Linear(64, 8, bias=False) self.linear2 = torch.nn.Linear(8, 8) def forward(self, x): conv_out = self.conv(x) reshape_out = torch.reshape(conv_out, (2, 64)) return self.linear2(self.linear1(reshape_out)) class ConvLinearWPermute(torch.nn.Module): def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d(3, 8, 3) self.linear1 = torch.nn.Linear(8, 8) def forward(self, x): conv_out = self.conv(x) permute_out = torch.permute(conv_out, (0, 2, 3, 1)) return self.linear1(permute_out) class TwoLinearModule(torch.nn.Module): def __init__(self) -> None: super().__init__() self.linear1 = torch.nn.Linear(8, 16, bias=False) self.linear2 = torch.nn.Linear(16, 8) def forward(self, x): return self.linear2(self.linear1(x)) def example_inputs(self): return (torch.randn(2, 8),) class ConvMaxPool2d(torch.nn.Module): def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d(2, 2, 1) self.pool = torch.nn.MaxPool2d(1, 1) def forward(self, x): x = self.conv(x) x = self.pool(x) return x class ConvWithAdaptiveAvgPool2d(torch.nn.Module): def __init__(self) -> None: 
super().__init__() self.conv = torch.nn.Conv2d(3, 3, 3) self.adaptive_avg_pool2d = torch.nn.AdaptiveAvgPool2d((1, 1)) def forward(self, x): x = self.conv(x) x = self.adaptive_avg_pool2d(x) return x class ConvWithBNRelu(torch.nn.Module): def __init__(self, relu, dim=2, bn=True, bias=True): super().__init__() convs = {1: torch.nn.Conv1d, 2: torch.nn.Conv2d} bns = {1: torch.nn.BatchNorm1d, 2: torch.nn.BatchNorm2d} self.conv = convs[dim](3, 3, 3, bias=bias) if bn: self.bn = bns[dim](3) else: self.bn = torch.nn.Identity() if relu: self.relu = torch.nn.ReLU() else: self.relu = torch.nn.Identity() def forward(self, x): x = self.conv(x) x = self.bn(x) return self.relu(x) class ConvTWithBNRelu(torch.nn.Module): def __init__(self, relu, dim=2, bn=True, bias=True): super().__init__() convts = {1: torch.nn.ConvTranspose1d, 2: torch.nn.ConvTranspose2d} bns = {1: torch.nn.BatchNorm1d, 2: torch.nn.BatchNorm2d} self.convt = convts[dim](3, 3, 3, bias=bias) if bn: self.bn = bns[dim](3) else: self.bn = torch.nn.Identity() if relu: self.relu = torch.nn.ReLU() else: self.relu = torch.nn.Identity() def forward(self, x): x = self.convt(x) x = self.bn(x) return self.relu(x) class Conv2dThenConv1d(torch.nn.Module): def __init__(self) -> None: super().__init__() self.conv1d = torch.nn.Conv1d(3, 3, 3) self.conv2d = torch.nn.Conv2d(3, 3, 3) def forward(self, x): x = self.conv2d(x) x = x.squeeze(0) x = self.conv1d(x) return x def example_inputs(self): return (torch.randn(1, 3, 5, 5),) class Conv2dWithCat(torch.nn.Module): def __init__(self) -> None: super().__init__() self.conv1 = torch.nn.Conv2d(3, 3, 3) self.conv2 = torch.nn.Conv2d(3, 3, 3) def forward(self, x, y): x = self.conv1(x) y = self.conv2(y) z = torch.cat([x, y], dim=1) return z class Conv2dWithTwoCat(torch.nn.Module): def __init__(self) -> None: super().__init__() self.conv1 = torch.nn.Conv2d(3, 3, 3) self.conv2 = torch.nn.Conv2d(3, 3, 3) def forward(self, x1, x2, x3, x4): x1 = self.conv1(x1) x2 = self.conv2(x2) y = torch.cat([x1, x2], dim=1) z = x3 + x4 w = torch.cat([z, y]) return w class Conv2dWithSplit(torch.nn.Module): def __init__(self) -> None: super().__init__() self.conv1 = torch.nn.Conv2d(3, 3, 3) self.conv2 = torch.nn.Conv2d(3, 3, 3) def forward(self, x): x = self.conv1(x) # use split so we get a list of Tensors x1, x2 = torch.split(x, 2, dim=1) y = torch.cat([x1, x2], dim=1) return y def example_inputs(self): return (torch.randn(1, 3, 16, 16),) class ThreeAdd(torch.nn.Module): def forward(self, x1, x2, x3, x4): y = x1 + x2 z = x3 + x4 w = y + z return w class EmbeddingModule(torch.nn.Module): def __init__(self) -> None: super().__init__() self.emb = torch.nn.Embedding(num_embeddings=10, embedding_dim=12) def forward(self, indices): return self.emb(indices) class EmbeddingConvLinearModule(torch.nn.Module): def __init__(self) -> None: super().__init__() self.emb = torch.nn.Embedding(num_embeddings=10, embedding_dim=8) self.conv = torch.nn.Conv2d(8, 16, (1, 3)) self.linear = torch.nn.Linear(16, 8) def forward(self, indices): embeddings = self.emb(indices) embeddings = torch.unsqueeze(embeddings, dim=0) embeddings = torch.permute(embeddings, (0, 3, 1, 2)) conv_out = self.conv(embeddings) conv_out = torch.permute(conv_out, (0, 2, 3, 1)) conv_out = torch.squeeze(conv_out, dim=0) return self.linear(conv_out) class AddInplaceAdd(torch.nn.Module): def forward(self, x, y): x = x + y x += y return x class MulInplaceMul(torch.nn.Module): def forward(self, x, y): x = x * y x *= y return x class AddMulScalar(torch.nn.Module): def forward(self, x): x = x + 3 
x = x * 3 x += 3 x *= 3 return x class ConvBnReLU2dAndLinearReLU(torch.nn.Module): def __init__(self) -> None: super().__init__() self.conv_bn_relu = TestHelperModules.ConvWithBNRelu(relu=True) self.linear = torch.nn.Linear(3, 8, bias=False) self.relu = torch.nn.ReLU() def forward(self, x): x = self.conv_bn_relu(x) permute_out = torch.permute(x, (0, 2, 3, 1)) linear_out = self.linear(permute_out) return linear_out class GroupwiseConv2d(torch.nn.Module): def __init__(self) -> None: super().__init__() self.conv = torch.nn.Conv2d(4, 4, 3, groups=2) def forward(self, x): return self.conv(x) def example_inputs(self): return (torch.randn(2, 4, 10, 10),) class LinearReluModel(torch.nn.Module): def __init__(self) -> None: super().__init__() self.fc = torch.nn.Linear(5, 5).to(dtype=torch.float) self.relu = torch.nn.ReLU() def forward(self, x): x = self.relu(self.fc(x)) return x def _generate_qdq_quantized_model( mod, inputs, is_qat=False, is_dynamic=False, quantizer=None ): def get_default_quantizer(is_qat, is_dynamic, inputs): has_xpu = any(isinstance(input, torch.Tensor) and input.device.type == "xpu" for input in inputs) if has_xpu: quantizer = XPUInductorQuantizer() assert (not is_qat) and (not is_dynamic), "QAT and dynamic quantization is not supported at XPU backend currently" quantizer.set_global(xpuiq.get_default_xpu_inductor_quantization_config()) else: quantizer = X86InductorQuantizer() quantizer.set_global( xiq.get_default_x86_inductor_quantization_config( is_qat=is_qat, is_dynamic=is_dynamic ) ) return quantizer maybe_no_grad = contextlib.nullcontext() if is_qat else torch.no_grad() with maybe_no_grad: export_model = export_for_training( mod, inputs, ).module() quantizer = ( quantizer if quantizer else get_default_quantizer(is_qat, is_dynamic, inputs) ) prepare_model = ( prepare_qat_pt2e(export_model, quantizer) if is_qat else prepare_pt2e(export_model, quantizer) ) prepare_model(*inputs) torch.ao.quantization.move_exported_model_to_eval(prepare_model) convert_model = convert_pt2e(prepare_model) return convert_model ```
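A minimal usage sketch (not part of the file above) for the eager-mode toy models defined in this helper module. It assumes the file is importable as `torch.testing._internal.common_quantization` (the path header is not visible in this excerpt) and that the `fbgemm` quantized engine is available; it runs the standard fuse / prepare / calibrate / convert flow on `AnnotatedConvBnReLUModel`:

```py
# Hypothetical sketch: eager-mode post-training quantization of one of the
# annotated toy models above. The import path and engine choice are assumptions.
import torch
from torch.testing._internal.common_quantization import AnnotatedConvBnReLUModel

model = AnnotatedConvBnReLUModel(qengine="fbgemm").eval()
example_inputs = model.get_example_inputs()

model.fuse_model()                                  # eval-mode fusion of conv + bn + relu
torch.ao.quantization.prepare(model, inplace=True)  # insert observers per the model's qconfig
model(*example_inputs)                              # calibrate observers with one batch
torch.ao.quantization.convert(model, inplace=True)  # swap in quantized modules

print(model(*example_inputs).shape)
```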
=================================================================================================================================== SOURCE CODE FILE: common_quantized.py LINES: 1 SIZE: 8.57 KB PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\common_quantized.py ENCODING: utf-8 ```py # mypy: ignore-errors r"""Importing this file includes common utility methods for checking quantized tensors and modules. """ import numpy as np import torch from contextlib import contextmanager from torch.testing._internal.common_utils import TEST_WITH_TSAN, IS_PPC, IS_MACOS, IS_WINDOWS supported_qengines = torch.backends.quantized.supported_engines supported_qengines.remove('none') # Note: We currently do not run QNNPACK tests on WINDOWS and MACOS as it is flaky. Issue #29326 # QNNPACK is not supported on PPC if 'qnnpack' in supported_qengines and any([IS_PPC, TEST_WITH_TSAN, IS_MACOS, IS_WINDOWS]): supported_qengines.remove('qnnpack') def _conv_output_shape(input_size, kernel_size, padding, stride, dilation, output_padding=0): """Computes the output shape given convolution parameters.""" return np.floor((input_size + 2 * padding - kernel_size - (kernel_size - 1) * (dilation - 1)) / stride) + 2 * output_padding + 1 # Quantization references def _quantize(x, scale, zero_point, qmin=None, qmax=None, dtype=np.uint8): """Quantizes a numpy array.""" if qmin is None: qmin = np.iinfo(dtype).min if qmax is None: qmax = np.iinfo(dtype).max qx = np.round(x / scale + zero_point).astype(np.int64) qx = np.clip(qx, qmin, qmax) qx = qx.astype(dtype) return qx def _dequantize(qx, scale, zero_point): """Dequantizes a numpy array.""" x = (qx.astype(float) - zero_point) * scale return x def _requantize(x, multiplier, zero_point, qmin=0, qmax=255, qtype=np.uint8): """Requantizes a numpy array, i.e., intermediate int32 or int16 values are converted back to given type""" qx = (x * multiplier).round() + zero_point qx = np.clip(qx, qmin, qmax).astype(qtype) return qx def _calculate_dynamic_qparams(X, dtype, reduce_range=False, qscheme=torch.per_tensor_affine): """Calculate the dynamic quantization parameters (scale, zero_point) according to the min and max element of the tensor""" assert qscheme in (torch.per_tensor_affine, torch.per_tensor_symmetric) if qscheme == torch.per_tensor_symmetric: assert dtype == torch.qint8 if isinstance(X, torch.Tensor): X = X.numpy() if dtype == torch.qint8: if reduce_range: qmin, qmax = -64, 63 else: qmin, qmax = -128, 127 else: # dtype == torch.quint8 if reduce_range: qmin, qmax = 0, 127 else: qmin, qmax = 0, 255 min_val = X.min() max_val = X.max() is_symmetric = (qscheme == torch.per_tensor_symmetric) if min_val == max_val: scale = 1.0 zero_point = 0 else: if is_symmetric: max_val = max(max_val, -min_val) min_val = -max_val scale = (max_val - min_val) / (qmax - qmin) scale = max(scale, np.finfo(np.float32).eps) zero_point = 0 else: max_val = max(max_val, 0.0) min_val = min(min_val, 0.0) scale = (max_val - min_val) / (qmax - qmin) scale = max(scale, np.finfo(np.float32).eps) zero_point = qmin - round(min_val / scale) zero_point = max(qmin, zero_point) zero_point = min(qmax, zero_point) return [float(scale), int(zero_point)] def _calculate_dynamic_per_channel_qparams(X, dtype): """Calculate the dynamic quantization parameters (scale, zero_point) according to the min and max element of the tensor""" if isinstance(X, torch.Tensor): X = X.numpy() qmin, qmax = torch.iinfo(dtype).min, torch.iinfo(dtype).max n_levels = qmax - qmin scale = np.zeros(X.shape[0], dtype=np.float64) 
zero_point = np.zeros(X.shape[0], dtype=np.int64) for i in range(zero_point.shape[0]): min_val = X.min() max_val = X.max() if min_val == max_val: scale[i] = 1.0 zero_point[i] = 0 else: max_val = max(max_val, 0.0) min_val = min(min_val, 0.0) scale[i] = (max_val - min_val) / n_levels scale[i] = max(scale[i], np.finfo(np.float32).eps) zero_point[i] = qmin - round(min_val / scale[i]) zero_point[i] = max(qmin, zero_point[i]) zero_point[i] = min(qmax, zero_point[i]) return scale, zero_point def _snr(x, x_hat): """Calculates the signal to noise ratio and returns the signal and noise power, as well as the SNR in dB. If the input is a list/tuple this function is called recursively on each element. The result will have the same nested structure as the inputs. Args: x, x_hat: Either a tensor or a nested list/tuple of tensors. Returns: signal, noise, SNR(in dB): Either floats or a nested list of floats """ if isinstance(x, (list, tuple)): assert len(x) == len(x_hat) res = [_snr(x[idx], x_hat[idx]) for idx in range(len(x))] return res if x_hat.is_quantized: x_hat = x_hat.dequantize() if x.is_quantized: x = x.dequantize() noise = (x - x_hat).norm() if noise == 0: return 0.0, float('inf'), float('inf') signal = x.norm() snr = signal / noise snr_db = 20 * snr.log10() return signal, noise, snr_db @contextmanager def override_quantized_engine(qengine): previous = torch.backends.quantized.engine torch.backends.quantized.engine = qengine try: yield finally: torch.backends.quantized.engine = previous @contextmanager def override_cpu_allocator_for_qnnpack(qengine_is_qnnpack): try: if qengine_is_qnnpack: torch._C._set_default_mobile_cpu_allocator() yield finally: if qengine_is_qnnpack: torch._C._unset_default_mobile_cpu_allocator() # TODO: Update all quantization tests to use this decorator. # Currently for some of the tests it seems to have inconsistent params # for fbgemm vs qnnpack. def override_qengines(qfunction): def test_fn(*args, **kwargs): for qengine in supported_qengines: with override_quantized_engine(qengine): # qfunction should not return anything. 
qfunction(*args, **kwargs) return test_fn def qengine_is_fbgemm(): return torch.backends.quantized.engine == 'fbgemm' def qengine_is_qnnpack(): return torch.backends.quantized.engine == 'qnnpack' def qengine_is_onednn(): return torch.backends.quantized.engine == 'onednn' def qengine_is_x86(): return torch.backends.quantized.engine == 'x86' # Helper function used to simulate per-channel fake-quant against any axis def _permute_to_axis_zero(X, axis): new_axis_list = list(range(X.dim())) new_axis_list[axis] = 0 new_axis_list[0] = axis y = X.permute(tuple(new_axis_list)) return y, new_axis_list # Reference method for fake quantize # Note: because scale/zero_point are left as float in the actual kernel, this mimics how fake_quant works for float16/64 def _fake_quantize_per_channel_affine_reference(X, per_channel_scale, per_channel_zero_point, axis, quant_min, quant_max): dtype = X.dtype X, permute_axis_list = _permute_to_axis_zero(X.to(torch.float32), axis) res = torch.zeros_like(X) for i in range(X.size()[0]): res[i] = (torch.clamp(torch.round(X[i] * (1.0 / per_channel_scale[i]) + per_channel_zero_point[i]), quant_min, quant_max) - per_channel_zero_point[i]) * per_channel_scale[i] out = res.permute(tuple(permute_axis_list)) return out.to(dtype) # Reference method for the gradient of the fake quantize operator # Note: because scale/zero_point are left as float in the actual kernel, this mimics how fake_quant works for float16/64 def _fake_quantize_per_channel_affine_grad_reference(dY, X, per_channel_scale, per_channel_zero_point, axis, quant_min, quant_max): dtype = X.dtype X, permute_axis_list = _permute_to_axis_zero(X.to(torch.float32), axis) Xq = torch.zeros_like(X) for i in range(X.size()[0]): Xq[i] = torch.round(X[i] * (1.0 / per_channel_scale[i]) + per_channel_zero_point[i]) Xq = Xq.permute(tuple(permute_axis_list)) mask = (Xq >= quant_min) * (Xq <= quant_max) res = torch.zeros_like(dY) res[mask] = dY[mask] return res.to(dtype) def to_tensor(X, device): if not isinstance(X, torch.Tensor): X = torch.tensor(X) else: X = X.detach().clone() return X.to(device=torch.device(device), dtype=torch.float32) ```
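A minimal sketch (not part of the file above) showing how the numpy reference helpers and the engine-override context manager defined here might be used together; the input values are illustrative only:

```py
# Hypothetical sketch: round-trip a float array through the reference
# quantize/dequantize helpers using dynamically computed qparams.
import numpy as np
import torch
from torch.testing._internal.common_quantized import (
    _calculate_dynamic_qparams,
    _dequantize,
    _quantize,
    override_quantized_engine,
    supported_qengines,
)

x = np.random.randn(4, 4).astype(np.float32)

# Scale/zero_point chosen from the observed min/max, as dynamic quantization would.
scale, zero_point = _calculate_dynamic_qparams(x, torch.quint8)

qx = _quantize(x, scale, zero_point)        # float -> uint8 reference quantization
x_hat = _dequantize(qx, scale, zero_point)  # uint8 -> float reference dequantization
print(np.abs(x - x_hat).max(), scale)       # reconstruction error is on the order of `scale`

# Temporarily switch the backend engine; the previous engine is restored on exit.
with override_quantized_engine(supported_qengines[0]):
    print(torch.backends.quantized.engine)
```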
==================================================================================================================================
SOURCE CODE FILE: common_subclass.py
LINES: 1
SIZE: 12.25 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\common_subclass.py
ENCODING: utf-8
```py
# mypy: ignore-errors

import torch
from copy import deepcopy
from torch.utils._pytree import tree_map
import torch.utils._pytree as pytree

# TODO: Move LoggingTensor here.
from torch.testing._internal.logging_tensor import LoggingTensor


# Base class for wrapper-style tensors.
class WrapperTensor(torch.Tensor):
    @staticmethod
    def __new__(cls, *args, **kwargs):
        t, kwargs = cls.get_wrapper_properties(*args, **kwargs)
        if "size" not in kwargs:
            size = t.size()
        else:
            size = kwargs["size"]
            del kwargs["size"]
        if "dtype" not in kwargs:
            kwargs["dtype"] = t.dtype
        if "layout" not in kwargs:
            kwargs["layout"] = t.layout
        if "device" not in kwargs:
            kwargs["device"] = t.device
        if "requires_grad" not in kwargs:
            kwargs["requires_grad"] = False
        # Ignore memory_format and pin memory for now as I don't know how to
        # safely access them on a Tensor (if possible??)
        wrapper = torch.Tensor._make_wrapper_subclass(cls, size, **kwargs)
        wrapper._validate_methods()
        return wrapper

    @classmethod
    def get_wrapper_properties(cls, *args, **kwargs):
        # Should return both an example Tensor and a dictionary of kwargs
        # to override any of that example Tensor's properties.
        # This is very similar to the `t.new_*(args)` API
        raise NotImplementedError("You need to implement get_wrapper_properties")

    def _validate_methods(self):
        # Skip this if not in debug mode?
        # Changing these on the python side is wrong as it would not be properly reflected
        # on the c++ side
        # This doesn't catch attributes set in the __init__
        forbidden_overrides = ["size", "stride", "dtype", "layout", "device", "requires_grad"]
        for el in forbidden_overrides:
            if getattr(self.__class__, el) is not getattr(torch.Tensor, el):
                raise RuntimeError(f"Subclass {self.__class__.__name__} is overwriting the "
                                   f"property {el} but this is not allowed as such change would "
                                   "not be reflected to c++ callers.")


class WrapperTensorWithCustomSizes(WrapperTensor):
    @classmethod
    def get_wrapper_properties(cls, t, requires_grad=False):
        return t, {"requires_grad": requires_grad, "dispatch_sizes_strides_policy": "sizes"}

    def __init__(self, t, requires_grad=False):
        self.t = t

    @classmethod
    def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
        if not all(issubclass(cls, t) for t in types):
            return NotImplemented

        if kwargs is None:
            kwargs = {}

        def unwrap(e):
            return e.t if isinstance(e, WrapperTensorWithCustomSizes) else e

        def wrap(e):
            return WrapperTensorWithCustomSizes(e) if isinstance(e, torch.Tensor) else e

        rs = tree_map(wrap, func(*tree_map(unwrap, args), **tree_map(unwrap, kwargs or {})))
        return rs

    def __repr__(self):
        return super().__repr__(tensor_contents=f"t={self.t}")


class WrapperTensorWithCustomStrides(WrapperTensor):
    @classmethod
    def get_wrapper_properties(cls, t, requires_grad=False):
        return t, {"requires_grad": requires_grad, "dispatch_sizes_strides_policy": "strides"}

    def __init__(self, t, requires_grad=False):
        self.t = t

    @classmethod
    def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
        if not all(issubclass(cls, t) for t in types):
            return NotImplemented

        if kwargs is None:
            kwargs = {}

        def unwrap(e):
            return e.t if isinstance(e, WrapperTensorWithCustomStrides) else e

        def wrap(e):
            return WrapperTensorWithCustomStrides(e) if isinstance(e, torch.Tensor) else e

        rs = tree_map(wrap, func(*tree_map(unwrap, args), **tree_map(unwrap, kwargs or {})))
        return rs

    def __repr__(self):
        return super().__repr__(tensor_contents=f"t={self.t}")


class DiagTensorBelow(WrapperTensor):
    @classmethod
    def get_wrapper_properties(cls, diag, requires_grad=False):
        assert diag.ndim == 1
        return diag, {"size": diag.size() + diag.size(), "requires_grad": requires_grad}

    def __init__(self, diag, requires_grad=False):
        self.diag = diag

    handled_ops = {}

    @classmethod
    def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
        if not all(issubclass(cls, t) for t in types):
            return NotImplemented

        # For everything else, call the handler:
        fn = cls.handled_ops.get(func.__name__, None)
        if fn:
            return fn(*args, **(kwargs or {}))
        else:
            # Note that here, because we don't need to provide the autograd formulas
            # we can have a default "fallback" that creates a plain Tensor based
            # on the diag elements and calls the func again.

            def unwrap(e):
                return e.diag.diag() if isinstance(e, DiagTensorBelow) else e

            def wrap(e):
                if isinstance(e, torch.Tensor) and e.ndim == 1:
                    return DiagTensorBelow(e)
                if isinstance(e, torch.Tensor) and e.ndim == 2 and e.count_nonzero() == e.diag().count_nonzero():
                    return DiagTensorBelow(e.diag())
                return e

            rs = tree_map(wrap, func(*tree_map(unwrap, args), **tree_map(unwrap, kwargs or {})))
            return rs

    def __repr__(self):
        return super().__repr__(tensor_contents=f"diag={self.diag}")


class SparseTensor(WrapperTensor):
    @classmethod
    def get_wrapper_properties(cls, size, values, indices, requires_grad=False):
        assert values.device == indices.device
        return values, {"size": size, "requires_grad": requires_grad}

    def __init__(self, size, values, indices, requires_grad=False):
        self.values = values
        self.indices = indices

    def __repr__(self):
        return super().__repr__(tensor_contents=f"values={self.values}, indices={self.indices}")

    def sparse_to_dense(self):
        res = torch.zeros(self.size(), dtype=self.values.dtype)
        res[self.indices.unbind(1)] = self.values
        return res

    @staticmethod
    def from_dense(t):
        indices = t.nonzero()
        values = t[indices.unbind(1)]
        return SparseTensor(t.size(), values, indices)

    @classmethod
    def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
        func_name = f"{func.__module__}.{func.__name__}"

        res = cls._try_call_special_impl(func_name, args, kwargs)
        if res is not NotImplemented:
            return res

        # Otherwise, use a default implementation that constructs dense
        # tensors and uses them to compute the values
        def unwrap(e):
            return e.sparse_to_dense() if isinstance(e, SparseTensor) else e

        # Wrap back all Tensors into our custom class
        def wrap(e):
            # Check for zeros and use that to get indices
            return SparseTensor.from_dense(e) if isinstance(e, torch.Tensor) else e

        rs = tree_map(wrap, func(*tree_map(unwrap, args), **tree_map(unwrap, kwargs or {})))
        return rs

    # To show how things happen later
    def __rmul__(self, other):
        return super().__rmul__(other)

    _SPECIAL_IMPLS = {}

    @classmethod
    def _try_call_special_impl(cls, func, args, kwargs):
        if func not in cls._SPECIAL_IMPLS:
            return NotImplemented
        return cls._SPECIAL_IMPLS[func](args, kwargs)


# Example non-wrapper subclass that stores extra state.
class NonWrapperTensor(torch.Tensor):
    def __new__(cls, data):
        t = torch.Tensor._make_subclass(cls, data)
        t.extra_state = {
            'last_func_called': None
        }
        return t

    @classmethod
    def __torch_function__(cls, func, types, args=(), kwargs=None):
        result = super().__torch_function__(func, types, args, kwargs)

        if isinstance(result, cls):
            # Do something with the extra state. For the example here, just store the name of the
            # last function called (skip for deepcopy so the copy has the same extra state).
            if func is torch.Tensor.__deepcopy__:
                result.extra_state = deepcopy(args[0].extra_state)
            else:
                result.extra_state = {
                    'last_func_called': func.__name__,
                }

        return result

    # new_empty() must be defined for deepcopy to work
    def new_empty(self, shape):
        return type(self)(torch.empty(shape))


# Class used to store info about subclass tensors used in testing.
class SubclassInfo:

    __slots__ = ['name', 'create_fn', 'closed_under_ops']

    def __init__(self, name, create_fn, closed_under_ops=True):
        self.name = name
        self.create_fn = create_fn  # create_fn(shape) -> tensor instance
        self.closed_under_ops = closed_under_ops


# Helper function to create a subclass of the given class and possibly cache sizes / strides.
def _create_and_access_shape(cls, shape):
    sub = cls(torch.randn(shape))
    # NB: Wrapper subclasses with custom dispatched sizes / strides cache this info
    # on the first call via non-serializable PyCapsules. We purposefully trigger cache
    # population here for serialization / deepcopy tests to verify that the presence of this
    # cache info doesn't cause problems.
    sub.size()
    sub.stride()
    return sub


subclass_db = {
    torch.Tensor: SubclassInfo(
        'base_tensor', create_fn=torch.randn
    ),
    NonWrapperTensor: SubclassInfo(
        'non_wrapper_tensor',
        create_fn=lambda shape: NonWrapperTensor(torch.randn(shape))
    ),
    LoggingTensor: SubclassInfo(
        'logging_tensor',
        create_fn=lambda shape: LoggingTensor(torch.randn(shape))
    ),
    SparseTensor: SubclassInfo(
        'sparse_tensor',
        create_fn=lambda shape: SparseTensor.from_dense(torch.randn(shape).relu())
    ),
    DiagTensorBelow: SubclassInfo(
        'diag_tensor_below',
        create_fn=lambda shape: DiagTensorBelow(torch.randn(shape)),
        closed_under_ops=False  # sparse semantics
    ),
    WrapperTensorWithCustomSizes: SubclassInfo(
        'wrapper_with_custom_sizes',
        create_fn=lambda shape: _create_and_access_shape(WrapperTensorWithCustomSizes, shape),
        closed_under_ops=False,
    ),
    WrapperTensorWithCustomStrides: SubclassInfo(
        'wrapper_with_custom_strides',
        create_fn=lambda shape: _create_and_access_shape(WrapperTensorWithCustomStrides, shape),
        closed_under_ops=False,
    ),
}


class SubclassWithTensorFactory(torch.Tensor):
    @staticmethod
    def __new__(cls, src):
        shape = src.shape
        kwargs = {}
        kwargs["strides"] = src.stride()
        kwargs["storage_offset"] = src.storage_offset()
        kwargs["device"] = src.device
        kwargs["layout"] = src.layout
        kwargs["requires_grad"] = src.requires_grad
        kwargs["dtype"] = src.dtype
        out = torch.Tensor._make_wrapper_subclass(cls, shape, **kwargs)
        return out

    def __init__(self, src):
        self.src = src

    def __repr__(self):
        return f"{self.__class__.__name__}"

    def __tensor_flatten__(self):
        return ["src"], None

    @classmethod
    def __tensor_unflatten__(cls, inner_tensors, meta, outer_size, outer_stride):
        src = inner_tensors["src"]
        return cls(src)

    @classmethod
    def __torch_dispatch__(cls, func, types, args, kwargs):
        if kwargs is None:
            kwargs = {}

        def _fn(x):
            return x.src * torch.ones(x.src.shape) if x.src.dtype == torch.float32 else x.src

        _args = pytree.tree_map_only(cls, _fn, args)
        _kwargs = pytree.tree_map_only(cls, _fn, kwargs)

        _out = func(*_args, **_kwargs)

        _out_flat, _out_spec = pytree.tree_flatten(_out)

        out_flat = [cls(o) if isinstance(o, torch.Tensor) else o for o in _out_flat]
        return pytree.tree_unflatten(out_flat, _out_spec)
```
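As a quick orientation to the file above, here is a minimal usage sketch (not part of the original file) showing how two of the wrapper subclasses behave under `__torch_dispatch__`. It assumes only that `torch` is installed and that the module is importable as `torch.testing._internal.common_subclass`; the example tensors and printed values are illustrative.

```py
# Minimal usage sketch (illustrative; not part of the original file).
import torch
from torch.testing._internal.common_subclass import DiagTensorBelow, SparseTensor

# DiagTensorBelow stores only the diagonal; its reported size is the full square
# matrix, and unhandled ops fall back to a dense diag() tensor before re-wrapping.
d = DiagTensorBelow(torch.tensor([1.0, 2.0, 3.0]))
print(d.size())        # torch.Size([3, 3])
print((d + d).diag)    # result is re-wrapped, so .diag holds tensor([2., 4., 6.])

# SparseTensor round-trips through a dense representation for ops without a
# registered special impl, then re-sparsifies the result via from_dense().
s = SparseTensor.from_dense(torch.tensor([[0.0, 1.0], [2.0, 0.0]]))
print(s.sparse_to_dense())
print((s * 2.0).sparse_to_dense())
```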
=============================================================================================================================== SOURCE CODE FILE: common_utils.py LINES: 24 SIZE: 234.33 KB PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\common_utils.py ENCODING: utf-8 ```py # mypy: allow-untyped-defs r"""Importing this file must **not** initialize CUDA context. test_distributed relies on this assumption to properly run. This means that when this is imported no CUDA calls shall be made, including torch.cuda.device_count(), etc. torch.testing._internal.common_cuda.py can freely initialize CUDA context when imported. """ import argparse import contextlib import copy import ctypes import errno import functools import gc import hashlib import inspect import io import json import logging import math import operator import os import pathlib import platform import random import re import shutil import signal import socket import subprocess import sys import tempfile import threading import time import types import unittest import warnings from collections.abc import Mapping, Sequence from contextlib import closing, contextmanager from copy import deepcopy from dataclasses import dataclass from enum import Enum from functools import partial, wraps from itertools import product, chain from pathlib import Path from statistics import mean from typing import ( Any, Callable, Optional, TypeVar, Union, ) from collections.abc import Iterable, Iterator from unittest.mock import MagicMock import expecttest import numpy as np import __main__ # type: ignore[import] import torch import torch.backends.cudnn import torch.backends.mkl import torch.backends.mps import torch.backends.xnnpack import torch.cuda from torch import Tensor from torch._C import ScriptDict, ScriptList # type: ignore[attr-defined] from torch._dynamo.trace_rules import _as_posix_path from torch._utils_internal import get_writable_path from torch._logging.scribe import open_source_signpost from torch.nn import ( ModuleDict, ModuleList, ParameterDict, ParameterList, Sequential, ) from torch.onnx import ( register_custom_op_symbolic, unregister_custom_op_symbolic, ) from torch.testing import make_tensor from torch.testing._comparison import ( BooleanPair, NonePair, NumberPair, Pair, TensorLikePair, ) from torch.testing._comparison import not_close_error_metas from torch.testing._internal.common_dtype import get_all_dtypes from torch.utils._import_utils import _check_module_exists import torch.utils._pytree as pytree from torch.utils import cpp_extension try: import pytest has_pytest = True except ImportError: has_pytest = False MI300_ARCH = ("gfx942",) def freeze_rng_state(*args, **kwargs): return torch.testing._utils.freeze_rng_state(*args, **kwargs) # Class to keep track of test flags configurable by environment variables. # Flags set here are intended to be read-only and should not be modified after # definition. # TODO: Expand this class to handle abritrary settings in addition to boolean flags? class TestEnvironment: # Set of env vars to set for the repro command that is output on test failure. # Specifically, this includes env vars that are set to non-default values and # are not implied. Maps from env var name -> value (int) repro_env_vars: dict = {} # Defines a flag usable throughout the test suite, determining its value by querying # the specified environment variable. # # Args: # name (str): The name of the flag. A global variable with this name will be set # for convenient access throughout the test suite. 
# env_var (str): The name of the primary environment variable from which to # determine the value of this flag. If this is None or the environment variable # is unset, the default value will be used unless otherwise implied (see # implied_by_fn). Default: None # default (bool): The default value to use for the flag if unset by the environment # variable and unimplied. Default: False # include_in_repro (bool): Indicates whether this flag should be included in the # repro command that is output on test failure (i.e. whether it is possibly # relevant to reproducing the test failure). Default: True # enabled_fn (Callable): Callable returning whether the flag should be enabled # given the environment variable value and the default value. Default: Lambda # requiring "0" to disable if on by default OR "1" to enable if off by default. # implied_by_fn (Callable): Thunk returning a bool to imply this flag as enabled # by something outside of its primary environment variable setting. For example, # this can be useful if the value of another environment variable implies the flag # as enabled. Default: Lambda returning False to indicate no implications. @staticmethod def def_flag( name, env_var=None, default=False, include_in_repro=True, enabled_fn=lambda env_var_val, default: ( (env_var_val != "0") if default else (env_var_val == "1")), implied_by_fn=lambda: False, ): enabled = default env_var_val = None if env_var is not None: env_var_val = os.getenv(env_var) enabled = enabled_fn(env_var_val, default) implied = implied_by_fn() enabled = enabled or implied if include_in_repro and (env_var is not None) and (enabled != default) and not implied: TestEnvironment.repro_env_vars[env_var] = env_var_val # export flag globally for convenience assert name not in globals(), f"duplicate definition of flag '{name}'" globals()[name] = enabled return enabled # Defines a setting usable throughout the test suite, determining its value by querying # the specified environment variable. This differs from a flag in that it's not restricted # to a boolean value. # # Args: # name (str): The name of the setting. A global variable with this name will be set # for convenient access throughout the test suite. # env_var (str): The name of the primary environment variable from which to # determine the value of this setting. If this is None or the environment variable # is unset, the default value will be used. Default: None # default (Any): The default value to use for the setting if unset by the environment # variable. Default: None # include_in_repro (bool): Indicates whether this setting should be included in the # repro command that is output on test failure (i.e. whether it is possibly # relevant to reproducing the test failure). Default: True # parse_fn (Callable): Callable parsing the env var string. Default value just uses # the string itself. @staticmethod def def_setting( name, env_var=None, default=None, include_in_repro=True, parse_fn=lambda maybe_val_str: maybe_val_str, ): value = default if env_var is None else os.getenv(env_var) value = parse_fn(value) if include_in_repro and (value != default): TestEnvironment.repro_env_vars[env_var] = value # export setting globally for convenience assert name not in globals(), f"duplicate definition of setting '{name}'" globals()[name] = value return value # Returns a string prefix usable to set environment variables for any test # settings that should be explicitly set to match this instantiation of the # test suite. 
# Example: "PYTORCH_TEST_WITH_ASAN=1 PYTORCH_TEST_WITH_ROCM=1" @staticmethod def repro_env_var_prefix() -> str: return " ".join([f"{env_var}={value}" for env_var, value in TestEnvironment.repro_env_vars.items()]) log = logging.getLogger(__name__) torch.backends.disable_global_flags() FILE_SCHEMA = "file://" if sys.platform == 'win32': FILE_SCHEMA = "file:///" # NB: This flag differs semantically from others in that setting the env var to any # non-empty value will cause it to be true: # CI=1, CI="true", CI=0, etc. all set the flag to be true. # CI= and an unset CI set the flag to be false. # GitHub sets the value to CI="true" to enable it. IS_CI: bool = TestEnvironment.def_flag( "IS_CI", env_var="CI", include_in_repro=False, enabled_fn=lambda env_var_value, _: bool(env_var_value), ) IS_SANDCASTLE: bool = TestEnvironment.def_flag( "IS_SANDCASTLE", env_var="SANDCASTLE", implied_by_fn=lambda: os.getenv("TW_JOB_USER") == "sandcastle", include_in_repro=False, ) IN_RE_WORKER: bool = os.environ.get("INSIDE_RE_WORKER") is not None _is_fbcode_default = ( hasattr(torch._utils_internal, "IS_FBSOURCE") and torch._utils_internal.IS_FBSOURCE ) IS_FBCODE: bool = TestEnvironment.def_flag( "IS_FBCODE", env_var="PYTORCH_TEST_FBCODE", default=_is_fbcode_default, include_in_repro=False, ) IS_REMOTE_GPU: bool = TestEnvironment.def_flag( "IS_REMOTE_GPU", env_var="PYTORCH_TEST_REMOTE_GPU", include_in_repro=False, ) DISABLE_RUNNING_SCRIPT_CHK: bool = TestEnvironment.def_flag( "DISABLE_RUNNING_SCRIPT_CHK", env_var="PYTORCH_DISABLE_RUNNING_SCRIPT_CHK", include_in_repro=False, ) # NB: enabled by default unless in an fbcode context. PRINT_REPRO_ON_FAILURE: bool = TestEnvironment.def_flag( "PRINT_REPRO_ON_FAILURE", env_var="PYTORCH_PRINT_REPRO_ON_FAILURE", default=(not IS_FBCODE), include_in_repro=False, ) # possibly restrict OpInfo tests to a single sample input OPINFO_SAMPLE_INPUT_INDEX: Optional[int] = TestEnvironment.def_setting( "OPINFO_SAMPLE_INPUT_INDEX", env_var="PYTORCH_OPINFO_SAMPLE_INPUT_INDEX", default=None, # Don't include the env var value in the repro command because the info will # be queried from the tracked sample input instead include_in_repro=False, parse_fn=lambda val: None if val is None else int(val), ) DEFAULT_DISABLED_TESTS_FILE = '.pytorch-disabled-tests.json' DEFAULT_SLOW_TESTS_FILE = 'slow_tests.json' disabled_tests_dict = {} slow_tests_dict = {} def maybe_load_json(filename): if os.path.isfile(filename): with open(filename) as fp: return json.load(fp) log.warning("Attempted to load json file '%s' but it does not exist.", filename) return {} # set them here in case the tests are running in a subprocess that doesn't call run_tests if os.getenv("SLOW_TESTS_FILE", ""): slow_tests_dict = maybe_load_json(os.getenv("SLOW_TESTS_FILE", "")) if os.getenv("DISABLED_TESTS_FILE", ""): disabled_tests_dict = maybe_load_json(os.getenv("DISABLED_TESTS_FILE", "")) NATIVE_DEVICES = ('cpu', 'cuda', 'xpu', 'meta', torch._C._get_privateuse1_backend_name()) # used for managing devices testing for torch profiler UTs # for now cpu, cuda and xpu are added for testing torch profiler UTs DEVICE_LIST_SUPPORT_PROFILING_TEST = ('cpu', 'cuda', 'xpu') ALLOW_XPU_PROFILING_TEST = True check_names = ['orin', 'concord', 'galen', 'xavier', 'nano', 'jetson', 'tegra', 'thor'] IS_JETSON = any(name in platform.platform() for name in check_names) def gcIfJetson(fn): # Irregular Jetson host/device memory setup requires cleanup to avoid tests being killed @functools.wraps(fn) def wrapper(*args, **kwargs): if IS_JETSON: gc.collect() 
torch.cuda.empty_cache() fn(*args, **kwargs) return wrapper # Tries to extract the current test function by crawling the stack. # If unsuccessful, return None. def extract_test_fn() -> Optional[Callable]: try: stack = inspect.stack() for frame_info in stack: frame = frame_info.frame if "self" not in frame.f_locals: continue self_val = frame.f_locals["self"] if isinstance(self_val, unittest.TestCase): test_id = self_val.id() test_name = test_id.split('.')[2] test_fn = getattr(self_val, test_name).__func__ return test_fn except Exception: pass return None # Contains tracked input data useful for debugging purposes @dataclass class TrackedInput: index: int val: Any type_desc: str # Attempt to pull out tracked input information from the test function. # A TrackedInputIter is used to insert this information. def get_tracked_input() -> Optional[TrackedInput]: test_fn = extract_test_fn() if test_fn is None: return None return getattr(test_fn, "tracked_input", None) def clear_tracked_input() -> None: test_fn = extract_test_fn() if test_fn is None: return if not hasattr(test_fn, "tracked_input"): return test_fn.tracked_input = None # type: ignore[attr-defined] # Wraps an iterator and tracks the most recent value the iterator produces # for debugging purposes. Tracked values are stored on the test function. class TrackedInputIter: def __init__( self, child_iter, input_type_desc, item_callback=None, track_callback=None, set_seed=True, restrict_to_index=None ): self.child_iter = enumerate(child_iter) # Input type describes the things we're tracking (e.g. "sample input", "error input"). self.input_type_desc = input_type_desc # NB: The two types of callbacks below exist because the thing we want to track isn't # always the same as the thing we want returned from the iterator. An example of this # is ErrorInput, which we want returned from the iterator, but which contains a # SampleInput that we want to track. # Item callback is run on each (iterated thing, index) to get the thing to return. self.item_callback = item_callback if self.item_callback is None: self.item_callback = lambda x, i: x # Track callback is run on each iterated thing to get the thing to track. self.track_callback = track_callback if self.track_callback is None: self.track_callback = lambda x: x self.test_fn = extract_test_fn() # Indicates whether the random seed should be set before each call to the iterator self.set_seed = set_seed # Indicates that iteration should be restricted to only the provided index. 
# If None, no restriction is done self.restrict_to_index = restrict_to_index def __iter__(self): return self def __next__(self): while True: if self.set_seed: # use a test-name-specific hash for the seed if possible seed = ( int.from_bytes(hashlib.sha256( self.test_fn.__qualname__.encode("utf-8")).digest()[:4], 'little') if self.test_fn is not None else SEED ) set_rng_seed(seed) # allow StopIteration to bubble up input_idx, input_val = next(self.child_iter) if (self.restrict_to_index is None) or (input_idx == self.restrict_to_index): break self._set_tracked_input( TrackedInput( index=input_idx, val=self.track_callback(input_val), type_desc=self.input_type_desc ) ) return self.item_callback(input_val, input_idx) def _set_tracked_input(self, tracked_input: TrackedInput): if self.test_fn is None: return if not hasattr(self.test_fn, "tracked_input"): return self.test_fn.tracked_input = tracked_input # type: ignore[attr-defined] class _TestParametrizer: """ Decorator class for parametrizing a test function, yielding a set of new tests spawned from the original generic test, each specialized for a specific set of test inputs. For example, parametrizing a test across the set of ops will result in a test function per op. The decision of how to parametrize / what to parametrize over is intended to be implemented by each derived class. In the details, the decorator adds a 'parametrize_fn' property to the test function. This function is intended to be called later by one of: * Device-specific test instantiation via instantiate_device_type_tests(). Note that for this case there is no need to explicitly parametrize over device type, as that is handled separately. * Device-agnostic parametrized test instantiation via instantiate_parametrized_tests(). If the decorator is applied to a test function that already has a 'parametrize_fn' property, a new composite 'parametrize_fn' will be created that generates tests with the product of the parameters generated by the old and new parametrize_fns. This allows for convenient composability of decorators. """ def _parametrize_test(self, test, generic_cls, device_cls): """ Parametrizes the given test function across whatever dimension is specified by the derived class. Tests can be parametrized over any arbitrary dimension or combination of dimensions, such as all ops, all modules, or all ops + their associated dtypes. Args: test (fn): Test function to parametrize over generic_cls (class): Generic test class object containing tests (e.g. TestFoo) device_cls (class): Device-specialized test class object (e.g. TestFooCPU); set to None if the tests are not part of a device-specific set Returns: Generator object returning 4-tuples of: test (fn): Parametrized test function; must support a device arg and args for any params test_name (str): Parametrized suffix for the test (e.g. opname_int64); will be appended to the base name of the test param_kwargs (dict): Param kwargs to pass to the test (e.g. {'op': 'add', 'dtype': torch.int64}) decorator_fn (callable): Callable[[Dict], List] for list of decorators to apply given param_kwargs """ raise NotImplementedError def __call__(self, fn): if hasattr(fn, 'parametrize_fn'): # Do composition with the product of args. 
old_parametrize_fn = fn.parametrize_fn new_parametrize_fn = self._parametrize_test fn.parametrize_fn = compose_parametrize_fns(old_parametrize_fn, new_parametrize_fn) else: fn.parametrize_fn = self._parametrize_test return fn def compose_parametrize_fns(old_parametrize_fn, new_parametrize_fn): """ Returns a parametrize_fn that parametrizes over the product of the parameters handled by the given parametrize_fns. Each given parametrize_fn should each have the signature f(test, generic_cls, device_cls). The test names will be a combination of the names produced by the parametrize_fns in "<new_name>_<old_name>" order. This order is done to match intuition for constructed names when composing multiple decorators; the names will be built in top to bottom order when stacking parametrization decorators. Args: old_parametrize_fn (callable) - First parametrize_fn to compose. new_parametrize_fn (callable) - Second parametrize_fn to compose. """ def composite_fn(test, generic_cls, device_cls, old_parametrize_fn=old_parametrize_fn, new_parametrize_fn=new_parametrize_fn): old_tests = list(old_parametrize_fn(test, generic_cls, device_cls)) for (old_test, old_test_name, old_param_kwargs, old_dec_fn) in old_tests: for (new_test, new_test_name, new_param_kwargs, new_dec_fn) in \ new_parametrize_fn(old_test, generic_cls, device_cls): redundant_params = set(old_param_kwargs.keys()).intersection(new_param_kwargs.keys()) if redundant_params: raise RuntimeError('Parametrization over the same parameter by multiple parametrization ' f'decorators is not supported. For test "{test.__name__}", the following parameters ' f'are handled multiple times: {redundant_params}') full_param_kwargs = {**old_param_kwargs, **new_param_kwargs} merged_test_name = '{}{}{}'.format(new_test_name, '_' if old_test_name != '' and new_test_name != '' else '', old_test_name) def merged_decorator_fn(param_kwargs, old_dec_fn=old_dec_fn, new_dec_fn=new_dec_fn): return list(old_dec_fn(param_kwargs)) + list(new_dec_fn(param_kwargs)) yield (new_test, merged_test_name, full_param_kwargs, merged_decorator_fn) return composite_fn def instantiate_parametrized_tests(generic_cls): """ Instantiates tests that have been decorated with a parametrize_fn. This is generally performed by a decorator subclass of _TestParametrizer. The generic test will be replaced on the test class by parametrized tests with specialized names. This should be used instead of instantiate_device_type_tests() if the test class contains device-agnostic tests. You can also use it as a class decorator. E.g. ``` @instantiate_parametrized_tests class TestFoo(TestCase): ... ``` Args: generic_cls (class): Generic test class object containing tests (e.g. TestFoo) """ for attr_name in tuple(dir(generic_cls)): class_attr = getattr(generic_cls, attr_name) if not hasattr(class_attr, 'parametrize_fn'): continue # Remove the generic test from the test class. delattr(generic_cls, attr_name) # Add parametrized tests to the test class. def instantiate_test_helper(cls, name, test, param_kwargs): @wraps(test) def instantiated_test(self, param_kwargs=param_kwargs): test(self, **param_kwargs) assert not hasattr(generic_cls, name), f"Redefinition of test {name}" setattr(generic_cls, name, instantiated_test) for (test, test_suffix, param_kwargs, decorator_fn) in class_attr.parametrize_fn( class_attr, generic_cls=generic_cls, device_cls=None): full_name = f'{test.__name__}_{test_suffix}' # Apply decorators based on full param kwargs. 
for decorator in decorator_fn(param_kwargs): test = decorator(test) instantiate_test_helper(cls=generic_cls, name=full_name, test=test, param_kwargs=param_kwargs) return generic_cls class subtest: """ Explicit subtest case for use with test parametrization. Allows for explicit naming of individual subtest cases as well as applying decorators to the parametrized test. Args: arg_values (iterable): Iterable of arg values (e.g. range(10)) or tuples of arg values (e.g. [(1, 2), (3, 4)]). name (str): Optional name to use for the test. decorators (iterable): Iterable of decorators to apply to the generated test. """ __slots__ = ['arg_values', 'name', 'decorators'] def __init__(self, arg_values, name=None, decorators=None): self.arg_values = arg_values self.name = name self.decorators = decorators if decorators else [] class parametrize(_TestParametrizer): """ Decorator for applying generic test parametrizations. The interface for this decorator is modeled after `@pytest.mark.parametrize`. Basic usage between this decorator and pytest's is identical. The first argument should be a string containing comma-separated names of parameters for the test, and the second argument should be an iterable returning values or tuples of values for the case of multiple parameters. Beyond this basic usage, the decorator provides some additional functionality that pytest does not. 1. Parametrized tests end up as generated test functions on unittest test classes. Since this differs from how pytest works, this decorator takes on the additional responsibility of naming these test functions. The default test names consists of the test's base name followed by each parameter name + value (e.g. "test_bar_x_1_y_foo"), but custom names can be defined using `name_fn` or the `subtest` structure (see below). 2. The decorator specially handles parameter values of type `subtest`, which allows for more fine-grained control over both test naming and test execution. In particular, it can be used to tag subtests with explicit test names or apply arbitrary decorators (see examples below). Examples:: @parametrize("x", range(5)) def test_foo(self, x): ... @parametrize("x,y", [(1, 'foo'), (2, 'bar'), (3, 'baz')]) def test_bar(self, x, y): ... @parametrize("x,y", [(1, 'foo'), (2, 'bar'), (3, 'baz')], name_fn=lambda x, y: '{}_{}'.format(x, y)) def test_bar_custom_names(self, x, y): ... @parametrize("x, y", [subtest((1, 2), name='double'), subtest((1, 3), name='triple', decorators=[unittest.expectedFailure]), subtest((1, 4), name='quadruple')]) def test_baz(self, x, y): ... To actually instantiate the parametrized tests, one of instantiate_parametrized_tests() or instantiate_device_type_tests() should be called. The former is intended for test classes that contain device-agnostic tests, while the latter should be used for test classes that contain device-specific tests. Both support arbitrary parametrizations using the decorator. Args: arg_str (str): String of arg names separate by commas (e.g. "x,y"). arg_values (iterable): Iterable of arg values (e.g. range(10)) or tuples of arg values (e.g. [(1, 2), (3, 4)]). name_fn (Callable): Optional function that takes in parameters and returns subtest name. """ def __init__(self, arg_str, arg_values, name_fn=None): self.arg_names: list[str] = [s.strip() for s in arg_str.split(',') if s != ''] self.arg_values = arg_values self.name_fn = name_fn def _formatted_str_repr(self, idx, name, value): """ Returns a string representation for the given arg that is suitable for use in test function names. 
""" if isinstance(value, torch.dtype): return dtype_name(value) elif isinstance(value, torch.device): return str(value) # Can't use isinstance as it would cause a circular import elif type(value).__name__ in {'OpInfo', 'ModuleInfo'}: return value.formatted_name elif isinstance(value, (int, float, str)): return f"{name}_{str(value).replace('.', '_')}" else: return f"{name}{idx}" def _default_subtest_name(self, idx, values): return '_'.join([self._formatted_str_repr(idx, a, v) for a, v in zip(self.arg_names, values)]) def _get_subtest_name(self, idx, values, explicit_name=None): if explicit_name: subtest_name = explicit_name elif self.name_fn: subtest_name = self.name_fn(*values) else: subtest_name = self._default_subtest_name(idx, values) return subtest_name def _parametrize_test(self, test, generic_cls, device_cls): if len(self.arg_names) == 0: # No additional parameters needed for the test. test_name = '' yield (test, test_name, {}, lambda _: []) else: # Each "values" item is expected to be either: # * A tuple of values with one for each arg. For a single arg, a single item is expected. # * A subtest instance with arg_values matching the previous. values = check_exhausted_iterator = object() for idx, values in enumerate(self.arg_values): maybe_name = None decorators: list[Any] = [] if isinstance(values, subtest): sub = values values = sub.arg_values maybe_name = sub.name @wraps(test) def test_wrapper(*args, **kwargs): return test(*args, **kwargs) decorators = sub.decorators gen_test = test_wrapper else: gen_test = test values = list(values) if len(self.arg_names) > 1 else [values] # type: ignore[call-overload] if len(values) != len(self.arg_names): raise RuntimeError(f'Expected # values == # arg names, but got: {len(values)} ' f'values and {len(self.arg_names)} names for test "{test.__name__}"') param_kwargs = dict(zip(self.arg_names, values)) test_name = self._get_subtest_name(idx, values, explicit_name=maybe_name) def decorator_fn(_, decorators=decorators): return decorators yield (gen_test, test_name, param_kwargs, decorator_fn) if values is check_exhausted_iterator: raise ValueError(f'{test}: An empty arg_values was passed to @parametrize. ' 'Note that this may result from reuse of a generator.') class reparametrize(_TestParametrizer): """ Decorator for adjusting the way an existing parametrizer operates. This class runs the given adapter_fn on each parametrization produced by the given parametrizer, allowing for on-the-fly parametrization more flexible than the default, product-based composition that occurs when stacking parametrization decorators. If the adapter_fn returns None for a given test parametrization, that parametrization will be excluded. Otherwise, it's expected that the adapter_fn returns an iterable of modified parametrizations, with tweaked test names and parameter kwargs. Examples:: def include_is_even_arg(test_name, param_kwargs): x = param_kwargs["x"] is_even = x % 2 == 0 new_param_kwargs = dict(param_kwargs) new_param_kwargs["is_even"] = is_even is_even_suffix = "_even" if is_even else "_odd" new_test_name = f"{test_name}{is_even_suffix}" yield (new_test_name, new_param_kwargs) ... @reparametrize(parametrize("x", range(5)), include_is_even_arg) def test_foo(self, x, is_even): ... def exclude_odds(test_name, param_kwargs): x = param_kwargs["x"] is_even = x % 2 == 0 yield None if not is_even else (test_name, param_kwargs) ... @reparametrize(parametrize("x", range(5)), exclude_odds) def test_bar(self, x): ... 
""" def __init__(self, parametrizer, adapter_fn): self.parametrizer = parametrizer self.adapter_fn = adapter_fn def _parametrize_test(self, test, generic_cls, device_cls): for (gen_test, test_name, param_kwargs, decorator_fn) in \ self.parametrizer._parametrize_test(test, generic_cls, device_cls): adapted = self.adapter_fn(test_name, param_kwargs) if adapted is not None: for adapted_item in adapted: if adapted_item is not None: new_test_name, new_param_kwargs = adapted_item yield (gen_test, new_test_name, new_param_kwargs, decorator_fn) class decorateIf(_TestParametrizer): """ Decorator for applying parameter-specific conditional decoration. Composes with other test parametrizers (e.g. @modules, @ops, @parametrize, etc.). Examples:: @decorateIf(unittest.skip, lambda params: params["x"] == 2) @parametrize("x", range(5)) def test_foo(self, x): ... @parametrize("x,y", [(1, 'foo'), (2, 'bar'), (3, 'baz')]) @decorateIf( unittest.expectedFailure, lambda params: params["x"] == 3 and params["y"] == "baz" ) def test_bar(self, x, y): ... @decorateIf( unittest.expectedFailure, lambda params: params["op"].name == "add" and params["dtype"] == torch.float16 ) @ops(op_db) def test_op_foo(self, device, dtype, op): ... @decorateIf( unittest.skip, lambda params: params["module_info"].module_cls is torch.nn.Linear and \ params["device"] == "cpu" ) @modules(module_db) def test_module_foo(self, device, dtype, module_info): ... Args: decorator: Test decorator to apply if the predicate is satisfied. predicate_fn (Callable): Function taking in a dict of params and returning a boolean indicating whether the decorator should be applied or not. """ def __init__(self, decorator, predicate_fn): self.decorator = decorator self.predicate_fn = predicate_fn def _parametrize_test(self, test, generic_cls, device_cls): # Leave test as-is and return the appropriate decorator_fn. 
def decorator_fn(params, decorator=self.decorator, predicate_fn=self.predicate_fn): if predicate_fn(params): return [decorator] else: return [] @wraps(test) def test_wrapper(*args, **kwargs): return test(*args, **kwargs) test_name = '' yield (test_wrapper, test_name, {}, decorator_fn) class ProfilingMode(Enum): LEGACY = 1 SIMPLE = 2 PROFILING = 3 def cppProfilingFlagsToProfilingMode(): old_prof_exec_state = torch._C._jit_set_profiling_executor(True) old_prof_mode_state = torch._C._get_graph_executor_optimize(True) torch._C._jit_set_profiling_executor(old_prof_exec_state) torch._C._get_graph_executor_optimize(old_prof_mode_state) if old_prof_exec_state: if old_prof_mode_state: return ProfilingMode.PROFILING else: return ProfilingMode.SIMPLE else: return ProfilingMode.LEGACY @contextmanager def enable_profiling_mode_for_profiling_tests(): old_prof_exec_state = False old_prof_mode_state = False if GRAPH_EXECUTOR == ProfilingMode.PROFILING: old_prof_exec_state = torch._C._jit_set_profiling_executor(True) old_prof_mode_state = torch._C._get_graph_executor_optimize(True) try: yield finally: if GRAPH_EXECUTOR == ProfilingMode.PROFILING: torch._C._jit_set_profiling_executor(old_prof_exec_state) torch._C._get_graph_executor_optimize(old_prof_mode_state) @contextmanager def enable_profiling_mode(): old_prof_exec_state = torch._C._jit_set_profiling_executor(True) old_prof_mode_state = torch._C._get_graph_executor_optimize(True) try: yield finally: torch._C._jit_set_profiling_executor(old_prof_exec_state) torch._C._get_graph_executor_optimize(old_prof_mode_state) @contextmanager def num_profiled_runs(num_runs): old_num_runs = torch._C._jit_set_num_profiled_runs(num_runs) try: yield finally: torch._C._jit_set_num_profiled_runs(old_num_runs) func_call = torch._C.ScriptFunction.__call__ meth_call = torch._C.ScriptMethod.__call__ def prof_callable(callable, *args, **kwargs): if 'profile_and_replay' in kwargs: del kwargs['profile_and_replay'] if GRAPH_EXECUTOR == ProfilingMode.PROFILING: with enable_profiling_mode_for_profiling_tests(): callable(*args, **kwargs) return callable(*args, **kwargs) return callable(*args, **kwargs) def prof_func_call(*args, **kwargs): return prof_callable(func_call, *args, **kwargs) def prof_meth_call(*args, **kwargs): return prof_callable(meth_call, *args, **kwargs) torch._C.ScriptFunction.__call__ = prof_func_call # type: ignore[method-assign] torch._C.ScriptMethod.__call__ = prof_meth_call # type: ignore[method-assign] def _get_test_report_path(): # allow users to override the test file location. We need this # because the distributed tests run the same test file multiple # times with different configurations. 
override = os.environ.get('TEST_REPORT_SOURCE_OVERRIDE') test_source = override if override is not None else 'python-unittest' return os.path.join('test-reports', test_source) is_running_via_run_test = "run_test.py" in getattr(__main__, "__file__", "") parser = argparse.ArgumentParser(add_help=not is_running_via_run_test, allow_abbrev=False) parser.add_argument('--subprocess', action='store_true', help='whether to run each test in a subprocess') parser.add_argument('--seed', type=int, default=1234) parser.add_argument('--accept', action='store_true') parser.add_argument('--jit-executor', '--jit_executor', type=str) parser.add_argument('--repeat', type=int, default=1) parser.add_argument('--test-bailouts', '--test_bailouts', action='store_true') parser.add_argument('--use-pytest', action='store_true') parser.add_argument('--save-xml', nargs='?', type=str, const=_get_test_report_path(), default=_get_test_report_path() if IS_CI else None) parser.add_argument('--discover-tests', action='store_true') parser.add_argument('--log-suffix', type=str, default="") parser.add_argument('--run-parallel', type=int, default=1) parser.add_argument('--import-slow-tests', type=str, nargs='?', const=DEFAULT_SLOW_TESTS_FILE) parser.add_argument('--import-disabled-tests', type=str, nargs='?', const=DEFAULT_DISABLED_TESTS_FILE) parser.add_argument('--rerun-disabled-tests', action='store_true') parser.add_argument('--pytest-single-test', type=str, nargs=1) parser.add_argument('--showlocals', action=argparse.BooleanOptionalAction, default=False) # Only run when -h or --help flag is active to display both unittest and parser help messages. def run_unittest_help(argv): unittest.main(argv=argv) if '-h' in sys.argv or '--help' in sys.argv: help_thread = threading.Thread(target=run_unittest_help, args=(sys.argv,)) help_thread.start() help_thread.join() args, remaining = parser.parse_known_args() if args.jit_executor == 'legacy': GRAPH_EXECUTOR = ProfilingMode.LEGACY elif args.jit_executor == 'profiling': GRAPH_EXECUTOR = ProfilingMode.PROFILING elif args.jit_executor == 'simple': GRAPH_EXECUTOR = ProfilingMode.SIMPLE else: # infer flags based on the default settings GRAPH_EXECUTOR = cppProfilingFlagsToProfilingMode() RERUN_DISABLED_TESTS = args.rerun_disabled_tests SLOW_TESTS_FILE = args.import_slow_tests DISABLED_TESTS_FILE = args.import_disabled_tests LOG_SUFFIX = args.log_suffix RUN_PARALLEL = args.run_parallel TEST_BAILOUTS = args.test_bailouts USE_PYTEST = args.use_pytest PYTEST_SINGLE_TEST = args.pytest_single_test TEST_DISCOVER = args.discover_tests TEST_IN_SUBPROCESS = args.subprocess TEST_SAVE_XML = args.save_xml REPEAT_COUNT = args.repeat SEED = args.seed SHOWLOCALS = args.showlocals if not getattr(expecttest, "ACCEPT", False): expecttest.ACCEPT = args.accept UNITTEST_ARGS = [sys.argv[0]] + remaining torch.manual_seed(SEED) # CI Prefix path used only on CI environment CI_TEST_PREFIX = str(Path(os.getcwd())) CI_PT_ROOT = str(Path(os.getcwd()).parent) CI_FUNCTORCH_ROOT = str(os.path.join(Path(os.getcwd()).parent, "functorch")) def wait_for_process(p, timeout=None): try: return p.wait(timeout=timeout) except KeyboardInterrupt: # Give `p` a chance to handle KeyboardInterrupt. Without this, # `pytest` can't print errors it collected so far upon KeyboardInterrupt. 
exit_status = p.wait(timeout=5) if exit_status is not None: return exit_status else: p.kill() raise except subprocess.TimeoutExpired: # send SIGINT to give pytest a chance to make xml p.send_signal(signal.SIGINT) exit_status = None try: exit_status = p.wait(timeout=5) # try to handle the case where p.wait(timeout=5) times out as well as # otherwise the wait() call in the finally block can potentially hang except subprocess.TimeoutExpired: pass if exit_status is not None: return exit_status else: p.kill() raise except: # noqa: B001,E722, copied from python core library p.kill() raise finally: # Always call p.wait() to ensure exit p.wait() def shell(command, cwd=None, env=None, stdout=None, stderr=None, timeout=None): sys.stdout.flush() sys.stderr.flush() # The following cool snippet is copied from Py3 core library subprocess.call # only the with # 1. `except KeyboardInterrupt` block added for SIGINT handling. # 2. In Py2, subprocess.Popen doesn't return a context manager, so we do # `p.wait()` in a `final` block for the code to be portable. # # https://github.com/python/cpython/blob/71b6c1af727fbe13525fb734568057d78cea33f3/Lib/subprocess.py#L309-L323 assert not isinstance(command, str), "Command to shell should be a list or tuple of tokens" p = subprocess.Popen(command, universal_newlines=True, cwd=cwd, env=env, stdout=stdout, stderr=stderr) return wait_for_process(p, timeout=timeout) def retry_shell( command, cwd=None, env=None, stdout=None, stderr=None, timeout=None, retries=1, was_rerun=False, ) -> tuple[int, bool]: # Returns exicode + whether it was rerun assert ( retries >= 0 ), f"Expecting non negative number for number of retries, got {retries}" try: exit_code = shell( command, cwd=cwd, env=env, stdout=stdout, stderr=stderr, timeout=timeout ) if exit_code == 0 or retries == 0: return exit_code, was_rerun print( f"Got exit code {exit_code}, retrying (retries left={retries})", file=stdout, flush=True, ) except subprocess.TimeoutExpired: if retries == 0: print( f"Command took >{timeout // 60}min, returning 124", file=stdout, flush=True, ) return 124, was_rerun print( f"Command took >{timeout // 60}min, retrying (retries left={retries})", file=stdout, flush=True, ) return retry_shell( command, cwd=cwd, env=env, stdout=stdout, stderr=stderr, timeout=timeout, retries=retries - 1, was_rerun=True, ) def discover_test_cases_recursively(suite_or_case): if isinstance(suite_or_case, unittest.TestCase): return [suite_or_case] rc = [] for element in suite_or_case: print(element) rc.extend(discover_test_cases_recursively(element)) return rc def get_test_names(test_cases): return ['.'.join(case.id().split('.')[-2:]) for case in test_cases] def _print_test_names(): suite = unittest.TestLoader().loadTestsFromModule(__main__) test_cases = discover_test_cases_recursively(suite) for name in get_test_names(test_cases): print(name) def chunk_list(lst, nchunks): return [lst[i::nchunks] for i in range(nchunks)] # sanitize filename e.g., distributed/pipeline/sync/skip/test_api.py -> distributed.pipeline.sync.skip.test_api def sanitize_test_filename(filename): # inspect.getfile returns absolute path in some CI jobs, converting it to relative path if needed if filename.startswith(CI_TEST_PREFIX): filename = filename[len(CI_TEST_PREFIX) + 1:] strip_py = re.sub(r'.py$', '', filename) return re.sub('/', r'.', strip_py) def lint_test_case_extension(suite): succeed = True for test_case_or_suite in suite: test_case = test_case_or_suite if isinstance(test_case_or_suite, unittest.TestSuite): first_test = 
test_case_or_suite._tests[0] if len(test_case_or_suite._tests) > 0 else None if first_test is not None and isinstance(first_test, unittest.TestSuite): return succeed and lint_test_case_extension(test_case_or_suite) test_case = first_test if test_case is not None: test_class = test_case.id().split('.', 1)[1].split('.')[0] if not isinstance(test_case, TestCase): err = "This test class should extend from torch.testing._internal.common_utils.TestCase but it doesn't." print(f"{test_class} - failed. {err}") succeed = False return succeed def get_report_path(argv=UNITTEST_ARGS, pytest=False): test_filename = sanitize_test_filename(argv[0]) test_report_path = TEST_SAVE_XML + LOG_SUFFIX test_report_path = os.path.join(test_report_path, test_filename) if pytest: test_report_path = test_report_path.replace('python-unittest', 'python-pytest') os.makedirs(test_report_path, exist_ok=True) test_report_path = os.path.join(test_report_path, f"{test_filename}-{os.urandom(8).hex()}.xml") return test_report_path os.makedirs(test_report_path, exist_ok=True) return test_report_path def sanitize_pytest_xml(xml_file: str): # pytext xml is different from unittext xml, this function makes pytest xml more similar to unittest xml # consider somehow modifying the XML logger in conftest to do this instead import xml.etree.ElementTree as ET tree = ET.parse(xml_file) for testcase in tree.iter('testcase'): full_classname = testcase.attrib.get("classname") if full_classname is None: continue # The test prefix is optional regex_result = re.search(r"^(test\.)?(?P<file>.*)\.(?P<classname>[^\.]*)$", full_classname) if regex_result is None: continue classname = regex_result.group("classname") file = regex_result.group("file").replace(".", "/") testcase.set("classname", classname) testcase.set("file", f"{file}.py") tree.write(xml_file) def get_pytest_test_cases(argv: list[str]) -> list[str]: class TestCollectorPlugin: def __init__(self) -> None: self.tests: list[Any] = [] def pytest_collection_finish(self, session): for item in session.items: self.tests.append(session.config.cwd_relative_nodeid(item.nodeid)) test_collector_plugin = TestCollectorPlugin() import pytest pytest.main( [arg for arg in argv if arg != '-vv'] + ['--collect-only', '-qq', '--use-main-module'], plugins=[test_collector_plugin] ) return test_collector_plugin.tests def run_tests(argv=UNITTEST_ARGS): # import test files. 
if SLOW_TESTS_FILE: if os.path.exists(SLOW_TESTS_FILE): with open(SLOW_TESTS_FILE) as fp: global slow_tests_dict slow_tests_dict = json.load(fp) # use env vars so pytest-xdist subprocesses can still access them os.environ['SLOW_TESTS_FILE'] = SLOW_TESTS_FILE else: warnings.warn(f'slow test file provided but not found: {SLOW_TESTS_FILE}') if DISABLED_TESTS_FILE: if os.path.exists(DISABLED_TESTS_FILE): with open(DISABLED_TESTS_FILE) as fp: global disabled_tests_dict disabled_tests_dict = json.load(fp) os.environ['DISABLED_TESTS_FILE'] = DISABLED_TESTS_FILE else: warnings.warn(f'disabled test file provided but not found: {DISABLED_TESTS_FILE}') # Determine the test launch mechanism if TEST_DISCOVER: _print_test_names() return # Before running the tests, lint to check that every test class extends from TestCase suite = unittest.TestLoader().loadTestsFromModule(__main__) if not lint_test_case_extension(suite): sys.exit(1) if SHOWLOCALS: argv = [ argv[0], *(["--showlocals", "--tb=long", "--color=yes"] if USE_PYTEST else ["--locals"]), *argv[1:], ] if TEST_IN_SUBPROCESS: other_args = [] if DISABLED_TESTS_FILE: other_args.append("--import-disabled-tests") if SLOW_TESTS_FILE: other_args.append("--import-slow-tests") if USE_PYTEST: other_args.append("--use-pytest") if RERUN_DISABLED_TESTS: other_args.append("--rerun-disabled-tests") if TEST_SAVE_XML: other_args += ['--save-xml', args.save_xml] test_cases = ( get_pytest_test_cases(argv) if USE_PYTEST else [case.id().split('.', 1)[1] for case in discover_test_cases_recursively(suite)] ) failed_tests = [] for test_case_full_name in test_cases: cmd = ( [sys.executable] + [argv[0]] + other_args + argv[1:] + (["--pytest-single-test"] if USE_PYTEST else []) + [test_case_full_name] ) string_cmd = " ".join(cmd) timeout = None if RERUN_DISABLED_TESTS else 15 * 60 exitcode, _ = retry_shell(cmd, timeout=timeout, retries=0 if RERUN_DISABLED_TESTS else 1) if exitcode != 0: # This is sort of hacky, but add on relevant env variables for distributed tests. if 'TestDistBackendWithSpawn' in test_case_full_name: backend = os.environ.get("BACKEND", "") world_size = os.environ.get("WORLD_SIZE", "") env_prefix = f"BACKEND={backend} WORLD_SIZE={world_size}" string_cmd = env_prefix + " " + string_cmd # Log the command to reproduce the failure. print(f"Test exited with non-zero exitcode {exitcode}. 
Command to reproduce: {string_cmd}") failed_tests.append(test_case_full_name) assert len(failed_tests) == 0, "{} unit test(s) failed:\n\t{}".format( len(failed_tests), '\n\t'.join(failed_tests)) elif RUN_PARALLEL > 1: test_cases = discover_test_cases_recursively(suite) test_batches = chunk_list(get_test_names(test_cases), RUN_PARALLEL) processes = [] for i in range(RUN_PARALLEL): command = [sys.executable] + argv + [f'--log-suffix=-shard-{i + 1}'] + test_batches[i] processes.append(subprocess.Popen(command, universal_newlines=True)) failed = False for p in processes: failed |= wait_for_process(p) != 0 assert not failed, "Some test shards have failed" elif USE_PYTEST: pytest_args = argv + ["--use-main-module"] test_report_path = "" if TEST_SAVE_XML: test_report_path = get_report_path(pytest=True) print(f'Test results will be stored in {test_report_path}') pytest_args.append(f'--junit-xml-reruns={test_report_path}') if PYTEST_SINGLE_TEST: pytest_args = PYTEST_SINGLE_TEST + pytest_args[1:] import pytest os.environ["NO_COLOR"] = "1" exit_code = pytest.main(args=pytest_args) if TEST_SAVE_XML: sanitize_pytest_xml(test_report_path) # exitcode of 5 means no tests were found, which happens since some test configs don't # run tests from certain files sys.exit(0 if exit_code == 5 else exit_code) elif TEST_SAVE_XML is not None: # import here so that non-CI doesn't need xmlrunner installed import xmlrunner # type: ignore[import] from xmlrunner.result import _XMLTestResult # type: ignore[import] class XMLTestResultVerbose(_XMLTestResult): """ Adding verbosity to test outputs: by default test summary prints 'skip', but we want to also print the skip reason. GH issue: https://github.com/pytorch/pytorch/issues/69014 This works with unittest_xml_reporting<=3.2.0,>=2.0.0 (3.2.0 is latest at the moment) """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) def addSkip(self, test, reason): super().addSkip(test, reason) for c in self.callback.__closure__: if isinstance(c.cell_contents, str) and c.cell_contents == 'skip': # this message is printed in test summary; # it stands for `verbose_str` captured in the closure c.cell_contents = f"skip: {reason}" def printErrors(self) -> None: super().printErrors() self.printErrorList("XPASS", self.unexpectedSuccesses) test_report_path = get_report_path() verbose = '--verbose' in argv or '-v' in argv if verbose: print(f'Test results will be stored in {test_report_path}') unittest.main(argv=argv, testRunner=xmlrunner.XMLTestRunner( output=test_report_path, verbosity=2 if verbose else 1, resultclass=XMLTestResultVerbose)) elif REPEAT_COUNT > 1: for _ in range(REPEAT_COUNT): if not unittest.main(exit=False, argv=argv).result.wasSuccessful(): sys.exit(-1) else: unittest.main(argv=argv) IS_LINUX = sys.platform == "linux" IS_WINDOWS = sys.platform == "win32" IS_MACOS = sys.platform == "darwin" IS_PPC = platform.machine() == "ppc64le" IS_X86 = platform.machine() in ('x86_64', 'i386') IS_ARM64 = platform.machine() in ('arm64', 'aarch64') IS_S390X = platform.machine() == "s390x" def is_avx512_vnni_supported(): if sys.platform != 'linux': return False with open("/proc/cpuinfo", encoding="ascii") as f: lines = f.read() return "vnni" in lines IS_AVX512_VNNI_SUPPORTED = is_avx512_vnni_supported() if IS_WINDOWS: @contextmanager def TemporaryFileName(*args, **kwargs): # Ideally we would like to not have to manually delete the file, but NamedTemporaryFile # opens the file, and it cannot be opened multiple times in Windows. 
To support Windows, # close the file after creation and try to remove it manually if 'delete' in kwargs: if kwargs['delete'] is not False: raise UserWarning("only TemporaryFileName with delete=False is supported on Windows.") else: kwargs['delete'] = False f = tempfile.NamedTemporaryFile(*args, **kwargs) try: f.close() yield f.name finally: os.unlink(f.name) else: @contextmanager # noqa: T484 def TemporaryFileName(*args, **kwargs): with tempfile.NamedTemporaryFile(*args, **kwargs) as f: yield f.name if IS_WINDOWS: @contextmanager def TemporaryDirectoryName(suffix=None): # On Windows the directory created by TemporaryDirectory is likely to be removed prematurely, # so we first create the directory using mkdtemp and then remove it manually try: dir_name = tempfile.mkdtemp(suffix=suffix) yield dir_name finally: shutil.rmtree(dir_name) else: @contextmanager # noqa: T484 def TemporaryDirectoryName(suffix=None): with tempfile.TemporaryDirectory(suffix=suffix) as d: yield d def is_privateuse1_backend_available(): privateuse1_backend_name = torch._C._get_privateuse1_backend_name() privateuse1_backend_module = getattr(torch, privateuse1_backend_name, None) return (is_available := getattr(privateuse1_backend_module, "is_available", None)) and is_available() IS_FILESYSTEM_UTF8_ENCODING = sys.getfilesystemencoding() == 'utf-8' TEST_NUMPY = _check_module_exists('numpy') TEST_FAIRSEQ = _check_module_exists('fairseq') TEST_SCIPY = _check_module_exists('scipy') TEST_MKL = torch.backends.mkl.is_available() TEST_ACL = torch.backends.mkldnn.is_available() and torch.ops.mkldnn._is_mkldnn_acl_supported() TEST_MPS = torch.backends.mps.is_available() MACOS_VERSION = float('.'.join(platform.mac_ver()[0].split('.')[:2]) or -1) TEST_XPU = torch.xpu.is_available() TEST_HPU = True if (hasattr(torch, "hpu") and torch.hpu.is_available()) else False TEST_CUDA = torch.cuda.is_available() custom_device_mod = getattr(torch, torch._C._get_privateuse1_backend_name(), None) TEST_PRIVATEUSE1 = is_privateuse1_backend_available() TEST_PRIVATEUSE1_DEVICE_TYPE = torch._C._get_privateuse1_backend_name() TEST_NUMBA = _check_module_exists('numba') TEST_TRANSFORMERS = _check_module_exists('transformers') TEST_DILL = _check_module_exists('dill') TEST_LIBROSA = _check_module_exists('librosa') and not IS_ARM64 TEST_OPT_EINSUM = _check_module_exists('opt_einsum') TEST_Z3 = _check_module_exists('z3') def split_if_not_empty(x: str): return x.split(",") if len(x) != 0 else [] NOTEST_CPU = "cpu" in split_if_not_empty(os.getenv('PYTORCH_TESTING_DEVICE_EXCEPT_FOR', '')) skipIfNoDill = unittest.skipIf(not TEST_DILL, "no dill") NO_MULTIPROCESSING_SPAWN: bool = False TEST_WITH_ASAN: bool = TestEnvironment.def_flag( "TEST_WITH_ASAN", env_var="PYTORCH_TEST_WITH_ASAN", ) TEST_WITH_DEV_DBG_ASAN: bool = TestEnvironment.def_flag( "TEST_WITH_DEV_DBG_ASAN", env_var="PYTORCH_TEST_WITH_DEV_DBG_ASAN", ) TEST_WITH_TSAN: bool = TestEnvironment.def_flag( "TEST_WITH_TSAN", env_var="PYTORCH_TEST_WITH_TSAN", ) TEST_WITH_UBSAN: bool = TestEnvironment.def_flag( "TEST_WITH_UBSAN", env_var="PYTORCH_TEST_WITH_UBSAN", ) TEST_WITH_ROCM: bool = TestEnvironment.def_flag( "TEST_WITH_ROCM", env_var="PYTORCH_TEST_WITH_ROCM", ) # TODO: Remove PYTORCH_MIOPEN_SUGGEST_NHWC once ROCm officially supports NHWC in MIOpen # See #64427 TEST_WITH_MIOPEN_SUGGEST_NHWC = os.getenv('PYTORCH_MIOPEN_SUGGEST_NHWC', '0') == '1' # Enables tests that are slow to run (disabled by default) TEST_WITH_SLOW: bool = TestEnvironment.def_flag( "TEST_WITH_SLOW", env_var="PYTORCH_TEST_WITH_SLOW", ) # 
Disables non-slow tests (these tests enabled by default) # This is usually used in conjunction with TEST_WITH_SLOW to # run *only* slow tests. (I could have done an enum, but # it felt a little awkward. TEST_SKIP_FAST: bool = TestEnvironment.def_flag( "TEST_SKIP_FAST", env_var="PYTORCH_TEST_SKIP_FAST", ) # Enables crossref tests, in addition to standard tests which # are being run. crossref tests work by installing a torch # function mode that runs extra compute alongside the regular # computation that happens with the test. After both computations # are done, we cross-reference them (thus the name) to check for # correction, before throwing out the extra compute and proceeding # as we had before. By default, we don't run these tests. TEST_WITH_CROSSREF: bool = TestEnvironment.def_flag( "TEST_WITH_CROSSREF", env_var="PYTORCH_TEST_WITH_CROSSREF", ) TEST_SKIP_CUDAGRAPH: bool = TestEnvironment.def_flag( "TEST_SKIP_CUDAGRAPH", env_var="PYTORCH_TEST_SKIP_CUDAGRAPH", ) TEST_CUDA_GRAPH = TEST_CUDA and (not TEST_SKIP_CUDAGRAPH) and ( torch.version.cuda or (torch.version.hip and float(".".join(torch.version.hip.split(".")[0:2])) >= 5.3) ) TEST_CUDA_CUDSS = TEST_CUDA and (torch.version.cuda and int(torch.version.cuda.split(".")[0]) >= 12) def allocator_option_enabled_fn(allocator_config, _, option): if allocator_config is None: return False allocator_config = allocator_config.split(',') if ',' in allocator_config else [allocator_config] mapping = dict([var.split(':') for var in allocator_config]) if option in mapping and mapping[option] == 'True': return True else: return False EXPANDABLE_SEGMENTS: bool = TestEnvironment.def_flag( "EXPANDABLE_SEGMENTS", env_var="PYTORCH_CUDA_ALLOC_CONF", enabled_fn=functools.partial(allocator_option_enabled_fn, option='expandable_segments'), ) if TEST_CUDA and 'NUM_PARALLEL_PROCS' in os.environ: num_procs = int(os.getenv("NUM_PARALLEL_PROCS", "2")) gb_available = torch.cuda.mem_get_info()[1] / 2 ** 30 # other libraries take up about a little under 1 GB of space per process torch.cuda.set_per_process_memory_fraction(round((gb_available - num_procs * .85) / gb_available / num_procs, 2)) requires_cuda = unittest.skipUnless(torch.cuda.is_available(), "Requires CUDA") def skipIfCrossRef(fn): @wraps(fn) def wrapper(*args, **kwargs): if TEST_WITH_CROSSREF: raise unittest.SkipTest("test doesn't currently with crossref") else: fn(*args, **kwargs) return wrapper class CrossRefMode(torch.overrides.TorchFunctionMode): def __torch_function__(self, func, types, args=(), kwargs=None): kwargs = kwargs or {} r = func(*args, **kwargs) return r # Run PyTorch tests with TorchDynamo TEST_WITH_TORCHINDUCTOR: bool = TestEnvironment.def_flag( "TEST_WITH_TORCHINDUCTOR", env_var="PYTORCH_TEST_WITH_INDUCTOR", ) # AOT_EAGER not tested in ci, useful for debugging TEST_WITH_AOT_EAGER: bool = TestEnvironment.def_flag( "TEST_WITH_AOT_EAGER", env_var="PYTORCH_TEST_WITH_AOT_EAGER", ) TEST_WITH_TORCHDYNAMO: bool = TestEnvironment.def_flag( "TEST_WITH_TORCHDYNAMO", env_var="PYTORCH_TEST_WITH_DYNAMO", implied_by_fn=lambda: TEST_WITH_TORCHINDUCTOR or TEST_WITH_AOT_EAGER, ) if TEST_WITH_TORCHDYNAMO: import torch._dynamo # Do not spend time on helper functions that are called with different inputs torch._dynamo.config.accumulated_recompile_limit = 64 # Do not log compilation metrics from unit tests torch._dynamo.config.log_compilation_metrics = False # Silence 3.13.0 guard performance warnings torch._dynamo.config.issue_3_13_0_warning = False if TEST_WITH_TORCHINDUCTOR: import torch._inductor.config 
        torch._inductor.config.fallback_random = True


# seems like this is only used in test/torch_np
def xpassIfTorchDynamo_np(func):
    # numpy 2.0+ is causing issues
    if TEST_WITH_TORCHDYNAMO and np.__version__[0] == '2':
        return unittest.skip("skipping numpy 2.0+ dynamo-wrapped test")(func)
    return func if TEST_WITH_TORCHDYNAMO else unittest.expectedFailure(func)


def xfailIfACL(func):
    return unittest.expectedFailure(func) if TEST_ACL else func


def xfailIfTorchDynamo(func):
    return unittest.expectedFailure(func) if TEST_WITH_TORCHDYNAMO else func


def xfailIfPy312Plus(func):
    return unittest.expectedFailure(func) if sys.version_info >= (3, 12) else func


def xfailIfLinux(func):
    return unittest.expectedFailure(func) if IS_LINUX and not TEST_WITH_ROCM and not IS_FBCODE else func


def skipIfTorchDynamo(msg="test doesn't currently work with dynamo"):
    """
    Usage:
    @skipIfTorchDynamo(msg)
    def test_blah(self):
        ...
    """
    assert isinstance(msg, str), "Are you using skipIfTorchDynamo correctly?"

    def decorator(fn):
        if not isinstance(fn, type):
            @wraps(fn)
            def wrapper(*args, **kwargs):
                if TEST_WITH_TORCHDYNAMO:
                    raise unittest.SkipTest(msg)
                else:
                    fn(*args, **kwargs)
            return wrapper

        assert isinstance(fn, type)
        if TEST_WITH_TORCHDYNAMO:
            fn.__unittest_skip__ = True  # type: ignore[attr-defined]
            fn.__unittest_skip_why__ = msg  # type: ignore[attr-defined]
        return fn

    return decorator


def skipIfTorchInductor(msg="test doesn't currently work with torchinductor",
                        condition=TEST_WITH_TORCHINDUCTOR):
    def decorator(fn):
        if not isinstance(fn, type):
            @wraps(fn)
            def wrapper(*args, **kwargs):
                if condition:
                    raise unittest.SkipTest(msg)
                else:
                    fn(*args, **kwargs)
            return wrapper

        assert isinstance(fn, type)
        if condition:
            fn.__unittest_skip__ = True  # type: ignore[attr-defined]
            fn.__unittest_skip_why__ = msg  # type: ignore[attr-defined]
        return fn

    return decorator


def serialTest(condition=True):
    """
    Decorator for running tests serially.  Requires pytest
    """
    def decorator(fn):
        if has_pytest and condition:
            return pytest.mark.serial(fn)
        return fn
    return decorator


def unMarkDynamoStrictTest(cls=None):
    def decorator(cls):
        cls.dynamo_strict = False
        return cls

    if cls is None:
        return decorator
    else:
        return decorator(cls)


def markDynamoStrictTest(cls_or_func=None, nopython=False):
    """
    Marks the test as 'strict'. In strict mode, we reset before and after the
    test, and run without suppressing errors.

    Args:
    - nopython: if we should run torch._dynamo.optimize with nopython={True/False}.
""" def decorator(cls_or_func): if inspect.isclass(cls_or_func): cls_or_func.dynamo_strict = True cls_or_func.dynamo_strict_nopython = nopython return cls_or_func fn = cls_or_func @wraps(fn) def wrapper(*args, **kwargs): torch._dynamo.reset() with unittest.mock.patch("torch._dynamo.config.suppress_errors", False): fn(*args, **kwargs) torch._dynamo.reset() return wrapper if cls_or_func is None: return decorator else: return decorator(cls_or_func) def skipRocmIfTorchInductor(msg="test doesn't currently work with torchinductor on the ROCm stack"): return skipIfTorchInductor(msg=msg, condition=TEST_WITH_ROCM and TEST_WITH_TORCHINDUCTOR) def skipIfLegacyJitExecutor(msg="test doesn't currently work with legacy JIT executor"): def decorator(fn): if not isinstance(fn, type): @wraps(fn) def wrapper(*args, **kwargs): if GRAPH_EXECUTOR == ProfilingMode.LEGACY: raise unittest.SkipTest(msg) else: fn(*args, **kwargs) return wrapper assert isinstance(fn, type) if GRAPH_EXECUTOR == ProfilingMode.LEGACY: fn.__unittest_skip__ = True # type: ignore[attr-defined] fn.__unittest_skip_why__ = msg # type: ignore[attr-defined] return fn return decorator def make_dynamo_test( fn: Optional[Callable[..., Any]] = None ) -> Callable[..., Any]: """ Decorator function to create a dynamo test case. A function annotate with this decorator takes as input a unittest object. """ from torch._dynamo.testing import CompileCounter, reset, optimize_assert if fn is None: return lambda fn: make_dynamo_test(fn) def standard_test( self: Any, fn: Callable[..., Any], kwargs, ) -> None: def dummy() -> None: fn(self, **kwargs) actual = CompileCounter() dummy() reset() opt_fn = optimize_assert(actual)(dummy) opt_fn() reset() @functools.wraps(fn) def test_fn(self: Any, **kwargs) -> None: return standard_test( self, fn=fn, kwargs=kwargs, ) return test_fn # Run PyTorch tests with translation validation on. TEST_WITH_TV = os.getenv('PYTORCH_TEST_WITH_TV') == '1' if TEST_WITH_TV: torch.fx.experimental._config.translation_validation = True # Some tests take too long when dynamic_shapes is combined with # translation_validation. Whenever that happens, we solve that by # disabling translation_validation. def disable_translation_validation_if_dynamic_shapes(fn): @functools.wraps(fn) def wrapper(*args, **kwargs): if torch._dynamo.config.dynamic_shapes: # Turning TV off due to high latency on dynamic shapes. torch.fx.experimental._config.translation_validation = False return fn(*args, **kwargs) return wrapper # Determine whether to enable cuda memory leak check. # CUDA mem leak check is expensive and thus we don't want to execute it on every # test case / configuration. # If this is True then CUDA memory leak checks are skipped. If this is false # then CUDA memory leak checks are performed. # See: https://github.com/pytorch/pytorch/pull/59402#issuecomment-858811135 TEST_CUDA_MEM_LEAK_CHECK: bool = TestEnvironment.def_flag( "TEST_CUDA_MEM_LEAK_CHECK", env_var="PYTORCH_TEST_CUDA_MEM_LEAK_CHECK", ) # Dict of NumPy dtype -> torch dtype (when the correspondence exists) numpy_to_torch_dtype_dict = { np.bool_ : torch.bool, np.uint8 : torch.uint8, np.uint16 : torch.uint16, np.uint32 : torch.uint32, np.uint64 : torch.uint64, np.int8 : torch.int8, np.int16 : torch.int16, np.int32 : torch.int32, np.int64 : torch.int64, np.float16 : torch.float16, np.float32 : torch.float32, np.float64 : torch.float64, np.complex64 : torch.complex64, np.complex128 : torch.complex128 } # numpy dtypes like np.float64 are not instances, but rather classes. 
This leads to rather absurd cases like # np.float64 != np.dtype("float64") but np.float64 == np.dtype("float64").type. # Especially when checking against a reference we can't be sure which variant we get, so we simply try both. def numpy_to_torch_dtype(np_dtype): try: return numpy_to_torch_dtype_dict[np_dtype] except KeyError: return numpy_to_torch_dtype_dict[np_dtype.type] def has_corresponding_torch_dtype(np_dtype): try: numpy_to_torch_dtype(np_dtype) return True except KeyError: return False if IS_WINDOWS: # Size of `np.intc` is platform defined. # It is returned by functions like `bitwise_not`. # On Windows `int` is 32-bit # https://docs.microsoft.com/en-us/cpp/cpp/data-type-ranges?view=msvc-160 numpy_to_torch_dtype_dict[np.intc] = torch.int # Dict of torch dtype -> NumPy dtype torch_to_numpy_dtype_dict = {value : key for (key, value) in numpy_to_torch_dtype_dict.items()} torch_to_numpy_dtype_dict.update({ torch.bfloat16: np.float32, torch.complex32: np.complex64 }) def skipIfNNModuleInlined( msg="test doesn't currently work with nn module inlining", condition=torch._dynamo.config.inline_inbuilt_nn_modules, ): def decorator(fn): if not isinstance(fn, type): @wraps(fn) def wrapper(*args, **kwargs): if condition: raise unittest.SkipTest(msg) else: fn(*args, **kwargs) return wrapper assert isinstance(fn, type) if condition: fn.__unittest_skip__ = True # type: ignore[attr-defined] fn.__unittest_skip_why__ = msg # type: ignore[attr-defined] return fn return decorator def skipIfRocm(func=None, *, msg="test doesn't currently work on the ROCm stack"): def dec_fn(fn): reason = f"skipIfRocm: {msg}" @wraps(fn) def wrapper(*args, **kwargs): if TEST_WITH_ROCM: raise unittest.SkipTest(reason) else: return fn(*args, **kwargs) return wrapper if func: return dec_fn(func) return dec_fn def skipIfRocmArch(arch: tuple[str, ...]): def dec_fn(fn): @wraps(fn) def wrap_fn(self, *args, **kwargs): if TEST_WITH_ROCM: prop = torch.cuda.get_device_properties(0) if prop.gcnArchName.split(":")[0] in arch: reason = f"skipIfRocm: test skipped on {arch}" raise unittest.SkipTest(reason) return fn(self, *args, **kwargs) return wrap_fn return dec_fn def runOnRocm(fn): @wraps(fn) def wrapper(*args, **kwargs): if TEST_WITH_ROCM: fn(*args, **kwargs) else: raise unittest.SkipTest("test currently only works on the ROCm stack") return wrapper def runOnRocmArch(arch: tuple[str, ...]): def dec_fn(fn): @wraps(fn) def wrap_fn(self, *args, **kwargs): if TEST_WITH_ROCM: prop = torch.cuda.get_device_properties(0) if prop.gcnArchName.split(":")[0] not in arch: reason = f"skipIfRocm: test only runs on {arch}" raise unittest.SkipTest(reason) return fn(self, *args, **kwargs) return wrap_fn return dec_fn def xfailIfS390X(func): return unittest.expectedFailure(func) if IS_S390X else func def skipIfXpu(func=None, *, msg="test doesn't currently work on the XPU stack"): def dec_fn(fn): reason = f"skipIfXpu: {msg}" @wraps(fn) def wrapper(*args, **kwargs): if TEST_XPU: raise unittest.SkipTest(reason) else: return fn(*args, **kwargs) return wrapper if func: return dec_fn(func) return dec_fn def skipIfMPS(fn): @wraps(fn) def wrapper(*args, **kwargs): if TEST_MPS: raise unittest.SkipTest("test doesn't currently work with MPS") else: fn(*args, **kwargs) return wrapper def skipIfMPSOnMacOS13(fn): @wraps(fn) def wrapper(*args, **kwargs): if TEST_MPS and int(MACOS_VERSION) == 13: raise unittest.SkipTest("Test crashes MPSGraph on MacOS13") else: fn(*args, **kwargs) return wrapper def skipIfHpu(fn): @wraps(fn) def wrapper(*args, **kwargs): if TEST_HPU: 
raise unittest.SkipTest("test doesn't currently work with HPU") else: fn(*args, **kwargs) return wrapper # Skips a test on CUDA if ROCm is available and its version is lower than requested. def skipIfRocmVersionLessThan(version=None): def dec_fn(fn): @wraps(fn) def wrap_fn(self, *args, **kwargs): if TEST_WITH_ROCM: rocm_version = str(torch.version.hip) rocm_version = rocm_version.split("-")[0] # ignore git sha rocm_version_tuple = tuple(int(x) for x in rocm_version.split(".")) if rocm_version_tuple is None or version is None or rocm_version_tuple < tuple(version): reason = f"ROCm {rocm_version_tuple} is available but {version} required" raise unittest.SkipTest(reason) return fn(self, *args, **kwargs) return wrap_fn return dec_fn def skipIfNotMiopenSuggestNHWC(fn): @wraps(fn) def wrapper(*args, **kwargs): if not TEST_WITH_MIOPEN_SUGGEST_NHWC: raise unittest.SkipTest("test doesn't currently work without MIOpen NHWC activation") else: fn(*args, **kwargs) return wrapper def skipIfWindows(func=None, *, msg="test doesn't currently work on the Windows stack"): def dec_fn(fn): reason = f"skipIfWindows: {msg}" @wraps(fn) def wrapper(*args, **kwargs): if IS_WINDOWS: # noqa: F821 raise unittest.SkipTest(reason) else: return fn(*args, **kwargs) return wrapper if func: return dec_fn(func) return dec_fn # Reverts the linalg backend back to default to make sure potential failures in one # test do not affect other tests def setLinalgBackendsToDefaultFinally(fn): @wraps(fn) def _fn(*args, **kwargs): _preferred_backend = torch.backends.cuda.preferred_linalg_library() try: fn(*args, **kwargs) finally: torch.backends.cuda.preferred_linalg_library(_preferred_backend) return _fn # Reverts the blas backend back to default to make sure potential failures in one # test do not affect other tests def setBlasBackendsToDefaultFinally(fn): @wraps(fn) def _fn(*args, **kwargs): _preferred_backend = torch.backends.cuda.preferred_blas_library() try: fn(*args, **kwargs) finally: torch.backends.cuda.preferred_blas_library(_preferred_backend) return _fn # Context manager for setting deterministic flag and automatically # resetting it to its original value class DeterministicGuard: def __init__(self, deterministic, *, warn_only=False, fill_uninitialized_memory=True): self.deterministic = deterministic self.warn_only = warn_only self.fill_uninitialized_memory = fill_uninitialized_memory @classmethod def _current_state(cls): return cls( torch.are_deterministic_algorithms_enabled(), warn_only=torch.is_deterministic_algorithms_warn_only_enabled(), fill_uninitialized_memory=torch.utils.deterministic.fill_uninitialized_memory, # type: ignore[attr-defined] ) def _update(self): torch.use_deterministic_algorithms(self.deterministic, warn_only=self.warn_only) torch.utils.deterministic.fill_uninitialized_memory = self.fill_uninitialized_memory # type: ignore[attr-defined] def __enter__(self): self._restore = self._current_state() self._update() def __exit__(self, exception_type, exception_value, traceback): self._restore._update() class AlwaysWarnTypedStorageRemoval: def __init__(self, always_warn): assert isinstance(always_warn, bool) self.always_warn = always_warn def __enter__(self): self.always_warn_restore = torch.storage._get_always_warn_typed_storage_removal() torch.storage._set_always_warn_typed_storage_removal(self.always_warn) def __exit__(self, exception_type, exception_value, traceback): torch.storage._set_always_warn_typed_storage_removal(self.always_warn_restore) # Context manager for setting cuda sync debug mode and reset 
it # to original value # we are not exposing it to the core because sync debug mode is # global and thus not thread safe class CudaSyncGuard: def __init__(self, sync_debug_mode): self.mode = sync_debug_mode def __enter__(self): self.debug_mode_restore = torch.cuda.get_sync_debug_mode() torch.cuda.set_sync_debug_mode(self.mode) def __exit__(self, exception_type, exception_value, traceback): torch.cuda.set_sync_debug_mode(self.debug_mode_restore) # Context manager for setting torch.__future__.set_swap_module_params_on_conversion # and automatically resetting it to its original value class SwapTensorsGuard: def __init__(self, use_swap_tensors): self.use_swap_tensors = use_swap_tensors def __enter__(self): self.swap_tensors_restore = torch.__future__.get_swap_module_params_on_conversion() if self.use_swap_tensors is not None: torch.__future__.set_swap_module_params_on_conversion(self.use_swap_tensors) def __exit__(self, exception_type, exception_value, traceback): torch.__future__.set_swap_module_params_on_conversion(self.swap_tensors_restore) # This decorator can be used for API tests that call # torch.use_deterministic_algorithms(). When the test is finished, it will # restore the previous deterministic flag setting. # # If CUDA >= 10.2, this will set the environment variable # CUBLAS_WORKSPACE_CONFIG=:4096:8 so that the error associated with that # setting is not thrown during the test unless the test changes that variable # on purpose. The previous CUBLAS_WORKSPACE_CONFIG setting will also be # restored once the test is finished. # # Note that if a test requires CUDA to actually register the changed # CUBLAS_WORKSPACE_CONFIG variable, a new subprocess must be created, because # CUDA only checks the variable when the runtime initializes. Tests can be # run inside a subprocess like so: # # import subprocess, sys, os # script = ''' # # Test code should go here # ''' # try: # subprocess.check_output( # [sys.executable, '-c', script], # stderr=subprocess.STDOUT, # cwd=os.path.dirname(os.path.realpath(__file__)), # env=os.environ.copy()) # except subprocess.CalledProcessError as e: # error_message = e.output.decode('utf-8') # # Handle exceptions raised by the subprocess here # def wrapDeterministicFlagAPITest(fn): @wraps(fn) def wrapper(*args, **kwargs): with DeterministicGuard( torch.are_deterministic_algorithms_enabled(), warn_only=torch.is_deterministic_algorithms_warn_only_enabled()): class CuBLASConfigGuard: cublas_var_name = 'CUBLAS_WORKSPACE_CONFIG' def __enter__(self): self.cublas_config_restore = os.environ.get(self.cublas_var_name) os.environ[self.cublas_var_name] = ':4096:8' def __exit__(self, exception_type, exception_value, traceback): cur_cublas_config = os.environ.get(self.cublas_var_name) if self.cublas_config_restore is None: if cur_cublas_config is not None: del os.environ[self.cublas_var_name] else: os.environ[self.cublas_var_name] = self.cublas_config_restore with CuBLASConfigGuard(): fn(*args, **kwargs) return wrapper # This decorator can be used for API tests that want to safely call # torch.__future__.set_swap_module_params_on_conversion. `swap` can be set to # True, False or None where None indicates that the context manager does not # set the flag. When the test is finished, it will restore the previous swap # flag setting. 
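# Illustrative usage sketch (hypothetical test class, kept as a comment so it is not
# executed) for wrapDeterministicFlagAPITest above and wrapSwapTensorsTest defined just
# below:
#
#     class MyConversionTest(TestCase):
#         @wrapDeterministicFlagAPITest
#         def test_toggle_deterministic(self):
#             torch.use_deterministic_algorithms(True)  # restored automatically afterwards
#
#         @wrapSwapTensorsTest(True)
#         def test_module_to_with_swap(self):
#             ...  # runs with swap_module_params_on_conversion enabled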
def wrapSwapTensorsTest(swap=None): def dec_fn(fn): @wraps(fn) def wrapper(*args, **kwargs): with SwapTensorsGuard(swap): fn(*args, **kwargs) return wrapper return dec_fn # test parametrizer for swapping class swap(_TestParametrizer): def __init__(self, swap_values): super().__init__() self.swap_values = swap_values def _parametrize_test(self, test, generic_cls, device_cls): for swap in self.swap_values: yield wrapSwapTensorsTest(swap)(test), f'swap_{swap}', {}, lambda _: [] def skipIfCompiledWithoutNumpy(fn): # Even if the numpy module is present, if `USE_NUMPY=0` is used during the # build, numpy tests will fail numpy_support = TEST_NUMPY if numpy_support: try: # The numpy module is present, verify that PyTorch is compiled with # numpy support torch.from_numpy(np.array([2, 2])) except RuntimeError: numpy_support = False @wraps(fn) def wrapper(*args, **kwargs): if not numpy_support: raise unittest.SkipTest("PyTorch was compiled without numpy support") else: fn(*args, **kwargs) return wrapper def _test_function(fn, device): def run_test_function(self): return fn(self, device) return run_test_function def skipIfNoXNNPACK(fn): @wraps(fn) def wrapper(*args, **kwargs): if not torch.backends.xnnpack.enabled: # type: ignore[attr-defined] raise unittest.SkipTest('XNNPACK must be enabled for these tests. Please build with USE_XNNPACK=1.') else: fn(*args, **kwargs) return wrapper def skipIfNoLapack(fn): @wraps(fn) def wrapper(*args, **kwargs): if not torch._C.has_lapack: raise unittest.SkipTest('PyTorch compiled without Lapack') else: fn(*args, **kwargs) return wrapper def skipIfNotRegistered(op_name, message): """Wraps the decorator to hide the import of the `core`. Args: op_name: Check if this op is registered in `core._REGISTERED_OPERATORS`. message: message to fail with. 
Usage: @skipIfNotRegistered('MyOp', 'MyOp is not linked!') This will check if 'MyOp' is in the caffe2.python.core """ return unittest.skip("Pytorch is compiled without Caffe2") def skipIfNoSciPy(fn): @wraps(fn) def wrapper(*args, **kwargs): if not TEST_SCIPY: raise unittest.SkipTest("test require SciPy, but SciPy not found") else: fn(*args, **kwargs) return wrapper def skip_if_pytest(fn): @wraps(fn) def wrapped(*args, **kwargs): if "PYTEST_CURRENT_TEST" in os.environ: raise unittest.SkipTest("does not work under pytest") return fn(*args, **kwargs) return wrapped def skipIfNoXPU(fn): @wraps(fn) def wrapper(*args, **kwargs): if not TEST_XPU: raise unittest.SkipTest("test required PyTorched compiled with XPU") else: fn(*args, **kwargs) return wrapper def slowTest(fn): @wraps(fn) def wrapper(*args, **kwargs): if not TEST_WITH_SLOW: raise unittest.SkipTest("test is slow; run with PYTORCH_TEST_WITH_SLOW to enable test") else: fn(*args, **kwargs) wrapper.__dict__['slow_test'] = True return wrapper def slowTestIf(condition): return slowTest if condition else lambda fn: fn def skipCUDAMemoryLeakCheckIf(condition): def dec(fn): if getattr(fn, '_do_cuda_memory_leak_check', True): # if current True fn._do_cuda_memory_leak_check = not condition return fn return dec def skipCUDANonDefaultStreamIf(condition): def dec(fn): if getattr(fn, '_do_cuda_non_default_stream', True): # if current True fn._do_cuda_non_default_stream = not condition return fn return dec def suppress_warnings(fn): @wraps(fn) def wrapper(*args, **kwargs): with warnings.catch_warnings(): warnings.simplefilter("ignore") fn(*args, **kwargs) return wrapper def to_gpu(obj, type_map=None): if type_map is None: type_map = {} if isinstance(obj, torch.Tensor): assert obj.is_leaf t = type_map.get(obj.dtype, obj.dtype) with torch.no_grad(): res = obj.to(dtype=t, device="cuda", copy=True) res.requires_grad = obj.requires_grad return res elif torch.is_storage(obj): return obj.new().resize_(obj.size()).copy_(obj) # type: ignore[attr-defined, union-attr] elif isinstance(obj, list): return [to_gpu(o, type_map) for o in obj] elif isinstance(obj, tuple): return tuple(to_gpu(o, type_map) for o in obj) else: return deepcopy(obj) def get_function_arglist(func): return inspect.getfullargspec(func).args def set_rng_seed(seed): torch.manual_seed(seed) random.seed(seed) if TEST_NUMPY: np.random.seed(seed) @contextlib.contextmanager def set_default_dtype(dtype): saved_dtype = torch.get_default_dtype() torch.set_default_dtype(dtype) try: yield finally: torch.set_default_dtype(saved_dtype) @contextlib.contextmanager def set_default_tensor_type(tensor_type): saved_tensor_type = torch.tensor([]).type() torch.set_default_tensor_type(tensor_type) try: yield finally: torch.set_default_tensor_type(saved_tensor_type) def iter_indices(tensor): if tensor.dim() == 0: return range(0) if tensor.dim() == 1: return range(tensor.size(0)) return product(*(range(s) for s in tensor.size())) def is_iterable(obj): try: iter(obj) return True except TypeError: return False def is_iterable_of_tensors(iterable, include_empty=False): """ Returns True if iterable is an iterable of tensors and False o.w. 
If the iterable is empty, the return value is :attr:`include_empty` """ # Tensor itself is iterable so we check this first if isinstance(iterable, torch.Tensor): return False try: if len(iterable) == 0: return include_empty for t in iter(iterable): if not isinstance(t, torch.Tensor): return False except TypeError: return False return True class CudaNonDefaultStream: def __enter__(self): # Before starting CUDA test save currently active streams on all # CUDA devices and set new non default streams to all CUDA devices # to ensure CUDA tests do not use default stream by mistake. beforeDevice = torch.cuda.current_device() self.beforeStreams = [] for d in range(torch.cuda.device_count()): self.beforeStreams.append(torch.cuda.current_stream(d)) deviceStream = torch.cuda.Stream(device=d) self.beforeStreams[-1].synchronize() torch._C._cuda_setStream(stream_id=deviceStream.stream_id, device_index=deviceStream.device_index, device_type=deviceStream.device_type) torch._C._cuda_setDevice(beforeDevice) def __exit__(self, exec_type, exec_value, traceback): # After completing CUDA test load previously active streams on all # CUDA devices. beforeDevice = torch.cuda.current_device() for d in range(torch.cuda.device_count()): torch._C._cuda_setStream(stream_id=self.beforeStreams[d].stream_id, device_index=self.beforeStreams[d].device_index, device_type=self.beforeStreams[d].device_type) torch._C._cuda_setDevice(beforeDevice) class CudaMemoryLeakCheck: def __init__(self, testcase, name=None): self.name = testcase.id() if name is None else name self.testcase = testcase # initialize context & RNG to prevent false positive detections # when the test is the first to initialize those from torch.testing._internal.common_cuda import initialize_cuda_context_rng initialize_cuda_context_rng() # Stores CUDA memory data provided by PyTorch's caching allocator and # the CUDA driver. # # NOTE: The undocumented torch.cuda.mem_get_info() returns # (#free bytes, #total bytes available) on the GPU def __enter__(self): self.caching_allocator_befores = [] self.driver_befores = [] # Performs a gc if required (required if any CUDA memory is held) num_devices = torch.cuda.device_count() for i in range(num_devices): caching_allocator_mem_allocated = torch.cuda.memory_allocated(i) # NOTE: gc is based exclusively on caching allocator memory # because the driver will always have some bytes in use (context size?) 
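            # The gc.collect() / empty_cache() below are a best-effort attempt to release
            # Python cycles that still hold CUDA tensors and to return cached blocks, so
            # that the "before" statistics recorded afterwards reflect a stable baseline.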
if caching_allocator_mem_allocated > 0: gc.collect() torch._C._cuda_clearCublasWorkspaces() torch.cuda.empty_cache() break # Acquires caching allocator and driver statistics before the test is run for i in range(num_devices): self.caching_allocator_befores.append(torch.cuda.memory_allocated(i)) bytes_free, bytes_total = torch.cuda.mem_get_info(i) driver_mem_allocated = bytes_total - bytes_free self.driver_befores.append(driver_mem_allocated) def __exit__(self, exec_type, exec_value, traceback): # Don't check for leaks if an exception was thrown if exec_type is not None: return # Compares caching allocator before/after statistics # An increase in allocated memory is a discrepancy indicating a possible # memory leak discrepancy_detected = False num_devices = torch.cuda.device_count() for i in range(num_devices): # avoid counting cublasWorkspace allocations torch._C._cuda_clearCublasWorkspaces() caching_allocator_mem_allocated = torch.cuda.memory_allocated(i) if caching_allocator_mem_allocated > self.caching_allocator_befores[i]: discrepancy_detected = True break # Short-circuits if no discrepancy detected if not discrepancy_detected: return # Validates the discrepancy persists after garbage collection and # is confirmed by the driver API # NOTE: driver API iscrepancies alone are ignored because with the jiterator # some tests may permanently increase the CUDA context size and # that will appear as a driver memory leak but is the expected behavior. # GCs and clears the cache gc.collect() torch.cuda.empty_cache() for i in range(num_devices): discrepancy_detected = True # Query memory multiple items to ensure leak was not transient for _ in range(3): caching_allocator_mem_allocated = torch.cuda.memory_allocated(i) bytes_free, bytes_total = torch.cuda.mem_get_info(i) driver_mem_allocated = bytes_total - bytes_free caching_allocator_discrepancy = False driver_discrepancy = False if caching_allocator_mem_allocated > self.caching_allocator_befores[i]: caching_allocator_discrepancy = True if driver_mem_allocated > self.driver_befores[i]: driver_discrepancy = True if not (caching_allocator_discrepancy or driver_discrepancy): # Leak was false positive, exit loop discrepancy_detected = False break if not discrepancy_detected: continue if caching_allocator_discrepancy and not driver_discrepancy: # type: ignore[possibly-undefined] # Just raises a warning if the leak is not validated by the # driver API # NOTE: this may be a problem with how the caching allocator collects its # statistics or a leak too small to trigger the allocation of an # additional block of memory by the CUDA driver msg = ("CUDA caching allocator reports a memory leak not " # type: ignore[possibly-undefined] f"verified by the driver API in {self.name}! " f"Caching allocator allocated memory was {self.caching_allocator_befores[i]} " f"and is now reported as {caching_allocator_mem_allocated} " f"on device {i}. " f"CUDA driver allocated memory was {self.driver_befores[i]} and is now {driver_mem_allocated}.") warnings.warn(msg) elif caching_allocator_discrepancy and driver_discrepancy: # A caching allocator discrepancy validated by the driver API is a # failure (except on ROCm, see below) msg = (f"CUDA driver API confirmed a leak in {self.name}! " # type: ignore[possibly-undefined] f"Caching allocator allocated memory was {self.caching_allocator_befores[i]} " f"and is now reported as {caching_allocator_mem_allocated} " f"on device {i}. 
" f"CUDA driver allocated memory was {self.driver_befores[i]} and is now {driver_mem_allocated}.") raise RuntimeError(msg) @contextmanager def skip_exception_type(exc_type): try: yield except exc_type as e: raise unittest.SkipTest(f"not implemented: {e}") from e @contextmanager def print_repro_on_failure(repro_parts): try: yield except unittest.SkipTest: raise except Exception as e: # Get the index of the sample input that failed the test if possible. sample_isolation_prefix = "" tracked_input = getattr(e, "_tracked_input", None) if tracked_input is not None: sample_isolation_prefix = f"PYTORCH_OPINFO_SAMPLE_INPUT_INDEX={tracked_input.index}" repro_str = " ".join(filter(None, (sample_isolation_prefix, *repro_parts))) open_source_signpost( subsystem="test_repros", name="test_failure", parameters=json.dumps( { "repro": " ".join(filter(None, (sample_isolation_prefix, *repro_parts))), } ), ) repro_msg = f""" To execute this test, run the following from the base repo dir: {repro_str} This message can be suppressed by setting PYTORCH_PRINT_REPRO_ON_FAILURE=0""" # NB: Hacking the exception args is the cleanest way I've found to append # failure reproduction info without poisoning the stack trace. if len(e.args) >= 1: e.args = (f"{e.args[0]}\n{repro_msg}", *e.args[1:]) raise # "min_satisfying_examples" setting has been deprecated in hypothesis # 3.56.0 and removed in hypothesis 4.x try: import hypothesis def settings(*args, **kwargs): if 'min_satisfying_examples' in kwargs and hypothesis.version.__version_info__ >= (3, 56, 0): kwargs.pop('min_satisfying_examples') return hypothesis.settings(*args, **kwargs) hypothesis.settings.register_profile( "pytorch_ci", settings( derandomize=True, suppress_health_check=[hypothesis.HealthCheck.too_slow], database=None, max_examples=50, verbosity=hypothesis.Verbosity.normal)) hypothesis.settings.register_profile( "dev", settings( suppress_health_check=[hypothesis.HealthCheck.too_slow], database=None, max_examples=10, verbosity=hypothesis.Verbosity.normal)) hypothesis.settings.register_profile( "debug", settings( suppress_health_check=[hypothesis.HealthCheck.too_slow], database=None, max_examples=1000, verbosity=hypothesis.Verbosity.verbose)) hypothesis.settings.load_profile( "pytorch_ci" if IS_CI else os.getenv('PYTORCH_HYPOTHESIS_PROFILE', 'dev') ) except ImportError: warnings.warn('Fail to import hypothesis in common_utils, tests are not derandomized', ImportWarning) # Used in check_if_enable to see if a test method should be disabled by an issue, # sanitizes a test method name from appended suffixes by @dtypes parametrization. 
# e.g., an issue with title "DISABLED test_bitwise_ops (__main__.TestBinaryUfuncs)" should # disabled ALL parametrized test_bitwise_ops tests, such test_bitwise_ops_cuda_int32 def remove_device_and_dtype_suffixes(test_name: str) -> str: # import statement is localized to avoid circular dependency issues with common_device_type.py from torch.testing._internal.common_device_type import get_device_type_test_bases device_suffixes = [x.device_type for x in get_device_type_test_bases()] dtype_suffixes = [str(dt)[len("torch."):] for dt in get_all_dtypes()] test_name_chunks = test_name.split("_") if len(test_name_chunks) > 0 and test_name_chunks[-1] in dtype_suffixes: if len(test_name_chunks) > 1 and test_name_chunks[-2] in device_suffixes: return "_".join(test_name_chunks[0:-2]) return "_".join(test_name_chunks[0:-1]) return test_name def check_if_enable(test: unittest.TestCase): classname = str(test.__class__).split("'")[1].split(".")[-1] sanitized_testname = remove_device_and_dtype_suffixes(test._testMethodName) def matches_test(target: str): target_test_parts = target.split() if len(target_test_parts) < 2: # poorly formed target test name return False target_testname = target_test_parts[0] target_classname = target_test_parts[1][1:-1].split(".")[-1] # if test method name or its sanitized version exactly matches the disabled # test method name AND allow non-parametrized suite names to disable # parametrized ones (TestSuite disables TestSuiteCPU) return classname.startswith(target_classname) and (target_testname in (test._testMethodName, sanitized_testname)) if any(matches_test(x) for x in slow_tests_dict.keys()): getattr(test, test._testMethodName).__dict__['slow_test'] = True if not TEST_WITH_SLOW: raise unittest.SkipTest("test is slow; run with PYTORCH_TEST_WITH_SLOW to enable test") if not IS_SANDCASTLE: should_skip = False skip_msg = "" for disabled_test, (issue_url, platforms) in disabled_tests_dict.items(): if matches_test(disabled_test): platform_to_conditional: dict = { "mac": IS_MACOS, "macos": IS_MACOS, "win": IS_WINDOWS, "windows": IS_WINDOWS, "linux": IS_LINUX, "rocm": TEST_WITH_ROCM, "xpu": TEST_XPU, "asan": TEST_WITH_ASAN, "dynamo": TEST_WITH_TORCHDYNAMO, "dynamo_wrapped": TEST_WITH_TORCHDYNAMO, "inductor": TEST_WITH_TORCHINDUCTOR, "slow": TEST_WITH_SLOW, } invalid_platforms = list(filter(lambda p: p not in platform_to_conditional, platforms)) if len(invalid_platforms) > 0: invalid_plats_str = ", ".join(invalid_platforms) valid_plats = ", ".join(platform_to_conditional.keys()) print(f"Test {disabled_test} is disabled for some unrecognized ", f"platforms: [{invalid_plats_str}]. Please edit issue {issue_url} to fix the platforms ", 'assigned to this flaky test, changing "Platforms: ..." to a comma separated ', f"subset of the following (or leave it blank to match all platforms): {valid_plats}") # Sanitize the platforms list so that we continue to disable the test for any valid platforms given platforms = list(filter(lambda p: p in platform_to_conditional, platforms)) if platforms == [] or any(platform_to_conditional[platform] for platform in platforms): should_skip = True skip_msg = f"Test is disabled because an issue exists disabling it: {issue_url}" \ f" for {'all' if platforms == [] else ''}platform(s) {', '.join(platforms)}. " \ "If you're seeing this on your local machine and would like to enable this test, " \ "please make sure CI is not set and you are not using the flag --import-disabled-tests." 
break if should_skip and not RERUN_DISABLED_TESTS: # Skip the disabled test when not running under --rerun-disabled-tests verification mode raise unittest.SkipTest(skip_msg) if not should_skip and RERUN_DISABLED_TESTS: # Probably test has disable issue but not for this platform skip_msg = "Test is enabled but --rerun-disabled-tests verification mode is set, so only" \ " disabled tests are run" raise unittest.SkipTest(skip_msg) if TEST_SKIP_FAST: if hasattr(test, test._testMethodName) and not getattr(test, test._testMethodName).__dict__.get('slow_test', False): raise unittest.SkipTest("test is fast; we disabled it with PYTORCH_TEST_SKIP_FAST") # `TestCase.assertEqual` is very permissive and coerced the inputs into a format that could be compared. This is very # convenient when writing tests, but not so much while reviewing them. By default, the comparison `Pair` framework of # `torch.testing._comparison.are_equal`, used for example by the public testing function # `torch.testing.assert_close`, is more strict. In order to use the same framework and thus reduce the divergence # between internal and external comparison logic as much as possible, we define some "relaxed" pairs here. They only # change the supported inputs, but the comparison logic is the same. # TODO: Revisit the relaxed pairs and check how much work it is to fix the tests that would fail without the relaxation. class RelaxedBooleanPair(BooleanPair): """Pair for boolean-like inputs. In contrast to the builtin :class:`BooleanPair`, this class also supports one input being a number or a single element tensor-like. """ _supported_number_types = NumberPair(0, 0)._supported_types def _process_inputs(self, actual, expected, *, id): # We require only one of the inputs of the inputs to be a boolean and the other can also be a boolean, a # number, or a single element tensor or array, whereas in default BooleanPair both inputs have to be booleans. tensor_or_array_types: tuple[type, ...] = (torch.Tensor, np.ndarray) other_supported_types = (*self._supported_types, *self._supported_number_types, *tensor_or_array_types) if not ( (isinstance(actual, self._supported_types) and isinstance(expected, other_supported_types)) or (isinstance(expected, self._supported_types) and isinstance(actual, other_supported_types)) ): self._inputs_not_supported() return [self._to_bool(input, id=id) for input in (actual, expected)] def _to_bool(self, bool_like, *, id): if isinstance(bool_like, np.number): return bool(bool_like.item()) elif type(bool_like) in self._supported_number_types: return bool(bool_like) elif isinstance(bool_like, (torch.Tensor, np.ndarray)): numel = bool_like.numel() if isinstance(bool_like, torch.Tensor) else bool_like.size if numel > 1: self._fail( ValueError, f"Only single element tensor-likes can be compared against a boolean. " f"Got {numel} elements instead.", id=id ) return bool(bool_like.item()) else: return super()._to_bool(bool_like, id=id) class RelaxedNumberPair(NumberPair): """Pair for number-like inputs. In contrast to the builtin :class:`NumberPair`, this class also supports one input being a single element tensor-like or a :class:`enum.Enum`. (D)Type checks are disabled, meaning comparing 1 to 1.0 succeeds even when ``check_dtype=True`` is passed. In addition, this class uses looser default tolerances for :class:`float` and :class:`complex` inputs. Also supports overriding the absolute and relative tolerance through the ``@precisionOverride`` and ``@toleranceOverride`` decorators. 
""" _TYPE_TO_DTYPE = { int: torch.int64, float: torch.float32, complex: torch.complex64, } def __init__( self, actual, expected, *, rtol_override=0.0, atol_override=0.0, check_dtype=None, **other_parameters ) -> None: super().__init__(actual, expected, check_dtype=False, **other_parameters) self.rtol = max(self.rtol, rtol_override) self.atol = max(self.atol, atol_override) def _process_inputs(self, actual, expected, *, id): # We require only one of the inputs of the inputs to be a number and the other can also be a number or a single # element tensor or array, whereas in default NumberPair both inputs have to be numbers. tensor_or_array_types: tuple[type, ...] = (torch.Tensor, np.ndarray) other_supported_types = (*self._supported_types, *tensor_or_array_types) if not ( (isinstance(actual, self._supported_types) and isinstance(expected, other_supported_types)) or (isinstance(expected, self._supported_types) and isinstance(actual, other_supported_types)) ): self._inputs_not_supported() return [self._to_number(input, id=id) for input in (actual, expected)] def _to_number(self, number_like, *, id): if isinstance(number_like, (torch.Tensor, np.ndarray)): numel = number_like.numel() if isinstance(number_like, torch.Tensor) else number_like.size if numel > 1: self._fail( ValueError, f"Only single element tensor-likes can be compared against a number. " f"Got {numel} elements instead.", id=id ) number = number_like.item() if isinstance(number, bool): number = int(number) return number elif isinstance(number_like, Enum): return int(number_like) # type: ignore[call-overload] else: return super()._to_number(number_like, id=id) class TensorOrArrayPair(TensorLikePair): """Pair for tensor-like inputs. On the one hand this class is stricter than the builtin :class:`TensorLikePair` since it only allows instances of :class:`torch.Tensor` and :class:`numpy.ndarray` rather than allowing any tensor-like than can be converted into a tensor. On the other hand this class is looser since it converts all inputs into tensors with no regard of their relationship, e.g. comparing a :class:`torch.Tensor` to :class:`numpy.ndarray` is fine. In addition, this class supports overriding the absolute and relative tolerance through the ``@precisionOverride`` and ``@toleranceOverride`` decorators. 
""" def __init__(self, actual, expected, *, rtol_override=0.0, atol_override=0.0, **other_parameters): super().__init__(actual, expected, **other_parameters) self.rtol = max(self.rtol, rtol_override) self.atol = max(self.atol, atol_override) def _process_inputs(self, actual, expected, *, id, allow_subclasses): self._check_inputs_isinstance(actual, expected, cls=(torch.Tensor, np.ndarray)) actual, expected = (self._to_tensor(input) for input in (actual, expected)) for tensor in (actual, expected): self._check_supported(tensor, id=id) return actual, expected class TypedStoragePair(TensorLikePair): """Pair for :class:`torch.storage.TypedStorage` inputs.""" def __init__(self, actual, expected, *, rtol_override=0.0, atol_override=0.0, **other_parameters): self._check_inputs_isinstance(actual, expected, cls=torch.storage.TypedStorage) super().__init__(actual, expected, **other_parameters) self.rtol = max(self.rtol, rtol_override) self.atol = max(self.atol, atol_override) def _to_tensor(self, typed_storage): return torch.tensor( typed_storage._untyped_storage, dtype={ torch.quint8: torch.uint8, torch.quint4x2: torch.uint8, torch.quint2x4: torch.uint8, torch.qint32: torch.int32, torch.qint8: torch.int8 }.get(typed_storage.dtype, typed_storage.dtype), device=typed_storage.device, ) class UnittestPair(Pair): """Fallback ABC pair that handles non-numeric inputs. To avoid recreating the mismatch messages of :meth:`unittest.TestCase.assertEqual`, this pair simply wraps it in order to use it with the :class:`Pair` "framework" from :func:`are_equal`. Define the :attr:`UnittestPair.CLS` in a subclass to indicate which class(es) of the inputs the pair should support. """ CLS: Union[type, tuple[type, ...]] TYPE_NAME: Optional[str] = None def __init__(self, actual, expected, **other_parameters): self._check_inputs_isinstance(actual, expected, cls=self.CLS) super().__init__(actual, expected, **other_parameters) def compare(self): test_case = unittest.TestCase() try: return test_case.assertEqual(self.actual, self.expected) except test_case.failureException as error: msg = str(error) type_name = self.TYPE_NAME or (self.CLS if isinstance(self.CLS, type) else self.CLS[0]).__name__ self._fail(AssertionError, f"{type_name.title()} comparison failed: {msg}") class StringPair(UnittestPair): CLS = (str, bytes) TYPE_NAME = "string" class SetPair(UnittestPair): CLS = set class TypePair(UnittestPair): CLS = type class ObjectPair(UnittestPair): CLS = object # This implements a variant of assertRaises/assertRaisesRegex where we first test # if the exception is NotImplementedError, and if so just skip the test instead # of failing it. # # This is implemented by inheriting from the (private) implementation of # assertRaises from unittest.case, and slightly tweaking it for this new # behavior. The year is 2021: this private class hierarchy hasn't changed since # 2010, seems low risk to inherit from. 
class AssertRaisesContextIgnoreNotImplementedError(unittest.case._AssertRaisesContext): def __exit__(self, exc_type, exc_value, tb): if exc_type is not None and issubclass(exc_type, NotImplementedError): self.test_case.skipTest(f"not_implemented: {exc_value}") # type: ignore[attr-defined] return super().__exit__(exc_type, exc_value, tb) @contextmanager def set_warn_always_context(new_val: bool): old_val = torch.is_warn_always_enabled() torch.set_warn_always(new_val) try: yield finally: torch.set_warn_always(old_val) class NoTest: # causes pytest to not recognize this class as a test __test__ = False class TestCase(expecttest.TestCase): # NOTE: "precision" lets classes and generated tests set minimum # atol values when comparing tensors. Used by @precisionOverride and @toleranceOverride, for # example. # NOTE: "rel_tol" lets classes and generated tests set minimum # rtol values when comparing tensors. Used by @toleranceOverride, for example. _precision: float = 0 _rel_tol: float = 0 # Toggles whether to assert that `torch.get_default_dtype()` returns # `torch.float` when `setUp` and `tearDown` are called. _default_dtype_check_enabled: bool = False # Always use difflib to print diffs on multi line equality. # Undocumented feature in unittest _diffThreshold = sys.maxsize maxDiff = None # checker to early terminate test suite if unrecoverable failure occurs. def _should_stop_test_suite(self): if torch.cuda.is_initialized(): # CUDA device side error will cause subsequence test cases to fail. # stop entire test suite if catches RuntimeError during torch.cuda.synchronize(). try: torch.cuda.synchronize() except RuntimeError as rte: print("TEST SUITE EARLY TERMINATION due to torch.cuda.synchronize() failure", file=sys.stderr) print(str(rte), file=sys.stderr) return True return False else: return False @property def precision(self) -> float: return self._precision @precision.setter def precision(self, prec: float) -> None: self._precision = prec @property def rel_tol(self) -> float: return self._rel_tol @rel_tol.setter def rel_tol(self, prec: float) -> None: self._rel_tol = prec _do_cuda_memory_leak_check = False _do_cuda_non_default_stream = False # When True, if a test case raises a NotImplementedError, instead of failing # the test, skip it instead. _ignore_not_implemented_error = False def __init__(self, method_name='runTest', methodName='runTest'): # methodName is the correct naming in unittest and testslide uses keyword arguments. # So we need to use both to 1) not break BC and, 2) support testslide. if methodName != "runTest": method_name = methodName super().__init__(method_name) test_method = getattr(self, method_name, None) if test_method is not None: # Wraps the tested method if we should do CUDA memory check. if TEST_CUDA_MEM_LEAK_CHECK: self._do_cuda_memory_leak_check &= getattr(test_method, '_do_cuda_memory_leak_check', True) # FIXME: figure out the flaky -1024 anti-leaks on windows. See #8044 if self._do_cuda_memory_leak_check and not IS_WINDOWS: self.wrap_with_cuda_policy(method_name, self.assertLeaksNoCudaTensors) # Wraps the tested method if we should enforce non default CUDA stream. 
self._do_cuda_non_default_stream &= getattr(test_method, '_do_cuda_non_default_stream', True) if self._do_cuda_non_default_stream and not IS_WINDOWS: self.wrap_with_cuda_policy(method_name, self.enforceNonDefaultStream) if self._ignore_not_implemented_error: self.wrap_with_policy(method_name, lambda: skip_exception_type(NotImplementedError)) if PRINT_REPRO_ON_FAILURE: try: def _get_rel_test_path(abs_test_path): # Attempt to get relative path based on the "test" dir. # In CI, the working dir is not guaranteed to be the base repo dir so # we can't just compute relative path from that. parts = Path(abs_test_path).parts for i, part in enumerate(parts): if part == "test": base_dir = os.path.join(*parts[:i]) if i > 0 else '' return os.path.relpath(abs_test_path, start=base_dir) # Can't determine containing dir; just return the test filename. # The path isn't strictly correct but it's arguably better than nothing. return os.path.split(abs_test_path)[1] # NB: In Python 3.8, the getfile() call will return a path relative # to the working directory, so convert that to absolute. abs_test_path = os.path.abspath(inspect.getfile(type(self))) test_filename = _get_rel_test_path(abs_test_path) class_name = type(self).__name__ test_run_cmd = f"python {test_filename} {class_name}.{method_name}" env_var_prefix = TestEnvironment.repro_env_var_prefix() repro_parts = [env_var_prefix, test_run_cmd] self.wrap_with_policy( method_name, lambda repro_parts=repro_parts: print_repro_on_failure(repro_parts)) except Exception as e: # Don't fail entirely if we can't get the test filename log.info("could not print repro string", extra=str(e)) # type: ignore[arg-type] def assertLeaksNoCudaTensors(self, name=None): name = self.id() if name is None else name return CudaMemoryLeakCheck(self, name) def enforceNonDefaultStream(self): return CudaNonDefaultStream() def _remove_ansi_escape(self, input): # 7-bit C1 ANSI sequences ansi_escape = re.compile(r''' \x1B # ESC (?: # 7-bit C1 Fe (except CSI) [@-Z\\-_] | # or [ for CSI, followed by a control sequence \[ [0-?]* # Parameter bytes [ -/]* # Intermediate bytes [@-~] # Final byte ) ''', re.VERBOSE) return ansi_escape.sub('', input) def remove_comment_lines(self, input_string): lines = input_string.split('\n') filtered_lines = [line for line in lines if not line.strip().startswith('#')] return '\n'.join(filtered_lines) def remove_empty_lines(self, input_string): lines = input_string.split('\n') filtered_lines = [line for line in lines if not line.strip() == ''] return '\n'.join(filtered_lines) # ignore comments will ignore lines that starts with # after being stripped def assertExpectedInline(self, actual, expect, skip=0, ignore_comments=False, ignore_empty_lines=False): actual = actual if isinstance(actual, str) else str(actual) actual = self._remove_ansi_escape(actual) expect = self._remove_ansi_escape(expect) if ignore_comments: actual = self.remove_comment_lines(actual) expect = self.remove_comment_lines(expect) if ignore_empty_lines: actual = self.remove_empty_lines(actual) expect = self.remove_empty_lines(expect) return super().assertExpectedInline(actual if isinstance(actual, str) else str(actual), expect, skip + 1) # Munges exceptions that internally contain stack traces, using munge_exc def assertExpectedInlineMunged( self, exc_type, callable, expect, *, skip=0, suppress_suffix=True, post_munge=None, ): try: callable() except exc_type as e: munged = munge_exc(e, suppress_suffix=suppress_suffix, skip=skip + 1) if post_munge: munged = post_munge(munged) 
self.assertExpectedInline( munged, expect, skip=skip + 1 ) return self.fail(msg="Did not raise when expected to") def assertLogs(self, logger=None, level=None): if logger is None: logger = logging.getLogger("torch") return super().assertLogs(logger, level) def assertNoLogs(self, logger=None, level=None): if logger is None: logger = logging.getLogger("torch") return super().assertNoLogs(logger, level) def wrap_with_cuda_policy(self, method_name, policy): test_method = getattr(self, method_name) # the import below may initialize CUDA context, so we do it only if # self._do_cuda_memory_leak_check or self._do_cuda_non_default_stream # is True. # TODO: sure looks like we unconditionally initialize the context here # -- ezyang from torch.testing._internal.common_cuda import TEST_CUDA fullname = self.id().lower() # class_name.method_name if TEST_CUDA and ('gpu' in fullname or 'cuda' in fullname): setattr(self, method_name, self.wrap_method_with_policy(test_method, policy)) def wrap_with_policy(self, method_name, policy): test_method = getattr(self, method_name) setattr(self, method_name, self.wrap_method_with_policy(test_method, policy)) # A policy is a zero-argument function that returns a context manager. # We don't take the context manager directly as it may be necessary to # construct it once per test method def wrap_method_with_policy(self, method, policy): # Assumes that `method` is the tested function in `self`. # NOTE: Python Exceptions (e.g., unittest.Skip) keeps objects in scope # alive, so this cannot be done in setUp and tearDown because # tearDown is run unconditionally no matter whether the test # passes or not. For the same reason, we can't wrap the `method` # call in try-finally and always do the check. @wraps(method) def wrapper(self, *args, **kwargs): with policy(): method(*args, **kwargs) return types.MethodType(wrapper, self) def wrap_with_cuda_memory_check(self, method): return self.wrap_method_with_policy(method, self.assertLeaksNoCudaTensors) def _run_custom(self, result=None): using_unittest = isinstance(result, unittest.TestResult) super_run = super().run test_cls = super_run.__self__ # type: ignore[attr-defined] # Are we compiling? compiled = TEST_WITH_TORCHDYNAMO or TEST_WITH_AOT_EAGER or TEST_WITH_TORCHINDUCTOR # Is the class strict and compiling? strict_default = False should_reset_dynamo = False # We disable size_asserts for test_ops since some tests fail # due to mismatch of strides returned from eager v.s. meta kernels # Only some of the ops has this problem, but since tests in # test_op.py are parametrized, it's hard to do this specifically # for the affected ops. # It's not a big deal since these problems are captured by # test_torchinductor_opinfo.py as well. 
should_disable_size_asserts = False if compiled: try: path = inspect.getfile(type(test_cls)) full_path = os.path.abspath(path) match = re.match(r".*/test/(.*).py", full_path) if match is not None: filename = match.group(1) if TEST_WITH_TORCHINDUCTOR: from .dynamo_test_failures import FIXME_inductor_non_strict strict_default = filename not in FIXME_inductor_non_strict should_reset_dynamo = True if filename == "test_ops": should_disable_size_asserts = True else: strict_default = True # inspect.getfile can fail with these except (OSError, TypeError): pass if "STRICT_DEFAULT" in os.environ: if os.environ["STRICT_DEFAULT"] == "1": strict_default = True strict_mode = False if compiled: test_method = getattr(self, self._testMethodName) if hasattr(test_method, "dynamo_strict"): strict_mode = test_method.dynamo_strict elif hasattr(test_cls, "dynamo_strict"): strict_mode = test_cls.dynamo_strict else: strict_mode = strict_default nopython = getattr(test_cls, "dynamo_strict_nopython", False) and compiled if strict_mode or should_reset_dynamo: torch._dynamo.reset() torch.compiler.set_stance("default") # TODO: Remove this; this is grandfathered in because we suppressed errors # on test suite previously # When strict mode is False, suppress_errors is True if compiled: suppress_errors = not strict_mode else: suppress_errors = torch._dynamo.config.suppress_errors maybe_disable_size_asserts = ( torch._inductor.config.patch(size_asserts=False) if should_disable_size_asserts else contextlib.nullcontext() ) with unittest.mock.patch("torch._dynamo.config.suppress_errors", suppress_errors), maybe_disable_size_asserts: if TEST_WITH_AOT_EAGER: super_run = torch._dynamo.optimize("aot_eager_decomp_partition")(super_run) elif TEST_WITH_TORCHDYNAMO or TEST_WITH_TORCHINDUCTOR: if TEST_WITH_TORCHINDUCTOR: super_run = torch._dynamo.optimize("inductor")(super_run) else: # Assume eager-generated GraphModules will not error out. # If we do, this is probably a Dynamo bug! 
super_run = torch._dynamo.optimize("eager_noexcept", nopython=nopython)(super_run) key = f"{self.__class__.__name__}.{self._testMethodName}" def expect_failure(f, file_name): @wraps(f) def wrapper(*args, **kwargs): try: f(*args, **kwargs) except BaseException as e: self.skipTest(e) raise RuntimeError(f"Unexpected success, please remove `{file_name}`") return wrapper if TEST_WITH_TORCHINDUCTOR: subdir = "test/inductor_expected_failures" from .dynamo_test_failures import inductor_expected_failures as expected_failures else: subdir = "test/dynamo_expected_failures" from .dynamo_test_failures import dynamo_expected_failures as expected_failures if key in expected_failures: method = getattr(self, self._testMethodName) file_name = os.path.join(subdir, key) setattr(self, self._testMethodName, expect_failure(method, file_name)) def ignore_failure(f, file_name): @wraps(f) def wrapper(*args, **kwargs): try: f(*args, **kwargs) except BaseException as e: self.skipTest(e) method = getattr(self, self._testMethodName) if getattr(method, "__unittest_expecting_failure__", False): self.skipTest("unexpected success") else: self.skipTest(f"This test passed, maybe we can remove `{file_name}`") return wrapper if TEST_WITH_TORCHINDUCTOR: subdir = "test/inductor_skips" from .dynamo_test_failures import inductor_skips as skips else: subdir = "test/dynamo_skips" from .dynamo_test_failures import dynamo_skips as skips if key in skips: method = getattr(self, self._testMethodName) file_name = os.path.join(subdir, key) setattr(self, self._testMethodName, ignore_failure(method, file_name)) super_run(result=result) if strict_mode or should_reset_dynamo: torch._dynamo.reset() # Early terminate test if necessary. If using pytest, use the -x flag instead if using_unittest and self._should_stop_test_suite(): if result.wasSuccessful(): case = TestCase() if TEST_SAVE_XML is not None: # This is a big hacky, XMLRunner modifies expected type from TestCase to TestInfo # Create dummy TestInfo to record results correctly from xmlrunner.result import _TestInfo # type: ignore[import] case = _TestInfo(result, case) case.output = _TestInfo.ERROR # type: ignore[attr-defined] case.elapsed_time = 0.0 # type: ignore[attr-defined] case.test_description = "TestSuiteEarlyFailure" # type: ignore[attr-defined] # This shouldn't really happen, but if does add fake failure # For more details see https://github.com/pytorch/pytorch/issues/71973 result.failures.append((case, "TestSuite execution was aborted early")) assert result.wasSuccessful() is False result.stop() def run(self, result=None): with contextlib.ExitStack() as stack: if TEST_WITH_CROSSREF: stack.enter_context(CrossRefMode()) self._run_custom( result=result, ) def setUp(self): check_if_enable(self) set_rng_seed(SEED) # Save global check sparse tensor invariants state that can be # restored from tearDown: self._check_invariants = torch.sparse.check_sparse_tensor_invariants.is_enabled() # Enable invariant checks for all sparse tensors constructions # including the unsafe ones. If this is not desired for some # test case, use check_invariants=False optional argument to # sparse tensor constructors or # @torch.sparse.check_sparse_tensor_invariants(False) # decorator to disable the invariant checks. 
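        # For example (illustrative only), an individual test that deliberately builds
        # malformed sparse tensors can opt back out of the checks enabled below:
        #
        #     @torch.sparse.check_sparse_tensor_invariants(False)
        #     def test_unsafe_sparse_construction(self):
        #         ...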
torch.sparse.check_sparse_tensor_invariants.enable() if self._default_dtype_check_enabled: assert torch.get_default_dtype() == torch.float # attempt to reset some global state at the end of the test self._prev_grad_state = torch.is_grad_enabled() def tearDown(self): # There exists test cases that override TestCase.setUp # definition, so we cannot assume that _check_invariants # attribute is defined in general. if hasattr(self, '_check_invariants'): # Restore the global check sparse tensor invariants state if self._check_invariants: torch.sparse.check_sparse_tensor_invariants.enable() else: torch.sparse.check_sparse_tensor_invariants.disable() if self._default_dtype_check_enabled: assert torch.get_default_dtype() == torch.float # attribute may not be defined, per above if hasattr(self, '_prev_grad_state'): torch.set_grad_enabled(self._prev_grad_state) @staticmethod def _make_crow_indices(n_rows, n_cols, nnz, *, device, dtype, random=True): """Return crow_indices of a CSR tensor with size (n_rows, n_cols) and the number of specified elements nnz. If random is True, the column counts of rows are in random order. Otherwise, the column counts of rows are defined by the used sampling method. Sampling method --------------- The used sampling method was introduced in https://pearu.github.io/csr_sampling.html, and here we give only an overall description of the method. Notice that crow_indices can be defined as cumsum(counts) where counts is a sequence of non-negative integers satisfying the following conditions: len(counts) == n_rows + 1 counts.max() <= n_cols while counts[i + 1] is interpreted as the number of specified elements in the i-th row. The used sampling method aims at increasing the diversity of CSR samples, that is, a CSR sample should contain (i) rows that are all filled, (ii) rows with no elements at all, and (iii) rows that are partially filled. At the same time and for the given total number of specified elements (nnz), there should be minimal preference to rows with a given number of elements. To achieve this, the sampling method is built-up on using a sawteeth model for counts. In the simplest case, we would have counts = arange(n_rows + 1) % (n_cols + 1) that has equal number of all possible column counts per row. This formula can be used only for specific input values of n_rows, n_cols, and nnz. To generalize this model to any combinations of inputs, the counts model above is extended with an incomplete sawtooth, and the right and lower rectangular parts that will guarantee that counts.sum() == nnz for any combination of n_rows, n_cols, and nnz. Basically, we'll find a maximal window in (n_rows + 1, n_cols + 1)-grid that is able to hold a sequence of sawteeth and so-called final correction, while the external part of the window is filled with counts to meet the nnz constraint exactly. """ assert 0 <= nnz <= n_rows * n_cols, (nnz, n_rows, n_cols) def sawteeth(n, m): # return the total number of counts in the sequence of # sawteeth where n and m define a window in (n_rows+1, # n_cols+1) rectangle where the sequence of sawteeth # perfectly fit. M = (n_cols - m) * (n_cols - m + 1) // 2 K = (n_rows - n) % (n_cols - m + 1) return M * ((n_rows - n) // (n_cols - m + 1)) + K * (K - 1) // 2 # Different from the original method description, here counts # has leading 0 required by crow_indices: counts = torch.zeros(n_rows + 1, dtype=dtype, device=torch.device('cpu')) n = m = 0 N = sawteeth(n, m) if N and nnz >= max(N, n_cols): # determine the width of the sawteeth window. 
We use bisection to solve # N(n, 0) == 0 or nnz - n * n_cols < max(N(n, 0), n_cols) # for n n_left = n n_right = n_rows - 1 N_right = sawteeth(n_right, m) while n_right - n_left > 1: n_middle = (n_left + n_right) // 2 N_middle = sawteeth(n_middle, m) if N_middle == 0 or nnz - n_middle * n_cols < max(N_middle, n_cols): n_right, N_right = n_middle, N_middle else: n_left = n_middle n, N = n_right, N_right # fill the right rectangle with counts: assert n counts[-n:].fill_(n_cols) if N and nnz - n * n_cols >= max(N, n_rows - n): # determine the height of the sawteeth window. We use bisection to solve # N(n, m) == 0 or nnz - n * n_cols - m * (n_rows - n) < max(N(n, m), n_rows - n) # for m. m_left = m m_right = n_cols - 1 N_right = sawteeth(n, m_right) while m_right - m_left > 1: m_middle = (m_left + m_right) // 2 N_middle = sawteeth(n, m_middle) if N_middle == 0 or nnz - n * n_cols - m_middle * (n_rows - n) < max(N_middle, n_rows - n): m_right, N_right = m_middle, N_middle else: m_left = m_middle m, N = m_right, N_right # fill the bottom rectangle with counts: assert m counts[1:n_rows - n + 1].fill_(m) if N: # fill the sawteeth window with counts q, r = divmod(nnz - n * n_cols - m * (n_rows - n), (n_cols - m) * (n_cols - m + 1) // 2) p = 1 + q * (n_cols - m + 1) k = math.isqrt(2 * r) if k * (k + 1) > 2 * r: k -= 1 corr = r - k * (k + 1) // 2 assert not ((p > 1) and (m > 0)) # full sawteeth are never on top of a bottom rectangle # sequence of full sawteeth: counts[1:p] = torch.arange(p - 1, dtype=dtype, device=counts.device) % (n_cols - m + 1) # incomplete sawtooth: counts[p:p + k + 1] += torch.arange(k + 1, dtype=dtype, device=counts.device) else: # given input does not support sawteeth p = 1 corr = nnz - n * n_cols - m * (n_rows - n) # correction that will guarantee counts.sum() == nnz: counts[p] += corr if random: # randomize crow_indices by shuffling the sawteeth # sequence: perm = torch.randperm(n_rows, device=counts.device) counts[1:] = counts[1:][perm] # compute crow_indices: crow_indices = counts crow_indices.cumsum_(dim=0) return crow_indices.to(device=device) def genSparseCompressedTensor(self, size, nnz, *, layout, device, dtype, index_dtype, blocksize=(), dense_dims=0): from operator import mul from functools import reduce sparse_dim = 2 assert all(size[d] > 0 for d in range(len(size))) or nnz == 0, 'invalid arguments' assert len(size) >= sparse_dim if blocksize: assert len(blocksize) == 2, (size, blocksize) assert size[-2 - dense_dims] % blocksize[0] == 0, (size, blocksize) assert size[-1 - dense_dims] % blocksize[1] == 0, (size, blocksize) blocksize0, blocksize1 = blocksize else: blocksize0 = blocksize1 = 1 size = tuple(size) dense_size = size[(len(size) - dense_dims):] def random_sparse_compressed(n_compressed_dims, n_plain_dims, nnz): compressed_indices = self._make_crow_indices(n_compressed_dims, n_plain_dims, nnz, device=device, dtype=index_dtype) plain_indices = torch.zeros(nnz, dtype=index_dtype, device=device) for i in range(n_compressed_dims): count = compressed_indices[i + 1] - compressed_indices[i] plain_indices[compressed_indices[i]:compressed_indices[i + 1]], _ = torch.sort( torch.randperm(n_plain_dims, dtype=index_dtype, device=device)[:count]) low = -1 if dtype != torch.uint8 else 0 high = 1 if dtype != torch.uint8 else 2 values = make_tensor((nnz,) + blocksize + dense_size, device=device, dtype=dtype, low=low, high=high) return values, compressed_indices, plain_indices batch_shape = size[:-2 - dense_dims] n_batch = reduce(mul, batch_shape, 1) if layout in 
{torch.sparse_csr, torch.sparse_bsr}: n_compressed_dims, n_plain_dims = size[-2 - dense_dims] // blocksize0, size[-1 - dense_dims] // blocksize1 else: n_compressed_dims, n_plain_dims = size[-1 - dense_dims] // blocksize1, size[-2 - dense_dims] // blocksize0 blocknnz = nnz // (blocksize0 * blocksize1) sparse_tensors = [random_sparse_compressed(n_compressed_dims, n_plain_dims, blocknnz) for _ in range(n_batch)] sparse_tensors_it = map(list, zip(*sparse_tensors)) values = torch.stack(next(sparse_tensors_it)).reshape(*batch_shape, blocknnz, *blocksize, *dense_size) compressed_indices = torch.stack(next(sparse_tensors_it)).reshape(*batch_shape, -1) plain_indices = torch.stack(next(sparse_tensors_it)).reshape(*batch_shape, -1) return torch.sparse_compressed_tensor(compressed_indices, plain_indices, values, size=size, dtype=dtype, layout=layout, device=device) def genSparseCSRTensor(self, size, nnz, *, device, dtype, index_dtype, dense_dims=0): return self.genSparseCompressedTensor(size, nnz, layout=torch.sparse_csr, device=device, dtype=dtype, index_dtype=index_dtype, blocksize=(), dense_dims=dense_dims) def genSparseCSCTensor(self, size, nnz, *, device, dtype, index_dtype, dense_dims=0): return self.genSparseCompressedTensor(size, nnz, layout=torch.sparse_csc, device=device, dtype=dtype, index_dtype=index_dtype, blocksize=(), dense_dims=0) def genSparseBSRTensor(self, size, blocksize, nnz, *, device, dtype, index_dtype, dense_dims=0): assert len(blocksize) == 2 return self.genSparseCompressedTensor(size, nnz, layout=torch.sparse_bsr, device=device, dtype=dtype, index_dtype=index_dtype, blocksize=blocksize, dense_dims=dense_dims) def genSparseBSCTensor(self, size, blocksize, nnz, *, device, dtype, index_dtype, dense_dims=0): assert len(blocksize) == 2 return self.genSparseCompressedTensor(size, nnz, layout=torch.sparse_bsc, device=device, dtype=dtype, index_dtype=index_dtype, blocksize=blocksize, dense_dims=dense_dims) def genSparseTensor(self, size, sparse_dim, nnz, is_uncoalesced, device, dtype): # Assert not given impossible combination, where the sparse dims have # empty numel, but nnz > 0 makes the indices containing values. assert all(size[d] > 0 for d in range(sparse_dim)) or nnz == 0, 'invalid arguments' v_size = [nnz] + list(size[sparse_dim:]) v = make_tensor(v_size, device=device, dtype=dtype, low=-1, high=1) i = torch.rand(sparse_dim, nnz, device=device) i.mul_(torch.tensor(size[:sparse_dim]).unsqueeze(1).to(i)) i = i.to(torch.long) if is_uncoalesced: i1 = i[:, :(nnz // 2), ...] i2 = i[:, :((nnz + 1) // 2), ...] i = torch.cat([i1, i2], 1) x = torch.sparse_coo_tensor(i, v, torch.Size(size), dtype=dtype, device=device) if not is_uncoalesced: x = x.coalesce() else: # FIXME: `x` is a sparse view of `v`. Currently rebase_history for # sparse views is not implemented, so this workaround is # needed for inplace operations done on `x`, e.g., copy_(). # Remove after implementing something equivalent to CopySlice # for sparse views. 
# NOTE: We do clone() after detach() here because we need to be able to change size/storage of x afterwards x = x.detach().clone()._coalesced_(False) return x, x._indices().clone(), x._values().clone() def generate_simple_inputs(self, layout, device=None, dtype=None, index_dtype=None, pin_memory=None, members_pin_memory=None, enable_batch=True, enable_hybrid=True, enable_zero_sized=True, enable_non_contiguous_indices=True, enable_non_contiguous_values=True, enable_batch_variable_nse=False, output_tensor=True, patterns=None): """Generator of simple inputs for tensor constructors of the given layout. The generated tensor inputs have the following properties: - tensor shapes are minimal but not trivial - tensor values are sorted sequences for COO and CSR formats, e.g. [1, 2, 3, 4] - the generated tensors represent the same mathematical tensor for all layouts - the generated tensors include regular, zero-sized, and optionally, batched or/and hybrid tensors. - the generated tensors include contiguous or non-contiguous tensors both in indices and values If output_tensor is True, yield tensors with the given layout. Otherwise, yield inputs to the corresponding tensor constructors: - sparse compressed input is defined as (compressed_indices, plain_indices, values), dict(size=expected_size_from_shape_inference, device=device, dtype=dtype, pin_memory=pin_memory) - sparse COO input is defined as (indices, values), dict(size=expected_size_from_shape_inference, device=device, dtype=dtype, pin_memory=pin_memory) - strided input is defined as (values,), dict(device=device, dtype=dtype) """ if index_dtype is None: index_dtype = torch.int64 is_compressed_sparse_layout = layout in {torch.sparse_csr, torch.sparse_csc, torch.sparse_bsr, torch.sparse_bsc} if output_tensor: for args, kwargs in self.generate_simple_inputs(layout, device=device, dtype=dtype, index_dtype=index_dtype, pin_memory=pin_memory, enable_batch=enable_batch, enable_hybrid=enable_hybrid, enable_zero_sized=enable_zero_sized, enable_non_contiguous_indices=enable_non_contiguous_indices, enable_non_contiguous_values=enable_non_contiguous_values, enable_batch_variable_nse=enable_batch_variable_nse, output_tensor=False): if members_pin_memory: args = tuple(a.pin_memory() for a in args) if layout is torch.strided: assert len(args) == 1 size = kwargs.pop('size', None) # to ensure that a zero-sized tensor has the desired shape assert size is not None if pin_memory: yield args[0].reshape(size).pin_memory() else: yield args[0].reshape(size) elif layout is torch.sparse_coo: yield torch.sparse_coo_tensor(*args, **kwargs) elif is_compressed_sparse_layout: kwargs.update(layout=layout) yield torch.sparse_compressed_tensor(*args, **kwargs) else: assert 0 # unreachable return def get_blockpattern(pattern, blocksize): basesize = pattern.shape assert basesize[0] % blocksize[0] == 0, (basesize, blocksize) assert basesize[1] % blocksize[1] == 0, (basesize, blocksize) blockpattern = pattern.reshape(-1, blocksize[0], basesize[1] // blocksize[1], blocksize[1]).transpose(-3, -2).any(-1).any(-1) block_ids = torch.arange(1, blockpattern.numel() + 1).reshape(blockpattern.shape) return (blockpattern != 0) * block_ids def get_sparse_data(pattern): basesize = pattern.shape assert len(basesize) == 2, basesize # pattern is expected to be a matrix # We cannot use `torch.sparse_xyz_tensor(pattern)` to # compute the sparse layout indices and values because # generate_simple_inputs is used to generate the inputs to # test `torch.sparse_xyz_tensor` factory functions, so # we'll 
compute the indices and values independently of # the factory functions. indices = torch.where(pattern != 0) coo_indices = torch.stack(indices) crow_indices = torch.zeros(basesize[0] + 1, dtype=torch.int64) crow_indices[1:] = torch.cumsum(coo_indices[0].bincount(minlength=basesize[0]), 0) col_indices = coo_indices[1] strided_values = torch.zeros(basesize, dtype=torch.int64) # the property of `values == range(1, 1+nnz)` is used in # get_sparse_data_with_block to relate BSR and BSC values, # so, don't change the following line: values = torch.arange(1, 1 + len(indices[0]), dtype=torch.int64) strided_values[indices] = values indices_T = torch.where(pattern.transpose(0, 1) != 0) coo_indices_T = torch.stack(indices_T) ccol_indices = torch.zeros(basesize[1] + 1, dtype=torch.int64) ccol_indices[1:] = torch.cumsum(coo_indices_T[0].bincount(minlength=basesize[1]), 0) row_indices = coo_indices_T[1] csc_values = strided_values.transpose(0, 1)[indices_T] return {torch.sparse_coo: (coo_indices, values), torch.sparse_csr: (crow_indices, col_indices, values), torch.sparse_csc: (ccol_indices, row_indices, csc_values), torch.strided: (strided_values,)} def get_sparse_data_with_block(pattern, blocksize): nonblock_data = get_sparse_data(pattern) blockpattern = get_blockpattern(pattern, blocksize) block_data = get_sparse_data(blockpattern) strided_values = nonblock_data[torch.strided][0] block_indices = block_data[torch.sparse_coo][0] bsr_values = torch.stack([strided_values[bi * blocksize[0]:(bi + 1) * blocksize[0], bj * blocksize[1]:(bj + 1) * blocksize[1]] for bi, bj in block_indices.transpose(0, 1)]) # here we use the property `values == range(1, 1+nnz)` and # `values` relation to `csc_values` (see get_sparse_data) # to get BSC blocks via reordering the BSR blocks: bsc_values = bsr_values[block_data[torch.sparse_csc][2] - 1] return {torch.sparse_bsr: (*block_data[torch.sparse_csr][:2], bsr_values), torch.sparse_bsc: (*block_data[torch.sparse_csc][:2], bsc_values), **nonblock_data} def get_batch_sparse_data(pattern, blocksize): size = pattern.shape if len(size) <= 2: # non-batch return get_sparse_data_with_block(pattern, blocksize) # batch data is created recursively: batch_data = {} # type: ignore[var-annotated] for i, item in enumerate(pattern): for layout, d in get_batch_sparse_data(item, blocksize).items(): target = batch_data.get(layout) if layout is torch.sparse_coo: # a "batch COO" means a COO with the leading # sparse dimensions interpreted as batch # dimensions ext_coo_indices1 = torch.cat((torch.full((1, len(d[1])), i, dtype=torch.int64), d[0])) if target is None: target = batch_data[layout] = (ext_coo_indices1, d[1]) else: target[0].set_(torch.cat((target[0], ext_coo_indices1), 1)) target[1].set_(torch.cat((target[1], d[1]))) else: if target is None: target = batch_data[layout] = tuple(d[j].unsqueeze(0) for j in range(len(d))) else: for j in range(len(d)): target[j].set_(torch.cat((target[j], d[j].unsqueeze(0)))) return batch_data def generate_values(base, densesize): """Generates a tensor of shape densesize with values equal to base + i_1 * 10^0 + ... + i_d * 10^{d - 1} at indices i_1, ..., i_d (with 0 <= i_j < densesize[j] for any 1 <= j <= len(densesize)) This mapping produces unique values as long as densesize[i] < 10 for all i in range(len(densesize)). 
""" if not densesize: return base if not isinstance(base, int) and base.ndim > 0: return torch.stack([generate_values(b, densesize) for b in base]) if base == 0: return torch.zeros(densesize, dtype=torch.int64) r = torch.arange(densesize[0], dtype=torch.int64) for i, d in enumerate(densesize[1:]): y = torch.arange(d, dtype=torch.int64) * (10 ** (i + 1)) r = r[..., None] + y[None, ...] r.add_(base) return r if patterns is None: # A pattern is a 3-tuple with the following items: # # - a list of integers with the depth of two or more. The # integers define the sparsity patterns of the generated # inputs: zero values correspond to unspecified # elements/blocks, and non-zero values to the specified # elements. # # For debugging convenience, the elements with the same # value typically belong to the same block. However, it # is not a hard requirement: as long as the shape of a # pattern divides with block sizes, the pattern will be # a valid one. # # If the depth of the list is larger than two, inputs # with batch dimensions will be generated. # # - a list of 2-tuples of block sizes, used to generate # BSR/BSC tensors with various block size parameters # # - a list of tuples of dense dimensions, used to generate # hybrid tensors with various dense dimensions # patterns = [ # a simple 3 x 2 tensor: non-hybrid, hybrid with 1 and 2 dense dimensions ([[1, 2, 0], [1, 0, 3]], [(2, 1), (1, 3)], [(), (2,), (4, 5)]), # 2 x 3 batch of 3 x 2 tensors: non-hybrid and hybrid with 2 dense dimensions ([[[[1, 2, 0], [1, 0, 3]], [[1, 2, 3], [1, 0, 0]], [[1, 0, 0], [1, 2, 3]]], [[[0, 2, 0], [1, 2, 3]], [[1, 0, 3], [1, 2, 0]], [[1, 2, 3], [0, 2, 0]]]], [(2, 1), (2, 3)], [(), (2,)]), # tensor with non-trivial blocksize ([[0, 1, 0, 2, 0, 2], [0, 1, 0, 0, 2, 0], [3, 3, 3, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 5, 0, 6, 6, 6], [5, 0, 5, 6, 6, 6], [0, 0, 0, 0, 8, 8], [7, 7, 7, 0, 8, 8]], [(2, 3)], [(), (4, 5)]), # batch tensor with variable NSE # Requires https://github.com/pytorch/pytorch/pull/84843 or similar. 
([[[1, 2], [3, 4]], [[1, 0], [0, 0]]], [(1, 1)], ([()] if enable_batch_variable_nse else []))] def non_contiguous_copy(t, dim=-1, offset=0): # return a copy of t that is non-contiguous along the # given dimension and with the given storage offset self.assertTrue(t.is_contiguous()) if dim < 0: dim = dim + t.ndim assert dim >= 0 and dim < t.ndim step = max(2, offset + 1) tmp = torch.zeros((*t.shape[:dim], t.shape[dim] * step, *t.shape[dim + 1:]), dtype=t.dtype, device=t.device) dim_slices = (*((slice(None),) * dim), slice(offset, None, step)) r = tmp[dim_slices].copy_(t) self.assertFalse(r.is_contiguous()) self.assertEqual(t, r) return r # the main loop of the method: for pattern, blocksizes, densesizes in patterns: if not enable_hybrid: densesizes = [s for s in densesizes if not s] if not (densesizes and blocksizes): continue pattern = torch.tensor(pattern, dtype=torch.int64) if not enable_batch and pattern.ndim > 2: continue for blocksize in blocksizes: data = get_batch_sparse_data(pattern, blocksize)[layout] for densesize in densesizes: indices = [a.to(device=device, dtype=index_dtype) for a in data[:-1]] values = generate_values(data[-1], densesize).to(device=device, dtype=dtype) kwargs = dict(device=device, dtype=dtype, size=pattern.shape + densesize) if pin_memory is not None: kwargs.update(pin_memory=pin_memory) yield (*indices, values), kwargs.copy() if enable_non_contiguous_indices and pattern.ndim > 2: # sparse compressed indices can be sliced only along batch dimensions for (dim, offset) in {(0, 1), (-2, 0)}: indices_copy = [non_contiguous_copy(a, dim=dim, offset=offset) for a in indices] yield (*indices_copy, values), kwargs.copy() if enable_non_contiguous_values: values_copy = non_contiguous_copy(values, dim=-1, offset=1) yield (*indices_copy, values_copy), kwargs.copy() if enable_non_contiguous_values: values_copy = non_contiguous_copy(values, dim=-1, offset=1) yield (*indices, values_copy), kwargs.copy() # zero-sized tensor inputs, non-batch, non-hybrid/hybrid if enable_zero_sized: for basesize, blocksizes, densesizes in [ ((2, 0), [(1, 2)], [(), (2,), (2, 3)] if enable_hybrid else [()]), ((0, 2), [(1, 2), (2, 1), (3, 2)], [()]), ((0, 0), [(1, 2)], [()]), ]: for blocksize in blocksizes: for densesize in densesizes: if layout == torch.strided: indices = () # type: ignore[assignment] values = torch.empty((basesize + densesize), device=device, dtype=dtype) elif layout == torch.sparse_coo: indices = (torch.empty(len(basesize), 0, device=device, dtype=index_dtype),) # type: ignore[assignment] values = torch.empty((0, *densesize), device=device, dtype=dtype) elif layout == torch.sparse_csr: crow_indices = torch.tensor([0] * (basesize[0] + 1), device=device, dtype=index_dtype) col_indices = torch.empty(0, device=device, dtype=index_dtype) indices = (crow_indices, col_indices) # type: ignore[assignment] values = torch.empty((0, *densesize), device=device, dtype=dtype) elif layout == torch.sparse_csc: ccol_indices = torch.tensor([0] * (basesize[1] + 1), device=device, dtype=index_dtype) row_indices = torch.empty(0, device=device, dtype=index_dtype) indices = (ccol_indices, row_indices) # type: ignore[assignment] values = torch.empty((0, *densesize), device=device, dtype=dtype) elif layout == torch.sparse_bsr: crow_indices = torch.tensor([0] * (basesize[0] // blocksize[0] + 1), device=device, dtype=index_dtype) col_indices = torch.empty(0, device=device, dtype=index_dtype) indices = (crow_indices, col_indices) # type: ignore[assignment] values = torch.empty((0, *blocksize, *densesize), 
device=device, dtype=dtype) elif layout == torch.sparse_bsc: ccol_indices = torch.tensor([0] * (basesize[1] // blocksize[1] + 1), device=device, dtype=index_dtype) row_indices = torch.empty(0, device=device, dtype=index_dtype) indices = (ccol_indices, row_indices) # type: ignore[assignment] values = torch.empty((0, *blocksize, *densesize), device=device, dtype=dtype) else: assert 0 # unreachable kwargs = dict(device=device, dtype=dtype, size=basesize + densesize) if pin_memory is not None: kwargs.update(pin_memory=pin_memory) yield (*indices, values), kwargs def safeToDense(self, t): # coalesce is only implemented for COO if t.layout == torch.sparse_coo: t = t.coalesce() return t.to_dense() # Compares a torch function with a reference function for a given sample input (object of SampleInput) # Note: only values are compared, type comparison is not done here def compare_with_reference(self, torch_fn, ref_fn, sample_input, **kwargs): numpy_sample = sample_input.numpy() n_inp, n_args, n_kwargs = numpy_sample.input, numpy_sample.args, numpy_sample.kwargs t_inp, t_args, t_kwargs = sample_input.input, sample_input.args, sample_input.kwargs actual = torch_fn(t_inp, *t_args, **t_kwargs) expected = ref_fn(n_inp, *n_args, **n_kwargs) self.assertEqual(actual, expected, exact_device=False, **kwargs) # Compares the given Torch and NumPy functions on the given tensor-like object. # NOTE: both torch_fn and np_fn should be functions that take a single # tensor (array). If the torch and/or NumPy function require additional # arguments then wrap the function in a lambda or pass a partial function. # TODO: add args/kwargs for passing to assertEqual (e.g. rtol, atol) def compare_with_numpy(self, torch_fn, np_fn, tensor_like, device=None, dtype=None, **kwargs): assert TEST_NUMPY if isinstance(tensor_like, torch.Tensor): assert device is None assert dtype is None t_cpu = tensor_like.detach().cpu() if t_cpu.dtype is torch.bfloat16: t_cpu = t_cpu.float() a = t_cpu.numpy() t = tensor_like else: d = copy.copy(torch_to_numpy_dtype_dict) d[torch.bfloat16] = np.float32 a = np.array(tensor_like, dtype=d[dtype]) t = torch.tensor(tensor_like, device=device, dtype=dtype) np_result = np_fn(a) torch_result = torch_fn(t).cpu() # Converts arrays to tensors if isinstance(np_result, np.ndarray): try: np_result = torch.from_numpy(np_result) except Exception: # NOTE: copying an array before conversion is necessary when, # for example, the array has negative strides. np_result = torch.from_numpy(np_result.copy()) if t.dtype is torch.bfloat16 and torch_result.dtype is torch.bfloat16 and np_result.dtype is torch.float: torch_result = torch_result.to(torch.float) self.assertEqual(np_result, torch_result, **kwargs) def assertEqualIgnoreType(self, *args, **kwargs) -> None: # If you are seeing this function used, that means test is written wrongly # and deserves detailed investigation return self.assertEqual(*args, exact_dtype=False, **kwargs) def assertEqualBroadcasting(self, x, y, *args, **kwargs) -> None: r"""Tests if tensor x equals to y, if y to be broadcast to x.shape. """ if not isinstance(y, Iterable): # int, float, etc. 
or different shape tensors y = torch.ones_like(x) * y if not isinstance(y, torch.Tensor): # iterable, but not a tensor y = torch.ones_like(x) * torch.tensor(y) return self.assertEqual(x, y, *args, **kwargs) def assertEqual( self, x, y, msg: Optional[Union[str, Callable[[str], str]]] = None, *, atol: Optional[float] = None, rtol: Optional[float] = None, equal_nan=True, exact_dtype=True, # TODO: default this to True exact_device=False, exact_layout=False, exact_stride=False, exact_is_coalesced=False ): # Hide this function from `pytest`'s traceback __tracebackhide__ = True # numpy's dtypes are a superset of what PyTorch supports. In case we encounter an unsupported dtype, we fall # back to an elementwise comparison. Note that this has to happen here and not for example in # `TensorOrArrayPair`, since at that stage we can no longer split the array into its elements and perform # multiple comparisons. if any( isinstance(input, np.ndarray) and not has_corresponding_torch_dtype(input.dtype) for input in (x, y) ): def to_list(input): return input.tolist() if isinstance(input, (torch.Tensor, np.ndarray)) else list(input) x = to_list(x) y = to_list(y) # When comparing a sequence of numbers to a tensor, we need to convert the sequence to a tensor here. # Otherwise, the pair origination of `are_equal` will fail, because the sequence is recognized as container # that should be checked elementwise while the tensor is not. elif isinstance(x, torch.Tensor) and isinstance(y, Sequence): y = torch.as_tensor(y, dtype=x.dtype, device=x.device) elif isinstance(x, Sequence) and isinstance(y, torch.Tensor): x = torch.as_tensor(x, dtype=y.dtype, device=y.device) # unbind NSTs to compare them; don't do this for NJTs if isinstance(x, torch.Tensor) and x.is_nested and x.layout == torch.strided: x = x.unbind() if isinstance(y, torch.Tensor) and y.is_nested and y.layout == torch.strided: y = y.unbind() error_metas = not_close_error_metas( x, y, pair_types=( NonePair, RelaxedBooleanPair, RelaxedNumberPair, TensorOrArrayPair, TypedStoragePair, StringPair, SetPair, TypePair, ObjectPair, ), sequence_types=( Sequence, Sequential, ModuleList, ParameterList, ScriptList, torch.utils.data.dataset.Subset, ), mapping_types=(Mapping, ModuleDict, ParameterDict, ScriptDict), rtol=rtol, rtol_override=self.rel_tol, atol=atol, atol_override=self.precision, equal_nan=equal_nan, check_device=exact_device, check_dtype=exact_dtype, check_layout=exact_layout, check_stride=exact_stride, check_is_coalesced=exact_is_coalesced, ) if error_metas: # See [ErrorMeta Cycles] error_metas = [error_metas] # type: ignore[list-item] # TODO: compose all metas into one AssertionError raise error_metas.pop()[0].to_error( # type: ignore[index] # This emulates unittest.TestCase's behavior if a custom message passed and # TestCase.longMessage (https://docs.python.org/3/library/unittest.html#unittest.TestCase.longMessage) # is True (default) (lambda generated_msg: f"{generated_msg}\n{msg}") if isinstance(msg, str) and self.longMessage else msg ) def assertNotEqual(self, x, y, msg: Optional[str] = None, *, # type: ignore[override] atol: Optional[float] = None, rtol: Optional[float] = None, **kwargs) -> None: with self.assertRaises(AssertionError, msg=msg): self.assertEqual(x, y, msg, atol=atol, rtol=rtol, **kwargs) def assertEqualTypeString(self, x, y) -> None: # This API is used simulate deprecated x.type() == y.type() self.assertEqual(x.device, y.device) self.assertEqual(x.dtype, y.dtype) self.assertEqual(x.is_sparse, y.is_sparse) def assertObjectIn(self, 
obj: Any, iterable: Iterable[Any]) -> None: for elem in iterable: if id(obj) == id(elem): return raise AssertionError("object not found in iterable") # Reimplemented to provide special behavior when # _ignore_not_implemented_error is True def assertRaises(self, expected_exception, *args, **kwargs): if self._ignore_not_implemented_error: context: Optional[AssertRaisesContextIgnoreNotImplementedError] = \ AssertRaisesContextIgnoreNotImplementedError(expected_exception, self) # type: ignore[call-arg] try: return context.handle('assertRaises', args, kwargs) # type: ignore[union-attr, arg-type] finally: # see https://bugs.python.org/issue23890 context = None else: return super().assertRaises(expected_exception, *args, **kwargs) # Reimplemented to provide special behavior when # _ignore_not_implemented_error is True def assertRaisesRegex(self, expected_exception, expected_regex, *args, **kwargs): # Verifies that an exception with the type expected_exception and message # matching the regular expression defined by expected_regex is thrown. # If the test is instantiated for a non-native device type (like XLA) # then the message is not validated. # Checks whether the test is instantiated for a device type by testing # if the test class has defined the device_type attribute and, # if so, tests whether the instantiated device type is native or not if hasattr(self, 'device_type') and self.device_type not in NATIVE_DEVICES and self.device_type != "mps": # type: ignore[attr-defined] # empty string matches any string expected_regex = '' if self._ignore_not_implemented_error: context = AssertRaisesContextIgnoreNotImplementedError( # type: ignore[call-arg] expected_exception, self, expected_regex) return context.handle('assertRaisesRegex', args, kwargs) # type: ignore[attr-defined, arg-type] else: return super().assertRaisesRegex(expected_exception, expected_regex, *args, **kwargs) # Verifies that no unraisable exceptions are raised by callable. Unlike regular # exceptions, these do not actually propagate to the caller and are # suppressed. We must test for them specially. def assertNoUnraisable(self, callable, *args, **kwargs): raised = None def record_unraisable(unraisable): nonlocal raised raised = unraisable # Disable GC when running the callable to prevent spurious flakiness # from unlucky GCs inside the callable prev = gc.isenabled() gc.disable() try: with unittest.mock.patch("sys.unraisablehook", record_unraisable): callable(*args, **kwargs) finally: if prev: gc.enable() self.assertIsNone(raised) # TODO: Support context manager interface # NB: The kwargs forwarding to callable robs the 'subname' parameter. # If you need it, manually apply your callable in a lambda instead. def assertExpectedRaises(self, exc_type, callable, *args, **kwargs): subname = None if 'subname' in kwargs: subname = kwargs['subname'] del kwargs['subname'] try: callable(*args, **kwargs) except exc_type as e: self.assertExpected(str(e), subname) return # Don't put this in the try block; the AssertionError will catch it self.fail(msg="Did not raise when expected to") def assertNotWarn(self, callable, msg=''): r""" Test if :attr:`callable` does not raise a warning. 
""" with warnings.catch_warnings(record=True) as ws: warnings.simplefilter("always") # allow any warning to be raised with set_warn_always_context(True): callable() self.assertTrue(len(ws) == 0, msg) @contextmanager def assertWarnsOnceRegex(self, category, regex=''): """Context manager for code that *must always* warn This filters expected warnings from the test and fails if the expected warning is not caught. It uses set_warn_always() to force TORCH_WARN_ONCE to behave like TORCH_WARN """ pattern = re.compile(regex) with warnings.catch_warnings(record=True) as ws: warnings.simplefilter("always") # allow any warning to be raised with set_warn_always_context(True): yield if len(ws) == 0: self.fail('no warning caught') self.assertTrue(any(type(w.message) is category for w in ws)) self.assertTrue( any(re.match(pattern, str(w.message)) for w in ws), f'{pattern}, {[w.message for w in ws if type(w.message) is category]}') def assertExpected(self, s, subname=None): r""" Test that a string matches the recorded contents of a file derived from the name of this test and subname. This file is placed in the 'expect' directory in the same directory as the test script. You can automatically update the recorded test output using --accept. If you call this multiple times in a single function, you must give a unique subname each time. """ if not isinstance(s, str): raise TypeError("assertExpected is strings only") def remove_prefix(text, prefix): if text.startswith(prefix): return text[len(prefix):] return text # NB: we take __file__ from the module that defined the test # class, so we place the expect directory where the test script # lives, NOT where test/common_utils.py lives. This doesn't matter in # PyTorch where all test scripts are in the same directory as # test/common_utils.py, but it matters in onnx-pytorch module_id = self.__class__.__module__ munged_id = remove_prefix(self.id(), module_id + ".") test_file = os.path.realpath(sys.modules[module_id].__file__) # type: ignore[type-var] expected_file = os.path.join(os.path.dirname(test_file), # type: ignore[type-var, arg-type] "expect", munged_id) subname_output = "" if subname: expected_file += "-" + subname subname_output = f" ({subname})" expected_file += ".expect" expected = None def accept_output(update_type): print(f"Accepting {update_type} for {munged_id}{subname_output}:\n\n{s}") with open(expected_file, 'w') as f: # Adjust for producer_version, leave s unmodified s_tag = re.sub(r'(producer_version): "[0-9.]*"', r'\1: "CURRENT_VERSION"', s) f.write(s_tag) try: with open(expected_file) as f: expected = f.read() except OSError as e: if e.errno != errno.ENOENT: raise elif expecttest.ACCEPT: return accept_output("output") else: raise RuntimeError( f"I got this output for {munged_id}{subname_output}:\n\n{s}\n\n" "No expect file exists; to accept the current output, run:\n" f"python {__main__.__file__} {munged_id} --accept") from None # a hack for JIT tests if IS_WINDOWS: expected = re.sub(r'CppOp\[(.+?)\]', 'CppOp[]', expected) s = re.sub(r'CppOp\[(.+?)\]', 'CppOp[]', s) # Adjust for producer_version expected = expected.replace( 'producer_version: "CURRENT_VERSION"', f'producer_version: "{torch.onnx.producer_version}"' ) if expecttest.ACCEPT: if expected != s: return accept_output("updated output") else: if hasattr(self, "assertMultiLineEqual"): # Python 2.7 only # NB: Python considers lhs "old" and rhs "new". 
self.assertMultiLineEqual(expected, s) else: self.assertEqual(s, expected) def assertExpectedStripMangled(self, s, subname=None): s = re.sub(r'__torch__[^ ]+', '', s) self.assertExpected(s, subname) def assertGreaterAlmostEqual(self, first, second, places=None, msg=None, delta=None): """Assert that ``first`` is greater than or almost equal to ``second``. The equality of ``first`` and ``second`` is determined in a similar way to the ``assertAlmostEqual`` function of the standard library. """ if delta is not None and places is not None: raise TypeError("specify delta or places not both") if first >= second: return diff = second - first if delta is not None: if diff <= delta: return standardMsg = f"{first} not greater than or equal to {second} within {delta} delta" else: if places is None: places = 7 if round(diff, places) == 0: return standardMsg = f"{first} not greater than or equal to {second} within {places} places" msg = self._formatMessage(msg, standardMsg) raise self.failureException(msg) def assertAtenOp(self, onnx_model, operator, overload_name=""): all_aten_nodes = [p for p in onnx_model.graph.node if p.op_type == "ATen" and p.domain == "org.pytorch.aten"] self.assertTrue(all_aten_nodes) for op in all_aten_nodes: attrs = {attr.name: attr.s.decode() for attr in op.attribute} if attrs.get("operator") == operator: break self.assertEqual(attrs["operator"], operator) # type: ignore[possibly-undefined] self.assertEqual(attrs.get("overload_name", ""), overload_name) def check_nondeterministic_alert(self, fn, caller_name, should_alert=True): '''Checks that an operation produces a nondeterministic alert when expected while `torch.use_deterministic_algorithms(True)` is set. Args: fn (callable): Function to check for a nondeterministic alert caller_name (str): Name of the operation that produces the nondeterministic alert. This name is expected to appear at the beginning of the error/warning message. should_alert (bool, optional): If True, then the check will only pass if calling `fn` produces a nondeterministic error/warning with the expected message. If False, then the check will only pass if calling `fn` does not produce an error. Default: `True`. ''' alert_message = '^' + caller_name + ' does not have a deterministic implementation, but you set' # Check that errors are thrown correctly with DeterministicGuard(True): if should_alert: with self.assertRaisesRegex( RuntimeError, alert_message, msg='expected a non-deterministic error, but it was not raised'): fn() else: # If a nondeterministic error is not expected, make sure # that it is not raised try: fn() except RuntimeError as e: if 'does not have a deterministic implementation' in str(e): self.fail( 'did not expect non-deterministic error message, ' + 'but got one anyway: "' + str(e) + '"') # Reraise exceptions unrelated to nondeterminism raise # Check that warnings are thrown correctly with DeterministicGuard(True, warn_only=True): if should_alert: with self.assertWarnsRegex( UserWarning, alert_message): fn() else: with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always") fn() for warning in w: if isinstance(warning, UserWarning): self.assertTrue(re.search(alert_message, str(warning)) is None) # run code in subprocess and capture exceptions. 
    @staticmethod
    def run_process_no_exception(code, env=None):
        import subprocess

        popen = subprocess.Popen(
            [sys.executable, '-c', code],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            env=env)
        (stdout, stderr) = popen.communicate()
        return (stdout, stderr)

    # returns captured stderr
    @staticmethod
    def runWithPytorchAPIUsageStderr(code):
        env = os.environ.copy()
        env["PYTORCH_API_USAGE_STDERR"] = "1"
        # remove CI flag since this is a wrapped test process.
        # CI flag should be set in the parent process only.
        env.pop("CI", None)
        env.pop("TEST_SHOWLOCALS", None)
        _stdout, stderr = TestCase.run_process_no_exception(code, env=env)
        return stderr.decode('ascii')

    def _attempt_load_from_subprocess(
        self,
        file: pathlib.Path,
        import_string: str,
        expected_failure_message: Optional[str] = None
    ) -> None:
        """
        Attempts weights_only `torch.load` in a subprocess. This is used to test that
        weights_only `torch.load` works as expected without global imports.

        Args:
            file (pathlib.Path): The path to the checkpoint to load.
            import_string (str): import string to add to the script
            expected_failure_message (str, optional): The expected failure message if the
                checkpoint fails to load. If None, the test will pass.
        """
        script = f"import torch;{import_string}torch.load(r'{file}', weights_only=True)"
        cm = (
            self.assertRaisesRegex(RuntimeError, re.escape(expected_failure_message))
            if expected_failure_message else contextlib.nullcontext()
        )
        with cm:
            try:
                subprocess.check_output(
                    [sys.executable, "-c", script],
                    # On Windows, opening the subprocess with the default CWD makes `import torch`
                    # fail, so just set CWD to this script's directory
                    cwd=os.path.dirname(os.path.realpath(__file__)),
                    stderr=subprocess.STDOUT,
                )
            except subprocess.CalledProcessError as e:
                raise RuntimeError(e.output.decode("utf-8")) from None


class TestCaseBase(TestCase):
    # Calls to super() in dynamically created classes are a bit odd.
    # See https://github.com/pytorch/pytorch/pull/118586 for more info
    # Subclassing this class and then calling super(TestCaseBase) will run
    # TestCase's setUp, tearDown etc functions
    pass


def download_file(url, binary=True):
    from urllib.parse import urlsplit
    from urllib import request, error

    filename = os.path.basename(urlsplit(url)[2])
    data_dir = get_writable_path(os.path.join(os.path.dirname(__file__), 'data'))
    path = os.path.join(data_dir, filename)

    if os.path.exists(path):
        return path
    try:
        data = request.urlopen(url, timeout=15).read()
        with open(path, 'wb' if binary else 'w') as f:
            f.write(data)
        return path
    except error.URLError as e:
        msg = f"could not download test file '{url}'"
        warnings.warn(msg, RuntimeWarning)
        raise unittest.SkipTest(msg) from e


def find_free_port():
    """
    Finds an available port and returns that port number.

    NOTE: If this function is being used to allocate a port to Store (or
    indirectly via init_process_group or init_rpc), it should be used
    in conjunction with the `retry_on_connect_failures` decorator as there
    is a potential race condition where the allocated port may become
    unavailable before it can be used.
    """
    with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        sock.bind(('localhost', 0))
        _, port = sock.getsockname()
        return port

# Errors that we can get in c10d initialization for which we should retry tests for.
ADDRESS_IN_USE = "Address already in use"
CONNECT_TIMEOUT = "connect() timed out."
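# Illustrative usage sketch (an assumption added for clarity, not part of the
# original module): `find_free_port` above is meant to be paired with
# `retry_on_connect_failures` (defined next), since the allocated port can be
# taken again between allocation and use. The test body is hypothetical.
#
#   @retry_on_connect_failures
#   def test_store_init(self):
#       port = find_free_port()
#       store = torch.distributed.TCPStore("localhost", port, world_size=1, is_master=True)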
def retry_on_connect_failures(func=None, connect_errors=(ADDRESS_IN_USE,)):
    """Reruns a test if the test raises a RuntimeError and the exception
    contains one of the strings in connect_errors."""
    # This if block is executed when using this function as a decorator with arguments.
    if func is None:
        return partial(retry_on_connect_failures, connect_errors=connect_errors)

    @wraps(func)
    def wrapper(*args, **kwargs):
        n_retries = 10
        tries_remaining = n_retries
        while True:
            try:
                return func(*args, **kwargs)
            except RuntimeError as error:
                if any(connect_error in str(error) for connect_error in connect_errors):
                    tries_remaining -= 1
                    if tries_remaining == 0:
                        raise RuntimeError(f"Failing after {n_retries} retries with error: {str(error)}") from error
                    time.sleep(random.random())
                    continue
                raise
    return wrapper


# Decorator to retry upon certain Exceptions.
def retry(ExceptionToCheck, tries=3, delay=3, skip_after_retries=False):
    def deco_retry(f):
        @wraps(f)
        def f_retry(*args, **kwargs):
            mtries, mdelay = tries, delay
            while mtries > 1:
                try:
                    return f(*args, **kwargs)
                except ExceptionToCheck as e:
                    msg = f"{e}, Retrying in {mdelay:d} seconds..."
                    print(msg)
                    time.sleep(mdelay)
                    mtries -= 1
            try:
                return f(*args, **kwargs)
            except ExceptionToCheck as e:
                raise unittest.SkipTest(f"Skipping after {tries} consecutive {str(e)}") from e if skip_after_retries else e
        return f_retry  # true decorator
    return deco_retry


# FIXME: modernize these to be consistent with make_tensor
# and review including them in torch.testing
# Methods for matrix generation

def random_square_matrix_of_rank(l, rank, dtype=torch.double, device='cpu'):
    assert rank <= l
    A = torch.randn(l, l, dtype=dtype, device=device)
    u, s, vh = torch.linalg.svd(A, full_matrices=False)
    for i in range(l):
        if i >= rank:
            s[i] = 0
        elif s[i] == 0:
            s[i] = 1
    return (u * s.to(dtype).unsqueeze(-2)) @ vh


def random_well_conditioned_matrix(*shape, dtype, device, mean=1.0, sigma=0.001):
    """
    Returns a random rectangular matrix (batch of matrices)
    with singular values sampled from a Gaussian with
    mean `mean` and standard deviation `sigma`.
    The smaller the `sigma`, the better conditioned
    the output matrix is.
    """
    primitive_dtype = {
        torch.float: torch.float,
        torch.double: torch.double,
        torch.cfloat: torch.float,
        torch.cdouble: torch.double
    }
    x = torch.rand(shape, dtype=dtype, device=device)
    m = x.size(-2)
    n = x.size(-1)
    u, _, vh = torch.linalg.svd(x, full_matrices=False)
    s = (torch.randn(*(shape[:-2] + (min(m, n),)), dtype=primitive_dtype[dtype], device=device) * sigma + mean) \
        .sort(-1, descending=True).values.to(dtype)
    return (u * s.unsqueeze(-2)) @ vh


# Returns a noncontiguous tensor with the same shape and values as t.
# The noncontiguous tensor is constructed such that elements in the innermost
# dimension are separated by zeros or (whenever possible) nans
# TODO: consider more complicated noncontiguity schemes
def noncontiguous_like(t):
    # Short-circuits if t is already noncontiguous
    if not t.is_contiguous():
        return t

    # Choose a "weird" value that won't be accessed
    if t.dtype.is_floating_point or t.dtype.is_complex:
        value = math.nan
    elif t.dtype == torch.bool:
        value = True
    else:
        value = 12

    result = t.new_empty(t.shape + (2,))
    result[..., 0] = value
    result[..., 1] = t.detach()
    result = result[..., 1]
    result.requires_grad_(t.requires_grad)
    return result


# TODO: remove this (prefer make_symmetric_matrices below)
def random_symmetric_matrix(l, *batches, **kwargs):
    dtype = kwargs.get('dtype', torch.double)
    device = kwargs.get('device', 'cpu')
    A = torch.randn(*(batches + (l, l)), dtype=dtype, device=device)
    A = (A + A.mT).div_(2)
    return A


# Creates a symmetric matrix or batch of symmetric matrices
# Shape must be a square matrix or batch of square matrices
def make_symmetric_matrices(*shape, device, dtype):
    assert shape[-1] == shape[-2]
    t = make_tensor(shape, device=device, dtype=dtype)
    t = (t + t.mT).div_(2)
    return t


def random_hermitian_matrix(l, *batches, **kwargs):
    dtype = kwargs.get('dtype', torch.double)
    device = kwargs.get('device', 'cpu')
    A = torch.randn(*(batches + (l, l)), dtype=dtype, device=device)
    A = (A + A.mH).div_(2)
    return A


def random_symmetric_psd_matrix(l, *batches, **kwargs):
    """
    Returns a batch of random symmetric positive-semi-definite matrices.
    The shape of the result is batch_dims + (matrix_size, matrix_size)
    The following example creates a tensor of size 2 x 4 x 3 x 3
    >>> # xdoctest: +SKIP("undefined variables")
    >>> matrices = random_symmetric_psd_matrix(3, 2, 4, dtype=dtype, device=device)
    """
    dtype = kwargs.get('dtype', torch.double)
    device = kwargs.get('device', 'cpu')
    A = torch.randn(*(batches + (l, l)), dtype=dtype, device=device)
    return A @ A.mT


def random_hermitian_psd_matrix(matrix_size, *batch_dims, dtype=torch.double, device='cpu'):
    """
    Returns a batch of random Hermitian positive-semi-definite matrices.
    The shape of the result is batch_dims + (matrix_size, matrix_size)
    The following example creates a tensor of size 2 x 4 x 3 x 3
    >>> # xdoctest: +SKIP("undefined variables")
    >>> matrices = random_hermitian_psd_matrix(3, 2, 4, dtype=dtype, device=device)
    """
    A = torch.randn(*(batch_dims + (matrix_size, matrix_size)), dtype=dtype, device=device)
    return A @ A.mH


# TODO: remove this (prefer make_symmetric_pd_matrices below)
def random_symmetric_pd_matrix(matrix_size, *batch_dims, **kwargs):
    dtype = kwargs.get('dtype', torch.double)
    device = kwargs.get('device', 'cpu')
    A = torch.randn(*(batch_dims + (matrix_size, matrix_size)), dtype=dtype, device=device)
    return torch.matmul(A, A.mT) \
        + torch.eye(matrix_size, dtype=dtype, device=device) * 1e-5


# Creates a symmetric positive-definite matrix or batch of
# such matrices
def make_symmetric_pd_matrices(*shape, device, dtype):
    assert shape[-1] == shape[-2]
    t = make_tensor(shape, device=device, dtype=dtype)
    i = torch.eye(shape[-1], device=device, dtype=dtype) * 1e-5
    return t @ t.mT + i


def random_hermitian_pd_matrix(matrix_size, *batch_dims, dtype, device):
    """
    Returns a batch of random Hermitian positive-definite matrices.
    The shape of the result is batch_dims + (matrix_size, matrix_size)
    The following example creates a tensor of size 2 x 4 x 3 x 3
    >>> # xdoctest: +SKIP("undefined variables")
    >>> matrices = random_hermitian_pd_matrix(3, 2, 4, dtype=dtype, device=device)
    """
    A = torch.randn(*(batch_dims + (matrix_size, matrix_size)), dtype=dtype, device=device)
    return A @ A.mH + torch.eye(matrix_size, dtype=dtype, device=device)


# Creates a full rank matrix with distinct singular values or
# a batch of such matrices
def make_fullrank_matrices_with_distinct_singular_values(*shape, device, dtype, requires_grad=False):
    with torch.no_grad():
        t = make_tensor(shape, device=device, dtype=dtype)
        u, _, vh = torch.linalg.svd(t, full_matrices=False)
        real_dtype = t.real.dtype if t.dtype.is_complex else t.dtype
        k = min(shape[-1], shape[-2])
        # We choose the singular values to be "around one"
        # This is to make the matrix well conditioned
        # s = [2, 3, ..., k+1]
        s = torch.arange(2, k + 2, dtype=real_dtype, device=device)
        # s = [2, -3, 4, ..., (-1)^k k+1]
        s[1::2] *= -1.
        # 1 + 1/s so that the singular values are in the range [2/3, 3/2]
        # This gives a condition number of 9/4, which should be good enough
        s.reciprocal_().add_(1.)
        # Note that the singular values need not be ordered in an SVD so
        # we don't need to sort S
        x = (u * s.to(u.dtype)) @ vh
    x.requires_grad_(requires_grad)
    return x


def random_matrix(rows, columns, *batch_dims, **kwargs):
    """Return a rectangular matrix or batches of rectangular matrices.
    Parameters:
      dtype - the data type
      device - the device kind
      singular - when True, the output will be singular
    """
    dtype = kwargs.get('dtype', torch.double)
    device = kwargs.get('device', 'cpu')
    silent = kwargs.get("silent", False)
    singular = kwargs.get("singular", False)
    if silent and not torch._C.has_lapack:
        return torch.ones(rows, columns, dtype=dtype, device=device)

    A = torch.randn(batch_dims + (rows, columns), dtype=dtype, device=device)
    if A.numel() == 0:
        return A
    u, _, vh = torch.linalg.svd(A, full_matrices=False)
    k = min(rows, columns)
    s = torch.linspace(1 / (k + 1), 1, k, dtype=dtype, device=device)
    if singular:
        # make matrix singular
        s[k - 1] = 0
        if k > 2:
            # increase the order of singularity so that the pivoting
            # in LU factorization will be non-trivial
            s[0] = 0
    return (u * s.unsqueeze(-2)) @ vh


def random_lowrank_matrix(rank, rows, columns, *batch_dims, **kwargs):
    """Return a rectangular matrix or batches of rectangular matrices with the given rank.
    """
    B = random_matrix(rows, rank, *batch_dims, **kwargs)
    C = random_matrix(rank, columns, *batch_dims, **kwargs)
    return B.matmul(C)


def _generate_indices_prefer_all_rows(rows: int, cols: int, num_indices: int) -> torch.Tensor:
    """Generate indices for a rows x cols matrix, preferring at least one index per row if possible."""
    indices = []  # type: ignore[var-annotated]
    n_per_row = math.ceil(num_indices / rows)
    col_indices = list(range(cols))

    for r in range(rows):
        # Note that this can yield overlapping indices
        indices.extend((r, c) for c in random.choices(col_indices, k=n_per_row))

    return torch.tensor(indices[:num_indices])


def random_sparse_matrix(rows, columns, density=0.01, **kwargs):
    """Return a rectangular random sparse matrix with the given density.

    The density of the result approaches the given density as the size of
    the matrix increases, provided that the specified density is relatively
    small but higher than min(rows, columns)/(rows * columns), so that the
    matrix is non-singular.
    """
    dtype = kwargs.get('dtype', torch.double)
    device = kwargs.get('device', 'cpu')

    nonzero_elements = max(min(rows, columns), int(rows * columns * density))
    indices = _generate_indices_prefer_all_rows(rows, columns, nonzero_elements)
    values = torch.randn(nonzero_elements, dtype=dtype, device=device)

    # ensure that the diagonal dominates
    values *= torch.tensor([-float(i - j)**2 for i, j in indices], dtype=dtype, device=device).exp()
    A = torch.sparse_coo_tensor(indices.t(), values, (rows, columns), device=device)
    return A.coalesce()


def random_sparse_pd_matrix(matrix_size, density=0.01, **kwargs):
    """Return a random sparse positive-definite matrix with the given density.
The eigenvalues of the matrix are defined as:: arange(1, matrix_size+1)/matrix_size Algorithm: A = diag(arange(1, matrix_size+1)/matrix_size) while <A density is smaller than required>: <choose random i, j in range(matrix_size), theta in [0, 2*pi]> R = <rotation matrix (i,j,theta)> A = R^T A R """ import math torch = kwargs.get('torch', globals()['torch']) dtype = kwargs.get('dtype', torch.double) device = kwargs.get('device', 'cpu') data = {(i, i): float(i + 1) / matrix_size for i in range(matrix_size)} def multiply(data, N, i, j, cs, sn, left=True): for k in range(N): if left: ik, jk = (k, i), (k, j) else: ik, jk = (i, k), (j, k) aik, ajk = data.get(ik, 0), data.get(jk, 0) aik, ajk = cs * aik + sn * ajk, -sn * aik + cs * ajk if aik: data[ik] = aik else: data.pop(ik, None) if ajk: data[jk] = ajk else: data.pop(jk, None) target_nnz = density * matrix_size * matrix_size while len(data) < target_nnz: i = random.randint(0, matrix_size - 1) j = random.randint(0, matrix_size - 1) if i != j: theta = random.uniform(0, 2 * math.pi) cs = math.cos(theta) sn = math.sin(theta) multiply(data, matrix_size, i, j, cs, sn, left=True) multiply(data, matrix_size, i, j, cs, sn, left=False) icoords, jcoords, values = [], [], [] for (i, j), v in sorted(data.items()): icoords.append(i) jcoords.append(j) values.append(v) indices_tensor = torch.tensor([icoords, jcoords]) return torch.sparse_coo_tensor(indices_tensor, values, (matrix_size, matrix_size), dtype=dtype, device=device) # FIXME: remove this by updating test suites using it def do_test_dtypes(self, dtypes, layout, device): for dtype in dtypes: if dtype != torch.float16: out = torch.zeros((2, 3), dtype=dtype, layout=layout, device=device) self.assertIs(dtype, out.dtype) self.assertIs(layout, out.layout) self.assertEqual(device, out.device) # FIXME: remove this by updating test suites using it def do_test_empty_full(self, dtypes, layout, device): shape = torch.Size([2, 3]) def check_value(tensor, dtype, layout, device, value, requires_grad): self.assertEqual(shape, tensor.shape) self.assertIs(dtype, tensor.dtype) self.assertIs(layout, tensor.layout) self.assertEqual(tensor.requires_grad, requires_grad) if tensor.is_cuda and device is not None: self.assertEqual(device, tensor.device) if value is not None: fill = tensor.new(shape).fill_(value) self.assertEqual(tensor, fill) def get_int64_dtype(dtype): module = '.'.join(str(dtype).split('.')[1:-1]) if not module: return torch.int64 return operator.attrgetter(module)(torch).int64 default_dtype = torch.get_default_dtype() check_value(torch.empty(shape), default_dtype, torch.strided, -1, None, False) check_value(torch.full(shape, -5.), default_dtype, torch.strided, -1, None, False) for dtype in dtypes: for rg in {dtype.is_floating_point, False}: int64_dtype = get_int64_dtype(dtype) v = torch.empty(shape, dtype=dtype, device=device, layout=layout, requires_grad=rg) check_value(v, dtype, layout, device, None, rg) out = v.new() check_value(torch.empty(shape, out=out, device=device, layout=layout, requires_grad=rg), dtype, layout, device, None, rg) check_value(v.new_empty(shape), dtype, layout, device, None, False) check_value(v.new_empty(shape, dtype=int64_dtype, device=device, requires_grad=False), int64_dtype, layout, device, None, False) check_value(torch.empty_like(v), dtype, layout, device, None, False) check_value(torch.empty_like(v, dtype=int64_dtype, layout=layout, device=device, requires_grad=False), int64_dtype, layout, device, None, False) if dtype is not torch.float16 and layout != torch.sparse_coo: fv = 
3 v = torch.full(shape, fv, dtype=dtype, layout=layout, device=device, requires_grad=rg) check_value(v, dtype, layout, device, fv, rg) check_value(v.new_full(shape, fv + 1), dtype, layout, device, fv + 1, False) out = v.new() check_value(torch.full(shape, fv + 2, out=out, device=device, layout=layout, requires_grad=rg), dtype, layout, device, fv + 2, rg) check_value(v.new_full(shape, fv + 3, dtype=int64_dtype, device=device, requires_grad=False), int64_dtype, layout, device, fv + 3, False) check_value(torch.full_like(v, fv + 4), dtype, layout, device, fv + 4, False) check_value(torch.full_like(v, fv + 5, dtype=int64_dtype, layout=layout, device=device, requires_grad=False), int64_dtype, layout, device, fv + 5, False) # FIXME: improve load_tests() documentation here running_script_path = None # type: ignore[var-annotated] def set_running_script_path(): global running_script_path try: running_file = os.path.abspath(os.path.realpath(sys.argv[0])) if running_file.endswith('.py'): # skip if the running file is not a script running_script_path = running_file except Exception: pass def check_test_defined_in_running_script(test_case): if running_script_path is None: return test_case_class_file = os.path.abspath(os.path.realpath(inspect.getfile(test_case.__class__))) assert test_case_class_file == running_script_path, f'Class of loaded TestCase "{test_case.id()}" ' \ f'is not defined in the running script "{running_script_path}", but in "{test_case_class_file}". Did you ' \ "accidentally import a unittest.TestCase from another file?" def load_tests(loader, tests, pattern): set_running_script_path() test_suite = unittest.TestSuite() for test_group in tests: if not DISABLE_RUNNING_SCRIPT_CHK: for test in test_group: check_test_defined_in_running_script(test) if test_group._tests: test_suite.addTest(test_group) return test_suite # FIXME: document this and move it to test_serialization class BytesIOContext(io.BytesIO): def __enter__(self): return self def __exit__(self, *args): pass # Tentative value for nondet_tol for gradcheck when backward implementation # relies on nondeterministic operations, i.e., those listed here: # https://pytorch.org/docs/stable/generated/torch.use_deterministic_algorithms.html # # For more information see https://github.com/pytorch/pytorch/issues/56202 GRADCHECK_NONDET_TOL = 1e-12 TEST_WITH_SLOW_GRADCHECK: bool = TestEnvironment.def_flag( "TEST_WITH_SLOW_GRADCHECK", env_var="PYTORCH_TEST_WITH_SLOW_GRADCHECK", ) skipIfSlowGradcheckEnv = unittest.skipIf( TEST_WITH_SLOW_GRADCHECK, "Tests that don't use gradcheck don't need to run on slow_gradcheck CI", ) def gradcheck(fn, inputs, **kwargs): # Wrapper around gradcheck that enables certain keys by default. # Use this testing-internal gradcheck instead of autograd.gradcheck so that new features like vmap and # forward-mode AD are tested by default. We create this wrapper because we'd like to keep new checks # to be disabled to default for the public-facing api to avoid breaking user code. # # All PyTorch devs doing testing should use this wrapper instead of autograd.gradcheck. 
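    # A minimal illustrative call (an assumption added for clarity, not from the
    # original file): like torch.autograd.gradcheck, this wrapper is invoked with a
    # function and double-precision inputs that require grad, e.g.
    #   gradcheck(torch.sin, (torch.randn(3, dtype=torch.double, requires_grad=True),))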
default_values = { "check_batched_grad": True, "fast_mode": True, } if TEST_WITH_SLOW_GRADCHECK: default_values["fast_mode"] = False for key, value in default_values.items(): # default value override values explicitly set to None k = kwargs.get(key, None) kwargs[key] = k if k is not None else value return torch.autograd.gradcheck(fn, inputs, **kwargs) def gradgradcheck(fn, inputs, grad_outputs=None, **kwargs): # Wrapper around gradgradcheck that enables certain keys by default # See gradcheck above for an explanation of why we need something like this. # # All PyTorch devs doing testing should use this wrapper instead of autograd.gradgradcheck default_values = { "check_batched_grad": True, "fast_mode": True, } if TEST_WITH_SLOW_GRADCHECK: default_values["fast_mode"] = False for key, value in default_values.items(): # default value override values explicitly set to None k = kwargs.get(key, None) kwargs[key] = k if k is not None else value return torch.autograd.gradgradcheck(fn, inputs, grad_outputs, **kwargs) def _assertGradAndGradgradChecks(test_case, apply_fn, inputs, **kwargs): # call assert function rather than returning a bool since it's nicer # if we get whether this failed on the gradcheck or the gradgradcheck. test_case.assertTrue(gradcheck(apply_fn, inputs, **kwargs)) test_case.assertTrue(gradgradcheck(apply_fn, inputs, **kwargs)) @contextmanager def set_cwd(path: str) -> Iterator[None]: old_cwd = os.getcwd() try: os.chdir(path) yield finally: os.chdir(old_cwd) # FIXME: delete this # Using @toleranceOverride specific to your test is the recommended way # of doing this. These are just some values that worked for test_nn. dtype2prec_DONTUSE = {torch.float: 1e-5, torch.double: 1e-5, torch.half: 1e-2, torch.bfloat16: 1e-1} # FIXME: move to test_sparse or sparse utils # This is a wrapper that wraps a test to run this test twice, one with # coalesced=True, another with coalesced=False for coalesced/uncoalesced sparse tensors. def coalescedonoff(f): @wraps(f) def wrapped(self, *args, **kwargs): f(self, *args, **kwargs, coalesced=True) f(self, *args, **kwargs, coalesced=False) return wrapped def is_coalesced_indices(s): indices = s._indices() hash_coeffs = (1,) + s.shape[s.sparse_dim() - 1:0:-1] hash_indices = torch.tensor(hash_coeffs, device=s.device).cumprod(-1).flip(-1) if s.sparse_dim() > 1: hash_indices.unsqueeze_(-1) hash_indices = (indices * hash_indices).sum(0) else: hash_indices = indices * hash_indices # check if indices are sorted res = torch.allclose(hash_indices, hash_indices.sort()[0]) # check if there are no repeated indices res = res and torch.allclose(hash_indices, hash_indices.unique()) return res @contextlib.contextmanager def disable_gc(): if gc.isenabled(): try: gc.disable() yield finally: gc.enable() else: yield def find_library_location(lib_name: str) -> Path: # return the shared library file in the installed folder if exist, # else the file in the build folder torch_root = Path(torch.__file__).resolve().parent path = torch_root / 'lib' / lib_name if os.path.exists(path): return path torch_root = Path(__file__).resolve().parents[2] return torch_root / 'build' / 'lib' / lib_name def skip_but_pass_in_sandcastle(reason): """ Similar to unittest.skip, however in the sandcastle environment it just "passes" the test instead to avoid creating tasks complaining about tests skipping continuously. 
""" def decorator(func): if not IS_SANDCASTLE: func.__unittest_skip__ = True func.__unittest_skip_why__ = reason return func @wraps(func) def wrapper(*args, **kwargs): print(f'Skipping {func.__name__} on sandcastle for following reason: {reason}', file=sys.stderr) return return wrapper return decorator def mock_wrapper(method): """ Returns a function that calls the real implementation of a method in addition to passing args to a mock object. """ mock = MagicMock() @wraps(method) def wrapper(self, *args, **kwargs): mock(*args, **kwargs) return method(self, *args, **kwargs) wrapper.mock = mock # type: ignore[attr-defined] return wrapper def get_tensors_from(args, kwargs): """ Returns a set of all Tensor objects in the given args and kwargs. """ return set([arg for arg in args if isinstance(arg, Tensor)] + [v for v in kwargs.values() if isinstance(v, Tensor)]) # Returns scalar tensor representation of a list of integer byte values def bytes_to_scalar(byte_list: list[int], dtype: torch.dtype, device: torch.device): dtype_to_ctype: dict[torch.dtype, Any] = { torch.int8: ctypes.c_int8, torch.uint8: ctypes.c_uint8, torch.uint16: ctypes.c_uint16, torch.uint32: ctypes.c_uint32, torch.uint64: ctypes.c_uint64, torch.int16: ctypes.c_int16, torch.int32: ctypes.c_int32, torch.int64: ctypes.c_int64, torch.bool: ctypes.c_bool, torch.float32: ctypes.c_float, torch.complex64: ctypes.c_float, torch.float64: ctypes.c_double, torch.complex128: ctypes.c_double, } ctype = dtype_to_ctype[dtype] num_bytes = ctypes.sizeof(ctype) def check_bytes(byte_list): for byte in byte_list: assert 0 <= byte <= 255 if dtype.is_complex: assert len(byte_list) == (num_bytes * 2) check_bytes(byte_list) real = ctype.from_buffer((ctypes.c_byte * num_bytes)( *byte_list[:num_bytes])).value imag = ctype.from_buffer((ctypes.c_byte * num_bytes)( *byte_list[num_bytes:])).value res = real + 1j * imag else: assert len(byte_list) == num_bytes check_bytes(byte_list) res = ctype.from_buffer((ctypes.c_byte * num_bytes)( *byte_list)).value return torch.tensor(res, device=device, dtype=dtype) def copy_func(f): """Based on http://stackoverflow.com/a/6528148/190597 (Glenn Maynard)""" g = types.FunctionType(f.__code__, f.__globals__, name=f.__name__, argdefs=f.__defaults__, closure=f.__closure__) g = functools.update_wrapper(g, f) g.__kwdefaults__ = f.__kwdefaults__ # type: ignore[attr-defined] return g def xfail_inherited_tests(tests): """ Given a list of test names which are defined by a superclass of the class this decorates, mark them as expected failure. This is useful if you are doing poor man's parameterized tests by subclassing a generic test class. """ def deco(cls): for t in tests: # NB: expectedFailure operates by mutating the method in question, # which is why you have to copy the function first setattr(cls, t, unittest.expectedFailure(copy_func(getattr(cls, t)))) return cls return deco def skip_but_pass_in_sandcastle_if(condition, reason): """ Similar to unittest.skipIf, however in the sandcastle environment it just "passes" the test instead to avoid creating tasks complaining about tests skipping continuously. """ def decorator(func): if condition: if IS_SANDCASTLE: @wraps(func) def wrapper(*args, **kwargs): print(f'Skipping {func.__name__} on sandcastle for following reason: {reason}', file=sys.stderr) return wrapper else: func.__unittest_skip__ = True func.__unittest_skip_why__ = reason return func return decorator def dtype_name(dtype): """ Returns the pretty name of the dtype (e.g. torch.int64 -> int64). 
""" return str(dtype).split('.')[1] dtype_abbrs = { torch.bfloat16: 'bf16', torch.float64: 'f64', torch.float32: 'f32', torch.float16: 'f16', torch.complex32: 'c32', torch.complex64: 'c64', torch.complex128: 'c128', torch.int8: 'i8', torch.int16: 'i16', torch.int32: 'i32', torch.int64: 'i64', torch.bool: 'b8', torch.uint8: 'u8', } @functools.lru_cache def get_cycles_per_ms() -> float: """Measure and return approximate number of cycles per millisecond for torch.cuda._sleep """ def measure() -> float: start = torch.cuda.Event(enable_timing=True) end = torch.cuda.Event(enable_timing=True) start.record() torch.cuda._sleep(1000000) end.record() end.synchronize() cycles_per_ms = 1000000 / start.elapsed_time(end) return cycles_per_ms # Get 10 values and remove the 2 max and 2 min and return the avg. # This is to avoid system disturbance that skew the results, e.g. # the very first cuda call likely does a bunch of init, which takes # much longer than subsequent calls. # # Tested on both Tesla V100, Quadro GP100, Titan RTX, RTX 3090 GPUs # and seems to return stable values. Therefore, we enable caching # using lru_cache decorator above. num = 10 vals = [measure() for _ in range(num)] vals = sorted(vals) return mean(vals[2 : num - 2]) # OpInfo utils T = TypeVar('T') def first_sample(self: unittest.TestCase, samples: Iterable[T]) -> T: """ Returns the first sample from an iterable of samples, like those returned by OpInfo. The test will be skipped if no samples are available. """ try: return next(iter(samples)) except StopIteration as e: raise unittest.SkipTest('Skipped! Need at least 1 sample input') from e # this helper method is to recursively # clone the tensor-type input of operators tested by OpInfo def clone_input_helper(input): if isinstance(input, torch.Tensor): return torch.clone(input) if isinstance(input, Sequence): return tuple(map(clone_input_helper, input)) return input @contextmanager def custom_op(opname, symbolic_fn, opset_version): """Context manager/decorator to test ONNX export with custom operator""" try: register_custom_op_symbolic(opname, symbolic_fn, opset_version) yield finally: unregister_custom_op_symbolic(opname, opset_version) def outs_and_grads(fn, graph_inps, inps): outs = fn(*graph_inps) for out in pytree.tree_leaves(outs): if isinstance(out, torch.Tensor) and out.requires_grad: out.sum().backward(retain_graph=True) grads = [inp.grad for inp in pytree.tree_leaves(inps) if isinstance(inp, torch.Tensor)] for inp in pytree.tree_leaves(inps): if isinstance(inp, torch.Tensor): inp.grad = None return outs, grads def compare_equal_outs_and_grads(test, m1, m2, inps): r1, g1 = outs_and_grads(m1, inps, inps) r2, g2 = outs_and_grads(m2, inps, inps) test.assertEqual(r1, r2) test.assertEqual(g1, g2) class TestGradients(TestCase): exact_dtype = True # Copies inputs to inplace operations to avoid inplace modifications # to leaves requiring gradient def _get_safe_inplace(self, inplace_variant): @wraps(inplace_variant) def _fn(t, *args, **kwargs): return inplace_variant(t.clone(), *args, **kwargs) return _fn def _check_helper(self, device, dtype, op, variant, check, *, check_forward_ad=False, check_backward_ad=True, check_batched_grad=None, check_batched_forward_grad=False): assert check in ('gradcheck', 'bwgrad_bwgrad', 'fwgrad_bwgrad') # NB: check_backward_ad does not affect gradgradcheck (always True) if variant is None: self.skipTest("Skipped! Variant not implemented.") if not op.supports_dtype(dtype, torch.device(device).type): self.skipTest(f"Skipped! 
{op.name} does not support dtype {str(dtype)}") def is_inplace(variant): if hasattr(variant, "__wrapped__"): return variant.__wrapped__ is op.get_inplace() return variant is op.get_inplace() include_conjugated_inputs = op.test_conjugated_samples and dtype.is_complex samples = op.sample_inputs(device, dtype, requires_grad=True, include_conjugated_inputs=include_conjugated_inputs, small_inputs_only=TEST_WITH_SLOW_GRADCHECK) for sample in samples: if sample.broadcasts_input and is_inplace(variant): continue # Gradcheck expects tensors as its input, but autograd actually supports tensorlists # and tensors passed as kwargs. The following creates a function that accepts just # the tensors that require grad as varargs, and then recomposes them back into the # original input. # Creates gradcheck inputs by identifying tensors requiring grad all_args = None if is_iterable_of_tensors(sample.input): all_args = chain(sample.input, sample.args, sample.kwargs.values()) else: all_args = tuple(chain((sample.input,), sample.args, sample.kwargs.values())) # type: ignore[assignment] gradcheck_args = tuple(x for x in all_args if (isinstance(x, torch.Tensor) and x.requires_grad)) # type: ignore[union-attr] # Verifies sample input tensors should have no grad # This may happen if the same tensor is used in two different SampleInputs for t in gradcheck_args: self.assertIsNone(t.grad, "A sampled input has a gradient before running autograd. " "This usually means that (at least) one input tensor is reused " "across different SampleInputs. " "Please create a new tensor for each SampleInput.") def _input_recomposition_helper(inputs, inp, input_idx): if is_iterable_of_tensors(inp): tensor_list = [] for x in inp: if isinstance(x, torch.Tensor) and x.requires_grad: tensor_list.append(inputs[input_idx]) input_idx = input_idx + 1 else: tensor_list.append(x) return tensor_list, input_idx elif isinstance(inp, torch.Tensor) and inp.requires_grad: return inputs[input_idx], input_idx + 1 else: return inp, input_idx def fn(*inputs): # Puts inputs back into sample properly positional_args = [] input_idx = 0 inp, input_idx = _input_recomposition_helper(inputs, sample.input, input_idx) positional_args.append(inp) for x in sample.args: inp, input_idx = _input_recomposition_helper(inputs, x, input_idx) positional_args.append(inp) # Recreates kwargs kwargs = {} for k, v in sample.kwargs.items(): inp, input_idx = _input_recomposition_helper(inputs, v, input_idx) kwargs[k] = inp output = op.gradcheck_wrapper(variant, *positional_args, **kwargs) if sample.output_process_fn_grad is not None: return sample.output_process_fn_grad(output) return output if check == 'gradcheck': if check_batched_grad is None: check_batched_grad = op.check_batched_grad self.assertTrue(gradcheck(fn, gradcheck_args, check_batched_grad=check_batched_grad, check_grad_dtypes=True, nondet_tol=op.gradcheck_nondet_tol, fast_mode=op.gradcheck_fast_mode, check_forward_ad=check_forward_ad, check_backward_ad=check_backward_ad, check_undefined_grad=True, check_batched_forward_grad=check_batched_forward_grad)) elif check in ('bwgrad_bwgrad', 'fwgrad_bwgrad'): # gradgrad check self.assertFalse(check_forward_ad, msg="Cannot run forward AD check for gradgradcheck") for gen_non_contig_grad_outputs in (False, True): kwargs = { "gen_non_contig_grad_outputs": gen_non_contig_grad_outputs, "check_batched_grad": op.check_batched_gradgrad, "check_grad_dtypes": True, "nondet_tol": op.gradcheck_nondet_tol, "fast_mode": op.gradcheck_fast_mode } if check == "fwgrad_bwgrad": 
kwargs["check_fwd_over_rev"] = True kwargs["check_rev_over_rev"] = False kwargs["check_batched_grad"] = False kwargs["check_undefined_grad"] = False self.assertTrue(gradgradcheck(fn, gradcheck_args, **kwargs)) else: self.assertTrue(False, msg="Unknown check requested!") def _grad_test_helper(self, device, dtype, op, variant, *, check_forward_ad=False, check_backward_ad=True, check_batched_grad=None, check_batched_forward_grad=False): return self._check_helper(device, dtype, op, variant, 'gradcheck', check_forward_ad=check_forward_ad, check_backward_ad=check_backward_ad, check_batched_grad=check_batched_grad, check_batched_forward_grad=check_batched_forward_grad) def _skip_helper(self, op, device, dtype): if dtype not in op.supported_backward_dtypes(torch.device(device).type): self.skipTest("Skipped! Op doesn't support autograd for this dtype.") if not op.supports_autograd and not op.supports_forward_ad: self.skipTest("Skipped! autograd not supported.") def make_lazy_class(cls): def lazy_init(self, cb): self._cb = cb self._value = None cls.__init__ = lazy_init for basename in [ "add", "sub", "mul", "truediv", "floordiv", "mod", "divmod", "pow", "lshift", "rshift", "and", "or", "xor", "neg", "pos", "abs", "invert", "eq", "ne", "lt", "le", "gt", "ge", "bool", "int", "index", ]: name = f"__{basename}__" def inner_wrapper(name): use_operator = basename not in ("bool", "int") def wrapped(self, *args, **kwargs): if self._cb is not None: self._value = self._cb() self._cb = None if not use_operator: return getattr(self._value, name)(*args, **kwargs) else: return getattr(operator, name)(self._value, *args, **kwargs) return wrapped setattr(cls, name, inner_wrapper(name)) return cls # Base TestCase for NT tests; used to define common helpers, etc. class NestedTensorTestCase(TestCase): def assertEqualIgnoringNestedInts(self, a, b): # unbinding NJTs allows us to compare them as essentially equal without # caring about exact nested int comparison def _unbind_njts(x): if isinstance(x, torch.Tensor) and x.is_nested and x.layout == torch.jagged: return x.unbind() else: return x self.assertEqual(pytree.tree_map(_unbind_njts, a), pytree.tree_map(_unbind_njts, b)) def assertEqualNoncontigAware(self, a, b): # assertEqual() doesn't take into account lengths, so hack around this # by comparing unbound components and shapes self.assertEqualIgnoringNestedInts(a, b) def _get_njt_shapes(x): return ( x.shape if isinstance(x, torch.Tensor) and x.is_nested else None ) a_shapes = pytree.tree_map(_get_njt_shapes, a) b_shapes = pytree.tree_map(_get_njt_shapes, b) self.assertEqual(a_shapes, b_shapes) @contextlib.contextmanager def branch_nested_state(self): """Context manager to branch and restore the nested tensor state.""" nested_tensor_module = torch.nested._internal.nested_tensor original_tensor_symint_registry = nested_tensor_module._tensor_symint_registry.copy() original_tensor_id_counter = nested_tensor_module._tensor_id_counter try: yield finally: nested_tensor_module._tensor_id_counter = original_tensor_id_counter nested_tensor_module._tensor_symint_registry = original_tensor_symint_registry @make_lazy_class class LazyVal: pass def munge_exc(e, *, suppress_suffix=True, suppress_prefix=True, file=None, skip=0): if file is None: file = inspect.stack()[1 + skip].filename # skip one frame file = _as_posix_path(file) s = _as_posix_path(str(e)) # Remove everything that looks like stack frames in NOT this file def repl_frame(m): if m.group(1) != file: return "" # Don't accept top-level, even for this script, these will 
wobble # depending on how the testing script was invoked if m.group(2) == "<module>": return "" return m.group(0) s = re.sub(r' File "([^"]+)", line \d+, in (.+)\n( .+\n( +[~^]+ *\n)?)+', repl_frame, s) s = re.sub(r"line \d+", "line N", s) s = re.sub(r".py:\d+", ".py:N", s) s = re.sub(file, _as_posix_path(os.path.basename(file)), s) s = re.sub(_as_posix_path(os.path.join(os.path.dirname(torch.__file__), "")), "", s) if suppress_suffix: s = re.sub(r"\n*Set TORCH_LOGS.+", "", s, flags=re.DOTALL) s = re.sub(r"\n*You can suppress this exception.+", "", s, flags=re.DOTALL) s = re.sub(r"\n*Set TORCHDYNAMO_VERBOSE=1.+", "", s, flags=re.DOTALL) if suppress_prefix: s = re.sub(r"Cannot export model.+\n\n", "", s) s = re.sub(r" +$", "", s, flags=re.MULTILINE) return s @contextmanager def check_leaked_tensors(limit=1, matched_type=torch.Tensor): """Wrap around operations you want to ensure are not leaking tensor memory. This code intentionally ignores other reference cycles, which can be benign and which we have plenty of in pytorch code. It focuses on any reference cycles that directly or indirectly result holding a Tensor alive, since this is likely a more serious leak than typical python refcycles. limit specifies how many tensors to dump debug graphs for (default=1) """ def match_obj(obj): return isinstance(obj, matched_type) try: gc.collect() gc.set_debug(gc.DEBUG_SAVEALL) garbage_objs = [] # type: ignore[var-annotated] # run the user code, after cleaning any existing refcycles, and then check for new ones # also allow usercode to check the garbage objs (e.g. for assertion) after exiting ctxmgr yield garbage_objs gc.collect() garbage_objs.extend(filter(match_obj, gc.garbage)) num_garbage_objs = len(garbage_objs) if num_garbage_objs > 0: warnings.warn( f"{num_garbage_objs} tensors were found in the garbage. Did you introduce a reference cycle?" ) try: import objgraph # type: ignore[import-not-found] warnings.warn( f"Dumping first {limit} objgraphs of leaked {matched_type}s rendered to png" ) for g in garbage_objs[:limit]: objgraph.show_backrefs([g], max_depth=10) except ImportError: warnings.warn("`pip install objgraph` to enable memory leak debugging") finally: gc.set_debug(0) def remove_cpp_extensions_build_root(): """ Removes the default root folder under which extensions are built. """ default_build_root = cpp_extension.get_default_build_root() if os.path.exists(default_build_root): if IS_WINDOWS: # rmtree returns permission error: [WinError 5] Access is denied # on Windows, this is a workaround subprocess.run(["rm", "-rf", default_build_root], stdout=subprocess.PIPE) else: shutil.rmtree(default_build_root, ignore_errors=True) # Decorator to provide a helper to load inline extensions to a temp directory def scoped_load_inline(func): @wraps(func) def wrapper(*args, **kwargs): def load_inline(*args, **kwargs): if IS_WINDOWS: # TODO(xmfan): even using TemporaryDirectoryName will result in permission error return cpp_extension.load_inline(*args, **kwargs) assert "build_directory" not in kwargs with TemporaryDirectoryName() as temp_dir_name: if kwargs.get("verbose", False): print(f'Using temporary extension directory {temp_dir_name}...', file=sys.stderr) kwargs["build_directory"] = temp_dir_name return cpp_extension.load_inline(*args, **kwargs) return func(*args, load_inline=load_inline, **kwargs) return wrapper ```
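The `gradcheck`/`gradgradcheck` wrappers defined above exist so that tests pick up the testing defaults (`fast_mode=True`, `check_batched_grad=True`, with fast mode disabled under the slow-gradcheck CI flag) without every caller spelling them out. A minimal usage sketch, assuming this module is importable as `torch.testing._internal.common_utils` and using `torch.sin` purely as an illustrative op:

```py
# Minimal sketch: drive the testing-internal gradcheck/gradgradcheck wrappers.
# Assumes a working torch install; torch.sin and the input shape are illustrative.
import torch
from torch.testing._internal.common_utils import gradcheck, gradgradcheck

# gradcheck expects double-precision inputs with requires_grad=True.
x = torch.randn(3, 4, dtype=torch.double, requires_grad=True)

# Both wrappers forward to torch.autograd.gradcheck/gradgradcheck with the
# defaults above applied (callers can still override any keyword).
assert gradcheck(torch.sin, (x,))
assert gradgradcheck(torch.sin, (x,))
```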
======================================================================================================================================= SOURCE CODE FILE: composite_compliance.py LINES: 13 SIZE: 25.57 KB PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\composite_compliance.py ENCODING: utf-8 ```py # mypy: ignore-errors import torch from torch import Tensor import itertools from torch.utils._python_dispatch import TorchDispatchMode from torch.utils._pytree import tree_map, tree_flatten, tree_unflatten from torch.utils import _pytree as pytree from functools import partial from torch.utils._mode_utils import no_dispatch, all_same_mode import torch.autograd.forward_ad as fwAD from typing import Callable import re def check_attr_consistency(wrapper_tensor, metadata_name, metadata_accessor): elem = wrapper_tensor.elem metadata_wrapper_tensor = metadata_accessor(wrapper_tensor) metadata_elem = metadata_accessor(elem) if metadata_wrapper_tensor == metadata_elem: return raise RuntimeError( f"This operator is not Composite Compliant: the " f"{metadata_name} of the tensor was modified directly without " f"going through the PyTorch dispatcher.") def check_metadata_consistency(wrapper_tensor, CCT): # CCT: CompositeCompliantTensor class which is generated using generate_cct if not isinstance(wrapper_tensor, CCT): return things_to_check = { 'shape': Tensor.size, 'dtype': lambda x: x.dtype, 'device': lambda x: x.device, 'numel': Tensor.numel, 'stride': Tensor.stride, 'storage_offset': Tensor.storage_offset, } for metadata_name, metadata_accessor in things_to_check.items(): check_attr_consistency(wrapper_tensor, metadata_name, metadata_accessor) def is_view_fn(func): return func.overloadpacket.__name__ in { 'as_strided', 'detach', 'diagonal', 'expand', 'expand_as', 'movedim', 'narrow', 'permute', 'select', 'squeeze', 'transpose', 't', 'real', 'imag', 'view_as_real', 'view_as_complex', 'unflatten', 'unfold', 'unsqueeze', 'view', 'view_as', 'unbind', 'split', 'split_with_sizes', 'vsplit', 'hsplit', 'tensor_split', 'chunk', 'swapaxes', 'slice', '_reshape_alias', '_unsafe_view', '_conj', 'alias', } # manually populated from native_functions that have inplace_view: True. # In the future we will probably be able to grab that list directly def is_inplace_view_fn(func): return func.overloadpacket.__name__ in { 'as_strided_', 'detach_', 'squeeze_', 'swapaxes_', 'swapdims_', 't_', 'transpose_', 'unsqueeze_', } # Introspection please save us def is_inplace(func): name = func.overloadpacket.__name__ if re.match('__i.+__', name): return True if re.match('__.+__', name): return False return name[-1] == '_' def generate_cct_and_mode(autograd_view_consistency=True): # This function returns a new class CompositeCompliantTensor # The two arguments control the behaviour described below. # autograd_view_consistency: # If True, alias result using `set_` if func returns a view # (See Note [Alias Result]). # Since Forward AD doesn't work with `set_` # we disable it by setting alias to False. class CompositeCompliantTensor(torch.Tensor): elem: torch.Tensor __slots__ = ['elem'] @staticmethod def __new__(cls, elem, mode, *args, **kwargs): assert type(elem) is not cls, \ "Wrapping a CompositeCompliantTensor in a CompositeCompliantTensor is not supported" # The storage of CompositeCompliantTensor should never be used directly # by a Composite operation; if the Composite # operator attempts to read from the storage without dispatching then it'll # raise a RuntimeError due to it being a meta storage. 
r = torch.Tensor._make_wrapper_subclass( # type: ignore[attr-defined] cls, elem.size(), dtype=elem.dtype, layout=elem.layout, device=elem.device, requires_grad=elem.requires_grad, strides=elem.stride(), storage_offset=elem.storage_offset()) if elem.requires_grad: # CompositeCompliantTensor steals the "requires_grad"-ness. # Why a new copy of `elem`? Because sometimes OpInfo shares inputs between tests... tmp = torch.empty( (), dtype=elem.dtype, device=elem.device, layout=elem.layout, requires_grad=False, ) # Use set_ rather than empty_strided() + copy_ so that we can preserve # things like storage_offset. tmp.set_( source=elem.untyped_storage().clone(), storage_offset=elem.storage_offset(), size=elem.size(), stride=elem.stride(), ) r.elem = tmp else: r.elem = elem assert r.stride() == r.elem.stride() # Propagate conjugate bits to the wrapper tensor # Ref: https://github.com/albanD/subclass_zoo/issues/24 # Ref: https://github.com/albanD/subclass_zoo/issues/21 torch._C._set_conj(r, r.elem.is_conj()) torch._C._set_neg(r, r.elem.is_neg()) r.mode = mode return r def __repr__(self): return f"CompositeCompliantTensor({self.elem})" @classmethod def __torch_dispatch__(cls, func, types, args=(), kwargs=None): all_args = pytree.arg_tree_leaves(*args, **(kwargs or {})) modes = tuple(e.mode for e in all_args if isinstance(e, CompositeCompliantTensor)) if not all_same_mode(modes): raise RuntimeError("Multiple CompositeCompliantTensorModes NYI") with modes[0]: return func(*args, **kwargs) class CompositeCompliantTensorMode(TorchDispatchMode): def __torch_dispatch__(self, func, types, args=(), kwargs=None): def unwrap(e): return e.elem if isinstance(e, CompositeCompliantTensor) else e def wrap(e): return CompositeCompliantTensor(e, self) if isinstance(e, torch.Tensor) else e if func == torch.ops.aten._local_scalar_dense.default: raise RuntimeError( ".item() is not allowed to be called inside of composite " "functions in the PyTorch library because not all backends " "and/or Tensor subclasses (e.g. vmap, ProxyTensor) support them.") if func.overloadpacket.__name__ in ('set_', 'resize_'): raise RuntimeError( f"{func.__name__} is not allowed to be called inside of " f"Composite operators.") if is_inplace(func): # NB: We are making an assumption that if the function is in-place, # then the first argument is being written to. Introspection please save us! mutated_argument = args[0] if not isinstance(mutated_argument, CompositeCompliantTensor) and \ any(isinstance(a, CompositeCompliantTensor) for a in args[1:]): raise RuntimeError( 'Not composite compliant: performing in-place operation ' f'{func.__name__} where the Tensor being written to is ' 'regular Tensor but the other tensors are Tensor Subclasses. ' 'Please try to avoid this in-place operation.') unwrapped_args = tree_map(unwrap, args) unwrapped_kwargs = tree_map(unwrap, kwargs) unwrapped_rs = func(*unwrapped_args, **unwrapped_kwargs) rs = tree_map(wrap, unwrapped_rs) if is_view_fn(func) and autograd_view_consistency: # Note [Alias Result] # Autograd asserts that for B = A.view_fn(...), B and A's storages # are the same. Here we try to make B alias A to avoid those asserts. # See https://github.com/pytorch/pytorch/issues/65339 for more information # about the issue. with no_dispatch(): # Idea: this is a weird way of getting a storage that aliases the input. # This is a workaround for #65339. # 1. under no_dispatch, all of the wrapper tensors look like regular # tensors with special storage (the storage is nullptr and # advertises CPU/CUDA device. # 2. 
we run func, which ends up running the view operation # 3. All view operations reuse the input's storage and return # result Tensor(s) with new sizes/strides/offset that alias # the input. # 4. we set the storage (and sizes/strides/offset) of the wrapper # tensor results to be that of the tensors that alias the input result = func(*args, **kwargs) if isinstance(result, (tuple, list)): for a, b in zip(rs, result): a.set_(b) else: rs.set_(result) # Some operations are allowed to in-place modify the metadata of the # inputs. The only ones are the "inplace view functions"; when we # run into these, we manually modify the metadata of the input. with no_dispatch(): if is_inplace_view_fn(func): func(*args, **kwargs) # For each CompositeCompliantTensor t, we check that t and t.elem # have consistent metadata. If they don't have consistent metadata, # that means the operator did something fishy. check = partial(check_metadata_consistency, CCT=CompositeCompliantTensor) pytree.tree_map_(check, args) pytree.tree_map_(check, kwargs) pytree.tree_map_(check, rs) return rs return CompositeCompliantTensor, CompositeCompliantTensorMode() def is_tensorlist(lst): if not isinstance(lst, list) and not isinstance(lst, tuple): return False if len(lst) == 0: return False all_tensors = all(isinstance(elt, torch.Tensor) for elt in lst) if all_tensors: return True exists_one_tensor = all(isinstance(elt, torch.Tensor) for elt in lst) if exists_one_tensor: raise RuntimeError('This test assumes that PyTorch APIs cannot take ' 'mixed lists of Tensor and other things') return False def maybe_map(fn, should_map, arg): return fn(arg) if should_map else arg def wrap(arg, CCT, cct_mode): # CCT: CompositeCompliantTensor class which is generated using generate_cct_and_mode if isinstance(arg, torch.Tensor): return CCT(arg, cct_mode) if is_tensorlist(arg): return [CCT(a, cct_mode) for a in arg] raise RuntimeError("wrap assumes that the input can be wrapped") # Given a list of flat arguments, some of which may be Tensors, return all # possible ways some of the arguments could be CompositeCompliantTensors (CCT). # For example, given Tensors A, B, C and flat_args = [A, 1, B], # We would return the following 4 options: # [CCT(A), 1, CCT(B)] # [CCT(A), 1, B] # [A, 1, CCT(B)] # [A, 1, B] # NB: Yes, this is exponential. No, we don't care too much because PyTorch ops # don't accept that many input Tensors. def generate_subclass_choices(flat_args, CCT, cct_mode): # CCT: CompositeCompliantTensor class which is generated using generate_cct_and_mode is_tensor_likes = [isinstance(arg, torch.Tensor) or is_tensorlist(arg) for arg in flat_args] subclass_options = [[False, True] if is_tensor_like else [False] for is_tensor_like in is_tensor_likes] for which_args_are_wrapped in itertools.product(*subclass_options): result = [maybe_map(partial(wrap, CCT=CCT, cct_mode=cct_mode), should_wrap_arg, arg) for should_wrap_arg, arg in zip(which_args_are_wrapped, flat_args)] yield result, which_args_are_wrapped # For an operation f(*args, **kwargs), each Tensor argument may either be # a regular Tensor or a Tensor Subclass. This iterator iterates through # all of those options. 
def generate_subclass_choices_args_kwargs(args, kwargs, CCT, cct_mode): # CCT: CompositeCompliantTensor class which is generated using generate_cct_and_mode flat_kwargs, spec = tree_flatten(kwargs) flat_args_kwargs = list(args) + list(flat_kwargs) for choice, debug_metadata in generate_subclass_choices(flat_args_kwargs, CCT, cct_mode): new_args = choice[:len(args)] new_kwargs = tree_unflatten(choice[len(args):], spec) which_args_are_wrapped = debug_metadata[:len(args)] which_kwargs_are_wrapped = tree_unflatten(debug_metadata[len(args):], spec) yield new_args, new_kwargs, which_args_are_wrapped, which_kwargs_are_wrapped def raise_composite_compliance_error(err, additional_info=''): raise RuntimeError( "Composite compliance check failed with " "the above error.\n" f"{additional_info}" "If you are adding an OpInfo of an " "existing operator, please feel free to skip this test " "because the problem was pre-existing and file an issue. " "Otherwise, if you added a new operator, please read " "through the Composite Compliance section in " "aten/src/ATen/native/README.md for how to resolve this. " ) from err # This test checks ALL possible permutations of calling `op` with arguments # that are individually either a regular Tensor or a Tensor subclass. # # The general strategy is to wrap some Tensor args and kwargs in # CompositeCompliantTensor wrappers and call the operation. # If some composite operation does any non-compliant behavior, # CompositeCompliantTensor will raise an error. def check_all_permutations(op, args, kwargs, assert_equal_fn): CCT, cct_mode = generate_cct_and_mode() expected = op(*args, **kwargs) for choice in generate_subclass_choices_args_kwargs(args, kwargs, CCT, cct_mode): new_args, new_kwargs, which_args_are_wrapped, which_kwargs_are_wrapped = choice try: actual = op(*new_args, **new_kwargs) # NOTE: [What errors are Composite Compliance trying to catch?] # # There's two things we want to catch: # - errors that would raise within the torch_dispatch impl # - data_ptr accesses # The first is easy to filter for (we could make the error a different # error class), the second is always going to be a RuntimeError due to # how it is implemented (if you try to access the data_ptr of thex # wrapper Tensor, it raises you some internal RuntimeError). # # So the most general thing to catch here was RuntimeError. If you # are here and debugging why your test failed, it's plausible that # the operator itself is broken and that there are other tests failing. except RuntimeError as err: raise_composite_compliance_error( err, f"- wrapped_args: {which_args_are_wrapped}\n" f"- wrapped_kwargs: {which_kwargs_are_wrapped}\n" ) def unwrap(e): return e.elem if isinstance(e, CCT) else e assert_equal_fn(tree_map(unwrap, actual), expected) # Checks via the usage of torch dispatch mode certain anti-patterns that # are not composite compliant. # # In particular, the anti-pattern we are trying to prevent is a user # creating an empty tensor and then resize_-ing it. Torch Dispatch Mode helps # here because all factory functions will create tensors that are # CompositeCompliantTensor. # # The general strategy is to wrap all Tensor args and kwargs in # CompositeCompliantTensor wrappers. If an operator that is # Composite does any non-compliant behavior, # CompositeCompliantTensor will raise an error. 
def check_with_mode(op, args, kwargs, assert_equal_fn): CCT, cct_mode = generate_cct_and_mode() def wrap(e): return CCT(e, cct_mode) if isinstance(e, torch.Tensor) else e expected = op(*args, **kwargs) args = tree_map(wrap, args) kwargs = tree_map(wrap, kwargs) try: with cct_mode: actual = op(*args, **kwargs) # see NOTE: [What errors are Composite Compliance trying to catch?] except RuntimeError as err: raise_composite_compliance_error(err) def unwrap(e): return e.elem if isinstance(e, CCT) else e assert_equal_fn(tree_map(unwrap, actual), expected) def gather_leaf_tensors(args, kwargs): leaf_tensors = [] args, _args_spec = tree_flatten(args) kwargs, _kwargs_spec = tree_flatten(kwargs) args = args + kwargs for arg in args: if not isinstance(arg, torch.Tensor): continue if arg.requires_grad: leaf_tensors.append(arg) return leaf_tensors def compute_expected_grads(op, args, kwargs, output_process_fn_grad=None, gradcheck_wrapper=None): if gradcheck_wrapper is None: results = op(*args, **kwargs) else: results = gradcheck_wrapper(op, *args, **kwargs) if output_process_fn_grad is not None: results = output_process_fn_grad(results) flat_results = pytree.tree_leaves(results) flat_results = [r for r in flat_results if isinstance(r, torch.Tensor)] flat_diff_results = [r for r in flat_results if r.requires_grad] assert len(flat_diff_results) > 0 grads = [torch.ones(r.shape, device=r.device, dtype=r.dtype) for r in flat_diff_results] leaf_tensors = gather_leaf_tensors(args, kwargs) assert len(leaf_tensors) > 0 return torch.autograd.grad(flat_diff_results, leaf_tensors, grads, allow_unused=True, retain_graph=True) # Checks if the backward formula is composite compliant by testing # all possible permutations of {inputs, grad_outputs} being # CompositeCompliantTensor or regular Tensors. # # NB: it is important that op is accepted as a Callable and not an OpInfo, # this means we can apply check_backward_formula to things that aren't OpInfos # while debugging. def check_backward_formula(op: Callable, args, kwargs, output_process_fn_grad=None, gradcheck_wrapper=None, assert_equal_fn=None): CCT, cct_mode = generate_cct_and_mode() expected = compute_expected_grads(op, args, kwargs, output_process_fn_grad, gradcheck_wrapper) for choice in generate_subclass_choices_args_kwargs(args, kwargs, CCT, cct_mode): new_args, new_kwargs, which_args_are_wrapped, which_kwargs_are_wrapped = choice leaf_tensors = gather_leaf_tensors(new_args, new_kwargs) assert len(leaf_tensors) > 0 try: if gradcheck_wrapper is None: results = op(*new_args, **new_kwargs) else: results = gradcheck_wrapper(op, *new_args, **new_kwargs) if output_process_fn_grad is not None: results = output_process_fn_grad(results) # see NOTE: [What errors are Composite Compliance trying to catch?] 
except RuntimeError as err: raise_composite_compliance_error( err, f"- wrapped_args: {which_args_are_wrapped}\n" f"- wrapped_kwargs: {which_kwargs_are_wrapped}\n" ) flat_results = pytree.tree_leaves(results) flat_results = [r for r in flat_results if isinstance(r, torch.Tensor)] flat_diff_results = [r for r in flat_results if r.requires_grad] assert len(flat_diff_results) > 0 # NB: ones, not ones_like, so we get a regular Tensor here grads = [torch.ones(r.shape, device=r.device, dtype=r.dtype) for r in flat_diff_results] for flat_new_grads, which_grad_is_batched in generate_subclass_choices(grads, CCT, cct_mode): try: actual = torch.autograd.grad(flat_diff_results, leaf_tensors, flat_new_grads, allow_unused=True, retain_graph=True) # see NOTE: [What errors are Composite Compliance trying to catch?] except RuntimeError as err: raise_composite_compliance_error( err, f"- wrapped_args: {which_args_are_wrapped}\n" f"- wrapped_kwargs: {which_kwargs_are_wrapped}\n" f"- wrapped_grads: {which_grad_is_batched}\n" ) def unwrap(e): return e.elem if isinstance(e, CCT) else e assert_equal_fn(tuple(map(unwrap, actual)), expected, equal_nan=True) # Checks if the forward AD formula is composite compliant by testing # all possible permutations of {primals, tangents} being # CompositeCompliantTensor or regular Tensors. # # NB: it is important that op is accepted as a Callable and not an OpInfo, # this means we can apply check_forward_ad_formula to things that aren't OpInfos # while debugging. def check_forward_ad_formula(op: Callable, args, kwargs, gradcheck_wrapper=None, assert_equal_fn=None): CCT, cct_mode = generate_cct_and_mode(autograd_view_consistency=False) def maybe_tangent(t): assert type(t) is not CCT # Generate `tangent` tensor # if given object is a Tensor and requires grad is set. if isinstance(t, torch.Tensor) and t.requires_grad: return torch.randn_like(t) elif is_tensorlist(t): return [torch.randn_like(e) if e.requires_grad else None for e in t] return None tangent_args = tuple(maybe_tangent(arg) for arg in args) flat_kwargs, spec = tree_flatten(kwargs) flat_tangent_kwargs = tuple(maybe_tangent(arg) for arg in flat_kwargs) tangent_kwargs = tree_unflatten(flat_tangent_kwargs, spec) with fwAD.dual_level(): def maybe_make_dual(dual): # Returns dual tensor if primal is a tensor/tensor subclass # with requires_grad set. primal, tangent = dual if isinstance(primal, torch.Tensor) and primal.requires_grad: return fwAD.make_dual(primal.detach(), tangent) elif is_tensorlist(primal): return tuple(fwAD.make_dual(pri.detach(), tang) if tang is not None else pri for pri, tang in zip(primal, tangent)) return primal def compute_expected_grad(args, tangent_args, kwargs, tangent_kwargs): op_args = tuple(map(maybe_make_dual, zip(args, tangent_args))) op_kwargs = {k: maybe_make_dual((v, tangent_kwargs[k])) for k, v in kwargs.items()} if gradcheck_wrapper is None: return op(*op_args, **op_kwargs) return gradcheck_wrapper(op, *op_args, **op_kwargs) expected = compute_expected_grad(args, tangent_args, kwargs, tangent_kwargs) expected = tree_map(fwAD.unpack_dual, expected) expected_primals = tree_map(lambda x: x.primal, expected) expected_tangents = tree_map(lambda x: x.tangent, expected) # Permutations of arg and kwargs in CCT. for choice in generate_subclass_choices_args_kwargs(args, kwargs, CCT, cct_mode): new_args, new_kwargs, which_args_are_wrapped, which_kwargs_are_wrapped = choice # Permutations tangent arg and tangent kwargs in CCT. 
for tang_choice in generate_subclass_choices_args_kwargs(tangent_args, tangent_kwargs, CCT, cct_mode): new_tang_args, new_tang_kwargs, \ which_tang_args_are_wrapped, which_tang_kwargs_are_wrapped = tang_choice op_args = tuple(map(maybe_make_dual, zip(new_args, new_tang_args))) op_kwargs = {k: maybe_make_dual((v, new_tang_kwargs[k])) for k, v in new_kwargs.items()} try: if gradcheck_wrapper is None: actual = op(*op_args, **op_kwargs) else: actual = gradcheck_wrapper(op, *op_args, **op_kwargs) # see NOTE: [What errors are Composite Compliance trying to catch?] except RuntimeError as err: raise_composite_compliance_error( err, f"- wrapped_args: {which_args_are_wrapped}\n" f"- wrapped_kwargs: {which_kwargs_are_wrapped}\n" f"- wrapped_tangent_args: {which_tang_args_are_wrapped}\n" f"- wrapped_tangent_kwargs: {which_tang_kwargs_are_wrapped}\n" ) def unwrap(e): return e.elem if isinstance(e, CCT) else e actual = tree_map(fwAD.unpack_dual, actual) actual_primals = tree_map(lambda x: unwrap(x.primal), actual) actual_tangents = tree_map(lambda x: unwrap(x.tangent), actual) assert_equal_fn(actual_primals, expected_primals, equal_nan=True) assert_equal_fn(actual_tangents, expected_tangents, equal_nan=True) ```
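OpInfo-based tests are the usual drivers for these checkers, but since `op` is accepted as a plain callable they can also be invoked directly while debugging. A minimal sketch, assuming `torch.testing.assert_close` as the comparison function and using `torch.add` purely as an illustrative compliant op:

```py
# Minimal sketch: run the composite-compliance checkers on a plain callable.
# torch.add and the random inputs are illustrative only.
import torch
from torch.testing import assert_close
from torch.testing._internal.composite_compliance import (
    check_all_permutations,
    check_with_mode,
)

a = torch.randn(3, 3)
b = torch.randn(3, 3)

# Tries every combination of {regular Tensor, CompositeCompliantTensor} inputs.
check_all_permutations(torch.add, (a, b), {}, assert_close)

# Wraps all inputs and runs under CompositeCompliantTensorMode, which also
# catches non-compliant factory + resize_ patterns.
check_with_mode(torch.add, (a, b), {}, assert_close)
```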
=============================================================================================================================== SOURCE CODE FILE: custom_op_db.py LINES: 1 SIZE: 19.76 KB PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\custom_op_db.py ENCODING: utf-8 ```py # mypy: allow-untyped-defs import torch import functools from torch.testing import make_tensor from torch.testing._internal.opinfo.core import ( OpInfo, SampleInput, ) from torch.testing._internal.common_dtype import all_types_and import numpy as np from torch.testing._internal.autograd_function_db import ( sample_inputs_numpy_cube, sample_inputs_numpy_mul, sample_inputs_numpy_mul_scalar, sample_inputs_numpy_sort, sample_inputs_numpy_take, ) from torch import Tensor from torch.types import Number from typing import * # noqa: F403 # Note: [custom op db] # # This is a collection of custom operator test cases written as OpInfos # so they can easily be consumed by OpInfo-based tests to check if subsystems # support them correctly. def to_numpy(tensor): return tensor.cpu().numpy() @torch.library.custom_op("_torch_testing::numpy_cube", mutates_args=()) def numpy_cube(x: Tensor) -> tuple[Tensor, Tensor]: x_np = to_numpy(x) dx = torch.tensor(3 * x_np ** 2, device=x.device) return torch.tensor(x_np ** 3, device=x.device), dx @numpy_cube.register_fake def _(x): return x.clone(), x.clone() def numpy_cube_setup_context(ctx, inputs, output): x, = inputs _cube, dx = output ctx.save_for_backward(x, dx) def numpy_cube_backward(ctx, grad_out, grad_dx): x, dx = ctx.saved_tensors grad_x = numpy_mul(grad_out, dx) + 6 * numpy_mul(grad_dx, x) return grad_x numpy_cube.register_autograd(numpy_cube_backward, setup_context=numpy_cube_setup_context) def numpy_cube_vmap(info, in_dims, x): result = numpy_cube(x) return result, (in_dims[0], in_dims[0]) numpy_cube.register_vmap(numpy_cube_vmap) @torch.library.custom_op("_torch_testing::numpy_mul", mutates_args=()) def numpy_mul(x: Tensor, y: Tensor) -> Tensor: return torch.tensor(to_numpy(x) * to_numpy(y), device=x.device) @numpy_mul.register_fake def _(x, y): assert x.device == y.device return (x * y).contiguous() def numpy_mul_setup_context(ctx, inputs, output): ctx.save_for_backward(*inputs) def numpy_mul_backward(ctx, grad_out): x, y = ctx.saved_tensors grad_x = grad_out * y if ctx.needs_input_grad[0] else None grad_y = grad_out * x if ctx.needs_input_grad[1] else None return grad_x, grad_y numpy_mul.register_autograd(numpy_mul_backward, setup_context=numpy_mul_setup_context) def numpy_mul_vmap(info, in_dims, x, y): x_bdim, y_bdim = in_dims x = x.movedim(x_bdim, -1) if x_bdim is not None else x.unsqueeze(-1) y = y.movedim(y_bdim, -1) if y_bdim is not None else y.unsqueeze(-1) result = x * y result = result.movedim(-1, 0) return result, 0 numpy_mul.register_vmap(numpy_mul_vmap) @torch.library.custom_op("_torch_testing::numpy_mul_scalar", mutates_args=()) def numpy_mul_scalar(x: Tensor, *, scalar: float) -> Tensor: return torch.tensor(to_numpy(x) * scalar, device=x.device) @numpy_mul_scalar.register_fake def _(x, *, scalar): return (x * scalar).contiguous() def numpy_mul_scalar_setup_context(ctx, inputs, keyword_only_inputs, output): ctx.scalar = keyword_only_inputs["scalar"] def numpy_mul_scalar_backward(ctx, grad_out): grad_x = grad_out * ctx.scalar return grad_x numpy_mul_scalar.register_autograd(numpy_mul_scalar_backward, setup_context=numpy_mul_scalar_setup_context) def numpy_mul_scalar_vmap(info, in_dims, x, *, scalar): x_bdim, = in_dims x = x.movedim(x_bdim, -1) if x_bdim is 
not None else x.unsqueeze(-1) result = x * scalar result = result.movedim(-1, 0) return result, 0 numpy_mul_scalar.register_vmap(numpy_mul_scalar_vmap) @torch.library.custom_op("_torch_testing::numpy_sort", mutates_args=()) def numpy_sort(x: Tensor, dim: int) -> tuple[Tensor, Tensor, Tensor]: device = x.device x = to_numpy(x) ind = np.argsort(x, axis=dim) ind_inv = np.argsort(ind, axis=dim) result = np.take_along_axis(x, ind, axis=dim) return ( torch.tensor(result, device=device), torch.tensor(ind, device=device), torch.tensor(ind_inv, device=device), ) @numpy_sort.register_fake def _(x, dim): return torch.empty_like(x), torch.empty_like(x, dtype=torch.long), torch.empty_like(x, dtype=torch.long) def numpy_sort_setup_context(ctx, inputs, output): _out, ind, ind_inv = output ctx.dim = inputs[1] ctx.save_for_backward(ind, ind_inv) ctx.mark_non_differentiable(ind, ind_inv) def numpy_sort_backward(ctx, grad_out, grad_ind, grad_ind_inv): ind, ind_inv = ctx.saved_tensors return numpy_take(grad_out, ind_inv, ind, ctx.dim), None numpy_sort.register_autograd(numpy_sort_backward, setup_context=numpy_sort_setup_context) def numpy_sort_vmap(info, in_dims, x, dim): x_bdim, _ = in_dims x = x.movedim(x_bdim, 0) dim = dim if dim >= 0 else dim + x.dim() - 1 result = numpy_sort(x, dim + 1) return result, (0, 0, 0) numpy_sort.register_vmap(numpy_sort_vmap) @torch.library.custom_op("_torch_testing::numpy_take", mutates_args=()) def numpy_take(x: Tensor, ind: Tensor, ind_inv: Tensor, dim: int) -> Tensor: device = x.device x = to_numpy(x) ind = to_numpy(ind) return torch.tensor(np.take_along_axis(x, ind, dim), device=device) @numpy_take.register_fake def _(x, ind, ind_inv, dim): assert x.device == ind.device assert x.device == ind_inv.device assert ind.dtype == torch.long assert ind_inv.dtype == torch.long return torch.empty_like(x) def numpy_take_setup_context(ctx, inputs, output): _x, ind, ind_inv, dim = inputs ctx.dim = dim ctx.save_for_backward(ind, ind_inv) def numpy_take_backward(ctx, grad_out): ind, ind_inv = ctx.saved_tensors grad_x = numpy_take(grad_out, ind_inv, ind, ctx.dim) return grad_x, None, None, None numpy_take.register_autograd(numpy_take_backward, setup_context=numpy_take_setup_context) def numpy_take_vmap(info, in_dims, x, ind, ind_inv, dim): x_bdim, ind_bdim, ind_inv_bdim, _ = in_dims # wrap dim logical_dim = x.dim() if x_bdim is None else x_bdim - 1 dim = dim if dim >= 0 else dim + logical_dim def expand_bdim(x, x_bdim): if x_bdim is None: return x.expand(info.batch_size, *x.shape) return x.movedim(x_bdim, 0) x = expand_bdim(x, x_bdim) ind = expand_bdim(ind, ind_bdim) ind_inv = expand_bdim(ind_inv, ind_inv_bdim) return numpy_take(x, ind, ind_inv, dim + 1), 0 numpy_take.register_vmap(numpy_take_vmap) @torch.library.custom_op("_torch_testing::numpy_nonzero", mutates_args=()) def numpy_nonzero(x: Tensor) -> Tensor: x_np = to_numpy(x) res = np.stack(np.nonzero(x_np), axis=1) if res.shape[0] <= 1: raise RuntimeError("not supported") return torch.tensor(res, device=x.device) @numpy_nonzero.register_fake def _(x): ctx = torch._custom_op.impl.get_ctx() i0 = ctx.create_unbacked_symint() shape = [i0, x.dim()] result = x.new_empty(shape, dtype=torch.long) return result def sample_inputs_numpy_nonzero(opinfo, device, dtype, requires_grad, **kwargs): make_arg = functools.partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) shape = 10 result = make_arg(shape, low=0.9, high=2) mask = make_tensor(shape, low=0, high=2, device=device, dtype=torch.long) with torch.no_grad(): result 
*= mask yield SampleInput(result, args=()) def numpy_nonzero_vmap(info, in_dims, x): raise NotImplementedError("Operator is data-dependent and cannot be vmapped.") numpy_nonzero.register_vmap(numpy_nonzero_vmap) @torch.library.custom_op("_torch_testing::numpy_view_copy", mutates_args=()) def numpy_view_copy(x: Tensor, shape: Sequence[int]) -> Tensor: return torch.tensor(np.copy(to_numpy(x).reshape(shape)), device=x.device) @numpy_view_copy.register_fake def _(x, shape) -> Tensor: return x.clone().view(shape).clone() def numpy_view_copy_setup_context(ctx, inputs, output) -> None: ctx.x_shape = inputs[0].shape def numpy_view_copy_backward(ctx, grad_out): return torch.ops._torch_testing.numpy_view_copy(grad_out, ctx.x_shape), None numpy_view_copy.register_autograd(numpy_view_copy_backward, setup_context=numpy_view_copy_setup_context) def numpy_view_copy_vmap(info, in_dims, x, shape): x_bdim, _ = in_dims x = x.movedim(x_bdim, 0) x_shape = x.shape[0] batch_shape = (x_shape, *shape) result = numpy_view_copy(x, batch_shape) return result, 0 numpy_view_copy.register_vmap(numpy_view_copy_vmap) def sample_inputs_numpy_view_copy(opinfo, device, dtype, requires_grad, **kwargs): make_arg = functools.partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) result = make_arg(2, 3, 4, low=0.9, high=2) yield SampleInput(result, args=([2, 12],)) @torch.library.custom_op('_torch_testing::numpy_cat', mutates_args=()) def numpy_cat(xs: Sequence[Tensor], dim: int) -> Tensor: assert len(xs) > 0 assert all(x.device == xs[0].device for x in xs) assert all(x.dtype == xs[0].dtype for x in xs) np_xs = [to_numpy(x) for x in xs] np_out = np.concatenate(np_xs, axis=dim) return torch.tensor(np_out, device=xs[0].device) @numpy_cat.register_fake def _(xs, dim): assert len(xs) > 0 assert all(x.device == xs[0].device for x in xs) assert all(x.dtype == xs[0].dtype for x in xs) return torch.cat(xs, dim=dim) def numpy_cat_setup_context(ctx, inputs, output): xs, dim = inputs ctx.dim_sizes = [x.shape[dim] for x in xs] ctx.dim = dim def numpy_cat_backward(ctx, grad_out): dim_sizes = ctx.dim_sizes dim = ctx.dim splits = list(np.cumsum(dim_sizes)[:-1]) grad_xs = torch.ops._torch_testing.numpy_split_copy(grad_out, splits, dim) return grad_xs, None numpy_cat.register_autograd(numpy_cat_backward, setup_context=numpy_cat_setup_context) def numpy_cat_vmap(info, in_dims, x, dim): x_bdim, = in_dims result = numpy_cat(x, dim) return result, x_bdim numpy_cat.register_vmap(numpy_cat_vmap) def sample_inputs_numpy_cat(opinfo, device, dtype, requires_grad, **kwargs): make_arg = functools.partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) r0 = make_arg(2, 3, 4, low=0.9, high=2) r1 = make_arg(4, 3, 4, low=0.9, high=2) r2 = make_arg(5, 3, 4, low=0.9, high=2) yield SampleInput([r0, r1, r2], args=(0,)) @torch.library.custom_op('_torch_testing::numpy_split_copy', mutates_args=()) def numpy_split_copy(x: Tensor, splits: Sequence[int], dim: int) -> List[Tensor]: x_np = to_numpy(x) arrs = np.split(x_np, splits, axis=dim) return [torch.tensor(arr, device=x.device, dtype=x.dtype) for arr in arrs] @numpy_split_copy.register_fake def _(x, splits, dim): return [xi.clone() for xi in torch.tensor_split(x, splits, dim)] def numpy_split_copy_setup_context(ctx, inputs, output): _, _, dim = inputs ctx.dim = dim def numpy_split_copy_backward(ctx, grad_out): result = torch.ops._torch_testing.numpy_cat(grad_out, dim=ctx.dim) return result, None, None numpy_split_copy.register_autograd(numpy_split_copy_backward, 
setup_context=numpy_split_copy_setup_context) def numpy_split_copy_vmap(info, in_dims, x, splits, dim): x_bdim, _ , _ = in_dims x = x.movedim(x_bdim, 0) result = numpy_split_copy(x, splits, dim + 1) return result, 0 numpy_split_copy.register_vmap(numpy_split_copy_vmap) def sample_inputs_numpy_split_copy(opinfo, device, dtype, requires_grad, **kwargs): make_arg = functools.partial(make_tensor, device=device, dtype=dtype, requires_grad=requires_grad) x = make_arg(2, 9, low=0.9, high=2) yield SampleInput(x, args=([1, 3, 6], 1)) @torch.library.custom_op('_torch_testing::numpy_split_copy_with_int', mutates_args=()) def numpy_split_copy_with_int(x: Tensor, splits: Sequence[int], dim: int) -> tuple[List[Tensor], int]: x_np = to_numpy(x) arrs = np.split(x_np, splits, axis=dim) return [torch.tensor(arr, device=x.device, dtype=x.dtype) for arr in arrs], len(splits) @numpy_split_copy_with_int.register_fake def _(x, splits, dim): return [xi.clone() for xi in torch.tensor_split(x, splits, dim)], len(splits) def numpy_split_copy_with_int_setup_context(ctx, inputs, output): _, _, dim = inputs ctx.dim = dim def numpy_split_copy_with_int_backward(ctx, grad_out, _): return torch.ops._torch_testing.numpy_cat(grad_out, dim=ctx.dim), None, None numpy_split_copy_with_int.register_autograd( numpy_split_copy_with_int_backward, setup_context=numpy_split_copy_with_int_setup_context) def numpy_split_copy_with_int_vmap(info, in_dims, x, splits, dim): x_bdim, _ , _ = in_dims x = x.movedim(x_bdim, 0) result, len_split = numpy_split_copy_with_int(x, splits, dim + 1) return (result, len_split), ([0 for _ in range(len(result))], None) numpy_split_copy_with_int.register_vmap(numpy_split_copy_with_int_vmap) @torch.library.custom_op("_torch_testing::numpy_nms", mutates_args=()) def numpy_nms(boxes: Tensor, scores: Tensor, iou_threshold: Number) -> Tensor: # Adapted from Ross Girshick's fast-rcnn implementation at # https://github.com/rbgirshick/fast-rcnn/blob/master/lib/utils/nms.py assert boxes.device == scores.device device = boxes.device boxes = to_numpy(boxes) scores = to_numpy(scores) N = boxes.shape[0] assert boxes.shape == (N, 4) assert scores.shape == (N,) x1 = boxes[:, 0] y1 = boxes[:, 1] x2 = boxes[:, 2] y2 = boxes[:, 3] areas = (x2 - x1 + 1) * (y2 - y1 + 1) order = scores.argsort()[::-1] keep = [] while order.size > 0: i = order[0] keep.append(i) xx1 = np.maximum(x1[i], x1[order[1:]]) yy1 = np.maximum(y1[i], y1[order[1:]]) xx2 = np.minimum(x2[i], x2[order[1:]]) yy2 = np.minimum(y2[i], y2[order[1:]]) w = np.maximum(0.0, xx2 - xx1 + 1) h = np.maximum(0.0, yy2 - yy1 + 1) inter = w * h ovr = inter / (areas[i] + areas[order[1:]] - inter) inds = np.where(ovr <= iou_threshold)[0] order = order[inds + 1] result = torch.tensor(np.stack(keep), device=device) # Needed for data-dependent condition :( assert result.size(0) >= 2 return result @numpy_nms.register_fake def _(boxes, scores, iou_threshold): assert boxes.device == scores.device N = boxes.shape[0] assert boxes.shape == (N, 4) assert scores.shape == (N,) ctx = torch._custom_op.impl.get_ctx() i0 = ctx.create_unbacked_symint() result = boxes.new_empty([i0], dtype=torch.int64) return result def numpy_nms_vmap(info, in_dims, boxes, scores, iou_threshold): raise NotImplementedError("Operator is data-dependent and cannot be vmapped.") numpy_nms.register_vmap(numpy_nms_vmap) def sample_inputs_numpy_nms(opinfo, device, dtype, requires_grad, **kwargs): make_arg = functools.partial(make_tensor, device=device, dtype=dtype) N = 64 xs = make_arg([N], low=0, high=28) dx = 
make_arg([N], low=0, high=4) ys = make_arg([N], low=0, high=28) dy = make_arg([N], low=0, high=4) boxes = torch.stack([xs, ys, xs + dx, ys + dy], dim=1).requires_grad_(requires_grad) scores = make_arg([N], low=0, high=1, requires_grad=requires_grad) iou_threshold = make_arg([], low=0, high=1).item() yield SampleInput(boxes, args=(scores, iou_threshold)) custom_op_db = [ OpInfo( 'NumpyCubeCustomOp', op=numpy_cube._opoverload, sample_inputs_func=sample_inputs_numpy_cube, dtypes=all_types_and(torch.bool, torch.half), supports_out=False, ), OpInfo( 'NumpyMulCustomOp', op=numpy_mul._opoverload, sample_inputs_func=sample_inputs_numpy_mul, dtypes=all_types_and(torch.bool, torch.half), supports_out=False, ), OpInfo( 'NumpyMulScalarCustomOp', op=numpy_mul_scalar._opoverload, sample_inputs_func=sample_inputs_numpy_mul_scalar, dtypes=all_types_and(torch.bool, torch.half), supports_out=False, ), OpInfo( 'NumpySortCustomOp', op=numpy_sort._opoverload, sample_inputs_func=sample_inputs_numpy_sort, dtypes=all_types_and(torch.bool, torch.half), supports_out=False, ), OpInfo( 'NumpyTakeCustomOp', op=numpy_take._opoverload, sample_inputs_func=sample_inputs_numpy_take, dtypes=all_types_and(torch.bool, torch.half), supports_out=False, ), OpInfo( 'NumpyNonzeroCustomOp', op=numpy_nonzero._opoverload, sample_inputs_func=sample_inputs_numpy_nonzero, dtypes=all_types_and(torch.bool, torch.half), supports_autograd=False, supports_out=False, ), OpInfo( 'NumpyNMSCustomOp', op=torch.ops._torch_testing.numpy_nms, sample_inputs_func=sample_inputs_numpy_nms, dtypes=all_types_and(torch.bool, torch.half), supports_autograd=False, supports_out=False, ), OpInfo( 'NumpyViewCopyCustomOp', op=torch.ops._torch_testing.numpy_view_copy, sample_inputs_func=sample_inputs_numpy_view_copy, dtypes=all_types_and(torch.bool, torch.half), supports_autograd=True, supports_out=False, ), OpInfo( 'NumpyCatCustomOp', op=torch.ops._torch_testing.numpy_cat, sample_inputs_func=sample_inputs_numpy_cat, dtypes=all_types_and(torch.bool, torch.half), supports_autograd=True, check_batched_grad=False, check_batched_gradgrad=False, supports_out=False, ), OpInfo( 'NumpySplitCopyCustomOp', op=torch.ops._torch_testing.numpy_split_copy, sample_inputs_func=sample_inputs_numpy_split_copy, dtypes=all_types_and(torch.bool, torch.half), supports_autograd=True, check_batched_grad=False, check_batched_gradgrad=False, supports_out=False, ), OpInfo( 'NumpySplitCopyWithIntCustomOp', op=torch.ops._torch_testing.numpy_split_copy_with_int, sample_inputs_func=sample_inputs_numpy_split_copy, dtypes=all_types_and(torch.bool, torch.half), gradcheck_wrapper=lambda op, *args, **kwargs: op(*args, **kwargs)[0], supports_autograd=True, check_batched_grad=False, check_batched_gradgrad=False, supports_out=False, ), ] # ============================================================== # some mechanical test cases # ============================================================== lib = torch.library.Library("_torch_testing", "FRAGMENT") # noqa: TOR901 lib.define("source0(Tensor x) -> Tensor") @torch.library.register_fake("_torch_testing::source0", lib=lib) def _(x): return x.clone() lib.define("source1(Tensor x) -> Tensor") def source1_fake(x): return x.clone() torch.library.register_fake("_torch_testing::source1", source1_fake, lib=lib) lib.define("source2(Tensor x) -> Tensor") @torch.library.register_fake("_torch_testing::source2", lib=lib) def _(x): return x.clone() lib.define("source3(Tensor x) -> Tensor") def source3_fake(x): return x.clone() 
torch.library.register_fake("_torch_testing::source3", source3_fake, lib=lib) @torch.library.custom_op("_torch_testing::source4", mutates_args=()) def source4(x: Tensor) -> Tensor: return x.clone() @source4.register_fake def _(x): return x.clone() @torch.library.custom_op("_torch_testing::source5", mutates_args=()) def source5(x: Tensor) -> Tensor: return x.clone() def source5_fake(x): return x.clone() source5.register_fake(source5_fake) ```
================================================================================================================================ SOURCE CODE FILE: custom_tensor.py LINES: 1 SIZE: 5.25 KB PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\custom_tensor.py ENCODING: utf-8 ```py # mypy: ignore-errors from collections import namedtuple import torch import torch.utils._pytree as pytree from torch.utils._python_dispatch import return_and_correct_aliasing FancyNamedTuple = namedtuple("FancyNamedTuple", ["foo", "bar"]) # A simple tensor subclass that holds a tensor with custom metadata and custom method class ConstantExtraMetadataTensor(torch.Tensor): @staticmethod def __new__(cls, elem): shape = elem.shape kwargs = {} kwargs["strides"] = elem.stride() kwargs["storage_offset"] = elem.storage_offset() kwargs["device"] = elem.device kwargs["layout"] = elem.layout kwargs["requires_grad"] = elem.requires_grad kwargs["dtype"] = elem.dtype return torch.Tensor._make_wrapper_subclass(cls, shape, **kwargs) def __init__(self, elem): self.elem = elem self.constant_attribute = 4 def __repr__(self): inner_repr = repr(self.elem) return f"CustomTensor({inner_repr})" def get_complicated_metadata(self): return FancyNamedTuple(self.constant_attribute, self.constant_attribute) def __tensor_flatten__(self): return ["elem"], self.constant_attribute def add_constant(self, a): self.constant_attribute += a @staticmethod def __tensor_unflatten__(inner_tensors, meta, outer_size, outer_stride): assert meta is not None elem = inner_tensors["elem"] out = ConstantExtraMetadataTensor(elem) out.constant_attribute = meta return out @classmethod def __torch_dispatch__(cls, func, types, args, kwargs): if kwargs is None: kwargs = {} args_inner = pytree.tree_map_only( ConstantExtraMetadataTensor, lambda x: x.elem, args ) kwargs_inner = pytree.tree_map_only( ConstantExtraMetadataTensor, lambda x: x.elem, kwargs ) out_inner = func(*args_inner, **kwargs_inner) out_inner_flat, spec = pytree.tree_flatten(out_inner) # for aten ops that return non-tensors, just assume that # our cust inner tensors return the same value out_flat = [ ConstantExtraMetadataTensor(o_inner) if isinstance(o_inner, torch.Tensor) else o_inner for o_inner in out_inner_flat ] out = pytree.tree_unflatten(out_flat, spec) return return_and_correct_aliasing(func, args, kwargs, out) # A simple tensor subclass that always returns plain tensor during __torch_dispatch__ # It is similar to TwoTensor and is used to simulate torchao quantized tensors class CustomTensorPlainOut(torch.Tensor): @staticmethod def __new__(cls, elem1, elem2): shape = elem1.shape kwargs = {} kwargs["strides"] = elem1.stride() kwargs["storage_offset"] = elem1.storage_offset() kwargs["device"] = elem1.device kwargs["layout"] = elem1.layout kwargs["requires_grad"] = elem1.requires_grad kwargs["dtype"] = elem1.dtype return torch.Tensor._make_wrapper_subclass(cls, shape, **kwargs) def __init__(self, elem1, elem2): self.elem1 = elem1 self.elem2 = elem2 def get_elem(self): return self.elem1 def __repr__(self): inner_repr_1 = repr(self.elem1) inner_repr_2 = repr(self.elem2) return f"CustomTensorPlainOut({inner_repr_1}, {inner_repr_2})" def __tensor_flatten__(self): return ["elem1", "elem2"], None @staticmethod def __tensor_unflatten__(inner_tensors, meta, outer_size, outer_stride): elem1 = inner_tensors["elem1"] elem2 = inner_tensors["elem2"] out = CustomTensorPlainOut(elem1, elem2) return out @classmethod def __torch_dispatch__(cls, func, types, args, kwargs): # Don't use this tensor with 
view ops if kwargs is None: kwargs = {} args_inner_1 = pytree.tree_map_only( CustomTensorPlainOut, lambda x: x.elem1, args ) kwargs_inner_1 = pytree.tree_map_only( CustomTensorPlainOut, lambda x: x.elem1, kwargs ) args_inner_2 = pytree.tree_map_only( CustomTensorPlainOut, lambda x: x.elem2, args ) kwargs_inner_2 = pytree.tree_map_only( CustomTensorPlainOut, lambda x: x.elem2, kwargs ) out_inner_1 = func(*args_inner_1, **kwargs_inner_1) out_inner_2 = func(*args_inner_2, **kwargs_inner_2) out_inner_flat_1, spec = pytree.tree_flatten(out_inner_1) out_inner_flat_2, spec = pytree.tree_flatten(out_inner_2) if func.is_view: new_out = pytree.tree_unflatten( ( CustomTensorPlainOut(tensor1, tensor2) for tensor1, tensor2 in zip(out_inner_flat_1, out_inner_flat_2) ), spec, ) return return_and_correct_aliasing(func, args, kwargs, new_out) out_new = ( out_inner_flat_1[ix] + out_inner_flat_2[ix] for ix in range(len(out_inner_flat_1)) ) return pytree.tree_unflatten(out_new, spec) ```
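A brief usage sketch for the two subclasses defined above, assuming the module is imported as `torch.testing._internal.custom_tensor`; the commented values follow directly from the class definitions:

```py
import torch
from torch.testing._internal.custom_tensor import (
    ConstantExtraMetadataTensor,
    CustomTensorPlainOut,
)

# Wrapper subclass that carries extra metadata through __torch_dispatch__.
t = ConstantExtraMetadataTensor(torch.randn(4))
t.add_constant(1)
print(t.get_complicated_metadata())  # FancyNamedTuple(foo=5, bar=5)
out = t + t                          # result is again a ConstantExtraMetadataTensor

# Two-tensor subclass that computes each component separately and, for
# non-view ops, returns their sum as a plain tensor.
p = CustomTensorPlainOut(torch.ones(2), torch.ones(2))
print(p.mul(3))                      # tensor([6., 6.]) = 3*1 + 3*1
```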
================================================================================================================================ SOURCE CODE FILE: __init__.py LINES: 1 SIZE: 0.02 KB PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\data\__init__.py ENCODING: utf-8 ```py # mypy: ignore-errors ```
================================================================================================================================ SOURCE CODE FILE: network1.py LINES: 1 SIZE: 0.17 KB PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\data\network1.py ENCODING: utf-8 ```py # mypy: ignore-errors import torch.nn as nn class Net(nn.Module): def __init__(self) -> None: super().__init__() self.linear = nn.Linear(10, 20) ```
================================================================================================================================ SOURCE CODE FILE: network2.py LINES: 1 SIZE: 0.21 KB PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\data\network2.py ENCODING: utf-8 ```py # mypy: ignore-errors import torch.nn as nn class Net(nn.Module): def __init__(self) -> None: super().__init__() self.linear = nn.Linear(10, 20) self.relu = nn.ReLU() ```
============================================================================================================================= SOURCE CODE FILE: dist_utils.py LINES: 1 SIZE: 7.28 KB PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\dist_utils.py ENCODING: utf-8 ```py # mypy: ignore-errors import re import sys import time from functools import partial, wraps import torch.distributed as dist import torch.distributed.rpc as rpc from torch.distributed.rpc import _rref_context_get_debug_info from torch.testing._internal.common_utils import FILE_SCHEMA, TEST_WITH_TSAN if not dist.is_available(): print("c10d not available, skipping tests", file=sys.stderr) sys.exit(0) INIT_METHOD_TEMPLATE = FILE_SCHEMA + "{file_name}" def dist_init( old_test_method=None, setup_rpc: bool = True, clean_shutdown: bool = True, faulty_messages=None, messages_to_delay=None, ): """ We use this decorator for setting up and tearing down state since MultiProcessTestCase runs each `test*` method in a separate process and each process just runs the `test*` method without actually calling 'setUp' and 'tearDown' methods of unittest. Note: pass the string representation of MessageTypes that should be used with the faulty agent's send function. By default, all retriable messages ("RREF_FORK_REQUEST", "RREF_CHILD_ACCEPT", "RREF_USER_DELETE", "CLEANUP_AUTOGRAD_CONTEXT_REQ") will use the faulty send (this default is set from faulty_rpc_agent_test_fixture.py). """ # If we use dist_init without arguments (ex: @dist_init), old_test_method is # appropriately set and we return the wrapper appropriately. On the other # hand if dist_init has arguments (ex: @dist_init(clean_shutdown=False)), # old_test_method is None and we return a functools.partial which is the real # decorator that is used and as a result we recursively call dist_init with # old_test_method and the rest of the arguments appropriately set. if old_test_method is None: return partial( dist_init, setup_rpc=setup_rpc, clean_shutdown=clean_shutdown, faulty_messages=faulty_messages, messages_to_delay=messages_to_delay, ) @wraps(old_test_method) def new_test_method(self, *arg, **kwargs): # Setting _ignore_rref_leak to make sure OwnerRRefs are properly deleted # in tests. import torch.distributed.rpc.api as api api._ignore_rref_leak = False self.worker_id = self.rank self.setup_fault_injection(faulty_messages, messages_to_delay) rpc_backend_options = self.rpc_backend_options if setup_rpc: if TEST_WITH_TSAN: # TSAN runs much slower. rpc_backend_options.rpc_timeout = rpc.constants.DEFAULT_RPC_TIMEOUT_SEC * 5 rpc.constants.DEFAULT_SHUTDOWN_TIMEOUT = 60 rpc.init_rpc( name=f"worker{self.rank:d}", backend=self.rpc_backend, rank=self.rank, world_size=self.world_size, rpc_backend_options=rpc_backend_options, ) return_value = old_test_method(self, *arg, **kwargs) if setup_rpc: rpc.shutdown(graceful=clean_shutdown) return return_value return new_test_method def noop() -> None: pass def wait_until_node_failure(rank: int, expected_error_regex: str = ".*") -> str: """ Loops until an RPC to the given rank fails. This is used to indicate that the node has failed in unit tests. Args: rank (int): Rank of the node expected to fail expected_error_regex (optional, str): Regex of exception message expected. Useful to ensure a specific failure occurs, not just any. 
""" while True: try: rpc.rpc_sync(f"worker{rank}", noop, args=()) time.sleep(0.1) except Exception as e: if re.search(pattern=expected_error_regex, string=str(e)): return str(e) def wait_until_pending_futures_and_users_flushed(timeout: int = 20) -> None: """ The RRef protocol holds forkIds of rrefs in a map until those forks are confirmed by the owner. The message confirming the fork may arrive after our tests check whether this map is empty, which leads to failures and flaky tests. to_here also does not guarantee that we have finished processind the owner's confirmation message for the RRef. This function loops until the map is empty, which means the messages have been received as processed. Call this function before asserting the map returned by _get_debug_info is empty. """ start = time.time() while True: debug_info = _rref_context_get_debug_info() num_pending_futures = int(debug_info["num_pending_futures"]) num_pending_users = int(debug_info["num_pending_users"]) if num_pending_futures == 0 and num_pending_users == 0: break time.sleep(0.1) if time.time() - start > timeout: raise ValueError( f"Timed out waiting to flush pending futures and users, " f"had {num_pending_futures} pending futures and {num_pending_users} pending users" ) def get_num_owners_and_forks() -> tuple[str, str]: """ Retrieves number of OwnerRRefs and forks on this node from _rref_context_get_debug_info. """ rref_dbg_info = _rref_context_get_debug_info() num_owners = rref_dbg_info["num_owner_rrefs"] num_forks = rref_dbg_info["num_forks"] return num_owners, num_forks def wait_until_owners_and_forks_on_rank( num_owners: int, num_forks: int, rank: int, timeout: int = 20 ) -> None: """ Waits until timeout for num_forks and num_owners to exist on the rank. Used to ensure proper deletion of RRefs in tests. """ start = time.time() while True: num_owners_on_rank, num_forks_on_rank = rpc.rpc_sync( worker_name(rank), get_num_owners_and_forks, args=(), timeout=5 ) num_owners_on_rank = int(num_owners_on_rank) num_forks_on_rank = int(num_forks_on_rank) if num_owners_on_rank == num_owners and num_forks_on_rank == num_forks: return time.sleep(1) if time.time() - start > timeout: raise ValueError( f"Timed out waiting {timeout} sec for {num_owners} owners and {num_forks} forks on rank," f" had {num_owners_on_rank} owners and {num_forks_on_rank} forks" ) def initialize_pg(init_method, rank: int, world_size: int) -> None: # This is for tests using `dist.barrier`. if not dist.is_initialized(): dist.init_process_group( backend="gloo", init_method=init_method, rank=rank, world_size=world_size, ) def worker_name(rank: int) -> str: return f"worker{rank}" def get_function_event(function_events, partial_event_name): """ Returns the first event that matches partial_event_name in the provided function_events. These function_events should be the output of torch.autograd.profiler.function_events(). Args: function_events: function_events returned by the profiler. event_name (str): partial key that the event was profiled with. """ event = [event for event in function_events if partial_event_name in event.name][0] # noqa: RUF015 return event ```
======================================================================================================================================= SOURCE CODE FILE: __init__.py LINES: 1 SIZE: 0.00 KB PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\distributed\__init__.py ENCODING: utf-8 ```py ```
============================================================================================================================================== SOURCE CODE FILE: __init__.py LINES: 1 SIZE: 0.03 KB PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\distributed\_shard\__init__.py ENCODING: utf-8 ```py # mypy: allow-untyped-defs ```
============================================================================================================================================================= SOURCE CODE FILE: __init__.py LINES: 1 SIZE: 3.20 KB PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\distributed\_shard\sharded_tensor\__init__.py ENCODING: utf-8 ```py # mypy: allow-untyped-defs import sys from functools import wraps, partial import torch import torch.distributed as dist from torch.distributed import rpc from torch.testing._internal.common_distributed import ( MultiProcessTestCase, TEST_SKIPS, tp_transports, ) TEST_GPU_NUM = 4 class ShardedTensorTestBase(MultiProcessTestCase): @property def world_size(self): return TEST_GPU_NUM def init_pg(self, backend="nccl"): if backend not in ["nccl", "gloo", "mpi"]: raise RuntimeError(f"Backend {backend} not supported!") dist.init_process_group( backend=backend, world_size=self.world_size, rank=self.rank, init_method=f"file://{self.file_name}", ) # set device for nccl pg for collectives if backend == "nccl": torch.cuda.set_device(self.rank) def init_rpc(self): rpc_backend_options = rpc.TensorPipeRpcBackendOptions(_transports=tp_transports()) rpc_backend_options.init_method = f"file://{self.file_name}" for rank in range(self.world_size): rpc_backend_options.set_device_map( f"worker{rank}", {rank: self.rank, self.rank: rank} ) rpc.init_rpc( name=f"worker{self.rank:d}", rank=self.rank, world_size=self.world_size, rpc_backend_options=rpc_backend_options, ) def init_comms(self, init_rpc=True, backend="nccl"): if init_rpc: self.init_rpc() self.init_pg(backend=backend) def destroy_comms(self, destroy_rpc=True): # Wait for all ranks to reach here before starting shutdown. dist.barrier() if destroy_rpc: rpc.shutdown() dist.destroy_process_group() def setUp(self) -> None: super().setUp() self._spawn_processes() def assert_sharded_tensor_equal(self, st1, st2): st1_local_shards = st1.local_shards() st2_local_shards = st2.local_shards() self.assertEqual(len(st1_local_shards), len(st2_local_shards)) for i, st1_local_shard in enumerate(st1_local_shards): self.assertEqual(st1_local_shard.tensor, st2_local_shards[i].tensor) self.assertEqual(st1_local_shard.metadata, st2_local_shards[i].metadata) self.assertEqual(st1.metadata(), st2.metadata()) self.assertEqual(st1.sharding_spec(), st2.sharding_spec()) self.assertEqual(len(st1.remote_shards()), len(st2.remote_shards())) # wrapper to initialize comms (processgroup + rpc) def with_comms(func=None, init_rpc=True, backend="nccl"): if func is None: return partial( with_comms, init_rpc=init_rpc, backend=backend, ) @wraps(func) def wrapper(self, *args, **kwargs): if backend == "nccl" and torch.cuda.device_count() < self.world_size: sys.exit(TEST_SKIPS[f"multi-gpu-{self.world_size}"].exit_code) self.init_comms(init_rpc=init_rpc, backend=backend) func(self, *args, **kwargs) self.destroy_comms(destroy_rpc=init_rpc) return wrapper ```
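A sketch of how a test built on `ShardedTensorTestBase` typically uses the `with_comms` decorator; the test body is illustrative and uses the gloo backend so it does not require GPUs:

```py
import torch
import torch.distributed as dist
from torch.testing._internal.distributed._shard.sharded_tensor import (
    ShardedTensorTestBase,
    with_comms,
)


class MyShardedTensorTest(ShardedTensorTestBase):
    @with_comms(init_rpc=False, backend="gloo")
    def test_allreduce(self):
        # The decorator initialized the process group (and skipped RPC) for us.
        t = torch.ones(2) * self.rank
        dist.all_reduce(t)
        self.assertEqual(t, torch.ones(2) * sum(range(self.world_size)))
```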
===================================================================================================================================================================== SOURCE CODE FILE: _test_ops_common.py LINES: 6 SIZE: 4.05 KB PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\distributed\_shard\sharded_tensor\_test_ops_common.py ENCODING: utf-8 ```py # mypy: allow-untyped-defs import builtins import torch from torch.distributed._shard.sharding_spec import ( ChunkShardingSpec, EnumerableShardingSpec, ShardMetadata, ) from torch.distributed._shard.sharding_spec._internals import ( get_chunked_dim_size, get_split_size, ) def generate_chunk_sharding_specs_for_test(sharding_dim): return [ ChunkShardingSpec( dim=sharding_dim, placements=[ "rank:0/cuda:0", "rank:1/cuda:1", "rank:2/cuda:2", "rank:3/cuda:3", ], ), # Test different ordering. (Case 1) ChunkShardingSpec( dim=sharding_dim, placements=[ "rank:2/cuda:2", "rank:3/cuda:3", "rank:0/cuda:0", "rank:1/cuda:1", ], ), # Test different ordering. (Case 2) ChunkShardingSpec( dim=sharding_dim, placements=[ "rank:3/cuda:3", "rank:0/cuda:0", "rank:1/cuda:1", "rank:2/cuda:2", ], ), ] def generate_enumerable_sharding_specs_for_test(): return [ EnumerableShardingSpec( [ ShardMetadata( shard_offsets=[0, 0], shard_sizes=[5, 5], placement="rank:0/cuda:0", ), ShardMetadata( shard_offsets=[5, 0], shard_sizes=[5, 5], placement="rank:1/cuda:1", ), ShardMetadata( shard_offsets=[0, 5], shard_sizes=[5, 5], placement="rank:2/cuda:2", ), ShardMetadata( shard_offsets=[5, 5], shard_sizes=[5, 5], placement="rank:3/cuda:3", ), ] ) ] def generate_local_weight_sharding_params_for_test( local_weight, sharded_dim, gpu_num, spec, rank ): """ Shard the local weight based the given spec, so we can compare against the one from sharded tensor. Args: local_weight: weight matrix to be sharded. sharded_dim: The dimension which we shard on. gpu_num: number of ranks. spec: sharding spec. rank: # of cuda process. Returns: start_pos: start position of sharded weight on the given rank. chunk_size: chunk size of sharded weight on the given rank. """ sharding_dim_size = local_weight.size(sharded_dim) split_size = get_split_size(sharding_dim_size, gpu_num) current_offsets = 0 start_pos = current_offsets for idx, placement in enumerate(spec.placements): chunk_size = get_chunked_dim_size(sharding_dim_size, split_size, idx) if rank == placement.rank(): start_pos = current_offsets break current_offsets += chunk_size return start_pos, chunk_size def clone_module_parameter(module, param_name): """ Clone a parameter from a given existing module. Args: module (:class:`torch.nn.Module`): Module whose parameter needs to be cloned. param_name (str): Name of the parameter of ``module`` that needs to be cloned. Returns: cloned tensor as :class:`torch.nn.Parameter`. """ tensor = getattr(module, param_name) return torch.nn.Parameter(tensor.detach().clone()) def gen_binary_op_func(python_op, inplace=False): src_lines = ['def f(lhs, rhs):'] if "torch" in python_op: src_lines.append(f' return {python_op}(lhs, rhs)\n') elif inplace: src_lines.append(f' lhs {python_op}= rhs\n return lhs\n') else: src_lines.append(f' return lhs {python_op} rhs\n') code_str = '\n'.join(src_lines) g = {'torch': torch} builtins.exec(code_str, g) return g["f"] ```
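`gen_binary_op_func` builds tiny callables from an operator string so the sharded-tensor op tests can be parameterized over operators; a quick sketch of what it returns:

```py
import torch
from torch.testing._internal.distributed._shard.sharded_tensor._test_ops_common import (
    gen_binary_op_func,
)

add = gen_binary_op_func("+")                   # f(lhs, rhs) -> lhs + rhs
iadd = gen_binary_op_func("+", inplace=True)    # lhs += rhs; returns lhs
tadd = gen_binary_op_func("torch.add")          # torch.add(lhs, rhs)

a, b = torch.ones(2), torch.full((2,), 2.0)
assert torch.equal(add(a, b), tadd(a, b))
iadd(a, b)                                      # mutates a in place
assert torch.equal(a, torch.full((2,), 3.0))
```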
==================================================================================================================================================================== SOURCE CODE FILE: _test_st_common.py LINES: 1 SIZE: 1.73 KB PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\distributed\_shard\sharded_tensor\_test_st_common.py ENCODING: utf-8 ```py # mypy: allow-untyped-defs import copy import random import torch from torch.distributed._shard import sharded_tensor from torch.distributed._shard.sharding_spec import ( ChunkShardingSpec, ) PLACEMENTS = [ "rank:0/cuda:0", "rank:1/cuda:1", "rank:2/cuda:2", "rank:3/cuda:3", ] DEFAULT_GPU_NUM = 4 def _chunk_sharding_specs_list_for_test(sharding_dims, seed=0): spec_list = [] for i in range(len(sharding_dims)): random.Random(seed + i).shuffle(PLACEMENTS) spec_list.append( ChunkShardingSpec( dim=sharding_dims[i], placements=copy.deepcopy(PLACEMENTS), ) ) return spec_list class MyShardedModel2(torch.nn.Module): def __init__( self, spec=None, group=None, init_rrefs=True ) -> None: super().__init__() if spec is not None: self.sharded_tensor2 = sharded_tensor.rand( spec, 10, 20, process_group=group, init_rrefs=init_rrefs ) else: self.sharded_tensor2 = None self.random_tensor2 = torch.nn.Parameter(torch.rand(2, 2)) class MyShardedModel1(torch.nn.Module): def __init__( self, spec=None, group=None, init_rrefs=True ) -> None: super().__init__() if spec is not None: self.sharded_tensor1 = sharded_tensor.rand( spec, 10, 20, process_group=group, init_rrefs=init_rrefs ) else: self.sharded_tensor1 = None self.random_tensor1 = torch.nn.Parameter(torch.rand(2, 2)) self.submodule = MyShardedModel2(spec, group, init_rrefs) ```
================================================================================================================================================= SOURCE CODE FILE: test_common.py LINES: 1 SIZE: 1.23 KB PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\distributed\_shard\test_common.py ENCODING: utf-8 ```py # mypy: allow-untyped-defs import torch import torch.nn as nn from torch.distributed._shard.sharded_tensor import ShardedTensor class SimpleMegatronLM(nn.Module): def __init__(self, linear_size, rank=None, dtype=torch.float32): super().__init__() self.fc1 = nn.Linear(*linear_size[0], dtype=dtype) self.gelu = nn.GELU() self.fc2 = nn.Linear(*linear_size[1], dtype=dtype) if rank is not None: self.fc1.cuda(rank) self.fc2.cuda(rank) def forward(self, inp): return self.fc2(self.gelu(self.fc1(inp))) def get_weights(self): if isinstance(self.fc1.weight, ShardedTensor): weight1 = self.fc1.weight.local_tensor() else: weight1 = self.fc1.weight if isinstance(self.fc2.weight, ShardedTensor): weight2 = self.fc2.weight.local_tensor() else: weight2 = self.fc2.weight return (weight1, weight2) def get_biases(self): return (self.fc1.bias, self.fc2.bias) def get_weight_grads(self): return (self.fc1.weight.grad, self.fc2.weight.grad) def get_bias_grads(self): return (self.fc1.bias.grad, self.fc2.bias.grad) ```
=============================================================================================================================================== SOURCE CODE FILE: __init__.py LINES: 1 SIZE: 0.00 KB PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\distributed\_tensor\__init__.py ENCODING: utf-8 ```py ```
===================================================================================================================================================== SOURCE CODE FILE: common_dtensor.py LINES: 1 SIZE: 21.14 KB PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\distributed\_tensor\common_dtensor.py ENCODING: utf-8 ```py # mypy: allow-untyped-defs # Copyright (c) Meta Platforms, Inc. and affiliates import itertools import sys from dataclasses import dataclass from functools import partial, wraps from typing import ( Any, Callable, cast, TypeVar, Union, ) from collections.abc import Iterator, Sequence import torch import torch.distributed as dist import torch.nn as nn import torch.nn.functional as F from torch.distributed._tensor import DeviceMesh, distribute_tensor, Replicate, Shard from torch.distributed._tensor.placement_types import Placement from torch.distributed.tensor.parallel import ( ColwiseParallel, parallelize_module, PrepareModuleInput, RowwiseParallel, SequenceParallel, ) from torch.testing._internal.common_utils import ( TEST_HPU, TEST_CUDA, TEST_XPU ) from torch.testing._internal.common_distributed import ( MultiProcessTestCase, MultiThreadedTestCase, skip_if_lt_x_gpu, run_subtests, TEST_SKIPS, ) from torch.utils._pytree import tree_flatten, tree_unflatten, TreeSpec from torch._utils import _get_device_module if TEST_CUDA: DEVICE_TYPE = "cuda" PG_BACKEND = "nccl" DEVICE_COUNT = _get_device_module("cuda").device_count() elif TEST_HPU: DEVICE_TYPE = "hpu" PG_BACKEND = "hccl" DEVICE_COUNT = _get_device_module("hpu").device_count() elif TEST_XPU: DEVICE_TYPE = "xpu" PG_BACKEND = "xccl" DEVICE_COUNT = _get_device_module("xpu").device_count() else: DEVICE_TYPE = "cpu" PG_BACKEND = "gloo" NUM_DEVICES = 4 # We use this as a proxy for "multiple GPUs exist" if (TEST_CUDA or TEST_XPU) and DEVICE_COUNT > 1: # when we actually have multiple GPUs, relax the requirement to smaller counts. 
NUM_DEVICES = min(NUM_DEVICES, DEVICE_COUNT) T = TypeVar("T") # simple RMSNorm layer for testing class RMSNormPython(torch.nn.Module): def __init__(self, dim: int, eps: float = 1e-6): super().__init__() self.eps = eps self.weight = torch.nn.Parameter(torch.ones(dim)) def _norm(self, x): return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps) def forward(self, x): output = self._norm(x) return output * self.weight class MLPModule(nn.Module): def __init__(self, device, bias: bool = True): super().__init__() torch.manual_seed(5) self.net1 = nn.Linear(10, 16, bias=bias, device=device) self.relu = nn.ReLU() self.net2 = nn.Linear(16, 10, bias=bias, device=device) def forward(self, x): return self.net2(self.relu(self.net1(x))) def reset_parameters(self): self.net1.reset_parameters() self.net2.reset_parameters() class MLPStacked(nn.Module): def __init__(self, device, n_layers: int = 2): super().__init__() self.layers = nn.ModuleList([MLPModule(device) for i in range(n_layers)]) def forward(self, x): for layer in self.layers: x = layer(x) return x @dataclass class ModelArgs: n_layers: int = 2 vocab_size: int = 8 max_seq_len: int = 16 dim: int = 16 n_heads: int = 4 dropout_p: float = 0.1 use_attn_mask: bool = True weight_tying: bool = True checkpoint_activations: bool = False class Attention(nn.Module): def __init__(self, args: ModelArgs): super().__init__() assert args.dim % args.n_heads == 0 self.head_dim = args.dim // args.n_heads self.n_heads = args.n_heads self.dropout_p = args.dropout_p self.resid_dropout = nn.Dropout(args.dropout_p) self.use_attn_mask = args.use_attn_mask self.wq = nn.Linear(args.dim, args.dim, bias=False) self.wk = nn.Linear(args.dim, args.dim, bias=False) self.wv = nn.Linear(args.dim, args.dim, bias=False) self.wo = nn.Linear(args.dim, args.dim, bias=False) def forward(self, x): bsz, seq_len, _ = x.size() queries, keys, values = self.wq(x), self.wk(x), self.wv(x) queries = queries.view(bsz, seq_len, self.n_heads, self.head_dim) keys = keys.view(bsz, seq_len, self.n_heads, self.head_dim) values = values.view(bsz, seq_len, self.n_heads, self.head_dim) queries = queries.transpose(1, 2) # (bsz, n_heads, seq_len, head_dim) keys = keys.transpose(1, 2) # (bsz, n_heads, seq_len, head_dim) values = values.transpose(1, 2) # (bsz, n_heads, seq_len, head_dim) output = F.scaled_dot_product_attention( queries, keys, values, None, self.dropout_p if self.training else 0, self.use_attn_mask, ) output = output.transpose(1, 2).contiguous().view(bsz, seq_len, -1) return self.resid_dropout(self.wo(output)) class FeedForward(nn.Module): def __init__(self, dim, hidden_dim, dropout_p): super().__init__() self.w1 = nn.Linear(dim, hidden_dim) self.gelu = nn.GELU() self.w2 = nn.Linear(hidden_dim, dim) self.resid_dropout = nn.Dropout(dropout_p) def forward(self, x): return self.resid_dropout(self.w2(self.gelu(self.w1(x)))) class TransformerBlock(nn.Module): def __init__(self, args: ModelArgs): super().__init__() self.attention_norm = nn.LayerNorm(args.dim) self.attention = Attention(args) self.ffn_norm = nn.LayerNorm(args.dim) self.feed_forward = FeedForward( args.dim, hidden_dim=4 * args.dim, dropout_p=args.dropout_p ) def forward(self, x): h = x + self.attention(self.attention_norm(x)) out = h + self.feed_forward(self.ffn_norm(h)) return out # A toy transformer model, partly inspired by the nanoGPT model: # https://github.com/karpathy/nanoGPT. 
class Transformer(nn.Module): def __init__(self, args: ModelArgs): super().__init__() assert args.vocab_size is not None assert args.max_seq_len is not None self.model_args = args self.max_seq_len = args.max_seq_len self.tok_embeddings = nn.Embedding(args.vocab_size, args.dim) self.pos_embeddings = nn.Embedding(args.max_seq_len, args.dim) self.dropout = nn.Dropout(args.dropout_p) self.layers = nn.ModuleList() for _ in range(args.n_layers): self.layers.append(TransformerBlock(args)) self.norm = nn.LayerNorm(args.dim) self.output = nn.Linear(args.dim, args.vocab_size, bias=False) if args.weight_tying: self.output.weight = self.tok_embeddings.weight self.checkpoint_activations = args.checkpoint_activations def forward(self, tokens): _bsz, seq_len = tokens.size() assert seq_len <= self.max_seq_len h = self.tok_embeddings(tokens) pos = torch.arange(0, seq_len, device=tokens.device) p = self.pos_embeddings(pos) # positional embeddings of shape (seq_len, dim) h = h + p h = self.dropout(h) for layer in self.layers: if self.checkpoint_activations: h = torch.utils.checkpoint.checkpoint(layer, h, use_reentrant=False) else: h = layer(h) h = self.norm(h) output = self.output(h).float() return output @staticmethod def parallelize( module: "Transformer", device_mesh: DeviceMesh, use_seq_parallel: bool, local_output_for_attn: bool = False ) -> nn.Module: assert isinstance(module, Transformer), f"Requires Transformer but got {module}" # Parallelize the root submodules. if use_seq_parallel: root_plan = { "tok_embeddings": RowwiseParallel(input_layouts=Replicate(), output_layouts=Shard(1)), "pos_embeddings": RowwiseParallel(input_layouts=Replicate(), output_layouts=Shard(0)), "norm": SequenceParallel(), } else: root_plan = { "tok_embeddings": RowwiseParallel(input_layouts=Replicate(), output_layouts=Replicate()), "pos_embeddings": RowwiseParallel(input_layouts=Replicate(), output_layouts=Replicate()), } module_tp = parallelize_module(module, device_mesh, root_plan) # Parallelize the attention and feed forward submodules. for layer in module_tp.layers: layer_parallelize_plan = {} if use_seq_parallel: layer_parallelize_plan["attention"] = PrepareModuleInput( input_layouts=Shard(1), desired_input_layouts=Replicate(), ) # shard the RMSNorms layer_parallelize_plan["attention_norm"] = SequenceParallel() layer_parallelize_plan["ffn_norm"] = SequenceParallel() layer_parallelize_plan["attention.wq"] = ColwiseParallel(use_local_output=local_output_for_attn) layer_parallelize_plan["attention.wk"] = ColwiseParallel(use_local_output=local_output_for_attn) layer_parallelize_plan["attention.wv"] = ColwiseParallel(use_local_output=local_output_for_attn) layer_parallelize_plan["attention.wo"] = ( RowwiseParallel(output_layouts=Shard(1)) if use_seq_parallel else RowwiseParallel() ) layer_parallelize_plan["feed_forward.w1"] = ( ColwiseParallel(input_layouts=Shard(1)) if use_seq_parallel else ColwiseParallel() ) layer_parallelize_plan["feed_forward.w2"] = ( RowwiseParallel(output_layouts=Shard(1)) if use_seq_parallel else RowwiseParallel() ) parallelize_module(layer, device_mesh, layer_parallelize_plan) # Parallelize the output submodule. If weight tying is enabled, we need to # make sure output.weight is sharded consistently as tok_embeddings.weight, # at the cost of the all_reduce operation using RowwiseParallel. 
output_parallelize_plan = ( ColwiseParallel( input_layouts=Shard(1), output_layouts=Replicate(), ) if use_seq_parallel else ColwiseParallel(output_layouts=Replicate()) ) parallelize_module(module_tp.output, device_mesh, output_parallelize_plan) if local_output_for_attn: for layer in module_tp.layers: layer.attention.n_heads = module_tp.model_args.n_heads // device_mesh.size() # Manually set output.weight so that parameters and gradients are shared. if module_tp.model_args.weight_tying: module_tp.output.weight = module_tp.tok_embeddings.weight return module_tp def skip_unless_torch_gpu(method: T) -> T: """ Test decorator which skips the test unless there's a GPU available to torch. >>> # xdoctest: +SKIP >>> @skip_unless_torch_gpu >>> def test_some_method(self) -> None: >>> ... """ # The builtin @skip_if_no_gpu relies on os.environ['WORLD_SIZE'] being set. return cast(T, skip_if_lt_x_gpu(NUM_DEVICES)(method)) class DTensorTestBase(MultiProcessTestCase): @property def world_size(self) -> int: return NUM_DEVICES @property def backend(self) -> str: backend = dist.get_default_backend_for_device(DEVICE_TYPE) return backend def build_device_mesh(self) -> DeviceMesh: return DeviceMesh(self.device_type, list(range(self.world_size))) def init_pg(self, eager_init) -> None: if "nccl" in self.backend and torch.cuda.device_count() < self.world_size: sys.exit(TEST_SKIPS[f"multi-gpu-{self.world_size}"].exit_code) if self.backend not in ["nccl", "gloo", "mpi", "cpu:gloo,cuda:nccl", "hccl", "xccl"]: raise RuntimeError(f"Backend {self.backend} not supported!") device_id = None if "nccl" in self.backend or "xccl" in self.backend: # set device for nccl pg for collectives torch.accelerator.set_device_index(self.rank) # we only need to set device_id for nccl backend with eager init device_id = torch.device(f"{self.device_type}:{self.rank}") if eager_init else None # For nccl backend, bind the device to the process if device_id is not None # so the nccl communicator is immediately formed and we can use `ncclCommSplit` # for form subgroup to avoid unnecesssary overhead. dist.init_process_group( backend=self.backend, world_size=self.world_size, rank=self.rank, # pyre-ignore[16] init_method=f"file://{self.file_name}", # pyre-ignore[16] device_id=device_id, ) def destroy_pg(self) -> None: # Wait for all ranks to reach here before starting shutdown. # FIXME dist.barrier deadlocks with multiple threads and NCCL: https://github.com/pytorch/pytorch/issues/95895 # dist.all_reduce(torch.zeros((1,), device="cuda" if TEST_CUDA else "cpu")) # FIXME can't use the above all_reduce as it causes hangs on bionic and focal. It hangs: # test_dtensor.py -- DTensorMeshTest.test_dtensor_device_mesh_device_conversion dist.barrier() dist.destroy_process_group() def setUp(self) -> None: super().setUp() self._spawn_processes() # pyre-ignore[2]: def _test_op(self, mesh: DeviceMesh, op_call, *args, **kwargs) -> None: out = op_call(*args, **kwargs) dtc = DTensorConverter(mesh, args, kwargs) for d_args, d_kwargs in dtc: # pyre can't find assertTrue anymore? 
self.assertEqual(dtc.successful(), True) d_out = op_call(*d_args, **d_kwargs) self.assertEqual(d_out.full_tensor(), out) def run_subtests(self, *args, **kwargs): return run_subtests(self, *args, **kwargs) TestFunc = Callable[[...], object] # wrapper to initialize comms (processgroup) def with_comms(eager_init: Union[TestFunc, bool] = False) -> TestFunc: def decorator(func, eager_init: bool = False): @wraps(func) # pyre-ignore[6] def wrapper( self, *args: tuple[object], **kwargs: dict[str, Any] # type: ignore[misc] ) -> None: # if enough GPU we can use GPU, otherwise we fallback to CPU if not (TEST_CUDA or TEST_XPU) or torch.accelerator.device_count() < self.world_size: self.device_type = "cpu" else: self.device_type = DEVICE_TYPE self.init_pg(eager_init) try: func(self, *args, **kwargs) # type: ignore[misc] except Exception as e: dist.destroy_process_group() raise e self.destroy_pg() return wrapper return decorator(func=eager_init) if callable(eager_init) else partial(decorator, eager_init=eager_init) class DTensorOpTestBase(MultiThreadedTestCase): @property def world_size(self) -> int: return NUM_DEVICES @property def device_type(self) -> str: return DEVICE_TYPE def build_device_mesh(self): return DeviceMesh(self.device_type, list(range(self.world_size))) def setUp(self) -> None: super().setUp() self._spawn_threads() # This is a class for converting args/kwargs of an op into distributed args/kwargs class DTensorConverter: def __init__( self, mesh: DeviceMesh, args: tuple[object, ...], kwargs: dict[str, object], ) -> None: self.hit = 0 self.miss = 0 self.mesh = mesh self.args = args self.kwargs = kwargs flatten_args, flatten_args_spec = tree_flatten(args) flatten_kwargs, flatten_kwargs_spec = tree_flatten(kwargs) self.flatten_args: list[object] = flatten_args self.flatten_args_spec: TreeSpec = flatten_args_spec self.flatten_kwargs: list[object] = flatten_kwargs self.flatten_kwargs_spec: TreeSpec = flatten_kwargs_spec choices_for_args = [self.gen_sharding_choices_for_arg(arg) for arg in self.flatten_args if isinstance(arg, torch.Tensor)] choices_for_args.extend( self.gen_sharding_choices_for_arg(arg) for arg in self.flatten_kwargs if isinstance(arg, torch.Tensor) ) self.sharding_combs: Iterator[Sequence[Placement]] = iter( itertools.product(*choices_for_args) ) def successful(self) -> bool: return self.hit > 0 and self.miss == 0 def is_supported_tensor(self, t: torch.Tensor) -> bool: # TODO: dist tensor need to support quantized and sparse # tensors, quantized tensor might be relatively easy, but # sparse tensor have special layouts that we need to possibly # deal with, until we are clear about them, we don't officially # support them. 
return not any( [ t.is_sparse_csr, t.is_sparse, t.is_mkldnn, t.is_quantized, t.is_nested, torch._is_functional_tensor(t), t.is_neg(), t.is_conj(), t.device.type in ("lazy", "meta"), # We need a way to test if a tensor is batched but there # is no official APi to do it # torch._C._is_batched(t), ] ) def gen_sharding_choices_for_arg(self, arg: torch.Tensor) -> Sequence[Placement]: mesh_size = self.mesh.size() sharding_choices: list[Placement] = [Replicate()] # c10d collective does not support bool tensor # for bool tensor we treat it as replicated if arg.dtype != torch.bool: # only generating choices with: replicate, or sharding # evenly on a dimension that could be sharded sharding_choices = sharding_choices + [ Shard(i) for i, s in enumerate(arg.shape) if s > 1 and s % mesh_size == 0 ] # TODO: add multi mesh choices # all_choices = itertools.product( # *(self.mesh.ndim * [sharding_choices]) # ) return sharding_choices def __iter__(self) -> "DTensorConverter": return self def __next__(self) -> tuple[tuple[object, ...], dict[str, object]]: try: next_sharding_choices = next(self.sharding_combs) idx = 0 new_args: list[object] = [] for arg in self.flatten_args: if isinstance(arg, torch.Tensor): new_args.append( self.to_dist_tensor( arg, self.mesh, [next_sharding_choices[idx]] ) ) idx += 1 else: new_args.append(arg) new_kwargs: list[object] = [] for arg in self.flatten_kwargs: if isinstance(arg, torch.Tensor): new_kwargs.append( self.to_dist_tensor( arg, self.mesh, [next_sharding_choices[idx]] ) ) idx += 1 else: new_kwargs.append(arg) return ( tree_unflatten(new_args, self.flatten_args_spec), tree_unflatten(new_kwargs, self.flatten_kwargs_spec), ) except StopIteration as e: raise StopIteration from e def to_dist_tensor( self, t: torch.Tensor, mesh: DeviceMesh, placements: list[Placement] ) -> torch.Tensor: if type(t) is torch.Tensor or type(t) is nn.Parameter: if self.is_supported_tensor(t): self.hit += 1 if t.ndim == 0: # scalar tensor by default will be replicated r = distribute_tensor(t, mesh, [Replicate()] * mesh.ndim) else: # distribute non-scalar tensors r = distribute_tensor(t, mesh, placements) if type(t) is nn.Parameter: r = nn.Parameter( # type: ignore[assignment] r, requires_grad=r.requires_grad ) return r else: self.miss += 1 return t elif torch.overrides.is_tensor_like(t): # Blindly converting tensor subclasses to dist tensor can cause # unpredictable problems, we explicitly disable this conversion # for now (i.e. we don't support DTensor holding tensor subclass # until there's a strong reason later). self.miss += 1 return t else: raise RuntimeError(f"Trying to convert to DTensor, but got {type(t)}") ```
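A hedged sketch of how these pieces fit together in a DTensor test; the test body is illustrative:

```py
import torch
from torch.distributed._tensor import distribute_tensor, Shard
from torch.testing._internal.distributed._tensor.common_dtensor import (
    DTensorTestBase,
    with_comms,
)


class MyDTensorTest(DTensorTestBase):
    @with_comms  # initializes the process group, falling back to CPU if needed
    def test_shard_and_gather(self):
        mesh = self.build_device_mesh()
        local = torch.arange(8, dtype=torch.float32, device=self.device_type)
        dt = distribute_tensor(local, mesh, [Shard(0)])
        # Each rank holds a 1/world_size slice; full_tensor() all-gathers it back.
        self.assertEqual(dt.full_tensor(), local)
```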
=============================================================================================================================================== SOURCE CODE FILE: checkpoint_utils.py LINES: 1 SIZE: 5.17 KB PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\distributed\checkpoint_utils.py ENCODING: utf-8 ```py # mypy: allow-untyped-defs # Copyright (c) Meta Platforms, Inc. and affiliates import io import os import shutil import tempfile from functools import wraps from typing import Any, Callable, cast, IO, Optional # introduced as collections.abc.Buffer in Python 3.12 from typing_extensions import Buffer import torch.distributed as dist from torch.distributed.checkpoint._extension import ( ExtensionRegistry, StreamTransformExtension, ) class Rot13Example(StreamTransformExtension): """ This is an example stream transform extension which just does rot13 on each alphanumeric character of the stream. It is mainly intended as a demonstration and for testing; there isn't a production use case for this. """ def __init__(self, chunk_size: int = io.DEFAULT_BUFFER_SIZE) -> None: super().__init__() self._chunk_size = chunk_size @staticmethod def from_descriptor(version: str) -> "Rot13Example": if version.partition(".")[0] != "1": raise ValueError(f"Unknown extension {version=}") return Rot13Example() @staticmethod def registry_name() -> str: return "stream.rot13" def get_descriptor(self) -> str: return f"{self.registry_name()}/1" @staticmethod def _rot13bytes(b: Buffer, count: int) -> None: b = memoryview(b) for i in range(count): ch = b[i] if ch >= ord("A") and ch <= ord("Z"): ch += ord("a") - ord("A") elif ch >= ord("a") and ch <= ord("z"): ch += ord("A") - ord("a") b[i] = ch def transform_to(self, output: IO[bytes]) -> IO[bytes]: class Writer(io.RawIOBase): def __init__(self, output: IO[bytes]) -> None: self.output = output def writeable(self) -> bool: return True def write(self, b: Buffer) -> Optional[int]: # Don't mutate the input chunk = bytearray(b) Rot13Example._rot13bytes(chunk, len(chunk)) return self.output.write(chunk) def flush(self) -> None: self.output.flush() return cast(IO[bytes], Writer(output)) def transform_from(self, input: IO[bytes]) -> IO[bytes]: class Reader(io.RawIOBase): def __init__(self, input: IO[bytes]) -> None: self.input = input def readable(self) -> bool: return True def readinto(self, b: Buffer) -> Optional[int]: if hasattr(self.input, "readinto"): count = self.input.readinto(b) else: # It's possible self.input is an IO[bytes] with no readinto method. # In that case, we emulate with a read and copy. In practice, # all of the current concrete extensions have readinto. view = memoryview(b) r = self.input.read(len(view)) if r is None: count = None else: count = len(r) view[:count] = r if count == 0 or count is None: return count Rot13Example._rot13bytes(b, count) return count def seekable(self) -> bool: return self.input.seekable() def seek(self, offset: int, whence: int = os.SEEK_SET) -> int: return self.input.seek(offset, whence) def tell(self) -> int: return self.input.tell() return cast(IO[bytes], Reader(input)) def get_test_extension_registry() -> ExtensionRegistry: registry = ExtensionRegistry() registry.register(Rot13Example) return registry def with_temp_dir( func: Optional[Callable] = None, ) -> Optional[Callable]: """ Wrapper to initialize temp directory for distributed checkpoint. 
""" assert func is not None @wraps(func) def wrapper(self, *args: tuple[object], **kwargs: dict[str, Any]) -> None: if dist.is_initialized(): # Only create temp_dir when rank is 0 if dist.get_rank() == 0: temp_dir = tempfile.mkdtemp() print(f"Using temp directory: {temp_dir}") else: temp_dir = "" object_list = [temp_dir] # Broadcast temp_dir to all the other ranks os.sync() dist.broadcast_object_list(object_list) self.temp_dir = object_list[0] os.sync() else: temp_dir = tempfile.mkdtemp() print(f"No process group initialized, using temp directory: {temp_dir}") self.temp_dir = temp_dir try: func(self, *args, **kwargs) finally: if dist.is_initialized() and dist.get_rank() == 0: shutil.rmtree(self.temp_dir, ignore_errors=True) else: shutil.rmtree(self.temp_dir, ignore_errors=True) return wrapper ```
================================================================================================================================================ SOURCE CODE FILE: common_state_dict.py LINES: 1 SIZE: 6.71 KB PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\distributed\common_state_dict.py ENCODING: utf-8 ```py # mypy: allow-untyped-defs # Owner(s): ["oncall: distributed"] import copy from itertools import chain from typing import Any import torch import torch.nn as nn from torch.distributed._sharded_tensor import ShardedTensor from torch.distributed._state_dict_utils import _gather_state_dict from torch.distributed._tensor import DTensor from torch.distributed.checkpoint.state_dict import ( _PG, _STATE, set_state_dict, StateDictOptions, ) class VerifyStateDictMixin: def _compare_tensor(self, orig_tensor, dist_tensor, offload_to_cpu=False): if isinstance(dist_tensor, (DTensor, ShardedTensor)): dist_tensor = _gather_state_dict({"mykey": dist_tensor}).pop("mykey") if offload_to_cpu: orig_tensor = orig_tensor.cpu() dist_tensor = dist_tensor.cpu() self.assertTrue(isinstance(dist_tensor, torch.Tensor)) self.assertTrue(torch.allclose(orig_tensor, dist_tensor)) def _verify_msd( self, msd: dict[str, Any], dist_msd: dict[str, Any], options: StateDictOptions = StateDictOptions(), offload_to_cpu=False, ) -> None: if not options.ignore_frozen_params: self.assertEqual(len(msd), len(dist_msd)) for fqn, param in msd.items(): dist_param = dist_msd.get(fqn, None) if not options.ignore_frozen_params: self.assertIsNotNone(dist_param, f"{fqn=}") try: self._compare_tensor(param, dist_param, offload_to_cpu) except AssertionError as e: raise AssertionError( f"{fqn} has mismatched value {param} {dist_param}" ) from e elif dist_param is None: self.assertFalse(param.requires_grad, f"{fqn=}") def _verify_osd( self, model: nn.Module, optim: torch.optim.Optimizer, osd: dict[str, Any], dist_osd: dict[str, Any], ) -> None: params = list(chain.from_iterable(g["params"] for g in optim.param_groups)) param_pid_mapping = dict(zip(params, range(len(params)))) fqn_pid_mapping = {} for fqn, param in model.named_parameters(): pid = param_pid_mapping[param] fqn_pid_mapping[fqn] = pid fqn_pid_mapping[pid] = fqn # Check optimizer_state_dict state self.assertEqual(len(osd[_STATE]), len(dist_osd[_STATE])) for pid, states in osd[_STATE].items(): fqn = fqn_pid_mapping[pid] dist_states = dist_osd[_STATE].get(fqn, None) self.assertIsNotNone(dist_states, fqn) self.assertEqual(len(states), len(dist_states)) for key, state in states.items(): dist_state = states.get(key, None) self.assertIsNotNone(dist_state) self._compare_tensor(state, dist_state) # Check optimizer_state_dict param_group old_dist_osd_pg = dist_osd[_PG] if len(osd[_PG]) != len(dist_osd[_PG]): self.assertTrue(len(dist_osd[_PG]) > len(osd[_PG])) new_pg = copy.deepcopy(dist_osd[_PG][0]) new_pg["params"] = [] for dist_group in dist_osd[_PG]: new_pg["params"].extend(dist_group["params"]) dist_osd[_PG] = [new_pg] self.assertEqual(len(osd[_PG]), len(dist_osd[_PG])) for group, dist_group in zip(osd[_PG], dist_osd[_PG]): self.assertEqual(len(group), len(dist_group)) for key, value in group.items(): # Below doesn't work because param_groups can have None # values. 
# dist_value = dist_group.get(key, None) # self.assertIsNotNone(dist_value, (dist_group, group)) dist_value = dist_group[key] if key == "params": fqns = [fqn_pid_mapping[pid] for pid in value] self.assertEqual(sorted(fqns), sorted(dist_value)) else: self.assertEqual(value, dist_value) dist_osd[_PG] = old_dist_osd_pg def _verify_osd_by_load( self, model: nn.Module, optim: torch.optim.Optimizer, new_optim: torch.optim.Optimizer, dist_osd: dict[str, Any], ) -> None: new_dist_osd = _gather_state_dict(dist_osd) set_state_dict( model, optimizers=new_optim, model_state_dict={}, optim_state_dict=new_dist_osd, ) self.assertEqual(optim.state_dict(), new_optim.state_dict()) class FusionEmbedding(nn.Module): def __init__(self, vocab_size: int, fusion_vocab_size: int, embed_dim: int) -> None: super().__init__() self.embedding = nn.Embedding(vocab_size, embed_dim) self.fusion_embedding = nn.Embedding(fusion_vocab_size, embed_dim) class FusionEmbeddingWithHook(nn.Module): def __init__(self, vocab_size: int, fusion_vocab_size: int, embed_dim: int) -> None: super().__init__() self.embedding = nn.Embedding(vocab_size, embed_dim) self.fusion_embedding = nn.Embedding(fusion_vocab_size, embed_dim) self._register_state_dict_hook(FusionEmbeddingWithHook._state_dict_hook) self._register_load_state_dict_pre_hook( FusionEmbeddingWithHook._load_state_dict_hook, with_module=True ) def _state_dict_hook(self, destination, prefix, keep_vars): """Remove "embedding" from the original embedding in the state_dict name. This keeps the orginal state dict name for the embedding from before fusing with the FusionEmbedding. """ key = prefix + "embedding.weight" new_key = prefix + "weight" destination[new_key] = destination[key] del destination[key] def _load_state_dict_hook(self, state_dict, prefix, *args, **kwargs): """Apply extra "embedding" prefix to the state_dict key to account for the FusionEmbedding wrapping. """ if state_dict: key = prefix + "weight" new_key = prefix + "embedding.weight" state_dict[new_key] = state_dict[key] del state_dict[key] class FusionEmbeddingWithModifier(FusionEmbeddingWithHook): # _fqn_modifiers is a private function as a contract between DSD. When users change the state_dict # keys, they need to provide a mapping from the new key to the original key. This is used to ensure # consistency between the state_dict keys and fqn. def _fqn_modifiers(self) -> dict[str, str]: return { "weight": "embedding", } ```
=========================================================================================================================================================== SOURCE CODE FILE: ddp_under_dist_autograd_test.py LINES: 3 SIZE: 26.86 KB PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\distributed\ddp_under_dist_autograd_test.py ENCODING: utf-8 ```py # mypy: allow-untyped-defs import contextlib import enum import logging import os import threading from typing import NamedTuple import torch import torch.distributed as dist import torch.distributed.autograd as dist_autograd import torch.nn as nn from torch.distributed import rpc from torch.distributed.nn import RemoteModule from torch.nn.parallel import DistributedDataParallel from torch.testing._internal.common_distributed import ( requires_gloo, requires_nccl, skip_if_lt_x_gpu, skip_if_rocm_multiprocess, ) from torch.testing._internal.dist_utils import INIT_METHOD_TEMPLATE, dist_init from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import ( RpcAgentTestFixture, ) NUM_EM_ROW = 2 D_SPARSE = 3 D_DENSE = 2 D_HID = 3 D_OUT = 1 NUM_TRAINERS = 4 # Trainers + the master + the remote worker WORLD_SIZE = NUM_TRAINERS + 2 TRAINER_RANKS = list(range(NUM_TRAINERS)) REMOTE_WORKER_RANK = TRAINER_RANKS[-1] + 1 MASTER_RANK = REMOTE_WORKER_RANK + 1 class DdpMode(enum.Enum): # Don't apply DDP NONE = enum.auto() # Apply DDP to the top level nn.Module OUTSIDE = enum.auto() # Embed DDP inside the top level nn.Module INSIDE = enum.auto() def init_logger(): logger = logging.getLogger(__name__) level = logging.DEBUG if "debug" in os.environ else logging.INFO logger.setLevel(level) console = logging.StreamHandler() formatter = logging.Formatter( "%(asctime)s %(filename)s:%(lineno)s %(levelname)s p:%(processName)s t:%(threadName)s: %(message)s" ) console.setFormatter(formatter) console.setLevel(level) # add the handlers to the logger logger.addHandler(console) logger.propagate = False return logger gLogger = init_logger() class FeatureSet(NamedTuple): """ A feature set has 2 types of features""" dense_features: torch.Tensor sparse_features: torch.LongTensor values: torch.Tensor def _call_method(method, rref, *args, **kwargs): return method(rref.local_value(), *args, **kwargs) def _remote_method(method, rref, *args, **kwargs): args_tup = tuple([method, rref] + list(args)) return rpc.rpc_sync(rref.owner(), _call_method, args=args_tup, kwargs=kwargs) def _remote_method_async(method, rref, *args, **kwargs): args_tup = tuple([method, rref] + list(args)) return rpc.rpc_async(rref.owner(), _call_method, args=args_tup, kwargs=kwargs) class RemoteEM(nn.Module): def __init__(self, num_embeddings: int, embedding_dim: int): gLogger.info("Initing RemoteEM with %s %s", num_embeddings, embedding_dim) super().__init__() init_em = [0.5] * embedding_dim self.em = nn.EmbeddingBag( num_embeddings, embedding_dim, _weight=torch.tensor([init_em] * num_embeddings), ) def forward(self, input: torch.Tensor): gLogger.debug("Running RemoteEM.forward() on: %s", input) return self.em(input, offsets=torch.LongTensor(range(input.shape[0]))) # Return a linear module with predefined parameters. 
def getLinear(d_in, d_out): l = nn.Linear(d_in, d_out, bias=False) w = torch.ones((d_out, d_in)) w[0][0] = -1 w.requires_grad_() l.weight.data = w return l class RemoteNet(nn.Module): def __init__(self, d_in: int, d_out: int): gLogger.info("Initing RemoteNet with %s %s", d_in, d_out) super().__init__() self.fc = getLinear(d_in, d_out) self.relu = nn.ReLU() def forward(self, input: torch.Tensor): gLogger.debug("Running RemoteNet.forward() on: %s", input) return self.relu(self.fc(input)) class HybridModel(nn.Module): def __init__( self, remote_em_rref: rpc.RRef, remote_net_rref: rpc.RRef, process_group_for_ddp: dist.ProcessGroup = None, ): super().__init__() self.remote_em_rref = remote_em_rref self.remote_net_rref = remote_net_rref self.fc1 = getLinear(D_DENSE, D_DENSE) self.fc2 = getLinear(D_HID, D_OUT) self.non_ddp_params = tuple(self.fc1.parameters()) + tuple( self.fc2.parameters() ) self.ddp_params = () if process_group_for_ddp is not None: self.non_ddp_params, self.ddp_params = ( tuple(self.fc1.parameters()), tuple(self.fc2.parameters()), ) gLogger.info("Use DDP for the second local net.") self.fc2 = DistributedDataParallel( self.fc2, check_reduction=True, process_group=process_group_for_ddp ) gLogger.info( "HybridModel has %s groups of parameters.", len(list(self.parameters())) ) def forward(self, input: FeatureSet): gLogger.debug("Running HybridModel.forward on %s", input) sparse = _remote_method( RemoteEM.forward, self.remote_em_rref, input.sparse_features ) # The same size of mini batch. assert sparse.shape[0] == input.dense_features.shape[0] dense = self.fc1(input.dense_features) x = torch.cat((dense, sparse), 1) gLogger.debug("Concatenated feature: %s", x) x = _remote_method(RemoteNet.forward, self.remote_net_rref, x) return self.fc2(x) class Trainer: def __init__( self, remote_em_rref: rpc.RRef, remote_net_rref: rpc.RRef, ddp_mode: DdpMode, rank: int, ): self.rank = rank self.trainer_group = ( dist.new_group(TRAINER_RANKS) if ddp_mode in (DdpMode.INSIDE, DdpMode.OUTSIDE) else None ) self.remote_em_rref = remote_em_rref self.remote_net_rref = remote_net_rref self.hybrid_module = HybridModel( self.remote_em_rref, self.remote_net_rref, self.trainer_group if ddp_mode in (DdpMode.INSIDE,) else None, ) self.ddp_params, self.non_ddp_params = ( self.hybrid_module.ddp_params, self.hybrid_module.non_ddp_params, ) if ddp_mode == DdpMode.OUTSIDE: gLogger.info("Wrapping the whole hybrid module into DDP.") self.ddp_params += self.non_ddp_params self.non_ddp_params = () self.hybrid_module = DistributedDataParallel( self.hybrid_module, check_reduction=True, process_group=self.trainer_group, ) gLogger.info( "Succeeded in creating a HybridModel instance with " "%s ddp params and %s other local params.", len(self.ddp_params), len(self.non_ddp_params) ) def destroy_pg(self): if self.trainer_group: dist.destroy_process_group(self.trainer_group) def train_batch( self, mini_batch: FeatureSet, trainer_has_less_inputs: bool, simulate_uneven_inputs: bool, ): grads_dict = None if not simulate_uneven_inputs: input_batches = [mini_batch] else: # Split into microbatches, and trim to simulate uneven inputs. 
dense_features = mini_batch.dense_features sparse_features = mini_batch.sparse_features values = mini_batch.values dense_microbatch = torch.split(dense_features, 2) sparse_microbatch = torch.split(sparse_features, 2) values_microbatch = torch.split(values, 2) batches = [] for d, s, v in zip(dense_microbatch, sparse_microbatch, values_microbatch): feature_set = FeatureSet(dense_features=d, sparse_features=s, values=v) batches.append(feature_set) if trainer_has_less_inputs: input_batches = batches[: len(batches) // 2] gLogger.info( "Trainer reduced input patches from %s " "to %s to simulate uneven inputs.", len(batches), len(input_batches) ) else: input_batches = batches with self.hybrid_module.join() if simulate_uneven_inputs else contextlib.nullcontext(): for b in input_batches: with dist_autograd.context() as context_id: output = self.hybrid_module.forward(b) loss = (output * mini_batch.values).sum() dist_autograd.backward(context_id, [loss]) grads_dict = dist_autograd.get_gradients(context_id) gLogger.info( "Loss is %s for mini batch: %s. " "Grads dict has %s entries: %s", loss, mini_batch, len(grads_dict), grads_dict ) return ( tuple(grads_dict[param] for param in self.ddp_params), tuple(grads_dict[param] for param in self.non_ddp_params), ) def get_training_examples(): n = 16 training_examples = FeatureSet( dense_features=torch.zeros((n, D_DENSE)), sparse_features=torch.zeros(n, dtype=torch.long), values=torch.zeros(n), ) idx = 0 # Every example has another one that has exactly the same features but an # opposite value. Therefore, their grads cancel each other in all-reduce. for value in (-1, 1): for x in (-1.0 * value, 1.0 * value): for y in (1.0 * value, -1.0 * value): for z in (0, 1): training_examples.dense_features[idx, :] = torch.tensor((x, y)) training_examples.sparse_features[idx] = z training_examples.values[idx] = value idx += 1 # Split the examples among NUM_TRAINERS trainers assert 0 == (n % NUM_TRAINERS) examples_per_trainer = int(n / NUM_TRAINERS) return [ FeatureSet( dense_features=training_examples.dense_features[ start : start + examples_per_trainer, : ], sparse_features=training_examples.sparse_features[ start : start + examples_per_trainer ], values=training_examples.values[start : start + examples_per_trainer], ) for start in range(0, n, examples_per_trainer) ] shutdown_signal = threading.Condition() def set_shutdown_signal(): global shutdown_signal with shutdown_signal: shutdown_signal.notify() class DdpUnderDistAutogradTest(RpcAgentTestFixture): @property def world_size(self) -> int: return WORLD_SIZE def remote_worker_name(self) -> str: # The name has to be consistent with that in 'dist_init' decorator. return f"worker{REMOTE_WORKER_RANK}" def trainer_name(self, rank): # The name has to be consistent with that in 'dist_init' decorator. return f"worker{rank}" def _remote_worker_process(self, ddp_mode): gLogger.info("The remote worker is running.") dist.init_process_group( backend="gloo", init_method=INIT_METHOD_TEMPLATE.format(file_name=self.file_name), world_size=self.world_size, rank=self.rank, ) if ddp_mode in (DdpMode.INSIDE, DdpMode.OUTSIDE): # new_group needs to be called on ranks. 
dist.new_group(TRAINER_RANKS) global shutdown_signal with shutdown_signal: shutdown_signal.wait() gLogger.info("Exiting remote worker.") dist.destroy_process_group() def _trainer_process(self, rank: int): gLogger.info("Running the trainer #%s...", rank) gLogger.info( "Initing trainer process group by trainer #%s with ranks %s", rank, TRAINER_RANKS ) dist.init_process_group( backend="gloo", init_method=INIT_METHOD_TEMPLATE.format(file_name=self.file_name), world_size=self.world_size, rank=self.rank, ) gLogger.info("Waiting for shutdown signal on trainer #%s...", rank) global shutdown_signal with shutdown_signal: shutdown_signal.wait() gLogger.info("Exiting the trainer #%s...", rank) dist.destroy_process_group() def _master_process(self, ddp_mode: DdpMode, simulate_uneven_inputs: bool): gLogger.info("Running the master process...") dist.init_process_group( backend="gloo", init_method=INIT_METHOD_TEMPLATE.format(file_name=self.file_name), world_size=self.world_size, rank=self.rank, ) remote_em_rref = rpc.remote( self.remote_worker_name(), RemoteEM, args=(NUM_EM_ROW, D_SPARSE) ) remote_net_rref = rpc.remote( self.remote_worker_name(), RemoteNet, args=(D_DENSE + D_SPARSE, D_HID) ) gLogger.info("Created remote rrefs on master") self.do_test_on_master( ddp_mode, simulate_uneven_inputs, remote_em_rref, remote_net_rref ) def do_test_on_master( self, ddp_mode: DdpMode, simulate_uneven_inputs: bool, remote_em_rref: rpc.RRef, remote_net_rref: rpc.RRef, ): if simulate_uneven_inputs: gLogger.info( "Running DDP + RPC test with simulating uneven inputs across trainers." ) trainer_rrefs = [] for rank in TRAINER_RANKS: trainer = self.trainer_name(rank) trainer_rrefs.append( rpc.remote( trainer, Trainer, args=(remote_em_rref, remote_net_rref, ddp_mode, rank), ) ) if ddp_mode in (DdpMode.INSIDE, DdpMode.OUTSIDE): # new_group needs to be called on ranks. dist.new_group(TRAINER_RANKS) training_examples = get_training_examples() for _ in range(3): futures = [] num_trainers = len(trainer_rrefs) for idx, trainer_rref in enumerate(trainer_rrefs): # Half the trainers will deplete inputs earlier than the rest. trainer_has_less_inputs = ( simulate_uneven_inputs and idx < num_trainers // 2 ) futures.append( _remote_method_async( Trainer.train_batch, trainer_rref, training_examples[idx], trainer_has_less_inputs, simulate_uneven_inputs, ) ) for future in futures: ddp_grads, non_ddp_grads = future.wait() # When there are uneven inputs, it is not necessary that grads # cancel each other out, since some trainers contribute 0 grad. if not simulate_uneven_inputs: for grad in ddp_grads: self.assertEqual( grad, torch.zeros_like(grad), msg=f"The grad for any ddp parameter should be zeros, because " "the training examples' grads cancel each other. Received " f"gradient {grad}", ) for grad in non_ddp_grads: self.assertNotEqual( grad, torch.zeros_like(grad), msg="The grad for any non-ddp parameter shouldn't be zeros", ) # Destroy process groups for idx, trainer_rref in enumerate(trainer_rrefs): _remote_method_async(Trainer.destroy_pg, trainer_rref).wait() # Send shutdown signals. 
for rank in TRAINER_RANKS: trainer = self.trainer_name(rank) rpc.rpc_sync(trainer, set_shutdown_signal, args=()) rpc.rpc_sync(self.remote_worker_name(), set_shutdown_signal, args=()) def _do_test(self, ddp_mode, simulate_uneven_inputs=False): if self.rank == MASTER_RANK: self._master_process(ddp_mode, simulate_uneven_inputs) elif self.rank == REMOTE_WORKER_RANK: self._remote_worker_process(ddp_mode) elif self.rank in TRAINER_RANKS: self._trainer_process(self.rank) else: raise RuntimeError(f"Unknown process rank: {self.rank}") @requires_gloo() @dist_init def test_backward_no_ddp(self): self._do_test(DdpMode.NONE) @requires_gloo() @dist_init def test_backward_ddp_outside(self): self._do_test(DdpMode.OUTSIDE) @requires_gloo() @dist_init def test_backward_ddp_outside_uneven_inputs(self): self._do_test(DdpMode.OUTSIDE, simulate_uneven_inputs=True) @requires_gloo() @dist_init def test_backward_ddp_inside(self): self._do_test(DdpMode.INSIDE) # Common utils for both CPU and CUDA test suites class CommonDdpComparisonTest(RpcAgentTestFixture): @property def world_size(self) -> int: return NUM_TRAINERS def trainer_name(self, rank): # The name has to be consistent with that in 'dist_init' decorator. return f"worker{rank}" @staticmethod def get_remote_grads(rref, context_id): return dist_autograd.get_gradients(context_id)[rref.local_value().weight] class DdpComparisonTest(CommonDdpComparisonTest): def _run_test_ddp_comparision(self, simulate_uneven_inputs=False): gLogger.info("Running trainer rank: %s", self.rank) # Each trainer uses a different random seed. Otherwise, they are going # to have exactly the same initial model parameters, input, and # therefore grads. That means the grads will be the same before and # after DDP's all-reduce. torch.manual_seed(self.rank) dist.init_process_group( backend="gloo", # Postfix file_name with "pg" since file_name is also used by RPC agent init_method=INIT_METHOD_TEMPLATE.format(file_name=f"{self.file_name}_pg"), world_size=self.world_size, rank=self.rank, ) net = nn.Linear(2, 3) ddp_net = DistributedDataParallel(net) # Odd ranks join early if simulate_uneven_inputs. num_inputs = 1 if simulate_uneven_inputs: if self.rank % 2 == 0: num_inputs += 2 inputs_list = [torch.rand((3, 2)) for _ in range(num_inputs)] if simulate_uneven_inputs: gLogger.info("Rank %s training with %s inputs.", self.rank, len(inputs_list)) # Use distributed autograd. The gradients will be in RPC context map. grads_dict = {} with ddp_net.join(simulate_uneven_inputs): for i, inputs in enumerate(inputs_list): with dist_autograd.context() as context_id: loss = ddp_net(inputs).norm() dist_autograd.backward(context_id, [loss]) grads_dict = dist_autograd.get_gradients(context_id) gLogger.info("Trainer #%s got grad dict: %s", self.rank, grads_dict) # Use local autograd. The gradients will be in each variable's '.grad'. 
ddp_net.zero_grad() loss = ddp_net(inputs).norm() loss.backward() # The gradients should be the same for param in net.parameters(): self.assertTrue( param in grads_dict, msg=f"Param {param} is not in dist_auto grad dict {grads_dict} for iteration {i}", ) self.assertEqual( grads_dict[param], param.grad, msg=f"The grads for param {param} are different under local " f"and dist autograd: {param.grad} \n---\n {grads_dict[param]} for iteration {i}", ) dist.destroy_process_group() @requires_gloo() @dist_init def test_ddp_comparison(self): self._run_test_ddp_comparision() @requires_gloo() @dist_init def test_ddp_comparison_uneven_inputs(self): # test with simulating uneven inputs in DDP self._run_test_ddp_comparision(simulate_uneven_inputs=True) @requires_gloo() @dist_init def test_ddp_dist_autograd_sparse_grads(self): # Each trainer uses a different random seed. Otherwise, they are going # to have exactly the same initial model parameters, input, and # therefore grads. That means the grads will be the same before and # after DDP's all-reduce. torch.manual_seed(self.rank) dist.init_process_group( backend="gloo", init_method=INIT_METHOD_TEMPLATE.format(file_name=self.file_name), world_size=self.world_size, rank=self.rank, ) model = nn.EmbeddingBag(10, 3, sparse=True) ddp_model = DistributedDataParallel(model) # Different inputs for each input = torch.LongTensor(10).random_(0, 10) offsets = torch.LongTensor([0, 4]) # Run local. loss = ddp_model(input, offsets).sum() loss.backward() with dist_autograd.context() as context_id: loss = ddp_model(input, offsets).sum() dist_autograd.backward(context_id, [loss]) grads_dict = dist_autograd.get_gradients(context_id) self.assertEqual(1, len(grads_dict)) self.assertEqual(model.weight.grad, grads_dict[model.weight]) @requires_gloo() @dist_init def test_ddp_dist_autograd_local_vs_remote(self): # Each trainer uses a different random seed. Otherwise, they are going # to have exactly the same initial model parameters, input, and # therefore grads. That means the grads will be the same before and # after DDP's all-reduce. torch.manual_seed(self.rank) dist.init_process_group( backend="gloo", init_method=INIT_METHOD_TEMPLATE.format(file_name=self.file_name), world_size=self.world_size, rank=self.rank, ) # Use two different remote device input string, w/ and w/o the default # device string "cpu", respectively. for remote_device in ["worker0/cpu", "worker0"]: remote_layer1 = RemoteModule( remote_device=remote_device, module_cls=nn.Linear, args=(10, 5, False) ) layer1 = nn.Linear(10, 5, False) # Start with the same parameters for remote and local layer1.weight = remote_layer1.module_rref.to_here().weight # Run local case. layer2 = nn.Linear(5, 1) inputs = torch.rand((10, 10)) ddp_model = DistributedDataParallel(layer2) loss = ddp_model(layer1(inputs)).sum() loss.backward() # Run remote case. with dist_autograd.context() as context_id: loss = ddp_model(remote_layer1(inputs)).sum() dist_autograd.backward(context_id, [loss]) grads_dict = dist_autograd.get_gradients(context_id) dist.barrier() self.assertEqual(layer2.weight.grad, grads_dict[layer2.weight]) self.assertEqual( layer1.weight.grad, rpc.rpc_sync( "worker0", CommonDdpComparisonTest.get_remote_grads, args=(remote_layer1.module_rref, context_id), ), ) class CudaDdpComparisonTest(CommonDdpComparisonTest): @skip_if_lt_x_gpu(NUM_TRAINERS) @requires_nccl() @dist_init @skip_if_rocm_multiprocess def test_ddp_dist_autograd_local_vs_remote_gpu(self): # Each trainer uses a different random seed. 
Otherwise, they are going # to have exactly the same initial model parameters, input, and # therefore grads. That means the grads will be the same before and # after DDP's all-reduce. torch.manual_seed(self.rank) dist.init_process_group( backend="gloo", init_method=INIT_METHOD_TEMPLATE.format(file_name=self.file_name), world_size=self.world_size, rank=self.rank, ) remote_layer1 = RemoteModule( remote_device="worker0/cpu", module_cls=nn.Linear, args=(10, 7, False) ) layer1 = nn.Linear(10, 7, False) # Start with the same parameters for remote and local layer1.weight = remote_layer1.module_rref.to_here().weight layer2 = nn.Linear(7, 5).cuda(self.rank) ddp_layer2 = DistributedDataParallel(layer2, device_ids=[self.rank]) remote_layer3 = RemoteModule( remote_device="worker0/cpu", module_cls=nn.Linear, args=(5, 3, False) ) layer3 = nn.Linear(5, 3, False) # Start with the same parameters for remote and local layer3.weight = remote_layer3.module_rref.to_here().weight layer4 = nn.Linear(3, 1).cuda(self.rank) ddp_layer4 = DistributedDataParallel(layer4, device_ids=[self.rank]) # Run local case. inputs = torch.rand((10, 10)) loss = ddp_layer4( layer3(ddp_layer2(layer1(inputs).cuda(self.rank)).cpu()).cuda(self.rank) ).sum() loss.backward() # Run remote case. with dist_autograd.context() as context_id: loss = ddp_layer4( remote_layer3( ddp_layer2(remote_layer1(inputs).cuda(self.rank)).cpu() ).cuda(self.rank) ).sum() dist_autograd.backward(context_id, [loss]) grads_dict = dist_autograd.get_gradients(context_id) dist.barrier() self.assertEqual( layer1.weight.grad, rpc.rpc_sync( "worker0", CommonDdpComparisonTest.get_remote_grads, args=(remote_layer1.module_rref, context_id), ), ) self.assertEqual(layer2.weight.grad, grads_dict[layer2.weight]) self.assertEqual( layer3.weight.grad, rpc.rpc_sync( "worker0", CommonDdpComparisonTest.get_remote_grads, args=(remote_layer3.module_rref, context_id), ), ) self.assertEqual(layer4.weight.grad, grads_dict[layer4.weight]) ```
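A minimal standalone sketch (not part of the PyTorch sources dumped above or below) of the comparison pattern that `DdpComparisonTest` exercises: run the same forward/backward once inside a `dist_autograd` context and once with local autograd, then check that the gradients agree. It assumes a single-node launch via `torchrun` (so `RANK`, `WORLD_SIZE`, `MASTER_ADDR`, and `MASTER_PORT` are set), the gloo backend, and an arbitrary scratch file for the process-group rendezvous; those choices are illustrative assumptions, not part of the test suite.

```py
# Hypothetical sketch, assuming a torchrun launch; file path below is arbitrary.
import os

import torch
import torch.distributed as dist
import torch.distributed.autograd as dist_autograd
import torch.distributed.rpc as rpc
import torch.nn as nn
from torch.nn.parallel import DistributedDataParallel


def main() -> None:
    rank = int(os.environ["RANK"])
    world_size = int(os.environ["WORLD_SIZE"])

    # dist_autograd requires the RPC framework; DDP requires a process group.
    # Use a file rendezvous for the process group so it does not clash with the
    # TCP store RPC creates from MASTER_ADDR/MASTER_PORT (the tests above do the
    # same by suffixing a separate file name). Delete the file between runs.
    rpc.init_rpc(f"worker{rank}", rank=rank, world_size=world_size)
    dist.init_process_group(
        "gloo",
        init_method="file:///tmp/ddp_sketch_pg",
        rank=rank,
        world_size=world_size,
    )

    # Different seed per rank => different inputs; DDP broadcasts rank 0's
    # parameters at construction, so the models themselves still match.
    torch.manual_seed(rank)
    model = DistributedDataParallel(nn.Linear(2, 3))
    inputs = torch.rand(4, 2)

    # Gradients recorded in the distributed autograd context map.
    with dist_autograd.context() as context_id:
        dist_autograd.backward(context_id, [model(inputs).norm()])
        dist_grads = dist_autograd.get_gradients(context_id)

    # Gradients recorded in each parameter's .grad by local autograd.
    model.zero_grad()
    model(inputs).norm().backward()

    # Both passes go through DDP's all-reduce, so the results should match.
    for param in model.parameters():
        assert torch.allclose(dist_grads[param], param.grad)

    dist.destroy_process_group()
    rpc.shutdown()


if __name__ == "__main__":
    main()
```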
=============================================================================================================================================== SOURCE CODE FILE: distributed_test.py LINES: 2 SIZE: 435.45 KB PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\distributed\distributed_test.py ENCODING: utf-8 ```py # mypy: allow-untyped-defs import copy import json import itertools import math import os import random import sys import tempfile import time from collections import namedtuple, OrderedDict, defaultdict from contextlib import contextmanager, nullcontext from dataclasses import dataclass from datetime import timedelta from functools import reduce from typing import Union, NamedTuple, Callable, Any import unittest import numpy as np import torch import torch.cuda import torch.distributed as dist import torch.distributed.algorithms.model_averaging.averagers as averagers import torch.distributed.algorithms.model_averaging.hierarchical_model_averager as hierarchicalSGD import torch.distributed.algorithms.model_averaging.utils as model_averaging_utils import torch.nn as nn import torch.nn.functional as F from torch._utils_internal import TEST_MASTER_ADDR as MASTER_ADDR from torch._utils_internal import TEST_MASTER_PORT as MASTER_PORT from torch.utils._python_dispatch import TorchDispatchMode from torch.autograd import DeviceType from torch.cuda.amp import GradScaler, autocast from torch.distributed.algorithms.ddp_comm_hooks import ( post_localSGD_hook as post_localSGD, powerSGD_hook as powerSGD, default_hooks as default, quantization as quantization_hooks, ) from torch.distributed.optim import _apply_optimizer_in_backward from torch.distributed.distributed_c10d import ( get_world_size, _get_default_group, _get_pg_config, ) from torch.distributed.utils import ( _verify_param_shape_across_processes, _sync_module_states, ) from torch.profiler import ( ExecutionTraceObserver, ProfilerActivity, ) from torch.nn.parallel import DistributedDataParallel from torch.nn.parallel.distributed import _dump_DDP_relevant_env_vars, _MixedPrecision from torch.testing._internal.common_distributed import ( MultiProcessTestCase, TEST_SKIPS, init_multigpu_helper, initialize_temp_directories, cleanup_temp_dir, simple_sparse_reduce_tests, skip_if_rocm_multiprocess, skip_if_small_worldsize, skip_if_odd_worldsize, skip_if_lt_x_gpu, nccl_skip_if_lt_x_gpu, skip_if_no_gpu, require_n_gpus_for_nccl_backend, requires_nccl_version, captured_output, with_nccl_blocking_wait, with_dist_debug_levels, verify_ddp_error_logged, DistTestCases, ) from torch.testing._internal.common_utils import ( instantiate_parametrized_tests, IS_MACOS, IS_WINDOWS, FILE_SCHEMA, IS_FBCODE, IS_SANDCASTLE, skip_but_pass_in_sandcastle, skip_but_pass_in_sandcastle_if, ) import torch.distributed.optim.post_localSGD_optimizer as post_localSGD_optimizer from torch.utils.data.distributed import DistributedSampler import operator try: import torchvision HAS_TORCHVISION = True except ImportError: HAS_TORCHVISION = False if sys.platform == "win32": import msvcrt else: import fcntl class NetWithBuffers(nn.Module): def __init__(self) -> None: super().__init__() self.a = nn.Linear(10, 10, bias=False) self.b = nn.Linear(10, 1, bias=False) self.register_buffer("buffer", torch.randn(1, 2)) def forward(self, x): self.buffer.add_(1) return self.b(self.a(x)) class Foo: def __init__(self, x): # Can be tensor or int self.x = x def __eq__(self, other): def eq(value, other): if isinstance(value, torch.Tensor): return torch.equal(value, other) return 
value == other for attr, value in self.__dict__.items(): other_value = other.__dict__[attr] if not eq(value, other_value): return False return True f = Foo(10) f.bar = 1 foo_cpu_tensor = Foo(torch.randn(3, 3)) COLLECTIVES_OBJECT_TEST_LIST = [ {"key1": 3, "key2": 4, "key3": {"nested": True}}, f, foo_cpu_tensor, "foo", [1, 2, True, "string", [4, 5, "nested"]], ] # Allowlist of distributed backends where profiling collectives is supported. PROFILING_SUPPORTED_BACKENDS = [ dist.Backend.NCCL, dist.Backend.GLOO, dist.Backend.MPI, dist.Backend.UCC, ] # Allowlist of distributed backends where profiling is supported with use_cuda=True CUDA_PROFILING_SUPPORTED_BACKENDS = [ dist.Backend.GLOO, dist.Backend.MPI, dist.Backend.NCCL, dist.Backend.UCC, ] # Allowlist of distributed backends where profiling is supported for p2p ops SEND_RECV_PROFILING_SUPPORTED_BACKENDS = [ dist.Backend.MPI, dist.Backend.GLOO, dist.Backend.NCCL, dist.Backend.UCC, ] # Dummy NamedTuple data structures to test DDP support for NamedTuple types. EXPECTED_FIELDS = ("a", "b") TestNamedTupleInput_0 = namedtuple("NamedTuple", EXPECTED_FIELDS) class TestNamedTupleInput_1(NamedTuple): a: torch.tensor b: torch.tensor skipIfNoTorchVision = skip_but_pass_in_sandcastle_if( not HAS_TORCHVISION, "no torchvision" ) BACKEND = os.environ["BACKEND"] INIT_METHOD = os.getenv("INIT_METHOD", "env://") DEFAULT_TIMEOUT = 300 CUSTOMIZED_TIMEOUT = {"test_DistributedDataParallel": 500} def get_profiling_event(event_name, profiler, dedup_gpu_user_annotation=False): event_list = ( profiler.events() if isinstance(profiler, torch.profiler.profile) else profiler.function_events ) return [ event for event in event_list if ( (event.name.endswith(event_name) or event.name.startswith(event_name)) and (not dedup_gpu_user_annotation or event.device_type != DeviceType.CUDA) ) ] def get_profiler_nccl_meta(prof): """Torch profiler includes nccl metadata in an inserted operator called "record_param_comms" We will need to test metadata obtained from profiler here""" tf = tempfile.NamedTemporaryFile( mode="w+t", suffix=".json", delete=False ) tf.close() trace_file = tf.name prof.export_chrome_trace(trace_file) with open(trace_file) as f: events = json.load(f)["traceEvents"] print(f"Trace saved to {trace_file}") # Comment to debug os.remove(trace_file) return [e for e in events if e.get("name") == "record_param_comms"] # Base error message substring on unfinished reductions. 
ddp_prev_reduction_unfinished_str = ( "Expected to have finished reduction in the prior iteration" ) # Error message substring when find_unused_parameters=True has not been passed ddp_recommend_find_unused_params_str = ( "passing the keyword argument `find_unused_parameters=True`" ) # Error message substring when find_unused_parameters=True is enabled ddp_find_unused_params_enabled_str = "Since `find_unused_parameters=True` is enabled" # Error message substring for possibility of not all model outputs being used # in loss computation ddp_outputs_not_used_in_loss_str = ( "`forward` function outputs participate in calculating loss" ) # Error message substring suggesting to use TORCH_DISTRIBUTED_DEBUG ddp_suggest_debug_mode_str = ( "set the environment variable TORCH_DISTRIBUTED_DEBUG to either INFO or DETAIL" ) class DDPUnevenTestInput(NamedTuple): name: str model: nn.Module inp: Union[torch.tensor, tuple] sync_interval: int throw_on_early_termination: bool = False hook: Callable = None state: Any = None class _FC2(nn.Module): def __init__(self) -> None: super().__init__() self.fc = nn.Linear(10, 50, bias=True) self.fc.bias.requires_grad = False def forward(self, x): x = self.fc(x) return x class Net(nn.Module): def __init__(self) -> None: super().__init__() self.fc1 = nn.Linear(2, 10, bias=False) self.fc2 = _FC2() self.fc3 = nn.Linear(50, 4, bias=False) self.relu = nn.ReLU() self.no_grad_param = nn.Parameter( torch.tensor([2, 2]).long(), requires_grad=False ) def forward(self, x): x = self.relu(self.fc1(x)) x = self.relu(self.fc2(x)) x = self.fc3(x) return F.softmax(x, dim=1) class LargeNet(nn.Module): def __init__(self) -> None: super().__init__() self.fc1 = nn.Linear(1000, 2000, bias=False) self.fc2 = nn.Linear(2000, 500, bias=False) def forward(self, x): x = self.fc1(x) x = self.fc2(x) return x class Task(nn.Module): def __init__(self) -> None: super().__init__() self.p = nn.Parameter(torch.ones(2, 2)) def forward(self, x): return self.p + x class BatchNormNet(nn.Module): def __init__(self, affine=True): super().__init__() self.fc1 = nn.Linear(2, 40, bias=False) self.bn = nn.BatchNorm1d(4, affine=affine) self.fc2 = nn.Linear(40, 4, bias=False) def forward(self, x): x = torch.reshape(self.fc1(x), (-1, 4, 10)) x = self.bn(x) x = torch.reshape(x, (-1, 40)) x = self.fc2(x) return F.softmax(x, dim=1) class UnusedParamTwoLinLayerNet(nn.Module): def __init__(self) -> None: super().__init__() self.a = nn.Linear(10, 10, bias=False) self.b = nn.Linear(10, 10, bias=False) self.c = nn.Linear(5, 5, bias=False) def forward(self, x): a = self.a(x) b = self.b(x) return (a, b) class DictOutputModule(nn.Module): def __init__(self) -> None: super().__init__() self.module = UnusedParamTwoLinLayerNet() def forward(self, x): predictions = self.module(x) loss = (predictions[0] + predictions[1]).sum() return { "predictions": predictions, "loss": loss, } class TwoLinLayerNet(nn.Module): def __init__(self) -> None: super().__init__() self.a = nn.Linear(10, 10, bias=False) self.b = nn.Linear(10, 1, bias=False) def forward(self, x): a = self.a(x) b = self.b(x) return (a, b) class EmbeddingNetDifferentParams(nn.Module): """ A module containing an embedding with different dimension or different # of parameters depending on the rank. 
""" def __init__(self, rank, diff_num_params=False): super().__init__() embedding_dim = 500 if diff_num_params or rank == 0 else 50 self.embedding = nn.Embedding(num_embeddings=10, embedding_dim=embedding_dim) self.lin = nn.Linear(embedding_dim, 1) if diff_num_params: self.lin2 = nn.Linear(1, 1, bias=False) def forward(self, x): x = self.embedding(x) return self.lin(x) class ControlFlowToyModel(nn.Module): def __init__(self) -> None: super().__init__() self.lin1 = nn.Linear(10, 10, bias=False) self.lin2 = nn.Linear(10, 10, bias=False) def forward(self, x): # Second layer is used dependent on input x. use_second_layer = torch.equal(x, torch.ones(20, 10, device=x.device)) if use_second_layer: return self.lin2(F.relu(self.lin1(x))) else: return F.relu(self.lin1(x)) DDP_NET = Net() BN_NET = BatchNormNet() BN_NET_NO_AFFINE = BatchNormNet(affine=False) ONLY_SBN_NET = nn.SyncBatchNorm(2, momentum=0.99) def get_timeout(test_id): test_name = test_id.split(".")[-1] if test_name in CUSTOMIZED_TIMEOUT: return CUSTOMIZED_TIMEOUT[test_name] else: return DEFAULT_TIMEOUT default_pg_timeout = 60 CUSTOM_PG_TIMEOUT = { # This test runs slowly and needs additional time to complete, otherwise can # be taken down by TORCH_NCCL_ASYNC_ERROR_HANDLING "test_ddp_uneven_inputs": 300, # This test has a short timeout since it tests being taken down by # TORCH_NCCL_ASYNC_ERROR_HANDLING which we want to happen quickly. "test_ddp_model_diff_across_ranks": 5, # This test has a short timeout since it tests being taken down by # TORCH_NCCL_ASYNC_ERROR_HANDLING which we want to happen quickly. "test_ddp_has_finalized": 5, } def require_backend_is_available(backends): def check(backend): if backend == dist.Backend.GLOO: return dist.is_gloo_available() if backend == dist.Backend.NCCL: return dist.is_nccl_available() if backend == dist.Backend.MPI: return dist.is_mpi_available() if backend == dist.Backend.UCC: return dist.is_ucc_available() if backend in DistTestCases.backend_feature["plugin"]: return True return False if BACKEND not in backends: return skip_but_pass_in_sandcastle( f"Test requires backend {BACKEND} to be one of {backends}" ) if not check(dist.Backend(BACKEND)): return skip_but_pass_in_sandcastle( f"Test requires backend {BACKEND} to be available" ) return lambda func: func def require_world_size(world_size): if int(os.environ["WORLD_SIZE"]) < world_size: return skip_but_pass_in_sandcastle( f"Test requires world size of {world_size:d}" ) return lambda func: func @contextmanager def _lock(): TEMP_DIR = os.environ["TEMP_DIR"] lockfile = os.path.join(TEMP_DIR, "lockfile") with open(lockfile, "w") as lf: try: if sys.platform == "win32": msvcrt.locking(lf.fileno(), msvcrt.LK_RLCK, 1) yield else: fcntl.flock(lf.fileno(), fcntl.LOCK_EX) yield finally: if sys.platform == "win32": msvcrt.locking(lf.fileno(), msvcrt.LK_UNLCK, 1) else: fcntl.flock(lf.fileno(), fcntl.LOCK_UN) lf.close() @contextmanager def _rank_temp_file(): if dist.get_rank() == 0: fd, name = tempfile.mkstemp() os.close(fd) else: name = None object_list = [name] dist.broadcast_object_list(object_list) name = object_list[0] try: yield name finally: if dist.get_rank() == 0: os.remove(name) def _build_tensor(size, value=None, dtype=torch.float, device_id=None): if value is None: value = size if device_id is None: return torch.empty(size, size, size, dtype=dtype).fill_(value) else: return torch.empty(size, size, size, dtype=dtype).fill_(value).cuda(device_id) def _build_multidim_tensor(dim, dim_size, value=None, dtype=torch.float): if value is None: value = dim 
return torch.empty(size=[dim_size for _ in range(dim)], dtype=dtype).fill_(value) def _create_autograd_profiler(): return torch.autograd.profiler.profile(record_shapes=True) def _create_torch_profiler(): return torch.profiler.profile( activities=[ torch.profiler.ProfilerActivity.CPU, ], record_shapes=True, ) class Barrier: barrier_id = 0 @classmethod def init(cls): cls.barrier_id = 0 barrier_dir = os.path.join(os.environ["TEMP_DIR"], "barrier") for f_name in os.listdir(barrier_dir): os.unlink(os.path.join(barrier_dir, f_name)) @classmethod def sync(cls, wait_for=None, timeout=10): if wait_for is None: wait_for = dist.get_world_size() cls.barrier_id += 1 barrier_dir = os.path.join(os.environ["TEMP_DIR"], "barrier") pid = str(os.getpid()) barrier_file = os.path.join(barrier_dir, pid) with _lock(): with open(barrier_file, "w") as f: f.write(str(cls.barrier_id)) start_time = time.time() while True: arrived = 0 with _lock(): for f_name in os.listdir(barrier_dir): with open(os.path.join(barrier_dir, f_name)) as f: data = f.read() if int(data) >= cls.barrier_id: arrived += 1 if arrived == wait_for: break if time.time() - start_time > timeout: raise RuntimeError("barrier timeout") time.sleep(0.1) class TestDistBackend(MultiProcessTestCase): @classmethod def setUpClass(cls): os.environ["MASTER_ADDR"] = str(MASTER_ADDR) # Not setting MASTER_PORT and get a random free port super().setUpClass() def setUp(self): super().setUp() # initialize temp directories initialize_temp_directories() # initialize Barrier Barrier.init() # Skip return code checking for following tests as they are expected to # crash a process due to TORCH_NCCL_ASYNC_ERROR_HANDLING. self.skip_return_code_checks = [self.test_ddp_has_finalized.__wrapped__] def tearDown(self): cleanup_temp_dir() super().tearDown() @property def init_method(self): return f"{FILE_SCHEMA}{self.file_name}" @property def destroy_pg_upon_exit(self) -> bool: # Overriding base test class: do not auto destroy PG upon exit. return False @classmethod def _run(cls, rank, test_name, file_name, pipe, **kwargs): if BACKEND == "nccl" and not torch.cuda.is_available(): sys.exit(TEST_SKIPS["no_cuda"].exit_code) self = cls(test_name) self.rank = rank self.file_name = file_name if torch.cuda.is_available() and torch.cuda.device_count() < int( self.world_size ): sys.exit(TEST_SKIPS[f"multi-gpu-{self.world_size}"].exit_code) try: pg_timeout_seconds = CUSTOM_PG_TIMEOUT.get(test_name, default_pg_timeout) timeout = timedelta(seconds=pg_timeout_seconds) dist.init_process_group( init_method=self.init_method, backend=BACKEND, world_size=int(self.world_size), rank=self.rank, timeout=timeout, ) except RuntimeError as e: if "recompile" in e.args[0]: sys.exit(TEST_SKIPS["backend_unavailable"].exit_code) raise # Execute barrier prior to running test to ensure that every process # has finished initialization and that the following test # immediately exiting due to a skip doesn't cause flakiness. self._barrier() self.run_test(test_name, pipe) self._barrier() dist.destroy_process_group() sys.exit(0) # Needed since MultiProcessTestCase assumes a world_size of 4, but we # run these tests under other various world_sizes. 
@property def world_size(self): return os.environ["WORLD_SIZE"] class DistributedTest: class _DistTestBase: def _barrier(self, *args, **kwargs): Barrier.sync(*args, **kwargs) def _init_group_test(self, **kwargs): group = [1, 2] group_id = dist.new_group(group, **kwargs) rank = dist.get_rank() if rank not in group: return ([], None, rank) return (group, group_id, rank) def _init_full_group_test(self, **kwargs): group = list(range(0, dist.get_world_size())) group_id = dist.new_group(**kwargs) rank = dist.get_rank() return (group, group_id, rank) def _init_global_test(self): group = list(range(0, dist.get_world_size())) group_id = dist.group.WORLD rank = dist.get_rank() return (group, group_id, rank) def _verify_buffers_equal(self, m1, m2): # verify buffers across models m1_buf_dict = dict(m1.module.named_buffers()) for name, buf in m2.module.named_buffers(): self.assertEqual(buf, m1_buf_dict[name]) # Verify buffers across ranks. m1_buffers = list(m1.buffers()) m2_buffers = list(m2.buffers()) for (buf1, buf2) in zip(m1_buffers, m2_buffers): gathered_bufs = [ torch.empty_like(buf1) for _ in range(dist.get_world_size()) ] dist.all_gather(gathered_bufs, buf1) gathered_bufs_m2 = [ torch.empty_like(buf2) for _ in range(dist.get_world_size()) ] for b in gathered_bufs: self.assertEqual(b, buf1) dist.all_gather(gathered_bufs_m2, buf2) for b in gathered_bufs_m2: self.assertEqual(b, buf2) def _sanity_check_profiler_nccl_meta(self, nccl_meta_events): """Torch profiler includes nccl metadata in an inserted operator called "record_param_comms" We test for basic fields in this profiler event that correspond to the nccl communication collectives""" per_coll_meta = defaultdict(list) for e in nccl_meta_events: args = e.get("args", {}) collname = args.get("Collective name", "") self.assertNotEqual(collname, "") self.assertNotEqual(args.get("dtype", ""), "") per_coll_meta[collname].append(args) if collname in {"wait"}: continue self.assertEqual(args["Process Group Description"], "default_pg") self.assertNotEqual(args["Process Group Ranks"], "") self.assertGreaterEqual(args.get("In msg nelems", -1), 0) self.assertGreaterEqual(args.get("Out msg nelems", -1), 0) self.assertGreaterEqual(args.get("Group size", -1), 0) self.assertGreaterEqual(args.get("Global rank start", -1), 0) self.assertGreaterEqual(args.get("Global rank stride", -1), 0) # print(per_coll_meta) return per_coll_meta def test_dump_DDP_relevant_env_vars(self): with captured_output() as (out, _): _dump_DDP_relevant_env_vars() lines = out.getvalue().splitlines() def format_line(var): return f"env:{var}={os.environ[var] if var in os.environ else 'N/A'}" # Check relevant env vars vars = [ "MASTER_ADDR", "MASTER_PORT", "WORLD_SIZE", "NCCL_TOPO_DUMP_FILE", # N/A "TORCH_NCCL_ASYNC_ERROR_HANDLING", ] for var in vars: line = format_line(var) self.assertIn(line, lines) # Check irrelevant env vars vars = [ "xxx", "yyy", "zzz", ] for var in vars: line = format_line(var) self.assertNotIn(line, lines) # GET RANK def test_get_rank(self): test_dir = os.path.join(os.environ["TEMP_DIR"], "test_dir") pid = str(os.getpid()) num_processes = dist.get_world_size() with open(os.path.join(test_dir, pid), "w") as f: f.write(str(dist.get_rank())) self._barrier() all_ranks = set() for f_name in os.listdir(test_dir): with open(os.path.join(test_dir, f_name)) as f: all_ranks.add(int(f.read())) self.assertEqual(len(all_ranks), num_processes) self._barrier() if dist.get_rank() == 0: for f_name in os.listdir(test_dir): os.unlink(os.path.join(test_dir, f_name)) self._barrier() def 
test_get_backend(self): if dist.get_world_size() > 2: group = [1, 2] else: group = [0, 1] group_id = dist.new_group(group) backend_str = BACKEND.lower() self.assertEqual(dist.get_backend(), backend_str) if dist.get_rank() in group: self.assertEqual(dist.get_backend(group_id), backend_str) else: with self.assertRaisesRegex( ValueError, "Invalid process group specified" ): dist.get_backend(group_id) def test_Backend_enum_class(self): # test parsing backend = BACKEND.lower() self.assertEqual(dist.Backend(BACKEND.upper()), backend) self.assertEqual(dist.Backend(BACKEND), backend) with self.assertRaises(ValueError): dist.Backend(None) with self.assertRaises(ValueError): dist.Backend(3) with self.assertRaises(ValueError): dist.Backend(["gloo"]) # Test destroy def test_destroy_group(self): if dist.get_world_size() > 2: group = [1, 2] else: group = [0, 1] group_id = dist.new_group(group) self._barrier() dist.destroy_process_group(group_id) # Test get rank and size of group def test_get_rank_size_group(self): if dist.get_world_size() > 2: group = [1, 2] else: group = [0, 1] group_id = dist.new_group(group) if dist.get_rank() in group: self.assertEqual(dist.get_world_size(group_id), 2) self.assertTrue(dist.get_rank(group_id) in list(range(2))) else: self.assertEqual(dist.get_world_size(group_id), -1) self.assertEqual(dist.get_rank(group_id), -1) # Test destroy full groups def test_destroy_full_group(self): _, group_id, _ = self._init_full_group_test() self._barrier() dist.destroy_process_group(group_id) # Test get rank and size of full group def test_get_rank_size_full_group(self): _, group_id, _ = self._init_full_group_test() self.assertEqual(dist.get_world_size(group_id), dist.get_world_size()) self.assertEqual(dist.get_rank(group_id), dist.get_rank()) def _test_barrier_timeout(self, group_id, timeout): local_rank = dist.get_rank(group_id) # Only execute barrier on rank == 0, causing it to timeout if local_rank == 0: expected_time = time.time() + timeout.total_seconds() # In debug mode, we execute a monitored_barrier before the # collective, so assert on that. if dist.get_debug_level() == dist.DebugLevel.DETAIL: exception_ctx = self.assertRaisesRegex( Exception, "failed to pass monitoredBarrier" ) else: exception_ctx = self.assertRaisesRegex( Exception, " (Timed out|closed|timeout) " ) with exception_ctx: dist.barrier(group_id) self.assertGreaterAlmostEqual(time.time(), expected_time, delta=0.1) else: pass @skip_but_pass_in_sandcastle_if( BACKEND != "gloo", "Only gloo backend supports timeouts" ) @skip_but_pass_in_sandcastle_if( not INIT_METHOD.startswith("file://"), "Requires file:// initialization method. " + "Both tcp:// and env:// rely on the TCP store for which " "reinitialization has proven racy.", ) def test_barrier_timeout_global(self): dist.destroy_process_group() # Explicitly pass world size to the barrier because we've # just destroyed any state in torch.distributed. 
self._barrier(wait_for=int(os.environ["WORLD_SIZE"])) # Reinitialize global process group timeout = timedelta(seconds=1) dist.init_process_group( init_method=INIT_METHOD, backend=BACKEND, world_size=int(os.environ["WORLD_SIZE"]), rank=self.rank, timeout=timeout, ) self._test_barrier_timeout(dist.group.WORLD, timeout) @skip_if_small_worldsize @skip_but_pass_in_sandcastle_if( BACKEND != "gloo", "Only gloo backend supports timeouts" ) def test_barrier_timeout_group(self): timeout = timedelta(seconds=5) _, group_id, _ = self._init_group_test(timeout=timeout) if group_id is not None: self._test_barrier_timeout(group_id, timeout) @skip_but_pass_in_sandcastle_if( BACKEND != "gloo", "Only gloo backend supports timeouts" ) def test_barrier_timeout_full_group(self): timeout = timedelta(seconds=1) _, group_id, _ = self._init_full_group_test(timeout=timeout) if group_id is not None: self._test_barrier_timeout(group_id, timeout) @skip_but_pass_in_sandcastle_if( BACKEND not in DistTestCases.backend_feature["subgroup"], f"The {BACKEND} backend does not support creating subgroups on CUDA devices", ) @require_world_size(4) @skip_if_lt_x_gpu(2) def test_new_subgroups(self): subgroup_size = 2 cur_subgroup, subgroups = dist.new_subgroups(subgroup_size) world_size = dist.get_world_size() self.assertEqual(cur_subgroup.size(), subgroup_size) self.assertEqual(len(subgroups), world_size / subgroup_size) self.assertFalse(dist._rank_not_in_group(cur_subgroup)) for subgroup in subgroups: dist.destroy_process_group(subgroup) @skip_but_pass_in_sandcastle_if( BACKEND not in DistTestCases.backend_feature["subgroup"], f"The {BACKEND} backend does not support creating subgroups on CUDA devices", ) @skip_if_no_gpu def test_new_subgroups_group_size_exceeds_world_size(self): with self.assertRaisesRegex(ValueError, "must not exceed"): dist.new_subgroups(100) @skip_but_pass_in_sandcastle_if( BACKEND not in DistTestCases.backend_feature["subgroup"], f"The {BACKEND} backend does not support creating subgroups on CUDA devices", ) @require_world_size(4) @skip_if_lt_x_gpu(4) def test_new_subgroups_world_size_not_divisible_by_group_size(self): with self.assertRaisesRegex( ValueError, "The world size must be divisible by 'group_size'" ): dist.new_subgroups(3) @skip_but_pass_in_sandcastle_if( BACKEND not in DistTestCases.backend_feature["subgroup"], f"The {BACKEND} backend does not support creating subgroups on CUDA devices", ) @require_world_size(4) @skip_if_lt_x_gpu(4) def test_new_subgroups_by_enumeration(self): _group, _group_id, rank = self._init_global_test() rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) device_id = rank_to_GPU[rank][0] cur_subgroup, subgroups = dist.new_subgroups_by_enumeration( ranks_per_subgroup_list=[[0, 2], [1, 3]] ) if device_id >= 4: self.assertIsNone(cur_subgroup) else: self.assertEqual(cur_subgroup.size(), 2) self.assertEqual(len(subgroups), 2) if device_id == 0 or device_id == 2: self.assertEqual(cur_subgroup, subgroups[0]) else: self.assertEqual(cur_subgroup, subgroups[1]) for subgroup in subgroups: dist.destroy_process_group(subgroup) @skip_but_pass_in_sandcastle_if( BACKEND not in DistTestCases.backend_feature["subgroup"], f"The {BACKEND} backend does not support creating subgroups on CUDA devices", ) @require_world_size(4) @skip_if_lt_x_gpu(4) def test_new_subgroups_by_enumeration_input_rank_exceeds_world_size(self): _group, group_id, _rank = self._init_global_test() init_multigpu_helper(dist.get_world_size(), BACKEND) world_size = get_world_size(group_id) with 
self.assertRaisesRegex( ValueError, "The new group's rank should be within the world_size set by init_process_group", ): dist.new_subgroups_by_enumeration( ranks_per_subgroup_list=[[0, 1], [world_size, 2]] ) @skip_but_pass_in_sandcastle_if( BACKEND not in DistTestCases.backend_feature["subgroup"], f"The {BACKEND} backend does not support creating subgroups on CUDA devices", ) @skip_if_no_gpu def test_new_subgroups_by_enumeration_negative_input_rank(self): self._init_global_test() with self.assertRaisesRegex( ValueError, "The new group's rank should be within the world_size set by init_process_group", ): dist.new_subgroups_by_enumeration( ranks_per_subgroup_list=[[-1, -2], [-3, -4]] ) @skip_but_pass_in_sandcastle_if( BACKEND not in DistTestCases.backend_feature["subgroup"], f"The {BACKEND} backend does not support creating subgroups on CUDA devices", ) @require_world_size(4) @skip_if_lt_x_gpu(4) def test_new_subgroups_overlap_not_allowed(self): with self.assertRaisesRegex( ValueError, "Rank 1 has appeared in both subgroup" ): dist.new_subgroups_by_enumeration( ranks_per_subgroup_list=[[0], [1, 2], [1, 3]] ) @skip_but_pass_in_sandcastle_if( BACKEND not in DistTestCases.backend_feature["subgroup"], f"The {BACKEND} backend does not support creating subgroups on CUDA devices", ) @skip_if_lt_x_gpu(2) def test_average_parameters(self): rank = dist.get_rank() rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) device_id = rank_to_GPU[rank][0] model = nn.Sequential( nn.Conv2d(3, 3, kernel_size=3, padding=1), nn.ReLU(), nn.Linear(1, 5, bias=False), ).cuda(device_id) # Test global model averaging for p in model.parameters(): p.data = torch.ones_like(p.data) model_averaging_utils.average_parameters( params=model.parameters(), process_group=None ) # Every element will be the same as the input. for p in model.parameters(): self.assertEqual(p.data, torch.ones_like(p.data)) # Test partial model averaging for p in model.parameters(): p.data = torch.ones_like(p.data) * rank group_nccl = dist.new_group(ranks=[0, 1], backend="nccl") model_averaging_utils.average_parameters( params=model.parameters(), process_group=group_nccl ) if not dist._rank_not_in_group(group_nccl): # Every element on device 0 or 1 should be the average of 0 and 1, i.e., 0.5. for p in model.parameters(): self.assertEqual(p.data, torch.ones_like(p.data) * 0.5) else: # Every element on device not in the subgroup should remain the same. for p in model.parameters(): self.assertEqual(p.data, torch.ones_like(p.data) * rank) @skip_but_pass_in_sandcastle_if( BACKEND not in DistTestCases.backend_feature["subgroup"], f"The {BACKEND} backend does not support creating subgroups on CUDA devices", ) @skip_if_lt_x_gpu(2) def test_periodic_model_averager(self): rank = dist.get_rank() world_size = dist.get_world_size() rank_to_GPU = init_multigpu_helper(world_size, BACKEND) device_id = rank_to_GPU[rank][0] model = nn.Linear(1, 5, bias=False).cuda(device_id) param = next(model.parameters()) tensor = torch.ones_like(param.data) * rank expected_avg_tensor = ( torch.ones_like(param.data) * sum(range(world_size)) / world_size ) period = 4 for warmup_steps in [12, 13, 14, 15]: averager = averagers.PeriodicModelAverager( period=period, warmup_steps=warmup_steps ) for step in range(0, 20): # Reset the parameters at every step. 
param.data = copy.deepcopy(tensor) for params in model.parameters(): # mock grad params.grad = torch.ones_like(param.data) averager.average_parameters(model.parameters()) if step >= warmup_steps and (step - warmup_steps) % period == 0: self.assertEqual(param.data, expected_avg_tensor) else: # No model averaging, so the parameters are not updated. self.assertEqual(param.data, tensor) @skip_if_lt_x_gpu(2) def test_periodic_model_averager_param_group(self): rank = dist.get_rank() world_size = dist.get_world_size() rank_to_GPU = init_multigpu_helper(world_size, BACKEND) device_id = rank_to_GPU[rank][0] model = nn.Linear(1, 5, bias=False).cuda(device_id) param = next(model.parameters()) opt = torch.optim.SGD(model.parameters(), lr=0.1) period = 4 for warmup_steps in [12, 13, 14, 15]: averager = averagers.PeriodicModelAverager( period=period, warmup_steps=warmup_steps ) for step in range(0, 20): # Reset the parameters at every step. for param_group in opt.param_groups: for params in param_group["params"]: # mock grad params.grad = torch.ones_like(param.data) * rank params.data = torch.ones_like(param.data) * rank averager.average_parameters(opt.param_groups) if step >= warmup_steps and (step - warmup_steps) % period == 0: for param_group in opt.param_groups: for params in param_group["params"]: if params.grad is None: continue self.assertEqual( param.data, torch.ones_like(param.data) * sum(range(world_size)) / world_size, ) else: # No model averaging, so the parameters are not updated. for param_group in opt.param_groups: for params in param_group["params"]: if params.grad is None: continue self.assertEqual( param.data, torch.ones_like(param.data) * rank ) @skip_but_pass_in_sandcastle_if( BACKEND not in DistTestCases.backend_feature["subgroup"], f"The {BACKEND} backend does not support creating subgroups on CUDA devices", ) @skip_if_lt_x_gpu(2) def test_1_level_hierarchical_model_averager_equivalent_to_periodic_model_averager( self, ): rank = dist.get_rank() world_size = dist.get_world_size() rank_to_GPU = init_multigpu_helper(world_size, BACKEND) device_id = rank_to_GPU[rank][0] model = nn.Linear(1, 5, bias=False).cuda(device_id) param = next(model.parameters()) tensor = torch.ones_like(param.data) * rank expected_avg_tensor = ( torch.ones_like(param.data) * sum(range(world_size)) / world_size ) period = 4 for warmup_steps in [12, 13, 14, 15]: averager = hierarchicalSGD.HierarchicalModelAverager( # Run the global averaging at a period of 4, # which is equivalent to the above periodic model averaging test case. period_group_size_dict=OrderedDict([(period, world_size)]), warmup_steps=warmup_steps, ) averager = averagers.PeriodicModelAverager( period=period, warmup_steps=warmup_steps ) for step in range(0, 20): # Reset the parameters at every step. param.data = copy.deepcopy(tensor) for params in model.parameters(): # mock grad params.grad = torch.ones_like(param.data) averager.average_parameters(model.parameters()) if step >= warmup_steps and (step - warmup_steps) % period == 0: self.assertEqual(param.data, expected_avg_tensor) else: # No model averaging, so the parameters are not updated. 
self.assertEqual(param.data, tensor) @skip_but_pass_in_sandcastle_if( BACKEND not in DistTestCases.backend_feature["subgroup"], f"The {BACKEND} backend does not support creating subgroups on CUDA devices", ) @require_world_size(4) @skip_if_lt_x_gpu(4) def test_3_level_hierarchical_model_averager(self): rank = dist.get_rank() world_size = dist.get_world_size() rank_to_GPU = init_multigpu_helper(world_size, BACKEND) device_id = rank_to_GPU[rank][0] model = nn.Linear(1, 5, bias=False).cuda(device_id) param = next(model.parameters()) tensor = torch.ones_like(param.data) * rank # Set up such a hierarchical model averaging as follows: # after the first 10 warmup steps, # run model averaging every 2 steps within each subgroup of size 2, # run model averaging every 4 steps within each subgroup of size 3, # and run the global model averaging every 8 steps. # If there is a conflict in model averaging at a step, only run the highest-level model averaging. warmup_steps = 10 subgroup_size1 = 2 subgroup_avg_period1 = 2 subgroup_size2 = 4 subgroup_avg_period2 = 4 global_avg_period = 8 period_group_size_dict = OrderedDict( [ (subgroup_avg_period1, subgroup_size1), (subgroup_avg_period2, subgroup_size2), (global_avg_period, world_size), ] ) averager = hierarchicalSGD.HierarchicalModelAverager( period_group_size_dict=period_group_size_dict, warmup_steps=warmup_steps ) self.assertEqual(dist.get_pg_count(), len(period_group_size_dict)) subgroup1 = averager.period_process_group_dict[subgroup_avg_period1] subgroup2 = averager.period_process_group_dict[subgroup_avg_period2] real_group_ranks_res1 = _get_pg_config(subgroup1)['ranks'] real_group_ranks_res2 = _get_pg_config(subgroup2)['ranks'] expect_group_ranks_res1 = ( rank // subgroup_size1 * subgroup_size1 + np.array(list(range(subgroup_size1))) ).tolist() expect_group_ranks_res2 = ( rank // subgroup_size2 * subgroup_size2 + np.array(list(range(subgroup_size2))) ).tolist() self.assertEqual(real_group_ranks_res1, expect_group_ranks_res1) self.assertEqual(real_group_ranks_res2, expect_group_ranks_res2) expected_avg_tensor_within_subgroup1 = ( torch.ones_like(param.data) * sum(real_group_ranks_res1) / subgroup_size1 ) expected_avg_tensor_within_subgroup2 = ( torch.ones_like(param.data) * sum(real_group_ranks_res2) / subgroup_size2 ) expected_global_avg_tensor = ( torch.ones_like(param.data) * sum(range(world_size)) / world_size ) for step in range(0, 25): # Reset the parameters at every step. param.data = copy.deepcopy(tensor) for params in model.parameters(): # mock grad params.grad = torch.ones_like(param.data) averager.average_parameters(model.parameters()) if step == 16 or step == 24: # Run global model averaging when `step` can be divided by 8. self.assertEqual(param.data, expected_global_avg_tensor) elif step == 12 or step == 20: # Run model averaging within subgroup when `step` can be divided by 4 but not by 8. self.assertEqual(param.data, expected_avg_tensor_within_subgroup2) elif step == 10 or step == 14 or step == 18 or step == 22: # Run model averaging within subgroup when `step` can be divided by 2 but not by 4 or 8. self.assertEqual(param.data, expected_avg_tensor_within_subgroup1) else: # No model averaging, so the parameters are not updated. 
self.assertEqual(param.data, tensor) # Coalescing manager (sync mode) @skip_if_no_gpu @skip_but_pass_in_sandcastle_if( BACKEND != "nccl" or IS_FBCODE or IS_SANDCASTLE, "Coalescing manager currently tests with NCCL only; internal test flaky" ) def test_coalescing_manager(self): self._barrier() rank = dist.get_rank() world_size = dist.get_world_size() rank_to_GPU = init_multigpu_helper(world_size, BACKEND) device_id = rank_to_GPU[rank][0] torch.cuda.set_device(device_id) num_colls = 2 size_per_coll = 8 small_tensors = [ torch.ones(size_per_coll, device=device_id) for _ in range(num_colls) ] with dist._coalescing_manager(): for i in range(num_colls): dist.all_reduce(small_tensors[i]) big_tensor = torch.ones(num_colls * size_per_coll, device=device_id) dist.all_reduce(big_tensor) for i in range(num_colls): self.assertEqual( small_tensors[i], big_tensor[i * size_per_coll : (i + 1) * size_per_coll] ) self._barrier() # Coalescing manager (async mode) @skip_if_no_gpu @skip_but_pass_in_sandcastle_if( BACKEND != "nccl" or IS_FBCODE or IS_SANDCASTLE, "Coalescing manager currently tests with NCCL only; internal test flaky" ) def test_coalescing_manager_async(self): self._barrier() rank = dist.get_rank() world_size = dist.get_world_size() rank_to_GPU = init_multigpu_helper(world_size, BACKEND) device_id = rank_to_GPU[rank][0] torch.cuda.set_device(device_id) num_colls = 2 size_per_coll = 8 small_tensors = [ torch.ones(size_per_coll, device=device_id) for _ in range(num_colls) ] with dist._coalescing_manager(async_ops=True) as cm: for i in range(num_colls): dist.all_reduce(small_tensors[i]) cm.wait() big_tensor = torch.ones(num_colls * size_per_coll, device=device_id) dist.all_reduce(big_tensor) for i in range(num_colls): self.assertEqual( small_tensors[i], big_tensor[i * size_per_coll : (i + 1) * size_per_coll] ) self._barrier() # NCCL Batch SEND RECV @skip_if_no_gpu @skip_but_pass_in_sandcastle_if(BACKEND != "nccl", "NCCL Batch Send Recv Only") @requires_nccl_version((2, 7, 0), "Need NCCL 2.7+ for send/recv") def test_batch_isend_irecv_nccl(self): self._barrier() rank = dist.get_rank() world_size = dist.get_world_size() rank_to_GPU = init_multigpu_helper(world_size, BACKEND) device_id = rank_to_GPU[rank][0] torch.cuda.set_device(device_id) p2p_op_list = [] recv_tensors = [None for _ in range(world_size)] expected_tensors = [None for _ in range(world_size)] for val in ["1", "0"]: os.environ["TORCH_NCCL_BLOCKING_WAIT"] = val for src in range(0, world_size): send_tensor = _build_tensor(rank + 1, device_id=device_id).fill_( src ) recv_tensors[src] = _build_tensor( src + 1, value=-1, device_id=device_id ).fill_(-1) expected_tensors[src] = _build_tensor( src + 1, value=-1, device_id=device_id ).fill_(rank) recv_op = dist.P2POp(dist.irecv, recv_tensors[src], src) p2p_op_list.append(recv_op) send_op = dist.P2POp(dist.isend, send_tensor, src) p2p_op_list.append(send_op) reqs = dist.batch_isend_irecv(p2p_op_list) for req in reqs: req.wait() for src in range(0, world_size): self.assertEqual(recv_tensors[src], expected_tensors[src]) self._barrier() @skip_if_no_gpu @skip_but_pass_in_sandcastle_if(BACKEND != "nccl", "NCCL Batch Send Recv Only") @requires_nccl_version((2, 7, 0), "Need NCCL 2.7+ for send/recv") def test_batch_isend_irecv_ring_exchange_nccl(self): self._barrier() rank = dist.get_rank() world_size = dist.get_world_size() rank_to_GPU = init_multigpu_helper(world_size, BACKEND) device_id = rank_to_GPU[rank][0] torch.cuda.set_device(device_id) send_tensor = _build_tensor(world_size, device_id=device_id) 
recv_tensor = _build_tensor(world_size, value=-1, device_id=device_id) send_op = dist.P2POp(dist.isend, send_tensor, (rank + 1) % world_size) recv_op = dist.P2POp( dist.irecv, recv_tensor, (rank - 1 + world_size) % world_size ) reqs = dist.batch_isend_irecv([send_op, recv_op]) for req in reqs: req.wait() self._barrier() @skip_if_no_gpu @skip_but_pass_in_sandcastle_if(BACKEND != "nccl", "NCCL Batch Send Recv Only") @requires_nccl_version((2, 7, 0), "Need NCCL 2.7+ for send/recv") def test_batch_isend_irecv_self_nccl(self): self._barrier() # Ensure the process group has been fully initialized (needed by # the first sub-group batch_isend_irecv call) dist.barrier() rank = dist.get_rank() rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) device_id = rank_to_GPU[rank][0] p2p_op_list = [] if rank == 0: send_tensor = _build_tensor(rank + 1, device_id=device_id) recv_tensor = _build_tensor(rank + 1, value=-1, device_id=device_id) recv_op = dist.P2POp(dist.irecv, recv_tensor, 0) p2p_op_list.append(recv_op) send_op = dist.P2POp(dist.isend, send_tensor, 0) p2p_op_list.append(send_op) reqs = dist.batch_isend_irecv(p2p_op_list) for req in reqs: req.wait() self._barrier() @skip_if_no_gpu @skip_if_small_worldsize @skip_but_pass_in_sandcastle_if(BACKEND != "nccl", "NCCL Batch Send Recv Only") @requires_nccl_version((2, 7, 0), "Need NCCL 2.7+ for send/recv") def test_batch_isend_irecv_no_rank_zero_nccl(self): self._barrier() # Ensure the process group has been fully initialized (needed by # the first sub-group batch_isend_irecv call) dist.barrier() rank = dist.get_rank() rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) device_id = rank_to_GPU[rank][0] torch.cuda.set_device(device_id) p2p_op_list = [] if rank == 1: peer = 2 elif rank == 2: peer = 1 if rank in [1, 2]: send_tensor = _build_tensor(rank + 1, device_id=device_id) recv_tensor = _build_tensor(peer + 1, value=-1, device_id=device_id) recv_op = dist.P2POp(dist.irecv, recv_tensor, peer) p2p_op_list.append(recv_op) send_op = dist.P2POp(dist.isend, send_tensor, peer) p2p_op_list.append(send_op) reqs = dist.batch_isend_irecv(p2p_op_list) for req in reqs: req.wait() self._barrier() # GLOO Batch SEND RECV CPU @skip_but_pass_in_sandcastle_if(BACKEND != "gloo", "GLOO Batch Send Recv CPU") def test_batch_isend_irecv_gloo(self): self._barrier() rank = dist.get_rank() p2p_op_list = [] for src in range(0, dist.get_world_size()): if src == rank: continue send_tensor = _build_tensor(rank + 1) recv_tensor = _build_tensor(src + 1, value=-1) recv_op = dist.P2POp(dist.irecv, recv_tensor, src) p2p_op_list.append(recv_op) send_op = dist.P2POp(dist.isend, send_tensor, src) p2p_op_list.append(send_op) reqs = dist.batch_isend_irecv(p2p_op_list) for req in reqs: req.wait() self._barrier() # GLOO Batch SEND RECV CPU with provided tags @skip_but_pass_in_sandcastle_if(BACKEND != "gloo", "GLOO Batch Send Recv CPU") def test_batch_isend_irecv_gloo_tags(self): self._barrier() rank = dist.get_rank() p2p_op_list = [] for src in range(0, dist.get_world_size()): if src == rank: continue send_tensor = _build_tensor(rank + 1) recv_tensor = _build_tensor(src + 1, value=-1) recv_op = dist.P2POp(dist.irecv, recv_tensor, src, tag=src) p2p_op_list.append(recv_op) send_op = dist.P2POp(dist.isend, send_tensor, src, tag=rank) p2p_op_list.append(send_op) reqs = dist.batch_isend_irecv(p2p_op_list) for req in reqs: req.wait() self._barrier() # NCCL Batch SEND RECV Op Error @skip_but_pass_in_sandcastle_if(BACKEND != "nccl", "NCCL Batch Send Recv Only") 
@requires_nccl_version((2, 7, 0), "Need NCCL 2.7+ for send/recv") def test_batch_isend_irecv_op_err(self): self._barrier() rank = dist.get_rank() if rank == 0: rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) device_id = rank_to_GPU[rank][0] with self.assertRaisesRegex(ValueError, "^Invalid ``op``"): send_tensor = _build_tensor(rank + 1, device_id=device_id) send_op = dist.P2POp(dist.broadcast, send_tensor, 1) dist.batch_isend_irecv([send_op]) # NCCL Batch SEND RECV p2p_op_list Error @skip_but_pass_in_sandcastle_if(BACKEND != "nccl", "NCCL Batch Send Recv Only") @requires_nccl_version((2, 7, 0), "Need NCCL 2.7+ for send/recv") def test_batch_isend_irecv_op_list_err(self): self._barrier() rank = dist.get_rank() if rank == 0: with self.assertRaisesRegex(ValueError, "^Invalid ``p2p_op_list``"): dist.batch_isend_irecv([1, 2]) # NCCL Batch SEND RECV Mixed Backend Error @skip_but_pass_in_sandcastle_if(BACKEND != "nccl", "NCCL Batch Send Recv Only") @requires_nccl_version((2, 7, 0), "Need NCCL 2.7+ for send/recv") def test_batch_isend_irecv_mixed_backend_err(self): self._barrier() rank = dist.get_rank() init_multigpu_helper(dist.get_world_size(), BACKEND) group_gloo = dist.new_group(ranks=[0, 1], backend="gloo") group_nccl = dist.new_group(ranks=[0, 1], backend="nccl") if rank == 0: with self.assertRaisesRegex( ValueError, "All ops need to use the same group" ): send_tensor = _build_tensor(rank + 1) send_op_gloo = dist.P2POp(dist.isend, send_tensor, 1, group_gloo) send_op_nccl = dist.P2POp(dist.isend, send_tensor, 1, group_nccl) dist.batch_isend_irecv([send_op_gloo, send_op_nccl]) # NCCL SEND RECV @skip_if_no_gpu @skip_but_pass_in_sandcastle_if(BACKEND != "nccl", "NCCL Send Recv Only") @requires_nccl_version((2, 7, 0), "Need NCCL 2.7+ for send/recv") def _test_send_recv_nccl(self, profiler_ctx=None): # TODO: now that nccl send/recv is supported, there does not seem to # be a need to have nccl send/recv be tested separately. rank = dist.get_rank() world_size = dist.get_world_size() rank_to_GPU = init_multigpu_helper(world_size, BACKEND) device_id = rank_to_GPU[rank][0] torch.cuda.set_device(device_id) tensor = _build_tensor(rank + 1, device_id=device_id) profiler_cls = profiler_ctx if profiler_ctx is not None else nullcontext() with profiler_cls as prof: for src in range(0, world_size): if src == rank: # Send mode for dst in range(0, world_size): if dst == rank: continue dist.send(tensor, dst) else: # Recv mode expected_tensor = _build_tensor(src + 1) output_tensor = _build_tensor( src + 1, value=-1, device_id=device_id ) dist.recv(output_tensor, src) self.assertEqual(output_tensor, expected_tensor) self._barrier() if profiler_ctx is not None: backend = dist.get_backend() if backend in SEND_RECV_PROFILING_SUPPORTED_BACKENDS: for event_name in [f"{backend}:send", f"{backend}:recv"]: events = get_profiling_event(event_name, prof, dedup_gpu_user_annotation=True) self.assertTrue(events) # Event order is not deterministic, so simply assert their shape # is found in the following list. 
expected_shapes = [ [[rank + 1] * 3] for rank in range(dist.get_world_size()) ] for event in events: self.assertTrue(event.input_shapes in expected_shapes) @skip_if_no_gpu @skip_but_pass_in_sandcastle_if(BACKEND != "nccl", "NCCL Send Recv Only") @requires_nccl_version((2, 7, 0), "Need NCCL 2.7+ for send/recv") def test_send_recv_nccl(self): self._test_send_recv_nccl() @skip_if_no_gpu @skip_but_pass_in_sandcastle_if(BACKEND != "nccl", "NCCL Send Recv Only") @requires_nccl_version((2, 7, 0), "Need NCCL 2.7+ for send/recv") def test_send_recv_nccl_autograd_profiler(self): profiler_ctx = torch.autograd.profiler.profile(record_shapes=True) self._test_send_recv_nccl(profiler_ctx) @skip_if_no_gpu @skip_but_pass_in_sandcastle_if(BACKEND != "nccl", "NCCL Send Recv Only") @requires_nccl_version((2, 7, 0), "Need NCCL 2.7+ for send/recv") @skip_but_pass_in_sandcastle_if(IS_FBCODE, "Kineto in fbcode causes hang") @skip_but_pass_in_sandcastle_if( IS_MACOS or IS_WINDOWS, "torch.profiler not enabled for mac/windows: https://github.com/pytorch/pytorch/pull/56124", ) def test_send_recv_nccl_torch_profiler(self): profiler_ctx = torch.profiler.profile( activities=[ torch.profiler.ProfilerActivity.CPU, torch.profiler.ProfilerActivity.CUDA, ], record_shapes=True, ) self._test_send_recv_nccl(profiler_ctx) # SEND RECV def _test_send_recv(self, profiler_ctx): rank = dist.get_rank() send_size = rank + 1 tensor = _build_tensor(send_size) ctx = profiler_ctx if profiler_ctx is not None else nullcontext() with ctx as prof: for src in range(0, dist.get_world_size()): if src == rank: # Send mode for dst in range(0, dist.get_world_size()): if dst == rank: continue dist.send(tensor, dst) else: # Recv mode recv_size = src + 1 expected_tensor = _build_tensor(recv_size) output_tensor = _build_tensor(recv_size, value=-1) dist.recv(output_tensor, src) self.assertEqual(output_tensor, expected_tensor) if profiler_ctx is not None: backend = dist.get_backend() if backend in SEND_RECV_PROFILING_SUPPORTED_BACKENDS: for event_name in [f"{backend}:send", f"{backend}:recv"]: events = get_profiling_event(event_name, prof) # Each rank sends/recvs from all other ranks. event_count = sum(e.count for e in events) expected_event_count = dist.get_world_size() - 1 self.assertEqual(event_count, expected_event_count) # Event order is not deterministic, so simply assert their shape # is found in the following list. 
expected_shapes = [ [[rank + 1] * 3] for rank in range(dist.get_world_size()) ] for event in events: self.assertTrue(event.is_async) self.assertTrue(event.input_shapes in expected_shapes) @skip_but_pass_in_sandcastle_if( BACKEND == "nccl", "Nccl send/recv tested by test_send_recv_nccl" ) def test_send_recv(self): self._test_send_recv(profiler_ctx=None) @skip_but_pass_in_sandcastle_if( BACKEND == "nccl", "NCCL send/recv tested by test_send_recv_nccl" ) def test_send_recv_autograd_profiler(self): autograd_profiler_ctx = _create_autograd_profiler() self._test_send_recv(profiler_ctx=autograd_profiler_ctx) @skip_but_pass_in_sandcastle_if( BACKEND == "nccl", "NCCL send/recv tested by test_send_recv_nccl" ) @skip_but_pass_in_sandcastle_if(IS_FBCODE, "Kineto in fbcode causes hang") @skip_but_pass_in_sandcastle_if( IS_MACOS or IS_WINDOWS, "torch.profiler not enabled for mac/windows: https://github.com/pytorch/pytorch/pull/56124", ) def test_send_recv_torch_profiler(self): torch_profiler_ctx = _create_torch_profiler() return self._test_send_recv(profiler_ctx=torch_profiler_ctx) # SEND RECV ANY SOURCE def _test_send_recv_any_source(self, profiler_ctx): rank = dist.get_rank() send_recv_size = 10 tensor = _build_tensor(send_recv_size, value=rank) recv_ranks = [] irecv_ranks = [] ctx = profiler_ctx if profiler_ctx is not None else nullcontext() with ctx as prof: for dst in range(0, dist.get_world_size()): if dst == rank: # Recv mode for dst in range(0, dist.get_world_size()): if dst == rank: continue for recv in ["recv", "irecv"]: output_tensor = _build_tensor(send_recv_size, value=-1) if recv == "recv": sender = dist.recv(output_tensor) recv_ranks.append(sender) elif recv == "irecv": work = dist.irecv(output_tensor) work.wait() sender = work._source_rank() irecv_ranks.append(sender) # Assert the scalar value "sender" that should be # equal to the rank of the sender is equal to all # values in the received tensor. self.assertTrue(output_tensor.eq(sender).all()) else: # Send mode dist.send(tensor, dst) # recv dist.send(tensor, dst) # irecv if profiler_ctx is not None: backend = dist.get_backend() if backend in SEND_RECV_PROFILING_SUPPORTED_BACKENDS: for event_name in [f"{backend}:send", f"{backend}:recvAnySource"]: events = get_profiling_event(event_name, prof) # Each rank sends/recvs from other rank twice. self.assertEqual( sum(event.count for event in events), 2 * (dist.get_world_size() - 1), ) for event in events: self.assertTrue(event.is_async) self.assertEqual(event.input_shapes, [[send_recv_size] * 3]) # Each rank would have 2 * (world_size - 1) sends, verify that # globally we receive the same amount on the other end. 
recv_ranks_tensor = torch.cat( (torch.tensor(recv_ranks), torch.tensor(irecv_ranks)), 0 ) global_recv_ranks = [ torch.empty_like(recv_ranks_tensor) for _ in range(dist.get_world_size()) ] dist.all_gather(global_recv_ranks, recv_ranks_tensor) global_recv_ranks_list = [] for tensor in global_recv_ranks: global_recv_ranks_list += tensor.tolist() from itertools import groupby global_recv_ranks_list.sort() frequency = [ len(list(group)) for key, group in groupby(global_recv_ranks_list) ] self.assertEqual(dist.get_world_size(), len(frequency)) self.assertEqual( [2 * (dist.get_world_size() - 1)] * dist.get_world_size(), frequency ) self._barrier() @skip_but_pass_in_sandcastle_if( BACKEND in DistTestCases.skip_collective["sendrecv anysource"], f"{BACKEND} does not support send/recv from any source", ) def test_send_recv_any_source(self): self._test_send_recv_any_source(profiler_ctx=None) @skip_but_pass_in_sandcastle_if( BACKEND in DistTestCases.skip_collective["sendrecv anysource"], f"{BACKEND} does not support send/recv from any source", ) def test_send_recv_any_source_autograd_profiler(self): autograd_profiler_ctx = _create_autograd_profiler() self._test_send_recv_any_source(profiler_ctx=autograd_profiler_ctx) @skip_but_pass_in_sandcastle_if( BACKEND in DistTestCases.skip_collective["sendrecv anysource"], f"{BACKEND} does not support send/recv from any source", ) @skip_but_pass_in_sandcastle_if(IS_FBCODE, "Kineto in fbcode code causes hang") @skip_but_pass_in_sandcastle_if( IS_MACOS or IS_WINDOWS, "torch.profiler not enabled for mac/windows: https://github.com/pytorch/pytorch/pull/56124", ) def test_send_recv_any_source_torch_profiler(self): torch_profiler_ctx = _create_torch_profiler() return self._test_send_recv_any_source(profiler_ctx=torch_profiler_ctx) # SEND RECV WITH TAG def _test_send_recv_with_tag(self, profiler_ctx): rank = dist.get_rank() world_size = dist.get_world_size() send_recv_size = 10 tensor = _build_tensor(send_recv_size, value=rank) ctx = profiler_ctx if profiler_ctx is not None else nullcontext() with ctx as prof: for dst in range(0, world_size): if dst == rank: # Recv mode for src in range(0, world_size): if src == rank: continue output_tensor = _build_tensor(send_recv_size, value=-1) dist.recv(output_tensor, src, tag=src) self.assertTrue(output_tensor.eq(src).all()) else: # Send mode dist.send(tensor, dst, tag=rank) if profiler_ctx is not None: backend = dist.get_backend() if backend in SEND_RECV_PROFILING_SUPPORTED_BACKENDS: for event_name in [f"{backend}:send", f"{backend}:recv"]: events = get_profiling_event(event_name, prof) # Each rank sends/recvs from all other ranks event_count = sum(e.count for e in events) expected_event_count = dist.get_world_size() - 1 self.assertEqual(event_count, expected_event_count) for event in events: self.assertTrue(event.is_async) self.assertEqual(event.name, event_name) self.assertEqual(event.input_shapes, [[send_recv_size] * 3]) @skip_but_pass_in_sandcastle_if( BACKEND == "nccl", "NCCL send/recv tested by test_send_recv_nccl" ) def test_send_recv_with_tag(self): self._test_send_recv_with_tag(profiler_ctx=None) @skip_but_pass_in_sandcastle_if( BACKEND == "nccl", "NCCL send/recv tested by test_send_recv_nccl" ) def test_send_recv_with_tag_autograd_profiler(self): autograd_profiler_ctx = _create_autograd_profiler() return self._test_send_recv_with_tag(profiler_ctx=autograd_profiler_ctx) @skip_but_pass_in_sandcastle_if( BACKEND == "nccl", "NCCL send/recv tested by test_send_recv_nccl" ) @skip_but_pass_in_sandcastle_if(IS_FBCODE, "Kineto 
in fbcode code causes hang") @skip_but_pass_in_sandcastle_if( IS_MACOS or IS_WINDOWS, "torch.profiler not enabled for mac/windows: https://github.com/pytorch/pytorch/pull/56124", ) def test_send_recv_with_tag_torch_profiler(self): torch_profiler_ctx = _create_torch_profiler() return self._test_send_recv_with_tag(profiler_ctx=torch_profiler_ctx) # ISEND def _test_isend(self, profiler_ctx): rank = dist.get_rank() world_size = dist.get_world_size() ctx = profiler_ctx if profiler_ctx is not None else nullcontext() with ctx as prof: if rank == 0: requests = [ dist.isend(_build_tensor(dest, 10), dest) for dest in range(1, world_size) ] for request in requests: request.wait() self.assertTrue(request.is_completed()) else: tensor = _build_tensor(rank, -1) dist.recv(tensor, 0) self.assertEqual(tensor, _build_tensor(rank, 10)) self._barrier() if profiler_ctx is not None: backend = dist.get_backend() if backend in SEND_RECV_PROFILING_SUPPORTED_BACKENDS: expected_event_name = ( f"{backend}:send" if rank == 0 else f"{backend}:recv" ) events = get_profiling_event(expected_event_name, prof) event_count = sum(e.count for e in events) expected_count = dist.get_world_size() - 1 if rank == 0 else 1 self.assertEqual(expected_count, event_count) # Event ordering is not guaranteed, so simply ensure the shapes are # found in the following map. expected_shapes = { r: [[r] * 3] for r in range(1, dist.get_world_size()) } for event in events: self.assertTrue(event.is_async) self.assertEqual(event.name, expected_event_name) if rank == 0: self.assertTrue( event.input_shapes in expected_shapes.values() ) else: self.assertEqual(event.input_shapes, expected_shapes[rank]) @skip_but_pass_in_sandcastle_if( BACKEND == "nccl", "Nccl does not support isend" ) def test_isend(self): self._test_isend(profiler_ctx=None) @skip_but_pass_in_sandcastle_if( BACKEND == "nccl", "Nccl does not support isend" ) def test_isend_autograd_profiler(self): autograd_profiler_ctx = _create_autograd_profiler() self._test_isend(profiler_ctx=autograd_profiler_ctx) @skip_but_pass_in_sandcastle_if( BACKEND == "nccl", "Nccl does not support isend" ) @skip_but_pass_in_sandcastle_if(IS_FBCODE, "Kineto in fbcode code causes hang") @skip_but_pass_in_sandcastle_if( IS_MACOS or IS_WINDOWS, "torch.profiler not enabled for mac/windows: https://github.com/pytorch/pytorch/pull/56124", ) def test_isend_torch_profiler(self): torch_profiler_ctx = _create_torch_profiler() self._test_isend(profiler_ctx=torch_profiler_ctx) # IRECV @skip_but_pass_in_sandcastle_if( BACKEND == "nccl", "Nccl does not support irecv" ) def test_irecv(self): rank = dist.get_rank() world_size = dist.get_world_size() if rank == 0: expected_tensors = [ _build_tensor(src, -1) for src in range(1, world_size) ] requests = [ dist.irecv(expected_tensors[src - 1], src) for src in range(1, world_size) ] for src in range(1, world_size): requests[src - 1].wait() self.assertTrue(requests[src - 1].is_completed()) self.assertEqual(expected_tensors[src - 1], _build_tensor(src, 10)) else: tensor = _build_tensor(rank, 10) dist.send(tensor, 0) self._barrier() # BROADCAST def _test_broadcast_helper( self, group, group_id, rank, cuda=False, rank_to_GPU=None, with_options=False, ): for dtype, value, requires_cuda in [ (torch.float, -1e-10, False), (torch.double, -1e-100, False), (torch.half, -0.1, True), (torch.int8, -2, False), (torch.uint8, 129, False), (torch.int, -1e5, False), (torch.long, -1e15, False), ]: if requires_cuda and not cuda: continue for src in group: expected_tensor = _build_tensor(src + 1, value, 
dtype) if cuda: expected_tensor = expected_tensor.cuda(rank_to_GPU[rank][0]) if rank == src: if with_options: opts = dist.BroadcastOptions() opts.rootTensor = 0 opts.rootRank = src self.call_dist_op( ":broadcast", True, group_id.broadcast, [expected_tensor], opts, ) else: self.call_dist_op( ":broadcast", False, dist.broadcast, expected_tensor, src, group_id, ) else: tensor = _build_tensor(src + 1, -1, dtype) if cuda: tensor = tensor.cuda(rank_to_GPU[rank][0]) if with_options: opts = dist.BroadcastOptions() opts.rootTensor = 0 opts.rootRank = src self.call_dist_op( ":broadcast", True, group_id.broadcast, [tensor], opts ) else: self.call_dist_op( ":broadcast", False, dist.broadcast, tensor, src, group_id, ) self.assertEqual(tensor.size(), expected_tensor.size()) self.assertEqual( tensor.ne(expected_tensor).max(), torch.tensor(False) ) self._barrier() @skip_but_pass_in_sandcastle_if( BACKEND == "nccl", "Nccl does not support CPU tensors" ) def test_broadcast(self): group, group_id, rank = self._init_global_test() self._test_broadcast_helper(group, group_id, rank) @skip_but_pass_in_sandcastle_if( BACKEND != "gloo" and BACKEND != "nccl", "Only Gloo and Nccl backend supports CUDA allReduce", ) @skip_if_no_gpu def test_broadcast_cuda(self): group, group_id, rank = self._init_global_test() rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) device_id = rank_to_GPU[rank][0] torch.cuda.set_device(device_id) self._test_broadcast_helper(group, group_id, rank, True, rank_to_GPU) @skip_if_small_worldsize @skip_but_pass_in_sandcastle_if( BACKEND == "nccl", "Nccl does not support CPU tensors" ) def test_broadcast_group(self): group, group_id, rank = self._init_group_test() self._test_broadcast_helper(group, group_id, rank) @skip_but_pass_in_sandcastle_if( BACKEND == "nccl", "Nccl does not support CPU tensors" ) def test_broadcast_full_group(self): group, group_id, rank = self._init_full_group_test() self._test_broadcast_helper(group, group_id, rank) @skip_but_pass_in_sandcastle_if( BACKEND != "nccl", "Only NCCL backend supports high priority stream", ) @skip_if_no_gpu def test_nccl_high_priority_stream(self): group, _, rank = self._init_global_test() rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) device_id = rank_to_GPU[rank][0] torch.cuda.set_device(device_id) new_port = str(MASTER_PORT + 1) os.environ["MASTER_PORT"] = new_port gen_iterator = dist.rendezvous("env://", rank, dist.get_world_size()) store, rank, size = next(gen_iterator) store = dist.PrefixStore(new_port, store) opts = dist.ProcessGroupNCCL.Options() opts.is_high_priority_stream = False group_id = dist.ProcessGroupNCCL(store, rank, size, opts) self._test_broadcast_helper(group, group_id, rank, True, rank_to_GPU, True) # REDUCE def _test_reduce_helper( self, group, group_id, rank, op, master_value, worker_value, expected_value, cuda=False, rank_to_GPU=None, ): for src in group: tensor = _build_tensor(src + 1).fill_( master_value if rank == src else worker_value ) if cuda: tensor = tensor.cuda(rank_to_GPU[rank][0]) self.call_dist_op( ":reduce", False, dist.reduce, tensor, src, op, group_id, tensor_shapes=[tensor.shape], ) if rank == src: self.assertEqual(tensor, _build_tensor(src + 1, expected_value)) self._barrier() @skip_but_pass_in_sandcastle_if( BACKEND == "nccl", "Nccl does not support CPU tensors" ) @skip_but_pass_in_sandcastle_if( BACKEND in DistTestCases.skip_collective["reduce"], f"{BACKEND} does not support reduce", ) def test_reduce_sum(self): group, group_id, rank = self._init_global_test() 
        self._test_reduce_helper(
            group,
            group_id,
            rank,
            dist.ReduceOp.SUM,
            2,
            10,
            2 + (10 * (len(group) - 1)),
        )

    @skip_but_pass_in_sandcastle_if(
        BACKEND != "nccl", "Only Nccl supports CUDA reduce"
    )
    @skip_but_pass_in_sandcastle_if(
        BACKEND in DistTestCases.skip_collective["reduce"],
        f"{BACKEND} does not support reduce",
    )
    @skip_if_no_gpu
    def test_reduce_sum_cuda(self):
        group, group_id, rank = self._init_global_test()
        rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND)
        device_id = rank_to_GPU[rank][0]
        torch.cuda.set_device(device_id)
        self._test_reduce_helper(
            group,
            group_id,
            rank,
            dist.ReduceOp.SUM,
            2,
            10,
            2 + 10 * (len(group) - 1),
            True,
            rank_to_GPU,
        )

    @skip_but_pass_in_sandcastle_if(
        BACKEND == "nccl", "Nccl does not support CPU tensors"
    )
    @skip_but_pass_in_sandcastle_if(
        BACKEND in DistTestCases.skip_collective["reduce"],
        f"{BACKEND} does not support reduce",
    )
    def test_reduce_product(self):
        group, group_id, rank = self._init_global_test()
        self._test_reduce_helper(
            group,
            group_id,
            rank,
            dist.ReduceOp.PRODUCT,
            2,
            10,
            reduce(operator.mul, [10] * (len(group) - 1), 2),
        )

    @skip_but_pass_in_sandcastle_if(
        BACKEND == "nccl", "Nccl does not support CPU tensors"
    )
    @skip_but_pass_in_sandcastle_if(
        BACKEND in DistTestCases.skip_collective["reduce"],
        f"{BACKEND} does not support reduce",
    )
    def test_reduce_min(self):
        group, group_id, rank = self._init_global_test()
        self._test_reduce_helper(
            group, group_id, rank, dist.ReduceOp.MIN, 1010, 1, 1
        )

    @skip_but_pass_in_sandcastle_if(
        BACKEND == "nccl", "Nccl does not support CPU tensors"
    )
    @skip_but_pass_in_sandcastle_if(
        BACKEND in DistTestCases.skip_collective["reduce"],
        f"{BACKEND} does not support reduce",
    )
    def test_reduce_max(self):
        group, group_id, rank = self._init_global_test()
        self._test_reduce_helper(
            group, group_id, rank, dist.ReduceOp.MAX, -1, 10, 10
        )

    @skip_but_pass_in_sandcastle_if(
        BACKEND == "nccl", "Nccl does not support CPU tensors"
    )
    @skip_but_pass_in_sandcastle_if(
        BACKEND in DistTestCases.skip_collective["reduce"],
        f"{BACKEND} does not support reduce",
    )
    @skip_if_small_worldsize
    def test_reduce_group_sum(self):
        group, group_id, rank = self._init_group_test()
        self._test_reduce_helper(
            group,
            group_id,
            rank,
            dist.ReduceOp.SUM,
            2,
            10,
            2 + (10 * (len(group) - 1)),
        )

    @skip_but_pass_in_sandcastle_if(
        BACKEND == "nccl", "Nccl does not support CPU tensors"
    )
    @skip_but_pass_in_sandcastle_if(
        BACKEND in DistTestCases.skip_collective["reduce"],
        f"{BACKEND} does not support reduce",
    )
    @skip_if_small_worldsize
    def test_reduce_group_product(self):
        group, group_id, rank = self._init_group_test()
        self._test_reduce_helper(
            group,
            group_id,
            rank,
            dist.ReduceOp.PRODUCT,
            2,
            10,
            reduce(operator.mul, [10] * (len(group) - 1), 2),
        )

    @skip_but_pass_in_sandcastle_if(
        BACKEND == "nccl", "Nccl does not support CPU tensors"
    )
    @skip_but_pass_in_sandcastle_if(
        BACKEND in DistTestCases.skip_collective["reduce"],
        f"{BACKEND} does not support reduce",
    )
    @skip_if_small_worldsize
    def test_reduce_group_min(self):
        group, group_id, rank = self._init_group_test()
        self._test_reduce_helper(
            group, group_id, rank, dist.ReduceOp.MIN, 1010, 1, 1
        )

    @skip_but_pass_in_sandcastle_if(
        BACKEND == "nccl", "Nccl does not support CPU tensors"
    )
    @skip_but_pass_in_sandcastle_if(
        BACKEND in DistTestCases.skip_collective["reduce"],
        f"{BACKEND} does not support reduce",
    )
    @skip_if_small_worldsize
    def test_reduce_group_max(self):
        group, group_id, rank = self._init_group_test()
        self._test_reduce_helper(
            group, group_id, rank, dist.ReduceOp.MAX, -1, 10, 10
        )
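
    # --- Editor's note: illustrative sketch, not part of the original suite. ---
    # The reduce tests above all follow the same arithmetic: the destination
    # rank contributes `master_value`, every other rank contributes
    # `worker_value`, so for ReduceOp.SUM the destination ends up with
    # master_value + worker_value * (world_size - 1).
    # The method name and the constants below are editor-added assumptions
    # (not PyTorch API); only the torch.distributed calls are real, and an
    # initialized default process group is assumed, as in the tests above.
    def _example_reduce_sum_contract(self):
        world_size = dist.get_world_size()
        rank = dist.get_rank()
        master_value, worker_value = 2, 10
        # Every rank holds a small tensor filled with its contribution.
        tensor = torch.full(
            (3, 3, 3), float(master_value if rank == 0 else worker_value)
        )
        # Only the destination rank (here rank 0) receives the reduced result.
        dist.reduce(tensor, dst=0, op=dist.ReduceOp.SUM)
        if rank == 0:
            expected = float(master_value + worker_value * (world_size - 1))
            self.assertTrue(torch.all(tensor == expected))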
@skip_but_pass_in_sandcastle_if( BACKEND == "nccl", "Nccl does not support CPU tensors" ) @skip_but_pass_in_sandcastle_if( BACKEND in DistTestCases.skip_collective["reduce"], f"{BACKEND} does not support reduce", ) def test_reduce_full_group_sum(self): group, group_id, rank = self._init_full_group_test() self._test_reduce_helper( group, group_id, rank, dist.ReduceOp.SUM, 2, 10, 2 + (10 * (len(group) - 1)), ) @skip_but_pass_in_sandcastle_if( BACKEND == "nccl", "Nccl does not support CPU tensors" ) @skip_but_pass_in_sandcastle_if( BACKEND in DistTestCases.skip_collective["reduce"], f"{BACKEND} does not support reduce", ) def test_reduce_full_group_product(self): group, group_id, rank = self._init_full_group_test() self._test_reduce_helper( group, group_id, rank, dist.ReduceOp.PRODUCT, 2, 10, reduce(operator.mul, [10] * (len(group) - 1), 2), ) @skip_but_pass_in_sandcastle_if( BACKEND == "nccl", "Nccl does not support CPU tensors" ) @skip_but_pass_in_sandcastle_if( BACKEND in DistTestCases.skip_collective["reduce"], f"{BACKEND} does not support reduce", ) def test_reduce_full_group_min(self): group, group_id, rank = self._init_full_group_test() self._test_reduce_helper( group, group_id, rank, dist.ReduceOp.MIN, 1010, 1, 1 ) @skip_but_pass_in_sandcastle_if( BACKEND == "nccl", "Nccl does not support CPU tensors" ) @skip_but_pass_in_sandcastle_if( BACKEND in DistTestCases.skip_collective["reduce"], f"{BACKEND} does not support reduce", ) def test_reduce_full_group_max(self): group, group_id, rank = self._init_full_group_test() self._test_reduce_helper( group, group_id, rank, dist.ReduceOp.MAX, -1, 10, 10 ) # REDUCE TWICE def _test_reduce_twice_helper( self, group, group_id, rank, op, master_value, worker_value, expected_value, cuda=False, rank_to_GPU=None, ): for src in group: tensors = [ _build_tensor(src + 1).fill_( master_value if rank == src else worker_value ) for i in range(2) ] if cuda: for i in range(2): tensors[i] = tensors[i].cuda(rank_to_GPU[rank][0]) self.call_dist_op( ":reduce", False, dist.reduce, tensors[0], src, op, group_id, secondary_op_call=lambda: dist.reduce( tensors[1], src, op, group_id ), tensor_shapes=[tensors[0].shape], ) if rank == src: for tensor in tensors: self.assertEqual(tensor, _build_tensor(src + 1, expected_value)) self._barrier() @skip_but_pass_in_sandcastle_if( BACKEND == "nccl", "Nccl does not support CPU tensors" ) @skip_but_pass_in_sandcastle_if( BACKEND in DistTestCases.skip_collective["reduce"], f"{BACKEND} does not support reduce", ) def test_reduce_sum_twice(self): group, group_id, rank = self._init_global_test() self._test_reduce_twice_helper( group, group_id, rank, dist.ReduceOp.SUM, 2, 10, 2 + (10 * (len(group) - 1)), ) @skip_but_pass_in_sandcastle_if( BACKEND != "nccl", "Only Nccl supports CUDA reduce" ) @skip_but_pass_in_sandcastle_if( BACKEND in DistTestCases.skip_collective["reduce"], f"{BACKEND} does not support reduce", ) @skip_if_no_gpu def test_reduce_sum_cuda_twice(self): group, group_id, rank = self._init_global_test() rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) device_id = rank_to_GPU[rank][0] torch.cuda.set_device(device_id) self._test_reduce_twice_helper( group, group_id, rank, dist.ReduceOp.SUM, 2, 10, 2 + 10 * (len(group) - 1), True, rank_to_GPU, ) @skip_but_pass_in_sandcastle_if( BACKEND != "nccl", "Only Nccl supports reduce_scatter_v" ) @skip_but_pass_in_sandcastle_if( BACKEND in DistTestCases.skip_collective["reduce"], f"{BACKEND} does not support reduce", ) @skip_if_no_gpu def 
test_reduce_scatter_v_cuda(self): self._barrier() group, group_id, rank = self._init_global_test() rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) device_id = rank_to_GPU[rank][0] input_split_sizes = [src + 1 for src in group] start_len = sum(input_split_sizes[:rank]) end_len = start_len + input_split_sizes[rank] sum_len = sum(input_split_sizes) master_value = 2 worker_value = 10 for async_val in [True, False]: tensor = _build_tensor(sum_len, worker_value, device_id=device_id) tensor[start_len:end_len].fill_(master_value) out_tensor = ( torch.empty( input_split_sizes[rank], sum_len, sum_len, dtype=torch.float ) .fill_(-1) .cuda(device_id) ) req = dist.reduce_scatter( out_tensor, list(torch.split(tensor, input_split_sizes)), dist.ReduceOp.SUM, group_id, async_val, ) if async_val: req.wait() expected_value = 2 + (10 * (len(group) - 1)) expected_tensor = torch.empty( input_split_sizes[rank], sum_len, sum_len, dtype=torch.float ) expected_tensor = expected_tensor.fill_(expected_value).cuda(device_id) self.assertEqual(out_tensor, expected_tensor) self._barrier() # Test reduce_scatter_tensor accepting single tensor as input def _reduce_scatter_tensor_helper( self, tensor_out, tensor_in, group_id, rank, cuda=True, rank_to_GPU=None ): if cuda: tensor_in = tensor_in.cuda(rank_to_GPU[rank][0]) tensor_out = tensor_out.cuda(rank_to_GPU[rank][0]) tensor_shapes = [tensor_out.shape] self.call_dist_op( ":reduce_scatter_tensor", False, dist.reduce_scatter_tensor, tensor_out, tensor_in, dist.ReduceOp.SUM, group_id, False, expect_event=False, tensor_shapes=tensor_shapes, ) return tensor_out @skip_but_pass_in_sandcastle_if( BACKEND != "nccl", "Only Nccl supports CUDA reduce_scatter_tensor" ) @skip_if_no_gpu def test_reduce_scatter_tensor_cuda(self): group, group_id, rank = self._init_global_test() rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) size = 2 tensor_out = torch.zeros(size, dtype=torch.int64) # Concatenated input tensor_in = torch.arange(len(group) * size) tensor_out = self._reduce_scatter_tensor_helper( tensor_out, tensor_in, group_id, rank, True, rank_to_GPU ) # Check result expected_tensor = torch.arange(rank * size, (rank + 1) * size) * len(group) self.assertEqual(tensor_out, expected_tensor) self._barrier() # Stacked input tensor_in = torch.reshape(tensor_in, (len(group), size)) tensor_out = self._reduce_scatter_tensor_helper( tensor_out, tensor_in, group_id, rank, True, rank_to_GPU ) # Check result # Should be the same as the result in concatenated case self.assertEqual(tensor_out, expected_tensor) self._barrier() def call_dist_op( self, profiling_title_postfix, is_async, op, *args, expect_event=True, secondary_op_call=None, profile_cuda=False, tensor_shapes=None, **kwargs, ): op_calls = [lambda: op(*args, **kwargs)] if secondary_op_call is not None: op_calls.append(secondary_op_call) autograd_profiler_ctx = torch.autograd.profiler.profile( use_cuda=profile_cuda, record_shapes=True ) # TODO: move this test to use torch.profiler once kineto issues are # fixed internally. with autograd_profiler_ctx: works = [op_call() for op_call in op_calls] if is_async: for work in works: work.wait() if expect_event and dist.get_backend() in PROFILING_SUPPORTED_BACKENDS: # We are only interested in the backend's implementation not the dispatcher wrapper. 
events = get_profiling_event( dist.get_backend() + profiling_title_postfix, autograd_profiler_ctx ) # DETAIL debug mode can use a pg wrapper that issues more collectives # under the hood if dist.get_debug_level() != dist.DebugLevel.DETAIL: self.assertEqual(len(events), len(op_calls)) for e in events: self.assertTrue(e.is_async) self.assertEqual(e.count, 1) self.assertGreaterEqual(e.cpu_time, 0) # Verify tensor shapes if given # DETAIL debug mode can use a pg wrapper that issues more collectives # under the hood if ( tensor_shapes is not None and dist.get_debug_level() != dist.DebugLevel.DETAIL ): self.assertEqual( e.input_shapes, tensor_shapes, f"event shape: {e.input_shapes} vs tensor {tensor_shapes}", ) # ALL REDUCE def _test_all_reduce_helper( self, group, group_id, rank, op, master_value, worker_value, expected_value, cuda=False, rank_to_GPU=None, dtype=torch.float, async_op=False, ): for src in group: curr_value = master_value if rank == src else worker_value tensor = _build_tensor(src + 1, dtype=dtype).fill_(curr_value) if cuda: tensor = tensor.cuda(rank_to_GPU[rank][0]) if tensor.dtype == torch.complex64: tensor_shapes = [torch.view_as_real(tensor).shape] else: tensor_shapes = [tensor.shape] self.call_dist_op( ":all_reduce", async_op, dist.all_reduce, tensor, op, group_id, async_op=async_op, tensor_shapes=tensor_shapes, ) # Currently, only Gloo backend has profiling tested with CUDA enabled. # Only run cuda profiling test for one rank to speed up since # running with different src_rank does not affect the correctness. if ( src == 0 and cuda and dist.get_backend() in CUDA_PROFILING_SUPPORTED_BACKENDS ): self.call_dist_op( ":all_reduce", async_op, dist.all_reduce, tensor, op, group_id, async_op=async_op, profile_cuda=True, tensor_shapes=tensor_shapes, ) self._barrier() @skip_but_pass_in_sandcastle_if( BACKEND == "nccl", "Nccl does not support CPU tensors" ) def test_all_reduce_sum(self): group, group_id, rank = self._init_global_test() self._test_all_reduce_helper( group, group_id, rank, dist.ReduceOp.SUM, 2, 10, 2 + (10 * (len(group) - 1)), ) @skip_but_pass_in_sandcastle_if( BACKEND == "nccl", "Nccl does not support CPU tensors" ) def test_all_reduce_sum_async(self): group, group_id, rank = self._init_global_test() self._test_all_reduce_helper( group, group_id, rank, dist.ReduceOp.SUM, 2, 10, 2 + (10 * (len(group) - 1)), async_op=True, ) @skip_but_pass_in_sandcastle_if( BACKEND != "gloo" and BACKEND != "nccl", "Only Gloo and NCCL backends will have CUDA allReduce tested", ) @skip_if_no_gpu def test_all_reduce_sum_cuda(self): torch.cuda.set_device(self.rank) group, group_id, rank = self._init_global_test() rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) self._test_all_reduce_helper( group, group_id, rank, dist.ReduceOp.SUM, 2, 10, 2 + (10 * (len(group) - 1)), True, rank_to_GPU, ) @skip_but_pass_in_sandcastle_if( BACKEND != "gloo" and BACKEND != "nccl", "Only Gloo and NCCL backends will have CUDA allReduce tested", ) @skip_if_no_gpu def test_all_reduce_sum_cuda_async(self): torch.cuda.set_device(self.rank) group, group_id, rank = self._init_global_test() rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) self._test_all_reduce_helper( group, group_id, rank, dist.ReduceOp.SUM, 2, 10, 2 + (10 * (len(group) - 1)), True, rank_to_GPU, async_op=True, ) @skip_but_pass_in_sandcastle_if( BACKEND == "nccl", "Nccl does not support CPU tensors" ) def test_all_reduce_sum_complex(self): group, group_id, rank = self._init_global_test() self._test_all_reduce_helper( 
group, group_id, rank, dist.ReduceOp.SUM, complex(2, 3), complex(10, 11), complex(2, 3) + (complex(10, 11) * (len(group) - 1)), dtype=torch.cfloat, ) @skip_but_pass_in_sandcastle_if( BACKEND == "nccl", "Nccl does not support CPU tensors" ) def test_all_reduce_complex_unsupported_ops(self): unsupported_ops = [ dist.ReduceOp.MAX, dist.ReduceOp.MIN, dist.ReduceOp.PRODUCT, dist.ReduceOp.BAND, dist.ReduceOp.BOR, dist.ReduceOp.BXOR, ] _group, group_id, _rank = self._init_global_test() for unsupported_op in unsupported_ops: with self.assertRaisesRegex( ValueError, "all_reduce does not support" ): dist.all_reduce( _build_tensor(1, dtype=torch.cfloat), unsupported_op, group_id ) @skip_but_pass_in_sandcastle_if( BACKEND != "gloo" and BACKEND != "nccl", "Only Gloo and NCCL backends will have CUDA allReduce tested", ) @skip_if_no_gpu def test_all_reduce_sum_cuda_complex(self): torch.cuda.set_device(self.rank) group, group_id, rank = self._init_global_test() rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) self._test_all_reduce_helper( group, group_id, rank, dist.ReduceOp.SUM, complex(2, 3), complex(10, 11), complex(2, 3) + (complex(10, 11) * (len(group) - 1)), True, rank_to_GPU, dtype=torch.cfloat, ) @skip_but_pass_in_sandcastle_if( BACKEND == "nccl", "Nccl does not support CPU tensors" ) def test_all_reduce_product(self): group, group_id, rank = self._init_global_test() self._test_all_reduce_helper( group, group_id, rank, dist.ReduceOp.PRODUCT, 2, 10, reduce(operator.mul, [10] * (len(group) - 1), 2), ) @skip_but_pass_in_sandcastle_if( BACKEND == "nccl", "Nccl does not support CPU tensors" ) def test_all_reduce_min(self): group, group_id, rank = self._init_global_test() self._test_all_reduce_helper( group, group_id, rank, dist.ReduceOp.MIN, 1010, 1, 1 ) @skip_but_pass_in_sandcastle_if( BACKEND == "nccl", "Nccl does not support CPU tensors" ) def test_all_reduce_max(self): group, group_id, rank = self._init_global_test() self._test_all_reduce_helper( group, group_id, rank, dist.ReduceOp.MAX, -1, 10, 10 ) @skip_if_small_worldsize @skip_but_pass_in_sandcastle_if( BACKEND == "nccl", "Nccl does not support CPU tensors" ) def test_all_reduce_group_sum(self): group, group_id, rank = self._init_group_test() self._test_all_reduce_helper( group, group_id, rank, dist.ReduceOp.SUM, 2, 10, 2 + (10 * (len(group) - 1)), ) @skip_if_small_worldsize @skip_but_pass_in_sandcastle_if( BACKEND == "nccl", "Nccl does not support CPU tensors" ) def test_all_reduce_group_product(self): group, group_id, rank = self._init_group_test() self._test_all_reduce_helper( group, group_id, rank, dist.ReduceOp.PRODUCT, 2, 10, reduce(operator.mul, [10] * (len(group) - 1), 2), ) @skip_if_small_worldsize @skip_but_pass_in_sandcastle_if( BACKEND == "nccl", "Nccl does not support CPU tensors" ) def test_all_reduce_group_min(self): group, group_id, rank = self._init_group_test() self._test_all_reduce_helper( group, group_id, rank, dist.ReduceOp.MIN, 1010, 1, 1 ) @skip_if_small_worldsize @skip_but_pass_in_sandcastle_if( BACKEND == "nccl", "Nccl does not support CPU tensors" ) def test_all_reduce_group_max(self): group, group_id, rank = self._init_group_test() self._test_all_reduce_helper( group, group_id, rank, dist.ReduceOp.MAX, -1, 10, 10 ) @skip_but_pass_in_sandcastle_if( BACKEND == "nccl", "Nccl does not support CPU tensors" ) def test_all_reduce_full_group_sum(self): group, group_id, rank = self._init_full_group_test() self._test_all_reduce_helper( group, group_id, rank, dist.ReduceOp.SUM, 2, 10, 2 + (10 * (len(group) - 
1)), ) @skip_but_pass_in_sandcastle_if( BACKEND == "nccl", "Nccl does not support CPU tensors" ) def test_all_reduce_full_group_product(self): group, group_id, rank = self._init_full_group_test() self._test_all_reduce_helper( group, group_id, rank, dist.ReduceOp.PRODUCT, 2, 10, reduce(operator.mul, [10] * (len(group) - 1), 2), ) @skip_but_pass_in_sandcastle_if( BACKEND == "nccl", "Nccl does not support CPU tensors" ) def test_all_reduce_full_group_min(self): group, group_id, rank = self._init_full_group_test() self._test_all_reduce_helper( group, group_id, rank, dist.ReduceOp.MIN, 1010, 1, 1 ) @skip_but_pass_in_sandcastle_if( BACKEND == "nccl", "Nccl does not support CPU tensors" ) def test_all_reduce_full_group_max(self): group, group_id, rank = self._init_full_group_test() self._test_all_reduce_helper( group, group_id, rank, dist.ReduceOp.MAX, -1, 10, 10 ) # SPARSE ALL REDUCE def _test_sparse_all_reduce_sum(self, fn): _group, group_id, rank = self._init_global_test() tests = simple_sparse_reduce_tests( rank, dist.get_world_size(), num_inputs=1 ) for inputs, outputs in tests: tensors = [fn(input) for input in inputs] dist.all_reduce(tensors[0], dist.ReduceOp.SUM, group_id) self.assertEqual(tensors[0], outputs[0]) @skip_but_pass_in_sandcastle_if( BACKEND != "gloo", "Only Gloo backend support sparse all reduce" ) def test_sparse_all_reduce_sum(self): self._test_sparse_all_reduce_sum(lambda t: t) @skip_but_pass_in_sandcastle_if( BACKEND != "gloo", "Only Gloo backend support sparse all reduce" ) @skip_if_no_gpu def test_sparse_all_reduce_sum_cuda(self): self._test_sparse_all_reduce_sum(lambda t: t.clone().cuda()) # ALL REDUCE - COALESCED @staticmethod def _all_reduce_coalesced_sum_test_cases(group_size): return ( [2, 3, complex(2, 3)], [10, 11, complex(10, 11)], [ 2 + 10 * (group_size - 1), 3 + 11 * (group_size - 1), complex(2, 3) + complex(10, 11) * (group_size - 1), ], [torch.float, torch.float, torch.cfloat], ) @staticmethod def _all_reduce_coalesced_product_test_cases(group_size): return ( [1, 2], [3, 4], [1 * 3 ** (group_size - 1), 2 * 4 ** (group_size - 1)], [torch.float, torch.float], ) @staticmethod def _all_reduce_coalesced_min_test_cases(group_size): return ( [1, 4], [2, 3], [1, 3], [torch.float, torch.float], ) @staticmethod def _all_reduce_coalesced_max_test_cases(group_size): return ( [1, 4], [2, 3], [2, 4], [torch.float, torch.float], ) @skip_but_pass_in_sandcastle_if( BACKEND == "nccl", "Nccl does not support CPU tensors" ) def test_all_reduce_coalesced_max_complex_unsupported(self): _group, group_id, _rank = self._init_global_test() with self.assertRaisesRegex(ValueError, "all_reduce does not support"): dist.all_reduce_coalesced( [_build_tensor(1, dtype=torch.cfloat)], dist.ReduceOp.MAX, group_id ) def _test_all_reduce_coalesced_helper( self, group, group_id, rank, op, cuda=False, rank_to_GPU=None, ): test_case_func = { dist.ReduceOp.SUM: self._all_reduce_coalesced_sum_test_cases, dist.ReduceOp.PRODUCT: self._all_reduce_coalesced_product_test_cases, dist.ReduceOp.MIN: self._all_reduce_coalesced_min_test_cases, dist.ReduceOp.MAX: self._all_reduce_coalesced_max_test_cases, }[op] master_values, worker_values, expected_values, dtypes = test_case_func( len(group) ) for src in group: curr_values = master_values if rank == src else worker_values tensors = [ _build_tensor(src + 1, val, dtype=dtype) for dtype, val in zip(dtypes, curr_values) ] if cuda: tensors = [t.cuda(rank_to_GPU[rank][0]) for t in tensors] tensor_shapes = [] for tensor in tensors: if tensor.dtype == torch.complex64: 
tensor_shapes.append(torch.view_as_real(tensor).shape) else: tensor_shapes.append(tensor.shape) self.call_dist_op( ":all_reduce", False, dist.all_reduce_coalesced, tensors, op, group_id, tensor_shapes=tensor_shapes, ) expected_tensors = [ _build_tensor(src + 1, expected_value, dtype=dtype) for dtype, expected_value in zip(dtypes, expected_values) ] self.assertEqual(tensors, expected_tensors) self._barrier() @require_backend_is_available({"gloo"}) def test_all_reduce_coalesced_sum(self): group, group_id, rank = self._init_global_test() self._test_all_reduce_coalesced_helper( group, group_id, rank, dist.ReduceOp.SUM, cuda=False, rank_to_GPU=None, ) @require_backend_is_available({"gloo"}) def test_all_reduce_coalesced_product(self): group, group_id, rank = self._init_global_test() self._test_all_reduce_coalesced_helper( group, group_id, rank, dist.ReduceOp.PRODUCT, cuda=False, rank_to_GPU=None, ) @require_backend_is_available({"gloo"}) def test_all_reduce_coalesced_min(self): group, group_id, rank = self._init_global_test() self._test_all_reduce_coalesced_helper( group, group_id, rank, dist.ReduceOp.MIN, cuda=False, rank_to_GPU=None, ) @require_backend_is_available({"gloo"}) def test_all_reduce_coalesced_max(self): group, group_id, rank = self._init_global_test() self._test_all_reduce_coalesced_helper( group, group_id, rank, dist.ReduceOp.MAX, cuda=False, rank_to_GPU=None ) @skip_if_small_worldsize @require_backend_is_available({"gloo"}) def test_all_reduce_coalesced_group_sum(self): group, group_id, rank = self._init_group_test() self._test_all_reduce_coalesced_helper( group, group_id, rank, dist.ReduceOp.SUM, cuda=False, rank_to_GPU=None ) @skip_if_small_worldsize @require_backend_is_available({"gloo"}) def test_all_reduce_coalesced_group_product(self): group, group_id, rank = self._init_group_test() self._test_all_reduce_coalesced_helper( group, group_id, rank, dist.ReduceOp.PRODUCT, cuda=False, rank_to_GPU=None, ) @skip_if_small_worldsize @require_backend_is_available({"gloo"}) def test_all_reduce_coalesced_group_min(self): group, group_id, rank = self._init_group_test() self._test_all_reduce_coalesced_helper( group, group_id, rank, dist.ReduceOp.MIN, cuda=False, rank_to_GPU=None ) @skip_if_small_worldsize @require_backend_is_available({"gloo"}) def test_all_reduce_coalesced_group_max(self): group, group_id, rank = self._init_group_test() self._test_all_reduce_coalesced_helper( group, group_id, rank, dist.ReduceOp.MAX, cuda=False, rank_to_GPU=None ) @require_backend_is_available({"gloo"}) def test_all_reduce_coalesced_full_group_sum(self): group, group_id, rank = self._init_full_group_test() self._test_all_reduce_coalesced_helper( group, group_id, rank, dist.ReduceOp.SUM, cuda=False, rank_to_GPU=None ) @require_backend_is_available({"gloo"}) def test_all_reduce_coalesced_full_group_product(self): group, group_id, rank = self._init_full_group_test() self._test_all_reduce_coalesced_helper( group, group_id, rank, dist.ReduceOp.PRODUCT, cuda=False, rank_to_GPU=None, ) @require_backend_is_available({"gloo"}) def test_all_reduce_coalesced_full_group_min(self): group, group_id, rank = self._init_full_group_test() self._test_all_reduce_coalesced_helper( group, group_id, rank, dist.ReduceOp.MIN, cuda=False, rank_to_GPU=None, ) @require_backend_is_available({"gloo"}) def test_all_reduce_coalesced_full_group_max(self): group, group_id, rank = self._init_full_group_test() self._test_all_reduce_coalesced_helper( group, group_id, rank, dist.ReduceOp.MAX, cuda=False, rank_to_GPU=None ) # SCATTER def 
_test_scatter_helper( self, group, group_id, rank, cuda=False, rank_to_GPU=None, dtype=torch.float ): for dest in group: tensor = _build_tensor(dest + 1, -1, dtype=dtype) expected_tensor = _build_tensor(dest + 1, rank, dtype=dtype) tensors = ( [_build_tensor(dest + 1, i, dtype=dtype) for i in group] if rank == dest else [] ) if cuda: tensor = tensor.cuda(rank_to_GPU[rank][0]) tensors = [t.cuda(rank_to_GPU[rank][0]) for t in tensors] if dtype == torch.complex64: tensor_shapes = [torch.view_as_real(t).shape for t in tensors] else: tensor_shapes = [t.shape for t in tensors] self.call_dist_op( ":scatter", False, dist.scatter, tensor, src=dest, scatter_list=tensors, group=group_id, expect_event=False, tensor_shapes=tensor_shapes, ) self.assertEqual(tensor, expected_tensor) self._barrier() @skip_but_pass_in_sandcastle_if( BACKEND == "nccl", "Nccl does not support CPU tensors" ) @skip_but_pass_in_sandcastle_if( BACKEND == "ucc", "CPU tensor ops not supported by UCP TL" ) def test_scatter_checks(self): group, _group_id, rank = self._init_global_test() one = torch.ones([1]) # Specify scatter_list argument only on source rank. output = one.clone() * -1 if rank == 0: scatter_list = [one.clone() * i for i in group] dist.scatter(output, src=0, scatter_list=scatter_list) else: dist.scatter(output, src=0) self.assertEqual(output, one * rank) # Don't specify src argument. output = one.clone() * -1 if rank == 0: scatter_list = [one.clone() * i for i in group] dist.scatter(output, scatter_list=scatter_list) else: dist.scatter(output) self.assertEqual(output, one * rank) @skip_but_pass_in_sandcastle_if( BACKEND == "nccl", "Nccl does not support CPU tensors" ) @skip_but_pass_in_sandcastle_if( BACKEND == "ucc", "CPU tensor ops not supported by UCP TL" ) def test_scatter(self): group, group_id, rank = self._init_global_test() self._test_scatter_helper(group, group_id, rank) @skip_but_pass_in_sandcastle_if( BACKEND != "nccl", "Only Nccl supports CUDA gather" ) @skip_if_no_gpu def test_scatter_cuda(self): group, group_id, rank = self._init_global_test() rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) self._test_scatter_helper(group, group_id, rank, True, rank_to_GPU) @skip_but_pass_in_sandcastle_if( BACKEND == "nccl", "Nccl does not support CPU tensors" ) @skip_but_pass_in_sandcastle_if( BACKEND == "ucc", "CPU tensor ops not supported by UCP TL" ) def test_scatter_complex(self): group, group_id, rank = self._init_global_test() self._test_scatter_helper(group, group_id, rank, dtype=torch.cfloat) @skip_but_pass_in_sandcastle_if( BACKEND != "nccl", "Only Nccl supports CUDA gather" ) @skip_if_no_gpu def test_scatter_cuda_complex(self): group, group_id, rank = self._init_global_test() rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) self._test_scatter_helper( group, group_id, rank, True, rank_to_GPU, dtype=torch.cfloat ) @skip_but_pass_in_sandcastle_if( BACKEND == "nccl", "Nccl does not support CPU tensors" ) @skip_but_pass_in_sandcastle_if( BACKEND == "ucc", "CPU tensor ops not supported by UCP TL" ) @skip_if_small_worldsize def test_scatter_group(self): group, group_id, rank = self._init_group_test() self._test_scatter_helper(group, group_id, rank) @skip_but_pass_in_sandcastle_if( BACKEND == "nccl", "Nccl does not support CPU tensors" ) @skip_but_pass_in_sandcastle_if( BACKEND == "ucc", "CPU tensor ops not supported by UCP TL" ) def test_scatter_full_group(self): group, group_id, rank = self._init_full_group_test() self._test_scatter_helper(group, group_id, rank) # GATHER def 
_test_gather_helper( self, group, group_id, rank, cuda=False, rank_to_GPU=None ): for dest in group: tensor = _build_tensor(dest + 1, rank) tensors = ( [_build_tensor(dest + 1, -1) for i in group] if rank == dest else [] ) if cuda: tensor = tensor.cuda(rank_to_GPU[rank][0]) tensors = [t.cuda(rank_to_GPU[rank][0]) for t in tensors] self.call_dist_op( ":gather", False, dist.gather, tensor, dst=dest, gather_list=tensors, group=group_id, expect_event=False, tensor_shapes=[tensors[0].shape] if len(tensors) > 0 else None, ) if rank == dest: expected_tensors = [_build_tensor(dest + 1, i) for i in group] for t1, t2 in zip(tensors, expected_tensors): self.assertEqual(t1, t2) self._barrier() @skip_but_pass_in_sandcastle_if( BACKEND == "nccl", "Nccl does not support CPU tensors" ) @skip_but_pass_in_sandcastle_if( BACKEND == "ucc", "CPU tensor ops not supported by UCP TL" ) def test_gather_checks(self): group, _group_id, rank = self._init_global_test() one = torch.ones([1]) # Specify gather_list argument only on destination rank. if rank == 0: gather_list = [one.clone() for _ in group] dist.gather(one * rank, dst=0, gather_list=gather_list) for i in group: self.assertEqual(gather_list[i], one * i) else: dist.gather(one * rank, dst=0) # Don't specify dst argument. if rank == 0: gather_list = [one.clone() for _ in group] dist.gather(one * rank, gather_list=gather_list) for i in group: self.assertEqual(gather_list[i], one * i) else: dist.gather(one * rank) @skip_but_pass_in_sandcastle_if( BACKEND == "nccl", "Nccl does not support CPU tensors" ) @skip_but_pass_in_sandcastle_if( BACKEND == "ucc", "CPU tensor ops not supported by UCP TL" ) def test_gather(self): group, group_id, rank = self._init_global_test() self._test_gather_helper(group, group_id, rank) @skip_but_pass_in_sandcastle_if( BACKEND != "nccl", "Only Nccl supports CUDA gather" ) @skip_if_no_gpu def test_gather_cuda(self): group, group_id, rank = self._init_global_test() rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) self._test_gather_helper(group, group_id, rank, True, rank_to_GPU) @skip_but_pass_in_sandcastle_if( BACKEND == "nccl", "Nccl does not support CPU tensors" ) @skip_but_pass_in_sandcastle_if( BACKEND == "ucc", "CPU tensor ops not supported by UCP TL" ) @skip_if_small_worldsize def test_gather_group(self): group, group_id, rank = self._init_group_test() self._test_gather_helper(group, group_id, rank) @skip_but_pass_in_sandcastle_if( BACKEND == "nccl", "Nccl does not support CPU tensors" ) @skip_but_pass_in_sandcastle_if( BACKEND == "ucc", "CPU tensor ops not supported by UCP TL" ) def test_gather_full_group(self): group, group_id, rank = self._init_full_group_test() self._test_gather_helper(group, group_id, rank) # ALL GATHER def _test_all_gather_helper( self, group, group_id, rank, cuda=False, rank_to_GPU=None, dtype=torch.float ): for dest in group: tensor = _build_tensor(dest + 1, rank, dtype=dtype) tensors = [_build_tensor(dest + 1, -1, dtype=dtype) for i in group] allgather = dist.all_gather if cuda: tensor = tensor.cuda(rank_to_GPU[rank][0]) tensors = [t.cuda(rank_to_GPU[rank][0]) for t in tensors] if tensors[0].dtype == torch.complex64: tensor_shapes = [torch.view_as_real(tensors[0]).shape] else: tensor_shapes = [tensors[0].shape] self.call_dist_op( ":all_gather", False, allgather, tensors, tensor, group_id, False, tensor_shapes=tensor_shapes, ) expected_tensors = [ _build_tensor(dest + 1, i, dtype=dtype) for i in group ] for t1, t2 in zip(tensors, expected_tensors): self.assertEqual(t1, t2) self._barrier() 
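
    # --- Editor's note: illustrative sketch, not part of the original suite. ---
    # Minimal example of the all_gather contract exercised by the tests below:
    # every rank passes one input tensor plus a pre-allocated list of
    # world_size output tensors, and after the call tensor_list[i] holds the
    # tensor contributed by rank i, on every rank. The method name and tensor
    # sizes are editor-added assumptions; the collective call is real API and
    # an initialized default process group is assumed.
    def _example_all_gather_contract(self):
        world_size = dist.get_world_size()
        rank = dist.get_rank()
        tensor_in = torch.full((2,), float(rank))
        tensor_list = [torch.empty(2) for _ in range(world_size)]
        dist.all_gather(tensor_list, tensor_in)
        for i, gathered in enumerate(tensor_list):
            # Output slot i comes from rank i, regardless of the calling rank.
            self.assertEqual(gathered, torch.full((2,), float(i)))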
@skip_but_pass_in_sandcastle_if( BACKEND == "nccl", "Nccl does not support CPU tensors" ) def test_all_gather(self): group, group_id, rank = self._init_global_test() self._test_all_gather_helper(group, group_id, rank) @skip_but_pass_in_sandcastle_if( BACKEND != "nccl", "Only Nccl supports CUDA all gather" ) @skip_if_no_gpu def test_all_gather_cuda(self): group, group_id, rank = self._init_global_test() rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) self._test_all_gather_helper(group, group_id, rank, True, rank_to_GPU) @skip_but_pass_in_sandcastle_if( BACKEND == "nccl", "Nccl does not support CPU tensors" ) def test_all_gather_complex(self): group, group_id, rank = self._init_global_test() self._test_all_gather_helper(group, group_id, rank, dtype=torch.cfloat) @skip_but_pass_in_sandcastle_if( BACKEND != "nccl", "Only Nccl supports CUDA all gather" ) @skip_if_no_gpu def test_all_gather_cuda_complex(self): group, group_id, rank = self._init_global_test() rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) self._test_all_gather_helper( group, group_id, rank, True, rank_to_GPU, dtype=torch.cfloat ) @skip_if_small_worldsize @skip_but_pass_in_sandcastle_if( BACKEND == "nccl", "Nccl does not support CPU tensors" ) def test_all_gather_group(self): group, group_id, rank = self._init_group_test() self._test_all_gather_helper(group, group_id, rank) @skip_but_pass_in_sandcastle_if( BACKEND == "nccl", "Nccl does not support CPU tensors" ) def test_all_gather_full_group(self): group, group_id, rank = self._init_full_group_test() self._test_all_gather_helper(group, group_id, rank) @skip_but_pass_in_sandcastle_if( BACKEND != "nccl", "Only Nccl supports all_gather_v" ) @skip_if_no_gpu def test_all_gather_v_cuda(self): self._barrier() group, group_id, rank = self._init_global_test() rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) device_id = rank_to_GPU[rank][0] output_split_sizes = [dst + 1 for dst in group] sum_len = sum(output_split_sizes) value = 2 for async_val in [True, False]: tensor = ( torch.empty( output_split_sizes[rank], sum_len, sum_len, dtype=torch.float ) .fill_(value) .cuda(device_id) ) out_tensor = _build_tensor(sum_len, -1, device_id=device_id) req = dist.all_gather( list(torch.split(out_tensor, output_split_sizes)), tensor, group_id, async_val, ) if async_val: req.wait() expected_value = value expected_tensor = _build_tensor( sum_len, expected_value, device_id=device_id ) self.assertEqual(out_tensor, expected_tensor) self._barrier() # Test all_gather accepting single tensor as output def _all_gather_into_tensor_helper( self, tensor_out, tensor_in, group_id, rank, cuda=True, rank_to_GPU=None ): if cuda: tensor_in = tensor_in.cuda(rank_to_GPU[rank][0]) tensor_out = tensor_out.cuda(rank_to_GPU[rank][0]) if tensor_out.dtype == torch.complex64: tensor_shapes = [torch.view_as_real(tensor_in).shape] else: tensor_shapes = [tensor_in.shape] self.call_dist_op( ":all_gather_into_tensor", False, dist.all_gather_into_tensor, tensor_out, tensor_in, group_id, False, expect_event=False, tensor_shapes=tensor_shapes, ) return tensor_out @skip_but_pass_in_sandcastle_if( BACKEND != "nccl", "Only Nccl supports CUDA all_gather_into_tensor" ) @skip_if_no_gpu def test_all_gather_into_cat_tensor_cuda(self): group, group_id, rank = self._init_global_test() rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) size = 2 tensor_in = torch.ones([size, size]) * rank # Concatenated output tensor_out = torch.ones([len(group) * size, size]) * (-1) 
tensor_out = self._all_gather_into_tensor_helper( tensor_out, tensor_in, group_id, rank, True, rank_to_GPU ) # Check result # Concatenate all blocks into a bigger tensor expected_tensor = torch.cat([torch.ones([size, size]) * i for i in group]) self.assertEqual(tensor_out, expected_tensor) self._barrier() @skip_but_pass_in_sandcastle_if( BACKEND != "nccl", "Only Nccl supports CUDA all_gather_into_tensor" ) @skip_if_no_gpu def test_all_gather_into_stack_tensor_cuda(self): group, group_id, rank = self._init_global_test() rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) size = 2 tensor_in = torch.ones([size, size]) * rank # Stacked output tensor_out = torch.ones([len(group), size, size]) * (-1) tensor_out = self._all_gather_into_tensor_helper( tensor_out, tensor_in, group_id, rank, True, rank_to_GPU ) # Check result # Stack all blocks into a bigger tensor expected_tensor = torch.stack([torch.ones([size, size]) * i for i in group]) self.assertEqual(tensor_out, expected_tensor) self._barrier() def _run_all_gather_coalesced_and_verify( self, output_tensor_lists, input_tensors, expected_tensors, group_id ): """ Helper that runs all_gather_coalesced and returns true if output matches expectations. """ tensor_shapes = [] for input_tensor in input_tensors: if input_tensor.dtype == torch.complex64: tensor_shapes.append(torch.view_as_real(input_tensor).shape) else: tensor_shapes.append(input_tensor.shape) self.call_dist_op( ":all_gather", False, dist.all_gather_coalesced, output_tensor_lists, input_tensors, group_id, tensor_shapes=tensor_shapes, ) for l1, l2 in zip(output_tensor_lists, expected_tensors): for t1, t2 in zip(l1, l2): if not torch.equal(t1, t2): return False return True def _test_all_gather_coalesced_helper( self, group, group_id, rank, dtype=torch.float ): # TODO: Instead we should probably go through _rank_not_in_group # mechanism to disable sending tensors if group_id is not None: for test_case_id in range(2, 5): # Make sure we create tensors of incompatible sizes, e.g. # [1], [2x2], [3x3x3] ... 
to be sent in one batch input_tensors = [ _build_multidim_tensor( tensor_id, tensor_id, rank + tensor_id, dtype=dtype ) for tensor_id in range(1, test_case_id) ] output_tensor_lists = [ [ _build_multidim_tensor( tensor_id, tensor_id, -1, dtype=dtype ) for tensor_id in range(1, test_case_id) ] for _ in group ] expected_tensors = [ [ _build_multidim_tensor( tensor_id, tensor_id, rank_iter + tensor_id, dtype=dtype ) for tensor_id in range(1, test_case_id) ] for rank_iter in group ] assert self._run_all_gather_coalesced_and_verify( output_tensor_lists, input_tensors, expected_tensors, group_id ), "output tensors do not match expected outputs" self._barrier() @skip_but_pass_in_sandcastle_if( BACKEND in DistTestCases.skip_collective["allgather_coalesced"], f"{BACKEND} does not support all_gather_coalesced", ) def test_all_gather_coalesced_simple(self): group, group_id, rank = self._init_global_test() self._test_all_gather_coalesced_helper(group, group_id, rank) @skip_but_pass_in_sandcastle_if( BACKEND in DistTestCases.skip_collective["allgather_coalesced"], f"{BACKEND} does not support all_gather_coalesced", ) def test_all_gather_coalesced_complex(self): group, group_id, rank = self._init_global_test() self._test_all_gather_coalesced_helper( group, group_id, rank, dtype=torch.cfloat ) @skip_if_small_worldsize @skip_but_pass_in_sandcastle_if( BACKEND in DistTestCases.skip_collective["allgather_coalesced"], f"{BACKEND} does not support all_gather_coalesced", ) def test_all_gather_coalesced_group(self): group, group_id, rank = self._init_group_test() self._test_all_gather_coalesced_helper(group, group_id, rank) @skip_but_pass_in_sandcastle_if( BACKEND in DistTestCases.skip_collective["allgather_coalesced"], f"{BACKEND} does not support all_gather_coalesced", ) def test_all_gather_coalesced_full_group(self): group, group_id, rank = self._init_full_group_test() self._test_all_gather_coalesced_helper(group, group_id, rank) @skip_but_pass_in_sandcastle_if( BACKEND in DistTestCases.skip_collective["allgather_coalesced"], f"{BACKEND} does not support all_gather_coalesced", ) def test_all_gather_coalesced_with_empty(self): group, group_id, rank = self._init_global_test() input_tensors = [ rank * torch.ones([2, 2]), torch.ones([0]), (rank + 1) * torch.ones([3, 3]), torch.ones([0]), torch.ones([0]), ] output_tensors_lists = [ [ -1 * torch.ones([2, 2]), -1 * torch.ones([0]), -1 * torch.ones([3, 3]), -1 * torch.ones([0]), -1 * torch.ones([0]), ] for _ in group ] expected_tensors = [ [ r * torch.ones([2, 2]), torch.ones([0]), (r + 1) * torch.ones([3, 3]), torch.ones([0]), torch.ones([0]), ] for r in group ] assert self._run_all_gather_coalesced_and_verify( output_tensors_lists, input_tensors, expected_tensors, group_id ) self._barrier() # AllToAll def _test_all_to_all_single_equal_split_helper( self, group, group_id, rank, cuda=False, rank_to_GPU=None, dtype=torch.float ): if group_id is not None: size = len(group) in_tensor = torch.ones([size, size], dtype=dtype) * rank expected_tensor = torch.cat( [torch.ones([1, size], dtype=dtype) * i for i in group] ) out_tensor = torch.ones([size, size], dtype=dtype) * -1 if cuda: in_tensor = in_tensor.cuda(rank_to_GPU[rank][0]) expected_tensor = expected_tensor.cuda(rank_to_GPU[rank][0]) out_tensor = out_tensor.cuda(rank_to_GPU[rank][0]) if dtype == torch.complex64: tensor_shapes = [torch.view_as_real(in_tensor).shape] else: tensor_shapes = [in_tensor.shape] self.call_dist_op( ":all_to_all", False, dist.all_to_all_single, out_tensor, in_tensor, group=group_id, 
tensor_shapes=tensor_shapes, ) self.assertEqual(out_tensor, expected_tensor) self._barrier() def _test_all_to_all_single_unequal_split_helper( self, group, group_id, rank, cuda=False, rank_to_GPU=None, dtype=torch.float ): if group_id is not None: size = len(group) in_splits = [i + 1 for i in group] out_splits = [rank + 1 for _ in group] in_tensor = torch.ones([sum(in_splits), size], dtype=dtype) * rank out_tensor = torch.ones([(rank + 1) * size, size], dtype=dtype) expected_tensor = torch.cat( [torch.ones([rank + 1, size], dtype=dtype) * i for i in group] ) if cuda: in_tensor = in_tensor.cuda(rank_to_GPU[rank][0]) expected_tensor = expected_tensor.cuda(rank_to_GPU[rank][0]) out_tensor = out_tensor.cuda(rank_to_GPU[rank][0]) dist.all_to_all_single( out_tensor, in_tensor, out_splits, in_splits, group=group_id ) self.assertEqual(out_tensor, expected_tensor) self._barrier() def _test_all_to_all_helper( self, group, group_id, rank, cuda=False, rank_to_GPU=None, dtype=torch.float, ): if group_id is not None: size = len(group) in_splits = [i + 1 for i in group] in_tensors = [ torch.ones([in_splits[i], size], dtype=dtype) * rank for i, _ in enumerate(group) ] out_tensors = [ torch.ones([(rank + 1), size], dtype=dtype) for _ in group ] expected_tensors = [ torch.ones([rank + 1, size], dtype=dtype) * i for i in group ] if cuda: in_tensors = [t.cuda(rank_to_GPU[rank][0]) for t in in_tensors] expected_tensors = [ t.cuda(rank_to_GPU[rank][0]) for t in expected_tensors ] out_tensors = [t.cuda(rank_to_GPU[rank][0]) for t in out_tensors] dist.all_to_all(out_tensors, in_tensors, group=group_id) for t1, t2 in zip(out_tensors, expected_tensors): self.assertEqual(t1, t2) self._barrier() @skip_but_pass_in_sandcastle_if( BACKEND != "mpi", "Only MPI supports CPU all_to_all_single" ) def test_all_to_all_single_equal_split(self): group, group_id, rank = self._init_global_test() self._test_all_to_all_single_equal_split_helper(group, group_id, rank) @skip_but_pass_in_sandcastle_if( BACKEND != "nccl", "Only Nccl supports CUDA all_to_all_single" ) @skip_if_no_gpu def test_all_to_all_single_equal_split_cuda(self): group, group_id, rank = self._init_global_test() rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) self._test_all_to_all_single_equal_split_helper( group, group_id, rank, True, rank_to_GPU, ) @skip_but_pass_in_sandcastle_if( BACKEND != "mpi", "Only MPI supports CPU all_to_all_single" ) def test_all_to_all_single_equal_split_complex(self): group, group_id, rank = self._init_global_test() self._test_all_to_all_single_equal_split_helper( group, group_id, rank, dtype=torch.cfloat ) @skip_but_pass_in_sandcastle_if( BACKEND != "nccl", "Only Nccl supports CUDA all_to_all_single" ) @skip_if_no_gpu def test_all_to_all_single_equal_split_cuda_complex(self): group, group_id, rank = self._init_global_test() rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) self._test_all_to_all_single_equal_split_helper( group, group_id, rank, True, rank_to_GPU, dtype=torch.cfloat ) @skip_but_pass_in_sandcastle_if( BACKEND != "mpi", "Only MPI supports CPU all_to_all_single" ) def test_all_to_all_single_unequal_split(self): group, group_id, rank = self._init_global_test() self._test_all_to_all_single_unequal_split_helper(group, group_id, rank) @skip_but_pass_in_sandcastle_if( BACKEND != "nccl", "Only Nccl supports CUDA all_to_all_single" ) @skip_if_no_gpu def test_all_to_all_single_unequal_split_cuda(self): group, group_id, rank = self._init_global_test() rank_to_GPU = 
init_multigpu_helper(dist.get_world_size(), BACKEND) self._test_all_to_all_single_unequal_split_helper( group, group_id, rank, True, rank_to_GPU, ) @skip_but_pass_in_sandcastle_if( BACKEND != "mpi", "Only MPI supports CPU all_to_all_single" ) def test_all_to_all_single_unequal_split_complex(self): group, group_id, rank = self._init_global_test() self._test_all_to_all_single_unequal_split_helper( group, group_id, rank, dtype=torch.cfloat ) @skip_but_pass_in_sandcastle_if( BACKEND != "nccl", "Only Nccl supports CUDA all_to_all_single" ) @skip_if_no_gpu def test_all_to_all_single_unequal_split_cuda_complex(self): group, group_id, rank = self._init_global_test() rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) self._test_all_to_all_single_unequal_split_helper( group, group_id, rank, True, rank_to_GPU, dtype=torch.cfloat, ) @skip_but_pass_in_sandcastle_if( BACKEND != "mpi", "Only MPI supports all_to_all" ) def test_all_to_all(self): group, group_id, rank = self._init_global_test() self._test_all_to_all_helper(group, group_id, rank) @skip_but_pass_in_sandcastle_if( BACKEND != "nccl", "Only NCCL supports CUDA all_to_all" ) @skip_if_rocm_multiprocess def test_all_to_all_cuda(self): group, group_id, rank = self._init_global_test() rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) self._test_all_to_all_helper(group, group_id, rank, True, rank_to_GPU) @skip_but_pass_in_sandcastle_if( BACKEND != "mpi", "Only MPI supports all_to_all" ) def test_all_to_all_complex(self): group, group_id, rank = self._init_global_test() self._test_all_to_all_helper(group, group_id, rank, dtype=torch.cfloat) @skip_but_pass_in_sandcastle_if( BACKEND != "nccl", "Only NCCL supports CUDA all_to_all" ) @skip_if_rocm_multiprocess def test_all_to_all_cuda_complex(self): group, group_id, rank = self._init_global_test() rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) self._test_all_to_all_helper( group, group_id, rank, True, rank_to_GPU, dtype=torch.cfloat ) @skip_but_pass_in_sandcastle_if( BACKEND != "mpi", "Only MPI supports CPU all_to_all_single" ) @skip_if_small_worldsize def test_all_to_all_single_equal_split_group(self): group, group_id, rank = self._init_group_test() self._test_all_to_all_single_equal_split_helper(group, group_id, rank) @skip_but_pass_in_sandcastle_if( BACKEND != "nccl", "Only Nccl supports CUDA all_to_all_single" ) @skip_if_no_gpu @skip_if_small_worldsize def test_all_to_all_single_equal_split_group_cuda(self): group, group_id, rank = self._init_group_test() rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) self._test_all_to_all_single_equal_split_helper( group, group_id, rank, True, rank_to_GPU, ) @skip_but_pass_in_sandcastle_if( BACKEND != "mpi", "Only MPI supports CPU all_to_all_single" ) @skip_if_small_worldsize def test_all_to_all_single_unequal_split_group(self): group, group_id, rank = self._init_group_test() self._test_all_to_all_single_unequal_split_helper(group, group_id, rank) @skip_but_pass_in_sandcastle_if( BACKEND != "nccl", "Only Nccl supports CUDA all_to_all_single" ) @skip_if_no_gpu @skip_if_small_worldsize def test_all_to_all_single_unequal_split_group_cuda(self): group, group_id, rank = self._init_global_test() rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) self._test_all_to_all_single_unequal_split_helper( group, group_id, rank, True, rank_to_GPU, ) @skip_but_pass_in_sandcastle_if( BACKEND != "mpi", "Only MPI supports all_to_all" ) @skip_if_small_worldsize def test_all_to_all_group(self): group, 
group_id, rank = self._init_group_test() self._test_all_to_all_helper(group, group_id, rank) @skip_but_pass_in_sandcastle_if( BACKEND != "nccl", "Only Nccl supports CUDA all_to_all_single" ) @skip_if_small_worldsize @skip_if_rocm_multiprocess def test_all_to_all_group_cuda(self): group, group_id, rank = self._init_group_test() rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) self._test_all_to_all_helper(group, group_id, rank, True, rank_to_GPU) @skip_but_pass_in_sandcastle_if( BACKEND != "mpi", "Only MPI supports CPU all_to_all_single" ) def test_all_to_all_single_equal_split_full_group(self): group, group_id, rank = self._init_full_group_test() self._test_all_to_all_single_equal_split_helper(group, group_id, rank) @skip_but_pass_in_sandcastle_if( BACKEND != "nccl", "Only Nccl supports CUDA all_to_all_single" ) @skip_if_no_gpu def test_all_to_all_single_equal_split_full_group_cuda(self): group, group_id, rank = self._init_full_group_test() rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) self._test_all_to_all_single_equal_split_helper( group, group_id, rank, True, rank_to_GPU, ) @skip_but_pass_in_sandcastle_if( BACKEND != "mpi", "Only MPI supports CPU all_to_all_single" ) def test_all_to_all_single_unequal_split_full_group(self): group, group_id, rank = self._init_full_group_test() self._test_all_to_all_single_unequal_split_helper(group, group_id, rank) @skip_but_pass_in_sandcastle_if( BACKEND != "nccl", "Only Nccl supports CUDA all_to_all_single" ) @skip_if_no_gpu def test_all_to_all_single_unequal_split_full_group_cuda(self): group, group_id, rank = self._init_full_group_test() rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) self._test_all_to_all_single_unequal_split_helper( group, group_id, rank, True, rank_to_GPU, ) @skip_but_pass_in_sandcastle_if( BACKEND != "mpi", "Only MPI supports all_to_all" ) def test_all_to_all_full_group(self): group, group_id, rank = self._init_full_group_test() self._test_all_to_all_helper(group, group_id, rank) @skip_but_pass_in_sandcastle_if( BACKEND != "nccl", "Only NCCL supports CUDA all_to_all" ) @skip_if_rocm_multiprocess def test_all_to_all_full_group_cuda(self): group, group_id, rank = self._init_full_group_test() rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) self._test_all_to_all_helper(group, group_id, rank, True, rank_to_GPU) # BARRIER def _test_barrier_helper( self, group, group_id, rank, cuda=False, rank_to_GPU=None ): WAIT_TIME = 0.3 # seconds for dest in group: expected_time = torch.DoubleTensor(1).fill_(0.0) if cuda: expected_time = expected_time.cuda(rank_to_GPU[rank][0]) if dest == rank: expected_time.fill_(time.time() + WAIT_TIME) dist.broadcast(expected_time, dest, group_id) time.sleep(WAIT_TIME + 0.1) # sleep a little bit longer dist.barrier(group_id) else: dist.broadcast(expected_time, dest, group_id) dist.barrier(group_id) self.assertGreaterAlmostEqual( float(time.time()), float(expected_time[0]), msg=f"destination rank: {dest:d}, my rank: {rank:d}" + " (if you see this failure, please report in #14554)", ) # Use higher timeout for the instance where the test runs # against a subgroup and uses a CUDA tensor for expected time. # The CUDA initialization for the participating processes can # take long enough for the barrier timeout to trigger on the # process that doesn't participate in the group. 
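        # Illustrative sketch (added for clarity, not part of the original file)
        # of the timing-based barrier check performed above, simplified to a
        # hypothetical process group `pg` where only rank 0 broadcasts the
        # deadline and holds the barrier for WAIT_TIME seconds:
        #
        #     deadline = torch.DoubleTensor(1).fill_(0.0)
        #     if dist.get_rank() == 0:
        #         deadline.fill_(time.time() + 0.3)
        #         dist.broadcast(deadline, 0, pg)
        #         time.sleep(0.4)        # hold every other rank at the barrier
        #         dist.barrier(pg)
        #     else:
        #         dist.broadcast(deadline, 0, pg)
        #         dist.barrier(pg)       # cannot return before rank 0 arrives
        #         assert time.time() >= float(deadline[0])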
        self._barrier(timeout=20)

    @skip_if_no_gpu
    @skip_but_pass_in_sandcastle_if(
        BACKEND == "mpi", "MPI doesn't supports GPU barrier"
    )
    @skip_but_pass_in_sandcastle_if(
        BACKEND == "ucc" and IS_SANDCASTLE, "Skipped internally"
    )
    def test_barrier_cuda(self):
        group, group_id, rank = self._init_global_test()
        rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND)
        self._test_barrier_helper(group, group_id, rank, True, rank_to_GPU)

    @skip_if_small_worldsize
    @skip_if_no_gpu
    @skip_but_pass_in_sandcastle_if(
        BACKEND == "mpi", "MPI doesn't supports GPU barrier"
    )
    def test_barrier_group_cuda(self):
        group, group_id, rank = self._init_group_test()
        rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND)
        self._test_barrier_helper(group, group_id, rank, True, rank_to_GPU)

    @skip_if_small_worldsize
    @skip_if_no_gpu
    @skip_but_pass_in_sandcastle_if(
        BACKEND == "mpi", "MPI doesn't supports GPU barrier"
    )
    def test_barrier_full_group_cuda(self):
        group, group_id, rank = self._init_full_group_test()
        rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND)
        self._test_barrier_helper(group, group_id, rank, True, rank_to_GPU)

    @skip_but_pass_in_sandcastle_if(
        BACKEND in DistTestCases.skip_collective["cpu barrier"],
        f"{BACKEND} does not support CPU barrier",
    )
    def test_barrier(self):
        group, group_id, rank = self._init_global_test()
        self._test_barrier_helper(group, group_id, rank)

    @skip_if_small_worldsize
    @skip_but_pass_in_sandcastle_if(
        BACKEND in DistTestCases.skip_collective["cpu barrier"],
        f"{BACKEND} does not support CPU barrier",
    )
    def test_barrier_group(self):
        group, group_id, rank = self._init_group_test()
        self._test_barrier_helper(group, group_id, rank)

    @skip_but_pass_in_sandcastle_if(
        BACKEND in DistTestCases.skip_collective["cpu barrier"],
        f"{BACKEND} does not support CPU barrier",
    )
    def test_barrier_full_group(self):
        group, group_id, rank = self._init_full_group_test()
        self._test_barrier_helper(group, group_id, rank)

    def _model_step(self, model):
        for param in model.parameters():
            if param.grad is not None:
                with torch.no_grad():
                    param += param.grad
                param.grad = None

    def _model_step_with_zero_grad(self, model):
        for param in model.parameters():
            if param.grad is not None:
                with torch.no_grad():
                    param += param.grad
                param.grad.requires_grad_(False)
                param.grad.zero_()

    def _prepare_dummy_data(self, local_bs):
        # global_bs for DDP should be divisible by WORLD_SIZE
        world_size = int(os.environ["WORLD_SIZE"])
        global_bs = world_size * local_bs
        input_cpu = torch.randn(global_bs, 2)
        target = torch.randn(global_bs, 4)
        loss = nn.MSELoss()
        return global_bs, input_cpu, target, loss

    # END TO END TEST FOR DISTRIBUTEDDATAPARALLEL
    def _test_DDP_helper(
        self, model, input_var, target, loss, scale_factor=1.0, memory_format=None
    ):
        model.train()
        output = model(input_var)
        l = loss(output, target) * scale_factor
        l.backward()
        if memory_format is not None:
            self.assertTrue(output.is_contiguous(memory_format=memory_format))

    def _assert_equal_param(self, param_gpu, param_DDP):
        self.assertEqual(len(param_gpu), len(param_DDP))
        for p_gpu, p_DDP in zip(param_gpu, param_DDP):
            self.assertEqual(p_gpu, p_DDP)

    def _test_DDP_niter(
        self,
        model_base,
        model_DDP,
        input,
        target,
        loss,
        local_bs,
        rank,
        batch_size,
        test_save,
        offset=None,
        world_size=0,
        zero_grad=False,
        memory_format=None,
        n_iter=5,
    ):
        for idx in range(n_iter):
            # single cpu/gpu training
            self._test_DDP_helper(
                model_base, input, target, loss, memory_format=memory_format
            )

            if offset is None:
                offset = rank * local_bs

            # DDP training, DDP scatters subsets of input_cpu to nodes/GPUs
            self._test_DDP_helper(
                model_DDP,
                input[offset : offset + local_bs],
                target[offset : offset + local_bs],
                loss,
                world_size * local_bs / batch_size if world_size != 0 else 1,
                memory_format=memory_format,
            )

            # Update weights and run a second iteration to shake out errors
            if zero_grad:
                self._model_step_with_zero_grad(model_base)
                self._model_step_with_zero_grad(model_DDP)
            else:
                self._model_step(model_base)
                self._model_step(model_DDP)
            self._assert_equal_param(
                list(model_base.parameters()), list(model_DDP.module.parameters())
            )

            # Shuffle the input so that DDP input is different
            input = input[torch.randperm(batch_size)]

            # save the model in the middle and reload
            if test_save and idx == 2 and INIT_METHOD.startswith("file://"):
                with tempfile.NamedTemporaryFile() as tmp:
                    if sys.platform == "win32":
                        torch.save(model_DDP, tmp)
                        tmp.seek(0)
                        # weights_only=False as this is legacy code that saves the model
                        model_DDP = torch.load(tmp, weights_only=False)
                    else:
                        torch.save(model_DDP, tmp.name)
                        # weights_only=False as this is legacy code that saves the model
                        model_DDP = torch.load(tmp.name, weights_only=False)

        with tempfile.TemporaryFile() as tmp_file:
            torch.save(model_DDP, tmp_file)
            tmp_file.seek(0)
            # weights_only=False as this is legacy code that saves the model
            saved_model = torch.load(tmp_file, weights_only=False)
        for k in model_DDP.state_dict():
            self.assertEqual(model_DDP.state_dict()[k], saved_model.state_dict()[k])

    def _test_DistributedDataParallel(
        self,
        gpu_subset,
        rank,
        output_device=None,
        gradient_as_bucket_view=False,
        static_graph=False,
        set_static_graph_twice=False,
    ):
        # Run a simple end to end DDP model, use result of single node model
        # as baseline

        # cpu training setup
        model = DDP_NET

        # single gpu training setup
        model_gpu = copy.deepcopy(model)
        model_gpu.cuda(gpu_subset[0])

        # DDP training setup
        model_DDP = copy.deepcopy(model)
        model_DDP.cuda(gpu_subset[0])
        model_DDP = nn.parallel.DistributedDataParallel(
            model_DDP,
            device_ids=gpu_subset,
            gradient_as_bucket_view=gradient_as_bucket_view,
            static_graph=static_graph,
        )

        if set_static_graph_twice:
            model_DDP._set_static_graph()

        # test serializable/unserializable
        with tempfile.NamedTemporaryFile() as tmp:
            if sys.platform == "win32":
                torch.save(model_DDP, tmp)
                tmp.seek(0)
                # weights_only=False as this is legacy code that saves the model
                model_DDP = torch.load(tmp, weights_only=False)
            else:
                torch.save(model_DDP, tmp.name)
                # weights_only=False as this is legacy code that saves the model
                model_DDP = torch.load(tmp.name, weights_only=False)

        # dummy data initialization
        local_bs = len(gpu_subset)
        global_bs, input_cpu, target, loss = self._prepare_dummy_data(local_bs)

        # check two model parameters over 5 iterations
        self._test_DDP_niter(
            model_gpu,
            model_DDP,
            input_cpu.cuda(gpu_subset[0]),
            target.cuda(gpu_subset[0]),
            loss,
            local_bs,
            rank,
            global_bs,
            True,
        )
        self._barrier()

    def _test_DistributedDataParallelCPU(self, gradient_as_bucket_view=False):
        # Run a simple end to end DDP-CPU model, use result of single node
        # model as baseline
        _group, _group_id, rank = self._init_global_test()

        # cpu training setup
        model_base = DDP_NET

        # DDP-CPU training setup
        model_DDP = copy.deepcopy(model_base)
        model_DDP = nn.parallel.DistributedDataParallel(
            model_DDP, gradient_as_bucket_view=gradient_as_bucket_view
        )

        # dummy data initialization
        local_bs = 2
        global_bs, input_cpu, target, loss = self._prepare_dummy_data(local_bs)

        # check two model parameters over 5 iterations
        self._test_DDP_niter(
            model_base,
            model_DDP,
            input_cpu,
            target,
            loss,
            local_bs,
            rank,
            global_bs,
            False,
            zero_grad=True,
        )
        self._barrier()

        return model_DDP

    @skip_but_pass_in_sandcastle_if(
        BACKEND == "nccl", "nccl does not support DDP on CPU models"
    )
    def test_DistributedDataParallelCPU(self):
        self._test_DistributedDataParallelCPU()

    @skip_but_pass_in_sandcastle_if(
        BACKEND == "nccl", "nccl does not support DDP on CPU models"
    )
    def test_DistributedDataParallelCPU_grad_is_view(self):
        self._test_DistributedDataParallelCPU(gradient_as_bucket_view=True)

    @skip_but_pass_in_sandcastle_if(
        BACKEND not in DistTestCases.backend_feature["ddp"],
        f"The {BACKEND} backend does not support DistributedDataParallel",
    )
    def test_DistributedDataParallel_requires_grad(self):
        # a module without gradients shouldn't be accepted
        self.assertRaises(
            RuntimeError, lambda: nn.parallel.DistributedDataParallel(nn.Module())
        )
        self._barrier()

    @skip_but_pass_in_sandcastle_if(
        BACKEND not in DistTestCases.backend_feature["ddp"],
        f"The {BACKEND} backend does not support DistributedDataParallel",
    )
    @skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"]))
    def test_ddp_zero_output_features(self):
        class ToyModel(nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.net1 = nn.Linear(10, 10)
                self.relu = nn.ReLU()
                self.net2 = nn.Linear(10, 0)

        model = ToyModel().to(self.rank)
        nn.parallel.DistributedDataParallel(
            model, device_ids=[self.rank]
        )

    @skip_but_pass_in_sandcastle_if(BACKEND == "nccl", "Gloo-only test")
    def test_ddp_create_graph(self):
        class Model(nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.p = nn.Parameter(torch.tensor(1.0))

            def forward(self):
                return self.p.pow(2)

        model = Model()
        ddp_model = torch.nn.parallel.DistributedDataParallel(model)
        for _ in range(6):
            # Verify DDP doesn't throw when ran with create_graph=True.
            # Although we do warn about potential issues, please see
            # https://github.com/pytorch/pytorch/issues/63929 for details.
            ddp_model().backward(create_graph=True)
            # grad tensors should require grad.
            self.assertTrue(
                all(param.requires_grad for param in ddp_model.parameters())
            )

    @skip_but_pass_in_sandcastle_if(
        BACKEND not in DistTestCases.backend_feature["ddp"],
        f"The {BACKEND} backend does not support DistributedDataParallel",
    )
    @skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"]))
    def test_DistributedDataParallel_non_default_stream(self):
        stream = torch.cuda.Stream(self.rank)
        rank = self.rank
        with torch.cuda.stream(stream):
            net = torch.nn.parallel.DistributedDataParallel(
                torch.nn.Linear(1, 1, bias=False).cuda(rank), device_ids=[rank]
            )
            for i in range(1000):
                # Clear gradients manually
                grad = net.module.weight.grad
                if grad is not None:
                    grad.requires_grad_(False)
                    grad.zero_()
                # Forward + BW
                batch = torch.tensor([rank]).float().cuda(rank)
                loss = net(batch).sum()
                loss.backward()
                # For each worker, the gradient on the weight should be worker_rank.
                grad = net.module.weight.grad
                avg = grad.clone()
                # All-reducing the gradient averages should give us the gradient
                # average. If not, then one of the workers has not correctly
                # written back the averaged gradient before this all-reduce call.
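                # Worked example (added for clarity, not part of the original
                # test), assuming a hypothetical world_size == 2: rank 0 feeds
                # batch [0.] and rank 1 feeds [1.], so the local d(loss)/d(w) is
                # 0 and 1 respectively. DDP already averaged these during
                # backward(), so every rank should hold (0 + 1) / 2 = 0.5; the
                # all-reduce plus divide below only re-averages identical values
                # and must still equal expected_grad = sum(range(2)) / 2 = 0.5.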
                dist.all_reduce(avg)
                world_size = int(os.environ["WORLD_SIZE"])
                avg.div_(world_size)
                expected_grad = sum(i for i in range(world_size)) / world_size
                self.assertEqual(
                    avg[0, 0],
                    expected_grad,
                    msg=f"Expected gradient of {expected_grad} but got {avg} on rank {self.rank}",
                )

    @skip_but_pass_in_sandcastle_if(
        BACKEND not in DistTestCases.backend_feature["cuda"],
        f"The {BACKEND} backend does not support DDP communication hook on CUDA devices",
    )
    @skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"]))
    def test_ddp_comm_hook_logging(self):
        hooks = [
            default.allreduce_hook,
            default.fp16_compress_hook,
            powerSGD.powerSGD_hook,
            powerSGD.batched_powerSGD_hook,
            quantization_hooks.quantization_pertensor_hook,
            quantization_hooks.quantization_perchannel_hook,
        ]
        cpp_builtin_hooks = [
            dist.BuiltinCommHookType.ALLREDUCE,
            dist.BuiltinCommHookType.FP16_COMPRESS,
        ]

        for hook in hooks:
            ddp_model = torch.nn.parallel.DistributedDataParallel(
                torch.nn.Linear(1, 1, bias=False).cuda(self.rank),
                device_ids=[self.rank],
            )
            ddp_logging_data = ddp_model._get_ddp_logging_data()
            # Hook not registered yet, so should be empty
            self.assertEqual(ddp_logging_data.get("comm_hook"), None)
            ddp_model.register_comm_hook(None, hook)
            ddp_logging_data = ddp_model._get_ddp_logging_data()
            self.assertEqual(ddp_logging_data.get("comm_hook"), hook.__qualname__)

        for hook in cpp_builtin_hooks:
            ddp_model = torch.nn.parallel.DistributedDataParallel(
                torch.nn.Linear(1, 1, bias=False).cuda(self.rank),
                device_ids=[self.rank],
            )
            ddp_logging_data = ddp_model._get_ddp_logging_data()
            # Hook not registered yet, so should be empty
            self.assertEqual(ddp_logging_data.get("comm_hook"), None)
            ddp_model._register_builtin_comm_hook(hook)
            ddp_logging_data = ddp_model._get_ddp_logging_data()
            self.assertEqual(ddp_logging_data.get("comm_hook"), str(hook))

        # No hook registered
        ddp_model = torch.nn.parallel.DistributedDataParallel(
            torch.nn.Linear(1, 1, bias=False).cuda(self.rank),
            device_ids=[self.rank],
        )
        ddp_logging_data = ddp_model._get_ddp_logging_data()
        # Hook not registered yet, so should be empty
        self.assertEqual(ddp_logging_data.get("comm_hook"), None)
        # After second forward pass, hook should still be empty string
        for _ in range(2):
            inp = torch.ones(1, 1, device=self.rank)
            loss = ddp_model(inp).sum()
            loss.backward()
        ddp_logging_data = ddp_model._get_ddp_logging_data()
        # Note: DETAIL debug mode logs DDP logging data to stdout and
        # thus accesses std::map, which fills in a default value for the
        # type if it didn't exist.
        self.assertEqual(ddp_logging_data.get("comm_hook", ""), "")

    def _test_ddp_hook_with_optimizer_parity(
        self,
        grad_as_bucket_view,
        static_graph,
        optim_cls,
        optimize_subset,
        *functional_optim_args,
        **functional_optim_kwargs,
    ):
        rank = self.rank
        torch.cuda.set_device(rank)
        torch.manual_seed(rank)
        torch.cuda.manual_seed(rank)
        models_to_test = [
            (LargeNet(), torch.randn(1, 1000).cuda()),
        ]
        if HAS_TORCHVISION:
            models_to_test.append(
                (torchvision.models.resnet50(), torch.randn(1, 3, 3, 1000).cuda())
            )
        for (model, inp) in models_to_test:
            # Enable determinism in cudnn operators
            with torch.backends.cudnn.flags(
                enabled=True, deterministic=True, benchmark=False
            ):
                # Create DDP model that runs optimizer in fused fashion.
                ddp_model_with_optimizer_hook = (
                    torch.nn.parallel.DistributedDataParallel(
                        copy.deepcopy(model).cuda(),
                        device_ids=[self.rank],
                        gradient_as_bucket_view=grad_as_bucket_view,
                        static_graph=static_graph,
                    )
                )

                # Create DDP model with no hook that does optimizer after
                # backward.
ddp_model_with_no_hook = torch.nn.parallel.DistributedDataParallel( copy.deepcopy(model).cuda(), device_ids=[self.rank], gradient_as_bucket_view=grad_as_bucket_view, static_graph=static_graph, ) hook_params = ddp_model_with_optimizer_hook.parameters() no_hook_params = ddp_model_with_no_hook.parameters() if optimize_subset: hook_params = list(hook_params) no_hook_params = list(no_hook_params) self.assertGreater(len(hook_params), 0) hook_params = [hook_params[0]] no_hook_params = [no_hook_params[0]] # Register a fused optimizer that will run optimizer in step # with allreduce. if optimize_subset: # API where optim_params is specified. ddp_model_with_optimizer_hook._register_fused_optim( optim_cls, *functional_optim_args, optim_params=hook_params, **functional_optim_kwargs, ) else: # API where optim_params is omitted ddp_model_with_optimizer_hook._register_fused_optim( optim_cls, *functional_optim_args, **functional_optim_kwargs, ) optimizer_no_hook = optim_cls( no_hook_params, *functional_optim_args, **functional_optim_kwargs, ) # Verify parameters are equal initially. for hook_param, allreduce_param in zip( ddp_model_with_optimizer_hook.parameters(), ddp_model_with_no_hook.parameters(), ): self.assertEqual(hook_param, allreduce_param) # Save old parameters to later verify optimizer modified them. opt_hook_init_params = copy.deepcopy( list(ddp_model_with_optimizer_hook.parameters()) ) # Run optimizer with hook model. for _ in range(6): ddp_model_with_optimizer_hook.zero_grad() out = ddp_model_with_optimizer_hook(inp) loss = out.sum() loss.backward() dist.barrier() # Run regular model. for _ in range(6): ddp_model_with_no_hook.zero_grad() out = ddp_model_with_no_hook(inp) loss = out.sum() loss.backward() optimizer_no_hook.step() dist.barrier() # Now verify parameters are equal. for hook_param, allreduce_param in zip( ddp_model_with_optimizer_hook.parameters(), ddp_model_with_no_hook.parameters(), ): self.assertEqual(hook_param, allreduce_param) # Verify optimizer modified appropriate parameter set, # otherwise they'd be trivially equal above. 
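                # Added note (not in the original source): when optimize_subset is
                # True, only the first parameter was handed to the fused optimizer
                # registered via _register_fused_optim above, so only that
                # parameter should have moved away from its saved initial value;
                # the remaining parameters were still allreduced but never
                # stepped, which is what the two branches below assert.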
if optimize_subset: self.assertNotEqual( opt_hook_init_params[0], next(iter(ddp_model_with_optimizer_hook.parameters())), ) # Untouched params should be equal self.assertEqual( opt_hook_init_params[1:], list(ddp_model_with_optimizer_hook.parameters())[1:], ) else: self.assertNotEqual( opt_hook_init_params, list(ddp_model_with_optimizer_hook.parameters()), ) dist.barrier() """ # Commenting out the following 3 tests as they cause Sandcastle jobs to fail # Failure signature: # AttributeError: type object 'TestDistBackendWithSpawn' has no attribute 'test_ddp_hook_with_optimizer_parity_adamw from torch.testing._internal.common_utils import parametrize @skip_but_pass_in_sandcastle_if( BACKEND == "nccl" or BACKEND == "ucc", "Issues with async error handling, see https://github.com/pytorch/pytorch/issues/73259", ) @skip_if_lt_x_gpu(2) @parametrize("grad_as_bucket_view", [True, False]) @parametrize("static_graph", [True, False]) @parametrize("optimize_subset", [True, False]) def test_ddp_hook_with_optimizer_parity_adamw( self, grad_as_bucket_view, static_graph, optimize_subset, ): adamw_lr = 1e-2 adamw_betas = (0.9, 0.99) adamw_eps = 1e-6 self._test_ddp_hook_with_optimizer_parity( grad_as_bucket_view, static_graph, torch.optim.AdamW, optimize_subset, adamw_lr, betas=adamw_betas, eps=adamw_eps, ) @skip_but_pass_in_sandcastle_if( BACKEND == "nccl" or BACKEND == "ucc", "Issues with async error handling, see https://github.com/pytorch/pytorch/issues/73259", ) @skip_if_lt_x_gpu(2) @parametrize("optimize_subset", [True, False]) def test_ddp_hook_with_optimizer_parity_adam(self, optimize_subset): adam_lr = 1e-2 adam_betas = (0.9, 0.99) adam_eps = 1e-6 self._test_ddp_hook_with_optimizer_parity( True, # grad as bucket view False, # static graph torch.optim.Adam, optimize_subset, adam_lr, betas=adam_betas, eps=adam_eps, ) @skip_but_pass_in_sandcastle_if( BACKEND == "nccl" or BACKEND == "ucc", "Issues with async error handling, see https://github.com/pytorch/pytorch/issues/73259", ) @skip_if_lt_x_gpu(2) @parametrize("optimize_subset", [True, False]) def test_ddp_hook_with_optimizer_parity_sgd(self, optimize_subset): sgd_lr = 1e-2 sgd_momentum = 0.9 sgd_weight_decay = 0.01 # Not testing grad_as_bucket_view and static_graph as they are # tested in AdamW test above. self._test_ddp_hook_with_optimizer_parity( True, # grad as bucket view False, # static_graph torch.optim.SGD, optimize_subset, sgd_lr, momentum=sgd_momentum, weight_decay=sgd_weight_decay, ) """ @skip_if_lt_x_gpu(2) def test_get_data_parallel_params(self): torch.cuda.set_device(self.rank) model = TwoLinLayerNet().cuda() # Parameters to ignore are in the format {module_name}.{param_name} params_to_ignore = ["a.weight"] torch.nn.parallel.DistributedDataParallel._set_params_and_buffers_to_ignore_for_model( model, params_to_ignore ) torch.nn.parallel.DistributedDataParallel( model, device_ids=[self.rank] ) dp_params = torch.nn.parallel.DistributedDataParallel._get_data_parallel_params( model, named_params=True ) for name, _ in dp_params: self.assertNotEqual(f"module.{params_to_ignore[0]}", name) # test named_params=False, just check if returns the expected # no of parameters. 
num_ddp_params = len(list(model.parameters())) - 1 count = 0 dp_params = torch.nn.parallel.DistributedDataParallel._get_data_parallel_params(model, named_params=False) for _ in dp_params: count += 1 self.assertEqual(count, num_ddp_params) def _test_ddp_apply_optim_in_backward( self, optim_cls, optim_kwargs, init_before, gradient_as_bucket_view=True, ): # Need to seed to ensure inputs are unique across rank. Otherwise, # allreduce won't have any effect. torch.manual_seed(self.rank) torch.cuda.manual_seed(self.rank) torch.cuda.set_device(self.rank) # Test a simple linear as well as a ResNet model. models_to_test = [ nn.Sequential(nn.Linear(3, 3), nn.Linear(3, 3), nn.Linear(3, 3)).cuda() ] if HAS_TORCHVISION: models_to_test.append(torchvision.models.resnet50().cuda()) for j, model in enumerate(models_to_test): model_optim_in_bwd = copy.deepcopy(model) model = nn.parallel.DistributedDataParallel( model, device_ids=[self.rank], gradient_as_bucket_view=gradient_as_bucket_view, ) optim = optim_cls(model.parameters(), **optim_kwargs) if init_before: _apply_optimizer_in_backward( optimizer_class=optim_cls, params=model_optim_in_bwd.parameters(), optimizer_kwargs=optim_kwargs, ) model_optim_in_bwd = nn.parallel.DistributedDataParallel( model_optim_in_bwd, device_ids=[self.rank], gradient_as_bucket_view=gradient_as_bucket_view, ) if not init_before: _apply_optimizer_in_backward( optimizer_class=optim_cls, params=model_optim_in_bwd.parameters(), optimizer_kwargs=optim_kwargs, ) for p1, p2 in zip(model.parameters(), model_optim_in_bwd.parameters()): self.assertEqual(p1, p2, "Parameters not initially equal!") # Enable determinism in cudnn operators with torch.backends.cudnn.flags( enabled=True, deterministic=True, benchmark=False ): for i in range(8): inp = ( torch.randn(1, 3, 1000, 1000, device="cuda") if j == 1 else torch.randn(10, 3, device="cuda") ) model(inp).sum().backward() optim.step() model_optim_in_bwd( inp ).sum().backward() # runs optimizer as well for p1, p2 in zip( model.parameters(), model_optim_in_bwd.parameters() ): self.assertEqual( p1, p2, f"Params not equal at iteration {i}" ) self.assertTrue( p2.grad is None, f"Optim in backward grad is not None at {i}", ) # set_to_none for regular optimizer to match in backward # case. 
optim.zero_grad(set_to_none=True) @skip_if_lt_x_gpu(2) def test_ddp_apply_optim_in_backward(self): for optim_cls, init_before in itertools.product( [torch.optim.SGD, torch.optim.Adam], [True, False] ): with self.subTest(optim_cls=optim_cls): self._test_ddp_apply_optim_in_backward( optim_cls=optim_cls, optim_kwargs={"lr": 0.03}, init_before=init_before, ) @skip_if_lt_x_gpu(2) def test_ddp_apply_optim_in_backward_grad_as_bucket_view_false(self): for init_before in [True, False]: self._test_ddp_apply_optim_in_backward( optim_cls=torch.optim.SGD, optim_kwargs={"lr": 0.03}, init_before=init_before, gradient_as_bucket_view=False, ) @skip_if_lt_x_gpu(2) def test_ddp_apply_optim_in_backward_ignored_params(self): torch.cuda.set_device(self.rank) for init_before in [True, False]: with self.subTest(init_before=init_before): torch.manual_seed(self.rank) torch.cuda.manual_seed(self.rank) model = TwoLinLayerNet() # Parameters to ignore are in the format {module_name}.{param_name} params_to_ignore = ["a.weight"] torch.nn.parallel.DistributedDataParallel._set_params_and_buffers_to_ignore_for_model( model, params_to_ignore ) if init_before: _apply_optimizer_in_backward( optimizer_class=torch.optim.SGD, params=model.parameters(), optimizer_kwargs={"lr": 0.03}, ) net = torch.nn.parallel.DistributedDataParallel( model.cuda(self.rank), device_ids=[self.rank], ) if not init_before: _apply_optimizer_in_backward( optimizer_class=torch.optim.SGD, params=model.parameters(), optimizer_kwargs={"lr": 0.03}, ) inp = torch.randn(1, 10) a, b = net(inp) (a.transpose(0, 1) @ b).sum().backward() # a.weight did not go through allreduce, so optimizer acted on local # gradient, which should be different across ranks. Remaining params # should be equal. models = [None for _ in range(dist.get_world_size())] dist.all_gather_object(models, model) rank0_model, remainder = models[0], models[1:] for m in remainder: self.assertNotEqual(rank0_model.a.weight, m.a.weight) self.assertEqual( list(rank0_model.b.parameters()), list(m.b.parameters()) ) self.assertEqual(rank0_model.a.bias, m.a.bias) def _get_fp16_config(self) -> _MixedPrecision: return _MixedPrecision( param_dtype=torch.float16, reduce_dtype=torch.float16, buffer_dtype=torch.float16, ) @skip_if_lt_x_gpu(2) def test_ddp_native_mixed_precision_ignored_params(self): rank = self.rank torch.manual_seed(rank) torch.cuda.manual_seed(rank) torch.cuda.set_device(rank) model = TwoLinLayerNet() model.register_buffer("buffer", torch.ones(5)) # Parameters to ignore are in the format {module_name}.{param_name} to_ignore = ["a.weight", "buffer"] torch.nn.parallel.DistributedDataParallel._set_params_and_buffers_to_ignore_for_model( model, to_ignore, ) mp_config = self._get_fp16_config() net = torch.nn.parallel.DistributedDataParallel( model.to(rank), device_ids=[rank], mixed_precision=mp_config, gradient_as_bucket_view=True, ) to_ignore = [f"module.{name}" for name in to_ignore] expected_ignored = len(to_ignore) n_ignored = 0 # ignored params should not have _mp_param or _fp_param fields. 
for (n, p) in itertools.chain(net.named_parameters(), net.named_buffers()): if n in to_ignore: n_ignored += 1 self.assertFalse(hasattr(p, '_mp_param')) self.assertFalse(hasattr(p, '_fp_param')) else: self.assertEqual(mp_config.param_dtype, p._mp_param.dtype) self.assertEqual(torch.float32, p._fp_param.dtype) self.assertEqual(expected_ignored, n_ignored) def _test_ddp_native_mixed_precision( self, gradient_as_bucket_view, set_grad_to_none ): rank = self.rank torch.manual_seed(rank) torch.cuda.manual_seed(rank) torch.cuda.set_device(rank) inp = torch.randn(10, 1) mp_config = self._get_fp16_config() class MyModel(torch.nn.Module): def __init__(self) -> None: super().__init__() self.m = torch.nn.Linear(1, 5) self.register_buffer('buffer', torch.randn(1, 2)) self.p = torch.nn.Parameter( torch.randn(10, 5), requires_grad=False ) def forward(self_, x): # noqa: B902 params = self_.m.parameters() for p in params: self.assertEqual(mp_config.param_dtype, p.dtype) self.assertEqual(self_.buffer.dtype, mp_config.buffer_dtype) self.assertEqual(mp_config.param_dtype, x.dtype) return self_.m(x) + self_.p m = MyModel() net = torch.nn.parallel.DistributedDataParallel( m.to(rank), device_ids=[rank], mixed_precision=mp_config, gradient_as_bucket_view=gradient_as_bucket_view, ) # Buffers are casted in constructor. self.assertEqual(net.module.buffer.dtype, mp_config.buffer_dtype) # Each param should have an mp_param in the lower precision, and # an fp_param in the higher precision. for p in net.parameters(): self.assertEqual(mp_config.param_dtype, p._mp_param.dtype) self.assertEqual(torch.float32, p._fp_param.dtype) for _ in range(6): loss = net(inp).sum() loss.backward() # Verify gradient synchronization and params and grads are fp32. for n, param in net.named_parameters(): self.assertEqual(param.dtype, torch.float32) if param.grad is None: assert n == 'module.p' # Only param that doesn't require grad else: self.assertEqual(param.grad.dtype, torch.float32) tensor_list = [ torch.zeros_like(param.grad) for _ in range(dist.get_world_size(net.process_group)) ] dist.all_gather(tensor_list, param.grad) g, rest = tensor_list[0], tensor_list[1:] self.assertEqual(g.dtype, torch.float32) for g_ in rest: self.assertEqual(g_.dtype, torch.float32) self.assertEqual(g, g_) net.zero_grad(set_to_none=set_grad_to_none) @skip_if_lt_x_gpu(2) def test_ddp_native_mixed_precision_no_grad_as_bucket_view_no_set_grad_none(self): self._test_ddp_native_mixed_precision( gradient_as_bucket_view=False, set_grad_to_none=False, ) @skip_if_lt_x_gpu(2) def test_ddp_native_mixed_precision_grad_as_bucket_view_no_set_grad_none(self): self._test_ddp_native_mixed_precision( gradient_as_bucket_view=True, set_grad_to_none=False, ) @skip_if_lt_x_gpu(2) def test_ddp_native_mixed_precision_grad_as_bucket_view_set_grad_to_none(self): self._test_ddp_native_mixed_precision( gradient_as_bucket_view=True, set_grad_to_none=True ) @skip_if_lt_x_gpu(2) def test_ddp_native_mixed_precision_no_grad_as_bucket_view_set_grad_to_none(self): self._test_ddp_native_mixed_precision( gradient_as_bucket_view=True, set_grad_to_none=True ) def _test_ddp_hook_parity(self, state, hook, num_validated_iters=100): rank = self.rank m = torch.nn.Linear(1, 5) try: process_group = state.process_group except AttributeError: process_group = state net_with_hook = torch.nn.parallel.DistributedDataParallel( copy.deepcopy(m).to(rank), device_ids=[rank], process_group=process_group, ) net_with_hook.register_comm_hook(state=state, hook=hook) net_without_hook = 
torch.nn.parallel.DistributedDataParallel( copy.deepcopy(m).to(rank), device_ids=[rank], process_group=process_group, ) for i in range(100): # Clear gradients manually. for g in [ net_without_hook.module.weight.grad, net_with_hook.module.weight.grad, ]: if g is not None: g.requires_grad_(False) g.zero_() # Forward + BW batch = torch.tensor([rank]).float().cuda(rank) loss = net_without_hook(batch).sum() loss.backward() # For each worker, the gradient on the weight should be worker_rank. grad = net_without_hook.module.weight.grad avg = grad.clone() expected_grad = ( sum(i for i in range(dist.get_world_size())) / dist.get_world_size() ) loss_hook = net_with_hook(batch).sum() loss_hook.backward() grad_hook = net_with_hook.module.weight.grad avg_hook = grad_hook.clone() if i < num_validated_iters: # Verify hook grad with expected. self.assertEqual( avg_hook[0, 0].item(), expected_grad, msg=f"Expected hook grad of {expected_grad} but got {avg_hook[0, 0]}", ) # Verify hook grad with vanilla allreduce self.assertEqual( avg_hook[0, 0], avg[0, 0], msg=f"Expected hook grad to be close to allreduce {avg[0, 0]}, but got {avg_hook[0, 0]}", ) @skip_but_pass_in_sandcastle_if( BACKEND not in DistTestCases.backend_feature["cuda"], f"The {BACKEND} backend does not support DDP communication hook on CUDA devices", ) @skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"])) def test_ddp_hook_parity_allreduce(self): self._test_ddp_hook_parity(state=None, hook=default.allreduce_hook) @skip_but_pass_in_sandcastle_if( BACKEND not in DistTestCases.backend_feature["cuda"], f"The {BACKEND} backend does not support DDP communication hook on CUDA devices", ) @skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"])) def test_ddp_hook_parity_allreduce_process_group(self): # process_group is passed in to both DDP and comm. hook world_size = dist.get_world_size() rank_to_GPU = init_multigpu_helper(world_size, BACKEND) gpus = [rank_to_GPU[int(r)][0] for r in range(world_size)] process_group = torch.distributed.new_group(gpus) self._test_ddp_hook_parity(state=process_group, hook=default.allreduce_hook) @skip_but_pass_in_sandcastle_if( BACKEND not in DistTestCases.backend_feature["cuda"], f"The {BACKEND} backend does not support DDP communication hook on CUDA devices", ) @skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"])) def test_ddp_hook_parity_powerSGD(self): for warm_start in [True, False]: powersgd_state = powerSGD.PowerSGDState( process_group=None, matrix_approximation_rank=1, start_powerSGD_iter=2, warm_start=warm_start, ) self._test_ddp_hook_parity( state=powersgd_state, hook=powerSGD.powerSGD_hook ) @skip_but_pass_in_sandcastle_if( BACKEND not in DistTestCases.backend_feature["cuda"], f"The {BACKEND} backend does not support DDP communication hook on CUDA devices", ) @skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"])) def test_ddp_hook_parity_post_localSGD(self): # Although we start run local SGD at iteration 10, since we still use the global process group to run it, # the post-LocalSGD actually still allreduces gradients globally for the remaining iterations. state = post_localSGD.PostLocalSGDState( process_group=None, subgroup=dist.group.WORLD, start_localSGD_iter=10 ) self._test_ddp_hook_parity( state=state, hook=post_localSGD.post_localSGD_hook ) # Only validate the warmup iterations before local SGD is applied, # because when `post_local_gradient_allreduce` is disabled, the gradients will not be synchronized at all. 
# Note that in practice a model averager has to be applied to run model averaging, # so local gradient averaging is not necessary. start_localSGD_iter = 10 state = post_localSGD.PostLocalSGDState( process_group=None, subgroup=dist.group.WORLD, start_localSGD_iter=start_localSGD_iter, post_local_gradient_allreduce=False, ) self._test_ddp_hook_parity( state=state, hook=post_localSGD.post_localSGD_hook, num_validated_iters=start_localSGD_iter, ) # When `subgroup` is None, it is equivalent to the subgroup on the each node. # For this single-node test environment, the intra-node process group is equivalent to # the global process group. if self.world_size == dist.get_world_size(): state = post_localSGD.PostLocalSGDState( process_group=None, subgroup=None, start_localSGD_iter=10 ) self._test_ddp_hook_parity( state=state, hook=post_localSGD.post_localSGD_hook ) # Since we start local SGD later than the total number of 100 iterations, # no local SGD actually is executed, and we don't even need to provide a subgroup for this case. state = post_localSGD.PostLocalSGDState( process_group=None, subgroup=None, start_localSGD_iter=1000 ) self._test_ddp_hook_parity( state=state, hook=post_localSGD.post_localSGD_hook ) def _prepare_single_device_module( self, rank, process_group, devices, device_ids, global_batch_size, gradient_as_bucket_view=False, ): model = Net() device = devices[0] if devices else torch.device(f"cuda:{rank:d}") ddp_model = DistributedDataParallel( copy.deepcopy(model).to(device), device_ids=device_ids, process_group=process_group, bucket_cap_mb=0.001, gradient_as_bucket_view=gradient_as_bucket_view, ) model.to(device) input = torch.randn(global_batch_size, 2).to(device) target = torch.randn(global_batch_size, 4).to(device) return model, ddp_model, input, target def _prepare_cpu_module( self, process_group, global_batch_size, gradient_as_bucket_view=False, ): model = Net() ddp_model = DistributedDataParallel( copy.deepcopy(model), process_group=process_group, bucket_cap_mb=0.001, gradient_as_bucket_view=gradient_as_bucket_view, ) input = torch.randn(global_batch_size, 2) target = torch.randn(global_batch_size, 4) return model, ddp_model, input, target def _test_accumulate_gradients_no_sync( self, num_iters=2, ddp_comm_hook=None, gradient_as_bucket_view=False ): """ This is the recommended way to implement accumulate grads. If ``ddp_comm_hook`` input was specified, it will also register that hook to the ``ddp_model``. The hook fed into this function should not change the resulting gradients. 
""" _group, group_id, rank = self._init_global_test() world_size = get_world_size() # FIXME: Add testing for gloo/CUDA if BACKEND == "mpi" or BACKEND == "gloo": global_batch_size = world_size local_batch_size = 1 model, ddp_model, input, target = self._prepare_cpu_module( group_id, global_batch_size, gradient_as_bucket_view ) if BACKEND == "nccl": rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) int_devices = rank_to_GPU[rank][:1] devices = [torch.device("cuda:" + str(i)) for i in int_devices] global_batch_size = world_size local_batch_size = len(devices) model, ddp_model, input, target = self._prepare_single_device_module( rank, group_id, devices, devices, global_batch_size, gradient_as_bucket_view, ) if ddp_comm_hook is not None: ddp_model.register_comm_hook(group_id, ddp_comm_hook) def step_model(model, input, target): model.train() output = model(input) loss = F.mse_loss(output, target.to(output.device)) loss.backward() # ensure accumulate grads works with no_grad => no grads are accumulated. with torch.no_grad(): with ddp_model.no_sync(): ddp_model.train() ddp_model(input) # check two model parameters over num_iters iterations for iteration in range(num_iters): step_model(model, input, target) ddp_input = input[ rank * local_batch_size : (rank + 1) * local_batch_size ] ddp_target = target[ rank * local_batch_size : (rank + 1) * local_batch_size ] if iteration % 2 == 0: # accumulate grads locally with ddp_model.no_sync(): step_model(ddp_model, ddp_input, ddp_target) else: # sync grads step_model(ddp_model, ddp_input, ddp_target) for i, j in zip(model.parameters(), ddp_model.parameters()): if not i.requires_grad: continue if iteration % 2 == 0: self.assertNotEqual(i.grad, j.grad) else: self.assertEqual(i.grad, j.grad) # Shuffle the input so that DDP input is different torch.manual_seed(1337 + iteration) input = input[torch.randperm(global_batch_size)] @skip_but_pass_in_sandcastle_if( BACKEND != "mpi" and BACKEND != "nccl" and BACKEND != "gloo", "get_future is only supported on mpi, nccl and gloo", ) @nccl_skip_if_lt_x_gpu(BACKEND, 2) def test_accumulate_gradients_no_sync(self): """ Runs _test_accumulate_gradients_no_sync using default inputs """ self._test_accumulate_gradients_no_sync() @skip_but_pass_in_sandcastle_if( BACKEND != "mpi" and BACKEND != "nccl" and BACKEND != "gloo", "get_future is only supported on mpi, nccl and gloo", ) @nccl_skip_if_lt_x_gpu(BACKEND, 2) def test_accumulate_gradients_no_sync_grad_is_view(self): """ Runs _test_accumulate_gradients_no_sync using default inputs """ self._test_accumulate_gradients_no_sync(gradient_as_bucket_view=True) @skip_but_pass_in_sandcastle_if( BACKEND != "mpi" and BACKEND != "nccl" and BACKEND != "gloo", "get_future is only supported on mpi, nccl and gloo", ) @nccl_skip_if_lt_x_gpu(BACKEND, 2) def test_accumulate_gradients_no_sync_allreduce_hook(self): """ Runs multiple iterations on _test_accumulate_gradients_no_sync using allreduce hook and validates whether future result was properly passed as gradients in reducer. 
""" world_size = get_world_size() def allreduce_hook( group_id: object, bucket: dist.GradBucket ) -> torch.futures.Future[torch.Tensor]: tensors = [bucket.buffer() / world_size] return ( group_id.allreduce(tensors) .get_future() .then(lambda fut: fut.value()[0]) ) self._test_accumulate_gradients_no_sync( num_iters=4, ddp_comm_hook=allreduce_hook ) @skip_but_pass_in_sandcastle_if( BACKEND != "mpi" and BACKEND != "nccl" and BACKEND != "gloo", "get_future is only supported on mpi, nccl and gloo", ) @nccl_skip_if_lt_x_gpu(BACKEND, 2) def test_accumulate_gradients_no_sync_allreduce_with_then_hook(self): """ Runs multiple iterations on _test_accumulate_gradients_no_sync using allreduce hook that also uses then callbacks. In first then callback result is multiplied by 2, and the second callback divides the result by 2 * world_size. It validates whether final result was properly passed as gradients in reducer. """ world_size = get_world_size() def allreduce_with_then_hook( group_id: object, bucket: dist.GradBucket ) -> torch.futures.Future[torch.Tensor]: fut = group_id.allreduce([bucket.buffer()]).get_future() def mult(fut): # Multiply the result by 2. return 2 * fut.wait()[0] def div(fut): # Divide the result by 2 * world_size. return fut.wait() / (2 * world_size) return fut.then(mult).then(div) self._test_accumulate_gradients_no_sync( num_iters=4, ddp_comm_hook=allreduce_with_then_hook ) @skip_but_pass_in_sandcastle_if( BACKEND != "mpi" and BACKEND != "nccl" and BACKEND != "gloo", "get_future is only supported on mpi, nccl and gloo", ) @nccl_skip_if_lt_x_gpu(BACKEND, 2) def test_get_future(self): def mult(fut): return [t * 3 for t in fut.wait()] def add(fut): return [t + 1 for t in fut.wait()] group, group_id, rank = self._init_global_test() input = _build_tensor(3, 2) if BACKEND == "nccl": rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) device_id = rank_to_GPU[rank][0] input = input.to(device_id) fut = group_id.allreduce([input]).get_future() res = fut.then(mult).then(add).wait() expected = _build_tensor(3, 2 * len(group) * 3 + 1) self.assertEqual(res[0], expected) @skip_but_pass_in_sandcastle_if( BACKEND not in DistTestCases.backend_feature["ddp"], f"The {BACKEND} backend does not support DistributedDataParallel", ) @skip_if_no_gpu def test_DistributedDataParallel(self): _group, _group_id, rank = self._init_global_test() rank_to_GPU = init_multigpu_helper(dist.get_world_size(), BACKEND) gpus = list(rank_to_GPU[rank]) for use_bucket_view, static_graph in itertools.product( (False, True), (False, True) ): self._test_DistributedDataParallel( gpu_subset=gpus, rank=rank, gradient_as_bucket_view=use_bucket_view, static_graph=static_graph, ) # test set static graph twice self._test_DistributedDataParallel( gpu_subset=gpus, rank=rank, gradient_as_bucket_view=use_bucket_view, static_graph=static_graph, set_static_graph_twice=True, ) # test output_device self._test_DistributedDataParallel( gpu_subset=gpus, rank=rank, output_device=torch.device("cuda"), gradient_as_bucket_view=use_bucket_view, static_graph=static_graph, ) # test device_ids gpus_list = [torch.device("cuda:" + str(i)) for i in gpus] self._test_DistributedDataParallel( gpu_subset=gpus_list, rank=rank, output_device=torch.device("cuda"), gradient_as_bucket_view=use_bucket_view, static_graph=static_graph, ) def _test_DistributedDataParallel_with_amp(self, grad_is_view=False): torch.manual_seed(31415) # Creates model and optimizer in default precision model = copy.deepcopy(DDP_NET).cuda() optimizer = 
torch.optim.SGD(model.parameters(), lr=0.03) # Creates a GradScaler once at the beginning of training. scaler = GradScaler() ddp_model = nn.parallel.DistributedDataParallel( model, device_ids=[self.rank], gradient_as_bucket_view=grad_is_view ) input = torch.randn(dist.get_world_size() * 2, 2).cuda() target = torch.randn(dist.get_world_size() * 2, 4).cuda() loss_fn = nn.MSELoss() # verify grads are none before training for p in ddp_model.parameters(): self.assertTrue(p is not None) self.assertTrue(p.grad is None) for idx in range(20): optimizer.zero_grad() # Runs the forward pass with autocasting. with autocast(): output = ddp_model(input) loss = loss_fn(output, target) # Scales loss. Calls backward() on scaled loss to create scaled gradients. # Backward passes under autocast are not recommended. # Backward ops run in the same dtype autocast chose for corresponding forward ops. scaler.scale(loss).backward() # verify grads are not none and are valid during training for p in ddp_model.parameters(): if p.requires_grad: self.assertTrue(p.grad is not None) self.assertFalse(p.grad.isnan().any()) self.assertFalse(p.grad.isinf().any()) # scaler.step() first unscales the gradients of the optimizer's assigned params. # If these gradients do not contain infs or NaNs, optimizer.step() is then called, # otherwise, optimizer.step() is skipped. scaler.step(optimizer) # Updates the scale for next iteration. scaler.update() # Shuffle the input so that DDP input is different torch.manual_seed(1337 + idx) input = input[torch.randperm(dist.get_world_size() * 2)] return ddp_model @skip_but_pass_in_sandcastle_if( BACKEND not in DistTestCases.backend_feature["ddp"], f"The {BACKEND} backend does not support DistributedDataParallel", ) @skip_if_no_gpu def test_DistributedDataParallel_with_amp_and_grad_is_view(self): torch.cuda.set_device(self.rank) ddp_model_grad_not_view = self._test_DistributedDataParallel_with_amp( grad_is_view=False ) ddp_model_grad_is_view = self._test_DistributedDataParallel_with_amp( grad_is_view=True ) for i, j in zip( ddp_model_grad_not_view.parameters(), ddp_model_grad_is_view.parameters(), ): self.assertEqual(i, j) def _test_DistributedDataParallel_SyncBatchNorm( self, gpu_subset, rank, local_bs, global_bs, offset, output_device=None, affine=True, ): # Run a simple end to end DDP model, use result of single node model # as baseline # cpu training setup model = BN_NET if affine else BN_NET_NO_AFFINE # single gpu training setup model_gpu = copy.deepcopy(model) model_gpu.cuda(gpu_subset[0]) # DDP training setup model_DDP = nn.SyncBatchNorm.convert_sync_batchnorm(copy.deepcopy(model)) model_DDP.cuda(gpu_subset[0]) model_DDP = nn.parallel.DistributedDataParallel( model_DDP, device_ids=gpu_subset ) # test serializable/unserializable with tempfile.NamedTemporaryFile() as tmp: if sys.platform == "win32": torch.save(model_DDP, tmp) tmp.seek(0) # weights_only=False as this is legacy code that saves the model model_DDP = torch.load(tmp, weights_only=False) else: torch.save(model_DDP, tmp.name) # weights_only=False as this is legacy code that saves the model model_DDP = torch.load(tmp.name, weights_only=False) # data initialization input_cpu = torch.randn(global_bs, 2) target = torch.randn(global_bs, 4) loss = nn.MSELoss() # check two model parameters over 5 iterations self._test_DDP_niter( model_gpu, model_DDP, input_cpu.cuda(gpu_subset[0]), target.cuda(gpu_subset[0]), loss, local_bs, rank, global_bs, True, offset, dist.get_world_size(), 5 if affine else 2, ) self._barrier() def 
_test_post_localSGD_optimizer_parity(self, create_averager, grad_is_view): learning_rate = 0.03 net = torch.nn.parallel.DistributedDataParallel( copy.deepcopy(DDP_NET).cuda(), device_ids=[self.rank], gradient_as_bucket_view=grad_is_view, ) averager = create_averager() opt = torch.optim.SGD(net.parameters(), lr=learning_rate) net_using_post_localSGD_opt = torch.nn.parallel.DistributedDataParallel( copy.deepcopy(DDP_NET).cuda(), device_ids=[self.rank], gradient_as_bucket_view=grad_is_view, ) # Process group cannot be pickled in some environments, # so cannot deep copy an averager. See: # https://github.com/pytorch/pytorch/pull/74737#pullrequestreview-922487496 averager2 = create_averager() post_localSGD_opt = self._create_post_localSGD_optimizer( net_using_post_localSGD_opt, learning_rate, averager2 ) input = torch.randn(dist.get_world_size() * 2, 2).cuda() target = torch.randn(dist.get_world_size() * 2, 4).cuda() loss_fn = nn.MSELoss() for _ in range(20): self._perform_a_train_step(opt, net, loss_fn, input, target) averager.average_parameters(net.parameters()) self._perform_a_train_step( post_localSGD_opt, net_using_post_localSGD_opt, loss_fn, input, target, ) for p1, p2 in zip( net.parameters(), net_using_post_localSGD_opt.parameters() ): self.assertEqual(p1.data, p2.data) # Also check if the built-in step counters are the same to prevent a bug like #74737. self.assertEqual(averager.step, averager2.step) def _create_periodic_model_averager(self): return averagers.PeriodicModelAverager(period=4, warmup_steps=10) def _create_post_localSGD_optimizer(self, net, learning_rate, averager): return post_localSGD_optimizer.PostLocalSGDOptimizer( optim=torch.optim.SGD(net.parameters(), lr=learning_rate), averager=averager, ) def _perform_a_train_step(self, optimizer, net, loss_fn, input, target): optimizer.zero_grad() output = net(input) loss = loss_fn(output, target) loss.backward() optimizer.step() def _test_post_localSGD_optimizer_step_reload( self, create_averager, chkpt_file ): learning_rate = 0.03 net_using_post_localSGD_opt = torch.nn.parallel.DistributedDataParallel( copy.deepcopy(DDP_NET).cuda(), device_ids=[self.rank] ) averager = create_averager() post_localSGD_opt = self._create_post_localSGD_optimizer( net_using_post_localSGD_opt, learning_rate, averager ) averager2 = create_averager() dummy_post_localSGD_opt = self._create_post_localSGD_optimizer( net_using_post_localSGD_opt, learning_rate, averager2 ) input = torch.randn(dist.get_world_size() * 2, 2).cuda() target = torch.randn(dist.get_world_size() * 2, 4).cuda() loss_fn = nn.MSELoss() for _ in range(20): self._perform_a_train_step( post_localSGD_opt, net_using_post_localSGD_opt, loss_fn, input, target, ) if self.rank == 0: torch.save( {"optimizer_state_dict": post_localSGD_opt.state_dict()}, chkpt_file ) dist.barrier() map_location = {"cuda:0": f"cuda:{self.rank:d}"} checkpoint = torch.load(chkpt_file, map_location=map_location) dummy_post_localSGD_opt.load_state_dict(checkpoint["optimizer_state_dict"]) # Check that we didn't hit the trivial case self.assertNotEqual(averager2.step, 0) # Check if dummy averager was initialized to a correct value self.assertEqual(averager.step, averager2.step) # Remove 'step' entry from a checkpoint. 
# And make sure it is not in the state dictionary del checkpoint["optimizer_state_dict"]["step"] self.assertNotIn("step", checkpoint["optimizer_state_dict"]) # Check if checkpoint without a 'step' entry invokes a warning with self.assertWarnsRegex( expected_warning=UserWarning, expected_regex="Loaded state dict does not contain a step counter for an averager. " "Setting step counter to 0.", ): dummy_post_localSGD_opt.load_state_dict( checkpoint["optimizer_state_dict"] ) self.assertEqual(averager2.step, 0) @skip_if_lt_x_gpu(2) @skip_but_pass_in_sandcastle_if( BACKEND not in DistTestCases.backend_feature["ddp"], f"The {BACKEND} backend does not support DistributedDataParallel", ) def test_post_localSGD_optimizer_parity(self): torch.cuda.set_device(self.rank) self._test_post_localSGD_optimizer_parity( self._create_periodic_model_averager, grad_is_view=False, ) @skip_if_lt_x_gpu(2) @skip_but_pass_in_sandcastle_if( BACKEND not in DistTestCases.backend_feature["ddp"], f"The {BACKEND} backend does not support DistributedDataParallel", ) def test_post_localSGD_optimizer_parity_grad_is_view(self): torch.cuda.set_device(self.rank) self._test_post_localSGD_optimizer_parity( self._create_periodic_model_averager, grad_is_view=True, ) def _create_hierarchical_model_averager(self): period_group_size_dict = OrderedDict([(2, 2), (4, dist.get_world_size())]) return hierarchicalSGD.HierarchicalModelAverager( period_group_size_dict=period_group_size_dict, warmup_steps=4 ) @skip_if_lt_x_gpu(4) @skip_if_odd_worldsize @skip_but_pass_in_sandcastle_if( BACKEND not in DistTestCases.backend_feature["ddp"], f"The {BACKEND} backend does not support DistributedDataParallel", ) def test_post_localSGD_optimizer_parity_with_hierarchical_sgd(self): torch.cuda.set_device(self.rank) self._test_post_localSGD_optimizer_parity( self._create_hierarchical_model_averager, grad_is_view=False, ) @skip_if_lt_x_gpu(4) @skip_if_odd_worldsize @skip_but_pass_in_sandcastle_if( BACKEND not in DistTestCases.backend_feature["ddp"], f"The {BACKEND} backend does not support DistributedDataParallel", ) def test_post_localSGD_optimizer_parity_with_hierarchical_sgd_grad_is_view( self, ): torch.cuda.set_device(self.rank) self._test_post_localSGD_optimizer_parity( self._create_hierarchical_model_averager, grad_is_view=True, ) @skip_if_lt_x_gpu(2) @skip_but_pass_in_sandcastle_if( BACKEND not in DistTestCases.backend_feature["ddp"], f"The {BACKEND} backend does not support DistributedDataParallel", ) def test_post_localSGD_optimizer_step_reload(self): torch.cuda.set_device(self.rank) with _rank_temp_file() as tmp_file: self._test_post_localSGD_optimizer_step_reload( self._create_periodic_model_averager, tmp_file ) @skip_but_pass_in_sandcastle_if( BACKEND not in DistTestCases.backend_feature["ddp"], f"The {BACKEND} backend does not support DistributedDataParallel", ) @skip_if_no_gpu def test_DistributedDataParallel_SyncBatchNorm_Channels_Last(self): self._test_DistributedDataParallel_SyncBatchNorm_with_memory_format( torch.channels_last ) self._test_DistributedDataParallel_SyncBatchNorm_with_memory_format( torch.channels_last_3d ) def _test_DistributedDataParallel_SyncBatchNorm_with_memory_format( self, memory_format ): _group, _group_id, rank = self._init_global_test() num_processes = dist.get_world_size() local_bs = 2 bs_offset = int(rank * 2) global_bs = int(num_processes * 2) model = ONLY_SBN_NET model_gpu = copy.deepcopy(model).cuda(rank) model_DDP = nn.parallel.DistributedDataParallel( model_gpu, device_ids=[rank] ) shapes = [global_bs, 2, 4, 4] 
+ ( [] if memory_format is torch.channels_last else [4] ) input_gpu = ( torch.randn(*shapes, dtype=torch.float) .cuda(rank) .to(memory_format=memory_format) ) target_gpu = ( torch.randn(*shapes, dtype=torch.float) .cuda(rank) .to(memory_format=memory_format) ) loss = nn.MSELoss() # check two model parameters over 5 iterations self._test_DDP_niter( model_gpu, model_DDP, input_gpu, target_gpu, loss, local_bs, rank, global_bs, True, bs_offset, dist.get_world_size(), memory_format=memory_format, ) self._barrier() @skip_but_pass_in_sandcastle_if( BACKEND not in DistTestCases.backend_feature["ddp"], f"The {BACKEND} backend does not support DistributedDataParallel", ) @skip_if_no_gpu def test_DistributedDataParallel_SyncBatchNorm(self): _group, _group_id, rank = self._init_global_test() world_size = dist.get_world_size() # DDP does not support replicating BN layers within a process, hence # testing with one module replica per process gpus = [rank] local_bs = 2 bs_offset = int(rank * 2) global_bs = int(world_size * 2) self._test_DistributedDataParallel_SyncBatchNorm( gpu_subset=gpus, rank=rank, local_bs=local_bs, global_bs=global_bs, offset=bs_offset, ) # test output_device self._test_DistributedDataParallel_SyncBatchNorm( gpu_subset=gpus, rank=rank, local_bs=local_bs, global_bs=global_bs, offset=bs_offset, output_device=torch.device("cuda"), ) # test device_ids gpus = [torch.device("cuda:" + str(i)) for i in gpus] self._test_DistributedDataParallel_SyncBatchNorm( gpu_subset=gpus, rank=rank, local_bs=local_bs, global_bs=global_bs, offset=bs_offset, output_device=torch.device("cuda"), ) @skip_but_pass_in_sandcastle_if( BACKEND not in DistTestCases.backend_feature["ddp"], f"The {BACKEND} backend does not support DistributedDataParallel", ) @skip_if_no_gpu def test_DistributedDataParallel_SyncBatchNorm_No_Affine(self): _group, _group_id, rank = self._init_global_test() world_size = dist.get_world_size() # DDP does not support replicating BN layers within a process, hence # testing with one module replica per process gpus = [rank] local_bs = 2 bs_offset = int(rank * 2) global_bs = int(world_size * 2) self._test_DistributedDataParallel_SyncBatchNorm( gpu_subset=gpus, rank=rank, local_bs=local_bs, global_bs=global_bs, offset=bs_offset, affine=False, ) @skip_but_pass_in_sandcastle_if( BACKEND not in DistTestCases.backend_feature["ddp"], f"The {BACKEND} backend does not support DistributedDataParallel", ) @skip_if_no_gpu def test_DistributedDataParallel_SyncBatchNorm_2D_Input(self): _group, _group_id, rank = self._init_global_test() # DDP does not support replicating BN layers within a process, hence # testing with one module replica per process gpus = [rank] model = nn.BatchNorm1d(2) # single gpu training setup model_gpu = copy.deepcopy(model) model_gpu.cuda(gpus[0]) # DDP training setup model_DDP = nn.SyncBatchNorm.convert_sync_batchnorm(copy.deepcopy(model)) model_DDP.cuda(gpus[0]) model_DDP = nn.parallel.DistributedDataParallel(model_DDP, device_ids=gpus) local_bs = len(gpus) * 2 global_bs = dist.get_world_size() * local_bs input_cpu = torch.randn(global_bs, 2) target = torch.randn(global_bs, 2) loss = nn.MSELoss() # disabling cudnn. # SyncBatchNorm goes through native_batch_norm kernel, this avoids the # numerical issue created by the divergent code path. 
with torch.backends.cudnn.flags(False): # check two model parameters over 5 iterations self._test_DDP_niter( model_gpu, model_DDP, input_cpu.cuda(gpus[0]), target.cuda(gpus[0]), loss, local_bs, rank, global_bs, True, ) self._barrier() @skip_but_pass_in_sandcastle_if( BACKEND not in DistTestCases.backend_feature["ddp"], f"The {BACKEND} backend does not support DistributedDataParallel", ) @skip_if_no_gpu @require_world_size(2) def test_DistributedDataParallel_SyncBatchNorm_Single_Input_Per_Process(self): _group, _group_id, rank = self._init_global_test() # DDP does not support replicating BN layers within a process, hence # testing with one module replica per process gpus = [rank] model = nn.BatchNorm1d(2) # single gpu training setup model_gpu = copy.deepcopy(model) model_gpu.cuda(gpus[0]) # DDP training setup model_DDP = nn.SyncBatchNorm.convert_sync_batchnorm(copy.deepcopy(model)) model_DDP.cuda(gpus[0]) model_DDP = nn.parallel.DistributedDataParallel(model_DDP, device_ids=gpus) local_bs = 1 global_bs = dist.get_world_size() input_cpu = torch.randn(global_bs, 2) target = torch.randn(global_bs, 2) loss = nn.MSELoss() # disabling cudnn. # SyncBatchNorm goes through native_batch_norm kernel, this avoids the # numerical issue created by the divergent code path. with torch.backends.cudnn.flags(False): # check two model parameters over 5 iterations self._test_DDP_niter( model_gpu, model_DDP, input_cpu.cuda(gpus[0]), target.cuda(gpus[0]), loss, local_bs, rank, global_bs, True, ) self._barrier() @skip_but_pass_in_sandcastle_if( BACKEND not in DistTestCases.backend_feature["ddp"], f"The {BACKEND} backend does not support DistributedDataParallel", ) @skip_if_no_gpu def test_DistributedDataParallel_SyncBatchNorm_Diff_Input_Sizes_Running_Value( self, ): _group, _group_id, rank = self._init_global_test() model = nn.parallel.DistributedDataParallel( ONLY_SBN_NET.cuda(rank), device_ids=[rank] ) input_var = [] for i in range(dist.get_world_size()): input_var_rank = torch.cat( [ torch.ones(2, 1, 10 ** (i + 1)) * (0.1 ** (i - 1)), torch.ones(2, 1, 10 ** (i + 1)) * (0.3 ** (i - 1)), ], dim=1, ) input_var.append(input_var_rank) all_input_var = torch.cat( [ x.permute(1, 0, 2).contiguous().view(ONLY_SBN_NET.num_features, -1) for x in input_var ], dim=1, ).cuda(rank) for i in range(100): y = model(input_var[rank].cuda(rank)) y.mean().backward() running_mean, running_var = ( model.module.running_mean, model.module.running_var, ) torch.testing.assert_close(running_mean, all_input_var.mean(1)) torch.testing.assert_close(running_var, all_input_var.var(1)) @skip_but_pass_in_sandcastle_if( BACKEND not in DistTestCases.backend_feature["ddp"], f"The {BACKEND} backend does not support DistributedDataParallel", ) @skip_if_no_gpu def test_DistributedDataParallel_SyncBatchNorm_Diff_Input_Sizes_gradient(self): _group, _group_id, rank = self._init_global_test() # only do single GPU per process gpus = [rank] # cpu training setup num_processes = dist.get_world_size() local_bs = rank + 2 bs_offset = int((rank + 3) * rank / 2) global_bs = int((num_processes + 3) * num_processes / 2) self._test_DistributedDataParallel_SyncBatchNorm( gpu_subset=gpus, rank=rank, local_bs=local_bs, global_bs=global_bs, offset=bs_offset, ) @skip_but_pass_in_sandcastle_if( BACKEND not in DistTestCases.backend_feature["ddp"], f"The {BACKEND} backend does not support DistributedDataParallel", ) @skip_if_no_gpu def test_DistributedDataParallel_SyncBatchNorm_half(self): _group, _group_id, rank = self._init_global_test() model = copy.deepcopy(BN_NET) model 
= model.half() model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model) model = nn.parallel.DistributedDataParallel(model.cuda(rank), device_ids=[rank]) inp = torch.randn(2, 2, dtype=torch.float16, device=torch.device(rank)) # Check that forward/backward do not error with dtype mismatch out = model(inp) self.assertEqual(out.dtype, torch.float16) out.sum().backward() for param in model.parameters(): self.assertEqual(param.grad.dtype, torch.float16) def _test_ddp_logging_data(self, is_gpu): rank = dist.get_rank() model_DDP = copy.deepcopy(DDP_NET) if is_gpu: model_DDP = nn.parallel.DistributedDataParallel( model_DDP.cuda(rank), device_ids=[rank] ) else: model_DDP = nn.parallel.DistributedDataParallel(model_DDP) # dummy data initialization local_bs = 2 batch_size, input, target, loss = self._prepare_dummy_data(local_bs) if is_gpu: input = input.cuda(rank) target = target.cuda(rank) model_DDP._set_ddp_runtime_logging_sample_rate(2) for idx in range(20): offset = rank * local_bs # DDP training, DDP scatters subsets of input to nodes/GPUs self._test_DDP_helper( model_DDP, input[offset : offset + local_bs], target[offset : offset + local_bs], loss, 1, ) self._model_step_with_zero_grad(model_DDP) # Verify DDP logging data is sampled as expected # If it has ran more than 10 iterations and this is # the sampled iteration for measuring run time stats, # the run time stats for this idx-th iteration will not # be zeros. ddp_logging_data = model_DDP._get_ddp_logging_data() if idx > 0 and (idx < 10 or idx % 2 == 0): self.assertGreaterEqual( ddp_logging_data.get("forward_compute_time"), 1 ) self.assertGreaterEqual( ddp_logging_data.get("backward_compute_time"), 1 ) self.assertGreaterEqual( ddp_logging_data.get("backward_comm_time"), 1 ) self.assertGreaterEqual( ddp_logging_data.get("backward_compute_time"), ddp_logging_data.get("backward_compute_comm_overlap_time"), ) self.assertGreaterEqual( ddp_logging_data.get("backward_comm_time"), ddp_logging_data.get("backward_compute_comm_overlap_time"), ) self.assertEqual(ddp_logging_data.get("iteration"), idx) elif idx > 0: # if the idx-th iteration is not sampled to set runtime stats, # ddp_logging_data.iteration will not be updated to current # iteration. self.assertNotEqual(ddp_logging_data.get("iteration"), idx) # Shuffle the input so that DDP input is different input = input[torch.randperm(batch_size)] return model_DDP @skip_but_pass_in_sandcastle_if( BACKEND == "nccl", "nccl does not support DDP on CPU models" ) def test_ddp_logging_data_cpu(self): def parse_env(var): return os.environ[var] if var in os.environ else "N/A" dist.set_debug_level(dist.DebugLevel.INFO) _, group_id, _ = self._init_global_test() model_DDP = self._test_ddp_logging_data(is_gpu=False) ddp_logging_data = model_DDP._get_ddp_logging_data() self.assertEqual(ddp_logging_data.get("world_size"), dist.get_world_size()) self.assertEqual(ddp_logging_data.get("rank"), dist.get_rank()) self.assertEqual(ddp_logging_data.get("module_name"), "Net") self.assertEqual(ddp_logging_data.get("device_ids"), "") # output_device is -1 in default if it is not set, e.g. # output_device of CPU training is -1. 
self.assertEqual(ddp_logging_data.get("output_device"), -1) self.assertEqual(ddp_logging_data.get("broadcast_buffers"), 1) self.assertEqual(ddp_logging_data.get("bucket_cap_bytes"), 25 * 1024 * 1024) self.assertEqual(ddp_logging_data.get("find_unused_parameters"), 0) self.assertEqual(ddp_logging_data.get("gradient_as_bucket_view"), 0) self.assertEqual( ddp_logging_data.get("backend_name"), dist.get_backend(group_id) ) self.assertEqual(ddp_logging_data.get("iteration"), 18) params = list(model_DDP.parameters()) num_params = 0 param_size = 0 params = list(filter(lambda parameter: parameter.requires_grad, params)) for p in params: num_params += 1 param_size += p.numel() * p.element_size() self.assertEqual(ddp_logging_data.get("dtypes"), "float") self.assertEqual( ddp_logging_data.get("total_parameter_size_bytes"), param_size ) self.assertEqual(ddp_logging_data.get("num_parameter_tensors"), num_params) self.assertEqual(ddp_logging_data.get("bucket_sizes"), str(param_size)) self.assertEqual( ddp_logging_data.get("master_port"), parse_env("MASTER_PORT") ) self.assertEqual( ddp_logging_data.get("master_addr"), parse_env("MASTER_ADDR") ) self.assertEqual( ddp_logging_data.get("torch_distributed_debug"), parse_env("TORCH_DISTRIBUTED_DEBUG"), ) self.assertEqual( ddp_logging_data.get("cuda_visible_devices"), parse_env("CUDA_VISIBLE_DEVICES"), ) if ddp_logging_data.get("backend_name") == "gloo": self.assertEqual( ddp_logging_data.get("gloo_socket_ifname"), parse_env("GLOO_SOCKET_IFNAME"), ) self.assertEqual( ddp_logging_data.get("gloo_device_transport"), parse_env("GLOO_DEVICE_TRANSPORT"), ) default_gloo_threads = 2 self.assertEqual( ddp_logging_data.get("gloo_num_threads"), default_gloo_threads, ) self.assertEqual(ddp_logging_data.get("nccl_socket_ifname"), None) self.assertEqual(ddp_logging_data.get("nccl_blocking_wait"), None) self.assertEqual(ddp_logging_data.get("nccl_async_error_handling"), None) self.assertEqual(ddp_logging_data.get("nccl_debug"), None) self.assertEqual(ddp_logging_data.get("nccl_nthreads"), None) self.assertEqual(ddp_logging_data.get("nccl_ib_timeout"), None) # test runtime logging fields # Note: DETAIL debug mode logs DDP logging data to stdout and # thus accesses std::map, which fills in a default value for the # type if it didn't exist. self.assertEqual(ddp_logging_data.get("unused_parameter_size", 0), 0) self.assertEqual(ddp_logging_data.get("has_rebuilt_buckets"), 1) self.assertEqual( ddp_logging_data.get("rebuilt_bucket_sizes"), str(param_size) ) grad_ready_order = ddp_logging_data.get( "prev_iteration_grad_ready_order_indices" ) expected_order = list(reversed([str(x) for x in range(3)])) self.assertEqual(grad_ready_order, ", ".join(expected_order)) bucket_indices = ddp_logging_data.get("rebuilt_per_bucket_param_indices") self.assertEqual(bucket_indices, " ".join(expected_order)) # It is hard to test accurate latency, but it can test whether the latency is # a valid value and in the expected range. 
self.assertGreaterEqual(ddp_logging_data.get("avg_forward_compute_time"), 1) self.assertGreaterEqual( ddp_logging_data.get("avg_backward_compute_time"), 1 ) self.assertGreaterEqual(ddp_logging_data.get("avg_backward_comm_time"), 1) self.assertGreaterEqual( ddp_logging_data.get("avg_backward_compute_time"), ddp_logging_data.get("avg_backward_compute_comm_overlap_time"), ) self.assertGreaterEqual( ddp_logging_data.get("avg_backward_comm_time"), ddp_logging_data.get("avg_backward_compute_comm_overlap_time"), ) # Test host-side times are roughly in the order that we expect fwd_host_side_time = ddp_logging_data.get("forward_compute_time_start") bwd_comp_start_host_side_time = ddp_logging_data.get( "backward_compute_time_start" ) bwd_comp_end_host_side_time = ddp_logging_data.get( "backward_compute_time_end" ) bwd_comm_start_host_side_time = ddp_logging_data.get( "backward_comm_time_start" ) bwd_comm_end_host_side_time = ddp_logging_data.get("backward_comm_time_end") self.assertGreaterEqual( bwd_comm_end_host_side_time, bwd_comm_start_host_side_time ) self.assertGreaterEqual( bwd_comm_start_host_side_time, bwd_comp_start_host_side_time ) self.assertGreaterEqual( bwd_comp_end_host_side_time, bwd_comp_start_host_side_time ) self.assertGreaterEqual(bwd_comp_start_host_side_time, fwd_host_side_time) # test larger net with mixed data types, verify multiple bucket sizes model = LargeNet() model.float() model.fc1.double() model_DDP = nn.parallel.DistributedDataParallel(model, bucket_cap_mb=1.5) ddp_logging_data = model_DDP._get_ddp_logging_data() params = list(model_DDP.parameters()) self.assertEqual( ddp_logging_data.get("bucket_cap_bytes"), int(1.5 * 1024 * 1024) ) bucket_sizes = [ params[1].numel() * params[1].element_size(), params[0].numel() * params[0].element_size(), ] self.assertEqual( ddp_logging_data.get("bucket_sizes"), ", ".join(str(x) for x in bucket_sizes), ) self.assertEqual(ddp_logging_data.get("dtypes"), "double, float") @skip_but_pass_in_sandcastle_if( BACKEND not in DistTestCases.backend_feature["ddp"], f"The {BACKEND} backend does not support DistributedDataParallel", ) @skip_if_no_gpu def test_ddp_logging_data_gpu(self): _group, _group_id, rank = self._init_global_test() model_DDP = self._test_ddp_logging_data(is_gpu=True) ddp_logging_data = model_DDP._get_ddp_logging_data() self.assertEqual(ddp_logging_data.get("device_ids"), str(rank)) self.assertEqual(ddp_logging_data.get("output_device"), rank) grad_ready_order = ddp_logging_data.get( "prev_iteration_grad_ready_order_indices" ) expected_order = list(reversed([str(x) for x in range(3)])) self.assertEqual(grad_ready_order, ", ".join(expected_order)) bucket_indices = ddp_logging_data.get("rebuilt_per_bucket_param_indices") self.assertEqual(bucket_indices, " ".join(expected_order)) # test runtime logging fields # It is hard to test accurate latency, but it can test whether the latency is # a valid value and in the expected range. 
self.assertGreaterEqual(ddp_logging_data.get("avg_forward_compute_time"), 1) self.assertGreaterEqual( ddp_logging_data.get("avg_backward_compute_comm_overlap_time"), 1 ) self.assertGreaterEqual( ddp_logging_data.get("avg_backward_compute_time"), ddp_logging_data.get("avg_backward_compute_comm_overlap_time"), ) self.assertGreaterEqual( ddp_logging_data.get("avg_backward_comm_time"), ddp_logging_data.get("avg_backward_compute_comm_overlap_time"), ) # Test host-side times are roughly in the order that we expect fwd_host_side_time = ddp_logging_data.get("forward_compute_time_start") bwd_comp_start_host_side_time = ddp_logging_data.get( "backward_compute_time_start" ) bwd_comp_end_host_side_time = ddp_logging_data.get( "backward_compute_time_end" ) bwd_comm_start_host_side_time = ddp_logging_data.get( "backward_comm_time_start" ) bwd_comm_end_host_side_time = ddp_logging_data.get("backward_comm_time_end") self.assertGreaterEqual( bwd_comm_end_host_side_time, bwd_comm_start_host_side_time ) self.assertGreaterEqual( bwd_comm_start_host_side_time, bwd_comp_start_host_side_time ) self.assertGreaterEqual( bwd_comp_end_host_side_time, bwd_comp_start_host_side_time ) self.assertGreaterEqual(bwd_comp_start_host_side_time, fwd_host_side_time) @skip_but_pass_in_sandcastle_if( BACKEND == "nccl", "nccl does not support DDP on CPU models" ) def test_static_graph_api_cpu(self): model_DDP = nn.parallel.DistributedDataParallel(DDP_NET) expected_err = "should be called before training loop starts" with self.assertRaisesRegex(RuntimeError, expected_err): local_bs = 2 _batch_size, input, target, loss = self._prepare_dummy_data(local_bs) offset = dist.get_rank() * local_bs # DDP training, DDP scatters subsets of input to nodes/GPUs self._test_DDP_helper( model_DDP, input[offset : offset + local_bs], target[offset : offset + local_bs], loss, 1, ) model_DDP._set_static_graph() # Verify error was logged in ddp_logging_data. verify_ddp_error_logged(model_DDP, expected_err) @skipIfNoTorchVision def test_SyncBatchNorm_process_group(self): # When adopting `convert_sync_batchnorm` to convert a `nn.modules`, # it need to recursively pass the `process_group` in the module when the `SyncBatchNorm` # is nested in a sub-module or sub-sub-module (e.g. resnet50 in torchvision.models). process_ids = 0 process_group = torch.distributed.new_group([process_ids]) res50_model = torchvision.models.resnet50() res50_model_sync = nn.SyncBatchNorm.convert_sync_batchnorm( copy.deepcopy(res50_model), process_group ) process_group_sync = res50_model_sync.layer1[0].bn1.process_group self.assertEqual(process_group_sync, process_group) def _run_reduction_test( self, tensor, expected_tensor, op, reduction_fn=dist.all_reduce, dst=None ): if reduction_fn != dist.all_reduce and dst is None: raise ValueError(f"Reduction fn {reduction_fn} must specify dst!") if dst is not None: reduction_fn(tensor, dst, op) # Only destination rank tensor is expected to have final result. 
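            # With dist.reduce, only rank `dst` is guaranteed to hold the
            # reduced value afterwards; the buffers on other ranks may or may
            # not be modified by the backend, so the check below is gated on
            # dist.get_rank() == dst.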
if dist.get_rank() == dst: self.assertEqual(tensor, expected_tensor) else: reduction_fn(tensor, op) self.assertEqual(tensor, expected_tensor) @require_backend_is_available({"nccl"}) @skip_if_lt_x_gpu(2) def test_nccl_backend_bool_allreduce(self): torch.cuda.set_device(self.rank) # Run all_reduce with PRODUCT element = self.rank % 2 == 0 for op in [dist.ReduceOp.PRODUCT, dist.ReduceOp.MIN]: input_tensor = torch.tensor([element, element]).to(self.rank) self._run_reduction_test( input_tensor, torch.tensor([False, False]).to(self.rank), op ) # Ensure that all ranks contributing True (cast to 1) results in the # correct reduction. input_tensor = torch.tensor([True, True]).to(self.rank) expected_tensor = input_tensor.clone() self._run_reduction_test(input_tensor, expected_tensor, op) # Run all_reduce with SUM for op in [dist.ReduceOp.SUM, dist.ReduceOp.MAX]: input_tensor = torch.tensor([element, element]).to(self.rank) self._run_reduction_test( input_tensor, torch.tensor([True, True]).to(self.rank), op ) # TODO: NCCL backend does not work correctly for bitwise reduction ops # (see https://github.com/pytorch/pytorch/issues/41362). Add tests for # these once it is supported. @require_backend_is_available({"nccl"}) @skip_if_lt_x_gpu(2) def test_nccl_backend_bool_allgather(self): torch.cuda.set_device(self.rank) inp = {0: [True, True], 1: [False, True]} input_tensor = torch.tensor(inp[self.rank % 2]).to(self.rank) # Preserve a copy of the tensor to compare against after allgather. input_tensor_copy = input_tensor.clone() tensor_list = [ torch.tensor([False, False]).to(self.rank) for _ in range(dist.get_world_size()) ] dist.all_gather(tensor_list, input_tensor) self.assertEqual(len(tensor_list), dist.get_world_size()) for i, t in enumerate(tensor_list): expected = torch.tensor(inp[i % 2]).to(self.rank) self.assertEqual(t, expected) # Ensure that the input tensor is not modified, since this collective # does not modify its input. self.assertEqual(input_tensor_copy, input_tensor) @require_backend_is_available({"nccl"}) @skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"])) def test_nccl_backend_bool_reduce(self): torch.cuda.set_device(self.rank) inp = {0: [True, True], 1: [False, False]} # Run reduce() with product op for op in [dist.ReduceOp.PRODUCT, dist.ReduceOp.MIN]: # make sure rank 0 gets False if WORLD_SIZE=1 to match expected tensor input_tensor = torch.tensor(inp[(self.rank + 1) % 2]).to(self.rank) expected = torch.tensor([False, False]).to(self.rank) self._run_reduction_test(input_tensor, expected, op, dist.reduce, dst=0) # Ensure that all ranks contributing True (cast to 1) results in the # correct reduction. input_tensor = torch.tensor([True, True]).to(self.rank) expected_tensor = input_tensor.clone() self._run_reduction_test( input_tensor, expected_tensor, op, dist.reduce, dst=0 ) for op in [dist.ReduceOp.SUM, dist.ReduceOp.MAX]: input_tensor = torch.tensor(inp[self.rank % 2]).to(self.rank) expected = ( torch.tensor([True, True]).to(self.rank) if self.rank == 0 else input_tensor.clone() ) self._run_reduction_test(input_tensor, expected, op, dist.reduce, dst=0) @require_backend_is_available({"nccl"}) @skip_if_lt_x_gpu(2) def test_nccl_backend_bool_broadcast(self): tensor_size = 10 bcast_tensor = torch.tensor( [ (random.random() < 0.5 if self.rank == 0 else False) for _ in range(tensor_size) ] ).to(self.rank) dist.broadcast(bcast_tensor, src=0) # Now allgather and ensure the tensors are equal. 
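        # dist.all_gather fills a pre-allocated list of world_size tensors
        # (matching shape/dtype) in rank order; the all-False placeholders
        # built below should all equal the broadcast tensor afterwards.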
tensor_list = [ torch.tensor([False for _ in range(tensor_size)]).to(self.rank) for _ in range(dist.get_world_size()) ] dist.all_gather(tensor_list, bcast_tensor) expected = tensor_list[0] for tensor in tensor_list[1:]: self.assertEqual(tensor, expected) @skip_but_pass_in_sandcastle_if( BACKEND not in DistTestCases.backend_feature["ddp"], f"The {BACKEND} backend does not support DistributedDataParallel", ) @skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"])) def test_DistributedSampler_padding(self): # Tests padding of distributed sampler. world_size = dist.get_world_size() # Simulates the 'casual' dataset size dataset_size = 100 + world_size + 1 dataset = [torch.ones(1).to(self.rank) * i for i in range(dataset_size)] # Simulates the 'tiny' dataset size dataset_tiny_size = max(world_size // 2 - 1, 1) dataset_tiny = [ torch.ones(1).to(self.rank) * i for i in range(dataset_tiny_size) ] # Specifying drop_last=True will cause the tail of the data to be dropped. dist_sampler = DistributedSampler(dataset=dataset, drop_last=True) local_num_samples, local_dataset_size = ( dist_sampler.num_samples, dist_sampler.total_size, ) # The effective dataset size should be the greatest integer that is <= # dataset_size that is divisible by the world_size. This is to ensure each # rank processes the same number of samples. effective_dataset_size = ( math.ceil((dataset_size - world_size) / world_size) if dataset_size % world_size != 0 else dataset_size / world_size ) self.assertEqual(local_num_samples, effective_dataset_size) self.assertEqual(local_dataset_size, local_num_samples * world_size) indices_list = list(iter(dist_sampler)) self.assertEqual(len(indices_list), local_num_samples) def validate_global_samples(local_num_samples): # Ensure that each rank processes the same number of samples. world_samples = [ torch.LongTensor([0]).to(self.rank) for _ in range(world_size) ] dist.all_gather( world_samples, torch.tensor([local_num_samples]).to(self.rank) ) world_samples = [sample.item() for sample in world_samples] self.assertEqual(len(set(world_samples)), 1) validate_global_samples(local_num_samples) # drop_last=False is the default and will add additional indices to be sampled, # increasing the effective dataset size. dist_sampler_added_samples = DistributedSampler(dataset=dataset) local_num_samples, local_dataset_size = ( dist_sampler_added_samples.num_samples, dist_sampler_added_samples.total_size, ) # The effective dataset size is the smallest integer that is >= dataset_size # and divisible by the world size. self.assertEqual(local_num_samples, math.ceil(dataset_size / world_size)) self.assertEqual(local_dataset_size, local_num_samples * world_size) indices_list = list(iter(dist_sampler_added_samples)) self.assertEqual(len(indices_list), local_num_samples) # Ensure that each rank processes the same number of samples. validate_global_samples(local_num_samples) # Ensure additional samples are padded even when # the extremely small dataset is given. 
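        # For example, if this suite runs with world_size=2, dataset_tiny_size
        # is max(2 // 2 - 1, 1) = 1, so num_samples = ceil(1 / 2) = 1 and
        # total_size = 2: the single element is repeated (padded) so that both
        # ranks still draw one sample each.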
dist_sampler_added_samples_tiny = DistributedSampler(dataset=dataset_tiny) local_num_samples, local_dataset_size = ( dist_sampler_added_samples_tiny.num_samples, dist_sampler_added_samples_tiny.total_size, ) self.assertEqual( local_num_samples, math.ceil(dataset_tiny_size / world_size) ) self.assertEqual(local_dataset_size, local_num_samples * world_size) indices_list = list(iter(dist_sampler_added_samples_tiny)) self.assertEqual(len(indices_list), local_num_samples) validate_global_samples(local_num_samples) def _test_allgather_object(self, subgroup=None): # Only set device for NCCL backend since it must use GPUs. gather_objects = COLLECTIVES_OBJECT_TEST_LIST.copy() backend = os.environ["BACKEND"] if backend == "nccl": # Case where rank != GPU device. next_rank = (self.rank + 1) % int(self.world_size) torch.cuda.set_device(next_rank) # If GPU test, add object with GPU tensor if backend == "nccl": gather_objects.append(Foo(torch.randn(3, 3, device=0))) output_gathered = [None for _ in range(dist.get_world_size())] dist.all_gather_object( output_gathered, gather_objects[self.rank % len(gather_objects)], group=subgroup, ) for i, val in enumerate(output_gathered): expected = gather_objects[i % len(gather_objects)] self.assertEqual(val, expected) @require_backend_is_available(DistTestCases.backend_feature["gpu"]) @require_n_gpus_for_nccl_backend( int(os.environ["WORLD_SIZE"]), os.environ["BACKEND"] ) @with_dist_debug_levels(levels=["OFF", "INFO", "DETAIL"]) def test_all_gather_object_default_pg(self): return self._test_allgather_object() @require_backend_is_available(DistTestCases.backend_feature["gpu"]) @require_n_gpus_for_nccl_backend( int(os.environ["WORLD_SIZE"]), os.environ["BACKEND"] ) @with_dist_debug_levels(levels=["DETAIL", "OFF", "INFO"]) def test_all_gather_object_subgroup(self): default = _get_default_group() backend = dist.get_backend(default) subgroup = dist.new_group(backend=backend) return self._test_allgather_object(subgroup=subgroup) def _test_gather_object(self, pg=None): # Ensure stateful objects can be gathered gather_objects = COLLECTIVES_OBJECT_TEST_LIST.copy() my_rank = dist.get_rank(pg) backend = os.environ["BACKEND"] if backend == "nccl": # Case where rank != GPU device. next_rank = (self.rank + 1) % int(self.world_size) torch.cuda.set_device(next_rank) # If GPU test, add object with GPU tensor if backend == "nccl": gather_objects.append(Foo(torch.randn(3, 3, device=my_rank))) output_gathered = [None for _ in range(dist.get_world_size(pg))] gather_on_rank = 0 dist.gather_object( gather_objects[self.rank % len(gather_objects)], object_gather_list=output_gathered if my_rank == gather_on_rank else None, dst=gather_on_rank, group=pg, ) if my_rank != gather_on_rank: self.assertEqual( output_gathered, [None for _ in range(dist.get_world_size())] ) else: for i, val in enumerate(output_gathered): expected = gather_objects[i % len(gather_objects)] self.assertEqual(val, expected) # Validate errors when objects can't be pickled. 
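        # Bar is defined in a local scope, so pickle cannot resolve it by its
        # qualified name and serialization of its instances fails. Since the
        # object collectives pickle their inputs, the collective below is
        # expected to raise (see the assertRaises check).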
class Bar: pass b = Bar() gather_objects = [b for _ in range(dist.get_world_size())] with self.assertRaises(AttributeError): dist.all_gather_object( [None for _ in range(dist.get_world_size())], gather_objects[self.rank], group=pg, ) @skip_but_pass_in_sandcastle_if( BACKEND == "ucc", "CPU tensor ops not supported by UCP TL" ) @require_backend_is_available(DistTestCases.backend_feature["gpu"]) @with_dist_debug_levels(levels=["DETAIL", "OFF", "INFO"]) def test_gather_object(self): return self._test_gather_object() @skip_but_pass_in_sandcastle_if( BACKEND == "ucc", "CPU tensor ops not supported by UCP TL" ) @require_backend_is_available(DistTestCases.backend_feature["gpu"]) @with_dist_debug_levels(levels=["DETAIL", "OFF", "INFO"]) def test_gather_object_subgroup(self): default = _get_default_group() backend = dist.get_backend(default) subgroup = dist.new_group(backend=backend) return self._test_gather_object(subgroup) def validate_net_equivalence(self, net): # Helper to validate synchronization of nets across ranks. net_module_states = list(net.module.state_dict().values()) # Check that all tensors in module's state_dict() are equal. for t in net_module_states: tensor_list = [ torch.zeros_like(t) for _ in range(dist.get_world_size()) ] dist.all_gather(tensor_list, t) for tensor in tensor_list: self.assertEqual(tensor, t) @skip_if_lt_x_gpu(2) @skip_but_pass_in_sandcastle_if( BACKEND not in DistTestCases.backend_feature["ddp"], f"The {BACKEND} backend does not support DistributedDataParallel", ) def test_ddp_sync_module_states(self): # Test that after calling _sync_module_states, models across ranks # are the same and are equal to the model on the input rank. dim = 2 rank = self.rank rank_to_broadcast = 1 # Seed to ensure that ranks are initialized with different initial models. torch.manual_seed(rank) model = nn.Linear(dim, dim, bias=False) net = torch.nn.parallel.DistributedDataParallel( model.cuda(rank), device_ids=[self.rank], bucket_cap_mb=1 ) new_model = nn.Linear(dim, dim, bias=False).cuda(rank) net.module = copy.deepcopy(new_model) # Assert params are different net_module_states = list(net.module.state_dict().values()) for t in net_module_states: tensor_list = [ torch.zeros_like(t) for _ in range(dist.get_world_size()) ] dist.all_gather(tensor_list, t) for i, tensor in enumerate(tensor_list): if i == rank: self.assertEqual(t, tensor) else: # tensor from another rank should be different. self.assertNotEqual(t, tensor) _sync_module_states( module=net.module, process_group=net.process_group, broadcast_bucket_size=net.broadcast_bucket_size, src=rank_to_broadcast, params_and_buffers_to_ignore=net.parameters_to_ignore, ) # Now all model params should be the same. self.validate_net_equivalence(net) # Since the network params were broadcast from rank_to_broadcast, validate that # they are the same as new_model on rank_to_broadcast. if rank == rank_to_broadcast: expected_states = new_model.state_dict().values() for t, expected in zip(net_module_states, expected_states): self.assertEqual(t, expected) @skip_if_lt_x_gpu(2) @skip_but_pass_in_sandcastle_if( BACKEND not in DistTestCases.backend_feature["ddp"], f"The {BACKEND} backend does not support DistributedDataParallel", ) def test_ddp_grad_div_uneven_inputs(self): # Test gradient division during training with join() API. If # divide_by_initial_world_size=False, we scale by the effective world # size when allreducing grads. 
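        # Worked example (assuming this runs with 2 ranks): each local grad
        # entry equals grad_scale (50). Once one rank has joined, the
        # remaining rank's allreduced grad stays 50 when dividing by the
        # effective world size (divide_by_initial_world_size=False), but
        # becomes 50 * 1 / 2 = 25 when dividing by the initial world size,
        # matching the expected_grad formulas below.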
dim = 5 batch = 1 grad_scale = 50 rank = self.rank model = nn.Linear(dim, dim, bias=False) inp = torch.ones(batch, dim, device=self.rank) * grad_scale net = torch.nn.parallel.DistributedDataParallel( model.cuda(rank), device_ids=[self.rank], bucket_cap_mb=1 ) n_iters = 3 if self.rank > 0: n_iters += 2 with net.join(divide_by_initial_world_size=False): for _ in range(n_iters): loss = net(inp).sum() loss.backward() # The grad is always expected_grad, since we divide by the number # of currently active processes and inactive processes contribute # zero gradient. If we kept dividing by static initial world # size as processes leave, the grad would be smaller. expected_grad = torch.ones(dim, dim, device=self.rank) * grad_scale param = next(iter(net.parameters())) self.assertEqual(expected_grad, param.grad) # Avoid accumulating grads so that it's the same every iteration net.zero_grad() torch.cuda.synchronize(device=self.rank) # If divide_by_initial_world_size=True (default), we always scale grads # by the initial world_size. with net.join(divide_by_initial_world_size=True): for i in range(n_iters): loss = net(inp).sum() loss.backward() effective_ws = dist.get_world_size() if i >= 3: effective_ws -= 1 expected_grad = ( torch.ones(dim, dim, device=self.rank) * grad_scale * effective_ws ) / dist.get_world_size() param = next(iter(net.parameters())) self.assertEqual(expected_grad, param.grad) # Avoid accumulating grad so that it's the same every iteration. net.zero_grad() torch.cuda.synchronize(device=self.rank) def _test_ddp_profiling(self, profiler_ctx, profiler_ctx2=None): """Runs DDP based model training and captures profiles. This test will do two profiler runs. 1. An inital basic run to check if profiler events are correctly captured. 2. A second profiling pass after running some iterations of DDP, to check robustness of thread local state. args profiler_ctx : Profiler context manager for pass 1 profiler_ctx2 : Profiler context manager for pass 2. This can be left out as None, in which case a deepcopy of profiler_ctx is used. Returns: prof: Instantiated profiler object that can be used for post analysis. """ batch = 3 dim = 10 num_iters = 6 torch.cuda.set_device(self.rank) model = nn.Linear(dim, dim, bias=False) inp = torch.rand(batch, dim, device=self.rank) net = torch.nn.parallel.DistributedDataParallel( model.cuda(self.rank), device_ids=[self.rank], ) if profiler_ctx2 is None: profiler_ctx2 = copy.deepcopy(profiler_ctx) with profiler_ctx as prof: for _ in range(num_iters): loss = net(inp).sum() loss.backward() all_reduce_event_name = f"{dist.get_backend()}:all_reduce" events = get_profiling_event(all_reduce_event_name, prof, dedup_gpu_user_annotation=True) event_count = sum(e.count for e in events) self.assertEqual(event_count, num_iters) for event in events: self.assertTrue(event.is_async) self.assertEqual(event.name, all_reduce_event_name) broadcast_event_name = f"{dist.get_backend()}:broadcast" broadcast_events = get_profiling_event(broadcast_event_name, prof, dedup_gpu_user_annotation=True) event_count = sum(e.count for e in broadcast_events) # Broadcast is called during rebuild_buckets self.assertGreaterEqual(event_count, 1) for event in broadcast_events: self.assertEqual(event.name, broadcast_event_name) # Run DDP with profiling for a few iterations, then enable profiling # for a single pass, and ensure it is recorded. This tests that the # thread local state is correctly updated. 
net = torch.nn.parallel.DistributedDataParallel( model.cuda(self.rank), device_ids=[self.rank], find_unused_parameters=True, ) for _ in range(3): loss = net(inp).sum() loss.backward() # Now enable the profiler. with profiler_ctx2 as prof: loss = net(inp).sum() loss.backward() events = get_profiling_event(all_reduce_event_name, prof, dedup_gpu_user_annotation=True) self.assertGreaterEqual(len(events), 1) self.assertGreaterEqual(events[0].count, 1) self.assertEqual(events[0].name, all_reduce_event_name) for event in events: self.assertTrue(event.is_async) # Ensure searching unused parameters was profiled events = get_profiling_event("search_unused_parameters", prof) self.assertEqual(len(events), 1) return prof @require_backend_is_available(DistTestCases.backend_feature["gpu"]) @skip_if_lt_x_gpu(2) @skip_but_pass_in_sandcastle("Currently failing in NVIDIA internal CI") def test_ddp_profiling_autograd_profiler(self): autograd_profiler_ctx = torch.autograd.profiler.profile() return self._test_ddp_profiling(profiler_ctx=autograd_profiler_ctx) @require_backend_is_available(DistTestCases.backend_feature["gpu"]) @skip_if_lt_x_gpu(2) @skip_but_pass_in_sandcastle_if(IS_FBCODE, "Kineto in fbcode code causes hang") @skip_but_pass_in_sandcastle_if( IS_MACOS or IS_WINDOWS, "torch.profiler not enabled for mac/windows: https://github.com/pytorch/pytorch/pull/56124", ) def test_ddp_profiling_torch_profiler(self): cpu_act = torch.profiler.ProfilerActivity.CPU cuda_act = torch.profiler.ProfilerActivity.CUDA torch_profiler_ctx = torch.profiler.profile(activities=[cpu_act, cuda_act]) prof = self._test_ddp_profiling(profiler_ctx=torch_profiler_ctx) if dist.get_backend() != "nccl": return # Note comment out the "os.remove(trace_file)" in `get_profiler_nccl_meta()` # to debug any mismatches. nccl_meta_events = get_profiler_nccl_meta(prof) self.assertGreater(len(nccl_meta_events), 0) nccl_meta = self._sanity_check_profiler_nccl_meta(nccl_meta_events) # additionally check the specific collectives in this test case self.assertEqual(len(nccl_meta["allreduce"]), 2) self.assertEqual(len(nccl_meta["wait"]), 1) # check allreduce message sizes a0 = nccl_meta["allreduce"][0] self.assertEqual(a0["Out msg nelems"], 100, msg=f"{a0}") self.assertEqual(a0["dtype"], "Float", msg=f"{a0}") a1 = nccl_meta["allreduce"][1] self.assertEqual(a1["Out msg nelems"], 1, msg=f"{a1}") self.assertEqual(a1["dtype"], "Int", msg=f"{a1}") def _validate_execution_trace_nccl(self, et_file: str) -> None: """Torch profiler includes nccl metadata in an inserted operator called "record_param_comms" We test for basic fields in theese nodes in the Execution Trace. 
""" with open(et_file) as f: et = json.load(f) pg_cfg_node = [n for n in et["nodes"] if n["name"] == "## process_group:init ##"] self.assertGreaterEqual(len(pg_cfg_node), 1) nccl_meta_nodes = [n for n in et["nodes"] if n["name"] == "record_param_comms"] self.assertEqual(len(nccl_meta_nodes), 3) per_coll_meta = defaultdict(list) # Sanity check NCCL metadata nodes for n in nccl_meta_nodes: attrs_list = n.get("attrs", []) self.assertGreater(len(attrs_list), 0) attrs = {a["name"]: a["value"] for a in attrs_list} collname = attrs.get("collective_name", "") self.assertNotEqual(collname, "") self.assertNotEqual(attrs.get("dtype", ""), "") per_coll_meta[collname].append(attrs) if collname in {"wait"}: continue self.assertEqual(attrs["pg_name"], "0") # yes this is a string self.assertEqual(attrs["pg_desc"], "default_pg") self.assertEqual(attrs["pg_size"], 2) self.assertGreaterEqual(attrs.get("in_msg_nelems", -1), 0) self.assertGreaterEqual(attrs.get("out_msg_nelems", -1), 0) self.assertTrue("in_split_size" in attrs.keys()) self.assertTrue("out_split_size" in attrs.keys()) self.assertEqual(attrs.get("global_rank_start", -1), 0) self.assertEqual(attrs.get("global_rank_stride", -1), 1) # print(per_coll_meta) self.assertEqual(len(per_coll_meta["allreduce"]), 2) self.assertEqual(len(per_coll_meta["wait"]), 1) # check allreduce message sizes a0 = per_coll_meta["allreduce"][0] self.assertEqual(a0["out_msg_nelems"], 100, msg=f"{a0}") self.assertEqual(a0["dtype"], "Float", msg=f"{a0}") a1 = per_coll_meta["allreduce"][1] self.assertEqual(a1["out_msg_nelems"], 1, msg=f"{a1}") self.assertEqual(a1["dtype"], "Int", msg=f"{a1}") @require_backend_is_available(DistTestCases.backend_feature["gpu"]) @skip_if_lt_x_gpu(2) @skip_but_pass_in_sandcastle_if(IS_FBCODE, "Kineto in fbcode code causes hang") @skip_but_pass_in_sandcastle_if( IS_MACOS or IS_WINDOWS, "torch.profiler not enabled for mac/windows: https://github.com/pytorch/pytorch/pull/56124", ) @unittest.skipIf(BACKEND != "nccl", "Tests nccl metadata primarily.") def test_ddp_profiling_execution_trace(self): self.assertEqual(dist.get_backend(), "nccl") # Create a temp file to save execution trace data fp = tempfile.NamedTemporaryFile("w+t", suffix=".et.json", delete=False) fp.close() et_file = fp.name et = ExecutionTraceObserver().register_callback(et_file) # first profiler context need not have ET torch_profiler_ctx1 = torch.profiler.profile( activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA], ) # collect ET in second profiler pass torch_profiler_ctx2 = torch.profiler.profile( activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA], execution_trace_observer=et ) self._test_ddp_profiling( profiler_ctx=torch_profiler_ctx1, profiler_ctx2=torch_profiler_ctx2, ) print(f"Execution trace saved at {fp.name}") self._validate_execution_trace_nccl(et_file) @skip_if_lt_x_gpu(2) @skip_but_pass_in_sandcastle_if( BACKEND not in DistTestCases.backend_feature["ddp"], f"The {BACKEND} backend does not support DistributedDataParallel", ) def test_ddp_join_model_equivalence(self): # Verifies equivalence with model training locally and with DDP under # the join context manager. 
batch = 3 dim = 10 learning_rate = 0.03 model = nn.Linear(dim, dim, bias=False) inp = torch.rand(batch, dim, device=self.rank) local_model = copy.deepcopy(model) local_model = local_model.cuda(self.rank) rank_to_iter_mapping = { rank: 2 * (rank + 1) for rank in range(dist.get_world_size()) } # run local model local_iters = sum(rank_to_iter_mapping.values()) local_optim = torch.optim.SGD(local_model.parameters(), lr=learning_rate) for _ in range(local_iters): local_optim.zero_grad() out = local_model(inp) loss = out.sum() loss.backward() local_optim.step() # run DDP model with join API num_iters = rank_to_iter_mapping[self.rank] net = torch.nn.parallel.DistributedDataParallel( model.cuda(self.rank), device_ids=[self.rank] ) ddp_optim = torch.optim.SGD( model.parameters(), lr=learning_rate * dist.get_world_size() ) with net.join(): for _ in range(num_iters): ddp_optim.zero_grad() out = net(inp) loss = out.sum() loss.backward() torch.cuda.synchronize(device=self.rank) ddp_optim.step() # Validate model state dicts are equal for (_, local_tensor), (_, dist_tensor) in zip( local_model.state_dict().items(), net.module.state_dict().items() ): self.assertEqual(local_tensor, dist_tensor) def _run_uneven_inputs_test( self, test_case, iteration_mapping, find_unused_params, ): model = test_case.model inp = test_case.inp rank = self.rank sync_interval = test_case.sync_interval torch.cuda.set_device(rank) # Ensure all outstanding GPU work is completed so this test runs independently. dist.barrier() # Bucket_cap_mb is intentionally low to test allreduce scheduling when # there are many buckets. net = torch.nn.parallel.DistributedDataParallel( model.cuda(rank), device_ids=[rank], bucket_cap_mb=1, find_unused_parameters=find_unused_params, ) # Register hook if specified if test_case.hook is not None: net.register_comm_hook(test_case.state, test_case.hook) print(f"registered hook {test_case.hook}") # Determine num iters for this rank via the passed in mapping. num_iters = iteration_mapping[rank] # If we throw when earliest rank terminates, we should ensure # that we iterate for that minimum number of times. num_iters_tensor = torch.tensor( [num_iters], device=torch.cuda.current_device() ) dist.all_reduce(num_iters_tensor, op=dist.ReduceOp.MIN) min_num_iters = num_iters_tensor.item() total_iters = 0 if test_case.throw_on_early_termination: if min_num_iters == num_iters: # Early termination rank(s) exception_ctx = self.assertRaisesRegex( RuntimeError, f"Rank {self.rank} exhausted all inputs" ) else: # Non early termination rank exception_ctx = self.assertRaisesRegex( RuntimeError, "Detected at least one rank that exhausted inputs.", ) else: exception_ctx = nullcontext() with exception_ctx: with net.join( throw_on_early_termination=test_case.throw_on_early_termination ): for i in range(num_iters): # Use model.no_sync() to disable grad synchronization every # sync_interval. if i % sync_interval != 0: context = net.no_sync() else: context = nullcontext() with context: if isinstance(inp, tuple): loss = net(*inp).sum() else: loss = net(inp).sum() loss.backward() self._model_step(net) # Ensure completion of GPU kernels (including allreduce). If the # join API is not properly implemented, then this should hang # since the allreduce will hang. torch.cuda.synchronize(device=rank) total_iters += 1 if test_case.throw_on_early_termination: # Ensure we iterated min_num_iters times. self.assertEqual(total_iters, min_num_iters) else: # Ensure we iterated at least min_num_iters times. 
self.assertGreaterEqual(total_iters, min_num_iters) # Ensure completion of all GPU kernels. torch.cuda.synchronize(device=rank) # When throwing on early rank termination, we do not # broadcast model state from an authoritative rank. All models # should already be in sync. if not test_case.throw_on_early_termination: self.assertTrue(net._authoritative_rank) # All ranks should have agreed on the same authoritative_rank! final_rank_tensor = torch.tensor( [net._authoritative_rank], device=self.rank ) tensor_list = [ torch.zeros_like(final_rank_tensor) for _ in range(dist.get_world_size()) ] dist.all_gather(tensor_list, final_rank_tensor) max_rank = dist.get_world_size() - 1 self.assertSetEqual( {max_rank}, {tensor.item() for tensor in tensor_list} ) # Ensure that all models are the same across ranks after all have joined. self.validate_net_equivalence(net) # Ensure that running with DDP uneven inputs was logged. ddp_logging_data = net._get_ddp_logging_data() self.assertTrue(ddp_logging_data.get("join_uneven_inputs")) dist.barrier() @skip_if_lt_x_gpu(2) @skip_but_pass_in_sandcastle_if( BACKEND not in DistTestCases.backend_feature["ddp"], f"The {BACKEND} backend does not support DistributedDataParallel", ) def test_ddp_uneven_inputs_stop_iteration_sync_bn(self): # Tests that uneven inputs join handler correctly throws StopIteration # for models with SyncBN or general collective comm when # throw_on_early_termination=True. class ModelWithComm(torch.nn.Module): def __init__(self) -> None: super().__init__() self.lin = nn.Linear(2, 40, bias=False) def forward(self, x): x = self.lin(x) dist.all_reduce(x) return x torch.cuda.set_device(self.rank) model_bn = BN_NET model_bn = nn.SyncBatchNorm.convert_sync_batchnorm( copy.deepcopy(model_bn) ).cuda(self.rank) comm_model = ModelWithComm().cuda(self.rank) model_input = torch.randn(10, 2).cuda(torch.cuda.current_device()) for model in [model_bn, comm_model]: model = torch.nn.parallel.DistributedDataParallel( model, device_ids=[self.rank], ) min_num_iters = 5 if self.rank != 0: # Early termination rank(s) num_iters = min_num_iters exception_ctx = self.assertRaisesRegex( RuntimeError, f"Rank {self.rank} exhausted all inputs" ) else: # Non early termination rank num_iters = min_num_iters * 2 exception_ctx = self.assertRaisesRegex( RuntimeError, "Detected at least one rank that exhausted inputs.", ) n = 0 with exception_ctx: with model.join(throw_on_early_termination=True): for _ in range(num_iters): loss = model(model_input).sum() loss.backward() self._model_step(model) n += 1 self.assertEqual(n, min_num_iters) # Verify model equivalence self.validate_net_equivalence(model) @skip_if_lt_x_gpu(2) @skip_but_pass_in_sandcastle_if( BACKEND not in DistTestCases.backend_feature["ddp"], f"The {BACKEND} backend does not support DistributedDataParallel", ) def test_ddp_uneven_inputs(self): dim = 1000 batch = 1 # Create a variety of models to run uneven input tests on. 
large_model = nn.Sequential( nn.Conv2d(1, 20, 5), nn.ReLU(), nn.Conv2d(20, 32, 5), nn.ReLU(), nn.Conv2d(32, 256, 5), nn.ReLU(), ) small_model = nn.Linear(dim, dim, bias=False) bn_net = BatchNormNet() class UnusedParamModule(nn.Module): def __init__(self, unused_params_rank): super().__init__() self.t0 = Task() self.t1 = Task() self.unused_params_rank = unused_params_rank def task_parameters(self): return (self.t0.p, self.t1.p) def forward(self, x, rank): return ( self.t1(self.t0(x)) if rank != self.unused_params_rank else self.t1(x) ) unjoined_rank_with_unused_params_model = UnusedParamModule(1) joined_rank_with_unused_params_model = UnusedParamModule(0) rank = self.rank models_to_test = [ # Network with batchnorm DDPUnevenTestInput( name="batch_norm_net", model=bn_net, inp=torch.ones(batch, 2, device=rank), sync_interval=1, ), DDPUnevenTestInput( name="large_conv_model", model=large_model, inp=torch.ones(batch, batch, dim, dim, device=rank), sync_interval=1, ), DDPUnevenTestInput( name="small_model", model=small_model, inp=torch.ones(batch, dim, device=rank), sync_interval=1, ), # Unused parameter test where rank that does not join early has unused params DDPUnevenTestInput( name="unjoined_rank_with_unused_params_model", model=unjoined_rank_with_unused_params_model, inp=(torch.ones(batch, 2, device=rank), rank), sync_interval=1, ), # Unused parameter test where rank that does join early has unused params DDPUnevenTestInput( name="joined_rank_with_unused_params_model", model=joined_rank_with_unused_params_model, inp=(torch.ones(batch, 2, device=rank), rank), sync_interval=1, ), ] # Test models that have hook installed. models_with_hook = [ DDPUnevenTestInput( name="small_model_allreduce_hook", model=small_model, hook=default.allreduce_hook, state=None, inp=torch.ones(batch, dim, device=rank), sync_interval=1, ), DDPUnevenTestInput( name="small_model_power_sgd_hook", model=small_model, hook=powerSGD.powerSGD_hook, state=powerSGD.PowerSGDState( process_group=None, matrix_approximation_rank=1, # Config so that powerSGD runs immediately instead of # allreduce. start_powerSGD_iter=1, warm_start=False, use_error_feedback=False, ), inp=torch.ones(batch, dim, device=rank), sync_interval=1, ), ] models_to_test.extend(models_with_hook) # Add resnet model if we have torchvision installed. if HAS_TORCHVISION: resnet_model = torchvision.models.resnet50() models_to_test.append( DDPUnevenTestInput( name="resnet_model", model=resnet_model, inp=torch.ones(1, 3, 1000, 1000), sync_interval=1, ) ) # Test with no_sync every 2, 3, 4, ... iterations. models_with_sync = [] for i, test_input in enumerate(models_to_test): models_with_sync.append( DDPUnevenTestInput( name=test_input.name, model=test_input.model, inp=test_input.inp, sync_interval=i + 2, ) ) throw_on_early_term_tests = [] for test_input in models_to_test: throw_on_early_term_tests.append( DDPUnevenTestInput( name=test_input.name, model=test_input.model, inp=test_input.inp, sync_interval=test_input.sync_interval, throw_on_early_termination=True, ) ) models_to_test.extend(models_with_sync) models_to_test.extend(throw_on_early_term_tests) # 0 iteration tests for when one process does not train model at all, so # we must shadow the broadcast calls made when rebuilding buckets. baseline_num_iters = [0, 5] iteration_offsets = [2, 3, 10] num_uneven_ranks = [1] if dist.get_world_size() > 2: num_uneven_ranks.append(2) iteration_mappings = [] # Generate rank : num_iters mappings for various uneven input scenarios. 
# This includes cases where rank 0 joins early and all other ranks join # later, and scenarios where multiple ranks join early, but at different # iterations, and later ranks join later. for num_early_join_ranks in num_uneven_ranks: for baseline_iter in baseline_num_iters: for offset in iteration_offsets: mapping = dict.fromkeys(range(0, num_early_join_ranks), baseline_iter) # if num_early_join_ranks > 1, ranks > 0 that will join early # iterate offset//2 more times than rank 0, to test nodes # depleting inputs at different times. if num_early_join_ranks > 1: for rank in mapping.keys(): if rank > 0: mapping[rank] += offset // 2 mapping.update( dict.fromkeys(range(num_early_join_ranks, dist.get_world_size()), baseline_iter + offset) ) iteration_mappings.append(mapping) for (test_case, iteration_mapping) in itertools.product( models_to_test, iteration_mappings ): if self.rank == 0: print( f"""Running test: {test_case.name} sync interval {test_case.sync_interval} with iteration mapping {iteration_mapping}""" ) self._run_uneven_inputs_test( test_case, iteration_mapping, find_unused_params=("unused_params_model" in test_case.name), ) @skip_if_lt_x_gpu(2) @skip_but_pass_in_sandcastle_if( BACKEND not in DistTestCases.backend_feature["ddp"], f"The {BACKEND} backend does not support DistributedDataParallel", ) def test_ddp_uneven_input_join_disable(self): # tests that if net.join() with enable=False is specified, DDP works as # expected with even inputs. torch.manual_seed(self.rank) net = torch.nn.parallel.DistributedDataParallel( torch.nn.Linear(1, 1).cuda(self.rank), device_ids=[self.rank] ) inp = torch.ones(1) * self.rank n_iters = 5 world_size = dist.get_world_size() with net.join(enable=False): for _ in range(n_iters): # Clear grads grad = net.module.weight.grad if grad is not None: grad.requires_grad_(False) grad.zero_() out = net(inp) loss = out.sum() loss.backward() # Validate gradients to ensure that we divide by the correct # world_size when join mode is disabled. expected_grad = sum(i for i in range(world_size)) / world_size self.assertEqual(net.module.weight.grad.item(), expected_grad) join_config = net._join_config self.assertFalse(join_config.enable) self.validate_net_equivalence(net) @skip_if_lt_x_gpu(2) @skip_but_pass_in_sandcastle_if( BACKEND not in DistTestCases.backend_feature["ddp"], f"The {BACKEND} backend does not support DistributedDataParallel", ) def test_ddp_uneven_input_exception(self): # Tests that exceptions during training are correctly propagated by the # context manager. error_str = "Intentional error" class ExceptionModule(nn.Module): def __init__(self) -> None: super().__init__() self.param = nn.Parameter(torch.ones(1, requires_grad=True)) def forward(self, _): raise ValueError(error_str) exception_module = ExceptionModule() net = torch.nn.parallel.DistributedDataParallel( exception_module.cuda(self.rank), device_ids=[self.rank] ) inp = torch.ones(1) with self.assertRaisesRegex(ValueError, error_str): with net.join(): out = net(inp) loss = out.sum() loss.backward() def _test_broadcast_object_list(self, group=None): gather_objects = COLLECTIVES_OBJECT_TEST_LIST.copy() # Only set device for NCCL backend since it must use GPUs. # Case where rank != GPU device. 
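        # broadcast_object_list pickles each element on the source rank,
        # broadcasts the serialized bytes as tensors, and unpickles them in
        # place on the other ranks; every rank must therefore pass a list of
        # the same length, which is why non-source ranks build [None, ...]
        # placeholders below.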
next_rank = (self.rank + 1) % int(self.world_size) backend = os.environ["BACKEND"] if backend == "nccl": torch.cuda.set_device(next_rank) src_rank = 0 # If GPU test, add object with GPU tensor if backend == "nccl": gather_objects.append(Foo(torch.randn(3, 3, device=0))) if IS_FBCODE: # Create Tensor with > 2^31 Bytes storage requirements # Only on FBCODE as testing OOMs in OSS gather_objects.append(Foo(torch.randn(3, 178956971))) objects = ( gather_objects if self.rank == src_rank else [None for _ in gather_objects] ) # Single object test with device specified. Backend="gloo", device=cpu if backend != "nccl": single_obj_list = [objects[0]] if self.rank != src_rank: self.assertNotEqual(single_obj_list[0], gather_objects[0]) dist.broadcast_object_list( single_obj_list, src=0, group=group, device=torch.device("cpu") ) self.assertEqual(single_obj_list[0], gather_objects[0]) # Single object test with device specified. Backend="gloo", device=current_device+1 # The test is gated by the fact GPU count is the same as world size to avoid the case # when backend is gloo but there is no multiple GPU devices. if backend != "nccl" and torch.cuda.device_count() == int(self.world_size): single_obj_list = [objects[0]] if self.rank != src_rank: self.assertNotEqual(single_obj_list[0], gather_objects[0]) dist.broadcast_object_list( single_obj_list, src=0, group=group, device=torch.device(next_rank) ) self.assertEqual(single_obj_list[0], gather_objects[0]) # Single object test with device specified. Backend="nccl", device=current_device+1 if backend == "nccl" and torch.cuda.device_count() == int(self.world_size): single_obj_list = [objects[0]] if self.rank != src_rank: self.assertNotEqual(single_obj_list[0], gather_objects[0]) dist.broadcast_object_list( single_obj_list, src=0, group=group, device=torch.device(next_rank) ) self.assertEqual(single_obj_list[0], gather_objects[0]) # Single object test: backward compatibility with device unspecified single_obj_list = [objects[0]] if self.rank != src_rank: self.assertNotEqual(single_obj_list[0], gather_objects[0]) dist.broadcast_object_list(single_obj_list, src=0, group=group) self.assertEqual(single_obj_list[0], gather_objects[0]) # Multiple input objects test if self.rank != src_rank: self.assertNotEqual(objects, gather_objects) dist.broadcast_object_list(objects, src=0, group=group) self.assertEqual(objects, gather_objects) @require_backend_is_available(DistTestCases.backend_feature["gpu"]) @require_n_gpus_for_nccl_backend( int(os.environ["WORLD_SIZE"]), os.environ["BACKEND"] ) @with_dist_debug_levels(levels=["DETAIL"]) @unittest.skip("Test is failing, see https://github.com/pytorch/pytorch/pull/113620") def test_broadcast_object_list(self): return self._test_broadcast_object_list() @require_backend_is_available(DistTestCases.backend_feature["gpu"]) @require_n_gpus_for_nccl_backend( int(os.environ["WORLD_SIZE"]), os.environ["BACKEND"] ) @with_dist_debug_levels(levels=["DETAIL"]) def _test_broadcast_object_list_subgroup(self): default = _get_default_group() backend = dist.get_backend(default) subgroup = dist.new_group(backend=backend) return self._test_broadcast_object_list(subgroup) def _test_ddp_ignore_params_arg(self, static_graph=False): class TestModel(nn.Module): def __init__(self, rank): self.rank = rank super().__init__() self.fc1 = nn.Linear(1, 1, bias=False) # Proxy that will be materialized to another architecture later. 
# (after wrapping model with DDP) if self.rank == 0: self.fc2 = nn.Linear(1, 10, bias=False) else: self.fc2 = nn.Linear(10, 10, bias=False) def forward(self, x): x = self.fc1(x) x = self.fc2(x) return x device_id = self.rank # Ensure the test works for both find_unused_parameter and broadcast_buffer settings. for (find_unused, broadcast_buffers) in itertools.product( [False, True], [False, True] ): model = TestModel(self.rank).float().to(device_id) # Note that the model can have different shape buffers if we pass # them in to be ignored as well. model.fc2.register_buffer( "ignore_buffer", torch.zeros(5 + self.rank, device=self.rank) ) proxy_params = list(model.fc2.parameters()) model_fc2_name = next( module_name for module_name, module in model.named_modules() if module is model.fc2 ) proxy_param_names = [ f"{model_fc2_name}.{param_name}" for param_name, _ in model.fc2.named_parameters() ] proxy_buffer_names = [ f"{model_fc2_name}.{buf_name}" for buf_name, _ in model.fc2.named_buffers() ] # Specify that we should ignore proxy_params since it will be # materialized later. torch.nn.parallel.DistributedDataParallel._set_params_and_buffers_to_ignore_for_model( model, proxy_param_names + proxy_buffer_names ) ddp = torch.nn.parallel.DistributedDataParallel( model, device_ids=[device_id], find_unused_parameters=find_unused, broadcast_buffers=broadcast_buffers, static_graph=static_graph, ) # Materialize new params. These are not registered in DDP and thus # don't have autograd hooks installed on them. ddp.module.fc2 = nn.Linear(1, 1, bias=False).to(device_id) # local model with the new materialized parameters. local_model = copy.deepcopy(ddp.module).cuda(self.rank) inp = torch.ones(1, dtype=torch.float).to(device_id) * (self.rank + 1) for _ in range(6): ddp(inp).sum().backward() local_model(inp).sum().backward() # materialized param grad is not touched by DDP, so its grad should # be the same as if running locally. for materialized_param, local_param in zip( ddp.module.fc2.parameters(), local_model.fc2.parameters() ): self.assertEqual(materialized_param.grad, local_param.grad) # fc1 parameter grad should still be different, due to allreduce. for synced_param, local_param in zip( ddp.module.fc1.parameters(), local_model.fc1.parameters() ): self.assertFalse(synced_param.grad == local_param.grad) # Proxy module grad should not be touched for proxy_param in proxy_params: self.assertTrue(proxy_param.grad is None) # Synchronize since we run multiple iterations of this test, to # isolate failure hangs. torch.cuda.synchronize(device=self.rank) @require_backend_is_available(DistTestCases.backend_feature["gpu"]) @skip_if_lt_x_gpu(2) def test_ddp_ignore_params_arg(self): self._test_ddp_ignore_params_arg(static_graph=False) self._test_ddp_ignore_params_arg(static_graph=True) @with_dist_debug_levels(levels=["OFF", "INFO", "DETAIL"]) @require_backend_is_available(DistTestCases.backend_feature["gpu"]) @skip_if_lt_x_gpu(2) def test_ddp_unused_params_rebuild_buckets_exception(self): class ToyModel(nn.Module): def __init__(self) -> None: super().__init__() self.net1 = nn.Linear(10, 10, bias=False) self.net2 = nn.Linear(10, 10, bias=False) def forward(self, x): return self.net1(x) ddp = torch.nn.parallel.DistributedDataParallel( ToyModel().cuda(self.rank), device_ids=[self.rank] ) for i in range(2): inp = torch.rand(1, 10) if i > 0: # On 2nd iteration, this will fail during rebuild_buckets, # but we should report an error regarding unused parameters # since that is the underlying root cause. 
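                # net2 never participates in forward, so its parameter never
                # receives a gradient and the first iteration's reduction is
                # never finished. The second forward pass detects this and
                # raises; the checks below verify that the error message
                # points at unused parameters as the root cause.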
try: ddp(inp).sum().backward() except RuntimeError as e: msg = str(e) verify_ddp_error_logged(ddp, msg) expected_strs = [ ddp_prev_reduction_unfinished_str, ddp_recommend_find_unused_params_str, ddp_outputs_not_used_in_loss_str, ] # In debug mode, should show parameters that weren't reduced. # Without debug mode, should show suggestion to use debug mode. if dist.get_debug_level() == dist.DebugLevel.OFF: expected_strs.append(ddp_suggest_debug_mode_str) else: unreduced_params = ", ".join(["net2.weight"]) expected_strs.append( f"did not receive grad for rank {self.rank}: {unreduced_params}" ) for s in expected_strs: self.assertTrue(s in msg, f"Expected {s} to be in {msg}") self.assertFalse(ddp_find_unused_params_enabled_str in msg) else: self.assertFalse( True, "DDP unused parameters error not raised." ) else: ddp(inp).sum().backward() dist.barrier() @require_backend_is_available(DistTestCases.backend_feature["gpu"]) @skip_if_lt_x_gpu(2) def test_ddp_shared_grad_acc_unused_params(self): # When find_unused_parameters=True, ensure we mark unused parameters # even if they share gradient accumulators. class ToyModel(nn.Module): def __init__(self) -> None: super().__init__() # net1, bias, and net1.bias are all unused params. self.net1 = nn.Linear(10, 5, bias=False) self.bias = nn.Parameter(torch.zeros(5)) # net1.bias and self.bias are names for the same underlying # parameter, so they share the same grad acc. This caused # the bug reported in https://github.com/pytorch/pytorch/issues/41324. self.net1.bias = self.bias self.net2 = nn.Linear(10, 5) def forward(self, x): return self.net2(x).sum() torch.cuda.set_device(self.rank) model = ToyModel().to(torch.cuda.current_device()) for static in [True, False]: ddp_model = torch.nn.parallel.DistributedDataParallel( copy.deepcopy(model), device_ids=[self.rank], find_unused_parameters=True, static_graph=static, ) inp = torch.randn(20, 10, device=self.rank) for _ in range(6): loss = ddp_model(inp) # To test https://github.com/pytorch/pytorch/issues/61982 loss /= 10 loss.backward() @require_backend_is_available(DistTestCases.backend_feature["gpu"]) @skip_if_lt_x_gpu(2) def test_ddp_device(self): expected_len = 2 class TensorWrapper: __slots__ = ["t", "moved_to_gpu"] def __init__(self, t): self.t = t self.moved_to_gpu = False # Handlers for specific types of validation we want to do based on # the input type. 
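        # In effect this checks DDP's single-device input handling: CPU tensors
        # nested inside tuples / lists / namedtuples / dicts are moved to
        # self.rank before forward() (mirroring scatter's recursive behavior),
        # while tensors held by arbitrary user classes such as TensorWrapper are
        # left where they are and must be moved inside the model. The validators
        # below encode those per-type expectations.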
def tuple_and_list_validator(x): self.assertTrue(len(x), expected_len) self.assertEqual(1, len({t.device for t in x})) self.assertEqual(x[0].device.index, self.rank) return x[0] + x[1] def namedtuple_validator(x): self.assertEqual(x._fields, EXPECTED_FIELDS) self.assertEqual(x.a.device.index, x.b.device.index) self.assertEqual(x.a.device.index, self.rank) return x.a + x.b def custom_type_validator(x): self.assertTrue(x.moved_to_gpu or (str(x.t.device) == "cpu")) x.t = x.t.to(self.rank) x.moved_to_gpu = True return x.t def dict_validator(x): self.assertTrue(EXPECTED_FIELDS[0] in x.keys()) self.assertTrue(EXPECTED_FIELDS[1] in x.keys()) self.assertEqual(1, len({t.device for t in x.values()})) self.assertEqual(x[EXPECTED_FIELDS[0]].device.index, self.rank) return x[EXPECTED_FIELDS[0]] + x[EXPECTED_FIELDS[1]] validators = { TensorWrapper: custom_type_validator, tuple: tuple_and_list_validator, list: tuple_and_list_validator, TestNamedTupleInput_0: namedtuple_validator, TestNamedTupleInput_1: namedtuple_validator, dict: dict_validator, } class ToyModel(torch.nn.Module): def __init__(self_): # noqa: B902 super().__init__() self_.lin = nn.Linear(10, 10, bias=False) def forward(self_, x, expected_type): # noqa: B902 # Similar to scatter, the recursive to in the single-device # case does not move tensors if they are in a custom type. self.assertTrue(isinstance(x, expected_type)) fwd_tensor = validators[expected_type](x) return self_.lin(fwd_tensor) model = torch.nn.parallel.DistributedDataParallel( ToyModel().to(self.rank), device_ids=[self.rank] ) def train_iter(inp, input_type): for _ in range(4): out = model(inp, input_type) out.sum().backward() # CPU tuple input, should be moved to the proper device before call # to forward. inp = tuple(torch.randn(10, 10) for _ in range(expected_len)) train_iter(inp, tuple) # List CPU input, should be moved to proper device before call to # forward. inp = [torch.randn(10, 10) for _ in range(expected_len)] train_iter(inp, list) # Custom type containing tensor. The type is maintained, but the # device is not propagated (which is what happens with scatter too) inp = TensorWrapper(torch.randn(10, 10)) train_iter(inp, TensorWrapper) # NamedTuple input. The type should be maintained and tensor inputs # should be moved to the correct device as in scatter. batch = 5 dim = 10 a = torch.rand(batch, dim) b = torch.rand(batch, dim) inp = TestNamedTupleInput_0(a, b) train_iter(inp, type(inp)) inp = TestNamedTupleInput_1(a, b) train_iter(inp, type(inp)) # dictionary input. inp = { EXPECTED_FIELDS[0]: a, EXPECTED_FIELDS[1]: b, } train_iter(inp, type(inp)) @require_backend_is_available(DistTestCases.backend_feature["gpu"]) @skip_if_lt_x_gpu(2) def test_ddp_namedtuple(self): batch = 5 dim = 10 a = torch.rand(batch, dim, device=self.rank) b = torch.rand(batch, dim, device=self.rank) class NamedTupleModule(torch.nn.Module): def __init__(self_): # noqa: B902 super().__init__() self_.lin = nn.Linear(10, 1) def forward(self_, input, expected_type): # noqa: B902 # Without NamedTuple support, this would be of type tuple. 
self.assertTrue( isinstance(input, expected_type), f"Expected type {expected_type} but got {type(input)}", ) self.assertEqual(input._fields, EXPECTED_FIELDS) self.assertEqual(a, input.a) self.assertEqual(b, input.b) return self_.lin(torch.mul(input.a, input.b)) model = torch.nn.parallel.DistributedDataParallel( NamedTupleModule().cuda(self.rank), device_ids=[self.rank] ) inp = TestNamedTupleInput_0(a, b) # The following would fail if DDP does not propagate NamedTuples correctly. model(inp, type(inp)) inp = TestNamedTupleInput_1(a, b) model(inp, type(inp)) @require_backend_is_available({"gloo"}) def test_grads_same_across_ranks_with_no_sync(self): _group, _group_id, rank = self._init_global_test() world_size = dist.get_world_size() if world_size < 2: self.skipTest("This test requires at least two ranks.") class SimpleConditionalModel(nn.Module): # if rank is 0, uses nn1 on the first pass and nn2 on the second pass. # else, uses nn3 on the first pass and nn4 on the second pass. def __init__(self, rank): super().__init__() self.rank = rank self.nn1 = nn.Linear(1, 1) self.nn2 = nn.Linear(1, 1) self.nn3 = nn.Linear(1, 1) self.nn4 = nn.Linear(1, 1) self.state = 0 def forward(self, input): if self.state == 0: self.state = 1 if self.rank == 0: return self.nn1(input) else: return self.nn3(input) else: self.state = 0 if self.rank == 0: return self.nn2(input) else: return self.nn4(input) model = torch.nn.parallel.DistributedDataParallel( SimpleConditionalModel(rank), find_unused_parameters=True ) mse_loss = nn.MSELoss() grad_accumulation = 2 for microbatch_idx in range(grad_accumulation): if microbatch_idx < grad_accumulation - 1: context = model.no_sync else: context = nullcontext with context(): input = torch.rand((1, )) output = model.forward(input) target = torch.rand((1, )) loss = mse_loss(output, target) loss.backward() self.assertTrue( not any(p.grad is None for p in model.parameters()), "Gradients can't be None for any model parameter." ) grads = torch.cat([p.grad.view(-1) for p in model.parameters()]) # Gather all gradients to rank 0. if rank == 0: gathered_grads = [torch.zeros_like(grads) for _ in range(world_size)] else: gathered_grads = [] dist.gather(grads, gather_list=gathered_grads, dst=0) if rank == 0: for g in gathered_grads[1:]: self.assertTrue( torch.allclose(gathered_grads[0], g), "Gradients are not the same for all ranks." ) @with_dist_debug_levels(levels=["OFF", "INFO", "DETAIL"]) @require_backend_is_available(DistTestCases.backend_feature["gpu"]) @skip_if_lt_x_gpu(2) def test_ddp_control_flow_same_across_ranks(self): # Control flow that is the same across ranks. batch = 20 dim = 10 world_size = dist.get_world_size() torch.cuda.set_device(self.rank) model = torch.nn.parallel.DistributedDataParallel( ControlFlowToyModel().cuda(self.rank), device_ids=[self.rank], find_unused_parameters=True, ) random_input = torch.randn(batch, dim, device=self.rank) ones_input = torch.ones(batch, dim, device=self.rank) for i in range(6): if i % 2 == 0: out = model(random_input) else: out = model(ones_input) loss = out.sum() loss.backward() # On even iterations, 2nd param goes unused, on odd iterations, # it is used. local_used_map = model.reducer._get_local_used_map() if i % 2 == 0: expected = torch.tensor( [world_size, 0], device=self.rank, dtype=torch.int32 ) else: expected = torch.tensor( [world_size, world_size], device=self.rank, dtype=torch.int32 ) # Validate parameter usage. 
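            # With find_unused_parameters=True the reducer tracks which parameters
            # produced a gradient this iteration and allreduces that usage bitmap
            # across ranks, so entry k of local_used_map reflects how many ranks
            # used parameter k -- either 0 or world_size in this symmetric case.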
variable_usage_tensor = local_used_map self.assertEqual(variable_usage_tensor, expected) # Validate appropriate error message when DDP is used with # find_unused_parameters=False. model = torch.nn.parallel.DistributedDataParallel( ControlFlowToyModel().cuda(self.rank), device_ids=[self.rank], find_unused_parameters=False, ) for i in range(2): if i == 0: loss = model(random_input).sum() loss.backward() else: try: loss = model(random_input).sum() loss.backward() except RuntimeError as e: msg = str(e) verify_ddp_error_logged(model, msg) # 2nd linear layer is unused unused_param_index = 1 expected_strs = [ ddp_prev_reduction_unfinished_str, ddp_recommend_find_unused_params_str, ddp_outputs_not_used_in_loss_str, f"Parameter indices which did not receive grad for rank {self.rank}: {unused_param_index}", ] # In debug mode, should show parameters that weren't reduced. # Without debug mode, should show suggestion to use debug mode. if dist.get_debug_level() == dist.DebugLevel.OFF: expected_strs.append(ddp_suggest_debug_mode_str) else: unreduced_params = ", ".join(["lin2.weight"]) expected_strs.append( f"did not receive grad for rank {self.rank}: {unreduced_params}" ) for s in expected_strs: self.assertTrue(s in msg, f"Expected {s} to be in {msg}") self.assertFalse(ddp_find_unused_params_enabled_str in msg) else: self.assertFalse(True, "DDP error not raised") dist.barrier() @require_backend_is_available(DistTestCases.backend_feature["gpu"]) @skip_if_lt_x_gpu(2) def test_invalid_static_graph(self): torch.cuda.set_device(self.rank) model = torch.nn.parallel.DistributedDataParallel( ControlFlowToyModel().cuda(self.rank), device_ids=[self.rank], static_graph=True, ) random_input = torch.randn(20, 10, device=self.rank) ones_input = torch.ones(20, 10, device=self.rank) # unused parameter in the first iteration got used # in second iteration. expected_err = "Your training graph has changed in this iteration" with self.assertRaisesRegex(RuntimeError, expected_err): for i in range(2): if i % 2 == 0: out = model(random_input) else: out = model(ones_input) loss = out.sum() loss.backward() verify_ddp_error_logged(model, expected_err) # used parameter in the first iteration got unused # in second iteration. with self.assertRaisesRegex( RuntimeError, "Expected to have finished reduction in the prior iteration " "before starting a new one. This error indicates that your " "training graph has changed in this iteration, " "e.g., one parameter is used in first iteration, " "but then got unused in the second iteration. " "this is not compatible with static_graph set to True.\n" "Parameter indices which did not receive grad for", ): for i in range(2): if i % 2 != 0: out = model(random_input) else: out = model(ones_input) loss = out.sum() loss.backward() verify_ddp_error_logged(model, "Expected to have finished reduction") @with_dist_debug_levels(levels=["OFF", "INFO", "DETAIL"]) @require_backend_is_available(DistTestCases.backend_feature["gpu"]) @skip_if_lt_x_gpu(2) def test_ddp_control_flow_different_across_ranks(self): # Control flow that is different across ranks. batch = 20 dim = 10 class ToyModel(nn.Module): def __init__(self, rank): super().__init__() self.lin1 = nn.Linear(10, 10, bias=False) self.lin2 = nn.Linear(10, 10, bias=False) self.rank = rank def forward(self, x): # Control-flow that is rank and input dependent for the # model. 
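                # Only rank 1 routes the all-ones input through lin2; every other
                # rank (and every random input) exercises lin1 alone. The set of
                # used parameters therefore differs across ranks, which is exactly
                # the situation find_unused_parameters=True has to handle.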
use_second_layer = ( torch.equal(x, torch.ones(batch, dim, device=x.device)) and self.rank == 1 ) if use_second_layer: return self.lin2(F.relu(self.lin1(x))) else: return F.relu(self.lin1(x)) world_size = dist.get_world_size() torch.cuda.set_device(self.rank) model = torch.nn.parallel.DistributedDataParallel( ToyModel(self.rank).cuda(self.rank), device_ids=[self.rank], find_unused_parameters=True, ) random_input = torch.randn(batch, dim, device=self.rank) ones_input = torch.ones(batch, dim, device=self.rank) for i in range(6): if i % 2 == 0: out = model(random_input) else: out = model(ones_input) loss = out.sum() loss.backward() # On even iterations, 2nd param goes unused, on odd iterations, # it is used only on rank 1. local_used_map = model.reducer._get_local_used_map() if i % 2 == 0: expected = torch.tensor( [world_size, 0], device=self.rank, dtype=torch.int32 ) else: expected = torch.tensor( [world_size, 1], device=self.rank, dtype=torch.int32 ) variable_usage_tensor = local_used_map # Validate parameter usage. On odd iterations, 2nd param is only # used on rank 1. self.assertEqual(variable_usage_tensor, expected) # Validate appropriate error message when DDP is used with # find_unused_parameters=False. model = torch.nn.parallel.DistributedDataParallel( ToyModel(self.rank).cuda(self.rank), device_ids=[self.rank], find_unused_parameters=False, ) for i in range(2): if i == 0: loss = model(random_input).sum() loss.backward() else: try: loss = model(random_input).sum() loss.backward() except RuntimeError as e: msg = str(e) verify_ddp_error_logged(model, msg) unused_param_index = 1 expected_strs = [ ddp_prev_reduction_unfinished_str, ddp_recommend_find_unused_params_str, ddp_outputs_not_used_in_loss_str, f"Parameter indices which did not receive grad for rank {self.rank}: {unused_param_index}", ] # In debug mode, should show parameters that weren't reduced. # Without debug mode, should show suggestion to use debug mode. if dist.get_debug_level() == dist.DebugLevel.OFF: expected_strs.append(ddp_suggest_debug_mode_str) else: unreduced_params = ", ".join(["lin2.weight"]) expected_strs.append( f"did not receive grad for rank {self.rank}: {unreduced_params}" ) for s in expected_strs: self.assertTrue(s in msg, f"Expected {s} to be in {msg}") self.assertFalse(ddp_find_unused_params_enabled_str in msg) else: self.assertFalse(True, "DDP error not raised") dist.barrier() @require_backend_is_available({"gloo"}) def test_scatter_object_list(self): src_rank = 0 scatter_list = ( COLLECTIVES_OBJECT_TEST_LIST if self.rank == src_rank else [None for _ in COLLECTIVES_OBJECT_TEST_LIST] ) world_size = dist.get_world_size() scatter_list = scatter_list[:world_size] i = 0 while len(scatter_list) < world_size: scatter_list.append(scatter_list[i]) i += 1 output_obj_list = [None] dist.scatter_object_list(output_obj_list, scatter_list, src=src_rank) self.assertEqual( output_obj_list[0], COLLECTIVES_OBJECT_TEST_LIST[ self.rank % len(COLLECTIVES_OBJECT_TEST_LIST) ], ) # Ensure errors are raised upon incorrect arguments. 
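        # scatter_object_list writes the received object in place into
        # scatter_object_output_list, so the caller must pass a pre-allocated
        # list with at least one slot (e.g. [None]); an empty list is rejected
        # with the ValueError checked below.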
with self.assertRaisesRegex( ValueError, "Expected argument scatter_object_output_list to be a list of size at least 1.", ): dist.scatter_object_list([], scatter_list, src=src_rank) def _generate_sparse_tensors_for_bucket_assignment_test(self): tensors = [ torch.empty([50], dtype=torch.float), torch.empty([25], dtype=torch.double), torch.empty([50], dtype=torch.float), torch.empty([25], dtype=torch.double), torch.empty([50], dtype=torch.float), torch.empty([25], dtype=torch.double), ] tensors_sparse = [t.to_sparse() for t in tensors] return tensors_sparse def _test_compute_bucket_assignment_by_size(self, use_logger): group_gloo = dist.new_group( timeout=timedelta(seconds=60), backend=dist.Backend.GLOO ) # Set TORCH_NCCL_BLOCKING_WAIT and use a new NCCL group to improve test # determinism. os.environ["TORCH_NCCL_BLOCKING_WAIT"] = "1" group_to_use = dist.new_group( backend=dist.get_backend(), timeout=timedelta(seconds=5) ) torch.cuda.set_device(self.rank) # Create a valid model. The constructor initializes the logger that we use later. # We never actually use the rest of the model - we only need its logger. net = EmbeddingNetDifferentParams(0) net = torch.nn.parallel.DistributedDataParallel( net.to(self.rank), device_ids=[self.rank], process_group=group_to_use, ) # if we don't pass a logger then we can only check that an exception was thrown. expected_err = "No support for sparse tensors." with self.assertRaisesRegex(RuntimeError, expected_err): tensors_sparse = ( self._generate_sparse_tensors_for_bucket_assignment_test() ) if use_logger: dist._compute_bucket_assignment_by_size( tensors_sparse, [400], logger=net.logger ) else: dist._compute_bucket_assignment_by_size( tensors_sparse, [400] ) if use_logger: verify_ddp_error_logged(net, expected_err) # Perform gloo-based barrier to ensure one rank doesn't exit test # early which causes failure with Barrier.sync. dist.barrier(group_gloo) @require_backend_is_available(DistTestCases.backend_feature["gpu"]) @skip_if_lt_x_gpu(2) def test_compute_bucket_assignment_by_size_sparse_error_without_logger(self): self._test_compute_bucket_assignment_by_size(use_logger=False) @require_backend_is_available(DistTestCases.backend_feature["gpu"]) @skip_if_lt_x_gpu(2) def test_compute_bucket_assignment_by_size_sparse_error_with_logger(self): self._test_compute_bucket_assignment_by_size(use_logger=True) def _determine_expected_error_verify_model_across_rank( self, group_to_use, diff_num_params=False ): # When running with NCCL backend, we don't expect an error on rank 0, # rather, it will be taken down by TORCH_NCCL_ASYNC_ERROR_HANDLING. When # running with Gloo or with debug mode wrapper, we expect the error # to be caught inline. # All ranks report same error when there is a # of parameter # mismatch since we use allgather in the impl. 
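        # The expectations below are asymmetric: non-zero ranks see the shape
        # mismatch directly and raise "appears not to match" (their shapes are
        # compared against process 0's), while rank 0 either times out in the
        # collective under plain NCCL ("caught collective operation timeout")
        # or raises a generic RuntimeError under Gloo / the DETAIL debug wrapper.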
if diff_num_params: expected_err = "DDP expects same model across all ranks" ctx = self.assertRaisesRegex(RuntimeError, expected_err) return ctx, expected_err is_detail_dbg_mode = dist.get_debug_level() == dist.DebugLevel.DETAIL if self.rank == 0: if ( dist.get_backend(group_to_use) == dist.Backend.NCCL and not is_detail_dbg_mode ): expected_err = "caught collective operation timeout" ctx = self.assertRaisesRegex(RuntimeError, expected_err) else: expected_err = None ctx = self.assertRaises(RuntimeError) else: expected_err = "appears not to match" ctx = self.assertRaisesRegex(RuntimeError, expected_err) return ctx, expected_err def _test_verify_model_across_rank(self, use_logger): group_gloo = dist.new_group( timeout=timedelta(seconds=60), backend=dist.Backend.GLOO ) # Set TORCH_NCCL_BLOCKING_WAIT and use a new NCCL group to improve test # determinism. os.environ["TORCH_NCCL_BLOCKING_WAIT"] = "1" group_to_use = dist.new_group( backend=dist.get_backend(), timeout=timedelta(seconds=5) ) torch.cuda.set_device(self.rank) ctx, expected_err = self._determine_expected_error_verify_model_across_rank( group_to_use ) # Create a valid model. The constructor initializes the logger that we use later. net = EmbeddingNetDifferentParams(0) net = torch.nn.parallel.DistributedDataParallel( net.to(self.rank), device_ids=[self.rank], process_group=group_to_use, ) # Modify the model so that the number of parameters are different for each rank. # This will cause a RuntimeError to be thrown below in _verify_param_shape_across_processes, # so we can check if the correct error is thrown and is logged. # We can't do this in the constructor above otherwise the logger will # not be properly initialized. net.module.lin = nn.Linear(100 if self.rank == 0 else 10, 1) # if we pass a logger we can verify that it was logged with ctx: if use_logger: _verify_param_shape_across_processes( net.process_group, list(net.parameters()), net.logger ) else: _verify_param_shape_across_processes( net.process_group, list(net.parameters()) ) # Should only be run by rank 0, and blocking_wait catches and # reports exception. dist.barrier(group_to_use) # We don't check when self.rank != 0 because the logger doesn't log # the error "Caught collective operation" as that is not thrown in the reducer. if use_logger and self.rank != 0: verify_ddp_error_logged(net, expected_err) # Perform gloo-based barrier to ensure one rank doesn't exit test # early which causes failure with Barrier.sync. dist.barrier(group_gloo) @require_backend_is_available(DistTestCases.backend_feature["gpu"]) @skip_but_pass_in_sandcastle_if( BACKEND == "ucc" and IS_SANDCASTLE, "Skipped internally" ) @skip_if_lt_x_gpu(2) def test_verify_model_across_rank_with_logger(self): self._test_verify_model_across_rank(use_logger=True) @require_backend_is_available(DistTestCases.backend_feature["gpu"]) @skip_but_pass_in_sandcastle_if( BACKEND == "ucc" and IS_SANDCASTLE, "Skipped internally" ) @skip_if_lt_x_gpu(2) def test_verify_model_across_rank_without_logger(self): self._test_verify_model_across_rank(use_logger=False) def _run_test_ddp_model_with_diff_params(self, ctx, net, ddp_group, group_gloo): with ctx: net = torch.nn.parallel.DistributedDataParallel( net.to(self.rank), device_ids=[self.rank], process_group=ddp_group ) # Should only be run by rank 0, and blocking_wait catches and # reports exception. 
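            # With TORCH_NCCL_BLOCKING_WAIT=1 (set by the callers), NCCL work
            # blocks the calling thread and raises a RuntimeError once the short
            # group timeout configured by the caller expires, instead of letting
            # the async error handler tear the process down. That is what turns
            # rank 0's hang in this barrier into an exception that ctx can
            # assert on.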
dist.barrier(ddp_group) # can't use verify_ddp_error_logged here because net was never properly constructed # Perform gloo-based barrier to ensure one rank doesn't exit test # early which causes failure with Barrier.sync. dist.barrier(group_gloo) @require_backend_is_available(DistTestCases.backend_feature["gpu"]) @skip_but_pass_in_sandcastle_if( BACKEND == "ucc" and IS_SANDCASTLE, "Skipped internally" ) @skip_if_lt_x_gpu(2) def test_ddp_model_diff_shape_across_ranks(self): group_gloo = dist.new_group( timeout=timedelta(seconds=60), backend=dist.Backend.GLOO ) # Set TORCH_NCCL_BLOCKING_WAIT and use a new NCCL group to improve test # determinism. os.environ["TORCH_NCCL_BLOCKING_WAIT"] = "1" group_to_use = dist.new_group( backend=dist.get_backend(), timeout=timedelta(seconds=10) ) torch.cuda.set_device(self.rank) ctx, _expected_err = self._determine_expected_error_verify_model_across_rank( group_to_use ) # Creates network with different sized embedding table on different # ranks. This should throw an error during DDP init. net = EmbeddingNetDifferentParams(self.rank) self._run_test_ddp_model_with_diff_params( ctx, net, group_to_use, group_gloo ) @require_backend_is_available(DistTestCases.backend_feature["gpu"]) @skip_but_pass_in_sandcastle_if( BACKEND == "ucc" and IS_SANDCASTLE, "Skipped internally" ) @skip_if_lt_x_gpu(2) def test_ddp_model_diff_num_params_across_ranks(self): group_gloo = dist.new_group( timeout=timedelta(seconds=60), backend=dist.Backend.GLOO ) # Set TORCH_NCCL_BLOCKING_WAIT and use a new NCCL group to improve test # determinism. os.environ["TORCH_NCCL_BLOCKING_WAIT"] = "1" group_to_use = dist.new_group( backend=dist.get_backend(), timeout=timedelta(seconds=10) ) torch.cuda.set_device(self.rank) ctx, _expected_err = self._determine_expected_error_verify_model_across_rank( group_to_use, diff_num_params=True ) # Creates network with diff # of param across ranks, reducer should # recognize this and throw appropriate error. net = EmbeddingNetDifferentParams( self.rank, diff_num_params=(self.rank == 1) ) self._run_test_ddp_model_with_diff_params( ctx, net, group_to_use, group_gloo, ) def _test_output_unused_in_loss(self, module_cls, gradient_as_bucket_view): model = module_cls() local_net = copy.deepcopy(model) net = torch.nn.parallel.DistributedDataParallel( copy.deepcopy(model).cuda(self.rank), device_ids=[self.rank], find_unused_parameters=True, ) # Tests that certain parameters not getting gradient since the # output is unused in loss computation is supported. Specifically, # checks that the grads remain unchanged and are the same as local # training. inp = torch.randn(10, 10) # Ensure that if a param is not used in loss computation, its # gradient is untouched, i.e. if it is None before it is None after, # not zero. if module_cls == DictOutputModule: a, b = local_net(inp)["predictions"] a_dist, b_dist = net(inp)["predictions"] else: a, b = local_net(inp) a_dist, b_dist = net(inp) loss_dist = b_dist.sum() loss_dist.backward() # Ensure that gradient corresponding to parameter "a" was not # touched, i.e. it is None and matches the local grad. 
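        # DDP intentionally mirrors local autograd here: a parameter that never
        # receives a gradient keeps .grad is None rather than being filled with
        # zeros, because find_unused_parameters=True marks it as unused and the
        # reducer leaves it out of gradient synchronization.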
if module_cls == DictOutputModule: self.assertTrue(net.module.module.a.weight.grad is None) self.assertEqual( net.module.module.a.weight.grad, local_net.module.a.weight.grad ) else: self.assertTrue(net.module.a.weight.grad is None) self.assertEqual(net.module.a.weight.grad, local_net.a.weight.grad) saved_a_local_grad = None saved_a_dist_grad = None net.zero_grad() local_net.zero_grad() for i in range(6): if module_cls == DictOutputModule: a, b = local_net(inp)["predictions"] a_dist, b_dist = net(inp)["predictions"] else: a, b = local_net(inp) a_dist, b_dist = net(inp) if i < 2: # Use both params in loss computation. Later, "a" will go # unused and we check to ensure DDP supports this and # gradients remain the same as local training. t = a @ b t_dist = a_dist @ b_dist loss = t.sum() loss_dist = t_dist.sum() else: # Model output "a" unused in loss. loss = b.sum() loss_dist = b_dist.sum() loss.backward() loss_dist.backward() if i == 1: # Save grads to compare with them in next iterations. if module_cls == DictOutputModule: saved_a_local_grad = local_net.module.a.weight.grad saved_a_dist_grad = net.module.module.a.weight.grad else: saved_a_local_grad = local_net.a.weight.grad saved_a_dist_grad = net.module.a.weight.grad self.assertEqual(saved_a_local_grad, saved_a_dist_grad) elif i >= 2: # parameter "a" of both models should be the same and not change if module_cls == DictOutputModule: self.assertEqual( net.module.module.a.weight.grad, saved_a_dist_grad ) self.assertEqual( local_net.module.a.weight.grad, saved_a_local_grad ) else: self.assertEqual(net.module.a.weight.grad, saved_a_dist_grad) self.assertEqual(local_net.a.weight.grad, saved_a_local_grad) # Verify grads are the same for (local_param, dist_param) in zip( local_net.parameters(), net.parameters() ): local_grad = local_param.grad dist_grad = dist_param.grad self.assertEqual(local_grad, dist_grad) dist.barrier() @skip_but_pass_in_sandcastle_if( BACKEND not in DistTestCases.backend_feature["ddp"], f"The {BACKEND} backend does not support DistributedDataParallel", ) @skip_if_lt_x_gpu(2) def test_output_unused_in_loss_tuple_module(self): module_cls = UnusedParamTwoLinLayerNet for grad_as_bucket_view in [True, False]: self._test_output_unused_in_loss(module_cls, grad_as_bucket_view) @skip_but_pass_in_sandcastle_if( BACKEND not in DistTestCases.backend_feature["ddp"], f"The {BACKEND} backend does not support DistributedDataParallel", ) @skip_if_lt_x_gpu(2) def test_output_unused_in_loss_dict_module(self): module_cls = DictOutputModule for grad_as_bucket_view in [True, False]: self._test_output_unused_in_loss(module_cls, grad_as_bucket_view) @skip_but_pass_in_sandcastle_if( BACKEND not in DistTestCases.backend_feature["ddp"], f"The {BACKEND} backend does not support DistributedDataParallel", ) @skip_if_lt_x_gpu(2) def test_undefined_grad_parity_unused_parameters(self): # TODO: enable this for general training use cases: # https://github.com/pytorch/pytorch/issues/58511. x = torch.ones(1, 2).to(self.rank) net = Net().to(self.rank) local_net = copy.deepcopy(net) net = torch.nn.parallel.DistributedDataParallel( net, device_ids=[self.rank], find_unused_parameters=True, ) out = net(x).sum() local_out = local_net(x).sum() # Simulates undefined gradients. 
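        # torch._C._functions.UndefinedGrad is an internal autograd node whose
        # backward feeds undefined (None-like) gradients into the graph; running
        # it on both the DDP output and the local output lets the test compare
        # how the two models handle parameters whose incoming grad is undefined.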
torch._C._functions.UndefinedGrad()(out).backward() torch._C._functions.UndefinedGrad()(local_out).backward() for (dist_param_name, dist_param), (local_param_name, local_param) in zip( net.named_parameters(), local_net.named_parameters() ): dist_grad = dist_param.grad local_grad = local_param.grad self.assertEqual( dist_grad, local_grad, f"""DDP param {dist_param_name} with grad {dist_grad} does not match local param {local_param_name} with grad {local_grad}""", ) def _test_different_graph_across_ranks( self, find_unused_parameters=False, static_graph=False ): class ToyModel(nn.Module): def __init__(self, rank): super().__init__() self.lin1 = nn.Linear(10, 10, bias=False) self.lin2 = nn.Linear(10, 10, bias=False) self.rank = rank def forward(self, x): if self.rank == 0: return self.lin2(F.relu(self.lin1(x))) else: return F.relu(self.lin1(x)) torch.manual_seed(31415) torch.cuda.set_device(self.rank) model = ToyModel(self.rank).cuda(self.rank) ddp_model = torch.nn.parallel.DistributedDataParallel( model, device_ids=[self.rank], find_unused_parameters=find_unused_parameters, gradient_as_bucket_view=True, static_graph=static_graph, ) random_input = torch.randn(20, 10, device=self.rank) for _ in range(10): out = ddp_model(random_input) loss = out.sum() loss.backward() return ddp_model @require_backend_is_available(DistTestCases.backend_feature["gpu"]) @skip_if_lt_x_gpu(2) def test_different_graph_across_ranks(self): base_model = self._test_different_graph_across_ranks( find_unused_parameters=True ) self.assertFalse( base_model._get_ddp_logging_data().get("has_rebuilt_buckets", 0) ) static_model = self._test_different_graph_across_ranks(static_graph=True) self.assertTrue( static_model._get_ddp_logging_data().get("has_rebuilt_buckets", 0) ) for i, j in zip(base_model.parameters(), static_model.parameters()): self.assertEqual(i, j) @require_backend_is_available({"gloo"}) @skip_but_pass_in_sandcastle_if( IS_MACOS or IS_WINDOWS, "MacOS uses uv transport which does not have as robust error handling as tcp transport", ) def test_monitored_barrier_gloo(self): tensors = [torch.ones(10) * self.rank] # Kick off some allreduce work on all ranks for _ in range(10): dist.all_reduce(torch.cat(tensors)) # Run monitored barrier and ensure it passes timeout = timedelta(seconds=2) dist.monitored_barrier(timeout=timeout) # Check monitored_barrier success with wait_all_ranks=True for _ in range(10): dist.all_reduce(torch.cat(tensors)) dist.monitored_barrier(timeout=timeout, wait_all_ranks=True) # All ranks besides 1 call into barrier, rank 0 should report failure # while others report gloo error. failed_rank = 1 src_rank = 0 if self.rank == src_rank: with self.assertRaisesRegex( RuntimeError, f"Rank {failed_rank} failed to pass monitoredBarrier" ): dist.monitored_barrier(timeout=timeout) elif self.rank != failed_rank: # Other ranks should not pass barrier since rank 0 failed. err_regex = ( f"Rank {self.rank} successfully reached monitoredBarrier," f" but received errors while waiting for send/recv from rank" f" {src_rank}" ) with self.assertRaisesRegex(RuntimeError, err_regex): dist.monitored_barrier(timeout=timeout) # We need a barrier since otherwise failed_rank exits too early # and cause a timeout. self._barrier(timeout=30) @require_backend_is_available({"gloo"}) def test_monitored_barrier_gloo_subgroup(self): # Tests that monitored_barrier works as expected on non-default # process groups. 
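        # monitored_barrier is Gloo-only: rank 0 of the group acknowledges every
        # other member via send/recv and reports which ranks failed to check in
        # within the timeout. Ranks that are not members of the group treat the
        # call as a no-op, which is what the non-member branch below relies on.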
failed_rank = 1 timeout = 0.1 subgroup = dist.new_group(ranks=[0, 1]) if self.rank == failed_rank: return if self.rank == 0: with self.assertRaisesRegex( RuntimeError, f"Rank {failed_rank} failed to pass monitoredBarrier" ): dist.monitored_barrier(subgroup, timeout) else: # Other ranks call into monitored_barrier, but this should be a # noop because they are not part of the subgroup. Verify that # there are no errors here. dist.monitored_barrier(subgroup, timeout) def _test_monitored_barrier_allreduce_hang(self, wait_all_ranks): # tests expected behavior when nonzero rank hangs. nccl_pg = dist.new_group( ranks=list(range(int(self.world_size))), # provide sufficient timeout so communicators # can be initialized in ctor. timeout=timedelta(seconds=15), backend=dist.Backend.NCCL, ) gloo_pg = dist.new_group( ranks=list(range(int(self.world_size))), backend=dist.Backend.GLOO, ) tensors = [torch.ones(10, device=self.rank) * self.rank] # Let all ranks call allreduce first to set up communicators etc. # Directly simulating error here will run into store issue described # in https://github.com/pytorch/pytorch/issues/54524. nccl_pg.allreduce(tensors).wait(timedelta(seconds=5)) # All ranks besides 0 call into allreduce. This is to simulate a # desync across the world, where some ranks call into # monitored_barrier() and others are stuck in collective comm. In # practice, we don't need TORCH_NCCL_BLOCKING_WAIT, but we use it in this # test to ensure it exits cleanly. if self.rank != 0: # Can get different errors here depending on whether gloo-based # wrapper PG is enabled or not, since with wrapper pg, it will # fail in a collective synchronization check and not actually # call into the nccl pg. if dist.get_debug_level() == dist.DebugLevel.DETAIL: err_regex = "Timed out waiting" else: err_regex = "caught collective operation timeout" with self.assertRaisesRegex(RuntimeError, err_regex): nccl_pg.allreduce(tensors).wait(timedelta(seconds=0.1)) else: # Rank 0 should report first (in order) timed out rank or all ranks # depending on wait_all_ranks flag passed into monitored_barrier. if wait_all_ranks: rank_str = ", ".join( [str(i) for i in range(1, int(self.world_size))] ) err_regex = f"Ranks {rank_str} failed to pass monitoredBarrier" else: expected_first_fail_rank = 1 err_regex = f"Rank {expected_first_fail_rank} failed to pass monitoredBarrier" monitored_barrier_timeout_seconds = timedelta(seconds=0.1) with self.assertRaisesRegex(RuntimeError, err_regex): gloo_pg.monitored_barrier( monitored_barrier_timeout_seconds, wait_all_ranks=wait_all_ranks ) self._barrier(timeout=30) @with_nccl_blocking_wait @require_backend_is_available(DistTestCases.backend_feature["gpu"]) @skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"])) def test_monitored_barrier_allreduce_hang(self): # tests expected behavior when nonzero rank hangs and we want to # report first timed out rank. self._test_monitored_barrier_allreduce_hang(wait_all_ranks=False) @with_nccl_blocking_wait @require_backend_is_available(DistTestCases.backend_feature["gpu"]) @skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"])) def test_monitored_barrier_allreduce_hang_wait_all_ranks(self): # Need to disable TORCH_NCCL_DUMP_ON_TIMEOUT otherwise this test times out os.environ["TORCH_NCCL_DUMP_ON_TIMEOUT"] = "0" # tests expected behavior when nonzero rank hangs and we want to # report all timed out ranks. 
self._test_monitored_barrier_allreduce_hang(wait_all_ranks=True) @require_backend_is_available({"gloo"}) def test_monitored_barrier_gloo_rank_0_timeout(self): # tests error when rank 0 exhausts its given timeout. process_group = dist.new_group(ranks=list(range(int(self.world_size)))) timeout = timedelta(seconds=0) if self.rank == 0: with self.assertRaisesRegex( RuntimeError, f"Rank {self.rank} timed out in monitoredBarrier" ): process_group.monitored_barrier(timeout) @require_backend_is_available({"gloo"}) @skip_if_small_worldsize @skip_but_pass_in_sandcastle_if( IS_MACOS or IS_WINDOWS, "MacOS uses uv transport which does not have as robust error handling as tcp transport", ) def test_monitored_barrier_failure_order(self): # Ensure that the first (in sorted order) rank is reported when # multiple ranks fail to pass the monitored_barrier. # TODO(#54879): Provide ability to wait and report all failed ranks expected_first_failed_rank = 2 timeout = timedelta(seconds=2) src_rank = 0 if self.rank == src_rank: with self.assertRaisesRegex( RuntimeError, f"Rank {expected_first_failed_rank}" ): dist.monitored_barrier(timeout=timeout) elif self.rank == 1: err_regex = ( f"Rank {self.rank} successfully reached monitoredBarrier," f" but received errors while waiting for send/recv from rank" f" {src_rank}" ) with self.assertRaisesRegex(RuntimeError, err_regex): dist.monitored_barrier(timeout=timeout) @require_backend_is_available({"gloo"}) @skip_if_small_worldsize def test_monitored_barrier_wait_all_ranks(self): # Tests simple case where > 1 rank does not call into monitored # barrier and verifies all ranks are reported by rank 0. if self.rank == 0: timeout = timedelta(seconds=0.1) rank_str = ", ".join([str(i) for i in range(1, int(self.world_size))]) err_regex = f"Ranks {rank_str} failed to pass monitoredBarrier" with self.assertRaisesRegex(RuntimeError, err_regex): dist.monitored_barrier(timeout=timeout, wait_all_ranks=True) @require_backend_is_available(DistTestCases.backend_feature["gpu"]) @with_dist_debug_levels(levels=["INFO"]) @skip_if_lt_x_gpu(2) def test_ddp_build_debug_param_to_name_mapping(self): model = TwoLinLayerNet() net = torch.nn.parallel.DistributedDataParallel( model.cuda(self.rank), device_ids=[self.rank], ) expected_mapping = {0: "a.weight", 1: "b.weight"} net_params, _ = net._build_params_for_reducer() param_to_name_mapping = net._build_debug_param_to_name_mapping(net_params) self.assertDictEqual(expected_mapping, param_to_name_mapping) # Test when DDP is used with ignored parameters. model = TwoLinLayerNet() # Parameters to ignore are in the format {module_name}.{param_name} params_to_ignore = ["a.weight"] torch.nn.parallel.DistributedDataParallel._set_params_and_buffers_to_ignore_for_model( model, params_to_ignore ) net = torch.nn.parallel.DistributedDataParallel( model.cuda(self.rank), device_ids=[self.rank], ) expected_mapping = {0: "b.weight"} net_params, _ = net._build_params_for_reducer() param_to_name_mapping = net._build_debug_param_to_name_mapping(net_params) self.assertDictEqual(expected_mapping, param_to_name_mapping) # Test errors are raised when DDP and module parameters mismatch. # This generally indicates a bug with DDP and is not expected to # happen in user applications. 
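        # _build_debug_param_to_name_mapping cross-checks the parameter list the
        # reducer was handed against the module's own named_parameters(); the two
        # failure modes exercised below are extra parameters the module does not
        # know about ("Expected param to name mapping") and a truncated list that
        # no longer covers every tracked parameter ("Param with name").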
model = TwoLinLayerNet() net = torch.nn.parallel.DistributedDataParallel( model.cuda(self.rank), device_ids=[self.rank], ) net_params, _ = net._build_params_for_reducer() if self.rank == 0: print(type(net_params[0])) net_params.extend( [ torch.nn.Parameter(torch.ones(1)), torch.nn.Parameter(torch.ones(1)), ] ) with self.assertRaisesRegex(ValueError, "Expected param to name mapping"): net._build_debug_param_to_name_mapping(net_params) net_params = net_params[:-3] with self.assertRaisesRegex(ValueError, "Param with name"): net._build_debug_param_to_name_mapping(net_params) net_params.extend( [ torch.nn.Parameter(torch.ones(1)), torch.nn.Parameter(torch.ones(1)), ] ) @skip_but_pass_in_sandcastle_if( BACKEND not in DistTestCases.backend_feature["ddp"], f"The {BACKEND} backend does not support DistributedDataParallel", ) @with_dist_debug_levels(levels=["INFO"]) @skip_if_lt_x_gpu(2) def test_ddp_build_debug_param_to_name_mapping_requires_grad(self): class Net(nn.Module): def __init__(self) -> None: super().__init__() self.lin = nn.Linear(10, 10) # Is not tracked by DDP and should not show up in param to # name mapping. self.lin.bias.requires_grad_(False) def forward(self, x): return self.lin(x) model = Net() net = torch.nn.parallel.DistributedDataParallel( model.cuda(self.rank), device_ids=[self.rank] ) expected_mapping = { 0: "lin.weight", } net_params, _ = net._build_params_for_reducer() param_to_name_mapping = net._build_debug_param_to_name_mapping(net_params) self.assertEqual(param_to_name_mapping, expected_mapping) def _test_ddp_multiple_nested_unused_params_error(self, ignore_sparse): debug_mode_off = dist.get_debug_level() == dist.DebugLevel.OFF class SubModule(nn.Module): def __init__(self) -> None: super().__init__() self.embedding_net = EmbeddingNetDifferentParams(0) self.lin = TwoLinLayerNet() self.bn = BatchNormNet() self.lin_layer = nn.Linear(4, 10, bias=False) def forward(self, x): x = self.bn(x) x = self.lin_layer(x) x = self.lin.a(x) # self.lin.b param unused # EmbeddingNetDifferentParams entirely unused: self.embedding_net.embedding and # self.embedding_net.lin unused. return x class MyModel(nn.Module): def __init__(self) -> None: super().__init__() self.sub_module = SubModule() def forward(self, x): return self.sub_module(x) model = MyModel() sparse_embedding_fqns = [] if ignore_sparse: for module_name, module in model.named_modules(): if module == model.sub_module.embedding_net.embedding: for parameter_name, _param in module.named_parameters(recurse=False): fqn = f"{module_name}.{parameter_name}" sparse_embedding_fqns.append(fqn) torch.nn.parallel.DistributedDataParallel._set_params_and_buffers_to_ignore_for_model( model, sparse_embedding_fqns ) unused_modules = [ model.sub_module.embedding_net.lin, model.sub_module.lin.b, ] else: unused_modules = list(model.sub_module.embedding_net.modules()) + [ model.sub_module.lin.b, ] expected_unused_param_fqns = [] used_param_fqns = [] # Validate that these don't mistakenly show up. 
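        # Build the expected fully-qualified-name -> parameter-index mapping by
        # walking named_modules(), presumably mirroring the reducer's ordering.
        # Ignored sparse embedding params get an entry but do not advance the
        # index, since the reducer never tracks them.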
fqn_to_param_index = {} index = 0 for module_name, module in model.named_modules(): for parameter_name, _param in module.named_parameters(recurse=False): fqn = f"{module_name}.{parameter_name}" fqn_to_param_index[fqn] = index if fqn not in sparse_embedding_fqns: index += 1 if module in unused_modules: expected_unused_param_fqns.append(fqn) else: if ( not ignore_sparse or module != model.sub_module.embedding_net.embedding ): used_param_fqns.append(fqn) net = torch.nn.parallel.DistributedDataParallel( model.cuda(self.rank), device_ids=[self.rank], ) batch, dim = 10, 2 inp = torch.ones(batch, dim) for i in range(2): if i == 0: out = net(inp) loss = out.sum() loss.backward() else: try: out = net(inp) loss = out.sum() loss.backward() except RuntimeError as e: e = str(e) unused_param_substr = e[e.find("did not receive grad") :] # Validate that each unused param fully qualified name # shows up in error logs. We do this instead of # constructing a joined string since order of parameters # can be different in Reducer. In addition, validate # param indices show up as well. for unused_param_fqn in expected_unused_param_fqns: self.assertTrue( unused_param_fqn in unused_param_substr or debug_mode_off ) self.assertTrue( str(fqn_to_param_index[unused_param_fqn]) in unused_param_substr, f"Did not find index {fqn_to_param_index[unused_param_fqn]} for {unused_param_fqn}", ) # Validate that used param fqns don't show up in error # logs. for used_param_fqn in used_param_fqns: self.assertFalse(used_param_fqn in unused_param_substr) # Validate that ignored param fqns don't show up as unused # (since DDP does not track them) for sparse_param_fqn in sparse_embedding_fqns: self.assertFalse(sparse_param_fqn in unused_param_substr) else: self.assertTrue(False, "Expected error was not raised!") @with_dist_debug_levels(levels=["OFF", "INFO", "DETAIL"]) @require_backend_is_available(DistTestCases.backend_feature["gpu"]) @skip_if_lt_x_gpu(2) def test_ddp_multiple_nested_unused_params_error(self): self._test_ddp_multiple_nested_unused_params_error(ignore_sparse=False) @with_dist_debug_levels(levels=["OFF", "INFO", "DETAIL"]) @require_backend_is_available(DistTestCases.backend_feature["gpu"]) @skip_if_lt_x_gpu(2) def test_ddp_multiple_nested_unused_params_err_ignore_params(self): # Tests unused parameter reporting when DDP is configured to ignore # certain parameters. self._test_ddp_multiple_nested_unused_params_error(ignore_sparse=True) @skip_but_pass_in_sandcastle_if( BACKEND not in DistTestCases.backend_feature["ddp"], f"The {BACKEND} backend does not support DistributedDataParallel", ) @skip_if_lt_x_gpu(2) def test_ddp_inference(self): # tests that DDP module can be run on a single node with no_grad # or eval setting and there is no hang. 
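        # A plain DDP forward pass issues no collectives (gradient allreduce only
        # happens during backward), and SyncBatchNorm skips its cross-rank stats
        # sync in eval mode, so rank 0 can run inference alone here without
        # leaving the other ranks stuck in a collective.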
rank = self.rank torch.cuda.set_device(rank) model = Net().cuda() local_model = copy.deepcopy(model) model = torch.nn.parallel.DistributedDataParallel( model, device_ids=[rank], ) syncbn_model = nn.SyncBatchNorm( 2, momentum=0.99, track_running_stats=False ).cuda() local_syncbn_model = copy.deepcopy(syncbn_model) syncbn_model = torch.nn.parallel.DistributedDataParallel( syncbn_model, device_ids=[rank] ) inp = torch.randn(10, 2, device=rank) inp_syncbn = torch.randn(10, 2, 4, 4, device=rank) tests = [ (model, local_model, inp), (syncbn_model, local_syncbn_model, inp_syncbn), ] for test in tests: test_model, test_local_model, test_inp = test if self.rank == 0: test_model.eval() test_local_model.eval() for _ in range(6): self.assertEqual( test_model(test_inp), test_local_model(test_inp) ) # Barrier since only rank 0 runs inference. Test should be # much faster than 30s, but this is to avoid flakiness. self._barrier(timeout=30) @skip_but_pass_in_sandcastle_if( BACKEND not in DistTestCases.backend_feature["ddp"], f"The {BACKEND} backend does not support DistributedDataParallel", ) @skip_if_lt_x_gpu(2) @unittest.skip("Test is failing, see https://github.com/pytorch/pytorch/pull/113620") def test_ddp_sync_bn_training_vs_eval(self): rank = self.rank torch.cuda.set_device(rank) # Need to set track_running_stats=False, when track_running_stats=True, # bn_training is False and sync could not occur in eval model. model = nn.SyncBatchNorm(2, momentum=0.99, track_running_stats=False).cuda( rank ) model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[rank]) # Test sync occurs in training mode. with torch.autograd.profiler.profile() as prof: for _ in range(6): inp = torch.randn(10, 2, 4, 4).cuda(rank) out = model(inp) loss = out.sum() loss.backward() # SyncBN allgathers stats across all ranks, so verify call to # all_gather in profiler. if BACKEND == "nccl": all_gather_calls = get_profiling_event("_all_gather_base", prof) else: all_gather_calls = get_profiling_event("all_gather", prof) self.assertNotEqual([], all_gather_calls) # Only do inference on one rank. If SyncBN did collective stats sync, # this would hang/error. model_inference = model.module if self.rank == 0: model_inference.eval() with torch.autograd.profiler.profile() as prof: for _ in range(6): inp = torch.randn(10, 2, 4, 4).cuda(rank) out = model_inference(inp) loss = out.sum() loss.backward() # Ensure sync does not occur in eval() mode. if BACKEND == "nccl": all_gather_calls = get_profiling_event("_all_gather_base", prof) else: all_gather_calls = get_profiling_event("all_gather", prof) self.assertEqual([], all_gather_calls) @skip_if_lt_x_gpu(2) @skip_but_pass_in_sandcastle_if( BACKEND not in DistTestCases.backend_feature["ddp"], f"The {BACKEND} backend does not support DistributedDataParallel", ) def test_ddp_python_error_logged(self): # Most python exceptions in DDP are raised during init before # reducer is constructed, so we don't have a logger in those cases. # However, the below is one example where a python error is thrown # after reducer is constructed. 
model = TwoLinLayerNet().cuda(self.rank) model = torch.nn.parallel.DistributedDataParallel( model, device_ids=[self.rank], ) expected_err = "must be callable" with self.assertRaisesRegex(TypeError, expected_err): model.register_comm_hook({}, {}) verify_ddp_error_logged(model, expected_err) @skip_if_lt_x_gpu(2) @skip_but_pass_in_sandcastle_if( BACKEND not in DistTestCases.backend_feature["ddp"], f"The {BACKEND} backend does not support DistributedDataParallel", ) def test_ddp_static_graph_nested_types(self): # Tests for static graph training when outputs are not just tensors # but can be (nested) tuple, list, dict, etc. rank = self.rank torch.cuda.set_device(rank) class NestedOutputModule(torch.nn.Module): def __init__(self) -> None: super().__init__() self.lin = nn.Linear(100, 1, bias=False) def forward(self, inp, output_type): if output_type == "tuple": return ( self.lin(inp), ( self.lin(inp), self.lin(inp), ), ) elif output_type == "list": return [ self.lin(inp), [ self.lin(inp), self.lin(inp), ], ] elif output_type == "dict": return { "a": self.lin(inp), "b": { "c": self.lin(inp), }, } def get_loss(model_output): loss = 0.0 if isinstance(model_output, torch.Tensor): return model_output.sum() elif isinstance(model_output, dict): for value in model_output.values(): loss += get_loss(value) elif isinstance(model_output, (tuple, list)): for x in model_output: loss += get_loss(x) else: raise ValueError(f"Unknown model output type {type(model_output)}") return loss model = NestedOutputModule().cuda(rank) model_static_graph = copy.deepcopy(model) model = torch.nn.parallel.DistributedDataParallel( model, device_ids=[rank], ) model_static_graph = torch.nn.parallel.DistributedDataParallel( model, device_ids=[rank], static_graph=True, ) inp = torch.randn(10, 100) type_mapping = { "list": list, "tuple": tuple, "dict": dict, } for output_type in type_mapping.keys(): for _ in range(6): out = model(inp, output_type=output_type) loss = get_loss(out) loss.backward() self._model_step(model) out_static = model_static_graph(inp, output_type=output_type) self.assertTrue(isinstance(out_static, type_mapping[output_type])) loss_static = get_loss(out_static) loss_static.backward() self._model_step(model_static_graph) for (p, p_static) in zip( model.parameters(), model_static_graph.parameters() ): self.assertEqual(p, p_static) @skip_if_lt_x_gpu(2) @skip_but_pass_in_sandcastle_if( BACKEND not in DistTestCases.backend_feature["ddp"], f"The {BACKEND} backend does not support DistributedDataParallel", ) def test_ddp_returns_tensor_with_no_grad(self): # Tests case where module returns tensor that does not require grad. 
torch.cuda.set_device(self.rank) class MyModel(nn.Module): def __init__(self) -> None: super().__init__() self.fc1 = nn.Linear(10, 10, bias=False) self.fc2 = nn.Linear(10, 10, bias=False) def forward(self, x): x = self.fc2(F.relu(self.fc1(x))) y = x.clone() x = x.detach() assert not x.requires_grad return (x, y) model = MyModel().to(self.rank) inp = torch.randn(1, 10, device=self.rank) for (find_unused, static_graph) in itertools.product( [True, False], [True, False] ): ddp = DistributedDataParallel( model, device_ids=[self.rank], output_device=self.rank, find_unused_parameters=find_unused, static_graph=static_graph, ) for _ in range(6): out = ddp(inp) self.assertFalse(out[0].requires_grad) o = (out[0] + out[1]).sum() o.backward() @skip_if_lt_x_gpu(2) @skip_but_pass_in_sandcastle_if( BACKEND not in DistTestCases.backend_feature["ddp"], f"The {BACKEND} backend does not support DistributedDataParallel", ) def test_detect_ddp_is_actually_static(self): class ToyModel(nn.Module): def __init__(self) -> None: super().__init__() self.net1 = nn.Linear(10, 10, bias=False) self.net2 = nn.Linear(10, 10) def forward(self, x, find_unused, dynamic): if find_unused: if dynamic: return self.net2(self.net1(x)) else: return self.net2(x) else: return self.net2(self.net1(x)) # Set of unused parameters don't change across iterations torch.cuda.set_device(self.rank) model = ToyModel().cuda() for find_unused in [True, False]: ddp = torch.nn.parallel.DistributedDataParallel( model, device_ids=[self.rank], find_unused_parameters=find_unused, ) inp = torch.randn(1, 10, device="cuda") for _ in range(6): out = ddp(inp, find_unused=find_unused, dynamic=False) loss = out.sum() loss.backward() self.assertTrue(ddp.reducer._ddp_graph_static()) # Set of unused parameters dynamically change ddp = torch.nn.parallel.DistributedDataParallel( model, device_ids=[self.rank], find_unused_parameters=True, ) inp = torch.randn(1, 10, device="cuda") for i in range(6): out = ddp(inp, find_unused=True, dynamic=i % 2 == 0) loss = out.sum() loss.backward() self.assertFalse(ddp.reducer._ddp_graph_static()) def _test_ddp_new_tensor_in_fwd(self, static_graph): # Test from https://github.com/pytorch/pytorch/issues/60733 class MyModel(nn.Module): def __init__(self) -> None: super().__init__() self.fc1 = nn.Linear(10, 10, bias=False) self.fc2 = nn.Linear(10, 10, bias=False) self.device = self.fc1.weight.device def __init_opt(self): opt = torch.randn(1, 10, device=self.device) return opt def forward(self, x, opt_1, opt_2, opt_nested): x = F.relu(self.fc1(x)) x = self.fc2(x) if opt_1 is None: opt_1 = self.__init_opt() if opt_2 is None: opt_2 = self.__init_opt() if opt_nested is None or not torch.is_tensor(opt_nested): opt_nested = self.__init_opt() # Test multiple tensors as well as newly created tensors # within a struct. 
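                # Tensors created inside forward() (opt_1 / opt_2 / opt_nested here)
                # are fresh leaves with no grad_fn; the assertions later check that
                # DDP returns them untouched rather than wiring them into the
                # autograd graph.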
return x, opt_1, opt_2, {"tensor": opt_nested} model = MyModel().to(self.rank) for find_unused in [True, False]: ddp = DistributedDataParallel( model, device_ids=[self.rank], output_device=self.rank, broadcast_buffers=False, find_unused_parameters=find_unused, static_graph=static_graph, ) opt = [None for _ in range(3)] for i in range(2): ddp.zero_grad() x = torch.randn(1, 10, device=self.rank) out, opt[0], opt[1], opt[2] = ddp( x, opt_1=opt[0], opt_2=opt[1], opt_nested=opt[2] ) for i in range(len(opt)): if torch.is_tensor(opt[i]): self.assertEqual(opt[i].grad_fn, None) else: self.assertEqual(opt[i]["tensor"].grad_fn, None) out.mean().backward() @skip_if_lt_x_gpu(2) @skip_but_pass_in_sandcastle_if( BACKEND not in DistTestCases.backend_feature["ddp"], f"The {BACKEND} backend does not support DistributedDataParallel", ) def test_ddp_new_tensor_in_fwd(self): return self._test_ddp_new_tensor_in_fwd(static_graph=False) @skip_if_lt_x_gpu(2) @skip_but_pass_in_sandcastle_if( BACKEND not in DistTestCases.backend_feature["ddp"], f"The {BACKEND} backend does not support DistributedDataParallel", ) def test_ddp_new_tensor_in_fwd_static_graph(self): return self._test_ddp_new_tensor_in_fwd(static_graph=True) def _test_ddp_buffer_hook_allreduce(self, return_futures): rank = self.rank torch.cuda.set_device(rank) torch.manual_seed(rank) torch.cuda.manual_seed(rank) def buffer_comm_hook(ddp, named_buffers): buffers = [buffer for (_, buffer) in named_buffers.items()] futs = [ dist.all_reduce( buffer, group=ddp.process_group, async_op=True ).get_future() for buffer in buffers ] if return_futures: return futs else: torch.futures.collect_all(futs).wait() hook_pre_fwd = ( torch.nn.parallel.distributed._BufferCommHookLocation.PRE_FORWARD ) hook_post_fwd = ( torch.nn.parallel.distributed._BufferCommHookLocation.POST_FORWARD ) for hook_run_location in [ hook_pre_fwd, hook_post_fwd, ]: model = NetWithBuffers().cuda(rank) model_ddp = torch.nn.parallel.DistributedDataParallel( model, device_ids=[self.rank], ) model_ddp._register_buffer_comm_hook( model_ddp, buffer_comm_hook, hook_run_location ) model_ddp_no_hook = torch.nn.parallel.DistributedDataParallel( copy.deepcopy(model), device_ids=[self.rank], broadcast_buffers=False, ) inp = torch.randn(2, 10, device=rank) for _ in range(2): loss_hook = model_ddp(inp).sum() # Since buffer reduction is done pre-forward, simulate it for # no hook case here. # Simulate allreduce appropriately depending on hook location. if hook_run_location == hook_pre_fwd: model_no_hook_buffers = list(model_ddp_no_hook.module.buffers()) for tensor in model_no_hook_buffers: dist.all_reduce(tensor) loss_no_hook = model_ddp_no_hook(inp).sum() if hook_run_location == hook_post_fwd: model_no_hook_buffers = list(model_ddp_no_hook.module.buffers()) for tensor in model_no_hook_buffers: dist.all_reduce(tensor) torch.cuda.synchronize() # if return_futures, they are only awaited on by DDP # at the end of the backwards pass for maximum overlap. if not return_futures: self._verify_buffers_equal(model_ddp, model_ddp_no_hook) loss_hook.backward() loss_no_hook.backward() # Note that when custom hooks return futures, this # comparison is not expected to work when hook run location # is pre-forward pass. This is because the hook does async # communication and forward pass modifies the buffer without # appropriate synchronization. Therefore, if returning # futures from custom buffer hooks, it is advised to set # hook run location to post forward. 
if return_futures and hook_run_location == hook_post_fwd: self._verify_buffers_equal(model_ddp, model_ddp_no_hook) dist.barrier() @skip_if_lt_x_gpu(2) @skip_but_pass_in_sandcastle_if( BACKEND not in DistTestCases.backend_feature["ddp"], f"The {BACKEND} backend does not support DistributedDataParallel", ) def test_ddp_buffer_hook_allreduce_return_future(self): self._test_ddp_buffer_hook_allreduce(return_futures=True) @skip_if_lt_x_gpu(2) @skip_but_pass_in_sandcastle_if( BACKEND not in DistTestCases.backend_feature["ddp"], f"The {BACKEND} backend does not support DistributedDataParallel", ) def test_ddp_buffer_hook_allreduce(self): self._test_ddp_buffer_hook_allreduce(return_futures=False) @skip_if_lt_x_gpu(2) @skip_but_pass_in_sandcastle_if( BACKEND not in DistTestCases.backend_feature["ddp"], f"The {BACKEND} backend does not support DistributedDataParallel", ) def test_ddp_broadcast_buffer_via_hook(self): # test that _distributed_broadcast_coalesced via registered hook is # equivalent to DDP's default broadcast coalesced. rank = self.rank torch.cuda.set_device(rank) torch.manual_seed(rank) torch.cuda.manual_seed(rank) def buffer_comm_hook(ddp, named_buffers): # named_buffers is a Dict[str, Tensor] representing a mapping # from buffer name to buffer. buffers = [buffer for (_, buffer) in named_buffers.items()] ddp._default_broadcast_coalesced(buffers) model = NetWithBuffers().cuda(rank) model_ddp = torch.nn.parallel.DistributedDataParallel( model, device_ids=[self.rank], ) model_ddp._register_buffer_comm_hook(model_ddp, buffer_comm_hook) model_ddp_no_hook = torch.nn.parallel.DistributedDataParallel( copy.deepcopy(model), device_ids=[self.rank], ) inp = torch.randn(2, 10, device=rank) for _ in range(2): loss_hook = model_ddp(inp).sum() loss_no_hook = model_ddp_no_hook(inp).sum() self._verify_buffers_equal(model_ddp, model_ddp_no_hook) loss_hook.backward() loss_no_hook.backward() @skip_if_lt_x_gpu(2) @skip_but_pass_in_sandcastle_if( BACKEND not in DistTestCases.backend_feature["ddp"], f"The {BACKEND} backend does not support DistributedDataParallel", ) def test_ddp_remove_autograd_hooks(self): class SimulateError(torch.autograd.Function): @staticmethod def forward(ctx, input): return input @staticmethod def backward(ctx, grad_output): raise RuntimeError class MyModel(nn.Module): def __init__(self, device): super().__init__() self.error = True self.fc1 = nn.Linear(10, 10).cuda(device) def forward(self, inp): if self.error: return self.fc1(SimulateError.apply(inp)) else: return self.fc1(inp) # Run with error to trigger backward pass that marks fc1 as being marked # ready. If we don't remove autograd hooks before running below it would # fail on the old autograd hook. model = MyModel(self.rank) input = torch.rand(10, 10, requires_grad=True).cuda(self.rank) model_ddp1 = torch.nn.parallel.DistributedDataParallel( model, device_ids=[self.rank], ) with self.assertRaises(RuntimeError): model_ddp1(input).sum().backward() # Remove autograd hooks on old instance. model_ddp1._remove_autograd_hooks() # Try another DDP instance without error now. 
            model.error = False
            model_ddp2 = torch.nn.parallel.DistributedDataParallel(
                model,
                device_ids=[self.rank],
            )
            model_ddp2(input).sum().backward()

        @skip_if_lt_x_gpu(2)
        @skip_but_pass_in_sandcastle_if(
            BACKEND not in DistTestCases.backend_feature["ddp"],
            f"The {BACKEND} backend does not support DistributedDataParallel",
        )
        @unittest.skip("Test is failing, tracking issue at https://github.com/pytorch/pytorch/issues/102751")
        def test_ddp_has_finalized(self):
            @dataclass
            class MyClass:
                obj: torch.Tensor

            class MyModel(nn.Module):
                def __init__(self, rank):
                    super().__init__()
                    self.rank = rank
                    self.fc1 = nn.Linear(1024, 1024).cuda(rank)
                    self.fc2 = nn.Linear(1024, 2 * 1024).cuda(rank)

                def forward(self, inp):
                    if self.rank == 0:
                        return self.fc1(inp), MyClass(self.fc2(inp))
                    else:
                        return self.fc1(inp), self.fc2(inp)

            model = MyModel(self.rank)
            input = torch.rand(10, 1024, requires_grad=True).cuda(self.rank)
            ddp = torch.nn.parallel.DistributedDataParallel(
                model,
                device_ids=[self.rank],
                find_unused_parameters=True,
                bucket_cap_mb=(1024 * 4 / 1024 / 1024),  # One bucket per parameter.
            )

            if self.rank == 0:
                out1, _ = ddp(input)
                out1.sum().backward()
            else:
                out1, out2 = ddp(input)
                (out1.sum() + out2.sum()).backward()

            if self.rank == 0:
                with self.assertRaisesRegex(RuntimeError, "Expected to have finished reduction in the prior iteration"):
                    ddp._check_reducer_finalized()

                with self.assertRaisesRegex(RuntimeError, "Expected to have finished reduction in the prior iteration"):
                    ddp(input)
            else:
                ddp._check_reducer_finalized()
                ddp(input)

        """
        # The set of "test_ddp_update_process_group..." below failed after
        # upgrading CI from 2 GPUs to 4 GPUs.
        # Commented out for now.
        # Test purpose needs better documentation.
        def _run_ddp_update_process_group(self, new_pg):
            def get_num_torch_recompiles():
                guard_failures = torch._dynamo.utils.guard_failures
                num_recompiles = [len(guard_failures[code]) for code in guard_failures]
                return 0 if len(num_recompiles) == 0 else max(num_recompiles)

            class SimulateError(torch.autograd.Function):
                @staticmethod
                def forward(ctx, input):
                    return input

                @staticmethod
                def backward(ctx, grad_output):
                    raise RuntimeError

            class MyModel(torch.nn.Module):
                def __init__(self, device):
                    super().__init__()
                    # 4MB for multiple buckets.
                    self.fc1 = torch.nn.Linear(1024, 1024).cuda(device)
                    self.fc2 = torch.nn.Linear(1024, 1024).cuda(device)
                    self.fc3 = torch.nn.Linear(1024, 1024).cuda(device)

                def forward(self, inp, error):
                    if error:
                        return self.fc3(self.fc2(self.fc1(SimulateError.apply(inp))))
                    else:
                        return self.fc3(self.fc2(self.fc1(inp)))

            input = torch.rand(10, 1024, requires_grad=True).cuda(self.rank)
            ddp = torch.nn.parallel.DistributedDataParallel(
                MyModel(self.rank),
                device_ids=[self.rank],
                find_unused_parameters=True,
                bucket_cap_mb=1,
            )
            model = torch.compile(ddp)

            def run_iteration():
                # Run regular iteration.
                out = model(input, error=False)
                out.sum().backward()
                torch.cuda.synchronize()

                # Run with error.
                with self.assertRaises(RuntimeError):
                    out = model(input, error=True)
                    out.sum().backward()
                torch.cuda.synchronize()

            run_iteration()
            assert 0 == get_num_torch_recompiles()

            if new_pg:
                # Now reduce world_size and run iteration.
                group_size_2 = dist.new_group(ranks=[0, 1])
                ddp._update_process_group(group_size_2)
                if self.rank in [0, 1]:
                    run_iteration()

                # Increase the world size and run iteration.
                group_size_3 = dist.new_group(ranks=[1, 2, 3])
                ddp._update_process_group(group_size_3)
                if self.rank in [1, 2, 3]:
                    run_iteration()

                # Back to default size.
                ddp._update_process_group(_get_default_group())
                run_iteration()
            else:
                # Create default pg of smaller size.
                dist.destroy_process_group()

                if self.rank in [1, 2, 3]:
                    dist.init_process_group(
                        init_method=self.init_method,
                        backend=BACKEND,
                        world_size=3,
                        rank=self.rank - 1,
                        timeout=timedelta(seconds=default_pg_timeout),
                    )
                    ddp._update_process_group(_get_default_group())
                    run_iteration()
                    dist.destroy_process_group()

                # Need a barrier here to ensure ranks 1, 2 and 3 are done.
                self._barrier(wait_for=4)

                # Need to init pg again for "_barrier" to succeed.
                dist.init_process_group(
                    init_method=self.init_method,
                    backend=BACKEND,
                    world_size=4,
                    rank=self.rank,
                    timeout=timedelta(seconds=default_pg_timeout),
                )

            # Validate no more recompiles.
            assert 0 == get_num_torch_recompiles()

        @skip_if_lt_x_gpu(4)
        @require_world_size(4)
        @skip_but_pass_in_sandcastle_if(
            BACKEND not in DistTestCases.backend_feature["ddp"],
            f"The {BACKEND} backend does not support DistributedDataParallel",
        )
        def test_ddp_update_process_group_new_group(self):
            self._run_ddp_update_process_group(new_pg=True)

        @skip_if_lt_x_gpu(4)
        @require_world_size(4)
        @skip_but_pass_in_sandcastle_if(
            BACKEND not in DistTestCases.backend_feature["ddp"],
            f"The {BACKEND} backend does not support DistributedDataParallel",
        )
        def test_ddp_update_process_group_default_group(self):
            self._run_ddp_update_process_group(new_pg=False)

        @skip_if_lt_x_gpu(4)
        @require_world_size(4)
        @skip_but_pass_in_sandcastle_if(
            BACKEND not in DistTestCases.backend_feature["ddp"],
            f"The {BACKEND} backend does not support DistributedDataParallel",
        )
        def test_ddp_update_process_group_grad_undefined(self):
            class SimulateError(torch.autograd.Function):
                @staticmethod
                def forward(ctx, input):
                    return input

                @staticmethod
                def backward(ctx, grad_output):
                    raise RuntimeError

            class MyModel(torch.nn.Module):
                def __init__(self, device):
                    super().__init__()
                    self.fc1 = torch.nn.Linear(10, 10).cuda(device)
                    self.fc2 = torch.nn.Linear(10, 10).cuda(device)
                    self.fc3 = torch.nn.Linear(10, 10).cuda(device)

                def forward(self, inp, error):
                    if error:
                        return self.fc3(self.fc2(self.fc1(SimulateError.apply(inp))))
                    else:
                        return self.fc2(self.fc1(inp))

            input = torch.rand(10, 10, requires_grad=True).cuda(self.rank)
            ddp = torch.nn.parallel.DistributedDataParallel(
                MyModel(self.rank),
                device_ids=[self.rank],
                find_unused_parameters=True,
                bucket_cap_mb=1,
            )
            try:
                ddp(input, True).sum().backward()
            except RuntimeError:
                ddp._update_process_group(_get_default_group())

                # Reset grads.
                for param in ddp.parameters():
                    param.grad = None

                # Run ddp again.
                ddp(input, False).sum().backward()

        @skip_if_lt_x_gpu(4)
        @require_world_size(4)
        @skip_but_pass_in_sandcastle_if(
            BACKEND not in DistTestCases.backend_feature["ddp"],
            f"The {BACKEND} backend does not support DistributedDataParallel",
        )
        def test_ddp_update_process_group_no_find_unused(self):
            ddp = torch.nn.parallel.DistributedDataParallel(
                torch.nn.Linear(10, 10).cuda(self.rank),
                device_ids=[self.rank],
                find_unused_parameters=False,
            )
            ddp._update_process_group(_get_default_group())
        """

        @skip_if_lt_x_gpu(2)
        @skip_but_pass_in_sandcastle_if(
            BACKEND not in DistTestCases.backend_feature["ddp"],
            f"The {BACKEND} backend does not support DistributedDataParallel",
        )
        def test_ddp_broadcast_buffer(self):
            rank = self.rank
            torch.cuda.set_device(rank)
            torch.manual_seed(rank)
            torch.cuda.manual_seed(rank)

            class NetWithBuffers(nn.Module):
                def __init__(self) -> None:
                    super().__init__()
                    self.a = nn.Linear(10, 10, bias=False)
                    self.b = nn.Linear(10, 1, bias=False)
                    self.register_buffer("buffer", torch.randn(1, 2))

                def forward(self, x):
                    return self.b(self.a(x))

            model = NetWithBuffers().cuda(rank)
            model_ddp = torch.nn.parallel.DistributedDataParallel(
                model,
                device_ids=[self.rank],
            )
            inp = torch.randn(2, 10, device=rank)
            for _ in range(2):
                if rank == 0:
                    model_ddp.module.buffer = model_ddp.module.buffer + 1
                loss = model_ddp(inp).sum()
                loss.backward()
            # Ensure all buffers are synchronized.
            bufs = [
                torch.empty_like(model_ddp.module.buffer)
                for _ in range(dist.get_world_size())
            ]
            dist.all_gather(bufs, model_ddp.module.buffer)
            rank_0_buf = bufs[0]
            for buf in bufs[1:]:
                self.assertEqual(rank_0_buf, buf)

        @skip_if_lt_x_gpu(2)
        @skip_but_pass_in_sandcastle_if(
            BACKEND != "nccl" and BACKEND != "gloo",
            "Only Nccl & Gloo backend support DistributedDataParallel",
        )
        def test_static_graph_multi_forward(self):
            class Net(nn.Module):
                def __init__(self) -> None:
                    super().__init__()
                    self.lin = nn.Linear(10, 10)
                    self.relu = nn.ReLU()

                def forward(self, x):
                    return self.relu(self.lin(x))

            torch.cuda.set_device(self.rank)
            torch.manual_seed(42 << 1337 % (self.rank + 1))
            model = Net().cuda(self.rank)
            local_model = copy.deepcopy(model)
            model = torch.nn.parallel.DistributedDataParallel(
                model, device_ids=[self.rank], static_graph=True
            )
            inp = torch.ones(2, 10, device="cuda")
            for _ in range(3):
                model.zero_grad()
                local_model.zero_grad()
                a = model(inp)
                b = model(inp)
                loss = a.sum() + b.sum()
                loss.backward()
                # Grads should be equal to a local model that ran through inp
                # `world_size` times and averaged grads
                if self.rank == 0:
                    inp_clone = inp.clone()
                    iters = dist.get_world_size()
                    for _ in range(iters):
                        a = local_model(inp_clone)
                        b = local_model(inp_clone)
                        loss = a.sum() + b.sum()
                        loss.backward()
                    for p in local_model.parameters():
                        p.grad.data = p.grad / iters

                    for p_ddp, p_local in zip(
                        model.parameters(), local_model.parameters()
                    ):
                        self.assertTrue(
                            torch.allclose(p_ddp.grad, p_local.grad),
                            f"{p_ddp.grad} vs {p_local.grad}",
                        )
            dist.barrier()

        @skip_if_lt_x_gpu(2)
        @skip_but_pass_in_sandcastle_if(
            BACKEND != "nccl" and BACKEND != "gloo",
            "Only Nccl & Gloo backend support DistributedDataParallel",
        )
        def test_sync_bn_logged(self):
            model = BN_NET
            rank = self.rank
            # single gpu training setup
            model_gpu = model.cuda(rank)
            no_sync_bn = torch.nn.parallel.DistributedDataParallel(
                copy.deepcopy(model_gpu),
                device_ids=[self.rank],
            )
            ddp_logging_data = no_sync_bn._get_ddp_logging_data()
            sync_bn_logged = ddp_logging_data.get("has_sync_bn", True)
            self.assertFalse(sync_bn_logged)
            model_DDP = nn.SyncBatchNorm.convert_sync_batchnorm(model_gpu)
            model_DDP = torch.nn.parallel.DistributedDataParallel(
                model_DDP,
                device_ids=[self.rank],
            )
            ddp_logging_data = model_DDP._get_ddp_logging_data()
            sync_bn_logged = ddp_logging_data.get("has_sync_bn", False)
            self.assertTrue(sync_bn_logged)

        @skip_if_lt_x_gpu(2)
        @skip_but_pass_in_sandcastle_if(
            BACKEND not in DistTestCases.backend_feature["ddp"],
            f"The {BACKEND} backend does not support DistributedDataParallel",
        )
        def test_stateless_api_with_ddp(self):
            class MockModule(torch.nn.Module):
                def __init__(self) -> None:
                    super().__init__()
                    self.l1 = torch.nn.Linear(1, 1)
                    buffer = torch.ones(1)
                    self.register_buffer("buffer", buffer)

                def forward(self, x):
                    return self.l1(x) + self.buffer

            device = self.rank
            module = MockModule().to(device)
            module = torch.nn.parallel.DistributedDataParallel(
                module, device_ids=[device]
            )
            x = torch.rand((1, 1)).to(device)
            weight = torch.tensor([[1.0]], device=device, requires_grad=True)
            bias = torch.tensor([0.0], device=device, requires_grad=True)
            buffer = torch.tensor([0.0], device=device)
            parameters = {
                "module.l1.weight": weight,
                "module.l1.bias": bias,
                "module.buffer": buffer,
            }
            prev_weight = module.module.l1.weight.clone()
            prev_buffer = module.module.buffer.clone()

            res = torch.func.functional_call(module, parameters, x)
            self.assertEqual(x, res)
            # check that the weight remain unmodified
            cur_weight = module.module.l1.weight
            cur_buffer = module.module.buffer
            self.assertEqual(cur_weight, prev_weight)
            self.assertEqual(cur_buffer, prev_buffer)
            # run a backward pass and check the gradients
            res.backward()
            self.assertIsNotNone(weight.grad)
            self.assertIsNotNone(bias.grad)
            # Gradient was not calculated for the module stated and buffers
            self.assertIsNone(buffer.grad)
            self.assertIsNone(module.module.l1.weight.grad)
            self.assertIsNone(module.module.l1.bias.grad)
            self.assertIsNone(module.module.buffer.grad)

        @require_backend_is_available(DistTestCases.backend_feature["gpu"])
        @skip_if_lt_x_gpu(2)
        def test_ddp_forward_backward_hook(self):
            class DummyTestModel(nn.Module):
                def __init__(self) -> None:
                    super().__init__()
                    torch.manual_seed(0)
                    self.fc = nn.Linear(2, 2)

                def forward(self, x):
                    return self.fc(x)

            def relu_hook(module, input):
                return nn.functional.relu(input[0])

            def gelu_hook(module, _input, output):
                return nn.functional.gelu(output)

            def celu_hook(module, _input, output):
                return (nn.functional.celu(output[0]),)

            local_model = DummyTestModel()
            ddp_model = DummyTestModel()
            local_model.fc.register_forward_pre_hook(relu_hook)
            local_model.fc.register_forward_hook(gelu_hook)
            ddp_model.fc.register_forward_pre_hook(relu_hook)
            ddp_model.fc.register_forward_hook(gelu_hook)
            local_model.fc.register_backward_hook(celu_hook)
            ddp_model.fc.register_backward_hook(celu_hook)
            ddp_model = DistributedDataParallel(
                ddp_model.to(self.rank), device_ids=[self.rank]
            )
            input_data = torch.rand(5, 2)
            output_local = local_model(input_data)
            output_ddp = ddp_model(input_data.to(self.rank))
            self.assertEqual(output_local, output_ddp)
            output_local.sum().backward()
            output_ddp.sum().backward()
            ddp_grads = [p.grad for p in ddp_model.parameters()]
            self.assertEqual(ddp_grads[0], local_model.fc.weight.grad)
            self.assertEqual(ddp_grads[1], local_model.fc.bias.grad)

        def _test_hook_pickling(self, hook, hook_state):
            torch.manual_seed(0)
            learning_rate = 0.01
            chkpt_file = tempfile.gettempdir() + "/checkpoint.pt"
            rank = self.rank

            input = torch.randn(7, 1, device=rank)
            target = torch.randn(7, 5, device=rank)
            net = torch.nn.Linear(1, 5).to(rank)
            ddp_model = DistributedDataParallel(copy.deepcopy(net), device_ids=[rank])
            dummy_ddp_model = DistributedDataParallel(
                copy.deepcopy(net), device_ids=[rank]
            )
            optimizer = torch.optim.SGD(ddp_model.parameters(), lr=learning_rate)
            ddp_model.register_comm_hook(hook_state, hook)
            ddp_model.train()

            for _ in range(10):
                optimizer.zero_grad()
                out = ddp_model(input)
                loss = F.mse_loss(out, target)
                loss.backward()
                optimizer.step()

            state = {
                "state_dict": ddp_model.state_dict(),
                "comm_hook": hook,
                "comm_hook_state": hook_state,
            }

            if rank == 0:
                with self.assertLogs("torch.distributed") as captured:
                    torch.save(state, chkpt_file)

                # Check that the logger has only one entry
                self.assertEqual(len(captured.records), 1)
                # Check that the logger has an expected entry
                self.assertEqual(
                    captured.records[0].getMessage(),
                    "NOTE: Process group is not serializable and excluded from a saved state.",
                )

            dist.barrier()
            map_location = {"cuda:0": f"cuda:{rank:d}"}
            with self.assertLogs("torch.distributed") as captured:
                checkpoint = torch.load(chkpt_file, map_location=map_location)

            # Check that the logger has only one entry
            self.assertEqual(len(captured.records), 1)
            # Check that the logger has an expected entry
            self.assertEqual(
                captured.records[0].getMessage(),
                "NOTE: Process group will be set to a default group (i.e. the world size).\
                If a different group is desired, please set `self.process_group` after PowerSGD state is loaded.",
            )

            dummy_ddp_model.load_state_dict(checkpoint["state_dict"])
            dummy_hook = checkpoint["comm_hook"]
            dummy_hook_state = checkpoint["comm_hook_state"]
            dummy_optimizer = torch.optim.SGD(
                dummy_ddp_model.parameters(), lr=learning_rate
            )

            # Check that loaded function is correct
            self.assertEqual(dummy_hook.__qualname__, hook.__qualname__)

            # Check that all slots' keys were restored correctly
            self.assertEqual(hook_state.__slots__, dummy_hook_state.__slots__)

            # Check that all slots' attributes are restored correctly
            # Excluding ``process_group`` and ``rng``.
            for entry in dummy_hook_state.__slots__:
                if entry != "process_group" and entry != "rng":
                    self.assertEqual(
                        getattr(dummy_hook_state, entry), getattr(hook_state, entry)
                    )

            # Check that ``process_group`` was set to default
            self.assertEqual(dummy_hook_state.process_group, _get_default_group())

            # Check that a random state was restored properly:
            # ``np.random.RandomState.get_state`` returns a tuple with entries:
            # ``bit_generator`` - str,
            # ``state.key`` - ndarray dtype[uint32],
            # ``state.pos`` - int,
            # ``has_gauss`` - int,
            # ``gauss`` - float
            # (refer to https://github.com/numpy/numpy/blob/266aad7478bc7fbcc55eea7f942a0d373b838396/numpy/random/mtrand.pyi)
            # To make sure random state was restored properly, all entries should equal the original
            for entry1, entry2 in zip(
                hook_state.rng.get_state(), dummy_hook_state.rng.get_state()
            ):
                np.testing.assert_array_equal(entry1, entry2)

            dummy_ddp_model.register_comm_hook(dummy_hook_state, dummy_hook)
            dummy_ddp_model.train()

            for _ in range(10):
                optimizer.zero_grad()
                dummy_optimizer.zero_grad()

                out_origin = ddp_model(input)
                out_dummy = dummy_ddp_model(input)
                loss_origin = F.mse_loss(out_origin, target)
                loss_dummy = F.mse_loss(out_dummy, target)
                loss_origin.backward()
                loss_dummy.backward()

                optimizer.step()
                dummy_optimizer.step()

            # Check that gradients after 10 epochs are the same
            for orig_param, dummy_param in zip(
                ddp_model.parameters(), dummy_ddp_model.parameters()
            ):
                self.assertEqual(orig_param.grad, dummy_param.grad)

            dist.barrier()
            if rank == 0:
                os.remove(chkpt_file)

        @skip_but_pass_in_sandcastle_if(
            BACKEND not in DistTestCases.backend_feature["cuda"],
            f"The {BACKEND} backend does not support DDP communication hook on CUDA devices",
        )
        @skip_if_lt_x_gpu(int(os.environ["WORLD_SIZE"]))
        @skip_but_pass_in_sandcastle_if(
            True, "Skipped due to flakiness"
        )
        def test_ddp_hook_pickling_powerSGD(self):
            hook = powerSGD.powerSGD_hook
            powersgd_state = powerSGD.PowerSGDState(
                process_group=None,
                matrix_approximation_rank=1,
                start_powerSGD_iter=4,
            )
            self._test_hook_pickling(hook, powersgd_state)

        @require_backend_is_available(DistTestCases.backend_feature["gpu"])
        @skip_if_lt_x_gpu(2)
        def test_ddp_device_mesh_initialization(self):
            """
            Test DDP with device_mesh initialization.
            """
            world_size = int(os.environ["WORLD_SIZE"])

            from torch.distributed.device_mesh import init_device_mesh

            device_mesh = init_device_mesh("cuda", (world_size,))
            pg = _get_default_group()

            torch.cuda.set_device(self.rank)
            model = TwoLinLayerNet().cuda()
            ddp_model = torch.nn.parallel.DistributedDataParallel(model, device_mesh=device_mesh)
            self.assertEqual(ddp_model.device_mesh, device_mesh)

            with self.assertRaisesRegex(
                RuntimeError, "Cannot specify both process_group and device_mesh arguments."
            ):
                ddp_model = torch.nn.parallel.DistributedDataParallel(
                    model, process_group=pg, device_mesh=device_mesh
                )

            with self.assertRaisesRegex(
                RuntimeError, "Only 1D device mesh is supported,"
            ):
                device_mesh = init_device_mesh("cuda", (2, world_size // 2))
                ddp_model = torch.nn.parallel.DistributedDataParallel(
                    model, device_mesh=device_mesh
                )

        @skip_if_lt_x_gpu(2)
        @require_world_size(2)
        @skip_but_pass_in_sandcastle_if(
            BACKEND not in DistTestCases.backend_feature["ddp"],
            f"The {BACKEND} backend does not support DistributedDataParallel",
        )
        def test_ddp_compile_static_graph(self):
            "Tests that DDP works with torch compile when static_graph=True"
            model = torch.nn.Linear(10, 10).cuda(self.rank)
            model_clone = copy.deepcopy(model)
            ddp = torch.nn.parallel.DistributedDataParallel(
                model,
                device_ids=[self.rank],
            )
            ddp_static = torch.nn.parallel.DistributedDataParallel(
                model_clone, device_ids=[self.rank], static_graph=True
            )
            ddp = torch.compile(ddp)
            ddp_static = torch.compile(ddp_static)
            input = torch.rand(10, 10).cuda(self.rank)
            # verify output and gradient parity
            for _ in range(6):
                out_ddp = ddp(input).sum()
                out_ddp_static = ddp_static(input).sum()
                self.assertEqual(out_ddp, out_ddp_static)
                out_ddp.backward()
                out_ddp_static.backward()
                for p1, p2 in zip(ddp.parameters(), ddp_static.parameters()):
                    self.assertEqual(p1.grad, p2.grad)

        @skip_if_lt_x_gpu(2)
        @require_world_size(2)
        @skip_but_pass_in_sandcastle_if(
            BACKEND not in DistTestCases.backend_feature["ddp"],
            f"The {BACKEND} backend does not support DistributedDataParallel",
        )
        def test_ddp_sink_noclone(self):
            "Tests that we can configure DDP to avoid clone"

            class OpPatcher(TorchDispatchMode):
                def __torch_dispatch__(self, func, types, args=(), kwargs=None):
                    func_packet = func._overloadpacket
                    if func_packet == torch.ops.aten.clone:
                        raise RuntimeError("clone encountered!")
                    kwargs = kwargs if kwargs else {}
                    return func(*args, **kwargs)

            class MyModel(torch.nn.Module):
                def __init__(self) -> None:
                    super().__init__()
                    self.fc = torch.nn.Linear(10, 10)

                def forward(self, input):
                    return self.fc(input)

            model = MyModel().cuda(self.rank)
            ddp = torch.nn.parallel.DistributedDataParallel(
                model,
                device_ids=[self.rank],
                find_unused_parameters=True,
            )
            ddp._set_ddp_sink_clone(False)
            input = torch.rand(10, 10).cuda(self.rank)

            with OpPatcher():
                ddp(input).sum().backward()


instantiate_parametrized_tests(DistributedTest._DistTestBase)
```
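The buffer hook tests above exercise DDP's private `_register_buffer_comm_hook` API. For orientation, here is a minimal sketch of that pattern outside the test harness; `MyNet` is a hypothetical module with registered buffers, and the snippet assumes a process group has already been initialized and one CUDA device per rank:

```py
import torch
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel
from torch.nn.parallel.distributed import _BufferCommHookLocation

# Hypothetical module with registered buffers; not part of the file above.
model = DistributedDataParallel(MyNet().cuda(), device_ids=[dist.get_rank()])

def allreduce_buffers(ddp, named_buffers):
    # named_buffers maps buffer name -> buffer tensor.
    futs = [
        dist.all_reduce(buf, group=ddp.process_group, async_op=True).get_future()
        for buf in named_buffers.values()
    ]
    # Returned futures are awaited by DDP at the end of the backward pass.
    return futs

# Private API, mirroring the tests above. POST_FORWARD is used so the async
# buffer communication does not race with the forward pass.
model._register_buffer_comm_hook(
    model, allreduce_buffers, _BufferCommHookLocation.POST_FORWARD
)
```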
================================================================================================================================================
SOURCE CODE FILE: distributed_utils.py
LINES: 1
SIZE: 1.97 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\distributed\distributed_utils.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs
from contextlib import contextmanager
from datetime import timedelta
from functools import (
    partial,
    wraps,
)

import torch.distributed as dist
import torch.distributed.distributed_c10d as c10d


class MockProcessGroup(dist.ProcessGroup):
    def __init__(self, rank, world):
        super().__init__(rank, world)

    def getBackendName(self):
        return "mock_process_group"


def create_mock_pg(prefix_store, rank, world_size, timeout):
    return MockProcessGroup(rank, world_size)


dist.Backend.register_backend('mock_process_group', create_mock_pg)


def mock_init_dist(rank, world_size):
    # !!! WARNING !!!
    # Kids don't try this at home, this is a cute pile of hacks that
    # depends on a small mountain of c10d internals
    assert not dist.is_initialized()
    store = dist.HashStore()
    # Trick _store_based_barrier into believing everyone else already checked in.
    # Zero is the group index.
    store.add(f"{c10d.STORE_BASED_BARRIER_PREFIX}:0", world_size - 1)
    dist.init_process_group(
        backend="mock_process_group",
        rank=rank,
        world_size=world_size,
        store=store,
        group_name="fake",
        timeout=timedelta(seconds=1))


@contextmanager
def with_dist(rank=0, world_size=2):
    """
    Context manager that initializes c10d with a fake process group.
    """
    mock_init_dist(rank=rank, world_size=world_size)
    try:
        yield
    finally:
        dist.destroy_process_group()


def with_fake_comms(func=None, rank=0, world_size=2):
    """
    Function wrapper that inits a fake process group designed for testing.
    Right now only querying the world size is available.
    """
    if func is None:
        return partial(with_fake_comms, rank=rank, world_size=world_size)

    @wraps(func)
    def wrapper(self, *args, **kwargs):
        with with_dist(rank, world_size):
            func(self, *args, **kwargs)
    return wrapper
```
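As a usage note (a minimal sketch, not part of the module above; the test class and method are hypothetical), the `with_fake_comms` decorator is meant to wrap a test method so that rank/world-size queries work without spawning extra processes:

```py
import torch.distributed as dist
from torch.testing._internal.common_utils import TestCase
from torch.testing._internal.distributed.distributed_utils import with_fake_comms


class FakeCommsExample(TestCase):
    # Hypothetical test: the wrapper initializes the mock process group with
    # the given rank and world size, then tears it down afterwards.
    @with_fake_comms(rank=1, world_size=4)
    def test_world_size_query(self):
        self.assertEqual(dist.get_world_size(), 4)
```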
======================================================================================================================
SOURCE CODE FILE: fake_pg.py
LINES: 1
SIZE: 1.04 KB
PATH: scripts\freecad_env\Lib\site-packages\torch\testing\_internal\distributed\fake_pg.py
ENCODING: utf-8
```py
# mypy: allow-untyped-defs

import torch.distributed as dist

from torch._C._distributed_c10d import (
    FakeProcessGroup,
)


class FakeStore(dist.Store):
    """
    A fake key-value store used simply to initialize the fake process group;
    one can use either FakeStore or HashStore.
    """


def _create_fake_pg(prefix_store, rank, world_size, timeout):
    """
    A fake process group (not related to FakeTensor) is a process group which
    doesn't actually do any communication, it just hallucinates some
    communication. You can run a single rank with a fake process group
    without needing multiple processes (it simulates per-rank behavior).

    NOTE: This is not a real process group, and it would produce wrong results
    for every collective. It should be used as a convenient tool when playing
    around with distributed code without caring about the actual data.
    """
    return FakeProcessGroup(rank, world_size)


dist.Backend.register_backend("fake", _create_fake_pg, devices=['cpu', 'cuda'])
```
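For reference, a minimal usage sketch (not part of the module above): the "fake" backend registered here can be combined with `FakeStore` to initialize a single-process "distributed" run, where collectives return immediately but their results are not meaningful:

```py
import torch
import torch.distributed as dist
from torch.testing._internal.distributed.fake_pg import FakeStore

# Initialize a single process as if it were rank 0 of a 2-rank job.
store = FakeStore()
dist.init_process_group(backend="fake", rank=0, world_size=2, store=store)

t = torch.ones(3)
dist.all_reduce(t)  # hallucinated: no real communication happens

dist.destroy_process_group()
```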