Dataset columns: python_code (string, lengths 0 to 1.02M), repo_name (string, lengths 9 to 48), file_path (string, lengths 5 to 114)
from contextlib import contextmanager
import types

# The idea for this parameter is that we forbid bare assignment
# to torch.backends.<cudnn|mkldnn>.enabled and friends when running our
# test suite, where it's very easy to forget to undo the change
# later.
__allow_nonbracketed_mutation_flag = True

def disable_global_flags():
    global __allow_nonbracketed_mutation_flag
    __allow_nonbracketed_mutation_flag = False

def flags_frozen():
    return not __allow_nonbracketed_mutation_flag

@contextmanager
def __allow_nonbracketed_mutation():
    global __allow_nonbracketed_mutation_flag
    old = __allow_nonbracketed_mutation_flag
    __allow_nonbracketed_mutation_flag = True
    try:
        yield
    finally:
        __allow_nonbracketed_mutation_flag = old

class ContextProp(object):
    def __init__(self, getter, setter):
        self.getter = getter
        self.setter = setter

    def __get__(self, obj, objtype):
        return self.getter()

    def __set__(self, obj, val):
        if not flags_frozen():
            self.setter(val)
        else:
            raise RuntimeError("not allowed to set %s flags "
                               "after disable_global_flags; please use flags() context manager instead" % obj.__name__)

class PropModule(types.ModuleType):
    def __init__(self, m, name):
        super(PropModule, self).__init__(name)
        self.m = m

    def __getattr__(self, attr):
        return self.m.__getattribute__(attr)
pytorch-master
torch/backends/__init__.py
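A brief usage sketch of the flag-freezing machinery defined in torch/backends/__init__.py above; the behavior is inferred from the code shown (the mkldnn flags() context manager referenced in the comments appears later in this dump):

import torch.backends as backends

# Mutation of backend flags is allowed by default.
print(backends.flags_frozen())        # False

# A test harness can forbid bare assignment to backend flags.
backends.disable_global_flags()
print(backends.flags_frozen())        # True

# From here on, `torch.backends.mkldnn.enabled = False` raises RuntimeError
# through ContextProp.__set__; the bracketed form
# `with torch.backends.mkldnn.flags(enabled=False): ...` still works because
# it temporarily re-allows mutation via __allow_nonbracketed_mutation().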
import torch
from functools import lru_cache as _lru_cache

def is_built() -> bool:
    r"""Returns whether PyTorch is built with MPS support. Note that this
    doesn't necessarily mean MPS is available; just that if this PyTorch
    binary were run on a machine with working MPS drivers and devices, we
    would be able to use it."""
    return torch._C.has_mps

@_lru_cache()
def is_available() -> bool:
    r"""Returns a bool indicating if MPS is currently available."""
    return torch._C._is_mps_available()
pytorch-master
torch/backends/mps/__init__.py
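A minimal device-selection sketch using the two functions above; it falls back to CPU when MPS cannot be used:

import torch

# is_built() only says MPS support was compiled in; is_available() also
# checks that the current machine can actually use it.
device = torch.device("mps" if torch.backends.mps.is_available() else "cpu")
print(device)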
import hashlib
import json
from typing import Dict, Tuple

import coremltools as ct  # type: ignore[import]
import torch
from coremltools.converters.mil.input_types import TensorType  # type: ignore[import]
from coremltools.converters.mil.mil import types  # type: ignore[import]
from coremltools.models.neural_network import quantization_utils  # type: ignore[import]

CT_METADATA_VERSION = "com.github.apple.coremltools.version"
CT_METADATA_SOURCE = "com.github.apple.coremltools.source"

class ScalarType:
    Float = 0
    Double = 1
    Int = 2
    Long = 3
    Undefined = 4

# Supported Tensor types in coremltools:
# https://github.com/apple/coremltools/blob/main/coremltools/converters/mil/frontend/torch/converter.py#L28
torch_to_mil_types = {
    ScalarType.Float: types.fp32,
    ScalarType.Double: types.fp64,
    ScalarType.Int: types.int32,
    ScalarType.Long: types.int64,
}

class CoreMLComputeUnit:
    CPU = "cpuOnly"
    CPUAndGPU = "cpuAndGPU"
    ALL = "all"

class CoreMLQuantizationMode:
    LINEAR = "linear"
    LINEAR_SYMMETRIC = "linear_symmetric"
    NONE = "none"

def TensorSpec(shape, dtype=ScalarType.Float):
    return (shape, dtype)

def CompileSpec(inputs, outputs, backend=CoreMLComputeUnit.CPU,
                allow_low_precision=True, quantization_mode=CoreMLQuantizationMode.NONE):
    return (inputs, outputs, backend, allow_low_precision, quantization_mode)

def _check_enumerated_shape(shape):
    for s in shape:
        if not isinstance(s, (list, tuple)):
            return False
    return True

def _convert_to_mil_type(shape, dtype, name: str):
    mil_shape = shape
    if _check_enumerated_shape(shape):
        mil_shape = ct.EnumeratedShapes(shape)
    ml_type = TensorType(shape=mil_shape, dtype=torch_to_mil_types[dtype])
    ml_type.name = name
    return ml_type

def preprocess(script_module: torch._C.ScriptObject, compile_spec: Dict[str, Tuple]):
    spec = compile_spec["forward"]
    input_specs, output_specs, backend, allow_low_precision, quantization_mode = spec
    mil_inputs = []
    inputs = []
    for index, input in enumerate(input_specs):
        shape, dtype = input
        name = "input_" + str(index)
        inputs.append([name, str(dtype), str(shape)])
        ml_type = _convert_to_mil_type(shape, dtype, name)
        mil_inputs.append(ml_type)
    model = torch.jit.RecursiveScriptModule._construct(script_module, lambda x: None)
    mlmodel = ct.convert(model, inputs=mil_inputs)

    if quantization_mode != CoreMLQuantizationMode.NONE:
        quant_model_spec = quantization_utils.quantize_weights(mlmodel, nbits=8,
                                                               quantization_mode=quantization_mode)
        mlmodel = ct.models.MLModel(quant_model_spec)

    spec = mlmodel.get_spec()
    assert len(spec.description.output) == len(output_specs)  # type: ignore[attr-defined]
    outputs = []
    for index, output in enumerate(output_specs):
        shape, dtype = output
        name = spec.description.output[index].name  # type: ignore[attr-defined]
        outputs.append([name, str(dtype), str(shape)])
    mlmodel = ct.models.model.MLModel(spec)
    print(mlmodel)
    config = {
        "spec_ver": str(spec.specificationVersion),  # type: ignore[attr-defined]
        "backend": backend,
        "allow_low_precision": str(allow_low_precision),
    }
    metadata = {
        "coremltool_ver": mlmodel.user_defined_metadata[CT_METADATA_VERSION],
        "torch_ver": mlmodel.user_defined_metadata[CT_METADATA_SOURCE],
    }
    coreml_compile_spec = {
        "inputs": inputs,
        "outputs": outputs,
        "config": config,
        "metadata": metadata,
    }
    mlmodel = spec.SerializeToString()  # type: ignore[attr-defined]

    return {
        "model": mlmodel,
        "hash": str(hashlib.sha256(mlmodel).hexdigest()),
        "extra": json.dumps(coreml_compile_spec),
    }
pytorch-master
torch/backends/_coreml/preprocess.py
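A sketch of assembling a compile spec with the helpers defined in preprocess.py above. The shapes and the module being lowered are made-up placeholders; only TensorSpec, CompileSpec, and CoreMLComputeUnit come from the file itself:

from torch.backends._coreml.preprocess import (
    CompileSpec,
    CoreMLComputeUnit,
    TensorSpec,
)

compile_spec = {
    "forward": CompileSpec(
        inputs=(
            TensorSpec(shape=[1, 3, 224, 224]),  # dtype defaults to ScalarType.Float
        ),
        outputs=(
            TensorSpec(shape=[1, 1000]),
        ),
        backend=CoreMLComputeUnit.ALL,
        allow_low_precision=True,
    ),
}
# preprocess(scripted_module._c, compile_spec) would then run the coremltools
# conversion and return the serialized model plus its hash and metadata.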
pytorch-master
torch/backends/_coreml/__init__.py
import sys
import torch
from typing import Union

def is_built():
    r"""Returns whether PyTorch is built with CUDA support. Note that this
    doesn't necessarily mean CUDA is available; just that if this PyTorch
    binary were run on a machine with working CUDA drivers and devices, we
    would be able to use it."""
    return torch._C.has_cuda

class cuFFTPlanCacheAttrContextProp(object):
    # Like regular ContextProp, but uses the `.device_index` attribute from the
    # calling object as the first argument to the getter and setter.
    def __init__(self, getter, setter):
        self.getter = getter
        self.setter = setter

    def __get__(self, obj, objtype):
        return self.getter(obj.device_index)

    def __set__(self, obj, val):
        if isinstance(self.setter, str):
            raise RuntimeError(self.setter)
        self.setter(obj.device_index, val)

class cuFFTPlanCache(object):
    r"""
    Represents a specific plan cache for a specific `device_index`. The
    attributes `size` and `max_size`, and method `clear`, can fetch and/or
    change properties of the C++ cuFFT plan cache.
    """
    def __init__(self, device_index):
        self.device_index = device_index

    size = cuFFTPlanCacheAttrContextProp(
        torch._cufft_get_plan_cache_size,
        '.size is a read-only property showing the number of plans currently in the '
        'cache. To change the cache capacity, set cufft_plan_cache.max_size.')

    max_size = cuFFTPlanCacheAttrContextProp(torch._cufft_get_plan_cache_max_size,
                                             torch._cufft_set_plan_cache_max_size)

    def clear(self):
        return torch._cufft_clear_plan_cache(self.device_index)

class cuFFTPlanCacheManager(object):
    r"""
    Represents all cuFFT plan caches. When indexed with a device object/index,
    this object returns the `cuFFTPlanCache` corresponding to that device.

    Finally, when this object is used directly as a `cuFFTPlanCache` object
    (e.g., setting the `.max_size` attribute), the current device's cuFFT plan
    cache is used.
    """

    __initialized = False

    def __init__(self):
        self.caches = []
        self.__initialized = True

    def __getitem__(self, device):
        index = torch.cuda._utils._get_device_index(device)
        if index < 0 or index >= torch.cuda.device_count():
            raise RuntimeError(
                ("cufft_plan_cache: expected 0 <= device index < {}, but got "
                 "device with index {}").format(torch.cuda.device_count(), index))
        if len(self.caches) == 0:
            self.caches.extend(cuFFTPlanCache(index) for index in range(torch.cuda.device_count()))
        return self.caches[index]

    def __getattr__(self, name):
        return getattr(self[torch.cuda.current_device()], name)

    def __setattr__(self, name, value):
        if self.__initialized:
            return setattr(self[torch.cuda.current_device()], name, value)
        else:
            return super(cuFFTPlanCacheManager, self).__setattr__(name, value)

class cuBLASModule:
    def __getattr__(self, name):
        if name == "allow_tf32":
            return torch._C._get_cublas_allow_tf32()
        elif name == "allow_fp16_reduced_precision_reduction":
            return torch._C._get_cublas_allow_fp16_reduced_precision_reduction()
        raise AssertionError("Unknown attribute " + name)

    def __setattr__(self, name, value):
        if name == "allow_tf32":
            return torch._C._set_cublas_allow_tf32(value)
        elif name == "allow_fp16_reduced_precision_reduction":
            return torch._C._set_cublas_allow_fp16_reduced_precision_reduction(value)
        raise AssertionError("Unknown attribute " + name)

_LinalgBackends = {
    'default': torch._C._LinalgBackend.Default,
    'cusolver': torch._C._LinalgBackend.Cusolver,
    'magma': torch._C._LinalgBackend.Magma,
}
_LinalgBackends_str = ', '.join(_LinalgBackends.keys())

def preferred_linalg_library(backend: Union[None, str, torch._C._LinalgBackend] = None) -> torch._C._LinalgBackend:
    r'''
    .. warning:: This flag is experimental and subject to change.

    When PyTorch runs a CUDA linear algebra operation it often uses the cuSOLVER or MAGMA libraries,
    and if both are available it decides which to use with a heuristic.
    This flag (a :class:`str`) allows overriding those heuristics.

    * If `"cusolver"` is set then cuSOLVER will be used wherever possible.
    * If `"magma"` is set then MAGMA will be used wherever possible.
    * If `"default"` (the default) is set then heuristics will be used to pick between
      cuSOLVER and MAGMA if both are available.
    * When no input is given, this function returns the currently preferred library.

    Note: When a library is preferred other libraries may still be used if the preferred library
    doesn't implement the operation(s) called.
    This flag may achieve better performance if PyTorch's heuristic library selection is incorrect
    for your application's inputs.

    Currently supported linalg operators:

    * :func:`torch.linalg.inv`
    * :func:`torch.linalg.inv_ex`
    * :func:`torch.linalg.cholesky`
    * :func:`torch.linalg.cholesky_ex`
    * :func:`torch.cholesky_solve`
    * :func:`torch.cholesky_inverse`
    * :func:`torch.linalg.lu_factor`
    * :func:`torch.linalg.lu`
    * :func:`torch.linalg.lu_solve`
    * :func:`torch.linalg.qr`
    * :func:`torch.linalg.eigh`
    * :func:`torch.linalg.eigvalsh`
    * :func:`torch.linalg.svd`
    * :func:`torch.linalg.svdvals`
    '''

    if backend is None:
        pass
    elif isinstance(backend, str):
        if backend not in _LinalgBackends:
            raise RuntimeError("Unknown input value. "
                               f"Choose from: {_LinalgBackends_str}.")
        torch._C._set_linalg_preferred_backend(_LinalgBackends[backend])
    elif isinstance(backend, torch._C._LinalgBackend):
        torch._C._set_linalg_preferred_backend(backend)
    else:
        raise RuntimeError("Unknown input value type.")

    return torch._C._get_linalg_preferred_backend()

cufft_plan_cache = cuFFTPlanCacheManager()
matmul = cuBLASModule()
pytorch-master
torch/backends/cuda/__init__.py
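A short usage sketch of the knobs exported by torch/backends/cuda/__init__.py above, assuming at least one CUDA device is present:

import torch

# TF32 matmuls on Ampere+ GPUs (handled by cuBLASModule.__setattr__).
torch.backends.cuda.matmul.allow_tf32 = True

# Per-device cuFFT plan cache, via cuFFTPlanCacheManager.__getitem__;
# attribute access without an index targets the current device.
torch.backends.cuda.cufft_plan_cache[0].max_size = 32
print(torch.backends.cuda.cufft_plan_cache.size)

# Prefer cuSOLVER over MAGMA where possible.
torch.backends.cuda.preferred_linalg_library("cusolver")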
import sys
import torch
import types
from typing import List

# This function should correspond to the enums present in c10/core/QEngine.h
def _get_qengine_id(qengine: str) -> int:
    if qengine == 'none' or qengine == '' or qengine is None:
        ret = 0
    elif qengine == 'fbgemm':
        ret = 1
    elif qengine == 'qnnpack':
        ret = 2
    elif qengine == 'onednn':
        ret = 3
    else:
        ret = -1
        raise RuntimeError("{} is not a valid value for quantized engine".format(qengine))
    return ret

# This function should correspond to the enums present in c10/core/QEngine.h
def _get_qengine_str(qengine: int) -> str:
    all_engines = {0: 'none', 1: 'fbgemm', 2: 'qnnpack', 3: 'onednn'}
    return all_engines.get(qengine, '*undefined')

class _QEngineProp(object):
    def __get__(self, obj, objtype) -> str:
        return _get_qengine_str(torch._C._get_qengine())

    def __set__(self, obj, val: str) -> None:
        torch._C._set_qengine(_get_qengine_id(val))

class _SupportedQEnginesProp(object):
    def __get__(self, obj, objtype) -> List[str]:
        qengines = torch._C._supported_qengines()
        return [_get_qengine_str(qe) for qe in qengines]

    def __set__(self, obj, val) -> None:
        raise RuntimeError("Assignment not supported")

class QuantizedEngine(types.ModuleType):
    def __init__(self, m, name):
        super(QuantizedEngine, self).__init__(name)
        self.m = m

    def __getattr__(self, attr):
        return self.m.__getattribute__(attr)

    engine = _QEngineProp()
    supported_engines = _SupportedQEnginesProp()

# This is the sys.modules replacement trick, see
# https://stackoverflow.com/questions/2447353/getattr-on-a-module/7668273#7668273
sys.modules[__name__] = QuantizedEngine(sys.modules[__name__], __name__)

engine: str
supported_engines: List[str]
pytorch-master
torch/backends/quantized/__init__.py
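A small sketch of the quantized-engine descriptors installed above; the exact engine list depends on how PyTorch was built:

import torch

print(torch.backends.quantized.supported_engines)   # e.g. ['none', 'fbgemm', 'qnnpack']
if 'fbgemm' in torch.backends.quantized.supported_engines:
    torch.backends.quantized.engine = 'fbgemm'
print(torch.backends.quantized.engine)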
import torch

def is_available():
    r"""Returns whether PyTorch is built with MKL support."""
    return torch._C.has_mkl

VERBOSE_OFF = 0
VERBOSE_ON = 1

class verbose(object):
    """
    On-demand oneMKL verbosing functionality.

    To make it easier to debug performance issues, oneMKL can dump verbose
    messages containing execution information like duration while executing
    the kernel. The verbosing functionality can be invoked via an environment
    variable named `MKL_VERBOSE`. However, that approach dumps messages at
    every step, which produces a large amount of output. Moreover, for
    investigating performance issues, verbose messages from a single iteration
    are usually enough. This on-demand verbosing functionality makes it
    possible to control the scope of verbose message dumping. In the following
    example, verbose messages will be dumped out for the second inference only.

    .. highlight:: python
    .. code-block:: python

        import torch
        model(data)
        with torch.backends.mkl.verbose(torch.backends.mkl.VERBOSE_ON):
            model(data)

    Args:
        enable: Verbose level
            - ``VERBOSE_OFF``: Disable verbosing
            - ``VERBOSE_ON``:  Enable verbosing
    """
    def __init__(self, enable):
        self.enable = enable

    def __enter__(self):
        if self.enable == VERBOSE_OFF:
            return
        st = torch._C._verbose.mkl_set_verbose(self.enable)
        assert st, "Failed to set MKL into verbose mode. Please consider disabling this verbose scope."
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        torch._C._verbose.mkl_set_verbose(VERBOSE_OFF)
        return False
pytorch-master
torch/backends/mkl/__init__.py
import sys
import torch
import types

class _XNNPACKEnabled(object):
    def __get__(self, obj, objtype):
        return torch._C._is_xnnpack_enabled()

    def __set__(self, obj, val):
        raise RuntimeError("Assignment not supported")

class XNNPACKEngine(types.ModuleType):
    def __init__(self, m, name):
        super(XNNPACKEngine, self).__init__(name)
        self.m = m

    def __getattr__(self, attr):
        return self.m.__getattribute__(attr)

    enabled = _XNNPACKEnabled()

# This is the sys.modules replacement trick, see
# https://stackoverflow.com/questions/2447353/getattr-on-a-module/7668273#7668273
sys.modules[__name__] = XNNPACKEngine(sys.modules[__name__], __name__)
pytorch-master
torch/backends/xnnpack/__init__.py
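A quick check of the read-only property exposed above:

import torch

# `enabled` is read-only; assigning to it raises RuntimeError via _XNNPACKEnabled.__set__.
print(torch.backends.xnnpack.enabled)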
import sys
import torch
from contextlib import contextmanager

from torch.backends import ContextProp, PropModule, __allow_nonbracketed_mutation

def is_available():
    r"""Returns whether PyTorch is built with MKL-DNN support."""
    return torch._C.has_mkldnn

VERBOSE_OFF = 0
VERBOSE_ON = 1
VERBOSE_ON_CREATION = 2

class verbose(object):
    """
    On-demand oneDNN (formerly MKL-DNN) verbosing functionality.

    To make it easier to debug performance issues, oneDNN can dump verbose
    messages containing information like kernel size, input data size and
    execution duration while executing the kernel. The verbosing functionality
    can be invoked via an environment variable named `DNNL_VERBOSE`. However,
    that approach dumps messages at every step, which produces a large amount
    of output. Moreover, for investigating performance issues, verbose messages
    from a single iteration are usually enough. This on-demand verbosing
    functionality makes it possible to control the scope of verbose message
    dumping. In the following example, verbose messages will be dumped out for
    the second inference only.

    .. highlight:: python
    .. code-block:: python

        import torch
        model(data)
        with torch.backends.mkldnn.verbose(torch.backends.mkldnn.VERBOSE_ON):
            model(data)

    Args:
        level: Verbose level
            - ``VERBOSE_OFF``: Disable verbosing
            - ``VERBOSE_ON``:  Enable verbosing
            - ``VERBOSE_ON_CREATION``: Enable verbosing, including oneDNN kernel creation
    """
    def __init__(self, level):
        self.level = level

    def __enter__(self):
        if self.level == VERBOSE_OFF:
            return
        st = torch._C._verbose.mkldnn_set_verbose(self.level)
        assert st, "Failed to set MKLDNN into verbose mode. Please consider disabling this verbose scope."
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        torch._C._verbose.mkldnn_set_verbose(VERBOSE_OFF)
        return False

def set_flags(_enabled):
    orig_flags = (torch._C._get_mkldnn_enabled(),)
    torch._C._set_mkldnn_enabled(_enabled)
    return orig_flags

@contextmanager
def flags(enabled=False):
    with __allow_nonbracketed_mutation():
        orig_flags = set_flags(enabled)
    try:
        yield
    finally:
        with __allow_nonbracketed_mutation():
            set_flags(orig_flags[0])

class MkldnnModule(PropModule):
    def __init__(self, m, name):
        super(MkldnnModule, self).__init__(m, name)

    enabled = ContextProp(torch._C._get_mkldnn_enabled, torch._C._set_mkldnn_enabled)

# Cool stuff from torch/backends/cudnn/__init__.py and
# https://stackoverflow.com/questions/2447353/getattr-on-a-module/7668273#7668273
sys.modules[__name__] = MkldnnModule(sys.modules[__name__], __name__)
pytorch-master
torch/backends/mkldnn/__init__.py
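A brief usage sketch of the mkldnn module above; the toy Conv2d and random input stand in for a real model and data:

import torch
import torch.nn as nn

model = nn.Conv2d(3, 8, kernel_size=3)
data = torch.randn(1, 3, 32, 32)

if torch.backends.mkldnn.is_available():
    # Temporarily disable MKL-DNN kernels for this block only; flags()
    # restores the previous setting on exit.
    with torch.backends.mkldnn.flags(enabled=False):
        out = model(data)
    print(torch.backends.mkldnn.enabled)  # back to the previous value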
pytorch-master
torch/backends/xeon/__init__.py
""" This is a script for launching PyTorch inference on Intel(R) Xeon(R) Scalable Processors with optimal configurations. Single instance inference, multi-instance inference are enabled. Note: term "instance" here doesn't refer to a cloud instance. This script is executed as a single process. It invokes multiple "instances" which are formed from multiple threads for each. "instance" is kind of group of threads in this context. Illustrated as below: :: +-----------------------------+----------------------+-------+ | process | thread | core | +=============================+======================+=======+ | torch.backends.xeon.run_cpu | instance 0: thread 0 | 0 | | | thread 1 | 1 | | +----------------------+-------+ | | instance 1: thread 0 | 2 | | | thread 1 | 3 | | +----------------------+-------+ | | ... | ... | | +----------------------+-------+ | | instance N: thread 0 | M | | | thread 1 | M+1 | +-----------------------------+----------------------+-------+ To get the peak performance on Intel(R) Xeon(R) Scalable Processors, the script optimizes the configuration of thread and memory management. For thread management, the script configures thread affinity and the preload of Intel OMP library. For memory management, it configures NUMA binding and preload optimized memory allocation library (e.g. tcmalloc, jemalloc). Environment variables that will be set by this script: +------------------+-------------------------------------------------------------------------------------------------+ | Environ Variable | Value | +==================+=================================================================================================+ | LD_PRELOAD | Depending on knobs you set, <lib>/libiomp5.so, <lib>/libjemalloc.so, <lib>/libtcmalloc.so might | | | be appended to LD_PRELOAD. | +------------------+-------------------------------------------------------------------------------------------------+ | KMP_AFFINITY | If libiomp5.so is preloaded, KMP_AFFINITY could be set to "granularity=fine,compact,1,0". | +------------------+-------------------------------------------------------------------------------------------------+ | KMP_BLOCKTIME | If libiomp5.so is preloaded, KMP_BLOCKTIME is set to "1". | +------------------+-------------------------------------------------------------------------------------------------+ | OMP_NUM_THREADS | value of ncores_per_instance | +------------------+-------------------------------------------------------------------------------------------------+ | MALLOC_CONF | If libjemalloc.so is preloaded, MALLOC_CONF will be set to | | | "oversize_threshold:1,background_thread:true,metadata_thp:auto". | +------------------+-------------------------------------------------------------------------------------------------+ *Note*: This script respects environment variables set preliminarily. I.e. If you set the environment variables mentioned above before running the script, the script will not overwrite the values in the script. How to use this module: ~~~~~~~~~~~~~~~~~~~~~~~ Single instance inference ------------------------- 1. Run single-instance inference on a single node with all CPU nodes. :: >>> python -m torch.backends.xeon.run_cpu --throughput_mode script.py args 2. Run single-instance inference on a single CPU node. :: >>> python -m torch.backends.xeon.run_cpu --node_id 1 script.py args Multi-instance inference ------------------------ 1. Multi-instance By default this tool runs one process per node. 
If you want to set the instance numbers and core per instance, --ninstances and --ncores_per_instance should be set. :: >>> python -m torch.backends.xeon.run_cpu -- python_script args eg: on an Intel(R) Xeon(R) Scalable Processor with 14 instance, 4 cores per instance :: >>> python -m torch.backends.xeon.run_cpu --ninstances 14 --ncores_per_instance 4 python_script args 2. Run single-instance inference among multiple instances. By default, runs all ninstances. If you want to independently run a single instance among ninstances, specify rank. eg: run 0th instance on an Intel(R) Xeon(R) Scalable Processor with 2 instance (i.e., numactl -C 0-27) :: >>> python -m torch.backends.xeon.run_cpu --ninstances 2 --rank 0 python_script args eg: run 1st instance on an Intel(R) Xeon(R) Scalable Processor with 2 instance (i.e., numactl -C 28-55) :: >>> python -m torch.backends.xeon.run_cpu --ninstances 2 --rank 1 python_script args eg: run 0th instance on an Intel(R) Xeon(R) Scalable Processor with 2 instance, 2 cores per instance, first four cores (i.e., numactl -C 0-1) :: >>> python -m torch.backends.xeon.run_cpu --core_list "0, 1, 2, 3" --ninstances 2 --ncores_per_instance 2 --rank 0 python_script args 3. To look up what optional arguments this module offers: :: >>> python -m torch.backends.xeon.run_cpu --help Memory allocator ---------------- "--enable_tcmalloc" and "--enable_jemalloc" can be used to enable different memory allcator. """ import sys import platform import subprocess import os from os.path import expanduser import re import glob from argparse import ArgumentParser, REMAINDER from argparse import RawTextHelpFormatter import logging from torch.distributed.elastic.multiprocessing import Std, start_processes from typing import List, Dict format_str = "%(asctime)s - %(name)s - %(levelname)s - %(message)s" logging.basicConfig(level=logging.INFO, format=format_str) logger = logging.getLogger(__name__) class _CPUinfo(): """ Get CPU inforamation, such as cores list and NUMA information. """ def __init__(self, test_input=""): self.cpuinfo = [] if platform.system() in ["Windows", "Darwin"]: raise RuntimeError(f"{platform.system()} is not supported!!!") elif platform.system() == "Linux": # Sample output of: `lscpu --parse=CPU,Core,Socket,Node` # # # The following is the parsable format, which can be fed to other # # programs. Each different item in every column has an unique ID # # starting from zero. # # CPU,Core,Socket,Node # 0,0,0,0 # 1,1,0,0 # ... 
if test_input == "": lscpu_cmd = ["lscpu", "--parse=CPU,Core,Socket,Node"] lscpu_info = subprocess.check_output(lscpu_cmd, universal_newlines=True).split("\n") else: lscpu_info = test_input.split("\n") # Get information about cpu, core, socket and node for line in lscpu_info: pattern = r"^([\d]+,[\d]+,[\d]+,[\d]?)" regex_out = re.search(pattern, line) if regex_out: self.cpuinfo.append(regex_out.group(1).strip().split(",")) # physical cores := core column in lscpu output # logical cores := cPU column in lscpu output self.node_nums = int(max([line[3] for line in self.cpuinfo])) + 1 self.node_physical_cores: List[List[int]] = [] # node_id is index self.node_logical_cores: List[List[int]] = [] # node_id is index self.physical_core_node_map = {} # phyical core to numa node id self.logical_core_node_map = {} # logical core to numa node id for node_id in range(self.node_nums): cur_node_physical_core = [] cur_node_logical_core = [] for cpuinfo in self.cpuinfo: nid = cpuinfo[3] if cpuinfo[3] != "" else "0" if node_id == int(nid): if int(cpuinfo[1]) not in cur_node_physical_core: cur_node_physical_core.append(int(cpuinfo[1])) self.physical_core_node_map[int(cpuinfo[1])] = int(node_id) cur_node_logical_core.append(int(cpuinfo[0])) self.logical_core_node_map[int(cpuinfo[0])] = int(node_id) self.node_physical_cores.append(cur_node_physical_core) self.node_logical_cores.append(cur_node_logical_core) def _physical_core_nums(self): return len(self.node_physical_cores) * len(self.node_physical_cores[0]) def _logical_core_nums(self): return len(self.node_logical_cores) * len(self.node_logical_cores[0]) def get_node_physical_cores(self, node_id): if node_id < 0 or node_id > self.node_nums - 1: raise ValueError(f"Invalid node id: {node_id}. Valid node ids: {list(range(len(self.node_physical_cores)))}") return self.node_physical_cores[node_id] def get_node_logical_cores(self, node_id): if node_id < 0 or node_id > self.node_nums - 1: raise ValueError(f"Invalid node id: {node_id}. Valid node ids: {list(range(len(self.node_physical_cores)))}") return self.node_logical_cores[node_id] def get_all_physical_cores(self): all_cores = [] for cores in self.node_physical_cores: all_cores.extend(cores) return all_cores def get_all_logical_cores(self): all_cores = [] for cores in self.node_logical_cores: all_cores.extend(cores) return all_cores def numa_aware_check(self, core_list): """ Check whether all cores in core_list are in the same NUMA node. cross NUMA will reduce perforamnce. We strongly advice to not use cores on different nodes. """ cores_numa_map = self.logical_core_node_map numa_ids = [] for core in core_list: numa_id = cores_numa_map[core] if numa_id not in numa_ids: numa_ids.append(numa_id) if len(numa_ids) > 1: logger.warning(f"Numa Aware: cores:{str(core_list)} on different NUMA nodes:{str(numa_ids)}. To avoid \ this behavior, please use --ncores_per_instance knob to make sure number of cores is divisible by --ncores_per_\ instance. Alternatively, please use --skip_cross_node_cores knob.") if len(numa_ids) == 0: raise RuntimeError("invalid number of NUMA nodes; please make sure numa_ids >= 1") return numa_ids class _Launcher(): r""" Class for launcher """ msg_lib_notfound = f"Unable to find the {{0}} library file lib{{1}}.so in $CONDA_PREFIX/lib or $VIRTUAL_ENV/lib \ or /.local/lib/ or /usr/local/lib/ or /usr/local/lib64/ or /usr/lib or /usr/lib64 or \ {expanduser('~')}/.local/lib/ so the LD_PRELOAD environment variable will not be set." 
def __init__(self): self.cpuinfo = _CPUinfo() def add_lib_preload(self, lib_type): """ Enale TCMalloc/JeMalloc/intel OpenMP """ library_paths = [] if "CONDA_PREFIX" in os.environ: library_paths.append(f"{os.environ['CONDA_PREFIX']}/lib") if "VIRTUAL_ENV" in os.environ: library_paths.append(f"{os.environ['VIRTUAL_ENV']}/lib") library_paths += [f"{expanduser('~')}/.local/lib", "/usr/local/lib", "/usr/local/lib64", "/usr/lib", "/usr/lib64"] lib_find = False lib_set = False for item in os.getenv("LD_PRELOAD", "").split(":"): if item.endswith(f"lib{lib_type}.so"): lib_set = True break if not lib_set: for lib_path in library_paths: library_file = os.path.join(lib_path, f"lib{lib_type}.so") matches = glob.glob(library_file) if len(matches) > 0: ld_preloads = [f"{matches[0]}", os.getenv("LD_PRELOAD", "")] os.environ["LD_PRELOAD"] = os.pathsep.join([p.strip(os.pathsep) for p in ld_preloads if p]) lib_find = True break return lib_set or lib_find def set_memory_allocator(self, enable_tcmalloc=True, enable_jemalloc=False, use_default_allocator=False): """ Enable TCMalloc/JeMalloc with LD_PRELOAD and set configuration for JeMalloc. By default, PTMalloc will be used for PyTorch, but TCMalloc and JeMalloc can get better memory resue and reduce page fault to improve performance. """ if enable_tcmalloc and enable_jemalloc: raise RuntimeError("Unable to enable TCMalloc and JEMalloc at the same time.") if enable_tcmalloc: find_tc = self.add_lib_preload(lib_type="tcmalloc") if not find_tc: msg = f"{self.msg_lib_notfound} you can use \"conda install -c conda-forge gperftools\" to install {{0}}" logger.warning(msg.format("TCmalloc", "tcmalloc")) else: logger.info("Use TCMalloc memory allocator") elif enable_jemalloc: find_je = self.add_lib_preload(lib_type="jemalloc") if not find_je: msg = f"{self.msg_lib_notfound} you can use \"conda install -c conda-forge jemalloc\" to install {{0}}" logger.warning(msg.format("Jemalloc", "jemalloc")) else: logger.info("Use JeMalloc memory allocator") self.set_env("MALLOC_CONF", "oversize_threshold:1,background_thread:true,metadata_thp:auto") elif use_default_allocator: pass else: find_tc = self.add_lib_preload(lib_type="tcmalloc") if find_tc: logger.info("Use TCMalloc memory allocator") return find_je = self.add_lib_preload(lib_type="jemalloc") if find_je: logger.info("Use JeMalloc memory allocator") return logger.warning(f"""Neither TCMalloc nor JeMalloc is found in $CONDA_PREFIX/lib or $VIRTUAL_ENV/lib or /.local/lib/ or /usr/local/lib/ or /usr/local/lib64/ or /usr/lib or /usr/lib64 or {expanduser("~")}/.local/lib/ so the LD_PRELOAD environment variable will not be set. This may drop the performance""") def log_env_var(self, env_var_name=""): if env_var_name in os.environ: logger.info(f"{env_var_name}={os.environ[env_var_name]}") def set_env(self, env_name, env_value): if not env_value: logger.warning(f"{env_name} is None") if env_name not in os.environ: os.environ[env_name] = env_value elif os.environ[env_name] != env_value: logger.warning(f"Overriding value with the one set in environment variable: {env_name}. \ Value applied: {os.environ[env_name]}. Value ignored: {env_value}") self.log_env_var(env_name) # set_kmp_affinity is used to control whether to set KMP_AFFINITY or not. # In scenario that use all cores on all nodes, including logical cores, setting KMP_AFFINITY disables logical cores. # In this case, KMP_AFFINITY should not be set. 
def set_multi_thread_and_allocator(self, ncores_per_instance, disable_iomp=False, set_kmp_affinity=True, enable_tcmalloc=True, enable_jemalloc=False, use_default_allocator=False): """ Set multi-thread configuration and enable Intel openMP and TCMalloc/JeMalloc. By default, GNU openMP and PTMalloc are used in PyTorch. but Intel openMP and TCMalloc/JeMalloc are better alternatives to get performance benifit. """ self.set_memory_allocator(enable_tcmalloc, enable_jemalloc, use_default_allocator) self.set_env("OMP_NUM_THREADS", str(ncores_per_instance)) if not disable_iomp: find_iomp = self.add_lib_preload(lib_type="iomp5") if not find_iomp: msg = f"{self.msg_lib_notfound} you can use \"conda install mkl\" to install {{0}}" logger.warning(msg.format("iomp", "iomp5")) else: logger.info("Using Intel OpenMP") if set_kmp_affinity: self.set_env("KMP_AFFINITY", "granularity=fine,compact,1,0") self.set_env("KMP_BLOCKTIME", "1") self.log_env_var("LD_PRELOAD") r""" Launcher for single instance and multi-instance """ def launch(self, args): cores = [] set_kmp_affinity = True if args.core_list: # user specify what cores will be used by params cores = [int(x) for x in args.core_list.split(",")] if args.ncores_per_instance == -1: raise RuntimeError("please specify the \"--ncores_per_instance\" if you have pass the --core_list params") elif args.ninstances > 1 and args.ncores_per_instance * args.ninstances < len(cores): logger.warning(f"only first {args.ncores_per_instance * args.ninstances} cores will be used, \ but you specify {len(cores)} cores in core_list") else: args.ninstances = len(cores) // args.ncores_per_instance else: if args.use_logical_core: if args.node_id != -1: cores = self.cpuinfo.get_node_logical_cores(args.node_id) else: cores = self.cpuinfo.get_all_logical_cores() # When using all cores on all nodes, including logical cores, # setting KMP_AFFINITY disables logical cores. Thus, KMP_AFFINITY should not be set. set_kmp_affinity = False else: if args.node_id != -1: cores = self.cpuinfo.get_node_physical_cores(args.node_id) else: cores = self.cpuinfo.get_all_physical_cores() if not args.multi_instance and args.ninstances == -1 and args.ncores_per_instance == -1: args.ninstances = 1 args.ncores_per_instance = len(cores) elif args.multi_instance and args.ninstances == -1 and args.ncores_per_instance == -1: args.throughput_mode = True elif args.ncores_per_instance == -1 and args.ninstances != -1: if args.ninstances > len(cores): raise RuntimeError(f"there are {len(cores)} total cores but you specify {args.ninstances} ninstances; \ please make sure ninstances <= total_cores)") else: args.ncores_per_instance = len(cores) // args.ninstances elif args.ncores_per_instance != -1 and args.ninstances == -1: if not args.skip_cross_node_cores: args.ninstances = len(cores) // args.ncores_per_instance else: ncore_per_node = len(self.cpuinfo.node_physical_cores[0]) num_leftover_cores = ncore_per_node % args.ncores_per_instance if args.ncores_per_instance > ncore_per_node: # too many ncores_per_instance to skip cross-node cores logger.warning("there are {} core(s) per socket, but you specify {} ncores_per_instance and \ skip_cross_node_cores. 
Please make sure --ncores_per_instance < core(s) per \ socket".format(ncore_per_node, args.ncores_per_instance)) exit(-1) elif num_leftover_cores == 0: # aren't any cross-node cores logger.info('--skip_cross_node_cores is set, but there are no cross-node cores.') args.ninstances = len(cores) // args.ncores_per_instance else: # skip cross-node cores if args.ninstances != -1: logger.warning('--skip_cross_node_cores is exclusive to --ninstances. --ninstances \ won\'t take effect even if it is set explicitly.') i = 1 leftover_cores = set() while ncore_per_node * i <= len(cores): leftover_cores.update(cores[ncore_per_node * i - num_leftover_cores : ncore_per_node * i]) i += 1 cores = list(set(cores) - leftover_cores) assert len(cores) % args.ncores_per_instance == 0 args.ninstances = len(cores) // args.ncores_per_instance else: if args.ninstances * args.ncores_per_instance > len(cores): raise RuntimeError("Please make sure ninstances * ncores_per_instance <= total_cores") if args.latency_mode: logger.warning("--latency_mode is exclusive to --ninstances, --ncores_per_instance, --node_id and \ --use_logical_core. They won't take effect even they are set explicitly.") args.ncores_per_instance = 4 cores = self.cpuinfo.get_all_physical_cores() args.ninstances = len(cores) // args.ncores_per_instance if args.throughput_mode: logger.warning("--throughput_mode is exclusive to --ninstances, --ncores_per_instance, --node_id and \ --use_logical_core. They won't take effect even they are set explicitly.") args.ninstances = self.cpuinfo.node_nums cores = self.cpuinfo.get_all_physical_cores() args.ncores_per_instance = len(cores) // args.ninstances if args.ninstances > 1 and args.rank != -1: logger.info(f"assigning {args.ncores_per_instance} cores for instance {args.rank}") self.set_multi_thread_and_allocator(args.ncores_per_instance, args.disable_iomp, set_kmp_affinity, args.enable_tcmalloc, args.enable_jemalloc, args.use_default_allocator) entrypoint = "" launch_args = {} launch_envs: Dict[int, Dict] = {} launch_tee = {} for i in range(args.ninstances): cmd = [] cur_process_cores = "" if not args.disable_numactl: cmd = ["numactl"] cores = sorted(cores) if args.rank == -1: # sequentially assign ncores_per_instance to ninstances core_list = cores[i * args.ncores_per_instance : (i + 1) * args.ncores_per_instance] else: # assign ncores_per_instance from rank core_list = cores[args.rank * args.ncores_per_instance : (args.rank + 1) * args.ncores_per_instance] core_ranges: List[Dict] = [] for core in core_list: if len(core_ranges) == 0: range_elem = {"start": core, "end": core} core_ranges.append(range_elem) else: if core - core_ranges[-1]["end"] == 1: core_ranges[-1]["end"] = core else: range_elem = {"start": core, "end": core} core_ranges.append(range_elem) for r in core_ranges: cur_process_cores = f"{cur_process_cores}{r['start']}-{r['end']}," cur_process_cores = cur_process_cores[:-1] numa_params = f"-C {cur_process_cores} " numa_ids = ",".join([str(numa_id) for numa_id in self.cpuinfo.numa_aware_check(core_list)]) numa_params += f"-m {numa_ids}" cmd.extend(numa_params.split()) with_python = not args.no_python if with_python: cmd.append(sys.executable) cmd.append("-u") if args.module: cmd.append("-m") cmd.append(args.program) cmd.extend(args.program_args) cmd_s = " ".join(cmd) logger.info(cmd_s) if entrypoint == "": entrypoint = cmd[0] del cmd[0] launch_args[i] = tuple(cmd) launch_envs[i] = {} launch_tee[i] = Std.ALL if args.rank != -1: # launches single instance, rank, only break ctx = 
start_processes(name=args.log_file_prefix, entrypoint=entrypoint, args=launch_args, envs=launch_envs, log_dir=args.log_path, tee=launch_tee) ctx.wait() def _add_memory_allocator_params(parser): group = parser.add_argument_group("Memory Allocator Parameters") # allocator control group.add_argument("--enable_tcmalloc", action="store_true", default=False, help="Enable tcmalloc allocator") group.add_argument("--enable_jemalloc", action="store_true", default=False, help="Enable jemalloc allocator") group.add_argument("--use_default_allocator", action="store_true", default=False, help="Use default memory allocator") def _add_multi_instance_params(parser): group = parser.add_argument_group("Multi-instance Parameters") # multi-instance control group.add_argument("--ncores_per_instance", metavar="\b", default=-1, type=int, help="Cores per instance") group.add_argument("--ninstances", metavar="\b", default=-1, type=int, help="For multi-instance, you should give the cores number you used for per instance.") group.add_argument("--skip_cross_node_cores", action='store_true', default=False, help="If specified --ncores_per_instance, skips cross-node cores.") group.add_argument("--rank", metavar="\b", default="-1", type=int, help="Specify instance index to assign ncores_per_instance for rank; \ otherwise ncores_per_instance will be assigned sequentially to ninstances. Please refer to \ https://github.com/intel/intel-extension-for-pytorch/blob/master/docs/tutorials/performance_tuning/launch_script.md") group.add_argument("--latency_mode", action="store_true", default=False, help="By detault 4 core per instance and use all physical cores") group.add_argument("--throughput_mode", action="store_true", default=False, help="By default one instance per node and use all physical cores") group.add_argument("--node_id", metavar="\b", default=-1, type=int, help="node id for multi-instance, by default all nodes will be used") group.add_argument("--use_logical_core", action="store_true", default=False, help="Whether only use physical cores") group.add_argument("--disable_numactl", action="store_true", default=False, help="Disable numactl") group.add_argument("--core_list", metavar="\b", default=None, type=str, help="Specify the core list as \"core_id, core_id, ....\", otherwise, all the cores will be used.") group.add_argument("--log_path", metavar="\b", default="logs", type=str, help="The log file directory. Default path is "", which means disable logging to files.") group.add_argument("--log_file_prefix", metavar="\b", default="run", type=str, help="log file prefix") def _add_kmp_iomp_params(parser): group = parser.add_argument_group("IOMP Parameters") group.add_argument("--disable_iomp", action="store_true", default=False, help="By default, we use Intel OpenMP and libiomp5.so will be add to LD_PRELOAD") def create_args(parser=None): """ Helper function parsing the command line options @retval ArgumentParser """ parser.add_argument("--multi_instance", action="store_true", default=False, help="Enable multi-instance, by default one instance per node") parser.add_argument("-m", "--module", default=False, action="store_true", help="Changes each process to interpret the launch script " "as a python module, executing with the same behavior as" "\"python -m\".") parser.add_argument("--no_python", default=False, action="store_true", help="Do not prepend the --program script with \"python\" - just exec " "it directly. 
Useful when the script is not a Python script.") _add_memory_allocator_params(parser) _add_kmp_iomp_params(parser) _add_multi_instance_params(parser) # positional parser.add_argument("program", type=str, help="The full path to the proram/script to be launched. " "followed by all the arguments for the script") # rest from the training program parser.add_argument("program_args", nargs=REMAINDER) def main(args): env_before = set(os.environ.keys()) if platform.system() in ["Windows", "Darwin"]: raise RuntimeError(f"{platform.system()} is not supported!!!") if args.log_path: os.makedirs(args.log_path, exist_ok=True) if args.latency_mode and args.throughput_mode: raise RuntimeError("Either args.latency_mode or args.throughput_mode should be set") if not args.no_python and not args.program.endswith(".py"): raise RuntimeError("For non Python script, you should use \"--no_python\" parameter.") # Verify LD_PRELOAD if "LD_PRELOAD" in os.environ: lst_valid = [] tmp_ldpreload = os.environ["LD_PRELOAD"] for item in tmp_ldpreload.split(":"): matches = glob.glob(item) if len(matches) > 0: lst_valid.append(item) else: logger.warning(f"{item} doesn't exist. Removing it from LD_PRELOAD.") if len(lst_valid) > 0: os.environ["LD_PRELOAD"] = ":".join(lst_valid) else: os.environ["LD_PRELOAD"] = "" launcher = _Launcher() launcher.launch(args) for x in sorted(set(os.environ.keys()) - env_before): logger.debug("{x}={os.environ[x]}") if __name__ == "__main__": parser = ArgumentParser(description="This is a script for launching PyTorch inference on Intel(R) Xeon(R) Scalable " "Processors with optimal configurations. Single instance inference, " "multi-instance inference are enable. To get the peak performance on Intel(R) " "Xeon(R) Scalable Processors, the script optimizes the configuration " "of thread and memory management. For thread management, the script configures thread " "affinity and the preload of Intel OMP library. For memory management, it configures " "NUMA binding and preload optimized memory allocation library (e.g. tcmalloc, jemalloc) " "\n################################# Basic usage ############################# \n" "\n 1. single instance\n" "\n >>> python -m torch.backends.xeon.run_cpu python_script args \n" "\n2. multi-instance \n" "\n >>> python -m torch.backends.xeon.run_cpu --ninstances xxx " "--ncores_per_instance xx python_script args\n" "\n############################################################################# \n", formatter_class=RawTextHelpFormatter) create_args(parser) args = parser.parse_args() main(args)
pytorch-master
torch/backends/xeon/run_cpu.py
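A small sketch of how the _CPUinfo helper in run_cpu.py parses `lscpu --parse=CPU,Core,Socket,Node` output. The test_input string below is fabricated (two NUMA nodes, two physical cores each, with hyper-thread siblings), and the class only parses on Linux, matching its platform check:

from torch.backends.xeon.run_cpu import _CPUinfo

fake_lscpu = """0,0,0,0
1,1,0,0
2,2,1,1
3,3,1,1
4,0,0,0
5,1,0,0
6,2,1,1
7,3,1,1"""

info = _CPUinfo(test_input=fake_lscpu)
print(info.get_node_physical_cores(0))   # [0, 1]
print(info.get_all_logical_cores())      # [0, 1, 4, 5, 2, 3, 6, 7]
print(info.numa_aware_check([0, 2]))     # warns about crossing nodes; returns [0, 1]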
pytorch-master
torch/backends/_nnapi/__init__.py
import sys import enum import struct import array import logging import functools from typing import ( Tuple, NamedTuple, List, Optional, ) import torch # TODO: Add type annotations # TODO: Check tensor types for ops LOG = logging.getLogger("nnapi_serialize") class NNAPI_OperandCode(object): FLOAT32 = 0 INT32 = 1 UINT32 = 2 TENSOR_FLOAT32 = 3 TENSOR_INT32 = 4 TENSOR_QUANT8_ASYMM = 5 BOOL = 6 TENSOR_QUANT16_SYMM = 7 TENSOR_FLOAT16 = 8 TENSOR_BOOL8 = 9 FLOAT16 = 10 TENSOR_QUANT8_SYMM_PER_CHANNEL = 11 TENSOR_QUANT16_ASYMM = 12 class NNAPI_OperationCode(object): ADD = 0 AVERAGE_POOL_2D = 1 CONCATENATION = 2 CONV_2D = 3 DEPTHWISE_CONV_2D = 4 DEPTH_TO_SPACE = 5 DEQUANTIZE = 6 EMBEDDING_LOOKUP = 7 FLOOR = 8 FULLY_CONNECTED = 9 HASHTABLE_LOOKUP = 10 L2_NORMALIZATION = 11 L2_POOL_2D = 12 LOCAL_RESPONSE_NORMALIZATION = 13 LOGISTIC = 14 LSH_PROJECTION = 15 LSTM = 16 MAX_POOL_2D = 17 MUL = 18 RELU = 19 RELU1 = 20 RELU6 = 21 RESHAPE = 22 RESIZE_BILINEAR = 23 RNN = 24 SOFTMAX = 25 SPACE_TO_DEPTH = 26 SVDF = 27 TANH = 28 BATCH_TO_SPACE_ND = 29 DIV = 30 MEAN = 31 PAD = 32 SPACE_TO_BATCH_ND = 33 SQUEEZE = 34 STRIDED_SLICE = 35 SUB = 36 TRANSPOSE = 37 ABS = 38 ARGMAX = 39 ARGMIN = 40 AXIS_ALIGNED_BBOX_TRANSFORM = 41 BIDIRECTIONAL_SEQUENCE_LSTM = 42 BIDIRECTIONAL_SEQUENCE_RNN = 43 BOX_WITH_NMS_LIMIT = 44 CAST = 45 CHANNEL_SHUFFLE = 46 DETECTION_POSTPROCESSING = 47 EQUAL = 48 EXP = 49 EXPAND_DIMS = 50 GATHER = 51 GENERATE_PROPOSALS = 52 GREATER = 53 GREATER_EQUAL = 54 GROUPED_CONV_2D = 55 HEATMAP_MAX_KEYPOINT = 56 INSTANCE_NORMALIZATION = 57 LESS = 58 LESS_EQUAL = 59 LOG = 60 LOGICAL_AND = 61 LOGICAL_NOT = 62 LOGICAL_OR = 63 LOG_SOFTMAX = 64 MAXIMUM = 65 MINIMUM = 66 NEG = 67 NOT_EQUAL = 68 PAD_V2 = 69 POW = 70 PRELU = 71 QUANTIZE = 72 QUANTIZED_16BIT_LSTM = 73 RANDOM_MULTINOMIAL = 74 REDUCE_ALL = 75 REDUCE_ANY = 76 REDUCE_MAX = 77 REDUCE_MIN = 78 REDUCE_PROD = 79 REDUCE_SUM = 80 ROI_ALIGN = 81 ROI_POOLING = 82 RSQRT = 83 SELECT = 84 SIN = 85 SLICE = 86 SPLIT = 87 SQRT = 88 TILE = 89 TOPK_V2 = 90 TRANSPOSE_CONV_2D = 91 UNIDIRECTIONAL_SEQUENCE_LSTM = 92 UNIDIRECTIONAL_SEQUENCE_RNN = 93 RESIZE_NEAREST_NEIGHBOR = 94 class NNAPI_FuseCode(object): FUSED_NONE = 0 FUSED_RELU = 1 FUSED_RELU1 = 2 FUSED_RELU6 = 3 class OperandValueSourceType(object): IMMEDIATE = 0 NUMBERED_BUFFER = 2 NUMBERED_MEMORY = 3 # Scalar types that appear explicitly in models. # These must be kept in sync with # AT_FORALL_SCALAR_TYPES_WITH_COMPLEX_AND_QINTS. # TODO: Expose these directly to Python to avoid maintaining this list. class TorchScalarTypes(enum.Enum): QUINT8 = 13 def approx_equal(lhs, rhs, tolerance=1e-6): return abs(lhs - rhs) <= tolerance * min(lhs, rhs) def tensor_size(op_type, dims): ITEM_SIZES = { NNAPI_OperandCode.TENSOR_FLOAT32: 4, NNAPI_OperandCode.TENSOR_INT32: 4, NNAPI_OperandCode.TENSOR_QUANT8_ASYMM: 1, NNAPI_OperandCode.TENSOR_QUANT16_SYMM: 2, NNAPI_OperandCode.TENSOR_QUANT16_ASYMM: 2, } size = ITEM_SIZES[op_type] for d in dims: size *= d return size def change_element(tup, index, value): ls = list(tup) ls[index] = value return tuple(ls) class ConvPoolArgs2d(NamedTuple): """Configuration arguments for a convolution.""" kernel_h: int kernel_w: int stride_h: int stride_w: int pad_t: int pad_b: int pad_l: int pad_r: int dilation_h: int dilation_w: int group: int class DimOrder(enum.Enum): PRESUMED_CONTIGUOUS = 0 CHANNELS_LAST = 1 SCALAR_OR_VECTOR = 2 UNKNOWN_CONSTANT = 999 class Operand(NamedTuple): """Represenation of an NNAPI operand.""" # NNAPI operand type. One of NNAPI_OperandCode. # TODO: Make this an enum. 
op_type: int # This is always the PyTorch shape, which is NCHW for feature maps. # The actual NNAPI operand might have a transposed shape. # we use 0 for load time dynamic shapes & -1 for runtime dynamic shapes shape: Tuple[int, ...] # Specifies how the shape of the operand that we define in NNAPI # relates to the shape we track above. # - PRESUMED_CONTIGUOUS: physical NNAPI operand will exactly match # the shape of the PyTorch tensor. # - CHANNELS_LAST: The PyTorch tensor is expected to be NCHW, and # the NNAPI operand will be represented explicitly as NHWC. dim_order: DimOrder # Quantization params scale: float zero_point: int def use_nchw(self): if self.dim_order is DimOrder.PRESUMED_CONTIGUOUS: return True if self.dim_order is DimOrder.CHANNELS_LAST: return False raise Exception("Unknown dim order") def broadcast_shapes(shape1, shape2): assert len(shape1) > 0 assert len(shape2) > 0 s1 = list(shape1) s2 = list(shape2) # TODO: Support non-equal-rank broadcast where semantics match. # This can be tricky for NHWC tensors because dimension orders # don't match between PT and NNAPI, even though semantics match. if len(s1) > len(s2): # s2 = [1] * (len(s1) - len(s2)) + s2 raise Exception("Non-equal-rank broadcast is not supported yet.") if len(s2) > len(s1): # s3 = [1] * (len(s2) - len(s1)) + s1 raise Exception("Non-equal-rank broadcast is not supported yet.") ret = [] for d1, d2 in zip(s1, s2): if d1 == 1: ret.append(d2) elif d2 == 1: ret.append(d1) elif d1 == d2: ret.append(d1) else: raise Exception("Cannot broadcast shapes: {} and {}".format(shape1, shape2)) return tuple(ret) def get_conv_pool_shape(image_shape, args, out_ch, transpose): batch, in_c, in_h, in_w = image_shape # TODO: Handle dilation if args.dilation_h != 1 or args.dilation_w != 1: raise Exception("Dilation not supported yet.") if transpose: out_h = (in_h - 1) * args.stride_h + args.kernel_h - args.pad_t - args.pad_b out_w = (in_w - 1) * args.stride_w + args.kernel_w - args.pad_l - args.pad_l else: out_h = (in_h - args.kernel_h + args.pad_t + args.pad_b) // args.stride_h + 1 out_w = (in_w - args.kernel_w + args.pad_l + args.pad_r) // args.stride_w + 1 # Handle variable-sized tensors. if in_h == 0: out_h = 0 if in_w == 0: out_w = 0 out_shape = (batch, out_ch, out_h, out_w) return out_shape def fix_shape(shape, dim_order): # Return the actual shape that an operand should have in NNAPI, # given a PyTorch shape and dimension order. This is where we # convert from PyTorch's "always NCHW" shape to explicit NHWC. if dim_order is DimOrder.PRESUMED_CONTIGUOUS: return shape if dim_order is DimOrder.CHANNELS_LAST: return tuple([shape[0]] + list(shape[2:]) + [shape[1]]) if dim_order is DimOrder.SCALAR_OR_VECTOR: assert len(shape) == 0 or len(shape) == 1 return shape if dim_order is DimOrder.UNKNOWN_CONSTANT: # XXX think this through return shape raise Exception(f"Bad dim_order: {dim_order!r}.") def reverse_map_dim(dim_order, d): # Return the original PyTorch dimension position for a given dimension. # d should be the dimension that NNAPI will see. # reverse_map_dim(PRESUMED_CONTIGUOUS, x) == x # reverse_map_dim(CHANNELS_LAST, 3) == 1 if dim_order in (DimOrder.PRESUMED_CONTIGUOUS, DimOrder.SCALAR_OR_VECTOR): return d assert dim_order is DimOrder.CHANNELS_LAST return [0, 2, 3, 1][d] def flex_name(op_id, dim): # Return the local variable name for the computed flexible size # for a given op and dimension. 
return f"s_{op_id}_{dim}" class _NnapiSerializer(object): def __init__(self, config, use_int16_for_qint16=False): self.operands = [] self.values = [] self.operations = [] self.value_data = [] self.operation_args = [] self.inputs = [] self.outputs = [] self.flexible_shape_computation_lines = [] self.modules = {} self.constants = {} self.tensor_sequences = {} self.jitval_operand_map = {} self.cached_immediates = {} self.used_weights = [] self.weight_offset = 0 self.use_int16_for_qint16 = use_int16_for_qint16 if config is None: config = {} def get_next_operand_id(self): return len(self.operands) # Add a tensor operand corresponding to a JIT Value. # Returns the NNAPI operand ID. Can be looked up later with # get_tensor_operand_by_jitval. def add_tensor_operand(self, jitval, oper): assert isinstance(oper, Operand) if jitval in self.jitval_operand_map: raise Exception("Duplicate tensor: %r" % jitval) operand_id = self.get_next_operand_id() self.operands.append(oper) self.jitval_operand_map[jitval] = operand_id return operand_id # Add a tensor operand that does not correspond to a JIT Value. # Useful for cases where multiple NNAPI operands are required # to implement one JIT IR node. Returns the NNAPI operand ID. def add_anonymous_tensor_operand(self, oper): assert isinstance(oper, Operand) operand_id = self.get_next_operand_id() self.operands.append(oper) return operand_id def torch_tensor_to_operand(self, tensor, dim_order): dtype = str(tensor.dtype).replace("torch.", "") scale = 0.0 zero_point = 0 if dtype == "float32": op_type = NNAPI_OperandCode.TENSOR_FLOAT32 elif dtype == "int32": op_type = NNAPI_OperandCode.TENSOR_INT32 elif dtype == "quint8": op_type = NNAPI_OperandCode.TENSOR_QUANT8_ASYMM scale = tensor.q_scale() zero_point = tensor.q_zero_point() elif dtype == "qint32": op_type = NNAPI_OperandCode.TENSOR_INT32 scale = tensor.q_scale() zero_point = tensor.q_zero_point() assert zero_point == 0 elif dtype == "int16": if self.use_int16_for_qint16: nnapi_dtype = getattr(tensor, "nnapi_dtype", None) op_codes = (NNAPI_OperandCode.TENSOR_QUANT16_SYMM, NNAPI_OperandCode.TENSOR_QUANT16_ASYMM) if nnapi_dtype in op_codes: op_type = nnapi_dtype scale = tensor.nnapi_scale zero_point = tensor.nnapi_zero_point else: raise Exception(f"`nnapi_type` needs to be one of {op_codes} for `int16`") else: raise Exception( "`int16` isn't supported. 
If you're trying to represent NNAPI" " qint16 with Pytorch int16, set `use_int16_for_qint16 = True`") else: raise Exception(f"Can't handle input with dtype '{tensor.dtype}'") return Operand( shape=tuple(tensor.shape), op_type=op_type, dim_order=dim_order, scale=scale, zero_point=zero_point, ) def add_tensor_operand_for_input(self, arg_idx, jitval, tensor): dim_order = ( DimOrder.CHANNELS_LAST if getattr(tensor, "nnapi_nhwc", False) else DimOrder.PRESUMED_CONTIGUOUS) toper = self.torch_tensor_to_operand(tensor, dim_order) operand_id = self.add_tensor_operand(jitval, toper) self.inputs.append(operand_id) for dim, size in enumerate(tensor.shape): if size == 0: self.compute_operand_shape(operand_id, dim, f"args[{arg_idx}].shape[{dim}]") return operand_id def add_tensor_operand_for_weight(self, tensor, dim_order=DimOrder.UNKNOWN_CONSTANT): toper = self.torch_tensor_to_operand(tensor, dim_order) operand_id = len(self.operands) self.operands.append(toper) tsize = tensor_size(toper.op_type, toper.shape) psize = ((tsize - 1) | 0x3) + 1 self.values.append((operand_id, OperandValueSourceType.NUMBERED_BUFFER)) buf_num = len(self.used_weights) offset = 0 self.value_data.append(struct.pack( "iii", buf_num, offset, tsize)) # For NHWC NNAPI op, lay out data in the same dim order by permuting torch tensor if dim_order == DimOrder.CHANNELS_LAST: tensor = tensor.permute(0, 2, 3, 1) self.used_weights.append(tensor) return operand_id def add_immediate_operand(self, code, value, dims): assert isinstance(dims, tuple) cache_key = (code, value) if cache_key not in self.cached_immediates: operand_id = len(self.operands) self.operands.append(Operand(code, dims, DimOrder.SCALAR_OR_VECTOR, 0.0, 0)) self.values.append((operand_id, OperandValueSourceType.IMMEDIATE)) self.value_data.append(value) self.cached_immediates[cache_key] = operand_id return self.cached_immediates[cache_key] def add_immediate_int_scalar(self, value): return self.add_immediate_operand( NNAPI_OperandCode.INT32, struct.pack("i", value), ()) def add_immediate_float_scalar(self, value): return self.add_immediate_operand( NNAPI_OperandCode.FLOAT32, struct.pack("f", value), ()) def add_immediate_bool_scalar(self, value): return self.add_immediate_operand( NNAPI_OperandCode.BOOL, b"\x01" if value else b"\x00", ()) def add_immediate_int_vector(self, value): return self.add_immediate_operand( NNAPI_OperandCode.TENSOR_INT32, array.array("i", value).tobytes(), (len(value),)) def has_operand_for_jitval(self, jitval): return jitval in self.jitval_operand_map def get_tensor_operand_by_jitval(self, jitval): operand_id = self.jitval_operand_map[jitval] return (operand_id, self.operands[operand_id]) def get_tensor_operand_by_jitval_fixed_size(self, jitval): op_id, oper = self.get_tensor_operand_by_jitval(jitval) for s in oper.shape: if s == 0: # TODO: Improve this error message, possibly after converting # many callsites to support flexible size. 
raise Exception("Flexible size is not supported for this operand.") if s < 0: # runtime flex LOG.warn(f"Operand {oper} has runtime flex shape") return op_id, oper def get_tensor_operand_or_constant(self, jitval, dim_order=DimOrder.PRESUMED_CONTIGUOUS): operand_id = self.jitval_operand_map.get(jitval) if operand_id is None: _, value = self.get_constant_value(jitval, "TensorType") operand_id = self.add_tensor_operand_for_weight(value, dim_order) return (operand_id, self.operands[operand_id]) def get_tensor_operand_for_weight(self, jitval): _, value = self.get_constant_value(jitval, "TensorType") operand_id = self.add_tensor_operand_for_weight(value) return (operand_id, self.operands[operand_id]) def add_operation(self, opcode, inputs, outputs): self.operations.append((opcode, len(inputs), len(outputs))) self.operation_args.extend(inputs + outputs) def add_tensor_sequence(self, jitval, values): assert jitval not in self.tensor_sequences self.tensor_sequences[jitval] = values def add_constant_value(self, jitval, ctype, value): assert jitval not in self.constants self.constants[jitval] = (ctype, value) def get_constant_value(self, jitval, typekind=None): record = self.constants.get(jitval) if record is None: raise Exception(f"Could not find constant value for '{jitval!r}'.") ctype, _ = record if typekind is not None and ctype.kind() != typekind: raise Exception( f"Expected constant value of type {typekind}, but got {ctype.kind()} for value '{jitval!r}'") return record def operand_to_template_torchscript(self, op_id, oper, shape=None): """Return a TorchScript expression to build a template for a given operand.""" if shape is None: shape = oper.shape else: assert len(shape) == len(oper.shape) shape_parts = ["("] for d, s in enumerate(shape): if s > 0: # Fixed shape dimension: just add the value. shape_parts.append(str(s)) elif s == 0: # Load time flexible shape dimension: it should have been computed in a variable. shape_parts.append(flex_name(op_id, d)) elif s == -1: # Runtime flexible shape shape_parts.append('0') else: raise Exception("Unknown dim value, dimensions should be >= -1") shape_parts.append(",") shape_parts.append(")") shape_code = "".join(shape_parts) if oper.op_type == NNAPI_OperandCode.TENSOR_FLOAT32: return f"torch.zeros({shape_code}, dtype=torch.float32)" elif oper.op_type == NNAPI_OperandCode.TENSOR_INT32: return f"torch.zeros({shape_code}, dtype=torch.int32)" elif oper.op_type == NNAPI_OperandCode.TENSOR_QUANT8_ASYMM: return ( f"torch.quantize_per_tensor(" f"torch.zeros(1), scale={oper.scale}, zero_point={oper.zero_point}, dtype=torch.quint8)" f".expand({shape_code}).contiguous()" ) elif oper.op_type in (NNAPI_OperandCode.TENSOR_QUANT16_ASYMM, NNAPI_OperandCode.TENSOR_QUANT16_SYMM): if self.use_int16_for_qint16: return f"torch.zeros({shape_code}, dtype=torch.int16)" else: raise Exception( "`int16` isn't supported. 
If you're trying to represent NNAPI" " qint16 with Pytorch int16, set `use_int16_for_qint16 = True`") raise Exception(f"Unsupported output operand type: {oper.op_type}") def forward_operand_shape(self, out_op_id, out_dim, in_op_id, in_dim): self.compute_operand_shape(out_op_id, out_dim, flex_name(in_op_id, in_dim)) def compute_operand_shape(self, op_id, dim, expr): self.flexible_shape_computation_lines.append(f"{flex_name(op_id, dim)} = {expr}") def transpose_to_nhwc(self, in_id, oper): if oper.shape[2:] != (1, 1): raise Exception("Automatic transpose only supported for H,W == 1,1") out_oper = oper._replace(dim_order=DimOrder.CHANNELS_LAST) inputs = [None] * 2 inputs[0] = in_id inputs[1] = self.add_immediate_int_vector([0, 2, 3, 1]) outputs = [None] * 1 outputs[0] = self.add_anonymous_tensor_operand(out_oper) self.add_operation(NNAPI_OperationCode.TRANSPOSE, inputs, outputs) return outputs[0], out_oper # Transpose inputs as necessary to allow broadcasting. def transpose_for_broadcast(self, in0_id, in0_oper, in1_id, in1_oper): if in0_oper.dim_order == in1_oper.dim_order: return in0_id, in0_oper, in1_id, in1_oper # Assume NHWC is preferred if there is a mismatch. orders = (in0_oper.dim_order, in1_oper.dim_order) if orders == (DimOrder.PRESUMED_CONTIGUOUS, DimOrder.CHANNELS_LAST): return self.transpose_to_nhwc(in0_id, in0_oper) + (in1_id, in1_oper) if orders == (DimOrder.CHANNELS_LAST, DimOrder.PRESUMED_CONTIGUOUS): return (in0_id, in0_oper) + self.transpose_to_nhwc(in1_id, in1_oper) raise Exception( "Automatic transpose not supported for dim_orders: %r, %r" % (in0_oper.dim_order, in1_oper.dim_order)) def get_size_arg(self, jitval): ctype, value = self.get_constant_value(jitval) if ctype.kind() == "ListType": assert ctype.getElementType().kind() == "IntType" return value raise Exception(f"Can't handle size arg of type '{ctype!r}' for '{jitval!r}'") def get_conv_pool_args_2d_from_pack(self, kernel_size, packed_config): pc = [i.item() for i in packed_config] assert pc[0] == 2 strides = [pc[1], pc[2]] paddings = [pc[3], pc[4]] dilations = [pc[5], pc[6]] output_padding = [pc[7], pc[8]] group_num = pc[9] assert len(pc) == 11 assert output_padding == [0, 0] return self.get_conv_pool_args_2d_common(kernel_size, strides, paddings, dilations, group_num) def get_conv_pool_args_2d_from_jit(self, kernel_size, stride, padding, dilation=None, group=None): strides = self.get_size_arg(stride) paddings = self.get_size_arg(padding) if dilation is None: dilations = [1, 1] else: dilations = self.get_size_arg(dilation) if group is not None: _, group_num = self.get_constant_value(group, "IntType") else: group_num = None return self.get_conv_pool_args_2d_common(kernel_size, strides, paddings, dilations, group_num) def get_conv_pool_args_2d_common(self, kernel_size, strides, paddings, dilations, group_num): kernels = list(kernel_size) assert len(kernels) == 2 assert len(strides) == 2 assert len(paddings) == 2 assert len(dilations) == 2 # NNAPI uses 4 values for padding. 
ph, pw = paddings real_paddings = [ph, ph, pw, pw] return ConvPoolArgs2d(*(kernels + strides + real_paddings + dilations + [group_num])) def serialize_model(self, model, inputs, return_shapes=None): self.add_immediate_bool_scalar(False) self.add_immediate_bool_scalar(True) inp_dim_orders = [] out_dim_orders = [] self_jitval = next(model.graph.inputs()) self.add_constant_value(self_jitval, self_jitval.type(), model) for arg_idx, (input_value, input_tensor) in enumerate(zip(list(model.graph.inputs())[1:], inputs)): op_id = self.add_tensor_operand_for_input(arg_idx, input_value, input_tensor) inp_dim_orders.append(self.operands[op_id].dim_order.value) for idx, node in enumerate(model.graph.nodes()): LOG.debug("Processing node #%d: %r", idx, node) self.add_node(node) retn = model.graph.return_node() assert retn.inputsSize() == 1 assert retn.outputsSize() == 0 retn_input = retn.inputsAt(0) template_return_lines = ["return ["] if retn_input.type().kind() == "TensorType": return_values = [retn_input] retval_count = -1 elif retn_input.type().kind() == "TupleType": return_values = self.tensor_sequences[retn_input] retval_count = len(return_values) else: raise Exception(f"Unsupported return type: {retn_input.type()}") if return_shapes is not None: assert len(return_shapes) == len(return_values) for i, v in enumerate(return_values): op_id = self.jitval_operand_map[v] self.outputs.append(op_id) out_dim_orders.append(self.operands[op_id].dim_order.value) shape = return_shapes[i] if return_shapes else None template_return_lines.append( self.operand_to_template_torchscript( op_id, self.operands[op_id], shape) + "," ) template_return_lines.append("]") model = [] version = 1 header = struct.pack( "iiiiii", version, len(self.operands), len(self.values), len(self.operations), len(self.inputs), len(self.outputs), ) model.append(header) serialized_values, serialized_value_data = self.serialize_values() model.extend(struct.pack("iifi", t, len(d), s, z) for (t, d, _m, s, z) in self.operands) model.extend(serialized_values) model.extend(struct.pack("iii", *x) for x in self.operations) # Compact the model so we can get its length so far. model = [b"".join(model)] model_offset = len(model[0]) # Model offset is the index into the model (in 32-bit words, not bytes) # of the next dimension we're about to serialize. If it's 0, # generate code to mutate it before passing to NNAPI. 
assert model_offset % 4 == 0 model_offset = int(model_offset / 4) for (op_id, (_, dims, dim_order, _, _)) in enumerate(self.operands): shape = fix_shape(dims, dim_order) for d, s in enumerate(shape): if s == 0: pt_d = reverse_map_dim(dim_order, d) self.flexible_shape_computation_lines.append( f"ser_model[{model_offset}] = {flex_name(op_id, pt_d)}") model_offset += 1 # convert runtime flex shape from -1 to 0 shape = tuple(d if d != -1 else 0 for d in shape) model.append(self.serialize_ints(shape)) model.extend(serialized_value_data) model.append(self.serialize_ints(self.operation_args)) model.append(self.serialize_ints(self.inputs)) model.append(self.serialize_ints(self.outputs)) self.flexible_shape_computation_lines.extend(template_return_lines) return ( array.array("i", b"".join(model)), self.used_weights, inp_dim_orders, out_dim_orders, self.flexible_shape_computation_lines, retval_count, ) def serialize_values(self): serialized_values = [] serialized_value_data = [] assert len(self.values) == len(self.value_data) for ((op_index, source_type), data) in zip(self.values, self.value_data): source_length = len(data) # Pad with 0 bytes out to a multiple of 4 for alignment. physical_length = ((source_length - 1) | 0x3) + 1 padded_data = data + (b"\0" * (physical_length - source_length)) serialized_values.append(struct.pack("iii", op_index, source_type, source_length)) serialized_value_data.append(padded_data) return serialized_values, serialized_value_data @staticmethod def serialize_ints(ints): return array.array("i", ints).tobytes() ADDER_MAP = { "prim::GetAttr": lambda self, node: self.add_getattr(node), "prim::Constant": lambda self, node: self.add_constant_node(node), "prim::ListConstruct": lambda self, node: self.add_list_construct(node), "prim::TupleConstruct": lambda self, node: self.add_tuple_construct(node), "aten::unsqueeze": lambda self, node: self.add_unsqueeze(node), "aten::to": lambda self, node: self.add_to(node), "aten::detach": lambda self, node: self._identity(node), "aten::reshape": lambda self, node: self.add_reshape(node), "aten::flatten": lambda self, node: self.add_flatten(node), "aten::slice": lambda self, node: self.add_slice(node), "aten::size": lambda self, node: self.add_size(node), "aten::cat": lambda self, node: self.add_cat(node), "aten::mean": lambda self, node: self.add_mean(node), "aten::quantize_per_tensor": lambda self, node: self.add_quantize(node), "aten::dequantize": lambda self, node: self.add_dequantize(node), "aten::add": lambda self, node: self.add_add_sub_op(node, NNAPI_OperationCode.ADD, NNAPI_FuseCode.FUSED_NONE), "aten::sub": lambda self, node: self.add_add_sub_op(node, NNAPI_OperationCode.SUB, NNAPI_FuseCode.FUSED_NONE), "aten::mul": lambda self, node: self.add_pointwise_simple_binary_broadcast_op(node, NNAPI_OperationCode.MUL, NNAPI_FuseCode.FUSED_NONE), "aten::div": lambda self, node: self.add_pointwise_simple_binary_broadcast_op(node, NNAPI_OperationCode.DIV, NNAPI_FuseCode.FUSED_NONE), "aten::relu": lambda self, node: self.add_pointwise_simple_unary_op(node, NNAPI_OperationCode.RELU), "aten::sigmoid": lambda self, node: self.add_pointwise_simple_unary_op(node, NNAPI_OperationCode.LOGISTIC), "aten::softmax": lambda self, node: self.add_softmax(node), "aten::hardtanh": lambda self, node: self.add_hardtanh(node), "aten::avg_pool2d": lambda self, node: self.add_avg_pool2d(node), "aten::max_pool2d": lambda self, node: self.add_pool2d_node(node, NNAPI_OperationCode.MAX_POOL_2D), "aten::adaptive_avg_pool2d": lambda self, node: 
self.add_adaptive_avg_pool2d(node), "aten::upsample_nearest2d": lambda self, node: self.add_upsample_nearest2d(node), "aten::prelu": lambda self, node: self.add_prelu_op(node), "aten::addmm": lambda self, node: self.add_addmm(node), "aten::linear": lambda self, node: self.add_linear(node), "aten::_convolution": lambda self, node: self.add_conv_underscore(node), "aten::conv2d": lambda self, node: self.add_conv2d(node), "aten::log_softmax": lambda self, node: self.add_log_softmax(node), "quantized::linear": lambda self, node: self.add_qlinear(node), "quantized::conv2d": lambda self, node: self.add_qconv2d(node, NNAPI_FuseCode.FUSED_NONE), "quantized::conv2d_relu": lambda self, node: self.add_qconv2d(node, NNAPI_FuseCode.FUSED_RELU), "quantized::conv_transpose2d": lambda self, node: self.add_qconv2d(node, NNAPI_FuseCode.FUSED_NONE, transpose=True), "quantized::add": lambda self, node: self.add_qadd(node, NNAPI_OperationCode.ADD, NNAPI_FuseCode.FUSED_NONE), "quantized::add_relu": lambda self, node: self.add_qadd(node, NNAPI_OperationCode.ADD, NNAPI_FuseCode.FUSED_RELU), "quantized::mul": lambda self, node: self.add_qadd(node, NNAPI_OperationCode.MUL, NNAPI_FuseCode.FUSED_NONE), } def add_node(self, node): adder = self.ADDER_MAP.get(node.kind()) if not adder: raise Exception("Unsupported node kind (%r) in node %r" % (node.kind(), node)) adder(self, node) def _identity(self, node): in_id, in_oper = self.get_tensor_operand_by_jitval(node.inputsAt(0)) jitval = node.outputsAt(0) self.jitval_operand_map[jitval] = in_id def add_getattr(self, node): assert node.inputsSize() == 1 assert node.outputsSize() == 1 obj_ctype, obj = self.get_constant_value(node.inputsAt(0)) assert str(obj_ctype).startswith("__torch__.") name = node.s("name") value = getattr(obj, name) output = node.outputsAt(0) ctype = output.type() self.add_constant_value(output, ctype, value) def add_constant_node(self, node): assert node.inputsSize() == 0 assert node.outputsSize() == 1 output = node.outputsAt(0) ctype = output.type() value = output.toIValue() self.add_constant_value(output, ctype, value) def add_list_construct(self, node): assert node.outputsSize() == 1 output = node.outputsAt(0) ctype = output.type() const_vals: Optional[List] = [] tensors: Optional[List] = [] for inp in node.inputs(): if const_vals is not None and inp in self.constants: _, val = self.get_constant_value(inp) const_vals.append(val) else: const_vals = None if tensors is not None and inp.type().kind() == "TensorType": tensors.append(inp) else: tensors = None if const_vals is not None: # NOTE: Now that TorchScript supports list constants, # this code path might not be used anymore. self.add_constant_value(output, ctype, const_vals) if tensors is not None: self.add_tensor_sequence(output, tensors) if const_vals is None and tensors is None: raise Exception( "Unable to handle ListConstruct node." " Neither all constants nor all tensors. 
%r" % node) def add_tuple_construct(self, node): assert node.outputsSize() == 1 output = node.outputsAt(0) values = [] for inp in node.inputs(): values.append(inp) self.add_tensor_sequence(output, values) def add_unsqueeze(self, node): assert node.inputsSize() == 2 assert node.outputsSize() == 1 in_id, in_oper = self.get_tensor_operand_by_jitval_fixed_size(node.inputsAt(0)) _, dim = self.get_constant_value(node.inputsAt(1), "IntType") assert in_oper.dim_order == DimOrder.PRESUMED_CONTIGUOUS real_dim = dim if dim >= 0 else dim + len(in_oper.shape) + 1 out_shape_list = list(in_oper.shape) out_shape_list.insert(real_dim, 1) out_shape = tuple(out_shape_list) out_oper = in_oper._replace(shape=out_shape) inputs = [None] * 2 inputs[0] = in_id inputs[1] = self.add_immediate_int_scalar(dim) outputs = [None] * 1 outputs[0] = self.add_tensor_operand(node.outputsAt(0), out_oper) self.add_operation(NNAPI_OperationCode.EXPAND_DIMS, inputs, outputs) def add_to(self, node): # Handle to("cpu") / to("gpu") case self._identity(node) def add_reshape(self, node): assert node.inputsSize() == 2 assert node.outputsSize() == 1 in_id, in_oper = self.get_tensor_operand_by_jitval_fixed_size(node.inputsAt(0)) shape_ctype, shape = self.get_constant_value(node.inputsAt(1)) assert shape_ctype.kind() == "ListType" assert shape_ctype.getElementType().kind() == "IntType" is_trivial_reshape = len(shape) == 2 and shape[1] == -1 if in_oper.dim_order != DimOrder.PRESUMED_CONTIGUOUS and not is_trivial_reshape: raise Exception( "Currently, reshape is only supported on NHWC tensors if the target size is [X, -1].") # Bit of a hack here. Use a real tensor to infer the output shape. out_shape = torch.zeros(1).expand(in_oper.shape).reshape(shape).shape out_oper = in_oper._replace(shape=out_shape, dim_order=DimOrder.PRESUMED_CONTIGUOUS) inputs = [None] * 2 inputs[0] = in_id inputs[1] = self.add_immediate_int_vector(shape) outputs = [None] * 1 outputs[0] = self.add_tensor_operand(node.outputsAt(0), out_oper) self.add_operation(NNAPI_OperationCode.RESHAPE, inputs, outputs) def add_flatten(self, node): assert node.inputsSize() == 3 assert node.outputsSize() == 1 in_id, in_oper = self.get_tensor_operand_by_jitval(node.inputsAt(0)) start_ctype, start_dim = self.get_constant_value(node.inputsAt(1), "IntType") end_ctype, end_dim = self.get_constant_value(node.inputsAt(2), "IntType") # channels last with channels == 1 or (height & width both 1) is_trivial_flatten = len(in_oper.shape) == 4 and ( in_oper.shape[1] == 1 or (in_oper.shape[2] == 1 and in_oper.shape[3] == 1)) if in_oper.dim_order != DimOrder.PRESUMED_CONTIGUOUS and not is_trivial_flatten: raise Exception( "Currently, flatten is not supported on NHWC tensors unless C=1 or H=W=1") if start_dim < 0: start_dim += len(in_oper.shape) if end_dim < 0: end_dim += len(in_oper.shape) out_shape = ( in_oper.shape[: start_dim] + (functools.reduce( lambda x, y: x * y, in_oper.shape[start_dim: end_dim + 1]),) + in_oper.shape[end_dim + 1:] ) if any(dim == 0 for dim in in_oper.shape[start_dim: end_dim + 1]): raise Exception("Flattening flexible dims is not supported yet") non_flattened_dims = in_oper.shape[: start_dim] + in_oper.shape[end_dim + 1:] if non_flattened_dims.count(0) > 1: raise Exception("Only 1 dim can be flexible") out_oper = in_oper._replace(shape=out_shape, dim_order=DimOrder.PRESUMED_CONTIGUOUS) out_id = self.add_tensor_operand(node.outputsAt(0), out_oper) for idx, dim in enumerate(out_shape): if dim == 0: self.forward_operand_shape(out_id, idx, in_id, in_oper.shape.index(0)) inputs_1 
= tuple( dim if dim != 0 else -1 for dim in out_shape ) inputs = [None] * 2 inputs[0] = in_id inputs[1] = self.add_immediate_int_vector(inputs_1) outputs = [None] * 1 outputs[0] = out_id self.add_operation(NNAPI_OperationCode.RESHAPE, inputs, outputs) def add_slice(self, node): assert node.inputsSize() == 5 assert node.outputsSize() == 1 in_id, in_oper = self.get_tensor_operand_by_jitval(node.inputsAt(0)) _, dim_value = self.get_constant_value(node.inputsAt(1)) _, start_value = self.get_constant_value(node.inputsAt(2)) _, stop_value = self.get_constant_value(node.inputsAt(3)) _, step_value = self.get_constant_value(node.inputsAt(4)) if start_value is None: start_value = 0 if stop_value is None: stop_value = sys.maxsize if start_value < 0: start_value += in_oper.shape[dim_value] elif start_value == sys.maxsize: start_value = 0 if start_value == 0 and stop_value == sys.maxsize: self._identity(node) return if in_oper.shape[dim_value] == 0: raise Exception("Unable to slice with flexible shape") if stop_value < 0: stop_value += in_oper.shape[dim_value] elif stop_value == sys.maxsize: stop_value = in_oper.shape[dim_value] if start_value >= stop_value: raise Exception("Slice start value should be less than stop value") out_len = (stop_value - start_value) // step_value out_shape = tuple(out_len if i == dim_value else dim for i, dim in enumerate(in_oper.shape)) out_id = self.add_tensor_operand(node.outputsAt(0), in_oper._replace(shape=out_shape)) # flex inputs end_mask = 0 for idx, dim in enumerate(out_shape): if dim == 0: self.forward_operand_shape(out_id, idx, in_id, idx) end_mask |= (1 << idx) inputs = [None] * 7 inputs[0] = in_id inputs[1] = self.add_immediate_int_vector( [start_value if i == dim_value else 0 for i in range(len(in_oper.shape))]) inputs[2] = self.add_immediate_int_vector( [stop_value if i == dim_value else dim for i, dim in enumerate(in_oper.shape)]) inputs[3] = self.add_immediate_int_vector( [step_value if i == dim_value else 1 for i in range(len(in_oper.shape))]) inputs[4] = self.add_immediate_int_scalar(0) # begin mask inputs[5] = self.add_immediate_int_scalar(end_mask) inputs[6] = self.add_immediate_int_scalar(0) # shrink axis mas outputs = [None] * 1 outputs[0] = out_id self.add_operation(NNAPI_OperationCode.STRIDED_SLICE, inputs, outputs) def add_size(self, node): assert node.inputsSize() == 2 assert node.outputsSize() == 1 _, in_oper = self.get_tensor_operand_by_jitval_fixed_size(node.inputsAt(0)) _, value = self.constants[node.inputsAt(1)] res = in_oper.shape[value] output = node.outputsAt(0) self.add_constant_value(output, output.type(), res) def add_cat(self, node): assert node.inputsSize() == 2 assert node.outputsSize() == 1 tensors = self.tensor_sequences[node.inputsAt(0)] _, dim = self.get_constant_value(node.inputsAt(1), "IntType") assert len(tensors) > 0 in_ids = [] out_oper = None out_dim_size = 0 for inp in tensors: in_id, in_oper = self.get_tensor_operand_by_jitval(inp) if out_oper is None: out_shape = change_element(in_oper.shape, dim, -1) out_oper = in_oper._replace(shape=out_shape) assert in_oper.op_type == out_oper.op_type assert in_oper.dim_order == out_oper.dim_order assert change_element(in_oper.shape, dim, -1) == change_element(out_oper.shape, dim, -1) # TODO: Possibly check scale and zero point. in_ids.append(in_id) # TODO: Possibly support variable-sized inputs. 
out_dim_size += in_oper.shape[dim] assert out_oper is not None out_oper = out_oper._replace(shape=change_element(out_oper.shape, dim, out_dim_size)) if in_oper.dim_order == DimOrder.CHANNELS_LAST: assert len(out_oper.shape) == 4 nnapi_dim = [0, 3, 1, 2][dim] else: nnapi_dim = dim out_id = self.add_tensor_operand(node.outputsAt(0), out_oper) for idx, d in enumerate(out_oper.shape): if d == 0: if idx == dim: shape = " + ".join(flex_name(ip_id, dim) for ip_id in in_ids) self.compute_operand_shape(out_id, idx, shape) else: self.forward_operand_shape(out_id, idx, in_ids[0], idx) inputs = in_ids + [self.add_immediate_int_scalar(nnapi_dim)] outputs = [None] * 1 outputs[0] = out_id self.add_operation(NNAPI_OperationCode.CONCATENATION, inputs, outputs) def add_mean(self, node): assert node.inputsSize() == 4 assert node.outputsSize() == 1 in_id, in_oper = self.get_tensor_operand_by_jitval_fixed_size(node.inputsAt(0)) dim_ctype, dim = self.get_constant_value(node.inputsAt(1)) assert dim_ctype.kind() == "ListType" assert dim_ctype.getElementType().kind() == "IntType" _, keep_dim = self.get_constant_value(node.inputsAt(2), "BoolType") # Expect None for dtype self.get_constant_value(node.inputsAt(3), "NoneType") if in_oper.dim_order == DimOrder.CHANNELS_LAST: assert len(in_oper.shape) == 4 nnapi_dim = [[0, 3, 1, 2][d] for d in dim] else: nnapi_dim = dim collapsed_dims = set() for d in dim: if d < 0: d += len(in_oper.shape) collapsed_dims.add(d) if in_oper.dim_order == DimOrder.CHANNELS_LAST and not keep_dim: assert collapsed_dims.issuperset({2, 3}) out_dim_order = DimOrder.PRESUMED_CONTIGUOUS else: out_dim_order = in_oper.dim_order out_shape = [] for i, s in enumerate(in_oper.shape): if i not in collapsed_dims: out_shape.append(s) elif keep_dim: out_shape.append(1) out_oper = in_oper._replace(shape=out_shape, dim_order=out_dim_order) inputs = [None] * 3 inputs[0] = in_id inputs[1] = self.add_immediate_int_vector(nnapi_dim) inputs[2] = self.add_immediate_int_scalar(keep_dim) outputs = [None] * 1 outputs[0] = self.add_tensor_operand(node.outputsAt(0), out_oper) self.add_operation(NNAPI_OperationCode.MEAN, inputs, outputs) def add_quantize(self, node): assert node.inputsSize() == 4 assert node.outputsSize() == 1 in_id, in_oper = self.get_tensor_operand_by_jitval_fixed_size(node.inputsAt(0)) if in_oper.dim_order != DimOrder.CHANNELS_LAST: raise Exception( "Most hardware backends prefer NHWC quantized tensors. " "Try setting `t.nnapi_nhwc = True` on your tensor inputs. 
") _, scale = self.get_constant_value(node.inputsAt(1), "FloatType") _, zero_point = self.get_constant_value(node.inputsAt(2), "IntType") _, scalar_type = self.get_constant_value(node.inputsAt(3), "IntType") if scalar_type != TorchScalarTypes.QUINT8.value: raise Exception( "PyTorch NNAPI export only supports quantized tensors " "with the quint8 dtype.") op_type = NNAPI_OperandCode.TENSOR_QUANT8_ASYMM out_oper = in_oper._replace( op_type=op_type, scale=scale, zero_point=zero_point, ) inputs = [None] * 1 inputs[0] = in_id outputs = [None] * 1 outputs[0] = self.add_tensor_operand(node.outputsAt(0), out_oper) self.add_operation(NNAPI_OperationCode.QUANTIZE, inputs, outputs) def add_dequantize(self, node): assert node.inputsSize() == 1 assert node.outputsSize() == 1 in_id, in_oper = self.get_tensor_operand_by_jitval_fixed_size(node.inputsAt(0)) out_oper = in_oper._replace( op_type=NNAPI_OperandCode.TENSOR_FLOAT32, scale=0.0, zero_point=0, ) inputs = [None] * 1 inputs[0] = in_id outputs = [None] * 1 outputs[0] = self.add_tensor_operand(node.outputsAt(0), out_oper) self.add_operation(NNAPI_OperationCode.DEQUANTIZE, inputs, outputs) def add_pointwise_simple_unary_op(self, node, opcode): assert node.inputsSize() == 1 assert node.outputsSize() == 1 in_id, in_oper = self.get_tensor_operand_by_jitval(node.inputsAt(0)) out_oper = in_oper if opcode == NNAPI_OperationCode.LOGISTIC: # NNAPI docs: For ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, the scale # must be 1.f / 256 and the zeroPoint must be 0. # https://fburl.com/h52stoog if in_oper.op_type == NNAPI_OperandCode.TENSOR_QUANT8_ASYMM: out_oper = in_oper._replace(zero_point=0, scale=1.0 / 256) out_id = self.add_tensor_operand(node.outputsAt(0), out_oper) for idx, dim in enumerate(in_oper.shape): if dim == 0: self.forward_operand_shape(out_id, idx, in_id, idx) inputs = [None] * 1 inputs[0] = in_id outputs = [None] * 1 outputs[0] = out_id self.add_operation(opcode, inputs, outputs) def _do_add_binary(self, node, opcode, fuse_code, *, qparams=None): """Helper for pointwise binary broadcast ops with superfluous extra args""" assert node.outputsSize() == 1 assert node.inputsAt(0).type().kind() == "TensorType" assert node.inputsAt(1).type().kind() == "TensorType" if self.has_operand_for_jitval(node.inputsAt(0)): in0_id, in0_oper = self.get_tensor_operand_by_jitval(node.inputsAt(0)) in1_id, in1_oper = self.get_tensor_operand_or_constant(node.inputsAt(1), in0_oper.dim_order) elif self.has_operand_for_jitval(node.inputsAt(1)): in1_id, in1_oper = self.get_tensor_operand_by_jitval(node.inputsAt(1)) in0_id, in0_oper = self.get_tensor_operand_or_constant(node.inputsAt(0), in1_oper.dim_order) else: raise Exception(f"Can't do a NNAPI binary op: {opcode} on two constants") assert in0_oper.op_type == in1_oper.op_type in0_id, in0_oper, in1_id, in1_oper = self.transpose_for_broadcast( in0_id, in0_oper, in1_id, in1_oper) # NOTE: PyTorch and NNAPI have the same broadcast semantics. 
out_shape = broadcast_shapes(in0_oper.shape, in1_oper.shape) out_oper = in0_oper._replace(shape=out_shape) if qparams is not None: scale, zp = qparams out_oper = out_oper._replace(scale=scale, zero_point=zp) out_id = self.add_tensor_operand(node.outputsAt(0), out_oper) for idx, (d0, d1) in enumerate(zip(in0_oper.shape, in1_oper.shape)): if d0 == 1 and d1 == 0: self.forward_operand_shape(out_id, idx, in1_id, idx) elif d0 == 0 and d1 == 1: self.forward_operand_shape(out_id, idx, in0_id, idx) elif d0 == 0 and d1 == 0: self.flexible_shape_computation_lines.append( f"assert {flex_name(in0_id, idx)} == {flex_name(in1_id, idx)}" ) self.forward_operand_shape(out_id, idx, in0_id, idx) inputs = [None] * 3 inputs[0] = in0_id inputs[1] = in1_id inputs[2] = self.add_immediate_int_scalar(fuse_code) outputs = [None] * 1 outputs[0] = out_id self.add_operation(opcode, inputs, outputs) def add_pointwise_simple_binary_broadcast_op(self, node, opcode, fuse_code): assert node.inputsSize() == 2 self._do_add_binary(node, opcode, fuse_code) def add_add_sub_op(self, node, opcode, fuse_code): assert node.inputsSize() == 3 _, alpha = self.get_constant_value(node.inputsAt(2), "IntType") if alpha != 1: raise Exception("NNAPI does not support add/sub with alpha.") self._do_add_binary(node, opcode, fuse_code) def add_qadd(self, node, opcode, fuse_code): assert node.inputsSize() == 4 _, scale = self.get_constant_value(node.inputsAt(2), "FloatType") _, zero_point = self.get_constant_value(node.inputsAt(3), "IntType") self._do_add_binary(node, opcode, fuse_code, qparams=(scale, zero_point)) def add_softmax(self, node): assert node.inputsSize() == 3 in_id, in_oper = self.get_tensor_operand_by_jitval(node.inputsAt(0)) _, softmax_dim = self.get_constant_value(node.inputsAt(1), "IntType") out_id = self.add_tensor_operand(node.outputsAt(0), in_oper) for dim, size in enumerate(in_oper.shape): if size == 0: self.forward_operand_shape(out_id, dim, in_id, dim) inputs = [None] * 3 inputs[0] = in_id inputs[1] = self.add_immediate_float_scalar(1.0) # positive scaling factor of exponent, beta inputs[2] = self.add_immediate_int_scalar(softmax_dim) outputs = [None] * 1 outputs[0] = out_id self.add_operation(NNAPI_OperationCode.SOFTMAX, inputs, outputs) def add_hardtanh(self, node): assert node.inputsSize() == 3 assert node.outputsSize() == 1 in_id, in_oper = self.get_tensor_operand_by_jitval_fixed_size(node.inputsAt(0)) _, min_val = self.get_constant_value(node.inputsAt(1), "FloatType") _, max_val = self.get_constant_value(node.inputsAt(2), "FloatType") op_map = { (-1, 1): NNAPI_OperationCode.RELU1, ( 0, 6): NNAPI_OperationCode.RELU6, # noqa: E201 } opcode = op_map.get((min_val, max_val)) if opcode is None: raise Exception("NNAPI only supports hardtanh with args (-1, 1) or (0, 6).") inputs = [None] * 1 inputs[0] = in_id outputs = [None] * 1 outputs[0] = self.add_tensor_operand(node.outputsAt(0), in_oper) self.add_operation(opcode, inputs, outputs) def add_prelu_op(self, node): assert node.inputsSize() == 2 assert node.outputsSize() == 1 assert node.inputsAt(0).type().kind() == "TensorType" assert node.inputsAt(1).type().kind() == "TensorType" in_id, in_oper = self.get_tensor_operand_by_jitval(node.inputsAt(0)) w_id, w_oper = self.get_tensor_operand_for_weight(node.inputsAt(1)) assert len(w_oper.shape) == 1 assert w_oper.shape[0] > 0 if w_oper.shape[0] > 1: if in_oper.use_nchw(): # TODO: Support this by adding trailing 1 dims. 
raise Exception("Per-channel PReLU only supports channels_last right now.") out_id = self.add_tensor_operand(node.outputsAt(0), in_oper) for dim, size in enumerate(in_oper.shape): if size > 0: pass elif dim <= 1: raise Exception("PReLU requires fixed size for dim 0 and dim 1.") else: self.forward_operand_shape(out_id, dim, in_id, dim) inputs = [None] * 2 inputs[0] = in_id inputs[1] = w_id outputs = [None] * 1 outputs[0] = out_id self.add_operation(NNAPI_OperationCode.PRELU, inputs, outputs) def add_pool2d_node(self, node, opcode): assert node.inputsSize() == 6 assert node.outputsSize() == 1 image, kernel, stride, padding, dilation, ceil_mode = node.inputs() stride = stride or kernel # TODO: Validate ceil_mode semantics. args = self.get_conv_pool_args_2d_from_jit(self.get_size_arg(kernel), stride, padding, dilation) if args.dilation_h != 1 or args.dilation_w != 1: raise Exception("NNAPI does not support dilated pooling.") image_id, image_oper = self.get_tensor_operand_by_jitval_fixed_size(image) assert len(image_oper.shape) == 4 out_shape = get_conv_pool_shape(image_oper.shape, args, image_oper.shape[1], False) use_nchw = image_oper.use_nchw() inputs = [None] * 11 inputs[0] = image_id inputs[1] = self.add_immediate_int_scalar(args.pad_l) inputs[2] = self.add_immediate_int_scalar(args.pad_r) inputs[3] = self.add_immediate_int_scalar(args.pad_t) inputs[4] = self.add_immediate_int_scalar(args.pad_b) inputs[5] = self.add_immediate_int_scalar(args.stride_w) inputs[6] = self.add_immediate_int_scalar(args.stride_h) inputs[7] = self.add_immediate_int_scalar(args.kernel_w) inputs[8] = self.add_immediate_int_scalar(args.kernel_h) inputs[9] = self.add_immediate_int_scalar(NNAPI_FuseCode.FUSED_NONE) inputs[10] = self.add_immediate_bool_scalar(use_nchw) outputs = [None] * 1 outputs[0] = self.add_tensor_operand(node.outputsAt(0), image_oper._replace(shape=out_shape)) self.add_operation(opcode, inputs, outputs) def add_avg_pool2d(self, node): assert node.inputsSize() == 7 assert node.outputsSize() == 1 image, kernel, stride, padding, ceil_mode, count_include_pad, divisor_override = node.inputs() _, count_include_pad_value = self.get_constant_value(count_include_pad) _, divisor_override_value = self.get_constant_value(divisor_override) if not count_include_pad_value or divisor_override_value: raise Exception("NNAPI doesn't support count_include_pad=False or divisor_override") args = self.get_conv_pool_args_2d_from_jit(self.get_size_arg(kernel), stride, padding) image_id, image_oper = self.get_tensor_operand_by_jitval(image) assert len(image_oper.shape) == 4 out_shape = get_conv_pool_shape(image_oper.shape, args, image_oper.shape[1], False) use_nchw = image_oper.use_nchw() inputs = [None] * 11 inputs[0] = image_id inputs[1] = self.add_immediate_int_scalar(args.pad_l) inputs[2] = self.add_immediate_int_scalar(args.pad_r) inputs[3] = self.add_immediate_int_scalar(args.pad_t) inputs[4] = self.add_immediate_int_scalar(args.pad_b) inputs[5] = self.add_immediate_int_scalar(args.stride_w) inputs[6] = self.add_immediate_int_scalar(args.stride_h) inputs[7] = self.add_immediate_int_scalar(args.kernel_w) inputs[8] = self.add_immediate_int_scalar(args.kernel_h) inputs[9] = self.add_immediate_int_scalar(NNAPI_FuseCode.FUSED_NONE) inputs[10] = self.add_immediate_bool_scalar(use_nchw) outputs = [None] * 1 out_id = self.add_tensor_operand(node.outputsAt(0), image_oper._replace(shape=out_shape)) self._handle_conv_pool_flexible_input(out_id, image, args, False) outputs[0] = out_id 
self.add_operation(NNAPI_OperationCode.AVERAGE_POOL_2D, inputs, outputs) def add_adaptive_avg_pool2d(self, node): assert node.inputsSize() == 2 assert node.outputsSize() == 1 image_id, image_oper = self.get_tensor_operand_by_jitval_fixed_size(node.inputsAt(0)) assert len(image_oper.shape) == 4 size_ctype, size_arg = self.get_constant_value(node.inputsAt(1)) assert size_ctype.kind() == "ListType" assert size_ctype.getElementType().kind() == "IntType" if size_arg != [1, 1]: raise Exception("NNAPI only supports adaptive_avg_pool2d with output size (1, 1).") out_shape = image_oper.shape[0:2] + tuple(size_arg) use_nchw = image_oper.use_nchw() inputs = [None] * 11 inputs[0] = image_id inputs[1] = self.add_immediate_int_scalar(0) inputs[2] = self.add_immediate_int_scalar(0) inputs[3] = self.add_immediate_int_scalar(0) inputs[4] = self.add_immediate_int_scalar(0) inputs[5] = self.add_immediate_int_scalar(1) inputs[6] = self.add_immediate_int_scalar(1) inputs[7] = self.add_immediate_int_scalar(image_oper.shape[3]) inputs[8] = self.add_immediate_int_scalar(image_oper.shape[2]) inputs[9] = self.add_immediate_int_scalar(NNAPI_FuseCode.FUSED_NONE) inputs[10] = self.add_immediate_bool_scalar(use_nchw) outputs = [None] * 1 outputs[0] = self.add_tensor_operand(node.outputsAt(0), image_oper._replace(shape=out_shape)) self.add_operation(NNAPI_OperationCode.AVERAGE_POOL_2D, inputs, outputs) def add_upsample_nearest2d(self, node): assert node.inputsSize() == 3 or node.inputsSize() == 4 assert node.outputsSize() == 1 if node.inputsSize() == 3: image, size_jit, scale_jit = node.inputs() else: image, size_jit, scale_h_jit, scale_w_jit = node.inputs() size_ctype, size_arg = self.get_constant_value(size_jit) if node.inputsSize() == 3: scale_ctype, scale_arg = self.get_constant_value(scale_jit) else: scale_h_ctype, scale_h_arg = self.get_constant_value(scale_h_jit) scale_w_ctype, scale_w_arg = self.get_constant_value(scale_w_jit) # The only way for the 4-argument overload of upsample_nearest2d to # have been added to the graph without error is if the scale_h and # scale_w arguments are None assert scale_h_ctype.kind() == "NoneType" assert scale_w_ctype.kind() == "NoneType" scale_ctype = scale_h_ctype scale_arg = scale_h_arg image_id, image_oper = self.get_tensor_operand_by_jitval(image) assert len(image_oper.shape) == 4 if size_ctype.kind() != "NoneType" and scale_ctype.kind() != "NoneType": raise Exception("Size and scale cannot both be non-None.") elif size_ctype.kind() != "NoneType": assert size_ctype.kind() == "ListType" assert size_ctype.getElementType().kind() == "IntType" assert scale_ctype.kind() == "NoneType" assert scale_arg is None assert isinstance(size_arg, list) assert size_arg assert all(isinstance(val, int) for val in size_arg) if len(size_arg) == 1: size_arg = size_arg * 2 assert len(size_arg) == 2 out_h = size_arg[0] out_w = size_arg[1] arg_h = self.add_immediate_int_scalar(out_h) arg_w = self.add_immediate_int_scalar(out_w) elif scale_ctype.kind() != "NoneType": assert scale_ctype.kind() == "ListType" assert scale_ctype.getElementType().kind() == "FloatType" assert size_ctype.kind() == "NoneType" assert size_arg is None assert isinstance(scale_arg, list) assert scale_arg assert all(isinstance(val, float) for val in scale_arg) if len(scale_arg) == 1: scale_arg = scale_arg * 2 assert len(scale_arg) == 2 out_h = int(scale_arg[0] * image_oper.shape[2]) out_w = int(scale_arg[1] * image_oper.shape[3]) arg_h = self.add_immediate_float_scalar(scale_arg[0]) arg_w = 
self.add_immediate_float_scalar(scale_arg[1]) else: raise Exception("Size and scale cannot both be None.") out_shape = (image_oper.shape[0], image_oper.shape[1], out_h, out_w) use_nchw = image_oper.use_nchw() out_id = self.add_tensor_operand(node.outputsAt(0), image_oper._replace(shape=out_shape)) if image_oper.shape[0] == 0 or image_oper.shape[1] == 0: raise Exception("Flexible batch or channels not supported") # Handle variable input size for dim in (2, 3): # h, w indices if image_oper.shape[dim] == 0: if size_ctype.kind() != "NoneType": self.compute_operand_shape(out_id, dim, size_arg[dim - 2]) elif scale_ctype.kind() != "NoneType": self.compute_operand_shape(out_id, dim, f"int({scale_arg[dim - 2]} * {flex_name(image_id, dim)})") else: raise Exception("Size and scale cannot both be None.") inputs = [None] * 4 inputs[0] = image_id inputs[1] = arg_w inputs[2] = arg_h inputs[3] = self.add_immediate_bool_scalar(use_nchw) outputs = [None] * 1 outputs[0] = out_id self.add_operation(NNAPI_OperationCode.RESIZE_NEAREST_NEIGHBOR, inputs, outputs) def add_addmm(self, node): assert node.inputsSize() == 5 assert node.outputsSize() == 1 jit_bias, jit_input, jit_weight, jit_beta, jit_alpha = node.inputs() for jitval in (jit_beta, jit_alpha): scale_ctype, scale_value = self.get_constant_value(jitval) assert scale_ctype.kind() in ("IntType", "FloatType") if scale_value != 1: raise Exception("NNAPI Fully-Connected does not support alpha and beta.") self.add_addmm_or_linear(node, True, jit_input, jit_weight, jit_bias) def add_linear(self, node): assert node.inputsSize() == 3 assert node.outputsSize() == 1 jit_input, jit_weight, jit_bias = node.inputs() self.add_addmm_or_linear(node, False, jit_input, jit_weight, jit_bias) def add_addmm_or_linear(self, node, transpose_weight, jit_input, jit_weight, jit_bias): input_id, input_oper = self.get_tensor_operand_by_jitval(jit_input) bias_id, bias_oper = self.get_tensor_operand_for_weight(jit_bias) assert len(input_oper.shape) == 2 assert len(bias_oper.shape) == 1 # TODO: Transform at load time to share weights with CPU model. 
_, weight_tensor = self.get_constant_value(jit_weight, "TensorType") assert len(weight_tensor.shape) == 2 if transpose_weight: nnapi_weight_tensor = weight_tensor.t().contiguous() else: nnapi_weight_tensor = weight_tensor.contiguous() weight_id = self.add_tensor_operand_for_weight(nnapi_weight_tensor) weight_oper = self.operands[weight_id] out_shape = (input_oper.shape[0], weight_oper.shape[0]) out_id = self.add_tensor_operand(node.outputsAt(0), input_oper._replace(shape=out_shape)) if input_oper.shape[0] == 0: self.forward_operand_shape(out_id, 0, input_id, 0) inputs = [None] * 4 inputs[0] = input_id inputs[1] = weight_id inputs[2] = bias_id inputs[3] = self.add_immediate_int_scalar(NNAPI_FuseCode.FUSED_NONE) outputs = [None] * 1 outputs[0] = out_id self.add_operation(NNAPI_OperationCode.FULLY_CONNECTED, inputs, outputs) def add_qlinear(self, node): assert node.inputsSize() == 4 assert node.outputsSize() == 1 ( jit_input, jit_packed_weight, jit_scale, jit_zero_point, ) = node.inputs() input_id, input_oper = self.get_tensor_operand_by_jitval_fixed_size(jit_input) # TODO: Support automatic reshape assert len(input_oper.shape) == 2 _, out_scale = self.get_constant_value(jit_scale, "FloatType") _, out_zero_point = self.get_constant_value(jit_zero_point, "IntType") weight_ctype, packed_weight = self.get_constant_value(jit_packed_weight) assert weight_ctype.name() == "LinearPackedParamsBase" raw_weight, raw_bias = packed_weight.__getstate__()[0] assert raw_bias is not None assert len(raw_weight.shape) == 2 assert len(raw_bias.shape) == 1 assert raw_bias.shape[0] == raw_weight.shape[0] assert raw_weight.shape[1] == input_oper.shape[1] assert raw_weight.qscheme() == torch.per_tensor_affine if raw_weight.dtype == torch.quint8: unsigned_weight = raw_weight else: assert raw_weight.dtype == torch.qint8 unsigned_weight = torch._make_per_tensor_quantized_tensor( (raw_weight.int_repr().int() + 128).to(torch.uint8), scale=raw_weight.q_scale(), zero_point=raw_weight.q_zero_point() + 128) weight_scale = unsigned_weight.q_scale() bias_scale = input_oper.scale * weight_scale int_bias = torch.quantize_per_tensor(raw_bias, bias_scale, 0, torch.qint32) bias_id = self.add_tensor_operand_for_weight(int_bias) multiplier = input_oper.scale * weight_scale / out_scale assert multiplier > 0 if multiplier >= 1: raise Exception( "Quantized convolution multiplier is greater than 1. " "This is supported by NNAPI, but not by most hardware backends. " "Try training a model without quantization-aware training. ") # TODO: Transform at load time to share weights with CPU model. 
nnapi_weight_tensor = unsigned_weight.contiguous() weight_id = self.add_tensor_operand_for_weight(nnapi_weight_tensor) weight_oper = self.operands[weight_id] out_shape = (input_oper.shape[0], weight_oper.shape[0]) out_oper = input_oper._replace( shape=out_shape, scale=out_scale, zero_point=out_zero_point, ) inputs = [None] * 4 inputs[0] = input_id inputs[1] = weight_id inputs[2] = bias_id inputs[3] = self.add_immediate_int_scalar(NNAPI_FuseCode.FUSED_NONE) outputs = [None] * 1 outputs[0] = self.add_tensor_operand(node.outputsAt(0), out_oper) self.add_operation(NNAPI_OperationCode.FULLY_CONNECTED, inputs, outputs) def get_optional_bias(self, jit_bias, weight_tensor, transpose=False): ctype, value = self.get_constant_value(jit_bias) if ctype.kind() == "NoneType": bias_idx = 1 if transpose else 0 nnapi_bias_tensor = torch.zeros(weight_tensor.size()[bias_idx], dtype=weight_tensor.dtype) bias_id = self.add_tensor_operand_for_weight(nnapi_bias_tensor) bias_oper = self.operands[bias_id] return bias_id, bias_oper else: return self.get_tensor_operand_for_weight(jit_bias) def add_conv2d(self, node): assert node.inputsSize() == 7 assert node.outputsSize() == 1 ( jit_image, jit_weight, jit_bias, jit_stride, jit_pad, jit_dilation, jit_groups, ) = node.inputs() _, weight_tensor = self.get_constant_value(jit_weight, "TensorType") bias_id, bias_oper = self.get_optional_bias(jit_bias, weight_tensor) args = self.get_conv_pool_args_2d_from_jit( weight_tensor.shape[2:4], jit_stride, jit_pad, jit_dilation, jit_groups) return self.add_conv2d_common( node.outputsAt(0), 0.0, 0, jit_image, weight_tensor, bias_id, args, False, # transpose NNAPI_FuseCode.FUSED_NONE, ) def add_conv_underscore(self, node): assert node.inputsSize() == 13 assert node.outputsSize() == 1 ( jit_image, jit_weight, jit_bias, jit_stride, jit_pad, jit_dilation, jit_transpose, _, jit_groups, _, _, _, _, ) = node.inputs() _, weight_tensor = self.get_constant_value(jit_weight, "TensorType") _, transpose = self.get_constant_value(jit_transpose) bias_id, bias_oper = self.get_optional_bias(jit_bias, weight_tensor, transpose) args = self.get_conv_pool_args_2d_from_jit( weight_tensor.shape[2:4], jit_stride, jit_pad, jit_dilation, jit_groups) return self.add_conv2d_common( node.outputsAt(0), 0.0, 0, jit_image, weight_tensor, bias_id, args, transpose, NNAPI_FuseCode.FUSED_NONE, ) def add_log_softmax(self, node): assert node.inputsSize() == 3 assert node.outputsSize() == 1 ( jit_input, jit_dim, jit_half_to_float ) = node.inputs() input_id, input_oper = self.get_tensor_operand_by_jitval_fixed_size(jit_input) _, dim = self.get_constant_value(jit_dim, "IntType") out_shape = input_oper.shape inputs = [None] * 3 inputs[0] = input_id # specifying 1 as the scaling factor for the exponent, beta inputs[1] = self.add_immediate_float_scalar(1) inputs[2] = self.add_immediate_int_scalar(dim) outputs = [None] * 1 outputs[0] = self.add_tensor_operand(node.outputsAt(0), input_oper._replace(shape=out_shape)) self.add_operation(NNAPI_OperationCode.LOG_SOFTMAX, inputs, outputs) def add_qconv2d(self, node, fuse_code, transpose=False): assert node.inputsSize() == 4 assert node.outputsSize() == 1 ( jit_image, jit_packed_weight, jit_scale, jit_zero_point, ) = node.inputs() _, out_scale = self.get_constant_value(jit_scale, "FloatType") _, out_zero_point = self.get_constant_value(jit_zero_point, "IntType") weight_ctype, packed_weight = self.get_constant_value(jit_packed_weight) assert weight_ctype.name() == "Conv2dPackedParamsBase" ( pack_version, tensors, opt_tensors, ) = 
packed_weight.__getstate__()[0] assert pack_version == "2" packed_config, raw_weight = tensors raw_bias, = opt_tensors assert raw_bias is not None args = self.get_conv_pool_args_2d_from_pack(raw_weight.shape[2:4], packed_config) assert raw_weight.qscheme() == torch.per_tensor_affine if raw_weight.dtype == torch.quint8: unsigned_weight = raw_weight else: assert raw_weight.dtype == torch.qint8 unsigned_weight = torch._make_per_tensor_quantized_tensor( (raw_weight.int_repr().int() + 128).to(torch.uint8), scale=raw_weight.q_scale(), zero_point=raw_weight.q_zero_point() + 128) weight_scale = unsigned_weight.q_scale() _, image_oper = self.get_tensor_operand_by_jitval(jit_image) bias_scale = image_oper.scale * weight_scale int_bias = torch.quantize_per_tensor(raw_bias, bias_scale, 0, torch.qint32) bias_id = self.add_tensor_operand_for_weight(int_bias) multiplier = image_oper.scale * weight_scale / out_scale assert multiplier > 0 if multiplier >= 1: raise Exception( "Quantized convolution multiplier is greater than 1. " "This is supported by NNAPI, but not by most hardware backends. " "Try training a model without quantization-aware training. ") return self.add_conv2d_common( node.outputsAt(0), out_scale, out_zero_point, jit_image, unsigned_weight, bias_id, args, transpose, fuse_code, ) def add_conv2d_common( self, jit_out, out_scale, out_zero_point, jit_image, weight_tensor, bias_id, args, transpose, fuse_code): image_id, image_oper = self.get_tensor_operand_by_jitval(jit_image) in_c = image_oper.shape[1] if args.group == 1: # Full convolution depthwise = False if transpose: weight_permutation = (1, 2, 3, 0) else: weight_permutation = (0, 2, 3, 1) elif args.group == in_c: # Depthwise convolution depthwise = True weight_permutation = (1, 2, 3, 0) else: raise Exception("Group convolution not supported yet.") # TODO: Transform at load time to share weights with CPU model. 
nnapi_weight_tensor = weight_tensor.permute(*weight_permutation).contiguous() weight_id = self.add_tensor_operand_for_weight(nnapi_weight_tensor) weight_oper = self.operands[weight_id] bias_oper = self.operands[bias_id] if image_oper.op_type == NNAPI_OperandCode.TENSOR_FLOAT32: assert weight_oper.op_type == NNAPI_OperandCode.TENSOR_FLOAT32 assert bias_oper.op_type == NNAPI_OperandCode.TENSOR_FLOAT32 elif image_oper.op_type == NNAPI_OperandCode.TENSOR_QUANT8_ASYMM: assert weight_oper.op_type == NNAPI_OperandCode.TENSOR_QUANT8_ASYMM assert bias_oper.op_type == NNAPI_OperandCode.TENSOR_INT32 assert approx_equal(image_oper.scale * weight_oper.scale, bias_oper.scale) assert bias_oper.zero_point == 0 else: raise Exception( "Unsupported input type for conv2d: {}" .format(image_oper.op_type)) assert len(image_oper.shape) == 4 assert len(weight_oper.shape) == 4 assert len(bias_oper.shape) == 1 if depthwise: # Depthwise convolution one, kern_h, kern_w, out_c = weight_oper.shape assert one == 1 assert out_c % in_c == 0 channel_multiplier = out_c // in_c assert channel_multiplier == 1 # Don't support multiplier assert out_c == in_c else: # Full convolution out_c, kern_h, kern_w, kern_d = weight_oper.shape assert kern_d == in_c assert out_c == bias_oper.shape[0] use_nchw = image_oper.use_nchw() if depthwise: num_args = 12 opcode = NNAPI_OperationCode.DEPTHWISE_CONV_2D else: num_args = 11 if transpose: opcode = NNAPI_OperationCode.TRANSPOSE_CONV_2D else: opcode = NNAPI_OperationCode.CONV_2D inputs = [None] * num_args inputs[0] = image_id inputs[1] = weight_id inputs[2] = bias_id inputs[3] = self.add_immediate_int_scalar(args.pad_l) inputs[4] = self.add_immediate_int_scalar(args.pad_r) inputs[5] = self.add_immediate_int_scalar(args.pad_t) inputs[6] = self.add_immediate_int_scalar(args.pad_b) inputs[7] = self.add_immediate_int_scalar(args.stride_w) inputs[8] = self.add_immediate_int_scalar(args.stride_h) if depthwise: inputs[9] = self.add_immediate_int_scalar(1) inputs[10] = self.add_immediate_int_scalar(fuse_code) inputs[11] = self.add_immediate_bool_scalar(use_nchw) else: inputs[9] = self.add_immediate_int_scalar(fuse_code) inputs[10] = self.add_immediate_bool_scalar(use_nchw) outputs = [None] * 1 out_shape = get_conv_pool_shape(image_oper.shape, args, out_c, transpose) out_oper = image_oper._replace( shape=out_shape, scale=out_scale, zero_point=out_zero_point, ) out_id = self.add_tensor_operand(jit_out, out_oper) self._handle_conv_pool_flexible_input(out_id, jit_image, args, transpose) outputs[0] = out_id self.add_operation(opcode, inputs, outputs) def _handle_conv_pool_flexible_input(self, out_id, jit_image, args, transpose): image_id, image_oper = self.get_tensor_operand_by_jitval(jit_image) batch, in_ch, in_h, in_w = image_oper.shape if batch == 0: self.forward_operand_shape(out_id, 0, image_id, 0) if in_ch == 0: raise Exception("Input channels can't be flexible") # H & W if transpose: if in_h == 0: self.compute_operand_shape( out_id, 2, f"({flex_name(image_id, 2)} - 1) * {args.stride_h} + {args.kernel_h} - {args.pad_t} - {args.pad_b}" ) if in_w == 0: self.compute_operand_shape( out_id, 3, f"({flex_name(image_id, 3)} - 1) * {args.stride_w} + {args.kernel_w} - {args.pad_l} - {args.pad_r}" ) else: if in_h == 0: self.compute_operand_shape( out_id, 2, f"({flex_name(image_id, 2)} - {args.kernel_h} + {args.pad_t} + {args.pad_b}) // {args.stride_h} + 1" ) if in_w == 0: self.compute_operand_shape( out_id, 3, f"({flex_name(image_id, 3)} - {args.kernel_w} + {args.pad_l} + {args.pad_r}) // {args.stride_w} + 1" 
) def serialize_model(module, inputs, *, config=None, return_shapes=None, use_int16_for_qint16=False): """Convert to NNAPI and serialize torchscript module: Parameters: module: Torchscript module to convert inputs: Tensors used to specify input details for NNAPI config (optional): Optional config to attach to module return_shapes (optional): Specify shape of outputs if your module uses runtime flexible shapes to set output buffer size for NNAPI use_int16_for_qint16 (optional): Use Pytorch int16 to represent NNAPI qint16 values """ return _NnapiSerializer(config, use_int16_for_qint16).serialize_model(module, inputs, return_shapes)
pytorch-master
torch/backends/_nnapi/serializer.py
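# Usage sketch for torch/backends/_nnapi/serializer.py above: a minimal, hedged
# example of driving serialize_model() with a tiny traced module.  `TinyRelu`
# and `example_input` are illustrative placeholders, not part of the file, and
# the exact tracing/freezing steps may vary by PyTorch version.
import torch
from torch.backends._nnapi.serializer import serialize_model


class TinyRelu(torch.nn.Module):
    def forward(self, x):
        return torch.relu(x)


example_input = torch.zeros(1, 3, 8, 8)
traced = torch.jit.freeze(torch.jit.trace(TinyRelu().eval(), example_input))

# serialize_model returns the flat int32 NNAPI model plus the weight tensors
# and metadata that prepare.py uses to build the runnable wrapper module.
(ser_model, used_weights, inp_dim_orders, out_dim_orders,
 shape_compute_lines, retval_count) = serialize_model(traced, [example_input])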
from typing import Optional, List import torch from torch.backends._nnapi.serializer import _NnapiSerializer ANEURALNETWORKS_PREFER_LOW_POWER = 0 ANEURALNETWORKS_PREFER_FAST_SINGLE_ANSWER = 1 ANEURALNETWORKS_PREFER_SUSTAINED_SPEED = 2 class NnapiModule(torch.nn.Module): """Torch Module that wraps an NNAPI Compilation. This module handles preparing the weights, initializing the NNAPI TorchBind object, and adjusting the memory formats of all inputs and outputs. """ # _nnapi.Compilation is defined comp: Optional[torch.classes._nnapi.Compilation] # type: ignore[name-defined] weights: List[torch.Tensor] out_templates: List[torch.Tensor] def __init__( self, shape_compute_module: torch.nn.Module, ser_model: torch.Tensor, weights: List[torch.Tensor], inp_mem_fmts: List[int], out_mem_fmts: List[int], compilation_preference: int, relax_f32_to_f16: bool, ): super().__init__() self.shape_compute_module = shape_compute_module self.ser_model = ser_model self.weights = weights self.inp_mem_fmts = inp_mem_fmts self.out_mem_fmts = out_mem_fmts self.out_templates = [] self.comp = None self.compilation_preference = compilation_preference self.relax_f32_to_f16 = relax_f32_to_f16 @torch.jit.export def init(self, args: List[torch.Tensor]): assert self.comp is None self.out_templates = self.shape_compute_module.prepare(self.ser_model, args) # type: ignore[operator] self.weights = [w.contiguous() for w in self.weights] comp = torch.classes._nnapi.Compilation() comp.init2(self.ser_model, self.weights, self.compilation_preference, self.relax_f32_to_f16) self.comp = comp def forward(self, args: List[torch.Tensor]) -> List[torch.Tensor]: if self.comp is None: self.init(args) comp = self.comp assert comp is not None outs = [torch.empty_like(out) for out in self.out_templates] assert len(args) == len(self.inp_mem_fmts) fixed_args = [] for idx in range(len(args)): fmt = self.inp_mem_fmts[idx] # These constants match the values in DimOrder in serializer.py # TODO: See if it's possible to use those directly. if fmt == 0: fixed_args.append(args[idx].contiguous()) elif fmt == 1: fixed_args.append(args[idx].permute(0, 2, 3, 1).contiguous()) else: raise Exception("Invalid mem_fmt") comp.run(fixed_args, outs) assert len(outs) == len(self.out_mem_fmts) for idx in range(len(self.out_templates)): fmt = self.out_mem_fmts[idx] # These constants match the values in DimOrder in serializer.py # TODO: See if it's possible to use those directly. if fmt in (0, 2): pass elif fmt == 1: outs[idx] = outs[idx].permute(0, 3, 1, 2) else: raise Exception("Invalid mem_fmt") return outs def convert_model_to_nnapi( model, inputs, serializer=None, return_shapes=None, use_int16_for_qint16=False, compilation_preference=ANEURALNETWORKS_PREFER_SUSTAINED_SPEED, relax_f32_to_f16=False, ): (shape_compute_module, ser_model_tensor, used_weights, inp_mem_fmts, out_mem_fmts, retval_count) = process_for_nnapi(model, inputs, serializer, return_shapes, use_int16_for_qint16) nnapi_model = NnapiModule( shape_compute_module, ser_model_tensor, used_weights, inp_mem_fmts, out_mem_fmts, compilation_preference, relax_f32_to_f16 ) class NnapiInterfaceWrapper(torch.nn.Module): """NNAPI list-ifying and de-list-ifying wrapper. NNAPI always expects a list of inputs and provides a list of outputs. This module allows us to accept inputs as separate arguments. It returns results as either a single tensor or tuple, matching the original module. 
""" def __init__(self, mod): super().__init__() self.mod = mod wrapper_model_py = NnapiInterfaceWrapper(nnapi_model) wrapper_model = torch.jit.script(wrapper_model_py) # TODO: Maybe make these names match the original. arg_list = ", ".join(f"arg_{idx}" for idx in range(len(inputs))) if retval_count < 0: ret_expr = "retvals[0]" else: ret_expr = "".join(f"retvals[{idx}], " for idx in range(retval_count)) wrapper_model.define( f"def forward(self, {arg_list}):\n" f" retvals = self.mod([{arg_list}])\n" f" return {ret_expr}\n" ) return wrapper_model def process_for_nnapi(model, inputs, serializer=None, return_shapes=None, use_int16_for_qint16=False): model = torch.jit.freeze(model) if isinstance(inputs, torch.Tensor): inputs = [inputs] serializer = serializer or _NnapiSerializer(config=None, use_int16_for_qint16=use_int16_for_qint16) (ser_model, used_weights, inp_mem_fmts, out_mem_fmts, shape_compute_lines, retval_count) = serializer.serialize_model(model, inputs, return_shapes) ser_model_tensor = torch.tensor(ser_model, dtype=torch.int32) # We have to create a new class here every time this function is called # because module.define adds a method to the *class*, not the instance. class ShapeComputeModule(torch.nn.Module): """Code-gen-ed module for tensor shape computation module.prepare will mutate ser_model according to the computed operand shapes, based on the shapes of args. Returns a list of output templates. """ pass shape_compute_module = torch.jit.script(ShapeComputeModule()) real_shape_compute_lines = [ "def prepare(self, ser_model: torch.Tensor, args: List[torch.Tensor]) -> List[torch.Tensor]:\n", ] + [ f" {line}\n" for line in shape_compute_lines ] shape_compute_module.define("".join(real_shape_compute_lines)) return ( shape_compute_module, ser_model_tensor, used_weights, inp_mem_fmts, out_mem_fmts, retval_count, )
pytorch-master
torch/backends/_nnapi/prepare.py
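# Usage sketch for torch/backends/_nnapi/prepare.py above: converting a small
# convolutional module with convert_model_to_nnapi().  `TinyConv` is an
# illustrative placeholder; the conversion assumes a host build of PyTorch that
# registers the _nnapi TorchBind class, and actually running the converted
# module still requires an Android device with NNAPI.
import torch
from torch.backends._nnapi.prepare import convert_model_to_nnapi


class TinyConv(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.conv = torch.nn.Conv2d(3, 8, kernel_size=3, padding=1)

    def forward(self, x):
        return torch.relu(self.conv(x))


example_input = torch.zeros(1, 3, 16, 16)
traced = torch.jit.trace(TinyConv().eval(), example_input)

# Marking the input as NHWC (DimOrder.CHANNELS_LAST in serializer.py) is
# optional for float models but is the layout most NNAPI drivers prefer.
nnapi_input = example_input.contiguous(memory_format=torch.channels_last)
nnapi_input.nnapi_nhwc = True

nnapi_model = convert_model_to_nnapi(traced, nnapi_input)
torch.jit.save(nnapi_model, "tiny_conv_nnapi.pt")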
import torch def is_available(): r"""Returns whether PyTorch is built with OpenMP support.""" return torch._C.has_openmp
pytorch-master
torch/backends/openmp/__init__.py
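# Usage sketch for torch/backends/openmp/__init__.py above: is_available()
# only reports whether the build was compiled with OpenMP; thread counts are
# controlled separately (the value 4 below is an arbitrary example).
import torch

if torch.backends.openmp.is_available():
    torch.set_num_threads(4)  # intra-op parallelism thread count
print(torch.__config__.parallel_info())  # shows the parallel backend in use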
import sys import os import torch import warnings from contextlib import contextmanager from torch.backends import ContextProp, PropModule, __allow_nonbracketed_mutation try: from torch._C import _cudnn except ImportError: _cudnn = None # type: ignore[assignment] # Write: # # torch.backends.cudnn.enabled = False # # to globally disable CuDNN/MIOpen __cudnn_version = None if _cudnn is not None: def _init(): global __cudnn_version if __cudnn_version is None: __cudnn_version = _cudnn.getVersionInt() runtime_version = _cudnn.getRuntimeVersion() compile_version = _cudnn.getCompileVersion() runtime_major, runtime_minor, _ = runtime_version compile_major, compile_minor, _ = compile_version # Different major versions are always incompatible # Starting with cuDNN 7, minor versions are backwards-compatible # Not sure about MIOpen (ROCm), so always do a strict check if runtime_major != compile_major: cudnn_compatible = False elif runtime_major < 7 or not _cudnn.is_cuda: cudnn_compatible = runtime_minor == compile_minor else: cudnn_compatible = runtime_minor >= compile_minor if not cudnn_compatible: base_error_msg = (f'cuDNN version incompatibility: ' f'PyTorch was compiled against {compile_version} ' f'but found runtime version {runtime_version}. ' f'PyTorch already comes bundled with cuDNN. ' f'One option to resolving this error is to ensure PyTorch ' f'can find the bundled cuDNN.') if 'LD_LIBRARY_PATH' in os.environ: ld_library_path = os.environ.get('LD_LIBRARY_PATH', '') if any(substring in ld_library_path for substring in ['cuda', 'cudnn']): raise RuntimeError(f'{base_error_msg}' f'Looks like your LD_LIBRARY_PATH contains incompatible version of cudnn' f'Please either remove it from the path or install cudnn {compile_version}') else: raise RuntimeError(f'{base_error_msg}' f'one possibility is that there is a ' f'conflicting cuDNN in LD_LIBRARY_PATH.') else: raise RuntimeError(base_error_msg) return True else: def _init(): return False def version(): """Returns the version of cuDNN""" if not _init(): return None return __cudnn_version CUDNN_TENSOR_DTYPES = { torch.half, torch.float, torch.double, } def is_available(): r"""Returns a bool indicating if CUDNN is currently available.""" return torch._C.has_cudnn def is_acceptable(tensor): if not torch._C._get_cudnn_enabled(): return False if tensor.device.type != 'cuda' or tensor.dtype not in CUDNN_TENSOR_DTYPES: return False if not is_available(): warnings.warn( "PyTorch was compiled without cuDNN/MIOpen support. To use cuDNN/MIOpen, rebuild " "PyTorch making sure the library is visible to the build system.") return False if not _init(): warnings.warn('cuDNN/MIOpen library not found. 
Check your {libpath}'.format( libpath={ 'darwin': 'DYLD_LIBRARY_PATH', 'win32': 'PATH' }.get(sys.platform, 'LD_LIBRARY_PATH'))) return False return True def set_flags(_enabled=None, _benchmark=None, _benchmark_limit=None, _deterministic=None, _allow_tf32=None): orig_flags = (torch._C._get_cudnn_enabled(), torch._C._get_cudnn_benchmark(), None if not is_available() else torch._C._cuda_get_cudnn_benchmark_limit(), torch._C._get_cudnn_deterministic(), torch._C._get_cudnn_allow_tf32()) if _enabled is not None: torch._C._set_cudnn_enabled(_enabled) if _benchmark is not None: torch._C._set_cudnn_benchmark(_benchmark) if _benchmark_limit is not None and is_available(): torch._C._cuda_set_cudnn_benchmark_limit(_benchmark_limit) if _deterministic is not None: torch._C._set_cudnn_deterministic(_deterministic) if _allow_tf32 is not None: torch._C._set_cudnn_allow_tf32(_allow_tf32) return orig_flags @contextmanager def flags(enabled=False, benchmark=False, benchmark_limit=10, deterministic=False, allow_tf32=True): with __allow_nonbracketed_mutation(): orig_flags = set_flags(enabled, benchmark, benchmark_limit, deterministic, allow_tf32) try: yield finally: # recover the previous values with __allow_nonbracketed_mutation(): set_flags(*orig_flags) # The magic here is to allow us to intercept code like this: # # torch.backends.<cudnn|mkldnn>.enabled = True class CudnnModule(PropModule): def __init__(self, m, name): super(CudnnModule, self).__init__(m, name) enabled = ContextProp(torch._C._get_cudnn_enabled, torch._C._set_cudnn_enabled) deterministic = ContextProp(torch._C._get_cudnn_deterministic, torch._C._set_cudnn_deterministic) benchmark = ContextProp(torch._C._get_cudnn_benchmark, torch._C._set_cudnn_benchmark) benchmark_limit = None if is_available(): benchmark_limit = ContextProp(torch._C._cuda_get_cudnn_benchmark_limit, torch._C._cuda_set_cudnn_benchmark_limit) allow_tf32 = ContextProp(torch._C._get_cudnn_allow_tf32, torch._C._set_cudnn_allow_tf32) # This is the sys.modules replacement trick, see # https://stackoverflow.com/questions/2447353/getattr-on-a-module/7668273#7668273 sys.modules[__name__] = CudnnModule(sys.modules[__name__], __name__) # Add type annotation for the replaced module enabled: bool deterministic: bool benchmark: bool allow_tf32: bool benchmark_limit: int
pytorch-master
torch/backends/cudnn/__init__.py
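The flags() context manager defined above saves the current cuDNN flags, applies the requested values, and restores the originals on exit; bare attribute assignment also works while global flags are not frozen. A short sketch (the body of the with-block is a placeholder):

import torch

# Scoped override: autotune convolution algorithms only inside this block.
with torch.backends.cudnn.flags(enabled=True, benchmark=True, deterministic=False, allow_tf32=True):
    pass  # run convolution-heavy code here

# Equivalent global toggle through the ContextProp attributes.
torch.backends.cudnn.benchmark = True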
import torch.cuda

try:
    from torch._C import _cudnn
except ImportError:
    # Uses of all the functions below should be guarded by torch.backends.cudnn.is_available(),
    # so it's safe to not emit any checks here.
    _cudnn = None  # type: ignore[assignment]


def get_cudnn_mode(mode):
    if mode == 'RNN_RELU':
        return int(_cudnn.RNNMode.rnn_relu)
    elif mode == 'RNN_TANH':
        return int(_cudnn.RNNMode.rnn_tanh)
    elif mode == 'LSTM':
        return int(_cudnn.RNNMode.lstm)
    elif mode == 'GRU':
        return int(_cudnn.RNNMode.gru)
    else:
        raise Exception("Unknown mode: {}".format(mode))


# NB: We don't actually need this class anymore (in fact, we could serialize the
# dropout state for even better reproducibility), but it is kept for backwards
# compatibility for old models.
class Unserializable(object):

    def __init__(self, inner):
        self.inner = inner

    def get(self):
        return self.inner

    def __getstate__(self):
        # Note: can't return {}, because python2 won't call __setstate__
        # if the value evaluates to False
        return "<unserializable>"

    def __setstate__(self, state):
        self.inner = None


def init_dropout_state(dropout, train, dropout_seed, dropout_state):
    dropout_desc_name = 'desc_' + str(torch.cuda.current_device())
    dropout_p = dropout if train else 0
    if (dropout_desc_name not in dropout_state) or (dropout_state[dropout_desc_name].get() is None):
        if dropout_p == 0:
            dropout_state[dropout_desc_name] = Unserializable(None)
        else:
            dropout_state[dropout_desc_name] = Unserializable(torch._cudnn_init_dropout_state(  # type: ignore[call-arg]
                dropout_p,
                train,
                dropout_seed,
                self_ty=torch.uint8,
                device=torch.device('cuda')))
    dropout_ts = dropout_state[dropout_desc_name].get()
    return dropout_ts
pytorch-master
torch/backends/cudnn/rnn.py
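These helpers are internal to the cuDNN RNN path. The sketch below only illustrates the mode mapping and assumes a CUDA build where torch._C._cudnn is present; it is not part of the source above.

from torch.backends.cudnn.rnn import get_cudnn_mode

# 'LSTM' maps to the integer value of _cudnn.RNNMode.lstm; unknown strings raise.
lstm_mode = get_cudnn_mode('LSTM')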
import functools from enum import Enum from typing import Callable, List, Optional, Tuple import torch import torch._prims_common as utils import torch.nn.functional as F from torch import Tensor from torch._decomp import register_decomposition from torch._prims_common.wrappers import out_wrapper from torch.utils._pytree import tree_flatten, tree_map # None of these functions are publicly accessible; get at them # from torch._decomps __all__: List[str] = [] aten = torch.ops.aten class Reduction(Enum): NONE = 0 MEAN = 1 SUM = 2 # This wraps a decomposition and performs various type promotion logic within it, depending on the strategy provided # We're currently re-using ELEMENTWISE_TYPE_PROMOTION_KIND, although some of the usages are on non-elementwise ops # Will need to validate the non-elementwise uses def type_casts(f: Callable, type_promotion: utils.ELEMENTWISE_TYPE_PROMOTION_KIND): @functools.wraps(f) def inner(*args, **kwargs): flat_args = [ x for x in tree_flatten((args, kwargs))[0] if isinstance(x, Tensor) ] computation_dtype, result_dtype = utils.elementwise_dtypes( *flat_args, type_promotion_kind=type_promotion ) # TODO: pretty sure this is not quite right def increase_prec(x): if isinstance(x, Tensor): return x.to(computation_dtype) else: return x def decrease_prec(x): if isinstance(x, Tensor): return x.to(result_dtype) else: return x r = f(*tree_map(increase_prec, args), **tree_map(increase_prec, kwargs)) return tree_map(decrease_prec, r) return inner pw_cast_for_opmath = functools.partial( type_casts, type_promotion=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT ) reduction_complex_to_real = functools.partial( type_casts, type_promotion=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.COMPLEX_TO_FLOAT ) pw_cast_for_int_to_real = functools.partial( type_casts, type_promotion=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT ) # This expands x until x.dim() == dim. 
Might be useful as an operator def _unsqueeze_to_dim(x: Tensor, dim: int): for _ in range(dim - x.dim()): x = x.unsqueeze(-1) return x @register_decomposition(aten.tanh_backward) @pw_cast_for_opmath def tanh_backward(out_grad: Tensor, y: Tensor): return out_grad * (1 - y * y).conj_physical() @register_decomposition(aten.sigmoid_backward) @pw_cast_for_opmath def sigmoid_backward(out_grad: Tensor, y: Tensor): return out_grad * (y * (1 - y)).conj_physical() @register_decomposition(aten.softplus_backward) @pw_cast_for_opmath def softplus_backward(out_grad: Tensor, x: Tensor, beta: float, threshold: float): z = (x * beta).exp() return torch.where((x * beta) > threshold, out_grad, out_grad * z / (z + 1.0)) @register_decomposition(aten.elu) @pw_cast_for_opmath def elu( self: Tensor, alpha: float = 1, scale: float = 1, input_scale: float = 1 ) -> Tensor: negcoef = alpha * scale poscoef = scale negiptcoef = input_scale return torch.where( self > 0, self * poscoef, (torch.exp(self * negiptcoef) - 1) * negcoef ) @register_decomposition(aten.elu_backward) @pw_cast_for_opmath def elu_backward( grad_output: Tensor, alpha: float, scale: float, input_scale: float, is_result: bool, self_or_result: Tensor, ): negcoef = alpha * scale poscoef = scale negiptcoef = input_scale if is_result: return torch.where( self_or_result <= 0, grad_output * negiptcoef * (self_or_result + negcoef), self_or_result * poscoef, ) else: return torch.where( self_or_result <= 0, grad_output * negiptcoef * negcoef * torch.exp(self_or_result * negiptcoef), grad_output * poscoef, ) @register_decomposition(aten.hardsigmoid) @pw_cast_for_opmath def hardsigmoid(self: Tensor) -> Tensor: return torch.clamp(torch.clamp(self + 3, min=0), max=6) / 6 @register_decomposition(aten.hardsigmoid_backward) @pw_cast_for_opmath def hardsigmoid_backward(grad_output: Tensor, self: Tensor): return torch.where( (self > -3.0) & (self < 3.0), grad_output * (1.0 / 6.0), grad_output.new_zeros(()), ) @register_decomposition(aten.hardtanh_backward) @pw_cast_for_opmath def hardtanh_backward( grad_output: Tensor, self: Tensor, min_val: float, max_val: float ): return torch.where( (self <= min_val) | (self >= max_val), grad_output.new_zeros(()), grad_output ) @register_decomposition(aten.hardshrink_backward) @pw_cast_for_opmath def hardshrink_backward(grad_out: Tensor, self: Tensor, lambd: float): return torch.where( (self >= -lambd) & (self <= lambd), grad_out.new_zeros(()), grad_out ) @register_decomposition(aten.hardswish) @pw_cast_for_opmath def hardswish(self: Tensor) -> Tensor: return self * torch.clamp(torch.clamp(self + 3, min=0), max=6) / 6 @register_decomposition(aten.hardswish_backward) @pw_cast_for_opmath def hardswish_backward(grad_output: Tensor, self: Tensor) -> Tensor: return torch.where( self < -3, grad_output.new_zeros(()), torch.where(self <= 3, grad_output * ((self / 3) + 0.5), grad_output), ) @register_decomposition(aten.threshold_backward) @pw_cast_for_opmath def threshold_backward(grad_output: Tensor, self: Tensor, threshold: float): return torch.where(self <= threshold, grad_output.new_zeros(()), grad_output) @register_decomposition(aten.leaky_relu_backward) @pw_cast_for_opmath def leaky_relu_backward( grad_output: Tensor, self: Tensor, negative_slope: float, self_is_result: bool ): return torch.where(self > 0, grad_output, grad_output * negative_slope) @register_decomposition(aten.gelu_backward) @pw_cast_for_opmath def gelu_backward(grad: Tensor, self: Tensor, approximate: str = "none"): M_SQRT2 = 1.41421356237309504880 M_SQRT1_2 = 
0.70710678118654752440 M_2_SQRTPI = 1.12837916709551257390 if approximate == "tanh": kBeta = M_SQRT2 * M_2_SQRTPI * 0.5 kKappa = 0.044715 x_sq = self * self x_cube = x_sq * self inner = kBeta * (self + kKappa * x_cube) tanh_inner = torch.tanh(inner) left = 0.5 * self right = 1 + tanh_inner left_derivative = 0.5 * right tanh_derivative = 1 - tanh_inner * tanh_inner inner_derivative = kBeta * (1 + 3 * kKappa * x_sq) right_derivative = left * tanh_derivative * inner_derivative return grad * (left_derivative + right_derivative) else: kAlpha = M_SQRT1_2 kBeta = M_2_SQRTPI * M_SQRT1_2 * 0.5 cdf = 0.5 * (1 + torch.erf(self * kAlpha)) pdf = kBeta * torch.exp(self * self * -0.5) return grad * (cdf + self * pdf) @register_decomposition(aten.mish_backward) @pw_cast_for_opmath def mish_backward(grad_output: Tensor, input: Tensor): input_tanh_softplus = torch.tanh(F.softplus(input)) input_sigmoid = torch.sigmoid(input) out = input * input_sigmoid * (1 - input_tanh_softplus * input_tanh_softplus) return grad_output * (input_tanh_softplus + out) @register_decomposition(aten.silu) @pw_cast_for_opmath def silu(self: Tensor) -> Tensor: return self * torch.sigmoid(self) @register_decomposition(aten.silu_backward) @pw_cast_for_opmath def silu_backward(grad_output: Tensor, self: Tensor) -> Tensor: sigmoid = 1 / (1 + torch.exp(-self)) return grad_output * sigmoid * (1 + self * (1 - sigmoid)) @register_decomposition(aten.softshrink_backward) def softshrink_backward(grad_output: Tensor, self: Tensor, lambd: float) -> Tensor: return torch.where( (self >= -lambd) & (self <= lambd), grad_output.new_zeros(()), grad_output ) @register_decomposition(aten.prelu_backward) @pw_cast_for_opmath def prelu_backward( grad_output: Tensor, self: Tensor, weight: Tensor ) -> Tuple[Tensor, Tensor]: # Logic is more complicated than I would like. Basically, weight can either # be a scalar or a vector of size [C], and in the forward pass it's # broadcast against [N, C, ...]. So now, we need to do the corresponding # reduction, which is harder than we'd like... 
cur_weight = weight for _ in range(2, grad_output.dim()): cur_weight = cur_weight.unsqueeze(-1) input_grad = torch.where(self > 0, grad_output, cur_weight * grad_output) weight_grad_collector = torch.where( self > 0, grad_output.new_zeros(()), self * grad_output ) out = weight_grad_collector.sum_to_size(cur_weight.shape) while out.dim() > weight.dim(): out = out.squeeze(-1) return (input_grad, out) @register_decomposition(aten.rrelu_with_noise_backward) @pw_cast_for_opmath def rrelu_with_noise_backward( grad_output: Tensor, self: Tensor, noise: Tensor, lower: float, upper: float, training: bool, self_is_result: bool, ) -> Tensor: if training and upper - lower > 1e-6: return grad_output.mul(noise) else: negative_slope = (lower + upper) / 2 return aten.leaky_relu_backward( grad_output, self, negative_slope, self_is_result ) @register_decomposition(aten.log_sigmoid_backward) @pw_cast_for_opmath def log_sigmoid_backward(grad_output: Tensor, self: Tensor, buffer: Tensor) -> Tensor: in_negative = self < 0 max_deriv = torch.where(in_negative, 1, 0) sign = torch.where(in_negative, 1, -1) z = torch.exp(-torch.abs(self)) return grad_output * (max_deriv - sign * (z / (1 + z))) # CPU has a special formula that uses buffer, but disabled for convenience sake # return (max_deriv - sign * (buffer / (1 + buffer))) * grad_output def apply_loss_reduction(loss: Tensor, reduction: int): if reduction == Reduction.MEAN.value: return torch.mean(loss) elif reduction == Reduction.SUM.value: return torch.sum(loss) else: return loss def to_real_dtype(dtype: torch.dtype): if dtype == torch.complex32: return torch.float16 elif dtype == torch.complex64: return torch.float32 elif dtype == torch.complex128: return torch.float64 # TODO: None of these loss castings are quite correct, see # https://github.com/pytorch/pytorch/issues/76870. Also, the ATen kernels # perform the pointwise portion in opmath, but don't maintain it between the # pointwise portion and the reduction @register_decomposition(aten.mse_loss) @pw_cast_for_opmath def mse_loss( self: Tensor, target: Tensor, reduction: int = Reduction.MEAN.value ) -> Tensor: loss = (self - target) ** 2 return apply_loss_reduction(loss, reduction) @register_decomposition(aten.mse_loss_backward) @pw_cast_for_opmath def mse_loss_backward( grad_output: Tensor, input: Tensor, target: Tensor, reduction: int ): norm = 2.0 / input.numel() if reduction == Reduction.MEAN.value else 2.0 return norm * (input - target) * grad_output @register_decomposition(aten.huber_loss) @pw_cast_for_opmath def huber_loss( self: Tensor, target: Tensor, reduction: int = Reduction.MEAN.value, delta: float = 1.0, ) -> Tensor: assert delta > 0, "huber_loss does not support non-positive values for delta." 
z = (self - target).abs() loss = torch.where(z < delta, 0.5 * z * z, delta * (z - 0.5 * delta)) return apply_loss_reduction(loss, reduction) @register_decomposition(aten.huber_loss_backward) @pw_cast_for_opmath def huber_loss_backward( grad_output: Tensor, self: Tensor, target: Tensor, reduction: int, delta: float ): norm = 1.0 / self.numel() if reduction == Reduction.MEAN.value else 1.0 x = self - target return torch.where( x < -delta, -norm * grad_output * delta, torch.where(x > delta, norm * grad_output * delta, norm * x * grad_output), ) def _nll_loss_backward( grad_output: Tensor, self: Tensor, target: Tensor, weight: Optional[Tensor], reduction: int, ignore_index: int, total_weight: Tensor, ) -> Tensor: channel_dim = 0 if self.dim() < 2 else 1 if reduction == Reduction.MEAN.value: grad_output = grad_output / total_weight target = target.unsqueeze(channel_dim) grad_input = torch.zeros_like(self) grad_input = torch.scatter(grad_input, channel_dim, target, -1.0) if grad_input.dim() > grad_output.dim() > 0: grad_output = grad_output.unsqueeze(channel_dim) if weight is not None: new_shape = [1 for _ in range(self.dim())] new_shape[channel_dim] = weight.shape[0] weight = weight.reshape(new_shape) grad_output = grad_output * weight has_ignore_index = ignore_index >= 0 if has_ignore_index: grad_output = torch.where(target != ignore_index, grad_output, 0) return grad_input * grad_output @register_decomposition(aten.glu_backward) @pw_cast_for_opmath def glu_backward(grad_output: Tensor, self: Tensor, dim: int) -> Tensor: assert self.dim() > 0, "glu does not support 0-dimensional tensors" wrap_dim = utils.canonicalize_dim(self.dim(), dim) nIn = self.size(wrap_dim) assert ( nIn % 2 == 0 ), f"Halving dimension must be even, but dimension {wrap_dim} is size {nIn}" inputSize = nIn // 2 firstHalf = self.narrow(wrap_dim, 0, inputSize) secondHalf = self.narrow(wrap_dim, inputSize, inputSize) gradInputFirstHalf = torch.sigmoid(secondHalf) gradInputSecondHalf = ( (1.0 - gradInputFirstHalf) * gradInputFirstHalf * firstHalf * grad_output ) gradInputFirstHalf = gradInputFirstHalf * grad_output return torch.cat([gradInputFirstHalf, gradInputSecondHalf], dim=wrap_dim) @register_decomposition(aten.nll_loss_backward) def nll_loss_backward( grad_output: Tensor, self: Tensor, target: Tensor, weight: Optional[Tensor], reduction: int, ignore_index: int, total_weight: Tensor, ) -> Tensor: assert 0 <= self.dim() <= 2, "input tensor should be 1D or 2D" assert ( target.dim() <= 1 ), "0D or 1D target tensor expected, multi-target not supported" no_batch_dim = self.dim() == 1 and target.dim() == 0 assert no_batch_dim or ( self.shape[0] == target.shape[0] ), f"size mismatch (got input: {self.shape}, target: {target.shape})" assert total_weight.numel() == 1, ( "expected total_weight to be a single element tensor, got: ", f"{total_weight.shape} ({total_weight.numel()} elements)", ) assert ( weight is None or weight.numel() == self.shape[-1] ), "weight tensor should be defined either for all or no classes" if reduction == Reduction.NONE.value and self.dim() == 2: assert grad_output.dim() == 1 and grad_output.shape[0] == self.shape[0], ( f"Expected a tensor of dimension 1 and tensor.size[0] == {self.shape[0]} but " f"got: dimension {grad_output.dim()} and tensor.size[0] == {grad_output.shape[0]}" ) else: assert ( grad_output.dim() <= 1 and grad_output.numel() == 1 ), f"Expected a single element grad_output tensor, but got: {grad_output.shape}" return _nll_loss_backward( grad_output, self, target, weight, reduction, 
ignore_index, total_weight ) @register_decomposition(aten.nll_loss2d_backward) def nll_loss2d_backward( grad_output: Tensor, self: Tensor, target: Tensor, weight: Optional[Tensor], reduction: int, ignore_index: int, total_weight: Tensor, ) -> Tensor: assert ( self.dim() == 4 ), f"only batches of spatial inputs supported (4D tensors), but got input of dimension: {self.dim()}" assert ( target.dim() == 3 ), f"only batches of spatial targets supported (3D tensors) but got targets of dimension: {target.dim()}" assert ( self.shape[0] == target.shape[0] and self.shape[2] == target.shape[1] and self.shape[3] == target.shape[2] ), f"size mismatch (got input: {self.shape}, target: {target.shape}" assert total_weight.numel() == 1, ( "expected total_weight to be a single element tensor, " f"got: {total_weight.shape} ( {total_weight.numel()}, elements)" ) return _nll_loss_backward( grad_output, self, target, weight, reduction, ignore_index, total_weight ) @register_decomposition(aten.binary_cross_entropy) @pw_cast_for_opmath def binary_cross_entropy( self: Tensor, target: Tensor, weight: Optional[Tensor] = None, reduction: int = Reduction.MEAN.value, ) -> Tensor: # We cannot currently model this without introducing data-dependent control flow # TORCH_CHECK( # (input_val >= 0) && (input_val <= 1), # "all elements of input should be between 0 and 1" # ) loss = (target - 1) * torch.maximum( torch.log(1 - self), self.new_full((), -100) ) - target * torch.maximum(torch.log(self), self.new_full((), -100)) if weight is not None: loss = loss * weight return apply_loss_reduction(loss, reduction) @register_decomposition(aten.binary_cross_entropy_backward) @pw_cast_for_opmath def binary_cross_entropy_backward( grad_output: Tensor, self: Tensor, target: Tensor, weight: Optional[Tensor] = None, reduction: int = Reduction.MEAN.value, ) -> Tensor: EPSILON = 1e-12 result = grad_output * (self - target) / torch.clamp(self * (1 - self), min=EPSILON) if weight is not None: result = result * weight if reduction == Reduction.MEAN.value: result = result / self.numel() return result @register_decomposition(aten._euclidean_dist) def _euclidean_dist(x1: Tensor, x2: Tensor) -> Tensor: x1_norm = x1.pow(2).sum(-1, True) x1_pad = torch.ones_like(x1_norm, memory_format=torch.contiguous_format) x2_norm = x2.pow(2).sum(-1, True) x2_pad = torch.ones_like(x2_norm, memory_format=torch.contiguous_format) x1_ = torch.cat([x1.mul(-2), x1_norm, x1_pad], -1) x2_ = torch.cat([x2, x2_pad, x2_norm], -1) result = x1_.matmul(x2_.mT) return result.clamp_min(0).sqrt() @register_decomposition(aten.slice_backward) def slice_backward( grad_output: Tensor, input_sizes: List[int], dim: int, start: int, end: int, step: int, ): grad_input = grad_output.new_zeros(input_sizes) return torch.slice_scatter(grad_input, grad_output, dim, start, end, step) @register_decomposition(aten.select_backward) def select_backward(grad_output: Tensor, input_sizes: List[int], dim: int, index: int): grad_input = grad_output.new_zeros(input_sizes) return torch.select_scatter(grad_input, grad_output, dim, index) @register_decomposition(aten.diagonal_backward) def diagonal_backward( grad_output: Tensor, input_sizes: List[int], offset: int, dim1: int, dim2: int ): grad_input = grad_output.new_zeros(input_sizes) return torch.diagonal_scatter(grad_input, grad_output, offset, dim1, dim2) @register_decomposition(aten._softmax_backward_data) @pw_cast_for_opmath def _softmax_backward_data( grad_output: Tensor, output: Tensor, dim: int, input_dtype: int ): new_grad = grad_output * 
output return new_grad - output * torch.sum(new_grad, dim=dim, keepdim=True) @register_decomposition(aten._log_softmax_backward_data) @pw_cast_for_opmath def _log_softmax_backward_data( grad_output: Tensor, output: Tensor, dim: int, input_dtype: int ): grad_input = grad_output - torch.exp(output) * torch.sum( grad_output, dim=dim, keepdim=True ) return grad_input # TODO: the type annotations on arguments are not quite right @register_decomposition(aten.im2col_backward) def im2col_backward( grad_output: Tensor, input_size: List[int], kernel_size: List[int], dilation: List[int], padding: List[int], stride: List[int], ) -> Tensor: return F.fold(grad_output, input_size, kernel_size, dilation, padding, stride) # type: ignore[arg-type] @register_decomposition(aten.col2im_backward) def col2im_backward( grad_output: Tensor, kernel_size: List[int], dilation: List[int], padding: List[int], stride: List[int], ) -> Tensor: return F.unfold(grad_output, kernel_size, dilation, padding, stride) # type: ignore[arg-type] @register_decomposition(aten.native_dropout_backward) @pw_cast_for_opmath def native_dropout_backward(grad_output: Tensor, mask: Tensor, scale: float): return grad_output * (mask.type_as(grad_output) * scale) @register_decomposition(aten.logit_backward.default) @pw_cast_for_opmath def logit_backward( grad_output: Tensor, self: Tensor, eps: Optional[float] = None ) -> Tensor: if eps is not None: lo = eps hi = 1.0 - lo return torch.where( torch.logical_and(self >= lo, self <= hi), grad_output / (self * (1.0 - self)), self.new_zeros(()), ) else: return torch.where( torch.logical_and(self >= 0.0, self <= 1.0), grad_output / (self * (1.0 - self)), self.new_full((), float("nan")), ) @register_decomposition(aten.native_dropout) def native_dropout(input: Tensor, p: float, train: Optional[bool]): if train: bool_mask = torch.rand_like(input) > p res = bool_mask * input * float(1.0 / (1.0 - p)) return (res, bool_mask) else: return (input, torch.ones_like(input, dtype=torch.bool)) # TODO: Correct the type promotion semantics @register_decomposition(aten._softmax) @pw_cast_for_opmath def _softmax(x: Tensor, dim: int, half_to_float: bool): x_max = torch.amax(x, dim, keepdim=True) unnormalized = torch.exp(x - x_max) return unnormalized / torch.sum(unnormalized, dim, keepdim=True) # TODO: Correct the type promotion semantics @register_decomposition(aten._log_softmax) @pw_cast_for_opmath def _log_softmax(x: Tensor, dim: int, half_to_float: bool): x_max = torch.amax(x, dim, keepdim=True) shifted = x - x_max shifted_logsumexp = torch.log(torch.sum(torch.exp(shifted), dim, keepdim=True)) return shifted - shifted_logsumexp @register_decomposition(aten.addcdiv) @pw_cast_for_opmath def addcdiv(self: Tensor, tensor1: Tensor, tensor2: Tensor, value: float = 1): return self + value * (tensor1 / tensor2) # Remove special case when https://github.com/pytorch/pytorch/pull/72949 is landed. 
@register_decomposition(aten.addcmul) @pw_cast_for_opmath def addcmul(self: Tensor, tensor1: Tensor, tensor2: Tensor, value: float = 1): if self.is_floating_point() or self.is_complex(): return self + value * tensor1 * tensor2 else: return self + int(value) * tensor1 * tensor2 @register_decomposition(aten.rsub.Tensor) def rsub_Tensor(self: Tensor, other: Tensor, alpha: float = 1) -> Tensor: return torch.sub(other, self, alpha=alpha) @register_decomposition(aten.rsub.Scalar) def rsub_Scalar(self: Tensor, other: float, alpha: float = 1) -> Tensor: return torch.sub(other, self, alpha=alpha) @register_decomposition(aten.embedding) def embedding( weight: Tensor, indices: Tensor, padding_idx: int = -1, scale_grad_by_freq: bool = False, sparse: bool = False, ) -> Tensor: assert weight.dim() == 2, "'weight' must be 2-D" # TODO: Assert not ported over yet # auto indices_arg = TensorArg(indices, "indices", 1); # checkScalarTypes("embedding", indices_arg, {kLong, kInt}); if indices.dim() == 1: return weight.index_select(0, indices) size = list(indices.shape) for d in weight.shape[1:]: size.append(d) return weight.index_select(0, indices.reshape(-1)).view(size) # TODO: Correct the type promotion semantics @register_decomposition(aten.embedding_dense_backward) def embedding_dense_backward( grad_output: Tensor, indices: Tensor, num_weights: int, padding_idx: int, scale_grad_by_freq: bool, ): numel = indices.numel() grad = grad_output.reshape(numel, grad_output.size(-1)) grad_weight = grad_output.new_zeros((num_weights, grad_output.shape[-1])) indices_rank1 = indices.reshape(numel) if scale_grad_by_freq: counts = indices.new_zeros((num_weights,)) ones = indices.new_ones((numel,)) counts = counts.index_put([indices_rank1], ones, accumulate=True) grad_weights_scale = counts[indices_rank1] grad = grad / grad_weights_scale.unsqueeze(1) skip_padding = (indices_rank1 != padding_idx).unsqueeze(1) skip_padding = skip_padding.expand_as(grad) zero_grad = torch.full_like(grad, 0) return grad_weight.index_put( [indices_rank1], torch.where(skip_padding, grad, zero_grad), accumulate=True ) def prod(x: List[int]): r = 1 for i in x: r *= i return r @register_decomposition(aten.split_with_sizes, disable_meta=True) def split_with_sizes( self: Tensor, split_sizes: List[int], dim: int = 0 ) -> List[Tensor]: num_splits = len(split_sizes) splits = [] start_idx = 0 for i in range(num_splits): length = split_sizes[i] splits.append(self.narrow(dim, start_idx, length)) start_idx += length return splits @register_decomposition(aten.split.Tensor, disable_meta=True) def split(self: Tensor, split_size: int, dim: int = 0) -> List[Tensor]: input_sizes = self.shape dim_size = input_sizes[dim] if split_size == 0: assert dim_size == 0 return [self] chunks = (dim_size + split_size - 1) // split_size split_sizes = [split_size for i in range(chunks)] split_sizes[chunks - 1] = split_size - (split_size * chunks - dim_size) return torch.split(self, split_sizes, dim) # TODO: this doesn't appear to have enough precision in bfloat16 @register_decomposition(aten.addmm) @pw_cast_for_opmath def addmm(self: Tensor, mat1: Tensor, mat2: Tensor, beta: int = 1, alpha: int = 1): if not self.is_floating_point() and not self.is_complex(): beta = int(beta) alpha = int(alpha) out = alpha * torch.mm(mat1, mat2) if beta == 0: return out return beta * self + out # This computes the mean and variance along the specifized normalization dims, # then normalizes along those dims. Finally, it returns the mean and variance of # the normalized dims. 
Note that it intentionally leaves outputs upcasted. # Example: # input: [2, 3, 4, 5], norm_dims: [1, 3] # mean: [2, 1, 4, 1] def normalize(input, norm_dims, eps): computation_dtype = utils.get_computation_dtype(input.dtype) input_acc = input.to(dtype=computation_dtype) biased_var = torch.var(input_acc, dim=norm_dims, unbiased=False, keepdim=True) mean = torch.mean(input_acc, dim=norm_dims, keepdim=True) rstd = torch.rsqrt(biased_var + eps) out = (input - mean) * rstd return out, mean, rstd @register_decomposition(aten.native_group_norm.default, disable_meta=True) def native_group_norm( input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], N: int, C: int, HxW: int, group: int, eps: float, ) -> Tuple[Tensor, Tensor, Tensor]: orig_shape = input.shape input = input.view(N, group, C // group, HxW) reduction_dims = [2, 3] out, mean, rstd = normalize(input, reduction_dims, eps) mean = _squeeze_multiple(mean, reduction_dims) rstd = _squeeze_multiple(rstd, reduction_dims) out = out.view(orig_shape) if weight is not None: weight = _unsqueeze_to_dim(weight, out.dim() - 1) out = out * weight if bias is not None: bias = _unsqueeze_to_dim(bias, out.dim() - 1) out = out + bias out = out.to(dtype=input.dtype) mean = mean.to(dtype=input.dtype) rstd = rstd.to(dtype=input.dtype) return (out, mean, rstd) def _maybe_cast(x: Optional[Tensor], dtype) -> Optional[Tensor]: if x is not None: return x.to(dtype) return x # TODO: Take a closer look at the type promotion semantics @register_decomposition(aten.native_layer_norm_backward) def native_layer_norm_backward( grad_out: Tensor, input: Tensor, normalized_shape: List[int], mean: Tensor, rstd: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], output_mask: List[bool], ) -> Tuple[Optional[Tensor], Optional[Tensor], Optional[Tensor]]: input_shape = input.shape input_ndim = input.dim() computation_dtype = utils.get_computation_dtype(input.dtype) grad_out_cast, input_cast, weight_cast, bias_cast = [ x.to(computation_dtype) if x is not None else x for x in (grad_out, input, weight, bias) ] assert grad_out_cast is not None axis = input_ndim - len(normalized_shape) inner_dims = input_shape[axis:] outer_dims = input_shape[:axis] inner_dim_indices: List[int] = [] outer_dim_indices: List[int] = [] for i in range(input_ndim): if i >= axis: inner_dim_indices.append(i) else: outer_dim_indices.append(i) N = prod(inner_dims) # type: ignore[arg-type] M = prod(outer_dims) # type: ignore[arg-type] if M <= 0 or N <= 0: return ( input.new_zeros(input_shape), input.new_zeros(input_shape[axis:]), input.new_zeros(input_shape[axis:]), ) x_hat = (input_cast - mean) * rstd if weight_cast is not None: grad_x_hat = grad_out_cast * weight_cast else: grad_x_hat = grad_out_cast a = grad_x_hat * N b = torch.sum(grad_x_hat, inner_dim_indices, True) c1 = torch.mul(grad_x_hat, x_hat) c2 = torch.sum(c1, inner_dim_indices, True) c3 = torch.mul(x_hat, c2) inner = a - b - c3 d_input: Optional[Tensor] = None d_weight: Optional[Tensor] = None d_bias: Optional[Tensor] = None if output_mask[0]: d_input = (rstd / N) * inner if output_mask[1] and weight_cast is not None: if len(outer_dim_indices) > 0: d_weight = torch.sum(grad_out_cast * x_hat, outer_dim_indices, False) else: d_weight = grad_out_cast * x_hat if output_mask[2] and bias_cast is not None: if len(outer_dim_indices) > 0: d_bias = torch.sum(grad_out_cast, outer_dim_indices, False) else: d_bias = grad_out_cast return ( _maybe_cast(d_input, input.dtype), _maybe_cast(d_weight, input.dtype), _maybe_cast(d_bias, input.dtype), ) 
@register_decomposition(aten.native_batch_norm) def native_batch_norm( input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], running_mean: Optional[Tensor], running_var: Optional[Tensor], training: bool, momentum: float, eps: float, ) -> Tuple[Tensor, Tensor, Tensor]: reduction_dims = [0] + list(range(2, input.dim())) computation_dtype = utils.get_computation_dtype(input.dtype) if training: output, mean, rstd = normalize(input, reduction_dims, eps) save_mean = _squeeze_multiple(mean, reduction_dims) save_rstd = _squeeze_multiple(rstd, reduction_dims) if running_mean is not None: running_mean.copy_(momentum * save_mean + (1 - momentum) * running_mean) if running_var is not None: n = input.numel() / input.shape[1] # This doesn't strictly match eager's numerics, which accumulates var sum and then directly applies the correction # But... that would require re-implementing var here, for negligible numerics gain on a tensor whose # numerics probably don't matter. unbiased_var = torch.var(input, reduction_dims, unbiased=False) * ( n / (n - 1) ) running_var.copy_(momentum * unbiased_var + (1 - momentum) * running_var) else: assert running_mean is not None and running_var is not None running_mean = running_mean.to(dtype=computation_dtype) running_var = running_var.to(dtype=computation_dtype) mean = running_mean invstd = 1 / (torch.sqrt(running_var + eps)) # Very annoying inconsistency where CPU and CUDA give different shapes if input.device.type != "cpu": save_mean = running_mean save_rstd = invstd else: save_mean = input.new_zeros((0,)) save_rstd = input.new_zeros((0,)) mean = _unsqueeze_to_dim(mean, input.dim() - 1) invstd = _unsqueeze_to_dim(invstd, input.dim() - 1) output = (input - mean) * invstd if weight is None: weight = input.new_ones(()) if bias is None: bias = input.new_zeros(()) weight = _unsqueeze_to_dim(weight, input.dim() - 1) bias = _unsqueeze_to_dim(bias, input.dim() - 1) output = output * weight + bias if input.device.type == "cpu": save_mean = save_mean.to(dtype=input.dtype) save_rstd = save_rstd.to(dtype=input.dtype) return output.to(dtype=input.dtype), save_mean, save_rstd @register_decomposition(aten._fused_dropout) @pw_cast_for_opmath def _fused_dropout_decomposition(input, p, generator=None): mask = (torch.rand_like(input) < p).to(dtype=torch.uint8) res = mask.type_as(input) * input * (1.0 / p) return (res, mask) @register_decomposition(aten.xlogy.Tensor) @pw_cast_for_int_to_real def xlogy(self: Tensor, other: Tensor) -> Tensor: return aten.where( aten.isnan(self), self, aten.where( self == aten.new_zeros(self, ()), aten.new_zeros(self, ()), self * aten.log(other), ), ) @register_decomposition(aten.var.correction) @reduction_complex_to_real def var_correction( x: Tensor, dims: Optional[List[int]], correction: Optional[int] = None, keepdim: bool = False, ): if dims is None: dims = [] if x.is_complex(): # For complex, calculate variance of real and imaginary components # separately then add to get overall variance. 
real_in = x.real var_real = torch.var(real_in, dims, correction=correction, keepdim=keepdim) imag_in = x.imag var_imag = torch.var(imag_in, dims, correction=correction, keepdim=keepdim) return var_real + var_imag if correction is None: correction = 0 if len(dims) == 0: n = prod(x.shape) # type: ignore[arg-type] else: n = 1 for dim in dims: n *= x.shape[dim] mean = torch.mean(x, dims, True) sub = x - mean sq = sub * sub sum = torch.sum(sq, dims, keepdim) if correction: n = n - correction return sum / n @register_decomposition(aten.std.correction) @reduction_complex_to_real def std_decomposition( x: Tensor, dims: List[int], correction: int = 0, keepdim: bool = False ): return torch.sqrt(torch.var(x, dims, correction=correction, keepdim=keepdim)) # Questionable decompositions # This is only valid if we're running the graph without autograd, such as if the backward pass has been traced. # Note that this decomposition causes issues with in-place ops @register_decomposition( [aten.detach, aten.lift, aten.lift_fresh, aten.alias], disable_meta=True ) def nop_decomposition(x): return x @register_decomposition(aten.cudnn_batch_norm) def cudnn_batch_norm( input: Tensor, weight: Tensor, bias: Optional[Tensor], running_mean: Optional[Tensor], running_var: Optional[Tensor], training: bool, exponential_average_factor: float, epsilon: float, ): a, b, c = aten.native_batch_norm( input, weight, bias, running_mean, running_var, training, exponential_average_factor, epsilon, ) # Cudnn return running mean and variance when training is True if training: return (a, b, c, input.new_zeros((0,), dtype=torch.uint8)) return ( a, input.new_zeros((0,)), input.new_zeros((0,)), input.new_zeros((0,), dtype=torch.uint8), ) @register_decomposition(aten.native_batch_norm_backward) def native_batch_norm_backward( grad_out: Tensor, input: Tensor, weight: Optional[Tensor], running_mean: Optional[Tensor], running_var: Optional[Tensor], save_mean: Optional[Tensor], save_invstd: Optional[Tensor], train: bool, eps: float, output_mask: List[bool], ) -> Tuple[Tensor, Optional[Tensor], Optional[Tensor]]: input_dtype = input.dtype computation_dtype = utils.get_computation_dtype(input.dtype) ( grad_out_cast, input_cast, weight_cast, running_mean_cast, running_var_cast, save_mean_cast, save_invstd_cast, ) = [ x.to(computation_dtype) if x is not None else x for x in ( grad_out, input, weight, running_mean, running_var, save_mean, save_invstd, ) ] input_shape = input.shape input_rank = input.dim() assert input_rank >= 2, "rank of the input must be at least 2" axis = 1 num_features = prod(list(input_shape)) / input_shape[axis] mean = save_mean_cast invstd = save_invstd_cast if train: assert save_mean_cast is not None and save_invstd_cast is not None else: assert running_mean_cast is not None and running_var_cast is not None mean = running_mean_cast invstd = torch.rsqrt(running_var_cast + eps) broadcast_mask: List[int] = [1] * input_rank broadcast_mask[axis] = input_shape[axis] reduction_axes: List[int] = [] for i in range(input_rank): if i != axis: reduction_axes.append(i) mean = torch.reshape(mean, broadcast_mask) # type: ignore[arg-type] norm = 1.0 / num_features grad_output_sum = torch.sum(grad_out_cast, reduction_axes) # type: ignore[arg-type] dot_p = torch.sum(grad_out_cast * (input_cast - mean), reduction_axes) grad_mean = torch.reshape(grad_output_sum * norm, broadcast_mask) proj_scale = torch.reshape(torch.mul(dot_p * norm, invstd * invstd), broadcast_mask) # type: ignore[operator] if weight_cast is None: grad_scale = 
torch.reshape(invstd, broadcast_mask) * 1.0 # type: ignore[arg-type] else: grad_scale = torch.reshape(invstd * weight_cast, broadcast_mask) if train: proj = (input_cast - mean) * proj_scale grad_input = ((grad_out_cast - proj) - grad_mean) * grad_scale else: grad_input = grad_out_cast * grad_scale if output_mask[1]: grad_weight = dot_p * invstd else: grad_weight = None # "None" doesn't work with vjp, should use zeros for vjp if output_mask[2]: grad_bias = grad_output_sum else: grad_bias = None # "None" doesn't work with vjp, should use zeros for vjp return ( grad_input.to(input_dtype), _maybe_cast(grad_weight, input_dtype), _maybe_cast(grad_bias, input_dtype), ) @register_decomposition(aten.cudnn_batch_norm_backward) def cudnn_batch_norm_backward( input: Tensor, grad_output: Tensor, weight: Tensor, running_mean: Optional[Tensor], running_var: Optional[Tensor], save_mean: Optional[Tensor], save_var: Optional[Tensor], epsilon: float, reserveSpace: Tensor, ): return aten.native_batch_norm_backward( grad_output, input, weight, running_mean, running_var, save_mean, save_var, True, epsilon, [True, True, True], ) @register_decomposition(aten.transpose.int, disable_meta=True) def transpose_int(self: Tensor, dim0: int, dim1: int) -> Tensor: dim0, dim1 = utils.canonicalize_dims(self.dim(), (dim0, dim1)) # type: ignore[misc] if self.dim() <= 1: return self if dim0 == dim1: return self perm = list(range(self.dim())) perm[dim0], perm[dim1] = perm[dim1], perm[dim0] return torch.permute(self, perm) def _squeeze_multiple(self: Tensor, dims: List[int]) -> Tensor: ndim = self.dim() wrapped_dims = utils.canonicalize_dims(ndim, dims) assert isinstance(wrapped_dims, tuple) for idx in range(ndim - 1, -1, -1): if idx in wrapped_dims: self = self.squeeze(idx) return self @register_decomposition(aten.logsumexp.default) @pw_cast_for_int_to_real def logsumexp(self: Tensor, dim: List[int], keepdim: bool = False) -> Tensor: if self.numel() == 0: return torch.sum(torch.exp(self), dim, keepdim).log() maxes = torch.amax(self, dim, keepdim=True) maxes_squeezed = maxes if keepdim else _squeeze_multiple(maxes, dim) maxes_squeezed = torch.masked_fill( maxes_squeezed, maxes_squeezed.abs() == float("inf"), 0 ) result = torch.sum(torch.exp(self - maxes), dim, keepdim) return result.log().add(maxes_squeezed) # nb: Should use acc_t, not op_math @register_decomposition(aten.log_sigmoid_forward) @out_wrapper("output", "buffer") @pw_cast_for_opmath def log_sigmoid_forward(self: Tensor) -> Tuple[Tensor, Tensor]: min = torch.minimum(self.new_zeros(()), self) z = torch.exp(-torch.abs(self)) if self.is_cuda: buffer = self.new_zeros((0,)) else: buffer = z return min - torch.log1p(z), buffer @register_decomposition(aten.norm) @out_wrapper() @reduction_complex_to_real def norm( self: Tensor, p: Optional[float] = None, dim: List[int] = None, keepdim: bool = False, dtype: Optional[torch.dtype] = None, ): if p is None: p = 2.0 return torch.linalg.vector_norm(self, p, dim, keepdim, dtype=dtype) @register_decomposition(torch.ops.aten.upsample_bilinear2d.vec) @pw_cast_for_opmath def upsample_bilinear2d_vec( input: Tensor, output_size: Optional[List[int]], align_corners: bool, scale_factors: Optional[List[float]], ) -> Tensor: # get dimensions of original image n_batch, n_channels, in_h, in_w = input.shape if output_size is not None: out_h = float(output_size[0]) out_w = float(output_size[1]) elif scale_factors is not None: out_h = in_h * scale_factors[0] out_w = in_w * scale_factors[1] # Calculate horizontal and vertical scaling factor if out_h 
> 1: if align_corners: h_scale_factor = (in_h - 1) / (int(out_h) - 1) else: h_scale_factor = in_h / out_h else: h_scale_factor = 0.0 if out_w > 1: if align_corners: w_scale_factor = (in_w - 1) / (int(out_w) - 1) else: w_scale_factor = in_w / out_w else: w_scale_factor = 0.0 i = torch.arange(int(out_h), dtype=input.dtype, device=input.device) j = torch.arange(int(out_w), dtype=input.dtype, device=input.device) if align_corners: x = h_scale_factor * i y = w_scale_factor * j else: x = (h_scale_factor * (i + 0.5) - 0.5).clamp(min=0.0) y = (w_scale_factor * (j + 0.5) - 0.5).clamp(min=0.0) x_floor = torch.floor(x).to(torch.int64) x_ceil = torch.ceil(x).clamp(max=in_h - 1).to(torch.int64) y_floor = torch.floor(y).to(torch.int64) y_ceil = torch.ceil(y).clamp(max=in_w - 1).to(torch.int64) x_view = x.unsqueeze(1) x_floor_view = x_floor.unsqueeze(1) x_ceil_view = x_ceil.unsqueeze(1) v1 = input[:, :, x_floor_view, y_floor] v2 = input[:, :, x_ceil_view, y_floor] v3 = input[:, :, x_floor_view, y_ceil] v4 = input[:, :, x_ceil_view, y_ceil] xscale2 = x_view - x_floor_view xscale1 = 1.0 - xscale2 yscale2 = y - y_floor yscale1 = 1.0 - yscale2 q1 = torch.mul(v1, xscale1) + torch.mul(v2, xscale2) q2 = torch.mul(v3, xscale1) + torch.mul(v4, xscale2) result = torch.mul(q1, yscale1) + torch.mul(q2, yscale2) return result # We should be applying decompositions after all transformations @register_decomposition(aten.is_same_size.default) def is_same_size(a: Tensor, b: Tensor) -> bool: return a.shape == b.shape @register_decomposition(aten.nll_loss_forward) def nll_loss_forward( self: Tensor, target: Tensor, weight: Optional[Tensor], reduction: int, ignore_index: int, ) -> Tuple[Tensor, Tensor]: assert self.dim() > 0 and self.dim() <= 2, "input tensor should be 1D or 2D" assert ( target.dim() <= 1 ), "0D or 1D target tensor expected, multi-target not supported" no_batch_dim = self.dim() == 1 and target.dim() == 0 assert no_batch_dim or ( self.shape[0] == target.shape[0] ), f"size mismatch (got input: {self.shape}, target: {target.shape})" n_classes = self.shape[-1] assert weight is None or ( weight.dim() == 1 and weight.numel() == n_classes ), f"weight tensor should be defined either for all {n_classes} classes or no classes but got weight tensor of shape: {weight.shape}" # noqa: B950 # self can be [N, C] or [C] # target can be [N] or [] n_dims = self.dim() channel_dim = 1 if n_dims < 2: channel_dim = 0 if weight is not None: w = weight.unsqueeze(0) if n_dims > 1 else weight self = self * w target_ = target.unsqueeze(channel_dim) # target can be [N, 1] or [1] result = -torch.gather(self, channel_dim, target_).squeeze(channel_dim) if ignore_index >= 0: result = torch.where(target != ignore_index, result, 0) if reduction == Reduction.NONE.value and n_dims > 1: total_weight = self.new_full((), 0.0) return result, total_weight if weight is not None: w = weight.unsqueeze(0).expand(self.shape) if n_dims > 1 else weight wsum = torch.gather(w, channel_dim, target_).squeeze(channel_dim) if ignore_index >= 0: wsum = torch.where(target != ignore_index, wsum, 0) total_weight = wsum.sum() elif ignore_index >= 0: total_weight = (target != ignore_index).sum().to(self) else: total_weight = self.new_full((), 1.0 * result.numel()) if reduction == Reduction.SUM.value: result = result.sum() elif reduction == Reduction.MEAN.value: if weight is None: result = result.sum() / total_weight if ignore_index >= 0 else result.mean() else: result = result.sum() / total_weight return result, total_weight
pytorch-master
torch/_decomp/decompositions.py
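Each decomposition above is registered for an ATen overload and is intended to match the eager kernel numerically, up to the type-promotion wrappers. A quick sanity-check sketch, looking one decomposition up in the table and calling it directly for illustration:

import torch
import torch.nn.functional as F
from torch._decomp import decomposition_table

# The decomposition registered for aten.silu should agree with the eager op.
silu_decomp = decomposition_table[torch.ops.aten.silu.default]
x = torch.randn(4, 8)
assert torch.allclose(silu_decomp(x), F.silu(x), atol=1e-6)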
import inspect from collections import defaultdict from functools import wraps from itertools import chain from typing import Callable, Dict, NamedTuple, Sequence, Tuple, Union import torch import torch._ops import torch.library from torch.utils._pytree import tree_map __all__ = ["decomposition_table", "register_decomposition", "get_decompositions"] # TODO: relax key type here; torch registrations should be possible to; but # right now this type is accurate decomposition_table: Dict[torch._ops.OpOverload, Callable] = {} meta_lib = torch.library.Library("aten", "IMPL", "Meta") def register_decomposition(aten_op, registry=None, *, disable_meta: bool = False): """ A decorator to register a function as a decomposition to the Python decomposition table. Use it like this:: @register_decomposition(torch.ops.aten.clamp_min) def clamp_min(x): return torch.clamp(self, min=min) If you are writing a new decomposition, consider contributing it directly to PyTorch in torch._decomp.decompositions. This API is experimental; we are almost certainly going to extend the API when we make decompositions eligible for use in transforms (e.g., autograd) and not just backend tracing, where we then need to know if a decomposition can be used to simulate a transform. By default, if the decomposition is for an operator that doesn't have a Meta implementation, we will register it to the dispatcher. Use `disable_meta` to disable this behavior. """ def decomposition_decorator(f: Callable) -> Callable: sig = inspect.signature(f) out_annotation = f.__annotations__.get("out") # Hack to detect when out is a Tuple. There seems to be no pretty way of doing this fn = f if out_annotation and getattr(out_annotation, "__origin__", None) is tuple: out_names = sig.return_annotation._fields # If out is a tuple, we need to register a function that unpacks all the out # elements as this is what native_functions.yaml expects @wraps(f) def _fn(*args, **kwargs): out_kwargs = tuple(kwargs.pop(o, None) for o in out_names) # Either all of the out kwargs are set or none of them is_none = out_kwargs[0] is None assert all((o is None) == is_none for o in out_kwargs) return f(*args, **kwargs, out=None if is_none else out_kwargs) out_params = [ inspect.Parameter( o, kind=inspect.Parameter.KEYWORD_ONLY, default=None, annotation=t, ) for o, t in zip(out_names, out_annotation.__args__) ] # Drop the out parameter and concatenate the new kwargs in the signature params = chain( (v for k, v in sig.parameters.items() if k != "out"), out_params ) _fn.__signature__ = inspect.Signature( # type: ignore[attr-defined] parameters=params, return_annotation=sig.return_annotation # type: ignore[arg-type] ) # Drop the out parameter and concatenate the new kwargs in the annotations _fn.__annotations__ = { k: v for k, v in f.__annotations__.items() if k != "out" } for o in out_params: _fn.__annotations__[o.name] = o.annotation fn = _fn nonlocal registry if registry is None: registry = decomposition_table def add_op_to_table(aten_op): overloads = [] if isinstance(aten_op, torch._ops.OpOverload): overloads.append(aten_op) else: assert isinstance(aten_op, torch._ops.OpOverloadPacket) for ol in aten_op.overloads(): overloads.append(getattr(aten_op, ol)) for op_overload in overloads: if op_overload in registry: raise RuntimeError(f"duplicate registrations for {op_overload}") registry[op_overload] = fn # TODO: factor this logic into OpOverload or Library API name = op_overload._schema.name if op_overload._schema.overload_name: name += "." 
+ op_overload._schema.overload_name if ( not disable_meta # TorchScript dumps a bunch of extra nonsense overloads # which don't have corresponding dispatcher entries, we need # to filter those out and torch._C._dispatch_has_kernel(name) # Don't register a python meta kernel to any operator that has # should already work with meta tensors today. # We can check that by seeing if the "computed table" for the operator # has a registration to Meta; # either through a direct registration, or an indirect one through # an alias dispatch key (e.g. CompositeImplicitAutograd) and not torch._C._dispatch_has_computed_kernel_for_dispatch_key( name, "Meta" ) ): if any( a.alias_info is not None and not a.alias_info.is_write for a in op_overload._schema.arguments ): raise RuntimeError( f""" Attempting to register a python meta kernel for a view operator: {str(op_overload)}. We shouldn't do this, because the output will report as not having aliased storages. All view ops have meta kernels in C++ today, so we should use those instead. If you're registering an operator through the `@register_decomposition` decorator, Please set `disable_meta=True`. """ ) meta_lib.impl(op_overload, fn) # To handle allowing multiple aten_ops at once tree_map(add_op_to_table, aten_op) return fn return decomposition_decorator def get_decompositions( aten_ops: Sequence[Union[torch._ops.OpOverload, torch._ops.OpOverloadPacket]] ) -> Dict[torch._ops.OpOverload, Callable]: """ Retrieve a dictionary of decompositions corresponding to the list of operator overloads and overload packets passed as input. Overload packets will include all decomposed overloads in the packet. If there is no decomposition for a requested operator, it is silently ignored. This API is experimental; we are almost certainly going to give an alternate, more recommended formulation, where a user provides the set of operators they know how to implement, and we provide decompositions for everything not in this set. """ packets_to_overloads = defaultdict(list) for opo in decomposition_table: packets_to_overloads[opo.overloadpacket].append(opo) decompositions = {} for op in aten_ops: if isinstance(op, torch._ops.OpOverloadPacket) and op in packets_to_overloads: for op_overload in packets_to_overloads[op]: decompositions[op_overload] = decomposition_table[op_overload] elif isinstance(op, torch._ops.OpOverload) and op in decomposition_table: decompositions[op] = decomposition_table[op] return decompositions # populate the table import torch._decomp.decompositions import torch._refs
pytorch-master
torch/_decomp/__init__.py
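get_decompositions resolves overload packets to every registered overload and silently skips operators that have no decomposition, as the docstring above describes. A usage sketch:

import torch
from torch._decomp import get_decompositions

decomps = get_decompositions([torch.ops.aten.addmm, torch.ops.aten.silu])
for overload, fn in decomps.items():
    print(overload, "->", fn.__name__)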
import types import math from torch._six import inf from functools import wraps import warnings import weakref from collections import Counter from bisect import bisect_right from .optimizer import Optimizer __all__ = ['LambdaLR', 'MultiplicativeLR', 'StepLR', 'MultiStepLR', 'ConstantLR', 'LinearLR', 'ExponentialLR', 'SequentialLR', 'CosineAnnealingLR', 'ChainedScheduler', 'ReduceLROnPlateau', 'CyclicLR', 'CosineAnnealingWarmRestarts', 'OneCycleLR', 'PolynomialLR'] EPOCH_DEPRECATION_WARNING = ( "The epoch parameter in `scheduler.step()` was not necessary and is being " "deprecated where possible. Please use `scheduler.step()` to step the " "scheduler. During the deprecation, if epoch is different from None, the " "closed form is used instead of the new chainable form, where available. " "Please open an issue if you are unable to replicate your use case: " "https://github.com/pytorch/pytorch/issues/new/choose." ) class _LRScheduler(object): def __init__(self, optimizer, last_epoch=-1, verbose=False): # Attach optimizer if not isinstance(optimizer, Optimizer): raise TypeError('{} is not an Optimizer'.format( type(optimizer).__name__)) self.optimizer = optimizer # Initialize epoch and base learning rates if last_epoch == -1: for group in optimizer.param_groups: group.setdefault('initial_lr', group['lr']) else: for i, group in enumerate(optimizer.param_groups): if 'initial_lr' not in group: raise KeyError("param 'initial_lr' is not specified " "in param_groups[{}] when resuming an optimizer".format(i)) self.base_lrs = [group['initial_lr'] for group in optimizer.param_groups] self.last_epoch = last_epoch # Following https://github.com/pytorch/pytorch/issues/20124 # We would like to ensure that `lr_scheduler.step()` is called after # `optimizer.step()` def with_counter(method): if getattr(method, '_with_counter', False): # `optimizer.step()` has already been replaced, return. return method # Keep a weak reference to the optimizer instance to prevent # cyclic references. instance_ref = weakref.ref(method.__self__) # Get the unbound method for the same purpose. func = method.__func__ cls = instance_ref().__class__ del method @wraps(func) def wrapper(*args, **kwargs): instance = instance_ref() instance._step_count += 1 wrapped = func.__get__(instance, cls) return wrapped(*args, **kwargs) # Note that the returned function here is no longer a bound method, # so attributes like `__func__` and `__self__` no longer exist. wrapper._with_counter = True return wrapper self.optimizer.step = with_counter(self.optimizer.step) self.verbose = verbose self._initial_step() def _initial_step(self): """Initialize step counts and performs a step""" self.optimizer._step_count = 0 self._step_count = 0 self.step() def state_dict(self): """Returns the state of the scheduler as a :class:`dict`. It contains an entry for every variable in self.__dict__ which is not the optimizer. """ return {key: value for key, value in self.__dict__.items() if key != 'optimizer'} def load_state_dict(self, state_dict): """Loads the schedulers state. Args: state_dict (dict): scheduler state. Should be an object returned from a call to :meth:`state_dict`. """ self.__dict__.update(state_dict) def get_last_lr(self): """ Return last computed learning rate by current scheduler. """ return self._last_lr def get_lr(self): # Compute learning rate using chainable form of the scheduler raise NotImplementedError def print_lr(self, is_verbose, group, lr, epoch=None): """Display the current learning rate. 
""" if is_verbose: if epoch is None: print('Adjusting learning rate' ' of group {} to {:.4e}.'.format(group, lr)) else: epoch_str = ("%.2f" if isinstance(epoch, float) else "%.5d") % epoch print('Epoch {}: adjusting learning rate' ' of group {} to {:.4e}.'.format(epoch_str, group, lr)) def step(self, epoch=None): # Raise a warning if old pattern is detected # https://github.com/pytorch/pytorch/issues/20124 if self._step_count == 1: if not hasattr(self.optimizer.step, "_with_counter"): warnings.warn("Seems like `optimizer.step()` has been overridden after learning rate scheduler " "initialization. Please, make sure to call `optimizer.step()` before " "`lr_scheduler.step()`. See more details at " "https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate", UserWarning) # Just check if there were two first lr_scheduler.step() calls before optimizer.step() elif self.optimizer._step_count < 1: warnings.warn("Detected call of `lr_scheduler.step()` before `optimizer.step()`. " "In PyTorch 1.1.0 and later, you should call them in the opposite order: " "`optimizer.step()` before `lr_scheduler.step()`. Failure to do this " "will result in PyTorch skipping the first value of the learning rate schedule. " "See more details at " "https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate", UserWarning) self._step_count += 1 class _enable_get_lr_call: def __init__(self, o): self.o = o def __enter__(self): self.o._get_lr_called_within_step = True return self def __exit__(self, type, value, traceback): self.o._get_lr_called_within_step = False with _enable_get_lr_call(self): if epoch is None: self.last_epoch += 1 values = self.get_lr() else: warnings.warn(EPOCH_DEPRECATION_WARNING, UserWarning) self.last_epoch = epoch if hasattr(self, "_get_closed_form_lr"): values = self._get_closed_form_lr() else: values = self.get_lr() for i, data in enumerate(zip(self.optimizer.param_groups, values)): param_group, lr = data param_group['lr'] = lr self.print_lr(self.verbose, i, lr, epoch) self._last_lr = [group['lr'] for group in self.optimizer.param_groups] class LambdaLR(_LRScheduler): """Sets the learning rate of each parameter group to the initial lr times a given function. When last_epoch=-1, sets initial lr as lr. Args: optimizer (Optimizer): Wrapped optimizer. lr_lambda (function or list): A function which computes a multiplicative factor given an integer parameter epoch, or a list of such functions, one for each group in optimizer.param_groups. last_epoch (int): The index of last epoch. Default: -1. verbose (bool): If ``True``, prints a message to stdout for each update. Default: ``False``. Example: >>> # Assuming optimizer has two groups. >>> lambda1 = lambda epoch: epoch // 30 >>> lambda2 = lambda epoch: 0.95 ** epoch >>> # xdoctest: +SKIP >>> scheduler = LambdaLR(optimizer, lr_lambda=[lambda1, lambda2]) >>> for epoch in range(100): >>> train(...) >>> validate(...) >>> scheduler.step() """ def __init__(self, optimizer, lr_lambda, last_epoch=-1, verbose=False): self.optimizer = optimizer if not isinstance(lr_lambda, list) and not isinstance(lr_lambda, tuple): self.lr_lambdas = [lr_lambda] * len(optimizer.param_groups) else: if len(lr_lambda) != len(optimizer.param_groups): raise ValueError("Expected {} lr_lambdas, but got {}".format( len(optimizer.param_groups), len(lr_lambda))) self.lr_lambdas = list(lr_lambda) super(LambdaLR, self).__init__(optimizer, last_epoch, verbose) def state_dict(self): """Returns the state of the scheduler as a :class:`dict`. 
It contains an entry for every variable in self.__dict__ which is not the optimizer. The learning rate lambda functions will only be saved if they are callable objects and not if they are functions or lambdas. When saving or loading the scheduler, please make sure to also save or load the state of the optimizer. """ state_dict = {key: value for key, value in self.__dict__.items() if key not in ('optimizer', 'lr_lambdas')} state_dict['lr_lambdas'] = [None] * len(self.lr_lambdas) for idx, fn in enumerate(self.lr_lambdas): if not isinstance(fn, types.FunctionType): state_dict['lr_lambdas'][idx] = fn.__dict__.copy() return state_dict def load_state_dict(self, state_dict): """Loads the schedulers state. When saving or loading the scheduler, please make sure to also save or load the state of the optimizer. Args: state_dict (dict): scheduler state. Should be an object returned from a call to :meth:`state_dict`. """ lr_lambdas = state_dict.pop('lr_lambdas') self.__dict__.update(state_dict) # Restore state_dict keys in order to prevent side effects # https://github.com/pytorch/pytorch/issues/32756 state_dict['lr_lambdas'] = lr_lambdas for idx, fn in enumerate(lr_lambdas): if fn is not None: self.lr_lambdas[idx].__dict__.update(fn) def get_lr(self): if not self._get_lr_called_within_step: warnings.warn("To get the last learning rate computed by the scheduler, " "please use `get_last_lr()`.") return [base_lr * lmbda(self.last_epoch) for lmbda, base_lr in zip(self.lr_lambdas, self.base_lrs)] class MultiplicativeLR(_LRScheduler): """Multiply the learning rate of each parameter group by the factor given in the specified function. When last_epoch=-1, sets initial lr as lr. Args: optimizer (Optimizer): Wrapped optimizer. lr_lambda (function or list): A function which computes a multiplicative factor given an integer parameter epoch, or a list of such functions, one for each group in optimizer.param_groups. last_epoch (int): The index of last epoch. Default: -1. verbose (bool): If ``True``, prints a message to stdout for each update. Default: ``False``. Example: >>> lmbda = lambda epoch: 0.95 >>> # xdoctest: +SKIP >>> scheduler = MultiplicativeLR(optimizer, lr_lambda=lmbda) >>> for epoch in range(100): >>> train(...) >>> validate(...) >>> scheduler.step() """ def __init__(self, optimizer, lr_lambda, last_epoch=-1, verbose=False): self.optimizer = optimizer if not isinstance(lr_lambda, list) and not isinstance(lr_lambda, tuple): self.lr_lambdas = [lr_lambda] * len(optimizer.param_groups) else: if len(lr_lambda) != len(optimizer.param_groups): raise ValueError("Expected {} lr_lambdas, but got {}".format( len(optimizer.param_groups), len(lr_lambda))) self.lr_lambdas = list(lr_lambda) super(MultiplicativeLR, self).__init__(optimizer, last_epoch, verbose) def state_dict(self): """Returns the state of the scheduler as a :class:`dict`. It contains an entry for every variable in self.__dict__ which is not the optimizer. The learning rate lambda functions will only be saved if they are callable objects and not if they are functions or lambdas. """ state_dict = {key: value for key, value in self.__dict__.items() if key not in ('optimizer', 'lr_lambdas')} state_dict['lr_lambdas'] = [None] * len(self.lr_lambdas) for idx, fn in enumerate(self.lr_lambdas): if not isinstance(fn, types.FunctionType): state_dict['lr_lambdas'][idx] = fn.__dict__.copy() return state_dict def load_state_dict(self, state_dict): """Loads the schedulers state. Args: state_dict (dict): scheduler state. 
Should be an object returned from a call to :meth:`state_dict`. """ lr_lambdas = state_dict.pop('lr_lambdas') self.__dict__.update(state_dict) # Restore state_dict keys in order to prevent side effects # https://github.com/pytorch/pytorch/issues/32756 state_dict['lr_lambdas'] = lr_lambdas for idx, fn in enumerate(lr_lambdas): if fn is not None: self.lr_lambdas[idx].__dict__.update(fn) def get_lr(self): if not self._get_lr_called_within_step: warnings.warn("To get the last learning rate computed by the scheduler, " "please use `get_last_lr()`.", UserWarning) if self.last_epoch > 0: return [group['lr'] * lmbda(self.last_epoch) for lmbda, group in zip(self.lr_lambdas, self.optimizer.param_groups)] else: return [group['lr'] for group in self.optimizer.param_groups] class StepLR(_LRScheduler): """Decays the learning rate of each parameter group by gamma every step_size epochs. Notice that such decay can happen simultaneously with other changes to the learning rate from outside this scheduler. When last_epoch=-1, sets initial lr as lr. Args: optimizer (Optimizer): Wrapped optimizer. step_size (int): Period of learning rate decay. gamma (float): Multiplicative factor of learning rate decay. Default: 0.1. last_epoch (int): The index of last epoch. Default: -1. verbose (bool): If ``True``, prints a message to stdout for each update. Default: ``False``. Example: >>> # Assuming optimizer uses lr = 0.05 for all groups >>> # lr = 0.05 if epoch < 30 >>> # lr = 0.005 if 30 <= epoch < 60 >>> # lr = 0.0005 if 60 <= epoch < 90 >>> # ... >>> # xdoctest: +SKIP >>> scheduler = StepLR(optimizer, step_size=30, gamma=0.1) >>> for epoch in range(100): >>> train(...) >>> validate(...) >>> scheduler.step() """ def __init__(self, optimizer, step_size, gamma=0.1, last_epoch=-1, verbose=False): self.step_size = step_size self.gamma = gamma super(StepLR, self).__init__(optimizer, last_epoch, verbose) def get_lr(self): if not self._get_lr_called_within_step: warnings.warn("To get the last learning rate computed by the scheduler, " "please use `get_last_lr()`.", UserWarning) if (self.last_epoch == 0) or (self.last_epoch % self.step_size != 0): return [group['lr'] for group in self.optimizer.param_groups] return [group['lr'] * self.gamma for group in self.optimizer.param_groups] def _get_closed_form_lr(self): return [base_lr * self.gamma ** (self.last_epoch // self.step_size) for base_lr in self.base_lrs] class MultiStepLR(_LRScheduler): """Decays the learning rate of each parameter group by gamma once the number of epoch reaches one of the milestones. Notice that such decay can happen simultaneously with other changes to the learning rate from outside this scheduler. When last_epoch=-1, sets initial lr as lr. Args: optimizer (Optimizer): Wrapped optimizer. milestones (list): List of epoch indices. Must be increasing. gamma (float): Multiplicative factor of learning rate decay. Default: 0.1. last_epoch (int): The index of last epoch. Default: -1. verbose (bool): If ``True``, prints a message to stdout for each update. Default: ``False``. Example: >>> # Assuming optimizer uses lr = 0.05 for all groups >>> # lr = 0.05 if epoch < 30 >>> # lr = 0.005 if 30 <= epoch < 80 >>> # lr = 0.0005 if epoch >= 80 >>> # xdoctest: +SKIP >>> scheduler = MultiStepLR(optimizer, milestones=[30,80], gamma=0.1) >>> for epoch in range(100): >>> train(...) >>> validate(...) 
>>> scheduler.step() """ def __init__(self, optimizer, milestones, gamma=0.1, last_epoch=-1, verbose=False): self.milestones = Counter(milestones) self.gamma = gamma super(MultiStepLR, self).__init__(optimizer, last_epoch, verbose) def get_lr(self): if not self._get_lr_called_within_step: warnings.warn("To get the last learning rate computed by the scheduler, " "please use `get_last_lr()`.", UserWarning) if self.last_epoch not in self.milestones: return [group['lr'] for group in self.optimizer.param_groups] return [group['lr'] * self.gamma ** self.milestones[self.last_epoch] for group in self.optimizer.param_groups] def _get_closed_form_lr(self): milestones = list(sorted(self.milestones.elements())) return [base_lr * self.gamma ** bisect_right(milestones, self.last_epoch) for base_lr in self.base_lrs] class ConstantLR(_LRScheduler): """Decays the learning rate of each parameter group by a small constant factor until the number of epoch reaches a pre-defined milestone: total_iters. Notice that such decay can happen simultaneously with other changes to the learning rate from outside this scheduler. When last_epoch=-1, sets initial lr as lr. Args: optimizer (Optimizer): Wrapped optimizer. factor (float): The number we multiply learning rate until the milestone. Default: 1./3. total_iters (int): The number of steps that the scheduler decays the learning rate. Default: 5. last_epoch (int): The index of the last epoch. Default: -1. verbose (bool): If ``True``, prints a message to stdout for each update. Default: ``False``. Example: >>> # Assuming optimizer uses lr = 0.05 for all groups >>> # lr = 0.025 if epoch == 0 >>> # lr = 0.025 if epoch == 1 >>> # lr = 0.025 if epoch == 2 >>> # lr = 0.025 if epoch == 3 >>> # lr = 0.05 if epoch >= 4 >>> # xdoctest: +SKIP >>> scheduler = ConstantLR(self.opt, factor=0.5, total_iters=4) >>> for epoch in range(100): >>> train(...) >>> validate(...) >>> scheduler.step() """ def __init__(self, optimizer, factor=1.0 / 3, total_iters=5, last_epoch=-1, verbose=False): if factor > 1.0 or factor < 0: raise ValueError('Constant multiplicative factor expected to be between 0 and 1.') self.factor = factor self.total_iters = total_iters super(ConstantLR, self).__init__(optimizer, last_epoch, verbose) def get_lr(self): if not self._get_lr_called_within_step: warnings.warn("To get the last learning rate computed by the scheduler, " "please use `get_last_lr()`.", UserWarning) if self.last_epoch == 0: return [group['lr'] * self.factor for group in self.optimizer.param_groups] if (self.last_epoch > self.total_iters or (self.last_epoch != self.total_iters)): return [group['lr'] for group in self.optimizer.param_groups] if (self.last_epoch == self.total_iters): return [group['lr'] * (1.0 / self.factor) for group in self.optimizer.param_groups] def _get_closed_form_lr(self): return [base_lr * (self.factor + (self.last_epoch >= self.total_iters) * (1 - self.factor)) for base_lr in self.base_lrs] class LinearLR(_LRScheduler): """Decays the learning rate of each parameter group by linearly changing small multiplicative factor until the number of epoch reaches a pre-defined milestone: total_iters. Notice that such decay can happen simultaneously with other changes to the learning rate from outside this scheduler. When last_epoch=-1, sets initial lr as lr. Args: optimizer (Optimizer): Wrapped optimizer. start_factor (float): The number we multiply learning rate in the first epoch. The multiplication factor changes towards end_factor in the following epochs. Default: 1./3. 
end_factor (float): The number we multiply learning rate at the end of linear changing process. Default: 1.0. total_iters (int): The number of iterations that multiplicative factor reaches to 1. Default: 5. last_epoch (int): The index of the last epoch. Default: -1. verbose (bool): If ``True``, prints a message to stdout for each update. Default: ``False``. Example: >>> # Assuming optimizer uses lr = 0.05 for all groups >>> # lr = 0.025 if epoch == 0 >>> # lr = 0.03125 if epoch == 1 >>> # lr = 0.0375 if epoch == 2 >>> # lr = 0.04375 if epoch == 3 >>> # lr = 0.05 if epoch >= 4 >>> # xdoctest: +SKIP >>> scheduler = LinearLR(self.opt, start_factor=0.5, total_iters=4) >>> for epoch in range(100): >>> train(...) >>> validate(...) >>> scheduler.step() """ def __init__(self, optimizer, start_factor=1.0 / 3, end_factor=1.0, total_iters=5, last_epoch=-1, verbose=False): if start_factor > 1.0 or start_factor < 0: raise ValueError('Starting multiplicative factor expected to be between 0 and 1.') if end_factor > 1.0 or end_factor < 0: raise ValueError('Ending multiplicative factor expected to be between 0 and 1.') self.start_factor = start_factor self.end_factor = end_factor self.total_iters = total_iters super(LinearLR, self).__init__(optimizer, last_epoch, verbose) def get_lr(self): if not self._get_lr_called_within_step: warnings.warn("To get the last learning rate computed by the scheduler, " "please use `get_last_lr()`.", UserWarning) if self.last_epoch == 0: return [group['lr'] * self.start_factor for group in self.optimizer.param_groups] if (self.last_epoch > self.total_iters): return [group['lr'] for group in self.optimizer.param_groups] return [group['lr'] * (1. + (self.end_factor - self.start_factor) / (self.total_iters * self.start_factor + (self.last_epoch - 1) * (self.end_factor - self.start_factor))) for group in self.optimizer.param_groups] def _get_closed_form_lr(self): return [base_lr * (self.start_factor + (self.end_factor - self.start_factor) * min(self.total_iters, self.last_epoch) / self.total_iters) for base_lr in self.base_lrs] class ExponentialLR(_LRScheduler): """Decays the learning rate of each parameter group by gamma every epoch. When last_epoch=-1, sets initial lr as lr. Args: optimizer (Optimizer): Wrapped optimizer. gamma (float): Multiplicative factor of learning rate decay. last_epoch (int): The index of last epoch. Default: -1. verbose (bool): If ``True``, prints a message to stdout for each update. Default: ``False``. """ def __init__(self, optimizer, gamma, last_epoch=-1, verbose=False): self.gamma = gamma super(ExponentialLR, self).__init__(optimizer, last_epoch, verbose) def get_lr(self): if not self._get_lr_called_within_step: warnings.warn("To get the last learning rate computed by the scheduler, " "please use `get_last_lr()`.", UserWarning) if self.last_epoch == 0: return [group['lr'] for group in self.optimizer.param_groups] return [group['lr'] * self.gamma for group in self.optimizer.param_groups] def _get_closed_form_lr(self): return [base_lr * self.gamma ** self.last_epoch for base_lr in self.base_lrs] class SequentialLR(_LRScheduler): """Receives the list of schedulers that is expected to be called sequentially during optimization process and milestone points that provides exact intervals to reflect which scheduler is supposed to be called at a given epoch. Args: optimizer (Optimizer): Wrapped optimizer. schedulers (list): List of chained schedulers. milestones (list): List of integers that reflects milestone points. 
last_epoch (int): The index of last epoch. Default: -1. verbose (bool): Does nothing. Example: >>> # Assuming optimizer uses lr = 1. for all groups >>> # lr = 0.1 if epoch == 0 >>> # lr = 0.1 if epoch == 1 >>> # lr = 0.9 if epoch == 2 >>> # lr = 0.81 if epoch == 3 >>> # lr = 0.729 if epoch == 4 >>> # xdoctest: +SKIP >>> scheduler1 = ConstantLR(self.opt, factor=0.1, total_iters=2) >>> scheduler2 = ExponentialLR(self.opt, gamma=0.9) >>> scheduler = SequentialLR(self.opt, schedulers=[scheduler1, scheduler2], milestones=[2]) >>> for epoch in range(100): >>> train(...) >>> validate(...) >>> scheduler.step() """ def __init__(self, optimizer, schedulers, milestones, last_epoch=-1, verbose=False): for scheduler_idx in range(len(schedulers)): if schedulers[scheduler_idx].optimizer != optimizer: raise ValueError( "Sequential Schedulers expects all schedulers to belong to the same optimizer, but " f"got schedulers at index {scheduler_idx} to be different than the optimizer passed in." ) if (schedulers[scheduler_idx].optimizer != schedulers[0].optimizer): raise ValueError( "Sequential Schedulers expects all schedulers to belong to the same optimizer, but " f"got schedulers at index {0} and {scheduler_idx} to be different." ) if (len(milestones) != len(schedulers) - 1): raise ValueError( "Sequential Schedulers expects number of schedulers provided to be one more " "than the number of milestone points, but got number of schedulers {} and the " "number of milestones to be equal to {}".format(len(schedulers), len(milestones)) ) self._schedulers = schedulers self._milestones = milestones self.last_epoch = last_epoch + 1 self.optimizer = optimizer # Reset learning rates back to initial values for group in self.optimizer.param_groups: group["lr"] = group["initial_lr"] # "Undo" the step performed by other schedulers for scheduler in self._schedulers: scheduler.last_epoch -= 1 # Perform the initial step for only the first scheduler self._schedulers[0]._initial_step() self._last_lr = schedulers[0].get_last_lr() def step(self): self.last_epoch += 1 idx = bisect_right(self._milestones, self.last_epoch) scheduler = self._schedulers[idx] if idx > 0 and self._milestones[idx - 1] == self.last_epoch: scheduler.step(0) else: scheduler.step() self._last_lr = scheduler.get_last_lr() def state_dict(self): """Returns the state of the scheduler as a :class:`dict`. It contains an entry for every variable in self.__dict__ which is not the optimizer. The wrapped scheduler states will also be saved. """ state_dict = {key: value for key, value in self.__dict__.items() if key not in ('optimizer', '_schedulers')} state_dict['_schedulers'] = [None] * len(self._schedulers) for idx, s in enumerate(self._schedulers): state_dict['_schedulers'][idx] = s.state_dict() return state_dict def load_state_dict(self, state_dict): """Loads the schedulers state. Args: state_dict (dict): scheduler state. Should be an object returned from a call to :meth:`state_dict`. """ _schedulers = state_dict.pop('_schedulers') self.__dict__.update(state_dict) # Restore state_dict keys in order to prevent side effects # https://github.com/pytorch/pytorch/issues/32756 state_dict['_schedulers'] = _schedulers for idx, s in enumerate(_schedulers): self._schedulers[idx].load_state_dict(s) class PolynomialLR(_LRScheduler): """Decays the learning rate of each parameter group using a polynomial function in the given total_iters. When last_epoch=-1, sets initial lr as lr. Args: optimizer (Optimizer): Wrapped optimizer. 
total_iters (int): The number of steps that the scheduler decays the learning rate. Default: 5. power (int): The power of the polynomial. Default: 1.0. verbose (bool): If ``True``, prints a message to stdout for each update. Default: ``False``. Example: >>> # Assuming optimizer uses lr = 0.001 for all groups >>> # lr = 0.001 if epoch == 0 >>> # lr = 0.00075 if epoch == 1 >>> # lr = 0.00050 if epoch == 2 >>> # lr = 0.00025 if epoch == 3 >>> # lr = 0.0 if epoch >= 4 >>> scheduler = PolynomialLR(self.opt, total_iters=4, power=1.0) >>> for epoch in range(100): >>> train(...) >>> validate(...) >>> scheduler.step() """ def __init__(self, optimizer, total_iters=5, power=1.0, last_epoch=-1, verbose=False): self.total_iters = total_iters self.power = power super().__init__(optimizer, last_epoch, verbose) def get_lr(self): if not self._get_lr_called_within_step: warnings.warn("To get the last learning rate computed by the scheduler, " "please use `get_last_lr()`.", UserWarning) if self.last_epoch == 0 or self.last_epoch > self.total_iters: return [group["lr"] for group in self.optimizer.param_groups] decay_factor = ((1.0 - self.last_epoch / self.total_iters) / (1.0 - (self.last_epoch - 1) / self.total_iters)) ** self.power return [group["lr"] * decay_factor for group in self.optimizer.param_groups] def _get_closed_form_lr(self): return [ ( base_lr * (1.0 - min(self.total_iters, self.last_epoch) / self.total_iters) ** self.power ) for base_lr in self.base_lrs ] class CosineAnnealingLR(_LRScheduler): r"""Set the learning rate of each parameter group using a cosine annealing schedule, where :math:`\eta_{max}` is set to the initial lr and :math:`T_{cur}` is the number of epochs since the last restart in SGDR: .. math:: \begin{aligned} \eta_t & = \eta_{min} + \frac{1}{2}(\eta_{max} - \eta_{min})\left(1 + \cos\left(\frac{T_{cur}}{T_{max}}\pi\right)\right), & T_{cur} \neq (2k+1)T_{max}; \\ \eta_{t+1} & = \eta_{t} + \frac{1}{2}(\eta_{max} - \eta_{min}) \left(1 - \cos\left(\frac{1}{T_{max}}\pi\right)\right), & T_{cur} = (2k+1)T_{max}. \end{aligned} When last_epoch=-1, sets initial lr as lr. Notice that because the schedule is defined recursively, the learning rate can be simultaneously modified outside this scheduler by other operators. If the learning rate is set solely by this scheduler, the learning rate at each step becomes: .. math:: \eta_t = \eta_{min} + \frac{1}{2}(\eta_{max} - \eta_{min})\left(1 + \cos\left(\frac{T_{cur}}{T_{max}}\pi\right)\right) It has been proposed in `SGDR: Stochastic Gradient Descent with Warm Restarts`_. Note that this only implements the cosine annealing part of SGDR, and not the restarts. Args: optimizer (Optimizer): Wrapped optimizer. T_max (int): Maximum number of iterations. eta_min (float): Minimum learning rate. Default: 0. last_epoch (int): The index of last epoch. Default: -1. verbose (bool): If ``True``, prints a message to stdout for each update. Default: ``False``. .. 
_SGDR\: Stochastic Gradient Descent with Warm Restarts: https://arxiv.org/abs/1608.03983 """ def __init__(self, optimizer, T_max, eta_min=0, last_epoch=-1, verbose=False): self.T_max = T_max self.eta_min = eta_min super(CosineAnnealingLR, self).__init__(optimizer, last_epoch, verbose) def get_lr(self): if not self._get_lr_called_within_step: warnings.warn("To get the last learning rate computed by the scheduler, " "please use `get_last_lr()`.", UserWarning) if self.last_epoch == 0: return [group['lr'] for group in self.optimizer.param_groups] elif self._step_count == 1 and self.last_epoch > 0: return [self.eta_min + (base_lr - self.eta_min) * (1 + math.cos((self.last_epoch) * math.pi / self.T_max)) / 2 for base_lr, group in zip(self.base_lrs, self.optimizer.param_groups)] elif (self.last_epoch - 1 - self.T_max) % (2 * self.T_max) == 0: return [group['lr'] + (base_lr - self.eta_min) * (1 - math.cos(math.pi / self.T_max)) / 2 for base_lr, group in zip(self.base_lrs, self.optimizer.param_groups)] return [(1 + math.cos(math.pi * self.last_epoch / self.T_max)) / (1 + math.cos(math.pi * (self.last_epoch - 1) / self.T_max)) * (group['lr'] - self.eta_min) + self.eta_min for group in self.optimizer.param_groups] def _get_closed_form_lr(self): return [self.eta_min + (base_lr - self.eta_min) * (1 + math.cos(math.pi * self.last_epoch / self.T_max)) / 2 for base_lr in self.base_lrs] class ChainedScheduler(_LRScheduler): """Chains list of learning rate schedulers. It takes a list of chainable learning rate schedulers and performs consecutive step() functions belonging to them by just one call. Args: schedulers (list): List of chained schedulers. Example: >>> # Assuming optimizer uses lr = 1. for all groups >>> # lr = 0.09 if epoch == 0 >>> # lr = 0.081 if epoch == 1 >>> # lr = 0.729 if epoch == 2 >>> # lr = 0.6561 if epoch == 3 >>> # lr = 0.59049 if epoch >= 4 >>> # xdoctest: +SKIP >>> scheduler1 = ConstantLR(self.opt, factor=0.1, total_iters=2) >>> scheduler2 = ExponentialLR(self.opt, gamma=0.9) >>> scheduler = ChainedScheduler([scheduler1, scheduler2]) >>> for epoch in range(100): >>> train(...) >>> validate(...) >>> scheduler.step() """ def __init__(self, schedulers): for scheduler_idx in range(1, len(schedulers)): if (schedulers[scheduler_idx].optimizer != schedulers[0].optimizer): raise ValueError( "ChainedScheduler expects all schedulers to belong to the same optimizer, but " "got schedulers at index {} and {} to be different".format(0, scheduler_idx) ) self._schedulers = list(schedulers) self.optimizer = schedulers[0].optimizer self._last_lr = [group['lr'] for group in self._schedulers[-1].optimizer.param_groups] def step(self): for scheduler in self._schedulers: scheduler.step() self._last_lr = [group['lr'] for group in self._schedulers[-1].optimizer.param_groups] def state_dict(self): """Returns the state of the scheduler as a :class:`dict`. It contains an entry for every variable in self.__dict__ which is not the optimizer. The wrapped scheduler states will also be saved. """ state_dict = {key: value for key, value in self.__dict__.items() if key not in ('optimizer', '_schedulers')} state_dict['_schedulers'] = [None] * len(self._schedulers) for idx, s in enumerate(self._schedulers): state_dict['_schedulers'][idx] = s.state_dict() return state_dict def load_state_dict(self, state_dict): """Loads the schedulers state. Args: state_dict (dict): scheduler state. Should be an object returned from a call to :meth:`state_dict`. 
""" _schedulers = state_dict.pop('_schedulers') self.__dict__.update(state_dict) # Restore state_dict keys in order to prevent side effects # https://github.com/pytorch/pytorch/issues/32756 state_dict['_schedulers'] = _schedulers for idx, s in enumerate(_schedulers): self._schedulers[idx].load_state_dict(s) class ReduceLROnPlateau(object): """Reduce learning rate when a metric has stopped improving. Models often benefit from reducing the learning rate by a factor of 2-10 once learning stagnates. This scheduler reads a metrics quantity and if no improvement is seen for a 'patience' number of epochs, the learning rate is reduced. Args: optimizer (Optimizer): Wrapped optimizer. mode (str): One of `min`, `max`. In `min` mode, lr will be reduced when the quantity monitored has stopped decreasing; in `max` mode it will be reduced when the quantity monitored has stopped increasing. Default: 'min'. factor (float): Factor by which the learning rate will be reduced. new_lr = lr * factor. Default: 0.1. patience (int): Number of epochs with no improvement after which learning rate will be reduced. For example, if `patience = 2`, then we will ignore the first 2 epochs with no improvement, and will only decrease the LR after the 3rd epoch if the loss still hasn't improved then. Default: 10. threshold (float): Threshold for measuring the new optimum, to only focus on significant changes. Default: 1e-4. threshold_mode (str): One of `rel`, `abs`. In `rel` mode, dynamic_threshold = best * ( 1 + threshold ) in 'max' mode or best * ( 1 - threshold ) in `min` mode. In `abs` mode, dynamic_threshold = best + threshold in `max` mode or best - threshold in `min` mode. Default: 'rel'. cooldown (int): Number of epochs to wait before resuming normal operation after lr has been reduced. Default: 0. min_lr (float or list): A scalar or a list of scalars. A lower bound on the learning rate of all param groups or each group respectively. Default: 0. eps (float): Minimal decay applied to lr. If the difference between new and old lr is smaller than eps, the update is ignored. Default: 1e-8. verbose (bool): If ``True``, prints a message to stdout for each update. Default: ``False``. Example: >>> # xdoctest: +SKIP >>> optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9) >>> scheduler = ReduceLROnPlateau(optimizer, 'min') >>> for epoch in range(10): >>> train(...) >>> val_loss = validate(...) 
>>> # Note that step should be called after validate() >>> scheduler.step(val_loss) """ def __init__(self, optimizer, mode='min', factor=0.1, patience=10, threshold=1e-4, threshold_mode='rel', cooldown=0, min_lr=0, eps=1e-8, verbose=False): if factor >= 1.0: raise ValueError('Factor should be < 1.0.') self.factor = factor # Attach optimizer if not isinstance(optimizer, Optimizer): raise TypeError('{} is not an Optimizer'.format( type(optimizer).__name__)) self.optimizer = optimizer if isinstance(min_lr, list) or isinstance(min_lr, tuple): if len(min_lr) != len(optimizer.param_groups): raise ValueError("expected {} min_lrs, got {}".format( len(optimizer.param_groups), len(min_lr))) self.min_lrs = list(min_lr) else: self.min_lrs = [min_lr] * len(optimizer.param_groups) self.patience = patience self.verbose = verbose self.cooldown = cooldown self.cooldown_counter = 0 self.mode = mode self.threshold = threshold self.threshold_mode = threshold_mode self.best = None self.num_bad_epochs = None self.mode_worse = None # the worse value for the chosen mode self.eps = eps self.last_epoch = 0 self._init_is_better(mode=mode, threshold=threshold, threshold_mode=threshold_mode) self._reset() def _reset(self): """Resets num_bad_epochs counter and cooldown counter.""" self.best = self.mode_worse self.cooldown_counter = 0 self.num_bad_epochs = 0 def step(self, metrics, epoch=None): # convert `metrics` to float, in case it's a zero-dim Tensor current = float(metrics) if epoch is None: epoch = self.last_epoch + 1 else: warnings.warn(EPOCH_DEPRECATION_WARNING, UserWarning) self.last_epoch = epoch if self.is_better(current, self.best): self.best = current self.num_bad_epochs = 0 else: self.num_bad_epochs += 1 if self.in_cooldown: self.cooldown_counter -= 1 self.num_bad_epochs = 0 # ignore any bad epochs in cooldown if self.num_bad_epochs > self.patience: self._reduce_lr(epoch) self.cooldown_counter = self.cooldown self.num_bad_epochs = 0 self._last_lr = [group['lr'] for group in self.optimizer.param_groups] def _reduce_lr(self, epoch): for i, param_group in enumerate(self.optimizer.param_groups): old_lr = float(param_group['lr']) new_lr = max(old_lr * self.factor, self.min_lrs[i]) if old_lr - new_lr > self.eps: param_group['lr'] = new_lr if self.verbose: epoch_str = ("%.2f" if isinstance(epoch, float) else "%.5d") % epoch print('Epoch {}: reducing learning rate' ' of group {} to {:.4e}.'.format(epoch_str, i, new_lr)) @property def in_cooldown(self): return self.cooldown_counter > 0 def is_better(self, a, best): if self.mode == 'min' and self.threshold_mode == 'rel': rel_epsilon = 1. - self.threshold return a < best * rel_epsilon elif self.mode == 'min' and self.threshold_mode == 'abs': return a < best - self.threshold elif self.mode == 'max' and self.threshold_mode == 'rel': rel_epsilon = self.threshold + 1. 
return a > best * rel_epsilon else: # mode == 'max' and epsilon_mode == 'abs': return a > best + self.threshold def _init_is_better(self, mode, threshold, threshold_mode): if mode not in {'min', 'max'}: raise ValueError('mode ' + mode + ' is unknown!') if threshold_mode not in {'rel', 'abs'}: raise ValueError('threshold mode ' + threshold_mode + ' is unknown!') if mode == 'min': self.mode_worse = inf else: # mode == 'max': self.mode_worse = -inf self.mode = mode self.threshold = threshold self.threshold_mode = threshold_mode def state_dict(self): return {key: value for key, value in self.__dict__.items() if key != 'optimizer'} def load_state_dict(self, state_dict): self.__dict__.update(state_dict) self._init_is_better(mode=self.mode, threshold=self.threshold, threshold_mode=self.threshold_mode) class CyclicLR(_LRScheduler): r"""Sets the learning rate of each parameter group according to cyclical learning rate policy (CLR). The policy cycles the learning rate between two boundaries with a constant frequency, as detailed in the paper `Cyclical Learning Rates for Training Neural Networks`_. The distance between the two boundaries can be scaled on a per-iteration or per-cycle basis. Cyclical learning rate policy changes the learning rate after every batch. `step` should be called after a batch has been used for training. This class has three built-in policies, as put forth in the paper: * "triangular": A basic triangular cycle without amplitude scaling. * "triangular2": A basic triangular cycle that scales initial amplitude by half each cycle. * "exp_range": A cycle that scales initial amplitude by :math:`\text{gamma}^{\text{cycle iterations}}` at each cycle iteration. This implementation was adapted from the github repo: `bckenstler/CLR`_ Args: optimizer (Optimizer): Wrapped optimizer. base_lr (float or list): Initial learning rate which is the lower boundary in the cycle for each parameter group. max_lr (float or list): Upper learning rate boundaries in the cycle for each parameter group. Functionally, it defines the cycle amplitude (max_lr - base_lr). The lr at any cycle is the sum of base_lr and some scaling of the amplitude; therefore max_lr may not actually be reached depending on scaling function. step_size_up (int): Number of training iterations in the increasing half of a cycle. Default: 2000 step_size_down (int): Number of training iterations in the decreasing half of a cycle. If step_size_down is None, it is set to step_size_up. Default: None mode (str): One of {triangular, triangular2, exp_range}. Values correspond to policies detailed above. If scale_fn is not None, this argument is ignored. Default: 'triangular' gamma (float): Constant in 'exp_range' scaling function: gamma**(cycle iterations) Default: 1.0 scale_fn (function): Custom scaling policy defined by a single argument lambda function, where 0 <= scale_fn(x) <= 1 for all x >= 0. If specified, then 'mode' is ignored. Default: None scale_mode (str): {'cycle', 'iterations'}. Defines whether scale_fn is evaluated on cycle number or cycle iterations (training iterations since start of cycle). Default: 'cycle' cycle_momentum (bool): If ``True``, momentum is cycled inversely to learning rate between 'base_momentum' and 'max_momentum'. Default: True base_momentum (float or list): Lower momentum boundaries in the cycle for each parameter group. Note that momentum is cycled inversely to learning rate; at the peak of a cycle, momentum is 'base_momentum' and learning rate is 'max_lr'. 
Default: 0.8 max_momentum (float or list): Upper momentum boundaries in the cycle for each parameter group. Functionally, it defines the cycle amplitude (max_momentum - base_momentum). The momentum at any cycle is the difference of max_momentum and some scaling of the amplitude; therefore base_momentum may not actually be reached depending on scaling function. Note that momentum is cycled inversely to learning rate; at the start of a cycle, momentum is 'max_momentum' and learning rate is 'base_lr' Default: 0.9 last_epoch (int): The index of the last batch. This parameter is used when resuming a training job. Since `step()` should be invoked after each batch instead of after each epoch, this number represents the total number of *batches* computed, not the total number of epochs computed. When last_epoch=-1, the schedule is started from the beginning. Default: -1 verbose (bool): If ``True``, prints a message to stdout for each update. Default: ``False``. Example: >>> # xdoctest: +SKIP >>> optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9) >>> scheduler = torch.optim.lr_scheduler.CyclicLR(optimizer, base_lr=0.01, max_lr=0.1) >>> data_loader = torch.utils.data.DataLoader(...) >>> for epoch in range(10): >>> for batch in data_loader: >>> train_batch(...) >>> scheduler.step() .. _Cyclical Learning Rates for Training Neural Networks: https://arxiv.org/abs/1506.01186 .. _bckenstler/CLR: https://github.com/bckenstler/CLR """ def __init__(self, optimizer, base_lr, max_lr, step_size_up=2000, step_size_down=None, mode='triangular', gamma=1., scale_fn=None, scale_mode='cycle', cycle_momentum=True, base_momentum=0.8, max_momentum=0.9, last_epoch=-1, verbose=False): # Attach optimizer if not isinstance(optimizer, Optimizer): raise TypeError('{} is not an Optimizer'.format( type(optimizer).__name__)) self.optimizer = optimizer base_lrs = self._format_param('base_lr', optimizer, base_lr) if last_epoch == -1: for lr, group in zip(base_lrs, optimizer.param_groups): group['lr'] = lr self.max_lrs = self._format_param('max_lr', optimizer, max_lr) step_size_up = float(step_size_up) step_size_down = float(step_size_down) if step_size_down is not None else step_size_up self.total_size = step_size_up + step_size_down self.step_ratio = step_size_up / self.total_size if mode not in ['triangular', 'triangular2', 'exp_range'] \ and scale_fn is None: raise ValueError('mode is invalid and scale_fn is None') self.mode = mode self.gamma = gamma if scale_fn is None: if self.mode == 'triangular': self.scale_fn = self._triangular_scale_fn self.scale_mode = 'cycle' elif self.mode == 'triangular2': self.scale_fn = self._triangular2_scale_fn self.scale_mode = 'cycle' elif self.mode == 'exp_range': self.scale_fn = self._exp_range_scale_fn self.scale_mode = 'iterations' else: self.scale_fn = scale_fn self.scale_mode = scale_mode self.cycle_momentum = cycle_momentum if cycle_momentum: if 'momentum' not in optimizer.defaults: raise ValueError('optimizer must support momentum with `cycle_momentum` option enabled') base_momentums = self._format_param('base_momentum', optimizer, base_momentum) if last_epoch == -1: for momentum, group in zip(base_momentums, optimizer.param_groups): group['momentum'] = momentum self.base_momentums = [group['momentum'] for group in optimizer.param_groups] self.max_momentums = self._format_param('max_momentum', optimizer, max_momentum) super(CyclicLR, self).__init__(optimizer, last_epoch, verbose) self.base_lrs = base_lrs def _format_param(self, name, optimizer, param): """Return 
correctly formatted lr/momentum for each param group.""" if isinstance(param, (list, tuple)): if len(param) != len(optimizer.param_groups): raise ValueError("expected {} values for {}, got {}".format( len(optimizer.param_groups), name, len(param))) return param else: return [param] * len(optimizer.param_groups) def _triangular_scale_fn(self, x): return 1. def _triangular2_scale_fn(self, x): return 1 / (2. ** (x - 1)) def _exp_range_scale_fn(self, x): return self.gamma**(x) def get_lr(self): """Calculates the learning rate at batch index. This function treats `self.last_epoch` as the last batch index. If `self.cycle_momentum` is ``True``, this function has a side effect of updating the optimizer's momentum. """ if not self._get_lr_called_within_step: warnings.warn("To get the last learning rate computed by the scheduler, " "please use `get_last_lr()`.", UserWarning) cycle = math.floor(1 + self.last_epoch / self.total_size) x = 1. + self.last_epoch / self.total_size - cycle if x <= self.step_ratio: scale_factor = x / self.step_ratio else: scale_factor = (x - 1) / (self.step_ratio - 1) lrs = [] for base_lr, max_lr in zip(self.base_lrs, self.max_lrs): base_height = (max_lr - base_lr) * scale_factor if self.scale_mode == 'cycle': lr = base_lr + base_height * self.scale_fn(cycle) else: lr = base_lr + base_height * self.scale_fn(self.last_epoch) lrs.append(lr) if self.cycle_momentum: momentums = [] for base_momentum, max_momentum in zip(self.base_momentums, self.max_momentums): base_height = (max_momentum - base_momentum) * scale_factor if self.scale_mode == 'cycle': momentum = max_momentum - base_height * self.scale_fn(cycle) else: momentum = max_momentum - base_height * self.scale_fn(self.last_epoch) momentums.append(momentum) for param_group, momentum in zip(self.optimizer.param_groups, momentums): param_group['momentum'] = momentum return lrs class CosineAnnealingWarmRestarts(_LRScheduler): r"""Set the learning rate of each parameter group using a cosine annealing schedule, where :math:`\eta_{max}` is set to the initial lr, :math:`T_{cur}` is the number of epochs since the last restart and :math:`T_{i}` is the number of epochs between two warm restarts in SGDR: .. math:: \eta_t = \eta_{min} + \frac{1}{2}(\eta_{max} - \eta_{min})\left(1 + \cos\left(\frac{T_{cur}}{T_{i}}\pi\right)\right) When :math:`T_{cur}=T_{i}`, set :math:`\eta_t = \eta_{min}`. When :math:`T_{cur}=0` after restart, set :math:`\eta_t=\eta_{max}`. It has been proposed in `SGDR: Stochastic Gradient Descent with Warm Restarts`_. Args: optimizer (Optimizer): Wrapped optimizer. T_0 (int): Number of iterations for the first restart. T_mult (int, optional): A factor increases :math:`T_{i}` after a restart. Default: 1. eta_min (float, optional): Minimum learning rate. Default: 0. last_epoch (int, optional): The index of last epoch. Default: -1. verbose (bool): If ``True``, prints a message to stdout for each update. Default: ``False``. .. 
_SGDR\: Stochastic Gradient Descent with Warm Restarts: https://arxiv.org/abs/1608.03983 """ def __init__(self, optimizer, T_0, T_mult=1, eta_min=0, last_epoch=-1, verbose=False): if T_0 <= 0 or not isinstance(T_0, int): raise ValueError("Expected positive integer T_0, but got {}".format(T_0)) if T_mult < 1 or not isinstance(T_mult, int): raise ValueError("Expected integer T_mult >= 1, but got {}".format(T_mult)) self.T_0 = T_0 self.T_i = T_0 self.T_mult = T_mult self.eta_min = eta_min self.T_cur = last_epoch super(CosineAnnealingWarmRestarts, self).__init__(optimizer, last_epoch, verbose) def get_lr(self): if not self._get_lr_called_within_step: warnings.warn("To get the last learning rate computed by the scheduler, " "please use `get_last_lr()`.", UserWarning) return [self.eta_min + (base_lr - self.eta_min) * (1 + math.cos(math.pi * self.T_cur / self.T_i)) / 2 for base_lr in self.base_lrs] def step(self, epoch=None): """Step could be called after every batch update Example: >>> # xdoctest: +SKIP("Undefined vars") >>> scheduler = CosineAnnealingWarmRestarts(optimizer, T_0, T_mult) >>> iters = len(dataloader) >>> for epoch in range(20): >>> for i, sample in enumerate(dataloader): >>> inputs, labels = sample['inputs'], sample['labels'] >>> optimizer.zero_grad() >>> outputs = net(inputs) >>> loss = criterion(outputs, labels) >>> loss.backward() >>> optimizer.step() >>> scheduler.step(epoch + i / iters) This function can be called in an interleaved way. Example: >>> # xdoctest: +SKIP("Undefined vars") >>> scheduler = CosineAnnealingWarmRestarts(optimizer, T_0, T_mult) >>> for epoch in range(20): >>> scheduler.step() >>> scheduler.step(26) >>> scheduler.step() # scheduler.step(27), instead of scheduler(20) """ if epoch is None and self.last_epoch < 0: epoch = 0 if epoch is None: epoch = self.last_epoch + 1 self.T_cur = self.T_cur + 1 if self.T_cur >= self.T_i: self.T_cur = self.T_cur - self.T_i self.T_i = self.T_i * self.T_mult else: if epoch < 0: raise ValueError("Expected non-negative epoch, but got {}".format(epoch)) if epoch >= self.T_0: if self.T_mult == 1: self.T_cur = epoch % self.T_0 else: n = int(math.log((epoch / self.T_0 * (self.T_mult - 1) + 1), self.T_mult)) self.T_cur = epoch - self.T_0 * (self.T_mult ** n - 1) / (self.T_mult - 1) self.T_i = self.T_0 * self.T_mult ** (n) else: self.T_i = self.T_0 self.T_cur = epoch self.last_epoch = math.floor(epoch) class _enable_get_lr_call: def __init__(self, o): self.o = o def __enter__(self): self.o._get_lr_called_within_step = True return self def __exit__(self, type, value, traceback): self.o._get_lr_called_within_step = False return self with _enable_get_lr_call(self): for i, data in enumerate(zip(self.optimizer.param_groups, self.get_lr())): param_group, lr = data param_group['lr'] = lr self.print_lr(self.verbose, i, lr, epoch) self._last_lr = [group['lr'] for group in self.optimizer.param_groups] class OneCycleLR(_LRScheduler): r"""Sets the learning rate of each parameter group according to the 1cycle learning rate policy. The 1cycle policy anneals the learning rate from an initial learning rate to some maximum learning rate and then from that maximum learning rate to some minimum learning rate much lower than the initial learning rate. This policy was initially described in the paper `Super-Convergence: Very Fast Training of Neural Networks Using Large Learning Rates`_. The 1cycle learning rate policy changes the learning rate after every batch. `step` should be called after a batch has been used for training. 
This scheduler is not chainable. Note also that the total number of steps in the cycle can be determined in one of two ways (listed in order of precedence): #. A value for total_steps is explicitly provided. #. A number of epochs (epochs) and a number of steps per epoch (steps_per_epoch) are provided. In this case, the number of total steps is inferred by total_steps = epochs * steps_per_epoch You must either provide a value for total_steps or provide a value for both epochs and steps_per_epoch. The default behaviour of this scheduler follows the fastai implementation of 1cycle, which claims that "unpublished work has shown even better results by using only two phases". To mimic the behaviour of the original paper instead, set ``three_phase=True``. Args: optimizer (Optimizer): Wrapped optimizer. max_lr (float or list): Upper learning rate boundaries in the cycle for each parameter group. total_steps (int): The total number of steps in the cycle. Note that if a value is not provided here, then it must be inferred by providing a value for epochs and steps_per_epoch. Default: None epochs (int): The number of epochs to train for. This is used along with steps_per_epoch in order to infer the total number of steps in the cycle if a value for total_steps is not provided. Default: None steps_per_epoch (int): The number of steps per epoch to train for. This is used along with epochs in order to infer the total number of steps in the cycle if a value for total_steps is not provided. Default: None pct_start (float): The percentage of the cycle (in number of steps) spent increasing the learning rate. Default: 0.3 anneal_strategy (str): {'cos', 'linear'} Specifies the annealing strategy: "cos" for cosine annealing, "linear" for linear annealing. Default: 'cos' cycle_momentum (bool): If ``True``, momentum is cycled inversely to learning rate between 'base_momentum' and 'max_momentum'. Default: True base_momentum (float or list): Lower momentum boundaries in the cycle for each parameter group. Note that momentum is cycled inversely to learning rate; at the peak of a cycle, momentum is 'base_momentum' and learning rate is 'max_lr'. Default: 0.85 max_momentum (float or list): Upper momentum boundaries in the cycle for each parameter group. Functionally, it defines the cycle amplitude (max_momentum - base_momentum). Note that momentum is cycled inversely to learning rate; at the start of a cycle, momentum is 'max_momentum' and learning rate is 'base_lr' Default: 0.95 div_factor (float): Determines the initial learning rate via initial_lr = max_lr/div_factor Default: 25 final_div_factor (float): Determines the minimum learning rate via min_lr = initial_lr/final_div_factor Default: 1e4 three_phase (bool): If ``True``, use a third phase of the schedule to annihilate the learning rate according to 'final_div_factor' instead of modifying the second phase (the first two phases will be symmetrical about the step indicated by 'pct_start'). last_epoch (int): The index of the last batch. This parameter is used when resuming a training job. Since `step()` should be invoked after each batch instead of after each epoch, this number represents the total number of *batches* computed, not the total number of epochs computed. When last_epoch=-1, the schedule is started from the beginning. Default: -1 verbose (bool): If ``True``, prints a message to stdout for each update. Default: ``False``. Example: >>> data_loader = torch.utils.data.DataLoader(...) 
>>> # xdoctest: +SKIP >>> optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9) >>> scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01, steps_per_epoch=len(data_loader), epochs=10) >>> for epoch in range(10): >>> for batch in data_loader: >>> train_batch(...) >>> scheduler.step() .. _Super-Convergence\: Very Fast Training of Neural Networks Using Large Learning Rates: https://arxiv.org/abs/1708.07120 """ def __init__(self, optimizer, max_lr, total_steps=None, epochs=None, steps_per_epoch=None, pct_start=0.3, anneal_strategy='cos', cycle_momentum=True, base_momentum=0.85, max_momentum=0.95, div_factor=25., final_div_factor=1e4, three_phase=False, last_epoch=-1, verbose=False): # Validate optimizer if not isinstance(optimizer, Optimizer): raise TypeError('{} is not an Optimizer'.format( type(optimizer).__name__)) self.optimizer = optimizer # Validate total_steps if total_steps is None and epochs is None and steps_per_epoch is None: raise ValueError("You must define either total_steps OR (epochs AND steps_per_epoch)") elif total_steps is not None: if total_steps <= 0 or not isinstance(total_steps, int): raise ValueError("Expected positive integer total_steps, but got {}".format(total_steps)) self.total_steps = total_steps else: if epochs <= 0 or not isinstance(epochs, int): raise ValueError("Expected positive integer epochs, but got {}".format(epochs)) if steps_per_epoch <= 0 or not isinstance(steps_per_epoch, int): raise ValueError("Expected positive integer steps_per_epoch, but got {}".format(steps_per_epoch)) self.total_steps = epochs * steps_per_epoch if three_phase: self._schedule_phases = [ { 'end_step': float(pct_start * self.total_steps) - 1, 'start_lr': 'initial_lr', 'end_lr': 'max_lr', 'start_momentum': 'max_momentum', 'end_momentum': 'base_momentum', }, { 'end_step': float(2 * pct_start * self.total_steps) - 2, 'start_lr': 'max_lr', 'end_lr': 'initial_lr', 'start_momentum': 'base_momentum', 'end_momentum': 'max_momentum', }, { 'end_step': self.total_steps - 1, 'start_lr': 'initial_lr', 'end_lr': 'min_lr', 'start_momentum': 'max_momentum', 'end_momentum': 'max_momentum', }, ] else: self._schedule_phases = [ { 'end_step': float(pct_start * self.total_steps) - 1, 'start_lr': 'initial_lr', 'end_lr': 'max_lr', 'start_momentum': 'max_momentum', 'end_momentum': 'base_momentum', }, { 'end_step': self.total_steps - 1, 'start_lr': 'max_lr', 'end_lr': 'min_lr', 'start_momentum': 'base_momentum', 'end_momentum': 'max_momentum', }, ] # Validate pct_start if pct_start < 0 or pct_start > 1 or not isinstance(pct_start, float): raise ValueError("Expected float between 0 and 1 pct_start, but got {}".format(pct_start)) # Validate anneal_strategy if anneal_strategy not in ['cos', 'linear']: raise ValueError("anneal_strategy must by one of 'cos' or 'linear', instead got {}".format(anneal_strategy)) elif anneal_strategy == 'cos': self.anneal_func = self._annealing_cos elif anneal_strategy == 'linear': self.anneal_func = self._annealing_linear # Initialize learning rate variables max_lrs = self._format_param('max_lr', self.optimizer, max_lr) if last_epoch == -1: for idx, group in enumerate(self.optimizer.param_groups): group['initial_lr'] = max_lrs[idx] / div_factor group['max_lr'] = max_lrs[idx] group['min_lr'] = group['initial_lr'] / final_div_factor # Initialize momentum variables self.cycle_momentum = cycle_momentum if self.cycle_momentum: if 'momentum' not in self.optimizer.defaults and 'betas' not in self.optimizer.defaults: raise ValueError('optimizer must 
support momentum with `cycle_momentum` option enabled') self.use_beta1 = 'betas' in self.optimizer.defaults max_momentums = self._format_param('max_momentum', optimizer, max_momentum) base_momentums = self._format_param('base_momentum', optimizer, base_momentum) if last_epoch == -1: for m_momentum, b_momentum, group in zip(max_momentums, base_momentums, optimizer.param_groups): if self.use_beta1: _, beta2 = group['betas'] group['betas'] = (m_momentum, beta2) else: group['momentum'] = m_momentum group['max_momentum'] = m_momentum group['base_momentum'] = b_momentum super(OneCycleLR, self).__init__(optimizer, last_epoch, verbose) def _format_param(self, name, optimizer, param): """Return correctly formatted lr/momentum for each param group.""" if isinstance(param, (list, tuple)): if len(param) != len(optimizer.param_groups): raise ValueError("expected {} values for {}, got {}".format( len(optimizer.param_groups), name, len(param))) return param else: return [param] * len(optimizer.param_groups) def _annealing_cos(self, start, end, pct): "Cosine anneal from `start` to `end` as pct goes from 0.0 to 1.0." cos_out = math.cos(math.pi * pct) + 1 return end + (start - end) / 2.0 * cos_out def _annealing_linear(self, start, end, pct): "Linearly anneal from `start` to `end` as pct goes from 0.0 to 1.0." return (end - start) * pct + start def get_lr(self): if not self._get_lr_called_within_step: warnings.warn("To get the last learning rate computed by the scheduler, " "please use `get_last_lr()`.", UserWarning) lrs = [] step_num = self.last_epoch if step_num > self.total_steps: raise ValueError("Tried to step {} times. The specified number of total steps is {}" .format(step_num + 1, self.total_steps)) for group in self.optimizer.param_groups: start_step = 0 for i, phase in enumerate(self._schedule_phases): end_step = phase['end_step'] if step_num <= end_step or i == len(self._schedule_phases) - 1: pct = (step_num - start_step) / (end_step - start_step) computed_lr = self.anneal_func(group[phase['start_lr']], group[phase['end_lr']], pct) if self.cycle_momentum: computed_momentum = self.anneal_func(group[phase['start_momentum']], group[phase['end_momentum']], pct) break start_step = phase['end_step'] lrs.append(computed_lr) if self.cycle_momentum: if self.use_beta1: _, beta2 = group['betas'] group['betas'] = (computed_momentum, beta2) else: group['momentum'] = computed_momentum return lrs
pytorch-master
torch/optim/lr_scheduler.py
import torch from torch import Tensor from .optimizer import Optimizer from typing import List, Optional __all__ = ['RMSprop', 'rmsprop'] class RMSprop(Optimizer): r"""Implements RMSprop algorithm. .. math:: \begin{aligned} &\rule{110mm}{0.4pt} \\ &\textbf{input} : \alpha \text{ (alpha)},\: \gamma \text{ (lr)}, \: \theta_0 \text{ (params)}, \: f(\theta) \text{ (objective)} \\ &\hspace{13mm} \lambda \text{ (weight decay)},\: \mu \text{ (momentum)},\: centered\\ &\textbf{initialize} : v_0 \leftarrow 0 \text{ (square average)}, \: \textbf{b}_0 \leftarrow 0 \text{ (buffer)}, \: g^{ave}_0 \leftarrow 0 \\[-1.ex] &\rule{110mm}{0.4pt} \\ &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do} \\ &\hspace{5mm}g_t \leftarrow \nabla_{\theta} f_t (\theta_{t-1}) \\ &\hspace{5mm}if \: \lambda \neq 0 \\ &\hspace{10mm} g_t \leftarrow g_t + \lambda \theta_{t-1} \\ &\hspace{5mm}v_t \leftarrow \alpha v_{t-1} + (1 - \alpha) g^2_t \hspace{8mm} \\ &\hspace{5mm} \tilde{v_t} \leftarrow v_t \\ &\hspace{5mm}if \: centered \\ &\hspace{10mm} g^{ave}_t \leftarrow g^{ave}_{t-1} \alpha + (1-\alpha) g_t \\ &\hspace{10mm} \tilde{v_t} \leftarrow \tilde{v_t} - \big(g^{ave}_{t} \big)^2 \\ &\hspace{5mm}if \: \mu > 0 \\ &\hspace{10mm} \textbf{b}_t\leftarrow \mu \textbf{b}_{t-1} + g_t/ \big(\sqrt{\tilde{v_t}} + \epsilon \big) \\ &\hspace{10mm} \theta_t \leftarrow \theta_{t-1} - \gamma \textbf{b}_t \\ &\hspace{5mm} else \\ &\hspace{10mm}\theta_t \leftarrow \theta_{t-1} - \gamma g_t/ \big(\sqrt{\tilde{v_t}} + \epsilon \big) \hspace{3mm} \\ &\rule{110mm}{0.4pt} \\[-1.ex] &\bf{return} \: \theta_t \\[-1.ex] &\rule{110mm}{0.4pt} \\[-1.ex] \end{aligned} For further details regarding the algorithm we refer to `lecture notes <https://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf>`_ by G. Hinton. and centered version `Generating Sequences With Recurrent Neural Networks <https://arxiv.org/pdf/1308.0850v5.pdf>`_. The implementation here takes the square root of the gradient average before adding epsilon (note that TensorFlow interchanges these two operations). The effective learning rate is thus :math:`\gamma/(\sqrt{v} + \epsilon)` where :math:`\gamma` is the scheduled learning rate and :math:`v` is the weighted moving average of the squared gradient. 
Args: params (iterable): iterable of parameters to optimize or dicts defining parameter groups lr (float, optional): learning rate (default: 1e-2) momentum (float, optional): momentum factor (default: 0) alpha (float, optional): smoothing constant (default: 0.99) eps (float, optional): term added to the denominator to improve numerical stability (default: 1e-8) centered (bool, optional) : if ``True``, compute the centered RMSProp, the gradient is normalized by an estimation of its variance weight_decay (float, optional): weight decay (L2 penalty) (default: 0) foreach (bool, optional): whether foreach implementation of optimizer is used (default: None) maximize (bool, optional): maximize the params based on the objective, instead of minimizing (default: False) """ def __init__(self, params, lr=1e-2, alpha=0.99, eps=1e-8, weight_decay=0, momentum=0, centered=False, foreach: Optional[bool] = None, maximize: bool = False): if not 0.0 <= lr: raise ValueError("Invalid learning rate: {}".format(lr)) if not 0.0 <= eps: raise ValueError("Invalid epsilon value: {}".format(eps)) if not 0.0 <= momentum: raise ValueError("Invalid momentum value: {}".format(momentum)) if not 0.0 <= weight_decay: raise ValueError("Invalid weight_decay value: {}".format(weight_decay)) if not 0.0 <= alpha: raise ValueError("Invalid alpha value: {}".format(alpha)) defaults = dict(lr=lr, momentum=momentum, alpha=alpha, eps=eps, centered=centered, weight_decay=weight_decay, foreach=foreach, maximize=maximize) super(RMSprop, self).__init__(params, defaults) def __setstate__(self, state): super().__setstate__(state) for group in self.param_groups: group.setdefault('momentum', 0) group.setdefault('centered', False) group.setdefault('foreach', None) group.setdefault('maximize', False) @torch.no_grad() def step(self, closure=None): """Performs a single optimization step. Args: closure (Callable, optional): A closure that reevaluates the model and returns the loss. 
""" loss = None if closure is not None: with torch.enable_grad(): loss = closure() for group in self.param_groups: params_with_grad = [] grads = [] square_avgs = [] grad_avgs = [] momentum_buffer_list = [] for p in group['params']: if p.grad is None: continue params_with_grad.append(p) if p.grad.is_sparse: raise RuntimeError('RMSprop does not support sparse gradients') grads.append(p.grad) state = self.state[p] # State initialization if len(state) == 0: state['step'] = 0 state['square_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format) if group['momentum'] > 0: state['momentum_buffer'] = torch.zeros_like(p, memory_format=torch.preserve_format) if group['centered']: state['grad_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format) square_avgs.append(state['square_avg']) if group['momentum'] > 0: momentum_buffer_list.append(state['momentum_buffer']) if group['centered']: grad_avgs.append(state['grad_avg']) state['step'] += 1 rmsprop(params_with_grad, grads, square_avgs, grad_avgs, momentum_buffer_list, lr=group['lr'], alpha=group['alpha'], eps=group['eps'], weight_decay=group['weight_decay'], momentum=group['momentum'], centered=group['centered'], foreach=group['foreach'], maximize=group["maximize"]) return loss def rmsprop(params: List[Tensor], grads: List[Tensor], square_avgs: List[Tensor], grad_avgs: List[Tensor], momentum_buffer_list: List[Tensor], # kwonly args with defaults are not supported by functions compiled with torchscript issue #70627 # setting this as kwarg for now as functional API is compiled by torch/distributed/optim foreach: bool = None, maximize: bool = False, *, lr: float, alpha: float, eps: float, weight_decay: float, momentum: float, centered: bool): r"""Functional API that performs rmsprop algorithm computation. See :class:`~torch.optim.RMSProp` for details. 
""" if foreach is None: # Placeholder for more complex foreach logic to be added when value is not set foreach = False if foreach and torch.jit.is_scripting(): raise RuntimeError('torch.jit.script not supported with foreach optimizers') if foreach and not torch.jit.is_scripting(): func = _multi_tensor_rmsprop else: func = _single_tensor_rmsprop func(params, grads, square_avgs, grad_avgs, momentum_buffer_list, lr=lr, alpha=alpha, eps=eps, weight_decay=weight_decay, momentum=momentum, centered=centered, maximize=maximize) def _single_tensor_rmsprop(params: List[Tensor], grads: List[Tensor], square_avgs: List[Tensor], grad_avgs: List[Tensor], momentum_buffer_list: List[Tensor], *, lr: float, alpha: float, eps: float, weight_decay: float, momentum: float, centered: bool, maximize: bool): for i, param in enumerate(params): grad = grads[i] grad = grad if not maximize else -grad square_avg = square_avgs[i] if weight_decay != 0: grad = grad.add(param, alpha=weight_decay) square_avg.mul_(alpha).addcmul_(grad, grad, value=1 - alpha) if centered: grad_avg = grad_avgs[i] grad_avg.mul_(alpha).add_(grad, alpha=1 - alpha) avg = square_avg.addcmul(grad_avg, grad_avg, value=-1).sqrt_().add_(eps) else: avg = square_avg.sqrt().add_(eps) if momentum > 0: buf = momentum_buffer_list[i] buf.mul_(momentum).addcdiv_(grad, avg) param.add_(buf, alpha=-lr) else: param.addcdiv_(grad, avg, value=-lr) def _multi_tensor_rmsprop(params: List[Tensor], grads: List[Tensor], square_avgs: List[Tensor], grad_avgs: List[Tensor], momentum_buffer_list: List[Tensor], *, lr: float, alpha: float, eps: float, weight_decay: float, momentum: float, centered: bool, maximize: bool): if len(params) == 0: return if maximize: grads = torch._foreach_neg(grads) if weight_decay != 0: torch._foreach_add_(grads, params, alpha=weight_decay) torch._foreach_mul_(square_avgs, alpha) torch._foreach_addcmul_(square_avgs, grads, grads, value=1 - alpha) if centered: torch._foreach_mul_(grad_avgs, alpha) torch._foreach_add_(grad_avgs, grads, alpha=1 - alpha) avg = torch._foreach_addcmul(square_avgs, grad_avgs, grad_avgs, value=-1) torch._foreach_sqrt_(avg) torch._foreach_add_(avg, eps) else: avg = torch._foreach_sqrt(square_avgs) torch._foreach_add_(avg, eps) if momentum > 0: torch._foreach_mul_(momentum_buffer_list, momentum) torch._foreach_addcdiv_(momentum_buffer_list, grads, avg) torch._foreach_add_(params, momentum_buffer_list, alpha=-lr) else: torch._foreach_addcdiv_(params, grads, avg, value=-lr)
pytorch-master
torch/optim/rmsprop.py
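A minimal usage sketch for the RMSprop optimizer above, not part of the original file; the toy model, synthetic batch, and hyper-parameter values are illustrative assumptions.

import torch

model = torch.nn.Linear(10, 1)                     # toy model, assumed for illustration
opt = torch.optim.RMSprop(model.parameters(), lr=1e-2, alpha=0.99,
                          momentum=0.9, centered=True)
x, y = torch.randn(32, 10), torch.randn(32, 1)     # synthetic data

for _ in range(5):                                 # a few steps, enough to show the loop
    opt.zero_grad()
    loss = (model(x) - y).pow(2).mean()
    loss.backward()
    opt.step()                                     # applies the rmsprop() update defined above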
import torch from . import _functional as F from .optimizer import Optimizer __all__ = ['SparseAdam'] class SparseAdam(Optimizer): r"""Implements lazy version of Adam algorithm suitable for sparse tensors. In this variant, only moments that show up in the gradient get updated, and only those portions of the gradient get applied to the parameters. Args: params (iterable): iterable of parameters to optimize or dicts defining parameter groups lr (float, optional): learning rate (default: 1e-3) betas (Tuple[float, float], optional): coefficients used for computing running averages of gradient and its square (default: (0.9, 0.999)) eps (float, optional): term added to the denominator to improve numerical stability (default: 1e-8) maximize (bool, optional): maximize the params based on the objective, instead of minimizing (default: False) .. _Adam\: A Method for Stochastic Optimization: https://arxiv.org/abs/1412.6980 """ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, maximize: bool = False): if not 0.0 < lr: raise ValueError("Invalid learning rate: {}".format(lr)) if not 0.0 < eps: raise ValueError("Invalid epsilon value: {}".format(eps)) if not 0.0 <= betas[0] < 1.0: raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0])) if not 0.0 <= betas[1] < 1.0: raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1])) params = list(params) sparse_params = [] for index, param in enumerate(params): if isinstance(param, dict): for d_index, d_param in enumerate(param.get("params", [])): if d_param.is_sparse: sparse_params.append([index, d_index]) elif param.is_sparse: sparse_params.append(index) if sparse_params: raise ValueError( f"Sparse params at indices {sparse_params}: SparseAdam requires dense parameter tensors" ) defaults = dict(lr=lr, betas=betas, eps=eps, maximize=maximize) super(SparseAdam, self).__init__(params, defaults) @torch.no_grad() def step(self, closure=None): """Performs a single optimization step. Args: closure (Callable, optional): A closure that reevaluates the model and returns the loss. """ loss = None if closure is not None: with torch.enable_grad(): loss = closure() for group in self.param_groups: params_with_grad = [] grads = [] exp_avgs = [] exp_avg_sqs = [] state_steps = [] eps = group['eps'] lr = group['lr'] beta1, beta2 = group['betas'] maximize = group.get('maximize', False) for p in group['params']: if p.grad is not None: params_with_grad.append(p) if not p.grad.is_sparse: raise RuntimeError('SparseAdam does not support dense gradients, please consider Adam instead') grads.append(p.grad) state = self.state[p] # State initialization if len(state) == 0: state['step'] = 0 # Exponential moving average of gradient values state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format) # Exponential moving average of squared gradient values state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format) exp_avgs.append(state['exp_avg']) exp_avg_sqs.append(state['exp_avg_sq']) # update the steps for each param group update state['step'] += 1 # record the step after step update state_steps.append(state['step']) F.sparse_adam(params_with_grad, grads, exp_avgs, exp_avg_sqs, state_steps, beta1=beta1, beta2=beta2, lr=group['lr'], eps=group['eps'], maximize=maximize) return loss
pytorch-master
torch/optim/sparse_adam.py
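A hedged usage sketch for the lazy SparseAdam above: an nn.Embedding created with sparse=True yields sparse gradients, the only gradient type this optimizer accepts. The embedding sizes and indices are assumptions for illustration.

import torch

emb = torch.nn.Embedding(1000, 16, sparse=True)    # produces sparse gradients
opt = torch.optim.SparseAdam(emb.parameters(), lr=1e-3)

idx = torch.randint(0, 1000, (8,))                 # synthetic lookup indices
opt.zero_grad()
emb(idx).pow(2).sum().backward()                   # emb.weight.grad is a sparse tensor
opt.step()                                         # only the looked-up rows are updated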
import torch from torch import Tensor from .optimizer import Optimizer from typing import List, Optional __all__ = ['Rprop', 'rprop'] class Rprop(Optimizer): r"""Implements the resilient backpropagation algorithm. .. math:: \begin{aligned} &\rule{110mm}{0.4pt} \\ &\textbf{input} : \theta_0 \in \mathbf{R}^d \text{ (params)},f(\theta) \text{ (objective)}, \\ &\hspace{13mm} \eta_{+/-} \text{ (etaplus, etaminus)}, \Gamma_{max/min} \text{ (step sizes)} \\ &\textbf{initialize} : g^0_{prev} \leftarrow 0, \: \eta_0 \leftarrow \text{lr (learning rate)} \\ &\rule{110mm}{0.4pt} \\ &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do} \\ &\hspace{5mm}g_t \leftarrow \nabla_{\theta} f_t (\theta_{t-1}) \\ &\hspace{5mm} \textbf{for} \text{ } i = 0, 1, \ldots, d-1 \: \mathbf{do} \\ &\hspace{10mm} \textbf{if} \: g^i_{prev} g^i_t > 0 \\ &\hspace{15mm} \eta^i_t \leftarrow \mathrm{min}(\eta^i_{t-1} \eta_{+}, \Gamma_{max}) \\ &\hspace{10mm} \textbf{else if} \: g^i_{prev} g^i_t < 0 \\ &\hspace{15mm} \eta^i_t \leftarrow \mathrm{max}(\eta^i_{t-1} \eta_{-}, \Gamma_{min}) \\ &\hspace{15mm} g^i_t \leftarrow 0 \\ &\hspace{10mm} \textbf{else} \: \\ &\hspace{15mm} \eta^i_t \leftarrow \eta^i_{t-1} \\ &\hspace{5mm}\theta_t \leftarrow \theta_{t-1}- \eta_t \mathrm{sign}(g_t) \\ &\hspace{5mm}g_{prev} \leftarrow g_t \\ &\rule{110mm}{0.4pt} \\[-1.ex] &\bf{return} \: \theta_t \\[-1.ex] &\rule{110mm}{0.4pt} \\[-1.ex] \end{aligned} For further details regarding the algorithm we refer to the paper `A Direct Adaptive Method for Faster Backpropagation Learning: The RPROP Algorithm <http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.21.1417>`_. Args: params (iterable): iterable of parameters to optimize or dicts defining parameter groups lr (float, optional): learning rate (default: 1e-2) etas (Tuple[float, float], optional): pair of (etaminus, etaplis), that are multiplicative increase and decrease factors (default: (0.5, 1.2)) step_sizes (Tuple[float, float], optional): a pair of minimal and maximal allowed step sizes (default: (1e-6, 50)) foreach (bool, optional): whether foreach implementation of optimizer is used (default: None) maximize (bool, optional): maximize the params based on the objective, instead of minimizing (default: False) """ def __init__(self, params, lr=1e-2, etas=(0.5, 1.2), step_sizes=(1e-6, 50), foreach: Optional[bool] = None, maximize: bool = False): if not 0.0 <= lr: raise ValueError("Invalid learning rate: {}".format(lr)) if not 0.0 < etas[0] < 1.0 < etas[1]: raise ValueError("Invalid eta values: {}, {}".format(etas[0], etas[1])) defaults = dict(lr=lr, etas=etas, step_sizes=step_sizes, foreach=foreach, maximize=maximize) super(Rprop, self).__init__(params, defaults) def __setstate__(self, state): super().__setstate__(state) for group in self.param_groups: group.setdefault('foreach', None) group.setdefault('maximize', False) @torch.no_grad() def step(self, closure=None): """Performs a single optimization step. Args: closure (Callable, optional): A closure that reevaluates the model and returns the loss. 
""" loss = None if closure is not None: with torch.enable_grad(): loss = closure() for group in self.param_groups: params = [] grads = [] prevs = [] step_sizes = [] etaminus, etaplus = group['etas'] step_size_min, step_size_max = group['step_sizes'] foreach = group['foreach'] maximize = group['maximize'] for p in group['params']: if p.grad is None: continue params.append(p) grad = p.grad if grad.is_sparse: raise RuntimeError('Rprop does not support sparse gradients') grads.append(grad) state = self.state[p] # State initialization if len(state) == 0: state['step'] = 0 state['prev'] = torch.zeros_like(p, memory_format=torch.preserve_format) state['step_size'] = grad.new().resize_as_(grad).fill_(group['lr']) prevs.append(state['prev']) step_sizes.append(state['step_size']) state['step'] += 1 rprop(params, grads, prevs, step_sizes, step_size_min=step_size_min, step_size_max=step_size_max, etaminus=etaminus, etaplus=etaplus, foreach=foreach, maximize=maximize) return loss def rprop(params: List[Tensor], grads: List[Tensor], prevs: List[Tensor], step_sizes: List[Tensor], # kwonly args with defaults are not supported by functions compiled with torchscript issue #70627 # setting this as kwarg for now as functional API is compiled by torch/distributed/optim foreach: bool = None, maximize: bool = False, *, step_size_min: float, step_size_max: float, etaminus: float, etaplus: float): r"""Functional API that performs rprop algorithm computation. See :class:`~torch.optim.Rprop` for details. """ if foreach is None: # Placeholder for more complex foreach logic to be added when value is not set foreach = False if foreach and torch.jit.is_scripting(): raise RuntimeError('torch.jit.script not supported with foreach optimizers') if foreach and not torch.jit.is_scripting(): func = _multi_tensor_rprop else: func = _single_tensor_rprop func(params, grads, prevs, step_sizes, step_size_min=step_size_min, step_size_max=step_size_max, etaminus=etaminus, etaplus=etaplus, maximize=maximize) def _single_tensor_rprop(params: List[Tensor], grads: List[Tensor], prevs: List[Tensor], step_sizes: List[Tensor], *, step_size_min: float, step_size_max: float, etaminus: float, etaplus: float, maximize: bool): for i, param in enumerate(params): grad = grads[i] grad = grad if not maximize else -grad prev = prevs[i] step_size = step_sizes[i] sign = grad.mul(prev).sign() sign[sign.gt(0)] = etaplus sign[sign.lt(0)] = etaminus sign[sign.eq(0)] = 1 # update stepsizes with step size updates step_size.mul_(sign).clamp_(step_size_min, step_size_max) # for dir<0, dfdx=0 # for dir>=0 dfdx=dfdx grad = grad.clone(memory_format=torch.preserve_format) grad[sign.eq(etaminus)] = 0 # update parameters param.addcmul_(grad.sign(), step_size, value=-1) prev.copy_(grad) def _multi_tensor_rprop(params: List[Tensor], grads: List[Tensor], prevs: List[Tensor], step_sizes: List[Tensor], *, step_size_min: float, step_size_max: float, etaminus: float, etaplus: float, maximize: bool): if len(params) == 0: return if maximize: torch._foreach_neg_(grads) signs = torch._foreach_mul(grads, prevs) signs = [s.sign() for s in signs] for sign in signs: sign[sign.gt(0)] = etaplus sign[sign.lt(0)] = etaminus sign[sign.eq(0)] = 1 # update stepsizes with step size updates torch._foreach_mul_(step_sizes, signs) for step_size in step_sizes: step_size.clamp_(step_size_min, step_size_max) # for dir<0, dfdx=0 # for dir>=0 dfdx=dfdx for i in range(len(grads)): grads[i] = grads[i].clone(memory_format=torch.preserve_format) grads[i][signs[i].eq(etaminus)] = 0 # update parameters 
grad_signs = [grad.sign() for grad in grads] torch._foreach_addcmul_(params, grad_signs, step_sizes, value=-1) for i in range(len(prevs)): prevs[i].copy_(grads[i])
pytorch-master
torch/optim/rprop.py
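A hedged Rprop sketch, not from the original file: because the update depends only on the sign of each gradient entry, Rprop is normally run on a full-batch (deterministic) objective. The tensors and hyper-parameters below are assumptions.

import torch

w = torch.zeros(3, requires_grad=True)
target = torch.tensor([1.0, -2.0, 0.5])
opt = torch.optim.Rprop([w], lr=0.01, etas=(0.5, 1.2), step_sizes=(1e-6, 50))

for _ in range(100):
    opt.zero_grad()
    loss = (w - target).pow(2).sum()               # deterministic objective
    loss.backward()
    opt.step()                                     # per-element step sizes grow/shrink by the eta factors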
import torch from torch import Tensor from .optimizer import Optimizer, required, _use_grad_for_differentiable from typing import List, Optional __all__ = ['SGD', 'sgd'] class SGD(Optimizer): r"""Implements stochastic gradient descent (optionally with momentum). .. math:: \begin{aligned} &\rule{110mm}{0.4pt} \\ &\textbf{input} : \gamma \text{ (lr)}, \: \theta_0 \text{ (params)}, \: f(\theta) \text{ (objective)}, \: \lambda \text{ (weight decay)}, \\ &\hspace{13mm} \:\mu \text{ (momentum)}, \:\tau \text{ (dampening)}, \:\textit{ nesterov,}\:\textit{ maximize} \\[-1.ex] &\rule{110mm}{0.4pt} \\ &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do} \\ &\hspace{5mm}g_t \leftarrow \nabla_{\theta} f_t (\theta_{t-1}) \\ &\hspace{5mm}\textbf{if} \: \lambda \neq 0 \\ &\hspace{10mm} g_t \leftarrow g_t + \lambda \theta_{t-1} \\ &\hspace{5mm}\textbf{if} \: \mu \neq 0 \\ &\hspace{10mm}\textbf{if} \: t > 1 \\ &\hspace{15mm} \textbf{b}_t \leftarrow \mu \textbf{b}_{t-1} + (1-\tau) g_t \\ &\hspace{10mm}\textbf{else} \\ &\hspace{15mm} \textbf{b}_t \leftarrow g_t \\ &\hspace{10mm}\textbf{if} \: \textit{nesterov} \\ &\hspace{15mm} g_t \leftarrow g_{t} + \mu \textbf{b}_t \\ &\hspace{10mm}\textbf{else} \\[-1.ex] &\hspace{15mm} g_t \leftarrow \textbf{b}_t \\ &\hspace{5mm}\textbf{if} \: \textit{maximize} \\ &\hspace{10mm}\theta_t \leftarrow \theta_{t-1} + \gamma g_t \\[-1.ex] &\hspace{5mm}\textbf{else} \\[-1.ex] &\hspace{10mm}\theta_t \leftarrow \theta_{t-1} - \gamma g_t \\[-1.ex] &\rule{110mm}{0.4pt} \\[-1.ex] &\bf{return} \: \theta_t \\[-1.ex] &\rule{110mm}{0.4pt} \\[-1.ex] \end{aligned} Nesterov momentum is based on the formula from `On the importance of initialization and momentum in deep learning`__. Args: params (iterable): iterable of parameters to optimize or dicts defining parameter groups lr (float): learning rate momentum (float, optional): momentum factor (default: 0) weight_decay (float, optional): weight decay (L2 penalty) (default: 0) dampening (float, optional): dampening for momentum (default: 0) nesterov (bool, optional): enables Nesterov momentum (default: False) maximize (bool, optional): maximize the params based on the objective, instead of minimizing (default: False) foreach (bool, optional): whether foreach implementation of optimizer is used (default: None) Example: >>> # xdoctest: +SKIP >>> optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9) >>> optimizer.zero_grad() >>> loss_fn(model(input), target).backward() >>> optimizer.step() __ http://www.cs.toronto.edu/%7Ehinton/absps/momentum.pdf .. note:: The implementation of SGD with Momentum/Nesterov subtly differs from Sutskever et. al. and implementations in some other frameworks. Considering the specific case of Momentum, the update can be written as .. math:: \begin{aligned} v_{t+1} & = \mu * v_{t} + g_{t+1}, \\ p_{t+1} & = p_{t} - \text{lr} * v_{t+1}, \end{aligned} where :math:`p`, :math:`g`, :math:`v` and :math:`\mu` denote the parameters, gradient, velocity, and momentum respectively. This is in contrast to Sutskever et. al. and other frameworks which employ an update of the form .. math:: \begin{aligned} v_{t+1} & = \mu * v_{t} + \text{lr} * g_{t+1}, \\ p_{t+1} & = p_{t} - v_{t+1}. \end{aligned} The Nesterov version is analogously modified. 
""" def __init__(self, params, lr=required, momentum=0, dampening=0, weight_decay=0, nesterov=False, *, maximize=False, foreach: Optional[bool] = None, differentiable=False): if lr is not required and lr < 0.0: raise ValueError("Invalid learning rate: {}".format(lr)) if momentum < 0.0: raise ValueError("Invalid momentum value: {}".format(momentum)) if weight_decay < 0.0: raise ValueError("Invalid weight_decay value: {}".format(weight_decay)) defaults = dict(lr=lr, momentum=momentum, dampening=dampening, weight_decay=weight_decay, nesterov=nesterov, maximize=maximize, foreach=foreach, differentiable=differentiable) if nesterov and (momentum <= 0 or dampening != 0): raise ValueError("Nesterov momentum requires a momentum and zero dampening") super(SGD, self).__init__(params, defaults) def __setstate__(self, state): super().__setstate__(state) for group in self.param_groups: group.setdefault('nesterov', False) group.setdefault('maximize', False) group.setdefault('foreach', None) @_use_grad_for_differentiable def step(self, closure=None): """Performs a single optimization step. Args: closure (Callable, optional): A closure that reevaluates the model and returns the loss. """ loss = None if closure is not None: with torch.enable_grad(): loss = closure() for group in self.param_groups: params_with_grad = [] d_p_list = [] momentum_buffer_list = [] has_sparse_grad = False for p in group['params']: if p.grad is not None: params_with_grad.append(p) d_p_list.append(p.grad) if p.grad.is_sparse: has_sparse_grad = True state = self.state[p] if 'momentum_buffer' not in state: momentum_buffer_list.append(None) else: momentum_buffer_list.append(state['momentum_buffer']) sgd(params_with_grad, d_p_list, momentum_buffer_list, weight_decay=group['weight_decay'], momentum=group['momentum'], lr=group['lr'], dampening=group['dampening'], nesterov=group['nesterov'], maximize=group['maximize'], has_sparse_grad=has_sparse_grad, foreach=group['foreach']) # update momentum_buffers in state for p, momentum_buffer in zip(params_with_grad, momentum_buffer_list): state = self.state[p] state['momentum_buffer'] = momentum_buffer return loss def sgd(params: List[Tensor], d_p_list: List[Tensor], momentum_buffer_list: List[Optional[Tensor]], # kwonly args with defaults are not supported by functions compiled with torchscript issue #70627 # setting this as kwarg for now as functional API is compiled by torch/distributed/optim has_sparse_grad: bool = None, foreach: bool = None, *, weight_decay: float, momentum: float, lr: float, dampening: float, nesterov: bool, maximize: bool): r"""Functional API that performs SGD algorithm computation. See :class:`~torch.optim.SGD` for details. 
""" if foreach is None: # Placeholder for more complex foreach logic to be added when value is not set foreach = False if foreach and torch.jit.is_scripting(): raise RuntimeError('torch.jit.script not supported with foreach optimizers') if foreach and not torch.jit.is_scripting(): func = _multi_tensor_sgd else: func = _single_tensor_sgd func(params, d_p_list, momentum_buffer_list, weight_decay=weight_decay, momentum=momentum, lr=lr, dampening=dampening, nesterov=nesterov, has_sparse_grad=has_sparse_grad, maximize=maximize) def _single_tensor_sgd(params: List[Tensor], d_p_list: List[Tensor], momentum_buffer_list: List[Optional[Tensor]], *, weight_decay: float, momentum: float, lr: float, dampening: float, nesterov: bool, maximize: bool, has_sparse_grad: bool): for i, param in enumerate(params): d_p = d_p_list[i] if not maximize else -d_p_list[i] if weight_decay != 0: d_p = d_p.add(param, alpha=weight_decay) if momentum != 0: buf = momentum_buffer_list[i] if buf is None: buf = torch.clone(d_p).detach() momentum_buffer_list[i] = buf else: buf.mul_(momentum).add_(d_p, alpha=1 - dampening) if nesterov: d_p = d_p.add(buf, alpha=momentum) else: d_p = buf param.add_(d_p, alpha=-lr) def _multi_tensor_sgd(params: List[Tensor], grads: List[Tensor], momentum_buffer_list: List[Optional[Tensor]], *, weight_decay: float, momentum: float, lr: float, dampening: float, nesterov: bool, maximize: bool, has_sparse_grad: bool): if len(params) == 0: return if has_sparse_grad is None: has_sparse_grad = any(grad.is_sparse for grad in grads) if maximize: grads = torch._foreach_neg(tuple(grads)) # type: ignore[assignment] if weight_decay != 0: grads = torch._foreach_add(grads, params, alpha=weight_decay) if momentum != 0: bufs = [] all_states_with_momentum_buffer = True for i in range(len(momentum_buffer_list)): if momentum_buffer_list[i] is None: all_states_with_momentum_buffer = False break else: bufs.append(momentum_buffer_list[i]) if all_states_with_momentum_buffer: torch._foreach_mul_(bufs, momentum) torch._foreach_add_(bufs, grads, alpha=1 - dampening) else: bufs = [] for i in range(len(momentum_buffer_list)): if momentum_buffer_list[i] is None: buf = momentum_buffer_list[i] = torch.clone(grads[i]).detach() else: buf = momentum_buffer_list[i] buf.mul_(momentum).add_(grads[i], alpha=1 - dampening) bufs.append(buf) if nesterov: torch._foreach_add_(grads, bufs, alpha=momentum) else: grads = bufs if not has_sparse_grad: torch._foreach_add_(params, grads, alpha=-lr) else: # foreach APIs dont support sparse for i in range(len(params)): params[i].add_(grads[i], alpha=-lr)
pytorch-master
torch/optim/sgd.py
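The SGD docstring above already shows basic usage; the sketch below (an illustrative assumption, not from the original file) shows per-parameter-group options with Nesterov momentum: weight decay on the weight matrix, none on the bias.

import torch

model = torch.nn.Linear(4, 2)
opt = torch.optim.SGD(
    [{'params': model.weight, 'weight_decay': 1e-4},
     {'params': model.bias, 'weight_decay': 0.0}],
    lr=0.1, momentum=0.9, nesterov=True)           # nesterov requires momentum > 0 and dampening == 0

x, y = torch.randn(8, 4), torch.randn(8, 2)
opt.zero_grad()
(model(x) - y).pow(2).mean().backward()
opt.step()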
""" :mod:`torch.optim` is a package implementing various optimization algorithms. Most commonly used methods are already supported, and the interface is general enough, so that more sophisticated ones can be also easily integrated in the future. """ from .adadelta import Adadelta from .adagrad import Adagrad from .adam import Adam from .adamw import AdamW from .sparse_adam import SparseAdam from .adamax import Adamax from .asgd import ASGD from .sgd import SGD from .radam import RAdam from .rprop import Rprop from .rmsprop import RMSprop from .optimizer import Optimizer from .nadam import NAdam from .lbfgs import LBFGS from . import lr_scheduler from . import swa_utils del adadelta del adagrad del adam del adamw del sparse_adam del adamax del asgd del sgd del radam del rprop del rmsprop del optimizer del nadam del lbfgs
pytorch-master
torch/optim/__init__.py
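A hedged sketch of how the package above is typically combined with a scheduler from torch.optim.lr_scheduler; the model, optimizer choice, and schedule values are assumptions made for illustration.

import torch
from torch import optim

model = torch.nn.Linear(8, 1)
optimizer = optim.AdamW(model.parameters(), lr=1e-3, weight_decay=1e-2)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.5)

for epoch in range(3):
    optimizer.zero_grad()
    loss = model(torch.randn(16, 8)).pow(2).mean()
    loss.backward()
    optimizer.step()      # parameter update
    scheduler.step()      # learning-rate update, once per epoch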
import torch from torch import Tensor from .optimizer import Optimizer from typing import List, Optional __all__ = ['Adamax', 'adamax'] class Adamax(Optimizer): r"""Implements Adamax algorithm (a variant of Adam based on infinity norm). .. math:: \begin{aligned} &\rule{110mm}{0.4pt} \\ &\textbf{input} : \gamma \text{ (lr)}, \beta_1, \beta_2 \text{ (betas)},\theta_0 \text{ (params)},f(\theta) \text{ (objective)}, \: \lambda \text{ (weight decay)}, \\ &\hspace{13mm} \epsilon \text{ (epsilon)} \\ &\textbf{initialize} : m_0 \leftarrow 0 \text{ ( first moment)}, u_0 \leftarrow 0 \text{ ( infinity norm)} \\[-1.ex] &\rule{110mm}{0.4pt} \\ &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do} \\ &\hspace{5mm}g_t \leftarrow \nabla_{\theta} f_t (\theta_{t-1}) \\ &\hspace{5mm}if \: \lambda \neq 0 \\ &\hspace{10mm} g_t \leftarrow g_t + \lambda \theta_{t-1} \\ &\hspace{5mm}m_t \leftarrow \beta_1 m_{t-1} + (1 - \beta_1) g_t \\ &\hspace{5mm}u_t \leftarrow \mathrm{max}(\beta_2 u_{t-1}, |g_{t}|+\epsilon) \\ &\hspace{5mm}\theta_t \leftarrow \theta_{t-1} - \frac{\gamma m_t}{(1-\beta^t_1) u_t} \\ &\rule{110mm}{0.4pt} \\[-1.ex] &\bf{return} \: \theta_t \\[-1.ex] &\rule{110mm}{0.4pt} \\[-1.ex] \end{aligned} For further details regarding the algorithm we refer to `Adam: A Method for Stochastic Optimization`_. Args: params (iterable): iterable of parameters to optimize or dicts defining parameter groups lr (float, optional): learning rate (default: 2e-3) betas (Tuple[float, float], optional): coefficients used for computing running averages of gradient and its square eps (float, optional): term added to the denominator to improve numerical stability (default: 1e-8) weight_decay (float, optional): weight decay (L2 penalty) (default: 0) foreach (bool, optional): whether foreach implementation of optimizer is used (default: None) maximize (bool, optional): maximize the params based on the objective, instead of minimizing (default: False) .. _Adam\: A Method for Stochastic Optimization: https://arxiv.org/abs/1412.6980 """ def __init__(self, params, lr=2e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, foreach: Optional[bool] = None, *, maximize: bool = False): if not 0.0 <= lr: raise ValueError("Invalid learning rate: {}".format(lr)) if not 0.0 <= eps: raise ValueError("Invalid epsilon value: {}".format(eps)) if not 0.0 <= betas[0] < 1.0: raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0])) if not 0.0 <= betas[1] < 1.0: raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1])) if not 0.0 <= weight_decay: raise ValueError("Invalid weight_decay value: {}".format(weight_decay)) defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, foreach=foreach, maximize=maximize) super(Adamax, self).__init__(params, defaults) def __setstate__(self, state): super().__setstate__(state) for group in self.param_groups: group.setdefault('foreach', None) group.setdefault('maximize', False) state_values = list(self.state.values()) step_is_tensor = (len(state_values) != 0) and torch.is_tensor(state_values[0]['step']) if not step_is_tensor: for s in state_values: s['step'] = torch.tensor(float(s['step'])) @torch.no_grad() def step(self, closure=None): """Performs a single optimization step. Args: closure (Callable, optional): A closure that reevaluates the model and returns the loss. 
""" loss = None if closure is not None: with torch.enable_grad(): loss = closure() for group in self.param_groups: params_with_grad = [] grads = [] exp_avgs = [] exp_infs = [] state_steps = [] beta1, beta2 = group['betas'] eps = group['eps'] lr = group['lr'] weight_decay = group['weight_decay'] foreach = group['foreach'] maximize = group['maximize'] for p in group['params']: if p.grad is None: continue params_with_grad.append(p) if p.grad.is_sparse: raise RuntimeError('Adamax does not support sparse gradients') grads.append(p.grad) state = self.state[p] # State initialization if len(state) == 0: state['step'] = torch.tensor(0.) state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format) state['exp_inf'] = torch.zeros_like(p, memory_format=torch.preserve_format) exp_avgs.append(state['exp_avg']) exp_infs.append(state['exp_inf']) state_steps.append(state['step']) adamax(params_with_grad, grads, exp_avgs, exp_infs, state_steps, eps=eps, beta1=beta1, beta2=beta2, lr=lr, weight_decay=weight_decay, foreach=foreach, maximize=maximize) return loss def adamax(params: List[Tensor], grads: List[Tensor], exp_avgs: List[Tensor], exp_infs: List[Tensor], state_steps: List[Tensor], # kwonly args with defaults are not supported by functions compiled with torchscript issue #70627 # setting this as kwarg for now as functional API is compiled by torch/distributed/optim foreach: bool = None, maximize: bool = False, *, eps: float, beta1: float, beta2: float, lr: float, weight_decay: float): r"""Functional API that performs adamax algorithm computation. See :class:`~torch.optim.Adamax` for details. """ if not all(isinstance(t, torch.Tensor) for t in state_steps): raise RuntimeError("API has changed, `state_steps` argument must contain a list of singleton tensors") if foreach is None: # Placeholder for more complex foreach logic to be added when value is not set foreach = False if foreach and torch.jit.is_scripting(): raise RuntimeError('torch.jit.script not supported with foreach optimizers') if foreach and not torch.jit.is_scripting(): func = _multi_tensor_adamax else: func = _single_tensor_adamax func(params, grads, exp_avgs, exp_infs, state_steps, eps=eps, beta1=beta1, beta2=beta2, lr=lr, weight_decay=weight_decay, maximize=maximize) def _single_tensor_adamax(params: List[Tensor], grads: List[Tensor], exp_avgs: List[Tensor], exp_infs: List[Tensor], state_steps: List[Tensor], *, eps: float, beta1: float, beta2: float, lr: float, weight_decay: float, maximize: bool): for i, param in enumerate(params): grad = grads[i] grad = grad if not maximize else -grad exp_avg = exp_avgs[i] exp_inf = exp_infs[i] step_t = state_steps[i] # update step step_t += 1 step = step_t.item() if weight_decay != 0: grad = grad.add(param, alpha=weight_decay) if torch.is_complex(param): param = torch.view_as_real(param) grad = torch.view_as_real(grad) exp_avg = torch.view_as_real(exp_avg) exp_inf = torch.view_as_real(exp_inf) # Update biased first moment estimate. exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1) # Update the exponentially weighted infinity norm. 
norm_buf = torch.cat([ exp_inf.mul_(beta2).unsqueeze(0), grad.abs().add_(eps).unsqueeze_(0) ], 0) torch.amax(norm_buf, 0, keepdim=False, out=exp_inf) bias_correction = 1 - beta1 ** step clr = lr / bias_correction param.addcdiv_(exp_avg, exp_inf, value=-clr) def _multi_tensor_adamax(params: List[Tensor], grads: List[Tensor], exp_avgs: List[Tensor], exp_infs: List[Tensor], state_steps: List[Tensor], *, beta1: float, beta2: float, lr: float, weight_decay: float, eps: float, maximize: bool): if len(params) == 0: return if maximize: grads = torch._foreach_neg(grads) params = [torch.view_as_real(x) if torch.is_complex(x) else x for x in params] grads = [torch.view_as_real(x) if torch.is_complex(x) else x for x in grads] exp_avgs = [torch.view_as_real(x) if torch.is_complex(x) else x for x in exp_avgs] exp_infs = [torch.view_as_real(x) if torch.is_complex(x) else x for x in exp_infs] # Update steps torch._foreach_add_(state_steps, 1) if weight_decay != 0: torch._foreach_add_(grads, params, alpha=weight_decay) # Update biased first moment estimate. torch._foreach_mul_(exp_avgs, beta1) torch._foreach_add_(exp_avgs, grads, alpha=1 - beta1) # Update the exponentially weighted infinity norm. torch._foreach_mul_(exp_infs, beta2) for exp_inf, grad in zip(exp_infs, grads): norm_buf = torch.cat([ exp_inf.unsqueeze(0), grad.abs().add_(eps).unsqueeze_(0) ], 0) torch.max(norm_buf, 0, keepdim=False, out=(exp_inf, exp_inf.new().long())) bias_corrections = [1 - beta1 ** step.item() for step in state_steps] clr = [-1 * (lr / bias_correction) for bias_correction in bias_corrections] torch._foreach_addcdiv_(params, exp_avgs, exp_infs, clr)
pytorch-master
torch/optim/adamax.py
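A hedged Adamax sketch, not part of the original file; sizes and hyper-parameters are illustrative. The exp_inf state tensor holds the running infinity norm used in the update above.

import torch
import torch.nn.functional as F

model = torch.nn.Linear(20, 5)
opt = torch.optim.Adamax(model.parameters(), lr=2e-3, betas=(0.9, 0.999))

x, y = torch.randn(64, 20), torch.randint(0, 5, (64,))
opt.zero_grad()
F.cross_entropy(model(x), y).backward()
opt.step()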
import torch from torch import Tensor from .optimizer import Optimizer from typing import List, Optional __all__ = ['Adagrad', 'adagrad'] class Adagrad(Optimizer): r"""Implements Adagrad algorithm. .. math:: \begin{aligned} &\rule{110mm}{0.4pt} \\ &\textbf{input} : \gamma \text{ (lr)}, \: \theta_0 \text{ (params)}, \: f(\theta) \text{ (objective)}, \: \lambda \text{ (weight decay)}, \\ &\hspace{12mm} \tau \text{ (initial accumulator value)}, \: \eta\text{ (lr decay)}\\ &\textbf{initialize} : state\_sum_0 \leftarrow 0 \\[-1.ex] &\rule{110mm}{0.4pt} \\ &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do} \\ &\hspace{5mm}g_t \leftarrow \nabla_{\theta} f_t (\theta_{t-1}) \\ &\hspace{5mm} \tilde{\gamma} \leftarrow \gamma / (1 +(t-1) \eta) \\ &\hspace{5mm} \textbf{if} \: \lambda \neq 0 \\ &\hspace{10mm} g_t \leftarrow g_t + \lambda \theta_{t-1} \\ &\hspace{5mm}state\_sum_t \leftarrow state\_sum_{t-1} + g^2_t \\ &\hspace{5mm}\theta_t \leftarrow \theta_{t-1}- \tilde{\gamma} \frac{g_t}{\sqrt{state\_sum_t}+\epsilon} \\ &\rule{110mm}{0.4pt} \\[-1.ex] &\bf{return} \: \theta_t \\[-1.ex] &\rule{110mm}{0.4pt} \\[-1.ex] \end{aligned} For further details regarding the algorithm we refer to `Adaptive Subgradient Methods for Online Learning and Stochastic Optimization`_. Args: params (iterable): iterable of parameters to optimize or dicts defining parameter groups lr (float, optional): learning rate (default: 1e-2) lr_decay (float, optional): learning rate decay (default: 0) weight_decay (float, optional): weight decay (L2 penalty) (default: 0) eps (float, optional): term added to the denominator to improve numerical stability (default: 1e-10) foreach (bool, optional): whether foreach implementation of optimizer is used (default: None) maximize (bool, optional): maximize the params based on the objective, instead of minimizing (default: False) .. 
_Adaptive Subgradient Methods for Online Learning and Stochastic Optimization: http://jmlr.org/papers/v12/duchi11a.html """ def __init__( self, params, lr=1e-2, lr_decay=0, weight_decay=0, initial_accumulator_value=0, eps=1e-10, foreach: Optional[bool] = None, *, maximize: bool = False ): if not 0.0 <= lr: raise ValueError("Invalid learning rate: {}".format(lr)) if not 0.0 <= lr_decay: raise ValueError("Invalid lr_decay value: {}".format(lr_decay)) if not 0.0 <= weight_decay: raise ValueError("Invalid weight_decay value: {}".format(weight_decay)) if not 0.0 <= initial_accumulator_value: raise ValueError( "Invalid initial_accumulator_value value: {}".format( initial_accumulator_value ) ) if not 0.0 <= eps: raise ValueError("Invalid epsilon value: {}".format(eps)) defaults = dict( lr=lr, lr_decay=lr_decay, eps=eps, weight_decay=weight_decay, initial_accumulator_value=initial_accumulator_value, foreach=foreach, maximize=maximize, ) super(Adagrad, self).__init__(params, defaults) for group in self.param_groups: for p in group["params"]: state = self.state[p] state["step"] = torch.tensor(0.0) init_value = ( complex(initial_accumulator_value, initial_accumulator_value) if torch.is_complex(p) else initial_accumulator_value ) state["sum"] = torch.full_like( p, init_value, memory_format=torch.preserve_format ) def __setstate__(self, state): super().__setstate__(state) for group in self.param_groups: group.setdefault("foreach", None) group.setdefault("maximize", False) state_values = list(self.state.values()) step_is_tensor = (len(state_values) != 0) and torch.is_tensor( state_values[0]["step"] ) if not step_is_tensor: for s in state_values: s["step"] = torch.tensor(float(s["step"])) def share_memory(self): for group in self.param_groups: for p in group["params"]: state = self.state[p] state["sum"].share_memory_() @torch.no_grad() def step(self, closure=None): """Performs a single optimization step. Args: closure (Callable, optional): A closure that reevaluates the model and returns the loss. """ loss = None if closure is not None: with torch.enable_grad(): loss = closure() for group in self.param_groups: params_with_grad = [] grads = [] state_sums = [] state_steps = [] has_sparse_grad = False for p in group["params"]: if p.grad is not None: if p.grad.is_sparse: has_sparse_grad = True params_with_grad.append(p) grads.append(p.grad) state = self.state[p] state_sums.append(state["sum"]) state_steps.append(state["step"]) adagrad( params_with_grad, grads, state_sums, state_steps, lr=group["lr"], weight_decay=group["weight_decay"], lr_decay=group["lr_decay"], eps=group["eps"], has_sparse_grad=has_sparse_grad, foreach=group["foreach"], maximize=group["maximize"], ) return loss def adagrad( params: List[Tensor], grads: List[Tensor], state_sums: List[Tensor], state_steps: List[Tensor], # kwonly args with defaults are not supported by functions compiled with torchscript issue #70627 # setting these as kwargs for now as functional API is compiled by torch/distributed/optim has_sparse_grad: bool = None, foreach: bool = None, *, lr: float, weight_decay: float, lr_decay: float, eps: float, maximize: bool, ): r"""Functional API that performs Adagrad algorithm computation. See :class:`~torch.optim.Adagrad` for details. 
""" if not all(isinstance(t, torch.Tensor) for t in state_steps): raise RuntimeError( "API has changed, `state_steps` argument must contain a list of singleton tensors" ) if foreach is None: # Placeholder for more complex foreach logic to be added when value is not set foreach = False if foreach and torch.jit.is_scripting(): raise RuntimeError("torch.jit.script not supported with foreach optimizers") if foreach and not torch.jit.is_scripting(): func = _multi_tensor_adagrad else: func = _single_tensor_adagrad func( params, grads, state_sums, state_steps, lr=lr, weight_decay=weight_decay, lr_decay=lr_decay, eps=eps, has_sparse_grad=has_sparse_grad, maximize=maximize, ) def _make_sparse(grad, grad_indices, values): size = grad.size() if grad_indices.numel() == 0 or values.numel() == 0: return torch.empty_like(grad) return torch.sparse_coo_tensor(grad_indices, values, size) def _single_tensor_adagrad( params: List[Tensor], grads: List[Tensor], state_sums: List[Tensor], state_steps: List[Tensor], *, lr: float, weight_decay: float, lr_decay: float, eps: float, has_sparse_grad: bool, maximize: bool, ): for (param, grad, state_sum, step_t) in zip(params, grads, state_sums, state_steps): # update step step_t += 1 step = step_t.item() grad = grad if not maximize else -grad if weight_decay != 0: if grad.is_sparse: raise RuntimeError( "weight_decay option is not compatible with sparse gradients" ) grad = grad.add(param, alpha=weight_decay) clr = lr / (1 + (step - 1) * lr_decay) if grad.is_sparse: grad = grad.coalesce() # the update is non-linear so indices must be unique grad_indices = grad._indices() grad_values = grad._values() size = grad.size() state_sum.add_(_make_sparse(grad, grad_indices, grad_values.pow(2))) std = state_sum.sparse_mask(grad) std_values = std._values().sqrt_().add_(eps) param.add_( _make_sparse(grad, grad_indices, grad_values / std_values), alpha=-clr ) else: is_complex = torch.is_complex(param) if is_complex: grad = torch.view_as_real(grad) state_sum = torch.view_as_real(state_sum) param = torch.view_as_real(param) state_sum.addcmul_(grad, grad, value=1) std = state_sum.sqrt().add_(eps) param.addcdiv_(grad, std, value=-clr) if is_complex: param = torch.view_as_complex(param) state_sum = torch.view_as_complex(state_sum) def _multi_tensor_adagrad( params: List[Tensor], grads: List[Tensor], state_sums: List[Tensor], state_steps: List[Tensor], *, lr: float, weight_decay: float, lr_decay: float, eps: float, has_sparse_grad: bool, maximize: bool, ): # Foreach functions will throw errors if given empty lists if len(params) == 0: return if maximize: grads = torch._foreach_neg(grads) if has_sparse_grad is None: has_sparse_grad = any(grad.is_sparse for grad in grads) if has_sparse_grad: return _single_tensor_adagrad( params, grads, state_sums, state_steps, lr=lr, weight_decay=weight_decay, lr_decay=lr_decay, eps=eps, has_sparse_grad=has_sparse_grad, maximize=False, ) # Update steps torch._foreach_add_(state_steps, 1) if weight_decay != 0: torch._foreach_add_(grads, params, alpha=weight_decay) minus_clr = [-lr / (1 + (step - 1) * lr_decay) for step in state_steps] grads = [torch.view_as_real(x) if torch.is_complex(x) else x for x in grads] state_sums = [ torch.view_as_real(x) if torch.is_complex(x) else x for x in state_sums ] torch._foreach_addcmul_(state_sums, grads, grads, value=1) std = torch._foreach_add(torch._foreach_sqrt(state_sums), eps) toAdd = torch._foreach_div(torch._foreach_mul(grads, minus_clr), std) toAdd = [ torch.view_as_complex(x) if torch.is_complex(params[i]) else x 
for i, x in enumerate(toAdd) ] torch._foreach_add_(params, toAdd) state_sums = [ torch.view_as_complex(x) if torch.is_complex(params[i]) else x for i, x in enumerate(state_sums) ]
pytorch-master
torch/optim/adagrad.py
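A hedged Adagrad sketch showing the lr_decay and initial_accumulator_value options defined above; the tensor and hyper-parameter values are assumptions for illustration.

import torch

w = torch.randn(5, requires_grad=True)
opt = torch.optim.Adagrad([w], lr=0.1, lr_decay=1e-4,
                          initial_accumulator_value=0.1, eps=1e-10)

for _ in range(10):
    opt.zero_grad()
    w.pow(2).sum().backward()
    opt.step()            # state['sum'] accumulates g^2, shrinking the effective step per element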
import math import torch from torch import Tensor from .optimizer import Optimizer from typing import List, Optional __all__ = ['AdamW', 'adamw'] class AdamW(Optimizer): r"""Implements AdamW algorithm. .. math:: \begin{aligned} &\rule{110mm}{0.4pt} \\ &\textbf{input} : \gamma \text{(lr)}, \: \beta_1, \beta_2 \text{(betas)}, \: \theta_0 \text{(params)}, \: f(\theta) \text{(objective)}, \: \epsilon \text{ (epsilon)} \\ &\hspace{13mm} \lambda \text{(weight decay)}, \: \textit{amsgrad}, \: \textit{maximize} \\ &\textbf{initialize} : m_0 \leftarrow 0 \text{ (first moment)}, v_0 \leftarrow 0 \text{ ( second moment)}, \: \widehat{v_0}^{max}\leftarrow 0 \\[-1.ex] &\rule{110mm}{0.4pt} \\ &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do} \\ &\hspace{5mm}\textbf{if} \: \textit{maximize}: \\ &\hspace{10mm}g_t \leftarrow -\nabla_{\theta} f_t (\theta_{t-1}) \\ &\hspace{5mm}\textbf{else} \\ &\hspace{10mm}g_t \leftarrow \nabla_{\theta} f_t (\theta_{t-1}) \\ &\hspace{5mm} \theta_t \leftarrow \theta_{t-1} - \gamma \lambda \theta_{t-1} \\ &\hspace{5mm}m_t \leftarrow \beta_1 m_{t-1} + (1 - \beta_1) g_t \\ &\hspace{5mm}v_t \leftarrow \beta_2 v_{t-1} + (1-\beta_2) g^2_t \\ &\hspace{5mm}\widehat{m_t} \leftarrow m_t/\big(1-\beta_1^t \big) \\ &\hspace{5mm}\widehat{v_t} \leftarrow v_t/\big(1-\beta_2^t \big) \\ &\hspace{5mm}\textbf{if} \: amsgrad \\ &\hspace{10mm}\widehat{v_t}^{max} \leftarrow \mathrm{max}(\widehat{v_t}^{max}, \widehat{v_t}) \\ &\hspace{10mm}\theta_t \leftarrow \theta_t - \gamma \widehat{m_t}/ \big(\sqrt{\widehat{v_t}^{max}} + \epsilon \big) \\ &\hspace{5mm}\textbf{else} \\ &\hspace{10mm}\theta_t \leftarrow \theta_t - \gamma \widehat{m_t}/ \big(\sqrt{\widehat{v_t}} + \epsilon \big) \\ &\rule{110mm}{0.4pt} \\[-1.ex] &\bf{return} \: \theta_t \\[-1.ex] &\rule{110mm}{0.4pt} \\[-1.ex] \end{aligned} For further details regarding the algorithm we refer to `Decoupled Weight Decay Regularization`_. Args: params (iterable): iterable of parameters to optimize or dicts defining parameter groups lr (float, optional): learning rate (default: 1e-3) betas (Tuple[float, float], optional): coefficients used for computing running averages of gradient and its square (default: (0.9, 0.999)) eps (float, optional): term added to the denominator to improve numerical stability (default: 1e-8) weight_decay (float, optional): weight decay coefficient (default: 1e-2) amsgrad (bool, optional): whether to use the AMSGrad variant of this algorithm from the paper `On the Convergence of Adam and Beyond`_ (default: False) maximize (bool, optional): maximize the params based on the objective, instead of minimizing (default: False) foreach (bool, optional): whether foreach implementation of optimizer is used (default: None) capturable (bool, optional): whether this instance is safe to capture in a CUDA graph. Passing True can impair ungraphed performance, so if you don't intend to graph capture this instance, leave it False (default: False) .. _Decoupled Weight Decay Regularization: https://arxiv.org/abs/1711.05101 .. 
_On the Convergence of Adam and Beyond: https://openreview.net/forum?id=ryQu7f-RZ """ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=1e-2, amsgrad=False, *, maximize: bool = False, foreach: Optional[bool] = None, capturable: bool = False): if not 0.0 <= lr: raise ValueError("Invalid learning rate: {}".format(lr)) if not 0.0 <= eps: raise ValueError("Invalid epsilon value: {}".format(eps)) if not 0.0 <= betas[0] < 1.0: raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0])) if not 0.0 <= betas[1] < 1.0: raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1])) if not 0.0 <= weight_decay: raise ValueError("Invalid weight_decay value: {}".format(weight_decay)) defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, amsgrad=amsgrad, foreach=foreach, maximize=maximize, capturable=capturable) super(AdamW, self).__init__(params, defaults) def __setstate__(self, state): super().__setstate__(state) for group in self.param_groups: group.setdefault('amsgrad', False) group.setdefault('maximize', False) group.setdefault('foreach', None) group.setdefault('capturable', False) state_values = list(self.state.values()) step_is_tensor = (len(state_values) != 0) and torch.is_tensor(state_values[0]['step']) if not step_is_tensor: for s in state_values: s['step'] = torch.tensor(float(s['step'])) @torch.no_grad() def step(self, closure=None): """Performs a single optimization step. Args: closure (Callable, optional): A closure that reevaluates the model and returns the loss. """ self._cuda_graph_capture_health_check() loss = None if closure is not None: with torch.enable_grad(): loss = closure() for group in self.param_groups: params_with_grad = [] grads = [] exp_avgs = [] exp_avg_sqs = [] max_exp_avg_sqs = [] state_steps = [] amsgrad = group['amsgrad'] beta1, beta2 = group['betas'] for p in group['params']: if p.grad is None: continue params_with_grad.append(p) if p.grad.is_sparse: raise RuntimeError('AdamW does not support sparse gradients') grads.append(p.grad) state = self.state[p] # State initialization if len(state) == 0: state['step'] = torch.zeros((1,), dtype=torch.float, device=p.device) \ if self.defaults['capturable'] else torch.tensor(0.) # Exponential moving average of gradient values state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format) # Exponential moving average of squared gradient values state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format) if amsgrad: # Maintains max of all exp. moving avg. of sq. grad. 
values state['max_exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format) exp_avgs.append(state['exp_avg']) exp_avg_sqs.append(state['exp_avg_sq']) if amsgrad: max_exp_avg_sqs.append(state['max_exp_avg_sq']) state_steps.append(state['step']) adamw(params_with_grad, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, amsgrad=amsgrad, beta1=beta1, beta2=beta2, lr=group['lr'], weight_decay=group['weight_decay'], eps=group['eps'], maximize=group['maximize'], foreach=group['foreach'], capturable=group['capturable']) return loss def adamw(params: List[Tensor], grads: List[Tensor], exp_avgs: List[Tensor], exp_avg_sqs: List[Tensor], max_exp_avg_sqs: List[Tensor], state_steps: List[Tensor], # kwonly args with defaults are not supported by functions compiled with torchscript issue #70627 # setting this as kwarg for now as functional API is compiled by torch/distributed/optim foreach: bool = None, capturable: bool = False, *, amsgrad: bool, beta1: float, beta2: float, lr: float, weight_decay: float, eps: float, maximize: bool): r"""Functional API that performs AdamW algorithm computation. See :class:`~torch.optim.AdamW` for details. """ if not all(isinstance(t, torch.Tensor) for t in state_steps): raise RuntimeError("API has changed, `state_steps` argument must contain a list of singleton tensors") if foreach is None: # Placeholder for more complex foreach logic to be added when value is not set foreach = False if foreach and torch.jit.is_scripting(): raise RuntimeError('torch.jit.script not supported with foreach optimizers') if foreach and not torch.jit.is_scripting(): func = _multi_tensor_adamw else: func = _single_tensor_adamw func(params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, amsgrad=amsgrad, beta1=beta1, beta2=beta2, lr=lr, weight_decay=weight_decay, eps=eps, maximize=maximize, capturable=capturable) def _single_tensor_adamw(params: List[Tensor], grads: List[Tensor], exp_avgs: List[Tensor], exp_avg_sqs: List[Tensor], max_exp_avg_sqs: List[Tensor], state_steps: List[Tensor], *, amsgrad: bool, beta1: float, beta2: float, lr: float, weight_decay: float, eps: float, maximize: bool, capturable: bool): for i, param in enumerate(params): grad = grads[i] if not maximize else -grads[i] exp_avg = exp_avgs[i] exp_avg_sq = exp_avg_sqs[i] step_t = state_steps[i] if capturable: assert param.is_cuda and step_t.is_cuda, "If capturable=True, params and state_steps must be CUDA tensors." if torch.is_complex(param): grad = torch.view_as_real(grad) exp_avg = torch.view_as_real(exp_avg) exp_avg_sq = torch.view_as_real(exp_avg_sq) param = torch.view_as_real(param) # update step step_t += 1 # Perform stepweight decay param.mul_(1 - lr * weight_decay) # Decay the first and second moment running average coefficient exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1) exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2) if capturable: step = step_t # 1 - beta1 ** step can't be captured in a CUDA graph, even if step is a CUDA tensor # (incurs "RuntimeError: CUDA error: operation not permitted when stream is capturing") bias_correction1 = 1 - torch.pow(beta1, step) bias_correction2 = 1 - torch.pow(beta2, step) step_size = lr / bias_correction1 step_size_neg = step_size.neg() bias_correction2_sqrt = bias_correction2.sqrt() if amsgrad: # Maintains the maximum of all 2nd moment running avg. till now torch.maximum(max_exp_avg_sqs[i], exp_avg_sq, out=max_exp_avg_sqs[i]) # Uses the max. for normalizing running avg. 
of gradient # Folds in (admittedly ugly) 1-elem step_size math here to avoid extra param-set-sized read+write # (can't fold it into addcdiv_ below because addcdiv_ requires value is a Number, not a Tensor) denom = (max_exp_avg_sqs[i].sqrt() / (bias_correction2_sqrt * step_size_neg)).add_(eps / step_size_neg) else: denom = (exp_avg_sq.sqrt() / (bias_correction2_sqrt * step_size_neg)).add_(eps / step_size_neg) param.addcdiv_(exp_avg, denom) else: step = step_t.item() bias_correction1 = 1 - beta1 ** step bias_correction2 = 1 - beta2 ** step step_size = lr / bias_correction1 bias_correction2_sqrt = math.sqrt(bias_correction2) if amsgrad: # Maintains the maximum of all 2nd moment running avg. till now torch.maximum(max_exp_avg_sqs[i], exp_avg_sq, out=max_exp_avg_sqs[i]) # Use the max. for normalizing running avg. of gradient denom = (max_exp_avg_sqs[i].sqrt() / bias_correction2_sqrt).add_(eps) else: denom = (exp_avg_sq.sqrt() / bias_correction2_sqrt).add_(eps) param.addcdiv_(exp_avg, denom, value=-step_size) def _multi_tensor_adamw(params: List[Tensor], grads: List[Tensor], exp_avgs: List[Tensor], exp_avg_sqs: List[Tensor], max_exp_avg_sqs: List[Tensor], state_steps: List[Tensor], *, amsgrad: bool, beta1: float, beta2: float, lr: float, weight_decay: float, eps: float, maximize: bool, capturable: bool): if len(params) == 0: return if capturable: assert all(p.is_cuda and step.is_cuda for p, step in zip(params, state_steps)), \ "If capturable=True, params and state_steps must be CUDA tensors." if maximize: grads = torch._foreach_neg(tuple(grads)) # type: ignore[assignment] grads = [torch.view_as_real(x) if torch.is_complex(x) else x for x in grads] exp_avgs = [torch.view_as_real(x) if torch.is_complex(x) else x for x in exp_avgs] exp_avg_sqs = [torch.view_as_real(x) if torch.is_complex(x) else x for x in exp_avg_sqs] params = [torch.view_as_real(x) if torch.is_complex(x) else x for x in params] # update steps torch._foreach_add_(state_steps, 1) # Perform stepweight decay torch._foreach_mul_(params, 1 - lr * weight_decay) # Decay the first and second moment running average coefficient torch._foreach_mul_(exp_avgs, beta1) torch._foreach_add_(exp_avgs, grads, alpha=1 - beta1) torch._foreach_mul_(exp_avg_sqs, beta2) torch._foreach_addcmul_(exp_avg_sqs, grads, grads, 1 - beta2) if capturable: # TODO: use foreach_pow if/when foreach_pow is added bias_correction1 = [torch.pow(beta1, step) for step in state_steps] bias_correction2 = [torch.pow(beta2, step) for step in state_steps] # foreach_sub doesn't allow a scalar as the first arg torch._foreach_sub_(bias_correction1, 1) torch._foreach_sub_(bias_correction2, 1) torch._foreach_neg_(bias_correction1) torch._foreach_neg_(bias_correction2) # foreach_div doesn't allow a scalar as the first arg step_size = torch._foreach_div(bias_correction1, lr) torch._foreach_reciprocal_(step_size) torch._foreach_neg_(step_size) bias_correction2_sqrt = torch._foreach_sqrt(bias_correction2) if amsgrad: # Maintains the maximum of all 2nd moment running avg. till now torch._foreach_maximum_(max_exp_avg_sqs, exp_avg_sqs) # Use the max. for normalizing running avg. 
of gradient max_exp_avg_sq_sqrt = torch._foreach_sqrt(max_exp_avg_sqs) # Folds in (admittedly ugly) 1-elem step_size math here to avoid extra param-set-sized read+write # (can't fold it into addcdiv_ below because addcdiv_ requires value is a Number, not a Tensor) torch._foreach_div_(max_exp_avg_sq_sqrt, torch._foreach_mul(bias_correction2_sqrt, step_size)) eps_over_step_size = torch._foreach_div(step_size, eps) torch._foreach_reciprocal_(eps_over_step_size) denom = torch._foreach_add(max_exp_avg_sq_sqrt, eps_over_step_size) else: exp_avg_sq_sqrt = torch._foreach_sqrt(exp_avg_sqs) torch._foreach_div_(exp_avg_sq_sqrt, torch._foreach_mul(bias_correction2_sqrt, step_size)) eps_over_step_size = torch._foreach_div(step_size, eps) torch._foreach_reciprocal_(eps_over_step_size) denom = torch._foreach_add(exp_avg_sq_sqrt, eps_over_step_size) torch._foreach_addcdiv_(params, exp_avgs, denom) else: bias_correction1 = [1 - beta1 ** step.item() for step in state_steps] bias_correction2 = [1 - beta2 ** step.item() for step in state_steps] step_size = [(lr / bc) * -1 for bc in bias_correction1] bias_correction2_sqrt = [math.sqrt(bc) for bc in bias_correction2] if amsgrad: # Maintains the maximum of all 2nd moment running avg. till now torch._foreach_maximum_(max_exp_avg_sqs, exp_avg_sqs) # Use the max. for normalizing running avg. of gradient max_exp_avg_sq_sqrt = torch._foreach_sqrt(max_exp_avg_sqs) torch._foreach_div_(max_exp_avg_sq_sqrt, bias_correction2_sqrt) denom = torch._foreach_add(max_exp_avg_sq_sqrt, eps) else: exp_avg_sq_sqrt = torch._foreach_sqrt(exp_avg_sqs) torch._foreach_div_(exp_avg_sq_sqrt, bias_correction2_sqrt) denom = torch._foreach_add(exp_avg_sq_sqrt, eps) torch._foreach_addcdiv_(params, exp_avgs, denom, step_size)
pytorch-master
torch/optim/adamw.py
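A hedged AdamW sketch, not from the original file: what distinguishes AdamW from Adam is the decoupled weight decay (the param.mul_(1 - lr * weight_decay) step above) rather than adding weight_decay * param to the gradient. The model and values are assumptions.

import torch
import torch.nn.functional as F

model = torch.nn.Linear(32, 10)
opt = torch.optim.AdamW(model.parameters(), lr=1e-3,
                        betas=(0.9, 0.999), weight_decay=1e-2)

x, y = torch.randn(16, 32), torch.randint(0, 10, (16,))
opt.zero_grad()
F.cross_entropy(model(x), y).backward()
opt.step()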
import itertools import math from copy import deepcopy import warnings import torch from torch.nn import Module from torch.optim.lr_scheduler import _LRScheduler __all__ = ['AveragedModel', 'update_bn', 'SWALR'] class AveragedModel(Module): r"""Implements averaged model for Stochastic Weight Averaging (SWA). Stochastic Weight Averaging was proposed in `Averaging Weights Leads to Wider Optima and Better Generalization`_ by Pavel Izmailov, Dmitrii Podoprikhin, Timur Garipov, Dmitry Vetrov and Andrew Gordon Wilson (UAI 2018). AveragedModel class creates a copy of the provided module :attr:`model` on the device :attr:`device` and allows to compute running averages of the parameters of the :attr:`model`. Args: model (torch.nn.Module): model to use with SWA device (torch.device, optional): if provided, the averaged model will be stored on the :attr:`device` avg_fn (function, optional): the averaging function used to update parameters; the function must take in the current value of the :class:`AveragedModel` parameter, the current value of :attr:`model` parameter and the number of models already averaged; if None, equally weighted average is used (default: None) use_buffers (bool): if ``True``, it will compute running averages for both the parameters and the buffers of the model. (default: ``False``) Example: >>> # xdoctest: +SKIP("undefined variables") >>> loader, optimizer, model, loss_fn = ... >>> swa_model = torch.optim.swa_utils.AveragedModel(model) >>> scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, >>> T_max=300) >>> swa_start = 160 >>> swa_scheduler = SWALR(optimizer, swa_lr=0.05) >>> for i in range(300): >>> for input, target in loader: >>> optimizer.zero_grad() >>> loss_fn(model(input), target).backward() >>> optimizer.step() >>> if i > swa_start: >>> swa_model.update_parameters(model) >>> swa_scheduler.step() >>> else: >>> scheduler.step() >>> >>> # Update bn statistics for the swa_model at the end >>> torch.optim.swa_utils.update_bn(loader, swa_model) You can also use custom averaging functions with `avg_fn` parameter. If no averaging function is provided, the default is to compute equally-weighted average of the weights. Example: >>> # xdoctest: +SKIP("undefined variables") >>> # Compute exponential moving averages of the weights and buffers >>> ema_avg = lambda averaged_model_parameter, model_parameter, num_averaged: ( ... 0.1 * averaged_model_parameter + 0.9 * model_parameter) >>> swa_model = torch.optim.swa_utils.AveragedModel(model, avg_fn=ema_avg, use_buffers=True) .. note:: When using SWA with models containing Batch Normalization you may need to update the activation statistics for Batch Normalization. This can be done either by using the :meth:`torch.optim.swa_utils.update_bn` or by setting :attr:`use_buffers` to `True`. The first approach updates the statistics in a post-training step by passing data through the model. The second does it during the parameter update phase by averaging all buffers. Empirical evidence has shown that updating the statistics in normalization layers increases accuracy, but you may wish to empirically test which approach yields the best results in your problem. .. note:: :attr:`avg_fn` is not saved in the :meth:`state_dict` of the model. .. note:: When :meth:`update_parameters` is called for the first time (i.e. :attr:`n_averaged` is `0`) the parameters of `model` are copied to the parameters of :class:`AveragedModel`. For every subsequent call of :meth:`update_parameters` the function `avg_fn` is used to update the parameters. .. 
_Averaging Weights Leads to Wider Optima and Better Generalization: https://arxiv.org/abs/1803.05407 .. _There Are Many Consistent Explanations of Unlabeled Data: Why You Should Average: https://arxiv.org/abs/1806.05594 .. _SWALP: Stochastic Weight Averaging in Low-Precision Training: https://arxiv.org/abs/1904.11943 .. _Stochastic Weight Averaging in Parallel: Large-Batch Training That Generalizes Well: https://arxiv.org/abs/2001.02312 """ def __init__(self, model, device=None, avg_fn=None, use_buffers=False): super(AveragedModel, self).__init__() self.module = deepcopy(model) if device is not None: self.module = self.module.to(device) self.register_buffer('n_averaged', torch.tensor(0, dtype=torch.long, device=device)) if avg_fn is None: def avg_fn(averaged_model_parameter, model_parameter, num_averaged): return averaged_model_parameter + \ (model_parameter - averaged_model_parameter) / (num_averaged + 1) self.avg_fn = avg_fn self.use_buffers = use_buffers def forward(self, *args, **kwargs): return self.module(*args, **kwargs) def update_parameters(self, model): self_param = ( itertools.chain(self.module.parameters(), self.module.buffers()) if self.use_buffers else self.parameters() ) model_param = ( itertools.chain(model.parameters(), model.buffers()) if self.use_buffers else model.parameters() ) for p_swa, p_model in zip(self_param, model_param): device = p_swa.device p_model_ = p_model.detach().to(device) if self.n_averaged == 0: p_swa.detach().copy_(p_model_) else: p_swa.detach().copy_(self.avg_fn(p_swa.detach(), p_model_, self.n_averaged.to(device))) self.n_averaged += 1 @torch.no_grad() def update_bn(loader, model, device=None): r"""Updates BatchNorm running_mean, running_var buffers in the model. It performs one pass over data in `loader` to estimate the activation statistics for BatchNorm layers in the model. Args: loader (torch.utils.data.DataLoader): dataset loader to compute the activation statistics on. Each data batch should be either a tensor, or a list/tuple whose first element is a tensor containing data. model (torch.nn.Module): model for which we seek to update BatchNorm statistics. device (torch.device, optional): If set, data will be transferred to :attr:`device` before being passed into :attr:`model`. Example: >>> # xdoctest: +SKIP("Undefined variables") >>> loader, model = ... >>> torch.optim.swa_utils.update_bn(loader, model) .. note:: The `update_bn` utility assumes that each data batch in :attr:`loader` is either a tensor or a list or tuple of tensors; in the latter case it is assumed that :meth:`model.forward()` should be called on the first element of the list or tuple corresponding to the data batch. """ momenta = {} for module in model.modules(): if isinstance(module, torch.nn.modules.batchnorm._BatchNorm): module.running_mean = torch.zeros_like(module.running_mean) module.running_var = torch.ones_like(module.running_var) momenta[module] = module.momentum if not momenta: return was_training = model.training model.train() for module in momenta.keys(): module.momentum = None module.num_batches_tracked *= 0 for input in loader: if isinstance(input, (list, tuple)): input = input[0] if device is not None: input = input.to(device) model(input) for bn_module in momenta.keys(): bn_module.momentum = momenta[bn_module] model.train(was_training) class SWALR(_LRScheduler): r"""Anneals the learning rate in each parameter group to a fixed value. 
This learning rate scheduler is meant to be used with Stochastic Weight Averaging (SWA) method (see `torch.optim.swa_utils.AveragedModel`). Args: optimizer (torch.optim.Optimizer): wrapped optimizer swa_lrs (float or list): the learning rate value for all param groups together or separately for each group. annealing_epochs (int): number of epochs in the annealing phase (default: 10) annealing_strategy (str): "cos" or "linear"; specifies the annealing strategy: "cos" for cosine annealing, "linear" for linear annealing (default: "cos") last_epoch (int): the index of the last epoch (default: -1) The :class:`SWALR` scheduler can be used together with other schedulers to switch to a constant learning rate late in the training as in the example below. Example: >>> # xdoctest: +SKIP("Undefined variables") >>> loader, optimizer, model = ... >>> lr_lambda = lambda epoch: 0.9 >>> scheduler = torch.optim.lr_scheduler.MultiplicativeLR(optimizer, >>> lr_lambda=lr_lambda) >>> swa_scheduler = torch.optim.swa_utils.SWALR(optimizer, >>> anneal_strategy="linear", anneal_epochs=20, swa_lr=0.05) >>> swa_start = 160 >>> for i in range(300): >>> for input, target in loader: >>> optimizer.zero_grad() >>> loss_fn(model(input), target).backward() >>> optimizer.step() >>> if i > swa_start: >>> swa_scheduler.step() >>> else: >>> scheduler.step() .. _Averaging Weights Leads to Wider Optima and Better Generalization: https://arxiv.org/abs/1803.05407 """ def __init__(self, optimizer, swa_lr, anneal_epochs=10, anneal_strategy='cos', last_epoch=-1): swa_lrs = self._format_param(optimizer, swa_lr) for swa_lr, group in zip(swa_lrs, optimizer.param_groups): group['swa_lr'] = swa_lr if anneal_strategy not in ['cos', 'linear']: raise ValueError("anneal_strategy must by one of 'cos' or 'linear', " f"instead got {anneal_strategy}") elif anneal_strategy == 'cos': self.anneal_func = self._cosine_anneal elif anneal_strategy == 'linear': self.anneal_func = self._linear_anneal if not isinstance(anneal_epochs, int) or anneal_epochs < 0: raise ValueError(f"anneal_epochs must be equal or greater than 0, got {anneal_epochs}") self.anneal_epochs = anneal_epochs super(SWALR, self).__init__(optimizer, last_epoch) @staticmethod def _format_param(optimizer, swa_lrs): if isinstance(swa_lrs, (list, tuple)): if len(swa_lrs) != len(optimizer.param_groups): raise ValueError("swa_lr must have the same length as " f"optimizer.param_groups: swa_lr has {len(swa_lrs)}, " f"optimizer.param_groups has {len(optimizer.param_groups)}") return swa_lrs else: return [swa_lrs] * len(optimizer.param_groups) @staticmethod def _linear_anneal(t): return t @staticmethod def _cosine_anneal(t): return (1 - math.cos(math.pi * t)) / 2 @staticmethod def _get_initial_lr(lr, swa_lr, alpha): if alpha == 1: return swa_lr return (lr - alpha * swa_lr) / (1 - alpha) def get_lr(self): if not self._get_lr_called_within_step: warnings.warn("To get the last learning rate computed by the scheduler, " "please use `get_last_lr()`.", UserWarning) step = self._step_count - 1 if self.anneal_epochs == 0: step = max(1, step) prev_t = max(0, min(1, (step - 1) / max(1, self.anneal_epochs))) prev_alpha = self.anneal_func(prev_t) prev_lrs = [self._get_initial_lr(group['lr'], group['swa_lr'], prev_alpha) for group in self.optimizer.param_groups] t = max(0, min(1, step / max(1, self.anneal_epochs))) alpha = self.anneal_func(t) return [group['swa_lr'] * alpha + lr * (1 - alpha) for group, lr in zip(self.optimizer.param_groups, prev_lrs)]
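# --- Editor's note: illustrative sketch, not part of the upstream swa_utils.py ---
# A minimal, self-contained loop showing how AveragedModel, SWALR and update_bn
# fit together. The tiny model, random data and hyperparameters (epoch count,
# swa_start, learning rates) are assumptions chosen only for brevity.
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset

model = nn.Sequential(nn.Linear(10, 16), nn.BatchNorm1d(16), nn.ReLU(), nn.Linear(16, 1))
loader = DataLoader(TensorDataset(torch.randn(64, 10), torch.randn(64, 1)), batch_size=8)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
loss_fn = nn.MSELoss()

swa_model = torch.optim.swa_utils.AveragedModel(model)
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=20)
swa_scheduler = torch.optim.swa_utils.SWALR(optimizer, swa_lr=0.01)
swa_start = 15

for epoch in range(20):
    for x, y in loader:
        optimizer.zero_grad()
        loss_fn(model(x), y).backward()
        optimizer.step()
    if epoch >= swa_start:
        swa_model.update_parameters(model)  # fold the current weights into the running average
        swa_scheduler.step()
    else:
        scheduler.step()

# Recompute BatchNorm statistics for the averaged model before evaluation.
torch.optim.swa_utils.update_bn(loader, swa_model)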
pytorch-master
torch/optim/swa_utils.py
import torch from functools import reduce from .optimizer import Optimizer __all__ = ['LBFGS'] def _cubic_interpolate(x1, f1, g1, x2, f2, g2, bounds=None): # ported from https://github.com/torch/optim/blob/master/polyinterp.lua # Compute bounds of interpolation area if bounds is not None: xmin_bound, xmax_bound = bounds else: xmin_bound, xmax_bound = (x1, x2) if x1 <= x2 else (x2, x1) # Code for most common case: cubic interpolation of 2 points # w/ function and derivative values for both # Solution in this case (where x2 is the farthest point): # d1 = g1 + g2 - 3*(f1-f2)/(x1-x2); # d2 = sqrt(d1^2 - g1*g2); # min_pos = x2 - (x2 - x1)*((g2 + d2 - d1)/(g2 - g1 + 2*d2)); # t_new = min(max(min_pos,xmin_bound),xmax_bound); d1 = g1 + g2 - 3 * (f1 - f2) / (x1 - x2) d2_square = d1**2 - g1 * g2 if d2_square >= 0: d2 = d2_square.sqrt() if x1 <= x2: min_pos = x2 - (x2 - x1) * ((g2 + d2 - d1) / (g2 - g1 + 2 * d2)) else: min_pos = x1 - (x1 - x2) * ((g1 + d2 - d1) / (g1 - g2 + 2 * d2)) return min(max(min_pos, xmin_bound), xmax_bound) else: return (xmin_bound + xmax_bound) / 2. def _strong_wolfe(obj_func, x, t, d, f, g, gtd, c1=1e-4, c2=0.9, tolerance_change=1e-9, max_ls=25): # ported from https://github.com/torch/optim/blob/master/lswolfe.lua d_norm = d.abs().max() g = g.clone(memory_format=torch.contiguous_format) # evaluate objective and gradient using initial step f_new, g_new = obj_func(x, t, d) ls_func_evals = 1 gtd_new = g_new.dot(d) # bracket an interval containing a point satisfying the Wolfe criteria t_prev, f_prev, g_prev, gtd_prev = 0, f, g, gtd done = False ls_iter = 0 while ls_iter < max_ls: # check conditions if f_new > (f + c1 * t * gtd) or (ls_iter > 1 and f_new >= f_prev): bracket = [t_prev, t] bracket_f = [f_prev, f_new] bracket_g = [g_prev, g_new.clone(memory_format=torch.contiguous_format)] bracket_gtd = [gtd_prev, gtd_new] break if abs(gtd_new) <= -c2 * gtd: bracket = [t] bracket_f = [f_new] bracket_g = [g_new] done = True break if gtd_new >= 0: bracket = [t_prev, t] bracket_f = [f_prev, f_new] bracket_g = [g_prev, g_new.clone(memory_format=torch.contiguous_format)] bracket_gtd = [gtd_prev, gtd_new] break # interpolate min_step = t + 0.01 * (t - t_prev) max_step = t * 10 tmp = t t = _cubic_interpolate( t_prev, f_prev, gtd_prev, t, f_new, gtd_new, bounds=(min_step, max_step)) # next step t_prev = tmp f_prev = f_new g_prev = g_new.clone(memory_format=torch.contiguous_format) gtd_prev = gtd_new f_new, g_new = obj_func(x, t, d) ls_func_evals += 1 gtd_new = g_new.dot(d) ls_iter += 1 # reached max number of iterations? if ls_iter == max_ls: bracket = [0, t] bracket_f = [f, f_new] bracket_g = [g, g_new] # zoom phase: we now have a point satisfying the criteria, or # a bracket around it. 
We refine the bracket until we find the # exact point satisfying the criteria insuf_progress = False # find high and low points in bracket low_pos, high_pos = (0, 1) if bracket_f[0] <= bracket_f[-1] else (1, 0) while not done and ls_iter < max_ls: # line-search bracket is so small if abs(bracket[1] - bracket[0]) * d_norm < tolerance_change: break # compute new trial value t = _cubic_interpolate(bracket[0], bracket_f[0], bracket_gtd[0], bracket[1], bracket_f[1], bracket_gtd[1]) # test that we are making sufficient progress: # in case `t` is so close to boundary, we mark that we are making # insufficient progress, and if # + we have made insufficient progress in the last step, or # + `t` is at one of the boundary, # we will move `t` to a position which is `0.1 * len(bracket)` # away from the nearest boundary point. eps = 0.1 * (max(bracket) - min(bracket)) if min(max(bracket) - t, t - min(bracket)) < eps: # interpolation close to boundary if insuf_progress or t >= max(bracket) or t <= min(bracket): # evaluate at 0.1 away from boundary if abs(t - max(bracket)) < abs(t - min(bracket)): t = max(bracket) - eps else: t = min(bracket) + eps insuf_progress = False else: insuf_progress = True else: insuf_progress = False # Evaluate new point f_new, g_new = obj_func(x, t, d) ls_func_evals += 1 gtd_new = g_new.dot(d) ls_iter += 1 if f_new > (f + c1 * t * gtd) or f_new >= bracket_f[low_pos]: # Armijo condition not satisfied or not lower than lowest point bracket[high_pos] = t bracket_f[high_pos] = f_new bracket_g[high_pos] = g_new.clone(memory_format=torch.contiguous_format) bracket_gtd[high_pos] = gtd_new low_pos, high_pos = (0, 1) if bracket_f[0] <= bracket_f[1] else (1, 0) else: if abs(gtd_new) <= -c2 * gtd: # Wolfe conditions satisfied done = True elif gtd_new * (bracket[high_pos] - bracket[low_pos]) >= 0: # old high becomes new low bracket[high_pos] = bracket[low_pos] bracket_f[high_pos] = bracket_f[low_pos] bracket_g[high_pos] = bracket_g[low_pos] bracket_gtd[high_pos] = bracket_gtd[low_pos] # new point becomes new low bracket[low_pos] = t bracket_f[low_pos] = f_new bracket_g[low_pos] = g_new.clone(memory_format=torch.contiguous_format) bracket_gtd[low_pos] = gtd_new # return stuff t = bracket[low_pos] f_new = bracket_f[low_pos] g_new = bracket_g[low_pos] return f_new, g_new, t, ls_func_evals class LBFGS(Optimizer): """Implements L-BFGS algorithm, heavily inspired by `minFunc <https://www.cs.ubc.ca/~schmidtm/Software/minFunc.html>`_. .. warning:: This optimizer doesn't support per-parameter options and parameter groups (there can be only one). .. warning:: Right now all parameters have to be on a single device. This will be improved in the future. .. note:: This is a very memory intensive optimizer (it requires additional ``param_bytes * (history_size + 1)`` bytes). If it doesn't fit in memory try reducing the history size, or use a different algorithm. Args: lr (float): learning rate (default: 1) max_iter (int): maximal number of iterations per optimization step (default: 20) max_eval (int): maximal number of function evaluations per optimization step (default: max_iter * 1.25). tolerance_grad (float): termination tolerance on first order optimality (default: 1e-5). tolerance_change (float): termination tolerance on function value/parameter changes (default: 1e-9). history_size (int): update history size (default: 100). line_search_fn (str): either 'strong_wolfe' or None (default: None). 
""" def __init__(self, params, lr=1, max_iter=20, max_eval=None, tolerance_grad=1e-7, tolerance_change=1e-9, history_size=100, line_search_fn=None): if max_eval is None: max_eval = max_iter * 5 // 4 defaults = dict( lr=lr, max_iter=max_iter, max_eval=max_eval, tolerance_grad=tolerance_grad, tolerance_change=tolerance_change, history_size=history_size, line_search_fn=line_search_fn) super(LBFGS, self).__init__(params, defaults) if len(self.param_groups) != 1: raise ValueError("LBFGS doesn't support per-parameter options " "(parameter groups)") self._params = self.param_groups[0]['params'] self._numel_cache = None def _numel(self): if self._numel_cache is None: self._numel_cache = reduce(lambda total, p: total + p.numel(), self._params, 0) return self._numel_cache def _gather_flat_grad(self): views = [] for p in self._params: if p.grad is None: view = p.new(p.numel()).zero_() elif p.grad.is_sparse: view = p.grad.to_dense().view(-1) else: view = p.grad.view(-1) views.append(view) return torch.cat(views, 0) def _add_grad(self, step_size, update): offset = 0 for p in self._params: numel = p.numel() # view as to avoid deprecated pointwise semantics p.add_(update[offset:offset + numel].view_as(p), alpha=step_size) offset += numel assert offset == self._numel() def _clone_param(self): return [p.clone(memory_format=torch.contiguous_format) for p in self._params] def _set_param(self, params_data): for p, pdata in zip(self._params, params_data): p.copy_(pdata) def _directional_evaluate(self, closure, x, t, d): self._add_grad(t, d) loss = float(closure()) flat_grad = self._gather_flat_grad() self._set_param(x) return loss, flat_grad @torch.no_grad() def step(self, closure): """Performs a single optimization step. Args: closure (Callable): A closure that reevaluates the model and returns the loss. 
""" assert len(self.param_groups) == 1 # Make sure the closure is always called with grad enabled closure = torch.enable_grad()(closure) group = self.param_groups[0] lr = group['lr'] max_iter = group['max_iter'] max_eval = group['max_eval'] tolerance_grad = group['tolerance_grad'] tolerance_change = group['tolerance_change'] line_search_fn = group['line_search_fn'] history_size = group['history_size'] # NOTE: LBFGS has only global state, but we register it as state for # the first param, because this helps with casting in load_state_dict state = self.state[self._params[0]] state.setdefault('func_evals', 0) state.setdefault('n_iter', 0) # evaluate initial f(x) and df/dx orig_loss = closure() loss = float(orig_loss) current_evals = 1 state['func_evals'] += 1 flat_grad = self._gather_flat_grad() opt_cond = flat_grad.abs().max() <= tolerance_grad # optimal condition if opt_cond: return orig_loss # tensors cached in state (for tracing) d = state.get('d') t = state.get('t') old_dirs = state.get('old_dirs') old_stps = state.get('old_stps') ro = state.get('ro') H_diag = state.get('H_diag') prev_flat_grad = state.get('prev_flat_grad') prev_loss = state.get('prev_loss') n_iter = 0 # optimize for a max of max_iter iterations while n_iter < max_iter: # keep track of nb of iterations n_iter += 1 state['n_iter'] += 1 ############################################################ # compute gradient descent direction ############################################################ if state['n_iter'] == 1: d = flat_grad.neg() old_dirs = [] old_stps = [] ro = [] H_diag = 1 else: # do lbfgs update (update memory) y = flat_grad.sub(prev_flat_grad) s = d.mul(t) ys = y.dot(s) # y*s if ys > 1e-10: # updating memory if len(old_dirs) == history_size: # shift history by one (limited-memory) old_dirs.pop(0) old_stps.pop(0) ro.pop(0) # store new direction/step old_dirs.append(y) old_stps.append(s) ro.append(1. / ys) # update scale of initial Hessian approximation H_diag = ys / y.dot(y) # (y*y) # compute the approximate (L-BFGS) inverse Hessian # multiplied by the gradient num_old = len(old_dirs) if 'al' not in state: state['al'] = [None] * history_size al = state['al'] # iteration in L-BFGS loop collapsed to use just one buffer q = flat_grad.neg() for i in range(num_old - 1, -1, -1): al[i] = old_stps[i].dot(q) * ro[i] q.add_(old_dirs[i], alpha=-al[i]) # multiply by initial Hessian # r/d is the final direction d = r = torch.mul(q, H_diag) for i in range(num_old): be_i = old_dirs[i].dot(r) * ro[i] r.add_(old_stps[i], alpha=al[i] - be_i) if prev_flat_grad is None: prev_flat_grad = flat_grad.clone(memory_format=torch.contiguous_format) else: prev_flat_grad.copy_(flat_grad) prev_loss = loss ############################################################ # compute step length ############################################################ # reset initial guess for step size if state['n_iter'] == 1: t = min(1., 1. 
/ flat_grad.abs().sum()) * lr else: t = lr # directional derivative gtd = flat_grad.dot(d) # g * d # directional derivative is below tolerance if gtd > -tolerance_change: break # optional line search: user function ls_func_evals = 0 if line_search_fn is not None: # perform line search, using user function if line_search_fn != "strong_wolfe": raise RuntimeError("only 'strong_wolfe' is supported") else: x_init = self._clone_param() def obj_func(x, t, d): return self._directional_evaluate(closure, x, t, d) loss, flat_grad, t, ls_func_evals = _strong_wolfe( obj_func, x_init, t, d, loss, flat_grad, gtd) self._add_grad(t, d) opt_cond = flat_grad.abs().max() <= tolerance_grad else: # no line search, simply move with fixed-step self._add_grad(t, d) if n_iter != max_iter: # re-evaluate function only if not in last iteration # the reason we do this: in a stochastic setting, # no use to re-evaluate that function here with torch.enable_grad(): loss = float(closure()) flat_grad = self._gather_flat_grad() opt_cond = flat_grad.abs().max() <= tolerance_grad ls_func_evals = 1 # update func eval current_evals += ls_func_evals state['func_evals'] += ls_func_evals ############################################################ # check conditions ############################################################ if n_iter == max_iter: break if current_evals >= max_eval: break # optimal condition if opt_cond: break # lack of progress if d.mul(t).abs().max() <= tolerance_change: break if abs(loss - prev_loss) < tolerance_change: break state['d'] = d state['t'] = t state['old_dirs'] = old_dirs state['old_stps'] = old_stps state['ro'] = ro state['H_diag'] = H_diag state['prev_flat_grad'] = prev_flat_grad state['prev_loss'] = prev_loss return orig_loss
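# --- Editor's note: illustrative sketch, not part of the upstream lbfgs.py ---
# Unlike most optimizers, LBFGS.step() requires a closure that re-evaluates the
# loss. The quadratic objective below is an assumption used purely to keep the
# example self-contained; 'strong_wolfe' selects the line search implemented above.
import torch

x = torch.zeros(2, requires_grad=True)
target = torch.tensor([3.0, -2.0])
optimizer = torch.optim.LBFGS([x], lr=1.0, max_iter=20, line_search_fn='strong_wolfe')

def closure():
    optimizer.zero_grad()
    loss = ((x - target) ** 2).sum()
    loss.backward()
    return loss

for _ in range(5):
    optimizer.step(closure)

print(x)  # should be close to target after a few steps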
pytorch-master
torch/optim/lbfgs.py
import math import torch from torch import Tensor from .optimizer import Optimizer from typing import List, Optional __all__ = ['RAdam', 'radam'] class RAdam(Optimizer): r"""Implements RAdam algorithm. .. math:: \begin{aligned} &\rule{110mm}{0.4pt} \\ &\textbf{input} : \gamma \text{ (lr)}, \: \beta_1, \beta_2 \text{ (betas)}, \: \theta_0 \text{ (params)}, \:f(\theta) \text{ (objective)}, \: \lambda \text{ (weightdecay)}, \\ &\hspace{13mm} \epsilon \text{ (epsilon)} \\ &\textbf{initialize} : m_0 \leftarrow 0 \text{ ( first moment)}, v_0 \leftarrow 0 \text{ ( second moment)}, \\ &\hspace{18mm} \rho_{\infty} \leftarrow 2/(1-\beta_2) -1 \\[-1.ex] &\rule{110mm}{0.4pt} \\ &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do} \\ &\hspace{6mm}g_t \leftarrow \nabla_{\theta} f_t (\theta_{t-1}) \\ &\hspace{5mm} \textbf{if} \: \lambda \neq 0 \\ &\hspace{10mm} g_t \leftarrow g_t + \lambda \theta_{t-1} \\ &\hspace{6mm}m_t \leftarrow \beta_1 m_{t-1} + (1 - \beta_1) g_t \\ &\hspace{6mm}v_t \leftarrow \beta_2 v_{t-1} + (1-\beta_2) g^2_t \\ &\hspace{6mm}\widehat{m_t} \leftarrow m_t/\big(1-\beta_1^t \big) \\ &\hspace{6mm}\rho_t \leftarrow \rho_{\infty} - 2 t \beta^t_2 /\big(1-\beta_2^t \big) \\[0.1.ex] &\hspace{6mm}\textbf{if} \: \rho_t > 5 \\ &\hspace{12mm} l_t \leftarrow \sqrt{ (1-\beta^t_2) / \big( v_t +\epsilon \big) } \\ &\hspace{12mm} r_t \leftarrow \sqrt{\frac{(\rho_t-4)(\rho_t-2)\rho_{\infty}}{(\rho_{\infty}-4)(\rho_{\infty}-2) \rho_t}} \\ &\hspace{12mm}\theta_t \leftarrow \theta_{t-1} - \gamma \widehat{m_t} r_t l_t \\ &\hspace{6mm}\textbf{else} \\ &\hspace{12mm}\theta_t \leftarrow \theta_{t-1} - \gamma \widehat{m_t} \\ &\rule{110mm}{0.4pt} \\[-1.ex] &\bf{return} \: \theta_t \\[-1.ex] &\rule{110mm}{0.4pt} \\[-1.ex] \end{aligned} For further details regarding the algorithm we refer to `On the variance of the adaptive learning rate and beyond`_. Args: params (iterable): iterable of parameters to optimize or dicts defining parameter groups lr (float, optional): learning rate (default: 1e-3) betas (Tuple[float, float], optional): coefficients used for computing running averages of gradient and its square (default: (0.9, 0.999)) eps (float, optional): term added to the denominator to improve numerical stability (default: 1e-8) weight_decay (float, optional): weight decay (L2 penalty) (default: 0) foreach (bool, optional): whether foreach implementation of optimizer is used (default: None) .. 
_On the variance of the adaptive learning rate and beyond: https://arxiv.org/abs/1908.03265 """ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, foreach: Optional[bool] = None): if not 0.0 <= lr: raise ValueError("Invalid learning rate: {}".format(lr)) if not 0.0 <= eps: raise ValueError("Invalid epsilon value: {}".format(eps)) if not 0.0 <= betas[0] < 1.0: raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0])) if not 0.0 <= betas[1] < 1.0: raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1])) if not 0.0 <= weight_decay: raise ValueError("Invalid weight_decay value: {}".format(weight_decay)) defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, foreach=foreach) super(RAdam, self).__init__(params, defaults) def __setstate__(self, state): super().__setstate__(state) for group in self.param_groups: group.setdefault('foreach', None) state_values = list(self.state.values()) step_is_tensor = (len(state_values) != 0) and torch.is_tensor(state_values[0]['step']) if not step_is_tensor: for s in state_values: s['step'] = torch.tensor(float(s['step'])) @torch.no_grad() def step(self, closure=None): """Performs a single optimization step. Args: closure (Callable, optional): A closure that reevaluates the model and returns the loss. """ loss = None if closure is not None: with torch.enable_grad(): loss = closure() for group in self.param_groups: params_with_grad = [] grads = [] exp_avgs = [] exp_avg_sqs = [] state_steps = [] beta1, beta2 = group['betas'] for p in group['params']: if p.grad is not None: params_with_grad.append(p) if p.grad.is_sparse: raise RuntimeError('RAdam does not support sparse gradients') grads.append(p.grad) state = self.state[p] # Lazy state initialization if len(state) == 0: state['step'] = torch.tensor(0.) # Exponential moving average of gradient values state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format) # Exponential moving average of squared gradient values state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format) exp_avgs.append(state['exp_avg']) exp_avg_sqs.append(state['exp_avg_sq']) state_steps.append(state['step']) radam(params_with_grad, grads, exp_avgs, exp_avg_sqs, state_steps, beta1=beta1, beta2=beta2, lr=group['lr'], weight_decay=group['weight_decay'], eps=group['eps'], foreach=group['foreach']) return loss def radam(params: List[Tensor], grads: List[Tensor], exp_avgs: List[Tensor], exp_avg_sqs: List[Tensor], state_steps: List[Tensor], # kwonly args with defaults are not supported by functions compiled with torchscript issue #70627 # setting this as kwarg for now as functional API is compiled by torch/distributed/optim foreach: bool = None, *, beta1: float, beta2: float, lr: float, weight_decay: float, eps: float): r"""Functional API that performs RAdam algorithm computation. See :class:`~torch.optim.RAdam` for details. 
""" if not all(isinstance(t, torch.Tensor) for t in state_steps): raise RuntimeError("API has changed, `state_steps` argument must contain a list of singleton tensors") if foreach is None: # Placeholder for more complex foreach logic to be added when value is not set foreach = False if foreach and torch.jit.is_scripting(): raise RuntimeError('torch.jit.script not supported with foreach optimizers') if foreach and not torch.jit.is_scripting(): func = _multi_tensor_radam else: func = _single_tensor_radam func(params, grads, exp_avgs, exp_avg_sqs, state_steps, beta1=beta1, beta2=beta2, lr=lr, weight_decay=weight_decay, eps=eps) def _single_tensor_radam(params: List[Tensor], grads: List[Tensor], exp_avgs: List[Tensor], exp_avg_sqs: List[Tensor], state_steps: List[Tensor], *, beta1: float, beta2: float, lr: float, weight_decay: float, eps: float): for i, param in enumerate(params): grad = grads[i] exp_avg = exp_avgs[i] exp_avg_sq = exp_avg_sqs[i] step_t = state_steps[i] # update step step_t += 1 step = step_t.item() bias_correction1 = 1 - beta1 ** step bias_correction2 = 1 - beta2 ** step if weight_decay != 0: grad = grad.add(param, alpha=weight_decay) # Decay the first and second moment running average coefficient exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1) exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2) # correcting bias for the first moving moment bias_corrected_exp_avg = exp_avg / bias_correction1 # maximum length of the approximated SMA rho_inf = 2 / (1 - beta2) - 1 # compute the length of the approximated SMA rho_t = rho_inf - 2 * step * (beta2 ** step) / bias_correction2 if rho_t > 5.: # Compute the variance rectification term and update parameters accordingly rect = math.sqrt((rho_t - 4) * (rho_t - 2) * rho_inf / ((rho_inf - 4) * (rho_inf - 2) * rho_t)) adaptive_lr = math.sqrt(bias_correction2) / exp_avg_sq.sqrt().add_(eps) param.add_(bias_corrected_exp_avg * lr * adaptive_lr * rect, alpha=-1.0) else: param.add_(bias_corrected_exp_avg * lr, alpha=-1.0) def _multi_tensor_radam(params: List[Tensor], grads: List[Tensor], exp_avgs: List[Tensor], exp_avg_sqs: List[Tensor], state_steps: List[Tensor], *, beta1: float, beta2: float, lr: float, weight_decay: float, eps: float): if len(params) == 0: return # Update steps torch._foreach_add_(state_steps, 1) # maximum length of the approximated SMA rho_inf = 2 / (1 - beta2) - 1 # compute the length of the approximated SMA rho_t_list = [rho_inf - 2 * step.item() * (beta2 ** step.item()) / (1 - beta2 ** step.item()) for step in state_steps] bias_correction1 = [1 - beta1 ** step.item() for step in state_steps] bias_correction2 = [1 - beta2 ** step.item() for step in state_steps] if weight_decay != 0: torch._foreach_add_(grads, params, alpha=weight_decay) # Decay the first and second moment running average coefficient torch._foreach_mul_(exp_avgs, beta1) torch._foreach_add_(exp_avgs, grads, alpha=1 - beta1) torch._foreach_mul_(exp_avg_sqs, beta2) torch._foreach_addcmul_(exp_avg_sqs, grads, grads, 1 - beta2) rect = [math.sqrt((rho_t - 4) * (rho_t - 2) * rho_inf / ((rho_inf - 4) * (rho_inf - 2) * rho_t)) if rho_t > 5 else 0 for rho_t in rho_t_list] unrectified = [0 if rect > 0 else 1. 
for rect in rect]

    exp_avg_sq_sqrt = torch._foreach_sqrt(exp_avg_sqs)
    bias_correction_sqrt = [math.sqrt(bc) for bc in bias_correction2]
    denom = torch._foreach_div(exp_avg_sq_sqrt, bias_correction_sqrt)
    step_size = [(lr * rect / bc) * -1 for rect, bc in zip(rect, bias_correction1)]
    torch._foreach_addcdiv_(params, exp_avgs, denom, step_size)

    denom = [torch.ones_like(exp_av, memory_format=torch.preserve_format) for exp_av in exp_avgs]
    step_size = [(lr * rect / bc) * -1 for rect, bc in zip(unrectified, bias_correction1)]
    torch._foreach_addcdiv_(params, exp_avgs, denom, step_size)
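# --- Editor's note: illustrative sketch, not part of the upstream radam.py ---
# RAdam is a drop-in replacement constructed like Adam; the rectified warmup is
# handled internally by the step logic above. The toy regression problem and
# hyperparameters are assumptions for the sake of a runnable snippet.
import torch
from torch import nn

model = nn.Linear(4, 1)
optimizer = torch.optim.RAdam(model.parameters(), lr=1e-3, betas=(0.9, 0.999), weight_decay=1e-4)
inputs, targets = torch.randn(32, 4), torch.randn(32, 1)

for _ in range(100):
    optimizer.zero_grad()
    loss = nn.functional.mse_loss(model(inputs), targets)
    loss.backward()
    optimizer.step()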
pytorch-master
torch/optim/radam.py
import math import torch from torch import Tensor from .optimizer import Optimizer from typing import List, Optional __all__ = ['Adam', 'adam'] class Adam(Optimizer): r"""Implements Adam algorithm. .. math:: \begin{aligned} &\rule{110mm}{0.4pt} \\ &\textbf{input} : \gamma \text{ (lr)}, \beta_1, \beta_2 \text{ (betas)},\theta_0 \text{ (params)},f(\theta) \text{ (objective)} \\ &\hspace{13mm} \lambda \text{ (weight decay)}, \: \textit{amsgrad}, \:\textit{maximize} \\ &\textbf{initialize} : m_0 \leftarrow 0 \text{ ( first moment)}, v_0\leftarrow 0 \text{ (second moment)},\: \widehat{v_0}^{max}\leftarrow 0\\[-1.ex] &\rule{110mm}{0.4pt} \\ &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do} \\ &\hspace{5mm}\textbf{if} \: \textit{maximize}: \\ &\hspace{10mm}g_t \leftarrow -\nabla_{\theta} f_t (\theta_{t-1}) \\ &\hspace{5mm}\textbf{else} \\ &\hspace{10mm}g_t \leftarrow \nabla_{\theta} f_t (\theta_{t-1}) \\ &\hspace{5mm}\textbf{if} \: \lambda \neq 0 \\ &\hspace{10mm} g_t \leftarrow g_t + \lambda \theta_{t-1} \\ &\hspace{5mm}m_t \leftarrow \beta_1 m_{t-1} + (1 - \beta_1) g_t \\ &\hspace{5mm}v_t \leftarrow \beta_2 v_{t-1} + (1-\beta_2) g^2_t \\ &\hspace{5mm}\widehat{m_t} \leftarrow m_t/\big(1-\beta_1^t \big) \\ &\hspace{5mm}\widehat{v_t} \leftarrow v_t/\big(1-\beta_2^t \big) \\ &\hspace{5mm}\textbf{if} \: amsgrad \\ &\hspace{10mm}\widehat{v_t}^{max} \leftarrow \mathrm{max}(\widehat{v_t}^{max}, \widehat{v_t}) \\ &\hspace{10mm}\theta_t \leftarrow \theta_{t-1} - \gamma \widehat{m_t}/ \big(\sqrt{\widehat{v_t}^{max}} + \epsilon \big) \\ &\hspace{5mm}\textbf{else} \\ &\hspace{10mm}\theta_t \leftarrow \theta_{t-1} - \gamma \widehat{m_t}/ \big(\sqrt{\widehat{v_t}} + \epsilon \big) \\ &\rule{110mm}{0.4pt} \\[-1.ex] &\bf{return} \: \theta_t \\[-1.ex] &\rule{110mm}{0.4pt} \\[-1.ex] \end{aligned} For further details regarding the algorithm we refer to `Adam: A Method for Stochastic Optimization`_. Args: params (iterable): iterable of parameters to optimize or dicts defining parameter groups lr (float, optional): learning rate (default: 1e-3) betas (Tuple[float, float], optional): coefficients used for computing running averages of gradient and its square (default: (0.9, 0.999)) eps (float, optional): term added to the denominator to improve numerical stability (default: 1e-8) weight_decay (float, optional): weight decay (L2 penalty) (default: 0) amsgrad (bool, optional): whether to use the AMSGrad variant of this algorithm from the paper `On the Convergence of Adam and Beyond`_ (default: False) foreach (bool, optional): whether foreach implementation of optimizer is used (default: None) maximize (bool, optional): maximize the params based on the objective, instead of minimizing (default: False) capturable (bool, optional): whether this instance is safe to capture in a CUDA graph. Passing True can impair ungraphed performance, so if you don't intend to graph capture this instance, leave it False (default: False) .. _Adam\: A Method for Stochastic Optimization: https://arxiv.org/abs/1412.6980 .. 
_On the Convergence of Adam and Beyond: https://openreview.net/forum?id=ryQu7f-RZ """ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, *, foreach: Optional[bool] = None, maximize: bool = False, capturable: bool = False): if not 0.0 <= lr: raise ValueError("Invalid learning rate: {}".format(lr)) if not 0.0 <= eps: raise ValueError("Invalid epsilon value: {}".format(eps)) if not 0.0 <= betas[0] < 1.0: raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0])) if not 0.0 <= betas[1] < 1.0: raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1])) if not 0.0 <= weight_decay: raise ValueError("Invalid weight_decay value: {}".format(weight_decay)) defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, amsgrad=amsgrad, maximize=maximize, foreach=foreach, capturable=capturable) super(Adam, self).__init__(params, defaults) def __setstate__(self, state): super().__setstate__(state) for group in self.param_groups: group.setdefault('amsgrad', False) group.setdefault('maximize', False) group.setdefault('foreach', None) group.setdefault('capturable', False) state_values = list(self.state.values()) step_is_tensor = (len(state_values) != 0) and torch.is_tensor(state_values[0]['step']) if not step_is_tensor: for s in state_values: s['step'] = torch.tensor(float(s['step'])) @torch.no_grad() def step(self, closure=None): """Performs a single optimization step. Args: closure (Callable, optional): A closure that reevaluates the model and returns the loss. """ self._cuda_graph_capture_health_check() loss = None if closure is not None: with torch.enable_grad(): loss = closure() for group in self.param_groups: params_with_grad = [] grads = [] exp_avgs = [] exp_avg_sqs = [] max_exp_avg_sqs = [] state_steps = [] beta1, beta2 = group['betas'] for p in group['params']: if p.grad is not None: params_with_grad.append(p) if p.grad.is_sparse: raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead') grads.append(p.grad) state = self.state[p] # Lazy state initialization if len(state) == 0: state['step'] = torch.zeros((1,), dtype=torch.float, device=p.device) \ if self.defaults['capturable'] else torch.tensor(0.) # Exponential moving average of gradient values state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format) # Exponential moving average of squared gradient values state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format) if group['amsgrad']: # Maintains max of all exp. moving avg. of sq. grad. 
values state['max_exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format) exp_avgs.append(state['exp_avg']) exp_avg_sqs.append(state['exp_avg_sq']) if group['amsgrad']: max_exp_avg_sqs.append(state['max_exp_avg_sq']) state_steps.append(state['step']) adam(params_with_grad, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, amsgrad=group['amsgrad'], beta1=beta1, beta2=beta2, lr=group['lr'], weight_decay=group['weight_decay'], eps=group['eps'], maximize=group['maximize'], foreach=group['foreach'], capturable=group['capturable']) return loss def adam(params: List[Tensor], grads: List[Tensor], exp_avgs: List[Tensor], exp_avg_sqs: List[Tensor], max_exp_avg_sqs: List[Tensor], state_steps: List[Tensor], # kwonly args with defaults are not supported by functions compiled with torchscript issue #70627 # setting this as kwarg for now as functional API is compiled by torch/distributed/optim foreach: bool = None, capturable: bool = False, *, amsgrad: bool, beta1: float, beta2: float, lr: float, weight_decay: float, eps: float, maximize: bool): r"""Functional API that performs Adam algorithm computation. See :class:`~torch.optim.Adam` for details. """ if not all(isinstance(t, torch.Tensor) for t in state_steps): raise RuntimeError("API has changed, `state_steps` argument must contain a list of singleton tensors") if foreach is None: # Placeholder for more complex foreach logic to be added when value is not set foreach = False if foreach and torch.jit.is_scripting(): raise RuntimeError('torch.jit.script not supported with foreach optimizers') if foreach and not torch.jit.is_scripting(): func = _multi_tensor_adam else: func = _single_tensor_adam func(params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, amsgrad=amsgrad, beta1=beta1, beta2=beta2, lr=lr, weight_decay=weight_decay, eps=eps, maximize=maximize, capturable=capturable) def _single_tensor_adam(params: List[Tensor], grads: List[Tensor], exp_avgs: List[Tensor], exp_avg_sqs: List[Tensor], max_exp_avg_sqs: List[Tensor], state_steps: List[Tensor], *, amsgrad: bool, beta1: float, beta2: float, lr: float, weight_decay: float, eps: float, maximize: bool, capturable: bool): for i, param in enumerate(params): grad = grads[i] if not maximize else -grads[i] exp_avg = exp_avgs[i] exp_avg_sq = exp_avg_sqs[i] step_t = state_steps[i] if capturable: assert param.is_cuda and step_t.is_cuda, "If capturable=True, params and state_steps must be CUDA tensors." # update step step_t += 1 if weight_decay != 0: grad = grad.add(param, alpha=weight_decay) if torch.is_complex(param): grad = torch.view_as_real(grad) exp_avg = torch.view_as_real(exp_avg) exp_avg_sq = torch.view_as_real(exp_avg_sq) param = torch.view_as_real(param) # Decay the first and second moment running average coefficient exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1) exp_avg_sq.mul_(beta2).addcmul_(grad, grad.conj(), value=1 - beta2) if capturable: step = step_t # 1 - beta1 ** step can't be captured in a CUDA graph, even if step is a CUDA tensor # (incurs "RuntimeError: CUDA error: operation not permitted when stream is capturing") bias_correction1 = 1 - torch.pow(beta1, step) bias_correction2 = 1 - torch.pow(beta2, step) step_size = lr / bias_correction1 step_size_neg = step_size.neg() bias_correction2_sqrt = bias_correction2.sqrt() if amsgrad: # Maintains the maximum of all 2nd moment running avg. till now torch.maximum(max_exp_avg_sqs[i], exp_avg_sq, out=max_exp_avg_sqs[i]) # Uses the max. for normalizing running avg. 
of gradient # Folds in (admittedly ugly) 1-elem step_size math here to avoid extra param-set-sized read+write # (can't fold it into addcdiv_ below because addcdiv_ requires value is a Number, not a Tensor) denom = (max_exp_avg_sqs[i].sqrt() / (bias_correction2_sqrt * step_size_neg)).add_(eps / step_size_neg) else: denom = (exp_avg_sq.sqrt() / (bias_correction2_sqrt * step_size_neg)).add_(eps / step_size_neg) param.addcdiv_(exp_avg, denom) else: step = step_t.item() bias_correction1 = 1 - beta1 ** step bias_correction2 = 1 - beta2 ** step step_size = lr / bias_correction1 bias_correction2_sqrt = math.sqrt(bias_correction2) if amsgrad: # Maintains the maximum of all 2nd moment running avg. till now torch.maximum(max_exp_avg_sqs[i], exp_avg_sq, out=max_exp_avg_sqs[i]) # Use the max. for normalizing running avg. of gradient denom = (max_exp_avg_sqs[i].sqrt() / bias_correction2_sqrt).add_(eps) else: denom = (exp_avg_sq.sqrt() / bias_correction2_sqrt).add_(eps) param.addcdiv_(exp_avg, denom, value=-step_size) def _multi_tensor_adam(params: List[Tensor], grads: List[Tensor], exp_avgs: List[Tensor], exp_avg_sqs: List[Tensor], max_exp_avg_sqs: List[Tensor], state_steps: List[Tensor], *, amsgrad: bool, beta1: float, beta2: float, lr: float, weight_decay: float, eps: float, maximize: bool, capturable: bool): if len(params) == 0: return if capturable: assert all(p.is_cuda and step.is_cuda for p, step in zip(params, state_steps)), \ "If capturable=True, params and state_steps must be CUDA tensors." if maximize: grads = torch._foreach_neg(tuple(grads)) # type: ignore[assignment] # Handle complex parameters grads = [torch.view_as_real(x) if torch.is_complex(x) else x for x in grads] exp_avgs = [torch.view_as_real(x) if torch.is_complex(x) else x for x in exp_avgs] exp_avg_sqs = [torch.view_as_real(x) if torch.is_complex(x) else x for x in exp_avg_sqs] params_ = [torch.view_as_real(x) if torch.is_complex(x) else x for x in params] # update steps torch._foreach_add_(state_steps, 1) if weight_decay != 0: torch._foreach_add_(grads, params, alpha=weight_decay) # Decay the first and second moment running average coefficient torch._foreach_mul_(exp_avgs, beta1) torch._foreach_add_(exp_avgs, grads, alpha=1 - beta1) torch._foreach_mul_(exp_avg_sqs, beta2) torch._foreach_addcmul_(exp_avg_sqs, grads, grads, 1 - beta2) if capturable: # TODO: use foreach_pow if/when foreach_pow is added bias_correction1 = [torch.pow(beta1, step) for step in state_steps] bias_correction2 = [torch.pow(beta2, step) for step in state_steps] # foreach_sub doesn't allow a scalar as the first arg torch._foreach_sub_(bias_correction1, 1) torch._foreach_sub_(bias_correction2, 1) torch._foreach_neg_(bias_correction1) torch._foreach_neg_(bias_correction2) # foreach_div doesn't allow a scalar as the first arg step_size = torch._foreach_div(bias_correction1, lr) torch._foreach_reciprocal_(step_size) torch._foreach_neg_(step_size) bias_correction2_sqrt = torch._foreach_sqrt(bias_correction2) if amsgrad: # Maintains the maximum of all 2nd moment running avg. till now torch._foreach_maximum_(max_exp_avg_sqs, exp_avg_sqs) # type: ignore[assignment] # Use the max. for normalizing running avg. 
of gradient max_exp_avg_sq_sqrt = torch._foreach_sqrt(max_exp_avg_sqs) # Folds in (admittedly ugly) 1-elem step_size math here to avoid extra param-set-sized read+write # (can't fold it into addcdiv_ below because addcdiv_ requires value is a Number, not a Tensor) torch._foreach_div_(max_exp_avg_sq_sqrt, torch._foreach_mul(bias_correction2_sqrt, step_size)) eps_over_step_size = torch._foreach_div(step_size, eps) torch._foreach_reciprocal_(eps_over_step_size) denom = torch._foreach_add(max_exp_avg_sq_sqrt, eps_over_step_size) else: exp_avg_sq_sqrt = torch._foreach_sqrt(exp_avg_sqs) torch._foreach_div_(exp_avg_sq_sqrt, torch._foreach_mul(bias_correction2_sqrt, step_size)) eps_over_step_size = torch._foreach_div(step_size, eps) torch._foreach_reciprocal_(eps_over_step_size) denom = torch._foreach_add(exp_avg_sq_sqrt, eps_over_step_size) torch._foreach_addcdiv_(params_, exp_avgs, denom) else: bias_correction1 = [1 - beta1 ** step.item() for step in state_steps] bias_correction2 = [1 - beta2 ** step.item() for step in state_steps] step_size = [(lr / bc) * -1 for bc in bias_correction1] bias_correction2_sqrt = [math.sqrt(bc) for bc in bias_correction2] if amsgrad: # Maintains the maximum of all 2nd moment running avg. till now torch._foreach_maximum_(max_exp_avg_sqs, exp_avg_sqs) # Use the max. for normalizing running avg. of gradient max_exp_avg_sq_sqrt = torch._foreach_sqrt(max_exp_avg_sqs) torch._foreach_div_(max_exp_avg_sq_sqrt, bias_correction2_sqrt) denom = torch._foreach_add(max_exp_avg_sq_sqrt, eps) else: exp_avg_sq_sqrt = torch._foreach_sqrt(exp_avg_sqs) torch._foreach_div_(exp_avg_sq_sqrt, bias_correction2_sqrt) denom = torch._foreach_add(exp_avg_sq_sqrt, eps) torch._foreach_addcdiv_(params_, exp_avgs, denom, step_size)
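# --- Editor's note: illustrative sketch, not part of the upstream adam.py ---
# Minimal Adam loop exercising the amsgrad and foreach constructor options
# documented above; the model, data and learning rate are assumptions chosen
# only for illustration.
import torch
from torch import nn

model = nn.Sequential(nn.Linear(8, 8), nn.Tanh(), nn.Linear(8, 1))
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3, amsgrad=True, foreach=True)
inputs, targets = torch.randn(16, 8), torch.randn(16, 1)

for _ in range(50):
    optimizer.zero_grad()
    nn.functional.mse_loss(model(inputs), targets).backward()
    optimizer.step()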
pytorch-master
torch/optim/adam.py
from collections import defaultdict, abc as container_abcs import torch from copy import deepcopy from itertools import chain import warnings import functools __all__ = ['Optimizer'] class _RequiredParameter(object): """Singleton class representing a required parameter for an Optimizer.""" def __repr__(self): return "<required parameter>" required = _RequiredParameter() def _use_grad_for_differentiable(func): def _use_grad(self, *args, **kwargs): prev_grad = torch.is_grad_enabled() try: torch.set_grad_enabled(self.defaults['differentiable']) ret = func(self, *args, **kwargs) finally: torch.set_grad_enabled(prev_grad) return ret return _use_grad class Optimizer(object): r"""Base class for all optimizers. .. warning:: Parameters need to be specified as collections that have a deterministic ordering that is consistent between runs. Examples of objects that don't satisfy those properties are sets and iterators over values of dictionaries. Args: params (iterable): an iterable of :class:`torch.Tensor` s or :class:`dict` s. Specifies what Tensors should be optimized. defaults: (dict): a dict containing default values of optimization options (used when a parameter group doesn't specify them). """ def __init__(self, params, defaults): torch._C._log_api_usage_once("python.optimizer") self.defaults = defaults self._hook_for_profile() if isinstance(params, torch.Tensor): raise TypeError("params argument given to the optimizer should be " "an iterable of Tensors or dicts, but got " + torch.typename(params)) self.state = defaultdict(dict) self.param_groups = [] param_groups = list(params) if len(param_groups) == 0: raise ValueError("optimizer got an empty parameter list") if not isinstance(param_groups[0], dict): param_groups = [{'params': param_groups}] for param_group in param_groups: self.add_param_group(param_group) # Allows _cuda_graph_capture_health_check to rig a poor man's TORCH_WARN_ONCE in python, # which I don't think exists # https://github.com/pytorch/pytorch/issues/72948 self._warned_capturable_if_run_uncaptured = True def __getstate__(self): return { 'defaults': self.defaults, 'state': self.state, 'param_groups': self.param_groups, } def __setstate__(self, state): self.__dict__.update(state) self._hook_for_profile() # To support multiprocessing pickle/unpickle. self.defaults.setdefault('differentiable', False) def __repr__(self): format_string = self.__class__.__name__ + ' (' for i, group in enumerate(self.param_groups): format_string += '\n' format_string += 'Parameter Group {0}\n'.format(i) for key in sorted(group.keys()): if key != 'params': format_string += ' {0}: {1}\n'.format(key, group[key]) format_string += ')' return format_string # Currently needed by Adam and AdamW def _cuda_graph_capture_health_check(self): if torch.has_cuda and torch.cuda.is_available(): capturing = torch.cuda.is_current_stream_capturing() if capturing and not self.defaults['capturable']: raise RuntimeError("Attempting CUDA graph capture of step() for an instance of " + self.__class__.__name__ + " but this instance was constructed with capturable=False.") if ( (not getattr(self, "_warned_capturable_if_run_uncaptured", False)) and self.defaults["capturable"] and (not capturing) ): print("Warning: This instance was constructed with capturable=True, but step() " + "is running without CUDA graph capture. 
If you never intend to graph-capture this " + "instance, capturable=True can impair performance, and you should set capturable=False.") self._warned_capturable_if_run_uncaptured = True def _hook_for_profile(self): self._zero_grad_profile_name = "Optimizer.zero_grad#{}.zero_grad".format(self.__class__.__name__) def profile_hook_step(func): @functools.wraps(func) def wrapper(*args, **kwargs): obj, *_ = args profile_name = "Optimizer.step#{}.step".format(obj.__class__.__name__) with torch.autograd.profiler.record_function(profile_name): return func(*args, **kwargs) return wrapper hooked = getattr(self.__class__.step, "hooked", None) if not hooked: self.__class__.step = profile_hook_step(self.__class__.step) self.__class__.step.hooked = True def state_dict(self): r"""Returns the state of the optimizer as a :class:`dict`. It contains two entries: * state - a dict holding current optimization state. Its content differs between optimizer classes. * param_groups - a list containing all parameter groups where each parameter group is a dict """ # Save order indices instead of Tensors param_mappings = {} start_index = 0 def pack_group(group): nonlocal start_index packed = {k: v for k, v in group.items() if k != 'params'} param_mappings.update({id(p): i for i, p in enumerate(group['params'], start_index) if id(p) not in param_mappings}) packed['params'] = [param_mappings[id(p)] for p in group['params']] start_index += len(packed['params']) return packed param_groups = [pack_group(g) for g in self.param_groups] # Remap state to use order indices as keys packed_state = {(param_mappings[id(k)] if isinstance(k, torch.Tensor) else k): v for k, v in self.state.items()} return { 'state': packed_state, 'param_groups': param_groups, } def load_state_dict(self, state_dict): r"""Loads the optimizer state. Args: state_dict (dict): optimizer state. Should be an object returned from a call to :meth:`state_dict`. """ # deepcopy, to be consistent with module API state_dict = deepcopy(state_dict) # Validate the state_dict groups = self.param_groups saved_groups = state_dict['param_groups'] if len(groups) != len(saved_groups): raise ValueError("loaded state dict has a different number of " "parameter groups") param_lens = (len(g['params']) for g in groups) saved_lens = (len(g['params']) for g in saved_groups) if any(p_len != s_len for p_len, s_len in zip(param_lens, saved_lens)): raise ValueError("loaded state dict contains a parameter group " "that doesn't match the size of optimizer's group") # Update the state id_map = {old_id: p for old_id, p in zip(chain.from_iterable((g['params'] for g in saved_groups)), chain.from_iterable((g['params'] for g in groups)))} def cast(param, value, key=None): r"""Make a deep copy of value, casting all tensors to device of param.""" if isinstance(value, torch.Tensor): # Floating-point types are a bit special here. They are the only ones # that are assumed to always match the type of params. # Make sure state['step'] is not casted https://github.com/pytorch/pytorch/issues/74424 if (key != "step"): if param.is_floating_point(): value = value.to(param.dtype) value = value.to(param.device) return value elif isinstance(value, dict): return {k: cast(param, v, key=k) for k, v in value.items()} elif isinstance(value, container_abcs.Iterable): return type(value)(cast(param, v) for v in value) else: return value # Copy state assigned to params (and cast tensors to appropriate types). # State that is not assigned to params is copied as is (needed for # backward compatibility). 
state = defaultdict(dict) for k, v in state_dict['state'].items(): if k in id_map: param = id_map[k] state[param] = cast(param, v) else: state[k] = v # Update parameter groups, setting their 'params' value def update_group(group, new_group): new_group['params'] = group['params'] return new_group param_groups = [ update_group(g, ng) for g, ng in zip(groups, saved_groups)] self.__setstate__({'state': state, 'param_groups': param_groups}) def zero_grad(self, set_to_none: bool = False): r"""Sets the gradients of all optimized :class:`torch.Tensor` s to zero. Args: set_to_none (bool): instead of setting to zero, set the grads to None. This will in general have lower memory footprint, and can modestly improve performance. However, it changes certain behaviors. For example: 1. When the user tries to access a gradient and perform manual ops on it, a None attribute or a Tensor full of 0s will behave differently. 2. If the user requests ``zero_grad(set_to_none=True)`` followed by a backward pass, ``.grad``\ s are guaranteed to be None for params that did not receive a gradient. 3. ``torch.optim`` optimizers have a different behavior if the gradient is 0 or None (in one case it does the step with a gradient of 0 and in the other it skips the step altogether). """ foreach = self.defaults.get('foreach', False) if not hasattr(self, "_zero_grad_profile_name"): self._hook_for_profile() if foreach: per_device_and_dtype_grads = defaultdict(lambda: defaultdict(list)) with torch.autograd.profiler.record_function(self._zero_grad_profile_name): for group in self.param_groups: for p in group['params']: if p.grad is not None: if set_to_none: p.grad = None else: if p.grad.grad_fn is not None: p.grad.detach_() else: p.grad.requires_grad_(False) if (not foreach or p.grad.is_sparse): p.grad.zero_() else: per_device_and_dtype_grads[p.grad.device][p.grad.dtype].append(p.grad) if foreach: for _, per_dtype_grads in per_device_and_dtype_grads.items(): for grads in per_dtype_grads.values(): torch._foreach_zero_(grads) def step(self, closure): r"""Performs a single optimization step (parameter update). Args: closure (Callable): A closure that reevaluates the model and returns the loss. Optional for most optimizers. .. note:: Unless otherwise specified, this function should not modify the ``.grad`` field of the parameters. """ raise NotImplementedError def add_param_group(self, param_group): r"""Add a param group to the :class:`Optimizer` s `param_groups`. This can be useful when fine tuning a pre-trained network as frozen layers can be made trainable and added to the :class:`Optimizer` as training progresses. Args: param_group (dict): Specifies what Tensors should be optimized along with group specific optimization options. """ assert isinstance(param_group, dict), "param group must be a dict" params = param_group['params'] if isinstance(params, torch.Tensor): param_group['params'] = [params] elif isinstance(params, set): raise TypeError('optimizer parameters need to be organized in ordered collections, but ' 'the ordering of tensors in sets will change between runs. 
Please use a list instead.') else: param_group['params'] = list(params) for param in param_group['params']: if not isinstance(param, torch.Tensor): raise TypeError("optimizer can only optimize Tensors, " "but one of the params is " + torch.typename(param)) if not self.defaults.get('differentiable', None) and not (param.is_leaf or param.retains_grad): raise ValueError("can't optimize a non-leaf Tensor") for name, default in self.defaults.items(): if default is required and name not in param_group: raise ValueError("parameter group didn't specify a value of required optimization parameter " + name) else: param_group.setdefault(name, default) params = param_group['params'] if len(params) != len(set(params)): warnings.warn("optimizer contains a parameter group with duplicate parameters; " "in future, this will cause an error; " "see github.com/pytorch/pytorch/issues/40967 for more information", stacklevel=3) param_set = set() for group in self.param_groups: param_set.update(set(group['params'])) if not param_set.isdisjoint(set(param_group['params'])): raise ValueError("some parameters appear in more than one parameter group") self.param_groups.append(param_group)
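# --- Editor's note: illustrative sketch, not part of the upstream optimizer.py ---
# A minimal custom optimizer built on the Optimizer base class above: it shows
# the expected pattern of passing a defaults dict to __init__ and iterating over
# param_groups inside a no-grad step(). The update rule (plain SGD) is an
# assumption chosen for simplicity, not a PyTorch-provided optimizer.
import torch
from torch.optim import Optimizer

class PlainSGD(Optimizer):
    def __init__(self, params, lr=1e-2):
        if lr <= 0.0:
            raise ValueError("Invalid learning rate: {}".format(lr))
        defaults = dict(lr=lr)
        super().__init__(params, defaults)

    @torch.no_grad()
    def step(self, closure=None):
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is not None:
                    p.add_(p.grad, alpha=-group['lr'])  # vanilla gradient descent update
        return loss

w = torch.randn(3, requires_grad=True)
opt = PlainSGD([w], lr=0.1)
(w ** 2).sum().backward()
opt.step()
opt.zero_grad(set_to_none=True)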
pytorch-master
torch/optim/optimizer.py
import math import torch from torch import Tensor from .optimizer import Optimizer from typing import List, Optional __all__ = ['NAdam', 'nadam'] class NAdam(Optimizer): r"""Implements NAdam algorithm. .. math:: \begin{aligned} &\rule{110mm}{0.4pt} \\ &\textbf{input} : \gamma_t \text{ (lr)}, \: \beta_1,\beta_2 \text{ (betas)}, \: \theta_0 \text{ (params)}, \: f(\theta) \text{ (objective)} \\ &\hspace{13mm} \: \lambda \text{ (weight decay)}, \:\psi \text{ (momentum decay)} \\ &\textbf{initialize} : m_0 \leftarrow 0 \text{ ( first moment)}, v_0 \leftarrow 0 \text{ ( second moment)} \\[-1.ex] &\rule{110mm}{0.4pt} \\ &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do} \\ &\hspace{5mm}g_t \leftarrow \nabla_{\theta} f_t (\theta_{t-1}) \\ &\hspace{5mm}if \: \lambda \neq 0 \\ &\hspace{10mm} g_t \leftarrow g_t + \lambda \theta_{t-1} \\ &\hspace{5mm} \mu_t \leftarrow \beta_1 \big(1 - \frac{1}{2} 0.96^{t \psi} \big) \\ &\hspace{5mm} \mu_{t+1} \leftarrow \beta_1 \big(1 - \frac{1}{2} 0.96^{(t+1)\psi}\big)\\ &\hspace{5mm}m_t \leftarrow \beta_1 m_{t-1} + (1 - \beta_1) g_t \\ &\hspace{5mm}v_t \leftarrow \beta_2 v_{t-1} + (1-\beta_2) g^2_t \\ &\hspace{5mm}\widehat{m_t} \leftarrow \mu_{t+1} m_t/(1-\prod_{i=1}^{t+1}\mu_i)\\[-1.ex] & \hspace{11mm} + (1-\mu_t) g_t /(1-\prod_{i=1}^{t} \mu_{i}) \\ &\hspace{5mm}\widehat{v_t} \leftarrow v_t/\big(1-\beta_2^t \big) \\ &\hspace{5mm}\theta_t \leftarrow \theta_{t-1} - \gamma \widehat{m_t}/ \big(\sqrt{\widehat{v_t}} + \epsilon \big) \\ &\rule{110mm}{0.4pt} \\[-1.ex] &\bf{return} \: \theta_t \\[-1.ex] &\rule{110mm}{0.4pt} \\[-1.ex] \end{aligned} For further details regarding the algorithm we refer to `Incorporating Nesterov Momentum into Adam`_. Args: params (iterable): iterable of parameters to optimize or dicts defining parameter groups lr (float, optional): learning rate (default: 2e-3) betas (Tuple[float, float], optional): coefficients used for computing running averages of gradient and its square (default: (0.9, 0.999)) eps (float, optional): term added to the denominator to improve numerical stability (default: 1e-8) weight_decay (float, optional): weight decay (L2 penalty) (default: 0) momentum_decay (float, optional): momentum momentum_decay (default: 4e-3) foreach (bool, optional): whether foreach implementation of optimizer is used (default: None) .. 
_Incorporating Nesterov Momentum into Adam: https://openreview.net/forum?id=OM0jvwB8jIp57ZJjtNEZ """ def __init__(self, params, lr=2e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, momentum_decay=4e-3, foreach: Optional[bool] = None): if not 0.0 <= lr: raise ValueError("Invalid learning rate: {}".format(lr)) if not 0.0 <= eps: raise ValueError("Invalid epsilon value: {}".format(eps)) if not 0.0 <= betas[0] < 1.0: raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0])) if not 0.0 <= betas[1] < 1.0: raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1])) if not 0.0 <= weight_decay: raise ValueError("Invalid weight_decay value: {}".format(weight_decay)) if not 0.0 <= momentum_decay: raise ValueError("Invalid momentum_decay value: {}".format(momentum_decay)) defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, momentum_decay=momentum_decay, foreach=foreach) super(NAdam, self).__init__(params, defaults) def __setstate__(self, state): super().__setstate__(state) for group in self.param_groups: group.setdefault('foreach', None) state_values = list(self.state.values()) step_is_tensor = (len(state_values) != 0) and torch.is_tensor(state_values[0]['step']) if not step_is_tensor: for s in state_values: s['step'] = torch.tensor(float(s['step'])) mu_product_is_tensor = (len(state_values) != 0) and torch.is_tensor(state_values[0]['mu_product']) if not mu_product_is_tensor: for s in state_values: s['mu_product'] = torch.tensor(s['mu_product']) @torch.no_grad() def step(self, closure=None): """Performs a single optimization step. Args: closure (Callable, optional): A closure that reevaluates the model and returns the loss. """ loss = None if closure is not None: with torch.enable_grad(): loss = closure() for group in self.param_groups: params_with_grad = [] grads = [] exp_avgs = [] exp_avg_sqs = [] mu_products = [] state_steps = [] beta1, beta2 = group['betas'] for p in group['params']: if p.grad is not None: params_with_grad.append(p) if p.grad.is_sparse: raise RuntimeError('NAdam does not support sparse gradients') grads.append(p.grad) state = self.state[p] # Lazy state initialization if len(state) == 0: state['step'] = torch.tensor(0.) state['mu_product'] = torch.tensor(1.) # Exponential moving average of gradient values state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format) # Exponential moving average of squared gradient values state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format) exp_avgs.append(state['exp_avg']) exp_avg_sqs.append(state['exp_avg_sq']) mu_products.append(state['mu_product']) state_steps.append(state['step']) nadam(params_with_grad, grads, exp_avgs, exp_avg_sqs, mu_products, state_steps, beta1=beta1, beta2=beta2, lr=group['lr'], weight_decay=group['weight_decay'], momentum_decay=group['momentum_decay'], eps=group['eps'], foreach=group['foreach']) return loss def nadam(params: List[Tensor], grads: List[Tensor], exp_avgs: List[Tensor], exp_avg_sqs: List[Tensor], mu_products: List[Tensor], state_steps: List[Tensor], # kwonly args with defaults are not supported by functions compiled with torchscript issue #70627 # setting this as kwarg for now as functional API is compiled by torch/distributed/optim foreach: bool = None, *, beta1: float, beta2: float, lr: float, weight_decay: float, momentum_decay: float, eps: float): r"""Functional API that performs NAdam algorithm computation. See :class:`~torch.optim.NAdam` for details. 
""" if not all(isinstance(t, torch.Tensor) for t in state_steps): raise RuntimeError("API has changed, `state_steps` argument must contain a list of singleton tensors") if not all(isinstance(t, torch.Tensor) for t in mu_products): raise RuntimeError("API has changed, `mu_products` argument must contain a list of singleton tensors") if foreach is None: # Placeholder for more complex foreach logic to be added when value is not set foreach = False if foreach and torch.jit.is_scripting(): raise RuntimeError('torch.jit.script not supported with foreach optimizers') if foreach and not torch.jit.is_scripting(): func = _multi_tensor_nadam else: func = _single_tensor_nadam func(params, grads, exp_avgs, exp_avg_sqs, mu_products, state_steps, beta1=beta1, beta2=beta2, lr=lr, weight_decay=weight_decay, momentum_decay=momentum_decay, eps=eps) def _single_tensor_nadam(params: List[Tensor], grads: List[Tensor], exp_avgs: List[Tensor], exp_avg_sqs: List[Tensor], mu_products: List[Tensor], state_steps: List[Tensor], *, beta1: float, beta2: float, lr: float, weight_decay: float, momentum_decay: float, eps: float): for i, param in enumerate(params): grad = grads[i] exp_avg = exp_avgs[i] exp_avg_sq = exp_avg_sqs[i] mu_product = mu_products[i] step_t = state_steps[i] # update step step_t += 1 step = step_t.item() bias_correction2 = 1 - beta2 ** step if weight_decay != 0: grad = grad.add(param, alpha=weight_decay) # calculate the momentum cache \mu^{t} and \mu^{t+1} mu = beta1 * (1. - 0.5 * (0.96 ** (step * momentum_decay))) mu_next = beta1 * (1. - 0.5 * (0.96 ** ((step + 1) * momentum_decay))) # update mu_product mu_product *= mu mu_product_next = mu_product * mu * mu_next # decay the first and second moment running average coefficient exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1) exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2) denom = exp_avg_sq.div(bias_correction2).sqrt().add_(eps) param.addcdiv_(grad, denom, value=-lr * (1. - mu) / (1. - mu_product.item())) param.addcdiv_(exp_avg, denom, value=-lr * mu_next / (1. - mu_product_next.item())) def _multi_tensor_nadam(params: List[Tensor], grads: List[Tensor], exp_avgs: List[Tensor], exp_avg_sqs: List[Tensor], mu_products: List[Tensor], state_steps: List[Tensor], *, beta1: float, beta2: float, lr: float, weight_decay: float, momentum_decay: float, eps: float): if len(params) == 0: return # update steps torch._foreach_add_(state_steps, 1) bias_correction1 = [1 - beta1 ** step.item() for step in state_steps] bias_correction2 = [1 - beta2 ** step.item() for step in state_steps] mus = [beta1 * (1. - 0.5 * (0.96 ** (step.item() * momentum_decay))) for step in state_steps] mu_nexts = [beta1 * (1. - 0.5 * (0.96 ** ((step.item() + 1) * momentum_decay))) for step in state_steps] # update mu_products torch._foreach_mul_(mu_products, mus) if weight_decay != 0: torch._foreach_add_(grads, params, alpha=weight_decay) # Decay the first and second moment running average coefficient torch._foreach_mul_(exp_avgs, beta1) torch._foreach_add_(exp_avgs, grads, alpha=1 - beta1) torch._foreach_mul_(exp_avg_sqs, beta2) torch._foreach_addcmul_(exp_avg_sqs, grads, grads, 1 - beta2) exp_avg_sq_sqrt = torch._foreach_sqrt(exp_avg_sqs) bias_correction_sqrt = [math.sqrt(bc) for bc in bias_correction2] torch._foreach_div_(exp_avg_sq_sqrt, bias_correction_sqrt) denom = torch._foreach_add(exp_avg_sq_sqrt, eps) step_size_grads = [(lr * (1. - mu) / (1. - mu_product.item())) * -1 for mu_product, mu in zip(mu_products, mus)] step_size_expavg = [(lr * mu_next / (1. 
- mu_product.item() * mu_next)) * -1
                        for mu_product, mu_next in zip(mu_products, mu_nexts)]

    torch._foreach_addcdiv_(params, grads, denom, step_size_grads)
    torch._foreach_addcdiv_(params, exp_avgs, denom, step_size_expavg)
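
# Illustrative usage sketch for the NAdam optimizer implemented above; the toy
# model, data, and hyperparameters are assumptions made only for this example.
# Passing ``foreach=True`` routes ``step()`` through ``_multi_tensor_nadam``,
# while the default ``foreach=None`` currently falls back to the single-tensor
# path.
if __name__ == "__main__":
    model = torch.nn.Linear(10, 1)
    optimizer = NAdam(model.parameters(), lr=2e-3, momentum_decay=4e-3, foreach=True)
    for _ in range(5):
        optimizer.zero_grad()
        loss = model(torch.randn(8, 10)).pow(2).mean()
        loss.backward()
        optimizer.step()  # dispatches into the functional ``nadam`` above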
pytorch-master
torch/optim/nadam.py
import math import torch from torch import Tensor from .optimizer import Optimizer from typing import List, Optional __all__ = ['ASGD', 'asgd'] class ASGD(Optimizer): """Implements Averaged Stochastic Gradient Descent. It has been proposed in `Acceleration of stochastic approximation by averaging`_. Args: params (iterable): iterable of parameters to optimize or dicts defining parameter groups lr (float, optional): learning rate (default: 1e-2) lambd (float, optional): decay term (default: 1e-4) alpha (float, optional): power for eta update (default: 0.75) t0 (float, optional): point at which to start averaging (default: 1e6) weight_decay (float, optional): weight decay (L2 penalty) (default: 0) foreach (bool, optional): whether foreach implementation of optimizer is used (default: None) maximize (bool, optional): maximize the params based on the objective, instead of minimizing (default: False) .. _Acceleration of stochastic approximation by averaging: https://dl.acm.org/citation.cfm?id=131098 """ def __init__(self, params, lr=1e-2, lambd=1e-4, alpha=0.75, t0=1e6, weight_decay=0, foreach: Optional[bool] = None, maximize: bool = False): if not 0.0 <= lr: raise ValueError("Invalid learning rate: {}".format(lr)) if not 0.0 <= weight_decay: raise ValueError("Invalid weight_decay value: {}".format(weight_decay)) defaults = dict(lr=lr, lambd=lambd, alpha=alpha, t0=t0, weight_decay=weight_decay, foreach=foreach, maximize=maximize) super(ASGD, self).__init__(params, defaults) def __setstate__(self, state): super().__setstate__(state) for group in self.param_groups: group.setdefault('foreach', None) group.setdefault('maximize', False) state_values = list(self.state.values()) step_is_tensor = (len(state_values) != 0) and torch.is_tensor(state_values[0]['step']) if not step_is_tensor: for s in state_values: s['step'] = torch.tensor(float(s['step'])) eta_is_tensor = (len(state_values) != 0) and torch.is_tensor(state_values[0]['eta']) if not eta_is_tensor: for s in state_values: s['eta'] = torch.tensor(s['eta']) mu_is_tensor = (len(state_values) != 0) and torch.is_tensor(state_values[0]['mu']) if not mu_is_tensor: for s in state_values: s['mu'] = torch.tensor(float(s['mu'])) @torch.no_grad() def step(self, closure=None): """Performs a single optimization step. Args: closure (Callable, optional): A closure that reevaluates the model and returns the loss. """ loss = None if closure is not None: with torch.enable_grad(): loss = closure() for group in self.param_groups: params_with_grad = [] grads = [] mus = [] axs = [] etas = [] state_steps = [] for p in group['params']: if p.grad is not None: params_with_grad.append(p) if p.grad.is_sparse: raise RuntimeError('ASGD does not support sparse gradients') grads.append(p.grad) state = self.state[p] # State initialization if len(state) == 0: state['step'] = torch.tensor(0.) state['eta'] = torch.tensor(group['lr']) state['mu'] = torch.tensor(1.) 
state['ax'] = torch.zeros_like(p, memory_format=torch.preserve_format) mus.append(state['mu']) axs.append(state['ax']) etas.append(state['eta']) state_steps.append(state['step']) asgd(params_with_grad, grads, axs, mus, etas, state_steps, lambd=group['lambd'], lr=group['lr'], t0=group['t0'], alpha=group['alpha'], weight_decay=group['weight_decay'], foreach=group['foreach'], maximize=group['maximize']) return loss def asgd(params: List[Tensor], grads: List[Tensor], axs: List[Tensor], mus: List[Tensor], etas: List[Tensor], state_steps: List[Tensor], # kwonly args with defaults are not supported by functions compiled with torchscript issue #70627 # setting this as kwarg for now as functional API is compiled by torch/distributed/optim foreach: bool = None, maximize: bool = False, *, lambd: float, lr: float, t0: float, alpha: float, weight_decay: float): r"""Functional API that performs asgd algorithm computation. See :class:`~torch.optim.ASGD` for details. """ if foreach is None: # Placeholder for more complex foreach logic to be added when value is not set foreach = False if foreach and torch.jit.is_scripting(): raise RuntimeError('torch.jit.script not supported with foreach optimizers') if foreach and not torch.jit.is_scripting(): func = _multi_tensor_asgd else: func = _single_tensor_asgd func(params, grads, axs, mus, etas, state_steps, lambd=lambd, lr=lr, t0=t0, alpha=alpha, weight_decay=weight_decay, maximize=maximize) def _single_tensor_asgd(params: List[Tensor], grads: List[Tensor], axs: List[Tensor], mus: List[Tensor], etas: List[Tensor], state_steps: List[Tensor], *, lambd: float, lr: float, t0: float, alpha: float, weight_decay: float, maximize: bool): for i, param in enumerate(params): grad = grads[i] grad = grad if not maximize else -grad mu = mus[i] ax = axs[i] eta = etas[i] step_t = state_steps[i] # update step step_t += 1 step = step_t.item() if weight_decay != 0: grad = grad.add(param, alpha=weight_decay) # decay term param.mul_(1 - lambd * eta.item()) # update parameter param.add_(grad, alpha=-eta.item()) # averaging if mu.item() != 1: ax.add_(param.sub(ax).mul(mu)) else: ax.copy_(param) new_eta = torch.tensor(lr / math.pow((1 + lambd * lr * step), alpha)) eta.copy_(new_eta) new_mu = torch.tensor(1 / max(1, step - t0)) mu.copy_(new_mu) def _multi_tensor_asgd(params: List[Tensor], grads: List[Tensor], axs: List[Tensor], mus: List[Tensor], etas: List[Tensor], state_steps: List[Tensor], *, lambd: float, lr: float, t0: float, alpha: float, weight_decay: float, maximize: bool): if len(params) == 0: return if maximize: grads = torch._foreach_neg(grads) # update step torch._foreach_add_(state_steps, 1) if weight_decay != 0: torch._foreach_add_(grads, params, alpha=weight_decay) # decay term eta = etas[0].item() torch._foreach_mul_(params, 1 - lambd * eta) # update parameter torch._foreach_add_(params, grads, alpha=-eta) # averaging for i in range(len(axs)): if mus[i].item() != 1: axs[i].add_(params[i].sub(axs[i]).mul(mus[i])) else: axs[i].copy_(params[i]) # update eta and mu for i in range(len(mus)): new_eta = torch.tensor(lr / math.pow((1 + lambd * lr * state_steps[i].item()), alpha)) etas[i].copy_(new_eta) new_mu = torch.tensor(1 / max(1, state_steps[i].item() - t0)) mus[i].copy_(new_mu)
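
# Illustrative usage sketch for the ASGD optimizer above; the model, data, and
# ``t0`` value are assumptions made only for this example. Once the step count
# passes ``t0``, ``state['ax']`` switches from a plain copy of the parameters
# to a running average of the iterates.
if __name__ == "__main__":
    model = torch.nn.Linear(4, 2)
    optimizer = ASGD(model.parameters(), lr=1e-2, t0=100, weight_decay=1e-4)
    for _ in range(3):
        optimizer.zero_grad()
        model(torch.randn(16, 4)).sum().backward()
        optimizer.step()
    averaged_params = [optimizer.state[p]['ax'] for p in model.parameters()]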
pytorch-master
torch/optim/asgd.py
r"""Functional interface""" import math from torch import Tensor from typing import List from .adadelta import adadelta # type: ignore[attr-defined] # noqa: F401 from .adagrad import adagrad, _make_sparse # type: ignore[attr-defined] # noqa: F401 from .adam import adam # type: ignore[attr-defined] # noqa: F401 from .adamw import adamw # type: ignore[attr-defined] # noqa: F401 from .adamax import adamax # type: ignore[attr-defined] # noqa: F401 from .asgd import asgd # type: ignore[attr-defined] # noqa: F401 from .nadam import nadam # type: ignore[attr-defined] # noqa: F401 from .radam import radam # type: ignore[attr-defined] # noqa: F401 from .rmsprop import rmsprop # type: ignore[attr-defined] # noqa: F401 from .rprop import rprop # type: ignore[attr-defined] # noqa: F401 from .sgd import sgd # type: ignore[attr-defined] # noqa: F401 # TODO: use foreach API in optim._functional to do all the computation def sparse_adam(params: List[Tensor], grads: List[Tensor], exp_avgs: List[Tensor], exp_avg_sqs: List[Tensor], state_steps: List[int], *, eps: float, beta1: float, beta2: float, lr: float, maximize: bool): r"""Functional API that performs Sparse Adam algorithm computation. See :class:`~torch.optim.SparseAdam` for details. """ for i, param in enumerate(params): grad = grads[i] grad = grad if not maximize else -grad grad = grad.coalesce() # the update is non-linear so indices must be unique grad_indices = grad._indices() grad_values = grad._values() size = grad.size() exp_avg = exp_avgs[i] exp_avg_sq = exp_avg_sqs[i] step = state_steps[i] def make_sparse(values): constructor = grad.new if grad_indices.dim() == 0 or values.dim() == 0: return constructor().resize_as_(grad) return constructor(grad_indices, values, size) # Decay the first and second moment running average coefficient # old <- b * old + (1 - b) * new # <==> old += (1 - b) * (new - old) old_exp_avg_values = exp_avg.sparse_mask(grad)._values() exp_avg_update_values = grad_values.sub(old_exp_avg_values).mul_(1 - beta1) exp_avg.add_(make_sparse(exp_avg_update_values)) old_exp_avg_sq_values = exp_avg_sq.sparse_mask(grad)._values() exp_avg_sq_update_values = grad_values.pow(2).sub_(old_exp_avg_sq_values).mul_(1 - beta2) exp_avg_sq.add_(make_sparse(exp_avg_sq_update_values)) # Dense addition again is intended, avoiding another sparse_mask numer = exp_avg_update_values.add_(old_exp_avg_values) exp_avg_sq_update_values.add_(old_exp_avg_sq_values) denom = exp_avg_sq_update_values.sqrt_().add_(eps) del exp_avg_update_values, exp_avg_sq_update_values bias_correction1 = 1 - beta1 ** step bias_correction2 = 1 - beta2 ** step step_size = lr * math.sqrt(bias_correction2) / bias_correction1 param.add_(make_sparse(-step_size * numer.div_(denom)))
pytorch-master
torch/optim/_functional.py
import torch from torch import Tensor from .optimizer import Optimizer from typing import List, Optional __all__ = ['Adadelta', 'adadelta'] class Adadelta(Optimizer): r"""Implements Adadelta algorithm. .. math:: \begin{aligned} &\rule{110mm}{0.4pt} \\ &\textbf{input} : \gamma \text{ (lr)}, \: \theta_0 \text{ (params)}, \: f(\theta) \text{ (objective)}, \: \rho \text{ (decay)}, \: \lambda \text{ (weight decay)} \\ &\textbf{initialize} : v_0 \leftarrow 0 \: \text{ (square avg)}, \: u_0 \leftarrow 0 \: \text{ (accumulate variables)} \\[-1.ex] &\rule{110mm}{0.4pt} \\ &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do} \\ &\hspace{5mm}g_t \leftarrow \nabla_{\theta} f_t (\theta_{t-1}) \\ &\hspace{5mm}if \: \lambda \neq 0 \\ &\hspace{10mm} g_t \leftarrow g_t + \lambda \theta_{t-1} \\ &\hspace{5mm} v_t \leftarrow v_{t-1} \rho + g^2_t (1 - \rho) \\ &\hspace{5mm}\Delta x_t \leftarrow \frac{\sqrt{u_{t-1} + \epsilon }}{ \sqrt{v_t + \epsilon} }g_t \hspace{21mm} \\ &\hspace{5mm} u_t \leftarrow u_{t-1} \rho + \Delta x^2_t (1 - \rho) \\ &\hspace{5mm}\theta_t \leftarrow \theta_{t-1} - \gamma \Delta x_t \\ &\rule{110mm}{0.4pt} \\[-1.ex] &\bf{return} \: \theta_t \\[-1.ex] &\rule{110mm}{0.4pt} \\[-1.ex] \end{aligned} For further details regarding the algorithm we refer to `ADADELTA: An Adaptive Learning Rate Method`_. Args: params (iterable): iterable of parameters to optimize or dicts defining parameter groups rho (float, optional): coefficient used for computing a running average of squared gradients (default: 0.9) eps (float, optional): term added to the denominator to improve numerical stability (default: 1e-6) lr (float, optional): coefficient that scale delta before it is applied to the parameters (default: 1.0) weight_decay (float, optional): weight decay (L2 penalty) (default: 0) foreach (bool, optional): whether foreach implementation of optimizer is used (default: None) maximize (bool, optional): maximize the params based on the objective, instead of minimizing (default: False) .. _ADADELTA\: An Adaptive Learning Rate Method: https://arxiv.org/abs/1212.5701 """ def __init__(self, params, lr=1.0, rho=0.9, eps=1e-6, weight_decay=0, foreach: Optional[bool] = None, *, maximize: bool = False): if not 0.0 <= lr: raise ValueError("Invalid learning rate: {}".format(lr)) if not 0.0 <= rho <= 1.0: raise ValueError("Invalid rho value: {}".format(rho)) if not 0.0 <= eps: raise ValueError("Invalid epsilon value: {}".format(eps)) if not 0.0 <= weight_decay: raise ValueError("Invalid weight_decay value: {}".format(weight_decay)) defaults = dict(lr=lr, rho=rho, eps=eps, weight_decay=weight_decay, maximize=maximize, foreach=foreach) super(Adadelta, self).__init__(params, defaults) def __setstate__(self, state): super().__setstate__(state) for group in self.param_groups: group.setdefault('foreach', None) group.setdefault('maximize', False) @torch.no_grad() def step(self, closure=None): """Performs a single optimization step. Args: closure (Callable, optional): A closure that reevaluates the model and returns the loss. 
""" loss = None if closure is not None: with torch.enable_grad(): loss = closure() for group in self.param_groups: params_with_grad = [] grads = [] square_avgs = [] acc_deltas = [] lr, rho, eps, weight_decay, foreach, maximize = (group['lr'], group['rho'], group['eps'], group['weight_decay'], group['foreach'], group['maximize']) for p in group['params']: if p.grad is None: continue params_with_grad.append(p) if p.grad.is_sparse: raise RuntimeError('Adadelta does not support sparse gradients') grads.append(p.grad) state = self.state[p] # Lazy state initialization if len(state) == 0: state['step'] = 0 state['square_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format) state['acc_delta'] = torch.zeros_like(p, memory_format=torch.preserve_format) square_avgs.append(state['square_avg']) acc_deltas.append(state['acc_delta']) state['step'] += 1 adadelta(params_with_grad, grads, square_avgs, acc_deltas, lr=lr, rho=rho, eps=eps, weight_decay=weight_decay, foreach=foreach, maximize=maximize) return loss def adadelta(params: List[Tensor], grads: List[Tensor], square_avgs: List[Tensor], acc_deltas: List[Tensor], # kwonly args with defaults are not supported by functions compiled with torchscript issue #70627 # setting this as kwarg for now as functional API is compiled by torch/distributed/optim foreach: bool = None, *, lr: float, rho: float, eps: float, weight_decay: float, maximize: bool): r"""Functional API that performs Adadelta algorithm computation. See :class:`~torch.optim.Adadelta` for details. """ if foreach is None: # Placeholder for more complex foreach logic to be added when value is not set foreach = False if foreach and torch.jit.is_scripting(): raise RuntimeError('torch.jit.script not supported with foreach optimizers') if foreach and not torch.jit.is_scripting(): func = _multi_tensor_adadelta else: func = _single_tensor_adadelta func(params, grads, square_avgs, acc_deltas, lr=lr, rho=rho, eps=eps, weight_decay=weight_decay, maximize=maximize) def _single_tensor_adadelta(params: List[Tensor], grads: List[Tensor], square_avgs: List[Tensor], acc_deltas: List[Tensor], *, lr: float, rho: float, eps: float, weight_decay: float, maximize: bool): for (param, grad, square_avg, acc_delta) in zip(params, grads, square_avgs, acc_deltas): grad = grad if not maximize else -grad if weight_decay != 0: grad = grad.add(param, alpha=weight_decay) if torch.is_complex(param): square_avg = torch.view_as_real(square_avg) acc_delta = torch.view_as_real(acc_delta) grad = torch.view_as_real(grad) square_avg.mul_(rho).addcmul_(grad, grad, value=1 - rho) std = square_avg.add(eps).sqrt_() delta = acc_delta.add(eps).sqrt_().div_(std).mul_(grad) acc_delta.mul_(rho).addcmul_(delta, delta, value=1 - rho) if torch.is_complex(param): delta = torch.view_as_complex(delta) param.add_(delta, alpha=-lr) def _multi_tensor_adadelta(params: List[Tensor], grads: List[Tensor], square_avgs: List[Tensor], acc_deltas: List[Tensor], *, lr: float, weight_decay: float, rho: float, eps: float, maximize: bool): if len(params) == 0: return if maximize: grads = torch._foreach_neg(grads) if weight_decay != 0: torch._foreach_add_(grads, params, alpha=weight_decay) torch._foreach_mul_(square_avgs, rho) torch._foreach_addcmul_(square_avgs, grads, grads, value=1 - rho) std = torch._foreach_add(square_avgs, eps) torch._foreach_sqrt_(std) deltas = torch._foreach_add(acc_deltas, eps) torch._foreach_sqrt_(deltas) torch._foreach_div_(deltas, std) torch._foreach_mul_(deltas, grads) torch._foreach_add_(params, deltas, alpha=-lr) 
    torch._foreach_mul_(acc_deltas, rho)
    torch._foreach_addcmul_(acc_deltas, deltas, deltas, value=1 - rho)
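
# Illustrative usage sketch for the Adadelta optimizer above; the model and
# data are assumptions made only for this example. Note that ``lr`` only
# scales the computed delta, with the default ``lr=1.0`` applying it unchanged.
if __name__ == "__main__":
    model = torch.nn.Linear(8, 3)
    optimizer = Adadelta(model.parameters(), rho=0.9, eps=1e-6)
    for _ in range(3):
        optimizer.zero_grad()
        model(torch.randn(4, 8)).sum().backward()
        optimizer.step()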
pytorch-master
torch/optim/adadelta.py
""" :mod:`torch.optim._multi_tensor` is a package implementing various optimization algorithms. Most commonly used methods are already supported, and the interface is general enough, so that more sophisticated ones can be also easily integrated in the future. """ from functools import partialmethod from torch import optim def partialclass(cls, *args, **kwargs): class NewCls(cls): __init__ = partialmethod(cls.__init__, *args, **kwargs) return NewCls Adam = partialclass(optim.Adam, foreach=True) AdamW = partialclass(optim.AdamW, foreach=True) NAdam = partialclass(optim.NAdam, foreach=True) SGD = partialclass(optim.SGD, foreach=True) RAdam = partialclass(optim.RAdam, foreach=True) RMSprop = partialclass(optim.RMSprop, foreach=True) Rprop = partialclass(optim.Rprop, foreach=True) ASGD = partialclass(optim.ASGD, foreach=True) Adamax = partialclass(optim.Adamax, foreach=True) Adadelta = partialclass(optim.Adadelta, foreach=True) Adagrad = partialclass(optim.Adagrad, foreach=True)
pytorch-master
torch/optim/_multi_tensor/__init__.py
from typing import NamedTuple, Callable, Any, Tuple, List, Dict, Type, cast, Optional, TypeVar import functools from collections import namedtuple, OrderedDict T = TypeVar('T') S = TypeVar('S') """ Contains utility functions for working with nested python data structures. A *pytree* is Python nested data structure. It is a tree in the sense that nodes are Python collections (e.g., list, tuple, dict) and the leaves are Python values. Furthermore, a pytree should not contain reference cycles. pytrees are useful for working with nested collections of Tensors. For example, one can use `tree_map` to map a function over all Tensors inside some nested collection of Tensors and `tree_unflatten` to get a flat list of all Tensors inside some nested collection. pytrees are helpful for implementing nested collection support for PyTorch APIs. This pytree implementation is not very performant due to Python overhead To improve the performance we can move parts of the implementation to C++. """ # A NodeDef holds two callables: # - flatten_fn should take the collection and return a flat list of values. # It can also return some context that is used in reconstructing the # collection. # - unflatten_fn should take a flat list of values and some context # (returned by flatten_fn). It returns the collection by reconstructing # it from the list and the context. Context = Any PyTree = Any FlattenFunc = Callable[[PyTree], Tuple[List, Context]] UnflattenFunc = Callable[[List, Context], PyTree] class NodeDef(NamedTuple): flatten_fn: FlattenFunc unflatten_fn: UnflattenFunc SUPPORTED_NODES: Dict[Type[Any], NodeDef] = {} def _register_pytree_node(typ: Any, flatten_fn: FlattenFunc, unflatten_fn: UnflattenFunc) -> None: SUPPORTED_NODES[typ] = NodeDef(flatten_fn, unflatten_fn) def _dict_flatten(d: Dict[Any, Any]) -> Tuple[List[Any], Context]: return list(d.values()), list(d.keys()) def _dict_unflatten(values: List[Any], context: Context) -> Dict[Any, Any]: return {key: value for key, value in zip(context, values)} def _list_flatten(d: List[Any]) -> Tuple[List[Any], Context]: return d, None def _list_unflatten(values: List[Any], context: Context) -> List[Any]: return list(values) def _tuple_flatten(d: Tuple[Any, ...]) -> Tuple[List[Any], Context]: return list(d), None def _tuple_unflatten(values: List[Any], context: Context) -> Tuple[Any, ...]: return tuple(values) def _namedtuple_flatten(d: NamedTuple) -> Tuple[List[Any], Context]: return list(d), type(d) def _namedtuple_unflatten(values: List[Any], context: Context) -> NamedTuple: return cast(NamedTuple, context(*values)) def _odict_flatten(d: 'OrderedDict[Any, Any]') -> Tuple[List[Any], Context]: return list(d.values()), list(d.keys()) def _odict_unflatten(values: List[Any], context: Context) -> 'OrderedDict[Any, Any]': return OrderedDict((key, value) for key, value in zip(context, values)) _register_pytree_node(dict, _dict_flatten, _dict_unflatten) _register_pytree_node(list, _list_flatten, _list_unflatten) _register_pytree_node(tuple, _tuple_flatten, _tuple_unflatten) _register_pytree_node(namedtuple, _namedtuple_flatten, _namedtuple_unflatten) _register_pytree_node(OrderedDict, _odict_flatten, _odict_unflatten) # h/t https://stackoverflow.com/questions/2166818/how-to-check-if-an-object-is-an-instance-of-a-namedtuple def _is_namedtuple_instance(pytree: Any) -> bool: typ = type(pytree) bases = typ.__bases__ if len(bases) != 1 or bases[0] != tuple: return False fields = getattr(typ, '_fields', None) if not isinstance(fields, tuple): return False return all(type(entry) 
== str for entry in fields) def _get_node_type(pytree: Any) -> Any: if _is_namedtuple_instance(pytree): return namedtuple return type(pytree) # A leaf is defined as anything that is not a Node. def _is_leaf(pytree: PyTree) -> bool: return _get_node_type(pytree) not in SUPPORTED_NODES.keys() # A TreeSpec represents the structure of a pytree. It holds: # "type": the type of root Node of the pytree # context: some context that is useful in unflattening the pytree # children_specs: specs for each child of the root Node # num_leaves: the number of leaves class TreeSpec: def __init__(self, typ: Any, context: Context, children_specs: List['TreeSpec']) -> None: self.type = typ self.context = context self.children_specs = children_specs self.num_leaves: int = sum([spec.num_leaves for spec in children_specs]) def __repr__(self) -> str: return f'TreeSpec({self.type.__name__}, {self.context}, {self.children_specs})' def __eq__(self, other: Any) -> bool: result = self.type == other.type and self.context == other.context \ and self.children_specs == other.children_specs \ and self.num_leaves == other.num_leaves # This should really not be necessary, but mypy errors out without it. return cast(bool, result) def __ne__(self, other: Any) -> bool: return not self.__eq__(other) class LeafSpec(TreeSpec): def __init__(self) -> None: super().__init__(None, None, []) self.num_leaves = 1 def __repr__(self) -> str: return '*' def tree_flatten(pytree: PyTree) -> Tuple[List[Any], TreeSpec]: """Flattens a pytree into a list of values and a TreeSpec that can be used to reconstruct the pytree. """ if _is_leaf(pytree): return [pytree], LeafSpec() node_type = _get_node_type(pytree) flatten_fn = SUPPORTED_NODES[node_type].flatten_fn child_pytrees, context = flatten_fn(pytree) # Recursively flatten the children result : List[Any] = [] children_specs : List['TreeSpec'] = [] for child in child_pytrees: flat, child_spec = tree_flatten(child) result += flat children_specs.append(child_spec) return result, TreeSpec(node_type, context, children_specs) def tree_unflatten(values: List[Any], spec: TreeSpec) -> PyTree: """Given a list of values and a TreeSpec, builds a pytree. This is the inverse operation of `tree_flatten`. """ if not isinstance(spec, TreeSpec): raise ValueError( f'tree_unflatten(values, spec): Expected `spec` to be instance of ' f'TreeSpec but got item of type {type(spec)}.') if len(values) != spec.num_leaves: raise ValueError( f'tree_unflatten(values, spec): `values` has length {len(values)} ' f'but the spec refers to a pytree that holds {spec.num_leaves} ' f'items ({spec}).') if isinstance(spec, LeafSpec): return values[0] unflatten_fn = SUPPORTED_NODES[spec.type].unflatten_fn # Recursively unflatten the children start = 0 end = 0 child_pytrees = [] for child_spec in spec.children_specs: end += child_spec.num_leaves child_pytrees.append(tree_unflatten(values[start:end], child_spec)) start = end return unflatten_fn(child_pytrees, spec.context) def tree_map(fn: Any, pytree: PyTree) -> PyTree: flat_args, spec = tree_flatten(pytree) return tree_unflatten([fn(i) for i in flat_args], spec) def map_only(ty: Type[T]) -> Callable[[Callable[[T], Any]], Callable[[Any], Any]]: """ Suppose you are writing a tree_map over tensors, leaving everything else unchanged. Ordinarily you would have to write: def go(t): if isinstance(t, Tensor): return ... else: return t With this function, you only need to write: @map_only(Tensor) def go(t): return ... 
You can also directly use 'tree_map_only' """ def deco(f: Callable[[T], Any]) -> Callable[[Any], Any]: @functools.wraps(f) def inner(x: T) -> Any: if isinstance(x, ty): return f(x) else: return x return inner return deco def tree_map_only(ty: Type[T], fn: Callable[[T], Any], pytree: PyTree) -> PyTree: return tree_map(map_only(ty)(fn), pytree) def tree_all(pred: Callable[[Any], bool], pytree: PyTree) -> bool: flat_args, _ = tree_flatten(pytree) return all(map(pred, flat_args)) def tree_any(pred: Callable[[Any], bool], pytree: PyTree) -> bool: flat_args, _ = tree_flatten(pytree) return any(map(pred, flat_args)) def tree_all_only(ty: Type[T], pred: Callable[[T], bool], pytree: PyTree) -> bool: flat_args, _ = tree_flatten(pytree) return all(pred(x) for x in flat_args if isinstance(x, ty)) def tree_any_only(ty: Type[T], pred: Callable[[T], bool], pytree: PyTree) -> bool: flat_args, _ = tree_flatten(pytree) return any(pred(x) for x in flat_args if isinstance(x, ty)) # Broadcasts a pytree to the provided TreeSpec and returns the flattened # values. If this is not possible, then this function returns None. # # For example, given pytree=0 and spec=TreeSpec(list, None, [LeafSpec(), LeafSpec()]), # would return [0, 0]. This is useful for part of the vmap implementation: # a user can pass in vmap(fn, in_dims)(*inputs). `in_dims` should be # broadcastable to the tree structure of `inputs` and we use # _broadcast_to_and_flatten to check this. def _broadcast_to_and_flatten(pytree: PyTree, spec: TreeSpec) -> Optional[List[Any]]: assert isinstance(spec, TreeSpec) if _is_leaf(pytree): return [pytree] * spec.num_leaves if isinstance(spec, LeafSpec): return None node_type = _get_node_type(pytree) if node_type != spec.type: return None flatten_fn = SUPPORTED_NODES[node_type].flatten_fn child_pytrees, ctx = flatten_fn(pytree) # Check if the Node is different from the spec if len(child_pytrees) != len(spec.children_specs) or ctx != spec.context: return None # Recursively flatten the children result : List[Any] = [] for child, child_spec in zip(child_pytrees, spec.children_specs): flat = _broadcast_to_and_flatten(child, child_spec) if flat is not None: result += flat else: return None return result
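
# Illustrative sketch of the flatten / unflatten round trip implemented above;
# the nested structure below is an assumption made only for this example.
if __name__ == "__main__":
    nested = {"a": [1, 2], "b": (3, {"c": 4})}

    leaves, spec = tree_flatten(nested)
    assert leaves == [1, 2, 3, 4]
    assert tree_unflatten(leaves, spec) == nested
    assert tree_map(lambda x: x * 10, nested) == {"a": [10, 20], "b": (30, {"c": 40})}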
pytorch-master
torch/utils/_pytree.py
import logging from typing import Callable, Generic, List from typing_extensions import ParamSpec logger = logging.getLogger(__name__) P = ParamSpec("P") class CallbackRegistry(Generic[P]): def __init__(self, name: str): self.name = name self.callback_list: List[Callable[P, None]] = [] def add_callback(self, cb: Callable[P, None]) -> None: self.callback_list.append(cb) def fire_callbacks(self, *args: P.args, **kwargs: P.kwargs) -> None: for cb in self.callback_list: try: cb(*args, **kwargs) except Exception as e: logger.exception( f"Exception in callback for {self.name} registered with CUDA trace" ) CUDAEventCreationCallbacks: "CallbackRegistry[int]" = CallbackRegistry( "CUDA event creation" ) CUDAEventDeletionCallbacks: "CallbackRegistry[int]" = CallbackRegistry( "CUDA event deletion" ) CUDAEventRecordCallbacks: "CallbackRegistry[int, int]" = CallbackRegistry( "CUDA event record" ) CUDAEventWaitCallbacks: "CallbackRegistry[int, int]" = CallbackRegistry( "CUDA event wait" ) CUDAMemoryAllocationCallbacks: "CallbackRegistry[int]" = CallbackRegistry( "CUDA memory allocation" ) CUDAMemoryDeallocationCallbacks: "CallbackRegistry[int]" = CallbackRegistry( "CUDA memory deallocation" ) CUDAStreamCreationCallbacks: "CallbackRegistry[int]" = CallbackRegistry( "CUDA stream creation" ) def register_callback_for_cuda_event_creation(cb: Callable[[int], None]) -> None: CUDAEventCreationCallbacks.add_callback(cb) def register_callback_for_cuda_event_deletion(cb: Callable[[int], None]) -> None: CUDAEventDeletionCallbacks.add_callback(cb) def register_callback_for_cuda_event_record(cb: Callable[[int, int], None]) -> None: CUDAEventRecordCallbacks.add_callback(cb) def register_callback_for_cuda_event_wait(cb: Callable[[int, int], None]) -> None: CUDAEventWaitCallbacks.add_callback(cb) def register_callback_for_cuda_memory_allocation(cb: Callable[[int], None]) -> None: CUDAMemoryAllocationCallbacks.add_callback(cb) def register_callback_for_cuda_memory_deallocation(cb: Callable[[int], None]) -> None: CUDAMemoryDeallocationCallbacks.add_callback(cb) def register_callback_for_cuda_stream_creation(cb: Callable[[int], None]) -> None: CUDAStreamCreationCallbacks.add_callback(cb)
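
# Illustrative sketch of registering a tracing callback; the callback body is
# an assumption made only for this example. The registries above are invoked
# by PyTorch's CUDA tracing hooks, so on a machine with no CUDA activity this
# merely registers the function and nothing fires.
if __name__ == "__main__":
    def on_stream_creation(stream_id: int) -> None:
        print(f"CUDA stream created: {stream_id}")

    register_callback_for_cuda_stream_creation(on_stream_creation)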
pytorch-master
torch/utils/_cuda_trace.py
import torch from collections import OrderedDict import weakref import warnings from typing import Any class RemovableHandle(object): """A handle which provides the capability to remove a hook.""" id: int next_id: int = 0 def __init__(self, hooks_dict: Any) -> None: self.hooks_dict_ref = weakref.ref(hooks_dict) self.id = RemovableHandle.next_id RemovableHandle.next_id += 1 def remove(self) -> None: hooks_dict = self.hooks_dict_ref() if hooks_dict is not None and self.id in hooks_dict: del hooks_dict[self.id] def __getstate__(self): return (self.hooks_dict_ref(), self.id) def __setstate__(self, state) -> None: if state[0] is None: # create a dead reference self.hooks_dict_ref = weakref.ref(OrderedDict()) else: self.hooks_dict_ref = weakref.ref(state[0]) self.id = state[1] RemovableHandle.next_id = max(RemovableHandle.next_id, self.id + 1) def __enter__(self) -> 'RemovableHandle': return self def __exit__(self, type: Any, value: Any, tb: Any) -> None: self.remove() def unserializable_hook(f): """ Decorator which marks a function as an unserializable hook. This suppresses warnings that would otherwise arise if you attempt to serialize a tensor that has a hook. """ f.__torch_unserializable__ = True return f def warn_if_has_hooks(tensor): if tensor._backward_hooks: for k in tensor._backward_hooks: hook = tensor._backward_hooks[k] if not hasattr(k, "__torch_unserializable__"): warnings.warn("backward hook {} on tensor will not be " "serialized. If this is expected, you can " "decorate the function with @torch.utils.hooks.unserializable_hook " "to suppress this warning".format(repr(hook))) class BackwardHook(object): """ A wrapper class to implement nn.Module backward hooks. It handles: - Ignoring non-Tensor inputs and replacing them by None before calling the user hook - Generating the proper Node to capture a set of Tensor's gradients - Linking the gradients captures for the outputs with the gradients captured for the input - Calling the user hook once both output and input gradients are available """ def __init__(self, module, user_hooks): self.user_hooks = user_hooks self.module = module self.grad_outputs = None self.n_outputs = -1 self.output_tensors_index = None self.n_inputs = -1 self.input_tensors_index = None def _pack_with_none(self, indices, values, size): res = [None] * size for idx, val in zip(indices, values): res[idx] = val return tuple(res) def _unpack_none(self, indices, values): res = [] for idx in indices: res.append(values[idx]) return tuple(res) def _set_user_hook(self, grad_fn): def hook(grad_input, _): if self.grad_outputs is None: raise RuntimeError("Module backward hook for grad_input is called before " "the grad_output one. This happens because the gradient " "in your nn.Module flows to the Module's input without " "passing through the Module's output. Make sure that the " "output depends on the input and that the loss is computed " "based on the output.") res = self._pack_with_none(self.input_tensors_index, grad_input, self.n_inputs) for hook in self.user_hooks: out = hook(self.module, res, self.grad_outputs) if out is None: continue if len(out) != len(res): raise RuntimeError("Backward hook returned an invalid number of grad_input, " "got {}, but expected {}".format(len(out), len(res))) res = out self.grad_outputs = None return self._unpack_none(self.input_tensors_index, res) grad_fn.register_hook(hook) def _apply_on_tensors(self, fn, args): # Can be used to apply the given function to the tensors contained in the # args. 
Will return updated args and the tensors indices tensors_idx = [] tensors = [] requires_grad = False for i, arg in enumerate(args): if isinstance(arg, torch.Tensor): tensors_idx.append(i) tensors.append(arg) requires_grad |= arg.requires_grad if not (requires_grad and torch.is_grad_enabled()): return args, None new_tensors = torch.nn.modules._functions.BackwardHookFunction.apply(*tensors) if len(new_tensors) == 0: raise RuntimeError("Cannot set Module backward hook for a Module with no input Tensors.") grad_fns = [t.grad_fn for t in new_tensors if t.grad_fn is not None and t.grad_fn.name() == "BackwardHookFunctionBackward"] if len(grad_fns) == 0: raise RuntimeError("Error while setting up backward hooks. Please open " "an issue with a code sample to reproduce this.") fn(grad_fns[0]) arg_list = list(args) for idx, val in zip(tensors_idx, new_tensors): arg_list[idx] = val return tuple(arg_list), tensors_idx def setup_input_hook(self, args): def fn(grad_fn): self._set_user_hook(grad_fn) res, input_idx = self._apply_on_tensors(fn, args) self.n_inputs = len(args) self.input_tensors_index = input_idx return res def setup_output_hook(self, args): def fn(grad_fn): def hook(_, grad_output): self.grad_outputs = self._pack_with_none(self.output_tensors_index, grad_output, self.n_outputs) # Special case if no input required gradients, this hook should call the user # hook directly if self.input_tensors_index is None: grad_inputs = self._pack_with_none([], [], self.n_inputs) for user_hook in self.user_hooks: res = user_hook(self.module, grad_inputs, self.grad_outputs) if res is not None and not (isinstance(res, tuple) and all(el is None for el in res)): raise RuntimeError("Backward hook for Modules where no input requires " "gradient should always return None or None for all gradients.") self.grad_outputs = None grad_fn.register_hook(hook) is_tuple = True if not isinstance(args, tuple): args = (args,) is_tuple = False res, output_idx = self._apply_on_tensors(fn, args) self.n_outputs = len(args) self.output_tensors_index = output_idx if not is_tuple: res = res[0] return res
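
# Illustrative sketch of the public entry point built on the machinery above:
# ``nn.Module.register_full_backward_hook`` wraps user hooks in ``BackwardHook``
# and returns a ``RemovableHandle``. The module, data, and hook body are
# assumptions made only for this example.
if __name__ == "__main__":
    linear = torch.nn.Linear(3, 3)

    def report(module, grad_input, grad_output):
        shapes = [g.shape for g in grad_output if g is not None]
        print(type(module).__name__, shapes)

    handle = linear.register_full_backward_hook(report)
    linear(torch.randn(2, 3)).sum().backward()
    handle.remove()  # the RemovableHandle defined above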
pytorch-master
torch/utils/hooks.py
import collections Entry = collections.namedtuple('Entry', 'version, hash') def update_hash(seed, value): # Good old boost::hash_combine # https://www.boost.org/doc/libs/1_35_0/doc/html/boost/hash_combine_id241013.html return seed ^ (hash(value) + 0x9e3779b9 + (seed << 6) + (seed >> 2)) def hash_source_files(hash_value, source_files): for filename in source_files: with open(filename) as file: hash_value = update_hash(hash_value, file.read()) return hash_value def hash_build_arguments(hash_value, build_arguments): for group in build_arguments: if group: for argument in group: hash_value = update_hash(hash_value, argument) return hash_value class ExtensionVersioner(object): def __init__(self): self.entries = {} def get_version(self, name): entry = self.entries.get(name) return None if entry is None else entry.version def bump_version_if_changed(self, name, source_files, build_arguments, build_directory, with_cuda, is_python_module, is_standalone): hash_value = 0 hash_value = hash_source_files(hash_value, source_files) hash_value = hash_build_arguments(hash_value, build_arguments) hash_value = update_hash(hash_value, build_directory) hash_value = update_hash(hash_value, with_cuda) hash_value = update_hash(hash_value, is_python_module) hash_value = update_hash(hash_value, is_standalone) entry = self.entries.get(name) if entry is None: self.entries[name] = entry = Entry(0, hash_value) elif hash_value != entry.hash: self.entries[name] = entry = Entry(entry.version + 1, hash_value) return entry.version
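
# Illustrative sketch of the versioner's behaviour; the temporary source file
# and build arguments are assumptions made only for this example. The version
# number only bumps when the combined hash of sources and build settings
# changes.
if __name__ == "__main__":
    import tempfile

    with tempfile.NamedTemporaryFile('w', suffix='.cpp', delete=False) as f:
        f.write('int answer() { return 42; }')
        source = f.name

    versioner = ExtensionVersioner()
    first = versioner.bump_version_if_changed(
        'my_ext', [source], [['-O2']], build_directory='/tmp/build',
        with_cuda=False, is_python_module=True, is_standalone=False)
    second = versioner.bump_version_if_changed(
        'my_ext', [source], [['-O2']], build_directory='/tmp/build',
        with_cuda=False, is_python_module=True, is_standalone=False)
    assert first == second == 0  # identical inputs keep the same version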
pytorch-master
torch/utils/_cpp_extension_versioner.py
import functools import torch from typing import Iterator, TypeVar from dataclasses import dataclass from contextlib import contextmanager T = TypeVar('T') # This file has all the logic to dedupe logic between torch dispatch and # torch function modes # # Specifically, it has the helper functions for enable_ and push_X_mode and the # ModeInfo class, which is extended by each where they are different def _wrap_init(f): @functools.wraps(f) def wrapped(self, *args, **kwargs): if 'inner' in kwargs: self.inner = kwargs['inner'] del kwargs['inner'] return f(self, *args, **kwargs) return wrapped # in order to dedupe the logic between python mode and torch_function mode, this # is a container to hold all the differences between the modes. Then functions like # _enable_mode are able to use this container to call functions or get correctly # formatted names @dataclass class _ModeInfo: mode_name: str mode_class: type # the class related to the mode that's allowed to be passed in def mode_class_name(self): return self.mode_class.__name__ def get_mode(self): """gets the current mode for this type of mode""" raise NotImplementedError() def set_mode(self, mode): """ set mode to for this type of mode. Note that no checks are done on this, it's the unsafe version where checks are assumed to have been already done by the helper function """ raise NotImplementedError() # shared version of enable_torch_function/enable_torch_dispatch_mode in order to deduplicate the code. # The differences between the modes are captured by `mode_info` and then queried when they're # needed during the function's invocation def _enable_mode(mode: T, mode_info: _ModeInfo, *, replace=None, ignore_preexisting=False) -> Iterator[T]: if not ( mode is None or isinstance(mode, mode_info.mode_class) or (isinstance(mode, type) and not issubclass(mode, mode_info.mode_class)) ): raise ValueError(f'expected to get {mode_info.mode_class_name()}, Tensor-like class, ' f'or None as an argument got {type(mode)} instead') old = mode_info.get_mode() if old is mode: yield mode # type: ignore[misc] return if old is not None and not ignore_preexisting and old is not replace: if isinstance(mode, mode_info.mode_class): help_text = f'Use push_{mode_info.mode_name}_mode instead.' else: help_text = ( 'If you intended to completely override the preexisting mode, ' 'pass ignore_preexisting=True. This can result in unexpected ' 'behavior; please consider rewriting your mode to be a subclass ' f'of {mode_info.mode_class_name()} to make it compositional!' ) raise ValueError( f'Attempted to enable_{mode_info.mode_name}_mode, but there is already an ' f'active mode {old}. {help_text}' ) # NB: we don't require TorchFunctionMode/PythonMode since this is intended to also # let you directly pass a Tensor subclass type to "mode-ify" it. if mode is not None: required_fn = "__" + mode_info.mode_name + "__" if not hasattr(mode, required_fn): raise ValueError( f'The argument passed to enable_{mode_info.mode_name}_mode must implement {required_fn}' ) mode_info.set_mode(mode) try: yield mode # type: ignore[misc] finally: mode_info.set_mode(old) def _restore_mode(mode, mode_info: _ModeInfo): if not hasattr(mode, "ancestors"): raise RuntimeError(f"{mode} does not have any ancestors. 
Use the standard version instead of restore") old = mode_info.get_mode() if old is not None and old not in mode.ancestors: raise RuntimeError(f"{mode} is not valid in the current state because the current mode is not its ancestor") mode_info.set_mode(mode) try: yield mode finally: mode_info.set_mode(old) # To help with non-lexical scoping, it will error if all the modes are from different scopes or haven't been used def find_outermost_mode(modes): outermost = None for mode in modes: if mode is not None: if not hasattr(mode, "ancestors"): raise RuntimeError(f"{mode}, doesn't have ancestors set so the ordering with other modes is unclear") if outermost is None: outermost = mode elif mode not in outermost.ancestors and outermost not in mode.ancestors: raise RuntimeError(f"modes {mode} and {outermost} are not compatible because they " "don't come from the same scope") elif outermost in mode.ancestors: outermost = mode return outermost # returns if all are the same mode def all_same_mode(modes): return all(tuple(mode == modes[0] for mode in modes)) # returns if all modes are from the current scope, ``cur_mode`` def all_same_mode_scope(modes, cur_mode): if not hasattr(cur_mode, "ancestors"): return False return all(tuple(mode == cur_mode or mode in cur_mode.ancestors for mode in modes)) @contextmanager def no_dispatch(): guard = torch._C._DisableTorchDispatch() # type: ignore[attr-defined] try: yield finally: del guard
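
# Illustrative sketch of the ancestor-based helpers above, using stand-in
# objects; real modes get ``ancestors`` populated by the mode push/enable
# helpers, so the bare class below is an assumption made only for this example.
if __name__ == "__main__":
    class _FakeMode:
        def __init__(self, ancestors):
            self.ancestors = ancestors

    first = _FakeMode(ancestors=set())        # created with no enclosing mode
    second = _FakeMode(ancestors={first})     # created while `first` was active

    assert find_outermost_mode([first, second]) is second
    assert all_same_mode([first, first])
    assert all_same_mode_scope([first, second], cur_mode=second)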
pytorch-master
torch/utils/_mode_utils.py
import torch import warnings import weakref from typing import Any, Iterable, List, Tuple __all__ = [ "checkpoint", "checkpoint_sequential", "CheckpointFunction", "check_backward_validity", "detach_variable", "get_device_states", "set_device_states", ] def detach_variable(inputs: Tuple[Any, ...]) -> Tuple[torch.Tensor, ...]: if isinstance(inputs, tuple): out = [] for inp in inputs: if not isinstance(inp, torch.Tensor): out.append(inp) continue x = inp.detach() x.requires_grad = inp.requires_grad out.append(x) return tuple(out) else: raise RuntimeError( "Only tuple of tensors is supported. Got Unsupported input type: ", type(inputs).__name__) def check_backward_validity(inputs: Iterable[Any]) -> None: if not any(inp.requires_grad for inp in inputs if isinstance(inp, torch.Tensor)): warnings.warn("None of the inputs have requires_grad=True. Gradients will be None") # We can't know if the run_fn will internally move some args to different devices, # which would require logic to preserve rng states for those devices as well. # We could paranoically stash and restore ALL the rng states for all visible devices, # but that seems very wasteful for most cases. Compromise: Stash the RNG state for # the device of all Tensor args. # # To consider: maybe get_device_states and set_device_states should reside in torch/random.py? def get_device_states(*args) -> Tuple[List[int], List[torch.Tensor]]: # This will not error out if "arg" is a CPU tensor or a non-tensor type because # the conditionals short-circuit. fwd_gpu_devices = list(set(arg.get_device() for arg in args if isinstance(arg, torch.Tensor) and arg.is_cuda)) fwd_gpu_states = [] for device in fwd_gpu_devices: with torch.cuda.device(device): fwd_gpu_states.append(torch.cuda.get_rng_state()) return fwd_gpu_devices, fwd_gpu_states def set_device_states(devices, states) -> None: for device, state in zip(devices, states): with torch.cuda.device(device): torch.cuda.set_rng_state(state) def _get_autocast_kwargs(): gpu_autocast_kwargs = {"enabled": torch.is_autocast_enabled(), "dtype": torch.get_autocast_gpu_dtype(), "cache_enabled": torch.is_autocast_cache_enabled()} cpu_autocast_kwargs = {"enabled": torch.is_autocast_cpu_enabled(), "dtype": torch.get_autocast_cpu_dtype(), "cache_enabled": torch.is_autocast_cache_enabled()} return gpu_autocast_kwargs, cpu_autocast_kwargs class CheckpointFunction(torch.autograd.Function): @staticmethod def forward(ctx, run_function, preserve_rng_state, *args): check_backward_validity(args) ctx.run_function = run_function ctx.preserve_rng_state = preserve_rng_state # Accommodates the (remote) possibility that autocast is enabled for cpu AND gpu. ctx.gpu_autocast_kwargs, ctx.cpu_autocast_kwargs = _get_autocast_kwargs() if preserve_rng_state: ctx.fwd_cpu_state = torch.get_rng_state() # Don't eagerly initialize the cuda context by accident. # (If the user intends that the context is initialized later, within their # run_function, we SHOULD actually stash the cuda state here. Unfortunately, # we have no way to anticipate this will happen before we run the function.) ctx.had_cuda_in_fwd = False if torch.cuda._initialized: ctx.had_cuda_in_fwd = True ctx.fwd_gpu_devices, ctx.fwd_gpu_states = get_device_states(*args) # Save non-tensor inputs in ctx, keep a placeholder None for tensors # to be filled out during the backward. 
ctx.inputs = [] ctx.tensor_indices = [] tensor_inputs = [] for i, arg in enumerate(args): if torch.is_tensor(arg): tensor_inputs.append(arg) ctx.tensor_indices.append(i) ctx.inputs.append(None) else: ctx.inputs.append(arg) ctx.save_for_backward(*tensor_inputs) with torch.no_grad(): outputs = run_function(*args) return outputs @staticmethod def backward(ctx, *args): if not torch.autograd._is_checkpoint_valid(): raise RuntimeError( "Checkpointing is not compatible with .grad() or when an `inputs` parameter" " is passed to .backward(). Please use .backward() and do not pass its `inputs`" " argument.") # Copy the list to avoid modifying original list. inputs = list(ctx.inputs) tensor_indices = ctx.tensor_indices tensors = ctx.saved_tensors # Fill in inputs with appropriate saved tensors. for i, idx in enumerate(tensor_indices): inputs[idx] = tensors[i] # Stash the surrounding rng state, and mimic the state that was # present at this time during forward. Restore the surrounding state # when we're done. rng_devices = [] if ctx.preserve_rng_state and ctx.had_cuda_in_fwd: rng_devices = ctx.fwd_gpu_devices with torch.random.fork_rng(devices=rng_devices, enabled=ctx.preserve_rng_state): if ctx.preserve_rng_state: torch.set_rng_state(ctx.fwd_cpu_state) if ctx.had_cuda_in_fwd: set_device_states(ctx.fwd_gpu_devices, ctx.fwd_gpu_states) detached_inputs = detach_variable(tuple(inputs)) with torch.enable_grad(), \ torch.cuda.amp.autocast(**ctx.gpu_autocast_kwargs), \ torch.cpu.amp.autocast(**ctx.cpu_autocast_kwargs): outputs = ctx.run_function(*detached_inputs) if isinstance(outputs, torch.Tensor): outputs = (outputs,) # run backward() with only tensor that requires grad outputs_with_grad = [] args_with_grad = [] for i in range(len(outputs)): if torch.is_tensor(outputs[i]) and outputs[i].requires_grad: outputs_with_grad.append(outputs[i]) args_with_grad.append(args[i]) if len(outputs_with_grad) == 0: raise RuntimeError( "none of output has requires_grad=True," " this checkpoint() is not necessary") torch.autograd.backward(outputs_with_grad, args_with_grad) grads = tuple(inp.grad if isinstance(inp, torch.Tensor) else None for inp in detached_inputs) return (None, None) + grads def checkpoint(function, *args, use_reentrant: bool = True, **kwargs): r"""Checkpoint a model or part of the model Checkpointing works by trading compute for memory. Rather than storing all intermediate activations of the entire computation graph for computing backward, the checkpointed part does **not** save intermediate activations, and instead recomputes them in backward pass. It can be applied on any part of a model. Specifically, in the forward pass, :attr:`function` will run in :func:`torch.no_grad` manner, i.e., not storing the intermediate activations. Instead, the forward pass saves the inputs tuple and the :attr:`function` parameter. In the backwards pass, the saved inputs and :attr:`function` is retrieved, and the forward pass is computed on :attr:`function` again, now tracking the intermediate activations, and then the gradients are calculated using these activation values. The output of :attr:`function` can contain non-Tensor values and gradient recording is only performed for the Tensor values. Note that if the output consists of nested structures (ex: custom objects, lists, dicts etc.) consisting of Tensors, these Tensors nested in custom structures will not be considered as part of autograd. .. 
warning:: If :attr:`function` invocation during backward does anything different than the one during forward, e.g., due to some global variable, the checkpointed version won't be equivalent, and unfortunately it can't be detected. .. warning:: If ``use_reentrant=True`` is specified, then if the checkpointed segment contains tensors detached from the computational graph by `detach()` or `torch.no_grad()`, the backward pass will raise an error. This is because `checkpoint` makes all the outputs require gradients which causes issues when a tensor is defined to have no gradient in the model. To circumvent this, detach the tensors outside of the `checkpoint` function. Note that the checkpointed segment can contain tensors detached from the computational graph if ``use_reentrant=False`` is specified. .. warning:: If ``use_reentrant=True`` is specified, at least one of the inputs needs to have :code:`requires_grad=True` if grads are needed for model inputs, otherwise the checkpointed part of the model won't have gradients. At least one of the outputs needs to have :code:`requires_grad=True` as well. Note that this does not apply if ``use_reentrant=False`` is specified. .. warning:: If ``use_reentrant=True`` is specified, checkpointing currently only supports :func:`torch.autograd.backward` and only if its `inputs` argument is not passed. :func:`torch.autograd.grad` is not supported. If ``use_reentrant=False`` is specified, checkpointing will work with :func:`torch.autograd.grad`. Args: function: describes what to run in the forward pass of the model or part of the model. It should also know how to handle the inputs passed as the tuple. For example, in LSTM, if user passes ``(activation, hidden)``, :attr:`function` should correctly use the first input as ``activation`` and the second input as ``hidden`` preserve_rng_state(bool, optional): Omit stashing and restoring the RNG state during each checkpoint. Default: ``True`` use_reentrant(bool, optional): Use checkpointing implementation that requires re-entrant autograd. If ``use_reentrant=False`` is specified, ``checkpoint`` will use an implementation that does not require re-entrant autograd. This allows ``checkpoint`` to support additional functionality, such as working as expected with ``torch.autograd.grad`` and support for keyword arguments input into the checkpointed function. Note that future versions of PyTorch will default to ``use_reentrant=False``. Default: ``True`` args: tuple containing inputs to the :attr:`function` Returns: Output of running :attr:`function` on :attr:`*args` """ # Hack to mix *args with **kwargs in a python 2.7-compliant way preserve = kwargs.pop('preserve_rng_state', True) if kwargs and use_reentrant: raise ValueError("Unexpected keyword arguments: " + ",".join(arg for arg in kwargs)) if use_reentrant: return CheckpointFunction.apply(function, preserve, *args) else: return _checkpoint_without_reentrant( function, preserve, *args, **kwargs, ) def checkpoint_sequential(functions, segments, input, **kwargs): r"""A helper function for checkpointing sequential models. Sequential models execute a list of modules/functions in order (sequentially). Therefore, we can divide such a model in various segments and checkpoint each segment. All segments except the last will run in :func:`torch.no_grad` manner, i.e., not storing the intermediate activations. The inputs of each checkpointed segment will be saved for re-running the segment in the backward pass. See :func:`~torch.utils.checkpoint.checkpoint` on how checkpointing works. 
.. warning:: Checkpointing currently only supports :func:`torch.autograd.backward` and only if its `inputs` argument is not passed. :func:`torch.autograd.grad` is not supported. .. warning: At least one of the inputs needs to have :code:`requires_grad=True` if grads are needed for model inputs, otherwise the checkpointed part of the model won't have gradients. .. warning: Since PyTorch 1.4, it allows only one Tensor as the input and intermediate outputs, just like :class:`torch.nn.Sequential`. Args: functions: A :class:`torch.nn.Sequential` or the list of modules or functions (comprising the model) to run sequentially. segments: Number of chunks to create in the model input: A Tensor that is input to :attr:`functions` preserve_rng_state(bool, optional): Omit stashing and restoring the RNG state during each checkpoint. Default: ``True`` Returns: Output of running :attr:`functions` sequentially on :attr:`*inputs` Example: >>> # xdoctest: +SKIP("stub") >>> model = nn.Sequential(...) >>> input_var = checkpoint_sequential(model, chunks, input_var) """ # Hack for keyword-only parameter in a python 2.7-compliant way preserve = kwargs.pop('preserve_rng_state', True) if kwargs: raise ValueError("Unexpected keyword arguments: " + ",".join(arg for arg in kwargs)) def run_function(start, end, functions): def forward(input): for j in range(start, end + 1): input = functions[j](input) return input return forward if isinstance(functions, torch.nn.Sequential): functions = list(functions.children()) segment_size = len(functions) // segments # the last chunk has to be non-volatile end = -1 for start in range(0, segment_size * (segments - 1), segment_size): end = start + segment_size - 1 input = checkpoint(run_function(start, end, functions), input, preserve_rng_state=preserve) return run_function(end + 1, len(functions) - 1, functions)(input) def _checkpoint_without_reentrant(function, preserve_rng_state=True, *args, **kwargs): """Checkpointining without re-entrant autograd Args: function: describes what to run in the forward pass of the model or part of the model. It should also know how to handle the inputs passed as the tuple. For example, in LSTM, if user passes ``(activation, hidden)``, :attr:`function` should correctly use the first input as ``activation`` and the second input as ``hidden`` preserve_rng_state(bool, optional): Omit stashing and restoring the RNG state during each checkpoint. Default: ``True`` *args: Arguments to pass in to the given ``function``. **kwargs: Keyword arguments to pass into the given ``function``. """ # Accommodates the (remote) possibility that autocast is enabled for cpu AND gpu. gpu_autocast_kwargs, cpu_autocast_kwargs = _get_autocast_kwargs() if preserve_rng_state: fwd_cpu_state = torch.get_rng_state() # Don't eagerly initialize the cuda context by accident. # (If the user intends that the context is initialized later, within their # run_function, we SHOULD actually stash the cuda state here. Unfortunately, # we have no way to anticipate this will happen before we run the function. # If they do so, we raise an error.) had_cuda_in_fwd = False if torch.cuda._initialized: had_cuda_in_fwd = True fwd_gpu_devices, fwd_gpu_states = get_device_states(*args) # Custom class to be able to take weak references class Holder(): pass # The Holder object for each of the saved object is saved directly on the # SavedVariable and is cleared when reset_data() is called on it. 
We MUST make # sure that this is the only object having an owning reference to ensure that # the Tensor stored in storage is deleted as soon as the corresponding SavedVariable # data is cleared. storage: weakref.WeakKeyDictionary = weakref.WeakKeyDictionary() weak_holder_list = [] def pack(x): # TODO(varal7): Instead of returning abstract object, we can return things metadata (such as # size, device, ...) to catch certain cases of undeterministic behavior of the forward res = Holder() weak_holder_list.append(weakref.ref(res)) return res def unpack(x): unpack_counter = 0 if len(storage) == 0: def inner_pack(inner): nonlocal unpack_counter unpack_counter += 1 # If the holder went out of scope, the SavedVariable is dead and so # the value will never be read from the storage. Skip filling it. if weak_holder_list[unpack_counter - 1]() is None: return # Use detach here to ensure we don't keep the temporary autograd # graph created during the second forward storage[weak_holder_list[unpack_counter - 1]()] = inner.detach() return def inner_unpack(packed): raise RuntimeError("You are calling backwards on a tensor that is never exposed. Please open an issue.") # Stash the surrounding rng state, and mimic the state that was # present at this time during forward. Restore the surrounding state # when we're done. rng_devices = [] if preserve_rng_state and had_cuda_in_fwd: rng_devices = fwd_gpu_devices with torch.random.fork_rng(devices=rng_devices, enabled=preserve_rng_state): if preserve_rng_state: torch.set_rng_state(fwd_cpu_state) if had_cuda_in_fwd: set_device_states(fwd_gpu_devices, fwd_gpu_states) with torch.enable_grad(), \ torch.cuda.amp.autocast(**gpu_autocast_kwargs), \ torch.cpu.amp.autocast(**cpu_autocast_kwargs), \ torch.autograd.graph.saved_tensors_hooks(inner_pack, inner_unpack): _unused = function(*args, **kwargs) if x not in storage: raise RuntimeError( "Attempt to retrieve a tensor saved by autograd multiple times without checkpoint" " recomputation being triggered in between, this is not currently supported. Please" " open an issue with details on your use case so that we can prioritize adding this." ) return storage[x] with torch.autograd.graph.saved_tensors_hooks(pack, unpack): output = function(*args, **kwargs) if torch.cuda._initialized and preserve_rng_state and not had_cuda_in_fwd: # Cuda was not initialized before running the forward, so we didn't # stash the CUDA state. raise RuntimeError( "PyTorch's CUDA state was initialized in the forward pass " "of a Checkpoint, which is not allowed. Please open an issue " "if you need this feature.") return output
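
# Illustrative sketch of activation checkpointing with the functions above; the
# model sizes and data are assumptions made only for this example. Checkpointed
# segments recompute their activations during backward instead of storing them.
if __name__ == "__main__":
    model = torch.nn.Sequential(
        torch.nn.Linear(32, 32), torch.nn.ReLU(),
        torch.nn.Linear(32, 32), torch.nn.ReLU(),
        torch.nn.Linear(32, 4),
    )
    x = torch.randn(8, 32, requires_grad=True)

    # Checkpoint a single sub-module with the non-reentrant implementation.
    y = checkpoint(model[0], x, use_reentrant=False)
    y.sum().backward()

    # Or split the whole Sequential into two checkpointed segments.
    out = checkpoint_sequential(model, 2, x)
    out.sum().backward()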
pytorch-master
torch/utils/checkpoint.py
#!/usr/bin/env python3 import sys import pickle import struct import pprint import zipfile import fnmatch from typing import Any, IO, BinaryIO, Union class FakeObject(object): def __init__(self, module, name, args): self.module = module self.name = name self.args = args # NOTE: We don't distinguish between state never set and state set to None. self.state = None def __repr__(self): state_str = "" if self.state is None else f"(state={self.state!r})" return f"{self.module}.{self.name}{self.args!r}{state_str}" def __setstate__(self, state): self.state = state @staticmethod def pp_format(printer, obj, stream, indent, allowance, context, level): if not obj.args and obj.state is None: stream.write(repr(obj)) return if obj.state is None: stream.write(f"{obj.module}.{obj.name}") printer._format(obj.args, stream, indent + 1, allowance + 1, context, level) return if not obj.args: stream.write(f"{obj.module}.{obj.name}()(state=\n") indent += printer._indent_per_level stream.write(" " * indent) printer._format(obj.state, stream, indent, allowance + 1, context, level + 1) stream.write(")") return raise Exception("Need to implement") class FakeClass(object): def __init__(self, module, name): self.module = module self.name = name self.__new__ = self.fake_new # type: ignore[assignment] def __repr__(self): return f"{self.module}.{self.name}" def __call__(self, *args): return FakeObject(self.module, self.name, args) def fake_new(self, *args): return FakeObject(self.module, self.name, args[1:]) class DumpUnpickler(pickle._Unpickler): # type: ignore[name-defined] def __init__( self, file, *, catch_invalid_utf8=False, **kwargs): super().__init__(file, **kwargs) self.catch_invalid_utf8 = catch_invalid_utf8 def find_class(self, module, name): return FakeClass(module, name) def persistent_load(self, pid): return FakeObject("pers", "obj", (pid,)) dispatch = dict(pickle._Unpickler.dispatch) # type: ignore[attr-defined] # Custom objects in TorchScript are able to return invalid UTF-8 strings # from their pickle (__getstate__) functions. Install a custom loader # for strings that catches the decode exception and replaces it with # a sentinel object. def load_binunicode(self): strlen, = struct.unpack("<I", self.read(4)) # type: ignore[attr-defined] if strlen > sys.maxsize: raise Exception("String too long.") str_bytes = self.read(strlen) # type: ignore[attr-defined] obj: Any try: obj = str(str_bytes, "utf-8", "surrogatepass") except UnicodeDecodeError as exn: if not self.catch_invalid_utf8: raise obj = FakeObject("builtin", "UnicodeDecodeError", (str(exn),)) self.append(obj) # type: ignore[attr-defined] dispatch[pickle.BINUNICODE[0]] = load_binunicode # type: ignore[assignment] @classmethod def dump(cls, in_stream, out_stream): value = cls(in_stream).load() pprint.pprint(value, stream=out_stream) return value def main(argv, output_stream=None): if len(argv) != 2: # Don't spam stderr if not using stdout. 
if output_stream is not None: raise Exception("Pass argv of length 2.") sys.stderr.write("usage: show_pickle PICKLE_FILE\n") sys.stderr.write(" PICKLE_FILE can be any of:\n") sys.stderr.write(" path to a pickle file\n") sys.stderr.write(" [email protected]\n") sys.stderr.write(" file.zip@*/pattern.*\n") sys.stderr.write(" (shell glob pattern for members)\n") sys.stderr.write(" (only first match will be shown)\n") return 2 fname = argv[1] handle: Union[IO[bytes], BinaryIO] if "@" not in fname: with open(fname, "rb") as handle: DumpUnpickler.dump(handle, output_stream) else: zfname, mname = fname.split("@", 1) with zipfile.ZipFile(zfname) as zf: if "*" not in mname: with zf.open(mname) as handle: DumpUnpickler.dump(handle, output_stream) else: found = False for info in zf.infolist(): if fnmatch.fnmatch(info.filename, mname): with zf.open(info) as handle: DumpUnpickler.dump(handle, output_stream) found = True break if not found: raise Exception(f"Could not find member matching {mname} in {zfname}") if __name__ == "__main__": # This hack works on every version of Python I've tested. # I've tested on the following versions: # 3.7.4 if True: pprint.PrettyPrinter._dispatch[FakeObject.__repr__] = FakeObject.pp_format # type: ignore[attr-defined] sys.exit(main(sys.argv))
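# Hypothetical usage sketch (illustrative only): pretty-prints an in-memory
# pickle with DumpUnpickler instead of a file given on the command line. The
# payload below is a made-up dict, not data associated with this tool.
def _example_dump_in_memory():
    import io

    payload = pickle.dumps({"weights": [1.0, 2.0], "step": 3})
    # Unknown classes would come back as FakeObject/FakeClass stand-ins rather
    # than being imported; plain builtins round-trip as themselves.
    return DumpUnpickler.dump(io.BytesIO(payload), sys.stdout)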
pytorch-master
torch/utils/show_pickle.py
import os
import time


class FileBaton:
    '''A primitive, file-based synchronization utility.'''

    def __init__(self, lock_file_path, wait_seconds=0.1):
        '''
        Creates a new :class:`FileBaton`.

        Args:
            lock_file_path: The path to the file used for locking.
            wait_seconds: The seconds to periodically sleep (spin) when
                calling ``wait()``.
        '''
        self.lock_file_path = lock_file_path
        self.wait_seconds = wait_seconds
        self.fd = None

    def try_acquire(self):
        '''
        Tries to atomically create a file under exclusive access.

        Returns:
            True if the file could be created, else False.
        '''
        try:
            self.fd = os.open(self.lock_file_path, os.O_CREAT | os.O_EXCL)
            return True
        except FileExistsError:
            return False

    def wait(self):
        '''
        Periodically sleeps for a certain amount of time until the baton is
        released.

        The amount of time slept depends on the ``wait_seconds`` parameter
        passed to the constructor.
        '''
        while os.path.exists(self.lock_file_path):
            time.sleep(self.wait_seconds)

    def release(self):
        '''Releases the baton and removes its file.'''
        if self.fd is not None:
            os.close(self.fd)

        os.remove(self.lock_file_path)
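# Hypothetical usage sketch (illustrative only): the typical FileBaton pattern.
# One process wins the exclusive-create race and does the work; the others spin
# in wait() until the lock file disappears. The lock path and build_fn callback
# are assumptions made for this example.
def _example_build_once(build_fn, lock_path="/tmp/my_extension.lock"):
    baton = FileBaton(lock_path)
    if baton.try_acquire():
        try:
            build_fn()
        finally:
            baton.release()
    else:
        baton.wait()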
pytorch-master
torch/utils/file_baton.py
""" Freeze Python packages. Freezing makes it possible to ship arbitrary Python modules as part of a C++ library. The Python source of the module is compiled to bytecode and written to `.c` files, to be imported by Python's built-in FrozenImporter. In a normal Python installation, FrozenImporter is only used to bootstrap the initialization of the import machinery. Python's importers are defined in Python (see `_bootstrap.py` and `_bootstrap_external.py`) but need to be retrieved before any importers are available. Freezing the module bytecode resolves this circular dependency. This script will freeze the Python standard library. It produces two things: - Bytecode files: A set of `.c` that define C variables containing Python bytecode. - Main file: A `main.c` file listing all of these modules in the right form to be consumed by FrozenImporter. The library that wishes to these modules make them available to the local Python instance by extending `PyImport_FrozenModules` appropriately (see https://docs.python.org/3/c-api/import.html#c.PyImport_FrozenModules). """ import argparse import functools import itertools import marshal import os from dataclasses import dataclass from pathlib import Path from typing import List import types PATH_MARKER = "<Generated by torch::deploy>" MAIN_INCLUDES = """#include <Python.h> """ MAIN_PREFIX_TEMPLATE = """ // Compiled standard library modules. These should be appended to the existing // `PyImport_FrozenModules` that ships with CPython. struct _frozen {}[] = {{ """ FAKE_PREFIX = MAIN_PREFIX_TEMPLATE.format("_PyImport_FrozenModules") MAIN_SUFFIX = """\ {0, 0, 0} /* sentinel */ }; """ # Exclude some standard library modules to: # 1. Slim down the final frozen lib. # 2. Remove functionality we don't want to support. DENY_LIST = [ # Interface to unix databases "dbm", # ncurses bindings (terminal interfaces) "curses", # Tcl/Tk GUI "tkinter", "tkinter", # Tests for the standard library "test", "tests", "idle_test", "__phello__.foo.py", # importlib frozen modules. These are already baked into CPython. "_bootstrap.py", "_bootstrap_external.py", ] NUM_BYTECODE_FILES = 5 def indent_msg(fn): @functools.wraps(fn) def wrapper(*args, **kwargs): args[0].indent += 1 ret = fn(*args, **kwargs) args[0].indent -= 1 return ret return wrapper @dataclass class FrozenModule: # The fully qualified module name, e.g. 'foo.bar.baz' module_name: str # The name of the C variable that holds the bytecode, e.g. 'M_foo__bar__baz' c_name: str # The size of the C variable. Negative if this module is a package. size: int # The frozen bytecode bytecode: bytes class Freezer: def __init__(self, verbose: bool): self.frozen_modules: List[FrozenModule] = [] self.indent: int = 0 self.verbose: bool = verbose def msg(self, path: Path, code: str): if not self.verbose: return # P: package dir # F: python file # S: skipped (not a package dir) # X: skipped (deny-listed) # N: skipped (not a python file) for i in range(self.indent): print(" ", end="") print(f"{code} {path}") def write_bytecode(self, install_root): """ Write the `.c` files containing the frozen bytecode. Shard frozen modules evenly across the files. 
""" bytecode_file_names = [ f"bytecode_{i}.c" for i in range(NUM_BYTECODE_FILES) ] bytecode_files = [open(os.path.join(install_root, name), "w") for name in bytecode_file_names] it = itertools.cycle(bytecode_files) for m in self.frozen_modules: self.write_frozen(m, next(it)) for f in bytecode_files: f.close() def write_main(self, install_root, oss, symbol_name): """ Write the `main.c` file containing a table enumerating all the frozen modules. """ with open(os.path.join(install_root, "main.c"), "w") as outfp: outfp.write(MAIN_INCLUDES) for m in self.frozen_modules: outfp.write(f"extern unsigned char {m.c_name}[];\n") outfp.write(MAIN_PREFIX_TEMPLATE.format(symbol_name)) for m in self.frozen_modules: outfp.write(f'\t{{"{m.module_name}", {m.c_name}, {m.size}}},\n') outfp.write(MAIN_SUFFIX) if oss: outfp.write(FAKE_PREFIX) outfp.write(MAIN_SUFFIX) def write_frozen(self, m: FrozenModule, outfp): """ Write a single frozen module's bytecode out to a C variable. """ outfp.write(f"unsigned char {m.c_name}[] = {{") for i in range(0, len(m.bytecode), 16): outfp.write("\n\t") for c in bytes(m.bytecode[i : i + 16]): outfp.write("%d," % c) outfp.write("\n};\n") def compile_path(self, path: Path, top_package_path: Path): """Generic entry point for compiling a Path object.""" if path.is_dir(): self.compile_package(path, top_package_path) else: self.compile_file(path, top_package_path) @indent_msg def compile_package(self, path: Path, top_package_path: Path): """Compile all the files within a Python package dir.""" assert path.is_dir() if path.name in DENY_LIST: self.msg(path, "X") return # Python packages are directories that have __init__.py in them. is_package_dir = any([child.name == "__init__.py" for child in path.iterdir()]) if not is_package_dir: self.msg(path, "S") return self.msg(path, "P") # Recursively compile all children in this dir for child in path.iterdir(): self.compile_path(child, top_package_path) def get_module_qualname(self, file_path: Path, top_package_path: Path) -> List[str]: # `path` looks like 'Lib/foo/bar/baz.py' # chop off 'Lib/' to get something that represents a Python module hierarchy. # e.g. 'foo/bar/baz.py', which maps to 'foo.bar.baz' normalized_path = file_path.relative_to(top_package_path.parent) if normalized_path.name == "__init__.py": # Special handling for `__init__.py`. In this case, this file # specifies that the containing directory should be treated as a package. # For 'foo/bar/baz/__init__.py': # - The module name is 'baz' module_basename = normalized_path.parent.name # - The parent is foo.bar (need to shave off the 'baz') module_parent = normalized_path.parent.parent.parts else: module_basename = normalized_path.stem module_parent = normalized_path.parent.parts return list(module_parent) + [module_basename] def compile_string(self, file_content: str) -> types.CodeType: # instead of passing in the real build time path to 'compile', we # pass in a marker instead. This prevents the build time path being # leaked to runtime. That path may not be available at runtime. # Setting the path to a mark make sure it's a hard error rather # than a flaky error when inspect module tries to retrieve python source # code during torchscripting. path_marker = PATH_MARKER return compile(file_content, path_marker, "exec") @indent_msg def compile_file(self, path: Path, top_package_path: Path): """ Compile a Python source file to frozen bytecode. Append the result to `self.frozen_modules`. 
""" assert path.is_file() if path.suffix != ".py": self.msg(path, "N") return if path.name in DENY_LIST: self.msg(path, "X") return self.msg(path, "F") module_qualname = self.get_module_qualname(path, top_package_path) module_mangled_name = "__".join(module_qualname) c_name = "M_" + module_mangled_name with open(path, "r") as src_file: co = self.compile_string(src_file.read()) bytecode = marshal.dumps(co) size = len(bytecode) if path.name == '__init__.py': # Python packages are signified by negative size. size = -size self.frozen_modules.append( FrozenModule(".".join(module_qualname), c_name, size, bytecode) ) if __name__ == "__main__": parser = argparse.ArgumentParser(description="Compile py source") parser.add_argument("paths", nargs="*", help="Paths to freeze.") parser.add_argument("--verbose", action="store_true", help="Print debug logs") parser.add_argument("--install_dir", help="Root directory for all output files") parser.add_argument("--oss", action="store_true", help="If it's OSS build, add a fake _PyImport_FrozenModules") parser.add_argument( "--symbol_name", help="The name of the frozen module array symbol to generate", default="_PyImport_FrozenModules_torch", ) args = parser.parse_args() f = Freezer(args.verbose) for p in args.paths: path = Path(p) if path.is_dir() and not Path.exists(path / '__init__.py'): # this 'top level path p' is a standard directory containing modules, # not a module itself # each 'mod' could be a dir containing __init__.py or .py file # NB: sorted to make sure this is deterministic for mod in sorted(path.glob("*")): f.compile_path(mod, mod) else: f.compile_path(path, path) f.write_bytecode(args.install_dir) f.write_main(args.install_dir, args.oss, args.symbol_name)
pytorch-master
torch/utils/_freeze.py
# torchvision imports tqdm from here.
from torch.hub import tqdm, load_state_dict_from_url as load_url  # noqa: F401
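# Hypothetical usage sketch (illustrative only): model_zoo is kept as a thin
# compatibility alias over torch.hub. The checkpoint URL below is a placeholder,
# not a real endpoint.
def _example_load_pretrained(model, url="https://example.com/checkpoints/model.pth"):
    state_dict = load_url(url, map_location="cpu", progress=True)
    model.load_state_dict(state_dict)
    return model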
pytorch-master
torch/utils/model_zoo.py
import argparse import glob import os from pathlib import Path from zipfile import ZipFile # Exclude some standard library modules to: # 1. Slim down the final zipped file size # 2. Remove functionality we don't want to support. DENY_LIST = [ # Interface to unix databases "dbm", # ncurses bindings (terminal interfaces) "curses", # Tcl/Tk GUI "tkinter", "tkinter", # Tests for the standard library "test", "tests", "idle_test", "__phello__.foo.py", # importlib frozen modules. These are already baked into CPython. "_bootstrap.py", "_bootstrap_external.py", ] def remove_prefix(text, prefix): if text.startswith(prefix): return text[len(prefix):] return text def write_to_zip(file_path, strip_file_path, zf, prepend_str=""): stripped_file_path = prepend_str + remove_prefix(file_path, strip_file_dir + "/") path = Path(stripped_file_path) if path.name in DENY_LIST: return zf.write(file_path, stripped_file_path) if __name__ == "__main__": parser = argparse.ArgumentParser(description="Zip py source") parser.add_argument("paths", nargs="*", help="Paths to zip.") parser.add_argument("--install_dir", help="Root directory for all output files") parser.add_argument("--strip_dir", help="The absolute directory we want to remove from zip") parser.add_argument("--prepend_str", help="A string to prepend onto all paths of a file in the zip", default="") parser.add_argument("--zip_name", help="Output zip name") args = parser.parse_args() zip_file_name = args.install_dir + '/' + args.zip_name strip_file_dir = args.strip_dir prepend_str = args.prepend_str zf = ZipFile(zip_file_name, mode='w') for p in args.paths: if os.path.isdir(p): files = glob.glob(p + "/**/*.py", recursive=True) for file_path in files: # strip the absolute path write_to_zip(file_path, strip_file_dir + "/", zf, prepend_str=prepend_str) else: write_to_zip(p, strip_file_dir + "/", zf, prepend_str=prepend_str)
pytorch-master
torch/utils/_zip.py
from __future__ import print_function # Unlike the rest of the PyTorch this file must be python2 compliant. # This script outputs relevant system environment info # Run it with `python collect_env.py`. import datetime import locale import re import subprocess import sys import os from collections import namedtuple try: import torch TORCH_AVAILABLE = True except (ImportError, NameError, AttributeError, OSError): TORCH_AVAILABLE = False # System Environment Information SystemEnv = namedtuple('SystemEnv', [ 'torch_version', 'is_debug_build', 'cuda_compiled_version', 'gcc_version', 'clang_version', 'cmake_version', 'os', 'libc_version', 'python_version', 'python_platform', 'is_cuda_available', 'cuda_runtime_version', 'nvidia_driver_version', 'nvidia_gpu_models', 'cudnn_version', 'pip_version', # 'pip' or 'pip3' 'pip_packages', 'conda_packages', 'hip_compiled_version', 'hip_runtime_version', 'miopen_runtime_version', 'caching_allocator_config', 'is_xnnpack_available', ]) def run(command): """Returns (return-code, stdout, stderr)""" p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) raw_output, raw_err = p.communicate() rc = p.returncode if get_platform() == 'win32': enc = 'oem' else: enc = locale.getpreferredencoding() output = raw_output.decode(enc) err = raw_err.decode(enc) return rc, output.strip(), err.strip() def run_and_read_all(run_lambda, command): """Runs command using run_lambda; reads and returns entire output if rc is 0""" rc, out, _ = run_lambda(command) if rc != 0: return None return out def run_and_parse_first_match(run_lambda, command, regex): """Runs command using run_lambda, returns the first regex match if it exists""" rc, out, _ = run_lambda(command) if rc != 0: return None match = re.search(regex, out) if match is None: return None return match.group(1) def run_and_return_first_line(run_lambda, command): """Runs command using run_lambda and returns first line if output is not empty""" rc, out, _ = run_lambda(command) if rc != 0: return None return out.split('\n')[0] def get_conda_packages(run_lambda): conda = os.environ.get('CONDA_EXE', 'conda') out = run_and_read_all(run_lambda, "{} list".format(conda)) if out is None: return out return "\n".join( line for line in out.splitlines() if not line.startswith("#") and any( name in line for name in { "torch", "numpy", "cudatoolkit", "soumith", "mkl", "magma", "mkl", } ) ) def get_gcc_version(run_lambda): return run_and_parse_first_match(run_lambda, 'gcc --version', r'gcc (.*)') def get_clang_version(run_lambda): return run_and_parse_first_match(run_lambda, 'clang --version', r'clang version (.*)') def get_cmake_version(run_lambda): return run_and_parse_first_match(run_lambda, 'cmake --version', r'cmake (.*)') def get_nvidia_driver_version(run_lambda): if get_platform() == 'darwin': cmd = 'kextstat | grep -i cuda' return run_and_parse_first_match(run_lambda, cmd, r'com[.]nvidia[.]CUDA [(](.*?)[)]') smi = get_nvidia_smi() return run_and_parse_first_match(run_lambda, smi, r'Driver Version: (.*?) 
') def get_gpu_info(run_lambda): if get_platform() == 'darwin' or (TORCH_AVAILABLE and hasattr(torch.version, 'hip') and torch.version.hip is not None): if TORCH_AVAILABLE and torch.cuda.is_available(): return torch.cuda.get_device_name(None) return None smi = get_nvidia_smi() uuid_regex = re.compile(r' \(UUID: .+?\)') rc, out, _ = run_lambda(smi + ' -L') if rc != 0: return None # Anonymize GPUs by removing their UUID return re.sub(uuid_regex, '', out) def get_running_cuda_version(run_lambda): return run_and_parse_first_match(run_lambda, 'nvcc --version', r'release .+ V(.*)') def get_cudnn_version(run_lambda): """This will return a list of libcudnn.so; it's hard to tell which one is being used""" if get_platform() == 'win32': system_root = os.environ.get('SYSTEMROOT', 'C:\\Windows') cuda_path = os.environ.get('CUDA_PATH', "%CUDA_PATH%") where_cmd = os.path.join(system_root, 'System32', 'where') cudnn_cmd = '{} /R "{}\\bin" cudnn*.dll'.format(where_cmd, cuda_path) elif get_platform() == 'darwin': # CUDA libraries and drivers can be found in /usr/local/cuda/. See # https://docs.nvidia.com/cuda/cuda-installation-guide-mac-os-x/index.html#install # https://docs.nvidia.com/deeplearning/sdk/cudnn-install/index.html#installmac # Use CUDNN_LIBRARY when cudnn library is installed elsewhere. cudnn_cmd = 'ls /usr/local/cuda/lib/libcudnn*' else: cudnn_cmd = 'ldconfig -p | grep libcudnn | rev | cut -d" " -f1 | rev' rc, out, _ = run_lambda(cudnn_cmd) # find will return 1 if there are permission errors or if not found if len(out) == 0 or (rc != 1 and rc != 0): l = os.environ.get('CUDNN_LIBRARY') if l is not None and os.path.isfile(l): return os.path.realpath(l) return None files_set = set() for fn in out.split('\n'): fn = os.path.realpath(fn) # eliminate symbolic links if os.path.isfile(fn): files_set.add(fn) if not files_set: return None # Alphabetize the result because the order is non-deterministic otherwise files = list(sorted(files_set)) if len(files) == 1: return files[0] result = '\n'.join(files) return 'Probably one of the following:\n{}'.format(result) def get_nvidia_smi(): # Note: nvidia-smi is currently available only on Windows and Linux smi = 'nvidia-smi' if get_platform() == 'win32': system_root = os.environ.get('SYSTEMROOT', 'C:\\Windows') program_files_root = os.environ.get('PROGRAMFILES', 'C:\\Program Files') legacy_path = os.path.join(program_files_root, 'NVIDIA Corporation', 'NVSMI', smi) new_path = os.path.join(system_root, 'System32', smi) smis = [new_path, legacy_path] for candidate_smi in smis: if os.path.exists(candidate_smi): smi = '"{}"'.format(candidate_smi) break return smi def get_platform(): if sys.platform.startswith('linux'): return 'linux' elif sys.platform.startswith('win32'): return 'win32' elif sys.platform.startswith('cygwin'): return 'cygwin' elif sys.platform.startswith('darwin'): return 'darwin' else: return sys.platform def get_mac_version(run_lambda): return run_and_parse_first_match(run_lambda, 'sw_vers -productVersion', r'(.*)') def get_windows_version(run_lambda): system_root = os.environ.get('SYSTEMROOT', 'C:\\Windows') wmic_cmd = os.path.join(system_root, 'System32', 'Wbem', 'wmic') findstr_cmd = os.path.join(system_root, 'System32', 'findstr') return run_and_read_all(run_lambda, '{} os get Caption | {} /v Caption'.format(wmic_cmd, findstr_cmd)) def get_lsb_version(run_lambda): return run_and_parse_first_match(run_lambda, 'lsb_release -a', r'Description:\t(.*)') def check_release_file(run_lambda): return run_and_parse_first_match(run_lambda, 'cat 
/etc/*-release', r'PRETTY_NAME="(.*)"') def get_os(run_lambda): from platform import machine platform = get_platform() if platform == 'win32' or platform == 'cygwin': return get_windows_version(run_lambda) if platform == 'darwin': version = get_mac_version(run_lambda) if version is None: return None return 'macOS {} ({})'.format(version, machine()) if platform == 'linux': # Ubuntu/Debian based desc = get_lsb_version(run_lambda) if desc is not None: return '{} ({})'.format(desc, machine()) # Try reading /etc/*-release desc = check_release_file(run_lambda) if desc is not None: return '{} ({})'.format(desc, machine()) return '{} ({})'.format(platform, machine()) # Unknown platform return platform def get_python_platform(): import platform return platform.platform() def get_libc_version(): import platform if get_platform() != 'linux': return 'N/A' return '-'.join(platform.libc_ver()) def get_pip_packages(run_lambda): """Returns `pip list` output. Note: will also find conda-installed pytorch and numpy packages.""" # People generally have `pip` as `pip` or `pip3` # But here it is incoved as `python -mpip` def run_with_pip(pip): out = run_and_read_all(run_lambda, "{} list --format=freeze".format(pip)) return "\n".join( line for line in out.splitlines() if any( name in line for name in { "torch", "numpy", "mypy", } ) ) pip_version = 'pip3' if sys.version[0] == '3' else 'pip' out = run_with_pip(sys.executable + ' -mpip') return pip_version, out def get_cachingallocator_config(): ca_config = os.environ.get('PYTORCH_CUDA_ALLOC_CONF', '') return ca_config def is_xnnpack_available(): if TORCH_AVAILABLE: import torch.backends.xnnpack return str(torch.backends.xnnpack.enabled) # type: ignore[attr-defined] else: return "N/A" def get_env_info(): run_lambda = run pip_version, pip_list_output = get_pip_packages(run_lambda) if TORCH_AVAILABLE: version_str = torch.__version__ debug_mode_str = str(torch.version.debug) cuda_available_str = str(torch.cuda.is_available()) cuda_version_str = torch.version.cuda if not hasattr(torch.version, 'hip') or torch.version.hip is None: # cuda version hip_compiled_version = hip_runtime_version = miopen_runtime_version = 'N/A' else: # HIP version cfg = torch._C._show_config().split('\n') hip_runtime_version = [s.rsplit(None, 1)[-1] for s in cfg if 'HIP Runtime' in s][0] miopen_runtime_version = [s.rsplit(None, 1)[-1] for s in cfg if 'MIOpen' in s][0] cuda_version_str = 'N/A' hip_compiled_version = torch.version.hip else: version_str = debug_mode_str = cuda_available_str = cuda_version_str = 'N/A' hip_compiled_version = hip_runtime_version = miopen_runtime_version = 'N/A' sys_version = sys.version.replace("\n", " ") return SystemEnv( torch_version=version_str, is_debug_build=debug_mode_str, python_version='{} ({}-bit runtime)'.format(sys_version, sys.maxsize.bit_length() + 1), python_platform=get_python_platform(), is_cuda_available=cuda_available_str, cuda_compiled_version=cuda_version_str, cuda_runtime_version=get_running_cuda_version(run_lambda), nvidia_gpu_models=get_gpu_info(run_lambda), nvidia_driver_version=get_nvidia_driver_version(run_lambda), cudnn_version=get_cudnn_version(run_lambda), hip_compiled_version=hip_compiled_version, hip_runtime_version=hip_runtime_version, miopen_runtime_version=miopen_runtime_version, pip_version=pip_version, pip_packages=pip_list_output, conda_packages=get_conda_packages(run_lambda), os=get_os(run_lambda), libc_version=get_libc_version(), gcc_version=get_gcc_version(run_lambda), clang_version=get_clang_version(run_lambda), 
cmake_version=get_cmake_version(run_lambda), caching_allocator_config=get_cachingallocator_config(), is_xnnpack_available=is_xnnpack_available(), ) env_info_fmt = """ PyTorch version: {torch_version} Is debug build: {is_debug_build} CUDA used to build PyTorch: {cuda_compiled_version} ROCM used to build PyTorch: {hip_compiled_version} OS: {os} GCC version: {gcc_version} Clang version: {clang_version} CMake version: {cmake_version} Libc version: {libc_version} Python version: {python_version} Python platform: {python_platform} Is CUDA available: {is_cuda_available} CUDA runtime version: {cuda_runtime_version} GPU models and configuration: {nvidia_gpu_models} Nvidia driver version: {nvidia_driver_version} cuDNN version: {cudnn_version} HIP runtime version: {hip_runtime_version} MIOpen runtime version: {miopen_runtime_version} Is XNNPACK available: {is_xnnpack_available} Versions of relevant libraries: {pip_packages} {conda_packages} """.strip() def pretty_str(envinfo): def replace_nones(dct, replacement='Could not collect'): for key in dct.keys(): if dct[key] is not None: continue dct[key] = replacement return dct def replace_bools(dct, true='Yes', false='No'): for key in dct.keys(): if dct[key] is True: dct[key] = true elif dct[key] is False: dct[key] = false return dct def prepend(text, tag='[prepend]'): lines = text.split('\n') updated_lines = [tag + line for line in lines] return '\n'.join(updated_lines) def replace_if_empty(text, replacement='No relevant packages'): if text is not None and len(text) == 0: return replacement return text def maybe_start_on_next_line(string): # If `string` is multiline, prepend a \n to it. if string is not None and len(string.split('\n')) > 1: return '\n{}\n'.format(string) return string mutable_dict = envinfo._asdict() # If nvidia_gpu_models is multiline, start on the next line mutable_dict['nvidia_gpu_models'] = \ maybe_start_on_next_line(envinfo.nvidia_gpu_models) # If the machine doesn't have CUDA, report some fields as 'No CUDA' dynamic_cuda_fields = [ 'cuda_runtime_version', 'nvidia_gpu_models', 'nvidia_driver_version', ] all_cuda_fields = dynamic_cuda_fields + ['cudnn_version'] all_dynamic_cuda_fields_missing = all( mutable_dict[field] is None for field in dynamic_cuda_fields) if TORCH_AVAILABLE and not torch.cuda.is_available() and all_dynamic_cuda_fields_missing: for field in all_cuda_fields: mutable_dict[field] = 'No CUDA' if envinfo.cuda_compiled_version is None: mutable_dict['cuda_compiled_version'] = 'None' # Replace True with Yes, False with No mutable_dict = replace_bools(mutable_dict) # Replace all None objects with 'Could not collect' mutable_dict = replace_nones(mutable_dict) # If either of these are '', replace with 'No relevant packages' mutable_dict['pip_packages'] = replace_if_empty(mutable_dict['pip_packages']) mutable_dict['conda_packages'] = replace_if_empty(mutable_dict['conda_packages']) # Tag conda and pip packages with a prefix # If they were previously None, they'll show up as ie '[conda] Could not collect' if mutable_dict['pip_packages']: mutable_dict['pip_packages'] = prepend(mutable_dict['pip_packages'], '[{}] '.format(envinfo.pip_version)) if mutable_dict['conda_packages']: mutable_dict['conda_packages'] = prepend(mutable_dict['conda_packages'], '[conda] ') return env_info_fmt.format(**mutable_dict) def get_pretty_env_info(): return pretty_str(get_env_info()) def main(): print("Collecting environment information...") output = get_pretty_env_info() print(output) if TORCH_AVAILABLE and hasattr(torch, 'utils') and 
hasattr(torch.utils, '_crash_handler'): minidump_dir = torch.utils._crash_handler.DEFAULT_MINIDUMP_DIR if sys.platform == "linux" and os.path.exists(minidump_dir): dumps = [os.path.join(minidump_dir, dump) for dump in os.listdir(minidump_dir)] latest = max(dumps, key=os.path.getctime) ctime = os.path.getctime(latest) creation_time = datetime.datetime.fromtimestamp(ctime).strftime('%Y-%m-%d %H:%M:%S') msg = "\n*** Detected a minidump at {} created on {}, ".format(latest, creation_time) + \ "if this is related to your bug please include it when you file a report ***" print(msg, file=sys.stderr) if __name__ == '__main__': main()
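# Hypothetical usage sketch (illustrative only): the module is normally run as a
# script, but get_env_info() / pretty_str() also compose programmatically, so
# individual SystemEnv fields can be inspected directly.
def _example_report():
    info = get_env_info()
    print("torch:", info.torch_version)
    print("CUDA available:", info.is_cuda_available)
    print("cuDNN:", info.cudnn_version)
    return pretty_str(info)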
pytorch-master
torch/utils/collect_env.py
import torch._C def format_time(time_us=None, time_ms=None, time_s=None): '''Defines how to format time''' assert sum([time_us is not None, time_ms is not None, time_s is not None]) == 1 US_IN_SECOND = 1e6 US_IN_MS = 1e3 if time_us is None: if time_ms is not None: time_us = time_ms * US_IN_MS elif time_s is not None: time_us = time_s * US_IN_SECOND else: raise AssertionError("Shouldn't reach here :)") if time_us >= US_IN_SECOND: return '{:.3f}s'.format(time_us / US_IN_SECOND) if time_us >= US_IN_MS: return '{:.3f}ms'.format(time_us / US_IN_MS) return '{:.3f}us'.format(time_us) class ExecutionStats(object): def __init__(self, c_stats, benchmark_config): self._c_stats = c_stats self.benchmark_config = benchmark_config @property def latency_avg_ms(self): return self._c_stats.latency_avg_ms @property def num_iters(self): return self._c_stats.num_iters @property def iters_per_second(self): ''' Returns total number of iterations per second across all calling threads ''' return self.num_iters / self.total_time_seconds @property def total_time_seconds(self): return self.num_iters * ( self.latency_avg_ms / 1000.0) / self.benchmark_config.num_calling_threads def __str__(self): return '\n'.join([ "Average latency per example: " + format_time(time_ms=self.latency_avg_ms), "Total number of iterations: {}".format(self.num_iters), "Total number of iterations per second (across all threads): {:.2f}".format(self.iters_per_second), "Total time: " + format_time(time_s=self.total_time_seconds) ]) class ThroughputBenchmark(object): ''' This class is a wrapper around a c++ component throughput_benchmark::ThroughputBenchmark responsible for executing a PyTorch module (nn.Module or ScriptModule) under an inference server like load. It can emulate multiple calling threads to a single module provided. In the future we plan to enhance this component to support inter and intra-op parallelism as well as multiple models running in a single process. Please note that even though nn.Module is supported, it might incur an overhead from the need to hold GIL every time we execute Python code or pass around inputs as Python objects. As soon as you have a ScriptModule version of your model for inference deployment it is better to switch to using it in this benchmark. Example:: >>> # xdoctest: +SKIP("undefined vars") >>> from torch.utils import ThroughputBenchmark >>> bench = ThroughputBenchmark(my_module) >>> # Pre-populate benchmark's data set with the inputs >>> for input in inputs: ... # Both args and kwargs work, same as any PyTorch Module / ScriptModule ... bench.add_input(input[0], x2=input[1]) >>> # Inputs supplied above are randomly used during the execution >>> stats = bench.benchmark( ... num_calling_threads=4, ... num_warmup_iters = 100, ... num_iters = 1000, ... ) >>> print("Avg latency (ms): {}".format(stats.latency_avg_ms)) >>> print("Number of iterations: {}".format(stats.num_iters)) ''' def __init__(self, module): if isinstance(module, torch.jit.ScriptModule): self._benchmark = torch._C.ThroughputBenchmark(module._c) else: self._benchmark = torch._C.ThroughputBenchmark(module) def run_once(self, *args, **kwargs): ''' Given input id (input_idx) run benchmark once and return prediction. This is useful for testing that benchmark actually runs the module you want it to run. input_idx here is an index into inputs array populated by calling add_input() method. 
''' return self._benchmark.run_once(*args, **kwargs) def add_input(self, *args, **kwargs): ''' Store a single input to a module into the benchmark memory and keep it there. During the benchmark execution every thread is going to pick up a random input from the all the inputs ever supplied to the benchmark via this function. ''' self._benchmark.add_input(*args, **kwargs) def benchmark( self, num_calling_threads=1, num_warmup_iters=10, num_iters=100, profiler_output_path=""): ''' Args: num_warmup_iters (int): Warmup iters are used to make sure we run a module a few times before actually measuring things. This way we avoid cold caches and any other similar problems. This is the number of warmup iterations for each of the thread in separate num_iters (int): Number of iterations the benchmark should run with. This number is separate from the warmup iterations. Also the number is shared across all the threads. Once the num_iters iterations across all the threads is reached, we will stop execution. Though total number of iterations might be slightly larger. Which is reported as stats.num_iters where stats is the result of this function profiler_output_path (str): Location to save Autograd Profiler trace. If not empty, Autograd Profiler will be enabled for the main benchmark execution (but not the warmup phase). The full trace will be saved into the file path provided by this argument This function returns BenchmarkExecutionStats object which is defined via pybind11. It currently has two fields: - num_iters - number of actual iterations the benchmark have made - avg_latency_ms - average time it took to infer on one input example in milliseconds ''' config = torch._C.BenchmarkConfig() config.num_calling_threads = num_calling_threads config.num_warmup_iters = num_warmup_iters config.num_iters = num_iters config.profiler_output_path = profiler_output_path c_stats = self._benchmark.benchmark(config) return ExecutionStats(c_stats, config)
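# Hypothetical usage sketch (illustrative only): a concrete version of the
# docstring example using a tiny scripted module. The module, input shape, and
# thread/iteration counts are assumptions made for this example.
def _example_benchmark():
    import torch
    import torch.nn as nn

    scripted = torch.jit.script(nn.Linear(32, 8))
    bench = ThroughputBenchmark(scripted)
    for _ in range(4):
        bench.add_input(torch.randn(1, 32))
    stats = bench.benchmark(num_calling_threads=2, num_warmup_iters=10, num_iters=100)
    print(stats)
    return stats.latency_avg_ms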
pytorch-master
torch/utils/throughput_benchmark.py
from typing import Any import torch import enum from torch._C import _from_dlpack from torch._C import _to_dlpack as to_dlpack class DLDeviceType(enum.IntEnum): # Enums as in DLPack specification (aten/src/ATen/dlpack.h) kDLCPU = 1, kDLGPU = 2, kDLCPUPinned = 3, kDLOpenCL = 4, kDLVulkan = 7, kDLMetal = 8, kDLVPI = 9, kDLROCM = 10, kDLExtDev = 12, torch._C._add_docstr(to_dlpack, r"""to_dlpack(tensor) -> PyCapsule Returns an opaque object (a "DLPack capsule") representing the tensor. .. note:: ``to_dlpack`` is a legacy DLPack interface. The capsule it returns cannot be used for anything in Python other than use it as input to ``from_dlpack``. The more idiomatic use of DLPack is to call ``from_dlpack`` directly on the tensor object - this works when that object has a ``__dlpack__`` method, which PyTorch and most other libraries indeed have now. .. warning:: Only call ``from_dlpack`` once per capsule produced with ``to_dlpack``. Behavior when a capsule is consumed multiple times is undefined. Args: tensor: a tensor to be exported The DLPack capsule shares the tensor's memory. """) # TODO: add a typing.Protocol to be able to tell Mypy that only objects with # __dlpack__ and __dlpack_device__ methods are accepted. def from_dlpack(ext_tensor: Any) -> torch.Tensor: """from_dlpack(ext_tensor) -> Tensor Converts a tensor from an external library into a ``torch.Tensor``. The returned PyTorch tensor will share the memory with the input tensor (which may have come from another library). Note that in-place operations will therefore also affect the data of the input tensor. This may lead to unexpected issues (e.g., other libraries may have read-only flags or immutable data structures), so the user should only do this if they know for sure that this is fine. Args: ext_tensor (object with ``__dlpack__`` attribute, or a DLPack capsule): The tensor or DLPack capsule to convert. If ``ext_tensor`` is a tensor (or ndarray) object, it must support the ``__dlpack__`` protocol (i.e., have a ``ext_tensor.__dlpack__`` method). Otherwise ``ext_tensor`` may be a DLPack capsule, which is an opaque ``PyCapsule`` instance, typically produced by a ``to_dlpack`` function or method. 
Examples:: >>> import torch.utils.dlpack >>> t = torch.arange(4) # Convert a tensor directly (supported in PyTorch >= 1.10) >>> t2 = torch.from_dlpack(t) >>> t2[:2] = -1 # show that memory is shared >>> t2 tensor([-1, -1, 2, 3]) >>> t tensor([-1, -1, 2, 3]) # The old-style DLPack usage, with an intermediate capsule object >>> capsule = torch.utils.dlpack.to_dlpack(t) >>> capsule <capsule object "dltensor" at ...> >>> t3 = torch.from_dlpack(capsule) >>> t3 tensor([-1, -1, 2, 3]) >>> t3[0] = -9 # now we're sharing memory between 3 tensors >>> t3 tensor([-9, -1, 2, 3]) >>> t2 tensor([-9, -1, 2, 3]) >>> t tensor([-9, -1, 2, 3]) """ if hasattr(ext_tensor, '__dlpack__'): device = ext_tensor.__dlpack_device__() # device is either CUDA or ROCm, we need to pass the current # stream if device[0] in (DLDeviceType.kDLGPU, DLDeviceType.kDLROCM): stream = torch.cuda.current_stream('cuda:{}'.format(device[1])) # cuda_stream is the pointer to the stream and it is a public # attribute, but it is not documented # The array API specify that the default legacy stream must be passed # with a value of 1 for CUDA # https://data-apis.org/array-api/latest/API_specification/array_object.html?dlpack-self-stream-none#dlpack-self-stream-none # NOQA is_cuda = device[0] == DLDeviceType.kDLGPU # Since pytorch is not using PTDS by default, lets directly pass # the legacy stream stream_ptr = 1 if is_cuda and stream.cuda_stream == 0 else stream.cuda_stream dlpack = ext_tensor.__dlpack__(stream=stream_ptr) else: dlpack = ext_tensor.__dlpack__() else: # Old versions just call the converter dlpack = ext_tensor return _from_dlpack(dlpack)
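# Hypothetical usage sketch (illustrative only): round-trips a NumPy array
# through the __dlpack__ path described above. Assumes a NumPy recent enough to
# implement __dlpack__ (>= 1.22); that dependency belongs to this example, not
# to the module.
def _example_numpy_roundtrip():
    import numpy as np

    arr = np.arange(4, dtype=np.float32)
    t = from_dlpack(arr)       # zero-copy: shares the NumPy buffer
    t += 1                     # the change is visible through `arr` as well
    capsule = to_dlpack(t)     # legacy capsule interface
    t2 = from_dlpack(capsule)  # consume the capsule exactly once
    return arr, t2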
pytorch-master
torch/utils/dlpack.py
import os.path as _osp
import sys

from .throughput_benchmark import ThroughputBenchmark
from ._crash_handler import enable_minidumps, disable_minidumps, enable_minidumps_on_exceptions


# Set the module for a given object for nicer printing
def set_module(obj, mod):
    if not isinstance(mod, str):
        raise TypeError("The mod argument should be a string")
    obj.__module__ = mod

if sys.executable == "torch_deploy":
    # not valid inside torch_deploy interpreter, no paths exist for frozen modules
    cmake_prefix_path = None
else:
    cmake_prefix_path = _osp.join(_osp.dirname(_osp.dirname(__file__)), 'share', 'cmake')
pytorch-master
torch/utils/__init__.py
#!/usr/bin/env python3 from typing import Any, TypeVar, Optional, Tuple, List, NamedTuple, Union, Sequence, Dict, Callable import textwrap import torch from torch._C import TupleType, ListType from torch.jit._recursive import wrap_cpp_module T = TypeVar("T") MAX_RAW_TENSOR_SIZE = 16 class InflatableArg(NamedTuple): """ Helper type for bundled inputs. 'value' is the compressed/deflated input that is stored in the model. Value must be of the same type as the argument to the function that it is a deflated input for. 'fmt' is a formatable code string that is executed to inflate the compressed data into the appropriate input. It can use 'value' as an input to the format str. It must result in a value of the same type as 'value'. 'fmt_fn' is a formatable function code string that is executed to inflate the compressed data into the appropriate input. It must result in a value of the same type as 'value'. The function name should be the formatable part of the string. Note: Only top level InflatableArgs can be inflated. i.e. you cannot place an inflatable arg inside of some other structure. You should instead create an inflatable arg such that the fmt code string returns the full structure of your input. """ value: Any fmt: str = "{}" fmt_fn: str = "" def bundle_inputs( model: torch.jit.ScriptModule, inputs: Union[Optional[Sequence[Tuple[Any, ...]]], Dict[Callable, Optional[Sequence[Tuple[Any, ...]]]]], info: Optional[Union[List[str], Dict[Callable, List[str]]]] = None, *, _receive_inflate_expr: Optional[List[str]] = None, ) -> torch.jit.ScriptModule: """Creates and returns a copy of the specified model with inputs attached. The original model is not mutated or changed in any way. Models with bundled inputs can be invoked in a uniform manner by benchmarking and code coverage tools. If inputs is passed in as a list then the inputs will be bundled for 'forward'. If inputs is instead passed in as a map then all the methods specified in the map will have their corresponding inputs bundled. Info should match watchever type is chosen for the inputs. The returned model will support the following methods: `get_all_bundled_inputs_for_<function_name>() -> List[Tuple[Any, ...]]` Returns a list of tuples suitable for passing to the model like `for inp in model.get_all_bundled_inputs_for_foo(): model.foo(*inp)` `get_bundled_inputs_functions_and_info() -> Dict[str, Dict[str: List[str]]]` Returns a dictionary mapping function names to a metadata dictionary. This nested dictionary maps preset strings like: 'get_inputs_function_name' -> the name of a function attribute in this model that can be run to get back a list of inputs corresponding to that function. 'info' -> the user provided extra information about the bundled inputs If forward has bundled inputs then these following functions will also be defined on the returned module: `get_all_bundled_inputs() -> List[Tuple[Any, ...]]` Returns a list of tuples suitable for passing to the model like `for inp in model.get_all_bundled_inputs(): model(*inp)` `get_num_bundled_inputs() -> int` Equivalent to `len(model.get_all_bundled_inputs())`, but slightly easier to call from C++. Inputs can be specified in one of two ways: - The model can define `_generate_bundled_inputs_for_<function_name>`. If the user chooses this method inputs[<function>] should map to None - The `inputs` argument to this function can be a dictionary mapping functions to a list of inputs, of the same form that will be returned by get_all_bundled_inputs_for_<function_name>. 
Alternatively if only bundling inputs for forward the map can be omitted and a singular list of inputs can be provided instead. The type of the inputs is List[Tuple[Any, ...]]. The outer list corresponds with a list of inputs, the inner tuple is the list of args that together make up one input. For inputs of functions that take one arg, this will be a tuple of length one. The Any, ... is the actual data that makes up the args, e.g. a tensor. Info is an optional parameter that maps functions to a list of strings providing extra information about that function's bundled inputs. Alternatively if only bundling inputs for forward the map can be omitted and a singular list of information can be provided instead. This could be descriptions, expected outputs, etc. - Ex: info={model.forward : ['man eating icecream', 'an airplane', 'a dog']} This function will attempt to optimize arguments so that (e.g.) arguments like `torch.zeros(1000)` will be represented compactly. Only top-level arguments will be optimized. Tensors in lists or tuples will not. """ if not isinstance(model, torch.jit.ScriptModule): raise Exception("Only ScriptModule is supported.") ignored_methods, ignored_attrs = _get_bundled_inputs_attributes_and_methods(model) clone = torch._C._hack_do_not_use_clone_module_with_class( # type: ignore[attr-defined] model._c, ignored_methods, ignored_attrs, ) # The above cloning function returns a torch._C.scriptmodule and we need a torch.jit.scriptmodule. # Fortunately theres a function in _recursive that does exactly that conversion. cloned_module = wrap_cpp_module(clone) if isinstance(inputs, dict): assert(isinstance(info, dict) or info is None) augment_many_model_functions_with_bundled_inputs(cloned_module, inputs, _receive_inflate_expr, info) else: assert(isinstance(info, list) or info is None) augment_model_with_bundled_inputs(cloned_module, inputs, _receive_inflate_expr, info) return cloned_module def augment_model_with_bundled_inputs( model: torch.jit.ScriptModule, inputs: Optional[Sequence[Tuple[Any, ...]]] = None, _receive_inflate_expr: Optional[List[str]] = None, # For debugging. info: Optional[List[str]] = None, # Optional argument to provide info about forward or its inputs skip_size_check=False, ) -> None: """ Add bundled sample inputs to a model for the forward function. Models with bundled inputs can be invoked in a uniform manner by benchmarking and code coverage tools. Augmented models will support the following methods: `get_all_bundled_inputs() -> List[Tuple[Any, ...]]` Returns a list of tuples suitable for passing to the model like `for inp in model.get_all_bundled_inputs(): model(*inp)` `get_num_bundled_inputs() -> int` Equivalent to `len(model.get_all_bundled_inputs())`, but slightly easier to call from C++. `get_bundled_inputs_functions_and_info() -> Dict[str, Dict[str: List[str]]]` Returns a dictionary mapping function names to a metadata dictionary. This nested dictionary maps preset strings like: 'get_inputs_function_name' -> the name of a function attribute in this model that can be run to get back a list of inputs corresponding to that function. 'info' -> the user provided extra information about the bundled inputs Inputs can be specified in one of two ways: - The model can define `_generate_bundled_inputs_for_forward`. If the user chooses this method inputs should be None - `inputs` is a list of inputs of form List[Tuple[Any, ...]]. A list of tuples where the elements of each tuple are the args that make up one input. 
""" if not isinstance(model, torch.jit.ScriptModule): raise Exception("Only ScriptModule is supported.") forward: Callable = model.forward # Sometimes forward won't have a name attached so just in case if not hasattr(forward, "__name__"): forward.__name__ = 'forward' augment_many_model_functions_with_bundled_inputs( model, inputs={forward : inputs}, _receive_inflate_expr=_receive_inflate_expr, info={forward : info} if info else None, skip_size_check=skip_size_check, ) def augment_many_model_functions_with_bundled_inputs( model: torch.jit.ScriptModule, inputs: Dict[Callable, Optional[Sequence[Tuple[Any, ...]]]], _receive_inflate_expr: Optional[List[str]] = None, # For debugging. info: Optional[Dict[Callable, List[str]]] = None, # Optional argument to provide info about the function or its inputs skip_size_check=False, ) -> None: """Add bundled sample inputs to a model for an arbitrary list of public functions. Models with bundled inputs can be invoked in a uniform manner by benchmarking and code coverage tools. Augmented models will support the following methods: `get_all_bundled_inputs_for_<function_name>() -> List[Tuple[Any, ...]]` Returns a list of tuples suitable for passing to the model like `for inp in model.get_all_bundled_inputs_for_foo(): model.foo(*inp)` `get_bundled_inputs_functions_and_info() -> Dict[str, Dict[str: List[str]]]` Returns a dictionary mapping function names to a metadata dictionary. This nested dictionary maps preset strings like: 'get_inputs_function_name' -> the name of a function attribute in this model that can be run to get back a list of inputs corresponding to that function. 'info' -> the user provided extra information about the bundled inputs If forward has bundled inputs then these following functions are also defined: `get_all_bundled_inputs() -> List[Tuple[Any, ...]]` Returns a list of tuples suitable for passing to the model like `for inp in model.get_all_bundled_inputs(): model(*inp)` `get_num_bundled_inputs() -> int` Equivalent to `len(model.get_all_bundled_inputs())`, but slightly easier to call from C++. Inputs can be specified in one of two ways: - The model can define `_generate_bundled_inputs_for_<function_name>`. If the user chooses this method inputs[<function>] should map to None - The `inputs` argument to this function can be a dictionary mapping functions to a list of inputs, of the same form that will be returned by get_all_bundled_inputs_for_<function_name>. The type of the inputs is List[Tuple[Any, ...]]. The outer list corresponds with a list of inputs, the inner tuple is the list of args that together make up one input. For inputs of functions that take one arg, this will be a tuple of length one. The Any, ... is the actual data that makes up the args, e.g. a tensor. Info is an optional parameter that maps functions to a list of strings providing extra information about that function's bundled inputs. This could be descriptions, expected outputs, etc. - Ex: info={model.forward : ['man eating icecream', 'an airplane', 'a dog']} This function will attempt to optimize arguments so that (e.g.) arguments like `torch.zeros(1000)` will be represented compactly. Only top-level arguments will be optimized. Tensors in lists or tuples will not. 
""" if not isinstance(model, torch.jit.ScriptModule): raise Exception("Only ScriptModule is supported.") if not inputs: raise Exception("Please provide inputs for at least 1 function") if hasattr(model, "get_all_bundled_inputs") or hasattr(model, "get_bundled_inputs_functions_and_info"): raise Exception( "Models can only be augmented with bundled inputs once. " "This Model seems to have already been augmented with " "bundled inputs. Please start afresh with one that " "doesn't have bundled inputs.", ) get_bundled_inputs_functions_and_info_template = "" for function, input_list in inputs.items(): if hasattr(function, "__name__"): function_name = function.__name__ else: if hasattr(function, "name"): function_name = function.name # type: ignore[attr-defined] else: raise Exception( 'At least one of your functions has no attribute name please ensure all have one. m.foo.name = "foo"') if input_list is not None and not isinstance(input_list, Sequence): raise TypeError("Error inputs for function {0} is not a Sequence".format(function_name)) function_arg_types = [arg.type for arg in function.schema.arguments[1:]] # type: ignore[attr-defined] deflated_inputs_type: ListType = ListType(TupleType(function_arg_types)) model._c._register_attribute("_bundled_inputs_deflated_{name}".format(name=function_name), deflated_inputs_type, []) if hasattr(model, "_generate_bundled_inputs_for_" + function_name): if input_list is not None: raise Exception( "inputs[{name}] is not None, but _generate_bundled_inputs_for_{name} is already defined".format( name=function_name ) ) # Model author already defined _generate_bundled_inputs_for_<function_name>. elif input_list is None or len(input_list) == 0: raise Exception( "inputs for {name} must be specified if _generate_bundled_inputs_for_{name} is not already defined".format( name=function_name, ) ) else: # Iterate over the inputs and args in each input. # Accumulate `deflated_inputs` as (possibly) compressed values # and `parts` to be joined into the expression that unpacks them. deflated_inputs = [] parts = [] for inp_idx, args in enumerate(input_list): if not isinstance(args, Tuple) and not isinstance(args, List): # type: ignore[arg-type] raise TypeError( "Error bundled input for function {0} idx: {1} is not a Tuple or a List".format(function_name, inp_idx) ) deflated_args = [] parts.append("(") for arg_idx, arg in enumerate(args): inflate_helper_fn_name = _get_inflate_helper_fn_name(arg_idx, inp_idx, function_name) deflated, inflater, helper_definition = _inflate_expr( arg, f"deflated[{inp_idx}][{arg_idx}]", inflate_helper_fn_name, skip_size_check=skip_size_check, ) deflated_args.append(deflated) parts.append(f" {inflater},") if helper_definition: model.define(textwrap.dedent(helper_definition)) deflated_inputs.append(tuple(deflated_args)) parts.append("),") parts.append("") expr = "\n".join(parts) # Back-channel return this expr for debugging. if _receive_inflate_expr is not None: _receive_inflate_expr.append(expr) setattr(model, "_bundled_inputs_deflated_{name}".format(name=function_name), deflated_inputs) definition = textwrap.dedent(""" def _generate_bundled_inputs_for_{name}(self): deflated = self._bundled_inputs_deflated_{name} return [ {expr} ] """).format(expr=expr, name=function_name) model.define(definition) # Define get_all_bundled_inputs_for_<function_name> that caches the generated inputs. 
model.define(textwrap.dedent(""" def get_all_bundled_inputs_for_{name}(self): all_inputs = self._generate_bundled_inputs_for_{name}() assert all_inputs is not None return all_inputs """).format(name=function_name)) # Add to the high level helper methods inputs_info = repr(info[function]) if info and function in info else '[]' get_bundled_inputs_functions_and_info_template += """ temp_dict : Dict[str,List[str]] = {{}} info: List[str] = {info} temp_dict['info'] = info temp_dict['get_inputs_function_name'] = ['get_all_bundled_inputs_for_{name}'] all_inputs['{name}'] = temp_dict """.format( name=function_name, info=inputs_info, ) # To ensure backwards compatibility and a streamlined api for forward these wrappers are provided if function_name == 'forward': model.define(textwrap.dedent(""" def get_all_bundled_inputs(self): return self.get_all_bundled_inputs_for_forward() """)) model.define(textwrap.dedent(""" def get_num_bundled_inputs(self): return len(self.get_all_bundled_inputs_for_forward()) """)) # Define some high level helper methods that act on all bundled inputs model.define(textwrap.dedent(""" def get_bundled_inputs_functions_and_info(self): all_inputs : Dict[str, Dict[str,List[str]]] = {{}} {template} return all_inputs """.format(template=get_bundled_inputs_functions_and_info_template))) def _inflate_expr( arg: T, ref: str, inflate_helper_fn_name: str, skip_size_check: bool = False ) -> Tuple[Union[T, torch.Tensor], str, Optional[str]]: # Allow custom inflation expressions any object. # For example, calling custom image-decoding ops. # Or just use "{}" as the format string to ignore size limits. if isinstance(arg, InflatableArg): if arg.fmt_fn: if arg.fmt not in ["{}", ""]: raise Exception( f"Bundled input argument at position '{ref}' has " f"both arg.fmt_fn => \n{arg.fmt_fn} " f"\n and arg.fmt => {arg.fmt}. " "Please choose `arg.fmt` if the deflater is straightforward or " "`arg.fmt_fn` if you need a function." ) helper_definition = arg.fmt_fn.format(inflate_helper_fn_name) expr = f"self.{inflate_helper_fn_name}({ref})" return arg.value, expr, helper_definition else: return arg.value, arg.fmt.format(ref), None if isinstance(arg, torch.Tensor): # Small-storage tensors can just be saved directly. if arg.storage().size() <= MAX_RAW_TENSOR_SIZE or skip_size_check: return arg, ref, None # Small contiguous tensors can be cloned to have small storage. # TODO: Should we do this even for non-contiguous tensors? if arg.is_contiguous() and arg.numel() <= MAX_RAW_TENSOR_SIZE: return arg.clone(), ref, None # Example inputs commonly come from torch.zeros, torch.ones, or torch.full. # These can be represented compactly. for fmt in [torch.contiguous_format, torch.channels_last]: if arg.is_contiguous(memory_format=fmt) and (arg == arg.flatten()[0]).all().item(): return (arg.flatten()[0].clone().expand(*arg.size()), f"{ref}.contiguous(memory_format={fmt})", None) # Prevent big tensors from being bundled by default. # TODO: Provide more useful diagnostics. raise Exception( f"Bundled input argument at position '{ref}' is " f"a tensor with storage size {arg.storage().size()}. " f"You probably don't want to bundle this as an input. 
" ) else: return arg, ref, None def _get_bundled_inputs_attributes_and_methods(script_module: torch.jit.ScriptModule) -> Tuple[List[str], List[str]]: methods: List[str] = [] attributes: List[str] = [] # Has bundled inputs for forward if hasattr(script_module, 'get_all_bundled_inputs'): methods.append('get_all_bundled_inputs') methods.append('get_num_bundled_inputs') methods.append('run_on_bundled_input') if hasattr(script_module, 'get_bundled_inputs_functions_and_info'): methods.append('get_bundled_inputs_functions_and_info') all_info = script_module.get_bundled_inputs_functions_and_info() for function_name in all_info: methods.append("get_all_bundled_inputs_for_" + function_name) methods.append("_generate_bundled_inputs_for_" + function_name) attributes.append("_bundled_inputs_deflated_" + function_name) bundled_inputs_fn = getattr( script_module, f"get_all_bundled_inputs_for_{function_name}" ) num_bundled_inputs: int = len(bundled_inputs_fn()) # Check inflate helper functions for each function, argument and bundled input func = getattr(script_module, function_name) for arg_idx in range(len(func.schema.arguments) - 1): for input_idx in range(num_bundled_inputs): helper_fn_name = _get_inflate_helper_fn_name( arg_idx=arg_idx, input_idx=input_idx, function_name=function_name ) # if the arg has an InflatableArg with fmt_fn, add the helper function name if hasattr(script_module, helper_fn_name): methods.append(helper_fn_name) return (methods, attributes) def _get_inflate_helper_fn_name( arg_idx: int, input_idx: int, function_name: str, ) -> str: return f"_inflate_helper_for_{function_name}_input_{input_idx}_arg_{arg_idx}" def bundle_randn(*size, dtype=None): """Generate a tensor that will be inflated with torch.randn.""" stub = torch.zeros(1, dtype=dtype).expand(*size) return InflatableArg(value=stub, fmt="torch.randn_like({})") def bundle_large_tensor(t): """Wrap a tensor to allow bundling regardless of size.""" return InflatableArg(value=t, fmt="{}")
pytorch-master
torch/utils/bundled_inputs.py
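A minimal usage sketch for the bundled-inputs helpers in torch/utils/bundled_inputs.py above. It assumes the module-level augment_model_with_bundled_inputs wrapper (defined earlier in that file, outside this excerpt) and uses a made-up TinyModel purely for illustration.
import torch
from torch.utils.bundled_inputs import augment_model_with_bundled_inputs, bundle_randn


class TinyModel(torch.nn.Module):  # illustrative only, not part of the file above
    def forward(self, x):
        return x + 1


m = torch.jit.script(TinyModel())
# Each tuple is one bundled argument list for forward(); bundle_randn stores a tiny
# stub that is inflated with torch.randn_like when the inputs are retrieved.
augment_model_with_bundled_inputs(m, [(bundle_randn(4, 8),), (torch.zeros(4, 8),)])
print(m.get_num_bundled_inputs())        # 2
out = m(*m.get_all_bundled_inputs()[0])  # run forward on the first bundled input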
import os
import sys
import pathlib

import torch

DEFAULT_MINIDUMP_DIR = "/tmp/pytorch_crashes"
if sys.platform == "win32":
    DEFAULT_MINIDUMP_DIR = str(pathlib.Path.home() / "AppData" / "pytorch_crashes")


def enable_minidumps(directory=DEFAULT_MINIDUMP_DIR):
    if directory == DEFAULT_MINIDUMP_DIR:
        pathlib.Path(directory).mkdir(parents=True, exist_ok=True)
    elif not os.path.exists(directory):
        raise RuntimeError(f"Directory does not exist: {directory}")

    torch._C._enable_minidumps(directory)


def enable_minidumps_on_exceptions():
    torch._C._enable_minidumps_on_exceptions()


def disable_minidumps():
    torch._C._disable_minidumps()
pytorch-master
torch/utils/_crash_handler.py
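A short usage sketch for torch/utils/_crash_handler.py above. Whether a minidump is actually produced depends on the PyTorch build (breakpad support), so treat this as the intended call pattern rather than a guarantee.
import torch.utils._crash_handler as crash_handler

# Opt in at program start. The default directory is created on demand;
# a custom directory must already exist (see enable_minidumps above).
# torch._C._enable_minidumps may raise on builds without breakpad support.
crash_handler.enable_minidumps()
crash_handler.enable_minidumps_on_exceptions()

# ... run the workload; crashes are written as minidumps under the chosen directory ...

crash_handler.disable_minidumps()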
import torch class MkldnnLinear(torch.jit.ScriptModule): def __init__(self, dense_module, dtype): super(MkldnnLinear, self).__init__() self.register_buffer('weight', dense_module.weight.to_mkldnn(dtype)) if dense_module.bias is not None: # Bias can be fp32 or bf16 for OneDNN bf16 path, but for good accuracy, # we use fp32 dtype. self.register_buffer('bias', dense_module.bias.to_mkldnn()) else: # TODO: Remove this once ScriptModule supports registering None buffer self.register_buffer( 'bias', torch.zeros([dense_module.weight.size(0)], dtype=torch.float).to_mkldnn()) @torch.jit.script_method def __getstate__(self): return (self.weight.to_dense(), self.bias.to_dense(), self.training) @torch.jit.script_method def __setstate__(self, state): self.weight = state[0].to_mkldnn() self.bias = state[1].to_mkldnn() self.training = state[2] @torch.jit.script_method def forward(self, x): x_mkldnn = x if x.is_mkldnn else x.to_mkldnn() y_mkldnn = torch._C._nn.mkldnn_linear(x_mkldnn, self.weight, self.bias) y = y_mkldnn if x.is_mkldnn else y_mkldnn.to_dense() return y class _MkldnnConvNd(torch.jit.ScriptModule): """Common base of MkldnnConv1d and MkldnnConv2d""" __constants__ = ['stride', 'padding', 'dilation', 'groups'] def __init__(self, dense_module): super(_MkldnnConvNd, self).__init__() self.stride = dense_module.stride self.padding = dense_module.padding self.dilation = dense_module.dilation self.groups = dense_module.groups if dense_module.bias is not None: self.register_buffer('bias', dense_module.bias.to_mkldnn()) else: # Bias can be fp32 or bf16 for OneDNN bf16 path, but for good accuracy, # we use fp32 dtype. # TODO: Remove this once ScriptModule supports registering None buffer self.register_buffer( 'bias', torch.zeros([dense_module.weight.size(0)], dtype=torch.float).to_mkldnn()) @torch.jit.script_method def __getstate__(self): return (self.weight.to_dense(), self.bias.to_dense(), self.training) @torch.jit.script_method def forward(self, x): return torch.mkldnn_convolution( x, self.weight, self.bias, self.padding, self.stride, self.dilation, self.groups) class MkldnnConv1d(_MkldnnConvNd): def __init__(self, dense_module, dtype): super(MkldnnConv1d, self).__init__(dense_module) self.register_buffer('weight', dense_module.weight.to_mkldnn(dtype)) @torch.jit.script_method def __setstate__(self, state): self.weight = state[0].to_mkldnn() self.bias = state[1].to_mkldnn() self.training = state[2] class MkldnnConv2d(_MkldnnConvNd): def __init__(self, dense_module, dtype): super(MkldnnConv2d, self).__init__(dense_module) self.register_buffer('weight', torch._C._nn.mkldnn_reorder_conv2d_weight( dense_module.weight.to_mkldnn(dtype), self.padding, self.stride, self.dilation, self.groups)) @torch.jit.script_method def __setstate__(self, state): self.weight = torch._C._nn.mkldnn_reorder_conv2d_weight( state[0].to_mkldnn(), self.padding, self.stride, self.dilation, self.groups) self.bias = state[1].to_mkldnn() self.training = state[2] class MkldnnConv3d(_MkldnnConvNd): def __init__(self, dense_module, dtype): super(MkldnnConv3d, self).__init__(dense_module) self.register_buffer('weight', torch._C._nn.mkldnn_reorder_conv3d_weight( dense_module.weight.to_mkldnn(dtype), self.padding, self.stride, self.dilation, self.groups)) @torch.jit.script_method def __setstate__(self, state): self.weight = torch._C._nn.mkldnn_reorder_conv3d_weight( state[0].to_mkldnn(), self.padding, self.stride, self.dilation, self.groups) self.bias = state[1].to_mkldnn() self.training = state[2] class 
MkldnnBatchNorm(torch.jit.ScriptModule): __constants__ = ['exponential_average_factor', 'eps'] def __init__(self, dense_module): super(MkldnnBatchNorm, self).__init__() assert(not dense_module.training) assert(dense_module.track_running_stats) assert(dense_module.affine) if dense_module.momentum is None: self.exponential_average_factor = 0.0 else: self.exponential_average_factor = dense_module.momentum self.eps = dense_module.eps self.register_buffer('weight', dense_module.weight.to_mkldnn()) self.register_buffer('bias', dense_module.bias.to_mkldnn()) self.register_buffer('running_mean', dense_module.running_mean.to_mkldnn()) self.register_buffer('running_var', dense_module.running_var.to_mkldnn()) @torch.jit.script_method def __getstate__(self): weight = self.weight.to_dense() bias = self.bias.to_dense() running_mean = self.running_mean.to_dense() running_var = self.running_var.to_dense() return (weight, bias, running_mean, running_var, self.training) @torch.jit.script_method def __setstate__(self, state): self.weight = state[0].to_mkldnn() self.bias = state[1].to_mkldnn() self.running_mean = state[2].to_mkldnn() self.running_var = state[3].to_mkldnn() self.training = state[4] @torch.jit.script_method def forward(self, x): return torch.batch_norm( x, self.weight, self.bias, self.running_mean, self.running_var, False, # training self.exponential_average_factor, self.eps, False, # cuda_enabled ) class MkldnnPrelu(torch.jit.ScriptModule): def __init__(self, dense_module, dtype): super(MkldnnPrelu, self).__init__() self.register_buffer('weight', dense_module.weight.to_mkldnn(dtype)) @torch.jit.script_method def __getstate__(self): return (self.weight.to_dense(), self.training) @torch.jit.script_method def __setstate__(self, state): self.weight = state[0].to_mkldnn() self.training = state[1] @torch.jit.script_method def forward(self, x): x_mkldnn = x if x.is_mkldnn else x.to_mkldnn() y_mkldnn = torch.prelu(x_mkldnn, self.weight) y = y_mkldnn if x.is_mkldnn else y_mkldnn.to_dense() return y def to_mkldnn(module, dtype=torch.float): assert dtype in [torch.float, torch.bfloat16], \ "MKLDNN only support float or bfloat16 path now" def m_fn(m, d): if isinstance(m, torch.nn.Linear): return MkldnnLinear(m, d) elif isinstance(m, torch.nn.Conv1d): return MkldnnConv1d(m, d) elif isinstance(m, torch.nn.Conv2d): return MkldnnConv2d(m, d) elif isinstance(m, torch.nn.Conv3d): return MkldnnConv3d(m, d) elif isinstance(m, torch.nn.BatchNorm2d) or isinstance(m, torch.nn.BatchNorm3d): # For batchnorm bf16 path, OneDNN requires weight and bias need fp32 dtype. # so it doesn't need dtype argument. return MkldnnBatchNorm(m) elif isinstance(m, torch.nn.PReLU): return MkldnnPrelu(m, d) else: return m def m_fn_rec(m, d): new_m = m_fn(m, d) for name, sub_m in m.named_children(): setattr(new_m, name, m_fn_rec(sub_m, d)) return new_m return m_fn_rec(module, dtype)
pytorch-master
torch/utils/mkldnn.py
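A minimal inference sketch for torch.utils.mkldnn.to_mkldnn above, assuming the usual pattern of converting tensors to the MKL-DNN (oneDNN) layout at the model boundary; the toy Conv2d model and shapes are illustrative only.
import torch
from torch.utils import mkldnn as mkldnn_utils

# Illustrative eval-mode float model; Linear, Conv1d-3d, BatchNorm2d/3d and PReLU
# children are the module types that to_mkldnn above knows how to replace.
model = torch.nn.Sequential(torch.nn.Conv2d(3, 8, kernel_size=3)).eval()
mkldnn_model = mkldnn_utils.to_mkldnn(model)  # default dtype=torch.float

x = torch.randn(1, 3, 32, 32)
with torch.no_grad():  # MKL-DNN tensors are used for inference only here
    # Convert the input into the opaque MKL-DNN layout and densify the output.
    y = mkldnn_model(x.to_mkldnn()).to_dense()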
import contextlib from typing import Iterator, Set import functools import warnings from torch.utils._mode_utils import _enable_mode, _ModeInfo, _wrap_init, _restore_mode from torch._C import _get_torch_dispatch_mode, _set_torch_dispatch_mode from dataclasses import dataclass @dataclass class TorchDispatchModeInfo(_ModeInfo): def __init__(self): super().__init__(mode_name="torch_dispatch", mode_class=TorchDispatchMode) def get_mode(self): return _get_torch_dispatch_mode() def set_mode(self, mode): return _set_torch_dispatch_mode(mode) # TODO: Limitations and things about enable_torch_dispatch_mode we should fix before exposing it: # - We need a better user-facing api for torch._C._DisableTorchDispatch that # is able to selectively disable __torch_dispatch__ of a particular class. # - It doesn't work with the tensor constructors (torch.tensor, torch.Tensor) # - Better name (see https://github.com/pytorch/pytorch/pull/63496#discussion_r694091694) @contextlib.contextmanager def enable_torch_dispatch_mode(mode, *, replace=None, ignore_preexisting=False) -> Iterator[None]: """ Context manager that causes all pytorch operators to dispatch to the passed-in type's __torch_dispatch__ function, including operations that accept no tensors but return a tensor. This function is non-compositional; if there is already an existing mode, it will raise an error This function is safe to use inside a ``__torch_dispatch__`` mode handler, as the mode is guaranteed to be disabled in this context. You can use this context manager to reinstate the mode so that calls to overridable APIs recursively call back into your mode handler (this can easily cause infinite loops, so use with care!) enable_torch_dispatch_mode is affected by _DisableTorchDispatch. Args: mode (:class:`TorchDispatchMode`, Tensor-like class, or None): the mode to set as current mode. If you pass a Tensor-like class, it will be treated as a non-compositional mode with no state, which is convenient if you have an existing tensor subclass that you'd like to apply globally in a quick and dirty way. Passing None will disable the current mode. replace (:class:`TorchDispatchMode` or Tensor-like class): the mode to replace. You can use this argument to change the mode in a situation where you know what the current mode is (and you are intentionally overwriting it.) If you don't know what the current mode is, use ``ignore_preexisting`` instead. ignore_preexisting (bool): if True, ignore any preexisting mode and overwrite it with the passed mode. """ return _enable_mode(mode, mode_info=TorchDispatchModeInfo(), replace=replace, ignore_preexisting=ignore_preexisting) def _wrap_torch_dispatch(f): @functools.wraps(f) def wrapped(self, *args, **kwargs): if isinstance(f, classmethod): raise RuntimeError("TorchDispatchMode's torch_dispatch function " + "should be a normal method not a class method") inner = getattr(self, "inner", None) with enable_torch_dispatch_mode(inner): return f(self, *args, **kwargs) return wrapped # Implementation note, since this is based on TorchFunctionMode, this had the # same dilemma: I had a choice about how much of mode stacks # to implement in Python versus in C++. At time of writing, I did not care # too much about implementation efficiency; however, I do care about making it # hard for users to implement modes in the wrong way. 
In the end, it turned # out to be possible to implement mode stacks entirely from userland, with the # C++ API providing only _get_torch_dispatch_mode() and # _set_torch_dispatch_mode(), so I opted to provide some unsafe C++ bindings and # have the bulk of the logic for managing the stack in Python, which helped # simplify the C++ API surface. It would also have been valid to build in the # notion of mode stack directly into C++ but in this design it's substantially # more difficult to interact with TorchDispatchModeMeta. class TorchDispatchModeMeta(type): """ Metaclass for :class:`TorchDispatchMode`; it does two things: * Adds an implicit ``inner`` kwarg to ``__init__``, to allow the modes to be chained together to form a stack. * Reenables the inner mode, so that by default PyTorch API calls will compositionally proceed to the next mode on the stack. The default behavior for the second bullet is important, as it is easy to accidentally write ``_wrap_torch_dispatch`` implementations that are not compositional, and the wrapping here makes the obvious code do the right thing (aka, this is why there is a metaclass). """ def __new__(metacls, name, bases, dct): if '__init__' in dct: dct['__init__'] = _wrap_init(dct['__init__']) if '__torch_dispatch__' in dct: dct['__torch_dispatch__'] = _wrap_torch_dispatch(dct['__torch_dispatch__']) return super().__new__(metacls, name, bases, dct) class TorchDispatchMode(metaclass=TorchDispatchModeMeta): """ A ``TorchDispatchMode`` allows you to override the meaning of all ``__torch_dispatch__`` overrideable functions within a dynamic scope, without having to actually create a tensor subclass or manually monkey-patch functions in the PyTorch API. Some common situations where you should use a mode: * You want to override the meaning of factory functions, or other functions that do not otherwise take a tensor as an argument (these cannot be overridden with tensor subclasses). * You want to override the behavior of all functions without needing to wrap your inputs in tensor subclasses; e.g., if you are just interested in logging intermediate computations. * You want to control the order of execution of various tensor subclasses explicitly, rather than implicitly via the return of ``NotImplemented``. Independent subclasses of :class:`TorchDispatchMode` are compositional: modes can be pushed onto a stack using ``with MyMode():``. When you call functions in the PyTorch API inside your ``__torch_dispatch__`` implementation, by default, they will forward on to the next mode on the mode stack. If you want recursively call back into your current ``__torch_dispatch__`` implementation, either explicitly invoke ``self.__torch_dispatch__(...)``, or use the context manager ``__torch_dispatch__(self)`` to make PyTorch API self-referential (beware of infinite loops, in this case!) """ # Force metaclass to generate constructor at the base of the hierarchy def __init__(self): self.ancestors: Set[TorchDispatchMode] def __torch_dispatch__(self, func, types, args=(), kwargs=None): raise NotImplementedError() def __enter__(self): old = _get_torch_dispatch_mode() if hasattr(self, "inner"): raise RuntimeError(f"{self} has already been used as a mode. 
Please use a fresh version or use restore") else: self.inner = old if old is None: self.ancestors = set() else: self.ancestors = self.inner.ancestors.union({self.inner}) _set_torch_dispatch_mode(self) return self def __exit__(self, exc_type, exc_val, exc_tb): _set_torch_dispatch_mode(self.inner) @contextlib.contextmanager def restore(self): return _restore_mode(self, mode_info=TorchDispatchModeInfo()) @classmethod def push(cls, *args, **kwargs): warnings.warn("`Mode.push()` is no longer necessary and can be replaced with just `with Mode()`") instance = cls(*args, **kwargs) return instance class BaseTorchDispatchMode(TorchDispatchMode): def __torch_dispatch__(self, func, types, args=(), kwargs=None): if kwargs is None: kwargs = {} return func(*args, **kwargs)
pytorch-master
torch/utils/_python_dispatch.py
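An illustrative sketch for torch/utils/_python_dispatch.py above: a made-up LoggingMode that prints every op it intercepts and then forwards the call, which is the compositional behaviour the metaclass wrapping of __torch_dispatch__ is designed to encourage.
import torch
from torch.utils._python_dispatch import TorchDispatchMode


class LoggingMode(TorchDispatchMode):  # hypothetical example, not part of the file above
    def __torch_dispatch__(self, func, types, args=(), kwargs=None):
        kwargs = kwargs or {}
        print(f"dispatched: {func}")
        # The metaclass wrapper re-enables self.inner around this body, so calling
        # func here forwards to the next mode on the stack (or the real kernels).
        return func(*args, **kwargs)


with LoggingMode():
    x = torch.randn(2, 2)  # factory functions are intercepted too
    y = x.mm(x)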
import copy import glob import importlib import importlib.abc import os import re import shlex import setuptools import subprocess import sys import sysconfig import warnings import collections import torch import torch._appdirs from .file_baton import FileBaton from ._cpp_extension_versioner import ExtensionVersioner from .hipify import hipify_python from .hipify.hipify_python import GeneratedFileCleaner from typing import List, Optional, Union, Tuple from torch.torch_version import TorchVersion from setuptools.command.build_ext import build_ext from pkg_resources import packaging # type: ignore[attr-defined] IS_WINDOWS = sys.platform == 'win32' IS_MACOS = sys.platform.startswith('darwin') IS_LINUX = sys.platform.startswith('linux') LIB_EXT = '.pyd' if IS_WINDOWS else '.so' EXEC_EXT = '.exe' if IS_WINDOWS else '' CLIB_PREFIX = '' if IS_WINDOWS else 'lib' CLIB_EXT = '.dll' if IS_WINDOWS else '.so' SHARED_FLAG = '/DLL' if IS_WINDOWS else '-shared' _HERE = os.path.abspath(__file__) _TORCH_PATH = os.path.dirname(os.path.dirname(_HERE)) TORCH_LIB_PATH = os.path.join(_TORCH_PATH, 'lib') BUILD_SPLIT_CUDA = os.getenv('BUILD_SPLIT_CUDA') or (os.path.exists(os.path.join( TORCH_LIB_PATH, f'{CLIB_PREFIX}torch_cuda_cu{CLIB_EXT}')) and os.path.exists(os.path.join(TORCH_LIB_PATH, f'{CLIB_PREFIX}torch_cuda_cpp{CLIB_EXT}'))) SUBPROCESS_DECODE_ARGS = ('oem',) if IS_WINDOWS else () MINIMUM_GCC_VERSION = (5, 0, 0) MINIMUM_MSVC_VERSION = (19, 0, 24215) # The following values were taken from the following GitHub gist that # summarizes the minimum valid major versions of g++/clang++ for each supported # CUDA version: https://gist.github.com/ax3l/9489132 CUDA_GCC_VERSIONS = { '10.2': (MINIMUM_GCC_VERSION, (8, 0, 0)), '11.1': (MINIMUM_GCC_VERSION, (10, 0, 0)), '11.2': (MINIMUM_GCC_VERSION, (10, 2, 1)), '11.3': (MINIMUM_GCC_VERSION, (10, 2, 1)), '11.4': ((6, 0, 0), (11, 5, 0)), '11.5': ((6, 0, 0), (11, 5, 0)), '11.6': ((6, 0, 0), (11, 5, 0)), '11.7': ((6, 0, 0), (11, 5, 0)), } CUDA_CLANG_VERSIONS = { '10.2': ((3, 3, 0), (8, 0, 0)), '11.1': ((6, 0, 0), (9, 0, 0)), '11.2': ((6, 0, 0), (9, 0, 0)), '11.3': ((6, 0, 0), (11, 0, 0)), '11.4': ((6, 0, 0), (11, 0, 0)), '11.5': ((6, 0, 0), (12, 0, 0)), '11.6': ((6, 0, 0), (12, 0, 0)), '11.7': ((6, 0, 0), (13, 0, 0)), } # Taken directly from python stdlib < 3.9 # See https://github.com/pytorch/pytorch/issues/48617 def _nt_quote_args(args: Optional[List[str]]) -> List[str]: """Quote command-line arguments for DOS/Windows conventions. Just wraps every argument which contains blanks in double quotes, and returns a new argument list. 
""" # Cover None-type if not args: return [] return [f'"{arg}"' if ' ' in arg else arg for arg in args] def _find_cuda_home() -> Optional[str]: r'''Finds the CUDA install path.''' # Guess #1 cuda_home = os.environ.get('CUDA_HOME') or os.environ.get('CUDA_PATH') if cuda_home is None: # Guess #2 try: which = 'where' if IS_WINDOWS else 'which' with open(os.devnull, 'w') as devnull: nvcc = subprocess.check_output([which, 'nvcc'], stderr=devnull).decode(*SUBPROCESS_DECODE_ARGS).rstrip('\r\n') cuda_home = os.path.dirname(os.path.dirname(nvcc)) except Exception: # Guess #3 if IS_WINDOWS: cuda_homes = glob.glob( 'C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v*.*') if len(cuda_homes) == 0: cuda_home = '' else: cuda_home = cuda_homes[0] else: cuda_home = '/usr/local/cuda' if not os.path.exists(cuda_home): cuda_home = None if cuda_home and not torch.cuda.is_available(): print(f"No CUDA runtime is found, using CUDA_HOME='{cuda_home}'", file=sys.stderr) return cuda_home def _find_rocm_home() -> Optional[str]: r'''Finds the ROCm install path.''' # Guess #1 rocm_home = os.environ.get('ROCM_HOME') or os.environ.get('ROCM_PATH') if rocm_home is None: # Guess #2 try: pipe_hipcc = subprocess.Popen( ["which hipcc | xargs readlink -f"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) hipcc, _ = pipe_hipcc.communicate() # this will be either <ROCM_HOME>/hip/bin/hipcc or <ROCM_HOME>/bin/hipcc rocm_home = os.path.dirname(os.path.dirname(hipcc.decode(*SUBPROCESS_DECODE_ARGS).rstrip('\r\n'))) if os.path.basename(rocm_home) == 'hip': rocm_home = os.path.dirname(rocm_home) except Exception: # Guess #3 rocm_home = '/opt/rocm' if not os.path.exists(rocm_home): rocm_home = None if rocm_home and torch.version.hip is None: print(f"No ROCm runtime is found, using ROCM_HOME='{rocm_home}'", file=sys.stderr) return rocm_home def _join_rocm_home(*paths) -> str: r''' Joins paths with ROCM_HOME, or raises an error if it ROCM_HOME is not set. This is basically a lazy way of raising an error for missing $ROCM_HOME only once we need to get any ROCm-specific path. ''' if ROCM_HOME is None: raise EnvironmentError('ROCM_HOME environment variable is not set. ' 'Please set it to your ROCm install root.') elif IS_WINDOWS: raise EnvironmentError('Building PyTorch extensions using ' 'ROCm and Windows is not supported.') return os.path.join(ROCM_HOME, *paths) ABI_INCOMPATIBILITY_WARNING = ''' !! WARNING !! !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! Your compiler ({}) may be ABI-incompatible with PyTorch! Please use a compiler that is ABI-compatible with GCC 5.0 and above. See https://gcc.gnu.org/onlinedocs/libstdc++/manual/abi.html. See https://gist.github.com/goldsborough/d466f43e8ffc948ff92de7486c5216d6 for instructions on how to install GCC 5 or higher. !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! !! WARNING !! ''' WRONG_COMPILER_WARNING = ''' !! WARNING !! !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! Your compiler ({user_compiler}) is not compatible with the compiler Pytorch was built with for this platform, which is {pytorch_compiler} on {platform}. Please use {pytorch_compiler} to to compile your extension. Alternatively, you may compile PyTorch from source using {user_compiler}, and then you can also use {user_compiler} to compile your extension. See https://github.com/pytorch/pytorch/blob/master/CONTRIBUTING.md for help with compiling PyTorch from source. 
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! !! WARNING !! ''' CUDA_MISMATCH_MESSAGE = ''' The detected CUDA version ({0}) mismatches the version that was used to compile PyTorch ({1}). Please make sure to use the same CUDA versions. ''' CUDA_MISMATCH_WARN = "The detected CUDA version ({0}) has a minor version mismatch with the version that was used to compile PyTorch ({1}). Most likely this shouldn't be a problem." CUDA_NOT_FOUND_MESSAGE = ''' CUDA was not found on the system, please set the CUDA_HOME or the CUDA_PATH environment variable or add NVCC to your system PATH. The extension compilation will fail. ''' ROCM_HOME = _find_rocm_home() MIOPEN_HOME = _join_rocm_home('miopen') if ROCM_HOME else None HIP_HOME = _join_rocm_home('hip') if ROCM_HOME else None IS_HIP_EXTENSION = True if ((ROCM_HOME is not None) and (torch.version.hip is not None)) else False ROCM_VERSION = None if torch.version.hip is not None: ROCM_VERSION = tuple(int(v) for v in torch.version.hip.split('.')[:2]) CUDA_HOME = _find_cuda_home() CUDNN_HOME = os.environ.get('CUDNN_HOME') or os.environ.get('CUDNN_PATH') # PyTorch releases have the version pattern major.minor.patch, whereas when # PyTorch is built from source, we append the git commit hash, which gives # it the below pattern. BUILT_FROM_SOURCE_VERSION_PATTERN = re.compile(r'\d+\.\d+\.\d+\w+\+\w+') COMMON_MSVC_FLAGS = ['/MD', '/wd4819', '/wd4251', '/wd4244', '/wd4267', '/wd4275', '/wd4018', '/wd4190', '/EHsc'] MSVC_IGNORE_CUDAFE_WARNINGS = [ 'base_class_has_different_dll_interface', 'field_without_dll_interface', 'dll_interface_conflict_none_assumed', 'dll_interface_conflict_dllexport_assumed' ] COMMON_NVCC_FLAGS = [ '-D__CUDA_NO_HALF_OPERATORS__', '-D__CUDA_NO_HALF_CONVERSIONS__', '-D__CUDA_NO_BFLOAT16_CONVERSIONS__', '-D__CUDA_NO_HALF2_OPERATORS__', '--expt-relaxed-constexpr' ] COMMON_HIP_FLAGS = [ '-fPIC', '-D__HIP_PLATFORM_HCC__=1', '-DUSE_ROCM=1', ] COMMON_HIPCC_FLAGS = [ '-DCUDA_HAS_FP16=1', '-D__HIP_NO_HALF_OPERATORS__=1', '-D__HIP_NO_HALF_CONVERSIONS__=1', ] JIT_EXTENSION_VERSIONER = ExtensionVersioner() PLAT_TO_VCVARS = { 'win32' : 'x86', 'win-amd64' : 'x86_amd64', } def _is_binary_build() -> bool: return not BUILT_FROM_SOURCE_VERSION_PATTERN.match(torch.version.__version__) def _accepted_compilers_for_platform() -> List[str]: # gnu-c++ and gnu-cc are the conda gcc compilers return ['clang++', 'clang'] if IS_MACOS else ['g++', 'gcc', 'gnu-c++', 'gnu-cc'] def get_default_build_root() -> str: r''' Returns the path to the root folder under which extensions will built. For each extension module built, there will be one folder underneath the folder returned by this function. For example, if ``p`` is the path returned by this function and ``ext`` the name of an extension, the build folder for the extension will be ``p/ext``. This directory is **user-specific** so that multiple users on the same machine won't meet permission issues. ''' return os.path.realpath(torch._appdirs.user_cache_dir(appname='torch_extensions')) def check_compiler_ok_for_platform(compiler: str) -> bool: r''' Verifies that the compiler is the expected one for the current platform. Args: compiler (str): The compiler executable to check. Returns: True if the compiler is gcc/g++ on Linux or clang/clang++ on macOS, and always True for Windows. ''' if IS_WINDOWS: return True which = subprocess.check_output(['which', compiler], stderr=subprocess.STDOUT) # Use os.path.realpath to resolve any symlinks, in particular from 'c++' to e.g. 'g++'. 
compiler_path = os.path.realpath(which.decode(*SUBPROCESS_DECODE_ARGS).strip()) # Check the compiler name if any(name in compiler_path for name in _accepted_compilers_for_platform()): return True # If compiler wrapper is used try to infer the actual compiler by invoking it with -v flag version_string = subprocess.check_output([compiler, '-v'], stderr=subprocess.STDOUT).decode(*SUBPROCESS_DECODE_ARGS) if IS_LINUX: # Check for 'gcc' or 'g++' for sccache warpper pattern = re.compile("^COLLECT_GCC=(.*)$", re.MULTILINE) results = re.findall(pattern, version_string) if len(results) != 1: return False compiler_path = os.path.realpath(results[0].strip()) # On RHEL/CentOS c++ is a gcc compiler wrapper if os.path.basename(compiler_path) == 'c++' and 'gcc version' in version_string: return True return any(name in compiler_path for name in _accepted_compilers_for_platform()) if IS_MACOS: # Check for 'clang' or 'clang++' return version_string.startswith("Apple clang") return False def get_compiler_abi_compatibility_and_version(compiler) -> Tuple[bool, TorchVersion]: r''' Determine if the given compiler is ABI-compatible with PyTorch alongside its version. Args: compiler (str): The compiler executable name to check (e.g. ``g++``). Must be executable in a shell process. Returns: A tuple that contains a boolean that defines if the compiler is (likely) ABI-incompatible with PyTorch, followed by a `TorchVersion` string that contains the compiler version separated by dots. ''' if not _is_binary_build(): return (True, TorchVersion('0.0.0')) if os.environ.get('TORCH_DONT_CHECK_COMPILER_ABI') in ['ON', '1', 'YES', 'TRUE', 'Y']: return (True, TorchVersion('0.0.0')) # First check if the compiler is one of the expected ones for the particular platform. if not check_compiler_ok_for_platform(compiler): warnings.warn(WRONG_COMPILER_WARNING.format( user_compiler=compiler, pytorch_compiler=_accepted_compilers_for_platform()[0], platform=sys.platform)) return (False, TorchVersion('0.0.0')) if IS_MACOS: # There is no particular minimum version we need for clang, so we're good here. 
return (True, TorchVersion('0.0.0')) try: if IS_LINUX: minimum_required_version = MINIMUM_GCC_VERSION versionstr = subprocess.check_output([compiler, '-dumpfullversion', '-dumpversion']) version = versionstr.decode(*SUBPROCESS_DECODE_ARGS).strip().split('.') else: minimum_required_version = MINIMUM_MSVC_VERSION compiler_info = subprocess.check_output(compiler, stderr=subprocess.STDOUT) match = re.search(r'(\d+)\.(\d+)\.(\d+)', compiler_info.decode(*SUBPROCESS_DECODE_ARGS).strip()) version = ['0', '0', '0'] if match is None else list(match.groups()) except Exception: _, error, _ = sys.exc_info() warnings.warn(f'Error checking compiler version for {compiler}: {error}') return (False, TorchVersion('0.0.0')) if tuple(map(int, version)) >= minimum_required_version: return (True, TorchVersion('.'.join(version))) compiler = f'{compiler} {".".join(version)}' warnings.warn(ABI_INCOMPATIBILITY_WARNING.format(compiler)) return (False, TorchVersion('.'.join(version))) def _check_cuda_version(compiler_name: str, compiler_version: TorchVersion) -> None: if not CUDA_HOME: raise RuntimeError(CUDA_NOT_FOUND_MESSAGE) nvcc = os.path.join(CUDA_HOME, 'bin', 'nvcc') cuda_version_str = subprocess.check_output([nvcc, '--version']).strip().decode(*SUBPROCESS_DECODE_ARGS) cuda_version = re.search(r'release (\d+[.]\d+)', cuda_version_str) if cuda_version is None: return cuda_str_version = cuda_version.group(1) cuda_ver = packaging.version.parse(cuda_str_version) torch_cuda_version = packaging.version.parse(torch.version.cuda) if cuda_ver != torch_cuda_version: # major/minor attributes are only available in setuptools>=49.6.0 if getattr(cuda_ver, "major", float("nan")) != getattr(torch_cuda_version, "major", float("nan")): raise RuntimeError(CUDA_MISMATCH_MESSAGE.format(cuda_str_version, torch.version.cuda)) warnings.warn(CUDA_MISMATCH_WARN.format(cuda_str_version, torch.version.cuda)) if not (sys.platform.startswith('linux') and os.environ.get('TORCH_DONT_CHECK_COMPILER_ABI') not in ['ON', '1', 'YES', 'TRUE', 'Y'] and _is_binary_build()): return cuda_compiler_bounds = CUDA_CLANG_VERSIONS if compiler_name.startswith('clang') else CUDA_GCC_VERSIONS if cuda_str_version not in cuda_compiler_bounds: warnings.warn(f'There are no {compiler_name} version bounds defined for CUDA version {cuda_str_version}') else: min_compiler_version, max_compiler_version = cuda_compiler_bounds[cuda_str_version] # Special case for 11.4.0, which has lower compiler bounds that 11.4.1 if "V11.4.48" in cuda_version_str and cuda_compiler_bounds == CUDA_GCC_VERSIONS: max_compiler_version = (10, 0, 0) min_compiler_version_str = '.'.join(map(str, min_compiler_version)) max_compiler_version_str = '.'.join(map(str, max_compiler_version)) version_bound_str = f'>={min_compiler_version_str}' version_bound_str = f'{version_bound_str}, <={max_compiler_version_str}' if compiler_version < TorchVersion(min_compiler_version_str): raise RuntimeError( f'The current installed version of {compiler_name} ({compiler_version}) is less ' f'than the minimum required version by CUDA {cuda_str_version} ({min_compiler_version_str}). ' f'Please make sure to use an adequate version of {compiler_name} ({version_bound_str}).' ) if compiler_version > TorchVersion(max_compiler_version_str): raise RuntimeError( f'The current installed version of {compiler_name} ({compiler_version}) is greater ' f'than the maximum required version by CUDA {cuda_str_version} ({max_compiler_version_str}). ' f'Please make sure to use an adequate version of {compiler_name} ({version_bound_str}).' 
) # See below for why we inherit BuildExtension from object. # https://stackoverflow.com/questions/1713038/super-fails-with-error-typeerror-argument-1-must-be-type-not-classobj-when class BuildExtension(build_ext, object): r''' A custom :mod:`setuptools` build extension . This :class:`setuptools.build_ext` subclass takes care of passing the minimum required compiler flags (e.g. ``-std=c++14``) as well as mixed C++/CUDA compilation (and support for CUDA files in general). When using :class:`BuildExtension`, it is allowed to supply a dictionary for ``extra_compile_args`` (rather than the usual list) that maps from languages (``cxx`` or ``nvcc``) to a list of additional compiler flags to supply to the compiler. This makes it possible to supply different flags to the C++ and CUDA compiler during mixed compilation. ``use_ninja`` (bool): If ``use_ninja`` is ``True`` (default), then we attempt to build using the Ninja backend. Ninja greatly speeds up compilation compared to the standard ``setuptools.build_ext``. Fallbacks to the standard distutils backend if Ninja is not available. .. note:: By default, the Ninja backend uses #CPUS + 2 workers to build the extension. This may use up too many resources on some systems. One can control the number of workers by setting the `MAX_JOBS` environment variable to a non-negative number. ''' @classmethod def with_options(cls, **options): r''' Returns a subclass with alternative constructor that extends any original keyword arguments to the original constructor with the given options. ''' class cls_with_options(cls): # type: ignore[misc, valid-type] def __init__(self, *args, **kwargs): kwargs.update(options) super().__init__(*args, **kwargs) return cls_with_options def __init__(self, *args, **kwargs) -> None: super(BuildExtension, self).__init__(*args, **kwargs) self.no_python_abi_suffix = kwargs.get("no_python_abi_suffix", False) self.use_ninja = kwargs.get('use_ninja', True) if self.use_ninja: # Test if we can use ninja. Fallback otherwise. msg = ('Attempted to use ninja as the BuildExtension backend but ' '{}. Falling back to using the slow distutils backend.') if not is_ninja_available(): warnings.warn(msg.format('we could not find ninja.')) self.use_ninja = False def finalize_options(self) -> None: super().finalize_options() if self.use_ninja: self.force = True def build_extensions(self) -> None: compiler_name, compiler_version = self._check_abi() cuda_ext = False extension_iter = iter(self.extensions) extension = next(extension_iter, None) while not cuda_ext and extension: for source in extension.sources: _, ext = os.path.splitext(source) if ext == '.cu': cuda_ext = True break extension = next(extension_iter, None) if cuda_ext and not IS_HIP_EXTENSION: _check_cuda_version(compiler_name, compiler_version) for extension in self.extensions: # Ensure at least an empty list of flags for 'cxx' and 'nvcc' when # extra_compile_args is a dict. Otherwise, default torch flags do # not get passed. Necessary when only one of 'cxx' and 'nvcc' is # passed to extra_compile_args in CUDAExtension, i.e. 
# CUDAExtension(..., extra_compile_args={'cxx': [...]}) # or # CUDAExtension(..., extra_compile_args={'nvcc': [...]}) if isinstance(extension.extra_compile_args, dict): for ext in ['cxx', 'nvcc']: if ext not in extension.extra_compile_args: extension.extra_compile_args[ext] = [] self._add_compile_flag(extension, '-DTORCH_API_INCLUDE_EXTENSION_H') # See note [Pybind11 ABI constants] for name in ["COMPILER_TYPE", "STDLIB", "BUILD_ABI"]: val = getattr(torch._C, f"_PYBIND11_{name}") if val is not None and not IS_WINDOWS: self._add_compile_flag(extension, f'-DPYBIND11_{name}="{val}"') self._define_torch_extension_name(extension) self._add_gnu_cpp_abi_flag(extension) if 'nvcc_dlink' in extension.extra_compile_args: assert self.use_ninja, f"With dlink=True, ninja is required to build cuda extension {extension.name}." # Register .cu, .cuh and .hip as valid source extensions. self.compiler.src_extensions += ['.cu', '.cuh', '.hip'] # Save the original _compile method for later. if self.compiler.compiler_type == 'msvc': self.compiler._cpp_extensions += ['.cu', '.cuh'] original_compile = self.compiler.compile original_spawn = self.compiler.spawn else: original_compile = self.compiler._compile def append_std14_if_no_std_present(cflags) -> None: # NVCC does not allow multiple -std to be passed, so we avoid # overriding the option if the user explicitly passed it. cpp_format_prefix = '/{}:' if self.compiler.compiler_type == 'msvc' else '-{}=' cpp_flag_prefix = cpp_format_prefix.format('std') cpp_flag = cpp_flag_prefix + 'c++14' if not any(flag.startswith(cpp_flag_prefix) for flag in cflags): cflags.append(cpp_flag) def unix_cuda_flags(cflags): cflags = (COMMON_NVCC_FLAGS + ['--compiler-options', "'-fPIC'"] + cflags + _get_cuda_arch_flags(cflags)) # NVCC does not allow multiple -ccbin/--compiler-bindir to be passed, so we avoid # overriding the option if the user explicitly passed it. _ccbin = os.getenv("CC") if ( _ccbin is not None and not any([flag.startswith('-ccbin') or flag.startswith('--compiler-bindir') for flag in cflags]) ): cflags.extend(['-ccbin', _ccbin]) return cflags def convert_to_absolute_paths_inplace(paths): # Helper function. See Note [Absolute include_dirs] if paths is not None: for i in range(len(paths)): if not os.path.isabs(paths[i]): paths[i] = os.path.abspath(paths[i]) def unix_wrap_single_compile(obj, src, ext, cc_args, extra_postargs, pp_opts) -> None: # Copy before we make any modifications. cflags = copy.deepcopy(extra_postargs) try: original_compiler = self.compiler.compiler_so if _is_cuda_file(src): nvcc = [_join_rocm_home('bin', 'hipcc') if IS_HIP_EXTENSION else _join_cuda_home('bin', 'nvcc')] self.compiler.set_executable('compiler_so', nvcc) if isinstance(cflags, dict): cflags = cflags['nvcc'] if IS_HIP_EXTENSION: cflags = COMMON_HIPCC_FLAGS + cflags + _get_rocm_arch_flags(cflags) else: cflags = unix_cuda_flags(cflags) elif isinstance(cflags, dict): cflags = cflags['cxx'] if IS_HIP_EXTENSION: cflags = COMMON_HIP_FLAGS + cflags append_std14_if_no_std_present(cflags) original_compile(obj, src, ext, cc_args, cflags, pp_opts) finally: # Put the original compiler back in place. self.compiler.set_executable('compiler_so', original_compiler) def unix_wrap_ninja_compile(sources, output_dir=None, macros=None, include_dirs=None, debug=0, extra_preargs=None, extra_postargs=None, depends=None): r"""Compiles sources by outputting a ninja file and running it.""" # NB: I copied some lines from self.compiler (which is an instance # of distutils.UnixCCompiler). See the following link. 
# https://github.com/python/cpython/blob/f03a8f8d5001963ad5b5b28dbd95497e9cc15596/Lib/distutils/ccompiler.py#L564-L567 # This can be fragile, but a lot of other repos also do this # (see https://github.com/search?q=_setup_compile&type=Code) # so it is probably OK; we'll also get CI signal if/when # we update our python version (which is when distutils can be # upgraded) # Use absolute path for output_dir so that the object file paths # (`objects`) get generated with absolute paths. output_dir = os.path.abspath(output_dir) # See Note [Absolute include_dirs] convert_to_absolute_paths_inplace(self.compiler.include_dirs) _, objects, extra_postargs, pp_opts, _ = \ self.compiler._setup_compile(output_dir, macros, include_dirs, sources, depends, extra_postargs) common_cflags = self.compiler._get_cc_args(pp_opts, debug, extra_preargs) extra_cc_cflags = self.compiler.compiler_so[1:] with_cuda = any(map(_is_cuda_file, sources)) # extra_postargs can be either: # - a dict mapping cxx/nvcc to extra flags # - a list of extra flags. if isinstance(extra_postargs, dict): post_cflags = extra_postargs['cxx'] else: post_cflags = list(extra_postargs) if IS_HIP_EXTENSION: post_cflags = COMMON_HIP_FLAGS + post_cflags append_std14_if_no_std_present(post_cflags) cuda_post_cflags = None cuda_cflags = None if with_cuda: cuda_cflags = common_cflags if isinstance(extra_postargs, dict): cuda_post_cflags = extra_postargs['nvcc'] else: cuda_post_cflags = list(extra_postargs) if IS_HIP_EXTENSION: cuda_post_cflags = cuda_post_cflags + _get_rocm_arch_flags(cuda_post_cflags) cuda_post_cflags = COMMON_HIP_FLAGS + COMMON_HIPCC_FLAGS + cuda_post_cflags else: cuda_post_cflags = unix_cuda_flags(cuda_post_cflags) append_std14_if_no_std_present(cuda_post_cflags) cuda_cflags = [shlex.quote(f) for f in cuda_cflags] cuda_post_cflags = [shlex.quote(f) for f in cuda_post_cflags] if isinstance(extra_postargs, dict) and 'nvcc_dlink' in extra_postargs: cuda_dlink_post_cflags = unix_cuda_flags(extra_postargs['nvcc_dlink']) else: cuda_dlink_post_cflags = None _write_ninja_file_and_compile_objects( sources=sources, objects=objects, cflags=[shlex.quote(f) for f in extra_cc_cflags + common_cflags], post_cflags=[shlex.quote(f) for f in post_cflags], cuda_cflags=cuda_cflags, cuda_post_cflags=cuda_post_cflags, cuda_dlink_post_cflags=cuda_dlink_post_cflags, build_directory=output_dir, verbose=True, with_cuda=with_cuda) # Return *all* object filenames, not just the ones we just built. 
return objects def win_cuda_flags(cflags): return (COMMON_NVCC_FLAGS + cflags + _get_cuda_arch_flags(cflags)) def win_wrap_single_compile(sources, output_dir=None, macros=None, include_dirs=None, debug=0, extra_preargs=None, extra_postargs=None, depends=None): self.cflags = copy.deepcopy(extra_postargs) extra_postargs = None def spawn(cmd): # Using regex to match src, obj and include files src_regex = re.compile('/T(p|c)(.*)') src_list = [ m.group(2) for m in (src_regex.match(elem) for elem in cmd) if m ] obj_regex = re.compile('/Fo(.*)') obj_list = [ m.group(1) for m in (obj_regex.match(elem) for elem in cmd) if m ] include_regex = re.compile(r'((\-|\/)I.*)') include_list = [ m.group(1) for m in (include_regex.match(elem) for elem in cmd) if m ] if len(src_list) >= 1 and len(obj_list) >= 1: src = src_list[0] obj = obj_list[0] if _is_cuda_file(src): nvcc = _join_cuda_home('bin', 'nvcc') if isinstance(self.cflags, dict): cflags = self.cflags['nvcc'] elif isinstance(self.cflags, list): cflags = self.cflags else: cflags = [] cflags = win_cuda_flags(cflags) + ['--use-local-env'] for flag in COMMON_MSVC_FLAGS: cflags = ['-Xcompiler', flag] + cflags for ignore_warning in MSVC_IGNORE_CUDAFE_WARNINGS: cflags = ['-Xcudafe', '--diag_suppress=' + ignore_warning] + cflags cmd = [nvcc, '-c', src, '-o', obj] + include_list + cflags elif isinstance(self.cflags, dict): cflags = COMMON_MSVC_FLAGS + self.cflags['cxx'] cmd += cflags elif isinstance(self.cflags, list): cflags = COMMON_MSVC_FLAGS + self.cflags cmd += cflags return original_spawn(cmd) try: self.compiler.spawn = spawn return original_compile(sources, output_dir, macros, include_dirs, debug, extra_preargs, extra_postargs, depends) finally: self.compiler.spawn = original_spawn def win_wrap_ninja_compile(sources, output_dir=None, macros=None, include_dirs=None, debug=0, extra_preargs=None, extra_postargs=None, depends=None): if not self.compiler.initialized: self.compiler.initialize() output_dir = os.path.abspath(output_dir) # Note [Absolute include_dirs] # Convert relative path in self.compiler.include_dirs to absolute path if any, # For ninja build, the build location is not local, the build happens # in a in script created build folder, relative path lost their correctness. # To be consistent with jit extension, we allow user to enter relative include_dirs # in setuptools.setup, and we convert the relative path to absolute path here convert_to_absolute_paths_inplace(self.compiler.include_dirs) _, objects, extra_postargs, pp_opts, _ = \ self.compiler._setup_compile(output_dir, macros, include_dirs, sources, depends, extra_postargs) common_cflags = extra_preargs or [] cflags = [] if debug: cflags.extend(self.compiler.compile_options_debug) else: cflags.extend(self.compiler.compile_options) common_cflags.extend(COMMON_MSVC_FLAGS) cflags = cflags + common_cflags + pp_opts with_cuda = any(map(_is_cuda_file, sources)) # extra_postargs can be either: # - a dict mapping cxx/nvcc to extra flags # - a list of extra flags. 
if isinstance(extra_postargs, dict): post_cflags = extra_postargs['cxx'] else: post_cflags = list(extra_postargs) append_std14_if_no_std_present(post_cflags) cuda_post_cflags = None cuda_cflags = None if with_cuda: cuda_cflags = ['--use-local-env'] for common_cflag in common_cflags: cuda_cflags.append('-Xcompiler') cuda_cflags.append(common_cflag) for ignore_warning in MSVC_IGNORE_CUDAFE_WARNINGS: cuda_cflags.append('-Xcudafe') cuda_cflags.append('--diag_suppress=' + ignore_warning) cuda_cflags.extend(pp_opts) if isinstance(extra_postargs, dict): cuda_post_cflags = extra_postargs['nvcc'] else: cuda_post_cflags = list(extra_postargs) cuda_post_cflags = win_cuda_flags(cuda_post_cflags) cflags = _nt_quote_args(cflags) post_cflags = _nt_quote_args(post_cflags) if with_cuda: cuda_cflags = _nt_quote_args(cuda_cflags) cuda_post_cflags = _nt_quote_args(cuda_post_cflags) if isinstance(extra_postargs, dict) and 'nvcc_dlink' in extra_postargs: cuda_dlink_post_cflags = win_cuda_flags(extra_postargs['nvcc_dlink']) else: cuda_dlink_post_cflags = None _write_ninja_file_and_compile_objects( sources=sources, objects=objects, cflags=cflags, post_cflags=post_cflags, cuda_cflags=cuda_cflags, cuda_post_cflags=cuda_post_cflags, cuda_dlink_post_cflags=cuda_dlink_post_cflags, build_directory=output_dir, verbose=True, with_cuda=with_cuda) # Return *all* object filenames, not just the ones we just built. return objects # Monkey-patch the _compile or compile method. # https://github.com/python/cpython/blob/dc0284ee8f7a270b6005467f26d8e5773d76e959/Lib/distutils/ccompiler.py#L511 if self.compiler.compiler_type == 'msvc': if self.use_ninja: self.compiler.compile = win_wrap_ninja_compile else: self.compiler.compile = win_wrap_single_compile else: if self.use_ninja: self.compiler.compile = unix_wrap_ninja_compile else: self.compiler._compile = unix_wrap_single_compile build_ext.build_extensions(self) def get_ext_filename(self, ext_name): # Get the original shared library name. For Python 3, this name will be # suffixed with "<SOABI>.so", where <SOABI> will be something like # cpython-37m-x86_64-linux-gnu. ext_filename = super(BuildExtension, self).get_ext_filename(ext_name) # If `no_python_abi_suffix` is `True`, we omit the Python 3 ABI # component. This makes building shared libraries with setuptools that # aren't Python modules nicer. if self.no_python_abi_suffix: # The parts will be e.g. ["my_extension", "cpython-37m-x86_64-linux-gnu", "so"]. ext_filename_parts = ext_filename.split('.') # Omit the second to last element. without_abi = ext_filename_parts[:-2] + ext_filename_parts[-1:] ext_filename = '.'.join(without_abi) return ext_filename def _check_abi(self) -> Tuple[str, TorchVersion]: # On some platforms, like Windows, compiler_cxx is not available. if hasattr(self.compiler, 'compiler_cxx'): compiler = self.compiler.compiler_cxx[0] elif IS_WINDOWS: compiler = os.environ.get('CXX', 'cl') else: compiler = os.environ.get('CXX', 'c++') _, version = get_compiler_abi_compatibility_and_version(compiler) # Warn user if VC env is activated but `DISTUILS_USE_SDK` is not set. if IS_WINDOWS and 'VSCMD_ARG_TGT_ARCH' in os.environ and 'DISTUTILS_USE_SDK' not in os.environ: msg = ('It seems that the VC environment is activated but DISTUTILS_USE_SDK is not set.' 'This may lead to multiple activations of the VC env.' 
'Please set `DISTUTILS_USE_SDK=1` and try again.') raise UserWarning(msg) return compiler, version def _add_compile_flag(self, extension, flag): extension.extra_compile_args = copy.deepcopy(extension.extra_compile_args) if isinstance(extension.extra_compile_args, dict): for args in extension.extra_compile_args.values(): args.append(flag) else: extension.extra_compile_args.append(flag) def _define_torch_extension_name(self, extension): # pybind11 doesn't support dots in the names # so in order to support extensions in the packages # like torch._C, we take the last part of the string # as the library name names = extension.name.split('.') name = names[-1] define = f'-DTORCH_EXTENSION_NAME={name}' self._add_compile_flag(extension, define) def _add_gnu_cpp_abi_flag(self, extension): # use the same CXX ABI as what PyTorch was compiled with self._add_compile_flag(extension, '-D_GLIBCXX_USE_CXX11_ABI=' + str(int(torch._C._GLIBCXX_USE_CXX11_ABI))) def CppExtension(name, sources, *args, **kwargs): r''' Creates a :class:`setuptools.Extension` for C++. Convenience method that creates a :class:`setuptools.Extension` with the bare minimum (but often sufficient) arguments to build a C++ extension. All arguments are forwarded to the :class:`setuptools.Extension` constructor. Example: >>> # xdoctest: +SKIP >>> from setuptools import setup >>> from torch.utils.cpp_extension import BuildExtension, CppExtension >>> setup( ... name='extension', ... ext_modules=[ ... CppExtension( ... name='extension', ... sources=['extension.cpp'], ... extra_compile_args=['-g']), ... ], ... cmdclass={ ... 'build_ext': BuildExtension ... }) ''' include_dirs = kwargs.get('include_dirs', []) include_dirs += include_paths() kwargs['include_dirs'] = include_dirs library_dirs = kwargs.get('library_dirs', []) library_dirs += library_paths() kwargs['library_dirs'] = library_dirs libraries = kwargs.get('libraries', []) libraries.append('c10') libraries.append('torch') libraries.append('torch_cpu') libraries.append('torch_python') kwargs['libraries'] = libraries kwargs['language'] = 'c++' return setuptools.Extension(name, sources, *args, **kwargs) def CUDAExtension(name, sources, *args, **kwargs): r''' Creates a :class:`setuptools.Extension` for CUDA/C++. Convenience method that creates a :class:`setuptools.Extension` with the bare minimum (but often sufficient) arguments to build a CUDA/C++ extension. This includes the CUDA include path, library path and runtime library. All arguments are forwarded to the :class:`setuptools.Extension` constructor. Example: >>> # xdoctest: +SKIP >>> from setuptools import setup >>> from torch.utils.cpp_extension import BuildExtension, CUDAExtension >>> setup( ... name='cuda_extension', ... ext_modules=[ ... CUDAExtension( ... name='cuda_extension', ... sources=['extension.cpp', 'extension_kernel.cu'], ... extra_compile_args={'cxx': ['-g'], ... 'nvcc': ['-O2']}) ... ], ... cmdclass={ ... 'build_ext': BuildExtension ... }) Compute capabilities: By default the extension will be compiled to run on all archs of the cards visible during the building process of the extension, plus PTX. If down the road a new card is installed the extension may need to be recompiled. If a visible card has a compute capability (CC) that's newer than the newest version for which your nvcc can build fully-compiled binaries, Pytorch will make nvcc fall back to building kernels with the newest version of PTX your nvcc does support (see below for details on PTX). 
You can override the default behavior using `TORCH_CUDA_ARCH_LIST` to explicitly specify which CCs you want the extension to support: TORCH_CUDA_ARCH_LIST="6.1 8.6" python build_my_extension.py TORCH_CUDA_ARCH_LIST="5.2 6.0 6.1 7.0 7.5 8.0 8.6+PTX" python build_my_extension.py The +PTX option causes extension kernel binaries to include PTX instructions for the specified CC. PTX is an intermediate representation that allows kernels to runtime-compile for any CC >= the specified CC (for example, 8.6+PTX generates PTX that can runtime-compile for any GPU with CC >= 8.6). This improves your binary's forward compatibility. However, relying on older PTX to provide forward compat by runtime-compiling for newer CCs can modestly reduce performance on those newer CCs. If you know exact CC(s) of the GPUs you want to target, you're always better off specifying them individually. For example, if you want your extension to run on 8.0 and 8.6, "8.0+PTX" would work functionally because it includes PTX that can runtime-compile for 8.6, but "8.0 8.6" would be better. Note that while it's possible to include all supported archs, the more archs get included the slower the building process will be, as it will build a separate kernel image for each arch. Note that CUDA-11.5 nvcc will hit internal compiler error while parsing torch/extension.h on Windows. To workaround the issue, move python binding logic to pure C++ file. Example use: >>> # xdoctest: +SKIP >>> #include <ATen/ATen.h> >>> at::Tensor SigmoidAlphaBlendForwardCuda(....) Instead of: >>> # xdoctest: +SKIP >>> #include <torch/extension.h> >>> torch::Tensor SigmoidAlphaBlendForwardCuda(...) Currently open issue for nvcc bug: https://github.com/pytorch/pytorch/issues/69460 Complete workaround code example: https://github.com/facebookresearch/pytorch3d/commit/cb170ac024a949f1f9614ffe6af1c38d972f7d48 Relocatable device code linking: If you want to reference device symbols across compilation units (across object files), the object files need to be built with `relocatable device code` (-rdc=true or -dc). An exception to this rule is "dynamic parallelism" (nested kernel launches) which is not used a lot anymore. `Relocatable device code` is less optimized so it needs to be used only on object files that need it. Using `-dlto` (Device Link Time Optimization) at the device code compilation step and `dlink` step help reduce the protentional perf degradation of `-rdc`. Note that it needs to be used at both steps to be useful. If you have `rdc` objects you need to have an extra `-dlink` (device linking) step before the CPU symbol linking step. There is also a case where `-dlink` is used without `-rdc`: when an extension is linked against a static lib containing rdc-compiled objects like the [NVSHMEM library](https://developer.nvidia.com/nvshmem). Note: Ninja is required to build a CUDA Extension with RDC linking. Example: >>> # xdoctest: +SKIP >>> CUDAExtension( ... name='cuda_extension', ... sources=['extension.cpp', 'extension_kernel.cu'], ... dlink=True, ... dlink_libraries=["dlink_lib"], ... extra_compile_args={'cxx': ['-g'], ... 
'nvcc': ['-O2', '-rdc=true']}) ''' library_dirs = kwargs.get('library_dirs', []) library_dirs += library_paths(cuda=True) kwargs['library_dirs'] = library_dirs libraries = kwargs.get('libraries', []) libraries.append('c10') libraries.append('torch') libraries.append('torch_cpu') libraries.append('torch_python') if IS_HIP_EXTENSION: assert ROCM_VERSION is not None libraries.append('amdhip64' if ROCM_VERSION >= (3, 5) else 'hip_hcc') libraries.append('c10_hip') libraries.append('torch_hip') else: libraries.append('cudart') libraries.append('c10_cuda') if BUILD_SPLIT_CUDA: libraries.append('torch_cuda_cu') libraries.append('torch_cuda_cpp') else: libraries.append('torch_cuda') kwargs['libraries'] = libraries include_dirs = kwargs.get('include_dirs', []) if IS_HIP_EXTENSION: build_dir = os.getcwd() hipify_result = hipify_python.hipify( project_directory=build_dir, output_directory=build_dir, header_include_dirs=include_dirs, includes=[os.path.join(build_dir, '*')], # limit scope to build_dir only extra_files=[os.path.abspath(s) for s in sources], show_detailed=True, is_pytorch_extension=True, hipify_extra_files_only=True, # don't hipify everything in includes path ) hipified_sources = set() for source in sources: s_abs = os.path.abspath(source) hipified_sources.add(hipify_result[s_abs]["hipified_path"] if (s_abs in hipify_result and hipify_result[s_abs]["hipified_path"] is not None) else s_abs) sources = list(hipified_sources) include_dirs += include_paths(cuda=True) kwargs['include_dirs'] = include_dirs kwargs['language'] = 'c++' dlink_libraries = kwargs.get('dlink_libraries', []) dlink = kwargs.get('dlink', False) or dlink_libraries if dlink: extra_compile_args = kwargs.get('extra_compile_args', {}) extra_compile_args_dlink = extra_compile_args.get('nvcc_dlink', []) extra_compile_args_dlink += ['-dlink'] extra_compile_args_dlink += [f'-L{x}' for x in library_dirs] extra_compile_args_dlink += [f'-l{x}' for x in dlink_libraries] if (torch.version.cuda is not None) and packaging.version.parse(torch.version.cuda) >= packaging.version.parse('11.2'): extra_compile_args_dlink += ['-dlto'] # Device Link Time Optimization started from cuda 11.2 extra_compile_args['nvcc_dlink'] = extra_compile_args_dlink kwargs['extra_compile_args'] = extra_compile_args return setuptools.Extension(name, sources, *args, **kwargs) def include_paths(cuda: bool = False) -> List[str]: ''' Get the include paths required to build a C++ or CUDA extension. Args: cuda: If `True`, includes CUDA-specific include paths. Returns: A list of include path strings. ''' lib_include = os.path.join(_TORCH_PATH, 'include') paths = [ lib_include, # Remove this once torch/torch.h is officially no longer supported for C++ extensions. os.path.join(lib_include, 'torch', 'csrc', 'api', 'include'), # Some internal (old) Torch headers don't properly prefix their includes, # so we need to pass -Itorch/lib/include/TH as well. os.path.join(lib_include, 'TH'), os.path.join(lib_include, 'THC') ] if cuda and IS_HIP_EXTENSION: paths.append(os.path.join(lib_include, 'THH')) paths.append(_join_rocm_home('include')) if MIOPEN_HOME is not None: paths.append(os.path.join(MIOPEN_HOME, 'include')) if HIP_HOME is not None: paths.append(os.path.join(HIP_HOME, 'include')) elif cuda: cuda_home_include = _join_cuda_home('include') # if we have the Debian/Ubuntu packages for cuda, we get /usr as cuda home. 
# but gcc doesn't like having /usr/include passed explicitly if cuda_home_include != '/usr/include': paths.append(cuda_home_include) if CUDNN_HOME is not None: paths.append(os.path.join(CUDNN_HOME, 'include')) return paths def library_paths(cuda: bool = False) -> List[str]: r''' Get the library paths required to build a C++ or CUDA extension. Args: cuda: If `True`, includes CUDA-specific library paths. Returns: A list of library path strings. ''' # We need to link against libtorch.so paths = [TORCH_LIB_PATH] if cuda and IS_HIP_EXTENSION: lib_dir = 'lib' paths.append(_join_rocm_home(lib_dir)) if HIP_HOME is not None: paths.append(os.path.join(HIP_HOME, 'lib')) elif cuda: if IS_WINDOWS: lib_dir = os.path.join('lib', 'x64') else: lib_dir = 'lib64' if (not os.path.exists(_join_cuda_home(lib_dir)) and os.path.exists(_join_cuda_home('lib'))): # 64-bit CUDA may be installed in 'lib' (see e.g. gh-16955) # Note that it's also possible both don't exist (see # _find_cuda_home) - in that case we stay with 'lib64'. lib_dir = 'lib' paths.append(_join_cuda_home(lib_dir)) if CUDNN_HOME is not None: paths.append(os.path.join(CUDNN_HOME, lib_dir)) return paths def load(name, sources: Union[str, List[str]], extra_cflags=None, extra_cuda_cflags=None, extra_ldflags=None, extra_include_paths=None, build_directory=None, verbose=False, with_cuda: Optional[bool] = None, is_python_module=True, is_standalone=False, keep_intermediates=True): r''' Loads a PyTorch C++ extension just-in-time (JIT). To load an extension, a Ninja build file is emitted, which is used to compile the given sources into a dynamic library. This library is subsequently loaded into the current Python process as a module and returned from this function, ready for use. By default, the directory to which the build file is emitted and the resulting library compiled to is ``<tmp>/torch_extensions/<name>``, where ``<tmp>`` is the temporary folder on the current platform and ``<name>`` the name of the extension. This location can be overridden in two ways. First, if the ``TORCH_EXTENSIONS_DIR`` environment variable is set, it replaces ``<tmp>/torch_extensions`` and all extensions will be compiled into subfolders of this directory. Second, if the ``build_directory`` argument to this function is supplied, it overrides the entire path, i.e. the library will be compiled into that folder directly. To compile the sources, the default system compiler (``c++``) is used, which can be overridden by setting the ``CXX`` environment variable. To pass additional arguments to the compilation process, ``extra_cflags`` or ``extra_ldflags`` can be provided. For example, to compile your extension with optimizations, pass ``extra_cflags=['-O3']``. You can also use ``extra_cflags`` to pass further include directories. CUDA support with mixed compilation is provided. Simply pass CUDA source files (``.cu`` or ``.cuh``) along with other sources. Such files will be detected and compiled with nvcc rather than the C++ compiler. This includes passing the CUDA lib64 directory as a library directory, and linking ``cudart``. You can pass additional flags to nvcc via ``extra_cuda_cflags``, just like with ``extra_cflags`` for C++. Various heuristics for finding the CUDA install directory are used, which usually work fine. If not, setting the ``CUDA_HOME`` environment variable is the safest option. Args: name: The name of the extension to build. This MUST be the same as the name of the pybind11 module! sources: A list of relative or absolute paths to C++ source files. 
extra_cflags: optional list of compiler flags to forward to the build. extra_cuda_cflags: optional list of compiler flags to forward to nvcc when building CUDA sources. extra_ldflags: optional list of linker flags to forward to the build. extra_include_paths: optional list of include directories to forward to the build. build_directory: optional path to use as build workspace. verbose: If ``True``, turns on verbose logging of load steps. with_cuda: Determines whether CUDA headers and libraries are added to the build. If set to ``None`` (default), this value is automatically determined based on the existence of ``.cu`` or ``.cuh`` in ``sources``. Set it to `True`` to force CUDA headers and libraries to be included. is_python_module: If ``True`` (default), imports the produced shared library as a Python module. If ``False``, behavior depends on ``is_standalone``. is_standalone: If ``False`` (default) loads the constructed extension into the process as a plain dynamic library. If ``True``, build a standalone executable. Returns: If ``is_python_module`` is ``True``: Returns the loaded PyTorch extension as a Python module. If ``is_python_module`` is ``False`` and ``is_standalone`` is ``False``: Returns nothing. (The shared library is loaded into the process as a side effect.) If ``is_standalone`` is ``True``. Return the path to the executable. (On Windows, TORCH_LIB_PATH is added to the PATH environment variable as a side effect.) Example: >>> # xdoctest: +SKIP >>> from torch.utils.cpp_extension import load >>> module = load( ... name='extension', ... sources=['extension.cpp', 'extension_kernel.cu'], ... extra_cflags=['-O2'], ... verbose=True) ''' return _jit_compile( name, [sources] if isinstance(sources, str) else sources, extra_cflags, extra_cuda_cflags, extra_ldflags, extra_include_paths, build_directory or _get_build_directory(name, verbose), verbose, with_cuda, is_python_module, is_standalone, keep_intermediates=keep_intermediates) def load_inline(name, cpp_sources, cuda_sources=None, functions=None, extra_cflags=None, extra_cuda_cflags=None, extra_ldflags=None, extra_include_paths=None, build_directory=None, verbose=False, with_cuda=None, is_python_module=True, with_pytorch_error_handling=True, keep_intermediates=True): r''' Loads a PyTorch C++ extension just-in-time (JIT) from string sources. This function behaves exactly like :func:`load`, but takes its sources as strings rather than filenames. These strings are stored to files in the build directory, after which the behavior of :func:`load_inline` is identical to :func:`load`. See `the tests <https://github.com/pytorch/pytorch/blob/master/test/test_cpp_extensions_jit.py>`_ for good examples of using this function. Sources may omit two required parts of a typical non-inline C++ extension: the necessary header includes, as well as the (pybind11) binding code. More precisely, strings passed to ``cpp_sources`` are first concatenated into a single ``.cpp`` file. This file is then prepended with ``#include <torch/extension.h>``. Furthermore, if the ``functions`` argument is supplied, bindings will be automatically generated for each function specified. ``functions`` can either be a list of function names, or a dictionary mapping from function names to docstrings. If a list is given, the name of each function is used as its docstring. The sources in ``cuda_sources`` are concatenated into a separate ``.cu`` file and prepended with ``torch/types.h``, ``cuda.h`` and ``cuda_runtime.h`` includes. 
The ``.cpp`` and ``.cu`` files are compiled separately, but ultimately linked into a single library. Note that no bindings are generated for functions in ``cuda_sources`` per se. To bind to a CUDA kernel, you must create a C++ function that calls it, and either declare or define this C++ function in one of the ``cpp_sources`` (and include its name in ``functions``). See :func:`load` for a description of arguments omitted below. Args: cpp_sources: A string, or list of strings, containing C++ source code. cuda_sources: A string, or list of strings, containing CUDA source code. functions: A list of function names for which to generate function bindings. If a dictionary is given, it should map function names to docstrings (which are otherwise just the function names). with_cuda: Determines whether CUDA headers and libraries are added to the build. If set to ``None`` (default), this value is automatically determined based on whether ``cuda_sources`` is provided. Set it to ``True`` to force CUDA headers and libraries to be included. with_pytorch_error_handling: Determines whether pytorch error and warning macros are handled by pytorch instead of pybind. To do this, each function ``foo`` is called via an intermediary ``_safe_foo`` function. This redirection might cause issues in obscure cases of cpp. This flag should be set to ``False`` when this redirect causes issues. Example: >>> from torch.utils.cpp_extension import load_inline >>> source = """ at::Tensor sin_add(at::Tensor x, at::Tensor y) { return x.sin() + y.sin(); } """ >>> module = load_inline(name='inline_extension', ... cpp_sources=[source], ... functions=['sin_add']) .. note:: By default, the Ninja backend uses #CPUS + 2 workers to build the extension. This may use up too many resources on some systems. One can control the number of workers by setting the `MAX_JOBS` environment variable to a non-negative number. ''' build_directory = build_directory or _get_build_directory(name, verbose) if isinstance(cpp_sources, str): cpp_sources = [cpp_sources] cuda_sources = cuda_sources or [] if isinstance(cuda_sources, str): cuda_sources = [cuda_sources] cpp_sources.insert(0, '#include <torch/extension.h>') # If `functions` is supplied, we create the pybind11 bindings for the user. # Here, `functions` is (or becomes, after some processing) a map from # function names to function docstrings. if functions is not None: module_def = [] module_def.append('PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {') if isinstance(functions, str): functions = [functions] if isinstance(functions, list): # Make the function docstring the same as the function name. 
functions = dict((f, f) for f in functions) elif not isinstance(functions, dict): raise ValueError(f"Expected 'functions' to be a list or dict, but was {type(functions)}") for function_name, docstring in functions.items(): if with_pytorch_error_handling: module_def.append( 'm.def("{0}", torch::wrap_pybind_function({0}), "{1}");' .format(function_name, docstring)) else: module_def.append('m.def("{0}", {0}, "{1}");'.format(function_name, docstring)) module_def.append('}') cpp_sources += module_def cpp_source_path = os.path.join(build_directory, 'main.cpp') with open(cpp_source_path, 'w') as cpp_source_file: cpp_source_file.write('\n'.join(cpp_sources)) sources = [cpp_source_path] if cuda_sources: cuda_sources.insert(0, '#include <torch/types.h>') cuda_sources.insert(1, '#include <cuda.h>') cuda_sources.insert(2, '#include <cuda_runtime.h>') cuda_source_path = os.path.join(build_directory, 'cuda.cu') with open(cuda_source_path, 'w') as cuda_source_file: cuda_source_file.write('\n'.join(cuda_sources)) sources.append(cuda_source_path) return _jit_compile( name, sources, extra_cflags, extra_cuda_cflags, extra_ldflags, extra_include_paths, build_directory, verbose, with_cuda, is_python_module, is_standalone=False, keep_intermediates=keep_intermediates) def _jit_compile(name, sources, extra_cflags, extra_cuda_cflags, extra_ldflags, extra_include_paths, build_directory: str, verbose: bool, with_cuda: Optional[bool], is_python_module, is_standalone, keep_intermediates=True) -> None: if is_python_module and is_standalone: raise ValueError("`is_python_module` and `is_standalone` are mutually exclusive.") if with_cuda is None: with_cuda = any(map(_is_cuda_file, sources)) with_cudnn = any(['cudnn' in f for f in extra_ldflags or []]) old_version = JIT_EXTENSION_VERSIONER.get_version(name) version = JIT_EXTENSION_VERSIONER.bump_version_if_changed( name, sources, build_arguments=[extra_cflags, extra_cuda_cflags, extra_ldflags, extra_include_paths], build_directory=build_directory, with_cuda=with_cuda, is_python_module=is_python_module, is_standalone=is_standalone, ) if version > 0: if version != old_version and verbose: print(f'The input conditions for extension module {name} have changed. 
' + f'Bumping to version {version} and re-building as {name}_v{version}...', file=sys.stderr) name = f'{name}_v{version}' if version != old_version: baton = FileBaton(os.path.join(build_directory, 'lock')) if baton.try_acquire(): try: with GeneratedFileCleaner(keep_intermediates=keep_intermediates) as clean_ctx: if IS_HIP_EXTENSION and (with_cuda or with_cudnn): hipify_result = hipify_python.hipify( project_directory=build_directory, output_directory=build_directory, header_include_dirs=(extra_include_paths if extra_include_paths is not None else []), extra_files=[os.path.abspath(s) for s in sources], ignores=[_join_rocm_home('*'), os.path.join(_TORCH_PATH, '*')], # no need to hipify ROCm or PyTorch headers show_detailed=verbose, show_progress=verbose, is_pytorch_extension=True, clean_ctx=clean_ctx ) hipified_sources = set() for source in sources: s_abs = os.path.abspath(source) hipified_sources.add(hipify_result[s_abs]["hipified_path"] if s_abs in hipify_result else s_abs) sources = list(hipified_sources) _write_ninja_file_and_build_library( name=name, sources=sources, extra_cflags=extra_cflags or [], extra_cuda_cflags=extra_cuda_cflags or [], extra_ldflags=extra_ldflags or [], extra_include_paths=extra_include_paths or [], build_directory=build_directory, verbose=verbose, with_cuda=with_cuda, is_standalone=is_standalone) finally: baton.release() else: baton.wait() elif verbose: print('No modifications detected for re-loaded extension ' f'module {name}, skipping build step...', file=sys.stderr) if verbose: print(f'Loading extension module {name}...', file=sys.stderr) if is_standalone: return _get_exec_path(name, build_directory) return _import_module_from_library(name, build_directory, is_python_module) def _write_ninja_file_and_compile_objects( sources: List[str], objects, cflags, post_cflags, cuda_cflags, cuda_post_cflags, cuda_dlink_post_cflags, build_directory: str, verbose: bool, with_cuda: Optional[bool]) -> None: verify_ninja_availability() if IS_WINDOWS: compiler = os.environ.get('CXX', 'cl') else: compiler = os.environ.get('CXX', 'c++') get_compiler_abi_compatibility_and_version(compiler) if with_cuda is None: with_cuda = any(map(_is_cuda_file, sources)) build_file_path = os.path.join(build_directory, 'build.ninja') if verbose: print(f'Emitting ninja build file {build_file_path}...', file=sys.stderr) _write_ninja_file( path=build_file_path, cflags=cflags, post_cflags=post_cflags, cuda_cflags=cuda_cflags, cuda_post_cflags=cuda_post_cflags, cuda_dlink_post_cflags=cuda_dlink_post_cflags, sources=sources, objects=objects, ldflags=None, library_target=None, with_cuda=with_cuda) if verbose: print('Compiling objects...', file=sys.stderr) _run_ninja_build( build_directory, verbose, # It would be better if we could tell users the name of the extension # that failed to build but there isn't a good way to get it here. 
error_prefix='Error compiling objects for extension') def _write_ninja_file_and_build_library( name, sources: List[str], extra_cflags, extra_cuda_cflags, extra_ldflags, extra_include_paths, build_directory: str, verbose: bool, with_cuda: Optional[bool], is_standalone: bool = False) -> None: verify_ninja_availability() if IS_WINDOWS: compiler = os.environ.get('CXX', 'cl') else: compiler = os.environ.get('CXX', 'c++') get_compiler_abi_compatibility_and_version(compiler) if with_cuda is None: with_cuda = any(map(_is_cuda_file, sources)) extra_ldflags = _prepare_ldflags( extra_ldflags or [], with_cuda, verbose, is_standalone) build_file_path = os.path.join(build_directory, 'build.ninja') if verbose: print(f'Emitting ninja build file {build_file_path}...', file=sys.stderr) # NOTE: Emitting a new ninja build file does not cause re-compilation if # the sources did not change, so it's ok to re-emit (and it's fast). _write_ninja_file_to_build_library( path=build_file_path, name=name, sources=sources, extra_cflags=extra_cflags or [], extra_cuda_cflags=extra_cuda_cflags or [], extra_ldflags=extra_ldflags or [], extra_include_paths=extra_include_paths or [], with_cuda=with_cuda, is_standalone=is_standalone) if verbose: print(f'Building extension module {name}...', file=sys.stderr) _run_ninja_build( build_directory, verbose, error_prefix=f"Error building extension '{name}'") def is_ninja_available(): r''' Returns ``True`` if the `ninja <https://ninja-build.org/>`_ build system is available on the system, ``False`` otherwise. ''' try: subprocess.check_output('ninja --version'.split()) except Exception: return False else: return True def verify_ninja_availability(): r''' Raises ``RuntimeError`` if `ninja <https://ninja-build.org/>`_ build system is not available on the system, does nothing otherwise. ''' if not is_ninja_available(): raise RuntimeError("Ninja is required to load C++ extensions") def _prepare_ldflags(extra_ldflags, with_cuda, verbose, is_standalone): if IS_WINDOWS: python_path = os.path.dirname(sys.executable) python_lib_path = os.path.join(python_path, 'libs') extra_ldflags.append('c10.lib') if with_cuda: extra_ldflags.append('c10_cuda.lib') extra_ldflags.append('torch_cpu.lib') if BUILD_SPLIT_CUDA and with_cuda: extra_ldflags.append('torch_cuda_cu.lib') # See [Note about _torch_cuda_cu_linker_symbol_op and torch_cuda_cu] in native_functions.yaml extra_ldflags.append('-INCLUDE:?_torch_cuda_cu_linker_symbol_op_cuda@native@at@@YA?AVTensor@2@AEBV32@@Z') extra_ldflags.append('torch_cuda_cpp.lib') # /INCLUDE is used to ensure torch_cuda_cpp is linked against in a project that relies on it. # Related issue: https://github.com/pytorch/pytorch/issues/31611 extra_ldflags.append('-INCLUDE:?warp_size@cuda@at@@YAHXZ') elif with_cuda: extra_ldflags.append('torch_cuda.lib') # /INCLUDE is used to ensure torch_cuda is linked against in a project that relies on it. 
# Related issue: https://github.com/pytorch/pytorch/issues/31611 extra_ldflags.append('-INCLUDE:?warp_size@cuda@at@@YAHXZ') extra_ldflags.append('torch.lib') extra_ldflags.append(f'/LIBPATH:{TORCH_LIB_PATH}') if not is_standalone: extra_ldflags.append('torch_python.lib') extra_ldflags.append(f'/LIBPATH:{python_lib_path}') else: extra_ldflags.append(f'-L{TORCH_LIB_PATH}') extra_ldflags.append('-lc10') if with_cuda: extra_ldflags.append('-lc10_hip' if IS_HIP_EXTENSION else '-lc10_cuda') extra_ldflags.append('-ltorch_cpu') if BUILD_SPLIT_CUDA and with_cuda: extra_ldflags.append('-ltorch_hip' if IS_HIP_EXTENSION else '-ltorch_cuda_cu -ltorch_cuda_cpp') elif with_cuda: extra_ldflags.append('-ltorch_hip' if IS_HIP_EXTENSION else '-ltorch_cuda') extra_ldflags.append('-ltorch') if not is_standalone: extra_ldflags.append('-ltorch_python') if is_standalone and "TBB" in torch.__config__.parallel_info(): extra_ldflags.append('-ltbb') if is_standalone: extra_ldflags.append(f"-Wl,-rpath,{TORCH_LIB_PATH}") if with_cuda: if verbose: print('Detected CUDA files, patching ldflags', file=sys.stderr) if IS_WINDOWS: extra_ldflags.append(f'/LIBPATH:{_join_cuda_home("lib", "x64")}') extra_ldflags.append('cudart.lib') if CUDNN_HOME is not None: extra_ldflags.append(os.path.join(CUDNN_HOME, "lib", "x64")) elif not IS_HIP_EXTENSION: extra_ldflags.append(f'-L{_join_cuda_home("lib64")}') extra_ldflags.append('-lcudart') if CUDNN_HOME is not None: extra_ldflags.append(f'-L{os.path.join(CUDNN_HOME, "lib64")}') elif IS_HIP_EXTENSION: assert ROCM_VERSION is not None extra_ldflags.append(f'-L{_join_rocm_home("lib")}') extra_ldflags.append('-lamdhip64' if ROCM_VERSION >= (3, 5) else '-lhip_hcc') return extra_ldflags def _get_cuda_arch_flags(cflags: Optional[List[str]] = None) -> List[str]: r''' Determine CUDA arch flags to use. For an arch, say "6.1", the added compile flag will be ``-gencode=arch=compute_61,code=sm_61``. For an added "+PTX", an additional ``-gencode=arch=compute_xx,code=compute_xx`` is added. See select_compute_arch.cmake for corresponding named and supported arches when building with CMake. ''' # If cflags is given, there may already be user-provided arch flags in it # (from `extra_compile_args`) if cflags is not None: for flag in cflags: if 'arch' in flag: return [] # Note: keep combined names ("arch1+arch2") above single names, otherwise # string replacement may not do the right thing named_arches = collections.OrderedDict([ ('Kepler+Tesla', '3.7'), ('Kepler', '3.5+PTX'), ('Maxwell+Tegra', '5.3'), ('Maxwell', '5.0;5.2+PTX'), ('Pascal', '6.0;6.1+PTX'), ('Volta', '7.0+PTX'), ('Turing', '7.5+PTX'), ('Ampere', '8.0;8.6+PTX'), ]) supported_arches = ['3.5', '3.7', '5.0', '5.2', '5.3', '6.0', '6.1', '6.2', '7.0', '7.2', '7.5', '8.0', '8.6'] valid_arch_strings = supported_arches + [s + "+PTX" for s in supported_arches] # The default is sm_30 for CUDA 9.x and 10.x # First check for an env var (same as used by the main setup.py) # Can be one or more architectures, e.g. 
"6.1" or "3.5;5.2;6.0;6.1;7.0+PTX" # See cmake/Modules_CUDA_fix/upstream/FindCUDA/select_compute_arch.cmake _arch_list = os.environ.get('TORCH_CUDA_ARCH_LIST', None) # If not given, determine what's best for the GPU / CUDA version that can be found if not _arch_list: arch_list = [] # the assumption is that the extension should run on any of the currently visible cards, # which could be of different types - therefore all archs for visible cards should be included for i in range(torch.cuda.device_count()): capability = torch.cuda.get_device_capability(i) supported_sm = [int(arch.split('_')[1]) for arch in torch.cuda.get_arch_list() if 'sm_' in arch] max_supported_sm = max((sm // 10, sm % 10) for sm in supported_sm) # Capability of the device may be higher than what's supported by the user's # NVCC, causing compilation error. User's NVCC is expected to match the one # used to build pytorch, so we use the maximum supported capability of pytorch # to clamp the capability. capability = min(max_supported_sm, capability) arch = f'{capability[0]}.{capability[1]}' if arch not in arch_list: arch_list.append(arch) arch_list = sorted(arch_list) arch_list[-1] += '+PTX' else: # Deal with lists that are ' ' separated (only deal with ';' after) _arch_list = _arch_list.replace(' ', ';') # Expand named arches for named_arch, archval in named_arches.items(): _arch_list = _arch_list.replace(named_arch, archval) arch_list = _arch_list.split(';') flags = [] for arch in arch_list: if arch not in valid_arch_strings: raise ValueError(f"Unknown CUDA arch ({arch}) or GPU not supported") else: num = arch[0] + arch[2] flags.append(f'-gencode=arch=compute_{num},code=sm_{num}') if arch.endswith('+PTX'): flags.append(f'-gencode=arch=compute_{num},code=compute_{num}') return sorted(list(set(flags))) def _get_rocm_arch_flags(cflags: Optional[List[str]] = None) -> List[str]: # If cflags is given, there may already be user-provided arch flags in it # (from `extra_compile_args`) if cflags is not None: for flag in cflags: if 'amdgpu-target' in flag: return ['-fno-gpu-rdc'] # Use same defaults as used for building PyTorch # Allow env var to override, just like during initial cmake build. _archs = os.environ.get('PYTORCH_ROCM_ARCH', None) if not _archs: archFlags = torch._C._cuda_getArchFlags() if archFlags: archs = archFlags.split() else: archs = [] else: archs = _archs.replace(' ', ';').split(';') flags = ['--amdgpu-target=%s' % arch for arch in archs] flags += ['-fno-gpu-rdc'] return flags def _get_build_directory(name: str, verbose: bool) -> str: root_extensions_directory = os.environ.get('TORCH_EXTENSIONS_DIR') if root_extensions_directory is None: root_extensions_directory = get_default_build_root() cu_str = ('cpu' if torch.version.cuda is None else f'cu{torch.version.cuda.replace(".", "")}') # type: ignore[attr-defined] python_version = f'py{sys.version_info.major}{sys.version_info.minor}' build_folder = f'{python_version}_{cu_str}' root_extensions_directory = os.path.join( root_extensions_directory, build_folder) if verbose: print(f'Using {root_extensions_directory} as PyTorch extensions root...', file=sys.stderr) build_directory = os.path.join(root_extensions_directory, name) if not os.path.exists(build_directory): if verbose: print(f'Creating extension directory {build_directory}...', file=sys.stderr) # This is like mkdir -p, i.e. will also create parent directories. 
os.makedirs(build_directory, exist_ok=True) return build_directory def _get_num_workers(verbose: bool) -> Optional[int]: max_jobs = os.environ.get('MAX_JOBS') if max_jobs is not None and max_jobs.isdigit(): if verbose: print(f'Using envvar MAX_JOBS ({max_jobs}) as the number of workers...', file=sys.stderr) return int(max_jobs) if verbose: print('Allowing ninja to set a default number of workers... ' '(overridable by setting the environment variable MAX_JOBS=N)', file=sys.stderr) return None def _run_ninja_build(build_directory: str, verbose: bool, error_prefix: str) -> None: command = ['ninja', '-v'] num_workers = _get_num_workers(verbose) if num_workers is not None: command.extend(['-j', str(num_workers)]) env = os.environ.copy() # Try to activate the vc env for the users if IS_WINDOWS and 'VSCMD_ARG_TGT_ARCH' not in env: from setuptools import distutils plat_name = distutils.util.get_platform() plat_spec = PLAT_TO_VCVARS[plat_name] vc_env = distutils._msvccompiler._get_vc_env(plat_spec) vc_env = {k.upper(): v for k, v in vc_env.items()} for k, v in env.items(): uk = k.upper() if uk not in vc_env: vc_env[uk] = v env = vc_env try: sys.stdout.flush() sys.stderr.flush() # Warning: don't pass stdout=None to subprocess.run to get output. # subprocess.run assumes that sys.__stdout__ has not been modified and # attempts to write to it by default. However, when we call _run_ninja_build # from ahead-of-time cpp extensions, the following happens: # 1) If the stdout encoding is not utf-8, setuptools detachs __stdout__. # https://github.com/pypa/setuptools/blob/7e97def47723303fafabe48b22168bbc11bb4821/setuptools/dist.py#L1110 # (it probably shouldn't do this) # 2) subprocess.run (on POSIX, with no stdout override) relies on # __stdout__ not being detached: # https://github.com/python/cpython/blob/c352e6c7446c894b13643f538db312092b351789/Lib/subprocess.py#L1214 # To work around this, we pass in the fileno directly and hope that # it is valid. stdout_fileno = 1 subprocess.run( command, stdout=stdout_fileno if verbose else subprocess.PIPE, stderr=subprocess.STDOUT, cwd=build_directory, check=True, env=env) except subprocess.CalledProcessError as e: # Python 2 and 3 compatible way of getting the error object. _, error, _ = sys.exc_info() # error.output contains the stdout and stderr of the build attempt. 
message = error_prefix # `error` is a CalledProcessError (which has an `ouput`) attribute, but # mypy thinks it's Optional[BaseException] and doesn't narrow if hasattr(error, 'output') and error.output: # type: ignore[union-attr] message += f": {error.output.decode(*SUBPROCESS_DECODE_ARGS)}" # type: ignore[union-attr] raise RuntimeError(message) from e def _get_exec_path(module_name, path): if IS_WINDOWS and TORCH_LIB_PATH not in os.getenv('PATH', '').split(';'): torch_lib_in_path = any( os.path.exists(p) and os.path.samefile(p, TORCH_LIB_PATH) for p in os.getenv('PATH', '').split(';') ) if not torch_lib_in_path: os.environ['PATH'] = f"{TORCH_LIB_PATH};{os.getenv('PATH', '')}" return os.path.join(path, f'{module_name}{EXEC_EXT}') def _import_module_from_library(module_name, path, is_python_module): filepath = os.path.join(path, f"{module_name}{LIB_EXT}") if is_python_module: # https://stackoverflow.com/questions/67631/how-to-import-a-module-given-the-full-path spec = importlib.util.spec_from_file_location(module_name, filepath) assert spec is not None module = importlib.util.module_from_spec(spec) assert isinstance(spec.loader, importlib.abc.Loader) spec.loader.exec_module(module) return module else: torch.ops.load_library(filepath) def _write_ninja_file_to_build_library(path, name, sources, extra_cflags, extra_cuda_cflags, extra_ldflags, extra_include_paths, with_cuda, is_standalone) -> None: extra_cflags = [flag.strip() for flag in extra_cflags] extra_cuda_cflags = [flag.strip() for flag in extra_cuda_cflags] extra_ldflags = [flag.strip() for flag in extra_ldflags] extra_include_paths = [flag.strip() for flag in extra_include_paths] # Turn into absolute paths so we can emit them into the ninja build # file wherever it is. user_includes = [os.path.abspath(file) for file in extra_include_paths] # include_paths() gives us the location of torch/extension.h system_includes = include_paths(with_cuda) # sysconfig.get_path('include') gives us the location of Python.h # Explicitly specify 'posix_prefix' scheme on non-Windows platforms to workaround error on some MacOS # installations where default `get_path` points to non-existing `/Library/Python/M.m/include` folder python_include_path = sysconfig.get_path('include', scheme='nt' if IS_WINDOWS else 'posix_prefix') if python_include_path is not None: system_includes.append(python_include_path) # Windows does not understand `-isystem`. if IS_WINDOWS: user_includes += system_includes system_includes.clear() common_cflags = [] if not is_standalone: common_cflags.append(f'-DTORCH_EXTENSION_NAME={name}') common_cflags.append('-DTORCH_API_INCLUDE_EXTENSION_H') # Note [Pybind11 ABI constants] # # Pybind11 before 2.4 used to build an ABI strings using the following pattern: # f"__pybind11_internals_v{PYBIND11_INTERNALS_VERSION}{PYBIND11_INTERNALS_KIND}{PYBIND11_BUILD_TYPE}__" # Since 2.4 compier type, stdlib and build abi parameters are also encoded like this: # f"__pybind11_internals_v{PYBIND11_INTERNALS_VERSION}{PYBIND11_INTERNALS_KIND}{PYBIND11_COMPILER_TYPE}{PYBIND11_STDLIB}{PYBIND11_BUILD_ABI}{PYBIND11_BUILD_TYPE}__" # # This was done in order to further narrow down the chances of compiler ABI incompatibility # that can cause a hard to debug segfaults. 
# For PyTorch extensions we want to relax those restrictions and pass compiler, stdlib and abi properties # captured during PyTorch native library compilation in torch/csrc/Module.cpp for pname in ["COMPILER_TYPE", "STDLIB", "BUILD_ABI"]: pval = getattr(torch._C, f"_PYBIND11_{pname}") if pval is not None and not IS_WINDOWS: common_cflags.append(f'-DPYBIND11_{pname}=\\"{pval}\\"') common_cflags += [f'-I{include}' for include in user_includes] common_cflags += [f'-isystem {include}' for include in system_includes] common_cflags += ['-D_GLIBCXX_USE_CXX11_ABI=' + str(int(torch._C._GLIBCXX_USE_CXX11_ABI))] if IS_WINDOWS: cflags = common_cflags + COMMON_MSVC_FLAGS + extra_cflags cflags = _nt_quote_args(cflags) else: cflags = common_cflags + ['-fPIC', '-std=c++14'] + extra_cflags if with_cuda and IS_HIP_EXTENSION: cuda_flags = ['-DWITH_HIP'] + cflags + COMMON_HIP_FLAGS + COMMON_HIPCC_FLAGS cuda_flags += extra_cuda_cflags cuda_flags += _get_rocm_arch_flags(cuda_flags) elif with_cuda: cuda_flags = common_cflags + COMMON_NVCC_FLAGS + _get_cuda_arch_flags() if IS_WINDOWS: for flag in COMMON_MSVC_FLAGS: cuda_flags = ['-Xcompiler', flag] + cuda_flags for ignore_warning in MSVC_IGNORE_CUDAFE_WARNINGS: cuda_flags = ['-Xcudafe', '--diag_suppress=' + ignore_warning] + cuda_flags cuda_flags = _nt_quote_args(cuda_flags) cuda_flags += _nt_quote_args(extra_cuda_cflags) else: cuda_flags += ['--compiler-options', "'-fPIC'"] cuda_flags += extra_cuda_cflags if not any(flag.startswith('-std=') for flag in cuda_flags): cuda_flags.append('-std=c++14') if os.getenv("CC") is not None: cuda_flags = ['-ccbin', os.getenv("CC")] + cuda_flags else: cuda_flags = None def object_file_path(source_file: str) -> str: # '/path/to/file.cpp' -> 'file' file_name = os.path.splitext(os.path.basename(source_file))[0] if _is_cuda_file(source_file) and with_cuda: # Use a different object filename in case a C++ and CUDA file have # the same filename but different extension (.cpp vs. .cu). target = f'{file_name}.cuda.o' else: target = f'{file_name}.o' return target objects = [object_file_path(src) for src in sources] ldflags = ([] if is_standalone else [SHARED_FLAG]) + extra_ldflags # The darwin linker needs explicit consent to ignore unresolved symbols. if IS_MACOS: ldflags.append('-undefined dynamic_lookup') elif IS_WINDOWS: ldflags = _nt_quote_args(ldflags) ext = EXEC_EXT if is_standalone else LIB_EXT library_target = f'{name}{ext}' _write_ninja_file( path=path, cflags=cflags, post_cflags=None, cuda_cflags=cuda_flags, cuda_post_cflags=None, cuda_dlink_post_cflags=None, sources=sources, objects=objects, ldflags=ldflags, library_target=library_target, with_cuda=with_cuda) def _write_ninja_file(path, cflags, post_cflags, cuda_cflags, cuda_post_cflags, cuda_dlink_post_cflags, sources, objects, ldflags, library_target, with_cuda) -> None: r"""Write a ninja file that does the desired compiling and linking. `path`: Where to write this file `cflags`: list of flags to pass to $cxx. Can be None. `post_cflags`: list of flags to append to the $cxx invocation. Can be None. `cuda_cflags`: list of flags to pass to $nvcc. Can be None. `cuda_postflags`: list of flags to append to the $nvcc invocation. Can be None. `sources`: list of paths to source files `objects`: list of desired paths to objects, one per source. `ldflags`: list of flags to pass to linker. Can be None. `library_target`: Name of the output library. Can be None; in that case, we do no linking. `with_cuda`: If we should be compiling with CUDA. 
""" def sanitize_flags(flags): if flags is None: return [] else: return [flag.strip() for flag in flags] cflags = sanitize_flags(cflags) post_cflags = sanitize_flags(post_cflags) cuda_cflags = sanitize_flags(cuda_cflags) cuda_post_cflags = sanitize_flags(cuda_post_cflags) cuda_dlink_post_cflags = sanitize_flags(cuda_dlink_post_cflags) ldflags = sanitize_flags(ldflags) # Sanity checks... assert len(sources) == len(objects) assert len(sources) > 0 if IS_WINDOWS: compiler = os.environ.get('CXX', 'cl') else: compiler = os.environ.get('CXX', 'c++') # Version 1.3 is required for the `deps` directive. config = ['ninja_required_version = 1.3'] config.append(f'cxx = {compiler}') if with_cuda or cuda_dlink_post_cflags: if IS_HIP_EXTENSION: nvcc = _join_rocm_home('bin', 'hipcc') else: nvcc = _join_cuda_home('bin', 'nvcc') config.append(f'nvcc = {nvcc}') if IS_HIP_EXTENSION: post_cflags = COMMON_HIP_FLAGS + post_cflags flags = [f'cflags = {" ".join(cflags)}'] flags.append(f'post_cflags = {" ".join(post_cflags)}') if with_cuda: flags.append(f'cuda_cflags = {" ".join(cuda_cflags)}') flags.append(f'cuda_post_cflags = {" ".join(cuda_post_cflags)}') flags.append(f'cuda_dlink_post_cflags = {" ".join(cuda_dlink_post_cflags)}') flags.append(f'ldflags = {" ".join(ldflags)}') # Turn into absolute paths so we can emit them into the ninja build # file wherever it is. sources = [os.path.abspath(file) for file in sources] # See https://ninja-build.org/build.ninja.html for reference. compile_rule = ['rule compile'] if IS_WINDOWS: compile_rule.append( ' command = cl /showIncludes $cflags -c $in /Fo$out $post_cflags') compile_rule.append(' deps = msvc') else: compile_rule.append( ' command = $cxx -MMD -MF $out.d $cflags -c $in -o $out $post_cflags') compile_rule.append(' depfile = $out.d') compile_rule.append(' deps = gcc') if with_cuda: cuda_compile_rule = ['rule cuda_compile'] nvcc_gendeps = '' # --generate-dependencies-with-compile was added in CUDA 10.2. # Compilation will work on earlier CUDA versions but header file # dependencies are not correctly computed. required_cuda_version = packaging.version.parse('10.2') has_cuda_version = torch.version.cuda is not None if has_cuda_version and packaging.version.parse(torch.version.cuda) >= required_cuda_version: cuda_compile_rule.append(' depfile = $out.d') cuda_compile_rule.append(' deps = gcc') # Note: non-system deps with nvcc are only supported # on Linux so use --generate-dependencies-with-compile # to make this work on Windows too. if IS_WINDOWS: nvcc_gendeps = '--generate-dependencies-with-compile --dependency-output $out.d' cuda_compile_rule.append( f' command = $nvcc {nvcc_gendeps} $cuda_cflags -c $in -o $out $cuda_post_cflags') # Emit one build rule per source to enable incremental build. 
build = [] for source_file, object_file in zip(sources, objects): is_cuda_source = _is_cuda_file(source_file) and with_cuda rule = 'cuda_compile' if is_cuda_source else 'compile' if IS_WINDOWS: source_file = source_file.replace(':', '$:') object_file = object_file.replace(':', '$:') source_file = source_file.replace(" ", "$ ") object_file = object_file.replace(" ", "$ ") build.append(f'build {object_file}: {rule} {source_file}') if cuda_dlink_post_cflags: devlink_out = os.path.join(os.path.dirname(objects[0]), 'dlink.o') devlink_rule = ['rule cuda_devlink'] devlink_rule.append(' command = $nvcc $in -o $out $cuda_dlink_post_cflags') devlink = [f'build {devlink_out}: cuda_devlink {" ".join(objects)}'] objects += [devlink_out] else: devlink_rule, devlink = [], [] if library_target is not None: link_rule = ['rule link'] if IS_WINDOWS: cl_paths = subprocess.check_output(['where', 'cl']).decode(*SUBPROCESS_DECODE_ARGS).split('\r\n') if len(cl_paths) >= 1: cl_path = os.path.dirname(cl_paths[0]).replace(':', '$:') else: raise RuntimeError("MSVC is required to load C++ extensions") link_rule.append(f' command = "{cl_path}/link.exe" $in /nologo $ldflags /out:$out') else: link_rule.append(' command = $cxx $in $ldflags -o $out') link = [f'build {library_target}: link {" ".join(objects)}'] default = [f'default {library_target}'] else: link_rule, link, default = [], [], [] # 'Blocks' should be separated by newlines, for visual benefit. blocks = [config, flags, compile_rule] if with_cuda: blocks.append(cuda_compile_rule) blocks += [devlink_rule, link_rule, build, devlink, link, default] with open(path, 'w') as build_file: for block in blocks: lines = '\n'.join(block) build_file.write(f'{lines}\n\n') def _join_cuda_home(*paths) -> str: r''' Joins paths with CUDA_HOME, or raises an error if it CUDA_HOME is not set. This is basically a lazy way of raising an error for missing $CUDA_HOME only once we need to get any CUDA-specific path. ''' if CUDA_HOME is None: raise EnvironmentError('CUDA_HOME environment variable is not set. ' 'Please set it to your CUDA install root.') return os.path.join(CUDA_HOME, *paths) def _is_cuda_file(path: str) -> bool: valid_ext = ['.cu', '.cuh'] if IS_HIP_EXTENSION: valid_ext.append('.hip') return os.path.splitext(path)[1] in valid_ext
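

# A minimal sketch (not part of the upstream module) showing how the arch handling
# described in the CUDAExtension docstring maps onto -gencode flags. Setting
# TORCH_CUDA_ARCH_LIST and calling _get_cuda_arch_flags() does not require a GPU;
# the expected output below is derived from the flag construction above, and the
# exact ordering is an assumption about how sorted() orders the flag strings.
if __name__ == "__main__":
    import os

    os.environ["TORCH_CUDA_ARCH_LIST"] = "8.0 8.6+PTX"
    # "8.0" -> a binary (sm_80) target; "8.6+PTX" -> a binary (sm_86) target plus
    # a PTX (compute_86) target for forward compatibility with newer GPUs.
    print(_get_cuda_arch_flags())
    # Roughly: ['-gencode=arch=compute_80,code=sm_80',
    #           '-gencode=arch=compute_86,code=compute_86',
    #           '-gencode=arch=compute_86,code=sm_86']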
pytorch-master
torch/utils/cpp_extension.py
""" This module contains utility method for mobile model optimization and lint. """ import torch from enum import Enum from torch._C import MobileOptimizerType from typing import Optional, Set, List, AnyStr class LintCode(Enum): BUNDLED_INPUT = 1 REQUIRES_GRAD = 2 DROPOUT = 3 BATCHNORM = 4 def optimize_for_mobile( script_module: torch.jit.ScriptModule, optimization_blocklist: Optional[Set[MobileOptimizerType]] = None, preserved_methods: Optional[List[AnyStr]] = None, backend: str = 'CPU') -> torch.jit.RecursiveScriptModule: """ Args: script_module: An instance of torch script module with type of ScriptModule. optimization_blocklist: A set with type of MobileOptimizerType. When set is not passed, optimization method will run all the optimizer pass; otherwise, optimizer method will run the optimization pass that is not included inside optimization_blocklist. preserved_methods: A list of methods that needed to be preserved when freeze_module pass is invoked backend: Device type to use for running the result model ('CPU'(default), 'Vulkan' or 'Metal'). Returns: A new optimized torch script module """ if not isinstance(script_module, torch.jit.ScriptModule): raise TypeError( 'Got {}, but ScriptModule is expected.'.format(type(script_module))) if optimization_blocklist is None: optimization_blocklist = set() if preserved_methods is None: preserved_methods = [] # Convert potential byte arrays into strings (if there is any) to pass type checking # Here we use a new name as assigning it back to preserved_methods will invoke # mypy errors (i.e. List[AnyStr] = List[str]) preserved_methods_str: List[str] = [str(method) for method in preserved_methods] bundled_inputs_attributes = _get_bundled_inputs_preserved_attributes(script_module, preserved_methods_str) if all([hasattr(script_module, method) for method in bundled_inputs_attributes]): preserved_methods_str = list(set(preserved_methods_str + bundled_inputs_attributes)) non_exist_methods = [] for method in preserved_methods_str: if not hasattr(script_module, method): non_exist_methods.append(method) if non_exist_methods: raise AttributeError( 'The following methods to preserve do not exist in script_module: {}' .format(', '.join(non_exist_methods))) backend = backend.lower() if backend == 'cpu': optimized_cpp_module = torch._C._jit_pass_optimize_for_mobile( script_module._c, optimization_blocklist, preserved_methods_str) elif backend == 'vulkan': optimized_cpp_module = torch._C._jit_pass_vulkan_optimize_for_mobile(script_module._c, preserved_methods_str) elif backend == 'metal': optimized_cpp_module = torch._C._jit_pass_metal_optimize_for_mobile(script_module._c, preserved_methods_str) else: raise TypeError("Unknown backend, must be one of 'CPU', 'Vulkan' or 'Metal'") return torch.jit._recursive.wrap_cpp_module(optimized_cpp_module) def generate_mobile_module_lints(script_module: torch.jit.ScriptModule): """ Args: script_module: An instance of torch script module with type of ScriptModule Returns: lint_map: A list of dictionary that contains modules lints """ if not isinstance(script_module, torch.jit.ScriptModule): raise TypeError( 'Got {}, but ScriptModule is expected.'.format(type(script_module))) lint_list = [] if not hasattr(script_module, "_generate_bundled_inputs_for_forward"): lint_list.append({"name": LintCode.BUNDLED_INPUT.name, "message": "No bundled input for forward, please add bundled inputs " "before saving the module using torch.utils.bundled_inputs.augment_model_with_bundled_inputs."}) for name, param in 
script_module.named_parameters(): if param.requires_grad: lint_list.append({"name": LintCode.REQUIRES_GRAD.name, "message": "Param {} requires grad, " "please set torch.no_grad() to reduce memory usage and improve computation speed during " "inference phase.".format(name)}) op_names = torch.jit.export_opnames(script_module) for op_name in op_names: if "dropout" in op_name: lint_list.append({"name": LintCode.DROPOUT.name, "message": "Operator {} exists, remember to call eval() before " "saving the module.and call torch.utils.mobile_optimizer.optimize_for_mobile to drop dropout " "operator.".format(op_name)}) if "batch_norm" in op_name: lint_list.append({"name": LintCode.BATCHNORM.name, "message": "Operator {} exists, remember to call eval() before " "saving the module and call torch.utils.mobile_optimizer.optimize_for_mobile to drop batch_norm " "operator.".format(op_name)}) return lint_list def _get_bundled_inputs_preserved_attributes(script_module: torch.jit.ScriptModule, preserved_methods: List[str]) -> List[str]: bundled_inputs_attributes = [] # Has bundled inputs for forward if hasattr(script_module, 'get_all_bundled_inputs'): bundled_inputs_attributes.append('get_all_bundled_inputs') bundled_inputs_attributes.append('get_num_bundled_inputs') # Bundled inputs in module after the change that introduced bundled inputs for multiple functions if hasattr(script_module, 'get_bundled_inputs_functions_and_info'): bundled_inputs_attributes.append('get_bundled_inputs_functions_and_info') all_info = script_module.get_bundled_inputs_functions_and_info() for function_name in all_info: if function_name not in preserved_methods: bundled_inputs_attributes.append(function_name) bundled_inputs_attributes.append("get_all_bundled_inputs_for_" + function_name) bundled_inputs_attributes.append("_bundled_inputs_deflated_" + function_name) return bundled_inputs_attributes
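

# A minimal usage sketch for the helpers above; TinyModel is a hypothetical
# stand-in for a real model and the lint output shown is illustrative only.
if __name__ == "__main__":
    import torch.nn as nn

    class TinyModel(nn.Module):
        def __init__(self):
            super().__init__()
            self.fc = nn.Linear(4, 2)

        def forward(self, x):
            return torch.relu(self.fc(x))

    scripted = torch.jit.script(TinyModel().eval())

    # Surface potential mobile-readiness issues (missing bundled inputs,
    # params with requires_grad, dropout/batch_norm ops, ...).
    for lint in generate_mobile_module_lints(scripted):
        print(lint["name"], "-", lint["message"])

    # Produce a module optimized for the default CPU backend.
    mobile_module = optimize_for_mobile(scripted)
    print(type(mobile_module))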
pytorch-master
torch/utils/mobile_optimizer.py
from torch.utils.benchmark.utils.common import * # noqa: F403 from torch.utils.benchmark.utils.timer import * # noqa: F403 from torch.utils.benchmark.utils.compare import * # noqa: F403 from torch.utils.benchmark.utils.fuzzer import * # noqa: F403 from torch.utils.benchmark.utils.valgrind_wrapper.timer_interface import * # noqa: F403 from torch.utils.benchmark.utils.sparse_fuzzer import * # noqa: F403
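

# A short sketch of combining the re-exported Timer and Compare classes; the
# statement being timed is an arbitrary example and the numbers are machine-dependent.
if __name__ == "__main__":
    import torch

    x = torch.randn(1024, 1024)
    results = []
    for num_threads in (1, 4):
        results.append(
            Timer(
                stmt="x.sum()",
                globals={"x": x},
                label="reduction",
                sub_label="x.sum()",
                description=f"{num_threads} thread(s)",
                num_threads=num_threads,
            ).blocked_autorange(min_run_time=0.2)
        )
    Compare(results).print()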
pytorch-master
torch/utils/benchmark/__init__.py
import numpy as np import torch from torch.utils.benchmark import Fuzzer, FuzzedParameter, ParameterAlias, FuzzedTensor _MIN_DIM_SIZE = 16 _MAX_DIM_SIZE = 16 * 1024 ** 2 _POW_TWO_SIZES = tuple(2 ** i for i in range( int(np.log2(_MIN_DIM_SIZE)), int(np.log2(_MAX_DIM_SIZE)) + 1, )) class BinaryOpFuzzer(Fuzzer): def __init__(self, seed, dtype=torch.float32, cuda=False): super().__init__( parameters=[ # Dimensionality of x and y. (e.g. 1D, 2D, or 3D.) FuzzedParameter("dim", distribution={1: 0.3, 2: 0.4, 3: 0.3}, strict=True), # Shapes for `x` and `y`. # It is important to test all shapes, however # powers of two are especially important and therefore # warrant special attention. This is done by generating # both a value drawn from all integers between the min and # max allowed values, and another from only the powers of two # (both distributions are loguniform) and then randomly # selecting between the two. # Moreover, `y` will occasionally have singleton # dimensions in order to test broadcasting. [ FuzzedParameter( name=f"k_any_{i}", minval=_MIN_DIM_SIZE, maxval=_MAX_DIM_SIZE, distribution="loguniform", ) for i in range(3) ], [ FuzzedParameter( name=f"k_pow2_{i}", distribution={size: 1. / len(_POW_TWO_SIZES) for size in _POW_TWO_SIZES} ) for i in range(3) ], [ FuzzedParameter( name=f"k{i}", distribution={ ParameterAlias(f"k_any_{i}"): 0.8, ParameterAlias(f"k_pow2_{i}"): 0.2, }, strict=True, ) for i in range(3) ], [ FuzzedParameter( name=f"y_k{i}", distribution={ ParameterAlias(f"k{i}"): 0.8, 1: 0.2, }, strict=True, ) for i in range(3) ], # Steps for `x` and `y`. (Benchmarks strided memory access.) [ FuzzedParameter( name=f"{name}_step_{i}", distribution={1: 0.8, 2: 0.06, 4: 0.06, 8: 0.04, 16: 0.04}, ) for i in range(3) for name in ("x", "y") ], # Repeatable entropy for downstream applications. FuzzedParameter(name="random_value", minval=0, maxval=2 ** 32 - 1, distribution="uniform"), ], tensors=[ FuzzedTensor( name="x", size=("k0", "k1", "k2"), steps=("x_step_0", "x_step_1", "x_step_2"), probability_contiguous=0.75, min_elements=4 * 1024, max_elements=32 * 1024 ** 2, max_allocation_bytes=2 * 1024**3, # 2 GB dim_parameter="dim", dtype=dtype, cuda=cuda, ), FuzzedTensor( name="y", size=("y_k0", "y_k1", "y_k2"), steps=("x_step_0", "x_step_1", "x_step_2"), probability_contiguous=0.75, max_allocation_bytes=2 * 1024**3, # 2 GB dim_parameter="dim", dtype=dtype, cuda=cuda, ), ], seed=seed, )
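

# A brief sketch of driving the fuzzer, assuming the base Fuzzer.take(n) generator
# yields (tensors, tensor_parameters, params) tuples; each draw supplies an x/y pair
# sized and strided according to the distributions declared above.
if __name__ == "__main__":
    from torch.utils.benchmark import Timer

    for tensors, tensor_parameters, params in BinaryOpFuzzer(seed=0).take(5):
        sub_label = f"{params['dim']}D x: {list(tensors['x'].shape)} y: {list(tensors['y'].shape)}"
        measurement = Timer(
            stmt="x + y",
            globals=tensors,
            label="binary add",
            sub_label=sub_label,
        ).blocked_autorange(min_run_time=0.1)
        print(measurement)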
pytorch-master
torch/utils/benchmark/op_fuzzers/binary.py
import numpy as np import torch from torch.utils.benchmark import Fuzzer, FuzzedParameter, ParameterAlias, FuzzedSparseTensor _MIN_DIM_SIZE = 16 _MAX_DIM_SIZE = 16 * 1024 ** 2 _POW_TWO_SIZES = tuple(2 ** i for i in range( int(np.log2(_MIN_DIM_SIZE)), int(np.log2(_MAX_DIM_SIZE)) + 1, )) class BinaryOpSparseFuzzer(Fuzzer): def __init__(self, seed, dtype=torch.float32, cuda=False): super().__init__( parameters=[ # Dimensionality of x and y. (e.g. 1D, 2D, or 3D.) FuzzedParameter("dim_parameter", distribution={1: 0.3, 2: 0.4, 3: 0.3}, strict=True), FuzzedParameter( name="sparse_dim", distribution={1: 0.4, 2: 0.4, 3: 0.2}, strict=True ), # Shapes for `x` and `y`. # It is important to test all shapes, however # powers of two are especially important and therefore # warrant special attention. This is done by generating # both a value drawn from all integers between the min and # max allowed values, and another from only the powers of two # (both distributions are loguniform) and then randomly # selecting between the two. # Moreover, `y` will occasionally have singleton # dimensions in order to test broadcasting. [ FuzzedParameter( name=f"k_any_{i}", minval=_MIN_DIM_SIZE, maxval=_MAX_DIM_SIZE, distribution="loguniform", ) for i in range(3) ], [ FuzzedParameter( name=f"k_pow2_{i}", distribution={size: 1. / len(_POW_TWO_SIZES) for size in _POW_TWO_SIZES} ) for i in range(3) ], [ FuzzedParameter( name=f"k{i}", distribution={ ParameterAlias(f"k_any_{i}"): 0.8, ParameterAlias(f"k_pow2_{i}"): 0.2, }, strict=True, ) for i in range(3) ], [ FuzzedParameter( name=f"y_k{i}", distribution={ ParameterAlias(f"k{i}"): 1.0}, strict=True, ) for i in range(3) ], FuzzedParameter( name="density", distribution={0.1: 0.4, 0.05: 0.3, 0.01: 0.3}, ), FuzzedParameter( name="coalesced", distribution={True: 0.5, False: 0.5}, ), # Repeatable entropy for downstream applications. FuzzedParameter(name="random_value", minval=0, maxval=2 ** 32 - 1, distribution="uniform"), ], tensors=[ FuzzedSparseTensor( name="x", size=("k0", "k1", "k2"), dim_parameter="dim_parameter", sparse_dim="sparse_dim", density="density", coalesced="coalesced", min_elements=4 * 1024, max_elements=32 * 1024 ** 2, dtype=dtype, cuda=cuda, ), FuzzedSparseTensor( name="y", size=("y_k0", "y_k1", "y_k2"), dim_parameter="dim_parameter", sparse_dim="sparse_dim", density="density", coalesced="coalesced", min_elements=4 * 1024, max_elements=32 * 1024 ** 2, dtype=dtype, cuda=cuda, ), ], seed=seed, )
pytorch-master
torch/utils/benchmark/op_fuzzers/sparse_binary.py
import numpy as np import torch from torch.utils.benchmark import Fuzzer, FuzzedParameter, ParameterAlias, FuzzedSparseTensor _MIN_DIM_SIZE = 16 _MAX_DIM_SIZE = 16 * 1024 ** 2 _POW_TWO_SIZES = tuple(2 ** i for i in range( int(np.log2(_MIN_DIM_SIZE)), int(np.log2(_MAX_DIM_SIZE)) + 1, )) class UnaryOpSparseFuzzer(Fuzzer): def __init__(self, seed, dtype=torch.float32, cuda=False): super().__init__( parameters=[ # Sparse dim parameter of x. (e.g. 1D, 2D, or 3D.) FuzzedParameter("dim_parameter", distribution={1: 0.3, 2: 0.4, 3: 0.3}, strict=True), FuzzedParameter( name="sparse_dim", distribution={1: 0.4, 2: 0.4, 3: 0.2}, strict=True ), # Shapes for `x`. # It is important to test all shapes, however # powers of two are especially important and therefore # warrant special attention. This is done by generating # both a value drawn from all integers between the min and # max allowed values, and another from only the powers of two # (both distributions are loguniform) and then randomly # selecting between the two. [ FuzzedParameter( name=f"k_any_{i}", minval=_MIN_DIM_SIZE, maxval=_MAX_DIM_SIZE, distribution="loguniform", ) for i in range(3) ], [ FuzzedParameter( name=f"k_pow2_{i}", distribution={size: 1. / len(_POW_TWO_SIZES) for size in _POW_TWO_SIZES} ) for i in range(3) ], [ FuzzedParameter( name=f"k{i}", distribution={ ParameterAlias(f"k_any_{i}"): 0.8, ParameterAlias(f"k_pow2_{i}"): 0.2, }, strict=True, ) for i in range(3) ], FuzzedParameter( name="density", distribution={0.1: 0.4, 0.05: 0.3, 0.01: 0.3}, ), FuzzedParameter( name="coalesced", distribution={True: 0.5, False: 0.5}, ), FuzzedParameter(name="random_value", minval=0, maxval=2 ** 32 - 1, distribution="uniform"), ], tensors=[ FuzzedSparseTensor( name="x", size=("k0", "k1", "k2"), dim_parameter="dim_parameter", sparse_dim="sparse_dim", min_elements=4 * 1024, max_elements=32 * 1024 ** 2, density="density", coalesced="coalesced", dtype=dtype, cuda=cuda, ), ], seed=seed, )
pytorch-master
torch/utils/benchmark/op_fuzzers/sparse_unary.py
pytorch-master
torch/utils/benchmark/op_fuzzers/__init__.py
import math import torch from torch.utils import benchmark from torch.utils.benchmark import FuzzedParameter, FuzzedTensor, ParameterAlias __all__ = ['SpectralOpFuzzer'] MIN_DIM_SIZE = 16 MAX_DIM_SIZE = 16 * 1024 def power_range(upper_bound, base): return (base ** i for i in range(int(math.log(upper_bound, base)) + 1)) # List of regular numbers from MIN_DIM_SIZE to MAX_DIM_SIZE # These numbers factorize into multiples of prime factors 2, 3, and 5 only # and are usually the fastest in FFT implementations. REGULAR_SIZES = [] for i in power_range(MAX_DIM_SIZE, 2): for j in power_range(MAX_DIM_SIZE // i, 3): ij = i * j for k in power_range(MAX_DIM_SIZE // ij, 5): ijk = ij * k if ijk > MIN_DIM_SIZE: REGULAR_SIZES.append(ijk) REGULAR_SIZES.sort() class SpectralOpFuzzer(benchmark.Fuzzer): def __init__(self, *, seed: int, dtype=torch.float64, cuda: bool = False, probability_regular: float = 1.0): super().__init__( parameters=[ # Dimensionality of x. (e.g. 1D, 2D, or 3D.) FuzzedParameter("ndim", distribution={1: 0.3, 2: 0.4, 3: 0.3}, strict=True), # Shapes for `x`. # It is important to test all shapes, however # regular sizes are especially important to the FFT and therefore # warrant special attention. This is done by generating # both a value drawn from all integers between the min and # max allowed values, and another from only the regular numbers # (both distributions are loguniform) and then randomly # selecting between the two. [ FuzzedParameter( name=f"k_any_{i}", minval=MIN_DIM_SIZE, maxval=MAX_DIM_SIZE, distribution="loguniform", ) for i in range(3) ], [ FuzzedParameter( name=f"k_regular_{i}", distribution={size: 1. / len(REGULAR_SIZES) for size in REGULAR_SIZES} ) for i in range(3) ], [ FuzzedParameter( name=f"k{i}", distribution={ ParameterAlias(f"k_regular_{i}"): probability_regular, ParameterAlias(f"k_any_{i}"): 1 - probability_regular, }, strict=True, ) for i in range(3) ], # Steps for `x`. (Benchmarks strided memory access.) [ FuzzedParameter( name=f"step_{i}", distribution={1: 0.8, 2: 0.06, 4: 0.06, 8: 0.04, 16: 0.04}, ) for i in range(3) ], ], tensors=[ FuzzedTensor( name="x", size=("k0", "k1", "k2"), steps=("step_0", "step_1", "step_2"), probability_contiguous=0.75, min_elements=4 * 1024, max_elements=32 * 1024 ** 2, max_allocation_bytes=2 * 1024**3, # 2 GB dim_parameter="ndim", dtype=dtype, cuda=cuda, ), ], seed=seed, )
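

# A small verification sketch of the "regular number" property used above: every
# entry of REGULAR_SIZES should factor into powers of 2, 3 and 5 only, which is
# why FFT implementations tend to handle these sizes fastest. Not part of the fuzzer.
if __name__ == "__main__":
    def is_regular(n: int) -> bool:
        # Strip all factors of 2, 3 and 5; a regular number reduces to 1.
        for p in (2, 3, 5):
            while n % p == 0:
                n //= p
        return n == 1

    assert all(is_regular(n) for n in REGULAR_SIZES)
    assert all(MIN_DIM_SIZE < n <= MAX_DIM_SIZE for n in REGULAR_SIZES)
    print(f"{len(REGULAR_SIZES)} regular sizes, e.g. {REGULAR_SIZES[:5]} ... {REGULAR_SIZES[-3:]}")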
pytorch-master
torch/utils/benchmark/op_fuzzers/spectral.py
import numpy as np import torch from torch.utils.benchmark import Fuzzer, FuzzedParameter, ParameterAlias, FuzzedTensor _MIN_DIM_SIZE = 16 _MAX_DIM_SIZE = 16 * 1024 ** 2 _POW_TWO_SIZES = tuple(2 ** i for i in range( int(np.log2(_MIN_DIM_SIZE)), int(np.log2(_MAX_DIM_SIZE)) + 1, )) class UnaryOpFuzzer(Fuzzer): def __init__(self, seed, dtype=torch.float32, cuda=False): super().__init__( parameters=[ # Dimensionality of x. (e.g. 1D, 2D, or 3D.) FuzzedParameter("dim", distribution={1: 0.3, 2: 0.4, 3: 0.3}, strict=True), # Shapes for `x`. # It is important to test all shapes, however # powers of two are especially important and therefore # warrant special attention. This is done by generating # both a value drawn from all integers between the min and # max allowed values, and another from only the powers of two # (both distributions are loguniform) and then randomly # selecting between the two. [ FuzzedParameter( name=f"k_any_{i}", minval=_MIN_DIM_SIZE, maxval=_MAX_DIM_SIZE, distribution="loguniform", ) for i in range(3) ], [ FuzzedParameter( name=f"k_pow2_{i}", distribution={size: 1. / len(_POW_TWO_SIZES) for size in _POW_TWO_SIZES} ) for i in range(3) ], [ FuzzedParameter( name=f"k{i}", distribution={ ParameterAlias(f"k_any_{i}"): 0.8, ParameterAlias(f"k_pow2_{i}"): 0.2, }, strict=True, ) for i in range(3) ], # Steps for `x`. (Benchmarks strided memory access.) [ FuzzedParameter( name=f"x_step_{i}", distribution={1: 0.8, 2: 0.06, 4: 0.06, 8: 0.04, 16: 0.04}, ) for i in range(3) ], # Repeatable entropy for downstream applications. FuzzedParameter(name="random_value", minval=0, maxval=2 ** 32 - 1, distribution="uniform"), ], tensors=[ FuzzedTensor( name="x", size=("k0", "k1", "k2"), steps=("x_step_0", "x_step_1", "x_step_2"), probability_contiguous=0.75, min_elements=4 * 1024, max_elements=32 * 1024 ** 2, max_allocation_bytes=2 * 1024**3, # 2 GB dim_parameter="dim", dtype=dtype, cuda=cuda, ), ], seed=seed, )
pytorch-master
torch/utils/benchmark/op_fuzzers/unary.py
"""Timer class based on the timeit.Timer class, but torch aware.""" import enum import timeit import textwrap from typing import overload, Any, Callable, Dict, List, NoReturn, Optional, Tuple, Type, Union import torch from torch.utils.benchmark.utils import common, cpp_jit from torch.utils.benchmark.utils._stubs import TimerClass, TimeitModuleType from torch.utils.benchmark.utils.valgrind_wrapper import timer_interface as valgrind_timer_interface __all__ = ["Timer", "timer", "Language"] if torch.has_cuda and torch.cuda.is_available(): def timer() -> float: torch.cuda.synchronize() return timeit.default_timer() else: timer = timeit.default_timer class Language(enum.Enum): PYTHON = 0 CPP = 1 class CPPTimer: def __init__( self, stmt: str, setup: str, global_setup: str, timer: Callable[[], float], globals: Dict[str, Any], ) -> None: if timer is not timeit.default_timer: raise NotImplementedError( "PyTorch was built with CUDA and a GPU is present; however " "Timer does not yet support GPU measurements. If your " "code is CPU only, pass `timer=timeit.default_timer` to the " "Timer's constructor to indicate this. (Note that this will " "produce incorrect results if the GPU is in fact used, as " "Timer will not synchronize CUDA.)" ) if globals: raise ValueError("C++ timing does not support globals.") self._stmt: str = textwrap.dedent(stmt) self._setup: str = textwrap.dedent(setup) self._global_setup: str = textwrap.dedent(global_setup) self._timeit_module: Optional[TimeitModuleType] = None def timeit(self, number: int) -> float: if self._timeit_module is None: self._timeit_module = cpp_jit.compile_timeit_template( stmt=self._stmt, setup=self._setup, global_setup=self._global_setup, ) return self._timeit_module.timeit(number) class Timer(object): """Helper class for measuring execution time of PyTorch statements. For a full tutorial on how to use this class, see: https://pytorch.org/tutorials/recipes/recipes/benchmark.html The PyTorch Timer is based on `timeit.Timer` (and in fact uses `timeit.Timer` internally), but with several key differences: 1) Runtime aware: Timer will perform warmups (important as some elements of PyTorch are lazily initialized), set threadpool size so that comparisons are apples-to-apples, and synchronize asynchronous CUDA functions when necessary. 2) Focus on replicates: When measuring code, and particularly complex kernels / models, run-to-run variation is a significant confounding factor. It is expected that all measurements should include replicates to quantify noise and allow median computation, which is more robust than mean. To that effect, this class deviates from the `timeit` API by conceptually merging `timeit.Timer.repeat` and `timeit.Timer.autorange`. (Exact algorithms are discussed in method docstrings.) The `timeit` method is replicated for cases where an adaptive strategy is not desired. 3) Optional metadata: When defining a Timer, one can optionally specify `label`, `sub_label`, `description`, and `env`. (Defined later) These fields are included in the representation of result object and by the `Compare` class to group and display results for comparison. 4) Instruction counts In addition to wall times, Timer can run a statement under Callgrind and report instructions executed. Directly analogous to `timeit.Timer` constructor arguments: `stmt`, `setup`, `timer`, `globals` PyTorch Timer specific constructor arguments: `label`, `sub_label`, `description`, `env`, `num_threads` Args: stmt: Code snippet to be run in a loop and timed. setup: Optional setup code. 
Used to define variables used in `stmt` global_setup: (C++ only) Code which is placed at the top level of the file for things like `#include` statements. timer: Callable which returns the current time. If PyTorch was built without CUDA or there is no GPU present, this defaults to `timeit.default_timer`; otherwise it will synchronize CUDA before measuring the time. globals: A dict which defines the global variables when `stmt` is being executed. This is the other method for providing variables which `stmt` needs. label: String which summarizes `stmt`. For instance, if `stmt` is "torch.nn.functional.relu(torch.add(x, 1, out=out))" one might set label to "ReLU(x + 1)" to improve readability. sub_label: Provide supplemental information to disambiguate measurements with identical stmt or label. For instance, in our example above sub_label might be "float" or "int", so that it is easy to differentiate: "ReLU(x + 1): (float)" "ReLU(x + 1): (int)" when printing Measurements or summarizing using `Compare`. description: String to distinguish measurements with identical label and sub_label. The principal use of `description` is to signal to `Compare` the columns of data. For instance one might set it based on the input size to create a table of the form: :: | n=1 | n=4 | ... ------------- ... ReLU(x + 1): (float) | ... | ... | ... ReLU(x + 1): (int) | ... | ... | ... using `Compare`. It is also included when printing a Measurement. env: This tag indicates that otherwise identical tasks were run in different environments, and are therefore not equivilent, for instance when A/B testing a change to a kernel. `Compare` will treat Measurements with different `env` specification as distinct when merging replicate runs. num_threads: The size of the PyTorch threadpool when executing `stmt`. Single threaded performace is important as both a key inference workload and a good indicator of intrinsic algorithmic efficiency, so the default is set to one. This is in contrast to the default PyTorch threadpool size which tries to utilize all cores. """ _timer_cls: Type[TimerClass] = timeit.Timer def __init__( self, stmt: str = "pass", setup: str = "pass", global_setup: str = "", timer: Callable[[], float] = timer, globals: Optional[Dict[str, Any]] = None, label: Optional[str] = None, sub_label: Optional[str] = None, description: Optional[str] = None, env: Optional[str] = None, num_threads: int = 1, language: Union[Language, str] = Language.PYTHON, ): if not isinstance(stmt, str): raise ValueError("Currently only a `str` stmt is supported.") # We copy `globals` to prevent mutations from leaking. # (For instance, `eval` adds the `__builtins__` key) self._globals = dict(globals or {}) timer_kwargs = {} if language in (Language.PYTHON, "py", "python"): # Include `torch` if not specified as a convenience feature. self._globals.setdefault("torch", torch) self._language: Language = Language.PYTHON if global_setup: raise ValueError( f"global_setup is C++ only, got `{global_setup}`. Most " "likely this code can simply be moved to `setup`." ) elif language in (Language.CPP, "cpp", "c++"): assert self._timer_cls is timeit.Timer, "_timer_cls has already been swapped." self._timer_cls = CPPTimer setup = ("" if setup == "pass" else setup) self._language = Language.CPP timer_kwargs["global_setup"] = global_setup else: raise ValueError(f"Invalid language `{language}`.") # Convenience adjustment so that multi-line code snippets defined in # functions do not IndentationError (Python) or look odd (C++). 
The # leading newline removal is for the initial newline that appears when # defining block strings. For instance: # textwrap.dedent(""" # print("This is a stmt") # """) # produces '\nprint("This is a stmt")\n'. # # Stripping this down to 'print("This is a stmt")' doesn't change # what gets executed, but it makes __repr__'s nicer. stmt = textwrap.dedent(stmt) stmt = (stmt[1:] if stmt and stmt[0] == "\n" else stmt).rstrip() setup = textwrap.dedent(setup) setup = (setup[1:] if setup and setup[0] == "\n" else setup).rstrip() self._timer = self._timer_cls( stmt=stmt, setup=setup, timer=timer, globals=valgrind_timer_interface.CopyIfCallgrind.unwrap_all(self._globals), **timer_kwargs, ) self._task_spec = common.TaskSpec( stmt=stmt, setup=setup, global_setup=global_setup, label=label, sub_label=sub_label, description=description, env=env, num_threads=num_threads, ) def _timeit(self, number: int) -> float: # Even calling a timer in C++ takes ~50 ns, so no real operation should # take less than 1 ns. (And this prevents divide by zero errors.) return max(self._timer.timeit(number), 1e-9) def timeit(self, number: int = 1000000) -> common.Measurement: """Mirrors the semantics of timeit.Timer.timeit(). Execute the main statement (`stmt`) `number` times. https://docs.python.org/3/library/timeit.html#timeit.Timer.timeit """ with common.set_torch_threads(self._task_spec.num_threads): # Warmup self._timeit(number=max(int(number // 100), 2)) return common.Measurement( number_per_run=number, raw_times=[self._timeit(number=number)], task_spec=self._task_spec ) def repeat(self, repeat: int = -1, number: int = -1) -> None: raise NotImplementedError("See `Timer.blocked_autorange.`") def autorange(self, callback: Optional[Callable[[int, float], NoReturn]] = None) -> None: raise NotImplementedError("See `Timer.blocked_autorange.`") def _threaded_measurement_loop( self, number: int, time_hook: Callable[[], float], stop_hook: Callable[[List[float]], bool], min_run_time: float, max_run_time: Optional[float] = None, callback: Optional[Callable[[int, float], NoReturn]] = None ) -> List[float]: total_time = 0.0 can_stop = False times: List[float] = [] with common.set_torch_threads(self._task_spec.num_threads): while (total_time < min_run_time) or (not can_stop): time_spent = time_hook() times.append(time_spent) total_time += time_spent if callback: callback(number, time_spent) can_stop = stop_hook(times) if max_run_time and total_time > max_run_time: break return times def _estimate_block_size(self, min_run_time: float) -> int: with common.set_torch_threads(self._task_spec.num_threads): # Estimate the block size needed for measurement to be negligible # compared to the inner loop. This also serves as a warmup. 
overhead = torch.tensor([self._timeit(0) for _ in range(5)]).median().item() number = 1 while True: time_taken = self._timeit(number) relative_overhead = overhead / time_taken if relative_overhead <= 1e-4 and time_taken >= min_run_time / 1000: break if time_taken > min_run_time: break # Avoid overflow in C++ pybind11 interface if number * 10 > 2147483647: break number *= 10 return number def adaptive_autorange( self, threshold: float = 0.1, *, min_run_time: float = 0.01, max_run_time: float = 10.0, callback: Optional[Callable[[int, float], NoReturn]] = None, ) -> common.Measurement: number = self._estimate_block_size(min_run_time=0.05) def time_hook() -> float: return self._timeit(number) def stop_hook(times: List[float]) -> bool: if len(times) > 3: return common.Measurement( number_per_run=number, raw_times=times, task_spec=self._task_spec ).meets_confidence(threshold=threshold) return False times = self._threaded_measurement_loop( number, time_hook, stop_hook, min_run_time, max_run_time, callback=callback) return common.Measurement( number_per_run=number, raw_times=times, task_spec=self._task_spec ) def blocked_autorange( self, callback: Optional[Callable[[int, float], NoReturn]] = None, min_run_time: float = 0.2, ) -> common.Measurement: """Measure many replicates while keeping timer overhead to a minimum. At a high level, blocked_autorange executes the following pseudo-code:: `setup` total_time = 0 while total_time < min_run_time start = timer() for _ in range(block_size): `stmt` total_time += (timer() - start) Note the variable `block_size` in the inner loop. The choice of block size is important to measurement quality, and must balance two competing objectives: 1) A small block size results in more replicates and generally better statistics. 2) A large block size better amortizes the cost of `timer` invocation, and results in a less biased measurement. This is important because CUDA syncronization time is non-trivial (order single to low double digit microseconds) and would otherwise bias the measurement. blocked_autorange sets block_size by running a warmup period, increasing block size until timer overhead is less than 0.1% of the overall computation. This value is then used for the main measurement loop. Returns: A `Measurement` object that contains measured runtimes and repetition counts, and can be used to compute statistics. (mean, median, etc.) """ number = self._estimate_block_size(min_run_time) def time_hook() -> float: return self._timeit(number) def stop_hook(times: List[float]) -> bool: return True times = self._threaded_measurement_loop( number, time_hook, stop_hook, min_run_time=min_run_time, callback=callback) return common.Measurement( number_per_run=number, raw_times=times, task_spec=self._task_spec ) @overload def collect_callgrind( self, number: int, *, repeats: None, collect_baseline: bool, retain_out_file: bool, ) -> valgrind_timer_interface.CallgrindStats: ... @overload def collect_callgrind( self, number: int, *, repeats: int, collect_baseline: bool, retain_out_file: bool, ) -> Tuple[valgrind_timer_interface.CallgrindStats, ...]: ... def collect_callgrind( self, number: int = 100, *, repeats: Optional[int] = None, collect_baseline: bool = True, retain_out_file: bool = False, ) -> Any: """Collect instruction counts using Callgrind. Unlike wall times, instruction counts are deterministic (modulo non-determinism in the program itself and small amounts of jitter from the Python interpreter.) This makes them ideal for detailed performance analysis. 
        This method runs `stmt` in a separate process so that Valgrind can
        instrument the program. Performance is severely degraded due to the
        instrumentation; however, this is ameliorated by the fact that a small
        number of iterations is generally sufficient to obtain good
        measurements.

        In order to use this method, `valgrind`, `callgrind_control`, and
        `callgrind_annotate` must be installed.

        Because there is a process boundary between the caller (this process)
        and the `stmt` execution, `globals` cannot contain arbitrary in-memory
        data structures. (Unlike timing methods) Instead, globals are
        restricted to builtins, `nn.Module`s, and TorchScripted
        functions/modules to reduce the surprise factor from serialization and
        subsequent deserialization. The `GlobalsBridge` class provides more
        detail on this subject. Take particular care with nn.Modules: they rely
        on pickle and you may need to add an import to `setup` for them to
        transfer properly.

        By default, a profile for an empty statement will be collected and
        cached to indicate how many instructions are from the Python loop which
        drives `stmt`.

        Returns:
            A `CallgrindStats` object which provides instruction counts and
            some basic facilities for analyzing and manipulating results.
        """
        if not isinstance(self._task_spec.stmt, str):
            raise ValueError("`collect_callgrind` currently only supports string `stmt`")

        if repeats is not None and repeats < 1:
            raise ValueError("If specified, `repeats` must be >= 1")

        # Check that the statement is valid. It doesn't guarantee success, but it's much
        # simpler and quicker to raise an exception for a faulty `stmt` or `setup` in
        # the parent process rather than the valgrind subprocess.
        self._timeit(1)
        is_python = (self._language == Language.PYTHON)
        assert is_python or not self._globals
        result = valgrind_timer_interface.wrapper_singleton().collect_callgrind(
            task_spec=self._task_spec,
            globals=self._globals,
            number=number,
            repeats=repeats or 1,
            collect_baseline=collect_baseline and is_python,
            is_python=is_python,
            retain_out_file=retain_out_file,
        )

        return (result[0] if repeats is None else result)
pytorch-master
torch/utils/benchmark/utils/timer.py
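# --- Editorial usage sketch (not part of the repository dump above) ----------
# A minimal example of the Timer defined in torch/utils/benchmark/utils/timer.py.
# The statement, tensor size, and labels are illustrative assumptions.
import torch
from torch.utils.benchmark import Timer

x = torch.ones((1024, 1024))
t = Timer(
    stmt="torch.mm(x, x)",        # measured in a loop; `torch` is injected automatically
    globals={"x": x},             # variables referenced by `stmt`
    label="matmul",
    sub_label="1024x1024",
    description="float32",
    num_threads=1,
)
m = t.blocked_autorange(min_run_time=0.2)
print(m)         # Measurement with median / IQR statistics
print(m.median)  # extrapolated per-invocation time in seconds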
pytorch-master
torch/utils/benchmark/utils/__init__.py
from typing import Optional, Tuple, Union
from numbers import Number
import torch
from torch.utils.benchmark import FuzzedTensor
import math


class FuzzedSparseTensor(FuzzedTensor):
    def __init__(
        self,
        name: str,
        size: Tuple[Union[str, int], ...],
        min_elements: Optional[int] = None,
        max_elements: Optional[int] = None,
        dim_parameter: Optional[str] = None,
        sparse_dim: Optional[str] = None,
        nnz: Optional[str] = None,
        density: Optional[str] = None,
        coalesced: Optional[str] = None,
        dtype=torch.float32,
        cuda=False
    ):
        """
        Args:
            name:
                A string identifier for the generated Tensor.
            size:
                A tuple of integers or strings specifying the size of the generated Tensor.
                String values will be replaced with a concrete int during the generation
                process, while ints are simply passed as literals.
            min_elements:
                The minimum number of elements that this Tensor must have for a set of
                parameters to be valid. (Otherwise they are resampled.)
            max_elements:
                Like `min_elements`, but setting an upper bound.
            dim_parameter:
                The length of `size` will be truncated to this value.
                This allows Tensors of varying dimensions to be generated by the Fuzzer.
            sparse_dim:
                The number of sparse dimensions in a sparse tensor.
            density:
                This value allows tensors of varying sparsities to be generated by the Fuzzer.
            coalesced:
                The sparse tensor format permits uncoalesced sparse tensors, where there
                may be duplicate coordinates in the indices.
            dtype:
                The PyTorch dtype of the generated Tensor.
            cuda:
                Whether to place the Tensor on a GPU.
        """
        super().__init__(name=name, size=size, min_elements=min_elements,
                         max_elements=max_elements, dim_parameter=dim_parameter,
                         dtype=dtype, cuda=cuda)
        self._density = density
        self._coalesced = coalesced
        self._sparse_dim = sparse_dim

    @staticmethod
    def sparse_tensor_constructor(size, dtype, sparse_dim, nnz, is_coalesced):
        """sparse_tensor_constructor creates a sparse tensor in COO format.

        Note that when `is_coalesced` is False the number of stored elements is doubled,
        but the indices still represent the same number of non-zeros `nnz`; i.e., this is
        virtually the same tensor with the same sparsity pattern. Moreover, most sparse
        operations call the coalesce() method, and what we want here is a sparse tensor
        with the same `nnz` whether or not it is coalesced.
        On the other hand, when `is_coalesced` is True the number of elements is reduced
        by the coalescing process by an unpredictable amount, although the probability of
        generating duplicate indices is low in most cases. This trade-off was chosen
        deliberately to keep the construction cost as low as possible.
        """
        if isinstance(size, Number):
            size = [size] * sparse_dim
        assert all(size[d] > 0 for d in range(sparse_dim)) or nnz == 0, 'invalid arguments'

        v_size = [nnz] + list(size[sparse_dim:])
        if dtype.is_floating_point:
            v = torch.rand(size=v_size, dtype=dtype, device="cpu")
        else:
            v = torch.randint(1, 127, size=v_size, dtype=dtype, device="cpu")

        i = torch.rand(sparse_dim, nnz, device="cpu")
        i.mul_(torch.tensor(size[:sparse_dim]).unsqueeze(1).to(i))
        i = i.to(torch.long)

        if not is_coalesced:
            v = torch.cat([v, torch.randn_like(v)], 0)
            i = torch.cat([i, i], 1)

        x = torch.sparse_coo_tensor(i, v, torch.Size(size))
        if is_coalesced:
            x = x.coalesce()
        return x

    def _make_tensor(self, params, state):
        size, _, _ = self._get_size_and_steps(params)
        density = params['density']
        nnz = math.ceil(sum(size) * density)
        assert nnz <= sum(size)

        is_coalesced = params['coalesced']
        sparse_dim = params['sparse_dim'] if self._sparse_dim else len(size)
        sparse_dim = len(size) if len(size) < sparse_dim else sparse_dim
        tensor = self.sparse_tensor_constructor(size, self._dtype, sparse_dim, nnz, is_coalesced)

        if self._cuda:
            tensor = tensor.cuda()
        sparse_dim = tensor.sparse_dim()
        dense_dim = tensor.dense_dim()

        is_hybrid = len(size[sparse_dim:]) > 0
        properties = {
            "numel": int(tensor.numel()),
            "shape": tensor.size(),
            "is_coalesced": tensor.is_coalesced(),
            "density": density,
            "sparsity": 1.0 - density,
            "sparse_dim": sparse_dim,
            "dense_dim": dense_dim,
            "is_hybrid": is_hybrid,
            "dtype": str(self._dtype),
        }
        return tensor, properties
pytorch-master
torch/utils/benchmark/utils/sparse_fuzzer.py
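# --- Editorial usage sketch (not part of the repository dump above) ----------
# How FuzzedSparseTensor might be driven by a Fuzzer. _make_tensor reads the
# literal parameter names "density", "coalesced" (and optionally "sparse_dim"),
# so parameters with those names must exist. Ranges and probabilities here are
# illustrative assumptions.
from torch.utils.benchmark import Fuzzer, FuzzedParameter
from torch.utils.benchmark.utils.sparse_fuzzer import FuzzedSparseTensor

fuzzer = Fuzzer(
    parameters=[
        FuzzedParameter("k", minval=16, maxval=1024, distribution="loguniform"),
        FuzzedParameter("density", distribution={0.01: 0.5, 0.1: 0.5}),
        FuzzedParameter("coalesced", distribution={True: 0.5, False: 0.5}),
    ],
    tensors=[
        FuzzedSparseTensor("x", size=("k", "k"), density="density", coalesced="coalesced"),
    ],
    seed=0,
)

for tensors, properties, params in fuzzer.take(3):
    p = properties["x"]
    print(p["shape"], p["sparsity"], p["is_coalesced"])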
"""Base shared classes and utilities.""" import collections import contextlib import dataclasses import os import shutil import tempfile import textwrap import time from typing import cast, Any, DefaultDict, Dict, Iterable, Iterator, List, Optional, Tuple import uuid import torch __all__ = ["TaskSpec", "Measurement", "select_unit", "unit_to_english", "trim_sigfig", "ordered_unique", "set_torch_threads"] _MAX_SIGNIFICANT_FIGURES = 4 _MIN_CONFIDENCE_INTERVAL = 25e-9 # 25 ns # Measurement will include a warning if the distribution is suspect. All # runs are expected to have some variation; these parameters set the # thresholds. _IQR_WARN_THRESHOLD = 0.1 _IQR_GROSS_WARN_THRESHOLD = 0.25 @dataclasses.dataclass(init=True, repr=False, eq=True, frozen=True) class TaskSpec: """Container for information used to define a Timer. (except globals)""" stmt: str setup: str global_setup: str = "" label: Optional[str] = None sub_label: Optional[str] = None description: Optional[str] = None env: Optional[str] = None num_threads: int = 1 @property def title(self) -> str: """Best effort attempt at a string label for the measurement.""" if self.label is not None: return self.label + (f": {self.sub_label}" if self.sub_label else "") elif "\n" not in self.stmt: return self.stmt + (f": {self.sub_label}" if self.sub_label else "") return ( f"stmt:{f' ({self.sub_label})' if self.sub_label else ''}\n" f"{textwrap.indent(self.stmt, ' ')}" ) def setup_str(self) -> str: return ( "" if (self.setup == "pass" or not self.setup) else f"setup:\n{textwrap.indent(self.setup, ' ')}" if "\n" in self.setup else f"setup: {self.setup}" ) def summarize(self) -> str: """Build TaskSpec portion of repr string for other containers.""" sections = [ self.title, self.description or "", self.setup_str(), ] return "\n".join([f"{i}\n" if "\n" in i else i for i in sections if i]) _TASKSPEC_FIELDS = tuple(i.name for i in dataclasses.fields(TaskSpec)) @dataclasses.dataclass(init=True, repr=False) class Measurement: """The result of a Timer measurement. This class stores one or more measurements of a given statement. It is serializable and provides several convenience methods (including a detailed __repr__) for downstream consumers. """ number_per_run: int raw_times: List[float] task_spec: TaskSpec metadata: Optional[Dict[Any, Any]] = None # Reserved for user payloads. def __post_init__(self) -> None: self._sorted_times: Tuple[float, ...] = () self._warnings: Tuple[str, ...] = () self._median: float = -1.0 self._mean: float = -1.0 self._p25: float = -1.0 self._p75: float = -1.0 def __getattr__(self, name: str) -> Any: # Forward TaskSpec fields for convenience. if name in _TASKSPEC_FIELDS: return getattr(self.task_spec, name) return super().__getattribute__(name) # ========================================================================= # == Convenience methods for statistics =================================== # ========================================================================= # # These methods use raw time divided by number_per_run; this is an # extrapolation and hides the fact that different number_per_run will # result in different amortization of overheads, however if Timer has # selected an appropriate number_per_run then this is a non-issue, and # forcing users to handle that division would result in a poor experience. 
@property def times(self) -> List[float]: return [t / self.number_per_run for t in self.raw_times] @property def median(self) -> float: self._lazy_init() return self._median @property def mean(self) -> float: self._lazy_init() return self._mean @property def iqr(self) -> float: self._lazy_init() return self._p75 - self._p25 @property def significant_figures(self) -> int: """Approximate significant figure estimate. This property is intended to give a convenient way to estimate the precision of a measurement. It only uses the interquartile region to estimate statistics to try to mitigate skew from the tails, and uses a static z value of 1.645 since it is not expected to be used for small values of `n`, so z can approximate `t`. The significant figure estimation used in conjunction with the `trim_sigfig` method to provide a more human interpretable data summary. __repr__ does not use this method; it simply displays raw values. Significant figure estimation is intended for `Compare`. """ self._lazy_init() n_total = len(self._sorted_times) lower_bound = int(n_total // 4) upper_bound = int(torch.tensor(3 * n_total / 4).ceil()) interquartile_points: Tuple[float, ...] = self._sorted_times[lower_bound:upper_bound] std = torch.tensor(interquartile_points).std(unbiased=False).item() sqrt_n = torch.tensor(len(interquartile_points)).sqrt().item() # Rough estimates. These are by no means statistically rigorous. confidence_interval = max(1.645 * std / sqrt_n, _MIN_CONFIDENCE_INTERVAL) relative_ci = torch.tensor(self._median / confidence_interval).log10().item() num_significant_figures = int(torch.tensor(relative_ci).floor()) return min(max(num_significant_figures, 1), _MAX_SIGNIFICANT_FIGURES) @property def has_warnings(self) -> bool: self._lazy_init() return bool(self._warnings) def _lazy_init(self) -> None: if self.raw_times and not self._sorted_times: self._sorted_times = tuple(sorted(self.times)) _sorted_times = torch.tensor(self._sorted_times, dtype=torch.float64) self._median = _sorted_times.quantile(.5).item() self._mean = _sorted_times.mean().item() self._p25 = _sorted_times.quantile(.25).item() self._p75 = _sorted_times.quantile(.75).item() def add_warning(msg: str) -> None: rel_iqr = self.iqr / self.median * 100 self._warnings += ( f" WARNING: Interquartile range is {rel_iqr:.1f}% " f"of the median measurement.\n {msg}", ) if not self.meets_confidence(_IQR_GROSS_WARN_THRESHOLD): add_warning("This suggests significant environmental influence.") elif not self.meets_confidence(_IQR_WARN_THRESHOLD): add_warning("This could indicate system fluctuation.") def meets_confidence(self, threshold: float = _IQR_WARN_THRESHOLD) -> bool: return self.iqr / self.median < threshold @property def title(self) -> str: return self.task_spec.title @property def env(self) -> str: return ( "Unspecified env" if self.taskspec.env is None else cast(str, self.taskspec.env) ) @property def as_row_name(self) -> str: return self.sub_label or self.stmt or "[Unknown]" def __repr__(self) -> str: """ Example repr: <utils.common.Measurement object at 0x7f395b6ac110> Broadcasting add (4x8) Median: 5.73 us IQR: 2.25 us (4.01 to 6.26) 372 measurements, 100 runs per measurement, 1 thread WARNING: Interquartile range is 39.4% of the median measurement. This suggests significant environmental influence. 
""" self._lazy_init() skip_line, newline = "MEASUREMENT_REPR_SKIP_LINE", "\n" n = len(self._sorted_times) time_unit, time_scale = select_unit(self._median) iqr_filter = '' if n >= 4 else skip_line repr_str = f""" {super().__repr__()} {self.task_spec.summarize()} {'Median: ' if n > 1 else ''}{self._median / time_scale:.2f} {time_unit} {iqr_filter}IQR: {self.iqr / time_scale:.2f} {time_unit} ({self._p25 / time_scale:.2f} to {self._p75 / time_scale:.2f}) {n} measurement{'s' if n > 1 else ''}, {self.number_per_run} runs {'per measurement,' if n > 1 else ','} {self.num_threads} thread{'s' if self.num_threads > 1 else ''} {newline.join(self._warnings)}""".strip() # noqa: B950 return "\n".join(l for l in repr_str.splitlines(keepends=False) if skip_line not in l) @staticmethod def merge(measurements: Iterable["Measurement"]) -> List["Measurement"]: """Convenience method for merging replicates. Merge will extrapolate times to `number_per_run=1` and will not transfer any metadata. (Since it might differ between replicates) """ grouped_measurements: DefaultDict[TaskSpec, List["Measurement"]] = collections.defaultdict(list) for m in measurements: grouped_measurements[m.task_spec].append(m) def merge_group(task_spec: TaskSpec, group: List["Measurement"]) -> "Measurement": times: List[float] = [] for m in group: # Different measurements could have different `number_per_run`, # so we call `.times` which normalizes the results. times.extend(m.times) return Measurement( number_per_run=1, raw_times=times, task_spec=task_spec, metadata=None, ) return [merge_group(t, g) for t, g in grouped_measurements.items()] def select_unit(t: float) -> Tuple[str, float]: """Determine how to scale times for O(1) magnitude. This utility is used to format numbers for human consumption. """ time_unit = {-3: "ns", -2: "us", -1: "ms"}.get(int(torch.tensor(t).log10().item() // 3), "s") time_scale = {"ns": 1e-9, "us": 1e-6, "ms": 1e-3, "s": 1}[time_unit] return time_unit, time_scale def unit_to_english(u: str) -> str: return { "ns": "nanosecond", "us": "microsecond", "ms": "millisecond", "s": "second", }[u] def trim_sigfig(x: float, n: int) -> float: """Trim `x` to `n` significant figures. (e.g. 3.14159, 2 -> 3.10000)""" assert n == int(n) magnitude = int(torch.tensor(x).abs().log10().ceil().item()) scale = 10 ** (magnitude - n) return float(torch.tensor(x / scale).round() * scale) def ordered_unique(elements: Iterable[Any]) -> List[Any]: return list(collections.OrderedDict({i: None for i in elements}).keys()) @contextlib.contextmanager def set_torch_threads(n: int) -> Iterator[None]: prior_num_threads = torch.get_num_threads() try: torch.set_num_threads(n) yield finally: torch.set_num_threads(prior_num_threads) def _make_temp_dir(prefix: Optional[str] = None, gc_dev_shm: bool = False) -> str: """Create a temporary directory. The caller is responsible for cleanup. This function is conceptually similar to `tempfile.mkdtemp`, but with the key additional feature that it will use shared memory if the `BENCHMARK_USE_DEV_SHM` environment variable is set. This is an implementation detail, but an important one for cases where many Callgrind measurements are collected at once. (Such as when collecting microbenchmarks.) This is an internal utility, and is exported solely so that microbenchmarks can reuse the util. 
""" use_dev_shm: bool = (os.getenv("BENCHMARK_USE_DEV_SHM") or "").lower() in ("1", "true") if use_dev_shm: root = "/dev/shm/pytorch_benchmark_utils" assert os.name == "posix", f"tmpfs (/dev/shm) is POSIX only, current platform is {os.name}" assert os.path.exists("/dev/shm"), "This system does not appear to support tmpfs (/dev/shm)." os.makedirs(root, exist_ok=True) # Because we're working in shared memory, it is more important than # usual to clean up ALL intermediate files. However we don't want every # worker to walk over all outstanding directories, so instead we only # check when we are sure that it won't lead to contention. if gc_dev_shm: for i in os.listdir(root): owner_file = os.path.join(root, i, "owner.pid") if not os.path.exists(owner_file): continue with open(owner_file, "rt") as f: owner_pid = int(f.read()) if owner_pid == os.getpid(): continue try: # https://stackoverflow.com/questions/568271/how-to-check-if-there-exists-a-process-with-a-given-pid-in-python os.kill(owner_pid, 0) except OSError: print(f"Detected that {os.path.join(root, i)} was orphaned in shared memory. Cleaning up.") shutil.rmtree(os.path.join(root, i)) else: root = tempfile.gettempdir() # We include the time so names sort by creation time, and add a UUID # to ensure we don't collide. name = f"{prefix or tempfile.gettempprefix()}__{int(time.time())}__{uuid.uuid4()}" path = os.path.join(root, name) os.makedirs(path, exist_ok=False) if use_dev_shm: with open(os.path.join(path, "owner.pid"), "wt") as f: f.write(str(os.getpid())) return path
pytorch-master
torch/utils/benchmark/utils/common.py
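# --- Editorial usage sketch (not part of the repository dump above) ----------
# Constructing Measurement objects by hand and merging replicates, using the
# helpers from torch/utils/benchmark/utils/common.py. The raw times are made up.
from torch.utils.benchmark.utils import common

spec = common.TaskSpec(stmt="y = x + 1", setup="x = 0", label="increment")
m1 = common.Measurement(number_per_run=100, raw_times=[0.101, 0.099, 0.103], task_spec=spec)
m2 = common.Measurement(number_per_run=100, raw_times=[0.098, 0.102], task_spec=spec)

merged, = common.Measurement.merge([m1, m2])   # one Measurement per distinct TaskSpec
unit, scale = common.select_unit(merged.median)
print(f"{merged.median / scale:.2f} {unit} ({len(merged.times)} samples)")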
import functools import itertools as it from typing import Any, Callable, Dict, List, Optional, Tuple, Union import numpy as np import torch __all__ = [ "Fuzzer", "FuzzedParameter", "ParameterAlias", "FuzzedTensor", ] _DISTRIBUTIONS = ( "loguniform", "uniform", ) class FuzzedParameter(object): """Specification for a parameter to be generated during fuzzing.""" def __init__( self, name: str, minval: Optional[Union[int, float]] = None, maxval: Optional[Union[int, float]] = None, distribution: Optional[Union[str, Dict[Any, float]]] = None, strict: bool = False, ): """ Args: name: A string name with which to identify the parameter. FuzzedTensors can reference this string in their specifications. minval: The lower bound for the generated value. See the description of `distribution` for type behavior. maxval: The upper bound for the generated value. Type behavior is identical to `minval`. distribution: Specifies the distribution from which this parameter should be drawn. There are three possibilities: - "loguniform" Samples between `minval` and `maxval` (inclusive) such that the probabilities are uniform in log space. As a concrete example, if minval=1 and maxval=100, a sample is as likely to fall in [1, 10) as it is [10, 100]. - "uniform" Samples are chosen with uniform probability between `minval` and `maxval` (inclusive). If either `minval` or `maxval` is a float then the distribution is the continuous uniform distribution; otherwise samples are constrained to the integers. - dict: If a dict is passed, the keys are taken to be choices for the variables and the values are interpreted as probabilities. (And must sum to one.) If a dict is passed, `minval` and `maxval` must not be set. Otherwise, they must be set. strict: If a parameter is strict, it will not be included in the iterative resampling process which Fuzzer uses to find a valid parameter configuration. This allows an author to prevent skew from resampling for a given parameter (for instance, a low size limit could inadvertently bias towards Tensors with fewer dimensions) at the cost of more iterations when generating parameters. 
""" self._name = name self._minval = minval self._maxval = maxval self._distribution = self._check_distribution(distribution) self.strict = strict @property def name(self): return self._name def sample(self, state): if self._distribution == "loguniform": return self._loguniform(state) if self._distribution == "uniform": return self._uniform(state) if isinstance(self._distribution, dict): return self._custom_distribution(state) def _check_distribution(self, distribution): if not isinstance(distribution, dict): assert distribution in _DISTRIBUTIONS else: assert not any(i < 0 for i in distribution.values()), "Probabilities cannot be negative" assert abs(sum(distribution.values()) - 1) <= 1e-5, "Distribution is not normalized" assert self._minval is None assert self._maxval is None return distribution def _loguniform(self, state): output = int(2 ** state.uniform( low=np.log2(self._minval) if self._minval is not None else None, high=np.log2(self._maxval) if self._maxval is not None else None, )) if self._minval is not None and output < self._minval: return self._minval if self._maxval is not None and output > self._maxval: return self._maxval return output def _uniform(self, state): if isinstance(self._minval, int) and isinstance(self._maxval, int): return int(state.randint(low=self._minval, high=self._maxval + 1)) return state.uniform(low=self._minval, high=self._maxval) def _custom_distribution(self, state): # If we directly pass the keys to `choice`, numpy will convert # them to numpy dtypes. index = state.choice( np.arange(len(self._distribution)), p=tuple(self._distribution.values())) return list(self._distribution.keys())[index] class ParameterAlias(object): """Indicates that a parameter should alias the value of another parameter. When used in conjunction with a custom distribution, this allows fuzzed tensors to represent a broader range of behaviors. For example, the following sometimes produces Tensors which broadcast: Fuzzer( parameters=[ FuzzedParameter("x_len", 4, 1024, distribution="uniform"), # `y` will either be size one, or match the size of `x`. FuzzedParameter("y_len", distribution={ 0.5: 1, 0.5: ParameterAlias("x_len") }), ], tensors=[ FuzzedTensor("x", size=("x_len",)), FuzzedTensor("y", size=("y_len",)), ], ) Chains of alias' are allowed, but may not contain cycles. """ def __init__(self, alias_to): self.alias_to = alias_to def __repr__(self): return f"ParameterAlias[alias_to: {self.alias_to}]" def dtype_size(dtype): if dtype == torch.bool: return 1 if dtype.is_floating_point or dtype.is_complex: return int(torch.finfo(dtype).bits / 8) return int(torch.iinfo(dtype).bits / 8) def prod(values, base=1): """np.prod can overflow, so for sizes the product should be done in Python. Even though np.prod type promotes to int64, it can still overflow in which case the negative value will pass the size check and OOM when attempting to actually allocate the Tensor. """ return functools.reduce(lambda x, y: int(x) * int(y), values, base) class FuzzedTensor(object): def __init__( self, name: str, size: Tuple[Union[str, int], ...], steps: Optional[Tuple[Union[str, int], ...]] = None, probability_contiguous: float = 0.5, min_elements: Optional[int] = None, max_elements: Optional[int] = None, max_allocation_bytes: Optional[int] = None, dim_parameter: Optional[str] = None, roll_parameter: Optional[str] = None, dtype=torch.float32, cuda=False, tensor_constructor: Optional[Callable] = None ): """ Args: name: A string identifier for the generated Tensor. 
            size: A tuple of integers or strings specifying the size of the
                generated Tensor. String values will be replaced with a
                concrete int during the generation process, while ints are
                simply passed as literals.
            steps: An optional tuple with the same length as `size`. This
                indicates that a larger Tensor should be allocated, and then
                sliced to produce the generated Tensor. For instance, if size
                is (4, 8) and steps is (1, 4), then a tensor `t` of size
                (4, 32) will be created and then `t[:, ::4]` will be used.
                (Allowing one to test Tensors with strided memory.)
            probability_contiguous: A number between zero and one representing
                the chance that the generated Tensor has a contiguous memory
                layout. This is achieved by randomly permuting the shape of a
                Tensor, calling `.contiguous()`, and then permuting back. This
                is applied before `steps`, which can also cause a Tensor to be
                non-contiguous.
            min_elements: The minimum number of elements that this Tensor must
                have for a set of parameters to be valid. (Otherwise they are
                resampled.)
            max_elements: Like `min_elements`, but setting an upper bound.
            max_allocation_bytes: Like `max_elements`, but for the size of the
                Tensor that must be allocated prior to slicing for `steps` (if
                applicable). For example, a FloatTensor with size (1024, 1024)
                and steps (4, 4) would have 1M elements, but would require a
                64 MB allocation.
            dim_parameter: The length of `size` and `steps` will be truncated
                to this value. This allows Tensors of varying dimensions to be
                generated by the Fuzzer.
            dtype: The PyTorch dtype of the generated Tensor.
            cuda: Whether to place the Tensor on a GPU.
            tensor_constructor: Callable which will be used instead of the
                default Tensor construction method. This allows the author to
                enforce properties of the Tensor (e.g. it can only have certain
                values). The dtype and concrete shape of the Tensor to be
                created will be passed, and concrete values of all parameters
                will be passed as kwargs. Note that transformations to the
                result (permuting, slicing) will be performed by the Fuzzer;
                the tensor_constructor is only responsible for creating an
                appropriately sized Tensor.
        """
        self._name = name
        self._size = size
        self._steps = steps
        self._probability_contiguous = probability_contiguous
        self._min_elements = min_elements
        self._max_elements = max_elements
        self._max_allocation_bytes = max_allocation_bytes
        self._dim_parameter = dim_parameter
        self._dtype = dtype
        self._cuda = cuda
        self._tensor_constructor = tensor_constructor

    @property
    def name(self):
        return self._name

    @staticmethod
    def default_tensor_constructor(size, dtype, **kwargs):
        if dtype.is_floating_point or dtype.is_complex:
            return torch.rand(size=size, dtype=dtype, device="cpu")
        else:
            return torch.randint(1, 127, size=size, dtype=dtype, device="cpu")

    def _make_tensor(self, params, state):
        size, steps, allocation_size = self._get_size_and_steps(params)
        constructor = (
            self._tensor_constructor or
            self.default_tensor_constructor
        )
        raw_tensor = constructor(size=allocation_size, dtype=self._dtype, **params)
        if self._cuda:
            raw_tensor = raw_tensor.cuda()

        # Randomly permute the Tensor and call `.contiguous()` to force re-ordering
        # of the memory, and then permute it back to the original shape.
dim = len(size) order = np.arange(dim) if state.rand() > self._probability_contiguous: while dim > 1 and np.all(order == np.arange(dim)): order = state.permutation(raw_tensor.dim()) raw_tensor = raw_tensor.permute(tuple(order)).contiguous() raw_tensor = raw_tensor.permute(tuple(np.argsort(order))) slices = [slice(0, size * step, step) for size, step in zip(size, steps)] tensor = raw_tensor[slices] properties = { "numel": int(tensor.numel()), "order": order, "steps": steps, "is_contiguous": tensor.is_contiguous(), "dtype": str(self._dtype), } return tensor, properties def _get_size_and_steps(self, params): dim = ( params[self._dim_parameter] if self._dim_parameter is not None else len(self._size) ) def resolve(values, dim): """Resolve values into concrete integers.""" values = tuple(params.get(i, i) for i in values) if len(values) > dim: values = values[:dim] if len(values) < dim: values = values + tuple(1 for _ in range(dim - len(values))) return values size = resolve(self._size, dim) steps = resolve(self._steps or (), dim) allocation_size = tuple(size_i * step_i for size_i, step_i in zip(size, steps)) return size, steps, allocation_size def satisfies_constraints(self, params): size, _, allocation_size = self._get_size_and_steps(params) # Product is computed in Python to avoid integer overflow. num_elements = prod(size) assert num_elements >= 0 allocation_bytes = prod(allocation_size, base=dtype_size(self._dtype)) def nullable_greater(left, right): if left is None or right is None: return False return left > right return not any(( nullable_greater(num_elements, self._max_elements), nullable_greater(self._min_elements, num_elements), nullable_greater(allocation_bytes, self._max_allocation_bytes), )) class Fuzzer(object): def __init__( self, parameters: List[Union[FuzzedParameter, List[FuzzedParameter]]], tensors: List[Union[FuzzedTensor, List[FuzzedTensor]]], constraints: Optional[List[Callable]] = None, seed: Optional[int] = None ): """ Args: parameters: List of FuzzedParameters which provide specifications for generated parameters. Iterable elements will be unpacked, though arbitrary nested structures will not. tensors: List of FuzzedTensors which define the Tensors which will be created each step based on the parameters for that step. Iterable elements will be unpacked, though arbitrary nested structures will not. constraints: List of callables. They will be called with params as kwargs, and if any of them return False the current set of parameters will be rejected. seed: Seed for the RandomState used by the Fuzzer. This will also be used to set the PyTorch random seed so that random ops will create reproducible Tensors. 
""" if seed is None: seed = np.random.RandomState().randint(0, 2**63) self._seed = seed self._parameters = Fuzzer._unpack(parameters, FuzzedParameter) self._tensors = Fuzzer._unpack(tensors, FuzzedTensor) self._constraints = constraints or () p_names = {p.name for p in self._parameters} t_names = {t.name for t in self._tensors} name_overlap = p_names.intersection(t_names) if name_overlap: raise ValueError(f"Duplicate names in parameters and tensors: {name_overlap}") self._rejections = 0 self._total_generated = 0 @staticmethod def _unpack(values, cls): return tuple(it.chain( *[[i] if isinstance(i, cls) else i for i in values] )) def take(self, n): state = np.random.RandomState(self._seed) torch.manual_seed(state.randint(low=0, high=2 ** 63)) for _ in range(n): params = self._generate(state) tensors = {} tensor_properties = {} for t in self._tensors: tensor, properties = t._make_tensor(params, state) tensors[t.name] = tensor tensor_properties[t.name] = properties yield tensors, tensor_properties, params @property def rejection_rate(self): if not self._total_generated: return 0. return self._rejections / self._total_generated def _generate(self, state): strict_params: Dict[str, Union[float, int, ParameterAlias]] = {} for _ in range(1000): candidate_params: Dict[str, Union[float, int, ParameterAlias]] = {} for p in self._parameters: if p.strict: if p.name in strict_params: candidate_params[p.name] = strict_params[p.name] else: candidate_params[p.name] = p.sample(state) strict_params[p.name] = candidate_params[p.name] else: candidate_params[p.name] = p.sample(state) candidate_params = self._resolve_aliases(candidate_params) self._total_generated += 1 if not all(f(candidate_params) for f in self._constraints): self._rejections += 1 continue if not all(t.satisfies_constraints(candidate_params) for t in self._tensors): self._rejections += 1 continue return candidate_params raise ValueError("Failed to generate a set of valid parameters.") @staticmethod def _resolve_aliases(params): params = dict(params) alias_count = sum(isinstance(v, ParameterAlias) for v in params.values()) keys = list(params.keys()) while alias_count: for k in keys: v = params[k] if isinstance(v, ParameterAlias): params[k] = params[v.alias_to] alias_count_new = sum(isinstance(v, ParameterAlias) for v in params.values()) if alias_count == alias_count_new: raise ValueError(f"ParameterAlias cycle detected\n{params}") alias_count = alias_count_new return params
pytorch-master
torch/utils/benchmark/utils/fuzzer.py
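# --- Editorial usage sketch (not part of the repository dump above) ----------
# A Fuzzer that sometimes produces broadcasting operands: `m` is either 1 or an
# alias of `n`. Note that in a custom distribution the dict keys are the choices
# and the values are the probabilities. All concrete numbers are illustrative.
from torch.utils.benchmark import Fuzzer, FuzzedParameter, FuzzedTensor, ParameterAlias, Timer

fuzzer = Fuzzer(
    parameters=[
        FuzzedParameter("n", minval=4, maxval=1024, distribution="loguniform"),
        FuzzedParameter("m", distribution={1: 0.5, ParameterAlias("n"): 0.5}),
    ],
    tensors=[
        FuzzedTensor("x", size=("n",), probability_contiguous=0.75),
        FuzzedTensor("y", size=("m",)),
    ],
    seed=0,
)

for tensors, properties, params in fuzzer.take(5):
    t = Timer("x + y", globals=tensors, sub_label=f"n={params['n']}, m={params['m']}")
    print(t.blocked_autorange(min_run_time=0.05))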
"""JIT C++ strings into executables.""" import atexit import os import re import shutil import textwrap import threading from typing import Any, List, Optional import torch from torch.utils.benchmark.utils._stubs import CallgrindModuleType, TimeitModuleType from torch.utils.benchmark.utils.common import _make_temp_dir from torch.utils import cpp_extension LOCK = threading.Lock() SOURCE_ROOT = os.path.split(os.path.abspath(__file__))[0] # We calculate uuid once at import time so that separate processes will have # separate build roots, but threads will share the same build root. # `cpp_extension` uses build root as part of the cache key, so per-invocation # uuid's (e.g. different build root per _compile_template call) would lead to # a 0% cache hit rate and spurious recompilation. Consider the following: # ``` # setup = "auto x = torch::ones({1024, 1024});" # stmt = "torch::mm(x, x);" # for num_threads in [1, 2, 4, 8]: # print(Timer(stmt, setup, num_threads=num_threads, language="c++").blocked_autorange()) # ```` # `setup` and `stmt` do not change, so we can reuse the executable from the # first pass through the loop. _BUILD_ROOT: Optional[str] = None def _get_build_root() -> str: global _BUILD_ROOT if _BUILD_ROOT is None: _BUILD_ROOT = _make_temp_dir(prefix="benchmark_utils_jit_build") atexit.register(shutil.rmtree, _BUILD_ROOT) return _BUILD_ROOT # BACK_TESTING_NOTE: # There are two workflows where this code could be used. One is the obvious # case where someone simply builds or installs PyTorch and uses Timer. # The other is that the entire `torch/utils/benchmark` folder from a CURRENT # PyTorch checkout is copy-pasted into a much OLDER version of the PyTorch # source code. This is what we refer to here as "back testing". The rationale # is that we might want to use current tooling to study some aspect of an # earlier version of PyTorch. (e.g. a regression.) # # The problem is that Timer relies on several aspects of core PyTorch, namely # some binding functions for Valgrind symbols in `torch._C` and the # `torch.__config__._cxx_flags()` method. If we were to naively copy code # around this wouldn't work as the symbols of interest aren't present in # earlier versions of PyTorch. In order to work around this, we must add back # testing shims. These shims will never activate during normal use, but will # allow Timer to function outside of the "correct" version of PyTorch by # emulating functionality that was added later. # # These shims are temporary, and as Timer becomes more integrated with # PyTorch the cost and complexity of such shims will increase. Once back # testing is no longer required (which is to say we have done enough historic # analysis and the shims no longer justify their maintenance and code # complexity costs) back testing paths will be removed. CXX_FLAGS: Optional[List[str]] if hasattr(torch.__config__, "_cxx_flags"): try: CXX_FLAGS = torch.__config__._cxx_flags().strip().split() if CXX_FLAGS is not None and "-g" not in CXX_FLAGS: CXX_FLAGS.append("-g") except RuntimeError: # We are in FBCode. CXX_FLAGS = None else: # FIXME: Remove when back testing is no longer required. CXX_FLAGS = ["-O2", "-fPIC", "-g"] EXTRA_INCLUDE_PATHS: List[str] = [os.path.join(SOURCE_ROOT, "valgrind_wrapper")] CONDA_PREFIX = os.getenv("CONDA_PREFIX") if CONDA_PREFIX is not None: # Load will automatically search /usr/include, but not conda include. 
EXTRA_INCLUDE_PATHS.append(os.path.join(CONDA_PREFIX, "include")) COMPAT_CALLGRIND_BINDINGS: Optional[CallgrindModuleType] = None def get_compat_bindings() -> CallgrindModuleType: with LOCK: global COMPAT_CALLGRIND_BINDINGS if COMPAT_CALLGRIND_BINDINGS is None: COMPAT_CALLGRIND_BINDINGS = cpp_extension.load( name="callgrind_bindings", sources=[os.path.join( SOURCE_ROOT, "valgrind_wrapper", "compat_bindings.cpp" )], extra_cflags=CXX_FLAGS, extra_include_paths=EXTRA_INCLUDE_PATHS, ) return COMPAT_CALLGRIND_BINDINGS def _compile_template( *, stmt: str, setup: str, global_setup: str, src: str, is_standalone: bool ) -> Any: for before, after, indentation in ( ("// GLOBAL_SETUP_TEMPLATE_LOCATION", global_setup, 0), ("// SETUP_TEMPLATE_LOCATION", setup, 4), ("// STMT_TEMPLATE_LOCATION", stmt, 8) ): # C++ doesn't care about indentation so this code isn't load # bearing the way it is with Python, but this makes the source # look nicer if a human has to look at it. src = re.sub( before, textwrap.indent(after, " " * indentation)[indentation:], src ) # We want to isolate different Timers. However `cpp_extension` will # cache builds which will significantly reduce the cost of repeated # invocations. with LOCK: name = f"timer_cpp_{abs(hash(src))}" build_dir = os.path.join(_get_build_root(), name) os.makedirs(build_dir, exist_ok=True) src_path = os.path.join(build_dir, "timer_src.cpp") with open(src_path, "wt") as f: f.write(src) # `cpp_extension` has its own locking scheme, so we don't need our lock. return cpp_extension.load( name=name, sources=[src_path], build_directory=build_dir, extra_cflags=CXX_FLAGS, extra_include_paths=EXTRA_INCLUDE_PATHS, is_python_module=not is_standalone, is_standalone=is_standalone, ) def compile_timeit_template(*, stmt: str, setup: str, global_setup: str) -> TimeitModuleType: template_path: str = os.path.join(SOURCE_ROOT, "timeit_template.cpp") with open(template_path, "rt") as f: src: str = f.read() module = _compile_template(stmt=stmt, setup=setup, global_setup=global_setup, src=src, is_standalone=False) assert isinstance(module, TimeitModuleType) return module def compile_callgrind_template(*, stmt: str, setup: str, global_setup: str) -> str: template_path: str = os.path.join(SOURCE_ROOT, "valgrind_wrapper", "timer_callgrind_template.cpp") with open(template_path, "rt") as f: src: str = f.read() target = _compile_template(stmt=stmt, setup=setup, global_setup=global_setup, src=src, is_standalone=True) assert isinstance(target, str) return target
pytorch-master
torch/utils/benchmark/utils/cpp_jit.py
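# --- Editorial usage sketch (not part of the repository dump above) ----------
# cpp_jit is normally exercised indirectly through Timer(language="c++"), which
# splices stmt/setup into timeit_template.cpp and compiles it. This assumes a
# working C++ toolchain is available; the snippet below is illustrative only.
import timeit
from torch.utils.benchmark import Timer

t = Timer(
    stmt="torch::mm(x, x);",
    setup="auto x = torch::ones({128, 128});",
    timer=timeit.default_timer,   # CPPTimer only accepts the plain wall clock
    language="c++",
)
print(t.blocked_autorange(min_run_time=0.5))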
"""Display class to aggregate and print the results of many measurements.""" import collections import enum import itertools as it from typing import DefaultDict, List, Optional, Tuple from torch.utils.benchmark.utils import common from torch import tensor as _tensor __all__ = ["Colorize", "Compare"] BEST = "\033[92m" GOOD = "\033[34m" BAD = "\033[2m\033[91m" VERY_BAD = "\033[31m" BOLD = "\033[1m" TERMINATE = "\033[0m" class Colorize(enum.Enum): NONE = "none" COLUMNWISE = "columnwise" ROWWISE = "rowwise" # Classes to separate internal bookkeeping from what is rendered. class _Column(object): def __init__( self, grouped_results: List[Tuple[Optional[common.Measurement], ...]], time_scale: float, time_unit: str, trim_significant_figures: bool, highlight_warnings: bool, ): self._grouped_results = grouped_results self._flat_results = list(it.chain(*grouped_results)) self._time_scale = time_scale self._time_unit = time_unit self._trim_significant_figures = trim_significant_figures self._highlight_warnings = ( highlight_warnings and any(r.has_warnings for r in self._flat_results if r) ) leading_digits = [ int(_tensor(r.median / self._time_scale).log10().ceil()) if r else None for r in self._flat_results ] unit_digits = max(d for d in leading_digits if d is not None) decimal_digits = min( max(m.significant_figures - digits, 0) for digits, m in zip(leading_digits, self._flat_results) if (m is not None) and (digits is not None) ) if self._trim_significant_figures else 1 length = unit_digits + decimal_digits + (1 if decimal_digits else 0) self._template = f"{{:>{length}.{decimal_digits}f}}{{:>{7 if self._highlight_warnings else 0}}}" def get_results_for(self, group): return self._grouped_results[group] def num_to_str(self, value: Optional[float], estimated_sigfigs: int, spread: Optional[float]): if value is None: return " " * len(self.num_to_str(1, estimated_sigfigs, None)) if self._trim_significant_figures: value = common.trim_sigfig(value, estimated_sigfigs) return self._template.format( value, f" (! {spread * 100:.0f}%)" if self._highlight_warnings and spread is not None else "") def optional_min(seq): l = list(seq) return None if len(l) == 0 else min(l) class _Row(object): def __init__(self, results, row_group, render_env, env_str_len, row_name_str_len, time_scale, colorize, num_threads=None): super(_Row, self).__init__() self._results = results self._row_group = row_group self._render_env = render_env self._env_str_len = env_str_len self._row_name_str_len = row_name_str_len self._time_scale = time_scale self._colorize = colorize self._columns: Tuple[_Column, ...] 
= () self._num_threads = num_threads def register_columns(self, columns: Tuple[_Column, ...]): self._columns = columns def as_column_strings(self): concrete_results = [r for r in self._results if r is not None] env = f"({concrete_results[0].env})" if self._render_env else "" env = env.ljust(self._env_str_len + 4) output = [" " + env + concrete_results[0].as_row_name] for m, col in zip(self._results, self._columns or ()): if m is None: output.append(col.num_to_str(None, 1, None)) else: output.append(col.num_to_str( m.median / self._time_scale, m.significant_figures, m.iqr / m.median if m.has_warnings else None )) return output @staticmethod def color_segment(segment, value, best_value): if value <= best_value * 1.01 or value <= best_value + 100e-9: return BEST + BOLD + segment + TERMINATE * 2 if value <= best_value * 1.1: return GOOD + BOLD + segment + TERMINATE * 2 if value >= best_value * 5: return VERY_BAD + BOLD + segment + TERMINATE * 2 if value >= best_value * 2: return BAD + segment + TERMINATE * 2 return segment def row_separator(self, overall_width): return ( [f"{self._num_threads} threads: ".ljust(overall_width, "-")] if self._num_threads is not None else [] ) def finalize_column_strings(self, column_strings, col_widths): best_values = [-1 for _ in column_strings] if self._colorize == Colorize.ROWWISE: row_min = min(r.median for r in self._results if r is not None) best_values = [row_min for _ in column_strings] elif self._colorize == Colorize.COLUMNWISE: best_values = [ optional_min(r.median for r in column.get_results_for(self._row_group) if r is not None) for column in (self._columns or ()) ] row_contents = [column_strings[0].ljust(col_widths[0])] for col_str, width, result, best_value in zip(column_strings[1:], col_widths[1:], self._results, best_values): col_str = col_str.center(width) if self._colorize != Colorize.NONE and result is not None and best_value is not None: col_str = self.color_segment(col_str, result.median, best_value) row_contents.append(col_str) return row_contents class Table(object): def __init__( self, results: List[common.Measurement], colorize: Colorize, trim_significant_figures: bool, highlight_warnings: bool ): assert len(set(r.label for r in results)) == 1 self.results = results self._colorize = colorize self._trim_significant_figures = trim_significant_figures self._highlight_warnings = highlight_warnings self.label = results[0].label self.time_unit, self.time_scale = common.select_unit( min(r.median for r in results) ) self.row_keys = common.ordered_unique([self.row_fn(i) for i in results]) self.row_keys.sort(key=lambda args: args[:2]) # preserve stmt order self.column_keys = common.ordered_unique([self.col_fn(i) for i in results]) self.rows, self.columns = self.populate_rows_and_columns() @staticmethod def row_fn(m: common.Measurement) -> Tuple[int, Optional[str], str]: return m.num_threads, m.env, m.as_row_name @staticmethod def col_fn(m: common.Measurement) -> Optional[str]: return m.description def populate_rows_and_columns(self) -> Tuple[Tuple[_Row, ...], Tuple[_Column, ...]]: rows: List[_Row] = [] columns: List[_Column] = [] ordered_results: List[List[Optional[common.Measurement]]] = [ [None for _ in self.column_keys] for _ in self.row_keys ] row_position = {key: i for i, key in enumerate(self.row_keys)} col_position = {key: i for i, key in enumerate(self.column_keys)} for r in self.results: i = row_position[self.row_fn(r)] j = col_position[self.col_fn(r)] ordered_results[i][j] = r unique_envs = {r.env for r in self.results} render_env = 
len(unique_envs) > 1 env_str_len = max(len(i) for i in unique_envs) if render_env else 0 row_name_str_len = max(len(r.as_row_name) for r in self.results) prior_num_threads = -1 prior_env = "" row_group = -1 rows_by_group: List[List[List[Optional[common.Measurement]]]] = [] for (num_threads, env, _), row in zip(self.row_keys, ordered_results): thread_transition = (num_threads != prior_num_threads) if thread_transition: prior_num_threads = num_threads prior_env = "" row_group += 1 rows_by_group.append([]) rows.append( _Row( results=row, row_group=row_group, render_env=(render_env and env != prior_env), env_str_len=env_str_len, row_name_str_len=row_name_str_len, time_scale=self.time_scale, colorize=self._colorize, num_threads=num_threads if thread_transition else None, ) ) rows_by_group[-1].append(row) prior_env = env for i in range(len(self.column_keys)): grouped_results = [tuple(row[i] for row in g) for g in rows_by_group] column = _Column( grouped_results=grouped_results, time_scale=self.time_scale, time_unit=self.time_unit, trim_significant_figures=self._trim_significant_figures, highlight_warnings=self._highlight_warnings,) columns.append(column) rows_tuple, columns_tuple = tuple(rows), tuple(columns) for ri in rows_tuple: ri.register_columns(columns_tuple) return rows_tuple, columns_tuple def render(self) -> str: string_rows = [[""] + self.column_keys] for r in self.rows: string_rows.append(r.as_column_strings()) num_cols = max(len(i) for i in string_rows) for sr in string_rows: sr.extend(["" for _ in range(num_cols - len(sr))]) col_widths = [max(len(j) for j in i) for i in zip(*string_rows)] finalized_columns = [" | ".join(i.center(w) for i, w in zip(string_rows[0], col_widths))] overall_width = len(finalized_columns[0]) for string_row, row in zip(string_rows[1:], self.rows): finalized_columns.extend(row.row_separator(overall_width)) finalized_columns.append(" | ".join(row.finalize_column_strings(string_row, col_widths))) newline = "\n" has_warnings = self._highlight_warnings and any(ri.has_warnings for ri in self.results) return f""" [{(' ' + (self.label or '') + ' ').center(overall_width - 2, '-')}] {newline.join(finalized_columns)} Times are in {common.unit_to_english(self.time_unit)}s ({self.time_unit}). {'(! XX%) Measurement has high variance, where XX is the IQR / median * 100.' + newline if has_warnings else ""}"""[1:] class Compare(object): def __init__(self, results: List[common.Measurement]): self._results: List[common.Measurement] = [] self.extend_results(results) self._trim_significant_figures = False self._colorize = Colorize.NONE self._highlight_warnings = False def __str__(self): return "\n".join(self._render()) def extend_results(self, results): for r in results: if not isinstance(r, common.Measurement): raise ValueError( "Expected an instance of `Measurement`, " f"got {type(r)} instead." 
) self._results.extend(results) def trim_significant_figures(self): self._trim_significant_figures = True def colorize(self, rowwise=False): self._colorize = Colorize.ROWWISE if rowwise else Colorize.COLUMNWISE def highlight_warnings(self): self._highlight_warnings = True def print(self): print(str(self)) def _render(self): results = common.Measurement.merge(self._results) grouped_results = self._group_by_label(results) output = [] for group in grouped_results.values(): output.append(self._layout(group)) return output def _group_by_label(self, results: List[common.Measurement]): grouped_results: DefaultDict[str, List[common.Measurement]] = collections.defaultdict(list) for r in results: grouped_results[r.label].append(r) return grouped_results def _layout(self, results: List[common.Measurement]): table = Table( results, self._colorize, self._trim_significant_figures, self._highlight_warnings ) return table.render()
pytorch-master
torch/utils/benchmark/utils/compare.py
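# Editor's note: a minimal, hedged usage sketch for the `Compare` class defined in
# torch/utils/benchmark/utils/compare.py above. It only uses APIs that appear in this
# repository (Timer, blocked_autorange, Compare.trim_significant_figures, Compare.colorize,
# Compare.print); the shapes, labels, and run time below are illustrative choices.
import torch
from torch.utils.benchmark import Timer, Compare

results = []
for n in (64, 256):
    for num_threads in (1, 4):
        results.append(
            Timer(
                stmt="x + y",
                globals={"x": torch.ones((n, n)), "y": torch.ones((n, n))},
                label="Broadcasting add",
                sub_label=f"n={n}",
                description="x + y",
                num_threads=num_threads,
            ).blocked_autorange(min_run_time=0.2)
        )

comparison = Compare(results)
comparison.trim_significant_figures()
comparison.colorize(rowwise=True)
comparison.print()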
import sys from typing import Any, Callable, Dict, TYPE_CHECKING if TYPE_CHECKING or sys.version_info >= (3, 8): from typing import runtime_checkable, Protocol else: from typing_extensions import runtime_checkable, Protocol class TimerClass(Protocol): """This is the portion of the `timeit.Timer` API used by benchmark utils.""" def __init__( self, stmt: str, setup: str, timer: Callable[[], float], globals: Dict[str, Any], **kwargs: Any, ) -> None: ... def timeit(self, number: int) -> float: ... @runtime_checkable class TimeitModuleType(Protocol): """Modules generated from `timeit_template.cpp`.""" def timeit(self, number: int) -> float: ... class CallgrindModuleType(Protocol): """Replicates the valgrind endpoints in `torch._C`. These bindings are used to collect Callgrind profiles on earlier versions of PyTorch and will eventually be removed. """ __file__: str __name__: str def _valgrind_supported_platform(self) -> bool: ... def _valgrind_toggle(self) -> None: ...
pytorch-master
torch/utils/benchmark/utils/_stubs.py
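# Editor's note: a small sketch (not part of the original file) of how the `TimerClass`
# protocol above is satisfied structurally. The `WallTimer` name is hypothetical;
# `timeit.Timer` itself already has this shape, which is why benchmark utils can accept
# alternative timer implementations.
import timeit
from typing import Any, Callable, Dict

class WallTimer:
    def __init__(
        self,
        stmt: str,
        setup: str,
        timer: Callable[[], float],
        globals: Dict[str, Any],
        **kwargs: Any,
    ) -> None:
        # Delegate to the stdlib implementation; any object with this constructor
        # and a `timeit(number)` method conforms to `TimerClass`.
        self._timer = timeit.Timer(stmt=stmt, setup=setup, timer=timer, globals=globals)

    def timeit(self, number: int) -> float:
        return self._timer.timeit(number)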
"""Intermediate layer between `Timer` and `valgrind`.""" import collections import enum import dataclasses import itertools as it import os import pickle import re import shutil import subprocess import sys import textwrap from typing import ( cast, Any, Callable, DefaultDict, Dict, Generator, List, NamedTuple, Optional, Tuple, Union, TYPE_CHECKING) import torch from torch.utils.benchmark.utils import common, cpp_jit from torch.utils.benchmark.utils._stubs import CallgrindModuleType __all__ = ["FunctionCount", "FunctionCounts", "CallgrindStats", "CopyIfCallgrind"] if TYPE_CHECKING: CompletedProcessType = subprocess.CompletedProcess[str] else: CompletedProcessType = subprocess.CompletedProcess FunctionCount = NamedTuple("FunctionCount", [("count", int), ("function", str)]) @dataclasses.dataclass(repr=False, eq=False, frozen=True) class FunctionCounts(object): """Container for manipulating Callgrind results. It supports: 1) Addition and subtraction to combine or diff results. 2) Tuple-like indexing. 3) A `denoise` function which strips CPython calls which are known to be non-deterministic and quite noisy. 4) Two higher order methods (`filter` and `transform`) for custom manipulation. """ _data: Tuple[FunctionCount, ...] inclusive: bool truncate_rows: bool = True # For normal use, torch._tensor_str.PRINT_OPTS.linewidth determines # the print settings. This is simply to allow hermetic unit tests. _linewidth: Optional[int] = None def __iter__(self) -> Generator[FunctionCount, None, None]: for i in self._data: yield i def __len__(self) -> int: return len(self._data) def __getitem__(self, item: Any) -> Union[FunctionCount, "FunctionCounts"]: data: Union[FunctionCount, Tuple[FunctionCount, ...]] = self._data[item] return ( FunctionCounts(cast(Tuple[FunctionCount, ...], data), self.inclusive, truncate_rows=False) if isinstance(data, tuple) else data ) def __repr__(self) -> str: count_len = 0 for c, _ in self: # Account for sign in string length. count_len = max(count_len, len(str(c)) + int(c < 0)) lines = [] linewidth = self._linewidth or torch._tensor_str.PRINT_OPTS.linewidth fn_str_len = max(linewidth - count_len - 4, 40) for c, fn in self: if len(fn) > fn_str_len: left_len = int((fn_str_len - 5) // 2) fn = fn[:left_len] + " ... " + fn[-(fn_str_len - left_len - 5):] lines.append(f" {c:>{count_len}} {fn}") if self.truncate_rows and len(lines) > 18: lines = lines[:9] + ["...".rjust(count_len + 2)] + lines[-9:] if not self.inclusive: lines.extend(["", f"Total: {self.sum()}"]) return "\n".join([super().__repr__()] + lines) def __add__( self, other: "FunctionCounts", ) -> "FunctionCounts": return self._merge(other, lambda c: c) def __sub__( self, other: "FunctionCounts", ) -> "FunctionCounts": return self._merge(other, lambda c: -c) def __mul__(self, other: Union[int, float]) -> "FunctionCounts": return self._from_dict({ fn: int(c * other) for c, fn in self._data }, self.inclusive) def transform(self, map_fn: Callable[[str], str]) -> "FunctionCounts": """Apply `map_fn` to all of the function names. This can be used to regularize function names (e.g. stripping irrelevant parts of the file path), coalesce entries by mapping multiple functions to the same name (in which case the counts are added together), etc. 
""" counts: DefaultDict[str, int] = collections.defaultdict(int) for c, fn in self._data: counts[map_fn(fn)] += c return self._from_dict(counts, self.inclusive) def filter(self, filter_fn: Callable[[str], bool]) -> "FunctionCounts": """Keep only the elements where `filter_fn` applied to function name returns True.""" return FunctionCounts(tuple(i for i in self if filter_fn(i.function)), self.inclusive) def sum(self) -> int: return sum(c for c, _ in self) def denoise(self) -> "FunctionCounts": """Remove known noisy instructions. Several instructions in the CPython interpreter are rather noisy. These instructions involve unicode to dictionary lookups which Python uses to map variable names. FunctionCounts is generally a content agnostic container, however this is sufficiently important for obtaining reliable results to warrant an exception.""" return self.filter(lambda fn: "dictobject.c:lookdict_unicode" not in fn) def _merge( self, second: "FunctionCounts", merge_fn: Callable[[int], int] ) -> "FunctionCounts": assert self.inclusive == second.inclusive, "Cannot merge inclusive and exclusive counts." counts: DefaultDict[str, int] = collections.defaultdict(int) for c, fn in self: counts[fn] += c for c, fn in second: counts[fn] += merge_fn(c) return self._from_dict(counts, self.inclusive) @staticmethod def _from_dict(counts: Dict[str, int], inclusive: bool) -> "FunctionCounts": flat_counts = (FunctionCount(c, fn) for fn, c in counts.items() if c) return FunctionCounts(tuple(sorted(flat_counts, reverse=True)), inclusive) @dataclasses.dataclass(repr=False, eq=False, frozen=True) class CallgrindStats(object): """Top level container for Callgrind results collected by Timer. Manipulation is generally done using the FunctionCounts class, which is obtained by calling `CallgrindStats.stats(...)`. Several convenience methods are provided as well; the most significant is `CallgrindStats.as_standardized()`. """ task_spec: common.TaskSpec number_per_run: int built_with_debug_symbols: bool baseline_inclusive_stats: FunctionCounts baseline_exclusive_stats: FunctionCounts stmt_inclusive_stats: FunctionCounts stmt_exclusive_stats: FunctionCounts stmt_callgrind_out: Optional[str] def __repr__(self) -> str: newline = "\n" # `\` cannot appear in fstring code section. base_stats = self.baseline_exclusive_stats output = f""" {super().__repr__()} {self.task_spec.summarize()} {'':>25}All{'':>10}Noisy symbols removed Instructions: {self.counts(denoise=False):>12}{'':>15}{self.counts(denoise=True):>12} Baseline: {base_stats.sum():>12}{'':>15}{base_stats.denoise().sum():>12} {self.number_per_run} runs per measurement, {self.task_spec.num_threads} thread{'s' if self.task_spec.num_threads > 1 else ''} """.strip() if not self.built_with_debug_symbols: output += textwrap.dedent(""" Warning: PyTorch was not built with debug symbols. Source information may be limited. Rebuild with REL_WITH_DEB_INFO=1 for more detailed results.""") return output def stats(self, inclusive: bool = False) -> FunctionCounts: """Returns detailed function counts. Conceptually, the FunctionCounts returned can be thought of as a tuple of (count, path_and_function_name) tuples. `inclusive` matches the semantics of callgrind. If True, the counts include instructions executed by children. `inclusive=True` is useful for identifying hot spots in code; `inclusive=False` is useful for reducing noise when diffing counts from two different runs. (See CallgrindStats.delta(...) 
for more details)
        """
        return self.stmt_inclusive_stats if inclusive else self.stmt_exclusive_stats

    def counts(self, *, denoise: bool = False) -> int:
        """Returns the total number of instructions executed.

        See `FunctionCounts.denoise()` for an explanation of the `denoise` arg.
        """
        stats = self.stmt_exclusive_stats
        return (stats.denoise() if denoise else stats).sum()

    # FIXME: Once 3.7 is the minimum version, type annotate `other` per PEP 563
    def delta(
        self,
        other: "CallgrindStats",
        inclusive: bool = False,
    ) -> FunctionCounts:
        """Diff two sets of counts.

        One common reason to collect instruction counts is to determine the
        effect that a particular change will have on the number of instructions
        needed to perform some unit of work. If a change increases that number,
        the next logical question is "why". This generally involves looking at
        what part of the code increased in instruction count. This function
        automates that process so that one can easily diff counts on both an
        inclusive and exclusive basis.
        """
        return self.stats(inclusive=inclusive) - other.stats(inclusive=inclusive)

    def as_standardized(self) -> "CallgrindStats":
        """Strip library names and some prefixes from function strings.

        When comparing two different sets of instruction counts, one stumbling
        block can be path prefixes. Callgrind includes the full filepath when
        reporting a function (as it should). However, this can cause issues
        when diffing profiles. If a key component such as Python or PyTorch
        was built in separate locations in the two profiles, this can result
        in something resembling::

            23234231 /tmp/first_build_dir/thing.c:foo(...)
             9823794 /tmp/first_build_dir/thing.c:bar(...)
              ...
               53453 .../aten/src/Aten/...:function_that_actually_changed(...)
              ...
            -9823794 /tmp/second_build_dir/thing.c:bar(...)
           -23234231 /tmp/second_build_dir/thing.c:foo(...)

        Stripping prefixes can ameliorate this issue by regularizing the
        strings and causing better cancellation of equivalent call sites
        when diffing.
        """
        def strip(stats: FunctionCounts) -> FunctionCounts:
            transforms = (
                # PyTorch may have been built in different locations.
                (r"^.+build/\.\./", "build/../"),
                (r"^.+/" + re.escape("build/aten/"), "build/aten/"),

                # "Python" and "Objects" come from CPython.
                (r"^.+/" + re.escape("Python/"), "Python/"),
                (r"^.+/" + re.escape("Objects/"), "Objects/"),

                # Strip library name. e.g. `libtorch.so`
                (r"\s\[.+\]$", ""),
            )

            for before, after in transforms:
                stats = stats.transform(lambda fn: re.sub(before, after, fn))

            return stats

        return CallgrindStats(
            task_spec=self.task_spec,
            number_per_run=self.number_per_run,
            built_with_debug_symbols=self.built_with_debug_symbols,
            baseline_inclusive_stats=strip(self.baseline_inclusive_stats),
            baseline_exclusive_stats=strip(self.baseline_exclusive_stats),
            stmt_inclusive_stats=strip(self.stmt_inclusive_stats),
            stmt_exclusive_stats=strip(self.stmt_exclusive_stats),
            # `as_standardized` will change symbol names, so the contents will
            # no longer map directly to `callgrind.out`
            stmt_callgrind_out=None,
        )


class Serialization(enum.Enum):
    PICKLE = 0
    TORCH = 1
    TORCH_JIT = 2


_GLOBALS_ALLOWED_TYPES: Dict[Serialization, Tuple[Any, ...]] = {
    Serialization.PICKLE: (str, bytes, bool, int, float, complex),
    Serialization.TORCH_JIT: (torch.jit.ScriptFunction, torch.jit.ScriptModule),
    Serialization.TORCH: (torch.nn.Module,),
}


class CopyIfCallgrind:
    """Signal that a global may be replaced with a deserialized copy.

    See `GlobalsBridge` for why this matters.
""" def __init__(self, value: Any, *, setup: Optional[str] = None): for method, supported_types in _GLOBALS_ALLOWED_TYPES.items(): if any(isinstance(value, t) for t in supported_types): self._value: Any = value self._setup: Optional[str] = setup self._serialization: Serialization = method break else: supported_str = "\n".join([ getattr(t, "__name__", repr(t)) for t in it.chain(_GLOBALS_ALLOWED_TYPES.values())]) raise ValueError( f"Unsupported type: {type(value)}\n" f"`collect_callgrind` restricts globals to the following types:\n" f"{textwrap.indent(supported_str, ' ')}" ) @property def value(self) -> Any: return self._value @property def setup(self) -> Optional[str]: return self._setup @property def serialization(self) -> Serialization: return self._serialization @staticmethod def unwrap_all(globals: Dict[str, Any]) -> Dict[str, Any]: return { k: (v.value if isinstance(v, CopyIfCallgrind) else v) for k, v in globals.items() } class GlobalsBridge: """Handle the transfer of (certain) globals when collecting Callgrind statistics. Key takeaway: Any globals passed must be wrapped in `CopyIfCallgrind` to work with `Timer.collect_callgrind`. Consider the following code snippet: ``` import pickle import timeit class Counter: value = 0 def __call__(self): self.value += 1 counter = Counter() timeit.Timer("counter()", globals={"counter": counter}).timeit(10) print(counter.value) # 10 timeit.Timer( "counter()", globals={"counter": pickle.loads(pickle.dumps(counter))} ).timeit(20) print(counter.value) # Still 10 ``` In the first case, `stmt` is executed using the objects in `globals`; however, the addition of serialization and deserialization changes the semantics and may meaningfully change behavior. This is a practical consideration when collecting Callgrind statistics. Unlike `exec` based execution (which `timeit` uses under the hood) which can share in-memory data structures with the caller, Callgrind collection requires an entirely new process in order to run under Valgrind. This means that any data structures used for statement execution will have to be serialized and deserialized in the subprocess. In order to avoid surprising semantics from (user invisible) process boundaries, what can be passed through `globals` is severely restricted for `Timer.collect_callgrind`. It is expected that most setup should be achievable (albeit perhaps less ergonomically) by passing a `setup` string. There are, however, exceptions. One such class are TorchScripted functions. Because they require a concrete file with source code it is not possible to define them using a `setup` string. Another group are torch.nn.Modules, whose construction can be complex and prohibitively cumbersome to coerce into a `setup` string. Finally, most builtin types are sufficiently well behaved and sufficiently common to warrant allowing as well. (e.g. `globals={"n": 1}` is very convenient.) Fortunately, all have well defined serialization semantics. This class is responsible for enabling the Valgrind subprocess to use elements in `globals` so long as they are an allowed type. Caveats: The user is required to acknowledge this serialization by wrapping elements in `globals` with `CopyIfCallgrind`. While ScriptFunction and ScriptModule are expected to save and load quite robustly, it is up to the user to ensure that an nn.Module can un-pickle successfully. `torch.Tensor` and `np.ndarray` are deliberately excluded. 
The serialization/deserialization process perturbs the representation of a tensor in ways that could result in incorrect measurements. For example, if a tensor lives in pinned CPU memory, this fact would not be preserved by a dump, and that will in turn change the performance of certain CUDA operations. """ def __init__(self, globals: Dict[str, Any], data_dir: str) -> None: self._globals: Dict[str, CopyIfCallgrind] = {} self._data_dir = data_dir if not os.path.exists(data_dir): os.mkdir(data_dir) if globals.get("torch", torch) is not torch: raise ValueError("`collect_callgrind` does not support mocking out `torch`.") for name, value in globals.items(): if name in ("torch", "__builtins__"): # Torch will be imported by the collection script, and # __builtins__ is added by Timer. continue if not isinstance(value, CopyIfCallgrind): raise ValueError( "`collect_callgrind` requires that globals be wrapped in " "`CopyIfCallgrind` so that serialization is explicit." ) self._globals[name] = value def construct(self) -> str: load_lines = [] for name, wrapped_value in self._globals.items(): if wrapped_value.setup is not None: load_lines.append(textwrap.dedent(wrapped_value.setup)) if wrapped_value.serialization == Serialization.PICKLE: path = os.path.join(self._data_dir, f"{name}.pkl") load_lines.append( f"with open({repr(path)}, 'rb') as f:\n {name} = pickle.load(f)") with open(path, "wb") as f: pickle.dump(wrapped_value.value, f) elif wrapped_value.serialization == Serialization.TORCH: path = os.path.join(self._data_dir, f"{name}.pt") load_lines.append(f"{name} = torch.load({repr(path)})") torch.save(wrapped_value.value, path) elif wrapped_value.serialization == Serialization.TORCH_JIT: path = os.path.join(self._data_dir, f"{name}.pt") load_lines.append(f"{name} = torch.jit.load({repr(path)})") with open(path, "wb") as f: torch.jit.save(wrapped_value.value, f) else: raise NotImplementedError( f"Unknown serialization method: {wrapped_value.serialization}") return "\n".join(load_lines) class _ValgrindWrapper(object): def __init__(self) -> None: self._bindings_module: Optional[CallgrindModuleType] = None valgrind_symbols = ( "_valgrind_supported_platform", "_valgrind_toggle", "_valgrind_toggle_and_dump_stats", ) if all(hasattr(torch._C, symbol) for symbol in valgrind_symbols): self._supported_platform: bool = torch._C._valgrind_supported_platform() else: print("Callgrind bindings are not present in `torch._C`. JIT-ing bindings.") self._bindings_module = cpp_jit.get_compat_bindings() assert all(hasattr(self._bindings_module, symbol) for symbol in valgrind_symbols) self._supported_platform = self._bindings_module._valgrind_supported_platform() self._commands_available: Dict[str, bool] = {} if self._supported_platform: # Only bother checking on supported platforms. 
for cmd in ("valgrind", "callgrind_control", "callgrind_annotate"): self._commands_available[cmd] = not subprocess.run( ["which", cmd], stdout=subprocess.PIPE, stderr=subprocess.PIPE, ).returncode self._build_type: Optional[str] = None build_search = re.search("BUILD_TYPE=(.+),", torch.__config__.show()) if build_search is not None: self._build_type = build_search.groups()[0].split(",")[0] def _validate(self) -> None: if not self._supported_platform: raise OSError("Valgrind is not supported on this platform.") missing_cmds = [cmd for cmd, available in self._commands_available.items() if not available] if missing_cmds: raise OSError("Missing: " + ", ".join(missing_cmds)) def collect_callgrind( self, task_spec: common.TaskSpec, globals: Dict[str, Any], *, number: int, repeats: int, collect_baseline: bool, is_python: bool, retain_out_file: bool, ) -> Tuple[CallgrindStats, ...]: """Collect stats, and attach a reference run which can be used to filter interpreter overhead.""" self._validate() assert is_python or not collect_baseline *task_stats, baseline_stats = self._invoke( task_spec=task_spec, globals=globals, number=number, repeats=repeats, collect_baseline=collect_baseline, is_python=is_python, retain_out_file=retain_out_file, ) assert len(task_stats) == repeats return tuple( CallgrindStats( task_spec=task_spec, number_per_run=number, built_with_debug_symbols=self._build_type == "RelWithDebInfo", baseline_inclusive_stats=baseline_stats[0], baseline_exclusive_stats=baseline_stats[1], stmt_inclusive_stats=stmt_inclusive_stats, stmt_exclusive_stats=stmt_exclusive_stats, stmt_callgrind_out=out_contents, ) for stmt_inclusive_stats, stmt_exclusive_stats, out_contents in task_stats ) def _invoke( self, *, task_spec: common.TaskSpec, globals: Dict[str, Any], number: int, repeats: int, collect_baseline: bool, is_python: bool, retain_out_file: bool, ) -> Tuple[Tuple[FunctionCounts, FunctionCounts, Optional[str]], ...]: """Core invocation method for Callgrind collection. Valgrind operates by effectively replacing the CPU with an emulated version which allows it to instrument any code at the cost of severe performance degradation. This has the practical effect that in order to collect Callgrind statistics, a new process has to be created running under `valgrind`. The steps for this process are: 1) Create a scratch directory. 2) Codegen a run script. (_ValgrindWrapper._construct_script) Inside the run script: * Validate that Python and torch match the parent process * Validate that it is indeed running under valgrind * Execute `setup` and warm up `stmt` * Begin collecting stats * Run the `stmt` loop * Stop collecting stats 3) Parse the run results. 4) Cleanup the scratch directory. 
""" working_dir = common._make_temp_dir(prefix="callgrind") data_dir = os.path.join(working_dir, "data") script_file = os.path.join(working_dir, "timer_callgrind.py") callgrind_out = os.path.join(working_dir, "callgrind.out") error_log = os.path.join(working_dir, "error.txt") stat_log = os.path.join(working_dir, "callgrind_stat.txt") stdout_stderr_log = os.path.join(working_dir, "stdout_stderr.log") def run(args: List[str], **kwargs: Any) -> Tuple[CompletedProcessType, str]: # https://thraxil.org/users/anders/posts/2008/03/13/Subprocess-Hanging-PIPE-is-your-enemy/ f_stdout_stderr = open(stdout_stderr_log, "wb") try: invocation = subprocess.run( args, stdout=f_stdout_stderr, stderr=subprocess.STDOUT, **kwargs, ) with open(stdout_stderr_log, "rt") as f: return invocation, f.read() finally: f_stdout_stderr.close() try: if is_python: if self._bindings_module is not None: shutil.copy( self._bindings_module.__file__, os.path.join(working_dir, os.path.split(self._bindings_module.__file__)[1]) ) script_file = os.path.join(working_dir, "timer_callgrind.py") with open(script_file, "wt") as f: f.write(self._construct_script( task_spec, globals=GlobalsBridge(globals, data_dir), number=number, repeats=repeats, collect_baseline=collect_baseline, error_log=error_log, stat_log=stat_log, bindings=self._bindings_module)) run_loop_cmd = ["python", script_file] else: assert not collect_baseline run_loop_exec = cpp_jit.compile_callgrind_template( stmt=task_spec.stmt, setup=task_spec.setup, global_setup=task_spec.global_setup, ) run_loop_cmd = [ run_loop_exec, "--number", str(number), "--number_warmup", str(min(number, 10)), "--repeats", str(repeats), "--number_threads", str(task_spec.num_threads), ] valgrind_invocation, valgrind_invocation_output = run([ "valgrind", "--tool=callgrind", f"--callgrind-out-file={callgrind_out}", "--dump-line=yes", "--dump-instr=yes", "--instr-atstart=yes", "--collect-atstart=no", ] + run_loop_cmd) if valgrind_invocation.returncode: error_report = "" if os.path.exists(error_log): with open(error_log, "rt") as f: error_report = f.read() if not error_report: error_report = "Unknown error.\n" + valgrind_invocation_output raise OSError(f"Failed to collect callgrind profile:\n{error_report}") def parse_output(fpath: str, inclusive: bool) -> FunctionCounts: annotate_invocation, annotate_invocation_output = run([ "callgrind_annotate", f"--inclusive={'yes' if inclusive else 'no'}", "--threshold=100", "--show-percs=no", fpath ], check=True) total_pattern = re.compile(r"^([0-9,]+)\s+PROGRAM TOTALS") begin_pattern = re.compile(r"Ir\s+file:function") function_pattern = re.compile(r"^\s*([0-9,]+)\s+(.+:.+)$") class ScanState(enum.Enum): SCANNING_FOR_TOTAL = 0 SCANNING_FOR_START = 1 PARSING = 2 scan_state = ScanState.SCANNING_FOR_TOTAL fn_counts = [] for l in annotate_invocation_output.splitlines(keepends=False): if scan_state == ScanState.SCANNING_FOR_TOTAL: total_match = total_pattern.match(l) if total_match: program_totals = int(total_match.groups()[0].replace(",", "")) scan_state = ScanState.SCANNING_FOR_START elif scan_state == ScanState.SCANNING_FOR_START: if begin_pattern.match(l): scan_state = ScanState.PARSING else: assert scan_state == ScanState.PARSING fn_match = function_pattern.match(l) if fn_match: ir_str, file_function = fn_match.groups() ir = int(ir_str.replace(",", "")) if ir == program_totals: # Callgrind includes some top level red herring symbols when # a program dumps multiple profiles. 
continue fn_counts.append(FunctionCount(ir, file_function)) elif re.match(r"-+", l): # Ignore heading separator lines. continue else: break assert scan_state == ScanState.PARSING, f"Failed to parse {fpath}" return FunctionCounts(tuple(sorted(fn_counts, reverse=True)), inclusive=inclusive) def read_results(i: int) -> Tuple[FunctionCounts, FunctionCounts, Optional[str]]: if i == repeats and not collect_baseline: # Null baseline. return ( FunctionCounts((), inclusive=True), FunctionCounts((), inclusive=False), None, ) fpath = f"{callgrind_out}.{i + 1}" # Callgrind one-indexes files. callgrind_out_contents: Optional[str] = None if retain_out_file: with open(fpath, "rt") as f: callgrind_out_contents = f.read() return ( parse_output(fpath, inclusive=True), parse_output(fpath, inclusive=False), callgrind_out_contents ) return tuple(read_results(i) for i in range(repeats + 1)) finally: shutil.rmtree(working_dir) @staticmethod def _construct_script( task_spec: common.TaskSpec, globals: GlobalsBridge, *, number: int, repeats: int, collect_baseline: bool, error_log: str, stat_log: str, bindings: Optional[CallgrindModuleType], ) -> str: def block_stmt(stmt: str, indent: int = 0) -> str: """Partially unroll benchmark loop. The naive template looks something like: "for _ in range({number}): {stmt}" However a loop in Python is surprisingly expensive, and significantly increases the number of background Python instructions. So instead we partially unroll the loops, with a block size of 100 chosen to keep the instruction overhead from `range` low while also not ballooning the size of the generated file. """ block_size = 100 loop_count = number // block_size if loop_count == 1: # There is no point in having `for _ in range(1): ...` rather # than just `...`, and this lets us save shave a few background # instructions. loop_count = 0 remainder = number - block_size * loop_count blocked_stmt = "" if loop_count: unrolled_stmts = textwrap.indent("\n".join([stmt] * block_size), " " * 4) blocked_stmt += f"for _ in range({loop_count}):\n{unrolled_stmts}\n" if remainder: blocked_stmt += "\n".join([stmt] * remainder) return textwrap.indent(blocked_stmt, " " * indent) pass_baseline = ( "callgrind_bindings._valgrind_toggle()\n" f"{block_stmt('pass')}\n" "callgrind_bindings._valgrind_toggle_and_dump_stats()" ) return textwrap.dedent(r""" import gc import os import pickle import subprocess import sys import time # Mitigate https://github.com/pytorch/pytorch/issues/37377 # which can sometimes cause the subprocess call to fail. 
import numpy as np import torch torch.set_num_threads({num_threads}) {bindings_import} PID = os.getpid() def log_failure(msg): with open({error_log_repr}, "wt") as f: f.write(msg) sys.exit(1) def check_result(completed_process): if completed_process.returncode: log_failure(f"Command failed: {{' '.join(completed_process.args)}}") return completed_process # ============================================================================= # == Check that subprocess matches parent ===================================== # ============================================================================= if sys.executable != "{parent_interpreter}": log_failure( "Interpreter mismatch:\n" f" {{sys.executable}}\n vs.\n {parent_interpreter}" ) if torch.__file__ != "{torch_file}": log_failure( "PyTorch does not match expected file:\n" f" {{torch.__file__}}\n vs.\n {torch_file}" ) # ============================================================================= # == User specified setup ===================================================== # ============================================================================= # Load serialized globals {load_globals} # User setup str {setup} for _ in range({warmup_number}): {indented_stmt} # ============================================================================= # == Callgrind management ===================================================== # ============================================================================= with open("{stat_log}", "wb") as stat_file: # If many instances of callgrind are running at once, the output of # `callgrind_control` may exceed 16kb which would cause `subprocess.PIPE` # to deadlock. So instead we use a file. callgrind_stat = check_result(subprocess.run( ["callgrind_control", "--stat"], stdout=stat_file, stderr=subprocess.STDOUT, )) with open("{stat_log}", "rt") as stat_file: stat_lines = stat_file.read().splitlines() if f"PID {{PID}}: python {{__file__}}" not in stat_lines: log_failure("Process does not appear to be running callgrind.") gc.collect() time.sleep(0.01) # ============================================================================= # == User code block ========================================================== # ============================================================================= for _ in range({repeats}): callgrind_bindings._valgrind_toggle() {blocked_stmt} callgrind_bindings._valgrind_toggle_and_dump_stats() gc.collect() {baseline} """).strip().format( indented_stmt=textwrap.indent(task_spec.stmt, " " * 4), blocked_stmt=block_stmt(task_spec.stmt, indent=4), baseline=(pass_baseline if collect_baseline else ""), number=number, repeats=repeats, load_globals=globals.construct(), setup=task_spec.setup, warmup_number=min(number, 10), num_threads=task_spec.num_threads, error_log_repr=repr(error_log), stat_log=stat_log, parent_interpreter=sys.executable, torch_file=torch.__file__, bindings_import=( "import torch._C as callgrind_bindings" if bindings is None else f"import {bindings.__name__} as callgrind_bindings"), ) CALLGRIND_SINGLETON: Optional[_ValgrindWrapper] = None def wrapper_singleton() -> _ValgrindWrapper: global CALLGRIND_SINGLETON if CALLGRIND_SINGLETON is None: CALLGRIND_SINGLETON = _ValgrindWrapper() return CALLGRIND_SINGLETON
pytorch-master
torch/utils/benchmark/utils/valgrind_wrapper/timer_interface.py
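# Editor's note: a hedged sketch of the workflow the module above supports, using only
# APIs defined in timer_interface.py (CopyIfCallgrind, CallgrindStats, FunctionCounts)
# plus Timer.collect_callgrind. Running it requires valgrind and a supported platform;
# the statement and wrapped global are illustrative.
import torch
from torch.utils.benchmark import Timer
from torch.utils.benchmark.utils.valgrind_wrapper.timer_interface import CopyIfCallgrind

timer = Timer(
    stmt="y = x + n",
    setup="x = torch.ones((16, 16))",
    # Globals must be wrapped so that serialization into the valgrind
    # subprocess is explicit (see GlobalsBridge above).
    globals={"n": CopyIfCallgrind(1)},
)

stats = timer.collect_callgrind(number=100)

# Regularize build paths / library names, then drop noisy CPython dictionary
# lookups before inspecting or diffing the counts.
counts = stats.as_standardized().stats(inclusive=False).denoise()
print(counts)

# Diffing against a second collection (e.g. a different build) uses `delta`:
#   delta = stats_new.as_standardized().delta(stats_old.as_standardized())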
pytorch-master
torch/utils/benchmark/utils/valgrind_wrapper/__init__.py
"""Example use of Timer and op fuzzers to measure kernel performance. $ python -m examples.op_benchmark """ import numpy as np import torch from torch.utils.benchmark import Timer from torch.utils.benchmark.op_fuzzers.binary import BinaryOpFuzzer from torch.utils.benchmark.op_fuzzers.unary import UnaryOpFuzzer _MEASURE_TIME = 1.0 def assert_dicts_equal(dict_0, dict_1): """Builtin dict comparison will not compare numpy arrays. e.g. x = {"a": np.ones((2, 1))} x == x # Raises ValueError """ assert set(dict_0.keys()) == set(dict_0.keys()) assert all(np.all(v == dict_1[k]) for k, v in dict_0.items() if k != "dtype") def run(n, stmt, fuzzer_cls): float_iter = fuzzer_cls(seed=0, dtype=torch.float32).take(n) int_iter = fuzzer_cls(seed=0, dtype=torch.int32).take(n) raw_results = [] for i, (float_values, int_values) in enumerate(zip(float_iter, int_iter)): float_tensors, float_tensor_params, float_params = float_values int_tensors, int_tensor_params, int_params = int_values # This benchmark assumes that the two fuzzers generate identically # sized and strided Tensors, since the same seed is used. assert_dicts_equal(float_params, int_params) assert_dicts_equal(float_tensor_params["x"], int_tensor_params["x"]) float_measurement, int_measurement = [ Timer( stmt, globals=tensors, ).blocked_autorange(min_run_time=_MEASURE_TIME) for tensors in (float_tensors, int_tensors) ] descriptions = [] for name in float_tensors: shape_str = "(" + ", ".join([ f"2 ** {int(np.log2(i))}" if 2 ** int(np.log2(i)) == i and i > 1 else str(i) for i in float_tensors[name].shape ]) + ")" order = float_tensor_params[name]["order"] order_str = ("" if all(order == np.arange(len(order))) else str(tuple(order))) steps = float_tensor_params[name]["steps"] steps_str = str(steps) if sum(steps) > len(steps) else "" descriptions.append((name, shape_str, order_str, steps_str)) raw_results.append((float_measurement, int_measurement, descriptions)) print(f"\r{i + 1} / {n}", end="") print() parsed_results, name_len, shape_len, order_len, steps_len = [], 0, 0, 0, 0 for float_measurement, int_measurement, descriptions in raw_results: t_float = float_measurement.median * 1e6 t_int = int_measurement.median * 1e6 rel_diff = abs(t_float - t_int) / (t_float + t_int) * 2 parsed_results.append((t_float, t_int, rel_diff, descriptions)) for name, shape, order, steps in descriptions: name_len = max(name_len, len(name)) shape_len = max(shape_len, len(shape)) order_len = max(order_len, len(order)) steps_len = max(steps_len, len(steps)) parsed_results.sort(key=lambda x: x[2]) print(f"stmt: {stmt}") print(f" diff faster{'':>17}{' ' * name_len} ", end="") print(f"{'shape'.ljust(shape_len)}{'':>16}{'order'.ljust(order_len)}", end="") print(f" steps\n{'-' * 100}") for results, spacer in [(parsed_results[:10], "..."), (parsed_results[-10:], "")]: for t_float, t_int, rel_diff, descriptions in results: time_str = [f"{rel_diff * 100:>4.1f}% {'int' if t_int < t_float else 'float':<20}"] time_str.extend(["".ljust(len(time_str[0])) for _ in descriptions[:-1]]) for t_str, (name, shape, order, steps) in zip(time_str, descriptions): name = f"{name}:".ljust(name_len + 1) shape = shape.ljust(shape_len + 10) order = order.ljust(order_len) print(f"{t_str} {name} {shape}| {order} | {steps}") print(spacer) def main(): run(n=100, stmt="torch.median(x, dim=0)", fuzzer_cls=UnaryOpFuzzer) run(n=100, stmt="torch.square(x)", fuzzer_cls=UnaryOpFuzzer) run(n=100, stmt="x + y", fuzzer_cls=BinaryOpFuzzer) if __name__ == "__main__": main()
pytorch-master
torch/utils/benchmark/examples/op_benchmark.py
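# Editor's note: a minimal sketch of the fuzzer iteration pattern used by the example
# above. `take(n)` yields (tensors, tensor_params, params) triples; the fields accessed
# here ("order", "dim") are the ones the example scripts in this directory rely on.
import torch
from torch.utils.benchmark.op_fuzzers.unary import UnaryOpFuzzer

fuzzer = UnaryOpFuzzer(seed=0, dtype=torch.float32)
for tensors, tensor_params, params in fuzzer.take(1):
    x = tensors["x"]
    print(x.shape, x.dtype, x.is_contiguous())
    print(tensor_params["x"]["order"], params["dim"])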
pytorch-master
torch/utils/benchmark/examples/__init__.py
"""Trivial use of Timer API: $ python -m examples.simple_timeit """ import torch import torch.utils.benchmark as benchmark_utils def main(): timer = benchmark_utils.Timer( stmt="x + y", globals={"x": torch.ones((4, 8)), "y": torch.ones((1, 8))}, label="Broadcasting add (4x8)", ) for i in range(3): print(f"Run: {i}\n{'-' * 40}") print(f"timeit:\n{timer.timeit(10000)}\n") print(f"autorange:\n{timer.blocked_autorange()}\n\n") if __name__ == "__main__": main()
pytorch-master
torch/utils/benchmark/examples/simple_timeit.py
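# Editor's note: a short follow-on sketch (not in the original file) showing that the
# Measurement returned by blocked_autorange can also be inspected programmatically;
# median / mean / iqr are the same fields the other examples in this directory use.
import torch
import torch.utils.benchmark as benchmark_utils

m = benchmark_utils.Timer(
    stmt="x * y",
    globals={"x": torch.ones((64, 64)), "y": torch.ones((64, 64))},
).blocked_autorange(min_run_time=0.2)

print(f"median: {m.median * 1e6:.2f} us | mean: {m.mean * 1e6:.2f} us | iqr: {m.iqr * 1e6:.2f} us")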
"""Example of the Timer and Fuzzer APIs: $ python -m examples.fuzzer """ import sys import torch.utils.benchmark as benchmark_utils def main(): add_fuzzer = benchmark_utils.Fuzzer( parameters=[ [ benchmark_utils.FuzzedParameter( name=f"k{i}", minval=16, maxval=16 * 1024, distribution="loguniform", ) for i in range(3) ], benchmark_utils.FuzzedParameter( name="d", distribution={2: 0.6, 3: 0.4}, ), ], tensors=[ [ benchmark_utils.FuzzedTensor( name=name, size=("k0", "k1", "k2"), dim_parameter="d", probability_contiguous=0.75, min_elements=64 * 1024, max_elements=128 * 1024, ) for name in ("x", "y") ], ], seed=0, ) n = 250 measurements = [] for i, (tensors, tensor_properties, _) in enumerate(add_fuzzer.take(n=n)): x, x_order = tensors["x"], str(tensor_properties["x"]["order"]) y, y_order = tensors["y"], str(tensor_properties["y"]["order"]) shape = ", ".join(tuple(f'{i:>4}' for i in x.shape)) description = "".join([ f"{x.numel():>7} | {shape:<16} | ", f"{'contiguous' if x.is_contiguous() else x_order:<12} | ", f"{'contiguous' if y.is_contiguous() else y_order:<12} | ", ]) timer = benchmark_utils.Timer( stmt="x + y", globals=tensors, description=description, ) measurements.append(timer.blocked_autorange(min_run_time=0.1)) measurements[-1].metadata = {"numel": x.numel()} print(f"\r{i + 1} / {n}", end="") sys.stdout.flush() print() # More string munging to make pretty output. print(f"Average attemts per valid config: {1. / (1. - add_fuzzer.rejection_rate):.1f}") def time_fn(m): return m.median / m.metadata["numel"] measurements.sort(key=time_fn) template = f"{{:>6}}{' ' * 19}Size Shape{' ' * 13}X order Y order\n{'-' * 80}" print(template.format("Best:")) for m in measurements[:15]: print(f"{time_fn(m) * 1e9:>4.1f} ns / element {m.description}") print("\n" + template.format("Worst:")) for m in measurements[-15:]: print(f"{time_fn(m) * 1e9:>4.1f} ns / element {m.description}") if __name__ == "__main__": main()
pytorch-master
torch/utils/benchmark/examples/fuzzer.py
# -*- coding: utf-8 -*-
"""End-to-end example to test a PR for regressions:

$ python -m examples.end_to_end --pr 39850
$ python -m examples.end_to_end --pr 39967
$ python -m examples.end_to_end --pr 39744

NOTE:
  This example assumes that you have an environment prefixed with `ref_`,
  and another prefixed with `pr_` for the PR in question.
  (e.g. `ref_39850` and `pr_39850`).

  A helper script (examples/prepare_e2e.sh) is provided to build the
  required environments with the correct configuration.
"""

import argparse
import itertools as it
import multiprocessing
import multiprocessing.dummy
import os
import pickle
import queue
import subprocess
import tempfile
import textwrap

import numpy as np
import torch
from torch.utils.benchmark.op_fuzzers import unary
from torch.utils.benchmark import Timer, Measurement
from typing import Dict, Tuple, List


_MAIN, _SUBPROCESS = "main", "subprocess"

_PR_ENV_TEMPLATE = "pr_{pr}"
_REF_ENV_TEMPLATE = "ref_{pr}"

_PR_LIST = (
    # Optimize topk performance for tensor with a large dimension size
    "39850",

    # Migrate `var` & `std` to ATen
    "39967",

    # Introducing (Const)StridedRandomAccessor + CompositeRandomAccessor + migrate `sort` to ATen (CPU)
    "39744",
)

_CPU, _GPU = "cpu", "gpu"
_MIN_RUN_SEC = 1
_REPLICATES = {
    _CPU: 5,  # CPU has a higher variance.
    _GPU: 1,
}
_RUNS_PER_LOOP = 3
_NUM_LOOPS = {
    _CPU: 32,
    _GPU: 64,
}

_DEVICES_TO_TEST = {
    "39850": {_CPU: False, _GPU: True},
    "39967": {_CPU: True, _GPU: True},
    "39744": {_CPU: True, _GPU: True},
}

_AVAILABLE_GPUS = queue.Queue[int]()
_DTYPES_TO_TEST = {
    "39850": ("int8", "float32", "float64"),
    "39967": ("float32", "float64"),
    "39744": ("int8", "float32", "float64"),
}
_DTYPE_STR_TO_DTYPE = {
    "float64": torch.float64,
    "float32": torch.float32,
    "int8": torch.int8,
}


def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--pr", type=str, default=_PR_LIST[0], choices=_PR_LIST)
    parser.add_argument("--num_gpus", type=int, default=None)
    parser.add_argument("--test_variance", action="store_true")

    # (Implementation details)
    parser.add_argument("--DETAIL_context", type=str, choices=(_MAIN, _SUBPROCESS), default=_MAIN)
    parser.add_argument("--DETAIL_device", type=str, choices=(_CPU, _GPU), default=None)
    parser.add_argument("--DETAIL_env", type=str, default=None)
    parser.add_argument("--DETAIL_result_file", type=str, default=None)
    parser.add_argument("--DETAIL_seed", type=int, default=None)

    args = parser.parse_args()

    if args.num_gpus is None:
        args.num_gpus = torch.cuda.device_count()
    return args


_SUBPROCESS_CMD_TEMPLATE = (
    "source activate {source_env} && python -m examples.end_to_end "
    "--pr {pr} "
    "--DETAIL_context subprocess "
    "--DETAIL_device {device} "
    "--DETAIL_env {env} "
    "--DETAIL_result_file {result_file} "
    "--DETAIL_seed {seed}"
)


def construct_stmt_and_label(pr, params):
    if pr == "39850":
        k0, k1, k2, dim = [params[i] for i in ["k0", "k1", "k2", "dim"]]
        state = np.random.RandomState(params["random_value"])
        topk_dim = state.randint(low=0, high=dim)
        dim_size = [k0, k1, k2][topk_dim]
        k = max(int(np.floor(2 ** state.uniform(low=0, high=np.log2(dim_size)))), 1)
        return f"torch.topk(x, dim={topk_dim}, k={k})", "topk"

    if pr == "39967":
        return "torch.std(x)", "std"

    if pr == "39744":
        state = np.random.RandomState(params["random_value"])
        sort_dim = state.randint(low=0, high=params["dim"])
        return f"torch.sort(x, dim={sort_dim})", "sort"

    raise ValueError("Unknown PR")


def subprocess_main(args):
    seed = args.DETAIL_seed
    cuda = (args.DETAIL_device == _GPU)

    with open(args.DETAIL_result_file, "ab") as f:
        for dtype_str in
_DTYPES_TO_TEST[args.pr]: dtype = _DTYPE_STR_TO_DTYPE[dtype_str] iterator = unary.UnaryOpFuzzer( seed=seed, dtype=dtype, cuda=cuda).take(_RUNS_PER_LOOP) for i, (tensors, tensor_parameters, params) in enumerate(iterator): params["dtype_str"] = dtype_str stmt, label = construct_stmt_and_label(args.pr, params) timer = Timer( stmt=stmt, globals=tensors, label=label, description=f"[{i}, seed={seed}] ({dtype_str}), stmt = {stmt}", env=args.DETAIL_env, ) measurement = timer.blocked_autorange(min_run_time=_MIN_RUN_SEC) measurement.metadata = { "tensor_parameters": tensor_parameters, "params": params, } print(measurement) pickle.dump(measurement, f) def _main(args): pools, map_iters, finished_counts = {}, {}, {} pr = args.pr envs = (_REF_ENV_TEMPLATE.format(pr=pr), _PR_ENV_TEMPLATE.format(pr=pr)) # We initialize both pools at the start so that they run simultaneously # if applicable if _DEVICES_TO_TEST[args.pr][_GPU]: finished_counts[_GPU] = 0 for i in range(args.num_gpus): _AVAILABLE_GPUS.put(i) pools[_GPU] = multiprocessing.dummy.Pool(args.num_gpus) trials = [ (seed, envs, pr, True, finished_counts, args.test_variance) for seed in range(_NUM_LOOPS[_GPU])] * _REPLICATES[_GPU] map_iters[_GPU] = pools[_GPU].imap(map_fn, trials) if _DEVICES_TO_TEST[args.pr][_CPU]: finished_counts[_CPU] = 0 cpu_workers = int(multiprocessing.cpu_count() / 3) pools[_CPU] = multiprocessing.dummy.Pool(cpu_workers) trials = [ (seed, envs, pr, False, finished_counts, args.test_variance) for seed in range(_NUM_LOOPS[_CPU])] * _REPLICATES[_CPU] map_iters[_CPU] = pools[_CPU].imap(map_fn, trials) results = [] for map_iter in map_iters.values(): for r in map_iter: results.append(r) progress = [ f"{k}: {v} / {_NUM_LOOPS[k] * _REPLICATES[k]}" for k, v in finished_counts.items()] print(f"\r{(' ' * 10).join(progress)}", end="") print() for pool in pools.values(): pool.close() process_results(results, args.test_variance) # \\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\ # == Data processing and string formatting ==================================== # ///////////////////////////////////////////////////////////////////////////// def merge(measurements): if not measurements: return None states = [m.__getstate__() for m in measurements] for k in states[0].keys(): if k in ("number_per_run", "times", "metadata"): continue assert all(s[k] == states[0][k] for s in states) numbers_per_run = {m.number_per_run for m in measurements} n = numbers_per_run.pop() if len(numbers_per_run) == 1 else 1 merged_state = states[0] times = [[t / m.number_per_run * n for t in m.times] for m in measurements] merged_state["times"] = list(it.chain(*times)) merged_state["number_per_run"] = n merged_state["metadata"] = states[0]["metadata"] return Measurement(**merged_state) def process_results(results, test_variance): paired_results: Dict[Tuple[str, str, int, bool, int], List] = {} for (seed, use_gpu), result_batch in results: for r in result_batch: key = (r.label, r.description, r.num_threads, use_gpu, seed) paired_results.setdefault(key, [[], []]) index = 0 if r.env.startswith("ref") else 1 paired_results[key][index].append(r) paired_results = { key: [merge(r_ref_list), merge(r_pr_list)] for key, (r_ref_list, r_pr_list) in paired_results.items() } flagged_for_removal = set() for key, (r_ref, r_pr) in paired_results.items(): if any(r is None or r.has_warnings for r in (r_ref, r_pr)): flagged_for_removal.add(key) paired_results = { k: v for k, v in paired_results.items() if k not in flagged_for_removal } 
print(f"{len(flagged_for_removal)} samples were culled, {len(paired_results)} remain") gpu_results = [(k, v) for k, v in paired_results.items() if k[3]] cpu_results = [(k, v) for k, v in paired_results.items() if not k[3]] if cpu_results: construct_table(cpu_results, "CPU", test_variance) if gpu_results: construct_table(gpu_results, "GPU", test_variance) def construct_table(results, device_str, test_variance): device_str = f"== {device_str} {' (Variance Test)' if test_variance else ''} ".ljust(40, "=") print(f"{'=' * 40}\n{device_str}\n{'=' * 40}\n") results = sorted(( (key, (r_ref, r_pr), r_pr.median / r_ref.median - 1) for key, (r_ref, r_pr) in results ), key=lambda i: i[2]) n = len(results) n_regressed = len([i for i in results if i[2] > 0.05]) n_improved = len([i for i in results if i[2] < -0.05]) n_unchanged = n - n_improved - n_regressed legends = ["Improved (>5%):", "Regressed (>5%):", "Within 5%:"] for legend, count in zip(legends, [n_improved, n_regressed, n_unchanged]): print(f"{legend:<17} {count:>6} ({count / len(results) * 100:>3.0f}%)") keys_to_print = ( {i[0] for i in results[20:30]} | {i[0] for i in results[int(n // 2 - 5):int(n // 2 + 5)]} | {i[0] for i in results[-30:-20]} ) ellipsis_after = {results[29][0], results[int(n // 2 + 4)][0]} column_labels = ( f"Relative Δ Absolute Δ | numel{'':>8}dtype{'':>14}" f"shape{'':>10}steps{'':>10}layout{'':>7}task specific\n{'=' * 126}" ) _, result_log_file = tempfile.mkstemp(suffix=".log") with open(result_log_file, "wt") as f: f.write(f"{device_str}\n\n{column_labels}\n") print(f"\n{column_labels}\n[First twenty omitted (these tend to be noisy) ]") for key, (r_ref, r_pr), rel_diff in results: row = row_str(rel_diff, r_pr.median - r_ref.median, r_ref) f.write(f"{row}\n") if key in keys_to_print: print(row) if key in ellipsis_after: print("...") print("[Last twenty omitted (these tend to be noisy) ]") print(textwrap.dedent(""" steps: Indicates that `x` is sliced from a larger Tensor. For instance, if shape is [12, 4] and steps are [2, 1], then a larger Tensor of size [24, 4] was created, and then x = base_tensor[::2, ::1]. Omitted if all elements are ones. layout: Indicates that `x` is not contiguous due to permutation. Invoking `x.permute(layout)` (e.g. x.permute((2, 0, 1)) if layout = [2, 0, 1]) would produce a Tensor with physical memory layout matching logical memory layout. (Though still not contiguous if `steps` contains non-one elements.) 
""")) print(f"\nComplete results in: {result_log_file}") def row_str(rel_diff, diff_seconds, measurement): params = measurement.metadata["params"] tensor_parameters = measurement.metadata["tensor_parameters"] dim = params["dim"] x_numel = tensor_parameters["x"]["numel"] steps = [params[f"x_step_{i}"] for i in range(dim)] order = tensor_parameters['x']["order"] order = str("" if all(i == j for i, j in zip(order, range(dim))) else order) task_specific = "" if measurement.stmt.startswith("torch.topk"): dim_str, k_str = measurement.stmt[:-1].replace("torch.topk(x, ", "").split(", ") task_specific = f"{dim_str}, {k_str:<8}" elif measurement.stmt.startswith("torch.std"): pass elif measurement.stmt.startswith("torch.sort"): task_specific = measurement.stmt[:-1].replace("torch.sort(x, ", "") return ( f"{rel_diff * 100:>5.0f}% {abs(diff_seconds) * 1e6:>11.1f} us{'':>6}|" f"{x_numel:>12} {params['dtype_str']:>10} " f"{str([params[f'k{i}'] for i in range(dim)]):>17} " f"{str(steps) if not all(i == 1 for i in steps) else '':>12} {order:>12}" f"{'':>8}{task_specific}" ) # \\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\ # == Subprocess and environment management ==================================== # ///////////////////////////////////////////////////////////////////////////// def read_results(result_file: str): output = [] with open(result_file, "rb") as f: while True: try: output.append(pickle.load(f)) except EOFError: break return output def run(cmd, cuda_visible_devices=""): return subprocess.run( cmd, env={ "CUDA_VISIBLE_DEVICES": str(cuda_visible_devices), "PATH": os.getenv("PATH", ""), }, stdout=subprocess.PIPE, shell=True ) def test_source(envs): """Ensure that subprocess""" for env in envs: result = run(f"source activate {env}") if result.returncode != 0: raise ValueError(f"Failed to source environment `{env}`") def map_fn(args): seed, envs, pr, use_gpu, finished_counts, test_variance = args gpu = _AVAILABLE_GPUS.get() if use_gpu else None try: _, result_file = tempfile.mkstemp(suffix=".pkl") for env in envs: cmd = _SUBPROCESS_CMD_TEMPLATE.format( source_env=envs[0] if test_variance else env, env=env, pr=pr, device=_GPU if use_gpu else _CPU, result_file=result_file, seed=seed, ) run(cmd=cmd, cuda_visible_devices=gpu if use_gpu else "") finished_counts[_GPU if use_gpu else _CPU] += 1 return (seed, use_gpu), read_results(result_file) except KeyboardInterrupt: pass # Handle ctrl-c gracefully. finally: if gpu is not None: _AVAILABLE_GPUS.put(gpu) if os.path.exists(result_file): os.remove(result_file) def main(args): test_source([ _REF_ENV_TEMPLATE.format(pr=args.pr), _PR_ENV_TEMPLATE.format(pr=args.pr), ]) _main(args) if __name__ == "__main__": args = parse_args() if args.DETAIL_context == "main": main(args) if args.DETAIL_context == "subprocess": try: subprocess_main(args) except KeyboardInterrupt: pass # Handle ctrl-c gracefully.
pytorch-master
torch/utils/benchmark/examples/end_to_end.py
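# Editor's note: a hedged sketch of the incremental-pickle pattern that end_to_end.py
# (and blas_compare.py below) use to stream results through a file: worker processes
# append pickled records, the parent reads until EOFError. The helper names here are
# illustrative and not part of the original scripts.
import pickle

def append_record(path, record):
    with open(path, "ab") as f:
        pickle.dump(record, f)

def read_records(path):
    records = []
    with open(path, "rb") as f:
        while True:
            try:
                records.append(pickle.load(f))
            except EOFError:
                break
    return records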
import argparse import datetime import itertools as it import multiprocessing import multiprocessing.dummy import os import queue import pickle import shutil import subprocess import sys import tempfile import threading import time from typing import Tuple, Dict from . import blas_compare_setup MIN_RUN_TIME = 1 NUM_REPLICATES = 20 NUM_THREAD_SETTINGS = (1, 2, 4) RESULT_FILE = os.path.join(blas_compare_setup.WORKING_ROOT, "blas_results.pkl") SCRATCH_DIR = os.path.join(blas_compare_setup.WORKING_ROOT, "scratch") BLAS_CONFIGS = ( ("MKL (2020.3)", blas_compare_setup.MKL_2020_3, None), ("MKL (2020.0)", blas_compare_setup.MKL_2020_0, None), ("OpenBLAS", blas_compare_setup.OPEN_BLAS, None) ) _RESULT_FILE_LOCK = threading.Lock() _WORKER_POOL: queue.Queue[Tuple[str, str, int]] = queue.Queue() def clear_worker_pool(): while not _WORKER_POOL.empty(): _, result_file, _ = _WORKER_POOL.get_nowait() os.remove(result_file) if os.path.exists(SCRATCH_DIR): shutil.rmtree(SCRATCH_DIR) def fill_core_pool(n: int): clear_worker_pool() os.makedirs(SCRATCH_DIR) # Reserve two cores so that bookkeeping does not interfere with runs. cpu_count = multiprocessing.cpu_count() - 2 # Adjacent cores sometimes share cache, so we space out single core runs. step = max(n, 2) for i in range(0, cpu_count, step): core_str = f"{i}" if n == 1 else f"{i},{i + n - 1}" _, result_file = tempfile.mkstemp(suffix=".pkl", prefix=SCRATCH_DIR) _WORKER_POOL.put((core_str, result_file, n)) def _subprocess_main(seed=0, num_threads=1, sub_label="N/A", result_file=None, env=None): import torch from torch.utils.benchmark import Timer conda_prefix = os.getenv("CONDA_PREFIX") assert conda_prefix if not torch.__file__.startswith(conda_prefix): raise ValueError( f"PyTorch mismatch: `import torch` resolved to `{torch.__file__}`, " f"which is not in the correct conda env: {conda_prefix}" ) torch.manual_seed(seed) results = [] for n in [4, 8, 16, 32, 64, 128, 256, 512, 1024, 7, 96, 150, 225]: dtypes = (("Single", torch.float32), ("Double", torch.float64)) shapes = ( # Square MatMul ((n, n), (n, n), "(n x n) x (n x n)", "Matrix-Matrix Product"), # Matrix-Vector product ((n, n), (n, 1), "(n x n) x (n x 1)", "Matrix-Vector Product"), ) for (dtype_name, dtype), (x_shape, y_shape, shape_str, blas_type) in it.product(dtypes, shapes): t = Timer( stmt="torch.mm(x, y)", label=f"torch.mm {shape_str} {blas_type} ({dtype_name})", sub_label=sub_label, description=f"n = {n}", env=os.path.split(env or "")[1] or None, globals={ "x": torch.rand(x_shape, dtype=dtype), "y": torch.rand(y_shape, dtype=dtype), }, num_threads=num_threads, ).blocked_autorange(min_run_time=MIN_RUN_TIME) results.append(t) if result_file is not None: with open(result_file, "wb") as f: pickle.dump(results, f) def run_subprocess(args): seed, env, sub_label, extra_env_vars = args core_str = None try: core_str, result_file, num_threads = _WORKER_POOL.get() with open(result_file, "wb"): pass env_vars: Dict[str, str] = { "PATH": os.getenv("PATH") or "", "PYTHONPATH": os.getenv("PYTHONPATH") or "", # NumPy "OMP_NUM_THREADS": str(num_threads), "MKL_NUM_THREADS": str(num_threads), "NUMEXPR_NUM_THREADS": str(num_threads), } env_vars.update(extra_env_vars or {}) subprocess.run( f"source activate {env} && " f"taskset --cpu-list {core_str} " f"python {os.path.abspath(__file__)} " "--DETAIL_in_subprocess " f"--DETAIL_seed {seed} " f"--DETAIL_num_threads {num_threads} " f"--DETAIL_sub_label '{sub_label}' " f"--DETAIL_result_file {result_file} " f"--DETAIL_env {env}", env=env_vars, stdout=subprocess.PIPE, 
shell=True ) with open(result_file, "rb") as f: result_bytes = f.read() with _RESULT_FILE_LOCK, \ open(RESULT_FILE, "ab") as f: f.write(result_bytes) except KeyboardInterrupt: pass # Handle ctrl-c gracefully. finally: if core_str is not None: _WORKER_POOL.put((core_str, result_file, num_threads)) def _compare_main(): results = [] with open(RESULT_FILE, "rb") as f: while True: try: results.extend(pickle.load(f)) except EOFError: break from torch.utils.benchmark import Compare comparison = Compare(results) comparison.trim_significant_figures() comparison.colorize() comparison.print() def main(): with open(RESULT_FILE, "wb"): pass for num_threads in NUM_THREAD_SETTINGS: fill_core_pool(num_threads) workers = _WORKER_POOL.qsize() trials = [] for seed in range(NUM_REPLICATES): for sub_label, env, extra_env_vars in BLAS_CONFIGS: env_path = os.path.join(blas_compare_setup.WORKING_ROOT, env) trials.append((seed, env_path, sub_label, extra_env_vars)) n = len(trials) with multiprocessing.dummy.Pool(workers) as pool: start_time = time.time() for i, r in enumerate(pool.imap(run_subprocess, trials)): n_trials_done = i + 1 time_per_result = (time.time() - start_time) / n_trials_done eta = int((n - n_trials_done) * time_per_result) print(f"\r{i + 1} / {n} ETA:{datetime.timedelta(seconds=eta)}".ljust(80), end="") sys.stdout.flush() print(f"\r{n} / {n} Total time: {datetime.timedelta(seconds=int(time.time() - start_time))}") print() # Any env will do, it just needs to have torch for benchmark utils. env_path = os.path.join(blas_compare_setup.WORKING_ROOT, BLAS_CONFIGS[0][1]) subprocess.run( f"source activate {env_path} && " f"python {os.path.abspath(__file__)} " "--DETAIL_in_compare", shell=True ) if __name__ == "__main__": # These flags are for subprocess control, not controlling the main loop. parser = argparse.ArgumentParser() parser.add_argument("--DETAIL_in_subprocess", action="store_true") parser.add_argument("--DETAIL_in_compare", action="store_true") parser.add_argument("--DETAIL_seed", type=int, default=None) parser.add_argument("--DETAIL_num_threads", type=int, default=None) parser.add_argument("--DETAIL_sub_label", type=str, default="N/A") parser.add_argument("--DETAIL_result_file", type=str, default=None) parser.add_argument("--DETAIL_env", type=str, default=None) args = parser.parse_args() if args.DETAIL_in_subprocess: try: _subprocess_main( args.DETAIL_seed, args.DETAIL_num_threads, args.DETAIL_sub_label, args.DETAIL_result_file, args.DETAIL_env, ) except KeyboardInterrupt: pass # Handle ctrl-c gracefully. elif args.DETAIL_in_compare: _compare_main() else: main()
pytorch-master
torch/utils/benchmark/examples/blas_compare.py
import collections import os import shutil import subprocess try: # no type stub for conda command line interface import conda.cli.python_api # type: ignore[import] from conda.cli.python_api import Commands as conda_commands except ImportError: # blas_compare.py will fail to import these when it's inside a conda env, # but that's fine as it only wants the constants. pass WORKING_ROOT = "/tmp/pytorch_blas_compare_environments" MKL_2020_3 = "mkl_2020_3" MKL_2020_0 = "mkl_2020_0" OPEN_BLAS = "open_blas" EIGEN = "eigen" GENERIC_ENV_VARS = ("USE_CUDA=0", "USE_ROCM=0") BASE_PKG_DEPS = ( "cffi", "cmake", "hypothesis", "ninja", "numpy", "pyyaml", "setuptools", "typing_extensions", ) SubEnvSpec = collections.namedtuple( "SubEnvSpec", ( "generic_installs", "special_installs", "environment_variables", # Validate install. "expected_blas_symbols", "expected_mkl_version", )) SUB_ENVS = { MKL_2020_3: SubEnvSpec( generic_installs=(), special_installs=("intel", ("mkl=2020.3", "mkl-include=2020.3")), environment_variables=("BLAS=MKL",) + GENERIC_ENV_VARS, expected_blas_symbols=("mkl_blas_sgemm",), expected_mkl_version="2020.0.3", ), MKL_2020_0: SubEnvSpec( generic_installs=(), special_installs=("intel", ("mkl=2020.0", "mkl-include=2020.0")), environment_variables=("BLAS=MKL",) + GENERIC_ENV_VARS, expected_blas_symbols=("mkl_blas_sgemm",), expected_mkl_version="2020.0.0", ), OPEN_BLAS: SubEnvSpec( generic_installs=("openblas",), special_installs=(), environment_variables=("BLAS=OpenBLAS",) + GENERIC_ENV_VARS, expected_blas_symbols=("exec_blas",), expected_mkl_version=None, ), # EIGEN: SubEnvSpec( # generic_installs=(), # special_installs=(), # environment_variables=("BLAS=Eigen",) + GENERIC_ENV_VARS, # expected_blas_symbols=(), # ), } def conda_run(*args): """Convenience method.""" stdout, stderr, retcode = conda.cli.python_api.run_command(*args) if retcode: raise OSError(f"conda error: {str(args)} retcode: {retcode}\n{stderr}") return stdout def main(): if os.path.exists(WORKING_ROOT): print("Cleaning: removing old working root.") shutil.rmtree(WORKING_ROOT) os.makedirs(WORKING_ROOT) git_root = subprocess.check_output( "git rev-parse --show-toplevel", shell=True, cwd=os.path.dirname(os.path.realpath(__file__)) ).decode("utf-8").strip() for env_name, env_spec in SUB_ENVS.items(): env_path = os.path.join(WORKING_ROOT, env_name) print(f"Creating env: {env_name}: ({env_path})") conda_run( conda_commands.CREATE, "--no-default-packages", "--prefix", env_path, "python=3", ) print("Testing that env can be activated:") base_source = subprocess.run( f"source activate {env_path}", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, ) if base_source.returncode: raise OSError( "Failed to source base environment:\n" f" stdout: {base_source.stdout.decode('utf-8')}\n" f" stderr: {base_source.stderr.decode('utf-8')}" ) print("Installing packages:") conda_run( conda_commands.INSTALL, "--prefix", env_path, *(BASE_PKG_DEPS + env_spec.generic_installs) ) if env_spec.special_installs: channel, channel_deps = env_spec.special_installs print(f"Installing packages from channel: {channel}") conda_run( conda_commands.INSTALL, "--prefix", env_path, "-c", channel, *channel_deps ) if env_spec.environment_variables: print("Setting environment variables.") # This does not appear to be possible using the python API. 
env_set = subprocess.run( f"source activate {env_path} && " f"conda env config vars set {' '.join(env_spec.environment_variables)}", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, ) if env_set.returncode: raise OSError( "Failed to set environment variables:\n" f" stdout: {env_set.stdout.decode('utf-8')}\n" f" stderr: {env_set.stderr.decode('utf-8')}" ) # Check that they were actually set correctly. actual_env_vars = subprocess.run( f"source activate {env_path} && env", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, ).stdout.decode("utf-8").strip().splitlines() for e in env_spec.environment_variables: assert e in actual_env_vars, f"{e} not in envs" print(f"Building PyTorch for env: `{env_name}`") # We have to re-run during each build to pick up the new # build config settings. build_run = subprocess.run( f"source activate {env_path} && " f"cd {git_root} && " "python setup.py install --cmake", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, ) print("Checking configuration:") check_run = subprocess.run( # Shameless abuse of `python -c ...` f"source activate {env_path} && " "python -c \"" "import torch;" "from torch.utils.benchmark import Timer;" "print(torch.__config__.show());" "setup = 'x=torch.ones((128, 128));y=torch.ones((128, 128))';" "counts = Timer('torch.mm(x, y)', setup).collect_callgrind(collect_baseline=False);" "stats = counts.as_standardized().stats(inclusive=True);" "print(stats.filter(lambda l: 'blas' in l.lower()))\"", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, ) if check_run.returncode: raise OSError( "Failed to set environment variables:\n" f" stdout: {check_run.stdout.decode('utf-8')}\n" f" stderr: {check_run.stderr.decode('utf-8')}" ) check_run_stdout = check_run.stdout.decode('utf-8') print(check_run_stdout) for e in env_spec.environment_variables: if "BLAS" in e: assert e in check_run_stdout, f"PyTorch build did not respect `BLAS=...`: {e}" for s in env_spec.expected_blas_symbols: assert s in check_run_stdout if env_spec.expected_mkl_version is not None: assert f"- Intel(R) Math Kernel Library Version {env_spec.expected_mkl_version}" in check_run_stdout print(f"Build complete: {env_name}") if __name__ == "__main__": main()
pytorch-master
torch/utils/benchmark/examples/blas_compare_setup.py
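# Condensed sketch of the "activate an env, run a command, surface stderr on failure"
# pattern that blas_compare_setup.py repeats for each build (illustrative only;
# ENV_PATH is a placeholder for one of the prefixes the script would create).
import subprocess

ENV_PATH = "/tmp/pytorch_blas_compare_environments/mkl_2020_3"  # hypothetical prefix
proc = subprocess.run(
    f"source activate {ENV_PATH} && "
    "python -c 'import torch; print(torch.__config__.show())'",
    shell=True,
    stdout=subprocess.PIPE,
    stderr=subprocess.PIPE,
)
if proc.returncode:
    raise OSError(f"Configuration check failed:\n{proc.stderr.decode('utf-8')}")
print(proc.stdout.decode("utf-8"))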
"""Microbenchmarks for the torch.fft module""" from argparse import ArgumentParser from collections import namedtuple from collections.abc import Iterable import torch import torch.fft from torch.utils import benchmark from torch.utils.benchmark.op_fuzzers.spectral import SpectralOpFuzzer def _dim_options(ndim): if ndim == 1: return [None] elif ndim == 2: return [0, 1, None] elif ndim == 3: return [0, 1, 2, (0, 1), (0, 2), None] raise ValueError(f"Expected ndim in range 1-3, got {ndim}") def run_benchmark(name: str, function: object, dtype: torch.dtype, seed: int, device: str, samples: int, probability_regular: float): cuda = device == 'cuda' spectral_fuzzer = SpectralOpFuzzer(seed=seed, dtype=dtype, cuda=cuda, probability_regular=probability_regular) results = [] for tensors, tensor_params, params in spectral_fuzzer.take(samples): shape = [params['k0'], params['k1'], params['k2']][:params['ndim']] str_shape = ' x '.join(["{:<4}".format(s) for s in shape]) sub_label = f"{str_shape} {'' if tensor_params['x']['is_contiguous'] else '(discontiguous)'}" for dim in _dim_options(params['ndim']): for nthreads in (1, 4, 16) if not cuda else (1,): measurement = benchmark.Timer( stmt='func(x, dim=dim)', globals={'func': function, 'x': tensors['x'], 'dim': dim}, label=f"{name}_{device}", sub_label=sub_label, description=f"dim={dim}", num_threads=nthreads, ).blocked_autorange(min_run_time=1) measurement.metadata = { 'name': name, 'device': device, 'dim': dim, 'shape': shape, } measurement.metadata.update(tensor_params['x']) results.append(measurement) return results Benchmark = namedtuple('Benchmark', ['name', 'function', 'dtype']) BENCHMARKS = [ Benchmark('fft_real', torch.fft.fftn, torch.float32), Benchmark('fft_complex', torch.fft.fftn, torch.complex64), Benchmark('ifft', torch.fft.ifftn, torch.complex64), Benchmark('rfft', torch.fft.rfftn, torch.float32), Benchmark('irfft', torch.fft.irfftn, torch.complex64), ] BENCHMARK_MAP = {b.name: b for b in BENCHMARKS} BENCHMARK_NAMES = [b.name for b in BENCHMARKS] DEVICE_NAMES = ['cpu', 'cuda'] def _output_csv(file, results): file.write('benchmark,device,num_threads,numel,shape,contiguous,dim,mean (us),median (us),iqr (us)\n') for measurement in results: metadata = measurement.metadata device, dim, shape, name, numel, contiguous = ( metadata['device'], metadata['dim'], metadata['shape'], metadata['name'], metadata['numel'], metadata['is_contiguous']) if isinstance(dim, Iterable): dim_str = '-'.join(str(d) for d in dim) else: dim_str = str(dim) shape_str = 'x'.join(str(s) for s in shape) print(name, device, measurement.task_spec.num_threads, numel, shape_str, contiguous, dim_str, measurement.mean * 1e6, measurement.median * 1e6, measurement.iqr * 1e6, sep=',', file=file) if __name__ == '__main__': parser = ArgumentParser(description=__doc__) parser.add_argument('--device', type=str, choices=DEVICE_NAMES, nargs='+', default=DEVICE_NAMES) parser.add_argument('--bench', type=str, choices=BENCHMARK_NAMES, nargs='+', default=BENCHMARK_NAMES) parser.add_argument('--seed', type=int, default=0) parser.add_argument('--samples', type=int, default=10) parser.add_argument('--probability_regular', type=float, default=1.0) parser.add_argument('-o', '--output', type=str) args = parser.parse_args() num_benchmarks = len(args.device) * len(args.bench) i = 0 results = [] for device in args.device: for bench in (BENCHMARK_MAP[b] for b in args.bench): results += run_benchmark( name=bench.name, function=bench.function, dtype=bench.dtype, seed=args.seed, device=device, 
samples=args.samples, probability_regular=args.probability_regular) i += 1 print(f'Completed {bench.name} benchmark on {device} ({i} of {num_benchmarks})') if args.output is not None: with open(args.output, 'w') as f: _output_csv(f, results) compare = benchmark.Compare(results) compare.trim_significant_figures() compare.colorize() compare.print()
pytorch-master
torch/utils/benchmark/examples/spectral_ops_fuzz_test.py
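# Single-measurement sketch of the pattern run_benchmark() applies across fuzzed shapes
# (illustrative only; the shape, dim, and thread count below are arbitrary assumptions).
import torch
from torch.utils import benchmark

x = torch.randn(128, 128)
m = benchmark.Timer(
    stmt="torch.fft.fftn(x, dim=dim)",
    globals={"torch": torch, "x": x, "dim": (0, 1)},
    label="fft_real_cpu",
    description="dim=(0, 1)",
    num_threads=1,
).blocked_autorange(min_run_time=0.1)
print(m)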
"""Example of Timer and Compare APIs: $ python -m examples.compare """ import pickle import sys import time import torch import torch.utils.benchmark as benchmark_utils class FauxTorch(object): """Emulate different versions of pytorch. In normal circumstances this would be done with multiple processes writing serialized measurements, but this simplifies that model to make the example clearer. """ def __init__(self, real_torch, extra_ns_per_element): self._real_torch = real_torch self._extra_ns_per_element = extra_ns_per_element def extra_overhead(self, result): # time.sleep has a ~65 us overhead, so only fake a # per-element overhead if numel is large enough. numel = int(result.numel()) if numel > 5000: time.sleep(numel * self._extra_ns_per_element * 1e-9) return result def add(self, *args, **kwargs): return self.extra_overhead(self._real_torch.add(*args, **kwargs)) def mul(self, *args, **kwargs): return self.extra_overhead(self._real_torch.mul(*args, **kwargs)) def cat(self, *args, **kwargs): return self.extra_overhead(self._real_torch.cat(*args, **kwargs)) def matmul(self, *args, **kwargs): return self.extra_overhead(self._real_torch.matmul(*args, **kwargs)) def main(): tasks = [ ("add", "add", "torch.add(x, y)"), ("add", "add (extra +0)", "torch.add(x, y + zero)"), ] serialized_results = [] repeats = 2 timers = [ benchmark_utils.Timer( stmt=stmt, globals={ "torch": torch if branch == "master" else FauxTorch(torch, overhead_ns), "x": torch.ones((size, 4)), "y": torch.ones((1, 4)), "zero": torch.zeros(()), }, label=label, sub_label=sub_label, description=f"size: {size}", env=branch, num_threads=num_threads, ) for branch, overhead_ns in [("master", None), ("my_branch", 1), ("severe_regression", 5)] for label, sub_label, stmt in tasks for size in [1, 10, 100, 1000, 10000, 50000] for num_threads in [1, 4] ] for i, timer in enumerate(timers * repeats): serialized_results.append(pickle.dumps( timer.blocked_autorange(min_run_time=0.05) )) print(f"\r{i + 1} / {len(timers) * repeats}", end="") sys.stdout.flush() print() comparison = benchmark_utils.Compare([ pickle.loads(i) for i in serialized_results ]) print("== Unformatted " + "=" * 80 + "\n" + "/" * 95 + "\n") comparison.print() print("== Formatted " + "=" * 80 + "\n" + "/" * 93 + "\n") comparison.trim_significant_figures() comparison.colorize() comparison.print() if __name__ == "__main__": main()
pytorch-master
torch/utils/benchmark/examples/compare.py
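# Sketch of how `label`, `sub_label`, `description`, and `env` drive the Compare layout
# in the example above (illustrative only; both "branches" here run identical code, so
# the table merely demonstrates the grouping, not a real regression).
import torch
import torch.utils.benchmark as benchmark_utils

results = []
for env in ("master", "my_branch"):
    for size in (128, 1024):
        results.append(
            benchmark_utils.Timer(
                stmt="torch.add(x, y)",
                globals={"torch": torch, "x": torch.ones((size, 4)), "y": torch.ones((1, 4))},
                label="add",
                sub_label="broadcast add",
                description=f"size: {size}",
                env=env,
            ).blocked_autorange(min_run_time=0.05)
        )

comparison = benchmark_utils.Compare(results)
comparison.trim_significant_figures()
comparison.colorize()
comparison.print()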
"""Example use of Timer and sparse op fuzzers to measure kernel performance. $ python -m examples.sparse.op_benchmark """ import numpy as np import torch from torch.utils.benchmark import Timer from torch.utils.benchmark.op_fuzzers.sparse_unary import UnaryOpSparseFuzzer from torch.utils.benchmark.op_fuzzers.sparse_binary import BinaryOpSparseFuzzer _MEASURE_TIME = 1.0 def assert_dicts_equal(dict_0, dict_1): """Builtin dict comparison will not compare numpy arrays. e.g. x = {"a": np.ones((2, 1))} x == x # Raises ValueError """ assert set(dict_0.keys()) == set(dict_0.keys()) assert all(np.all(v == dict_1[k]) for k, v in dict_0.items() if k != "dtype") def run(n, stmt, fuzzer_cls): float_iter = fuzzer_cls(seed=0, dtype=torch.float32).take(n) double_iter = fuzzer_cls(seed=0, dtype=torch.float64).take(n) raw_results = [] for i, (float_values, int_values) in enumerate(zip(float_iter, double_iter)): float_tensors, float_tensor_params, float_params = float_values int_tensors, int_tensor_params, int_params = int_values assert_dicts_equal(float_params, int_params) assert_dicts_equal(float_tensor_params["x"], int_tensor_params["x"]) float_measurement, int_measurement = [ Timer( stmt, globals=tensors, ).blocked_autorange(min_run_time=_MEASURE_TIME) for tensors in (float_tensors, int_tensors) ] descriptions = [] for name in float_tensors: shape_str = "(" + ", ".join([ f"2 ** {int(np.log2(i))}" if 2 ** int(np.log2(i)) == i and i > 1 else str(i) for i in float_tensors[name].shape ]) + ")" sparse_dim = float_tensor_params[name]["sparse_dim"] sparse_dim_str = str(sparse_dim) is_coalesced = float_tensor_params[name]["is_coalesced"] is_coalesced_str = "True" if is_coalesced else "False" descriptions.append((name, shape_str, sparse_dim_str, is_coalesced_str)) raw_results.append((float_measurement, int_measurement, descriptions)) print(f"\r{i + 1} / {n}", end="") print() parsed_results, name_len, shape_len, sparse_dim_len, is_coalesced_len = [], 0, 0, 0, 0 for float_measurement, int_measurement, descriptions in raw_results: t_float = float_measurement.median * 1e6 t_int = int_measurement.median * 1e6 rel_diff = abs(t_float - t_int) / (t_float + t_int) * 2 parsed_results.append((t_float, t_int, rel_diff, descriptions)) for name, shape, sparse_dim, is_coalesced in descriptions: name_len = max(name_len, len(name)) shape_len = max(shape_len, len(shape)) sparse_dim_len = max(sparse_dim_len, len(sparse_dim)) is_coalesced_len = max(is_coalesced_len, len(is_coalesced)) parsed_results.sort(key=lambda x: x[2]) print(f"stmt: {stmt}") print(f" diff faster{'':>17}{' ' * name_len} ", end="") print(f"{'shape'.ljust(shape_len)}{'':>12}{'sparse_dim'.ljust(sparse_dim_len)}", end="") print(f" is_coalesced\n{'-' * 100}") for results, spacer in [(parsed_results[:10], "..."), (parsed_results[-10:], "")]: for t_float, t_int, rel_diff, descriptions in results: time_str = [f"{rel_diff * 100:>4.1f}% {'int' if t_int < t_float else 'float':<20}"] time_str.extend(["".ljust(len(time_str[0])) for _ in descriptions[:-1]]) for t_str, (name, shape, sparse_dim, is_coalesced) in zip(time_str, descriptions): name = f"{name}:".ljust(name_len + 1) shape = shape.ljust(shape_len + 10) sparse_dim = sparse_dim.ljust(sparse_dim_len) print(f"{t_str} {name} {shape}| {sparse_dim} | {is_coalesced}") print(spacer) def main(): run(n=100, stmt="torch.sparse.sum(x, dim=0)", fuzzer_cls=UnaryOpSparseFuzzer) run(n=100, stmt="torch.sparse.softmax(x, dim=0)", fuzzer_cls=UnaryOpSparseFuzzer) run(n=100, stmt="x + y", fuzzer_cls=BinaryOpSparseFuzzer) if __name__ == 
"__main__": main()
pytorch-master
torch/utils/benchmark/examples/sparse/op_benchmark.py
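# Sketch of pulling a single fuzzed sparse tensor and timing one op on it, mirroring
# the (tensors, tensor_params, params) triples the fuzzer yields in the script above
# (illustrative only; seed and stmt are arbitrary choices).
import torch
from torch.utils.benchmark import Timer
from torch.utils.benchmark.op_fuzzers.sparse_unary import UnaryOpSparseFuzzer

fuzzer = UnaryOpSparseFuzzer(seed=0, dtype=torch.float32)
tensors, tensor_params, params = next(iter(fuzzer.take(1)))
print("shape:", tensors["x"].shape, "sparse_dim:", tensor_params["x"]["sparse_dim"])

m = Timer(
    stmt="torch.sparse.sum(x, dim=0)",
    globals={**tensors, "torch": torch},
).blocked_autorange(min_run_time=0.2)
print(m)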
"""Example of the Timer and Sparse Fuzzer APIs: $ python -m examples.sparse.fuzzer """ import sys import torch.utils.benchmark as benchmark_utils def main(): add_fuzzer = benchmark_utils.Fuzzer( parameters=[ [ benchmark_utils.FuzzedParameter( name=f"k{i}", minval=16, maxval=16 * 1024, distribution="loguniform", ) for i in range(3) ], benchmark_utils.FuzzedParameter( name="dim_parameter", distribution={2: 0.6, 3: 0.4}, ), benchmark_utils.FuzzedParameter( name="sparse_dim", distribution={1: 0.3, 2: 0.4, 3: 0.3}, ), benchmark_utils.FuzzedParameter( name="density", distribution={0.1: 0.4, 0.05: 0.3, 0.01: 0.3}, ), benchmark_utils.FuzzedParameter( name="coalesced", distribution={True: 0.7, False: 0.3}, ) ], tensors=[ [ benchmark_utils.FuzzedSparseTensor( name=name, size=tuple([f"k{i}" for i in range(3)]), min_elements=64 * 1024, max_elements=128 * 1024, sparse_dim="sparse_dim", density="density", dim_parameter="dim_parameter", coalesced="coalesced" ) for name in ("x", "y") ], ], seed=0, ) n = 100 measurements = [] for i, (tensors, tensor_properties, _) in enumerate(add_fuzzer.take(n=n)): x = tensors["x"] y = tensors["y"] shape = ", ".join(tuple(f'{i:>4}' for i in x.shape)) x_tensor_properties = tensor_properties["x"] description = "".join([ f"| {shape:<20} | ", f"{x_tensor_properties['sparsity']:>9.2f} | ", f"{x_tensor_properties['sparse_dim']:>9d} | ", f"{x_tensor_properties['dense_dim']:>9d} | ", f"{('True' if x_tensor_properties['is_hybrid'] else 'False'):>9} | ", f"{('True' if x.is_coalesced() else 'False'):>9} | " ]) timer = benchmark_utils.Timer( stmt="torch.sparse.sum(x) + torch.sparse.sum(y)", globals=tensors, description=description, ) measurements.append(timer.blocked_autorange(min_run_time=0.1)) measurements[-1].metadata = {"nnz": x._nnz()} print(f"\r{i + 1} / {n}", end="") sys.stdout.flush() print() # More string munging to make pretty output. print(f"Average attemts per valid config: {1. / (1. - add_fuzzer.rejection_rate):.1f}") def time_fn(m): return m.mean / m.metadata["nnz"] measurements.sort(key=time_fn) template = f"{{:>6}}{' ' * 16} Shape{' ' * 17}\ sparsity{' ' * 4}sparse_dim{' ' * 4}dense_dim{' ' * 4}hybrid{' ' * 4}coalesced\n{'-' * 108}" print(template.format("Best:")) for m in measurements[:10]: print(f"{time_fn(m) * 1e9:>5.2f} ns / element {m.description}") print("\n" + template.format("Worst:")) for m in measurements[-10:]: print(f"{time_fn(m) * 1e9:>5.2f} ns / element {m.description}") if __name__ == "__main__": main()
pytorch-master
torch/utils/benchmark/examples/sparse/fuzzer.py
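# Sketch of the "attach metadata, then rank by a derived metric" pattern used above
# (illustrative only; the nnz values are made up rather than drawn from a fuzzer, and
# coalescing may merge duplicate indices, so the effective nnz can be slightly lower).
import torch
import torch.utils.benchmark as benchmark_utils

measurements = []
for nnz in (1_000, 10_000):
    i = torch.randint(0, 1_000, (2, nnz))
    v = torch.rand(nnz)
    x = torch.sparse_coo_tensor(i, v, (1_000, 1_000)).coalesce()
    m = benchmark_utils.Timer(
        stmt="torch.sparse.sum(x)",
        globals={"torch": torch, "x": x},
        description=f"nnz={nnz}",
    ).blocked_autorange(min_run_time=0.1)
    m.metadata = {"nnz": x._nnz()}
    measurements.append(m)

# Normalize by problem size before ranking, as the fuzzer example does.
measurements.sort(key=lambda m: m.mean / m.metadata["nnz"])
for m in measurements:
    print(f"{m.mean / m.metadata['nnz'] * 1e9:.2f} ns / element  {m.description}")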
"""Example of Timer and Compare APIs: $ python -m examples.sparse.compare """ import pickle import sys import time import torch import torch.utils.benchmark as benchmark_utils class FauxTorch(object): """Emulate different versions of pytorch. In normal circumstances this would be done with multiple processes writing serialized measurements, but this simplifies that model to make the example clearer. """ def __init__(self, real_torch, extra_ns_per_element): self._real_torch = real_torch self._extra_ns_per_element = extra_ns_per_element @property def sparse(self): return self.Sparse(self._real_torch, self._extra_ns_per_element) class Sparse: def __init__(self, real_torch, extra_ns_per_element): self._real_torch = real_torch self._extra_ns_per_element = extra_ns_per_element def extra_overhead(self, result): # time.sleep has a ~65 us overhead, so only fake a # per-element overhead if numel is large enough. size = sum(result.size()) if size > 5000: time.sleep(size * self._extra_ns_per_element * 1e-9) return result def mm(self, *args, **kwargs): return self.extra_overhead(self._real_torch.sparse.mm(*args, **kwargs)) def generate_coo_data(size, sparse_dim, nnz, dtype, device): """ Parameters ---------- size : tuple sparse_dim : int nnz : int dtype : torch.dtype device : str Returns ------- indices : torch.tensor values : torch.tensor """ if dtype is None: dtype = 'float32' indices = torch.rand(sparse_dim, nnz, device=device) indices.mul_(torch.tensor(size[:sparse_dim]).unsqueeze(1).to(indices)) indices = indices.to(torch.long) values = torch.rand([nnz, ], dtype=dtype, device=device) return indices, values def gen_sparse(size, density, dtype, device='cpu'): sparse_dim = len(size) nnz = int(size[0] * size[1] * density) indices, values = generate_coo_data(size, sparse_dim, nnz, dtype, device) return torch.sparse_coo_tensor(indices, values, size, dtype=dtype, device=device) def main(): tasks = [ ("matmul", "x @ y", "torch.sparse.mm(x, y)"), ("matmul", "x @ y + 0", "torch.sparse.mm(x, y) + zero"), ] serialized_results = [] repeats = 2 timers = [ benchmark_utils.Timer( stmt=stmt, globals={ "torch": torch if branch == "master" else FauxTorch(torch, overhead_ns), "x": gen_sparse(size=size, density=density, dtype=torch.float32), "y": torch.rand(size, dtype=torch.float32), "zero": torch.zeros(()), }, label=label, sub_label=sub_label, description=f"size: {size}", env=branch, num_threads=num_threads, ) for branch, overhead_ns in [("master", None), ("my_branch", 1), ("severe_regression", 10)] for label, sub_label, stmt in tasks for density in [0.05, 0.1] for size in [(8, 8), (32, 32), (64, 64), (128, 128)] for num_threads in [1, 4] ] for i, timer in enumerate(timers * repeats): serialized_results.append(pickle.dumps( timer.blocked_autorange(min_run_time=0.05) )) print(f"\r{i + 1} / {len(timers) * repeats}", end="") sys.stdout.flush() print() comparison = benchmark_utils.Compare([ pickle.loads(i) for i in serialized_results ]) print("== Unformatted " + "=" * 80 + "\n" + "/" * 95 + "\n") comparison.print() print("== Formatted " + "=" * 80 + "\n" + "/" * 93 + "\n") comparison.trim_significant_figures() comparison.colorize() comparison.print() if __name__ == "__main__": main()
pytorch-master
torch/utils/benchmark/examples/sparse/compare.py
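# Sketch of the sparse @ dense product being timed above, at a single size and density,
# using the same COO construction idea as gen_sparse() (illustrative only; the size and
# density are arbitrary assumptions).
import torch

size, density = (128, 128), 0.1
nnz = int(size[0] * size[1] * density)
# Scale uniform samples by the extent of each dimension, then floor to integer indices.
indices = (torch.rand(2, nnz) * torch.tensor(size).unsqueeze(1)).long()
values = torch.rand(nnz)
x = torch.sparse_coo_tensor(indices, values, size)
y = torch.rand(size)

out = torch.sparse.mm(x, y)  # sparse x dense -> dense
print(out.shape)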
from torch._C import _set_backcompat_broadcast_warn
from torch._C import _get_backcompat_broadcast_warn
from torch._C import _set_backcompat_keepdim_warn
from torch._C import _get_backcompat_keepdim_warn


class Warning(object):
    def __init__(self, setter, getter):
        self.setter = setter
        self.getter = getter

    def set_enabled(self, value):
        self.setter(value)

    def get_enabled(self):
        return self.getter()

    enabled = property(get_enabled, set_enabled)


broadcast_warning = Warning(_set_backcompat_broadcast_warn, _get_backcompat_broadcast_warn)
keepdim_warning = Warning(_set_backcompat_keepdim_warn, _get_backcompat_keepdim_warn)
pytorch-master
torch/utils/backcompat/__init__.py
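# Usage sketch for the Warning wrapper defined above: toggling the flag goes through the
# `enabled` property, which forwards to the torch._C setter/getter pair.
# (Illustrative only; run under a PyTorch build that still ships torch.utils.backcompat.)
import torch.utils.backcompat as backcompat

backcompat.broadcast_warning.enabled = True
print(backcompat.broadcast_warning.enabled)  # True
backcompat.broadcast_warning.enabled = False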
__version__ = '1.0.0'
pytorch-master
torch/utils/hipify/version.py
import collections
import os
import re
import subprocess

from .constants import (API_BLAS, API_C10, API_CAFFE2, API_DRIVER, API_FFT,
                        API_PYTORCH, API_RAND, API_ROCTX, API_RTC, API_RUNTIME,
                        API_SPARSE, CONV_CACHE, CONV_CONTEXT, CONV_D3D9,
                        CONV_D3D10, CONV_D3D11, CONV_DEF, CONV_DEVICE,
                        CONV_DEVICE_FUNC, CONV_EGL, CONV_ERROR, CONV_EVENT,
                        CONV_EXEC, CONV_GL, CONV_GRAPHICS, CONV_INCLUDE,
                        CONV_INCLUDE_CUDA_MAIN_H, CONV_INIT, CONV_JIT,
                        CONV_MATH_FUNC, CONV_MEM, CONV_MODULE,
                        CONV_NUMERIC_LITERAL, CONV_OCCUPANCY, CONV_OTHER,
                        CONV_PEER, CONV_SPECIAL_FUNC, CONV_STREAM,
                        CONV_SURFACE, CONV_TEX, CONV_THREAD, CONV_TYPE,
                        CONV_VDPAU, CONV_VERSION, HIP_UNSUPPORTED)

"""Mapping of CUDA functions, include files, constants, and types to ROCm/HIP equivalents.

This closely follows the implementation in hipify-clang
https://github.com/ROCm-Developer-Tools/HIP/blob/master/hipify-clang/src/CUDA2HipMap.cpp
and its structure.

There are different maps for fundamental names, include files, identifiers, sparse,
and PyTorch specific translations.

Each of the entries in these maps translates a CUDA string to a tuple containing the
ROCm/HIP string, a type and API annotation and - optionally - an annotation if it is
not supported in ROCm/HIP yet.
"""

# We need to know the ROCm version so we can conditionalize some of the mappings later.
# As of ROCm 5.0, the version is found in rocm_version.h header file under /opt/rocm/include.
rocm_path = os.environ.get('ROCM_HOME') or os.environ.get('ROCM_PATH') or "/opt/rocm"
try:
    rocm_path = subprocess.check_output(["hipconfig", "--rocmpath"]).decode("utf-8")
except subprocess.CalledProcessError:
    print(f"Warning: hipconfig --rocmpath failed, assuming {rocm_path}")
except FileNotFoundError:
    # Do not print warning. This is okay. This file can also be imported for non-ROCm builds.
    pass
except PermissionError:
    pass

rocm_version = (0, 0, 0)
rocm_version_h = f"{rocm_path}/include/rocm_version.h"
# The file could be missing due to 1) ROCm version < 5.0, or 2) no ROCm install.
if os.path.isfile(rocm_version_h):
    RE_MAJOR = re.compile(r"#define\s+ROCM_VERSION_MAJOR\s+(\d+)")
    RE_MINOR = re.compile(r"#define\s+ROCM_VERSION_MINOR\s+(\d+)")
    RE_PATCH = re.compile(r"#define\s+ROCM_VERSION_PATCH\s+(\d+)")
    major, minor, patch = 0, 0, 0
    for line in open(rocm_version_h, "r"):
        match = RE_MAJOR.search(line)
        if match:
            major = int(match.group(1))
        match = RE_MINOR.search(line)
        if match:
            minor = int(match.group(1))
        match = RE_PATCH.search(line)
        if match:
            patch = int(match.group(1))
    rocm_version = (major, minor, patch)

# List of math functions that should be replaced inside device code only.
MATH_TRANSPILATIONS = collections.OrderedDict( [ ("std::max", ("::max")), ("std::min", ("::min")), ("std::ceil", ("::ceil")), ("std::floor", ("::floor")), ("std::exp", ("::exp")), ("std::log", ("::log")), ("std::pow", ("::pow")), ("std::fabs", ("::fabs")), ("std::fmod", ("::fmod")), ("std::remainder", ("::remainder")), ("std::frexp", ("::frexp")), ] ) CUDA_TYPE_NAME_MAP = collections.OrderedDict( [ ("CUresult", ("hipError_t", CONV_TYPE, API_DRIVER)), ("cudaError_t", ("hipError_t", CONV_TYPE, API_RUNTIME)), ( "CUDA_ARRAY3D_DESCRIPTOR", ("HIP_ARRAY3D_DESCRIPTOR", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ("CUDA_ARRAY_DESCRIPTOR", ("HIP_ARRAY_DESCRIPTOR", CONV_TYPE, API_DRIVER)), ("CUDA_MEMCPY2D", ("hip_Memcpy2D", CONV_TYPE, API_DRIVER)), ("CUDA_MEMCPY3D", ("HIP_MEMCPY3D", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)), ( "CUDA_MEMCPY3D_PEER", ("HIP_MEMCPY3D_PEER", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CUDA_POINTER_ATTRIBUTE_P2P_TOKENS", ( "HIP_POINTER_ATTRIBUTE_P2P_TOKENS", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "CUDA_RESOURCE_DESC", ("HIP_RESOURCE_DESC", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CUDA_RESOURCE_VIEW_DESC", ("HIP_RESOURCE_VIEW_DESC", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CUipcEventHandle", ("hipIpcEventHandle", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ("CUipcMemHandle", ("hipIpcMemHandle", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)), ("CUaddress_mode", ("hipAddress_mode", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)), ( "CUarray_cubemap_face", ("hipArray_cubemap_face", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ("CUarray_format", ("hipArray_format", CONV_TYPE, API_DRIVER)), ("CUcomputemode", ("hipComputemode", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)), ("CUmem_advise", ("hipMemAdvise", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)), ( "CUmem_range_attribute", ("hipMemRangeAttribute", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ("CUctx_flags", ("hipCctx_flags", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)), ("CUdevice", ("hipDevice_t", CONV_TYPE, API_DRIVER)), ("CUdevice_attribute_enum", ("hipDeviceAttribute_t", CONV_TYPE, API_DRIVER)), ("CUdevice_attribute", ("hipDeviceAttribute_t", CONV_TYPE, API_DRIVER)), ("CUdeviceptr", ("hipDeviceptr_t", CONV_TYPE, API_DRIVER)), ("CUarray_st", ("hipArray", CONV_TYPE, API_DRIVER)), ("CUarray", ("hipArray *", CONV_TYPE, API_DRIVER)), ("CUdevprop_st", ("hipDeviceProp_t", CONV_TYPE, API_DRIVER)), ("CUdevprop", ("hipDeviceProp_t", CONV_TYPE, API_DRIVER)), ("CUfunction", ("hipFunction_t", CONV_TYPE, API_DRIVER)), ( "CUgraphicsResource", ("hipGraphicsResource_t", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CUmipmappedArray", ("hipMipmappedArray_t", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CUfunction_attribute", ("hipFuncAttribute_t", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CUfunction_attribute_enum", ("hipFuncAttribute_t", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CUgraphicsMapResourceFlags", ("hipGraphicsMapFlags", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CUgraphicsMapResourceFlags_enum", ("hipGraphicsMapFlags", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CUgraphicsRegisterFlags", ("hipGraphicsRegisterFlags", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CUgraphicsRegisterFlags_enum", ("hipGraphicsRegisterFlags", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CUoccupancy_flags", ("hipOccupancyFlags", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CUoccupancy_flags_enum", ("hipOccupancyFlags", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ("CUfunc_cache_enum", ("hipFuncCache", CONV_TYPE, API_DRIVER)), 
("CUfunc_cache", ("hipFuncCache", CONV_TYPE, API_DRIVER)), ("CUipcMem_flags", ("hipIpcMemFlags", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)), ( "CUipcMem_flags_enum", ("hipIpcMemFlags", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ("CUjit_cacheMode", ("hipJitCacheMode", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED)), ( "CUjit_cacheMode_enum", ("hipJitCacheMode", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED), ), ("CUjit_fallback", ("hipJitFallback", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED)), ( "CUjit_fallback_enum", ("hipJitFallback", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED), ), ("CUjit_option", ("hipJitOption", CONV_JIT, API_DRIVER)), ("CUjit_option_enum", ("hipJitOption", CONV_JIT, API_DRIVER)), ("CUjit_target", ("hipJitTarget", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED)), ("CUjit_target_enum", ("hipJitTarget", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED)), ("CUjitInputType", ("hipJitInputType", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED)), ( "CUjitInputType_enum", ("hipJitInputType", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED), ), ("CUlimit", ("hipLimit_t", CONV_TYPE, API_DRIVER)), ("CUlimit_enum", ("hipLimit_t", CONV_TYPE, API_DRIVER)), ( "CUmemAttach_flags", ("hipMemAttachFlags_t", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CUmemAttach_flags_enum", ("hipMemAttachFlags_t", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ("CUmemorytype", ("hipMemType_t", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)), ("CUmemorytype_enum", ("hipMemType_t", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)), ("CUresourcetype", ("hipResourceType", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED)), ( "CUresourcetype_enum", ("hipResourceType", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), ), ("CUresourceViewFormat", ("hipResourceViewFormat", CONV_TEX, API_DRIVER)), ("CUresourceViewFormat_enum", ("hipResourceViewFormat", CONV_TEX, API_DRIVER)), ("CUsharedconfig", ("hipSharedMemConfig", CONV_TYPE, API_DRIVER)), ("CUsharedconfig_enum", ("hipSharedMemConfig", CONV_TYPE, API_DRIVER)), ("CUcontext", ("hipCtx_t", CONV_TYPE, API_DRIVER)), ("CUmodule", ("hipModule_t", CONV_TYPE, API_DRIVER)), ("CUstream", ("hipStream_t", CONV_TYPE, API_DRIVER)), ("CUstream_st", ("ihipStream_t", CONV_TYPE, API_DRIVER)), ("CUstreamCallback", ("hipStreamCallback_t", CONV_TYPE, API_DRIVER)), ("CUsurfObject", ("hipSurfaceObject", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)), ( "CUsurfref", ("hipSurfaceReference_t", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ("CUtexObject", ("hipTextureObject_t", CONV_TYPE, API_DRIVER)), ("CUtexref", ("textureReference", CONV_TYPE, API_DRIVER)), ("CUstream_flags", ("hipStreamFlags", CONV_TYPE, API_DRIVER)), ( "CUstreamWaitValue_flags", ("hipStreamWaitValueFlags", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CUstreamWriteValue_flags", ("hipStreamWriteValueFlags", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CUstreamBatchMemOpType", ("hipStreamBatchMemOpType", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CUdevice_P2PAttribute", ("hipDeviceP2PAttribute", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ("CUevent", ("hipEvent_t", CONV_TYPE, API_DRIVER)), ("CUevent_st", ("ihipEvent_t", CONV_TYPE, API_DRIVER)), ("CUevent_flags", ("hipEventFlags", CONV_EVENT, API_DRIVER, HIP_UNSUPPORTED)), ("CUfilter_mode", ("hipTextureFilterMode", CONV_TEX, API_DRIVER)), ("CUGLDeviceList", ("hipGLDeviceList", CONV_GL, API_DRIVER, HIP_UNSUPPORTED)), ("CUGLmap_flags", ("hipGLMapFlags", CONV_GL, API_DRIVER, HIP_UNSUPPORTED)), ( "CUd3d9DeviceList", ("hipD3D9DeviceList", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED), ), ( "CUd3d9map_flags", ("hipD3D9MapFlags", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED), ), ( 
"CUd3d9register_flags", ("hipD3D9RegisterFlags", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED), ), ( "CUd3d10DeviceList", ("hipd3d10DeviceList", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED), ), ( "CUd3d10map_flags", ("hipD3D10MapFlags", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED), ), ( "CUd3d10register_flags", ("hipD3D10RegisterFlags", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED), ), ( "CUd3d11DeviceList", ("hipd3d11DeviceList", CONV_D3D11, API_DRIVER, HIP_UNSUPPORTED), ), ( "CUeglStreamConnection_st", ("hipEglStreamConnection", CONV_EGL, API_DRIVER, HIP_UNSUPPORTED), ), ( "CUeglStreamConnection", ("hipEglStreamConnection", CONV_EGL, API_DRIVER, HIP_UNSUPPORTED), ), ( "libraryPropertyType_t", ("hipLibraryPropertyType_t", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), ), ( "libraryPropertyType", ("hipLibraryPropertyType_t", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), ), ("cudaStreamCallback_t", ("hipStreamCallback_t", CONV_TYPE, API_RUNTIME)), ("cudaArray", ("hipArray", CONV_MEM, API_RUNTIME)), ("cudaArray_t", ("hipArray_t", CONV_MEM, API_RUNTIME)), ("cudaArray_const_t", ("hipArray_const_t", CONV_MEM, API_RUNTIME)), ("cudaMipmappedArray_t", ("hipMipmappedArray_t", CONV_MEM, API_RUNTIME)), ( "cudaMipmappedArray_const_t", ("hipMipmappedArray_const_t", CONV_MEM, API_RUNTIME), ), ("cudaArrayDefault", ("hipArrayDefault", CONV_MEM, API_RUNTIME)), ("cudaArrayLayered", ("hipArrayLayered", CONV_MEM, API_RUNTIME)), ( "cudaArraySurfaceLoadStore", ("hipArraySurfaceLoadStore", CONV_MEM, API_RUNTIME), ), ("cudaArrayCubemap", ("hipArrayCubemap", CONV_MEM, API_RUNTIME)), ("cudaArrayTextureGather", ("hipArrayTextureGather", CONV_MEM, API_RUNTIME)), ("cudaMemoryAdvise", ("hipMemAdvise", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)), ( "cudaMemRangeAttribute", ("hipMemRangeAttribute", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), ), ("cudaMemcpyKind", ("hipMemcpyKind", CONV_MEM, API_RUNTIME)), ("cudaMemoryType", ("hipMemoryType", CONV_MEM, API_RUNTIME)), ("cudaExtent", ("hipExtent", CONV_MEM, API_RUNTIME)), ("cudaPitchedPtr", ("hipPitchedPtr", CONV_MEM, API_RUNTIME)), ("cudaPos", ("hipPos", CONV_MEM, API_RUNTIME)), ("cudaEvent_t", ("hipEvent_t", CONV_TYPE, API_RUNTIME)), ("cudaStream_t", ("hipStream_t", CONV_TYPE, API_RUNTIME)), ("cudaPointerAttributes", ("hipPointerAttribute_t", CONV_TYPE, API_RUNTIME)), ("cudaDeviceAttr", ("hipDeviceAttribute_t", CONV_TYPE, API_RUNTIME)), ("cudaDeviceProp", ("hipDeviceProp_t", CONV_TYPE, API_RUNTIME)), ( "cudaDeviceP2PAttr", ("hipDeviceP2PAttribute", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaComputeMode", ("hipComputeMode", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), ), ("cudaFuncCache", ("hipFuncCache_t", CONV_CACHE, API_RUNTIME)), ( "cudaFuncAttributes", ("hipFuncAttributes", CONV_EXEC, API_RUNTIME, HIP_UNSUPPORTED), ), ("cudaSharedMemConfig", ("hipSharedMemConfig", CONV_TYPE, API_RUNTIME)), ("cudaLimit", ("hipLimit_t", CONV_TYPE, API_RUNTIME)), ("cudaOutputMode", ("hipOutputMode", CONV_OTHER, API_RUNTIME, HIP_UNSUPPORTED)), ("cudaTextureReadMode", ("hipTextureReadMode", CONV_TEX, API_RUNTIME)), ("cudaTextureFilterMode", ("hipTextureFilterMode", CONV_TEX, API_RUNTIME)), ("cudaChannelFormatKind", ("hipChannelFormatKind", CONV_TEX, API_RUNTIME)), ("cudaChannelFormatDesc", ("hipChannelFormatDesc", CONV_TEX, API_RUNTIME)), ("cudaResourceDesc", ("hipResourceDesc", CONV_TEX, API_RUNTIME)), ("cudaResourceViewDesc", ("hipResourceViewDesc", CONV_TEX, API_RUNTIME)), ("cudaTextureDesc", ("hipTextureDesc", CONV_TEX, API_RUNTIME)), ( "surfaceReference", ("hipSurfaceReference", CONV_SURFACE, API_RUNTIME, 
HIP_UNSUPPORTED), ), ("cudaTextureObject_t", ("hipTextureObject_t", CONV_TEX, API_RUNTIME)), ("cudaResourceType", ("hipResourceType", CONV_TEX, API_RUNTIME)), ("cudaResourceViewFormat", ("hipResourceViewFormat", CONV_TEX, API_RUNTIME)), ("cudaTextureAddressMode", ("hipTextureAddressMode", CONV_TEX, API_RUNTIME)), ( "cudaSurfaceBoundaryMode", ("hipSurfaceBoundaryMode", CONV_SURFACE, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaSurfaceFormatMode", ("hipSurfaceFormatMode", CONV_SURFACE, API_RUNTIME, HIP_UNSUPPORTED), ), ("cudaTextureType1D", ("hipTextureType1D", CONV_TEX, API_RUNTIME)), ("cudaTextureType2D", ("hipTextureType2D", CONV_TEX, API_RUNTIME)), ("cudaTextureType3D", ("hipTextureType3D", CONV_TEX, API_RUNTIME)), ("cudaTextureTypeCubemap", ("hipTextureTypeCubemap", CONV_TEX, API_RUNTIME)), ( "cudaTextureType1DLayered", ("hipTextureType1DLayered", CONV_TEX, API_RUNTIME), ), ( "cudaTextureType2DLayered", ("hipTextureType2DLayered", CONV_TEX, API_RUNTIME), ), ( "cudaTextureTypeCubemapLayered", ("hipTextureTypeCubemapLayered", CONV_TEX, API_RUNTIME), ), ("cudaIpcEventHandle_t", ("hipIpcEventHandle_t", CONV_TYPE, API_RUNTIME)), ("cudaIpcEventHandle_st", ("hipIpcEventHandle_t", CONV_TYPE, API_RUNTIME)), ("cudaIpcMemHandle_t", ("hipIpcMemHandle_t", CONV_TYPE, API_RUNTIME)), ("cudaIpcMemHandle_st", ("hipIpcMemHandle_t", CONV_TYPE, API_RUNTIME)), ( "cudaGraphicsCubeFace", ("hipGraphicsCubeFace", CONV_GRAPHICS, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaGraphicsMapFlags", ("hipGraphicsMapFlags", CONV_GRAPHICS, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaGraphicsRegisterFlags", ("hipGraphicsRegisterFlags", CONV_GRAPHICS, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaGLDeviceList", ("hipGLDeviceList", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED), ), ("cudaGLMapFlags", ("hipGLMapFlags", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED)), ( "cudaD3D9DeviceList", ("hipD3D9DeviceList", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaD3D9MapFlags", ("hipD3D9MapFlags", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaD3D9RegisterFlags", ("hipD3D9RegisterFlags", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaD3D10DeviceList", ("hipd3d10DeviceList", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaD3D10MapFlags", ("hipD3D10MapFlags", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaD3D10RegisterFlags", ("hipD3D10RegisterFlags", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaD3D11DeviceList", ("hipd3d11DeviceList", CONV_D3D11, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaEglStreamConnection", ("hipEglStreamConnection", CONV_EGL, API_RUNTIME, HIP_UNSUPPORTED), ), ("cublasHandle_t", ("rocblas_handle", CONV_TYPE, API_BLAS)), ("cublasOperation_t", ("rocblas_operation", CONV_TYPE, API_BLAS)), ("cublasStatus_t", ("rocblas_status", CONV_TYPE, API_BLAS)), ("cublasFillMode_t", ("rocblas_fill", CONV_TYPE, API_BLAS)), ("cublasDiagType_t", ("rocblas_diagonal", CONV_TYPE, API_BLAS)), ("cublasSideMode_t", ("rocblas_side", CONV_TYPE, API_BLAS)), ("cublasPointerMode_t", ("rocblas_pointer_mode", CONV_TYPE, API_BLAS)), ( "cublasAtomicsMode_t", ("rocblas_atomics_mode", CONV_TYPE, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasDataType_t", ("rocblas_data_type", CONV_TYPE, API_BLAS, HIP_UNSUPPORTED), ), ("curandStatus", ("hiprandStatus_t", CONV_TYPE, API_RAND)), ("curandStatus_t", ("hiprandStatus_t", CONV_TYPE, API_RAND)), ("curandRngType", ("hiprandRngType_t", CONV_TYPE, API_RAND)), ("curandRngType_t", ("hiprandRngType_t", CONV_TYPE, API_RAND)), ("curandGenerator_st", ("hiprandGenerator_st", CONV_TYPE, API_RAND)), ("curandGenerator_t", 
("hiprandGenerator_t", CONV_TYPE, API_RAND)), ( "curandDirectionVectorSet", ("hiprandDirectionVectorSet_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED), ), ( "curandDirectionVectorSet_t", ("hiprandDirectionVectorSet_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED), ), ("curandOrdering", ("hiprandOrdering_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED)), ( "curandOrdering_t", ("hiprandOrdering_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED), ), ( "curandDistribution_st", ("hiprandDistribution_st", CONV_TYPE, API_RAND, HIP_UNSUPPORTED), ), ( "curandHistogramM2V_st", ("hiprandDistribution_st", CONV_TYPE, API_RAND, HIP_UNSUPPORTED), ), ( "curandDistribution_t", ("hiprandDistribution_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED), ), ( "curandHistogramM2V_t", ("hiprandDistribution_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED), ), ( "curandDistributionShift_st", ("hiprandDistributionShift_st", CONV_TYPE, API_RAND, HIP_UNSUPPORTED), ), ( "curandDistributionShift_t", ("hiprandDistributionShift_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED), ), ( "curandDistributionM2Shift_st", ("hiprandDistributionM2Shift_st", CONV_TYPE, API_RAND, HIP_UNSUPPORTED), ), ( "curandDistributionM2Shift_t", ("hiprandDistributionM2Shift_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED), ), ( "curandHistogramM2_st", ("hiprandHistogramM2_st", CONV_TYPE, API_RAND, HIP_UNSUPPORTED), ), ( "curandHistogramM2_t", ("hiprandHistogramM2_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED), ), ( "curandHistogramM2K_st", ("hiprandHistogramM2K_st", CONV_TYPE, API_RAND, HIP_UNSUPPORTED), ), ( "curandHistogramM2K_t", ("hiprandHistogramM2K_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED), ), ( "curandDiscreteDistribution_st", ("hiprandDiscreteDistribution_st", CONV_TYPE, API_RAND), ), ( "curandDiscreteDistribution_t", ("hiprandDiscreteDistribution_t", CONV_TYPE, API_RAND), ), ("curandMethod", ("hiprandMethod_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED)), ("curandMethod_t", ("hiprandMethod_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED)), ( "curandDirectionVectors32_t", ("hiprandDirectionVectors32_t", CONV_TYPE, API_RAND), ), ( "curandDirectionVectors64_t", ("hiprandDirectionVectors64_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED), ), ("curandStateMtgp32_t", ("hiprandStateMtgp32_t", CONV_TYPE, API_RAND)), ("curandStateMtgp32", ("hiprandStateMtgp32_t", CONV_TYPE, API_RAND)), ( "curandStateScrambledSobol64_t", ("hiprandStateScrambledSobol64_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED), ), ( "curandStateSobol64_t", ("hiprandStateSobol64_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED), ), ( "curandStateScrambledSobol32_t", ("hiprandStateScrambledSobol32_t", CONV_TYPE, API_RAND, HIP_UNSUPPORTED), ), ("curandStateSobol32_t", ("hiprandStateSobol32_t", CONV_TYPE, API_RAND)), ("curandStateMRG32k3a_t", ("hiprandStateMRG32k3a_t", CONV_TYPE, API_RAND)), ( "curandStatePhilox4_32_10_t", ("hiprandStatePhilox4_32_10_t", CONV_TYPE, API_RAND), ), ("curandStateXORWOW_t", ("hiprandStateXORWOW_t", CONV_TYPE, API_RAND)), ("curandState_t", ("hiprandState_t", CONV_TYPE, API_RAND)), ("curandState", ("hiprandState_t", CONV_TYPE, API_RAND)), ] ) CUDA_INCLUDE_MAP = collections.OrderedDict( [ # since pytorch uses "\b{pattern}\b" as the actual re pattern, # patterns listed here have to begin and end with alnum chars ( "include <cuda.h", ("include <hip/hip_runtime.h", CONV_INCLUDE_CUDA_MAIN_H, API_DRIVER), ), ( 'include "cuda.h', ('include "hip/hip_runtime.h', CONV_INCLUDE_CUDA_MAIN_H, API_DRIVER), ), ( "cuda_runtime.h", ("hip/hip_runtime.h", CONV_INCLUDE_CUDA_MAIN_H, API_RUNTIME), ), ("cuda_runtime_api.h", ("hip/hip_runtime_api.h", CONV_INCLUDE, 
API_RUNTIME)), ( "channel_descriptor.h", ("hip/channel_descriptor.h", CONV_INCLUDE, API_RUNTIME), ), ("device_functions.h", ("hip/device_functions.h", CONV_INCLUDE, API_RUNTIME)), ("driver_types.h", ("hip/driver_types.h", CONV_INCLUDE, API_RUNTIME)), ("library_types.h", ("hip/library_types.h", CONV_INCLUDE, API_RUNTIME)), ("cuComplex.h", ("hip/hip_complex.h", CONV_INCLUDE, API_RUNTIME)), ("cuda_fp16.h", ("hip/hip_fp16.h", CONV_INCLUDE, API_RUNTIME)), ( "cuda_texture_types.h", ("hip/hip_texture_types.h", CONV_INCLUDE, API_RUNTIME), ), ("vector_types.h", ("hip/hip_vector_types.h", CONV_INCLUDE, API_RUNTIME)), ("cublas.h", ("rocblas.h" if rocm_version < (5, 2, 0) else "rocblas/rocblas.h", CONV_INCLUDE_CUDA_MAIN_H, API_BLAS)), ("cublas_v2.h", ("rocblas.h" if rocm_version < (5, 2, 0) else "rocblas/rocblas.h", CONV_INCLUDE_CUDA_MAIN_H, API_BLAS)), ("curand.h", ("hiprand/hiprand.h", CONV_INCLUDE_CUDA_MAIN_H, API_RAND)), ("curand_kernel.h", ("hiprand/hiprand_kernel.h", CONV_INCLUDE, API_RAND)), ("curand_discrete.h", ("hiprand/hiprand_kernel.h", CONV_INCLUDE, API_RAND)), ("curand_discrete2.h", ("hiprand/hiprand_kernel.h", CONV_INCLUDE, API_RAND)), ("curand_globals.h", ("hiprand/hiprand_kernel.h", CONV_INCLUDE, API_RAND)), ("curand_lognormal.h", ("hiprand/hiprand_kernel.h", CONV_INCLUDE, API_RAND)), ("curand_mrg32k3a.h", ("hiprand/hiprand_kernel.h", CONV_INCLUDE, API_RAND)), ("curand_mtgp32.h", ("hiprand/hiprand_kernel.h", CONV_INCLUDE, API_RAND)), ("curand_mtgp32_host.h", ("hiprand/hiprand_mtgp32_host.h", CONV_INCLUDE, API_RAND)), ("curand_mtgp32_kernel.h", ("hiprand/hiprand_kernel.h", CONV_INCLUDE, API_RAND)), ( "curand_mtgp32dc_p_11213.h", ("rocrand/rocrand_mtgp32_11213.h", CONV_INCLUDE, API_RAND), ), ("curand_normal.h", ("hiprand/hiprand_kernel.h", CONV_INCLUDE, API_RAND)), ("curand_normal_static.h", ("hiprand/hiprand_kernel.h", CONV_INCLUDE, API_RAND)), ("curand_philox4x32_x.h", ("hiprand/hiprand_kernel.h", CONV_INCLUDE, API_RAND)), ("curand_poisson.h", ("hiprand/hiprand_kernel.h", CONV_INCLUDE, API_RAND)), ("curand_precalc.h", ("hiprand/hiprand_kernel.h", CONV_INCLUDE, API_RAND)), ("curand_uniform.h", ("hiprand/hiprand_kernel.h", CONV_INCLUDE, API_RAND)), ("cusparse.h", ("hipsparse.h", CONV_INCLUDE, API_RAND)), ("cufft.h", ("hipfft.h", CONV_INCLUDE, API_BLAS)), ("cufftXt.h", ("hipfft.h", CONV_INCLUDE, API_BLAS)), # PyTorch also has a source file named "nccl.h", so we need to "<"">" to differentiate ("<nccl.h>", ("<rccl.h>", CONV_INCLUDE, API_RUNTIME)), ("nvrtc.h", ("hip/hiprtc.h", CONV_INCLUDE, API_RTC)), ("thrust/system/cuda", ("thrust/system/hip", CONV_INCLUDE, API_BLAS)), ("cub/util_allocator.cuh", ("hipcub/hipcub.hpp", CONV_INCLUDE, API_BLAS)), ("cub/block/block_reduce.cuh", ("hipcub/hipcub.hpp", CONV_INCLUDE, API_BLAS)), ("cub/cub.cuh", ("hipcub/hipcub.hpp", CONV_INCLUDE, API_BLAS)), ("cub/block/block_load.cuh", ("hipcub/hipcub.hpp", CONV_INCLUDE, API_BLAS)), ("cub/device/device_radix_sort.cuh", ("hipcub/hipcub.hpp", CONV_INCLUDE, API_BLAS)), ("cub/device/device_reduce.cuh", ("hipcub/hipcub.hpp", CONV_INCLUDE, API_BLAS)), ("cub/device/device_scan.cuh", ("hipcub/hipcub.hpp", CONV_INCLUDE, API_BLAS)), ("nvToolsExt.h", ("roctracer/roctx.h", CONV_INCLUDE, API_ROCTX)), ] ) CUDA_IDENTIFIER_MAP = collections.OrderedDict( [ ("__CUDACC__", ("__HIPCC__", CONV_DEF, API_RUNTIME)), ( "CUDA_ERROR_INVALID_CONTEXT", ("hipErrorInvalidContext", CONV_TYPE, API_DRIVER), ), ( "CUDA_ERROR_CONTEXT_ALREADY_CURRENT", ("hipErrorContextAlreadyCurrent", CONV_TYPE, API_DRIVER), ), ( "CUDA_ERROR_ARRAY_IS_MAPPED", 
("hipErrorArrayIsMapped", CONV_TYPE, API_DRIVER), ), ("CUDA_ERROR_ALREADY_MAPPED", ("hipErrorAlreadyMapped", CONV_TYPE, API_DRIVER)), ( "CUDA_ERROR_ALREADY_ACQUIRED", ("hipErrorAlreadyAcquired", CONV_TYPE, API_DRIVER), ), ("CUDA_ERROR_NOT_MAPPED", ("hipErrorNotMapped", CONV_TYPE, API_DRIVER)), ( "CUDA_ERROR_NOT_MAPPED_AS_ARRAY", ("hipErrorNotMappedAsArray", CONV_TYPE, API_DRIVER), ), ( "CUDA_ERROR_NOT_MAPPED_AS_POINTER", ("hipErrorNotMappedAsPointer", CONV_TYPE, API_DRIVER), ), ( "CUDA_ERROR_CONTEXT_ALREADY_IN_USE", ("hipErrorContextAlreadyInUse", CONV_TYPE, API_DRIVER), ), ("CUDA_ERROR_INVALID_SOURCE", ("hipErrorInvalidSource", CONV_TYPE, API_DRIVER)), ("CUDA_ERROR_FILE_NOT_FOUND", ("hipErrorFileNotFound", CONV_TYPE, API_DRIVER)), ("CUDA_ERROR_NOT_FOUND", ("hipErrorNotFound", CONV_TYPE, API_DRIVER)), ( "CUDA_ERROR_LAUNCH_INCOMPATIBLE_TEXTURING", ( "hipErrorLaunchIncompatibleTexturing", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "CUDA_ERROR_PRIMARY_CONTEXT_ACTIVE", ("hipErrorPrimaryContextActive", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CUDA_ERROR_CONTEXT_IS_DESTROYED", ("hipErrorContextIsDestroyed", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CUDA_ERROR_NOT_PERMITTED", ("hipErrorNotPermitted", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CUDA_ERROR_NOT_SUPPORTED", ("hipErrorNotSupported", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "cudaErrorMissingConfiguration", ("hipErrorMissingConfiguration", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaErrorPriorLaunchFailure", ("hipErrorPriorLaunchFailure", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaErrorInvalidDeviceFunction", ("hipErrorInvalidDeviceFunction", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaErrorInvalidConfiguration", ("hipErrorInvalidConfiguration", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaErrorInvalidPitchValue", ("hipErrorInvalidPitchValue", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaErrorInvalidSymbol", ("hipErrorInvalidSymbol", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaErrorInvalidHostPointer", ("hipErrorInvalidHostPointer", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaErrorInvalidDevicePointer", ("hipErrorInvalidDevicePointer", CONV_TYPE, API_RUNTIME), ), ( "cudaErrorInvalidTexture", ("hipErrorInvalidTexture", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaErrorInvalidTextureBinding", ("hipErrorInvalidTextureBinding", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaErrorInvalidChannelDescriptor", ( "hipErrorInvalidChannelDescriptor", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaErrorInvalidMemcpyDirection", ("hipErrorInvalidMemcpyDirection", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaErrorAddressOfConstant", ("hipErrorAddressOfConstant", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaErrorTextureFetchFailed", ("hipErrorTextureFetchFailed", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaErrorTextureNotBound", ("hipErrorTextureNotBound", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaErrorSynchronizationError", ("hipErrorSynchronizationError", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaErrorInvalidFilterSetting", ("hipErrorInvalidFilterSetting", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaErrorInvalidNormSetting", ("hipErrorInvalidNormSetting", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaErrorMixedDeviceExecution", ("hipErrorMixedDeviceExecution", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaErrorNotYetImplemented", ("hipErrorNotYetImplemented", CONV_TYPE, API_RUNTIME, 
HIP_UNSUPPORTED), ), ( "cudaErrorMemoryValueTooLarge", ("hipErrorMemoryValueTooLarge", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaErrorInsufficientDriver", ("hipErrorInsufficientDriver", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaErrorSetOnActiveProcess", ("hipErrorSetOnActiveProcess", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaErrorInvalidSurface", ("hipErrorInvalidSurface", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaErrorDuplicateVariableName", ("hipErrorDuplicateVariableName", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaErrorDuplicateTextureName", ("hipErrorDuplicateTextureName", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaErrorDuplicateSurfaceName", ("hipErrorDuplicateSurfaceName", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaErrorDevicesUnavailable", ("hipErrorDevicesUnavailable", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaErrorIncompatibleDriverContext", ( "hipErrorIncompatibleDriverContext", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaErrorDeviceAlreadyInUse", ("hipErrorDeviceAlreadyInUse", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaErrorLaunchMaxDepthExceeded", ("hipErrorLaunchMaxDepthExceeded", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaErrorLaunchFileScopedTex", ("hipErrorLaunchFileScopedTex", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaErrorLaunchFileScopedSurf", ("hipErrorLaunchFileScopedSurf", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaErrorSyncDepthExceeded", ("hipErrorSyncDepthExceeded", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaErrorLaunchPendingCountExceeded", ( "hipErrorLaunchPendingCountExceeded", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaErrorNotPermitted", ("hipErrorNotPermitted", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaErrorNotSupported", ("hipErrorNotSupported", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaErrorStartupFailure", ("hipErrorStartupFailure", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaErrorApiFailureBase", ("hipErrorApiFailureBase", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), ), ("CUDA_SUCCESS", ("hipSuccess", CONV_TYPE, API_DRIVER)), ("cudaSuccess", ("hipSuccess", CONV_TYPE, API_RUNTIME)), ("CUDA_ERROR_INVALID_VALUE", ("hipErrorInvalidValue", CONV_TYPE, API_DRIVER)), ("cudaErrorInvalidValue", ("hipErrorInvalidValue", CONV_TYPE, API_RUNTIME)), ( "CUDA_ERROR_OUT_OF_MEMORY", ("hipErrorMemoryAllocation", CONV_TYPE, API_DRIVER), ), ( "cudaErrorMemoryAllocation", ("hipErrorMemoryAllocation", CONV_TYPE, API_RUNTIME), ), ( "CUDA_ERROR_NOT_INITIALIZED", ("hipErrorNotInitialized", CONV_TYPE, API_DRIVER), ), ( "cudaErrorInitializationError", ("hipErrorInitializationError", CONV_TYPE, API_RUNTIME), ), ("CUDA_ERROR_DEINITIALIZED", ("hipErrorDeinitialized", CONV_TYPE, API_DRIVER)), ( "cudaErrorCudartUnloading", ("hipErrorDeinitialized", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), ), ( "CUDA_ERROR_PROFILER_DISABLED", ("hipErrorProfilerDisabled", CONV_TYPE, API_DRIVER), ), ( "cudaErrorProfilerDisabled", ("hipErrorProfilerDisabled", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), ), ( "CUDA_ERROR_PROFILER_NOT_INITIALIZED", ("hipErrorProfilerNotInitialized", CONV_TYPE, API_DRIVER), ), ( "cudaErrorProfilerNotInitialized", ("hipErrorProfilerNotInitialized", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), ), ( "CUDA_ERROR_PROFILER_ALREADY_STARTED", ("hipErrorProfilerAlreadyStarted", CONV_TYPE, API_DRIVER), ), ( "cudaErrorProfilerAlreadyStarted", ("hipErrorProfilerAlreadyStarted", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), ), ( 
"CUDA_ERROR_PROFILER_ALREADY_STOPPED", ("hipErrorProfilerAlreadyStopped", CONV_TYPE, API_DRIVER), ), ( "cudaErrorProfilerAlreadyStopped", ("hipErrorProfilerAlreadyStopped", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), ), ("CUDA_ERROR_NO_DEVICE", ("hipErrorNoDevice", CONV_TYPE, API_DRIVER)), ("cudaErrorNoDevice", ("hipErrorNoDevice", CONV_TYPE, API_RUNTIME)), ("CUDA_ERROR_INVALID_DEVICE", ("hipErrorInvalidDevice", CONV_TYPE, API_DRIVER)), ("cudaErrorInvalidDevice", ("hipErrorInvalidDevice", CONV_TYPE, API_RUNTIME)), ("CUDA_ERROR_INVALID_IMAGE", ("hipErrorInvalidImage", CONV_TYPE, API_DRIVER)), ( "cudaErrorInvalidKernelImage", ("hipErrorInvalidImage", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), ), ("CUDA_ERROR_MAP_FAILED", ("hipErrorMapFailed", CONV_TYPE, API_DRIVER)), ( "cudaErrorMapBufferObjectFailed", ("hipErrorMapFailed", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), ), ("CUDA_ERROR_UNMAP_FAILED", ("hipErrorUnmapFailed", CONV_TYPE, API_DRIVER)), ( "cudaErrorUnmapBufferObjectFailed", ("hipErrorUnmapFailed", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), ), ( "CUDA_ERROR_NO_BINARY_FOR_GPU", ("hipErrorNoBinaryForGpu", CONV_TYPE, API_DRIVER), ), ( "cudaErrorNoKernelImageForDevice", ("hipErrorNoBinaryForGpu", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), ), ( "CUDA_ERROR_ECC_UNCORRECTABLE", ("hipErrorECCNotCorrectable", CONV_TYPE, API_DRIVER), ), ( "cudaErrorECCUncorrectable", ("hipErrorECCNotCorrectable", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), ), ( "CUDA_ERROR_UNSUPPORTED_LIMIT", ("hipErrorUnsupportedLimit", CONV_TYPE, API_DRIVER), ), ( "cudaErrorUnsupportedLimit", ("hipErrorUnsupportedLimit", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), ), ( "CUDA_ERROR_PEER_ACCESS_UNSUPPORTED", ("hipErrorPeerAccessUnsupported", CONV_TYPE, API_DRIVER), ), ( "cudaErrorPeerAccessUnsupported", ("hipErrorPeerAccessUnsupported", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), ), ( "CUDA_ERROR_INVALID_PTX", ("hipErrorInvalidKernelFile", CONV_TYPE, API_DRIVER), ), ( "cudaErrorInvalidPtx", ("hipErrorInvalidKernelFile", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), ), ( "CUDA_ERROR_INVALID_GRAPHICS_CONTEXT", ("hipErrorInvalidGraphicsContext", CONV_TYPE, API_DRIVER), ), ( "cudaErrorInvalidGraphicsContext", ("hipErrorInvalidGraphicsContext", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), ), ( "CUDA_ERROR_NVLINK_UNCORRECTABLE", ("hipErrorNvlinkUncorrectable", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "cudaErrorNvlinkUncorrectable", ("hipErrorNvlinkUncorrectable", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), ), ( "CUDA_ERROR_SHARED_OBJECT_SYMBOL_NOT_FOUND", ("hipErrorSharedObjectSymbolNotFound", CONV_TYPE, API_DRIVER), ), ( "cudaErrorSharedObjectSymbolNotFound", ( "hipErrorSharedObjectSymbolNotFound", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "CUDA_ERROR_SHARED_OBJECT_INIT_FAILED", ("hipErrorSharedObjectInitFailed", CONV_TYPE, API_DRIVER), ), ( "cudaErrorSharedObjectInitFailed", ("hipErrorSharedObjectInitFailed", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), ), ( "CUDA_ERROR_OPERATING_SYSTEM", ("hipErrorOperatingSystem", CONV_TYPE, API_DRIVER), ), ( "cudaErrorOperatingSystem", ("hipErrorOperatingSystem", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), ), ( "CUDA_ERROR_INVALID_HANDLE", ("hipErrorInvalidResourceHandle", CONV_TYPE, API_DRIVER), ), ( "cudaErrorInvalidResourceHandle", ("hipErrorInvalidResourceHandle", CONV_TYPE, API_RUNTIME), ), ("CUDA_ERROR_NOT_READY", ("hipErrorNotReady", CONV_TYPE, API_DRIVER)), ("cudaErrorNotReady", ("hipErrorNotReady", CONV_TYPE, API_RUNTIME)), ( "CUDA_ERROR_ILLEGAL_ADDRESS", ("hipErrorIllegalAddress", CONV_TYPE, 
API_DRIVER), ), ( "cudaErrorIllegalAddress", ("hipErrorIllegalAddress", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), ), ( "CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES", ("hipErrorLaunchOutOfResources", CONV_TYPE, API_DRIVER), ), ( "cudaErrorLaunchOutOfResources", ("hipErrorLaunchOutOfResources", CONV_TYPE, API_RUNTIME), ), ("CUDA_ERROR_LAUNCH_TIMEOUT", ("hipErrorLaunchTimeOut", CONV_TYPE, API_DRIVER)), ( "cudaErrorLaunchTimeout", ("hipErrorLaunchTimeOut", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), ), ( "CUDA_ERROR_PEER_ACCESS_ALREADY_ENABLED", ("hipErrorPeerAccessAlreadyEnabled", CONV_TYPE, API_DRIVER), ), ( "cudaErrorPeerAccessAlreadyEnabled", ("hipErrorPeerAccessAlreadyEnabled", CONV_TYPE, API_RUNTIME), ), ( "CUDA_ERROR_PEER_ACCESS_NOT_ENABLED", ("hipErrorPeerAccessNotEnabled", CONV_TYPE, API_DRIVER), ), ( "cudaErrorPeerAccessNotEnabled", ("hipErrorPeerAccessNotEnabled", CONV_TYPE, API_RUNTIME), ), ( "CUDA_ERROR_ASSERT", ("hipErrorAssert", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "cudaErrorAssert", ("hipErrorAssert", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), ), ( "CUDA_ERROR_TOO_MANY_PEERS", ("hipErrorTooManyPeers", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "cudaErrorTooManyPeers", ("hipErrorTooManyPeers", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), ), ( "CUDA_ERROR_HOST_MEMORY_ALREADY_REGISTERED", ("hipErrorHostMemoryAlreadyRegistered", CONV_TYPE, API_DRIVER), ), ( "cudaErrorHostMemoryAlreadyRegistered", ("hipErrorHostMemoryAlreadyRegistered", CONV_TYPE, API_RUNTIME), ), ( "CUDA_ERROR_HOST_MEMORY_NOT_REGISTERED", ("hipErrorHostMemoryNotRegistered", CONV_TYPE, API_DRIVER), ), ( "cudaErrorHostMemoryNotRegistered", ("hipErrorHostMemoryNotRegistered", CONV_TYPE, API_RUNTIME), ), ( "CUDA_ERROR_HARDWARE_STACK_ERROR", ("hipErrorHardwareStackError", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "cudaErrorHardwareStackError", ("hipErrorHardwareStackError", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), ), ( "CUDA_ERROR_ILLEGAL_INSTRUCTION", ("hipErrorIllegalInstruction", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "cudaErrorIllegalInstruction", ("hipErrorIllegalInstruction", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), ), ( "CUDA_ERROR_MISALIGNED_ADDRESS", ("hipErrorMisalignedAddress", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "cudaErrorMisalignedAddress", ("hipErrorMisalignedAddress", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), ), ( "CUDA_ERROR_INVALID_ADDRESS_SPACE", ("hipErrorInvalidAddressSpace", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "cudaErrorInvalidAddressSpace", ("hipErrorInvalidAddressSpace", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), ), ( "CUDA_ERROR_INVALID_PC", ("hipErrorInvalidPc", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "cudaErrorInvalidPc", ("hipErrorInvalidPc", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), ), ( "CUDA_ERROR_LAUNCH_FAILED", ("hipErrorLaunchFailure", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "cudaErrorLaunchFailure", ("hipErrorLaunchFailure", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), ), ( "CUDA_ERROR_UNKNOWN", ("hipErrorUnknown", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ("cudaErrorUnknown", ("hipErrorUnknown", CONV_TYPE, API_RUNTIME)), ( "CU_TR_ADDRESS_MODE_WRAP", ("HIP_TR_ADDRESS_MODE_WRAP", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_TR_ADDRESS_MODE_CLAMP", ("HIP_TR_ADDRESS_MODE_CLAMP", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_TR_ADDRESS_MODE_MIRROR", ("HIP_TR_ADDRESS_MODE_MIRROR", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_TR_ADDRESS_MODE_BORDER", ("HIP_TR_ADDRESS_MODE_BORDER", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( 
"CU_CUBEMAP_FACE_POSITIVE_X", ("HIP_CUBEMAP_FACE_POSITIVE_X", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_CUBEMAP_FACE_NEGATIVE_X", ("HIP_CUBEMAP_FACE_NEGATIVE_X", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_CUBEMAP_FACE_POSITIVE_Y", ("HIP_CUBEMAP_FACE_POSITIVE_Y", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_CUBEMAP_FACE_NEGATIVE_Y", ("HIP_CUBEMAP_FACE_NEGATIVE_Y", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_CUBEMAP_FACE_POSITIVE_Z", ("HIP_CUBEMAP_FACE_POSITIVE_Z", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_CUBEMAP_FACE_NEGATIVE_Z", ("HIP_CUBEMAP_FACE_NEGATIVE_Z", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_AD_FORMAT_UNSIGNED_INT8", ("HIP_AD_FORMAT_UNSIGNED_INT8", CONV_TYPE, API_DRIVER), ), ( "CU_AD_FORMAT_UNSIGNED_INT16", ("HIP_AD_FORMAT_UNSIGNED_INT16", CONV_TYPE, API_DRIVER), ), ( "CU_AD_FORMAT_UNSIGNED_INT32", ("HIP_AD_FORMAT_UNSIGNED_INT32", CONV_TYPE, API_DRIVER), ), ( "CU_AD_FORMAT_SIGNED_INT8", ("HIP_AD_FORMAT_SIGNED_INT8", CONV_TYPE, API_DRIVER), ), ( "CU_AD_FORMAT_SIGNED_INT16", ("HIP_AD_FORMAT_SIGNED_INT16", CONV_TYPE, API_DRIVER), ), ( "CU_AD_FORMAT_SIGNED_INT32", ("HIP_AD_FORMAT_SIGNED_INT32", CONV_TYPE, API_DRIVER), ), ("CU_AD_FORMAT_HALF", ("HIP_AD_FORMAT_HALF", CONV_TYPE, API_DRIVER)), ("CU_AD_FORMAT_FLOAT", ("HIP_AD_FORMAT_FLOAT", CONV_TYPE, API_DRIVER)), ( "CU_COMPUTEMODE_DEFAULT", ("hipComputeModeDefault", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_COMPUTEMODE_EXCLUSIVE", ("hipComputeModeExclusive", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_COMPUTEMODE_PROHIBITED", ("hipComputeModeProhibited", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_COMPUTEMODE_EXCLUSIVE_PROCESS", ("hipComputeModeExclusiveProcess", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_MEM_ADVISE_SET_READ_MOSTLY", ("hipMemAdviseSetReadMostly", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_MEM_ADVISE_UNSET_READ_MOSTLY", ("hipMemAdviseUnsetReadMostly", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_MEM_ADVISE_SET_PREFERRED_LOCATION", ( "hipMemAdviseSetPreferredLocation", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "CU_MEM_ADVISE_UNSET_PREFERRED_LOCATION", ( "hipMemAdviseUnsetPreferredLocation", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "CU_MEM_ADVISE_SET_ACCESSED_BY", ("hipMemAdviseSetAccessedBy", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_MEM_ADVISE_UNSET_ACCESSED_BY", ("hipMemAdviseUnsetAccessedBy", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_MEM_RANGE_ATTRIBUTE_READ_MOSTLY", ("hipMemRangeAttributeReadMostly", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION", ( "hipMemRangeAttributePreferredLocation", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "CU_MEM_RANGE_ATTRIBUTE_ACCESSED_BY", ("hipMemRangeAttributeAccessedBy", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION", ( "hipMemRangeAttributeLastPrefetchLocation", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "CU_CTX_SCHED_AUTO", ("HIP_CTX_SCHED_AUTO", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_CTX_SCHED_SPIN", ("HIP_CTX_SCHED_SPIN", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_CTX_SCHED_YIELD", ("HIP_CTX_SCHED_YIELD", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_CTX_SCHED_BLOCKING_SYNC", ("HIP_CTX_SCHED_BLOCKING_SYNC", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_CTX_BLOCKING_SYNC", ("HIP_CTX_BLOCKING_SYNC", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_CTX_SCHED_MASK", ("HIP_CTX_SCHED_MASK", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_CTX_MAP_HOST", 
("HIP_CTX_MAP_HOST", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_CTX_LMEM_RESIZE_TO_MAX", ("HIP_CTX_LMEM_RESIZE_TO_MAX", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_CTX_FLAGS_MASK", ("HIP_CTX_FLAGS_MASK", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_LAUNCH_PARAM_BUFFER_POINTER", ("HIP_LAUNCH_PARAM_BUFFER_POINTER", CONV_TYPE, API_DRIVER), ), ( "CU_LAUNCH_PARAM_BUFFER_SIZE", ("HIP_LAUNCH_PARAM_BUFFER_SIZE", CONV_TYPE, API_DRIVER), ), ("CU_LAUNCH_PARAM_END", ("HIP_LAUNCH_PARAM_END", CONV_TYPE, API_DRIVER)), ( "CU_IPC_HANDLE_SIZE", ("HIP_LAUNCH_PARAM_END", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_MEMHOSTALLOC_DEVICEMAP", ("HIP_MEMHOSTALLOC_DEVICEMAP", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_MEMHOSTALLOC_PORTABLE", ("HIP_MEMHOSTALLOC_PORTABLE", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_MEMHOSTALLOC_WRITECOMBINED", ("HIP_MEMHOSTALLOC_WRITECOMBINED", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_MEMHOSTREGISTER_DEVICEMAP", ("HIP_MEMHOSTREGISTER_DEVICEMAP", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_MEMHOSTREGISTER_IOMEMORY", ("HIP_MEMHOSTREGISTER_IOMEMORY", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_MEMHOSTREGISTER_PORTABLE", ("HIP_MEMHOSTREGISTER_PORTABLE", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_PARAM_TR_DEFAULT", ("HIP_PARAM_TR_DEFAULT", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_STREAM_LEGACY", ("HIP_STREAM_LEGACY", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_STREAM_PER_THREAD", ("HIP_STREAM_PER_THREAD", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_TRSA_OVERRIDE_FORMAT", ("HIP_TRSA_OVERRIDE_FORMAT", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_TRSF_NORMALIZED_COORDINATES", ("HIP_TRSF_NORMALIZED_COORDINATES", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_TRSF_READ_AS_INTEGER", ("HIP_TRSF_READ_AS_INTEGER", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ("CU_TRSF_SRGB", ("HIP_TRSF_SRGB", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED)), ( "CUDA_ARRAY3D_2DARRAY", ("HIP_ARRAY3D_LAYERED", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CUDA_ARRAY3D_CUBEMAP", ("HIP_ARRAY3D_CUBEMAP", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CUDA_ARRAY3D_DEPTH_TEXTURE", ("HIP_ARRAY3D_DEPTH_TEXTURE", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CUDA_ARRAY3D_LAYERED", ("HIP_ARRAY3D_LAYERED", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CUDA_ARRAY3D_SURFACE_LDST", ("HIP_ARRAY3D_SURFACE_LDST", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CUDA_ARRAY3D_TEXTURE_GATHER", ("HIP_ARRAY3D_TEXTURE_GATHER", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_BLOCK", ( "hipDeviceAttributeMaxThreadsPerBlock", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_X", ("hipDeviceAttributeMaxBlockDimX", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Y", ("hipDeviceAttributeMaxBlockDimY", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Z", ("hipDeviceAttributeMaxBlockDimZ", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_X", ("hipDeviceAttributeMaxGridDimX", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Y", ("hipDeviceAttributeMaxGridDimY", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Z", ("hipDeviceAttributeMaxGridDimZ", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK", ( "hipDeviceAttributeMaxSharedMemoryPerBlock", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED, ), ), ( 
"CU_DEVICE_ATTRIBUTE_SHARED_MEMORY_PER_BLOCK", ( "hipDeviceAttributeMaxSharedMemoryPerBlock", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "CU_DEVICE_ATTRIBUTE_TOTAL_CONSTANT_MEMORY", ( "hipDeviceAttributeTotalConstantMemory", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "CU_DEVICE_ATTRIBUTE_WARP_SIZE", ("hipDeviceAttributeWarpSize", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_DEVICE_ATTRIBUTE_MAX_PITCH", ("hipDeviceAttributeMaxPitch", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_BLOCK", ( "hipDeviceAttributeMaxRegistersPerBlock", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "CU_DEVICE_ATTRIBUTE_REGISTERS_PER_BLOCK", ( "hipDeviceAttributeMaxRegistersPerBlock", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "CU_DEVICE_ATTRIBUTE_CLOCK_RATE", ("hipDeviceAttributeClockRate", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_DEVICE_ATTRIBUTE_TEXTURE_ALIGNMENT", ( "hipDeviceAttributeTextureAlignment", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "CU_DEVICE_ATTRIBUTE_GPU_OVERLAP", ( "hipDeviceAttributeAsyncEngineCount", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT", ( "hipDeviceAttributeMultiprocessorCount", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "CU_DEVICE_ATTRIBUTE_KERNEL_EXEC_TIMEOUT", ( "hipDeviceAttributeKernelExecTimeout", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "CU_DEVICE_ATTRIBUTE_INTEGRATED", ("hipDeviceAttributeIntegrated", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_DEVICE_ATTRIBUTE_CAN_MAP_HOST_MEMORY", ( "hipDeviceAttributeCanMapHostMemory", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "CU_DEVICE_ATTRIBUTE_COMPUTE_MODE", ("hipDeviceAttributeComputeMode", CONV_TYPE, API_DRIVER), ), ( "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_WIDTH", ( "hipDeviceAttributeMaxTexture1DWidth", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_WIDTH", ( "hipDeviceAttributeMaxTexture2DWidth", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_HEIGHT", ( "hipDeviceAttributeMaxTexture2DHeight", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH", ( "hipDeviceAttributeMaxTexture3DWidth", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT", ( "hipDeviceAttributeMaxTexture3DHeight", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH", ( "hipDeviceAttributeMaxTexture3DDepth", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_WIDTH", ( "hipDeviceAttributeMaxTexture2DLayeredWidth", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_HEIGHT", ( "hipDeviceAttributeMaxTexture2DLayeredHeight", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_LAYERS", ( "hipDeviceAttributeMaxTexture2DLayeredLayers", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_WIDTH", ( "hipDeviceAttributeMaxTexture2DLayeredWidth", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_HEIGHT", ( "hipDeviceAttributeMaxTexture2DLayeredHeight", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_NUMSLICES", ( "hipDeviceAttributeMaxTexture2DLayeredLayers", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "CU_DEVICE_ATTRIBUTE_SURFACE_ALIGNMENT", ( 
"hipDeviceAttributeSurfaceAlignment", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "CU_DEVICE_ATTRIBUTE_CONCURRENT_KERNELS", ("hipDeviceAttributeConcurrentKernels", CONV_TYPE, API_DRIVER), ), ( "CU_DEVICE_ATTRIBUTE_ECC_ENABLED", ("hipDeviceAttributeEccEnabled", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_DEVICE_ATTRIBUTE_PCI_BUS_ID", ("hipDeviceAttributePciBusId", CONV_TYPE, API_DRIVER), ), ( "CU_DEVICE_ATTRIBUTE_PCI_DEVICE_ID", ("hipDeviceAttributePciDeviceId", CONV_TYPE, API_DRIVER), ), ( "CU_DEVICE_ATTRIBUTE_TCC_DRIVER", ("hipDeviceAttributeTccDriver", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_DEVICE_ATTRIBUTE_MEMORY_CLOCK_RATE", ( "hipDeviceAttributeMemoryClockRate", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "CU_DEVICE_ATTRIBUTE_GLOBAL_MEMORY_BUS_WIDTH", ("hipDeviceAttributeMemoryBusWidth", CONV_TYPE, API_DRIVER), ), ( "CU_DEVICE_ATTRIBUTE_L2_CACHE_SIZE", ("hipDeviceAttributeL2CacheSize", CONV_TYPE, API_DRIVER), ), ( "CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_MULTIPROCESSOR", ("hipDeviceAttributeMaxThreadsPerMultiProcessor", CONV_TYPE, API_DRIVER), ), ( "CU_DEVICE_ATTRIBUTE_ASYNC_ENGINE_COUNT", ( "hipDeviceAttributeAsyncEngineCount", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "CU_DEVICE_ATTRIBUTE_UNIFIED_ADDRESSING", ( "hipDeviceAttributeUnifiedAddressing", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_WIDTH", ( "hipDeviceAttributeMaxTexture1DLayeredWidth", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_LAYERS", ( "hipDeviceAttributeMaxTexture1DLayeredLayers", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "CU_DEVICE_ATTRIBUTE_CAN_TEX2D_GATHER", ( "hipDeviceAttributeCanTex2DGather", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_WIDTH", ( "hipDeviceAttributeMaxTexture2DGatherWidth", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_HEIGHT", ( "hipDeviceAttributeMaxTexture2DGatherHeight", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH_ALTERNATE", ( "hipDeviceAttributeMaxTexture3DWidthAlternate", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT_ALTERNATE", ( "hipDeviceAttributeMaxTexture3DHeightAlternate", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH_ALTERNATE", ( "hipDeviceAttributeMaxTexture3DDepthAlternate", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "CU_DEVICE_ATTRIBUTE_PCI_DOMAIN_ID", ("hipDeviceAttributePciDomainId", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_DEVICE_ATTRIBUTE_TEXTURE_PITCH_ALIGNMENT", ( "hipDeviceAttributeTexturePitchAlignment", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_WIDTH", ( "hipDeviceAttributeMaxTextureCubemapWidth", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_WIDTH", ( "hipDeviceAttributeMaxTextureCubemapLayeredWidth", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_LAYERS", ( "hipDeviceAttributeMaxTextureCubemapLayeredLayers", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_WIDTH", ( "hipDeviceAttributeMaxSurface1DWidth", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_WIDTH", ( "hipDeviceAttributeMaxSurface2DWidth", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED, ), ), ( 
"CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_HEIGHT", ( "hipDeviceAttributeMaxSurface2DHeight", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_WIDTH", ( "hipDeviceAttributeMaxSurface3DWidth", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_HEIGHT", ( "hipDeviceAttributeMaxSurface3DHeight", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_DEPTH", ( "hipDeviceAttributeMaxSurface3DDepth", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_WIDTH", ( "hipDeviceAttributeMaxSurface1DLayeredWidth", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_LAYERS", ( "hipDeviceAttributeMaxSurface1DLayeredLayers", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_WIDTH", ( "hipDeviceAttributeMaxSurface2DLayeredWidth", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_HEIGHT", ( "hipDeviceAttributeMaxSurface2DLayeredHeight", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_LAYERS", ( "hipDeviceAttributeMaxSurface2DLayeredLayers", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_WIDTH", ( "hipDeviceAttributeMaxSurfaceCubemapWidth", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_WIDTH", ( "hipDeviceAttributeMaxSurfaceCubemapLayeredWidth", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_LAYERS", ( "hipDeviceAttributeMaxSurfaceCubemapLayeredLayers", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LINEAR_WIDTH", ( "hipDeviceAttributeMaxTexture1DLinearWidth", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_WIDTH", ( "hipDeviceAttributeMaxTexture2DLinearWidth", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_HEIGHT", ( "hipDeviceAttributeMaxTexture2DLinearHeight", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_PITCH", ( "hipDeviceAttributeMaxTexture2DLinearPitch", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_WIDTH", ( "hipDeviceAttributeMaxTexture2DMipmappedWidth", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_HEIGHT", ( "hipDeviceAttributeMaxTexture2DMipmappedHeight", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR", ("hipDeviceAttributeComputeCapabilityMajor", CONV_TYPE, API_DRIVER), ), ( "CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR", ("hipDeviceAttributeComputeCapabilityMinor", CONV_TYPE, API_DRIVER), ), ( "CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_MIPMAPPED_WIDTH", ( "hipDeviceAttributeMaxTexture1DMipmappedWidth", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "CU_DEVICE_ATTRIBUTE_STREAM_PRIORITIES_SUPPORTED", ( "hipDeviceAttributeStreamPrioritiesSupported", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "CU_DEVICE_ATTRIBUTE_GLOBAL_L1_CACHE_SUPPORTED", ( "hipDeviceAttributeGlobalL1CacheSupported", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "CU_DEVICE_ATTRIBUTE_LOCAL_L1_CACHE_SUPPORTED", ( "hipDeviceAttributeLocalL1CacheSupported", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED, ), ), ( 
"CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_MULTIPROCESSOR", ( "hipDeviceAttributeMaxSharedMemoryPerMultiprocessor", CONV_TYPE, API_DRIVER, ), ), ( "CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_MULTIPROCESSOR", ( "hipDeviceAttributeMaxRegistersPerMultiprocessor", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "CU_DEVICE_ATTRIBUTE_MANAGED_MEMORY", ("hipDeviceAttributeManagedMemory", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD", ("hipDeviceAttributeIsMultiGpuBoard", CONV_TYPE, API_DRIVER), ), ( "CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD_GROUP_ID", ( "hipDeviceAttributeMultiGpuBoardGroupId", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "CU_DEVICE_ATTRIBUTE_HOST_NATIVE_ATOMIC_SUPPORTED", ( "hipDeviceAttributeHostNativeAtomicSupported", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "CU_DEVICE_ATTRIBUTE_SINGLE_TO_DOUBLE_PRECISION_PERF_RATIO", ( "hipDeviceAttributeSingleToDoublePrecisionPerfRatio", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS", ( "hipDeviceAttributePageableMemoryAccess", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS", ( "hipDeviceAttributeConcurrentManagedAccess", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "CU_DEVICE_ATTRIBUTE_COMPUTE_PREEMPTION_SUPPORTED", ( "hipDeviceAttributeComputePreemptionSupported", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "CU_DEVICE_ATTRIBUTE_CAN_USE_HOST_POINTER_FOR_REGISTERED_MEM", ( "hipDeviceAttributeCanUseHostPointerForRegisteredMem", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "CU_DEVICE_ATTRIBUTE_MAX", ("hipDeviceAttributeMax", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_POINTER_ATTRIBUTE_CONTEXT", ("hipPointerAttributeContext", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_POINTER_ATTRIBUTE_MEMORY_TYPE", ("hipPointerAttributeMemoryType", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_POINTER_ATTRIBUTE_DEVICE_POINTER", ( "hipPointerAttributeDevicePointer", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "CU_POINTER_ATTRIBUTE_HOST_POINTER", ("hipPointerAttributeHostPointer", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_POINTER_ATTRIBUTE_P2P_TOKENS", ("hipPointerAttributeP2pTokens", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_POINTER_ATTRIBUTE_SYNC_MEMOPS", ("hipPointerAttributeSyncMemops", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_POINTER_ATTRIBUTE_BUFFER_ID", ("hipPointerAttributeBufferId", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_POINTER_ATTRIBUTE_IS_MANAGED", ("hipPointerAttributeIsManaged", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK", ( "hipFuncAttributeMaxThreadsPerBlocks", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "CU_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES", ("hipFuncAttributeSharedSizeBytes", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_FUNC_ATTRIBUTE_CONST_SIZE_BYTES", ("hipFuncAttributeConstSizeBytes", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_FUNC_ATTRIBUTE_LOCAL_SIZE_BYTES", ("hipFuncAttributeLocalSizeBytes", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_FUNC_ATTRIBUTE_NUM_REGS", ("hipFuncAttributeNumRegs", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_FUNC_ATTRIBUTE_PTX_VERSION", ("hipFuncAttributePtxVersion", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_FUNC_ATTRIBUTE_BINARY_VERSION", ("hipFuncAttributeBinaryVersion", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_FUNC_ATTRIBUTE_CACHE_MODE_CA", ("hipFuncAttributeCacheModeCA", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( 
"CU_FUNC_ATTRIBUTE_MAX", ("hipFuncAttributeMax", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE", ("hipGraphicsMapFlagsNone", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_GRAPHICS_MAP_RESOURCE_FLAGS_READ_ONLY", ("hipGraphicsMapFlagsReadOnly", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_GRAPHICS_MAP_RESOURCE_FLAGS_WRITE_DISCARD", ("hipGraphicsMapFlagsWriteDiscard", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_GRAPHICS_REGISTER_FLAGS_NONE", ("hipGraphicsRegisterFlagsNone", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_GRAPHICS_REGISTER_FLAGS_READ_ONLY", ( "hipGraphicsRegisterFlagsReadOnly", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "CU_GRAPHICS_REGISTER_FLAGS_WRITE_DISCARD", ( "hipGraphicsRegisterFlagsWriteDiscard", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "CU_GRAPHICS_REGISTER_FLAGS_SURFACE_LDST", ( "hipGraphicsRegisterFlagsSurfaceLoadStore", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "CU_GRAPHICS_REGISTER_FLAGS_TEXTURE_GATHER", ( "hipGraphicsRegisterFlagsTextureGather", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "CU_OCCUPANCY_DEFAULT", ("hipOccupancyDefault", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_OCCUPANCY_DISABLE_CACHING_OVERRIDE", ( "hipOccupancyDisableCachingOverride", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "CU_FUNC_CACHE_PREFER_NONE", ("hipFuncCachePreferNone", CONV_CACHE, API_DRIVER), ), ( "CU_FUNC_CACHE_PREFER_SHARED", ("hipFuncCachePreferShared", CONV_CACHE, API_DRIVER), ), ("CU_FUNC_CACHE_PREFER_L1", ("hipFuncCachePreferL1", CONV_CACHE, API_DRIVER)), ( "CU_FUNC_CACHE_PREFER_EQUAL", ("hipFuncCachePreferEqual", CONV_CACHE, API_DRIVER), ), ( "CU_IPC_MEM_LAZY_ENABLE_PEER_ACCESS", ("hipIpcMemLazyEnablePeerAccess", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ("CUDA_IPC_HANDLE_SIZE", ("HIP_IPC_HANDLE_SIZE", CONV_TYPE, API_DRIVER)), ( "CU_JIT_CACHE_OPTION_NONE", ("hipJitCacheModeOptionNone", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_JIT_CACHE_OPTION_CG", ("hipJitCacheModeOptionCG", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_JIT_CACHE_OPTION_CA", ("hipJitCacheModeOptionCA", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_PREFER_PTX", ("hipJitFallbackPreferPtx", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_PREFER_BINARY", ("hipJitFallbackPreferBinary", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED), ), ("CU_JIT_MAX_REGISTERS", ("hipJitOptionMaxRegisters", CONV_JIT, API_DRIVER)), ( "CU_JIT_THREADS_PER_BLOCK", ("hipJitOptionThreadsPerBlock", CONV_JIT, API_DRIVER), ), ("CU_JIT_WALL_TIME", ("hipJitOptionWallTime", CONV_JIT, API_DRIVER)), ("CU_JIT_INFO_LOG_BUFFER", ("hipJitOptionInfoLogBuffer", CONV_JIT, API_DRIVER)), ( "CU_JIT_INFO_LOG_BUFFER_SIZE_BYTES", ("hipJitOptionInfoLogBufferSizeBytes", CONV_JIT, API_DRIVER), ), ( "CU_JIT_ERROR_LOG_BUFFER", ("hipJitOptionErrorLogBuffer", CONV_JIT, API_DRIVER), ), ( "CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES", ("hipJitOptionErrorLogBufferSizeBytes", CONV_JIT, API_DRIVER), ), ( "CU_JIT_OPTIMIZATION_LEVEL", ("hipJitOptionOptimizationLevel", CONV_JIT, API_DRIVER), ), ( "CU_JIT_TARGET_FROM_CUCONTEXT", ("hipJitOptionTargetFromContext", CONV_JIT, API_DRIVER), ), ("CU_JIT_TARGET", ("hipJitOptionTarget", CONV_JIT, API_DRIVER)), ( "CU_JIT_FALLBACK_STRATEGY", ("hipJitOptionFallbackStrategy", CONV_JIT, API_DRIVER), ), ( "CU_JIT_GENERATE_DEBUG_INFO", ("hipJitOptionGenerateDebugInfo", CONV_JIT, API_DRIVER), ), ("CU_JIT_LOG_VERBOSE", ("hipJitOptionLogVerbose", CONV_JIT, API_DRIVER)), ( "CU_JIT_GENERATE_LINE_INFO", ("hipJitOptionGenerateLineInfo", CONV_JIT, API_DRIVER), 
), ("CU_JIT_CACHE_MODE", ("hipJitOptionCacheMode", CONV_JIT, API_DRIVER)), ("CU_JIT_NEW_SM3X_OPT", ("hipJitOptionSm3xOpt", CONV_JIT, API_DRIVER)), ("CU_JIT_FAST_COMPILE", ("hipJitOptionFastCompile", CONV_JIT, API_DRIVER)), ("CU_JIT_NUM_OPTIONS", ("hipJitOptionNumOptions", CONV_JIT, API_DRIVER)), ( "CU_TARGET_COMPUTE_10", ("hipJitTargetCompute10", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_TARGET_COMPUTE_11", ("hipJitTargetCompute11", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_TARGET_COMPUTE_12", ("hipJitTargetCompute12", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_TARGET_COMPUTE_13", ("hipJitTargetCompute13", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_TARGET_COMPUTE_20", ("hipJitTargetCompute20", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_TARGET_COMPUTE_21", ("hipJitTargetCompute21", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_TARGET_COMPUTE_30", ("hipJitTargetCompute30", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_TARGET_COMPUTE_32", ("hipJitTargetCompute32", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_TARGET_COMPUTE_35", ("hipJitTargetCompute35", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_TARGET_COMPUTE_37", ("hipJitTargetCompute37", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_TARGET_COMPUTE_50", ("hipJitTargetCompute50", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_TARGET_COMPUTE_52", ("hipJitTargetCompute52", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_TARGET_COMPUTE_53", ("hipJitTargetCompute53", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_TARGET_COMPUTE_60", ("hipJitTargetCompute60", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_TARGET_COMPUTE_61", ("hipJitTargetCompute61", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_TARGET_COMPUTE_62", ("hipJitTargetCompute62", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_JIT_INPUT_CUBIN", ("hipJitInputTypeBin", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_JIT_INPUT_PTX", ("hipJitInputTypePtx", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_JIT_INPUT_FATBINARY", ("hipJitInputTypeFatBinary", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_JIT_INPUT_OBJECT", ("hipJitInputTypeObject", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_JIT_INPUT_LIBRARY", ("hipJitInputTypeLibrary", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_JIT_NUM_INPUT_TYPES", ("hipJitInputTypeNumInputTypes", CONV_JIT, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_LIMIT_STACK_SIZE", ("hipLimitStackSize", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_LIMIT_PRINTF_FIFO_SIZE", ("hipLimitPrintfFifoSize", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_LIMIT_MALLOC_HEAP_SIZE", ("hipLimitMallocHeapSize", CONV_TYPE, API_DRIVER), ), ( "CU_LIMIT_DEV_RUNTIME_SYNC_DEPTH", ("hipLimitDevRuntimeSyncDepth", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_LIMIT_DEV_RUNTIME_PENDING_LAUNCH_COUNT", ( "hipLimitDevRuntimePendingLaunchCount", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "CU_LIMIT_STACK_SIZE", ("hipLimitStackSize", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_MEM_ATTACH_GLOBAL", ("hipMemAttachGlobal", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_MEM_ATTACH_HOST", ("hipMemAttachHost", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_MEM_ATTACH_SINGLE", ("hipMemAttachSingle", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_MEMORYTYPE_HOST", ("hipMemTypeHost", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_MEMORYTYPE_DEVICE", ("hipMemTypeDevice", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_MEMORYTYPE_ARRAY", ("hipMemTypeArray", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( 
"CU_MEMORYTYPE_UNIFIED", ("hipMemTypeUnified", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_RESOURCE_TYPE_ARRAY", ("hipResourceTypeArray", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_RESOURCE_TYPE_MIPMAPPED_ARRAY", ("hipResourceTypeMipmappedArray", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_RESOURCE_TYPE_LINEAR", ("hipResourceTypeLinear", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_RESOURCE_TYPE_PITCH2D", ("hipResourceTypePitch2D", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), ), ("CU_RES_VIEW_FORMAT_NONE", ("hipResViewFormatNone", CONV_TEX, API_DRIVER)), ( "CU_RES_VIEW_FORMAT_UINT_1X8", ("hipResViewFormatUnsignedChar1", CONV_TEX, API_DRIVER), ), ( "CU_RES_VIEW_FORMAT_UINT_2X8", ("hipResViewFormatUnsignedChar2", CONV_TEX, API_DRIVER), ), ( "CU_RES_VIEW_FORMAT_UINT_4X8", ("hipResViewFormatUnsignedChar4", CONV_TEX, API_DRIVER), ), ( "CU_RES_VIEW_FORMAT_SINT_1X8", ("hipResViewFormatSignedChar1", CONV_TEX, API_DRIVER), ), ( "CU_RES_VIEW_FORMAT_SINT_2X8", ("hipResViewFormatSignedChar2", CONV_TEX, API_DRIVER), ), ( "CU_RES_VIEW_FORMAT_SINT_4X8", ("hipResViewFormatSignedChar4", CONV_TEX, API_DRIVER), ), ( "CU_RES_VIEW_FORMAT_UINT_1X16", ("hipResViewFormatUnsignedShort1", CONV_TEX, API_DRIVER), ), ( "CU_RES_VIEW_FORMAT_UINT_2X16", ("hipResViewFormatUnsignedShort2", CONV_TEX, API_DRIVER), ), ( "CU_RES_VIEW_FORMAT_UINT_4X16", ("hipResViewFormatUnsignedShort4", CONV_TEX, API_DRIVER), ), ( "CU_RES_VIEW_FORMAT_SINT_1X16", ("hipResViewFormatSignedShort1", CONV_TEX, API_DRIVER), ), ( "CU_RES_VIEW_FORMAT_SINT_2X16", ("hipResViewFormatSignedShort2", CONV_TEX, API_DRIVER), ), ( "CU_RES_VIEW_FORMAT_SINT_4X16", ("hipResViewFormatSignedShort4", CONV_TEX, API_DRIVER), ), ( "CU_RES_VIEW_FORMAT_UINT_1X32", ("hipResViewFormatUnsignedInt1", CONV_TEX, API_DRIVER), ), ( "CU_RES_VIEW_FORMAT_UINT_2X32", ("hipResViewFormatUnsignedInt2", CONV_TEX, API_DRIVER), ), ( "CU_RES_VIEW_FORMAT_UINT_4X32", ("hipResViewFormatUnsignedInt4", CONV_TEX, API_DRIVER), ), ( "CU_RES_VIEW_FORMAT_SINT_1X32", ("hipResViewFormatSignedInt1", CONV_TEX, API_DRIVER), ), ( "CU_RES_VIEW_FORMAT_SINT_2X32", ("hipResViewFormatSignedInt2", CONV_TEX, API_DRIVER), ), ( "CU_RES_VIEW_FORMAT_SINT_4X32", ("hipResViewFormatSignedInt4", CONV_TEX, API_DRIVER), ), ( "CU_RES_VIEW_FORMAT_FLOAT_1X16", ("hipResViewFormatHalf1", CONV_TEX, API_DRIVER), ), ( "CU_RES_VIEW_FORMAT_FLOAT_2X16", ("hipResViewFormatHalf2", CONV_TEX, API_DRIVER), ), ( "CU_RES_VIEW_FORMAT_FLOAT_4X16", ("hipResViewFormatHalf4", CONV_TEX, API_DRIVER), ), ( "CU_RES_VIEW_FORMAT_FLOAT_1X32", ("hipResViewFormatFloat1", CONV_TEX, API_DRIVER), ), ( "CU_RES_VIEW_FORMAT_FLOAT_2X32", ("hipResViewFormatFloat2", CONV_TEX, API_DRIVER), ), ( "CU_RES_VIEW_FORMAT_FLOAT_4X32", ("hipResViewFormatFloat4", CONV_TEX, API_DRIVER), ), ( "CU_RES_VIEW_FORMAT_UNSIGNED_BC1", ("hipResViewFormatUnsignedBlockCompressed1", CONV_TEX, API_DRIVER), ), ( "CU_RES_VIEW_FORMAT_UNSIGNED_BC2", ("hipResViewFormatUnsignedBlockCompressed2", CONV_TEX, API_DRIVER), ), ( "CU_RES_VIEW_FORMAT_UNSIGNED_BC3", ("hipResViewFormatUnsignedBlockCompressed3", CONV_TEX, API_DRIVER), ), ( "CU_RES_VIEW_FORMAT_UNSIGNED_BC4", ("hipResViewFormatUnsignedBlockCompressed4", CONV_TEX, API_DRIVER), ), ( "CU_RES_VIEW_FORMAT_SIGNED_BC4", ("hipResViewFormatSignedBlockCompressed4", CONV_TEX, API_DRIVER), ), ( "CU_RES_VIEW_FORMAT_UNSIGNED_BC5", ("hipResViewFormatUnsignedBlockCompressed5", CONV_TEX, API_DRIVER), ), ( "CU_RES_VIEW_FORMAT_SIGNED_BC5", ("hipResViewFormatSignedBlockCompressed5", CONV_TEX, API_DRIVER), ), ( 
"CU_RES_VIEW_FORMAT_UNSIGNED_BC6H", ("hipResViewFormatUnsignedBlockCompressed6H", CONV_TEX, API_DRIVER), ), ( "CU_RES_VIEW_FORMAT_SIGNED_BC6H", ("hipResViewFormatSignedBlockCompressed6H", CONV_TEX, API_DRIVER), ), ( "CU_RES_VIEW_FORMAT_UNSIGNED_BC7", ("hipResViewFormatUnsignedBlockCompressed7", CONV_TEX, API_DRIVER), ), ( "CU_SHARED_MEM_CONFIG_DEFAULT_BANK_SIZE", ("hipSharedMemBankSizeDefault", CONV_TYPE, API_DRIVER), ), ( "CU_SHARED_MEM_CONFIG_FOUR_BYTE_BANK_SIZE", ("hipSharedMemBankSizeFourByte", CONV_TYPE, API_DRIVER), ), ( "CU_SHARED_MEM_CONFIG_EIGHT_BYTE_BANK_SIZE", ("hipSharedMemBankSizeEightByte", CONV_TYPE, API_DRIVER), ), ("CU_STREAM_DEFAULT", ("hipStreamDefault", CONV_TYPE, API_DRIVER)), ("CU_STREAM_NON_BLOCKING", ("hipStreamNonBlocking", CONV_TYPE, API_DRIVER)), ( "CU_STREAM_WAIT_VALUE_GEQ", ("hipStreamWaitValueGeq", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_STREAM_WAIT_VALUE_EQ", ("hipStreamWaitValueEq", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_STREAM_WAIT_VALUE_AND", ("hipStreamWaitValueAnd", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_STREAM_WAIT_VALUE_FLUSH", ("hipStreamWaitValueFlush", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_STREAM_WRITE_VALUE_DEFAULT", ("hipStreamWriteValueDefault", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_STREAM_WRITE_VALUE_NO_MEMORY_BARRIER", ( "hipStreamWriteValueNoMemoryBarrier", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "CU_STREAM_MEM_OP_WAIT_VALUE_32", ("hipStreamBatchMemOpWaitValue32", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_STREAM_MEM_OP_WRITE_VALUE_32", ("hipStreamBatchMemOpWriteValue32", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_STREAM_MEM_OP_FLUSH_REMOTE_WRITES", ( "hipStreamBatchMemOpFlushRemoteWrites", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "cuGetErrorName", ("hipGetErrorName___", CONV_ERROR, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuGetErrorString", ("hipGetErrorString___", CONV_ERROR, API_DRIVER, HIP_UNSUPPORTED), ), ("cuInit", ("hipInit", CONV_INIT, API_DRIVER)), ("cuDriverGetVersion", ("hipDriverGetVersion", CONV_VERSION, API_DRIVER)), ("cuCtxCreate_v2", ("hipCtxCreate", CONV_CONTEXT, API_DRIVER)), ("cuCtxDestroy_v2", ("hipCtxDestroy", CONV_CONTEXT, API_DRIVER)), ("cuCtxGetApiVersion", ("hipCtxGetApiVersion", CONV_CONTEXT, API_DRIVER)), ("cuCtxGetCacheConfig", ("hipCtxGetCacheConfig", CONV_CONTEXT, API_DRIVER)), ("cuCtxGetCurrent", ("hipCtxGetCurrent", CONV_CONTEXT, API_DRIVER)), ("cuCtxGetDevice", ("hipCtxGetDevice", CONV_CONTEXT, API_DRIVER)), ("cuCtxGetFlags", ("hipCtxGetFlags", CONV_CONTEXT, API_DRIVER)), ( "cuCtxGetLimit", ("hipCtxGetLimit", CONV_CONTEXT, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuCtxGetSharedMemConfig", ("hipCtxGetSharedMemConfig", CONV_CONTEXT, API_DRIVER), ), ( "cuCtxGetStreamPriorityRange", ("hipCtxGetStreamPriorityRange", CONV_CONTEXT, API_DRIVER, HIP_UNSUPPORTED), ), ("cuCtxPopCurrent_v2", ("hipCtxPopCurrent", CONV_CONTEXT, API_DRIVER)), ("cuCtxPushCurrent_v2", ("hipCtxPushCurrent", CONV_CONTEXT, API_DRIVER)), ("cuCtxSetCacheConfig", ("hipCtxSetCacheConfig", CONV_CONTEXT, API_DRIVER)), ("cuCtxSetCurrent", ("hipCtxSetCurrent", CONV_CONTEXT, API_DRIVER)), ( "cuCtxSetLimit", ("hipCtxSetLimit", CONV_CONTEXT, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuCtxSetSharedMemConfig", ("hipCtxSetSharedMemConfig", CONV_CONTEXT, API_DRIVER), ), ("cuCtxSynchronize", ("hipCtxSynchronize", CONV_CONTEXT, API_DRIVER)), ("cuCtxAttach", ("hipCtxAttach", CONV_CONTEXT, API_DRIVER, HIP_UNSUPPORTED)), ("cuCtxDetach", ("hipCtxDetach", CONV_CONTEXT, API_DRIVER, HIP_UNSUPPORTED)), 
("cuCtxEnablePeerAccess", ("hipCtxEnablePeerAccess", CONV_PEER, API_DRIVER)), ("cuCtxDisablePeerAccess", ("hipCtxDisablePeerAccess", CONV_PEER, API_DRIVER)), ("cuDeviceCanAccessPeer", ("hipDeviceCanAccessPeer", CONV_PEER, API_DRIVER)), ( "cuDeviceGetP2PAttribute", ("hipDeviceGetP2PAttribute", CONV_PEER, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuDevicePrimaryCtxGetState", ("hipDevicePrimaryCtxGetState", CONV_CONTEXT, API_DRIVER), ), ( "cuDevicePrimaryCtxRelease", ("hipDevicePrimaryCtxRelease", CONV_CONTEXT, API_DRIVER), ), ( "cuDevicePrimaryCtxReset", ("hipDevicePrimaryCtxReset", CONV_CONTEXT, API_DRIVER), ), ( "cuDevicePrimaryCtxRetain", ("hipDevicePrimaryCtxRetain", CONV_CONTEXT, API_DRIVER), ), ( "cuDevicePrimaryCtxSetFlags", ("hipDevicePrimaryCtxSetFlags", CONV_CONTEXT, API_DRIVER), ), ("cuDeviceGet", ("hipGetDevice", CONV_DEVICE, API_DRIVER)), ("cuDeviceGetName", ("hipDeviceGetName", CONV_DEVICE, API_DRIVER)), ("cuDeviceGetCount", ("hipGetDeviceCount", CONV_DEVICE, API_DRIVER)), ("cuDeviceGetAttribute", ("hipDeviceGetAttribute", CONV_DEVICE, API_DRIVER)), ("cuDeviceGetPCIBusId", ("hipDeviceGetPCIBusId", CONV_DEVICE, API_DRIVER)), ("cuDeviceGetByPCIBusId", ("hipDeviceGetByPCIBusId", CONV_DEVICE, API_DRIVER)), ("cuDeviceTotalMem_v2", ("hipDeviceTotalMem", CONV_DEVICE, API_DRIVER)), ( "cuDeviceComputeCapability", ("hipDeviceComputeCapability", CONV_DEVICE, API_DRIVER), ), ("cuDeviceGetProperties", ("hipGetDeviceProperties", CONV_DEVICE, API_DRIVER)), ("cuLinkAddData", ("hipLinkAddData", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED)), ("cuLinkAddFile", ("hipLinkAddFile", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED)), ( "cuLinkComplete", ("hipLinkComplete", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED), ), ("cuLinkCreate", ("hipLinkCreate", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED)), ("cuLinkDestroy", ("hipLinkDestroy", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED)), ("cuModuleGetFunction", ("hipModuleGetFunction", CONV_MODULE, API_DRIVER)), ("cuModuleGetGlobal_v2", ("hipModuleGetGlobal", CONV_MODULE, API_DRIVER)), ( "cuModuleGetSurfRef", ("hipModuleGetSurfRef", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED), ), ("cuModuleGetTexRef", ("hipModuleGetTexRef", CONV_MODULE, API_DRIVER)), ("cuModuleLoad", ("hipModuleLoad", CONV_MODULE, API_DRIVER)), ("cuModuleLoadData", ("hipModuleLoadData", CONV_MODULE, API_DRIVER)), ("cuModuleLoadDataEx", ("hipModuleLoadDataEx", CONV_MODULE, API_DRIVER)), ( "cuModuleLoadFatBinary", ("hipModuleLoadFatBinary", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED), ), ("cuModuleUnload", ("hipModuleUnload", CONV_MODULE, API_DRIVER)), ( "CU_DEVICE_P2P_ATTRIBUTE_PERFORMANCE_RANK", ( "hipDeviceP2PAttributePerformanceRank", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "CU_DEVICE_P2P_ATTRIBUTE_ACCESS_SUPPORTED", ( "hipDeviceP2PAttributeAccessSupported", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "CU_DEVICE_P2P_ATTRIBUTE_NATIVE_ATOMIC_SUPPORTED", ( "hipDeviceP2PAttributeNativeAtomicSupported", CONV_TYPE, API_DRIVER, HIP_UNSUPPORTED, ), ), ("CU_EVENT_DEFAULT", ("hipEventDefault", CONV_EVENT, API_DRIVER)), ("CU_EVENT_BLOCKING_SYNC", ("hipEventBlockingSync", CONV_EVENT, API_DRIVER)), ("CU_EVENT_DISABLE_TIMING", ("hipEventDisableTiming", CONV_EVENT, API_DRIVER)), ("CU_EVENT_INTERPROCESS", ("hipEventInterprocess", CONV_EVENT, API_DRIVER)), ("cuEventCreate", ("hipEventCreate", CONV_EVENT, API_DRIVER)), ("cuEventDestroy_v2", ("hipEventDestroy", CONV_EVENT, API_DRIVER)), ("cuEventElapsedTime", ("hipEventElapsedTime", CONV_EVENT, API_DRIVER)), ("cuEventQuery", ("hipEventQuery", CONV_EVENT, API_DRIVER)), 
("cuEventRecord", ("hipEventRecord", CONV_EVENT, API_DRIVER)), ("cuEventSynchronize", ("hipEventSynchronize", CONV_EVENT, API_DRIVER)), ( "cuFuncGetAttribute", ("hipFuncGetAttribute", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED), ), ("cuFuncSetCacheConfig", ("hipFuncSetCacheConfig", CONV_MODULE, API_DRIVER)), ( "cuFuncSetSharedMemConfig", ("hipFuncSetSharedMemConfig", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED), ), ("cuLaunchKernel", ("hipModuleLaunchKernel", CONV_MODULE, API_DRIVER)), ( "cuFuncSetBlockShape", ("hipFuncSetBlockShape", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuFuncSetSharedSize", ("hipFuncSetSharedSize", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED), ), ("cuLaunch", ("hipLaunch", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED)), ("cuLaunchGrid", ("hipLaunchGrid", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED)), ( "cuLaunchGridAsync", ("hipLaunchGridAsync", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED), ), ("cuParamSetf", ("hipParamSetf", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED)), ("cuParamSeti", ("hipParamSeti", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED)), ( "cuParamSetSize", ("hipParamSetSize", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuParamSetSize", ("hipParamSetSize", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED), ), ("cuParamSetv", ("hipParamSetv", CONV_MODULE, API_DRIVER, HIP_UNSUPPORTED)), ( "cuOccupancyMaxActiveBlocksPerMultiprocessor", ( "hipModuleOccupancyMaxActiveBlocksPerMultiprocessor", CONV_OCCUPANCY, API_DRIVER, ), ), ( "cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags", ( "hipModuleOccupancyMaxActiveBlocksPerMultiprocessorWithFlags", CONV_OCCUPANCY, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "cuOccupancyMaxPotentialBlockSize", ("hipModuleOccupancyMaxPotentialBlockSize", CONV_OCCUPANCY, API_DRIVER), ), ( "cuOccupancyMaxPotentialBlockSizeWithFlags", ( "hipModuleOccupancyMaxPotentialBlockSizeWithFlags", CONV_OCCUPANCY, API_DRIVER, HIP_UNSUPPORTED, ), ), ("cuStreamAddCallback", ("hipStreamAddCallback", CONV_STREAM, API_DRIVER)), ( "cuStreamAttachMemAsync", ("hipStreamAttachMemAsync", CONV_STREAM, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuStreamCreate", ("hipStreamCreate__", CONV_STREAM, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuStreamCreateWithPriority", ("hipStreamCreateWithPriority", CONV_STREAM, API_DRIVER, HIP_UNSUPPORTED), ), ("cuStreamDestroy_v2", ("hipStreamDestroy", CONV_STREAM, API_DRIVER)), ("cuStreamGetFlags", ("hipStreamGetFlags", CONV_STREAM, API_DRIVER)), ( "cuStreamGetPriority", ("hipStreamGetPriority", CONV_STREAM, API_DRIVER, HIP_UNSUPPORTED), ), ("cuStreamQuery", ("hipStreamQuery", CONV_STREAM, API_DRIVER)), ("cuStreamSynchronize", ("hipStreamSynchronize", CONV_STREAM, API_DRIVER)), ("cuStreamWaitEvent", ("hipStreamWaitEvent", CONV_STREAM, API_DRIVER)), ( "cuStreamWaitValue32", ("hipStreamWaitValue32", CONV_STREAM, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuStreamWriteValue32", ("hipStreamWriteValue32", CONV_STREAM, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuStreamBatchMemOp", ("hipStreamBatchMemOp", CONV_STREAM, API_DRIVER, HIP_UNSUPPORTED), ), ("cuArray3DCreate", ("hipArray3DCreate", CONV_MEM, API_DRIVER)), ( "cuArray3DGetDescriptor", ("hipArray3DGetDescriptor", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), ), ("cuArrayCreate", ("hipArrayCreate", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)), ("cuArrayDestroy", ("hipArrayDestroy", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)), ( "cuArrayGetDescriptor", ("hipArrayGetDescriptor", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuIpcCloseMemHandle", ("hipIpcCloseMemHandle", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuIpcGetEventHandle", 
("hipIpcGetEventHandle", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuIpcGetMemHandle", ("hipIpcGetMemHandle", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuIpcOpenEventHandle", ("hipIpcOpenEventHandle", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuIpcOpenMemHandle", ("hipIpcOpenMemHandle", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), ), ("cuMemAlloc_v2", ("hipMalloc", CONV_MEM, API_DRIVER)), ("cuMemAllocHost", ("hipMemAllocHost", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)), ( "cuMemAllocManaged", ("hipMemAllocManaged", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuMemAllocPitch", ("hipMemAllocPitch__", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), ), ("cuMemcpy", ("hipMemcpy__", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)), ("cuMemcpy2D", ("hipMemcpy2D__", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)), ( "cuMemcpy2DAsync", ("hipMemcpy2DAsync__", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuMemcpy2DUnaligned", ("hipMemcpy2DUnaligned", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), ), ("cuMemcpy3D", ("hipMemcpy3D__", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)), ( "cuMemcpy3DAsync", ("hipMemcpy3DAsync__", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuMemcpy3DPeer", ("hipMemcpy3DPeer__", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuMemcpy3DPeerAsync", ("hipMemcpy3DPeerAsync__", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), ), ("cuMemcpyAsync", ("hipMemcpyAsync__", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)), ("cuMemcpyAtoA", ("hipMemcpyAtoA", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)), ("cuMemcpyAtoD", ("hipMemcpyAtoD", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)), ("cuMemcpyAtoH", ("hipMemcpyAtoH", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)), ( "cuMemcpyAtoHAsync", ("hipMemcpyAtoHAsync", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), ), ("cuMemcpyDtoA", ("hipMemcpyDtoA", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)), ("cuMemcpyDtoD_v2", ("hipMemcpyDtoD", CONV_MEM, API_DRIVER)), ("cuMemcpyDtoDAsync_v2", ("hipMemcpyDtoDAsync", CONV_MEM, API_DRIVER)), ("cuMemcpyDtoH_v2", ("hipMemcpyDtoH", CONV_MEM, API_DRIVER)), ("cuMemcpyDtoHAsync_v2", ("hipMemcpyDtoHAsync", CONV_MEM, API_DRIVER)), ("cuMemcpyHtoA", ("hipMemcpyHtoA", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)), ( "cuMemcpyHtoAAsync", ("hipMemcpyHtoAAsync", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), ), ("cuMemcpyHtoD_v2", ("hipMemcpyHtoD", CONV_MEM, API_DRIVER)), ("cuMemcpyHtoDAsync_v2", ("hipMemcpyHtoDAsync", CONV_MEM, API_DRIVER)), ( "cuMemcpyPeerAsync", ("hipMemcpyPeerAsync__", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), ), ("cuMemcpyPeer", ("hipMemcpyPeer__", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)), ("cuMemFree_v2", ("hipFree", CONV_MEM, API_DRIVER)), ("cuMemFreeHost", ("hipHostFree", CONV_MEM, API_DRIVER)), ( "cuMemGetAddressRange", ("hipMemGetAddressRange", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), ), ("cuMemGetInfo_v2", ("hipMemGetInfo", CONV_MEM, API_DRIVER)), ("cuMemHostAlloc", ("hipHostMalloc", CONV_MEM, API_DRIVER)), ( "cuMemHostGetDevicePointer", ("hipMemHostGetDevicePointer", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuMemHostGetFlags", ("hipMemHostGetFlags", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), ), ("cuMemHostRegister_v2", ("hipHostRegister", CONV_MEM, API_DRIVER)), ("cuMemHostUnregister", ("hipHostUnregister", CONV_MEM, API_DRIVER)), ("cuMemsetD16_v2", ("hipMemsetD16", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)), ( "cuMemsetD16Async", ("hipMemsetD16Async", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), ), ("cuMemsetD2D16_v2", ("hipMemsetD2D16", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)), ( "cuMemsetD2D16Async", ("hipMemsetD2D16Async", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), ), ("cuMemsetD2D32_v2", 
("hipMemsetD2D32", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)), ( "cuMemsetD2D32Async", ("hipMemsetD2D32Async", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), ), ("cuMemsetD2D8_v2", ("hipMemsetD2D8", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)), ( "cuMemsetD2D8Async", ("hipMemsetD2D8Async", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), ), ("cuMemsetD32_v2", ("hipMemset", CONV_MEM, API_DRIVER)), ("cuMemsetD32Async", ("hipMemsetAsync", CONV_MEM, API_DRIVER)), ("cuMemsetD8_v2", ("hipMemsetD8", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)), ( "cuMemsetD8Async", ("hipMemsetD8Async", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuMipmappedArrayCreate", ("hipMipmappedArrayCreate", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuMipmappedArrayDestroy", ("hipMipmappedArrayDestroy", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuMipmappedArrayGetLevel", ("hipMipmappedArrayGetLevel", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuMemPrefetchAsync", ("hipMemPrefetchAsync__", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), ), ("cuMemAdvise", ("hipMemAdvise", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED)), ( "cuMemRangeGetAttribute", ("hipMemRangeGetAttribute", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuMemRangeGetAttributes", ("hipMemRangeGetAttributes", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuPointerGetAttribute", ("hipPointerGetAttribute", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuPointerGetAttributes", ("hipPointerGetAttributes", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuPointerSetAttribute", ("hipPointerSetAttribute", CONV_MEM, API_DRIVER, HIP_UNSUPPORTED), ), ("CU_TR_FILTER_MODE_POINT", ("hipFilterModePoint", CONV_TEX, API_DRIVER)), ( "CU_TR_FILTER_MODE_LINEAR", ("hipFilterModeLinear", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuTexRefGetAddress", ("hipTexRefGetAddress", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuTexRefGetAddressMode", ("hipTexRefGetAddressMode", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuTexRefGetArray", ("hipTexRefGetArray", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuTexRefGetBorderColor", ("hipTexRefGetBorderColor", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuTexRefGetFilterMode", ("hipTexRefGetFilterMode", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuTexRefGetFlags", ("hipTexRefGetFlags", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuTexRefGetFormat", ("hipTexRefGetFormat", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuTexRefGetMaxAnisotropy", ("hipTexRefGetMaxAnisotropy", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuTexRefGetMipmapFilterMode", ("hipTexRefGetMipmapFilterMode", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuTexRefGetMipmapLevelBias", ("hipTexRefGetMipmapLevelBias", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuTexRefGetMipmapLevelClamp", ("hipTexRefGetMipmapLevelClamp", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuTexRefGetMipmappedArray", ("hipTexRefGetMipmappedArray", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuTexRefSetAddress", ("hipTexRefSetAddress", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuTexRefSetAddress2D", ("hipTexRefSetAddress2D", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), ), ("cuTexRefSetAddressMode", ("hipTexRefSetAddressMode", CONV_TEX, API_DRIVER)), ("cuTexRefSetArray", ("hipTexRefSetArray", CONV_TEX, API_DRIVER)), ( "cuTexRefSetBorderColor", ("hipTexRefSetBorderColor", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), ), ("cuTexRefSetFilterMode", ("hipTexRefSetFilterMode", CONV_TEX, API_DRIVER)), ("cuTexRefSetFlags", ("hipTexRefSetFlags", CONV_TEX, API_DRIVER)), ("cuTexRefSetFormat", ("hipTexRefSetFormat", CONV_TEX, 
API_DRIVER)), ( "cuTexRefSetMaxAnisotropy", ("hipTexRefSetMaxAnisotropy", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuTexRefSetMipmapFilterMode", ("hipTexRefSetMipmapFilterMode", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuTexRefSetMipmapLevelBias", ("hipTexRefSetMipmapLevelBias", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuTexRefSetMipmapLevelClamp", ("hipTexRefSetMipmapLevelClamp", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuTexRefSetMipmappedArray", ("hipTexRefSetMipmappedArray", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), ), ("cuTexRefCreate", ("hipTexRefCreate", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED)), ( "cuTexRefDestroy", ("hipTexRefDestroy", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuSurfRefGetArray", ("hipSurfRefGetArray", CONV_SURFACE, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuSurfRefSetArray", ("hipSurfRefSetArray", CONV_SURFACE, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuTexObjectCreate", ("hipTexObjectCreate", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuTexObjectDestroy", ("hipTexObjectDestroy", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuTexObjectGetResourceDesc", ("hipTexObjectGetResourceDesc", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuTexObjectGetResourceViewDesc", ("hipTexObjectGetResourceViewDesc", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuTexObjectGetTextureDesc", ("hipTexObjectGetTextureDesc", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuSurfObjectCreate", ("hipSurfObjectCreate", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuSurfObjectDestroy", ("hipSurfObjectDestroy", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuSurfObjectGetResourceDesc", ("hipSurfObjectGetResourceDesc", CONV_TEX, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuGraphicsMapResources", ("hipGraphicsMapResources", CONV_GRAPHICS, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuGraphicsResourceGetMappedMipmappedArray", ( "hipGraphicsResourceGetMappedMipmappedArray", CONV_GRAPHICS, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "cuGraphicsResourceGetMappedPointer", ( "hipGraphicsResourceGetMappedPointer", CONV_GRAPHICS, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "cuGraphicsResourceSetMapFlags", ( "hipGraphicsResourceSetMapFlags", CONV_GRAPHICS, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "cuGraphicsSubResourceGetMappedArray", ( "hipGraphicsSubResourceGetMappedArray", CONV_GRAPHICS, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "cuGraphicsUnmapResources", ("hipGraphicsUnmapResources", CONV_GRAPHICS, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuGraphicsUnregisterResource", ( "hipGraphicsUnregisterResource", CONV_GRAPHICS, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "cuProfilerInitialize", ("hipProfilerInitialize", CONV_OTHER, API_DRIVER, HIP_UNSUPPORTED), ), ("cuProfilerStart", ("hipProfilerStart", CONV_OTHER, API_DRIVER)), ("cuProfilerStop", ("hipProfilerStop", CONV_OTHER, API_DRIVER)), ( "CU_GL_DEVICE_LIST_ALL", ("HIP_GL_DEVICE_LIST_ALL", CONV_GL, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_GL_DEVICE_LIST_CURRENT_FRAME", ("HIP_GL_DEVICE_LIST_CURRENT_FRAME", CONV_GL, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_GL_DEVICE_LIST_NEXT_FRAME", ("HIP_GL_DEVICE_LIST_NEXT_FRAME", CONV_GL, API_DRIVER, HIP_UNSUPPORTED), ), ("cuGLGetDevices", ("hipGLGetDevices", CONV_GL, API_DRIVER, HIP_UNSUPPORTED)), ( "cuGraphicsGLRegisterBuffer", ("hipGraphicsGLRegisterBuffer", CONV_GL, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuGraphicsGLRegisterImage", ("hipGraphicsGLRegisterImage", CONV_GL, API_DRIVER, HIP_UNSUPPORTED), ), ("cuWGLGetDevice", ("hipWGLGetDevice", CONV_GL, API_DRIVER, HIP_UNSUPPORTED)), ( "CU_GL_MAP_RESOURCE_FLAGS_NONE", ("HIP_GL_MAP_RESOURCE_FLAGS_NONE", 
CONV_GL, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_GL_MAP_RESOURCE_FLAGS_READ_ONLY", ( "HIP_GL_MAP_RESOURCE_FLAGS_READ_ONLY", CONV_GL, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "CU_GL_MAP_RESOURCE_FLAGS_WRITE_DISCARD", ( "HIP_GL_MAP_RESOURCE_FLAGS_WRITE_DISCARD", CONV_GL, API_DRIVER, HIP_UNSUPPORTED, ), ), ("cuGLCtxCreate", ("hipGLCtxCreate", CONV_GL, API_DRIVER, HIP_UNSUPPORTED)), ("cuGLInit", ("hipGLInit", CONV_GL, API_DRIVER, HIP_UNSUPPORTED)), ( "cuGLMapBufferObject", ("hipGLMapBufferObject", CONV_GL, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuGLMapBufferObjectAsync", ("hipGLMapBufferObjectAsync", CONV_GL, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuGLRegisterBufferObject", ("hipGLRegisterBufferObject", CONV_GL, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuGLSetBufferObjectMapFlags", ("hipGLSetBufferObjectMapFlags", CONV_GL, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuGLUnmapBufferObject", ("hipGLUnmapBufferObject", CONV_GL, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuGLUnmapBufferObjectAsync", ("hipGLUnmapBufferObjectAsync", CONV_GL, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuGLUnregisterBufferObject", ("hipGLUnregisterBufferObject", CONV_GL, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_D3D9_DEVICE_LIST_ALL", ("HIP_D3D9_DEVICE_LIST_ALL", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_D3D9_DEVICE_LIST_CURRENT_FRAME", ( "HIP_D3D9_DEVICE_LIST_CURRENT_FRAME", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "CU_D3D9_DEVICE_LIST_NEXT_FRAME", ("HIP_D3D9_DEVICE_LIST_NEXT_FRAME", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuD3D9CtxCreate", ("hipD3D9CtxCreate", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuD3D9CtxCreateOnDevice", ("hipD3D9CtxCreateOnDevice", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuD3D9GetDevice", ("hipD3D9GetDevice", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuD3D9GetDevices", ("hipD3D9GetDevices", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuD3D9GetDirect3DDevice", ("hipD3D9GetDirect3DDevice", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuGraphicsD3D9RegisterResource", ("hipGraphicsD3D9RegisterResource", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_D3D9_MAPRESOURCE_FLAGS_NONE", ("HIP_D3D9_MAPRESOURCE_FLAGS_NONE", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_D3D9_MAPRESOURCE_FLAGS_READONLY", ( "HIP_D3D9_MAPRESOURCE_FLAGS_READONLY", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "CU_D3D9_MAPRESOURCE_FLAGS_WRITEDISCARD", ( "HIP_D3D9_MAPRESOURCE_FLAGS_WRITEDISCARD", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "CU_D3D9_REGISTER_FLAGS_NONE", ("HIP_D3D9_REGISTER_FLAGS_NONE", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_D3D9_REGISTER_FLAGS_ARRAY", ("HIP_D3D9_REGISTER_FLAGS_ARRAY", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuD3D9MapResources", ("hipD3D9MapResources", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuD3D9RegisterResource", ("hipD3D9RegisterResource", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuD3D9ResourceGetMappedArray", ("hipD3D9ResourceGetMappedArray", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuD3D9ResourceGetMappedPitch", ("hipD3D9ResourceGetMappedPitch", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuD3D9ResourceGetMappedPointer", ("hipD3D9ResourceGetMappedPointer", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuD3D9ResourceGetMappedSize", ("hipD3D9ResourceGetMappedSize", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuD3D9ResourceGetSurfaceDimensions", ( "hipD3D9ResourceGetSurfaceDimensions", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "cuD3D9ResourceSetMapFlags", ("hipD3D9ResourceSetMapFlags", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED), ), 
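# The graphics-interoperability mappings in this region (OpenGL, Direct3D
# 9/10/11, VDPAU, EGL) all carry HIP_UNSUPPORTED; the hip-prefixed targets here
# appear to be placeholder names rather than HIP entry points that were
# available when this table was written.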
( "cuD3D9UnmapResources", ("hipD3D9UnmapResources", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuD3D9UnregisterResource", ("hipD3D9UnregisterResource", CONV_D3D9, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_D3D10_DEVICE_LIST_ALL", ("HIP_D3D10_DEVICE_LIST_ALL", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_D3D10_DEVICE_LIST_CURRENT_FRAME", ( "HIP_D3D10_DEVICE_LIST_CURRENT_FRAME", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "CU_D3D10_DEVICE_LIST_NEXT_FRAME", ( "HIP_D3D10_DEVICE_LIST_NEXT_FRAME", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "cuD3D10GetDevice", ("hipD3D10GetDevice", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuD3D10GetDevices", ("hipD3D10GetDevices", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuGraphicsD3D10RegisterResource", ( "hipGraphicsD3D10RegisterResource", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "CU_D3D10_MAPRESOURCE_FLAGS_NONE", ( "HIP_D3D10_MAPRESOURCE_FLAGS_NONE", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "CU_D3D10_MAPRESOURCE_FLAGS_READONLY", ( "HIP_D3D10_MAPRESOURCE_FLAGS_READONLY", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "CU_D3D10_MAPRESOURCE_FLAGS_WRITEDISCARD", ( "HIP_D3D10_MAPRESOURCE_FLAGS_WRITEDISCARD", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "CU_D3D10_REGISTER_FLAGS_NONE", ("HIP_D3D10_REGISTER_FLAGS_NONE", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_D3D10_REGISTER_FLAGS_ARRAY", ("HIP_D3D10_REGISTER_FLAGS_ARRAY", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuD3D10CtxCreate", ("hipD3D10CtxCreate", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuD3D10CtxCreateOnDevice", ("hipD3D10CtxCreateOnDevice", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuD3D10GetDirect3DDevice", ("hipD3D10GetDirect3DDevice", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuD3D10MapResources", ("hipD3D10MapResources", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuD3D10RegisterResource", ("hipD3D10RegisterResource", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuD3D10ResourceGetMappedArray", ("hipD3D10ResourceGetMappedArray", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuD3D10ResourceGetMappedPitch", ("hipD3D10ResourceGetMappedPitch", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuD3D10ResourceGetMappedPointer", ( "hipD3D10ResourceGetMappedPointer", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "cuD3D10ResourceGetMappedSize", ("hipD3D10ResourceGetMappedSize", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuD3D10ResourceGetSurfaceDimensions", ( "hipD3D10ResourceGetSurfaceDimensions", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "cuD310ResourceSetMapFlags", ("hipD3D10ResourceSetMapFlags", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuD3D10UnmapResources", ("hipD3D10UnmapResources", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuD3D10UnregisterResource", ("hipD3D10UnregisterResource", CONV_D3D10, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_D3D11_DEVICE_LIST_ALL", ("HIP_D3D11_DEVICE_LIST_ALL", CONV_D3D11, API_DRIVER, HIP_UNSUPPORTED), ), ( "CU_D3D11_DEVICE_LIST_CURRENT_FRAME", ( "HIP_D3D11_DEVICE_LIST_CURRENT_FRAME", CONV_D3D11, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "CU_D3D11_DEVICE_LIST_NEXT_FRAME", ( "HIP_D3D11_DEVICE_LIST_NEXT_FRAME", CONV_D3D11, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "cuD3D11GetDevice", ("hipD3D11GetDevice", CONV_D3D11, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuD3D11GetDevices", ("hipD3D11GetDevices", CONV_D3D11, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuGraphicsD3D11RegisterResource", ( "hipGraphicsD3D11RegisterResource", CONV_D3D11, API_DRIVER, HIP_UNSUPPORTED, ), ), ( 
"cuD3D11CtxCreate", ("hipD3D11CtxCreate", CONV_D3D11, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuD3D11CtxCreateOnDevice", ("hipD3D11CtxCreateOnDevice", CONV_D3D11, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuD3D11GetDirect3DDevice", ("hipD3D11GetDirect3DDevice", CONV_D3D11, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuGraphicsVDPAURegisterOutputSurface", ( "hipGraphicsVDPAURegisterOutputSurface", CONV_VDPAU, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "cuGraphicsVDPAURegisterVideoSurface", ( "hipGraphicsVDPAURegisterVideoSurface", CONV_VDPAU, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "cuVDPAUGetDevice", ("hipVDPAUGetDevice", CONV_VDPAU, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuVDPAUCtxCreate", ("hipVDPAUCtxCreate", CONV_VDPAU, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuEGLStreamConsumerAcquireFrame", ("hipEGLStreamConsumerAcquireFrame", CONV_EGL, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuEGLStreamConsumerConnect", ("hipEGLStreamConsumerConnect", CONV_EGL, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuEGLStreamConsumerConnectWithFlags", ( "hipEGLStreamConsumerConnectWithFlags", CONV_EGL, API_DRIVER, HIP_UNSUPPORTED, ), ), ( "cuEGLStreamConsumerDisconnect", ("hipEGLStreamConsumerDisconnect", CONV_EGL, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuEGLStreamConsumerReleaseFrame", ("hipEGLStreamConsumerReleaseFrame", CONV_EGL, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuEGLStreamProducerConnect", ("hipEGLStreamProducerConnect", CONV_EGL, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuEGLStreamProducerDisconnect", ("hipEGLStreamProducerDisconnect", CONV_EGL, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuEGLStreamProducerPresentFrame", ("hipEGLStreamProducerPresentFrame", CONV_EGL, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuEGLStreamProducerReturnFrame", ("hipEGLStreamProducerReturnFrame", CONV_EGL, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuGraphicsEGLRegisterImage", ("hipGraphicsEGLRegisterImage", CONV_EGL, API_DRIVER, HIP_UNSUPPORTED), ), ( "cuGraphicsResourceGetMappedEglFrame", ( "hipGraphicsResourceGetMappedEglFrame", CONV_EGL, API_DRIVER, HIP_UNSUPPORTED, ), ), ("cudaDataType_t", ("hipDataType_t", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)), ("cudaDataType", ("hipDataType", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)), ("CUDA_R_16F", ("HIP_R_16F", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)), ("CUDA_C_16F", ("HIP_C_16F", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)), ("CUDA_R_32F", ("HIP_R_32F", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)), ("CUDA_C_32F", ("HIP_C_32F", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)), ("CUDA_R_64F", ("HIP_R_64F", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)), ("CUDA_C_64F", ("HIP_C_64F", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)), ("CUDA_R_8I", ("HIP_R_8I", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)), ("CUDA_C_8I", ("HIP_C_8I", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)), ("CUDA_R_8U", ("HIP_R_8U", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)), ("CUDA_C_8U", ("HIP_C_8U", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)), ("CUDA_R_32I", ("HIP_R_32I", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)), ("CUDA_C_32I", ("HIP_C_32I", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)), ("CUDA_R_32U", ("HIP_R_32U", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)), ("CUDA_C_32U", ("HIP_C_32U", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)), ( "MAJOR_VERSION", ("hipLibraryMajorVersion", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), ), ( "MINOR_VERSION", ("hipLibraryMinorVersion", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), ), ( "PATCH_LEVEL", ("hipLibraryPatchVersion", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaMemAttachGlobal", ("hipMemAttachGlobal", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaMemAttachHost", 
("hipMemAttachHost", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaMemAttachSingle", ("hipMemAttachSingle", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaOccupancyDefault", ("hipOccupancyDefault", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaOccupancyDisableCachingOverride", ( "hipOccupancyDisableCachingOverride", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED, ), ), ("cudaGetLastError", ("hipGetLastError", CONV_ERROR, API_RUNTIME)), ("cudaPeekAtLastError", ("hipPeekAtLastError", CONV_ERROR, API_RUNTIME)), ("cudaGetErrorName", ("hipGetErrorName", CONV_ERROR, API_RUNTIME)), ("cudaGetErrorString", ("hipGetErrorString", CONV_ERROR, API_RUNTIME)), ("cudaMemcpy3DParms", ("hipMemcpy3DParms", CONV_MEM, API_RUNTIME)), ( "cudaMemcpy3DPeerParms", ("hipMemcpy3DPeerParms", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED), ), ("cudaMemcpy", ("hipMemcpy", CONV_MEM, API_RUNTIME)), ("cudaMemcpyToArray", ("hipMemcpyToArray", CONV_MEM, API_RUNTIME)), ("cudaMemcpyToSymbol", ("hipMemcpyToSymbol", CONV_MEM, API_RUNTIME)), ("cudaMemcpyToSymbolAsync", ("hipMemcpyToSymbolAsync", CONV_MEM, API_RUNTIME)), ("cudaMemcpyAsync", ("hipMemcpyAsync", CONV_MEM, API_RUNTIME)), ("cudaMemcpy2D", ("hipMemcpy2D", CONV_MEM, API_RUNTIME)), ("cudaMemcpy2DAsync", ("hipMemcpy2DAsync", CONV_MEM, API_RUNTIME)), ("cudaMemcpy2DToArray", ("hipMemcpy2DToArray", CONV_MEM, API_RUNTIME)), ( "cudaMemcpy2DArrayToArray", ("hipMemcpy2DArrayToArray", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaMemcpy2DFromArray", ("hipMemcpy2DFromArray", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaMemcpy2DFromArrayAsync", ("hipMemcpy2DFromArrayAsync", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaMemcpy2DToArrayAsync", ("hipMemcpy2DToArrayAsync", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED), ), ("cudaMemcpy3D", ("hipMemcpy3D", CONV_MEM, API_RUNTIME)), ( "cudaMemcpy3DAsync", ("hipMemcpy3DAsync", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaMemcpy3DPeer", ("hipMemcpy3DPeer", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaMemcpy3DPeerAsync", ("hipMemcpy3DPeerAsync", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaMemcpyArrayToArray", ("hipMemcpyArrayToArray", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaMemcpyFromArrayAsync", ("hipMemcpyFromArrayAsync", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED), ), ("cudaMemcpyFromSymbol", ("hipMemcpyFromSymbol", CONV_MEM, API_RUNTIME)), ( "cudaMemcpyFromSymbolAsync", ("hipMemcpyFromSymbolAsync", CONV_MEM, API_RUNTIME), ), ("cudaMemAdvise", ("hipMemAdvise", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED)), ( "cudaMemRangeGetAttribute", ("hipMemRangeGetAttribute", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaMemRangeGetAttributes", ("hipMemRangeGetAttributes", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaMemAdviseSetReadMostly", ("hipMemAdviseSetReadMostly", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaMemAdviseUnsetReadMostly", ("hipMemAdviseUnsetReadMostly", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaMemAdviseSetPreferredLocation", ( "hipMemAdviseSetPreferredLocation", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaMemAdviseUnsetPreferredLocation", ( "hipMemAdviseUnsetPreferredLocation", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaMemAdviseSetAccessedBy", ("hipMemAdviseSetAccessedBy", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaMemAdviseUnsetAccessedBy", ("hipMemAdviseUnsetAccessedBy", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaMemRangeAttributeReadMostly", ("hipMemRangeAttributeReadMostly", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), ), ( 
"cudaMemRangeAttributePreferredLocation", ( "hipMemRangeAttributePreferredLocation", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaMemRangeAttributeAccessedBy", ("hipMemRangeAttributeAccessedBy", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaMemRangeAttributeLastPrefetchLocation", ( "hipMemRangeAttributeLastPrefetchLocation", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED, ), ), ("cudaMemcpyHostToHost", ("hipMemcpyHostToHost", CONV_MEM, API_RUNTIME)), ("cudaMemcpyHostToDevice", ("hipMemcpyHostToDevice", CONV_MEM, API_RUNTIME)), ("cudaMemcpyDeviceToHost", ("hipMemcpyDeviceToHost", CONV_MEM, API_RUNTIME)), ( "cudaMemcpyDeviceToDevice", ("hipMemcpyDeviceToDevice", CONV_MEM, API_RUNTIME), ), ("cudaMemcpyDefault", ("hipMemcpyDefault", CONV_MEM, API_RUNTIME)), ("cudaMemset", ("hipMemset", CONV_MEM, API_RUNTIME)), ("cudaMemsetAsync", ("hipMemsetAsync", CONV_MEM, API_RUNTIME)), ("cudaMemset2D", ("hipMemset2D", CONV_MEM, API_RUNTIME)), ( "cudaMemset2DAsync", ("hipMemset2DAsync", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED), ), ("cudaMemset3D", ("hipMemset3D", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED)), ( "cudaMemset3DAsync", ("hipMemset3DAsync", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED), ), ("cudaMemGetInfo", ("hipMemGetInfo", CONV_MEM, API_RUNTIME)), ( "cudaArrayGetInfo", ("hipArrayGetInfo", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaFreeMipmappedArray", ("hipFreeMipmappedArray", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaGetMipmappedArrayLevel", ("hipGetMipmappedArrayLevel", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaGetSymbolAddress", ("hipGetSymbolAddress", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaGetSymbolSize", ("hipGetSymbolSize", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaMemPrefetchAsync", ("hipMemPrefetchAsync", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED), ), ("cudaMallocHost", ("hipHostMalloc", CONV_MEM, API_RUNTIME)), ("cudaMallocArray", ("hipMallocArray", CONV_MEM, API_RUNTIME)), ("cudaMalloc", ("hipMalloc", CONV_MEM, API_RUNTIME)), ("cudaMalloc3D", ("hipMalloc3D", CONV_MEM, API_RUNTIME)), ("cudaMalloc3DArray", ("hipMalloc3DArray", CONV_MEM, API_RUNTIME)), ( "cudaMallocManaged", ("hipMallocManaged", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaMallocMipmappedArray", ("hipMallocMipmappedArray", CONV_MEM, API_RUNTIME, HIP_UNSUPPORTED), ), ("cudaMallocPitch", ("hipMallocPitch", CONV_MEM, API_RUNTIME)), ("cudaFreeHost", ("hipHostFree", CONV_MEM, API_RUNTIME)), ("cudaFreeArray", ("hipFreeArray", CONV_MEM, API_RUNTIME)), ("cudaFree", ("hipFree", CONV_MEM, API_RUNTIME)), ("cudaHostRegister", ("hipHostRegister", CONV_MEM, API_RUNTIME)), ("cudaHostUnregister", ("hipHostUnregister", CONV_MEM, API_RUNTIME)), ("cudaHostAlloc", ("hipHostMalloc", CONV_MEM, API_RUNTIME)), ("cudaMemoryTypeHost", ("hipMemoryTypeHost", CONV_MEM, API_RUNTIME)), ("cudaMemoryTypeDevice", ("hipMemoryTypeDevice", CONV_MEM, API_RUNTIME)), ("make_cudaExtent", ("make_hipExtent", CONV_MEM, API_RUNTIME)), ("make_cudaPitchedPtr", ("make_hipPitchedPtr", CONV_MEM, API_RUNTIME)), ("make_cudaPos", ("make_hipPos", CONV_MEM, API_RUNTIME)), ("cudaHostAllocDefault", ("hipHostMallocDefault", CONV_MEM, API_RUNTIME)), ("cudaHostAllocPortable", ("hipHostMallocPortable", CONV_MEM, API_RUNTIME)), ("cudaHostAllocMapped", ("hipHostMallocMapped", CONV_MEM, API_RUNTIME)), ( "cudaHostAllocWriteCombined", ("hipHostMallocWriteCombined", CONV_MEM, API_RUNTIME), ), ("cudaHostGetFlags", ("hipHostGetFlags", CONV_MEM, API_RUNTIME)), ("cudaHostRegisterDefault", ("hipHostRegisterDefault", CONV_MEM, API_RUNTIME)), ( 
"cudaHostRegisterPortable", ("hipHostRegisterPortable", CONV_MEM, API_RUNTIME), ), ("cudaHostRegisterMapped", ("hipHostRegisterMapped", CONV_MEM, API_RUNTIME)), ( "cudaHostRegisterIoMemory", ("hipHostRegisterIoMemory", CONV_MEM, API_RUNTIME), ), # ("warpSize", ("hipWarpSize", CONV_SPECIAL_FUNC, API_RUNTIME), (HIP actually uses warpSize...)), ("cudaEventCreate", ("hipEventCreate", CONV_EVENT, API_RUNTIME)), ( "cudaEventCreateWithFlags", ("hipEventCreateWithFlags", CONV_EVENT, API_RUNTIME), ), ("cudaEventDestroy", ("hipEventDestroy", CONV_EVENT, API_RUNTIME)), ("cudaEventRecord", ("hipEventRecord", CONV_EVENT, API_RUNTIME)), ("cudaEventElapsedTime", ("hipEventElapsedTime", CONV_EVENT, API_RUNTIME)), ("cudaEventSynchronize", ("hipEventSynchronize", CONV_EVENT, API_RUNTIME)), ("cudaEventQuery", ("hipEventQuery", CONV_EVENT, API_RUNTIME)), ("cudaEventDefault", ("hipEventDefault", CONV_EVENT, API_RUNTIME)), ("cudaEventBlockingSync", ("hipEventBlockingSync", CONV_EVENT, API_RUNTIME)), ("cudaEventDisableTiming", ("hipEventDisableTiming", CONV_EVENT, API_RUNTIME)), ("cudaEventInterprocess", ("hipEventInterprocess", CONV_EVENT, API_RUNTIME)), ("cudaStreamCreate", ("hipStreamCreate", CONV_STREAM, API_RUNTIME)), ( "cudaStreamCreateWithFlags", ("hipStreamCreateWithFlags", CONV_STREAM, API_RUNTIME), ), ( "cudaStreamCreateWithPriority", ("hipStreamCreateWithPriority", CONV_STREAM, API_RUNTIME, HIP_UNSUPPORTED), ), ("cudaStreamDestroy", ("hipStreamDestroy", CONV_STREAM, API_RUNTIME)), ("cudaStreamWaitEvent", ("hipStreamWaitEvent", CONV_STREAM, API_RUNTIME)), ("cudaStreamSynchronize", ("hipStreamSynchronize", CONV_STREAM, API_RUNTIME)), ("cudaStreamGetFlags", ("hipStreamGetFlags", CONV_STREAM, API_RUNTIME)), ("cudaStreamQuery", ("hipStreamQuery", CONV_STREAM, API_RUNTIME)), ("cudaStreamAddCallback", ("hipStreamAddCallback", CONV_STREAM, API_RUNTIME)), ( "cudaStreamAttachMemAsync", ("hipStreamAttachMemAsync", CONV_STREAM, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaStreamGetPriority", ("hipStreamGetPriority", CONV_STREAM, API_RUNTIME, HIP_UNSUPPORTED), ), ("cudaStreamDefault", ("hipStreamDefault", CONV_TYPE, API_RUNTIME)), ("cudaStreamNonBlocking", ("hipStreamNonBlocking", CONV_TYPE, API_RUNTIME)), ("cudaDeviceSynchronize", ("hipDeviceSynchronize", CONV_DEVICE, API_RUNTIME)), ("cudaDeviceReset", ("hipDeviceReset", CONV_DEVICE, API_RUNTIME)), ("cudaSetDevice", ("hipSetDevice", CONV_DEVICE, API_RUNTIME)), ("cudaGetDevice", ("hipGetDevice", CONV_DEVICE, API_RUNTIME)), ("cudaGetDeviceCount", ("hipGetDeviceCount", CONV_DEVICE, API_RUNTIME)), ("cudaChooseDevice", ("hipChooseDevice", CONV_DEVICE, API_RUNTIME)), ("cudaThreadExit", ("hipDeviceReset", CONV_THREAD, API_RUNTIME)), ( "cudaThreadGetCacheConfig", ("hipDeviceGetCacheConfig", CONV_THREAD, API_RUNTIME), ), ( "cudaThreadGetLimit", ("hipThreadGetLimit", CONV_THREAD, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaThreadSetCacheConfig", ("hipDeviceSetCacheConfig", CONV_THREAD, API_RUNTIME), ), ( "cudaThreadSetLimit", ("hipThreadSetLimit", CONV_THREAD, API_RUNTIME, HIP_UNSUPPORTED), ), ("cudaThreadSynchronize", ("hipDeviceSynchronize", CONV_THREAD, API_RUNTIME)), ("cudaDeviceGetAttribute", ("hipDeviceGetAttribute", CONV_DEVICE, API_RUNTIME)), ( "cudaDevAttrMaxThreadsPerBlock", ("hipDeviceAttributeMaxThreadsPerBlock", CONV_TYPE, API_RUNTIME), ), ( "cudaDevAttrMaxBlockDimX", ("hipDeviceAttributeMaxBlockDimX", CONV_TYPE, API_RUNTIME), ), ( "cudaDevAttrMaxBlockDimY", ("hipDeviceAttributeMaxBlockDimY", CONV_TYPE, API_RUNTIME), ), ( "cudaDevAttrMaxBlockDimZ", 
("hipDeviceAttributeMaxBlockDimZ", CONV_TYPE, API_RUNTIME), ), ( "cudaDevAttrMaxGridDimX", ("hipDeviceAttributeMaxGridDimX", CONV_TYPE, API_RUNTIME), ), ( "cudaDevAttrMaxGridDimY", ("hipDeviceAttributeMaxGridDimY", CONV_TYPE, API_RUNTIME), ), ( "cudaDevAttrMaxGridDimZ", ("hipDeviceAttributeMaxGridDimZ", CONV_TYPE, API_RUNTIME), ), ( "cudaDevAttrMaxSharedMemoryPerBlock", ("hipDeviceAttributeMaxSharedMemoryPerBlock", CONV_TYPE, API_RUNTIME), ), ( "cudaDevAttrTotalConstantMemory", ("hipDeviceAttributeTotalConstantMemory", CONV_TYPE, API_RUNTIME), ), ("cudaDevAttrWarpSize", ("hipDeviceAttributeWarpSize", CONV_TYPE, API_RUNTIME)), ( "cudaDevAttrMaxPitch", ("hipDeviceAttributeMaxPitch", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaDevAttrMaxRegistersPerBlock", ("hipDeviceAttributeMaxRegistersPerBlock", CONV_TYPE, API_RUNTIME), ), ( "cudaDevAttrClockRate", ("hipDeviceAttributeClockRate", CONV_TYPE, API_RUNTIME), ), ( "cudaDevAttrTextureAlignment", ( "hipDeviceAttributeTextureAlignment", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaDevAttrGpuOverlap", ("hipDeviceAttributeGpuOverlap", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaDevAttrMultiProcessorCount", ("hipDeviceAttributeMultiprocessorCount", CONV_TYPE, API_RUNTIME), ), ( "cudaDevAttrKernelExecTimeout", ( "hipDeviceAttributeKernelExecTimeout", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaDevAttrIntegrated", ("hipDeviceAttributeIntegrated", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaDevAttrCanMapHostMemory", ( "hipDeviceAttributeCanMapHostMemory", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaDevAttrComputeMode", ("hipDeviceAttributeComputeMode", CONV_TYPE, API_RUNTIME), ), ( "cudaDevAttrMaxTexture1DWidth", ( "hipDeviceAttributeMaxTexture1DWidth", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaDevAttrMaxTexture2DWidth", ( "hipDeviceAttributeMaxTexture2DWidth", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaDevAttrMaxTexture2DHeight", ( "hipDeviceAttributeMaxTexture2DHeight", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaDevAttrMaxTexture3DWidth", ( "hipDeviceAttributeMaxTexture3DWidth", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaDevAttrMaxTexture3DHeight", ( "hipDeviceAttributeMaxTexture3DHeight", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaDevAttrMaxTexture3DDepth", ( "hipDeviceAttributeMaxTexture3DDepth", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaDevAttrMaxTexture2DLayeredWidth", ( "hipDeviceAttributeMaxTexture2DLayeredWidth", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaDevAttrMaxTexture2DLayeredHeight", ( "hipDeviceAttributeMaxTexture2DLayeredHeight", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaDevAttrMaxTexture2DLayeredLayers", ( "hipDeviceAttributeMaxTexture2DLayeredLayers", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaDevAttrSurfaceAlignment", ( "hipDeviceAttributeSurfaceAlignment", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaDevAttrConcurrentKernels", ("hipDeviceAttributeConcurrentKernels", CONV_TYPE, API_RUNTIME), ), ( "cudaDevAttrEccEnabled", ("hipDeviceAttributeEccEnabled", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), ), ("cudaDevAttrPciBusId", ("hipDeviceAttributePciBusId", CONV_TYPE, API_RUNTIME)), ( "cudaDevAttrPciDeviceId", ("hipDeviceAttributePciDeviceId", CONV_TYPE, API_RUNTIME), ), ( "cudaDevAttrTccDriver", ("hipDeviceAttributeTccDriver", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaDevAttrMemoryClockRate", ("hipDeviceAttributeMemoryClockRate", CONV_TYPE, API_RUNTIME), ), ( 
"cudaDevAttrGlobalMemoryBusWidth", ("hipDeviceAttributeMemoryBusWidth", CONV_TYPE, API_RUNTIME), ), ( "cudaDevAttrL2CacheSize", ("hipDeviceAttributeL2CacheSize", CONV_TYPE, API_RUNTIME), ), ( "cudaDevAttrMaxThreadsPerMultiProcessor", ("hipDeviceAttributeMaxThreadsPerMultiProcessor", CONV_TYPE, API_RUNTIME), ), ( "cudaDevAttrAsyncEngineCount", ( "hipDeviceAttributeAsyncEngineCount", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaDevAttrUnifiedAddressing", ( "hipDeviceAttributeUnifiedAddressing", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaDevAttrMaxTexture1DLayeredWidth", ( "hipDeviceAttributeMaxTexture1DLayeredWidth", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaDevAttrMaxTexture1DLayeredLayers", ( "hipDeviceAttributeMaxTexture1DLayeredLayers", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaDevAttrMaxTexture2DGatherWidth", ( "hipDeviceAttributeMaxTexture2DGatherWidth", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaDevAttrMaxTexture2DGatherHeight", ( "hipDeviceAttributeMaxTexture2DGatherHeight", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaDevAttrMaxTexture3DWidthAlt", ( "hipDeviceAttributeMaxTexture3DWidthAlternate", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaDevAttrMaxTexture3DHeightAlt", ( "hipDeviceAttributeMaxTexture3DHeightAlternate", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaDevAttrMaxTexture3DDepthAlt", ( "hipDeviceAttributeMaxTexture3DDepthAlternate", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaDevAttrPciDomainId", ("hipDeviceAttributePciDomainId", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaDevAttrTexturePitchAlignment", ( "hipDeviceAttributeTexturePitchAlignment", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaDevAttrMaxTextureCubemapWidth", ( "hipDeviceAttributeMaxTextureCubemapWidth", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaDevAttrMaxTextureCubemapLayeredWidth", ( "hipDeviceAttributeMaxTextureCubemapLayeredWidth", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaDevAttrMaxTextureCubemapLayeredLayers", ( "hipDeviceAttributeMaxTextureCubemapLayeredLayers", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaDevAttrMaxSurface1DWidth", ( "hipDeviceAttributeMaxSurface1DWidth", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaDevAttrMaxSurface2DWidth", ( "hipDeviceAttributeMaxSurface2DWidth", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaDevAttrMaxSurface2DHeight", ( "hipDeviceAttributeMaxSurface2DHeight", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaDevAttrMaxSurface3DWidth", ( "hipDeviceAttributeMaxSurface3DWidth", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaDevAttrMaxSurface3DHeight", ( "hipDeviceAttributeMaxSurface3DHeight", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaDevAttrMaxSurface3DDepth", ( "hipDeviceAttributeMaxSurface3DDepth", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaDevAttrMaxSurface1DLayeredWidth", ( "hipDeviceAttributeMaxSurface1DLayeredWidth", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaDevAttrMaxSurface1DLayeredLayers", ( "hipDeviceAttributeMaxSurface1DLayeredLayers", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaDevAttrMaxSurface2DLayeredWidth", ( "hipDeviceAttributeMaxSurface2DLayeredWidth", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaDevAttrMaxSurface2DLayeredHeight", ( "hipDeviceAttributeMaxSurface2DLayeredHeight", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaDevAttrMaxSurface2DLayeredLayers", ( "hipDeviceAttributeMaxSurface2DLayeredLayers", CONV_TYPE, 
API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaDevAttrMaxSurfaceCubemapWidth", ( "hipDeviceAttributeMaxSurfaceCubemapWidth", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaDevAttrMaxSurfaceCubemapLayeredWidth", ( "hipDeviceAttributeMaxSurfaceCubemapLayeredWidth", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaDevAttrMaxSurfaceCubemapLayeredLayers", ( "hipDeviceAttributeMaxSurfaceCubemapLayeredLayers", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaDevAttrMaxTexture1DLinearWidth", ( "hipDeviceAttributeMaxTexture1DLinearWidth", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaDevAttrMaxTexture2DLinearWidth", ( "hipDeviceAttributeMaxTexture2DLinearWidth", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaDevAttrMaxTexture2DLinearHeight", ( "hipDeviceAttributeMaxTexture2DLinearHeight", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaDevAttrMaxTexture2DLinearPitch", ( "hipDeviceAttributeMaxTexture2DLinearPitch", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaDevAttrMaxTexture2DMipmappedWidth", ( "hipDeviceAttributeMaxTexture2DMipmappedWidth", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaDevAttrMaxTexture2DMipmappedHeight", ( "hipDeviceAttributeMaxTexture2DMipmappedHeight", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaDevAttrComputeCapabilityMajor", ("hipDeviceAttributeComputeCapabilityMajor", CONV_TYPE, API_RUNTIME), ), ( "cudaDevAttrComputeCapabilityMinor", ("hipDeviceAttributeComputeCapabilityMinor", CONV_TYPE, API_RUNTIME), ), ( "cudaDevAttrMaxTexture1DMipmappedWidth", ( "hipDeviceAttributeMaxTexture1DMipmappedWidth", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaDevAttrStreamPrioritiesSupported", ( "hipDeviceAttributeStreamPrioritiesSupported", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaDevAttrGlobalL1CacheSupported", ( "hipDeviceAttributeGlobalL1CacheSupported", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaDevAttrLocalL1CacheSupported", ( "hipDeviceAttributeLocalL1CacheSupported", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaDevAttrMaxSharedMemoryPerMultiprocessor", ( "hipDeviceAttributeMaxSharedMemoryPerMultiprocessor", CONV_TYPE, API_RUNTIME, ), ), ( "cudaDevAttrMaxRegistersPerMultiprocessor", ( "hipDeviceAttributeMaxRegistersPerMultiprocessor", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaDevAttrManagedMemory", ( "hipDeviceAttributeManagedMemory", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaDevAttrIsMultiGpuBoard", ("hipDeviceAttributeIsMultiGpuBoard", CONV_TYPE, API_RUNTIME), ), ( "cudaDevAttrMultiGpuBoardGroupID", ( "hipDeviceAttributeMultiGpuBoardGroupID", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaDevAttrHostNativeAtomicSupported", ( "hipDeviceAttributeHostNativeAtomicSupported", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaDevAttrSingleToDoublePrecisionPerfRatio", ( "hipDeviceAttributeSingleToDoublePrecisionPerfRatio", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaDevAttrPageableMemoryAccess", ( "hipDeviceAttributePageableMemoryAccess", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaDevAttrConcurrentManagedAccess", ( "hipDeviceAttributeConcurrentManagedAccess", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaDevAttrComputePreemptionSupported", ( "hipDeviceAttributeComputePreemptionSupported", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaDevAttrCanUseHostPointerForRegisteredMem", ( "hipDeviceAttributeCanUseHostPointerForRegisteredMem", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaPointerGetAttributes", 
("hipPointerGetAttributes", CONV_MEM, API_RUNTIME), ), ( "cudaHostGetDevicePointer", ("hipHostGetDevicePointer", CONV_MEM, API_RUNTIME), ), ( "cudaGetDeviceProperties", ("hipGetDeviceProperties", CONV_DEVICE, API_RUNTIME), ), ("cudaDeviceGetPCIBusId", ("hipDeviceGetPCIBusId", CONV_DEVICE, API_RUNTIME)), ( "cudaDeviceGetByPCIBusId", ("hipDeviceGetByPCIBusId", CONV_DEVICE, API_RUNTIME), ), ( "cudaDeviceGetStreamPriorityRange", ( "hipDeviceGetStreamPriorityRange", CONV_DEVICE, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaSetValidDevices", ("hipSetValidDevices", CONV_DEVICE, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaDevP2PAttrPerformanceRank", ( "hipDeviceP2PAttributePerformanceRank", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaDevP2PAttrAccessSupported", ( "hipDeviceP2PAttributeAccessSupported", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaDevP2PAttrNativeAtomicSupported", ( "hipDeviceP2PAttributeNativeAtomicSupported", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaDeviceGetP2PAttribute", ("hipDeviceGetP2PAttribute", CONV_DEVICE, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaComputeModeDefault", ("hipComputeModeDefault", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaComputeModeExclusive", ("hipComputeModeExclusive", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaComputeModeProhibited", ("hipComputeModeProhibited", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaComputeModeExclusiveProcess", ("hipComputeModeExclusiveProcess", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaGetDeviceFlags", ("hipGetDeviceFlags", CONV_DEVICE, API_RUNTIME, HIP_UNSUPPORTED), ), ("cudaSetDeviceFlags", ("hipSetDeviceFlags", CONV_DEVICE, API_RUNTIME)), ("cudaDeviceScheduleAuto", ("hipDeviceScheduleAuto", CONV_TYPE, API_RUNTIME)), ("cudaDeviceScheduleSpin", ("hipDeviceScheduleSpin", CONV_TYPE, API_RUNTIME)), ("cudaDeviceScheduleYield", ("hipDeviceScheduleYield", CONV_TYPE, API_RUNTIME)), ( "cudaDeviceBlockingSync", ("hipDeviceScheduleBlockingSync", CONV_TYPE, API_RUNTIME), ), ( "cudaDeviceScheduleBlockingSync", ("hipDeviceScheduleBlockingSync", CONV_TYPE, API_RUNTIME), ), ( "cudaDeviceScheduleMask", ("hipDeviceScheduleMask", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), ), ("cudaDeviceMapHost", ("hipDeviceMapHost", CONV_TYPE, API_RUNTIME)), ( "cudaDeviceLmemResizeToMax", ("hipDeviceLmemResizeToMax", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), ), ("cudaDeviceMask", ("hipDeviceMask", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED)), ( "cudaDeviceSetCacheConfig", ("hipDeviceSetCacheConfig", CONV_CACHE, API_RUNTIME), ), ( "cudaDeviceGetCacheConfig", ("hipDeviceGetCacheConfig", CONV_CACHE, API_RUNTIME), ), ("cudaFuncSetCacheConfig", ("hipFuncSetCacheConfig", CONV_CACHE, API_RUNTIME)), ( "cudaFuncCachePreferNone", ("hipFuncCachePreferNone", CONV_CACHE, API_RUNTIME), ), ( "cudaFuncCachePreferShared", ("hipFuncCachePreferShared", CONV_CACHE, API_RUNTIME), ), ("cudaFuncCachePreferL1", ("hipFuncCachePreferL1", CONV_CACHE, API_RUNTIME)), ( "cudaFuncCachePreferEqual", ("hipFuncCachePreferEqual", CONV_CACHE, API_RUNTIME), ), ( "cudaFuncGetAttributes", ("hipFuncGetAttributes", CONV_EXEC, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaFuncSetSharedMemConfig", ("hipFuncSetSharedMemConfig", CONV_EXEC, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaGetParameterBuffer", ("hipGetParameterBuffer", CONV_EXEC, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaSetDoubleForDevice", ("hipSetDoubleForDevice", CONV_EXEC, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaSetDoubleForHost", ("hipSetDoubleForHost", CONV_EXEC, API_RUNTIME, 
HIP_UNSUPPORTED), ), ( "cudaConfigureCall", ("hipConfigureCall", CONV_EXEC, API_RUNTIME, HIP_UNSUPPORTED), ), ("cudaLaunch", ("hipLaunch", CONV_EXEC, API_RUNTIME, HIP_UNSUPPORTED)), ( "cudaSetupArgument", ("hipSetupArgument", CONV_EXEC, API_RUNTIME, HIP_UNSUPPORTED), ), ("cudaDriverGetVersion", ("hipDriverGetVersion", CONV_VERSION, API_RUNTIME)), ( "cudaRuntimeGetVersion", ("hipRuntimeGetVersion", CONV_VERSION, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaOccupancyMaxPotentialBlockSize", ("hipOccupancyMaxPotentialBlockSize", CONV_OCCUPANCY, API_RUNTIME), ), ( "cudaOccupancyMaxPotentialBlockSizeWithFlags", ( "hipOccupancyMaxPotentialBlockSizeWithFlags", CONV_OCCUPANCY, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaOccupancyMaxActiveBlocksPerMultiprocessor", ( "hipOccupancyMaxActiveBlocksPerMultiprocessor", CONV_OCCUPANCY, API_RUNTIME, ), ), ( "cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags", ( "hipOccupancyMaxActiveBlocksPerMultiprocessorWithFlags", CONV_OCCUPANCY, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaOccupancyMaxPotentialBlockSizeVariableSMem", ( "hipOccupancyMaxPotentialBlockSizeVariableSMem", CONV_OCCUPANCY, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags", ( "hipOccupancyMaxPotentialBlockSizeVariableSMemWithFlags", CONV_OCCUPANCY, API_RUNTIME, HIP_UNSUPPORTED, ), ), ("cudaDeviceCanAccessPeer", ("hipDeviceCanAccessPeer", CONV_PEER, API_RUNTIME)), ( "cudaDeviceDisablePeerAccess", ("hipDeviceDisablePeerAccess", CONV_PEER, API_RUNTIME), ), ( "cudaDeviceEnablePeerAccess", ("hipDeviceEnablePeerAccess", CONV_PEER, API_RUNTIME), ), ("cudaMemcpyPeerAsync", ("hipMemcpyPeerAsync", CONV_MEM, API_RUNTIME)), ("cudaMemcpyPeer", ("hipMemcpyPeer", CONV_MEM, API_RUNTIME)), ( "cudaIpcMemLazyEnablePeerAccess", ("hipIpcMemLazyEnablePeerAccess", CONV_TYPE, API_RUNTIME), ), ( "cudaDeviceSetSharedMemConfig", ("hipDeviceSetSharedMemConfig", CONV_DEVICE, API_RUNTIME), ), ( "cudaDeviceGetSharedMemConfig", ("hipDeviceGetSharedMemConfig", CONV_DEVICE, API_RUNTIME), ), ( "cudaSharedMemBankSizeDefault", ("hipSharedMemBankSizeDefault", CONV_TYPE, API_RUNTIME), ), ( "cudaSharedMemBankSizeFourByte", ("hipSharedMemBankSizeFourByte", CONV_TYPE, API_RUNTIME), ), ( "cudaSharedMemBankSizeEightByte", ("hipSharedMemBankSizeEightByte", CONV_TYPE, API_RUNTIME), ), ( "cudaLimitStackSize", ("hipLimitStackSize", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaLimitPrintfFifoSize", ("hipLimitPrintfFifoSize", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), ), ("cudaLimitMallocHeapSize", ("hipLimitMallocHeapSize", CONV_TYPE, API_RUNTIME)), ( "cudaLimitDevRuntimeSyncDepth", ("hipLimitDevRuntimeSyncDepth", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaLimitDevRuntimePendingLaunchCount", ( "hipLimitDevRuntimePendingLaunchCount", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED, ), ), ("cudaDeviceGetLimit", ("hipDeviceGetLimit", CONV_DEVICE, API_RUNTIME)), ( "cudaProfilerInitialize", ("hipProfilerInitialize", CONV_OTHER, API_RUNTIME, HIP_UNSUPPORTED), ), ("cudaProfilerStart", ("hipProfilerStart", CONV_OTHER, API_RUNTIME)), ("cudaProfilerStop", ("hipProfilerStop", CONV_OTHER, API_RUNTIME)), ( "cudaKeyValuePair", ("hipKeyValuePair", CONV_OTHER, API_RUNTIME, HIP_UNSUPPORTED), ), ("cudaCSV", ("hipCSV", CONV_OTHER, API_RUNTIME, HIP_UNSUPPORTED)), ("cudaReadModeElementType", ("hipReadModeElementType", CONV_TEX, API_RUNTIME)), ( "cudaReadModeNormalizedFloat", ("hipReadModeNormalizedFloat", CONV_TEX, API_RUNTIME), ), ("cudaFilterModePoint", ("hipFilterModePoint", CONV_TEX, API_RUNTIME)), 
("cudaFilterModeLinear", ("hipFilterModeLinear", CONV_TEX, API_RUNTIME)), ("cudaBindTexture", ("hipBindTexture", CONV_TEX, API_RUNTIME)), ("cudaUnbindTexture", ("hipUnbindTexture", CONV_TEX, API_RUNTIME)), ("cudaBindTexture2D", ("hipBindTexture2D", CONV_TEX, API_RUNTIME)), ("cudaBindTextureToArray", ("hipBindTextureToArray", CONV_TEX, API_RUNTIME)), ( "cudaBindTextureToMipmappedArray", ("hipBindTextureToMipmappedArray", CONV_TEX, API_RUNTIME), ), ( "cudaGetTextureAlignmentOffset", ("hipGetTextureAlignmentOffset", CONV_TEX, API_RUNTIME), ), ("cudaGetTextureReference", ("hipGetTextureReference", CONV_TEX, API_RUNTIME)), ( "cudaChannelFormatKindSigned", ("hipChannelFormatKindSigned", CONV_TEX, API_RUNTIME), ), ( "cudaChannelFormatKindUnsigned", ("hipChannelFormatKindUnsigned", CONV_TEX, API_RUNTIME), ), ( "cudaChannelFormatKindFloat", ("hipChannelFormatKindFloat", CONV_TEX, API_RUNTIME), ), ( "cudaChannelFormatKindNone", ("hipChannelFormatKindNone", CONV_TEX, API_RUNTIME), ), ("cudaCreateChannelDesc", ("hipCreateChannelDesc", CONV_TEX, API_RUNTIME)), ("cudaGetChannelDesc", ("hipGetChannelDesc", CONV_TEX, API_RUNTIME)), ("cudaResourceTypeArray", ("hipResourceTypeArray", CONV_TEX, API_RUNTIME)), ( "cudaResourceTypeMipmappedArray", ("hipResourceTypeMipmappedArray", CONV_TEX, API_RUNTIME), ), ("cudaResourceTypeLinear", ("hipResourceTypeLinear", CONV_TEX, API_RUNTIME)), ("cudaResourceTypePitch2D", ("hipResourceTypePitch2D", CONV_TEX, API_RUNTIME)), ("cudaResViewFormatNone", ("hipResViewFormatNone", CONV_TEX, API_RUNTIME)), ( "cudaResViewFormatUnsignedChar1", ("hipResViewFormatUnsignedChar1", CONV_TEX, API_RUNTIME), ), ( "cudaResViewFormatUnsignedChar2", ("hipResViewFormatUnsignedChar2", CONV_TEX, API_RUNTIME), ), ( "cudaResViewFormatUnsignedChar4", ("hipResViewFormatUnsignedChar4", CONV_TEX, API_RUNTIME), ), ( "cudaResViewFormatSignedChar1", ("hipResViewFormatSignedChar1", CONV_TEX, API_RUNTIME), ), ( "cudaResViewFormatSignedChar2", ("hipResViewFormatSignedChar2", CONV_TEX, API_RUNTIME), ), ( "cudaResViewFormatSignedChar4", ("hipResViewFormatSignedChar4", CONV_TEX, API_RUNTIME), ), ( "cudaResViewFormatUnsignedShort1", ("hipResViewFormatUnsignedShort1", CONV_TEX, API_RUNTIME), ), ( "cudaResViewFormatUnsignedShort2", ("hipResViewFormatUnsignedShort2", CONV_TEX, API_RUNTIME), ), ( "cudaResViewFormatUnsignedShort4", ("hipResViewFormatUnsignedShort4", CONV_TEX, API_RUNTIME), ), ( "cudaResViewFormatSignedShort1", ("hipResViewFormatSignedShort1", CONV_TEX, API_RUNTIME), ), ( "cudaResViewFormatSignedShort2", ("hipResViewFormatSignedShort2", CONV_TEX, API_RUNTIME), ), ( "cudaResViewFormatSignedShort4", ("hipResViewFormatSignedShort4", CONV_TEX, API_RUNTIME), ), ( "cudaResViewFormatUnsignedInt1", ("hipResViewFormatUnsignedInt1", CONV_TEX, API_RUNTIME), ), ( "cudaResViewFormatUnsignedInt2", ("hipResViewFormatUnsignedInt2", CONV_TEX, API_RUNTIME), ), ( "cudaResViewFormatUnsignedInt4", ("hipResViewFormatUnsignedInt4", CONV_TEX, API_RUNTIME), ), ( "cudaResViewFormatSignedInt1", ("hipResViewFormatSignedInt1", CONV_TEX, API_RUNTIME), ), ( "cudaResViewFormatSignedInt2", ("hipResViewFormatSignedInt2", CONV_TEX, API_RUNTIME), ), ( "cudaResViewFormatSignedInt4", ("hipResViewFormatSignedInt4", CONV_TEX, API_RUNTIME), ), ("cudaResViewFormatHalf1", ("hipResViewFormatHalf1", CONV_TEX, API_RUNTIME)), ("cudaResViewFormatHalf2", ("hipResViewFormatHalf2", CONV_TEX, API_RUNTIME)), ("cudaResViewFormatHalf4", ("hipResViewFormatHalf4", CONV_TEX, API_RUNTIME)), ("cudaResViewFormatFloat1", ("hipResViewFormatFloat1", CONV_TEX, 
API_RUNTIME)), ("cudaResViewFormatFloat2", ("hipResViewFormatFloat2", CONV_TEX, API_RUNTIME)), ("cudaResViewFormatFloat4", ("hipResViewFormatFloat4", CONV_TEX, API_RUNTIME)), ( "cudaResViewFormatUnsignedBlockCompressed1", ("hipResViewFormatUnsignedBlockCompressed1", CONV_TEX, API_RUNTIME), ), ( "cudaResViewFormatUnsignedBlockCompressed2", ("hipResViewFormatUnsignedBlockCompressed2", CONV_TEX, API_RUNTIME), ), ( "cudaResViewFormatUnsignedBlockCompressed3", ("hipResViewFormatUnsignedBlockCompressed3", CONV_TEX, API_RUNTIME), ), ( "cudaResViewFormatUnsignedBlockCompressed4", ("hipResViewFormatUnsignedBlockCompressed4", CONV_TEX, API_RUNTIME), ), ( "cudaResViewFormatSignedBlockCompressed4", ("hipResViewFormatSignedBlockCompressed4", CONV_TEX, API_RUNTIME), ), ( "cudaResViewFormatUnsignedBlockCompressed5", ("hipResViewFormatUnsignedBlockCompressed5", CONV_TEX, API_RUNTIME), ), ( "cudaResViewFormatSignedBlockCompressed5", ("hipResViewFormatSignedBlockCompressed5", CONV_TEX, API_RUNTIME), ), ( "cudaResViewFormatUnsignedBlockCompressed6H", ("hipResViewFormatUnsignedBlockCompressed6H", CONV_TEX, API_RUNTIME), ), ( "cudaResViewFormatSignedBlockCompressed6H", ("hipResViewFormatSignedBlockCompressed6H", CONV_TEX, API_RUNTIME), ), ( "cudaResViewFormatUnsignedBlockCompressed7", ("hipResViewFormatUnsignedBlockCompressed7", CONV_TEX, API_RUNTIME), ), ("cudaAddressModeWrap", ("hipAddressModeWrap", CONV_TEX, API_RUNTIME)), ("cudaAddressModeClamp", ("hipAddressModeClamp", CONV_TEX, API_RUNTIME)), ("cudaAddressModeMirror", ("hipAddressModeMirror", CONV_TEX, API_RUNTIME)), ("cudaAddressModeBorder", ("hipAddressModeBorder", CONV_TEX, API_RUNTIME)), ("cudaCreateTextureObject", ("hipCreateTextureObject", CONV_TEX, API_RUNTIME)), ( "cudaDestroyTextureObject", ("hipDestroyTextureObject", CONV_TEX, API_RUNTIME), ), ( "cudaGetTextureObjectResourceDesc", ("hipGetTextureObjectResourceDesc", CONV_TEX, API_RUNTIME), ), ( "cudaGetTextureObjectResourceViewDesc", ("hipGetTextureObjectResourceViewDesc", CONV_TEX, API_RUNTIME), ), ( "cudaGetTextureObjectTextureDesc", ("hipGetTextureObjectTextureDesc", CONV_TEX, API_RUNTIME), ), ( "cudaBindSurfaceToArray", ("hipBindSurfaceToArray", CONV_SURFACE, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaGetSurfaceReference", ("hipGetSurfaceReference", CONV_SURFACE, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaBoundaryModeZero", ("hipBoundaryModeZero", CONV_SURFACE, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaBoundaryModeClamp", ("hipBoundaryModeClamp", CONV_SURFACE, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaBoundaryModeTrap", ("hipBoundaryModeTrap", CONV_SURFACE, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaFormatModeForced", ("hipFormatModeForced", CONV_SURFACE, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaFormatModeAuto", ("hipFormatModeAuto", CONV_SURFACE, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaCreateSurfaceObject", ("hipCreateSurfaceObject", CONV_SURFACE, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaDestroySurfaceObject", ("hipDestroySurfaceObject", CONV_SURFACE, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaGetSurfaceObjectResourceDesc", ( "hipGetSurfaceObjectResourceDesc", CONV_SURFACE, API_RUNTIME, HIP_UNSUPPORTED, ), ), ("cudaIpcCloseMemHandle", ("hipIpcCloseMemHandle", CONV_DEVICE, API_RUNTIME)), ("cudaIpcGetEventHandle", ("hipIpcGetEventHandle", CONV_DEVICE, API_RUNTIME)), ("cudaIpcGetMemHandle", ("hipIpcGetMemHandle", CONV_DEVICE, API_RUNTIME)), ("cudaIpcOpenEventHandle", ("hipIpcOpenEventHandle", CONV_DEVICE, API_RUNTIME)), ("cudaIpcOpenMemHandle", ("hipIpcOpenMemHandle", CONV_DEVICE, API_RUNTIME)), ( 
"cudaGLGetDevices", ("hipGLGetDevices", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaGraphicsGLRegisterBuffer", ("hipGraphicsGLRegisterBuffer", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaGraphicsGLRegisterImage", ("hipGraphicsGLRegisterImage", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaWGLGetDevice", ("hipWGLGetDevice", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaGraphicsMapResources", ("hipGraphicsMapResources", CONV_GRAPHICS, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaGraphicsResourceGetMappedMipmappedArray", ( "hipGraphicsResourceGetMappedMipmappedArray", CONV_GRAPHICS, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaGraphicsResourceGetMappedPointer", ( "hipGraphicsResourceGetMappedPointer", CONV_GRAPHICS, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaGraphicsResourceSetMapFlags", ( "hipGraphicsResourceSetMapFlags", CONV_GRAPHICS, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaGraphicsSubResourceGetMappedArray", ( "hipGraphicsSubResourceGetMappedArray", CONV_GRAPHICS, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaGraphicsUnmapResources", ("hipGraphicsUnmapResources", CONV_GRAPHICS, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaGraphicsUnregisterResource", ( "hipGraphicsUnregisterResource", CONV_GRAPHICS, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaGraphicsCubeFacePositiveX", ( "hipGraphicsCubeFacePositiveX", CONV_GRAPHICS, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaGraphicsCubeFaceNegativeX", ( "hipGraphicsCubeFaceNegativeX", CONV_GRAPHICS, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaGraphicsCubeFacePositiveY", ( "hipGraphicsCubeFacePositiveY", CONV_GRAPHICS, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaGraphicsCubeFaceNegativeY", ( "hipGraphicsCubeFaceNegativeY", CONV_GRAPHICS, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaGraphicsCubeFacePositiveZ", ( "hipGraphicsCubeFacePositiveZ", CONV_GRAPHICS, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaGraphicsCubeFaceNegativeZ", ( "hipGraphicsCubeFaceNegativeZ", CONV_GRAPHICS, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaGraphicsMapFlagsNone", ("hipGraphicsMapFlagsNone", CONV_GRAPHICS, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaGraphicsMapFlagsReadOnly", ( "hipGraphicsMapFlagsReadOnly", CONV_GRAPHICS, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaGraphicsMapFlagsWriteDiscard", ( "hipGraphicsMapFlagsWriteDiscard", CONV_GRAPHICS, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaGraphicsRegisterFlagsNone", ( "hipGraphicsRegisterFlagsNone", CONV_GRAPHICS, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaGraphicsRegisterFlagsReadOnly", ( "hipGraphicsRegisterFlagsReadOnly", CONV_GRAPHICS, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaGraphicsRegisterFlagsWriteDiscard", ( "hipGraphicsRegisterFlagsWriteDiscard", CONV_GRAPHICS, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaGraphicsRegisterFlagsSurfaceLoadStore", ( "hipGraphicsRegisterFlagsSurfaceLoadStore", CONV_GRAPHICS, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaGraphicsRegisterFlagsTextureGather", ( "hipGraphicsRegisterFlagsTextureGather", CONV_GRAPHICS, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaGLDeviceListAll", ("HIP_GL_DEVICE_LIST_ALL", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaGLDeviceListCurrentFrame", ("HIP_GL_DEVICE_LIST_CURRENT_FRAME", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaGLDeviceListNextFrame", ("HIP_GL_DEVICE_LIST_NEXT_FRAME", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaGLGetDevices", ("hipGLGetDevices", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaGraphicsGLRegisterBuffer", ("hipGraphicsGLRegisterBuffer", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaGraphicsGLRegisterImage", 
("hipGraphicsGLRegisterImage", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaWGLGetDevice", ("hipWGLGetDevice", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaGLMapFlagsNone", ("HIP_GL_MAP_RESOURCE_FLAGS_NONE", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaGLMapFlagsReadOnly", ( "HIP_GL_MAP_RESOURCE_FLAGS_READ_ONLY", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaGLMapFlagsWriteDiscard", ( "HIP_GL_MAP_RESOURCE_FLAGS_WRITE_DISCARD", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaGLMapBufferObject", ("hipGLMapBufferObject__", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaGLMapBufferObjectAsync", ("hipGLMapBufferObjectAsync__", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaGLRegisterBufferObject", ("hipGLRegisterBufferObject", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaGLSetBufferObjectMapFlags", ("hipGLSetBufferObjectMapFlags", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaGLSetGLDevice", ("hipGLSetGLDevice", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaGLUnmapBufferObject", ("hipGLUnmapBufferObject", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaGLUnmapBufferObjectAsync", ("hipGLUnmapBufferObjectAsync", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaGLUnregisterBufferObject", ("hipGLUnregisterBufferObject", CONV_GL, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaD3D9DeviceListAll", ("HIP_D3D9_DEVICE_LIST_ALL", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaD3D9DeviceListCurrentFrame", ( "HIP_D3D9_DEVICE_LIST_CURRENT_FRAME", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaD3D9DeviceListNextFrame", ( "HIP_D3D9_DEVICE_LIST_NEXT_FRAME", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaD3D9GetDevice", ("hipD3D9GetDevice", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaD3D9GetDevices", ("hipD3D9GetDevices", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaD3D9GetDirect3DDevice", ("hipD3D9GetDirect3DDevice", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaD3D9SetDirect3DDevice", ("hipD3D9SetDirect3DDevice", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaGraphicsD3D9RegisterResource", ( "hipGraphicsD3D9RegisterResource", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaD3D9MapFlags", ("hipD3D9MapFlags", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaD3D9MapFlagsNone", ( "HIP_D3D9_MAPRESOURCE_FLAGS_NONE", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaD3D9MapFlagsReadOnly", ( "HIP_D3D9_MAPRESOURCE_FLAGS_READONLY", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaD3D9MapFlagsWriteDiscard", ( "HIP_D3D9_MAPRESOURCE_FLAGS_WRITEDISCARD", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaD3D9RegisterFlagsNone", ("HIP_D3D9_REGISTER_FLAGS_NONE", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaD3D9RegisterFlagsArray", ("HIP_D3D9_REGISTER_FLAGS_ARRAY", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaD3D9MapResources", ("hipD3D9MapResources", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaD3D9RegisterResource", ("hipD3D9RegisterResource", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaD3D9ResourceGetMappedArray", ("hipD3D9ResourceGetMappedArray", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaD3D9ResourceGetMappedPitch", ("hipD3D9ResourceGetMappedPitch", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaD3D9ResourceGetMappedPointer", ( "hipD3D9ResourceGetMappedPointer", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaD3D9ResourceGetMappedSize", ("hipD3D9ResourceGetMappedSize", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaD3D9ResourceGetSurfaceDimensions", ( 
"hipD3D9ResourceGetSurfaceDimensions", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaD3D9ResourceSetMapFlags", ("hipD3D9ResourceSetMapFlags", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaD3D9UnmapResources", ("hipD3D9UnmapResources", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaD3D9UnregisterResource", ("hipD3D9UnregisterResource", CONV_D3D9, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaD3D10DeviceListAll", ("HIP_D3D10_DEVICE_LIST_ALL", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaD3D10DeviceListCurrentFrame", ( "HIP_D3D10_DEVICE_LIST_CURRENT_FRAME", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaD3D10DeviceListNextFrame", ( "HIP_D3D10_DEVICE_LIST_NEXT_FRAME", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaD3D10GetDevice", ("hipD3D10GetDevice", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaD3D10GetDevices", ("hipD3D10GetDevices", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaGraphicsD3D10RegisterResource", ( "hipGraphicsD3D10RegisterResource", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaD3D10MapFlagsNone", ( "HIP_D3D10_MAPRESOURCE_FLAGS_NONE", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaD3D10MapFlagsReadOnly", ( "HIP_D3D10_MAPRESOURCE_FLAGS_READONLY", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaD3D10MapFlagsWriteDiscard", ( "HIP_D3D10_MAPRESOURCE_FLAGS_WRITEDISCARD", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaD3D10RegisterFlagsNone", ("HIP_D3D10_REGISTER_FLAGS_NONE", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaD3D10RegisterFlagsArray", ( "HIP_D3D10_REGISTER_FLAGS_ARRAY", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaD3D10GetDirect3DDevice", ("hipD3D10GetDirect3DDevice", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaD3D10MapResources", ("hipD3D10MapResources", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaD3D10RegisterResource", ("hipD3D10RegisterResource", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaD3D10ResourceGetMappedArray", ( "hipD3D10ResourceGetMappedArray", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaD3D10ResourceGetMappedPitch", ( "hipD3D10ResourceGetMappedPitch", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaD3D10ResourceGetMappedPointer", ( "hipD3D10ResourceGetMappedPointer", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaD3D10ResourceGetMappedSize", ("hipD3D10ResourceGetMappedSize", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaD3D10ResourceGetSurfaceDimensions", ( "hipD3D10ResourceGetSurfaceDimensions", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaD3D10ResourceSetMapFlags", ("hipD3D10ResourceSetMapFlags", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaD3D10SetDirect3DDevice", ("hipD3D10SetDirect3DDevice", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaD3D10UnmapResources", ("hipD3D10UnmapResources", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaD3D10UnregisterResource", ("hipD3D10UnregisterResource", CONV_D3D10, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaD3D11DeviceListAll", ("HIP_D3D11_DEVICE_LIST_ALL", CONV_D3D11, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaD3D11DeviceListCurrentFrame", ( "HIP_D3D11_DEVICE_LIST_CURRENT_FRAME", CONV_D3D11, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaD3D11DeviceListNextFrame", ( "HIP_D3D11_DEVICE_LIST_NEXT_FRAME", CONV_D3D11, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaD3D11GetDevice", ("hipD3D11GetDevice", CONV_D3D11, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaD3D11GetDevices", ("hipD3D11GetDevices", CONV_D3D11, API_RUNTIME, HIP_UNSUPPORTED), ), ( 
"cudaGraphicsD3D11RegisterResource", ( "hipGraphicsD3D11RegisterResource", CONV_D3D11, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaD3D11GetDevice", ("hipD3D11GetDevice", CONV_D3D11, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaD3D11GetDevices", ("hipD3D11GetDevices", CONV_D3D11, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaGraphicsD3D11RegisterResource", ( "hipGraphicsD3D11RegisterResource", CONV_D3D11, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaGraphicsVDPAURegisterOutputSurface", ( "hipGraphicsVDPAURegisterOutputSurface", CONV_VDPAU, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaGraphicsVDPAURegisterVideoSurface", ( "hipGraphicsVDPAURegisterVideoSurface", CONV_VDPAU, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaVDPAUGetDevice", ("hipVDPAUGetDevice", CONV_VDPAU, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaVDPAUSetVDPAUDevice", ("hipVDPAUSetDevice", CONV_VDPAU, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaEGLStreamConsumerAcquireFrame", ( "hipEGLStreamConsumerAcquireFrame", CONV_EGL, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaEGLStreamConsumerConnect", ("hipEGLStreamConsumerConnect", CONV_EGL, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaEGLStreamConsumerConnectWithFlags", ( "hipEGLStreamConsumerConnectWithFlags", CONV_EGL, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaEGLStreamConsumerReleaseFrame", ( "hipEGLStreamConsumerReleaseFrame", CONV_EGL, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaEGLStreamProducerConnect", ("hipEGLStreamProducerConnect", CONV_EGL, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaEGLStreamProducerDisconnect", ("hipEGLStreamProducerDisconnect", CONV_EGL, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaEGLStreamProducerPresentFrame", ( "hipEGLStreamProducerPresentFrame", CONV_EGL, API_RUNTIME, HIP_UNSUPPORTED, ), ), ( "cudaEGLStreamProducerReturnFrame", ("hipEGLStreamProducerReturnFrame", CONV_EGL, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaGraphicsEGLRegisterImage", ("hipGraphicsEGLRegisterImage", CONV_EGL, API_RUNTIME, HIP_UNSUPPORTED), ), ( "cudaGraphicsResourceGetMappedEglFrame", ( "hipGraphicsResourceGetMappedEglFrame", CONV_EGL, API_RUNTIME, HIP_UNSUPPORTED, ), ), ("cublasInit", ("rocblas_init", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ( "cublasShutdown", ("rocblas_shutdown", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasGetVersion", ("rocblas_get_version", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasGetError", ("rocblas_get_error", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ("cublasAlloc", ("rocblas_alloc", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasFree", ("rocblas_free", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ( "cublasSetKernelStream", ("rocblas_set_kernel_stream", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasGetAtomicsMode", ("rocblas_get_atomics_mode", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasSetAtomicsMode", ("rocblas_set_atomics_mode", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasGetMathMode", ("rocblas_get_math_mode", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasSetMathMode", ("rocblas_set_math_mode", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ("CUBLAS_OP_N", ("rocblas_operation_none", CONV_NUMERIC_LITERAL, API_BLAS)), ( "CUBLAS_OP_T", ("rocblas_operation_transpose", CONV_NUMERIC_LITERAL, API_BLAS), ), ( "CUBLAS_OP_C", ("rocblas_operation_conjugate_transpose", CONV_NUMERIC_LITERAL, API_BLAS), ), ( "CUBLAS_STATUS_SUCCESS", ("rocblas_status_success", CONV_NUMERIC_LITERAL, API_BLAS), ), ( "CUBLAS_STATUS_NOT_INITIALIZED", ("rocblas_status_invalid_handle", CONV_NUMERIC_LITERAL, API_BLAS), ), ( 
"CUBLAS_STATUS_ALLOC_FAILED", ("rocblas_status_memory_error", CONV_NUMERIC_LITERAL, API_BLAS), ), ( "CUBLAS_STATUS_INVALID_VALUE", ("rocblas_status_invalid_pointer", CONV_NUMERIC_LITERAL, API_BLAS), ), ( "CUBLAS_STATUS_MAPPING_ERROR", ("rocblas_status_internal_error", CONV_NUMERIC_LITERAL, API_BLAS), ), ( "CUBLAS_STATUS_EXECUTION_FAILED", ("rocblas_status_internal_error", CONV_NUMERIC_LITERAL, API_BLAS), ), ( "CUBLAS_STATUS_INTERNAL_ERROR", ("rocblas_status_internal_error", CONV_NUMERIC_LITERAL, API_BLAS), ), ( "CUBLAS_STATUS_NOT_SUPPORTED", ("rocblas_status_not_implemented", CONV_NUMERIC_LITERAL, API_BLAS), ), ( "CUBLAS_STATUS_ARCH_MISMATCH", ("rocblas_status_not_implemented", CONV_NUMERIC_LITERAL, API_BLAS), ), ( "CUBLAS_FILL_MODE_LOWER", ("rocblas_fill_lower", CONV_NUMERIC_LITERAL, API_BLAS), ), ( "CUBLAS_FILL_MODE_UPPER", ("rocblas_fill_upper", CONV_NUMERIC_LITERAL, API_BLAS), ), ( "CUBLAS_DIAG_NON_UNIT", ("rocblas_diagonal_non_unit", CONV_NUMERIC_LITERAL, API_BLAS), ), ("CUBLAS_DIAG_UNIT", ("rocblas_diagonal_unit", CONV_NUMERIC_LITERAL, API_BLAS)), ("CUBLAS_SIDE_LEFT", ("rocblas_side_left", CONV_NUMERIC_LITERAL, API_BLAS)), ("CUBLAS_SIDE_RIGHT", ("rocblas_side_right", CONV_NUMERIC_LITERAL, API_BLAS)), ( "CUBLAS_POINTER_MODE_HOST", ("rocblas_pointer_mode_host", CONV_NUMERIC_LITERAL, API_BLAS), ), ( "CUBLAS_POINTER_MODE_DEVICE", ("rocblas_pointer_mode_device", CONV_NUMERIC_LITERAL, API_BLAS), ), ( "CUBLAS_ATOMICS_NOT_ALLOWED", ( "rocblas_atomics_not_allowed", CONV_NUMERIC_LITERAL, API_BLAS, HIP_UNSUPPORTED, ), ), ( "CUBLAS_ATOMICS_ALLOWED", ( "rocblas_atomics_allowed", CONV_NUMERIC_LITERAL, API_BLAS, HIP_UNSUPPORTED, ), ), ( "CUBLAS_DATA_FLOAT", ( "rocblas_precision_float", CONV_NUMERIC_LITERAL, API_BLAS, HIP_UNSUPPORTED, ), ), ( "CUBLAS_DATA_DOUBLE", ( "rocblas_precision_double", CONV_NUMERIC_LITERAL, API_BLAS, HIP_UNSUPPORTED, ), ), ( "CUBLAS_DATA_HALF", ("rocblas_precision_half", CONV_NUMERIC_LITERAL, API_BLAS, HIP_UNSUPPORTED), ), ( "CUBLAS_DATA_INT8", ("rocblas_precision_int8", CONV_NUMERIC_LITERAL, API_BLAS, HIP_UNSUPPORTED), ), ("cublasCreate", ("rocblas_create_handle", CONV_MATH_FUNC, API_BLAS)), ("cublasDestroy", ("rocblas_destroy_handle", CONV_MATH_FUNC, API_BLAS)), ("cublasSetVector", ("rocblas_set_vector", CONV_MATH_FUNC, API_BLAS)), ("cublasGetVector", ("rocblas_get_vector", CONV_MATH_FUNC, API_BLAS)), ( "cublasSetVectorAsync", ("rocblas_set_vector_async", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasGetVectorAsync", ("rocblas_get_vector_async", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ("cublasSetMatrix", ("rocblas_set_matrix", CONV_MATH_FUNC, API_BLAS)), ("cublasGetMatrix", ("rocblas_get_matrix", CONV_MATH_FUNC, API_BLAS)), ( "cublasGetMatrixAsync", ("rocblas_get_matrix_async", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasSetMatrixAsync", ("rocblas_set_matrix_async", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ("cublasXerbla", ("rocblas_xerbla", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasSnrm2", ("rocblas_snrm2", CONV_MATH_FUNC, API_BLAS)), ("cublasDnrm2", ("rocblas_dnrm2", CONV_MATH_FUNC, API_BLAS)), ("cublasScnrm2", ("rocblas_scnrm2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasDznrm2", ("rocblas_dznrm2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ( "cublasNrm2Ex", ("rocblas_nrm2_ex", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ("cublasSdot", ("rocblas_sdot", CONV_MATH_FUNC, API_BLAS)), ( "cublasSdotBatched", ("rocblas_sdot_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ("cublasDdot", ("rocblas_ddot", 
CONV_MATH_FUNC, API_BLAS)), ( "cublasDdotBatched", ("rocblas_ddot_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ("cublasCdotu", ("rocblas_cdotu", CONV_MATH_FUNC, API_BLAS)), ("cublasCdotc", ("rocblas_cdotc", CONV_MATH_FUNC, API_BLAS)), ("cublasZdotu", ("rocblas_zdotu", CONV_MATH_FUNC, API_BLAS)), ("cublasZdotc", ("rocblas_zdotc", CONV_MATH_FUNC, API_BLAS)), ("cublasSscal", ("rocblas_sscal", CONV_MATH_FUNC, API_BLAS)), ( "cublasSscalBatched", ("rocblas_sscal_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ("cublasDscal", ("rocblas_dscal", CONV_MATH_FUNC, API_BLAS)), ( "cublasDscalBatched", ("rocblas_dscal_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ("cublasCscal", ("rocblas_cscal", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasCsscal", ("rocblas_csscal", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasZscal", ("rocblas_zscal", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasZdscal", ("rocblas_zdscal", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasSaxpy", ("rocblas_saxpy", CONV_MATH_FUNC, API_BLAS)), ( "cublasSaxpyBatched", ("rocblas_saxpy_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ("cublasDaxpy", ("rocblas_daxpy", CONV_MATH_FUNC, API_BLAS)), ("cublasCaxpy", ("rocblas_caxpy", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasZaxpy", ("rocblas_zaxpy", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasScopy", ("rocblas_scopy", CONV_MATH_FUNC, API_BLAS)), ( "cublasScopyBatched", ("rocblas_scopy_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ("cublasDcopy", ("rocblas_dcopy", CONV_MATH_FUNC, API_BLAS)), ( "cublasDcopyBatched", ("rocblas_dcopy_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ("cublasCcopy", ("rocblas_ccopy", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasZcopy", ("rocblas_zcopy", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasSswap", ("rocblas_sswap", CONV_MATH_FUNC, API_BLAS)), ("cublasDswap", ("rocblas_dswap", CONV_MATH_FUNC, API_BLAS)), ("cublasCswap", ("rocblas_cswap", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasZswap", ("rocblas_zswap", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasIsamax", ("rocblas_isamax", CONV_MATH_FUNC, API_BLAS)), ("cublasIdamax", ("rocblas_idamax", CONV_MATH_FUNC, API_BLAS)), ("cublasIcamax", ("rocblas_icamax", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasIzamax", ("rocblas_izamax", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasIsamin", ("rocblas_isamin", CONV_MATH_FUNC, API_BLAS)), ("cublasIdamin", ("rocblas_idamin", CONV_MATH_FUNC, API_BLAS)), ("cublasIcamin", ("rocblas_icamin", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasIzamin", ("rocblas_izamin", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasSasum", ("rocblas_sasum", CONV_MATH_FUNC, API_BLAS)), ( "cublasSasumBatched", ("rocblas_sasum_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ("cublasDasum", ("rocblas_dasum", CONV_MATH_FUNC, API_BLAS)), ( "cublasDasumBatched", ("rocblas_dasum_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ("cublasScasum", ("rocblas_scasum", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasDzasum", ("rocblas_dzasum", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasSrot", ("rocblas_srot", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasDrot", ("rocblas_drot", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasCrot", ("rocblas_crot", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasCsrot", ("rocblas_csrot", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasZrot", ("rocblas_zrot", 
CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasZdrot", ("rocblas_zdrot", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasSrotg", ("rocblas_srotg", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasDrotg", ("rocblas_drotg", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasCrotg", ("rocblas_crotg", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasZrotg", ("rocblas_zrotg", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasSrotm", ("rocblas_srotm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasDrotm", ("rocblas_drotm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasSrotmg", ("rocblas_srotmg", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasDrotmg", ("rocblas_drotmg", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasSgemv", ("rocblas_sgemv", CONV_MATH_FUNC, API_BLAS)), ( "cublasSgemvBatched", ("rocblas_sgemv_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ("cublasDgemv", ("rocblas_dgemv", CONV_MATH_FUNC, API_BLAS)), ("cublasCgemv", ("rocblas_cgemv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasZgemv", ("rocblas_zgemv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasSgbmv", ("rocblas_sgbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasDgbmv", ("rocblas_dgbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasCgbmv", ("rocblas_cgbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasZgbmv", ("rocblas_zgbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasStrmv", ("rocblas_strmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasDtrmv", ("rocblas_dtrmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasCtrmv", ("rocblas_ctrmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasZtrmv", ("rocblas_ztrmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasStbmv", ("rocblas_stbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasDtbmv", ("rocblas_dtbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasCtbmv", ("rocblas_ctbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasZtbmv", ("rocblas_ztbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasStpmv", ("rocblas_stpmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasDtpmv", ("rocblas_dtpmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasCtpmv", ("rocblas_ctpmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasZtpmv", ("rocblas_ztpmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasStrsv", ("rocblas_strsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasDtrsv", ("rocblas_dtrsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasCtrsv", ("rocblas_ctrsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasZtrsv", ("rocblas_ztrsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasStpsv", ("rocblas_stpsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasDtpsv", ("rocblas_dtpsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasCtpsv", ("rocblas_ctpsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasZtpsv", ("rocblas_ztpsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasStbsv", ("rocblas_stbsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasDtbsv", ("rocblas_dtbsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasCtbsv", ("rocblas_ctbsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasZtbsv", ("rocblas_ztbsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasSsymv", ("rocblas_ssymv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasDsymv", ("rocblas_dsymv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasCsymv", ("rocblas_csymv", 
CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasZsymv", ("rocblas_zsymv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasChemv", ("rocblas_chemv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasZhemv", ("rocblas_zhemv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasSsbmv", ("rocblas_ssbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasDsbmv", ("rocblas_dsbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasChbmv", ("rocblas_chbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasZhbmv", ("rocblas_zhbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasSspmv", ("rocblas_sspmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasDspmv", ("rocblas_dspmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasChpmv", ("rocblas_chpmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasZhpmv", ("rocblas_zhpmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasSger", ("rocblas_sger", CONV_MATH_FUNC, API_BLAS)), ("cublasDger", ("rocblas_dger", CONV_MATH_FUNC, API_BLAS)), ("cublasCgeru", ("rocblas_cgeru", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasCgerc", ("rocblas_cgerc", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasZgeru", ("rocblas_zgeru", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasZgerc", ("rocblas_zgerc", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasSsyr", ("rocblas_ssyr", CONV_MATH_FUNC, API_BLAS)), ("cublasDsyr", ("rocblas_dsyr", CONV_MATH_FUNC, API_BLAS)), ("cublasCher", ("rocblas_cher", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasZher", ("rocblas_zher", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasSspr", ("rocblas_sspr", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasDspr", ("rocblas_dspr", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasChpr", ("rocblas_chpr", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasZhpr", ("rocblas_zhpr", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasSsyr2", ("rocblas_ssyr2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasDsyr2", ("rocblas_dsyr2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasCher2", ("rocblas_cher2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasZher2", ("rocblas_zher2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasSspr2", ("rocblas_sspr2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasDspr2", ("rocblas_dspr2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasChpr2", ("rocblas_chpr2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasZhpr2", ("rocblas_zhpr2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ( "cublasSgemmBatched", ("rocblas_sgemm_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasDgemmBatched", ("rocblas_dgemm_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasHgemmBatched", ("rocblas_hgemm_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasSgemmStridedBatched", ("rocblas_sgemm_strided_batched", CONV_MATH_FUNC, API_BLAS), ), ( "cublasDgemmStridedBatched", ("rocblas_dgemm_strided_batched", CONV_MATH_FUNC, API_BLAS), ), ( "cublasHgemmStridedBatched", ("rocblas_hgemm_strided_batched", CONV_MATH_FUNC, API_BLAS), ), ( "cublasCgemmBatched", ("rocblas_cgemm_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasCgemm3mBatched", ("rocblas_cgemm_3m_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasZgemmBatched", ("rocblas_zgemm_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasCgemmStridedBatched", ( "rocblas_cgemm_strided_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED, ), ), ( 
"cublasCgemm3mStridedBatched", ( "rocblas_cgemm_3m_strided_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED, ), ), ( "cublasZgemmStridedBatched", ( "rocblas_zgemm_strided_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED, ), ), ( "cublasHgemmStridedBatched", ( "rocblas_hgemm_strided_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED, ), ), ("cublasSgemm", ("rocblas_sgemm", CONV_MATH_FUNC, API_BLAS)), ("cublasDgemm", ("rocblas_dgemm", CONV_MATH_FUNC, API_BLAS)), ("cublasCgemm", ("rocblas_cgemm", CONV_MATH_FUNC, API_BLAS)), ("cublasZgemm", ("rocblas_zgemm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasHgemm", ("rocblas_hgemm", CONV_MATH_FUNC, API_BLAS)), ("cublasSsyrk", ("rocblas_ssyrk", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasDsyrk", ("rocblas_dsyrk", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasCsyrk", ("rocblas_csyrk", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasZsyrk", ("rocblas_zsyrk", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasCherk", ("rocblas_cherk", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasZherk", ("rocblas_zherk", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasSsyr2k", ("rocblas_ssyr2k", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasDsyr2k", ("rocblas_dsyr2k", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasCsyr2k", ("rocblas_csyr2k", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasZsyr2k", ("rocblas_zyr2k", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasSsyrkx", ("rocblas_ssyrkx", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasDsyrkx", ("rocblas_dsyrkx", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasCsyrkx", ("rocblas_csyrkx", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasZsyrkx", ("rocblas_zsyrkx", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasCher2k", ("rocblas_cher2k", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasZher2k", ("rocblas_zher2k", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasCherkx", ("rocblas_cherkx", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasZherkx", ("rocblas_zherkx", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasSsymm", ("rocblas_ssymm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasDsymm", ("rocblas_dsymm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasCsymm", ("rocblas_csymm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasZsymm", ("rocblas_zsymm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasChemm", ("rocblas_chemm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasZhemm", ("rocblas_zhemm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasStrsm", ("rocblas_strsm", CONV_MATH_FUNC, API_BLAS)), ("cublasDtrsm", ("rocblas_dtrsm", CONV_MATH_FUNC, API_BLAS)), ("cublasCtrsm", ("rocblas_ctrsm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasZtrsm", ("rocblas_ztrsm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ( "cublasStrsmBatched", ("rocblas_strsm_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasDtrsmBatched", ("rocblas_dtrsm_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasCtrsmBatched", ("rocblas_ctrsm_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasZtrsmBatched", ("rocblas_ztrsm_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ("cublasStrmm", ("rocblas_strmm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasDtrmm", ("rocblas_dtrmm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasCtrmm", ("rocblas_ctrmm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasZtrmm", ("rocblas_ztrmm", CONV_MATH_FUNC, 
API_BLAS, HIP_UNSUPPORTED)), ("cublasSgeam", ("rocblas_sgeam", CONV_MATH_FUNC, API_BLAS)), ("cublasDgeam", ("rocblas_dgeam", CONV_MATH_FUNC, API_BLAS)), ("cublasCgeam", ("rocblas_cgeam", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasZgeam", ("rocblas_zgeam", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ( "cublasSgetrfBatched", ("rocblas_sgetrf_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasDgetrfBatched", ("rocblas_dgetrf_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasCgetrfBatched", ("rocblas_cgetrf_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasZgetrfBatched", ("rocblas_zgetrf_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasSgetriBatched", ("rocblas_sgetri_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasDgetriBatched", ("rocblas_dgetri_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasCgetriBatched", ("rocblas_cgetri_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasZgetriBatched", ("rocblas_zgetri_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasSgetrsBatched", ("rocblas_sgetrs_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasDgetrsBatched", ("rocblas_dgetrs_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasCgetrsBatched", ("rocblas_cgetrs_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasZgetrsBatched", ("rocblas_zgetrs_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasStrsmBatched", ("rocblas_strsm_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasDtrsmBatched", ("rocblas_dtrsm_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasCtrsmBatched", ("rocblas_ctrsm_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasZtrsmBatched", ("rocblas_ztrsm_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasSmatinvBatched", ("rocblas_smatinv_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasDmatinvBatched", ("rocblas_dmatinv_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasCmatinvBatched", ("rocblas_cmatinv_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasZmatinvBatched", ("rocblas_zmatinv_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasSgeqrfBatched", ("rocblas_sgeqrf_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasDgeqrfBatched", ("rocblas_dgeqrf_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasCgeqrfBatched", ("rocblas_cgeqrf_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasZgeqrfBatched", ("rocblas_zgeqrf_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasSgelsBatched", ("rocblas_sgels_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasDgelsBatched", ("rocblas_dgels_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasCgelsBatched", ("rocblas_cgels_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasZgelsBatched", ("rocblas_zgels_batched", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ("cublasSdgmm", ("rocblas_sdgmm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasDdgmm", ("rocblas_ddgmm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasCdgmm", ("rocblas_cdgmm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasZdgmm", ("rocblas_zdgmm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasStpttr", ("rocblas_stpttr", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasDtpttr", ("rocblas_dtpttr", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), 
("cublasCtpttr", ("rocblas_ctpttr", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasZtpttr", ("rocblas_ztpttr", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasStrttp", ("rocblas_strttp", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasDtrttp", ("rocblas_dtrttp", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasCtrttp", ("rocblas_ctrttp", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasZtrttp", ("rocblas_ztrttp", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasCreate_v2", ("rocblas_create_handle", CONV_MATH_FUNC, API_BLAS)), ("cublasDestroy_v2", ("rocblas_destroy_handle", CONV_MATH_FUNC, API_BLAS)), ( "cublasGetVersion_v2", ("rocblas_get_version", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ("cublasSetStream", ("rocblas_set_stream", CONV_MATH_FUNC, API_BLAS)), ("cublasGetStream", ("rocblas_get_stream", CONV_MATH_FUNC, API_BLAS)), ("cublasSetStream_v2", ("rocblas_set_stream", CONV_MATH_FUNC, API_BLAS)), ("cublasGetStream_v2", ("rocblas_get_stream", CONV_MATH_FUNC, API_BLAS)), ( "cublasGetPointerMode", ("rocblas_get_pointer_mode", CONV_MATH_FUNC, API_BLAS), ), ( "cublasSetPointerMode", ("rocblas_set_pointer_mode", CONV_MATH_FUNC, API_BLAS), ), ( "cublasGetPointerMode_v2", ("rocblas_get_pointer_mode", CONV_MATH_FUNC, API_BLAS), ), ( "cublasSetPointerMode_v2", ("rocblas_set_pointer_mode", CONV_MATH_FUNC, API_BLAS), ), ("cublasSgemv_v2", ("rocblas_sgemv", CONV_MATH_FUNC, API_BLAS)), ("cublasDgemv_v2", ("rocblas_dgemv", CONV_MATH_FUNC, API_BLAS)), ( "cublasCgemv_v2", ("rocblas_cgemv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasZgemv_v2", ("rocblas_zgemv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasSgbmv_v2", ("rocblas_sgbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasDgbmv_v2", ("rocblas_dgbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasCgbmv_v2", ("rocblas_cgbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasZgbmv_v2", ("rocblas_zgbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasStrmv_v2", ("rocblas_strmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasDtrmv_v2", ("rocblas_dtrmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasCtrmv_v2", ("rocblas_ctrmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasZtrmv_v2", ("rocblas_ztrmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasStbmv_v2", ("rocblas_stbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasDtbmv_v2", ("rocblas_dtbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasCtbmv_v2", ("rocblas_ctbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasZtbmv_v2", ("rocblas_ztbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasStpmv_v2", ("rocblas_stpmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasDtpmv_v2", ("rocblas_dtpmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasCtpmv_v2", ("rocblas_ctpmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasZtpmv_v2", ("rocblas_ztpmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasStrsv_v2", ("rocblas_strsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasDtrsv_v2", ("rocblas_dtrsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasCtrsv_v2", ("rocblas_ctrsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasZtrsv_v2", ("rocblas_ztrsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasStpsv_v2", ("rocblas_stpsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasDtpsv_v2", ("rocblas_dtpsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( 
"cublasCtpsv_v2", ("rocblas_ctpsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasZtpsv_v2", ("rocblas_ztpsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasStbsv_v2", ("rocblas_stbsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasDtbsv_v2", ("rocblas_dtbsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasCtbsv_v2", ("rocblas_ctbsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasZtbsv_v2", ("rocblas_ztbsv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasSsymv_v2", ("rocblas_ssymv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasDsymv_v2", ("rocblas_dsymv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasCsymv_v2", ("rocblas_csymv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasZsymv_v2", ("rocblas_zsymv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasChemv_v2", ("rocblas_chemv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasZhemv_v2", ("rocblas_zhemv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasSsbmv_v2", ("rocblas_ssbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasDsbmv_v2", ("rocblas_dsbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasChbmv_v2", ("rocblas_chbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasZhbmv_v2", ("rocblas_zhbmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasSspmv_v2", ("rocblas_sspmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasDspmv_v2", ("rocblas_dspmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasChpmv_v2", ("rocblas_chpmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasZhpmv_v2", ("rocblas_zhpmv", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ("cublasSger_v2", ("rocblas_sger", CONV_MATH_FUNC, API_BLAS)), ("cublasDger_v2", ("rocblas_dger", CONV_MATH_FUNC, API_BLAS)), ( "cublasCgeru_v2", ("rocblas_cgeru", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasCgerc_v2", ("rocblas_cergc", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasZgeru_v2", ("rocblas_zgeru", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasZgerc_v2", ("rocblas_zgerc", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ("cublasSsyr_v2", ("rocblas_ssyr", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasDsyr_v2", ("rocblas_dsyr", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasCsyr_v2", ("rocblas_csyr", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasZsyr_v2", ("rocblas_zsyr", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasCher_v2", ("rocblas_cher", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasZher_v2", ("rocblas_zher", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasSspr_v2", ("rocblas_sspr", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasDspr_v2", ("rocblas_dspr", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasChpr_v2", ("rocblas_chpr", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasZhpr_v2", ("rocblas_zhpr", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ( "cublasSsyr2_v2", ("rocblas_ssyr2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasDsyr2_v2", ("rocblas_dsyr2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasCsyr2_v2", ("rocblas_csyr2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasZsyr2_v2", ("rocblas_zsyr2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasCher2_v2", ("rocblas_cher2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasZher2_v2", ("rocblas_zher2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasSspr2_v2", ("rocblas_sspr2", CONV_MATH_FUNC, API_BLAS, 
HIP_UNSUPPORTED), ),
        ("cublasDspr2_v2", ("rocblas_dspr2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
        ("cublasChpr2_v2", ("rocblas_chpr2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
        ("cublasZhpr2_v2", ("rocblas_zhpr2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
        ("cublasSgemm_v2", ("rocblas_sgemm", CONV_MATH_FUNC, API_BLAS)),
        ("cublasDgemm_v2", ("rocblas_dgemm", CONV_MATH_FUNC, API_BLAS)),
        ("cublasCgemm_v2", ("rocblas_cgemm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
        ("cublasCgemm3m", ("rocblas_cgemm_3m", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
        ("cublasCgemm3mEx", ("rocblas_cgemm_3mex", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
        ("cublasZgemm_v2", ("rocblas_zgemm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
        ("cublasZgemm3m", ("rocblas_zgemm_3m", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
        # NB: The function rocblas_sgemmex doesn't actually exist in
        # rocblas, as of 2018-12-05
        ("cublasSgemmEx", ("rocblas_sgemmex", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
        ("cublasGemmEx", ("rocblas_gemmex", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
        ("cublasCgemmEx", ("rocblas_cgemmex", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
        ("cublasUint8gemmBias", ("rocblas_uint8gemmbias", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
        ("cublasSsyrk_v2", ("rocblas_ssyrk", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
        ("cublasDsyrk_v2", ("rocblas_dsyrk", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
        ("cublasCsyrk_v2", ("rocblas_csyrk", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
        ("cublasZsyrk_v2", ("rocblas_zsyrk", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
        ("cublasCsyrkEx", ("rocblas_csyrkex", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
        ("cublasCsyrk3mEx", ("rocblas_csyrk3mex", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
        ("cublasCherk_v2", ("rocblas_cherk", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
        ("cublasCherkEx", ("rocblas_cherkex", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
        ("cublasCherk3mEx", ("rocblas_cherk3mex", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
        ("cublasZherk_v2", ("rocblas_zherk", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
        ("cublasSsyr2k_v2", ("rocblas_ssyr2k", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
        ("cublasDsyr2k_v2", ("rocblas_dsyr2k", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
        ("cublasCsyr2k_v2", ("rocblas_csyr2k", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
        ("cublasZsyr2k_v2", ("rocblas_zsyr2k", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
        ("cublasCher2k_v2", ("rocblas_cher2k", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
        ("cublasZher2k_v2", ("rocblas_zher2k", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
        ("cublasSsymm_v2", ("rocblas_ssymm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
        ("cublasDsymm_v2", ("rocblas_dsymm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
        ("cublasCsymm_v2", ("rocblas_csymm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
        ("cublasZsymm_v2", ("rocblas_zsymm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
        ("cublasChemm_v2", ("rocblas_chemm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
        ("cublasZhemm_v2", ("rocblas_zhemm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
        ("cublasStrsm_v2", ("rocblas_strsm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
        ("cublasDtrsm_v2", ("rocblas_dtrsm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
        ("cublasCtrsm_v2", ("rocblas_ctrsm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
        ("cublasZtrsm_v2", ("rocblas_ztrsm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)),
        ("cublasStrmm_v2", ("rocblas_strmm",
CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasDtrmm_v2", ("rocblas_dtrmm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasCtrmm_v2", ("rocblas_ctrmm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasZtrmm_v2", ("rocblas_ztrmm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ("cublasSnrm2_v2", ("rocblas_snrm2", CONV_MATH_FUNC, API_BLAS)), ("cublasDnrm2_v2", ("rocblas_dnrm2", CONV_MATH_FUNC, API_BLAS)), ( "cublasScnrm2_v2", ("rocblas_scnrm2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasDznrm2_v2", ("rocblas_dznrm2", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ("cublasDotEx", ("rocblas_dotex", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasDotcEx", ("rocblas_dotcex", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasSdot_v2", ("rocblas_sdot", CONV_MATH_FUNC, API_BLAS)), ("cublasDdot_v2", ("rocblas_ddot", CONV_MATH_FUNC, API_BLAS)), ( "cublasCdotu_v2", ("rocblas_cdotu", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasCdotc_v2", ("rocblas_cdotc", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasZdotu_v2", ("rocblas_zdotu", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasZdotc_v2", ("rocblas_zdotc", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ("cublasScalEx", ("rocblas_scalex", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasSscal_v2", ("rocblas_sscal", CONV_MATH_FUNC, API_BLAS)), ("cublasDscal_v2", ("rocblas_dscal", CONV_MATH_FUNC, API_BLAS)), ( "cublasCscal_v2", ("rocblas_cscal", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasCsscal_v2", ("rocblas_csscal", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasZscal_v2", ("rocblas_zcsal", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasZdscal_v2", ("rocblas_zdscal", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ("cublasAxpyEx", ("rocblas_axpyex", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasSaxpy_v2", ("rocblas_saxpy", CONV_MATH_FUNC, API_BLAS)), ("cublasDaxpy_v2", ("rocblas_daxpy", CONV_MATH_FUNC, API_BLAS)), ( "cublasCaxpy_v2", ("rocblas_caxpy", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasZaxpy_v2", ("rocblas_zaxpy", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ("cublasScopy_v2", ("rocblas_scopy", CONV_MATH_FUNC, API_BLAS)), ("cublasDcopy_v2", ("rocblas_dcopy", CONV_MATH_FUNC, API_BLAS)), ( "cublasCcopy_v2", ("rocblas_ccopy", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasZcopy_v2", ("rocblas_zcopy", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ("cublasSswap_v2", ("rocblas_sswap", CONV_MATH_FUNC, API_BLAS)), ("cublasDswap_v2", ("rocblas_dswap", CONV_MATH_FUNC, API_BLAS)), ( "cublasCswap_v2", ("rocblas_cswap", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasZswap_v2", ("rocblas_zswap", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ("cublasIsamax_v2", ("rocblas_isamax", CONV_MATH_FUNC, API_BLAS)), ("cublasIdamax_v2", ("rocblas_idamax", CONV_MATH_FUNC, API_BLAS)), ( "cublasIcamax_v2", ("rocblas_icamax", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasIzamax_v2", ("rocblas_izamax", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ("cublasIsamin_v2", ("rocblas_isamin", CONV_MATH_FUNC, API_BLAS)), ("cublasIdamin_v2", ("rocblas_idamin", CONV_MATH_FUNC, API_BLAS)), ( "cublasIcamin_v2", ("rocblas_icamin", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasIzamin_v2", ("rocblas_izamin", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ("cublasSasum_v2", ("rocblas_sasum", CONV_MATH_FUNC, API_BLAS)), ("cublasDasum_v2", ("rocblas_dasum", CONV_MATH_FUNC, API_BLAS)), ( "cublasScasum_v2", ("rocblas_scasum", 
CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasDzasum_v2", ("rocblas_dzasum", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ("cublasSrot_v2", ("rocblas_srot", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasDrot_v2", ("rocblas_drot", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ("cublasCrot_v2", ("rocblas_crot", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ( "cublasCsrot_v2", ("rocblas_csrot", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ("cublasZrot_v2", ("rocblas_zrot", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED)), ( "cublasZdrot_v2", ("rocblas_zdrot", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasSrotg_v2", ("rocblas_srotg", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasDrotg_v2", ("rocblas_drotg", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasCrotg_v2", ("rocblas_crotg", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasZrotg_v2", ("rocblas_zrotg", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasSrotm_v2", ("rocblas_srotm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasDrotm_v2", ("rocblas_drotm", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasSrotmg_v2", ("rocblas_srotmg", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "cublasDrotmg_v2", ("rocblas_drotmg", CONV_MATH_FUNC, API_BLAS, HIP_UNSUPPORTED), ), ( "CURAND_STATUS_SUCCESS", ("HIPRAND_STATUS_SUCCESS", CONV_NUMERIC_LITERAL, API_RAND), ), ( "CURAND_STATUS_VERSION_MISMATCH", ("HIPRAND_STATUS_VERSION_MISMATCH", CONV_NUMERIC_LITERAL, API_RAND), ), ( "CURAND_STATUS_NOT_INITIALIZED", ("HIPRAND_STATUS_NOT_INITIALIZED", CONV_NUMERIC_LITERAL, API_RAND), ), ( "CURAND_STATUS_ALLOCATION_FAILED", ("HIPRAND_STATUS_ALLOCATION_FAILED", CONV_NUMERIC_LITERAL, API_RAND), ), ( "CURAND_STATUS_TYPE_ERROR", ("HIPRAND_STATUS_TYPE_ERROR", CONV_NUMERIC_LITERAL, API_RAND), ), ( "CURAND_STATUS_OUT_OF_RANGE", ("HIPRAND_STATUS_OUT_OF_RANGE", CONV_NUMERIC_LITERAL, API_RAND), ), ( "CURAND_STATUS_LENGTH_NOT_MULTIPLE", ("HIPRAND_STATUS_LENGTH_NOT_MULTIPLE", CONV_NUMERIC_LITERAL, API_RAND), ), ( "CURAND_STATUS_DOUBLE_PRECISION_REQUIRED", ( "HIPRAND_STATUS_DOUBLE_PRECISION_REQUIRED", CONV_NUMERIC_LITERAL, API_RAND, ), ), ( "CURAND_STATUS_LAUNCH_FAILURE", ("HIPRAND_STATUS_LAUNCH_FAILURE", CONV_NUMERIC_LITERAL, API_RAND), ), ( "CURAND_STATUS_PREEXISTING_FAILURE", ("HIPRAND_STATUS_PREEXISTING_FAILURE", CONV_NUMERIC_LITERAL, API_RAND), ), ( "CURAND_STATUS_INITIALIZATION_FAILED", ("HIPRAND_STATUS_INITIALIZATION_FAILED", CONV_NUMERIC_LITERAL, API_RAND), ), ( "CURAND_STATUS_ARCH_MISMATCH", ("HIPRAND_STATUS_ARCH_MISMATCH", CONV_NUMERIC_LITERAL, API_RAND), ), ( "CURAND_STATUS_INTERNAL_ERROR", ("HIPRAND_STATUS_INTERNAL_ERROR", CONV_NUMERIC_LITERAL, API_RAND), ), ("CURAND_RNG_TEST", ("HIPRAND_RNG_TEST", CONV_NUMERIC_LITERAL, API_RAND)), ( "mtgp32dc_params_fast_11213", ("mtgp32dc_params_fast_11213", CONV_NUMERIC_LITERAL, API_RAND), ), ( "CURAND_RNG_PSEUDO_DEFAULT", ("HIPRAND_RNG_PSEUDO_DEFAULT", CONV_NUMERIC_LITERAL, API_RAND), ), ( "CURAND_RNG_PSEUDO_XORWOW", ("HIPRAND_RNG_PSEUDO_XORWOW", CONV_NUMERIC_LITERAL, API_RAND), ), ( "CURAND_RNG_PSEUDO_MRG32K3A", ("HIPRAND_RNG_PSEUDO_MRG32K3A", CONV_NUMERIC_LITERAL, API_RAND), ), ( "CURAND_RNG_PSEUDO_MTGP32", ("HIPRAND_RNG_PSEUDO_MTGP32", CONV_NUMERIC_LITERAL, API_RAND), ), ( "CURAND_RNG_PSEUDO_MT19937", ("HIPRAND_RNG_PSEUDO_MT19937", CONV_NUMERIC_LITERAL, API_RAND), ), ( "CURAND_RNG_PSEUDO_PHILOX4_32_10", ("HIPRAND_RNG_PSEUDO_PHILOX4_32_10", CONV_NUMERIC_LITERAL, API_RAND), ), ( "CURAND_RNG_QUASI_DEFAULT", ("HIPRAND_RNG_QUASI_DEFAULT", 
CONV_NUMERIC_LITERAL, API_RAND), ), ( "CURAND_RNG_QUASI_SOBOL32", ("HIPRAND_RNG_QUASI_SOBOL32", CONV_NUMERIC_LITERAL, API_RAND), ), ( "CURAND_RNG_QUASI_SCRAMBLED_SOBOL32", ("HIPRAND_RNG_QUASI_SCRAMBLED_SOBOL32", CONV_NUMERIC_LITERAL, API_RAND), ), ( "CURAND_RNG_QUASI_SOBOL64", ("HIPRAND_RNG_QUASI_SOBOL64", CONV_NUMERIC_LITERAL, API_RAND), ), ( "CURAND_RNG_QUASI_SCRAMBLED_SOBOL64", ("HIPRAND_RNG_QUASI_SCRAMBLED_SOBOL64", CONV_NUMERIC_LITERAL, API_RAND), ), ( "curand_ORDERING_PSEUDO_BEST", ( "HIPRAND_ORDERING_PSEUDO_BEST", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED, ), ), ( "curand_ORDERING_PSEUDO_DEFAULT", ( "HIPRAND_ORDERING_PSEUDO_DEFAULT", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED, ), ), ( "curand_ORDERING_PSEUDO_SEEDED", ( "HIPRAND_ORDERING_PSEUDO_SEEDED", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED, ), ), ( "curand_ORDERING_QUASI_DEFAULT", ( "HIPRAND_ORDERING_QUASI_DEFAULT", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED, ), ), ( "curand_DIRECTION_VECTORS_32_JOEKUO6", ( "HIPRAND_DIRECTION_VECTORS_32_JOEKUO6", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED, ), ), ( "curand_SCRAMBLED_DIRECTION_VECTORS_32_JOEKUO6", ( "HIPRAND_SCRAMBLED_DIRECTION_VECTORS_32_JOEKUO6", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED, ), ), ( "curand_DIRECTION_VECTORS_64_JOEKUO6", ( "HIPRAND_DIRECTION_VECTORS_64_JOEKUO6", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED, ), ), ( "curand_SCRAMBLED_DIRECTION_VECTORS_64_JOEKUO6", ( "HIPRAND_SCRAMBLED_DIRECTION_VECTORS_64_JOEKUO6", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED, ), ), ( "curand_CHOOSE_BEST", ("HIPRAND_CHOOSE_BEST", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED), ), ( "curand_ITR", ("HIPRAND_ITR", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED), ), ( "curand_KNUTH", ("HIPRAND_KNUTH", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED), ), ( "curand_HITR", ("HIPRAND_HITR", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED), ), ("curand_M1", ("HIPRAND_M1", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED)), ("curand_M2", ("HIPRAND_M2", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED)), ( "curand_BINARY_SEARCH", ("HIPRAND_BINARY_SEARCH", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED), ), ( "curand_DISCRETE_GAUSS", ("HIPRAND_DISCRETE_GAUSS", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED), ), ( "curand_REJECTION", ("HIPRAND_REJECTION", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED), ), ( "curand_DEVICE_API", ("HIPRAND_DEVICE_API", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED), ), ( "curand_FAST_REJECTION", ("HIPRAND_FAST_REJECTION", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED), ), ( "curand_3RD", ("HIPRAND_3RD", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED), ), ( "curand_DEFINITION", ("HIPRAND_DEFINITION", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED), ), ( "curand_POISSON", ("HIPRAND_POISSON", CONV_NUMERIC_LITERAL, API_RAND, HIP_UNSUPPORTED), ), ("curandCreateGenerator", ("hiprandCreateGenerator", CONV_MATH_FUNC, API_RAND)), ( "curandCreateGeneratorHost", ("hiprandCreateGeneratorHost", CONV_MATH_FUNC, API_RAND), ), ( "curandCreatePoissonDistribution", ("hiprandCreatePoissonDistribution", CONV_MATH_FUNC, API_RAND), ), ( "curandDestroyDistribution", ("hiprandDestroyDistribution", CONV_MATH_FUNC, API_RAND), ), ( "curandDestroyGenerator", ("hiprandDestroyGenerator", CONV_MATH_FUNC, API_RAND), ), ("curandGenerate", ("hiprandGenerate", CONV_MATH_FUNC, API_RAND)), ( "curandGenerateLogNormal", ("hiprandGenerateLogNormal", CONV_MATH_FUNC, API_RAND), ), ( "curandGenerateLogNormalDouble", ("hiprandGenerateLogNormalDouble", 
CONV_MATH_FUNC, API_RAND), ), ( "curandGenerateLongLong", ("hiprandGenerateLongLong", CONV_MATH_FUNC, API_RAND, HIP_UNSUPPORTED), ), ("curandGenerateNormal", ("hiprandGenerateNormal", CONV_MATH_FUNC, API_RAND)), ( "curandGenerateNormalDouble", ("hiprandGenerateNormalDouble", CONV_MATH_FUNC, API_RAND), ), ("curandGeneratePoisson", ("hiprandGeneratePoisson", CONV_MATH_FUNC, API_RAND)), ("curandGenerateSeeds", ("hiprandGenerateSeeds", CONV_MATH_FUNC, API_RAND)), ("curandGenerateUniform", ("hiprandGenerateUniform", CONV_MATH_FUNC, API_RAND)), ( "curandGenerateUniformDouble", ("hiprandGenerateUniformDouble", CONV_MATH_FUNC, API_RAND), ), ( "curandGetDirectionVectors32", ("hiprandGetDirectionVectors32", CONV_MATH_FUNC, API_RAND, HIP_UNSUPPORTED), ), ( "curandGetDirectionVectors64", ("hiprandGetDirectionVectors64", CONV_MATH_FUNC, API_RAND, HIP_UNSUPPORTED), ), ( "curandGetProperty", ("hiprandGetProperty", CONV_MATH_FUNC, API_RAND, HIP_UNSUPPORTED), ), ( "curandGetScrambleConstants32", ( "hiprandGetScrambleConstants32", CONV_MATH_FUNC, API_RAND, HIP_UNSUPPORTED, ), ), ( "curandGetScrambleConstants64", ( "hiprandGetScrambleConstants64", CONV_MATH_FUNC, API_RAND, HIP_UNSUPPORTED, ), ), ("curandGetVersion", ("hiprandGetVersion", CONV_MATH_FUNC, API_RAND)), ( "curandSetGeneratorOffset", ("hiprandSetGeneratorOffset", CONV_MATH_FUNC, API_RAND), ), ( "curandSetGeneratorOrdering", ("hiprandSetGeneratorOrdering", CONV_MATH_FUNC, API_RAND, HIP_UNSUPPORTED), ), ( "curandSetPseudoRandomGeneratorSeed", ("hiprandSetPseudoRandomGeneratorSeed", CONV_MATH_FUNC, API_RAND), ), ( "curandSetQuasiRandomGeneratorDimensions", ("hiprandSetQuasiRandomGeneratorDimensions", CONV_MATH_FUNC, API_RAND), ), ("curandSetStream", ("hiprandSetStream", CONV_MATH_FUNC, API_RAND)), ("curand", ("hiprand", CONV_DEVICE_FUNC, API_RAND)), ("curand4", ("hiprand4", CONV_DEVICE_FUNC, API_RAND)), ("curand_init", ("hiprand_init", CONV_DEVICE_FUNC, API_RAND)), ("curand_log_normal", ("hiprand_log_normal", CONV_DEVICE_FUNC, API_RAND)), ( "curand_log_normal_double", ("hiprand_log_normal_double", CONV_DEVICE_FUNC, API_RAND), ), ("curand_log_normal2", ("hiprand_log_normal2", CONV_DEVICE_FUNC, API_RAND)), ( "curand_log_normal2_double", ("hiprand_log_normal2_double", CONV_DEVICE_FUNC, API_RAND), ), ("curand_log_normal4", ("hiprand_log_normal4", CONV_DEVICE_FUNC, API_RAND)), ( "curand_log_normal4_double", ("hiprand_log_normal4_double", CONV_DEVICE_FUNC, API_RAND), ), ( "curand_mtgp32_single", ("hiprand_mtgp32_single", CONV_DEVICE_FUNC, API_RAND, HIP_UNSUPPORTED), ), ( "curand_mtgp32_single_specific", ( "hiprand_mtgp32_single_specific", CONV_DEVICE_FUNC, API_RAND, HIP_UNSUPPORTED, ), ), ( "curand_mtgp32_specific", ("hiprand_mtgp32_specific", CONV_DEVICE_FUNC, API_RAND, HIP_UNSUPPORTED), ), ("curand_normal", ("hiprand_normal", CONV_DEVICE_FUNC, API_RAND)), ( "curandMakeMTGP32Constants", ("hiprandMakeMTGP32Constants", CONV_DEVICE_FUNC, API_RAND), ), ( "curandMakeMTGP32KernelState", ("hiprandMakeMTGP32KernelState", CONV_DEVICE_FUNC, API_RAND), ), ("curand_normal_double", ("hiprand_normal_double", CONV_DEVICE_FUNC, API_RAND)), ("curand_normal2", ("hiprand_normal2", CONV_DEVICE_FUNC, API_RAND)), ( "curand_normal2_double", ("hiprand_normal2_double", CONV_DEVICE_FUNC, API_RAND), ), ("curand_normal4", ("hiprand_normal4", CONV_DEVICE_FUNC, API_RAND)), ( "curand_normal4_double", ("hiprand_normal4_double", CONV_DEVICE_FUNC, API_RAND), ), ("curand_uniform", ("hiprand_uniform", CONV_DEVICE_FUNC, API_RAND)), ( "curand_uniform_double", ("hiprand_uniform_double", 
CONV_DEVICE_FUNC, API_RAND), ), ( "curand_uniform2_double", ("hiprand_uniform2_double", CONV_DEVICE_FUNC, API_RAND), ), ("curand_uniform4", ("hiprand_uniform4", CONV_DEVICE_FUNC, API_RAND)), ( "curand_uniform4_double", ("hiprand_uniform4_double", CONV_DEVICE_FUNC, API_RAND), ), ("curand_discrete", ("hiprand_discrete", CONV_DEVICE_FUNC, API_RAND)), ("curand_discrete4", ("hiprand_discrete4", CONV_DEVICE_FUNC, API_RAND)), ("curand_poisson", ("hiprand_poisson", CONV_DEVICE_FUNC, API_RAND)), ("curand_poisson4", ("hiprand_poisson4", CONV_DEVICE_FUNC, API_RAND)), ( "curand_Philox4x32_10", ("hiprand_Philox4x32_10", CONV_DEVICE_FUNC, API_RAND, HIP_UNSUPPORTED), ), ("mtgp32_kernel_params", ("mtgp32_kernel_params_t", CONV_MATH_FUNC, API_RAND)), ("CUFFT_FORWARD", ("HIPFFT_FORWARD", CONV_NUMERIC_LITERAL, API_BLAS)), ("CUFFT_INVERSE", ("HIPFFT_BACKWARD", CONV_NUMERIC_LITERAL, API_BLAS)), ( "CUFFT_COMPATIBILITY_DEFAULT", ( "HIPFFT_COMPATIBILITY_DEFAULT", CONV_NUMERIC_LITERAL, API_BLAS, HIP_UNSUPPORTED, ), ), ("cuComplex", ("rocblas_float_complex", CONV_TYPE, API_BLAS)), ("cuDoubleComplex", ("rocblas_double_complex", CONV_TYPE, API_BLAS)), ("cufftResult_t", ("hipfftResult_t", CONV_TYPE, API_FFT)), ("cufftResult", ("hipfftResult", CONV_TYPE, API_FFT)), ("CUFFT_SUCCESS", ("HIPFFT_SUCCESS", CONV_NUMERIC_LITERAL, API_FFT)), ("CUFFT_INVALID_PLAN", ("HIPFFT_INVALID_PLAN", CONV_NUMERIC_LITERAL, API_FFT)), ("CUFFT_ALLOC_FAILED", ("HIPFFT_ALLOC_FAILED", CONV_NUMERIC_LITERAL, API_FFT)), ("CUFFT_INVALID_TYPE", ("HIPFFT_INVALID_TYPE", CONV_NUMERIC_LITERAL, API_FFT)), ( "CUFFT_INVALID_VALUE", ("HIPFFT_INVALID_VALUE", CONV_NUMERIC_LITERAL, API_FFT), ), ( "CUFFT_INTERNAL_ERROR", ("HIPFFT_INTERNAL_ERROR", CONV_NUMERIC_LITERAL, API_FFT), ), ("CUFFT_EXEC_FAILED", ("HIPFFT_EXEC_FAILED", CONV_NUMERIC_LITERAL, API_FFT)), ("CUFFT_SETUP_FAILED", ("HIPFFT_SETUP_FAILED", CONV_NUMERIC_LITERAL, API_FFT)), ("CUFFT_INVALID_SIZE", ("HIPFFT_INVALID_SIZE", CONV_NUMERIC_LITERAL, API_FFT)), ( "CUFFT_UNALIGNED_DATA", ("HIPFFT_UNALIGNED_DATA", CONV_NUMERIC_LITERAL, API_FFT), ), ( "CUFFT_INCOMPLETE_PARAMETER_LIST", ("HIPFFT_INCOMPLETE_PARAMETER_LIST", CONV_NUMERIC_LITERAL, API_FFT), ), ( "CUFFT_INVALID_DEVICE", ("HIPFFT_INVALID_DEVICE", CONV_NUMERIC_LITERAL, API_FFT), ), ("CUFFT_PARSE_ERROR", ("HIPFFT_PARSE_ERROR", CONV_NUMERIC_LITERAL, API_FFT)), ("CUFFT_NO_WORKSPACE", ("HIPFFT_NO_WORKSPACE", CONV_NUMERIC_LITERAL, API_FFT)), ( "CUFFT_NOT_IMPLEMENTED", ("HIPFFT_NOT_IMPLEMENTED", CONV_NUMERIC_LITERAL, API_FFT), ), ( "CUFFT_LICENSE_ERROR", ("HIPFFT_LICENSE_ERROR", CONV_NUMERIC_LITERAL, API_FFT, HIP_UNSUPPORTED), ), ( "CUFFT_NOT_SUPPORTED", ("HIPFFT_NOT_SUPPORTED", CONV_NUMERIC_LITERAL, API_FFT), ), ("cufftType_t", ("hipfftType_t", CONV_TYPE, API_FFT)), ("cufftType", ("hipfftType", CONV_TYPE, API_FFT)), ("CUFFT_R2C", ("HIPFFT_R2C", CONV_NUMERIC_LITERAL, API_FFT)), ("CUFFT_C2R", ("HIPFFT_C2R", CONV_NUMERIC_LITERAL, API_FFT)), ("CUFFT_C2C", ("HIPFFT_C2C", CONV_NUMERIC_LITERAL, API_FFT)), ("CUFFT_D2Z", ("HIPFFT_D2Z", CONV_NUMERIC_LITERAL, API_FFT)), ("CUFFT_Z2D", ("HIPFFT_Z2D", CONV_NUMERIC_LITERAL, API_FFT)), ("CUFFT_Z2Z", ("HIPFFT_Z2Z", CONV_NUMERIC_LITERAL, API_FFT)), ( "cufftCompatibility_t", ("hipfftCompatibility_t", CONV_TYPE, API_FFT, HIP_UNSUPPORTED), ), ( "cufftCompatibility", ("hipfftCompatibility", CONV_TYPE, API_FFT, HIP_UNSUPPORTED), ), ( "CUFFT_COMPATIBILITY_FFTW_PADDING", ( "HIPFFT_COMPATIBILITY_FFTW_PADDING", CONV_NUMERIC_LITERAL, API_FFT, HIP_UNSUPPORTED, ), ), ("cufftReal", ("hipfftReal", CONV_TYPE, API_FFT)), ("cufftDoubleReal", 
("hipfftDoubleReal", CONV_TYPE, API_FFT)), ("cufftComplex", ("hipfftComplex", CONV_TYPE, API_FFT)), ("cufftDoubleComplex", ("hipfftDoubleComplex", CONV_TYPE, API_FFT)), ("cufftHandle", ("hipfftHandle", CONV_TYPE, API_FFT)), ("cufftPlan1d", ("hipfftPlan1d", CONV_MATH_FUNC, API_FFT)), ("cufftPlan2d", ("hipfftPlan2d", CONV_MATH_FUNC, API_FFT)), ("cufftPlan3d", ("hipfftPlan3d", CONV_MATH_FUNC, API_FFT)), ("cufftPlanMany", ("hipfftPlanMany", CONV_MATH_FUNC, API_FFT)), ("cufftMakePlan1d", ("hipfftMakePlan1d", CONV_MATH_FUNC, API_FFT)), ("cufftMakePlan2d", ("hipfftMakePlan2d", CONV_MATH_FUNC, API_FFT)), ("cufftMakePlan3d", ("hipfftMakePlan3d", CONV_MATH_FUNC, API_FFT)), ("cufftMakePlanMany", ("hipfftMakePlanMany", CONV_MATH_FUNC, API_FFT)), ("cufftMakePlanMany64", ("hipfftMakePlanMany64", CONV_MATH_FUNC, API_FFT)), ("cufftGetSizeMany64", ("hipfftGetSizeMany64", CONV_MATH_FUNC, API_FFT)), ("cufftEstimate1d", ("hipfftEstimate1d", CONV_MATH_FUNC, API_FFT)), ("cufftEstimate2d", ("hipfftEstimate2d", CONV_MATH_FUNC, API_FFT)), ("cufftEstimate3d", ("hipfftEstimate3d", CONV_MATH_FUNC, API_FFT)), ("cufftEstimateMany", ("hipfftEstimateMany", CONV_MATH_FUNC, API_FFT)), ("cufftCreate", ("hipfftCreate", CONV_MATH_FUNC, API_FFT)), ("cufftGetSize1d", ("hipfftGetSize1d", CONV_MATH_FUNC, API_FFT)), ("cufftGetSize2d", ("hipfftGetSize2d", CONV_MATH_FUNC, API_FFT)), ("cufftGetSize3d", ("hipfftGetSize3d", CONV_MATH_FUNC, API_FFT)), ("cufftGetSizeMany", ("hipfftGetSizeMany", CONV_MATH_FUNC, API_FFT)), ("cufftGetSize", ("hipfftGetSize", CONV_MATH_FUNC, API_FFT)), ("cufftSetWorkArea", ("hipfftSetWorkArea", CONV_MATH_FUNC, API_FFT)), ( "cufftSetAutoAllocation", ("hipfftSetAutoAllocation", CONV_MATH_FUNC, API_FFT), ), ("cufftExecC2C", ("hipfftExecC2C", CONV_MATH_FUNC, API_FFT)), ("cufftExecR2C", ("hipfftExecR2C", CONV_MATH_FUNC, API_FFT)), ("cufftExecC2R", ("hipfftExecC2R", CONV_MATH_FUNC, API_FFT)), ("cufftExecZ2Z", ("hipfftExecZ2Z", CONV_MATH_FUNC, API_FFT)), ("cufftExecD2Z", ("hipfftExecD2Z", CONV_MATH_FUNC, API_FFT)), ("cufftExecZ2D", ("hipfftExecZ2D", CONV_MATH_FUNC, API_FFT)), ("cufftSetStream", ("hipfftSetStream", CONV_MATH_FUNC, API_FFT)), ("cufftDestroy", ("hipfftDestroy", CONV_MATH_FUNC, API_FFT)), ("cufftGetVersion", ("hipfftGetVersion", CONV_MATH_FUNC, API_FFT)), ( "cufftGetProperty", ("hipfftGetProperty", CONV_MATH_FUNC, API_FFT, HIP_UNSUPPORTED), ), ("nvrtcResult", ("hiprtcResult", CONV_TYPE, API_RTC)), ("NVRTC_SUCCESS", ("HIPRTC_SUCCESS", CONV_TYPE, API_RTC)), ( "NVRTC_ERROR_OUT_OF_MEMORY", ("HIPRTC_ERROR_OUT_OF_MEMORY", CONV_TYPE, API_RTC), ), ( "NVRTC_ERROR_PROGRAM_CREATION_FAILURE", ("HIPRTC_ERROR_PROGRAM_CREATION_FAILURE", CONV_TYPE, API_RTC), ), ( "NVRTC_ERROR_INVALID_INPUT", ("HIPRTC_ERROR_INVALID_INPUT", CONV_TYPE, API_RTC), ), ( "NVRTC_ERROR_INVALID_PROGRAM", ("HIPRTC_ERROR_INVALID_PROGRAM", CONV_TYPE, API_RTC), ), ("NVRTC_ERROR_COMPILATION", ("HIPRTC_ERROR_COMPILATION", CONV_TYPE, API_RTC)), ( "NVRTC_ERROR_BUILTIN_OPERATION_FAILURE", ("HIPRTC_ERROR_BUILTIN_OPERATION_FAILURE", CONV_TYPE, API_RTC), ), ( "NVRTC_ERROR_NO_NAME_EXPRESSIONS_AFTER_COMPILATION", ("HIPRTC_ERROR_NO_NAME_EXPRESSIONS_AFTER_COMPILATION", CONV_TYPE, API_RTC), ), ( "NVRTC_ERROR_NAME_EXPRESSION_NOT_VALID", ("HIPRTC_ERROR_NAME_EXPRESSION_NOT_VALID", CONV_TYPE, API_RTC), ), ( "NVRTC_ERROR_INTERNAL_ERROR", ("HIPRTC_ERROR_INTERNAL_ERROR", CONV_TYPE, API_RTC), ), ("nvrtcGetErrorString", ("hiprtcGetErrorString", CONV_JIT, API_RTC)), ("nvrtcVersion", ("hiprtcVersion", CONV_JIT, API_RTC)), ("nvrtcProgram", ("hiprtcProgram", CONV_TYPE, 
API_RTC)),
        ("nvrtcAddNameExpression", ("hiprtcAddNameExpression", CONV_JIT, API_RTC)),
        ("nvrtcCompileProgram", ("hiprtcCompileProgram", CONV_JIT, API_RTC)),
        ("nvrtcCreateProgram", ("hiprtcCreateProgram", CONV_JIT, API_RTC)),
        ("nvrtcDestroyProgram", ("hiprtcDestroyProgram", CONV_JIT, API_RTC)),
        ("nvrtcGetLoweredName", ("hiprtcGetLoweredName", CONV_JIT, API_RTC)),
        ("nvrtcGetProgramLog", ("hiprtcGetProgramLog", CONV_JIT, API_RTC)),
        ("nvrtcGetProgramLogSize", ("hiprtcGetProgramLogSize", CONV_JIT, API_RTC)),
        ("nvrtcGetPTX", ("hiprtcGetCode", CONV_JIT, API_RTC)),
        ("nvrtcGetPTXSize", ("hiprtcGetCodeSize", CONV_JIT, API_RTC)),
        ("thrust::cuda", ("thrust::hip", CONV_MATH_FUNC, API_BLAS)),
        # The caffe2 directory does a string match; pytorch does a word-boundary match.
        # Patterns such as 'cub::' will not match for pytorch.
        # We list all current uses of cub symbols for this reason.
        ("cub::", ("hipcub::", CONV_SPECIAL_FUNC, API_RUNTIME)),
        ("cub::ArgMax", ("hipcub::ArgMax", CONV_SPECIAL_FUNC, API_RUNTIME)),
        ("cub::ArgMin", ("hipcub::ArgMin", CONV_SPECIAL_FUNC, API_RUNTIME)),
        ("cub::BLOCK_REDUCE_WARP_REDUCTIONS", ("hipcub::BLOCK_REDUCE_WARP_REDUCTIONS", CONV_SPECIAL_FUNC, API_RUNTIME)),
        ("cub::BlockReduce", ("hipcub::BlockReduce", CONV_SPECIAL_FUNC, API_RUNTIME)),
        ("cub::BlockScan", ("hipcub::BlockScan", CONV_SPECIAL_FUNC, API_RUNTIME)),
        ("cub::CachingDeviceAllocator", ("hipcub::CachingDeviceAllocator", CONV_SPECIAL_FUNC, API_RUNTIME)),
        ("cub::CountingInputIterator", ("hipcub::CountingInputIterator", CONV_SPECIAL_FUNC, API_RUNTIME)),
        ("cub::DeviceRadixSort", ("hipcub::DeviceRadixSort", CONV_SPECIAL_FUNC, API_RUNTIME)),
        ("cub::DeviceReduce", ("hipcub::DeviceReduce", CONV_SPECIAL_FUNC, API_RUNTIME)),
        ("cub::DeviceRunLengthEncode", ("hipcub::DeviceRunLengthEncode", CONV_SPECIAL_FUNC, API_RUNTIME)),
        ("cub::DeviceScan", ("hipcub::DeviceScan", CONV_SPECIAL_FUNC, API_RUNTIME)),
        ("cub::DeviceSegmentedRadixSort", ("hipcub::DeviceSegmentedRadixSort", CONV_SPECIAL_FUNC, API_RUNTIME)),
        ("cub::DeviceSegmentedReduce", ("hipcub::DeviceSegmentedReduce", CONV_SPECIAL_FUNC, API_RUNTIME)),
        ("cub::DeviceSelect", ("hipcub::DeviceSelect", CONV_SPECIAL_FUNC, API_RUNTIME)),
        ("cub::KeyValuePair", ("hipcub::KeyValuePair", CONV_SPECIAL_FUNC, API_RUNTIME)),
        ("cub::Max", ("hipcub::Max", CONV_SPECIAL_FUNC, API_RUNTIME)),
        ("cub::Min", ("hipcub::Min", CONV_SPECIAL_FUNC, API_RUNTIME)),
        ("cub::Sum", ("hipcub::Sum", CONV_SPECIAL_FUNC, API_RUNTIME)),
        ("cub::ArgIndexInputIterator", ("hipcub::ArgIndexInputIterator", CONV_SPECIAL_FUNC, API_RUNTIME)),
        ("cub::TransformInputIterator", ("hipcub::TransformInputIterator", CONV_SPECIAL_FUNC, API_RUNTIME)),
        ("cub::WarpReduce", ("hipcub::WarpReduce", CONV_SPECIAL_FUNC, API_RUNTIME)),
        ("nvtxMark", ("roctxMark", CONV_OTHER, API_ROCTX)),
        ("nvtxMarkA", ("roctxMarkA", CONV_OTHER, API_ROCTX)),
        ("nvtxRangePushA", ("roctxRangePushA", CONV_OTHER, API_ROCTX)),
        ("nvtxRangePop", ("roctxRangePop", CONV_OTHER, API_ROCTX)),
        ("nvtxRangeStartA", ("roctxRangeStartA", CONV_OTHER, API_ROCTX)),
        ("nvtxRangeEnd", ("roctxRangeStop", CONV_OTHER, API_ROCTX)),
    ]
)

CUDA_SPARSE_MAP = collections.OrderedDict(
    [
        ("cusparseStatus_t", ("hipsparseStatus_t", CONV_MATH_FUNC, API_SPARSE)),
        ("cusparseHandle_t", ("hipsparseHandle_t", CONV_MATH_FUNC, API_SPARSE)),
        ("cuComplex", ("hipComplex", CONV_TYPE, API_SPARSE)),
        ("cuDoubleComplex", ("hipDoubleComplex", CONV_TYPE, API_SPARSE)),
        ("CUSPARSE_POINTER_MODE_HOST", ("HIPSPARSE_POINTER_MODE_HOST", CONV_NUMERIC_LITERAL, API_SPARSE)),
        ("cusparseOperation_t", ("hipsparseOperation_t", CONV_TYPE,
API_SPARSE)), ( "cusparseCreateMatDescr", ("hipsparseCreateMatDescr", CONV_MATH_FUNC, API_SPARSE), ), ("cusparseCreate", ("hipsparseCreate", CONV_MATH_FUNC, API_SPARSE)), ( "cusparseDestroyMatDescr", ("hipsparseDestroyMatDescr", CONV_MATH_FUNC, API_SPARSE), ), ("cusparseDestroy", ("hipsparseDestroy", CONV_MATH_FUNC, API_SPARSE)), ("cusparseXcoo2csr", ("hipsparseXcoo2csr", CONV_MATH_FUNC, API_SPARSE)), ("cusparseMatDescr_t", ("hipsparseMatDescr_t", CONV_MATH_FUNC, API_SPARSE)), ("cusparseDiagType_t", ("hipsparseDiagType_t", CONV_TYPE, API_SPARSE)), ("CUSPARSE_DIAG_TYPE_UNIT", ("HIPSPARSE_DIAG_TYPE_UNIT", CONV_NUMERIC_LITERAL, API_SPARSE)), ("CUSPARSE_DIAG_TYPE_NON_UNIT", ("HIPSPARSE_DIAG_TYPE_NON_UNIT", CONV_NUMERIC_LITERAL, API_SPARSE)), ("cusparseSetMatDiagType", ("hipsparseSetMatDiagType", CONV_MATH_FUNC, API_SPARSE)), ("cusparseFillMode_t", ("hipsparseFillMode_t", CONV_TYPE, API_SPARSE)), ("CUSPARSE_FILL_MODE_UPPER", ("HIPSPARSE_FILL_MODE_UPPER", CONV_NUMERIC_LITERAL, API_SPARSE)), ("CUSPARSE_FILL_MODE_LOWER", ("HIPSPARSE_FILL_MODE_LOWER", CONV_NUMERIC_LITERAL, API_SPARSE)), ("cusparseSetMatFillMode", ("hipsparseSetMatFillMode", CONV_MATH_FUNC, API_SPARSE)), ("cusparseDirection_t", ("hipsparseDirection_t", CONV_TYPE, API_SPARSE)), ("CUSPARSE_DIRECTION_ROW", ("HIPSPARSE_DIRECTION_ROW", CONV_NUMERIC_LITERAL, API_SPARSE)), ("CUSPARSE_DIRECTION_COLUMN", ("HIPSPARSE_DIRECTION_COLUMN", CONV_NUMERIC_LITERAL, API_SPARSE)), ("cusparseSolvePolicy_t", ("hipsparseSolvePolicy_t", CONV_TYPE, API_SPARSE)), ("CUSPARSE_SOLVE_POLICY_NO_LEVEL", ("HIPSPARSE_SOLVE_POLICY_NO_LEVEL", CONV_NUMERIC_LITERAL, API_SPARSE)), ("CUSPARSE_SOLVE_POLICY_USE_LEVEL", ("HIPSPARSE_SOLVE_POLICY_USE_LEVEL", CONV_NUMERIC_LITERAL, API_SPARSE)), ("cusparseCreateBsrsv2Info", ("hipsparseCreateBsrsv2Info", CONV_MATH_FUNC, API_SPARSE)), ("cusparseCreateBsrsm2Info", ("hipsparseCreateBsrsm2Info", CONV_MATH_FUNC, API_SPARSE)), ("cusparseDestroyBsrsv2Info", ("hipsparseDestroyBsrsv2Info", CONV_MATH_FUNC, API_SPARSE)), ("cusparseDestroyBsrsm2Info", ("hipsparseDestroyBsrsm2Info", CONV_MATH_FUNC, API_SPARSE)), ("cusparseSbsrmm", ("hipsparseSbsrmm", CONV_MATH_FUNC, API_SPARSE)), ("cusparseDbsrmm", ("hipsparseDbsrmm", CONV_MATH_FUNC, API_SPARSE)), ("cusparseCbsrmm", ("hipsparseCbsrmm", CONV_MATH_FUNC, API_SPARSE)), ("cusparseZbsrmm", ("hipsparseZbsrmm", CONV_MATH_FUNC, API_SPARSE)), ("cusparseSbsrmv", ("hipsparseSbsrmv", CONV_MATH_FUNC, API_SPARSE)), ("cusparseDbsrmv", ("hipsparseDbsrmv", CONV_MATH_FUNC, API_SPARSE)), ("cusparseCbsrmv", ("hipsparseCbsrmv", CONV_MATH_FUNC, API_SPARSE)), ("cusparseZbsrmv", ("hipsparseZbsrmv", CONV_MATH_FUNC, API_SPARSE)), ("cusparseSbsrsv2_bufferSize", ("hipsparseSbsrsv2_bufferSize", CONV_MATH_FUNC, API_SPARSE)), ("cusparseDbsrsv2_bufferSize", ("hipsparseDbsrsv2_bufferSize", CONV_MATH_FUNC, API_SPARSE)), ("cusparseCbsrsv2_bufferSize", ("hipsparseCbsrsv2_bufferSize", CONV_MATH_FUNC, API_SPARSE)), ("cusparseZbsrsv2_bufferSize", ("hipsparseZbsrsv2_bufferSize", CONV_MATH_FUNC, API_SPARSE)), ("cusparseSbsrsv2_analysis", ("hipsparseSbsrsv2_analysis", CONV_MATH_FUNC, API_SPARSE)), ("cusparseDbsrsv2_analysis", ("hipsparseDbsrsv2_analysis", CONV_MATH_FUNC, API_SPARSE)), ("cusparseCbsrsv2_analysis", ("hipsparseCbsrsv2_analysis", CONV_MATH_FUNC, API_SPARSE)), ("cusparseZbsrsv2_analysis", ("hipsparseZbsrsv2_analysis", CONV_MATH_FUNC, API_SPARSE)), ("cusparseSbsrsv2_solve", ("hipsparseSbsrsv2_solve", CONV_MATH_FUNC, API_SPARSE)), ("cusparseDbsrsv2_solve", ("hipsparseDbsrsv2_solve", CONV_MATH_FUNC, API_SPARSE)), 
("cusparseCbsrsv2_solve", ("hipsparseCbsrsv2_solve", CONV_MATH_FUNC, API_SPARSE)), ("cusparseZbsrsv2_solve", ("hipsparseZbsrsv2_solve", CONV_MATH_FUNC, API_SPARSE)), ("cusparseSbsrsm2_bufferSize", ("hipsparseSbsrsm2_bufferSize", CONV_MATH_FUNC, API_SPARSE)), ("cusparseDbsrsm2_bufferSize", ("hipsparseDbsrsm2_bufferSize", CONV_MATH_FUNC, API_SPARSE)), ("cusparseCbsrsm2_bufferSize", ("hipsparseCbsrsm2_bufferSize", CONV_MATH_FUNC, API_SPARSE)), ("cusparseZbsrsm2_bufferSize", ("hipsparseZbsrsm2_bufferSize", CONV_MATH_FUNC, API_SPARSE)), ("cusparseSbsrsm2_analysis", ("hipsparseSbsrsm2_analysis", CONV_MATH_FUNC, API_SPARSE)), ("cusparseDbsrsm2_analysis", ("hipsparseDbsrsm2_analysis", CONV_MATH_FUNC, API_SPARSE)), ("cusparseCbsrsm2_analysis", ("hipsparseCbsrsm2_analysis", CONV_MATH_FUNC, API_SPARSE)), ("cusparseZbsrsm2_analysis", ("hipsparseZbsrsm2_analysis", CONV_MATH_FUNC, API_SPARSE)), ("cusparseSbsrsm2_solve", ("hipsparseSbsrsm2_solve", CONV_MATH_FUNC, API_SPARSE)), ("cusparseDbsrsm2_solve", ("hipsparseDbsrsm2_solve", CONV_MATH_FUNC, API_SPARSE)), ("cusparseCbsrsm2_solve", ("hipsparseCbsrsm2_solve", CONV_MATH_FUNC, API_SPARSE)), ("cusparseZbsrsm2_solve", ("hipsparseZbsrsm2_solve", CONV_MATH_FUNC, API_SPARSE)), ("cusparseScsrmm2", ("hipsparseScsrmm2", CONV_MATH_FUNC, API_SPARSE)), ("cusparseDcsrmm2", ("hipsparseDcsrmm2", CONV_MATH_FUNC, API_SPARSE)), ("cusparseCcsrmm2", ("hipsparseCcsrmm2", CONV_MATH_FUNC, API_SPARSE)), ("cusparseZcsrmm2", ("hipsparseZcsrmm2", CONV_MATH_FUNC, API_SPARSE)), ("cusparseScsrmm", ("hipsparseScsrmm", CONV_MATH_FUNC, API_SPARSE)), ("cusparseDcsrmm", ("hipsparseDcsrmm", CONV_MATH_FUNC, API_SPARSE)), ( "cusparseXcsrsort_bufferSizeExt", ("hipsparseXcsrsort_bufferSizeExt", CONV_MATH_FUNC, API_SPARSE), ), ("cusparseCreateCsrgemm2Info", ("hipsparseCreateCsrgemm2Info", CONV_MATH_FUNC, API_SPARSE)), ( "cusparseDestroyCsrgemm2Info", ("hipsparseDestroyCsrgemm2Info", CONV_MATH_FUNC, API_SPARSE), ), ("cusparseXcsrgemm2Nnz", ("hipsparseXcsrgemm2Nnz", CONV_MATH_FUNC, API_SPARSE)), ("cusparseDcsrgemm2_bufferSizeExt", ("hipsparseDcsrgemm2_bufferSizeExt", CONV_MATH_FUNC, API_SPARSE)), ("cusparseScsrgemm2_bufferSizeExt", ("hipsparseScsrgemm2_bufferSizeExt", CONV_MATH_FUNC, API_SPARSE)), ("cusparseDcsrgemm2", ("hipsparseDcsrgemm2", CONV_MATH_FUNC, API_SPARSE)), ("cusparseScsrgemm2", ("hipsparseScsrgemm2", CONV_MATH_FUNC, API_SPARSE)), ("cusparseSetPointerMode", ("hipsparseSetPointerMode", CONV_MATH_FUNC, API_SPARSE)), ("cusparseXcsrgeam2Nnz", ("hipsparseXcsrgeam2Nnz", CONV_MATH_FUNC, API_SPARSE)), ("cusparseScsrgeam2_bufferSizeExt", ("hipsparseScsrgeam2_bufferSizeExt", CONV_MATH_FUNC, API_SPARSE)), ("cusparseDcsrgeam2_bufferSizeExt", ("hipsparseDcsrgeam2_bufferSizeExt", CONV_MATH_FUNC, API_SPARSE)), ("cusparseCcsrgeam2_bufferSizeExt", ("hipsparseCcsrgeam2_bufferSizeExt", CONV_MATH_FUNC, API_SPARSE)), ("cusparseZcsrgeam2_bufferSizeExt", ("hipsparseZcsrgeam2_bufferSizeExt", CONV_MATH_FUNC, API_SPARSE)), ("cusparseScsrgeam2", ("hipsparseScsrgeam2", CONV_MATH_FUNC, API_SPARSE)), ("cusparseDcsrgeam2", ("hipsparseDcsrgeam2", CONV_MATH_FUNC, API_SPARSE)), ("cusparseCcsrgeam2", ("hipsparseCcsrgeam2", CONV_MATH_FUNC, API_SPARSE)), ("cusparseZcsrgeam2", ("hipsparseZcsrgeam2", CONV_MATH_FUNC, API_SPARSE)), ("cusparseXcsrsort", ("hipsparseXcsrsort", CONV_MATH_FUNC, API_SPARSE)), ("cusparseXbsrsm2_zeroPivot", ("hipsparseXbsrsm2_zeroPivot", CONV_MATH_FUNC, API_SPARSE)), ("cusparseXbsrsv2_zeroPivot", ("hipsparseXbsrsv2_zeroPivot", CONV_MATH_FUNC, API_SPARSE)), ( "cusparseXcoosort_bufferSizeExt", 
("hipsparseXcoosort_bufferSizeExt", CONV_MATH_FUNC, API_SPARSE), ), ( "cusparseXcoosortByRow", ("hipsparseXcoosortByRow", CONV_MATH_FUNC, API_SPARSE), ), ("cusparseSetStream", ("hipsparseSetStream", CONV_MATH_FUNC, API_SPARSE)), ( "cusparseCreateIdentityPermutation", ("hipsparseCreateIdentityPermutation", CONV_MATH_FUNC, API_SPARSE), ), ( "cusparseSetMatIndexBase", ("hipsparseSetMatIndexBase", CONV_MATH_FUNC, API_SPARSE), ), ("cusparseSetMatType", ("hipsparseSetMatType", CONV_MATH_FUNC, API_SPARSE)), ( "CUSPARSE_STATUS_SUCCESS", ("HIPSPARSE_STATUS_SUCCESS", CONV_NUMERIC_LITERAL, API_SPARSE), ), ( "CUSPARSE_STATUS_NOT_INITIALIZED", ("HIPSPARSE_STATUS_NOT_INITIALIZED", CONV_NUMERIC_LITERAL, API_SPARSE), ), ( "CUSPARSE_STATUS_ALLOC_FAILED", ("HIPSPARSE_STATUS_ALLOC_FAILED", CONV_NUMERIC_LITERAL, API_SPARSE), ), ( "CUSPARSE_STATUS_INVALID_VALUE", ("HIPSPARSE_STATUS_INVALID_VALUE", CONV_NUMERIC_LITERAL, API_SPARSE), ), ( "CUSPARSE_STATUS_MAPPING_ERROR", ("HIPSPARSE_STATUS_MAPPING_ERROR", CONV_NUMERIC_LITERAL, API_SPARSE), ), ( "CUSPARSE_STATUS_EXECUTION_FAILED", ("HIPSPARSE_STATUS_EXECUTION_FAILED", CONV_NUMERIC_LITERAL, API_SPARSE), ), ( "CUSPARSE_STATUS_INTERNAL_ERROR", ("HIPSPARSE_STATUS_INTERNAL_ERROR", CONV_NUMERIC_LITERAL, API_SPARSE), ), ( "CUSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED", ( "HIPSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED", CONV_NUMERIC_LITERAL, API_SPARSE, ), ), ( "CUSPARSE_STATUS_ARCH_MISMATCH", ("HIPSPARSE_STATUS_ARCH_MISMATCH", CONV_NUMERIC_LITERAL, API_SPARSE), ), ( "CUSPARSE_STATUS_ZERO_PIVOT", ("HIPSPARSE_STATUS_ZERO_PIVOT", CONV_NUMERIC_LITERAL, API_SPARSE), ), ( "CUSPARSE_OPERATION_TRANSPOSE", ("HIPSPARSE_OPERATION_TRANSPOSE", CONV_NUMERIC_LITERAL, API_SPARSE), ), ( "CUSPARSE_OPERATION_NON_TRANSPOSE", ("HIPSPARSE_OPERATION_NON_TRANSPOSE", CONV_NUMERIC_LITERAL, API_SPARSE), ), ( "CUSPARSE_OPERATION_CONJUGATE_TRANSPOSE", ( "HIPSPARSE_OPERATION_CONJUGATE_TRANSPOSE", CONV_NUMERIC_LITERAL, API_SPARSE, ), ), ( "CUSPARSE_INDEX_BASE_ZERO", ("HIPSPARSE_INDEX_BASE_ZERO", CONV_NUMERIC_LITERAL, API_SPARSE), ), ( "CUSPARSE_INDEX_BASE_ONE", ("HIPSPARSE_INDEX_BASE_ONE", CONV_NUMERIC_LITERAL, API_SPARSE), ), ( "CUSPARSE_MATRIX_TYPE_GENERAL", ("HIPSPARSE_MATRIX_TYPE_GENERAL", CONV_NUMERIC_LITERAL, API_SPARSE), ), ] ) PYTORCH_SPECIFIC_MAPPINGS = collections.OrderedDict( [ ("USE_CUDA", ("USE_ROCM", API_PYTORCH)), ("CUDA_VERSION", ("TORCH_HIP_VERSION", API_PYTORCH)), ("cudaHostAllocator", ("hipHostAllocator", API_PYTORCH)), ("cudaDeviceAllocator", ("hipDeviceAllocator", API_PYTORCH)), ("define MAX_NUM_BLOCKS 200", ("define MAX_NUM_BLOCKS 64", API_PYTORCH)), ("cuda::CUDAGuard", ("hip::HIPGuardMasqueradingAsCUDA", API_PYTORCH)), ("CUDAGuard", ("HIPGuardMasqueradingAsCUDA", API_PYTORCH)), ( "cuda::OptionalCUDAGuard", ("hip::OptionalHIPGuardMasqueradingAsCUDA", API_PYTORCH), ), ("OptionalCUDAGuard", ("OptionalHIPGuardMasqueradingAsCUDA", API_PYTORCH)), ( "cuda::CUDAStreamGuard", ("hip::HIPStreamGuardMasqueradingAsCUDA", API_PYTORCH), ), ("CUDAStreamGuard", ("HIPStreamGuardMasqueradingAsCUDA", API_PYTORCH)), ( "cuda::OptionalCUDAStreamGuard", ("hip::OptionalHIPStreamGuardMasqueradingAsCUDA", API_PYTORCH), ), ( "OptionalCUDAStreamGuard", ("OptionalHIPStreamGuardMasqueradingAsCUDA", API_PYTORCH), ), ( "cuda::CUDAMultiStreamGuard", ("hip::HIPMultiStreamGuardMasqueradingAsCUDA", API_PYTORCH), ), ( "CUDAMultiStreamGuard", ("HIPMultiStreamGuardMasqueradingAsCUDA", API_PYTORCH), ), # Only get needs to be transformed this way; all the other ones can go # straight to the normal versions 
hip::HIPCachingAllocator ( "cuda::CUDACachingAllocator::get", ("hip::HIPCachingAllocatorMasqueradingAsCUDA::get", API_PYTORCH), ), ( "CUDACachingAllocator::get", ("HIPCachingAllocatorMasqueradingAsCUDA::get", API_PYTORCH), ), ( "cuda::CUDACachingAllocator::recordStream", ( "hip::HIPCachingAllocatorMasqueradingAsCUDA::recordStreamMasqueradingAsCUDA", API_PYTORCH, ), ), ( "CUDACachingAllocator::recordStream", ( "HIPCachingAllocatorMasqueradingAsCUDA::recordStreamMasqueradingAsCUDA", API_PYTORCH, ), ), ("cuda::CUDAStream", ("hip::HIPStreamMasqueradingAsCUDA", API_PYTORCH)), ("CUDAStream", ("HIPStreamMasqueradingAsCUDA", API_PYTORCH)), ( "cuda::getStreamFromPool", ("hip::getStreamFromPoolMasqueradingAsCUDA", API_PYTORCH), ), ("getStreamFromPool", ("getStreamFromPoolMasqueradingAsCUDA", API_PYTORCH)), ( "cuda::getDefaultCUDAStream", ("hip::getDefaultHIPStreamMasqueradingAsCUDA", API_PYTORCH), ), ( "cuda::getStreamFromExternal", ("hip::getStreamFromExternalMasqueradingAsCUDA", API_PYTORCH), ), ("getStreamFromExternal", ("getStreamFromExternalMasqueradingAsCUDA", API_PYTORCH)), ( "cuda::getDefaultCUDAStream", ("hip::getDefaultHIPStreamMasqueradingAsCUDA", API_PYTORCH), ), ( "getDefaultCUDAStream", ("getDefaultHIPStreamMasqueradingAsCUDA", API_PYTORCH), ), ( "cuda::getCurrentCUDAStream", ("hip::getCurrentHIPStreamMasqueradingAsCUDA", API_PYTORCH), ), ( "getCurrentCUDAStream", ("getCurrentHIPStreamMasqueradingAsCUDA", API_PYTORCH), ), ( "cuda::setCurrentCUDAStream", ("hip::setCurrentHIPStreamMasqueradingAsCUDA", API_PYTORCH), ), ( "setCurrentCUDAStream", ("setCurrentHIPStreamMasqueradingAsCUDA", API_PYTORCH), ), # TODO: Undo this special-case; see the header for motivation behind this # hack. It's VERY important this is only applied to PyTorch HIPify. ( "c10/cuda/CUDAGuard.h", ("ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h", API_PYTORCH), ), ( "c10/cuda/CUDACachingAllocator.h", ("ATen/hip/impl/HIPCachingAllocatorMasqueradingAsCUDA.h", API_PYTORCH), ), ( "c10/cuda/CUDAStream.h", ("ATen/hip/impl/HIPStreamMasqueradingAsCUDA.h", API_PYTORCH), ), ("gloo/cuda.h", ("gloo/hip.h", API_PYTORCH)), ( "gloo/cuda_allreduce_halving_doubling.h", ("gloo/hip_allreduce_halving_doubling.h", API_PYTORCH), ), ( "gloo/cuda_allreduce_halving_doubling_pipelined.h", ("gloo/hip_allreduce_halving_doubling_pipelined.h", API_PYTORCH), ), ("gloo/cuda_allreduce_ring.h", ("gloo/hip_allreduce_ring.h", API_PYTORCH)), ( "gloo/cuda_broadcast_one_to_all.h", ("gloo/hip_broadcast_one_to_all.h", API_PYTORCH), ), ( "gloo::CudaAllreduceHalvingDoublingPipelined", ("gloo::HipAllreduceHalvingDoublingPipelined", API_PYTORCH), ), ("gloo::CudaBroadcastOneToAll", ("gloo::HipBroadcastOneToAll", API_PYTORCH)), ("gloo::CudaHostWorkspace", ("gloo::HipHostWorkspace", API_PYTORCH)), ("gloo::CudaDeviceWorkspace", ("gloo::HipDeviceWorkspace", API_PYTORCH)), ("CUDNN_RNN_RELU", ("miopenRNNRELU", API_PYTORCH)), ("CUDNN_RNN_TANH", ("miopenRNNTANH", API_PYTORCH)), ("CUDNN_LSTM", ("miopenLSTM", API_PYTORCH)), ("CUDNN_GRU", ("miopenGRU", API_PYTORCH)), ("cudnnRNNMode_t", ("miopenRNNMode_t", API_PYTORCH)), ("magma_queue_create_from_cuda", ("magma_queue_create_from_hip", API_PYTORCH)), ] ) CAFFE2_SPECIFIC_MAPPINGS = collections.OrderedDict( [ ("cuda_stream", ("hip_stream", API_CAFFE2)), # if the header is a native hip folder (under hip directory), # there is no need to add a hip path to it; the trie in hipify script # takes this mapping order to forbid further replacement ("/hip/", ("/hip/", API_CAFFE2)), ("/context_gpu", ("/hip/context_gpu", API_CAFFE2)), 
("/common_gpu", ("/hip/common_gpu", API_CAFFE2)), ("/cuda_nccl_gpu", ("/hip/hip_nccl_gpu", API_CAFFE2)), ("/mixed_utils", ("/hip/mixed_utils", API_CAFFE2)), ("/operator_fallback_gpu", ("/hip/operator_fallback_gpu", API_CAFFE2)), ( "/spatial_batch_norm_op_impl", ("/hip/spatial_batch_norm_op_impl", API_CAFFE2), ), ( "/recurrent_network_executor_gpu", ("/hip/recurrent_network_executor_gpu", API_CAFFE2), ), ( "/generate_proposals_op_util_nms_gpu", ("/hip/generate_proposals_op_util_nms_gpu", API_CAFFE2), ), ("/max_pool_with_index_gpu", ("/hip/max_pool_with_index_gpu", API_CAFFE2)), ("/THCCachingAllocator_gpu", ("/hip/THCCachingAllocator_gpu", API_CAFFE2)), ("/top_k_heap_selection", ("/hip/top_k_heap_selection", API_CAFFE2)), ("/top_k_radix_selection", ("/hip/top_k_radix_selection", API_CAFFE2)), ("/GpuAtomics", ("/hip/GpuAtomics", API_CAFFE2)), ("/GpuDefs", ("/hip/GpuDefs", API_CAFFE2)), ("/GpuScanUtils", ("/hip/GpuScanUtils", API_CAFFE2)), ("/GpuBitonicSort", ("/hip/GpuBitonicSort", API_CAFFE2)), ("/math/reduce.cuh", ("/math/hip/reduce.cuh", API_CAFFE2)), ("/sgd/adagrad_fused_op_gpu.cuh", ("/sgd/hip/adagrad_fused_op_gpu.cuh", API_CAFFE2)), ("/operators/segment_reduction_op_gpu.cuh", ("/operators/hip/segment_reduction_op_gpu.cuh", API_CAFFE2)), ("/gather_op.cuh", ("/hip/gather_op.cuh", API_CAFFE2)), ("caffe2/core/common_cudnn.h", ("caffe2/core/hip/common_miopen.h", API_CAFFE2)), ("REGISTER_CUDA_OPERATOR", ("REGISTER_HIP_OPERATOR", API_CAFFE2)), ("CUDA_1D_KERNEL_LOOP", ("HIP_1D_KERNEL_LOOP", API_CAFFE2)), ("CUDAContext", ("HIPContext", API_CAFFE2)), ("CAFFE_CUDA_NUM_THREADS", ("CAFFE_HIP_NUM_THREADS", API_CAFFE2)), ("HasCudaGPU", ("HasHipGPU", API_CAFFE2)), ("__expf", ("expf", API_CAFFE2)), ("CUBLAS_ENFORCE", ("ROCBLAS_ENFORCE", API_CAFFE2)), ("CUBLAS_CHECK", ("ROCBLAS_CHECK", API_CAFFE2)), ("cublas_handle", ("rocblashandle", API_CAFFE2)), ("CURAND_ENFORCE", ("HIPRAND_ENFORCE", API_CAFFE2)), ("CURAND_CHECK", ("HIPRAND_CHECK", API_CAFFE2)), ("curandGenerateUniform", ("hiprandGenerateUniform", API_CAFFE2)), ("curand_generator", ("hiprand_generator", API_CAFFE2)), ("CaffeCudaGetDevice", ("CaffeHipGetDevice", API_CAFFE2)), # do not rename CUDA_KERNEL_ASSERT, lazyInitCUDA in caffe2 sources # the ordered dict guarantees this pattern will match first, before "CUDA" ("CUDA_KERNEL_ASSERT", ("CUDA_KERNEL_ASSERT", API_CAFFE2)), ("lazyInitCUDA", ("lazyInitCUDA", API_CAFFE2)), ("CUDA_VERSION", ("TORCH_HIP_VERSION", API_CAFFE2)), ("CUDA", ("HIP", API_CAFFE2)), ("Cuda", ("Hip", API_CAFFE2)), ("cuda_", ("hip_", API_CAFFE2)), ("_cuda", ("_hip", API_CAFFE2)), ("CUDNN", ("MIOPEN", API_CAFFE2)), ("CuDNN", ("MIOPEN", API_CAFFE2)), ("cudnn", ("miopen", API_CAFFE2)), ("namespace cuda", ("namespace hip", API_CAFFE2)), ("cuda::CUDAGuard", ("hip::HIPGuard", API_CAFFE2)), ("cuda::OptionalCUDAGuard", ("hip::OptionalHIPGuard", API_CAFFE2)), ("cuda::CUDAStreamGuard", ("hip::HIPStreamGuard", API_CAFFE2)), ("cuda::OptionalCUDAStreamGuard", ("hip::OptionalHIPStreamGuard", API_CAFFE2)), ("c10/cuda/CUDAGuard.h", ("c10/hip/HIPGuard.h", API_CAFFE2)), ("gloo/cuda", ("gloo/hip", API_CAFFE2)), ] ) # We must tread very carefully here. Blanket conversions like are done # in CAFFE2_SPECIFIC_MAPPINGS are not presently supported on PyTorch, # because a regex for CUDA will also match a filename like CUDAGuard.h, # but the HIPIFY script doesn't presently move the file and so the substitution # will be invalid. 
Instead, we specifically list out every identifier # and file from c10/cuda which may be used externally, and do substitutions this # way. # # NB: if you want a transformation to ONLY apply to the c10/ directory, # put it as API_CAFFE2 C10_MAPPINGS = collections.OrderedDict( [ ("cuda::compat::", ("hip::compat::", API_C10)), ("c10/cuda/CUDAAlgorithm.h", ("c10/hip/HIPAlgorithm.h", API_C10)), ("c10/cuda/CUDAException.h", ("c10/hip/HIPException.h", API_C10)), ("c10/cuda/CUDAMacros.h", ("c10/hip/HIPMacros.h", API_C10)), ("c10/cuda/CUDAMathCompat.h", ("c10/hip/HIPMathCompat.h", API_C10)), ("c10/cuda/CUDAFunctions.h", ("c10/hip/HIPFunctions.h", API_C10)), ("c10/cuda/CUDAMiscFunctions.h", ("c10/hip/HIPMiscFunctions.h", API_C10)), ("c10/cuda/CUDAStream.h", ("c10/hip/HIPStream.h", API_C10)), ("c10/cuda/CUDAGraphsC10Utils.h", ("c10/hip/HIPGraphsC10Utils.h", API_C10)), ("c10/cuda/CUDACachingAllocator.h", ("c10/hip/HIPCachingAllocator.h", API_C10)), ("c10/cuda/impl/CUDATest.h", ("c10/hip/impl/HIPTest.h", API_C10)), ("c10/cuda/impl/CUDAGuardImpl.h", ("c10/hip/impl/HIPGuardImpl.h", API_C10)), ( "c10/cuda/impl/cuda_cmake_macros.h", ("c10/hip/impl/hip_cmake_macros.h", API_C10), ), ("C10_CUDA_CHECK", ("C10_HIP_CHECK", API_C10)), ("C10_CUDA_CHECK_WARN", ("C10_HIP_CHECK_WARN", API_C10)), ("c10::cuda", ("c10::hip", API_C10)), ("cuda::CUDAStream", ("hip::HIPStream", API_C10)), ("CUDAStream", ("HIPStream", API_C10)), # This substitution is not permissible, because there's another copy of this # function in torch/cuda.h # ("cuda::device_count", ("hip::device_count", API_C10)), ("cuda::current_device", ("hip::current_device", API_C10)), ("cuda::set_device", ("hip::set_device", API_C10)), ("cuda::device_synchronize", ("hip::device_synchronize", API_C10)), ("cuda::getStreamFromPool", ("hip::getStreamFromPool", API_C10)), ("getStreamFromPool", ("getStreamFromPool", API_C10)), ("cuda::getDefaultCUDAStream", ("hip::getDefaultHIPStream", API_C10)), ("getDefaultCUDAStream", ("getDefaultHIPStream", API_C10)), ("cuda::getCurrentCUDAStream", ("hip::getCurrentHIPStream", API_C10)), ("getCurrentCUDAStream", ("getCurrentHIPStream", API_C10)), ("cuda::get_cuda_check_prefix", ("hip::get_cuda_check_prefix", API_C10)), ("cuda::setCurrentCUDAStream", ("hip::setCurrentHIPStream", API_C10)), ("setCurrentCUDAStream", ("setCurrentHIPStream", API_C10)), ("cuda::CUDACachingAllocator", ("hip::HIPCachingAllocator", API_C10)), ("CUDACachingAllocator", ("HIPCachingAllocator", API_C10)), ("C10_CUDA_KERNEL_LAUNCH_CHECK", ("C10_HIP_KERNEL_LAUNCH_CHECK", API_C10)) ] ) # NB: C10 mappings are more specific than Caffe2 mappings, so run them # first CUDA_TO_HIP_MAPPINGS = [ CUDA_IDENTIFIER_MAP, CUDA_TYPE_NAME_MAP, CUDA_INCLUDE_MAP, CUDA_SPARSE_MAP, C10_MAPPINGS, PYTORCH_SPECIFIC_MAPPINGS, CAFFE2_SPECIFIC_MAPPINGS, ]
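
# --- Hedged usage sketch (illustration only; not part of the upstream file) ---
# hipify_python.py consumes these tables through a regex trie and also routes
# PyTorch vs. Caffe2 files to different maps, so the helper below (the name
# `first_match` is made up for this sketch) only illustrates why the list order
# above matters: earlier mapping groups win over later, more generic ones.
if __name__ == "__main__":
    def first_match(token):
        for mapping in CUDA_TO_HIP_MAPPINGS:
            if token in mapping:
                return mapping[token][0]  # first element of the value tuple is the HIP name
        return token  # no known substitution

    # "c10/cuda/CUDAGuard.h" has entries in both PYTORCH_SPECIFIC_MAPPINGS and
    # CAFFE2_SPECIFIC_MAPPINGS; because the PyTorch group comes earlier in
    # CUDA_TO_HIP_MAPPINGS, its masquerading header is the one returned here.
    print(first_match("c10/cuda/CUDAGuard.h"))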
pytorch-master
torch/utils/hipify/cuda_to_hip_mappings.py
""" Constants for annotations in the mapping. The constants defined here are used to annotate the mapping tuples in cuda_to_hip_mappings.py. They are based on https://github.com/ROCm-Developer-Tools/HIP/blob/master/hipify-clang/src/Statistics.h and fall in three categories: 1) type of mapping, 2) API of mapping, 3) unsupported mapping. """ CONV_VERSION = 0, CONV_INIT = 1 CONV_DEVICE = 2 CONV_MEM = 3 CONV_KERN = 4 CONV_COORD_FUNC = 5 CONV_MATH_FUNC = 6 CONV_DEVICE_FUNC = 7 CONV_SPECIAL_FUNC = 8 CONV_STREAM = 9 CONV_EVENT = 10 CONV_OCCUPANCY = 11 CONV_CONTEXT = 12 CONV_PEER = 13 CONV_MODULE = 14 CONV_CACHE = 15 CONV_EXEC = 16 CONV_ERROR = 17 CONV_DEF = 18 CONV_TEX = 19 CONV_GL = 20 CONV_GRAPHICS = 21 CONV_SURFACE = 22 CONV_JIT = 23 CONV_D3D9 = 24 CONV_D3D10 = 25 CONV_D3D11 = 26 CONV_VDPAU = 27 CONV_EGL = 28 CONV_THREAD = 29 CONV_OTHER = 30 CONV_INCLUDE = 31 CONV_INCLUDE_CUDA_MAIN_H = 32 CONV_TYPE = 33 CONV_LITERAL = 34 CONV_NUMERIC_LITERAL = 35 CONV_LAST = 36 API_DRIVER = 37 API_RUNTIME = 38 API_BLAS = 39 API_SPARSE = 40 API_RAND = 41 API_LAST = 42 API_FFT = 43 API_RTC = 44 API_ROCTX = 45 HIP_UNSUPPORTED = 46 API_PYTORCH = 1337 API_CAFFE2 = 1338 API_C10 = 1339
pytorch-master
torch/utils/hipify/constants.py
from .version import __version__
pytorch-master
torch/utils/hipify/__init__.py
#!/usr/bin/env python3 """ The Python Hipify script. ## # Copyright (c) 2015-2016 Advanced Micro Devices, Inc. All rights reserved. # 2017-2018 Advanced Micro Devices, Inc. and # Facebook Inc. All rights reserved. # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. """ import argparse import fnmatch import re import shutil import sys import os from . import constants from .cuda_to_hip_mappings import CUDA_TO_HIP_MAPPINGS from .cuda_to_hip_mappings import MATH_TRANSPILATIONS from typing import Dict, List, Iterator, Optional from collections.abc import Mapping, Iterable HipifyResult = Dict[str, Optional[str]] HipifyFinalResult = Dict[str, HipifyResult] HIPIFY_C_BREADCRUMB = "// !!! This is a file automatically generated by hipify!!!\n" HIPIFY_FINAL_RESULT: HipifyFinalResult = {} # Hardcode the PyTorch template map """This dictionary provides the mapping from PyTorch kernel template types to their actual types.""" PYTORCH_TEMPLATE_MAP = {"Dtype": "scalar_t", "T": "scalar_t"} __all__ = ['InputError', 'openf', 'bcolors', 'GeneratedFileCleaner', 'match_extensions', 'matched_files_iter', 'preprocess_file_and_save_result', 'compute_stats', 'add_dim3', 'processKernelLaunches', 'find_closure_group', 'find_bracket_group', 'find_parentheses_group', 'replace_math_functions', 'hip_header_magic', 'replace_extern_shared', 'get_hip_file_path', 'is_out_of_place', 'is_pytorch_file', 'is_cusparse_file', 'is_caffe2_gpu_file', 'is_caffe2_gpu_file', 'Trie', 'preprocessor', 'file_specific_replacement', 'file_add_header', 'fix_static_global_kernels', 'extract_arguments', 'str2bool', 'hipify'] class InputError(Exception): # Exception raised for errors in the input. def __init__(self, message): super(InputError, self).__init__(message) self.message = message def __str__(self): return "{}: {}".format("Input error", self.message) def openf(filename, mode): return open(filename, mode, errors='ignore') # Color coding for printing class bcolors: HEADER = '\033[95m' OKBLUE = '\033[94m' OKGREEN = '\033[92m' WARNING = '\033[93m' FAIL = '\033[91m' ENDC = '\033[0m' BOLD = '\033[1m' UNDERLINE = '\033[4m' # To the programmer, the output of hipify most likely are intermediates. # This class allows users of hipify to ask for a cleanup by running the # hipify and compilation in a with instantiating this context manager class # with keep_intermediates=False. # The main usecase is the cpp_extensions, specifically the load method. 
# It is a good idea to keep intermediates (in case of errors or to # not recompile unchanged files), but in cases where you don't want to # keep them (e.g. in the CI), this can be used to remove files. class GeneratedFileCleaner: """Context Manager to clean up generated files""" def __init__(self, keep_intermediates=False): self.keep_intermediates = keep_intermediates self.files_to_clean = set() self.dirs_to_clean = [] def __enter__(self): return self def open(self, fn, *args, **kwargs): if not os.path.exists(fn): self.files_to_clean.add(os.path.abspath(fn)) return open(fn, *args, **kwargs) def makedirs(self, dn, exist_ok=False): parent, n = os.path.split(dn) if not n: parent, n = os.path.split(parent) if parent and n and not os.path.exists(parent): self.makedirs(parent, exist_ok=True) if not os.path.isdir(dn) or not exist_ok: os.mkdir(dn) self.dirs_to_clean.append(os.path.abspath(dn)) def __exit__(self, type, value, traceback): if not self.keep_intermediates: for f in self.files_to_clean: os.unlink(f) for d in self.dirs_to_clean[::-1]: os.rmdir(d) def match_extensions(filename: str, extensions: Iterable) -> bool: """Helper method to see if filename ends with certain extension""" return any(filename.endswith(e) for e in extensions) def _fnmatch(filepath, patterns): return any(fnmatch.fnmatch(filepath, pattern) for pattern in patterns) def matched_files_iter( root_path: str, includes: Iterable = (), ignores: Iterable = (), extensions: Iterable = (), out_of_place_only: bool = False, is_pytorch_extension: bool = False) -> Iterator[str]: exact_matches = set(includes) # This is a very rough heuristic; really, we want to avoid scanning # any file which is not checked into source control, but this script # needs to work even if you're in a Git or Hg checkout, so easier to # just block the biggest time sinks that won't matter in the # end. 
for (abs_dirpath, dirs, filenames) in os.walk(root_path, topdown=True): rel_dirpath = os.path.relpath(abs_dirpath, root_path) if rel_dirpath == '.': # Blah blah blah O(n) blah blah if ".git" in dirs: dirs.remove(".git") if "build" in dirs: dirs.remove("build") if "third_party" in dirs: dirs.remove("third_party") for filename in filenames: filepath = os.path.join(abs_dirpath, filename) rel_filepath = os.path.join(rel_dirpath, filename) # We respect extensions, UNLESS you wrote the entire # filename verbatim, in which case we always accept it if ( _fnmatch(filepath, includes) and (not _fnmatch(filepath, ignores)) and (match_extensions(filepath, extensions) or filepath in exact_matches) ): if not is_pytorch_extension: # for pytorch extensions, consider all files if not is_pytorch_file(rel_filepath) and not is_caffe2_gpu_file(rel_filepath): continue if out_of_place_only and not is_out_of_place(rel_filepath): continue yield filepath def preprocess_file_and_save_result( output_directory: str, filepath: str, all_files: Iterable, header_include_dirs: Iterable, stats: Dict[str, List], hip_clang_launch: bool, is_pytorch_extension: bool, clean_ctx: GeneratedFileCleaner, show_progress: bool) -> None: result = preprocessor(output_directory, filepath, all_files, header_include_dirs, stats, hip_clang_launch, is_pytorch_extension, clean_ctx, show_progress) fin_path = os.path.abspath(os.path.join(output_directory, filepath)) # Show what happened if show_progress and "ignored" not in str(result["status"]): print( fin_path, "->", result["hipified_path"], result["status"], flush=True) HIPIFY_FINAL_RESULT[fin_path] = result def compute_stats(stats): unsupported_calls = {cuda_call for (cuda_call, _filepath) in stats["unsupported_calls"]} # Print the number of unsupported calls print("Total number of unsupported CUDA function calls: {0:d}".format(len(unsupported_calls))) # Print the list of unsupported calls print(", ".join(unsupported_calls)) # Print the number of kernel launches print("\nTotal number of replaced kernel launches: {0:d}".format(len(stats["kernel_launches"]))) def add_dim3(kernel_string, cuda_kernel): '''adds dim3() to the second and third arguments in the kernel launch''' count = 0 closure = 0 kernel_string = kernel_string.replace("<<<", "").replace(">>>", "") arg_locs: List[Dict[str, int]] = [{} for _ in range(2)] arg_locs[count]['start'] = 0 for ind, c in enumerate(kernel_string): if count > 1: break if c == "(": closure += 1 elif c == ")": closure -= 1 if (c == "," or ind == len(kernel_string) - 1) and closure == 0: arg_locs[count]['end'] = ind + (c != ",") count += 1 if count < 2: arg_locs[count]['start'] = ind + 1 first_arg_raw = kernel_string[arg_locs[0]['start']:arg_locs[0]['end'] + 1] second_arg_raw = kernel_string[arg_locs[1]['start']:arg_locs[1]['end']] first_arg_clean = kernel_string[arg_locs[0]['start']:arg_locs[0]['end']].replace("\n", "").strip(" ") second_arg_clean = kernel_string[arg_locs[1]['start']:arg_locs[1]['end']].replace("\n", "").strip(" ") first_arg_dim3 = "dim3({})".format(first_arg_clean) second_arg_dim3 = "dim3({})".format(second_arg_clean) first_arg_raw_dim3 = first_arg_raw.replace(first_arg_clean, first_arg_dim3) second_arg_raw_dim3 = second_arg_raw.replace(second_arg_clean, second_arg_dim3) cuda_kernel = cuda_kernel.replace(first_arg_raw + second_arg_raw, first_arg_raw_dim3 + second_arg_raw_dim3) return cuda_kernel RE_KERNEL_LAUNCH = re.compile(r'([ ]+)(detail?)::[ ]+\\\n[ ]+') def processKernelLaunches(string, stats): """ Replace the CUDA style Kernel launches 
with the HIP style kernel launches.""" # Concat the namespace with the kernel names. (Find cleaner way of doing this later). string = RE_KERNEL_LAUNCH.sub(lambda inp: "{0}{1}::".format(inp.group(1), inp.group(2)), string) def grab_method_and_template(in_kernel): # The positions for relevant kernel components. pos = { "kernel_launch": {"start": in_kernel["start"], "end": in_kernel["end"]}, "kernel_name": {"start": -1, "end": -1}, "template": {"start": -1, "end": -1} } # Count for balancing template count = {"<>": 0} # Status for whether we are parsing a certain item. START = 0 AT_TEMPLATE = 1 AFTER_TEMPLATE = 2 AT_KERNEL_NAME = 3 status = START # Parse the string character by character for i in range(pos["kernel_launch"]["start"] - 1, -1, -1): char = string[i] # Handle Templating Arguments if status == START or status == AT_TEMPLATE: if char == ">": if status == START: status = AT_TEMPLATE pos["template"]["end"] = i count["<>"] += 1 if char == "<": count["<>"] -= 1 if count["<>"] == 0 and (status == AT_TEMPLATE): pos["template"]["start"] = i status = AFTER_TEMPLATE # Handle Kernel Name if status != AT_TEMPLATE: if string[i].isalnum() or string[i] in {'(', ')', '_', ':', '#'}: if status != AT_KERNEL_NAME: status = AT_KERNEL_NAME pos["kernel_name"]["end"] = i # Case: Kernel name starts the string. if i == 0: pos["kernel_name"]["start"] = 0 # Finished return [(pos["kernel_name"]), (pos["template"]), (pos["kernel_launch"])] else: # Potential ending point if we're already traversing a kernel's name. if status == AT_KERNEL_NAME: pos["kernel_name"]["start"] = i # Finished return [(pos["kernel_name"]), (pos["template"]), (pos["kernel_launch"])] def find_kernel_bounds(string): """Finds the starting and ending points for all kernel launches in the string.""" kernel_end = 0 kernel_positions = [] # Continue until we cannot find any more kernels anymore. while string.find("<<<", kernel_end) != -1: # Get kernel starting position (starting from the previous ending point) kernel_start = string.find("<<<", kernel_end) # Get kernel ending position (adjust end point past the >>>) kernel_end = string.find(">>>", kernel_start) + 3 if kernel_end <= 0: raise InputError("no kernel end found") # Add to list of traversed kernels kernel_positions.append({"start": kernel_start, "end": kernel_end, "group": string[kernel_start: kernel_end]}) return kernel_positions # Replace comments and string literals from the code so that find_kernel_bounds does not # wrongly capture kernels in comments and string literals. # This function replaces them with "x" to keep positions. def mask_comments(string): in_comment = '' prev_c = '' new_string = '' for c in string: if in_comment == '': # Outside comments if c == '/' and prev_c == '/': in_comment = '//' elif c == '*' and prev_c == '/': in_comment = '/*' elif c == '"' and prev_c != '\\' and prev_c != "'": in_comment = '"' elif in_comment == '//': # In // xxx if c == '\r' or c == '\n': in_comment = '' elif in_comment == '/*': # In /* xxx */ if c == '/' and prev_c == '*': in_comment = '' elif in_comment == '"': # In "" if c == '"' and prev_c != '\\': in_comment = '' prev_c = c if in_comment == '': new_string += c else: new_string += 'x' return new_string # Grab positional ranges of all kernel launches get_kernel_positions = list(find_kernel_bounds(mask_comments(string))) output_string = string # Replace each CUDA kernel with a HIP kernel. 
for kernel in get_kernel_positions: # Get kernel components params = grab_method_and_template(kernel) # Find parenthesis after kernel launch parenthesis = string.find("(", kernel["end"]) # Extract cuda kernel cuda_kernel = string[params[0]["start"]:parenthesis + 1] kernel_string = string[kernel['start']:kernel['end']] end_param_index = 0 if params[1]['end'] == -1 else 1 kernel_name_with_template = string[params[0]['start']:params[end_param_index]['end'] + 1] cuda_kernel_dim3 = add_dim3(kernel_string, cuda_kernel) # Keep number of kernel launch params consistent (grid dims, group dims, stream, dynamic shared size) num_klp = len(extract_arguments(0, kernel["group"].replace("<<<", "(").replace(">>>", ")"))) hip_kernel = "hipLaunchKernelGGL(" + cuda_kernel_dim3[0:-1].replace( ">>>", ", 0" * (4 - num_klp) + ">>>").replace("<<<", ", ").replace( ">>>", ", ").replace(kernel_name_with_template, "(" + kernel_name_with_template + ")") # Replace cuda kernel with hip kernel output_string = output_string.replace(cuda_kernel, hip_kernel) # Update the statistics stats["kernel_launches"].append(hip_kernel) return output_string def find_closure_group(input_string, start, group): """Generalization for finding a balancing closure group if group = ["(", ")"], then finds the first balanced parentheses. if group = ["{", "}"], then finds the first balanced bracket. Given an input string, a starting position in the input string, and the group type, find_closure_group returns the positions of group[0] and group[1] as a tuple. Example: find_closure_group("(hi)", 0, ["(", ")"]) Returns: 0, 3 """ inside_parenthesis = False parens = 0 pos = start p_start, p_end = -1, -1 while pos < len(input_string): if input_string[pos] == group[0]: if inside_parenthesis is False: inside_parenthesis = True parens = 1 p_start = pos else: parens += 1 elif input_string[pos] == group[1] and inside_parenthesis: parens -= 1 if parens == 0: p_end = pos return p_start, p_end pos += 1 return None, None def find_bracket_group(input_string, start): """Finds the first balanced parantheses.""" return find_closure_group(input_string, start, group=["{", "}"]) def find_parentheses_group(input_string, start): """Finds the first balanced bracket.""" return find_closure_group(input_string, start, group=["(", ")"]) RE_ASSERT = re.compile(r"\bassert[ ]*\(") def replace_math_functions(input_string): """FIXME: Temporarily replace std:: invocations of math functions with non-std:: versions to prevent linker errors NOTE: This can lead to correctness issues when running tests, since the correct version of the math function (exp/expf) might not get called. Plan is to remove this function once HIP supports std:: math function calls inside device code """ output_string = input_string for func in MATH_TRANSPILATIONS: output_string = output_string.replace(r'{}('.format(func), '{}('.format(MATH_TRANSPILATIONS[func])) return output_string RE_SYNCTHREADS = re.compile(r":?:?\b(__syncthreads)\b(\w*\()") def hip_header_magic(input_string): """If the file makes kernel builtin calls and does not include the cuda_runtime.h header, then automatically add an #include to match the "magic" includes provided by NVCC. TODO: Update logic to ignore cases where the cuda_runtime.h is included by another file. """ # Copy the input. output_string = input_string # Check if one of the following headers is already included. 
headers = ["hip/hip_runtime.h", "hip/hip_runtime_api.h"] if any(re.search(r'#include ("{0}"|<{0}>)'.format(ext), output_string) for ext in headers): return output_string # Rough logic to detect if we're inside device code hasDeviceLogic: int hasDeviceLogic = "hipLaunchKernelGGL" in output_string hasDeviceLogic += "__global__" in output_string hasDeviceLogic += "__shared__" in output_string hasDeviceLogic += RE_SYNCTHREADS.search(output_string) is not None # If device logic found, provide the necessary header. if hasDeviceLogic: output_string = '#include "hip/hip_runtime.h"\n' + input_string return output_string RE_EXTERN_SHARED = re.compile(r"extern\s+([\w\(\)]+)?\s*__shared__\s+([\w:<>\s]+)\s+(\w+)\s*\[\s*\]\s*;") def replace_extern_shared(input_string): """Match extern __shared__ type foo[]; syntax and use HIP_DYNAMIC_SHARED() MACRO instead. https://github.com/ROCm-Developer-Tools/HIP/blob/master/docs/markdown/hip_kernel_language.md#__shared__ Example: "extern __shared__ char smemChar[];" => "HIP_DYNAMIC_SHARED( char, smemChar)" "extern __shared__ unsigned char smem[];" => "HIP_DYNAMIC_SHARED( unsigned char, my_smem)" """ output_string = input_string output_string = RE_EXTERN_SHARED.sub( lambda inp: "HIP_DYNAMIC_SHARED({0} {1}, {2})".format( inp.group(1) or "", inp.group(2), inp.group(3)), output_string) return output_string def get_hip_file_path(rel_filepath, is_pytorch_extension=False): """ Returns the new name of the hipified file """ # At the moment, some PyTorch source files are HIPified in place. The predicate # is_out_of_place tells us if this is the case or not. assert(not os.path.isabs(rel_filepath)) if not is_pytorch_extension and not is_out_of_place(rel_filepath): return rel_filepath dirpath, filename = os.path.split(rel_filepath) root, ext = os.path.splitext(filename) # Here's the plan: # # In general, we need to disambiguate the HIPified filename so that # it gets a different name from the original filename, so # that we don't overwrite the original file # # There's a lot of different naming conventions across PyTorch # and Caffe2, but the general recipe is to convert occurrences # of cuda/gpu to hip, and add hip if there are no occurrences # of cuda/gpu anywhere. # # Concretely, we do the following: # # - If there is a directory component named "cuda", replace # it with "hip", AND # # - If the file name contains "CUDA", replace it with "HIP", AND # # - ALWAYS replace '.cu' with '.hip', because those files # contain CUDA kernels that needs to be hipified and processed with # hip compiler # # - If we are not hipifying a PyTorch extension, and the parent # directory name did not change as a result of the above # transformations, insert "hip" in the file path # as the direct parent folder of the file # # - If we are hipifying a PyTorch extension, and the parent directory # name as well as the filename (incl. extension) did not change as # a result of the above transformations, insert "_hip" in the filename # # This isn't set in stone; we might adjust this to support other # naming conventions. 
if ext == '.cu': ext = '.hip' orig_filename = filename orig_dirpath = dirpath dirpath = dirpath.replace('cuda', 'hip') dirpath = dirpath.replace('CUDA', 'HIP') dirpath = dirpath.replace('THC', 'THH') root = root.replace('cuda', 'hip') root = root.replace('CUDA', 'HIP') # Special case to handle caffe2/core/THCCachingAllocator if dirpath != "caffe2/core": root = root.replace('THC', 'THH') if not is_pytorch_extension and dirpath == orig_dirpath: dirpath = os.path.join(dirpath, 'hip') if is_pytorch_extension and dirpath == orig_dirpath and (root + ext) == orig_filename: root = root + "_hip" return os.path.join(dirpath, root + ext) def is_out_of_place(rel_filepath): assert(not os.path.isabs(rel_filepath)) if rel_filepath.startswith("torch/"): return False if rel_filepath.startswith("tools/autograd/templates/"): return False return True # Keep this synchronized with includes/ignores in build_amd.py def is_pytorch_file(rel_filepath): assert(not os.path.isabs(rel_filepath)) if rel_filepath.startswith("aten/"): if rel_filepath.startswith("aten/src/ATen/core/"): return False return True if rel_filepath.startswith("torch/"): return True if rel_filepath.startswith("tools/autograd/templates/"): return True return False def is_cusparse_file(rel_filepath): if is_pytorch_file(rel_filepath): return "sparse" in rel_filepath.lower() return False def is_caffe2_gpu_file(rel_filepath): assert(not os.path.isabs(rel_filepath)) if rel_filepath.startswith("c10/cuda"): return True filename = os.path.basename(rel_filepath) _, ext = os.path.splitext(filename) return ('gpu' in filename or ext in ['.cu', '.cuh']) and ('cudnn' not in filename) # Cribbed from https://stackoverflow.com/questions/42742810/speed-up-millions-of-regex-replacements-in-python-3/42789508#42789508 class Trie(): """Regex::Trie in Python. Creates a Trie out of a list of words. The trie can be exported to a Regex pattern. The corresponding Regex should match much faster than a simple Regex union.""" def __init__(self): self.data = {} def add(self, word): ref = self.data for char in word: ref[char] = char in ref and ref[char] or {} ref = ref[char] ref[''] = 1 def dump(self): return self.data def quote(self, char): return re.escape(char) def _pattern(self, pData): data = pData if "" in data and len(data.keys()) == 1: return None alt = [] cc = [] q = 0 for char in sorted(data.keys()): if isinstance(data[char], dict): try: recurse = self._pattern(data[char]) alt.append(self.quote(char) + recurse) except Exception: cc.append(self.quote(char)) else: q = 1 cconly = not len(alt) > 0 if len(cc) > 0: if len(cc) == 1: alt.append(cc[0]) else: alt.append('[' + ''.join(cc) + ']') if len(alt) == 1: result = alt[0] else: result = "(?:" + "|".join(alt) + ")" if q: if cconly: result += "?" else: result = "(?:%s)?" % result return result def pattern(self): return self._pattern(self.dump()) CAFFE2_TRIE = Trie() CAFFE2_MAP = {} PYTORCH_TRIE = Trie() PYTORCH_MAP: Dict[str, object] = {} # In PyTorch, we map cuBLAS->rocBLAS and cuSPARSE->hipSPARSE. Note the prefix, roc versus hip. # The 'hip' APIs offer a more direct CUDA-friendly mapping, but calling rocBLAS directly has better performance. # Unfortunately, the roc* types and hip* types differ, i.e., rocblas_float_complex versus hipComplex. # In the case of SPARSE, we must use the hip types for complex instead of the roc types, # but the pytorch mappings assume roc. Therefore, we create a new SPARSE mapping that has a higher priority. 
# Its mappings will trigger first, and only when a miss occurs will the lower-priority pytorch mapping take place. # When a file contains "sparse" in the filename, a mapping marked with API_SPARSE is preferred over other choices. PYTORCH_SPARSE_MAP = {} for mapping in CUDA_TO_HIP_MAPPINGS: assert isinstance(mapping, Mapping) for src, value in mapping.items(): dst = value[0] meta_data = value[1:] if constants.API_CAFFE2 not in meta_data: PYTORCH_TRIE.add(src) # if src is already in PYTORCH_MAP and dst belongs to API_SPARSE # do not overwrite PYTORCH_MAP, store dst separately if constants.API_SPARSE in meta_data and PYTORCH_MAP.get(src, ""): PYTORCH_SPARSE_MAP[src] = dst else: PYTORCH_MAP[src] = dst if constants.API_PYTORCH not in meta_data: CAFFE2_TRIE.add(src) CAFFE2_MAP[src] = dst RE_CAFFE2_PREPROCESSOR = re.compile(CAFFE2_TRIE.pattern()) RE_PYTORCH_PREPROCESSOR = re.compile(r'(?<=\W)({0})(?=\W)'.format(PYTORCH_TRIE.pattern())) RE_QUOTE_HEADER = re.compile(r'#include "([^"]+)"') RE_ANGLE_HEADER = re.compile(r'#include <([^>]+)>') RE_THC_GENERIC_FILE = re.compile(r'#define THC_GENERIC_FILE "([^"]+)"') RE_CU_SUFFIX = re.compile(r'\.cu\b') # be careful not to pick up .cuh """ Returns a dict with the following keys: "hipified_path" : absolute path of hipified source file "status" : "ok" if hipified file was written out "skipped" if an identical hipified file already existed or hipified file couldn't be written out "ignored" if the source file was a hipified file itself or not meant to be hipified """ def preprocessor( output_directory: str, filepath: str, all_files: Iterable, header_include_dirs: Iterable, stats: Dict[str, List], hip_clang_launch: bool, is_pytorch_extension: bool, clean_ctx: GeneratedFileCleaner, show_progress: bool) -> HipifyResult: """ Executes the CUDA -> HIP conversion on the specified file. 
""" if filepath not in all_files: return {"hipified_path": None, "status": "[ignored, not to be hipified]"} fin_path = os.path.abspath(os.path.join(output_directory, filepath)) rel_filepath = os.path.relpath(filepath, output_directory) with open(fin_path, 'r', encoding='utf-8') as fin: if fin.readline() == HIPIFY_C_BREADCRUMB: return {"hipified_path": None, "status": "[ignored, input is hipified output]"} fin.seek(0) output_source = fin.read() orig_output_source = output_source # get_hip_file_path needs a relative path to work correctly fout_path = os.path.abspath(os.path.join(output_directory, get_hip_file_path(rel_filepath, is_pytorch_extension))) if not os.path.exists(os.path.dirname(fout_path)): clean_ctx.makedirs(os.path.dirname(fout_path)) # unsupported_calls statistics reporting is broken atm def pt_repl(m): return PYTORCH_MAP[m.group(0)] def pt_sparse_repl(m): # checks SPARSE map first, and if a miss occurs, falls back to pytorch mappings return PYTORCH_SPARSE_MAP.get(m.group(0), pt_repl(m)) if is_pytorch_extension: output_source = RE_PYTORCH_PREPROCESSOR.sub(pt_repl, output_source) else: if is_cusparse_file(rel_filepath): output_source = RE_PYTORCH_PREPROCESSOR.sub(pt_sparse_repl, output_source) elif is_pytorch_file(rel_filepath): output_source = RE_PYTORCH_PREPROCESSOR.sub(pt_repl, output_source) else: def c2_repl(m): return CAFFE2_MAP[m.group(0)] output_source = RE_CAFFE2_PREPROCESSOR.sub(c2_repl, output_source) # Header rewrites def mk_repl(templ, include_current_dir=True): def repl(m): f = m.group(1) dirpath, filename = os.path.split(f) if ( f.startswith("ATen/cuda") or f.startswith("ATen/native/cuda") or f.startswith("ATen/native/nested/cuda") or f.startswith("ATen/native/quantized/cuda") or f.startswith("ATen/native/sparse/cuda") or f.startswith("ATen/native/transformers/cuda") or f.startswith("THC/") or (f.startswith("THC") and not f.startswith("THCP")) ): return templ.format(get_hip_file_path(m.group(1), is_pytorch_extension)) # if filename is one of the files being hipified for this extension if (is_pytorch_extension and any(s.endswith(filename) for s in all_files)): header_dir = None header_filepath = None # If include_current_dir True, look first in same dir as the including source file if include_current_dir: header_dir_to_check = os.path.dirname(fin_path) header_path_to_check = os.path.abspath(os.path.join(header_dir_to_check, f)) if os.path.exists(header_path_to_check): header_dir = header_dir_to_check header_filepath = header_path_to_check # If not found, look in include dirs one by one and first match wins if header_filepath is None: for header_include_dir in header_include_dirs: header_dir_to_check = os.path.join(output_directory, header_include_dir) header_path_to_check = os.path.abspath(os.path.join(header_dir_to_check, f)) if os.path.exists(header_path_to_check): header_dir = header_dir_to_check header_filepath = header_path_to_check # If header file not found, keep as is if header_filepath is None: return m.group(0) # Hipify header file first if needed if header_filepath not in HIPIFY_FINAL_RESULT: preprocess_file_and_save_result(output_directory, header_filepath, all_files, header_include_dirs, stats, hip_clang_launch, is_pytorch_extension, clean_ctx, show_progress) hipified_header_filepath = HIPIFY_FINAL_RESULT[header_filepath]["hipified_path"] return templ.format(os.path.relpath(hipified_header_filepath if hipified_header_filepath is not None else header_filepath, header_dir)) return m.group(0) return repl output_source = 
RE_QUOTE_HEADER.sub(mk_repl('#include "{0}"', True), output_source) output_source = RE_ANGLE_HEADER.sub(mk_repl('#include <{0}>', False), output_source) output_source = RE_THC_GENERIC_FILE.sub(mk_repl('#define THC_GENERIC_FILE "{0}"'), output_source) # CMakeLists.txt rewrites if filepath.endswith('CMakeLists.txt'): output_source = output_source.replace('CUDA', 'HIP') output_source = output_source.replace('THC', 'THH') output_source = RE_CU_SUFFIX.sub('.hip', output_source) # Perform Kernel Launch Replacements if not hip_clang_launch: output_source = processKernelLaunches(output_source, stats) # Replace std:: with non-std:: versions if (filepath.endswith(".cu") or filepath.endswith(".cuh")) and "PowKernel" not in filepath: output_source = replace_math_functions(output_source) # Include header if device code is contained. output_source = hip_header_magic(output_source) # Replace the extern __shared__ # NOTE: No longer needed after transition from hcc to hipclang. # output_source = replace_extern_shared(output_source) # Don't write out identical hipified files for extensions if dirpath has not changed if ( is_pytorch_extension and orig_output_source == output_source and os.path.dirname(fin_path) == os.path.dirname(fout_path) ): return {"hipified_path": fin_path, "status": "[skipped, no changes]"} # Add hipify breadcrumb for C-style files to avoid re-hipification if fin_path != fout_path and match_extensions(fin_path, (".cu", ".cuh", ".c", ".cc", ".cpp", ".h", ".hpp")): output_source = HIPIFY_C_BREADCRUMB + output_source do_write = True if os.path.exists(fout_path): with open(fout_path, 'r', encoding='utf-8') as fout_old: do_write = fout_old.read() != output_source if do_write: try: with clean_ctx.open(fout_path, 'w', encoding='utf-8') as fout: fout.write(output_source) return {"hipified_path": fout_path, "status": "[ok]"} except PermissionError as e: print(f"{bcolors.WARNING}Failed to save {fout_path} with \"{e.strerror}\", leaving {fin_path} unchanged.{bcolors.ENDC}", file=sys.stderr) return {"hipified_path": fin_path, "status": "[skipped, no permissions]"} else: return {"hipified_path": fout_path, "status": "[skipped, already hipified]"} def file_specific_replacement(filepath, search_string, replace_string, strict=False): with openf(filepath, "r+") as f: contents = f.read() if strict: contents = re.sub(r'\b({0})\b'.format(re.escape(search_string)), lambda x: replace_string, contents) else: contents = contents.replace(search_string, replace_string) f.seek(0) f.write(contents) f.truncate() def file_add_header(filepath, header): with openf(filepath, "r+") as f: contents = f.read() if header[0] != "<" and header[-1] != ">": header = '"{0}"'.format(header) contents = ('#include {0} \n'.format(header)) + contents f.seek(0) f.write(contents) f.truncate() def fix_static_global_kernels(in_txt): """Static global kernels in HIP results in a compilation error.""" in_txt = in_txt.replace(" __global__ static", "__global__") return in_txt RE_INCLUDE = re.compile(r"#include .*\n") def extract_arguments(start, string): """ Return the list of arguments in the upcoming function parameter closure. 
Example: string (input): '(blocks, threads, 0, THCState_getCurrentStream(state))' arguments (output): '[{'start': 1, 'end': 7}, {'start': 8, 'end': 16}, {'start': 17, 'end': 19}, {'start': 20, 'end': 53}]' """ arguments = [] closures = { "<": 0, "(": 0 } current_position = start argument_start_pos = current_position + 1 # Search for final parenthesis while current_position < len(string): if string[current_position] == "(": closures["("] += 1 elif string[current_position] == ")": closures["("] -= 1 elif string[current_position] == "<": closures["<"] += 1 elif string[current_position] == ">" and string[current_position - 1] != "-" and closures["<"] > 0: closures["<"] -= 1 # Finished all arguments if closures["("] == 0 and closures["<"] == 0: # Add final argument arguments.append({"start": argument_start_pos, "end": current_position}) break # Finished current argument if closures["("] == 1 and closures["<"] == 0 and string[current_position] == ",": arguments.append({"start": argument_start_pos, "end": current_position}) argument_start_pos = current_position + 1 current_position += 1 return arguments def str2bool(v): """ArgumentParser doesn't support type=bool. Thus, this helper method will convert from possible string types to True / False.""" if v.lower() in ('yes', 'true', 't', 'y', '1'): return True elif v.lower() in ('no', 'false', 'f', 'n', '0'): return False else: raise argparse.ArgumentTypeError('Boolean value expected.') def hipify( project_directory: str, show_detailed: bool = False, extensions: Iterable = (".cu", ".cuh", ".c", ".cc", ".cpp", ".h", ".in", ".hpp"), header_extensions: Iterable = (".cuh", ".h", ".hpp"), output_directory: str = "", header_include_dirs: Iterable = (), includes: Iterable = ('*',), extra_files: Iterable = (), out_of_place_only: bool = False, ignores: Iterable = (), show_progress: bool = True, hip_clang_launch: bool = False, is_pytorch_extension: bool = False, hipify_extra_files_only: bool = False, clean_ctx: Optional[GeneratedFileCleaner] = None ) -> HipifyFinalResult: if project_directory == "": project_directory = os.getcwd() # Verify the project directory exists. if not os.path.exists(project_directory): print("The project folder specified does not exist.") sys.exit(1) # If no output directory, provide a default one. if not output_directory: project_directory.rstrip("/") output_directory = project_directory + "_amd" if project_directory != output_directory: includes = [include.replace(project_directory, output_directory) for include in includes] ignores = [ignore.replace(project_directory, output_directory) for ignore in ignores] # Copy from project directory to output directory if not done already. 
if not os.path.exists(output_directory): shutil.copytree(project_directory, output_directory) all_files = list(matched_files_iter(output_directory, includes=includes, ignores=ignores, extensions=extensions, out_of_place_only=out_of_place_only, is_pytorch_extension=is_pytorch_extension)) all_files_set = set(all_files) for f in extra_files: if not os.path.isabs(f): f = os.path.join(output_directory, f) if f not in all_files_set: all_files.append(f) # List all files in header_include_paths to ensure they are hipified from pathlib import Path for header_include_dir in header_include_dirs: if os.path.isabs(header_include_dir): header_include_dir_path = Path(header_include_dir) else: header_include_dir_path = Path(os.path.join(output_directory, header_include_dir)) for path in header_include_dir_path.rglob('*'): if ( path.is_file() and _fnmatch(str(path), includes) and (not _fnmatch(str(path), ignores)) and match_extensions(path.name, header_extensions) ): all_files.append(str(path)) if clean_ctx is None: clean_ctx = GeneratedFileCleaner(keep_intermediates=True) # Preprocessing statistics. stats: Dict[str, List] = {"unsupported_calls": [], "kernel_launches": []} for filepath in (all_files if not hipify_extra_files_only else extra_files): preprocess_file_and_save_result(output_directory, filepath, all_files, header_include_dirs, stats, hip_clang_launch, is_pytorch_extension, clean_ctx, show_progress) print(bcolors.OKGREEN + "Successfully preprocessed all matching files." + bcolors.ENDC, file=sys.stderr) # Show detailed summary if show_detailed: compute_stats(stats) return HIPIFY_FINAL_RESULT
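
# --- Hedged usage sketch (illustration only; not part of the upstream file) ---
# A minimal example of how an out-of-tree extension build might drive hipify(),
# loosely modeled on what torch.utils.cpp_extension does for ROCm builds. The
# directory and source names are placeholders supplied by the caller; only the
# keyword arguments are taken from the signature above.
def _example_hipify_extension(extension_dir: str, sources: Iterable) -> HipifyFinalResult:
    abs_sources = [os.path.abspath(os.path.join(extension_dir, s)) for s in sources]
    with GeneratedFileCleaner(keep_intermediates=True) as clean_ctx:
        return hipify(
            project_directory=extension_dir,
            output_directory=extension_dir,   # hipify in place, next to the originals
            includes=[os.path.join(extension_dir, '*')],
            extra_files=abs_sources,
            is_pytorch_extension=True,
            hipify_extra_files_only=True,     # only touch the listed sources
            show_progress=False,
            clean_ctx=clean_ctx,
        )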
pytorch-master
torch/utils/hipify/hipify_python.py
pytorch-master
torch/utils/bottleneck/__init__.py
import argparse import cProfile import pstats import sys import os from typing import Dict import torch from torch.autograd import profiler from torch.utils.collect_env import get_env_info def redirect_argv(new_argv): sys.argv[:] = new_argv[:] def compiled_with_cuda(sysinfo): if sysinfo.cuda_compiled_version: return 'compiled w/ CUDA {}'.format(sysinfo.cuda_compiled_version) return 'not compiled w/ CUDA' env_summary = """ -------------------------------------------------------------------------------- Environment Summary -------------------------------------------------------------------------------- PyTorch {pytorch_version}{debug_str} {cuda_compiled} Running with Python {py_version} and {cuda_runtime} `{pip_version} list` truncated output: {pip_list_output} """.strip() def run_env_analysis(): print('Running environment analysis...') info = get_env_info() result: Dict[str, str] = dict() debug_str = '' if info.is_debug_build: debug_str = ' DEBUG' cuda_avail = '' if info.is_cuda_available: cuda = info.cuda_runtime_version if cuda is not None: cuda_avail = 'CUDA ' + cuda else: cuda = 'CUDA unavailable' pip_version = info.pip_version pip_list_output = info.pip_packages if pip_list_output is None: pip_list_output = 'Unable to fetch' result = { 'debug_str': debug_str, 'pytorch_version': info.torch_version, 'cuda_compiled': compiled_with_cuda(info), 'py_version': '{}.{}'.format(sys.version_info[0], sys.version_info[1]), 'cuda_runtime': cuda_avail, 'pip_version': pip_version, 'pip_list_output': pip_list_output, } return env_summary.format(**result) def run_cprofile(code, globs, launch_blocking=False): print('Running your script with cProfile') prof = cProfile.Profile() prof.enable() exec(code, globs, None) prof.disable() return prof cprof_summary = """ -------------------------------------------------------------------------------- cProfile output -------------------------------------------------------------------------------- """.strip() def print_cprofile_summary(prof, sortby='tottime', topk=15): print(cprof_summary) cprofile_stats = pstats.Stats(prof).sort_stats(sortby) cprofile_stats.print_stats(topk) def run_autograd_prof(code, globs): def run_prof(use_cuda=False): with profiler.profile(use_cuda=use_cuda) as prof: exec(code, globs, None) return prof print('Running your script with the autograd profiler...') result = [run_prof(use_cuda=False)] if torch.cuda.is_available(): result.append(run_prof(use_cuda=True)) else: result.append(None) return result autograd_prof_summary = """ -------------------------------------------------------------------------------- autograd profiler output ({mode} mode) -------------------------------------------------------------------------------- {description} {cuda_warning} {output} """.strip() def print_autograd_prof_summary(prof, mode, sortby='cpu_time', topk=15): valid_sortby = ['cpu_time', 'cuda_time', 'cpu_time_total', 'cuda_time_total', 'count'] if sortby not in valid_sortby: warn = ('WARNING: invalid sorting option for autograd profiler results: {}\n' 'Expected `cpu_time`, `cpu_time_total`, or `count`. 
' 'Defaulting to `cpu_time`.') print(warn.format(sortby)) sortby = 'cpu_time' if mode == 'CUDA': cuda_warning = ('\n\tBecause the autograd profiler uses the CUDA event API,\n' '\tthe CUDA time column reports approximately max(cuda_time, cpu_time).\n' '\tPlease ignore this output if your code does not use CUDA.\n') else: cuda_warning = '' sorted_events = sorted(prof.function_events, key=lambda x: getattr(x, sortby), reverse=True) topk_events = sorted_events[:topk] result = { 'mode': mode, 'description': 'top {} events sorted by {}'.format(topk, sortby), 'output': torch.autograd.profiler_util._build_table(topk_events), 'cuda_warning': cuda_warning } print(autograd_prof_summary.format(**result)) descript = """ `bottleneck` is a tool that can be used as an initial step for debugging bottlenecks in your program. It summarizes runs of your script with the Python profiler and PyTorch\'s autograd profiler. Because your script will be profiled, please ensure that it exits in a finite amount of time. For more complicated uses of the profilers, please see https://docs.python.org/3/library/profile.html and https://pytorch.org/docs/master/autograd.html#profiler for more information. """.strip() def parse_args(): parser = argparse.ArgumentParser(description=descript) parser.add_argument('scriptfile', type=str, help='Path to the script to be run. ' 'Usually run with `python path/to/script`.') parser.add_argument('args', type=str, nargs=argparse.REMAINDER, help='Command-line arguments to be passed to the script.') return parser.parse_args() def cpu_time_total(autograd_prof): return sum([event.cpu_time_total for event in autograd_prof.function_events]) def main(): args = parse_args() # Customizable constants. scriptfile = args.scriptfile scriptargs = [] if args.args is None else args.args scriptargs.insert(0, scriptfile) cprofile_sortby = 'tottime' cprofile_topk = 15 autograd_prof_sortby = 'cpu_time_total' autograd_prof_topk = 15 redirect_argv(scriptargs) sys.path.insert(0, os.path.dirname(scriptfile)) with open(scriptfile, 'rb') as stream: code = compile(stream.read(), scriptfile, 'exec') globs = { '__file__': scriptfile, '__name__': '__main__', '__package__': None, '__cached__': None, } print(descript) env_summary = run_env_analysis() if torch.cuda.is_available(): torch.cuda.init() cprofile_prof = run_cprofile(code, globs) autograd_prof_cpu, autograd_prof_cuda = run_autograd_prof(code, globs) print(env_summary) print_cprofile_summary(cprofile_prof, cprofile_sortby, cprofile_topk) if not torch.cuda.is_available(): print_autograd_prof_summary(autograd_prof_cpu, 'CPU', autograd_prof_sortby, autograd_prof_topk) return # Print both the result of the CPU-mode and CUDA-mode autograd profilers # if their execution times are very different. cuda_prof_exec_time = cpu_time_total(autograd_prof_cuda) if len(autograd_prof_cpu.function_events) > 0: cpu_prof_exec_time = cpu_time_total(autograd_prof_cpu) pct_diff = (cuda_prof_exec_time - cpu_prof_exec_time) / cuda_prof_exec_time if abs(pct_diff) > 0.05: print_autograd_prof_summary(autograd_prof_cpu, 'CPU', autograd_prof_sortby, autograd_prof_topk) print_autograd_prof_summary(autograd_prof_cuda, 'CUDA', autograd_prof_sortby, autograd_prof_topk) if __name__ == '__main__': main()
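
# --- Usage note (illustration only; not part of the upstream file) ---
# This module is meant to be run as a wrapper around your own script:
#
#     python -m torch.utils.bottleneck /path/to/source/script.py [args]
#
# A rough programmatic equivalent (the script name is a placeholder) would be:
#
#     import sys
#     from torch.utils.bottleneck.__main__ import main
#     sys.argv = ["bottleneck", "my_training_script.py", "--epochs", "1"]
#     main()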
pytorch-master
torch/utils/bottleneck/__main__.py
pytorch-master
torch/utils/jit/__init__.py
from contextlib import contextmanager
from typing import Any, List, Tuple, cast
import random
import torch
import time
from torch.utils.benchmark import Timer


def extract_ir(filename: str) -> List[str]:
    BEGIN = "<GRAPH_EXPORT>"
    END = "</GRAPH_EXPORT>"
    pfx = None
    current = ""
    graphs = []
    with open(filename, "r") as f:
        split_strs = f.read().split(BEGIN)
        for i, split_str in enumerate(split_strs):
            if i == 0:
                continue
            end_loc = split_str.find(END)
            if end_loc == -1:
                continue
            s = split_str[:end_loc]
            pfx = split_strs[i - 1].splitlines()[-1]
            lines = [x[len(pfx):] for x in s.splitlines(keepends=True)]
            graphs.append(''.join(lines))
    return graphs


def make_tensor_from_type(inp_type: torch._C.TensorType):
    size = inp_type.sizes()
    stride = inp_type.strides()
    device = inp_type.device()
    dtype = inp_type.dtype()
    assert size is not None
    assert stride is not None
    assert device is not None
    assert dtype is not None
    return torch.empty_strided(size=size, stride=stride, device=device, dtype=dtype)


def load_graph_and_inputs(ir: str) -> Tuple[Any, List[Any]]:
    graph = torch._C.parse_ir(ir, parse_tensor_constants=True)
    graph.makeMultiOutputIntoTuple()
    inputs = []
    for inp in graph.inputs():
        if isinstance(inp.type(), torch._C.FloatType):
            inputs.append(random.uniform(.1, 100))
        elif isinstance(inp.type(), torch._C.IntType):
            inputs.append(random.randint(1, 100))
        elif isinstance(inp.type(), torch._C.TensorType):
            tensorType = cast(torch._C.TensorType, inp.type())
            inputs.append(make_tensor_from_type(tensorType))
        elif isinstance(inp.type(), torch._C.BoolType):
            inputs.append(random.randint(0, 1) == 1)
        else:
            raise NotImplementedError(f"A default value is not implemented for type {inp.type()}")

    func = torch._C._create_function_from_graph("forward", graph)
    torch._C._jit_pass_erase_shape_information(func.graph)
    return (func, inputs)


def time_cuda(fn, inputs, test_runs):
    t = Timer(stmt="fn(*inputs)", globals={"fn": fn, "inputs" : inputs})
    times = t.blocked_autorange()
    return times.median * 1000  # time in ms


def time_cpu(fn, inputs, test_runs):
    s = time.perf_counter()
    for _ in range(test_runs):
        fn(*inputs)
    e = time.perf_counter()
    return (e - s) / test_runs * 1000  # time in ms


def run_test(ir, inputs, *, warmup_runs=10, test_runs=20) -> float:
    graph, _ = load_graph_and_inputs(ir)
    for _ in range(warmup_runs):
        graph(*inputs)

    is_cpu = None
    for input in inputs:
        if isinstance(input, torch.Tensor):
            is_cpu = input.device.type == "cpu"
            break
    assert is_cpu is not None

    out = time_cpu(graph, inputs, test_runs) if is_cpu else time_cuda(graph, inputs, test_runs)
    return out


@contextmanager
def no_fuser(*args, **kwargs):
    old_optimize = torch._C._get_graph_executor_optimize(False)
    try:
        yield
    finally:
        torch._C._get_graph_executor_optimize(old_optimize)


def run_baseline_no_fusion(ir, inputs) -> float:
    with no_fuser():
        return run_test(ir, inputs)


def run_nnc(ir, inputs, dynamic) -> float:
    try:
        strat = [("DYNAMIC", 10)] if dynamic else [("STATIC", 10)]
        old_strat = torch.jit.set_fusion_strategy(strat)
        with torch.jit.fuser("fuser1"):
            return run_test(ir, inputs)
    finally:
        torch.jit.set_fusion_strategy(old_strat)


def run_nvfuser(ir, inputs) -> float:
    with torch.jit.fuser("fuser2"):
        return run_test(ir, inputs)
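
# --- Hedged usage sketch (illustration only; not part of the upstream file) ---
# Typical flow: collect a log file whose TorchScript graphs are wrapped in
# <GRAPH_EXPORT>...</GRAPH_EXPORT> markers (how that log is produced is assumed
# here, not shown above), then replay each graph with and without fusion.
# The log path and helper name are placeholders.
def _example_compare_fusers(log_path: str) -> None:
    for ir in extract_ir(log_path):
        _, inputs = load_graph_and_inputs(ir)
        baseline_ms = run_baseline_no_fusion(ir, inputs)
        nnc_ms = run_nnc(ir, inputs, dynamic=False)
        print(f"baseline {baseline_ms:.3f} ms vs NNC {nnc_ms:.3f} ms")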
pytorch-master
torch/utils/jit/log_extract.py
from collections import OrderedDict
import contextlib
from typing import Dict, Any

from tensorboard.compat.proto.config_pb2 import RunMetadata
from tensorboard.compat.proto.graph_pb2 import GraphDef
from tensorboard.compat.proto.step_stats_pb2 import StepStats, DeviceStepStats
from tensorboard.compat.proto.versions_pb2 import VersionDef

import torch
from ._proto_graph import node_proto

methods_OP = [
    "attributeNames",
    "hasMultipleOutputs",
    "hasUses",
    "inputs",
    "kind",
    "outputs",
    "outputsSize",
    "scopeName",
]
# Some additional methods to explore for methods_IO are
#
#   'unique' (type int)
#   'type' (type <Tensor<class 'torch._C.Type'>>)
#
# But the below are sufficient for now.
methods_IO = ["node", "offset", "debugName"]

GETATTR_KIND = "prim::GetAttr"
CLASSTYPE_KIND = "ClassType"


class NodeBase(object):
    def __init__(
        self,
        debugName=None,
        inputs=None,
        scope=None,
        tensor_size=None,
        op_type="UnSpecified",
        attributes="",
    ):
        # TODO: Specify a __slots__ for this class or potentially
        # use a namedtuple instead
        self.debugName = debugName
        self.inputs = inputs
        self.tensor_size = tensor_size
        self.kind = op_type
        self.attributes = attributes
        self.scope = scope

    def __repr__(self):
        repr = []
        repr.append(str(type(self)))
        for m in dir(self):
            if "__" not in m:
                repr.append(
                    m + ": " + str(getattr(self, m)) + str(type(getattr(self, m)))
                )
        return "\n".join(repr) + "\n\n"


class NodePy(NodeBase):
    def __init__(self, node_cpp, valid_methods):
        super(NodePy, self).__init__(node_cpp)
        valid_methods = valid_methods[:]
        self.inputs = []

        for m in valid_methods:
            if m == "inputs" or m == "outputs":
                list_of_node = list(getattr(node_cpp, m)())
                io_unique_names = []
                io_tensor_sizes = []
                for n in list_of_node:
                    io_unique_names.append(n.debugName())
                    if n.isCompleteTensor():
                        io_tensor_sizes.append(n.type().sizes())
                    else:
                        io_tensor_sizes.append(None)

                setattr(self, m, io_unique_names)
                setattr(self, m + "tensor_size", io_tensor_sizes)
            else:
                setattr(self, m, getattr(node_cpp, m)())


class NodePyIO(NodePy):
    def __init__(self, node_cpp, input_or_output=None):
        super(NodePyIO, self).__init__(node_cpp, methods_IO)
        try:
            tensor_size = node_cpp.type().sizes()
        except RuntimeError:
            tensor_size = [
                1,
            ]  # fails when a constant model is used.
        self.tensor_size = tensor_size
        # Kind attribute string is purely descriptive and will be shown
        # in detailed information for the node in TensorBoard's graph plugin.
        #
        # NodePyOP nodes get this from their kind() method.
        self.kind = "Parameter"
        if input_or_output:
            self.input_or_output = input_or_output
            self.kind = "IO Node"


class NodePyOP(NodePy):
    def __init__(self, node_cpp):
        super(NodePyOP, self).__init__(node_cpp, methods_OP)
        # Replace single quotes, which cause strange behavior in TensorBoard.
        # TODO: See if we can remove this in the future
        self.attributes = str(
            {k: _node_get(node_cpp, k) for k in node_cpp.attributeNames()}
        ).replace("'", " ")
        self.kind = node_cpp.kind()


class GraphPy(object):
    """Helper class to convert a torch.nn.Module to a GraphDef proto for
    visualization with TensorBoard.

    GraphDef generation operates in two passes:

    In the first pass, all nodes are read and saved to two lists.
    One list is for input/output nodes (nodes_io), which only have inbound
    or outbound connections, but not both. Another list is for internal
    operator nodes (nodes_op). The first pass also saves every scope name
    that appears in the nodes to the scope_name_appeared list for later
    processing.

    In the second pass, scope names are fully applied to all nodes.
    debugNameToScopedName is a mapping from a node's ID to its fully
    qualified scope name, e.g. Net1/Linear[0]/1. Unfortunately, torch.jit
    does not produce fully correct scope output, so this is nontrivial.
    The functions populate_namespace_from_OP_to_IO and find_common_root
    are used to assign a scope name to each node heuristically, based on
    the connections between nodes. Bookkeeping is done with
    shallowest_scope_name and scope_name_appeared.
    """

    def __init__(self):
        self.nodes_op = []
        self.nodes_io = OrderedDict()
        self.unique_name_to_scoped_name = {}
        self.shallowest_scope_name = "default"
        self.scope_name_appeared = []

    def append(self, x):
        if isinstance(x, NodePyIO):
            self.nodes_io[x.debugName] = x
        if isinstance(x, NodePyOP):
            self.nodes_op.append(x)

    def printall(self):
        print("all nodes")
        for node in self.nodes_op:
            print(node)
        for key in self.nodes_io:
            print(self.nodes_io[key])

    def find_common_root(self):
        for fullscope in self.scope_name_appeared:
            if fullscope:
                self.shallowest_scope_name = fullscope.split("/")[0]

    def populate_namespace_from_OP_to_IO(self):
        for node in self.nodes_op:
            for node_output, outputSize in zip(node.outputs, node.outputstensor_size):
                self.scope_name_appeared.append(node.scopeName)
                self.nodes_io[node_output] = NodeBase(
                    node_output,
                    node.inputs,
                    node.scopeName,
                    outputSize,
                    op_type=node.kind,
                    attributes=node.attributes,
                )

        self.find_common_root()

        for node in self.nodes_op:
            for input_node_id in node.inputs:
                self.unique_name_to_scoped_name[input_node_id] = (
                    node.scopeName + "/" + input_node_id
                )

        for key, node in self.nodes_io.items():
            if type(node) == NodeBase:
                self.unique_name_to_scoped_name[key] = node.scope + "/" + node.debugName
            if hasattr(node, "input_or_output"):
                self.unique_name_to_scoped_name[key] = (
                    node.input_or_output + "/" + node.debugName
                )

            if hasattr(node, "scope") and node.scope is not None:
                self.unique_name_to_scoped_name[key] = node.scope + "/" + node.debugName
                if node.scope == "" and self.shallowest_scope_name:
                    self.unique_name_to_scoped_name[node.debugName] = (
                        self.shallowest_scope_name + "/" + node.debugName
                    )

        # replace name
        for key, node in self.nodes_io.items():
            self.nodes_io[key].inputs = [
                self.unique_name_to_scoped_name[node_input_id]
                for node_input_id in node.inputs
            ]
            if node.debugName in self.unique_name_to_scoped_name:
                self.nodes_io[key].debugName = self.unique_name_to_scoped_name[
                    node.debugName
                ]

    def to_proto(self):
        """Converts the graph representation of this GraphPy object to the
        format required by TensorBoard.
        """
        # TODO: compute correct memory usage and CPU time once
        # PyTorch supports it
        nodes = []
        for v in self.nodes_io.values():
            nodes.append(
                node_proto(
                    v.debugName,
                    input=v.inputs,
                    outputsize=v.tensor_size,
                    op=v.kind,
                    attributes=v.attributes,
                )
            )
        return nodes


def parse(graph, trace, args=None, omit_useless_nodes=True):
    """This method parses an optimized PyTorch model graph and produces
    a list of nodes and node stats for eventual conversion to TensorBoard
    protobuf format.

    Args:
      graph (PyTorch module): The model graph to be parsed.
      trace (PyTorch JIT TracedModule): The model trace to be parsed.
      args (tuple): input tensor[s] for the model.
      omit_useless_nodes (boolean): Whether to omit nodes that have no uses
        from the graph.
    """
    n_inputs = len(args)

    scope = {}
    nodes_py = GraphPy()
    for node in graph.inputs():
        if omit_useless_nodes:
            if (
                len(node.uses()) == 0
            ):  # number of users of the node (= number of outputs / fanout)
                continue

        if node.type().kind() != CLASSTYPE_KIND:
            nodes_py.append(NodePyIO(node, "input"))

    attr_to_scope: Dict[Any, str] = dict()
    for node in graph.nodes():
        if node.kind() == GETATTR_KIND:
            attr_name = node.s("name")
            attr_key = node.output().debugName()
            parent = node.input().node()
            if (
                parent.kind() == GETATTR_KIND
            ):  # If the parent node is not the top-level "self" node
                parent_attr_name = parent.s("name")
                parent_attr_key = parent.output().debugName()
                parent_scope = attr_to_scope[parent_attr_key]
                attr_scope = parent_scope.split("/")[-1]
                attr_to_scope[attr_key] = "{}/{}.{}".format(
                    parent_scope, attr_scope, attr_name
                )
            else:
                attr_to_scope[attr_key] = "__module.{}".format(attr_name)
            # We don't need classtype nodes; scope will provide this information
            if node.output().type().kind() != CLASSTYPE_KIND:
                node_py = NodePyOP(node)
                node_py.scopeName = attr_to_scope[attr_key]  # type: ignore[attr-defined]
                nodes_py.append(node_py)
        else:
            nodes_py.append(NodePyOP(node))

    for i, node in enumerate(graph.outputs()):  # Create sink nodes for output ops
        node_pyio = NodePyIO(node, "output")
        node_pyio.debugName = "output.{}".format(i + 1)
        node_pyio.inputs = [node.debugName()]
        nodes_py.append(node_pyio)

    def parse_traced_name(module):
        if isinstance(module, torch.jit.TracedModule):
            module_name = module._name
        else:
            module_name = getattr(module, "original_name", "Module")
        return module_name

    alias_to_name = dict()
    base_name = parse_traced_name(trace)
    for name, module in trace.named_modules(prefix="__module"):
        mod_name = parse_traced_name(module)
        attr_name = name.split(".")[-1]
        alias_to_name[name] = "{}[{}]".format(mod_name, attr_name)

    for node in nodes_py.nodes_op:
        module_aliases = node.scopeName.split("/")
        replacements = [
            alias_to_name[alias] if alias in alias_to_name else alias.split(".")[-1]
            for alias in module_aliases
        ]
        node.scopeName = base_name
        if any(replacements):
            node.scopeName += "/" + "/".join(replacements)

    nodes_py.populate_namespace_from_OP_to_IO()
    return nodes_py.to_proto()


def graph(model, args, verbose=False, use_strict_trace=True):
    """
    This method processes a PyTorch model and produces a `GraphDef` proto
    that can be logged to TensorBoard.

    Args:
      model (PyTorch module): The model to be parsed.
      args (tuple): input tensor[s] for the model.
      verbose (bool): Whether to print out verbose information while
        processing.
      use_strict_trace (bool): Whether to pass keyword argument `strict` to
        `torch.jit.trace`. Pass False when you want the tracer to
        record your mutable container types (list, dict).
    """
    with _set_model_to_eval(model):
        try:
            trace = torch.jit.trace(model, args, strict=use_strict_trace)
            graph = trace.graph
            torch._C._jit_pass_inline(graph)
        except RuntimeError as e:
            print(e)
            print("Error occurred; no graph was saved.")
            raise e

    if verbose:
        print(graph)
    list_of_nodes = parse(graph, trace, args)
    # We are hardcoding that this was run on CPU even though it might have actually
    # run on GPU. Note this is what is shown in TensorBoard and has no bearing
    # on actual execution.
    # TODO: See if we can extract GPU vs CPU information from the PyTorch model
    # and pass it correctly to TensorBoard.
    #
    # Definition of StepStats and DeviceStepStats can be found at
    # https://github.com/tensorflow/tensorboard/blob/master/tensorboard/plugins/graph/tf_graph_common/test/graph-test.ts
    # and
    # https://github.com/tensorflow/tensorboard/blob/master/tensorboard/compat/proto/step_stats.proto
    stepstats = RunMetadata(
        step_stats=StepStats(dev_stats=[DeviceStepStats(device="/device:CPU:0")])
    )
    return GraphDef(node=list_of_nodes, versions=VersionDef(producer=22)), stepstats
    # The producer version has been reverse engineered from standard
    # TensorBoard logged data.


@contextlib.contextmanager
def _set_model_to_eval(model):
    """A context manager to temporarily set the training mode of ``model`` to eval."""
    if not isinstance(model, torch.jit.ScriptFunction):
        originally_training = model.training
        model.train(False)
        try:
            yield
        finally:
            model.train(originally_training)
    else:
        # Do nothing for ScriptFunction
        try:
            yield
        finally:
            pass


def _node_get(node: torch._C.Node, key: str):
    """Gets attributes of a node which is polymorphic over return type."""
    sel = node.kindOf(key)
    return getattr(node, sel)(key)
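# ---------------------------------------------------------------------------
# Usage sketch (added for illustration; not part of the original module).
# ``graph`` above is normally reached indirectly via
# ``torch.utils.tensorboard.SummaryWriter.add_graph(model, input_to_model)``.
# The tiny model below is a hypothetical stand-in, used only to show the
# expected inputs and the (GraphDef, RunMetadata) pair that comes back.
if __name__ == "__main__":
    import torch.nn as nn

    class _TinyNet(nn.Module):
        """A minimal model: just enough to produce a non-trivial graph."""

        def __init__(self):
            super().__init__()
            self.fc = nn.Linear(4, 2)

        def forward(self, x):
            return torch.relu(self.fc(x))

    graph_def, step_stats = graph(_TinyNet(), torch.randn(1, 4))
    # The producer version is hardcoded to 22 above; each node carries the
    # scoped name assembled by GraphPy (e.g. "_TinyNet/Linear[fc]/...").
    print(graph_def.versions.producer, len(graph_def.node))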
pytorch-master
torch/utils/tensorboard/_pytorch_graph.py
from typing import Optional
from tensorboard.compat.proto.node_def_pb2 import NodeDef
from tensorboard.compat.proto.attr_value_pb2 import AttrValue
from tensorboard.compat.proto.tensor_shape_pb2 import TensorShapeProto


def attr_value_proto(dtype, shape, s):
    """Creates a dict of objects matching
    https://github.com/tensorflow/tensorboard/blob/master/tensorboard/compat/proto/attr_value.proto
    specifically designed for a NodeDef. The values have been
    reverse engineered from standard TensorBoard logged data.
    """
    attr = {}
    if s is not None:
        attr["attr"] = AttrValue(s=s.encode(encoding="utf_8"))
    if shape is not None:
        shapeproto = tensor_shape_proto(shape)
        attr["_output_shapes"] = AttrValue(list=AttrValue.ListValue(shape=[shapeproto]))
    return attr


def tensor_shape_proto(outputsize):
    """Creates an object matching
    https://github.com/tensorflow/tensorboard/blob/master/tensorboard/compat/proto/tensor_shape.proto
    """
    return TensorShapeProto(dim=[TensorShapeProto.Dim(size=d) for d in outputsize])


def node_proto(
    name,
    op="UnSpecified",
    input=None,
    dtype=None,
    shape: Optional[tuple] = None,
    outputsize=None,
    attributes="",
):
    """Creates an object matching
    https://github.com/tensorflow/tensorboard/blob/master/tensorboard/compat/proto/node_def.proto
    """
    if input is None:
        input = []
    if not isinstance(input, list):
        input = [input]
    return NodeDef(
        name=name.encode(encoding="utf_8"),
        op=op,
        input=input,
        attr=attr_value_proto(dtype, outputsize, attributes),
    )
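# ---------------------------------------------------------------------------
# Usage sketch (added for illustration; not part of the original module).
# ``node_proto`` is the helper that ``GraphPy.to_proto`` in
# ``_pytorch_graph.py`` calls once per node. The names and sizes below are
# made up; they only demonstrate the shape of a typical call.
if __name__ == "__main__":
    example = node_proto(
        "Net1/Linear[fc]/weight",
        op="prim::GetAttr",
        input=["Net1/input.1"],
        outputsize=[2, 4],  # serialized into the "_output_shapes" attr
        attributes="{ name :  weight }",
    )
    # ``example`` is a tensorboard NodeDef whose name/op/input mirror the
    # arguments and whose attr map carries the shape and attribute string.
    print(example.name, example.op, list(example.input))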
pytorch-master
torch/utils/tensorboard/_proto_graph.py